/****************************************************************************
 * Driver for Solarflare network controllers -
 *          resource management for Xen backend, OpenOnload, etc
 *           (including support for SFE4001 10GBT NIC)
 *
 * This file contains allocation of VI resources.
 *
 * Copyright 2005-2007: Solarflare Communications Inc,
 *                      9501 Jeronimo Road, Suite 250,
 *                      Irvine, CA 92618, USA
 *
 * Developed and maintained by Solarflare Communications:
 *                      <linux-xen-drivers@solarflare.com>
 *                      <onload-dev@solarflare.com>
 *
 * Certain parts of the driver were implemented by
 *          Alexandra Kossovsky <Alexandra.Kossovsky@oktetlabs.ru>
 *          OKTET Labs Ltd, Russia,
 *          http://oktetlabs.ru, <info@oktetlabs.ru>
 *          by request of Solarflare Communications
 *
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 ****************************************************************************
 */

#include <ci/efrm/nic_table.h>
#include <ci/efhw/iopage.h>
#include <ci/driver/efab/hardware.h>
#include <ci/efhw/public.h>
#include <ci/efhw/falcon.h>
#include <ci/efrm/private.h>
#include <ci/efrm/buffer_table.h>
#include <ci/efrm/vi_resource_private.h>
#include <ci/efrm/efrm_client.h>
#include "efrm_internal.h"


/*** Data definitions ****************************************************/

static const char *dmaq_names[] = { "TX", "RX" };

struct vi_resource_manager *efrm_vi_manager;

/*** Forward references **************************************************/

static int
efrm_vi_resource_alloc_or_free(struct efrm_client *client,
                               int alloc, struct vi_resource *evq_virs,
                               uint16_t vi_flags, int32_t evq_capacity,
                               int32_t txq_capacity, int32_t rxq_capacity,
                               uint8_t tx_q_tag, uint8_t rx_q_tag,
                               struct vi_resource **virs_in_out);

/*** Reference count handling ********************************************/

static inline void efrm_vi_rm_get_ref(struct vi_resource *virs)
{
        atomic_inc(&virs->evq_refs);
}

static inline void efrm_vi_rm_drop_ref(struct vi_resource *virs)
{
        EFRM_ASSERT(atomic_read(&virs->evq_refs) != 0);
        if (atomic_dec_and_test(&virs->evq_refs))
                efrm_vi_resource_alloc_or_free(virs->rs.rs_client, false, NULL,
                                               0, 0, 0, 0, 0, 0, &virs);
}
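
/* Note on teardown: dropping the last EVQ reference above re-enters
 * efrm_vi_resource_alloc_or_free() on its destroy path (alloc=false),
 * which disables the hardware, frees the buffer-table entries and
 * iobuffers, and finally returns the instance number to its pool.  The
 * capacity and tag arguments are all zero because the destroy path
 * ignores them. */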

/*** Instance numbers ****************************************************/

static inline int efrm_vi_rm_alloc_id(uint16_t vi_flags, int32_t evq_capacity)
{
        irq_flags_t lock_flags;
        int instance;
        int rc;

        if (efrm_nic_tablep->a_nic == NULL)     /* ?? FIXME: surely not right */
                return -ENODEV;

        spin_lock_irqsave(&efrm_vi_manager->rm.rm_lock, lock_flags);

        /* Falcon A1 RX phys addr weirdness. */
        if (efrm_nic_tablep->a_nic->devtype.variant == 'A' &&
            (vi_flags & EFHW_VI_RX_PHYS_ADDR_EN)) {
                if (vi_flags & EFHW_VI_JUMBO_EN) {
                        /* Falcon-A cannot do phys + scatter. */
                        EFRM_WARN
                            ("%s: falcon-A does not support phys+scatter mode",
                             __func__);
                        instance = -1;
                } else if (efrm_vi_manager->iscsi_dmaq_instance_is_free
                           && evq_capacity == 0) {
                        /* Falcon-A has a single RXQ that gives the correct
                         * semantics for physical addressing.  However, it
                         * happens to have the same instance number as the
                         * 'char' event queue, so we cannot also hand out
                         * the event queue. */
                        efrm_vi_manager->iscsi_dmaq_instance_is_free = false;
                        instance = FALCON_A1_ISCSI_DMAQ;
                } else {
                        EFRM_WARN("%s: iSCSI receive queue not free",
                                  __func__);
                        instance = -1;
                }
                goto unlock_out;
        }

        if (vi_flags & EFHW_VI_RM_WITH_INTERRUPT) {
                rc = __kfifo_get(efrm_vi_manager->instances_with_interrupt,
                                 (unsigned char *)&instance, sizeof(instance));
                if (rc != sizeof(instance)) {
                        EFRM_ASSERT(rc == 0);
                        instance = -1;
                }
                goto unlock_out;
        }

        /* Otherwise a normal run-of-the-mill VI. */
        rc = __kfifo_get(efrm_vi_manager->instances_with_timer,
                         (unsigned char *)&instance, sizeof(instance));
        if (rc != sizeof(instance)) {
                EFRM_ASSERT(rc == 0);
                instance = -1;
        }

unlock_out:
        spin_unlock_irqrestore(&efrm_vi_manager->rm.rm_lock, lock_flags);
        return instance;
}
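
/* Instance numbers are recycled through two kfifo pools: one for VIs
 * that take a wakeup interrupt and one for VIs driven by the event
 * timer.  Note that efrm_vi_rm_alloc_id() returns -1 rather than an
 * errno when the relevant pool is empty; the caller treats this as
 * "out of resources" and may retry after salvaging flushed VIs. */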

static void efrm_vi_rm_free_id(int instance)
{
        irq_flags_t lock_flags;
        struct kfifo *instances;

        if (efrm_nic_tablep->a_nic == NULL)     /* ?? FIXME: surely not right */
                return;

        if (efrm_nic_tablep->a_nic->devtype.variant == 'A' &&
            instance == FALCON_A1_ISCSI_DMAQ) {
                EFRM_ASSERT(efrm_vi_manager->iscsi_dmaq_instance_is_free ==
                            false);
                spin_lock_irqsave(&efrm_vi_manager->rm.rm_lock, lock_flags);
                efrm_vi_manager->iscsi_dmaq_instance_is_free = true;
                spin_unlock_irqrestore(&efrm_vi_manager->rm.rm_lock,
                                       lock_flags);
        } else {
                if (instance >= efrm_vi_manager->with_timer_base &&
                    instance < efrm_vi_manager->with_timer_limit) {
                        instances = efrm_vi_manager->instances_with_timer;
                } else {
                        EFRM_ASSERT(instance >=
                                    efrm_vi_manager->with_interrupt_base);
                        EFRM_ASSERT(instance <
                                    efrm_vi_manager->with_interrupt_limit);
                        instances = efrm_vi_manager->instances_with_interrupt;
                }

                EFRM_VERIFY_EQ(kfifo_put(instances, (unsigned char *)&instance,
                                         sizeof(instance)), sizeof(instance));
        }
}

/*** Queue sizes *********************************************************/

/* NB. This should really take a nic as an argument, but that makes
 * the buffer table allocation difficult. */
uint32_t efrm_vi_rm_evq_bytes(struct vi_resource *virs
                              /*,struct efhw_nic *nic */)
{
        return virs->evq_capacity * sizeof(efhw_event_t);
}
EXPORT_SYMBOL(efrm_vi_rm_evq_bytes);
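
/* A worked example of the sizing arithmetic (assuming Falcon's 8-byte
 * event descriptors, i.e. sizeof(efhw_event_t) == 8): a 4096-entry
 * event queue needs 4096 * 8 = 32KiB, which get_order() rounds to an
 * order-3 (eight page) allocation with 4KiB pages. */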

/* NB. This should really take a nic as an argument, but that makes
 * the buffer table allocation difficult. */
uint32_t efrm_vi_rm_txq_bytes(struct vi_resource *virs
                              /*,struct efhw_nic *nic */)
{
        return virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_TX] *
            FALCON_DMA_TX_DESC_BYTES;
}
EXPORT_SYMBOL(efrm_vi_rm_txq_bytes);

/* NB. This should really take a nic as an argument, but that makes
 * the buffer table allocation difficult. */
uint32_t efrm_vi_rm_rxq_bytes(struct vi_resource *virs
                              /*,struct efhw_nic *nic */)
{
        uint32_t bytes_per_desc = ((virs->flags & EFHW_VI_RX_PHYS_ADDR_EN)
                                   ? FALCON_DMA_RX_PHYS_DESC_BYTES
                                   : FALCON_DMA_RX_BUF_DESC_BYTES);
        return virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_RX] * bytes_per_desc;
}
EXPORT_SYMBOL(efrm_vi_rm_rxq_bytes);

static int choose_size(int size_rq, unsigned sizes)
{
        int size;

        /* size_rq < 0 means default, but we interpret this as 'minimum'. */

        for (size = 256;; size <<= 1)
                if ((size & sizes) && size >= size_rq)
                        return size;
                else if ((sizes & ~((size - 1) | size)) == 0)
                        return -1;
}
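
/* Worked example (hypothetical sizes mask): sizes == 0x3300 advertises
 * ring sizes of 256, 512, 4096 and 8192 entries (bits 8, 9, 12 and 13
 * set).  choose_size(600, 0x3300) skips 256 and 512 (too small) and
 * 1024 and 2048 (not advertised), returning 4096.  A request above
 * 8192 exhausts the mask and yields -1, which the callers below report
 * as -E2BIG. */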

static int
efrm_vi_rm_adjust_alloc_request(struct vi_resource *virs, struct efhw_nic *nic)
{
        int capacity;

        EFRM_ASSERT(nic->efhw_func);

        if (virs->evq_capacity) {
                capacity = choose_size(virs->evq_capacity, nic->evq_sizes);
                if (capacity < 0) {
                        EFRM_ERR("vi_resource: bad evq size %d (supported=%x)",
                                 virs->evq_capacity, nic->evq_sizes);
                        return -E2BIG;
                }
                virs->evq_capacity = capacity;
        }
        if (virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_TX]) {
                capacity =
                    choose_size(virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_TX],
                                nic->txq_sizes);
                if (capacity < 0) {
                        EFRM_ERR("vi_resource: bad txq size %d (supported=%x)",
                                 virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_TX],
                                 nic->txq_sizes);
                        return -E2BIG;
                }
                virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_TX] = capacity;
        }
        if (virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_RX]) {
                capacity =
                    choose_size(virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_RX],
                                nic->rxq_sizes);
                if (capacity < 0) {
                        EFRM_ERR("vi_resource: bad rxq size %d (supported=%x)",
                                 virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_RX],
                                 nic->rxq_sizes);
                        return -E2BIG;
                }
                virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_RX] = capacity;
        }

        return 0;
}

/* Remove the reference to the event queue in this VI resource and decrement
 * the event queue's use count. */
static inline void efrm_vi_rm_detach_evq(struct vi_resource *virs)
{
        struct vi_resource *evq_virs;

        EFRM_ASSERT(virs != NULL);

        evq_virs = virs->evq_virs;

        if (evq_virs != NULL) {
                virs->evq_virs = NULL;
                if (evq_virs == virs) {
                        EFRM_TRACE("%s: " EFRM_RESOURCE_FMT
                                   " had internal event queue ", __func__,
                                   EFRM_RESOURCE_PRI_ARG(virs->rs.rs_handle));
                } else {
                        efrm_vi_rm_drop_ref(evq_virs);
                        EFRM_TRACE("%s: " EFRM_RESOURCE_FMT " had event queue "
                                   EFRM_RESOURCE_FMT, __func__,
                                   EFRM_RESOURCE_PRI_ARG(virs->rs.rs_handle),
                                   EFRM_RESOURCE_PRI_ARG(evq_virs->rs.rs_handle));
                }
        } else {
                EFRM_TRACE("%s: " EFRM_RESOURCE_FMT
                           " had no event queue (nothing to do)",
                           __func__,
                           EFRM_RESOURCE_PRI_ARG(virs->rs.rs_handle));
        }
}

/*** Buffer Table allocations ********************************************/

static int
efrm_vi_rm_alloc_or_free_buffer_table(struct vi_resource *virs, bool is_alloc)
{
        uint32_t bytes;
        int page_order;
        int rc;

        if (!is_alloc)
                goto destroy;

        if (virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_TX]) {
                bytes = efrm_vi_rm_txq_bytes(virs);
                page_order = get_order(bytes);
                rc = efrm_buffer_table_alloc(page_order,
                                             (virs->dmaq_buf_tbl_alloc +
                                              EFRM_VI_RM_DMA_QUEUE_TX));
                if (rc != 0) {
                        EFRM_TRACE
                            ("%s: Error %d allocating TX buffer table entry",
                             __func__, rc);
                        goto fail_txq_alloc;
                }
        }

        if (virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_RX]) {
                bytes = efrm_vi_rm_rxq_bytes(virs);
                page_order = get_order(bytes);
                rc = efrm_buffer_table_alloc(page_order,
                                             (virs->dmaq_buf_tbl_alloc +
                                              EFRM_VI_RM_DMA_QUEUE_RX));
                if (rc != 0) {
                        EFRM_TRACE
                            ("%s: Error %d allocating RX buffer table entry",
                             __func__, rc);
                        goto fail_rxq_alloc;
                }
        }
        return 0;

destroy:
        rc = 0;

        if (virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_RX]) {
                efrm_buffer_table_free(
                        &virs->dmaq_buf_tbl_alloc[EFRM_VI_RM_DMA_QUEUE_RX]);
        }
fail_rxq_alloc:

        if (virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_TX]) {
                efrm_buffer_table_free(
                        &virs->dmaq_buf_tbl_alloc[EFRM_VI_RM_DMA_QUEUE_TX]);
        }
fail_txq_alloc:

        return rc;
}

/*** Per-NIC allocations *************************************************/

static inline int
efrm_vi_rm_init_evq(struct vi_resource *virs, struct efhw_nic *nic)
{
        int instance = EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle);
        struct eventq_resource_hardware *evq_hw =
            &virs->nic_info.evq_pages;
        uint32_t buf_bytes = efrm_vi_rm_evq_bytes(virs);
        int rc;

        if (virs->evq_capacity == 0)
                return 0;
        evq_hw->capacity = virs->evq_capacity;

        /* Allocate buffer table entries to map onto the iobuffer.  This
         * currently allocates its own buffer table entries on Falcon which is
         * a bit wasteful on a multi-NIC system. */
        evq_hw->buf_tbl_alloc.base = (unsigned)-1;
        rc = efrm_buffer_table_alloc(get_order(buf_bytes),
                                     &evq_hw->buf_tbl_alloc);
        if (rc < 0) {
                EFHW_WARN("%s: failed (%d) to alloc %d buffer table entries",
                          __func__, rc, get_order(buf_bytes));
                return rc;
        }

        /* Allocate the event queue memory. */
        rc = efhw_nic_event_queue_alloc_iobuffer(nic, evq_hw, instance,
                                                 buf_bytes);
        if (rc != 0) {
                EFRM_ERR("%s: Error allocating iobuffer: %d", __func__, rc);
                efrm_buffer_table_free(&evq_hw->buf_tbl_alloc);
                return rc;
        }

        /* Initialise the event queue hardware. */
        efhw_nic_event_queue_enable(nic, instance, virs->evq_capacity,
                                    efhw_iopages_dma_addr(&evq_hw->iobuff) +
                                    evq_hw->iobuff_off,
                                    evq_hw->buf_tbl_alloc.base,
                                    instance < 64);

        EFRM_TRACE("%s: " EFRM_RESOURCE_FMT " capacity=%u", __func__,
                   EFRM_RESOURCE_PRI_ARG(virs->rs.rs_handle),
                   virs->evq_capacity);

#if defined(__ia64__)
        /* Page size may be large, so for now just increase the
         * size of the requested evq up to a round number of
         * pages.
         */
        buf_bytes = CI_ROUNDUP(buf_bytes, PAGE_SIZE);
#endif
        EFRM_ASSERT(buf_bytes % PAGE_SIZE == 0);

        virs->mem_mmap_bytes += buf_bytes;

        return 0;
}

static inline void
efrm_vi_rm_fini_evq(struct vi_resource *virs, struct efhw_nic *nic)
{
        int instance = EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle);
        struct vi_resource_nic_info *nic_info = &virs->nic_info;

        if (virs->evq_capacity == 0)
                return;

        /* Zero the timer value for this queue, and tell the NIC to stop
           using this event queue. */
        efhw_nic_event_queue_disable(nic, instance, 0);

        if (nic_info->evq_pages.buf_tbl_alloc.base != (unsigned)-1)
                efrm_buffer_table_free(&nic_info->evq_pages.buf_tbl_alloc);

        efhw_iopages_free(nic, &nic_info->evq_pages.iobuff);
}

/*! FIXME: we should make sure this number is never zero (=> unprotected) */
/*! FIXME: put this definition in a relevant header (e.g. as (evqid)+1) */
#define EFAB_EVQ_OWNER_ID(evqid) ((evqid))

void
efrm_vi_rm_init_dmaq(struct vi_resource *virs, int queue_type,
                     struct efhw_nic *nic)
{
        int instance;
        int evq_instance;
        efhw_buffer_addr_t buf_addr;

        instance = EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle);
        evq_instance = EFRM_RESOURCE_INSTANCE(virs->evq_virs->rs.rs_handle);

        buf_addr = virs->dmaq_buf_tbl_alloc[queue_type].base;

        if (queue_type == EFRM_VI_RM_DMA_QUEUE_TX) {
                efhw_nic_dmaq_tx_q_init(nic,
                        instance,       /* dmaq */
                        evq_instance,   /* evq */
                        EFAB_EVQ_OWNER_ID(evq_instance),        /* owner */
                        virs->dmaq_tag[queue_type],     /* tag */
                        virs->dmaq_capacity[queue_type], /* size of queue */
                        buf_addr,       /* buffer index */
                        virs->flags);   /* user specified Q attrs */
        } else {
                efhw_nic_dmaq_rx_q_init(nic,
                        instance,       /* dmaq */
                        evq_instance,   /* evq */
                        EFAB_EVQ_OWNER_ID(evq_instance),        /* owner */
                        virs->dmaq_tag[queue_type],     /* tag */
                        virs->dmaq_capacity[queue_type], /* size of queue */
                        buf_addr,       /* buffer index */
                        virs->flags);   /* user specified Q attrs */
        }
}

static int
efrm_vi_rm_init_or_fini_dmaq(struct vi_resource *virs,
                             int queue_type, int init,
                             struct efhw_nic *nic)
{
        int rc;
        int instance = EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle);
        uint32_t buf_bytes;
        struct vi_resource_nic_info *nic_info = &virs->nic_info;
        int page_order;
        uint32_t num_pages;
        struct efhw_iopages *iobuff;

        if (!init)
                goto destroy;

        /* Ignore disabled queues. */
        if (virs->dmaq_capacity[queue_type] == 0) {
                if (queue_type == EFRM_VI_RM_DMA_QUEUE_TX)
                        efhw_nic_dmaq_tx_q_disable(nic, instance);
                else
                        efhw_nic_dmaq_rx_q_disable(nic, instance);
                return 0;
        }

        buf_bytes = (queue_type == EFRM_VI_RM_DMA_QUEUE_TX
                     ? efrm_vi_rm_txq_bytes(virs)
                     : efrm_vi_rm_rxq_bytes(virs));

        page_order = get_order(buf_bytes);

        rc = efhw_iopages_alloc(nic, &nic_info->dmaq_pages[queue_type],
                                page_order);
        if (rc != 0) {
                EFRM_ERR("%s: Failed to allocate %s DMA buffer.", __func__,
                         dmaq_names[queue_type]);
                goto fail_iopages;
        }

        num_pages = 1 << page_order;
        iobuff = &nic_info->dmaq_pages[queue_type];
        efhw_nic_buffer_table_set_n(nic,
                                    virs->dmaq_buf_tbl_alloc[queue_type].base,
                                    efhw_iopages_dma_addr(iobuff),
                                    EFHW_NIC_PAGE_SIZE, 0, num_pages, 0);

        falcon_nic_buffer_table_confirm(nic);

        virs->mem_mmap_bytes += roundup(buf_bytes, PAGE_SIZE);

        /* Make sure there is an event queue. */
        if (virs->evq_virs->evq_capacity <= 0) {
                EFRM_ERR("%s: Cannot use empty event queue for %s DMA",
                         __func__, dmaq_names[queue_type]);
                rc = -EINVAL;
                goto fail_evq;
        }

        efrm_vi_rm_init_dmaq(virs, queue_type, nic);

        return 0;

destroy:
        rc = 0;

        /* Ignore disabled queues. */
        if (virs->dmaq_capacity[queue_type] == 0)
                return 0;

        /* Ensure TX pacing turned off -- queue flush doesn't reset this. */
        if (queue_type == EFRM_VI_RM_DMA_QUEUE_TX)
                falcon_nic_pace(nic, instance, 0);

        /* No need to disable the queue here.  Nobody is using it anyway. */

fail_evq:
        efhw_iopages_free(nic, &nic_info->dmaq_pages[queue_type]);
fail_iopages:

        return rc;
}

static int
efrm_vi_rm_init_or_fini_nic(struct vi_resource *virs, int init,
                            struct efhw_nic *nic)
{
        int rc;
#ifndef NDEBUG
        int instance = EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle);
#endif

        if (!init)
                goto destroy;

        rc = efrm_vi_rm_init_evq(virs, nic);
        if (rc != 0)
                goto fail_evq;

        rc = efrm_vi_rm_init_or_fini_dmaq(virs, EFRM_VI_RM_DMA_QUEUE_TX,
                                          init, nic);
        if (rc != 0)
                goto fail_txq;

        rc = efrm_vi_rm_init_or_fini_dmaq(virs, EFRM_VI_RM_DMA_QUEUE_RX,
                                          init, nic);
        if (rc != 0)
                goto fail_rxq;

        /* Allocate space for the control page. */
        EFRM_ASSERT(falcon_tx_dma_page_offset(instance) < PAGE_SIZE);
        EFRM_ASSERT(falcon_rx_dma_page_offset(instance) < PAGE_SIZE);
        EFRM_ASSERT(falcon_timer_page_offset(instance) < PAGE_SIZE);
        virs->bar_mmap_bytes += PAGE_SIZE;

        return 0;

destroy:
        rc = 0;

        efrm_vi_rm_init_or_fini_dmaq(virs, EFRM_VI_RM_DMA_QUEUE_RX,
                                     false, nic);
fail_rxq:

        efrm_vi_rm_init_or_fini_dmaq(virs, EFRM_VI_RM_DMA_QUEUE_TX,
                                     false, nic);
fail_txq:

        efrm_vi_rm_fini_evq(virs, nic);
fail_evq:

        EFRM_ASSERT(rc != 0 || !init);
        return rc;
}

static int
efrm_vi_resource_alloc_or_free(struct efrm_client *client,
                               int alloc, struct vi_resource *evq_virs,
                               uint16_t vi_flags, int32_t evq_capacity,
                               int32_t txq_capacity, int32_t rxq_capacity,
                               uint8_t tx_q_tag, uint8_t rx_q_tag,
                               struct vi_resource **virs_in_out)
{
        struct efhw_nic *nic = client->nic;
        struct vi_resource *virs;
        int rc;
        int instance;

        EFRM_ASSERT(virs_in_out);
        EFRM_ASSERT(efrm_vi_manager);
        EFRM_RESOURCE_MANAGER_ASSERT_VALID(&efrm_vi_manager->rm);

        if (!alloc)
                goto destroy;

        rx_q_tag &= (1 << RX_DESCQ_LABEL_WIDTH) - 1;
        tx_q_tag &= (1 << TX_DESCQ_LABEL_WIDTH) - 1;

        virs = kzalloc(sizeof(*virs), GFP_KERNEL);
        if (virs == NULL) {
                EFRM_ERR("%s: Error allocating VI resource object",
                         __func__);
                rc = -ENOMEM;
                goto fail_alloc;
        }

        /* Some macros make the assumption that the struct efrm_resource is
         * the first member of a struct vi_resource. */
        EFRM_ASSERT(&virs->rs == (struct efrm_resource *) (virs));

        instance = efrm_vi_rm_alloc_id(vi_flags, evq_capacity);
        if (instance < 0) {
                /* Clear out the close list... */
                efrm_vi_rm_salvage_flushed_vis();
                instance = efrm_vi_rm_alloc_id(vi_flags, evq_capacity);
                if (instance >= 0)
                        EFRM_TRACE("%s: Salvaged a closed VI.", __func__);
        }

        if (instance < 0) {
                /* Could flush resources and try again here. */
                EFRM_ERR("%s: Out of appropriate VI resources", __func__);
                rc = -EBUSY;
                goto fail_alloc_id;
        }

        EFRM_TRACE("%s: new VI ID %d", __func__, instance);
        efrm_resource_init(&virs->rs, EFRM_RESOURCE_VI, instance);

        /* Start with one reference.  Any external VIs using the EVQ of this
         * resource will increment this reference rather than the resource
         * reference to avoid DMAQ flushes from waiting for other DMAQ
         * flushes to complete.  When the resource reference goes to zero,
         * the DMAQ flush happens.  When the flush completes, this reference
         * is decremented.  When this reference reaches zero, the instance
         * is freed. */
        atomic_set(&virs->evq_refs, 1);

        virs->bar_mmap_bytes = 0;
        virs->mem_mmap_bytes = 0;
        virs->evq_capacity = evq_capacity;
        virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_TX] = txq_capacity;
        virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_RX] = rxq_capacity;
        virs->dmaq_tag[EFRM_VI_RM_DMA_QUEUE_TX] = tx_q_tag;
        virs->dmaq_tag[EFRM_VI_RM_DMA_QUEUE_RX] = rx_q_tag;
        virs->flags = vi_flags;
        INIT_LIST_HEAD(&virs->tx_flush_link);
        INIT_LIST_HEAD(&virs->rx_flush_link);
        virs->tx_flushing = 0;
        virs->rx_flushing = 0;

        /* Adjust the queue sizes. */
        rc = efrm_vi_rm_adjust_alloc_request(virs, nic);
        if (rc != 0)
                goto fail_adjust_request;

        /* Attach the EVQ early so that we can ensure that the NIC sets
         * match. */
        if (evq_virs == NULL) {
                evq_virs = virs;
                EFRM_TRACE("%s: " EFRM_RESOURCE_FMT
                           " has no external event queue", __func__,
                           EFRM_RESOURCE_PRI_ARG(virs->rs.rs_handle));
        } else {
                /* Make sure the resource managers are the same. */
                if (EFRM_RESOURCE_TYPE(evq_virs->rs.rs_handle) !=
                    EFRM_RESOURCE_VI) {
                        EFRM_ERR("%s: Mismatched owner for event queue VI "
                                 EFRM_RESOURCE_FMT, __func__,
                                 EFRM_RESOURCE_PRI_ARG(evq_virs->rs.rs_handle));
                        rc = -EINVAL;
                        goto fail_adjust_request;
                }
                EFRM_ASSERT(atomic_read(&evq_virs->evq_refs) != 0);
                efrm_vi_rm_get_ref(evq_virs);
                EFRM_TRACE("%s: " EFRM_RESOURCE_FMT " uses event queue "
                           EFRM_RESOURCE_FMT,
                           __func__,
                           EFRM_RESOURCE_PRI_ARG(virs->rs.rs_handle),
                           EFRM_RESOURCE_PRI_ARG(evq_virs->rs.rs_handle));
        }
        virs->evq_virs = evq_virs;

        rc = efrm_vi_rm_alloc_or_free_buffer_table(virs, true);
        if (rc != 0)
                goto fail_buffer_table;

        rc = efrm_vi_rm_init_or_fini_nic(virs, true, nic);
        if (rc != 0)
                goto fail_init_nic;

        efrm_client_add_resource(client, &virs->rs);
        *virs_in_out = virs;
        EFRM_TRACE("%s: Allocated " EFRM_RESOURCE_FMT, __func__,
                   EFRM_RESOURCE_PRI_ARG(virs->rs.rs_handle));
        return 0;

destroy:
        virs = *virs_in_out;
        EFRM_RESOURCE_ASSERT_VALID(&virs->rs, 1);
        instance = EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle);

        EFRM_TRACE("%s: Freeing %d", __func__,
                   EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle));

        /* Destroying the VI.  The reference count must be zero. */
        EFRM_ASSERT(atomic_read(&virs->evq_refs) == 0);

        /* The EVQ should have gone (and DMA disabled) so that this
         * function can't be re-entered to destroy the EVQ VI. */
        EFRM_ASSERT(virs->evq_virs == NULL);
        rc = 0;

fail_init_nic:
        efrm_vi_rm_init_or_fini_nic(virs, false, nic);

        efrm_vi_rm_alloc_or_free_buffer_table(virs, false);
fail_buffer_table:

        efrm_vi_rm_detach_evq(virs);

fail_adjust_request:

        EFRM_ASSERT(virs->evq_callback_fn == NULL);
        EFRM_TRACE("%s: delete VI ID %d", __func__, instance);
        efrm_vi_rm_free_id(instance);
fail_alloc_id:
        if (!alloc)
                efrm_client_put(virs->rs.rs_client);
        EFRM_DO_DEBUG(memset(virs, 0, sizeof(*virs)));
        kfree(virs);
fail_alloc:
        *virs_in_out = NULL;

        return rc;
}

/*** Resource object  ****************************************************/

int
efrm_vi_resource_alloc(struct efrm_client *client,
                       struct vi_resource *evq_virs,
                       uint16_t vi_flags, int32_t evq_capacity,
                       int32_t txq_capacity, int32_t rxq_capacity,
                       uint8_t tx_q_tag, uint8_t rx_q_tag,
                       struct vi_resource **virs_out,
                       uint32_t *out_io_mmap_bytes,
                       uint32_t *out_mem_mmap_bytes,
                       uint32_t *out_txq_capacity, uint32_t *out_rxq_capacity)
{
        int rc;
        EFRM_ASSERT(client != NULL);
        rc = efrm_vi_resource_alloc_or_free(client, true, evq_virs, vi_flags,
                                            evq_capacity, txq_capacity,
                                            rxq_capacity, tx_q_tag, rx_q_tag,
                                            virs_out);
        if (rc == 0) {
                if (out_io_mmap_bytes != NULL)
                        *out_io_mmap_bytes = (*virs_out)->bar_mmap_bytes;
                if (out_mem_mmap_bytes != NULL)
                        *out_mem_mmap_bytes = (*virs_out)->mem_mmap_bytes;
                if (out_txq_capacity != NULL)
                        *out_txq_capacity =
                            (*virs_out)->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_TX];
                if (out_rxq_capacity != NULL)
                        *out_rxq_capacity =
                            (*virs_out)->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_RX];
        }

        return rc;
}
EXPORT_SYMBOL(efrm_vi_resource_alloc);
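
/* A minimal sketch of a caller (hypothetical capacities and flags;
 * assumes "client" was obtained from efrm_client_get() or similar):
 *
 *      struct vi_resource *virs;
 *      uint32_t io_bytes, mem_bytes, txq_cap, rxq_cap;
 *      int rc;
 *
 *      rc = efrm_vi_resource_alloc(client, NULL, 0, 512, 512, 512, 0, 0,
 *                                  &virs, &io_bytes, &mem_bytes,
 *                                  &txq_cap, &rxq_cap);
 *      if (rc < 0)
 *              return rc;
 *
 * Passing NULL for evq_virs gives the VI its own (internal) event
 * queue; txq_cap and rxq_cap report the capacities after rounding by
 * efrm_vi_rm_adjust_alloc_request(). */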

void efrm_vi_rm_free_flushed_resource(struct vi_resource *virs)
{
        EFRM_ASSERT(virs != NULL);
        EFRM_ASSERT(virs->rs.rs_ref_count == 0);

        EFRM_TRACE("%s: " EFRM_RESOURCE_FMT, __func__,
                   EFRM_RESOURCE_PRI_ARG(virs->rs.rs_handle));
        /* Release the associated event queue, then drop our own reference
         * count. */
        efrm_vi_rm_detach_evq(virs);
        efrm_vi_rm_drop_ref(virs);
}