1 /****************************************************************************
2 * Driver for Solarflare network controllers -
3 * resource management for Xen backend, OpenOnload, etc
4 * (including support for SFE4001 10GBT NIC)
6 * This file contains allocation of VI resources.
8 * Copyright 2005-2007: Solarflare Communications Inc,
9 * 9501 Jeronimo Road, Suite 250,
10 * Irvine, CA 92618, USA
12 * Developed and maintained by Solarflare Communications:
13 * <linux-xen-drivers@solarflare.com>
14 * <onload-dev@solarflare.com>
16 * Certain parts of the driver were implemented by
17 * Alexandra Kossovsky <Alexandra.Kossovsky@oktetlabs.ru>
18 * OKTET Labs Ltd, Russia,
19 * http://oktetlabs.ru, <info@oktetlabs.ru>
20 * by request of Solarflare Communications
23 * This program is free software; you can redistribute it and/or modify it
24 * under the terms of the GNU General Public License version 2 as published
25 * by the Free Software Foundation, incorporated herein by reference.
27 * This program is distributed in the hope that it will be useful,
28 * but WITHOUT ANY WARRANTY; without even the implied warranty of
29 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
30 * GNU General Public License for more details.
32 * You should have received a copy of the GNU General Public License
33 * along with this program; if not, write to the Free Software
34 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
35 ****************************************************************************
38 #include <ci/efrm/nic_table.h>
39 #include <ci/efhw/iopage.h>
40 #include <ci/driver/efab/hardware.h>
41 #include <ci/efhw/public.h>
42 #include <ci/efhw/falcon.h>
43 #include <ci/efrm/private.h>
44 #include <ci/efrm/buffer_table.h>
45 #include <ci/efrm/vi_resource_private.h>
46 #include <ci/efrm/efrm_client.h>
47 #include "efrm_internal.h"
50 /*** Data definitions ****************************************************/
/* Queue names indexed by EFRM_VI_RM_DMA_QUEUE_{TX,RX}; used only for
 * log/error messages in this file. */
52 static const char *dmaq_names[] = { "TX", "RX" };
/* Singleton manager for all VI resources (instance fifos, lock, etc.). */
54 struct vi_resource_manager *efrm_vi_manager;
56 /*** Forward references **************************************************/
/* Combined allocate/free entry point, defined later in this file.
 * alloc != 0 allocates into *virs_in_out; alloc == 0 frees *virs_in_out. */
59 efrm_vi_resource_alloc_or_free(struct efrm_client *client,
60 int alloc, struct vi_resource *evq_virs,
61 uint16_t vi_flags, int32_t evq_capacity,
62 int32_t txq_capacity, int32_t rxq_capacity,
63 uint8_t tx_q_tag, uint8_t rx_q_tag,
64 struct vi_resource **virs_in_out);
66 /*** Reference count handling ********************************************/
/* Take an additional reference on a VI's event queue.  Paired with
 * efrm_vi_rm_drop_ref() below. */
68 static inline void efrm_vi_rm_get_ref(struct vi_resource *virs)
70 atomic_inc(&virs->evq_refs);
/* Drop an event-queue reference; when the count hits zero the VI itself
 * is freed via efrm_vi_resource_alloc_or_free(alloc=false). */
73 static inline void efrm_vi_rm_drop_ref(struct vi_resource *virs)
75 EFRM_ASSERT(atomic_read(&virs->evq_refs) != 0);
76 if (atomic_dec_and_test(&virs->evq_refs))
77 efrm_vi_resource_alloc_or_free(virs->rs.rs_client, false, NULL,
78 0, 0, 0, 0, 0, 0, &virs);
81 /*** Instance numbers ****************************************************/
/* Allocate a VI instance number under the resource-manager lock.
 *
 * Three sources, in priority order:
 *   1. Falcon-A1 + EFHW_VI_RX_PHYS_ADDR_EN: the dedicated iSCSI DMA queue
 *      (only one exists, and it shares an instance with the 'char' event
 *      queue, so it is handed out only when no EVQ is requested);
 *   2. EFHW_VI_RM_WITH_INTERRUPT: pop from instances_with_interrupt fifo;
 *   3. otherwise: pop from instances_with_timer fifo.
 */
83 static inline int efrm_vi_rm_alloc_id(uint16_t vi_flags, int32_t evq_capacity)
85 irq_flags_t lock_flags;
89 if (efrm_nic_tablep->a_nic == NULL)	/* ?? FIXME: surely not right */
92 spin_lock_irqsave(&efrm_vi_manager->rm.rm_lock, lock_flags);
94 /* Falcon A1 RX phys addr weirdness. */
95 if (efrm_nic_tablep->a_nic->devtype.variant == 'A' &&
96 (vi_flags & EFHW_VI_RX_PHYS_ADDR_EN)) {
97 if (vi_flags & EFHW_VI_JUMBO_EN) {
98 /* Falcon-A cannot do phys + scatter. */
100 ("%s: falcon-A does not support phys+scatter mode",
103 } else if (efrm_vi_manager->iscsi_dmaq_instance_is_free
104 && evq_capacity == 0) {
105 /* Falcon-A has a single RXQ that gives the correct
106 * semantics for physical addressing.  However, it
107 * happens to have the same instance number as the
108 * 'char' event queue, so we cannot also hand out
109 * the event queue. */
110 efrm_vi_manager->iscsi_dmaq_instance_is_free = false;
111 instance = FALCON_A1_ISCSI_DMAQ;
113 EFRM_WARN("%s: iSCSI receive queue not free",
120 if (vi_flags & EFHW_VI_RM_WITH_INTERRUPT) {
121 rc = __kfifo_get(efrm_vi_manager->instances_with_interrupt,
122 (unsigned char *)&instance, sizeof(instance));
/* __kfifo_get returns the number of bytes copied; anything other than
 * a full instance means the fifo was empty. */
123 if (rc != sizeof(instance)) {
124 EFRM_ASSERT(rc == 0);
130 /* Otherwise a normal run-of-the-mill VI. */
131 rc = __kfifo_get(efrm_vi_manager->instances_with_timer,
132 (unsigned char *)&instance, sizeof(instance));
133 if (rc != sizeof(instance)) {
134 EFRM_ASSERT(rc == 0);
139 spin_unlock_irqrestore(&efrm_vi_manager->rm.rm_lock, lock_flags);
/* Return a VI instance number to whichever pool it came from:
 * the Falcon-A1 iSCSI slot, the with-timer fifo, or the
 * with-interrupt fifo (selected by range check on the instance). */
143 static void efrm_vi_rm_free_id(int instance)
145 irq_flags_t lock_flags;
146 struct kfifo *instances;
148 if (efrm_nic_tablep->a_nic == NULL)	/* ?? FIXME: surely not right */
151 if (efrm_nic_tablep->a_nic->devtype.variant == 'A' &&
152 instance == FALCON_A1_ISCSI_DMAQ) {
153 EFRM_ASSERT(efrm_vi_manager->iscsi_dmaq_instance_is_free ==
155 spin_lock_irqsave(&efrm_vi_manager->rm.rm_lock, lock_flags);
156 efrm_vi_manager->iscsi_dmaq_instance_is_free = true;
157 spin_unlock_irqrestore(&efrm_vi_manager->rm.rm_lock,
160 if (instance >= efrm_vi_manager->with_timer_base &&
161 instance < efrm_vi_manager->with_timer_limit) {
162 instances = efrm_vi_manager->instances_with_timer;
164 EFRM_ASSERT(instance >=
165 efrm_vi_manager->with_interrupt_base);
166 EFRM_ASSERT(instance <
167 efrm_vi_manager->with_interrupt_limit);
168 instances = efrm_vi_manager->instances_with_interrupt;
/* kfifo_put must succeed: the fifo can never be fuller than when the
 * instance was originally taken from it. */
171 EFRM_VERIFY_EQ(kfifo_put(instances, (unsigned char *)&instance,
172 sizeof(instance)), sizeof(instance));
176 /*** Queue sizes *********************************************************/
178 /* NB. This should really take a nic as an argument, but that makes
179 * the buffer table allocation difficult. */
/* Size in bytes of the event queue buffer for this VI
 * (capacity in events times the hardware event size). */
180 uint32_t efrm_vi_rm_evq_bytes(struct vi_resource *virs
181 /*,struct efhw_nic *nic */)
183 return virs->evq_capacity * sizeof(efhw_event_t);
185 EXPORT_SYMBOL(efrm_vi_rm_evq_bytes);
187 /* NB. This should really take a nic as an argument, but that makes
188 * the buffer table allocation difficult. */
/* Size in bytes of the TX DMA queue buffer for this VI. */
189 uint32_t efrm_vi_rm_txq_bytes(struct vi_resource *virs
190 /*,struct efhw_nic *nic */)
192 return virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_TX] *
193 FALCON_DMA_TX_DESC_BYTES;
195 EXPORT_SYMBOL(efrm_vi_rm_txq_bytes);
197 /* NB. This should really take a nic as an argument, but that makes
198 * the buffer table allocation difficult. */
/* Size in bytes of the RX DMA queue buffer for this VI.  RX descriptor
 * size depends on whether the queue uses physical or buffer-table
 * addressing (EFHW_VI_RX_PHYS_ADDR_EN flag). */
199 uint32_t efrm_vi_rm_rxq_bytes(struct vi_resource *virs
200 /*,struct efhw_nic *nic */)
202 uint32_t bytes_per_desc = ((virs->flags & EFHW_VI_RX_PHYS_ADDR_EN)
203 ? FALCON_DMA_RX_PHYS_DESC_BYTES
204 : FALCON_DMA_RX_BUF_DESC_BYTES);
205 return virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_RX] * bytes_per_desc;
207 EXPORT_SYMBOL(efrm_vi_rm_rxq_bytes);
/* Round a requested queue size up to the smallest supported size.
 * 'sizes' is a bitmask of supported power-of-two sizes (starting at 256).
 * size_rq < 0 is treated as "minimum supported size". */
209 static int choose_size(int size_rq, unsigned sizes)
213 /* size_rq < 0 means default, but we interpret this as 'minimum'. */
215 for (size = 256;; size <<= 1)
216 if ((size & sizes) && size >= size_rq)
/* No supported size at or above the candidate remains: give up. */
218 else if ((sizes & ~((size - 1) | size)) == 0)
/* Round the requested EVQ/TXQ/RXQ capacities up to sizes the NIC
 * supports (per nic->evq_sizes etc.), rewriting virs in place.
 * A capacity of zero means "queue disabled" and is left untouched;
 * an unsatisfiable request is logged via EFRM_ERR. */
223 efrm_vi_rm_adjust_alloc_request(struct vi_resource *virs, struct efhw_nic *nic)
227 EFRM_ASSERT(nic->efhw_func);
229 if (virs->evq_capacity) {
230 capacity = choose_size(virs->evq_capacity, nic->evq_sizes);
232 EFRM_ERR("vi_resource: bad evq size %d (supported=%x)",
233 virs->evq_capacity, nic->evq_sizes);
236 virs->evq_capacity = capacity;
238 if (virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_TX]) {
240 choose_size(virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_TX],
243 EFRM_ERR("vi_resource: bad txq size %d (supported=%x)",
244 virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_TX],
248 virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_TX] = capacity;
250 if (virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_RX]) {
252 choose_size(virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_RX],
255 EFRM_ERR("vi_resource: bad rxq size %d (supported=%x)",
256 virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_RX],
260 virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_RX] = capacity;
266 /* remove the reference to the event queue in this VI resource and decrement
267 the event queue's use count */
266 /* remove the reference to the event queue in this VI resource and decrement
267    the event queue's use count */
/* If the EVQ is the VI's own (evq_virs == virs) only a trace is emitted;
 * an external EVQ additionally has its reference dropped via
 * efrm_vi_rm_drop_ref(). */
268 static inline void efrm_vi_rm_detach_evq(struct vi_resource *virs)
270 struct vi_resource *evq_virs;
272 EFRM_ASSERT(virs != NULL);
274 evq_virs = virs->evq_virs;
276 if (evq_virs != NULL) {
277 virs->evq_virs = NULL;
278 if (evq_virs == virs) {
279 EFRM_TRACE("%s: " EFRM_RESOURCE_FMT
280 " had internal event queue ", __func__,
281 EFRM_RESOURCE_PRI_ARG(virs->rs.rs_handle));
283 efrm_vi_rm_drop_ref(evq_virs);
284 EFRM_TRACE("%s: " EFRM_RESOURCE_FMT " had event queue "
285 EFRM_RESOURCE_FMT, __func__,
286 EFRM_RESOURCE_PRI_ARG(virs->rs.rs_handle),
287 EFRM_RESOURCE_PRI_ARG(evq_virs->rs.
291 EFRM_TRACE("%s: " EFRM_RESOURCE_FMT
292 " had no event queue (nothing to do)",
294 EFRM_RESOURCE_PRI_ARG(virs->rs.rs_handle));
298 /*** Buffer Table allocations ********************************************/
/* Allocate (is_alloc) or free (!is_alloc) buffer-table entries for the
 * TX and RX DMA queues of this VI.  Queues with capacity 0 are skipped.
 * The free path runs RX first, then TX — the reverse of allocation. */
301 efrm_vi_rm_alloc_or_free_buffer_table(struct vi_resource *virs, bool is_alloc)
310 if (virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_TX]) {
311 bytes = efrm_vi_rm_txq_bytes(virs);
312 page_order = get_order(bytes);
313 rc = efrm_buffer_table_alloc(page_order,
314 (virs->dmaq_buf_tbl_alloc +
315 EFRM_VI_RM_DMA_QUEUE_TX));
318 ("%s: Error %d allocating TX buffer table entry",
324 if (virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_RX]) {
325 bytes = efrm_vi_rm_rxq_bytes(virs);
326 page_order = get_order(bytes);
327 rc = efrm_buffer_table_alloc(page_order,
328 (virs->dmaq_buf_tbl_alloc +
329 EFRM_VI_RM_DMA_QUEUE_RX));
332 ("%s: Error %d allocating RX buffer table entry",
/* Free path (also reached on allocation failure to unwind). */
342 if (virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_RX]) {
343 efrm_buffer_table_free(&virs->
345 [EFRM_VI_RM_DMA_QUEUE_RX]);
349 if (virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_TX]) {
350 efrm_buffer_table_free(&virs->
352 [EFRM_VI_RM_DMA_QUEUE_TX]);
359 /*** Per-NIC allocations *************************************************/
/* Bring up the event queue for this VI on the given NIC:
 * allocate buffer-table entries, allocate the iobuffer backing the
 * queue, then program the NIC's EVQ hardware.  No-op when
 * evq_capacity == 0.  On iobuffer failure the buffer-table entries
 * are released again. */
362 efrm_vi_rm_init_evq(struct vi_resource *virs, struct efhw_nic *nic)
364 int instance = EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle);
365 struct eventq_resource_hardware *evq_hw =
366 &virs->nic_info.evq_pages;
367 uint32_t buf_bytes = efrm_vi_rm_evq_bytes(virs);
370 if (virs->evq_capacity == 0)
372 evq_hw->capacity = virs->evq_capacity;
374 /* Allocate buffer table entries to map onto the iobuffer.  This
375 * currently allocates its own buffer table entries on Falcon which is
376 * a bit wasteful on a multi-NIC system. */
/* (unsigned)-1 marks "no buffer-table allocation"; checked by
 * efrm_vi_rm_fini_evq() before freeing. */
377 evq_hw->buf_tbl_alloc.base = (unsigned)-1;
378 rc = efrm_buffer_table_alloc(get_order(buf_bytes),
379 &evq_hw->buf_tbl_alloc);
381 EFHW_WARN("%s: failed (%d) to alloc %d buffer table entries",
382 __func__, rc, get_order(buf_bytes));
386 /* Allocate the event queue memory. */
387 rc = efhw_nic_event_queue_alloc_iobuffer(nic, evq_hw, instance,
390 EFRM_ERR("%s: Error allocating iobuffer: %d", __func__, rc);
391 efrm_buffer_table_free(&evq_hw->buf_tbl_alloc);
395 /* Initialise the event queue hardware */
396 efhw_nic_event_queue_enable(nic, instance, virs->evq_capacity,
397 efhw_iopages_dma_addr(&evq_hw->iobuff) +
399 evq_hw->buf_tbl_alloc.base,
402 EFRM_TRACE("%s: " EFRM_RESOURCE_FMT " capacity=%u", __func__,
403 EFRM_RESOURCE_PRI_ARG(virs->rs.rs_handle),
406 #if defined(__ia64__)
407 /* Page size may be large, so for now just increase the
408 * size of the requested evq up to a round number of
411 buf_bytes = CI_ROUNDUP(buf_bytes, PAGE_SIZE);
413 EFRM_ASSERT(buf_bytes % PAGE_SIZE == 0);
/* Account for the EVQ memory in the VI's mmap-able size. */
415 virs->mem_mmap_bytes += buf_bytes;
/* Tear down this VI's event queue: disable it on the NIC, free the
 * buffer-table entries (if any were allocated — base != (unsigned)-1),
 * and release the backing iobuffer.  No-op when evq_capacity == 0. */
421 efrm_vi_rm_fini_evq(struct vi_resource *virs, struct efhw_nic *nic)
423 int instance = EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle);
424 struct vi_resource_nic_info *nic_info = &virs->nic_info;
426 if (virs->evq_capacity == 0)
429 /* Zero the timer-value for this queue.
430    And Tell NIC to stop using this event queue. */
431 efhw_nic_event_queue_disable(nic, instance, 0);
433 if (nic_info->evq_pages.buf_tbl_alloc.base != (unsigned)-1)
434 efrm_buffer_table_free(&nic_info->evq_pages.buf_tbl_alloc);
436 efhw_iopages_free(nic, &nic_info->evq_pages.iobuff);
439 /*! FIXME: we should make sure this number is never zero (=> unprotected) */
440 /*! FIXME: put this definition in a relevant header (e.g. as (evqid)+1) */
/* Hardware owner-id for a DMA queue is currently just its EVQ instance. */
441 #define EFAB_EVQ_OWNER_ID(evqid) ((evqid))
/* Program one DMA queue (TX or RX per queue_type) on the NIC, binding it
 * to the VI's event queue and its buffer-table base address. */
444 efrm_vi_rm_init_dmaq(struct vi_resource *virs, int queue_type,
445 struct efhw_nic *nic)
449 efhw_buffer_addr_t buf_addr;
451 instance = EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle);
452 evq_instance = EFRM_RESOURCE_INSTANCE(virs->evq_virs->rs.rs_handle);
454 buf_addr = virs->dmaq_buf_tbl_alloc[queue_type].base;
456 if (queue_type == EFRM_VI_RM_DMA_QUEUE_TX) {
457 efhw_nic_dmaq_tx_q_init(nic,
459 evq_instance, /* evq */
460 EFAB_EVQ_OWNER_ID(evq_instance), /* owner */
461 virs->dmaq_tag[queue_type], /* tag */
462 virs->dmaq_capacity[queue_type], /* size of queue */
463 buf_addr, /* buffer index */
464 virs->flags); /* user specified Q attrs */
466 efhw_nic_dmaq_rx_q_init(nic,
468 evq_instance, /* evq */
469 EFAB_EVQ_OWNER_ID(evq_instance), /* owner */
470 virs->dmaq_tag[queue_type], /* tag */
471 virs->dmaq_capacity[queue_type], /* size of queue */
472 buf_addr, /* buffer index */
473 virs->flags); /* user specified Q attrs */
/* Init (init != 0) or fini one DMA queue for this VI on one NIC.
 *
 * Init path: allocate io pages, point the NIC buffer table at them,
 * account the memory in mem_mmap_bytes, verify the EVQ is non-empty,
 * then program the queue via efrm_vi_rm_init_dmaq().
 * Fini path: reset TX pacing (queue flush doesn't) and free the pages.
 * Queues with capacity 0 are disabled/skipped on both paths. */
478 efrm_vi_rm_init_or_fini_dmaq(struct vi_resource *virs,
479 int queue_type, int init,
480 struct efhw_nic *nic)
483 int instance = EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle);
485 struct vi_resource_nic_info *nic_info = &virs->nic_info;
488 struct efhw_iopages *iobuff;
493 /* Ignore disabled queues. */
494 if (virs->dmaq_capacity[queue_type] == 0) {
495 if (queue_type == EFRM_VI_RM_DMA_QUEUE_TX)
496 efhw_nic_dmaq_tx_q_disable(nic, instance);
498 efhw_nic_dmaq_rx_q_disable(nic, instance);
502 buf_bytes = (queue_type == EFRM_VI_RM_DMA_QUEUE_TX
503 ? efrm_vi_rm_txq_bytes(virs)
504 : efrm_vi_rm_rxq_bytes(virs));
506 page_order = get_order(buf_bytes);
508 rc = efhw_iopages_alloc(nic, &nic_info->dmaq_pages[queue_type],
511 EFRM_ERR("%s: Failed to allocate %s DMA buffer.", __func__,
512 dmaq_names[queue_type]);
516 num_pages = 1 << page_order;
517 iobuff = &nic_info->dmaq_pages[queue_type];
/* Map the freshly-allocated pages into the pre-reserved buffer-table
 * entries, then commit the update to the hardware. */
518 efhw_nic_buffer_table_set_n(nic,
519 virs->dmaq_buf_tbl_alloc[queue_type].base,
520 efhw_iopages_dma_addr(iobuff),
521 EFHW_NIC_PAGE_SIZE, 0, num_pages, 0);
523 falcon_nic_buffer_table_confirm(nic);
525 virs->mem_mmap_bytes += roundup(buf_bytes, PAGE_SIZE);
527 /* Make sure there is an event queue. */
528 if (virs->evq_virs->evq_capacity <= 0) {
529 EFRM_ERR("%s: Cannot use empty event queue for %s DMA",
530 __func__, dmaq_names[queue_type]);
535 efrm_vi_rm_init_dmaq(virs, queue_type, nic);
/* ---- fini path below ---- */
542 /* Ignore disabled queues. */
543 if (virs->dmaq_capacity[queue_type] == 0)
546 /* Ensure TX pacing turned off -- queue flush doesn't reset this. */
547 if (queue_type == EFRM_VI_RM_DMA_QUEUE_TX)
548 falcon_nic_pace(nic, instance, 0);
550 /* No need to disable the queue here.  Nobody is using it anyway. */
553 efhw_iopages_free(nic, &nic_info->dmaq_pages[queue_type]);
/* Per-NIC setup/teardown for a VI: EVQ first, then TX DMAQ, then RX
 * DMAQ, plus accounting of one control page in bar_mmap_bytes.
 * Teardown (and the unwind path on init failure) runs in reverse order. */
560 efrm_vi_rm_init_or_fini_nic(struct vi_resource *virs, int init,
561 struct efhw_nic *nic)
565 int instance = EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle);
571 rc = efrm_vi_rm_init_evq(virs, nic);
575 rc = efrm_vi_rm_init_or_fini_dmaq(virs, EFRM_VI_RM_DMA_QUEUE_TX,
580 rc = efrm_vi_rm_init_or_fini_dmaq(virs, EFRM_VI_RM_DMA_QUEUE_RX,
585 /* Allocate space for the control page. */
586 EFRM_ASSERT(falcon_tx_dma_page_offset(instance) < PAGE_SIZE);
587 EFRM_ASSERT(falcon_rx_dma_page_offset(instance) < PAGE_SIZE);
588 EFRM_ASSERT(falcon_timer_page_offset(instance) < PAGE_SIZE);
589 virs->bar_mmap_bytes += PAGE_SIZE;
/* Reverse-order teardown (also the error-unwind path). */
596 efrm_vi_rm_init_or_fini_dmaq(virs, EFRM_VI_RM_DMA_QUEUE_RX,
600 efrm_vi_rm_init_or_fini_dmaq(virs, EFRM_VI_RM_DMA_QUEUE_TX,
604 efrm_vi_rm_fini_evq(virs, nic);
607 EFRM_ASSERT(rc != 0 || !init);
/* Allocate (alloc != 0) or free (alloc == 0) a VI resource.
 *
 * Allocation: kmalloc+zero the object, pick an instance id (salvaging
 * flushed VIs once if none are free), round the queue sizes, attach the
 * event queue (external evq_virs or the VI's own), allocate buffer-table
 * entries, program the NIC, and register with the client.
 * Free: asserts evq_refs == 0 and evq detached, tears down in reverse
 * order and returns the instance id to its pool. */
612 efrm_vi_resource_alloc_or_free(struct efrm_client *client,
613 int alloc, struct vi_resource *evq_virs,
614 uint16_t vi_flags, int32_t evq_capacity,
615 int32_t txq_capacity, int32_t rxq_capacity,
616 uint8_t tx_q_tag, uint8_t rx_q_tag,
617 struct vi_resource **virs_in_out)
619 struct efhw_nic *nic = client->nic;
620 struct vi_resource *virs;
624 EFRM_ASSERT(virs_in_out);
625 EFRM_ASSERT(efrm_vi_manager);
626 EFRM_RESOURCE_MANAGER_ASSERT_VALID(&efrm_vi_manager->rm);
/* NOTE(review): rx_q_tag is masked with TX_DESCQ_LABEL_WIDTH and
 * tx_q_tag with RX_DESCQ_LABEL_WIDTH -- the widths look swapped.
 * Harmless only if the two label widths are equal; confirm against the
 * Falcon register definitions before changing. */
631 rx_q_tag &= (1 << TX_DESCQ_LABEL_WIDTH) - 1;
632 tx_q_tag &= (1 << RX_DESCQ_LABEL_WIDTH) - 1;
634 virs = kmalloc(sizeof(*virs), GFP_KERNEL);
636 EFRM_ERR("%s: Error allocating VI resource object",
641 memset(virs, 0, sizeof(*virs));
643 /* Some macros make the assumption that the struct efrm_resource is
644 * the first member of a struct vi_resource. */
645 EFRM_ASSERT(&virs->rs == (struct efrm_resource *) (virs));
647 instance = efrm_vi_rm_alloc_id(vi_flags, evq_capacity);
649 /* Clear out the close list... */
650 efrm_vi_rm_salvage_flushed_vis();
651 instance = efrm_vi_rm_alloc_id(vi_flags, evq_capacity);
653 EFRM_TRACE("%s: Salvaged a closed VI.", __func__);
657 /* Could flush resources and try again here. */
658 EFRM_ERR("%s: Out of appropriate VI resources", __func__);
663 EFRM_TRACE("%s: new VI ID %d", __func__, instance);
664 efrm_resource_init(&virs->rs, EFRM_RESOURCE_VI, instance);
666 /* Start with one reference.  Any external VIs using the EVQ of this
667 * resource will increment this reference rather than the resource
668 * reference to avoid DMAQ flushes from waiting for other DMAQ
669 * flushes to complete.  When the resource reference goes to zero,
670 * the DMAQ flush happens.  When the flush completes, this reference
671 * is decremented.  When this reference reaches zero, the instance
673 atomic_set(&virs->evq_refs, 1);
675 virs->bar_mmap_bytes = 0;
676 virs->mem_mmap_bytes = 0;
677 virs->evq_capacity = evq_capacity;
678 virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_TX] = txq_capacity;
679 virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_RX] = rxq_capacity;
680 virs->dmaq_tag[EFRM_VI_RM_DMA_QUEUE_TX] = tx_q_tag;
681 virs->dmaq_tag[EFRM_VI_RM_DMA_QUEUE_RX] = rx_q_tag;
682 virs->flags = vi_flags;
683 INIT_LIST_HEAD(&virs->tx_flush_link);
684 INIT_LIST_HEAD(&virs->rx_flush_link);
685 virs->tx_flushing = 0;
686 virs->rx_flushing = 0;
688 /* Adjust the queue sizes. */
689 rc = efrm_vi_rm_adjust_alloc_request(virs, nic);
691 goto fail_adjust_request;
693 /* Attach the EVQ early so that we can ensure that the NIC sets
695 if (evq_virs == NULL) {
697 EFRM_TRACE("%s: " EFRM_RESOURCE_FMT
698 " has no external event queue", __func__,
699 EFRM_RESOURCE_PRI_ARG(virs->rs.rs_handle));
701 /* Make sure the resource managers are the same. */
702 if (EFRM_RESOURCE_TYPE(evq_virs->rs.rs_handle) !=
704 EFRM_ERR("%s: Mismatched owner for event queue VI "
705 EFRM_RESOURCE_FMT, __func__,
706 EFRM_RESOURCE_PRI_ARG(evq_virs->rs.rs_handle));
709 EFRM_ASSERT(atomic_read(&evq_virs->evq_refs) != 0);
710 efrm_vi_rm_get_ref(evq_virs);
711 EFRM_TRACE("%s: " EFRM_RESOURCE_FMT " uses event queue "
714 EFRM_RESOURCE_PRI_ARG(virs->rs.rs_handle),
715 EFRM_RESOURCE_PRI_ARG(evq_virs->rs.rs_handle));
717 virs->evq_virs = evq_virs;
719 rc = efrm_vi_rm_alloc_or_free_buffer_table(virs, true);
721 goto fail_buffer_table;
723 rc = efrm_vi_rm_init_or_fini_nic(virs, true, nic);
727 efrm_client_add_resource(client, &virs->rs);
729 EFRM_TRACE("%s: Allocated " EFRM_RESOURCE_FMT, __func__,
730 EFRM_RESOURCE_PRI_ARG(virs->rs.rs_handle));
/* ---- free path below ---- */
735 EFRM_RESOURCE_ASSERT_VALID(&virs->rs, 1);
736 instance = EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle);
738 EFRM_TRACE("%s: Freeing %d", __func__,
739 EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle));
741 /* Destroying the VI.  The reference count must be zero. */
742 EFRM_ASSERT(atomic_read(&virs->evq_refs) == 0);
744 /* The EVQ should have gone (and DMA disabled) so that this
745 * function can't be re-entered to destroy the EVQ VI. */
746 EFRM_ASSERT(virs->evq_virs == NULL);
750 efrm_vi_rm_init_or_fini_nic(virs, false, nic);
752 efrm_vi_rm_alloc_or_free_buffer_table(virs, false);
755 efrm_vi_rm_detach_evq(virs);
759 EFRM_ASSERT(virs->evq_callback_fn == NULL);
760 EFRM_TRACE("%s: delete VI ID %d", __func__, instance);
761 efrm_vi_rm_free_id(instance);
764 efrm_client_put(virs->rs.rs_client);
765 EFRM_DO_DEBUG(memset(virs, 0, sizeof(*virs)));
773 /*** Resource object ****************************************************/
/* Public allocation entry point: thin wrapper around
 * efrm_vi_resource_alloc_or_free(alloc=true) that also reports the
 * resulting mmap sizes and (possibly rounded-up) queue capacities
 * through the optional out parameters. */
776 efrm_vi_resource_alloc(struct efrm_client *client,
777 struct vi_resource *evq_virs,
778 uint16_t vi_flags, int32_t evq_capacity,
779 int32_t txq_capacity, int32_t rxq_capacity,
780 uint8_t tx_q_tag, uint8_t rx_q_tag,
781 struct vi_resource **virs_out,
782 uint32_t *out_io_mmap_bytes,
783 uint32_t *out_mem_mmap_bytes,
784 uint32_t *out_txq_capacity, uint32_t *out_rxq_capacity)
787 EFRM_ASSERT(client != NULL);
788 rc = efrm_vi_resource_alloc_or_free(client, true, evq_virs, vi_flags,
789 evq_capacity, txq_capacity,
790 rxq_capacity, tx_q_tag, rx_q_tag,
793 if (out_io_mmap_bytes != NULL)
794 *out_io_mmap_bytes = (*virs_out)->bar_mmap_bytes;
795 if (out_mem_mmap_bytes != NULL)
796 *out_mem_mmap_bytes = (*virs_out)->mem_mmap_bytes;
797 if (out_txq_capacity != NULL)
799 (*virs_out)->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_TX];
800 if (out_rxq_capacity != NULL)
802 (*virs_out)->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_RX];
807 EXPORT_SYMBOL(efrm_vi_resource_alloc);
/* Final release of a VI whose DMA queues have finished flushing
 * (rs_ref_count must already be zero): detach the event queue, then
 * drop the evq reference, which frees the VI when it reaches zero. */
809 void efrm_vi_rm_free_flushed_resource(struct vi_resource *virs)
811 EFRM_ASSERT(virs != NULL);
812 EFRM_ASSERT(virs->rs.rs_ref_count == 0);
814 EFRM_TRACE("%s: " EFRM_RESOURCE_FMT, __func__,
815 EFRM_RESOURCE_PRI_ARG(virs->rs.rs_handle));
816 /* release the associated event queue then drop our own reference
818 efrm_vi_rm_detach_evq(virs);
819 efrm_vi_rm_drop_ref(virs);