2 * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following conditions are met:
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following disclaimer.
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
32 #include <linux/skbuff.h>
33 #include <linux/netdevice.h>
34 #include <linux/etherdevice.h>
35 #include <linux/if_vlan.h>
37 #include <linux/tcp.h>
38 #include <linux/dma-mapping.h>
39 #include <linux/slab.h>
40 #include <linux/prefetch.h>
46 #include "firmware_exports.h"
47 #include "cxgb3_offload.h"
51 #define SGE_RX_SM_BUF_SIZE 1536
53 #define SGE_RX_COPY_THRES 256
54 #define SGE_RX_PULL_LEN 128
56 #define SGE_PG_RSVD SMP_CACHE_BYTES
58 * Page chunk size for FL0 buffers if FL0 is to be populated with page chunks.
59 * It must be a divisor of PAGE_SIZE. If set to 0 FL0 will use sk_buffs instead.
63 #define FL0_PG_CHUNK_SIZE 2048
65 /* Use skbuffs for XEN kernels. LRO is already disabled */
66 #define FL0_PG_CHUNK_SIZE 0
69 #define FL0_PG_ORDER 0
70 #define FL0_PG_ALLOC_SIZE (PAGE_SIZE << FL0_PG_ORDER)
73 #define FL1_PG_CHUNK_SIZE (PAGE_SIZE > 8192 ? 16384 : 8192)
74 #define FL1_PG_ORDER (PAGE_SIZE > 8192 ? 0 : 1)
76 #define FL1_PG_CHUNK_SIZE 0
77 #define FL1_PG_ORDER 0
80 #define FL1_PG_ALLOC_SIZE (PAGE_SIZE << FL1_PG_ORDER)
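/*
 * Worked example (assuming 4KB pages): FL0 carves two 2KB chunks out of each
 * order-0 page, while FL1 hands out a single 8KB chunk per order-1 allocation,
 * so FL1 can hold large/jumbo frames while FL0 serves small packets.
 */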
82 #define SGE_RX_DROP_THRES 16
83 #define RX_RECLAIM_PERIOD (HZ/4)
86 * Max number of Rx buffers we replenish at a time.
88 #define MAX_RX_REFILL 16U
90 * Period of the Tx buffer reclaim timer. This timer does not need to run
91 * frequently as Tx buffers are usually reclaimed by new Tx packets.
93 #define TX_RECLAIM_PERIOD (HZ / 4)
94 #define TX_RECLAIM_TIMER_CHUNK 64U
95 #define TX_RECLAIM_CHUNK 16U
97 /* WR size in bytes */
98 #define WR_LEN (WR_FLITS * 8)
101 * Types of Tx queues in each queue set. Order here matters, do not change.
103 enum { TXQ_ETH, TXQ_OFLD, TXQ_CTRL };
105 /* Values for sge_txq.flags */
107 TXQ_RUNNING = 1 << 0, /* fetch engine is running */
108 TXQ_LAST_PKT_DB = 1 << 1, /* last packet rang the doorbell */
112 __be64 flit[TX_DESC_FLITS];
122 struct tx_sw_desc { /* SW state per Tx descriptor */
124 u8 eop; /* set if last descriptor for packet */
125 u8 addr_idx; /* buffer index of first SGL entry in descriptor */
126 u8 fragidx; /* first page fragment associated with descriptor */
127 s8 sflit; /* start flit of first SGL entry in descriptor */
130 struct rx_sw_desc { /* SW state per Rx descriptor */
133 struct fl_pg_chunk pg_chunk;
135 DEFINE_DMA_UNMAP_ADDR(dma_addr);
138 struct rsp_desc { /* response queue descriptor */
139 struct rss_header rss_hdr;
147 * Holds unmapping information for Tx packets that need deferred unmapping.
148 * This structure lives at skb->head and must be allocated by callers.
150 struct deferred_unmap_info {
151 struct pci_dev *pdev;
152 dma_addr_t addr[MAX_SKB_FRAGS + 1];
156 * Maps a number of flits to the number of Tx descriptors that can hold them.
159 * desc = 1 + (flits - 2) / (WR_FLITS - 1).
161 * HW allows up to 4 descriptors to be combined into a WR.
163 static u8 flit_desc_map[] = {
165 #if SGE_NUM_GENBITS == 1
166 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
167 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
168 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
169 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
170 #elif SGE_NUM_GENBITS == 2
171 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
172 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
173 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
174 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
176 # error "SGE_NUM_GENBITS must be 1 or 2"
180 static inline struct sge_qset *fl_to_qset(const struct sge_fl *q, int qidx)
182 return container_of(q, struct sge_qset, fl[qidx]);
185 static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q)
187 return container_of(q, struct sge_qset, rspq);
190 static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx)
192 return container_of(q, struct sge_qset, txq[qidx]);
196 * refill_rspq - replenish an SGE response queue
197 * @adapter: the adapter
198 * @q: the response queue to replenish
199 * @credits: how many new responses to make available
201 * Replenishes a response queue by making the supplied number of responses
204 static inline void refill_rspq(struct adapter *adapter,
205 const struct sge_rspq *q, unsigned int credits)
208 t3_write_reg(adapter, A_SG_RSPQ_CREDIT_RETURN,
209 V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
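/*
 * Note: the write above returns @credits response-queue entries to the SGE;
 * V_RSPQ() selects the response queue context and V_CREDITS() carries the
 * count of entries the hardware may now reuse.
 */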
213 * need_skb_unmap - does the platform need unmapping of sk_buffs?
215 * Returns true if the platform needs sk_buff unmapping. The compiler
216 * optimizes away the unnecessary unmapping code when this returns false.
218 static inline int need_skb_unmap(void)
220 #ifdef CONFIG_NEED_DMA_MAP_STATE
228 * unmap_skb - unmap a packet main body and its page fragments
230 * @q: the Tx queue containing Tx descriptors for the packet
231 * @cidx: index of Tx descriptor
232 * @pdev: the PCI device
234 * Unmap the main body of an sk_buff and its page fragments, if any.
235 * Because of the fairly complicated structure of our SGLs and the desire
236 * to conserve space for metadata, the information necessary to unmap an
237 * sk_buff is spread across the sk_buff itself (buffer lengths), the HW Tx
238 * descriptors (the physical addresses of the various data buffers), and
239 * the SW descriptor state (assorted indices). The send functions
240 * initialize the indices for the first packet descriptor so we can unmap
241 * the buffers held in the first Tx descriptor here, and we have enough
242 * information at this point to set the state for the next Tx descriptor.
244 * Note that it is possible to clean up the first descriptor of a packet
245 * before the send routines have written the next descriptors, but this
246 * race does not cause any problem. We just end up writing the unmapping
247 * info for the descriptor first.
249 static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
250 unsigned int cidx, struct pci_dev *pdev)
252 const struct sg_ent *sgp;
253 struct tx_sw_desc *d = &q->sdesc[cidx];
254 int nfrags, frag_idx, curflit, j = d->addr_idx;
256 sgp = (struct sg_ent *)&q->desc[cidx].flit[d->sflit];
257 frag_idx = d->fragidx;
259 if (frag_idx == 0 && skb_headlen(skb)) {
260 pci_unmap_single(pdev, be64_to_cpu(sgp->addr[0]),
261 skb_headlen(skb), PCI_DMA_TODEVICE);
265 curflit = d->sflit + 1 + j;
266 nfrags = skb_shinfo(skb)->nr_frags;
268 while (frag_idx < nfrags && curflit < WR_FLITS) {
269 pci_unmap_page(pdev, be64_to_cpu(sgp->addr[j]),
270 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]),
281 if (frag_idx < nfrags) { /* SGL continues into next Tx descriptor */
282 d = cidx + 1 == q->size ? q->sdesc : d + 1;
283 d->fragidx = frag_idx;
285 d->sflit = curflit - WR_FLITS - j; /* sflit can be -1 */
290 * free_tx_desc - reclaims Tx descriptors and their buffers
291 * @adapter: the adapter
292 * @q: the Tx queue to reclaim descriptors from
293 * @n: the number of descriptors to reclaim
295 * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
296 * Tx buffers. Called with the Tx queue lock held.
298 static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
301 struct tx_sw_desc *d;
302 struct pci_dev *pdev = adapter->pdev;
303 unsigned int cidx = q->cidx;
305 const int need_unmap = need_skb_unmap() &&
306 q->cntxt_id >= FW_TUNNEL_SGEEC_START;
310 if (d->skb) { /* an SGL is present */
312 unmap_skb(d->skb, q, cidx, pdev);
319 if (++cidx == q->size) {
328 * reclaim_completed_tx - reclaims completed Tx descriptors
329 * @adapter: the adapter
330 * @q: the Tx queue to reclaim completed descriptors from
331 * @chunk: maximum number of descriptors to reclaim
333 * Reclaims Tx descriptors that the SGE has indicated it has processed,
334 * and frees the associated buffers if possible. Called with the Tx queue locked.
337 static inline unsigned int reclaim_completed_tx(struct adapter *adapter,
341 unsigned int reclaim = q->processed - q->cleaned;
343 reclaim = min(chunk, reclaim);
345 free_tx_desc(adapter, q, reclaim);
346 q->cleaned += reclaim;
347 q->in_use -= reclaim;
349 return q->processed - q->cleaned;
353 * should_restart_tx - are there enough resources to restart a Tx queue?
356 * Checks if there are enough descriptors to restart a suspended Tx queue.
358 static inline int should_restart_tx(const struct sge_txq *q)
360 unsigned int r = q->processed - q->cleaned;
362 return q->in_use - r < (q->size >> 1);
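/*
 * clear_rx_desc() drops one reference on an Rx buffer: for page-chunk free
 * lists it decrements the chunk's shared count, unmaps the page only when the
 * last chunk is gone, and then releases the page; for sk_buff free lists it
 * simply unmaps the buffer.
 */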
365 static void clear_rx_desc(struct pci_dev *pdev, const struct sge_fl *q,
366 struct rx_sw_desc *d)
368 if (q->use_pages && d->pg_chunk.page) {
369 (*d->pg_chunk.p_cnt)--;
370 if (!*d->pg_chunk.p_cnt)
373 q->alloc_size, PCI_DMA_FROMDEVICE);
375 put_page(d->pg_chunk.page);
376 d->pg_chunk.page = NULL;
378 pci_unmap_single(pdev, dma_unmap_addr(d, dma_addr),
379 q->buf_size, PCI_DMA_FROMDEVICE);
386 * free_rx_bufs - free the Rx buffers on an SGE free list
387 * @pdev: the PCI device associated with the adapter
388 * @rxq: the SGE free list to clean up
390 * Release the buffers on an SGE free-buffer Rx queue. HW fetching from
391 * this queue should be stopped before calling this function.
393 static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
395 unsigned int cidx = q->cidx;
397 while (q->credits--) {
398 struct rx_sw_desc *d = &q->sdesc[cidx];
401 clear_rx_desc(pdev, q, d);
402 if (++cidx == q->size)
406 if (q->pg_chunk.page) {
407 __free_pages(q->pg_chunk.page, q->order);
408 q->pg_chunk.page = NULL;
413 * add_one_rx_buf - add a packet buffer to a free-buffer list
414 * @va: buffer start VA
415 * @len: the buffer length
416 * @d: the HW Rx descriptor to write
417 * @sd: the SW Rx descriptor to write
418 * @gen: the generation bit value
419 * @pdev: the PCI device associated with the adapter
421 * Add a buffer of the given length to the supplied HW and SW Rx
424 static inline int add_one_rx_buf(void *va, unsigned int len,
425 struct rx_desc *d, struct rx_sw_desc *sd,
426 unsigned int gen, struct pci_dev *pdev)
430 mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE);
431 if (unlikely(pci_dma_mapping_error(pdev, mapping)))
434 dma_unmap_addr_set(sd, dma_addr, mapping);
436 d->addr_lo = cpu_to_be32(mapping);
437 d->addr_hi = cpu_to_be32((u64) mapping >> 32);
439 d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
440 d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
444 static inline int add_one_rx_chunk(dma_addr_t mapping, struct rx_desc *d,
447 d->addr_lo = cpu_to_be32(mapping);
448 d->addr_hi = cpu_to_be32((u64) mapping >> 32);
450 d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
451 d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
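/*
 * alloc_pg_chunk() hands out buf_size-sized chunks carved from a high-order
 * page: a fresh page is allocated and DMA-mapped only when the free list has
 * no partially used page left, and a reference count stored in the reserved
 * bytes at the end of the allocation (p_cnt) tracks how many chunks are still
 * outstanding.
 */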
455 static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q,
456 struct rx_sw_desc *sd, gfp_t gfp,
459 if (!q->pg_chunk.page) {
462 q->pg_chunk.page = alloc_pages(gfp, order);
463 if (unlikely(!q->pg_chunk.page))
465 q->pg_chunk.va = page_address(q->pg_chunk.page);
466 q->pg_chunk.p_cnt = q->pg_chunk.va + (PAGE_SIZE << order) -
468 q->pg_chunk.offset = 0;
469 mapping = pci_map_page(adapter->pdev, q->pg_chunk.page,
470 0, q->alloc_size, PCI_DMA_FROMDEVICE);
471 q->pg_chunk.mapping = mapping;
473 sd->pg_chunk = q->pg_chunk;
475 prefetch(sd->pg_chunk.p_cnt);
477 q->pg_chunk.offset += q->buf_size;
478 if (q->pg_chunk.offset == (PAGE_SIZE << order))
479 q->pg_chunk.page = NULL;
481 q->pg_chunk.va += q->buf_size;
482 get_page(q->pg_chunk.page);
485 if (sd->pg_chunk.offset == 0)
486 *sd->pg_chunk.p_cnt = 1;
488 *sd->pg_chunk.p_cnt += 1;
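/*
 * ring_fl_db() batches doorbell writes: newly added buffers are advertised to
 * the hardware only once at least a quarter of the list's credits are pending,
 * by writing the free list's egress context id to the kdoorbell register.
 */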
493 static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
495 if (q->pend_cred >= q->credits / 4) {
498 t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
503 * refill_fl - refill an SGE free-buffer list
504 * @adapter: the adapter
505 * @q: the free-list to refill
506 * @n: the number of new buffers to allocate
507 * @gfp: the gfp flags for allocating new buffers
509 * (Re)populate an SGE free-buffer list with up to @n new packet buffers,
510 * allocated with the supplied gfp flags. The caller must assure that
511 * @n does not exceed the queue's capacity.
513 static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
515 struct rx_sw_desc *sd = &q->sdesc[q->pidx];
516 struct rx_desc *d = &q->desc[q->pidx];
517 unsigned int count = 0;
524 if (unlikely(alloc_pg_chunk(adap, q, sd, gfp,
526 nomem: q->alloc_failed++;
529 mapping = sd->pg_chunk.mapping + sd->pg_chunk.offset;
530 dma_unmap_addr_set(sd, dma_addr, mapping);
532 add_one_rx_chunk(mapping, d, q->gen);
533 pci_dma_sync_single_for_device(adap->pdev, mapping,
534 q->buf_size - SGE_PG_RSVD,
539 struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
544 buf_start = skb->data;
545 err = add_one_rx_buf(buf_start, q->buf_size, d, sd,
548 clear_rx_desc(adap->pdev, q, sd);
555 if (++q->pidx == q->size) {
565 q->pend_cred += count;
571 static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
573 refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits),
574 GFP_ATOMIC | __GFP_COMP);
578 * recycle_rx_buf - recycle a receive buffer
579 * @adapter: the adapter
580 * @q: the SGE free list
581 * @idx: index of buffer to recycle
583 * Recycles the specified buffer on the given free list by adding it at
584 * the next available slot on the list.
586 static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
589 struct rx_desc *from = &q->desc[idx];
590 struct rx_desc *to = &q->desc[q->pidx];
592 q->sdesc[q->pidx] = q->sdesc[idx];
593 to->addr_lo = from->addr_lo; /* already big endian */
594 to->addr_hi = from->addr_hi; /* likewise */
596 to->len_gen = cpu_to_be32(V_FLD_GEN1(q->gen));
597 to->gen2 = cpu_to_be32(V_FLD_GEN2(q->gen));
599 if (++q->pidx == q->size) {
610 * alloc_ring - allocate resources for an SGE descriptor ring
611 * @pdev: the PCI device
612 * @nelem: the number of descriptors
613 * @elem_size: the size of each descriptor
614 * @sw_size: the size of the SW state associated with each ring element
615 * @phys: the physical address of the allocated ring
616 * @metadata: address of the array holding the SW state for the ring
618 * Allocates resources for an SGE descriptor ring, such as Tx queues,
619 * free buffer lists, or response queues. Each SGE ring requires
620 * space for its HW descriptors plus, optionally, space for the SW state
621 * associated with each HW entry (the metadata). The function returns
622 * three values: the virtual address for the HW ring (the return value
623 * of the function), the physical address of the HW ring, and the address of the SW ring.
626 static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
627 size_t sw_size, dma_addr_t * phys, void *metadata)
629 size_t len = nelem * elem_size;
631 void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL);
635 if (sw_size && metadata) {
636 s = kcalloc(nelem, sw_size, GFP_KERNEL);
639 dma_free_coherent(&pdev->dev, len, p, *phys);
642 *(void **)metadata = s;
649 * t3_reset_qset - reset a sge qset
652 * Reset the qset structure.
653 * The NAPI structure is preserved in the event of
654 * the qset's reincarnation, for example during EEH recovery.
656 static void t3_reset_qset(struct sge_qset *q)
659 !(q->adap->flags & NAPI_INIT)) {
660 memset(q, 0, sizeof(*q));
665 memset(&q->rspq, 0, sizeof(q->rspq));
666 memset(q->fl, 0, sizeof(struct sge_fl) * SGE_RXQ_PER_SET);
667 memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET);
669 q->tx_reclaim_timer.function = NULL; /* for t3_stop_sge_timers() */
670 q->rx_reclaim_timer.function = NULL;
672 napi_free_frags(&q->napi);
677 * free_qset - free the resources of an SGE queue set
678 * @adapter: the adapter owning the queue set
681 * Release the HW and SW resources associated with an SGE queue set, such
682 * as HW contexts, packet buffers, and descriptor rings. Traffic to the
683 * queue set must be quiesced prior to calling this.
685 static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
688 struct pci_dev *pdev = adapter->pdev;
690 for (i = 0; i < SGE_RXQ_PER_SET; ++i)
692 spin_lock_irq(&adapter->sge.reg_lock);
693 t3_sge_disable_fl(adapter, q->fl[i].cntxt_id);
694 spin_unlock_irq(&adapter->sge.reg_lock);
695 free_rx_bufs(pdev, &q->fl[i]);
696 kfree(q->fl[i].sdesc);
697 dma_free_coherent(&pdev->dev,
699 sizeof(struct rx_desc), q->fl[i].desc,
703 for (i = 0; i < SGE_TXQ_PER_SET; ++i)
704 if (q->txq[i].desc) {
705 spin_lock_irq(&adapter->sge.reg_lock);
706 t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0);
707 spin_unlock_irq(&adapter->sge.reg_lock);
708 if (q->txq[i].sdesc) {
709 free_tx_desc(adapter, &q->txq[i],
711 kfree(q->txq[i].sdesc);
713 dma_free_coherent(&pdev->dev,
715 sizeof(struct tx_desc),
716 q->txq[i].desc, q->txq[i].phys_addr);
717 __skb_queue_purge(&q->txq[i].sendq);
721 spin_lock_irq(&adapter->sge.reg_lock);
722 t3_sge_disable_rspcntxt(adapter, q->rspq.cntxt_id);
723 spin_unlock_irq(&adapter->sge.reg_lock);
724 dma_free_coherent(&pdev->dev,
725 q->rspq.size * sizeof(struct rsp_desc),
726 q->rspq.desc, q->rspq.phys_addr);
733 * init_qset_cntxt - initialize an SGE queue set context info
735 * @id: the queue set id
737 * Initializes the TIDs and context ids for the queues of a queue set.
739 static void init_qset_cntxt(struct sge_qset *qs, unsigned int id)
741 qs->rspq.cntxt_id = id;
742 qs->fl[0].cntxt_id = 2 * id;
743 qs->fl[1].cntxt_id = 2 * id + 1;
744 qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
745 qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
746 qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
747 qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
748 qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
752 * sgl_len - calculates the size of an SGL of the given capacity
753 * @n: the number of SGL entries
755 * Calculates the number of flits needed for a scatter/gather list that
756 * can hold the given number of entries.
758 static inline unsigned int sgl_len(unsigned int n)
760 /* alternatively: 3 * (n / 2) + 2 * (n & 1) */
761 return (3 * n) / 2 + (n & 1);
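/*
 * Example: sgl_len(3) = 5 flits.  Each struct sg_ent packs two 64-bit
 * addresses and two 32-bit lengths into three flits, so a full pair of
 * entries costs 3 flits and a trailing odd entry costs 2 more.
 */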
765 * flits_to_desc - returns the num of Tx descriptors for the given flits
766 * @n: the number of flits
768 * Calculates the number of Tx descriptors needed for the supplied number
771 static inline unsigned int flits_to_desc(unsigned int n)
773 BUG_ON(n >= ARRAY_SIZE(flit_desc_map));
774 return flit_desc_map[n];
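/*
 * Example: with either generation-bit setting, the flit_desc_map table above
 * maps a 20-flit work request to 2 Tx descriptors, while anything up to 14
 * flits always fits in a single descriptor.
 */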
778 * get_packet - return the next ingress packet buffer from a free list
779 * @adap: the adapter that received the packet
780 * @fl: the SGE free list holding the packet
781 * @len: the packet length including any SGE padding
782 * @drop_thres: # of remaining buffers before we start dropping packets
784 * Get the next packet from a free list and complete setup of the
785 * sk_buff. If the packet is small we make a copy and recycle the
786 * original buffer, otherwise we use the original buffer itself. If a
787 * positive drop threshold is supplied packets are dropped and their
788 * buffers recycled if (a) the number of remaining buffers is under the
789 * threshold and the packet is too big to copy, or (b) the packet should
790 * be copied but there is no memory for the copy.
792 static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
793 unsigned int len, unsigned int drop_thres)
795 struct sk_buff *skb = NULL;
796 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
798 prefetch(sd->skb->data);
801 if (len <= SGE_RX_COPY_THRES) {
802 skb = alloc_skb(len, GFP_ATOMIC);
803 if (likely(skb != NULL)) {
805 pci_dma_sync_single_for_cpu(adap->pdev,
806 dma_unmap_addr(sd, dma_addr), len,
808 memcpy(skb->data, sd->skb->data, len);
809 pci_dma_sync_single_for_device(adap->pdev,
810 dma_unmap_addr(sd, dma_addr), len,
812 } else if (!drop_thres)
815 recycle_rx_buf(adap, fl, fl->cidx);
819 if (unlikely(fl->credits < drop_thres) &&
820 refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits - 1),
821 GFP_ATOMIC | __GFP_COMP) == 0)
825 pci_unmap_single(adap->pdev, dma_unmap_addr(sd, dma_addr),
826 fl->buf_size, PCI_DMA_FROMDEVICE);
829 __refill_fl(adap, fl);
834 * get_packet_pg - return the next ingress packet buffer from a free list
835 * @adap: the adapter that received the packet
836 * @fl: the SGE free list holding the packet
837 * @len: the packet length including any SGE padding
838 * @drop_thres: # of remaining buffers before we start dropping packets
840 * Get the next packet from a free list populated with page chunks.
841 * If the packet is small we make a copy and recycle the original buffer,
842 * otherwise we attach the original buffer as a page fragment to a fresh
843 * sk_buff. If a positive drop threshold is supplied packets are dropped
844 * and their buffers recycled if (a) the number of remaining buffers is
845 * under the threshold and the packet is too big to copy, or (b) there's no memory for the copy.
848 * Note: this function is similar to @get_packet but deals with Rx buffers
849 * that are page chunks rather than sk_buffs.
851 static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl,
852 struct sge_rspq *q, unsigned int len,
853 unsigned int drop_thres)
855 struct sk_buff *newskb, *skb;
856 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
858 dma_addr_t dma_addr = dma_unmap_addr(sd, dma_addr);
860 newskb = skb = q->pg_skb;
861 if (!skb && (len <= SGE_RX_COPY_THRES)) {
862 newskb = alloc_skb(len, GFP_ATOMIC);
863 if (likely(newskb != NULL)) {
864 __skb_put(newskb, len);
865 pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len,
867 memcpy(newskb->data, sd->pg_chunk.va, len);
868 pci_dma_sync_single_for_device(adap->pdev, dma_addr,
871 } else if (!drop_thres)
875 recycle_rx_buf(adap, fl, fl->cidx);
880 if (unlikely(q->rx_recycle_buf || (!skb && fl->credits <= drop_thres)))
883 prefetch(sd->pg_chunk.p_cnt);
886 newskb = alloc_skb(SGE_RX_PULL_LEN, GFP_ATOMIC);
888 if (unlikely(!newskb)) {
894 pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len,
896 (*sd->pg_chunk.p_cnt)--;
897 if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page)
898 pci_unmap_page(adap->pdev,
899 sd->pg_chunk.mapping,
903 __skb_put(newskb, SGE_RX_PULL_LEN);
904 memcpy(newskb->data, sd->pg_chunk.va, SGE_RX_PULL_LEN);
905 skb_fill_page_desc(newskb, 0, sd->pg_chunk.page,
906 sd->pg_chunk.offset + SGE_RX_PULL_LEN,
907 len - SGE_RX_PULL_LEN);
909 newskb->data_len = len - SGE_RX_PULL_LEN;
910 newskb->truesize += newskb->data_len;
912 skb_fill_page_desc(newskb, skb_shinfo(newskb)->nr_frags,
914 sd->pg_chunk.offset, len);
916 newskb->data_len += len;
917 newskb->truesize += len;
922 * We do not refill FLs here; we let the caller do it to overlap a prefetch.
929 * get_imm_packet - return the next ingress packet buffer from a response
930 * @resp: the response descriptor containing the packet data
932 * Return a packet containing the immediate data of the given response.
934 static inline struct sk_buff *get_imm_packet(const struct rsp_desc *resp)
936 struct sk_buff *skb = alloc_skb(IMMED_PKT_SIZE, GFP_ATOMIC);
939 __skb_put(skb, IMMED_PKT_SIZE);
940 skb_copy_to_linear_data(skb, resp->imm_data, IMMED_PKT_SIZE);
946 * calc_tx_descs - calculate the number of Tx descriptors for a packet
949 * Returns the number of Tx descriptors needed for the given Ethernet
950 * packet. Ethernet packets require addition of WR and CPL headers.
952 static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
956 if (skb->len <= WR_LEN - sizeof(struct cpl_tx_pkt))
959 flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 2;
960 if (skb_shinfo(skb)->gso_size)
962 return flits_to_desc(flits);
966 * make_sgl - populate a scatter/gather list for a packet
968 * @sgp: the SGL to populate
969 * @start: start address of skb main body data to include in the SGL
970 * @len: length of skb main body data to include in the SGL
971 * @pdev: the PCI device
973 * Generates a scatter/gather list for the buffers that make up a packet
974 * and returns the SGL size in 8-byte words. The caller must size the SGL appropriately.
977 static inline unsigned int make_sgl(const struct sk_buff *skb,
978 struct sg_ent *sgp, unsigned char *start,
979 unsigned int len, struct pci_dev *pdev)
982 unsigned int i, j = 0, nfrags;
985 mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE);
986 sgp->len[0] = cpu_to_be32(len);
987 sgp->addr[0] = cpu_to_be64(mapping);
991 nfrags = skb_shinfo(skb)->nr_frags;
992 for (i = 0; i < nfrags; i++) {
993 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
995 mapping = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag),
997 sgp->len[j] = cpu_to_be32(skb_frag_size(frag));
998 sgp->addr[j] = cpu_to_be64(mapping);
1005 return ((nfrags + (len != 0)) * 3) / 2 + j;
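/*
 * The value returned above is the SGL length in flits: three flits for every
 * full pair of entries plus the partial cost of a trailing odd entry, i.e.
 * the same count sgl_len() computes for nfrags + (len != 0) entries.
 */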
1009 * check_ring_tx_db - check and potentially ring a Tx queue's doorbell
1010 * @adap: the adapter
1013 * Ring the doorbell if a Tx queue is asleep. There is a natural race
1014 * where the HW may go to sleep just after we checked; in that case
1015 * the interrupt handler will detect the outstanding Tx packet
1016 * and ring the doorbell for us.
1018 * When GTS is disabled we unconditionally ring the doorbell.
1020 static inline void check_ring_tx_db(struct adapter *adap, struct sge_txq *q)
1023 clear_bit(TXQ_LAST_PKT_DB, &q->flags);
1024 if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) {
1025 set_bit(TXQ_LAST_PKT_DB, &q->flags);
1026 t3_write_reg(adap, A_SG_KDOORBELL,
1027 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1030 wmb(); /* write descriptors before telling HW */
1031 t3_write_reg(adap, A_SG_KDOORBELL,
1032 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
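/*
 * wr_gen2() writes the second copy of the generation value into the last flit
 * of a Tx descriptor; it is only needed (and only compiled in) when the SGE
 * is configured with two generation bits per descriptor.
 */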
1036 static inline void wr_gen2(struct tx_desc *d, unsigned int gen)
1038 #if SGE_NUM_GENBITS == 2
1039 d->flit[TX_DESC_FLITS - 1] = cpu_to_be64(gen);
1044 * write_wr_hdr_sgl - write a WR header and, optionally, SGL
1045 * @ndesc: number of Tx descriptors spanned by the SGL
1046 * @skb: the packet corresponding to the WR
1047 * @d: first Tx descriptor to be written
1048 * @pidx: index of above descriptors
1049 * @q: the SGE Tx queue
1051 * @flits: number of flits to the start of the SGL in the first descriptor
1052 * @sgl_flits: the SGL size in flits
1053 * @gen: the Tx descriptor generation
1054 * @wr_hi: top 32 bits of WR header based on WR type (big endian)
1055 * @wr_lo: low 32 bits of WR header based on WR type (big endian)
1057 * Write a work request header and an associated SGL. If the SGL is
1058 * small enough to fit into one Tx descriptor it has already been written
1059 * and we just need to write the WR header. Otherwise we distribute the
1060 * SGL across the number of descriptors it spans.
1062 static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb,
1063 struct tx_desc *d, unsigned int pidx,
1064 const struct sge_txq *q,
1065 const struct sg_ent *sgl,
1066 unsigned int flits, unsigned int sgl_flits,
1067 unsigned int gen, __be32 wr_hi,
1070 struct work_request_hdr *wrp = (struct work_request_hdr *)d;
1071 struct tx_sw_desc *sd = &q->sdesc[pidx];
1074 if (need_skb_unmap()) {
1080 if (likely(ndesc == 1)) {
1082 wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
1083 V_WR_SGLSFLT(flits)) | wr_hi;
1085 wrp->wr_lo = htonl(V_WR_LEN(flits + sgl_flits) |
1086 V_WR_GEN(gen)) | wr_lo;
1089 unsigned int ogen = gen;
1090 const u64 *fp = (const u64 *)sgl;
1091 struct work_request_hdr *wp = wrp;
1093 wrp->wr_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) |
1094 V_WR_SGLSFLT(flits)) | wr_hi;
1097 unsigned int avail = WR_FLITS - flits;
1099 if (avail > sgl_flits)
1101 memcpy(&d->flit[flits], fp, avail * sizeof(*fp));
1111 if (++pidx == q->size) {
1119 wrp = (struct work_request_hdr *)d;
1120 wrp->wr_hi = htonl(V_WR_DATATYPE(1) |
1121 V_WR_SGLSFLT(1)) | wr_hi;
1122 wrp->wr_lo = htonl(V_WR_LEN(min(WR_FLITS,
1124 V_WR_GEN(gen)) | wr_lo;
1129 wrp->wr_hi |= htonl(F_WR_EOP);
1131 wp->wr_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
1132 wr_gen2((struct tx_desc *)wp, ogen);
1133 WARN_ON(ndesc != 0);
1138 * write_tx_pkt_wr - write a TX_PKT work request
1139 * @adap: the adapter
1140 * @skb: the packet to send
1141 * @pi: the egress interface
1142 * @pidx: index of the first Tx descriptor to write
1143 * @gen: the generation value to use
1145 * @ndesc: number of descriptors the packet will occupy
1146 * @compl: the value of the COMPL bit to use
1148 * Generate a TX_PKT work request to send the supplied packet.
1150 static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
1151 const struct port_info *pi,
1152 unsigned int pidx, unsigned int gen,
1153 struct sge_txq *q, unsigned int ndesc,
1156 unsigned int flits, sgl_flits, cntrl, tso_info;
1157 struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
1158 struct tx_desc *d = &q->desc[pidx];
1159 struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)d;
1161 cpl->len = htonl(skb->len);
1162 cntrl = V_TXPKT_INTF(pi->port_id);
1164 if (vlan_tx_tag_present(skb))
1165 cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(vlan_tx_tag_get(skb));
1167 tso_info = V_LSO_MSS(skb_shinfo(skb)->gso_size);
1170 struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)cpl;
1173 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO);
1174 hdr->cntrl = htonl(cntrl);
1175 eth_type = skb_network_offset(skb) == ETH_HLEN ?
1176 CPL_ETH_II : CPL_ETH_II_VLAN;
1177 tso_info |= V_LSO_ETH_TYPE(eth_type) |
1178 V_LSO_IPHDR_WORDS(ip_hdr(skb)->ihl) |
1179 V_LSO_TCPHDR_WORDS(tcp_hdr(skb)->doff);
1180 hdr->lso_info = htonl(tso_info);
1183 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
1184 cntrl |= F_TXPKT_IPCSUM_DIS; /* SW calculates IP csum */
1185 cntrl |= V_TXPKT_L4CSUM_DIS(skb->ip_summed != CHECKSUM_PARTIAL);
1186 cpl->cntrl = htonl(cntrl);
1188 if (skb->len <= WR_LEN - sizeof(*cpl)) {
1189 q->sdesc[pidx].skb = NULL;
1191 skb_copy_from_linear_data(skb, &d->flit[2],
1194 skb_copy_bits(skb, 0, &d->flit[2], skb->len);
1196 flits = (skb->len + 7) / 8 + 2;
1197 cpl->wr.wr_hi = htonl(V_WR_BCNTLFLT(skb->len & 7) |
1198 V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT)
1199 | F_WR_SOP | F_WR_EOP | compl);
1201 cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | V_WR_GEN(gen) |
1202 V_WR_TID(q->token));
1211 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
1212 sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev);
1214 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen,
1215 htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl),
1216 htonl(V_WR_TID(q->token)));
1219 static inline void t3_stop_tx_queue(struct netdev_queue *txq,
1220 struct sge_qset *qs, struct sge_txq *q)
1222 netif_tx_stop_queue(txq);
1223 set_bit(TXQ_ETH, &qs->txq_stopped);
1228 * t3_eth_xmit - add a packet to the Ethernet Tx queue
1230 * @dev: the egress net device
1232 * Add a packet to an SGE Tx queue. Runs with softirqs disabled.
1234 netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1237 unsigned int ndesc, pidx, credits, gen, compl;
1238 const struct port_info *pi = netdev_priv(dev);
1239 struct adapter *adap = pi->adapter;
1240 struct netdev_queue *txq;
1241 struct sge_qset *qs;
1245 * The chip min packet length is 9 octets but play safe and reject
1246 * anything shorter than an Ethernet header.
1248 if (unlikely(skb->len < ETH_HLEN)) {
1250 return NETDEV_TX_OK;
1253 qidx = skb_get_queue_mapping(skb);
1255 q = &qs->txq[TXQ_ETH];
1256 txq = netdev_get_tx_queue(dev, qidx);
1258 reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
1260 credits = q->size - q->in_use;
1261 ndesc = calc_tx_descs(skb);
1263 if (unlikely(credits < ndesc)) {
1264 t3_stop_tx_queue(txq, qs, q);
1265 dev_err(&adap->pdev->dev,
1266 "%s: Tx ring %u full while queue awake!\n",
1267 dev->name, q->cntxt_id & 7);
1268 return NETDEV_TX_BUSY;
1272 if (unlikely(credits - ndesc < q->stop_thres)) {
1273 t3_stop_tx_queue(txq, qs, q);
1275 if (should_restart_tx(q) &&
1276 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
1278 netif_tx_start_queue(txq);
1283 q->unacked += ndesc;
1286 * Some Guest OS clients get terrible performance when they have bad
1287 * message size / socket send buffer space parameters. For instance,
1288 * if an application selects an 8KB message size and an 8KB send
1289 * socket buffer size. This forces the application into a single
1290 * packet stop-and-go mode where it's only willing to have a single
1291 * message outstanding. The next message is only sent when the
1292 * previous message is noted as having been sent. Until we issue a
1293 * kfree_skb() against the TX skb, the skb is charged against the
1294 * application's send buffer space. We only free up TX skbs when we
1295 * get a TX credit return from the hardware / firmware which is fairly
1296 * lazy about this. So we request a TX WR Completion Notification on
1297 * every TX descriptor in order to accelerate TX credit returns. See
1298 * also the change in handle_rsp_cntrl_info() to free up TX skb's when
1299 * we receive the TX WR Completion Notifications ...
1303 compl = (q->unacked & 8) << (S_WR_COMPL - 3);
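/*
 * The expression above moves bit 3 of the running q->unacked count into the
 * WR_COMPL bit position of the WR header, requesting a completion whenever
 * that bit is set.
 */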
1308 if (q->pidx >= q->size) {
1313 /* update port statistics */
1314 if (skb->ip_summed == CHECKSUM_COMPLETE)
1315 qs->port_stats[SGE_PSTAT_TX_CSUM]++;
1316 if (skb_shinfo(skb)->gso_size)
1317 qs->port_stats[SGE_PSTAT_TSO]++;
1318 if (vlan_tx_tag_present(skb))
1319 qs->port_stats[SGE_PSTAT_VLANINS]++;
1322 * We do not use Tx completion interrupts to free DMAd Tx packets.
1323 * This is good for performance but means that we rely on new Tx
1324 * packets arriving to run the destructors of completed packets,
1325 * which open up space in their sockets' send queues. Sometimes
1326 * we do not get such new packets causing Tx to stall. A single
1327 * UDP transmitter is a good example of this situation. We have
1328 * a clean up timer that periodically reclaims completed packets
1329 * but it doesn't run often enough (nor do we want it to) to prevent
1330 * lengthy stalls. A solution to this problem is to run the
1331 * destructor early, after the packet is queued but before it's DMAd.
1332 * A downside is that we lie to socket memory accounting, but the amount
1333 * of extra memory is reasonable (limited by the number of Tx
1334 * descriptors), the packets do actually get freed quickly by new
1335 * packets almost always, and for protocols like TCP that wait for
1336 * acks to really free up the data the extra memory is even less.
1337 * On the positive side we run the destructors on the sending CPU
1338 * rather than on a potentially different completing CPU, usually a
1339 * good thing. We also run them without holding our Tx queue lock,
1340 * unlike what reclaim_completed_tx() would otherwise do.
1342 * Run the destructor before telling the DMA engine about the packet
1343 * to make sure it doesn't complete and get freed prematurely.
1345 if (likely(!skb_shared(skb)))
1348 write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl);
1349 check_ring_tx_db(adap, q);
1350 return NETDEV_TX_OK;
1354 * write_imm - write a packet into a Tx descriptor as immediate data
1355 * @d: the Tx descriptor to write
1357 * @len: the length of packet data to write as immediate data
1358 * @gen: the generation bit value to write
1360 * Writes a packet as immediate data into a Tx descriptor. The packet
1361 * contains a work request at its beginning. We must write the packet
1362 * carefully so the SGE doesn't read it accidentally before it's written in its entirety.
1365 static inline void write_imm(struct tx_desc *d, struct sk_buff *skb,
1366 unsigned int len, unsigned int gen)
1368 struct work_request_hdr *from = (struct work_request_hdr *)skb->data;
1369 struct work_request_hdr *to = (struct work_request_hdr *)d;
1371 if (likely(!skb->data_len))
1372 memcpy(&to[1], &from[1], len - sizeof(*from));
1374 skb_copy_bits(skb, sizeof(*from), &to[1], len - sizeof(*from));
1376 to->wr_hi = from->wr_hi | htonl(F_WR_SOP | F_WR_EOP |
1377 V_WR_BCNTLFLT(len & 7));
1379 to->wr_lo = from->wr_lo | htonl(V_WR_GEN(gen) |
1380 V_WR_LEN((len + 7) / 8));
1386 * check_desc_avail - check descriptor availability on a send queue
1387 * @adap: the adapter
1388 * @q: the send queue
1389 * @skb: the packet needing the descriptors
1390 * @ndesc: the number of Tx descriptors needed
1391 * @qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL)
1393 * Checks if the requested number of Tx descriptors is available on an
1394 * SGE send queue. If the queue is already suspended or not enough
1395 * descriptors are available the packet is queued for later transmission.
1396 * Must be called with the Tx queue locked.
1398 * Returns 0 if enough descriptors are available, 1 if there aren't
1399 * enough descriptors and the packet has been queued, and 2 if the caller
1400 * needs to retry because there weren't enough descriptors at the
1401 * beginning of the call but some freed up in the meantime.
1403 static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q,
1404 struct sk_buff *skb, unsigned int ndesc,
1407 if (unlikely(!skb_queue_empty(&q->sendq))) {
1408 addq_exit:__skb_queue_tail(&q->sendq, skb);
1411 if (unlikely(q->size - q->in_use < ndesc)) {
1412 struct sge_qset *qs = txq_to_qset(q, qid);
1414 set_bit(qid, &qs->txq_stopped);
1415 smp_mb__after_clear_bit();
1417 if (should_restart_tx(q) &&
1418 test_and_clear_bit(qid, &qs->txq_stopped))
1428 * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
1429 * @q: the SGE control Tx queue
1431 * This is a variant of reclaim_completed_tx() that is used for Tx queues
1432 * that send only immediate data (presently just the control queues) and
1433 * thus do not have any sk_buffs to release.
1435 static inline void reclaim_completed_tx_imm(struct sge_txq *q)
1437 unsigned int reclaim = q->processed - q->cleaned;
1439 q->in_use -= reclaim;
1440 q->cleaned += reclaim;
1443 static inline int immediate(const struct sk_buff *skb)
1445 return skb->len <= WR_LEN;
1449 * ctrl_xmit - send a packet through an SGE control Tx queue
1450 * @adap: the adapter
1451 * @q: the control queue
1454 * Send a packet through an SGE control Tx queue. Packets sent through
1455 * a control queue must fit entirely as immediate data in a single Tx
1456 * descriptor and have no page fragments.
1458 static int ctrl_xmit(struct adapter *adap, struct sge_txq *q,
1459 struct sk_buff *skb)
1462 struct work_request_hdr *wrp = (struct work_request_hdr *)skb->data;
1464 if (unlikely(!immediate(skb))) {
1467 return NET_XMIT_SUCCESS;
1470 wrp->wr_hi |= htonl(F_WR_SOP | F_WR_EOP);
1471 wrp->wr_lo = htonl(V_WR_TID(q->token));
1473 spin_lock(&q->lock);
1474 again:reclaim_completed_tx_imm(q);
1476 ret = check_desc_avail(adap, q, skb, 1, TXQ_CTRL);
1477 if (unlikely(ret)) {
1479 spin_unlock(&q->lock);
1485 write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
1488 if (++q->pidx >= q->size) {
1492 spin_unlock(&q->lock);
1494 t3_write_reg(adap, A_SG_KDOORBELL,
1495 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1496 return NET_XMIT_SUCCESS;
1500 * restart_ctrlq - restart a suspended control queue
1501 * @qs: the queue set containing the control queue
1503 * Resumes transmission on a suspended Tx control queue.
1505 static void restart_ctrlq(unsigned long data)
1507 struct sk_buff *skb;
1508 struct sge_qset *qs = (struct sge_qset *)data;
1509 struct sge_txq *q = &qs->txq[TXQ_CTRL];
1511 spin_lock(&q->lock);
1512 again:reclaim_completed_tx_imm(q);
1514 while (q->in_use < q->size &&
1515 (skb = __skb_dequeue(&q->sendq)) != NULL) {
1517 write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
1519 if (++q->pidx >= q->size) {
1526 if (!skb_queue_empty(&q->sendq)) {
1527 set_bit(TXQ_CTRL, &qs->txq_stopped);
1528 smp_mb__after_clear_bit();
1530 if (should_restart_tx(q) &&
1531 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
1536 spin_unlock(&q->lock);
1538 t3_write_reg(qs->adap, A_SG_KDOORBELL,
1539 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1543 * Send a management message through control queue 0
1545 int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
1549 ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
1556 * deferred_unmap_destructor - unmap a packet when it is freed
1559 * This is the packet destructor used for Tx packets that need to remain
1560 * mapped until they are freed rather than until their Tx descriptors are freed.
1563 static void deferred_unmap_destructor(struct sk_buff *skb)
1566 const dma_addr_t *p;
1567 const struct skb_shared_info *si;
1568 const struct deferred_unmap_info *dui;
1570 dui = (struct deferred_unmap_info *)skb->head;
1573 if (skb->tail - skb->transport_header)
1574 pci_unmap_single(dui->pdev, *p++,
1575 skb->tail - skb->transport_header,
1578 si = skb_shinfo(skb);
1579 for (i = 0; i < si->nr_frags; i++)
1580 pci_unmap_page(dui->pdev, *p++, skb_frag_size(&si->frags[i]),
1584 static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev,
1585 const struct sg_ent *sgl, int sgl_flits)
1588 struct deferred_unmap_info *dui;
1590 dui = (struct deferred_unmap_info *)skb->head;
1592 for (p = dui->addr; sgl_flits >= 3; sgl++, sgl_flits -= 3) {
1593 *p++ = be64_to_cpu(sgl->addr[0]);
1594 *p++ = be64_to_cpu(sgl->addr[1]);
1597 *p = be64_to_cpu(sgl->addr[0]);
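/*
 * The loop above walks the SGL one sg_ent (three flits, two address/length
 * pairs) at a time, copying every DMA address into dui->addr[], with a
 * trailing odd entry handled after the loop, so that
 * deferred_unmap_destructor() can unmap them when the skb is finally freed.
 */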
1601 * write_ofld_wr - write an offload work request
1602 * @adap: the adapter
1603 * @skb: the packet to send
1605 * @pidx: index of the first Tx descriptor to write
1606 * @gen: the generation value to use
1607 * @ndesc: number of descriptors the packet will occupy
1609 * Write an offload work request to send the supplied packet. The packet
1610 * data already carry the work request with most fields populated.
1612 static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
1613 struct sge_txq *q, unsigned int pidx,
1614 unsigned int gen, unsigned int ndesc)
1616 unsigned int sgl_flits, flits;
1617 struct work_request_hdr *from;
1618 struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
1619 struct tx_desc *d = &q->desc[pidx];
1621 if (immediate(skb)) {
1622 q->sdesc[pidx].skb = NULL;
1623 write_imm(d, skb, skb->len, gen);
1627 /* Only TX_DATA builds SGLs */
1629 from = (struct work_request_hdr *)skb->data;
1630 memcpy(&d->flit[1], &from[1],
1631 skb_transport_offset(skb) - sizeof(*from));
1633 flits = skb_transport_offset(skb) / 8;
1634 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
1635 sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb),
1636 skb->tail - skb->transport_header,
1638 if (need_skb_unmap()) {
1639 setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
1640 skb->destructor = deferred_unmap_destructor;
1643 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits,
1644 gen, from->wr_hi, from->wr_lo);
1648 * calc_tx_descs_ofld - calculate # of Tx descriptors for an offload packet
1651 * Returns the number of Tx descriptors needed for the given offload
1652 * packet. These packets are already fully constructed.
1654 static inline unsigned int calc_tx_descs_ofld(const struct sk_buff *skb)
1656 unsigned int flits, cnt;
1658 if (skb->len <= WR_LEN)
1659 return 1; /* packet fits as immediate data */
1661 flits = skb_transport_offset(skb) / 8; /* headers */
1662 cnt = skb_shinfo(skb)->nr_frags;
1663 if (skb->tail != skb->transport_header)
1665 return flits_to_desc(flits + sgl_len(cnt));
1669 * ofld_xmit - send a packet through an offload queue
1670 * @adap: the adapter
1671 * @q: the Tx offload queue
1674 * Send an offload packet through an SGE offload queue.
1676 static int ofld_xmit(struct adapter *adap, struct sge_txq *q,
1677 struct sk_buff *skb)
1680 unsigned int ndesc = calc_tx_descs_ofld(skb), pidx, gen;
1682 spin_lock(&q->lock);
1683 again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
1685 ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD);
1686 if (unlikely(ret)) {
1688 skb->priority = ndesc; /* save for restart */
1689 spin_unlock(&q->lock);
1699 if (q->pidx >= q->size) {
1703 spin_unlock(&q->lock);
1705 write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
1706 check_ring_tx_db(adap, q);
1707 return NET_XMIT_SUCCESS;
1711 * restart_offloadq - restart a suspended offload queue
1712 * @qs: the queue set containing the offload queue
1714 * Resumes transmission on a suspended Tx offload queue.
1716 static void restart_offloadq(unsigned long data)
1718 struct sk_buff *skb;
1719 struct sge_qset *qs = (struct sge_qset *)data;
1720 struct sge_txq *q = &qs->txq[TXQ_OFLD];
1721 const struct port_info *pi = netdev_priv(qs->netdev);
1722 struct adapter *adap = pi->adapter;
1724 spin_lock(&q->lock);
1725 again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
1727 while ((skb = skb_peek(&q->sendq)) != NULL) {
1728 unsigned int gen, pidx;
1729 unsigned int ndesc = skb->priority;
1731 if (unlikely(q->size - q->in_use < ndesc)) {
1732 set_bit(TXQ_OFLD, &qs->txq_stopped);
1733 smp_mb__after_clear_bit();
1735 if (should_restart_tx(q) &&
1736 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
1746 if (q->pidx >= q->size) {
1750 __skb_unlink(skb, &q->sendq);
1751 spin_unlock(&q->lock);
1753 write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
1754 spin_lock(&q->lock);
1756 spin_unlock(&q->lock);
1759 set_bit(TXQ_RUNNING, &q->flags);
1760 set_bit(TXQ_LAST_PKT_DB, &q->flags);
1763 t3_write_reg(adap, A_SG_KDOORBELL,
1764 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1768 * queue_set - return the queue set a packet should use
1771 * Maps a packet to the SGE queue set it should use. The desired queue
1772 * set is carried in bits 1-3 of the packet's priority.
1774 static inline int queue_set(const struct sk_buff *skb)
1776 return skb->priority >> 1;
1780 * is_ctrl_pkt - return whether an offload packet is a control packet
1783 * Determines whether an offload packet should use an OFLD or a CTRL
1784 * Tx queue. This is indicated by bit 0 in the packet's priority.
1786 static inline int is_ctrl_pkt(const struct sk_buff *skb)
1788 return skb->priority & 1;
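/*
 * Example: an offload skb with skb->priority == 5 (binary 101) is a control
 * packet (bit 0 set) destined for queue set 2 (bits 1-3).
 */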
1792 * t3_offload_tx - send an offload packet
1793 * @tdev: the offload device to send to
1796 * Sends an offload packet. We use the packet priority to select the
1797 * appropriate Tx queue as follows: bit 0 indicates whether the packet
1798 * should be sent as regular or control, bits 1-3 select the queue set.
1800 int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
1802 struct adapter *adap = tdev2adap(tdev);
1803 struct sge_qset *qs = &adap->sge.qs[queue_set(skb)];
1805 if (unlikely(is_ctrl_pkt(skb)))
1806 return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], skb);
1808 return ofld_xmit(adap, &qs->txq[TXQ_OFLD], skb);
1812 * offload_enqueue - add an offload packet to an SGE offload receive queue
1813 * @q: the SGE response queue
1816 * Add a new offload packet to an SGE response queue's offload packet
1817 * queue. If the packet is the first on the queue it schedules the RX
1818 * softirq to process the queue.
1820 static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
1822 int was_empty = skb_queue_empty(&q->rx_queue);
1824 __skb_queue_tail(&q->rx_queue, skb);
1827 struct sge_qset *qs = rspq_to_qset(q);
1829 napi_schedule(&qs->napi);
1834 * deliver_partial_bundle - deliver a (partial) bundle of Rx offload pkts
1835 * @tdev: the offload device that will be receiving the packets
1836 * @q: the SGE response queue that assembled the bundle
1837 * @skbs: the partial bundle
1838 * @n: the number of packets in the bundle
1840 * Delivers a (partial) bundle of Rx offload packets to an offload device.
1842 static inline void deliver_partial_bundle(struct t3cdev *tdev,
1844 struct sk_buff *skbs[], int n)
1847 q->offload_bundles++;
1848 tdev->recv(tdev, skbs, n);
1853 * ofld_poll - NAPI handler for offload packets in interrupt mode
1854 * @dev: the network device doing the polling
1855 * @budget: polling budget
1857 * The NAPI handler for offload packets when a response queue is serviced
1858 * by the hard interrupt handler, i.e., when it's operating in non-polling
1859 * mode. Creates small packet batches and sends them through the offload
1860 * receive handler. Batches need to be of modest size as we do prefetches
1861 * on the packets in each.
1863 static int ofld_poll(struct napi_struct *napi, int budget)
1865 struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
1866 struct sge_rspq *q = &qs->rspq;
1867 struct adapter *adapter = qs->adap;
1870 while (work_done < budget) {
1871 struct sk_buff *skb, *tmp, *skbs[RX_BUNDLE_SIZE];
1872 struct sk_buff_head queue;
1875 spin_lock_irq(&q->lock);
1876 __skb_queue_head_init(&queue);
1877 skb_queue_splice_init(&q->rx_queue, &queue);
1878 if (skb_queue_empty(&queue)) {
1879 napi_complete(napi);
1880 spin_unlock_irq(&q->lock);
1883 spin_unlock_irq(&q->lock);
1886 skb_queue_walk_safe(&queue, skb, tmp) {
1887 if (work_done >= budget)
1891 __skb_unlink(skb, &queue);
1892 prefetch(skb->data);
1893 skbs[ngathered] = skb;
1894 if (++ngathered == RX_BUNDLE_SIZE) {
1895 q->offload_bundles++;
1896 adapter->tdev.recv(&adapter->tdev, skbs,
1901 if (!skb_queue_empty(&queue)) {
1902 /* splice remaining packets back onto Rx queue */
1903 spin_lock_irq(&q->lock);
1904 skb_queue_splice(&queue, &q->rx_queue);
1905 spin_unlock_irq(&q->lock);
1907 deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
1914 * rx_offload - process a received offload packet
1915 * @tdev: the offload device receiving the packet
1916 * @rq: the response queue that received the packet
1918 * @rx_gather: a gather list of packets if we are building a bundle
1919 * @gather_idx: index of the next available slot in the bundle
1921 * Process an ingress offload packet and add it to the offload ingress
1922 * queue. Returns the index of the next available slot in the bundle.
1924 static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq,
1925 struct sk_buff *skb, struct sk_buff *rx_gather[],
1926 unsigned int gather_idx)
1928 skb_reset_mac_header(skb);
1929 skb_reset_network_header(skb);
1930 skb_reset_transport_header(skb);
1933 rx_gather[gather_idx++] = skb;
1934 if (gather_idx == RX_BUNDLE_SIZE) {
1935 tdev->recv(tdev, rx_gather, RX_BUNDLE_SIZE);
1937 rq->offload_bundles++;
1940 offload_enqueue(rq, skb);
1946 * restart_tx - check whether to restart suspended Tx queues
1947 * @qs: the queue set to resume
1949 * Restarts suspended Tx queues of an SGE queue set if they have enough
1950 * free resources to resume operation.
1952 static void restart_tx(struct sge_qset *qs)
1954 if (test_bit(TXQ_ETH, &qs->txq_stopped) &&
1955 should_restart_tx(&qs->txq[TXQ_ETH]) &&
1956 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
1957 qs->txq[TXQ_ETH].restarts++;
1958 if (netif_running(qs->netdev))
1959 netif_tx_wake_queue(qs->tx_q);
1962 if (test_bit(TXQ_OFLD, &qs->txq_stopped) &&
1963 should_restart_tx(&qs->txq[TXQ_OFLD]) &&
1964 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
1965 qs->txq[TXQ_OFLD].restarts++;
1966 tasklet_schedule(&qs->txq[TXQ_OFLD].qresume_tsk);
1968 if (test_bit(TXQ_CTRL, &qs->txq_stopped) &&
1969 should_restart_tx(&qs->txq[TXQ_CTRL]) &&
1970 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
1971 qs->txq[TXQ_CTRL].restarts++;
1972 tasklet_schedule(&qs->txq[TXQ_CTRL].qresume_tsk);
1977 * cxgb3_arp_process - process an ARP request probing a private IP address
1978 * @pi: the port information
1979 * @skb: the skbuff containing the ARP request
1981 * Check if the ARP request is probing the private IP address
1982 * dedicated to iSCSI, and generate an ARP reply if so.
1984 static void cxgb3_arp_process(struct port_info *pi, struct sk_buff *skb)
1986 struct net_device *dev = skb->dev;
1988 unsigned char *arp_ptr;
1995 skb_reset_network_header(skb);
1998 if (arp->ar_op != htons(ARPOP_REQUEST))
2001 arp_ptr = (unsigned char *)(arp + 1);
2003 arp_ptr += dev->addr_len;
2004 memcpy(&sip, arp_ptr, sizeof(sip));
2005 arp_ptr += sizeof(sip);
2006 arp_ptr += dev->addr_len;
2007 memcpy(&tip, arp_ptr, sizeof(tip));
2009 if (tip != pi->iscsi_ipv4addr)
2012 arp_send(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
2013 pi->iscsic.mac_addr, sha);
2017 static inline int is_arp(struct sk_buff *skb)
2019 return skb->protocol == htons(ETH_P_ARP);
2022 static void cxgb3_process_iscsi_prov_pack(struct port_info *pi,
2023 struct sk_buff *skb)
2026 cxgb3_arp_process(pi, skb);
2030 if (pi->iscsic.recv)
2031 pi->iscsic.recv(pi, skb);
2036 * rx_eth - process an ingress ethernet packet
2037 * @adap: the adapter
2038 * @rq: the response queue that received the packet
2040 * @pad: amount of padding at the start of the buffer
2042 * Process an ingress Ethernet packet and deliver it to the stack.
2043 * The padding is 2 if the packet was delivered in an Rx buffer and 0
2044 * if it was immediate data in a response.
2046 static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
2047 struct sk_buff *skb, int pad, int lro)
2049 struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad);
2050 struct sge_qset *qs = rspq_to_qset(rq);
2051 struct port_info *pi;
2053 skb_pull(skb, sizeof(*p) + pad);
2054 skb->protocol = eth_type_trans(skb, adap->port[p->iff]);
2055 pi = netdev_priv(skb->dev);
2056 if ((skb->dev->features & NETIF_F_RXCSUM) && p->csum_valid &&
2057 p->csum == htons(0xffff) && !p->fragment) {
2058 qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
2059 skb->ip_summed = CHECKSUM_UNNECESSARY;
2061 skb_checksum_none_assert(skb);
2062 skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]);
2064 if (p->vlan_valid) {
2065 qs->port_stats[SGE_PSTAT_VLANEX]++;
2066 __vlan_hwaccel_put_tag(skb, ntohs(p->vlan));
2070 napi_gro_receive(&qs->napi, skb);
2072 if (unlikely(pi->iscsic.flags))
2073 cxgb3_process_iscsi_prov_pack(pi, skb);
2074 netif_receive_skb(skb);
2080 static inline int is_eth_tcp(u32 rss)
2082 return G_HASHTYPE(ntohl(rss)) == RSS_HASH_4_TUPLE;
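/*
 * The RSS field of a response carries the hash type computed by the hardware;
 * a 4-tuple hash implies an Ethernet TCP/IP packet, which is what the
 * lro_add_page() aggregation path below handles.
 */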
2086 * lro_add_page - add a page chunk to an LRO session
2087 * @adap: the adapter
2088 * @qs: the associated queue set
2089 * @fl: the free list containing the page chunk to add
2090 * @len: packet length
2091 * @complete: Indicates the last fragment of a frame
2093 * Add a received packet contained in a page chunk to an existing LRO session.
2096 static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
2097 struct sge_fl *fl, int len, int complete)
2099 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
2100 struct port_info *pi = netdev_priv(qs->netdev);
2101 struct sk_buff *skb = NULL;
2102 struct cpl_rx_pkt *cpl;
2103 struct skb_frag_struct *rx_frag;
2108 skb = napi_get_frags(&qs->napi);
2114 pci_dma_sync_single_for_cpu(adap->pdev,
2115 dma_unmap_addr(sd, dma_addr),
2116 fl->buf_size - SGE_PG_RSVD,
2117 PCI_DMA_FROMDEVICE);
2119 (*sd->pg_chunk.p_cnt)--;
2120 if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page)
2121 pci_unmap_page(adap->pdev,
2122 sd->pg_chunk.mapping,
2124 PCI_DMA_FROMDEVICE);
2127 put_page(sd->pg_chunk.page);
2133 rx_frag = skb_shinfo(skb)->frags;
2134 nr_frags = skb_shinfo(skb)->nr_frags;
2137 offset = 2 + sizeof(struct cpl_rx_pkt);
2138 cpl = qs->lro_va = sd->pg_chunk.va + 2;
2140 if ((qs->netdev->features & NETIF_F_RXCSUM) &&
2141 cpl->csum_valid && cpl->csum == htons(0xffff)) {
2142 skb->ip_summed = CHECKSUM_UNNECESSARY;
2143 qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
2145 skb->ip_summed = CHECKSUM_NONE;
2151 rx_frag += nr_frags;
2152 __skb_frag_set_page(rx_frag, sd->pg_chunk.page);
2153 rx_frag->page_offset = sd->pg_chunk.offset + offset;
2154 skb_frag_size_set(rx_frag, len);
2157 skb->data_len += len;
2158 skb->truesize += len;
2159 skb_shinfo(skb)->nr_frags++;
2164 skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]);
2166 if (cpl->vlan_valid)
2167 __vlan_hwaccel_put_tag(skb, ntohs(cpl->vlan));
2168 napi_gro_frags(&qs->napi);
2172 * handle_rsp_cntrl_info - handles control information in a response
2173 * @qs: the queue set corresponding to the response
2174 * @flags: the response control flags
2176 * Handles the control information of an SGE response, such as GTS
2177 * indications and completion credits for the queue set's Tx queues.
2178 * HW coalesces credits, we don't do any extra SW coalescing.
2180 static inline void handle_rsp_cntrl_info(struct sge_qset *qs, u32 flags)
2182 unsigned int credits;
2185 if (flags & F_RSPD_TXQ0_GTS)
2186 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);
2189 credits = G_RSPD_TXQ0_CR(flags);
2191 qs->txq[TXQ_ETH].processed += credits;
2194 * In the normal Linux driver t3_eth_xmit() routine, we call
2195 * skb_orphan() on unshared TX skb. This results in a call to
2196 * the destructor for the skb which frees up the send buffer
2197 * space it was holding down. This, in turn, allows the
2198 * application to make forward progress generating more data
2199 * which is important at 10Gb/s. For Virtual Machine Guest
2200 * Operating Systems this doesn't work since the send buffer
2201 * space is being held down in the Virtual Machine. Thus we
2202 * need to get the TX skb's freed up as soon as possible in
2203 * order to prevent applications from stalling.
2205 * This code is largely copied from the corresponding code in
2206 * sge_timer_tx() and should probably be kept in sync with any changes there.
2209 if (__netif_tx_trylock(qs->tx_q)) {
2210 struct port_info *pi = netdev_priv(qs->netdev);
2211 struct adapter *adap = pi->adapter;
2213 reclaim_completed_tx(adap, &qs->txq[TXQ_ETH],
2215 __netif_tx_unlock(qs->tx_q);
2220 credits = G_RSPD_TXQ2_CR(flags);
2222 qs->txq[TXQ_CTRL].processed += credits;
2225 if (flags & F_RSPD_TXQ1_GTS)
2226 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);
2228 credits = G_RSPD_TXQ1_CR(flags);
2230 qs->txq[TXQ_OFLD].processed += credits;
2234 * check_ring_db - check if we need to ring any doorbells
2235 * @adapter: the adapter
2236 * @qs: the queue set whose Tx queues are to be examined
2237 * @sleeping: indicates which Tx queue sent GTS
2239 * Checks if some of a queue set's Tx queues need to ring their doorbells
2240 * to resume transmission after idling while they still have unprocessed descriptors.
2243 static void check_ring_db(struct adapter *adap, struct sge_qset *qs,
2244 unsigned int sleeping)
2246 if (sleeping & F_RSPD_TXQ0_GTS) {
2247 struct sge_txq *txq = &qs->txq[TXQ_ETH];
2249 if (txq->cleaned + txq->in_use != txq->processed &&
2250 !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
2251 set_bit(TXQ_RUNNING, &txq->flags);
2252 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
2253 V_EGRCNTX(txq->cntxt_id));
2257 if (sleeping & F_RSPD_TXQ1_GTS) {
2258 struct sge_txq *txq = &qs->txq[TXQ_OFLD];
2260 if (txq->cleaned + txq->in_use != txq->processed &&
2261 !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
2262 set_bit(TXQ_RUNNING, &txq->flags);
2263 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
2264 V_EGRCNTX(txq->cntxt_id));
2270 * is_new_response - check if a response is newly written
2271 * @r: the response descriptor
2272 * @q: the response queue
2274 * Returns true if a response descriptor contains a yet unprocessed response.
2277 static inline int is_new_response(const struct rsp_desc *r,
2278 const struct sge_rspq *q)
2280 return (r->intr_gen & F_RSPD_GEN2) == q->gen;
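/*
 * Added illustration (not part of the original driver): is_new_response()
 * works because each rsp_desc carries a generation bit that the hardware
 * toggles every time it wraps the ring. The consumer keeps its expected
 * generation in q->gen and flips it whenever it wraps itself, so a
 * descriptor is "new" exactly when the two match. A minimal sketch of the
 * consumer-side advance, mirroring what process_responses() does below:
 */
static inline void example_rspq_advance(struct sge_rspq *q,
					const struct rsp_desc **r)
{
	if (unlikely(++q->cidx == q->size)) {
		q->cidx = 0;
		q->gen ^= 1;		/* expected generation flips on wrap */
		*r = q->desc;		/* back to the start of the ring */
	} else {
		(*r)++;
	}
}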
2283 static inline void clear_rspq_bufstate(struct sge_rspq * const q)
2286 q->rx_recycle_buf = 0;
2289 #define RSPD_GTS_MASK (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS)
2290 #define RSPD_CTRL_MASK (RSPD_GTS_MASK | \
2291 V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \
2292 V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \
2293 V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR))
2295 /* How long to delay the next interrupt in case of memory shortage, in 0.1us. */
2296 #define NOMEM_INTR_DELAY 2500
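/* Added note: 2500 ticks of 0.1 us each, i.e. a 250 us interrupt holdoff while buffers are short. */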
2299 * process_responses - process responses from an SGE response queue
2300 * @adap: the adapter
2301 * @qs: the queue set to which the response queue belongs
2302 * @budget: how many responses can be processed in this round
2304 * Process responses from an SGE response queue up to the supplied budget.
2305 * Responses include received packets as well as credits and other events
2306 * for the queues that belong to the response queue's queue set.
2307 * A negative budget is effectively unlimited.
2309 * Additionally choose the interrupt holdoff time for the next interrupt
2310 * on this queue. If the system is under memory shortage use a fairly
2311 * long delay to help recovery.
2313 static int process_responses(struct adapter *adap, struct sge_qset *qs,
2316 struct sge_rspq *q = &qs->rspq;
2317 struct rsp_desc *r = &q->desc[q->cidx];
2318 int budget_left = budget;
2319 unsigned int sleeping = 0;
2320 struct sk_buff *offload_skbs[RX_BUNDLE_SIZE];
2323 q->next_holdoff = q->holdoff_tmr;
2325 while (likely(budget_left && is_new_response(r, q))) {
2326 int packet_complete, eth, ethpad = 2;
2327 int lro = !!(qs->netdev->features & NETIF_F_GRO);
2328 struct sk_buff *skb = NULL;
2330 __be32 rss_hi, rss_lo;
2333 eth = r->rss_hdr.opcode == CPL_RX_PKT;
2334 rss_hi = *(const __be32 *)r;
2335 rss_lo = r->rss_hdr.rss_hash_val;
2336 flags = ntohl(r->flags);
2338 if (unlikely(flags & F_RSPD_ASYNC_NOTIF)) {
2339 skb = alloc_skb(AN_PKT_SIZE, GFP_ATOMIC);
2343 memcpy(__skb_put(skb, AN_PKT_SIZE), r, AN_PKT_SIZE);
2344 skb->data[0] = CPL_ASYNC_NOTIF;
2345 rss_hi = htonl(CPL_ASYNC_NOTIF << 24);
2347 } else if (flags & F_RSPD_IMM_DATA_VALID) {
2348 skb = get_imm_packet(r);
2349 if (unlikely(!skb)) {
2351 q->next_holdoff = NOMEM_INTR_DELAY;
2353 /* consume one credit since we tried */
2359 } else if ((len = ntohl(r->len_cq)) != 0) {
2362 lro &= eth && is_eth_tcp(rss_hi);
2364 fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
2365 if (fl->use_pages) {
2366 void *addr = fl->sdesc[fl->cidx].pg_chunk.va;
2369 #if L1_CACHE_BYTES < 128
2370 prefetch(addr + L1_CACHE_BYTES);
2372 __refill_fl(adap, fl);
2374 lro_add_page(adap, qs, fl,
2376 flags & F_RSPD_EOP);
2380 skb = get_packet_pg(adap, fl, q,
2383 SGE_RX_DROP_THRES : 0);
2386 skb = get_packet(adap, fl, G_RSPD_LEN(len),
2387 eth ? SGE_RX_DROP_THRES : 0);
2388 if (unlikely(!skb)) {
2392 } else if (unlikely(r->rss_hdr.opcode == CPL_TRACE_PKT))
2395 if (++fl->cidx == fl->size)
2400 if (flags & RSPD_CTRL_MASK) {
2401 sleeping |= flags & RSPD_GTS_MASK;
2402 handle_rsp_cntrl_info(qs, flags);
2406 if (unlikely(++q->cidx == q->size)) {
2413 if (++q->credits >= (q->size / 4)) {
2414 refill_rspq(adap, q, q->credits);
2418 packet_complete = flags &
2419 (F_RSPD_EOP | F_RSPD_IMM_DATA_VALID |
2420 F_RSPD_ASYNC_NOTIF);
2422 if (skb != NULL && packet_complete) {
2424 rx_eth(adap, q, skb, ethpad, lro);
2427 /* Preserve the RSS info in csum & priority */
2429 skb->priority = rss_lo;
2430 ngathered = rx_offload(&adap->tdev, q, skb,
2435 if (flags & F_RSPD_EOP)
2436 clear_rspq_bufstate(q);
2441 deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered);
2444 check_ring_db(adap, qs, sleeping);
2446 smp_mb(); /* commit Tx queue .processed updates */
2447 if (unlikely(qs->txq_stopped != 0))
2450 budget -= budget_left;
2454 static inline int is_pure_response(const struct rsp_desc *r)
2456 __be32 n = r->flags & htonl(F_RSPD_ASYNC_NOTIF | F_RSPD_IMM_DATA_VALID);
2458 return (n | r->len_cq) == 0;
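/*
 * Added note: "pure" means the descriptor carries no data at all - no async
 * notification, no immediate data and a zero len_cq (so no free-list buffer
 * to consume) - only credit/GTS bookkeeping in its flags.
 */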
2462 * napi_rx_handler - the NAPI handler for Rx processing
2463 * @napi: the napi instance
2464 * @budget: how many packets we can process in this round
2466 * Handler for new data events when using NAPI.
2468 static int napi_rx_handler(struct napi_struct *napi, int budget)
2470 struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
2471 struct adapter *adap = qs->adap;
2472 int work_done = process_responses(adap, qs, budget);
2474 if (likely(work_done < budget)) {
2475 napi_complete(napi);
2478 * Because we don't atomically flush the following
2479 * write it is possible that in very rare cases it can
2480 * reach the device in a way that races with a new
2481 * response being written plus an error interrupt
2482 * causing the NAPI interrupt handler below to return
2483 * unhandled status to the OS. To protect against
2484 * this would require flushing the write and doing
2485 * both the write and the flush with interrupts off.
2486 * Way too expensive and unjustifiable given the
2487 * rarity of the race.
2489 * The race cannot happen at all with MSI-X.
2491 t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
2492 V_NEWTIMER(qs->rspq.next_holdoff) |
2493 V_NEWINDEX(qs->rspq.cidx));
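/*
 * Added illustration (not part of the original driver): napi_rx_handler()
 * above is a standard NAPI poll callback. A minimal sketch of how such a
 * handler is attached to its net_device; in cxgb3 the actual registration
 * happens elsewhere in the driver, and the weight of 64 is an example only.
 */
static inline void example_attach_napi(struct net_device *dev,
				       struct sge_qset *qs)
{
	netif_napi_add(dev, &qs->napi, napi_rx_handler, 64);
	napi_enable(&qs->napi);
}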
2499 * Returns true if the device is already scheduled for polling.
2501 static inline int napi_is_scheduled(struct napi_struct *napi)
2503 return test_bit(NAPI_STATE_SCHED, &napi->state);
2507 * process_pure_responses - process pure responses from a response queue
2508 * @adap: the adapter
2509 * @qs: the queue set owning the response queue
2510 * @r: the first pure response to process
2512 * A simpler version of process_responses() that handles only pure (i.e.,
2513 * non data-carrying) responses. Such responses are too lightweight to
2514 * justify calling a softirq under NAPI, so we handle them specially in
2515 * the interrupt handler. The function is called with a pointer to a
2516 * response, which the caller must ensure is a valid pure response.
2518 * Returns 1 if it encounters a valid data-carrying response, 0 otherwise.
2520 static int process_pure_responses(struct adapter *adap, struct sge_qset *qs,
2523 struct sge_rspq *q = &qs->rspq;
2524 unsigned int sleeping = 0;
2527 u32 flags = ntohl(r->flags);
2530 if (unlikely(++q->cidx == q->size)) {
2537 if (flags & RSPD_CTRL_MASK) {
2538 sleeping |= flags & RSPD_GTS_MASK;
2539 handle_rsp_cntrl_info(qs, flags);
2543 if (++q->credits >= (q->size / 4)) {
2544 refill_rspq(adap, q, q->credits);
2547 if (!is_new_response(r, q))
2550 } while (is_pure_response(r));
2553 check_ring_db(adap, qs, sleeping);
2555 smp_mb(); /* commit Tx queue .processed updates */
2556 if (unlikely(qs->txq_stopped != 0))
2559 return is_new_response(r, q);
2563 * handle_responses - decide what to do with new responses in NAPI mode
2564 * @adap: the adapter
2565 * @q: the response queue
2567 * This is used by the NAPI interrupt handlers to decide what to do with
2568 * new SGE responses. If there are no new responses it returns -1. If
2569 * there are new responses and they are pure (i.e., non-data carrying)
2570 * it handles them straight in hard interrupt context as they are very
2571 * cheap and don't deliver any packets. Finally, if there are any data
2572 * signaling responses it schedules the NAPI handler. Returns 1 if it
2573 * schedules NAPI, 0 if all new responses were pure.
2575 * The caller must ascertain NAPI is not already running.
2577 static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
2579 struct sge_qset *qs = rspq_to_qset(q);
2580 struct rsp_desc *r = &q->desc[q->cidx];
2582 if (!is_new_response(r, q))
2585 if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) {
2586 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2587 V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
2590 napi_schedule(&qs->napi);
2595 * The MSI-X interrupt handler for an SGE response queue for the non-NAPI case
2596 * (i.e., response queue serviced in hard interrupt).
2598 static irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
2600 struct sge_qset *qs = cookie;
2601 struct adapter *adap = qs->adap;
2602 struct sge_rspq *q = &qs->rspq;
2604 spin_lock(&q->lock);
2605 if (process_responses(adap, qs, -1) == 0)
2606 q->unhandled_irqs++;
2607 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2608 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2609 spin_unlock(&q->lock);
2614 * The MSI-X interrupt handler for an SGE response queue for the NAPI case
2615 * (i.e., response queue serviced by NAPI polling).
2617 static irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
2619 struct sge_qset *qs = cookie;
2620 struct sge_rspq *q = &qs->rspq;
2622 spin_lock(&q->lock);
2624 if (handle_responses(qs->adap, q) < 0)
2625 q->unhandled_irqs++;
2626 spin_unlock(&q->lock);
2631 * The non-NAPI MSI interrupt handler. This needs to handle data events from
2632 * SGE response queues as well as error and other async events as they all use
2633 * the same MSI vector. We use one SGE response queue per port in this mode
2634 * and protect all response queues with queue 0's lock.
2636 static irqreturn_t t3_intr_msi(int irq, void *cookie)
2638 int new_packets = 0;
2639 struct adapter *adap = cookie;
2640 struct sge_rspq *q = &adap->sge.qs[0].rspq;
2642 spin_lock(&q->lock);
2644 if (process_responses(adap, &adap->sge.qs[0], -1)) {
2645 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2646 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2650 if (adap->params.nports == 2 &&
2651 process_responses(adap, &adap->sge.qs[1], -1)) {
2652 struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2654 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q1->cntxt_id) |
2655 V_NEWTIMER(q1->next_holdoff) |
2656 V_NEWINDEX(q1->cidx));
2660 if (!new_packets && t3_slow_intr_handler(adap) == 0)
2661 q->unhandled_irqs++;
2663 spin_unlock(&q->lock);
2667 static int rspq_check_napi(struct sge_qset *qs)
2669 struct sge_rspq *q = &qs->rspq;
2671 if (!napi_is_scheduled(&qs->napi) &&
2672 is_new_response(&q->desc[q->cidx], q)) {
2673 napi_schedule(&qs->napi);
2680 * The MSI interrupt handler for the NAPI case (i.e., response queues serviced
2681 * by NAPI polling). Handles data events from SGE response queues as well as
2682 * error and other async events as they all use the same MSI vector. We use
2683 * one SGE response queue per port in this mode and protect all response
2684 * queues with queue 0's lock.
2686 static irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
2689 struct adapter *adap = cookie;
2690 struct sge_rspq *q = &adap->sge.qs[0].rspq;
2692 spin_lock(&q->lock);
2694 new_packets = rspq_check_napi(&adap->sge.qs[0]);
2695 if (adap->params.nports == 2)
2696 new_packets += rspq_check_napi(&adap->sge.qs[1]);
2697 if (!new_packets && t3_slow_intr_handler(adap) == 0)
2698 q->unhandled_irqs++;
2700 spin_unlock(&q->lock);
2705 * A helper function that processes responses and issues GTS.
2707 static inline int process_responses_gts(struct adapter *adap,
2708 struct sge_rspq *rq)
2712 work = process_responses(adap, rspq_to_qset(rq), -1);
2713 t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) |
2714 V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx));
2719 * The legacy INTx interrupt handler. This needs to handle data events from
2720 * SGE response queues as well as error and other async events as they all use
2721 * the same interrupt pin. We use one SGE response queue per port in this mode
2722 * and protect all response queues with queue 0's lock.
2724 static irqreturn_t t3_intr(int irq, void *cookie)
2726 int work_done, w0, w1;
2727 struct adapter *adap = cookie;
2728 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2729 struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2731 spin_lock(&q0->lock);
2733 w0 = is_new_response(&q0->desc[q0->cidx], q0);
2734 w1 = adap->params.nports == 2 &&
2735 is_new_response(&q1->desc[q1->cidx], q1);
2737 if (likely(w0 | w1)) {
2738 t3_write_reg(adap, A_PL_CLI, 0);
2739 t3_read_reg(adap, A_PL_CLI); /* flush */
2742 process_responses_gts(adap, q0);
2745 process_responses_gts(adap, q1);
2747 work_done = w0 | w1;
2749 work_done = t3_slow_intr_handler(adap);
2751 spin_unlock(&q0->lock);
2752 return IRQ_RETVAL(work_done != 0);
2756 * Interrupt handler for legacy INTx interrupts for T3B-based cards.
2757 * Handles data events from SGE response queues as well as error and other
2758 * async events as they all use the same interrupt pin. We use one SGE
2759 * response queue per port in this mode and protect all response queues with queue 0's lock.
2762 static irqreturn_t t3b_intr(int irq, void *cookie)
2765 struct adapter *adap = cookie;
2766 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2768 t3_write_reg(adap, A_PL_CLI, 0);
2769 map = t3_read_reg(adap, A_SG_DATA_INTR);
2771 if (unlikely(!map)) /* shared interrupt, most likely */
2774 spin_lock(&q0->lock);
2776 if (unlikely(map & F_ERRINTR))
2777 t3_slow_intr_handler(adap);
2779 if (likely(map & 1))
2780 process_responses_gts(adap, q0);
2783 process_responses_gts(adap, &adap->sge.qs[1].rspq);
2785 spin_unlock(&q0->lock);
2790 * NAPI interrupt handler for legacy INTx interrupts for T3B-based cards.
2791 * Handles data events from SGE response queues as well as error and other
2792 * async events as they all use the same interrupt pin. We use one SGE
2793 * response queue per port in this mode and protect all response queues with queue 0's lock.
2796 static irqreturn_t t3b_intr_napi(int irq, void *cookie)
2799 struct adapter *adap = cookie;
2800 struct sge_qset *qs0 = &adap->sge.qs[0];
2801 struct sge_rspq *q0 = &qs0->rspq;
2803 t3_write_reg(adap, A_PL_CLI, 0);
2804 map = t3_read_reg(adap, A_SG_DATA_INTR);
2806 if (unlikely(!map)) /* shared interrupt, most likely */
2809 spin_lock(&q0->lock);
2811 if (unlikely(map & F_ERRINTR))
2812 t3_slow_intr_handler(adap);
2814 if (likely(map & 1))
2815 napi_schedule(&qs0->napi);
2818 napi_schedule(&adap->sge.qs[1].napi);
2820 spin_unlock(&q0->lock);
2825 * t3_intr_handler - select the top-level interrupt handler
2826 * @adap: the adapter
2827 * @polling: whether using NAPI to service response queues
2829 * Selects the top-level interrupt handler based on the type of interrupts
2830 * (MSI-X, MSI, or legacy) and whether NAPI will be used to service the response queues.
2833 irq_handler_t t3_intr_handler(struct adapter *adap, int polling)
2835 if (adap->flags & USING_MSIX)
2836 return polling ? t3_sge_intr_msix_napi : t3_sge_intr_msix;
2837 if (adap->flags & USING_MSI)
2838 return polling ? t3_intr_msi_napi : t3_intr_msi;
2839 if (adap->params.rev > 0)
2840 return polling ? t3b_intr_napi : t3b_intr;
2841 return t3_intr;
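/*
 * Added illustration (not part of the original driver): the handler picked
 * by t3_intr_handler() is what gets registered with the kernel. A minimal
 * sketch assuming a single (INTx/MSI) vector; the real registration,
 * including the per-queue-set MSI-X case, lives in cxgb3_main.c, and the
 * flags and name below are examples only.
 */
static inline int example_register_irq(struct adapter *adap, int polling)
{
	return request_irq(adap->pdev->irq, t3_intr_handler(adap, polling),
			   IRQF_SHARED, "cxgb3", adap);
}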
2844 #define SGE_PARERR (F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
2845 F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
2846 V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
2847 F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
2849 #define SGE_FRAMINGERR (F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR)
2850 #define SGE_FATALERR (SGE_PARERR | SGE_FRAMINGERR | F_RSPQCREDITOVERFOW | \
2854 * t3_sge_err_intr_handler - SGE async event interrupt handler
2855 * @adapter: the adapter
2857 * Interrupt handler for SGE asynchronous (non-data) events.
2859 void t3_sge_err_intr_handler(struct adapter *adapter)
2861 unsigned int v, status = t3_read_reg(adapter, A_SG_INT_CAUSE) &
2864 if (status & SGE_PARERR)
2865 CH_ALERT(adapter, "SGE parity error (0x%x)\n",
2866 status & SGE_PARERR);
2867 if (status & SGE_FRAMINGERR)
2868 CH_ALERT(adapter, "SGE framing error (0x%x)\n",
2869 status & SGE_FRAMINGERR);
2871 if (status & F_RSPQCREDITOVERFOW)
2872 CH_ALERT(adapter, "SGE response queue credit overflow\n");
2874 if (status & F_RSPQDISABLED) {
2875 v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS);
2878 "packet delivered to disabled response queue "
2879 "(0x%x)\n", (v >> S_RSPQ0DISABLED) & 0xff);
2882 if (status & (F_HIPIODRBDROPERR | F_LOPIODRBDROPERR))
2883 queue_work(cxgb3_wq, &adapter->db_drop_task);
2885 if (status & (F_HIPRIORITYDBFULL | F_LOPRIORITYDBFULL))
2886 queue_work(cxgb3_wq, &adapter->db_full_task);
2888 if (status & (F_HIPRIORITYDBEMPTY | F_LOPRIORITYDBEMPTY))
2889 queue_work(cxgb3_wq, &adapter->db_empty_task);
2891 t3_write_reg(adapter, A_SG_INT_CAUSE, status);
2892 if (status & SGE_FATALERR)
2893 t3_fatal_err(adapter);
2897 * sge_timer_tx - perform periodic maintenance of an SGE qset
2898 * @data: the SGE queue set to maintain
2900 * Runs periodically from a timer to perform maintenance of an SGE queue
2901 * set. It performs a single task:
2903 * Cleans up any completed Tx descriptors that may still be pending.
2904 * Normal descriptor cleanup happens when new packets are added to a Tx
2905 * queue so this timer is relatively infrequent and does any cleanup only
2906 * if the Tx queue has not seen any new packets in a while. We make a
2907 * best effort attempt to reclaim descriptors, in that we don't wait
2908 * around if we cannot get a queue's lock (which most likely is because
2909 * someone else is queueing new packets and so will also handle the clean
2910 * up). Since control queues use immediate data exclusively we don't
2911 * bother cleaning them up here.
2914 static void sge_timer_tx(unsigned long data)
2916 struct sge_qset *qs = (struct sge_qset *)data;
2917 struct port_info *pi = netdev_priv(qs->netdev);
2918 struct adapter *adap = pi->adapter;
2919 unsigned int tbd[SGE_TXQ_PER_SET] = {0, 0};
2920 unsigned long next_period;
2922 if (__netif_tx_trylock(qs->tx_q)) {
2923 tbd[TXQ_ETH] = reclaim_completed_tx(adap, &qs->txq[TXQ_ETH],
2924 TX_RECLAIM_TIMER_CHUNK);
2925 __netif_tx_unlock(qs->tx_q);
2928 if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) {
2929 tbd[TXQ_OFLD] = reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD],
2930 TX_RECLAIM_TIMER_CHUNK);
2931 spin_unlock(&qs->txq[TXQ_OFLD].lock);
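	/*
	 * Added note: the next period backs off based on how much work
	 * reclaim_completed_tx() reported (tbd). With TX_RECLAIM_PERIOD =
	 * HZ/4 and TX_RECLAIM_TIMER_CHUNK = 64, a max(tbd) below 64 keeps
	 * the full period, 64-127 halves it, 128-191 quarters it, and so on,
	 * so the timer re-arms sooner when more reclaim work is outstanding.
	 */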
2934 next_period = TX_RECLAIM_PERIOD >>
2935 (max(tbd[TXQ_ETH], tbd[TXQ_OFLD]) /
2936 TX_RECLAIM_TIMER_CHUNK);
2937 mod_timer(&qs->tx_reclaim_timer, jiffies + next_period);
2941 * sge_timer_rx - perform periodic maintenance of an SGE qset
2942 * @data: the SGE queue set to maintain
2944 * a) Replenishes Rx queues that have run out due to memory shortage.
2945 * Normally new Rx buffers are added when existing ones are consumed but
2946 * when out of memory a queue can become empty. We try to add only a few
2947 * buffers here; the queue will be replenished fully as these new buffers
2948 * are used up if memory shortage has subsided.
2950 * b) Returns coalesced response queue credits in case a response queue is starved.
2954 static void sge_timer_rx(unsigned long data)
2957 struct sge_qset *qs = (struct sge_qset *)data;
2958 struct port_info *pi = netdev_priv(qs->netdev);
2959 struct adapter *adap = pi->adapter;
2962 lock = adap->params.rev > 0 ?
2963 &qs->rspq.lock : &adap->sge.qs[0].rspq.lock;
2965 if (!spin_trylock_irq(lock))
2968 if (napi_is_scheduled(&qs->napi))
2971 if (adap->params.rev < 4) {
2972 status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);
2974 if (status & (1 << qs->rspq.cntxt_id)) {
2976 if (qs->rspq.credits) {
2978 refill_rspq(adap, &qs->rspq, 1);
2979 qs->rspq.restarted++;
2980 t3_write_reg(adap, A_SG_RSPQ_FL_STATUS,
2981 1 << qs->rspq.cntxt_id);
2986 if (qs->fl[0].credits < qs->fl[0].size)
2987 __refill_fl(adap, &qs->fl[0]);
2988 if (qs->fl[1].credits < qs->fl[1].size)
2989 __refill_fl(adap, &qs->fl[1]);
2992 spin_unlock_irq(lock);
2994 mod_timer(&qs->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD);
2998 * t3_update_qset_coalesce - update coalescing settings for a queue set
2999 * @qs: the SGE queue set
3000 * @p: new queue set parameters
3002 * Update the coalescing settings for an SGE queue set. Nothing is done
3003 * if the queue set is not initialized yet.
3005 void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
3007 qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U);/* can't be 0 */
3008 qs->rspq.polling = p->polling;
3009 qs->napi.poll = p->polling ? napi_rx_handler : ofld_poll;
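/*
 * Added note: the holdoff timer is programmed in 0.1 us units (see the
 * A_SG_TIMER_TICK setup of core_ticks_per_usec / 10 in t3_sge_init()),
 * hence the multiply by 10 above. With the default coalesce_usecs of 5
 * from t3_sge_prep() this gives holdoff_tmr = 50, i.e. a 5 us holdoff.
 */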
3013 * t3_sge_alloc_qset - initialize an SGE queue set
3014 * @adapter: the adapter
3015 * @id: the queue set id
3016 * @nports: how many Ethernet ports will be using this queue set
3017 * @irq_vec_idx: the IRQ vector index for response queue interrupts
3018 * @p: configuration parameters for this queue set
3019 * @ntxq: number of Tx queues for the queue set
3020 * @dev: net device associated with this queue set
3021 * @netdevq: net device TX queue associated with this queue set
3023 * Allocate resources and initialize an SGE queue set. A queue set
3024 * comprises a response queue, two Rx free-buffer queues, and up to 3
3025 * Tx queues. The Tx queues are assigned roles in the order Ethernet
3026 * queue, offload queue, and control queue.
3028 int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
3029 int irq_vec_idx, const struct qset_params *p,
3030 int ntxq, struct net_device *dev,
3031 struct netdev_queue *netdevq)
3033 int i, avail, ret = -ENOMEM;
3034 struct sge_qset *q = &adapter->sge.qs[id];
3036 init_qset_cntxt(q, id);
3037 setup_timer(&q->tx_reclaim_timer, sge_timer_tx, (unsigned long)q);
3038 setup_timer(&q->rx_reclaim_timer, sge_timer_rx, (unsigned long)q);
3040 q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size,
3041 sizeof(struct rx_desc),
3042 sizeof(struct rx_sw_desc),
3043 &q->fl[0].phys_addr, &q->fl[0].sdesc);
3047 q->fl[1].desc = alloc_ring(adapter->pdev, p->jumbo_size,
3048 sizeof(struct rx_desc),
3049 sizeof(struct rx_sw_desc),
3050 &q->fl[1].phys_addr, &q->fl[1].sdesc);
3054 q->rspq.desc = alloc_ring(adapter->pdev, p->rspq_size,
3055 sizeof(struct rsp_desc), 0,
3056 &q->rspq.phys_addr, NULL);
3060 for (i = 0; i < ntxq; ++i) {
3062 * The control queue always uses immediate data so does not
3063 * need to keep track of any sk_buffs.
3065 size_t sz = i == TXQ_CTRL ? 0 : sizeof(struct tx_sw_desc);
3067 q->txq[i].desc = alloc_ring(adapter->pdev, p->txq_size[i],
3068 sizeof(struct tx_desc), sz,
3069 &q->txq[i].phys_addr,
3071 if (!q->txq[i].desc)
3075 q->txq[i].size = p->txq_size[i];
3076 spin_lock_init(&q->txq[i].lock);
3077 skb_queue_head_init(&q->txq[i].sendq);
3080 tasklet_init(&q->txq[TXQ_OFLD].qresume_tsk, restart_offloadq,
3082 tasklet_init(&q->txq[TXQ_CTRL].qresume_tsk, restart_ctrlq,
3085 q->fl[0].gen = q->fl[1].gen = 1;
3086 q->fl[0].size = p->fl_size;
3087 q->fl[1].size = p->jumbo_size;
3090 q->rspq.size = p->rspq_size;
3091 spin_lock_init(&q->rspq.lock);
3092 skb_queue_head_init(&q->rspq.rx_queue);
3094 q->txq[TXQ_ETH].stop_thres = nports *
3095 flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3);
3097 #if FL0_PG_CHUNK_SIZE > 0
3098 q->fl[0].buf_size = FL0_PG_CHUNK_SIZE;
3100 q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + sizeof(struct cpl_rx_data);
3102 #if FL1_PG_CHUNK_SIZE > 0
3103 q->fl[1].buf_size = FL1_PG_CHUNK_SIZE;
3105 q->fl[1].buf_size = is_offload(adapter) ?
3106 (16 * 1024) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
3107 MAX_FRAME_SIZE + 2 + sizeof(struct cpl_rx_pkt);
3110 q->fl[0].use_pages = FL0_PG_CHUNK_SIZE > 0;
3111 q->fl[1].use_pages = FL1_PG_CHUNK_SIZE > 0;
3112 q->fl[0].order = FL0_PG_ORDER;
3113 q->fl[1].order = FL1_PG_ORDER;
3114 q->fl[0].alloc_size = FL0_PG_ALLOC_SIZE;
3115 q->fl[1].alloc_size = FL1_PG_ALLOC_SIZE;
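	/*
	 * Added note: with 4 KB pages the defaults above work out roughly as
	 * follows. FL0 carves FL0_PG_CHUNK_SIZE = 2048 byte chunks out of an
	 * order-0 page (two buffers per page, minus the SGE_PG_RSVD
	 * reservation), while FL1 uses FL1_PG_CHUNK_SIZE = 8192 byte chunks
	 * from an order-1 allocation (one buffer per allocation). A chunk
	 * size of 0 makes the free list fall back to plain sk_buffs.
	 */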
3117 spin_lock_irq(&adapter->sge.reg_lock);
3119 /* FL threshold comparison uses < */
3120 ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx,
3121 q->rspq.phys_addr, q->rspq.size,
3122 q->fl[0].buf_size - SGE_PG_RSVD, 1, 0);
3126 for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
3127 ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0,
3128 q->fl[i].phys_addr, q->fl[i].size,
3129 q->fl[i].buf_size - SGE_PG_RSVD,
3130 p->cong_thres, 1, 0);
3135 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_ETH].cntxt_id, USE_GTS,
3136 SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr,
3137 q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token,
3143 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_OFLD].cntxt_id,
3144 USE_GTS, SGE_CNTXT_OFLD, id,
3145 q->txq[TXQ_OFLD].phys_addr,
3146 q->txq[TXQ_OFLD].size, 0, 1, 0);
3152 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_CTRL].cntxt_id, 0,
3154 q->txq[TXQ_CTRL].phys_addr,
3155 q->txq[TXQ_CTRL].size,
3156 q->txq[TXQ_CTRL].token, 1, 0);
3161 spin_unlock_irq(&adapter->sge.reg_lock);
3166 t3_update_qset_coalesce(q, p);
3168 avail = refill_fl(adapter, &q->fl[0], q->fl[0].size,
3169 GFP_KERNEL | __GFP_COMP);
3171 CH_ALERT(adapter, "free list queue 0 initialization failed\n");
3174 if (avail < q->fl[0].size)
3175 CH_WARN(adapter, "free list queue 0 enabled with %d credits\n",
3178 avail = refill_fl(adapter, &q->fl[1], q->fl[1].size,
3179 GFP_KERNEL | __GFP_COMP);
3180 if (avail < q->fl[1].size)
3181 CH_WARN(adapter, "free list queue 1 enabled with %d credits\n",
3183 refill_rspq(adapter, &q->rspq, q->rspq.size - 1);
3185 t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
3186 V_NEWTIMER(q->rspq.holdoff_tmr));
3191 spin_unlock_irq(&adapter->sge.reg_lock);
3193 t3_free_qset(adapter, q);
3198 * t3_start_sge_timers - start SGE timer callbacks
3199 * @adap: the adapter
3201 * Starts each SGE queue set's timer callbacks
3203 void t3_start_sge_timers(struct adapter *adap)
3207 for (i = 0; i < SGE_QSETS; ++i) {
3208 struct sge_qset *q = &adap->sge.qs[i];
3210 if (q->tx_reclaim_timer.function)
3211 mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
3213 if (q->rx_reclaim_timer.function)
3214 mod_timer(&q->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD);
3219 * t3_stop_sge_timers - stop SGE timer callbacks
3220 * @adap: the adapter
3222 * Stops each SGE queue set's timer callbacks
3224 void t3_stop_sge_timers(struct adapter *adap)
3228 for (i = 0; i < SGE_QSETS; ++i) {
3229 struct sge_qset *q = &adap->sge.qs[i];
3231 if (q->tx_reclaim_timer.function)
3232 del_timer_sync(&q->tx_reclaim_timer);
3233 if (q->rx_reclaim_timer.function)
3234 del_timer_sync(&q->rx_reclaim_timer);
3239 * t3_free_sge_resources - free SGE resources
3240 * @adap: the adapter
3242 * Frees resources used by the SGE queue sets.
3244 void t3_free_sge_resources(struct adapter *adap)
3248 for (i = 0; i < SGE_QSETS; ++i)
3249 t3_free_qset(adap, &adap->sge.qs[i]);
3253 * t3_sge_start - enable SGE
3254 * @adap: the adapter
3256 * Enables the SGE for DMAs. This is the last step in starting packet transfers.
3259 void t3_sge_start(struct adapter *adap)
3261 t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE);
3265 * t3_sge_stop - disable SGE operation
3266 * @adap: the adapter
3268 * Disables the DMA engine. This can be called in emergencies (e.g.,
3269 * from error interrupts) or from normal process context. In the latter
3270 * case it also disables any pending queue restart tasklets. Note that
3271 * if it is called in interrupt context it cannot disable the restart
3272 * tasklets as it cannot wait; however, the tasklets will have no effect
3273 * since the doorbells are disabled and the driver will call this again
3274 * later from process context, at which time the tasklets will be stopped
3275 * if they are still running.
3277 void t3_sge_stop(struct adapter *adap)
3279 t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, 0);
3280 if (!in_interrupt()) {
3283 for (i = 0; i < SGE_QSETS; ++i) {
3284 struct sge_qset *qs = &adap->sge.qs[i];
3286 tasklet_kill(&qs->txq[TXQ_OFLD].qresume_tsk);
3287 tasklet_kill(&qs->txq[TXQ_CTRL].qresume_tsk);
3293 * t3_sge_init - initialize SGE
3294 * @adap: the adapter
3295 * @p: the SGE parameters
3297 * Performs SGE initialization needed every time after a chip reset.
3298 * We do not initialize any of the queue sets here; instead, the driver
3299 * top-level must request those individually. We also do not enable DMA
3300 * here, that should be done after the queues have been set up.
3302 void t3_sge_init(struct adapter *adap, struct sge_params *p)
3304 unsigned int ctrl, ups = ffs(pci_resource_len(adap->pdev, 2) >> 12);
3306 ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL |
3307 F_CQCRDTCTRL | F_CONGMODE | F_TNLFLMODE | F_FATLPERREN |
3308 V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS |
3309 V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING;
3310 #if SGE_NUM_GENBITS == 1
3311 ctrl |= F_EGRGENCTRL;
3313 if (adap->params.rev > 0) {
3314 if (!(adap->flags & (USING_MSIX | USING_MSI)))
3315 ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ;
3317 t3_write_reg(adap, A_SG_CONTROL, ctrl);
3318 t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) |
3319 V_LORCQDRBTHRSH(512));
3320 t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10);
3321 t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) |
3322 V_TIMEOUT(200 * core_ticks_per_usec(adap)));
3323 t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH,
3324 adap->params.rev < T3_REV_C ? 1000 : 500);
3325 t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256);
3326 t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000);
3327 t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256);
3328 t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff));
3329 t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024);
3333 * t3_sge_prep - one-time SGE initialization
3334 * @adap: the associated adapter
3335 * @p: SGE parameters
3337 * Performs one-time initialization of SGE SW state. Includes determining
3338 * defaults for the assorted SGE parameters, which admins can change until
3339 * they are used to initialize the SGE.
3341 void t3_sge_prep(struct adapter *adap, struct sge_params *p)
3345 p->max_pkt_size = (16 * 1024) - sizeof(struct cpl_rx_data) -
3346 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3348 for (i = 0; i < SGE_QSETS; ++i) {
3349 struct qset_params *q = p->qset + i;
3351 q->polling = adap->params.rev > 0;
3352 q->coalesce_usecs = 5;
3353 q->rspq_size = 1024;
3355 q->jumbo_size = 512;
3356 q->txq_size[TXQ_ETH] = 1024;
3357 q->txq_size[TXQ_OFLD] = 1024;
3358 q->txq_size[TXQ_CTRL] = 256;
3362 spin_lock_init(&adap->sge.reg_lock);
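/*
 * Added illustration (not part of the original driver): the bring-up order
 * implied by the comments in this file. The single queue set, ids and
 * counts below are example choices only; the real sequence is driven from
 * cxgb3_main.c.
 */
static inline int example_sge_bring_up(struct adapter *adap,
					struct sge_params *p,
					const struct qset_params *qp,
					struct net_device *dev)
{
	int err;

	t3_sge_prep(adap, p);		/* one-time SW defaults */
	t3_sge_init(adap, p);		/* program SGE registers after reset */

	err = t3_sge_alloc_qset(adap, 0, 1, 0, qp, SGE_TXQ_PER_SET, dev,
				netdev_get_tx_queue(dev, 0));
	if (err)
		return err;

	t3_start_sge_timers(adap);	/* arm the reclaim/refill timers */
	t3_sge_start(adap);		/* enable DMA last */
	return 0;
}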