1 /****************************************************************************
2 * Copyright 2002-2005: Level 5 Networks Inc.
3 * Copyright 2005-2008: Solarflare Communications Inc,
4 * 9501 Jeronimo Road, Suite 250,
5 * Irvine, CA 92618, USA
7 * Maintained by Solarflare Communications
8 * <linux-xen-drivers@solarflare.com>
9 * <onload-dev@solarflare.com>
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License version 2 as published
13 * by the Free Software Foundation, incorporated herein by reference.
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
23 ****************************************************************************
28 * \brief Routine to poll event queues.
33 #include "ef_vi_internal.h"
35 /* Be worried about this on byteswapped machines */
36 /* Due to crazy chipsets, we see the event words being written in
37 ** arbitrary order (bug4539). So test for presence of event must ensure
38 ** that both halves have changed from the null.
/* An empty ("null") event slot holds all-ones in both 32-bit halves.
** EF_VI_IS_EVENT(evp) is true only once BOTH halves differ from the
** null pattern, so a half-written event is not treated as present.
** NOTE(review): the byteswap concern above is unresolved here — the
** comparison is against (uint32_t)-1, which is byte-order neutral, but
** confirm callers interpret the halves consistently on BE machines. */
40 # define EF_VI_IS_EVENT(evp) \
41 ( (((evp)->opaque.a != (uint32_t)-1) && \
42 ((evp)->opaque.b != (uint32_t)-1)) )
52 /*! Check for RX events with inconsistent SOP/CONT
54 ** Returns true if this event should be discarded
/* NOTE(review): this listing is elided — numbered rows (opening brace,
** the declaration of `bad_sop`, the `else`, and the final return) are
** missing between the lines below; only visible statements are
** documented.
**
** Tracks start-of-packet (SOP) / jumbo-continuation consistency per RX
** queue, using the per-queue rx_dup_state slot selected by the event's
** RX_EV_Q_LABEL field. `bad_sop` latches once set and is only cleared
** by a subsequent event with SOP set. */
56 ef_vi_inline int ef_eventq_is_rx_sop_cont_bad_efab(ef_vi* vi,
57 const ef_vi_qword* ev)
59 ef_rx_dup_state_t* rx_dup_state;
/* Demultiplex on the event's queue label: one state slot per DMA queue. */
62 unsigned label = QWORD_GET_U(RX_EV_Q_LABEL, *ev);
63 unsigned sop = QWORD_TEST_BIT(RX_SOP, *ev);
66 ef_assert_lt(label, EFAB_DMAQS_PER_EVQ_MAX);
68 rx_dup_state = &vi->evq_state->rx_dup_state[label];
69 bad_sop = &rx_dup_state->bad_sop;
/* Fast path: when neither the bug5692 workaround nor a debug build is
* active, only maintain the latched bad_sop flag (cleared on SOP). */
71 if( ! ((vi->vi_flags & EF_VI_BUG5692_WORKAROUND) || IS_DEBUG) ) {
72 *bad_sop = (*bad_sop && !sop);
/* Workaround/debug path (presumably the elided `else`): additionally
* track the jumbo fragment number, flagging any event where SOP does
* not coincide with frag_num == 0 (or vice versa). */
75 unsigned cont = QWORD_TEST_BIT(RX_JUMBO_CONT, *ev);
76 uint8_t *frag_num = &rx_dup_state->frag_num;
78 /* bad_sop should latch till the next sop */
79 *bad_sop = (*bad_sop && !sop) || ( !!sop != (*frag_num==0) );
81 /* we do not check the number of bytes relative to the
82 * fragment number and size of the user rx buffer here
83 * because we don't know the size of the user rx
84 * buffer - we probably should perform this check in
85 * the nearest code calling this though.
/* Fragment counter advances while CONT is set; resets when the jumbo
* sequence ends. */
87 *frag_num = cont ? (*frag_num + 1) : 0;
/* Detect a duplicated RX descriptor pointer on queue `q_id`.  A repeated
** RX_EV_DESC_PTR signals that a jumbo frame was truncated because the
** descriptor ring ran dry; in that case the event is rewritten as an
** EF_EVENT_TYPE_RX_NO_DESC_TRUNC event and (per the elided tail,
** presumably) non-zero is returned so the caller skips normal RX decode.
** NOTE(review): the opening brace, the fast-path `return`, and the
** function tail are elided from this listing. */
94 ef_vi_inline int falcon_rx_check_dup(ef_vi* evq, ef_event* ev_out,
95 const ef_vi_qword* ev)
97 unsigned q_id = QWORD_GET_U(RX_EV_Q_LABEL, *ev);
98 uint16_t desc_ptr = QWORD_GET_U(RX_EV_DESC_PTR, *ev);
99 ef_rx_dup_state_t* rx_dup_state = &evq->evq_state->rx_dup_state[q_id];
/* Common case: descriptor pointer advanced — not a duplicate. */
101 if(likely( desc_ptr != rx_dup_state->rx_last_desc_ptr )) {
102 rx_dup_state->rx_last_desc_ptr = desc_ptr;
/* Duplicate: record state so SOP/CONT tracking treats following events
* of this (aborted) frame as bad until the next SOP. */
106 rx_dup_state->rx_last_desc_ptr = desc_ptr;
107 rx_dup_state->bad_sop = 1;
109 rx_dup_state->frag_num = 0;
/* Sanity checks: a dup must carry FRM_TRUNC, not PKT_OK, and a zero
* byte count.  NOTE(review): line 113 parses as
* `(!count) == 0`, i.e. fires when count != 0 — correct, but the
* operator precedence is misleading; `count != 0` would be clearer. */
111 BUG_ON(!QWORD_TEST_BIT(RX_EV_FRM_TRUNC, *ev));
112 BUG_ON( QWORD_TEST_BIT(RX_EV_PKT_OK, *ev));
113 BUG_ON(!QWORD_GET_U(RX_EV_BYTE_CNT, *ev) == 0);
114 ev_out->rx_no_desc_trunc.type = EF_EVENT_TYPE_RX_NO_DESC_TRUNC;
115 ev_out->rx_no_desc_trunc.q_id = q_id;
/* Decode a raw RX hardware event into an ef_event.  Good packets
** (RX_EV_PKT_OK set) become EF_EVENT_TYPE_RX; everything else becomes
** EF_EVENT_TYPE_RX_DISCARD with a subtype chosen by the first matching
** error bit.  NOTE(review): the `else` lines joining the branches below
** are elided from this listing. */
120 ef_vi_inline void falcon_rx_event(ef_event* ev_out, const ef_vi_qword* ev)
122 if(likely( QWORD_TEST_BIT(RX_EV_PKT_OK, *ev) )) {
123 ev_out->rx.type = EF_EVENT_TYPE_RX;
124 ev_out->rx.q_id = QWORD_GET_U(RX_EV_Q_LABEL, *ev);
125 ev_out->rx.len = QWORD_GET_U(RX_EV_BYTE_CNT, *ev);
/* Flags: SOP assigns (resetting any stale flags); CONT and ISCSI_OK
* then OR in on top. */
126 if( QWORD_TEST_BIT(RX_SOP, *ev) )
127 ev_out->rx.flags = EF_EVENT_FLAG_SOP;
129 ev_out->rx.flags = 0;
130 if( QWORD_TEST_BIT(RX_JUMBO_CONT, *ev) )
131 ev_out->rx.flags |= EF_EVENT_FLAG_CONT;
132 if( QWORD_TEST_BIT(RX_iSCSI_PKT_OK, *ev) )
133 ev_out->rx.flags |= EF_EVENT_FLAG_ISCSI_OK;
/* Discard path (presumably the elided `else` of PKT_OK). */
136 ev_out->rx_discard.type = EF_EVENT_TYPE_RX_DISCARD;
137 ev_out->rx_discard.q_id = QWORD_GET_U(RX_EV_Q_LABEL, *ev);
138 ev_out->rx_discard.len = QWORD_GET_U(RX_EV_BYTE_CNT, *ev);
139 #if 1 /* hack for ptloop compatibility: ?? TODO purge */
140 if( QWORD_TEST_BIT(RX_SOP, *ev) )
141 ev_out->rx_discard.flags = EF_EVENT_FLAG_SOP;
143 ev_out->rx_discard.flags = 0;
144 if( QWORD_TEST_BIT(RX_JUMBO_CONT, *ev) )
145 ev_out->rx_discard.flags |= EF_EVENT_FLAG_CONT;
146 if( QWORD_TEST_BIT(RX_iSCSI_PKT_OK, *ev) )
147 ev_out->rx_discard.flags |= EF_EVENT_FLAG_ISCSI_OK;
149 /* Order matters here: more fundamental errors first. */
150 if( QWORD_TEST_BIT(RX_EV_BUF_OWNER_ID_ERR, *ev) )
151 ev_out->rx_discard.subtype =
152 EF_EVENT_RX_DISCARD_RIGHTS;
153 else if( QWORD_TEST_BIT(RX_EV_FRM_TRUNC, *ev) )
154 ev_out->rx_discard.subtype =
155 EF_EVENT_RX_DISCARD_TRUNC;
156 else if( QWORD_TEST_BIT(RX_EV_ETH_CRC_ERR, *ev) )
157 ev_out->rx_discard.subtype =
158 EF_EVENT_RX_DISCARD_CRC_BAD;
159 else if( QWORD_TEST_BIT(RX_EV_IP_HDR_CHKSUM_ERR, *ev) )
160 ev_out->rx_discard.subtype =
161 EF_EVENT_RX_DISCARD_CSUM_BAD;
/* Both IP-header and TCP/UDP checksum failures map to the same
* CSUM_BAD subtype. */
162 else if( QWORD_TEST_BIT(RX_EV_TCP_UDP_CHKSUM_ERR, *ev) )
163 ev_out->rx_discard.subtype =
164 EF_EVENT_RX_DISCARD_CSUM_BAD;
166 ev_out->rx_discard.subtype =
167 EF_EVENT_RX_DISCARD_OTHER;
/* Decode a raw TX hardware event into an ef_event: completions
** (TX_EV_COMP) become EF_EVENT_TYPE_TX; otherwise an
** EF_EVENT_TYPE_TX_ERROR with a subtype from the first matching error
** bit.  NOTE(review): the `else` line and the fallthrough for an
** unrecognised error are elided from this listing. */
172 ef_vi_inline void falcon_tx_event(ef_event* ev_out, const ef_vi_qword* ev)
174 /* Danger danger! No matter what we ask for wrt batching, we
175 ** will get a batched event every 16 descriptors, and we also
176 ** get dma-queue-empty events. i.e. Duplicates are expected.
178 ** In addition, if it's been requested in the descriptor, we
179 ** get an event per descriptor. (We don't currently request
182 if(likely( QWORD_TEST_BIT(TX_EV_COMP, *ev) )) {
183 ev_out->tx.type = EF_EVENT_TYPE_TX;
184 ev_out->tx.q_id = QWORD_GET_U(TX_EV_Q_LABEL, *ev);
/* Error path (presumably the elided `else` of TX_EV_COMP). */
187 ev_out->tx_error.type = EF_EVENT_TYPE_TX_ERROR;
188 ev_out->tx_error.q_id = QWORD_GET_U(TX_EV_Q_LABEL, *ev);
189 if(likely( QWORD_TEST_BIT(TX_EV_BUF_OWNER_ID_ERR, *ev) ))
190 ev_out->tx_error.subtype = EF_EVENT_TX_ERROR_RIGHTS;
191 else if(likely( QWORD_TEST_BIT(TX_EV_WQ_FF_FULL, *ev) ))
192 ev_out->tx_error.subtype = EF_EVENT_TX_ERROR_OFLOW;
193 else if(likely( QWORD_TEST_BIT(TX_EV_PKT_TOO_BIG, *ev) ))
194 ev_out->tx_error.subtype = EF_EVENT_TX_ERROR_2BIG;
195 else if(likely( QWORD_TEST_BIT(TX_EV_PKT_ERR, *ev) ))
196 ev_out->tx_error.subtype = EF_EVENT_TX_ERROR_BUS;
/* Clear the RX_EV_PKT_OK bit in the copied raw event word so that a
** subsequent falcon_rx_event() decode classifies this event as a
** discard (used for the SOP/CONT-inconsistency workaround).
** NOTE(review): enclosing braces are elided from this listing. */
201 static void mark_bad(ef_event* ev)
203 ev->generic.ev.u64[0] &=~ ((uint64_t) 1u << RX_EV_PKT_OK_LBN);
/* Poll the event queue: drain up to `evs_len` events from the ring into
** `evs`, decoding RX/TX hardware events and passing anything else to
** the `exception` handler (with `expt_priv`).  Returns the number of
** events written.  NOTE(review): this listing is heavily elided — the
** polling loop structure, case exits, and most of the control flow
** between the numbered lines below are missing; comments describe only
** what is visible. */
207 int ef_eventq_poll_evs(ef_vi* evq, ef_event* evs, int evs_len,
208 ef_event_handler_fn *exception, void *expt_priv)
210 int evs_len_orig = evs_len;
212 EF_VI_CHECK_EVENT_Q(evq);
214 ef_assert_gt(evs_len, 0);
/* An event already present one slot ahead of the unread position means
* the queue has overflowed (see the OFLOW report at the bottom). */
216 if(unlikely( EF_VI_IS_EVENT(EF_VI_EVENT_PTR(evq, 1)) ))
220 { /* Read the event out of the ring, then fiddle with
221 * copied version. Reason is that the ring is
222 * likely to get pushed out of cache by another
223 * event being delivered by hardware. */
224 ef_vi_event* ev = EF_VI_EVENT_PTR(evq, 0);
/* No event at the head: stop polling (the exit itself is elided). */
225 if( ! EF_VI_IS_EVENT(ev) )
227 evs->generic.ev.u64[0] = cpu_to_le64 (ev->u64);
/* Consume the slot: advance the queue pointer and write back the
* all-ones "null" pattern so EF_VI_IS_EVENT() sees it as empty. */
228 evq->evq_state->evq_ptr += sizeof(ef_vi_event);
229 ev->u64 = (uint64_t)(int64_t) -1;
232 /* Ugly: Exploit the fact that event code lies in top
234 ef_assert_ge(EV_CODE_LBN, 32u);
235 switch( evs->generic.ev.u32[1] >> (EV_CODE_LBN - 32u) ) {
236 case RX_IP_EV_DECODE:
237 /* Look for duplicate desc_ptr: it signals
238 * that a jumbo frame was truncated because we
239 * ran out of descriptors. */
240 if(unlikely( falcon_rx_check_dup
241 (evq, evs, &evs->generic.ev) )) {
247 /* Cope with FalconA1 bugs where RX
248 * gives inconsistent RX events Mark
249 * events as bad until SOP becomes
251 * ef_eventq_is_rx_sop_cont_bad() has
252 * side effects - order is important
255 (ef_eventq_is_rx_sop_cont_bad_efab
256 (evq, &evs->generic.ev) )) {
260 falcon_rx_event(evs, &evs->generic.ev);
265 case TX_IP_EV_DECODE:
266 falcon_tx_event(evs, &evs->generic.ev);
/* Number of events consumed this call. */
276 return evs_len_orig - evs_len;
/* Overflow path (presumably reached via the check at line 216): report
* a single OFLOW event with an all-ones payload. */
280 evs->generic.type = EF_EVENT_TYPE_OFLOW;
281 evs->generic.ev.u64[0] = (uint64_t)((int64_t)-1);
/* Default exception handler for ef_eventq_poll(): returns true if the
** event was handled.  The only case visible here ignores the
** "EVQ initialised" driver event.  NOTE(review): the switch tail,
** default case, and return are elided from this listing. */
286 int/*bool*/ ef_eventq_poll_exception(void* priv, ef_vi* evq, ef_event* ev)
288 int /*bool*/ handled = 0;
/* Same event-code extraction trick as ef_eventq_poll_evs(): the code
* field lives in the top 32 bits of the event word. */
290 switch( ev->generic.ev.u32[1] >> (EV_CODE_LBN - 32u) ) {
291 case DRIVER_EV_DECODE:
292 if( QWORD_GET_U(DRIVER_EV_SUB_CODE, ev->generic.ev) ==
293 EVQ_INIT_DONE_EV_DECODE )
294 /* EVQ initialised event: ignore. */
/* Walk every slot of the event queue ring, invoking `fn(arg, vi,
** rel_pos, abs_pos, event)` for each slot that holds an event.  If
** `stop_at_end` is set, stop at the first empty slot instead of
** scanning the whole ring.  NOTE(review): the fn(...) call line and
** loop/brace structure are partly elided from this listing; the sign
** convention of the EF_VI_EVENT_PTR offset (-i) is assumed to walk the
** ring — confirm against ef_vi_internal.h. */
302 void ef_eventq_iterate(ef_vi* vi,
303 void (*fn)(void* arg, ef_vi*, int rel_pos,
304 int abs_pos, void* event),
305 void* arg, int stop_at_end)
/* Ring capacity in events: mask+1 bytes / event size. */
307 int i, size_evs = (vi->evq_mask + 1) / sizeof(ef_vi_event);
309 for( i = 0; i < size_evs; ++i ) {
310 ef_vi_event* e = EF_VI_EVENT_PTR(vi, -i);
311 if( EF_VI_IS_EVENT(e) )
313 EF_VI_EVENT_OFFSET(vi, -i) / sizeof(ef_vi_event),
315 else if( stop_at_end )
/* Return true if an event is waiting at the head of the queue.
** NOTE(review): enclosing braces are elided from this listing. */
321 int ef_eventq_has_event(ef_vi* vi)
323 return EF_VI_IS_EVENT(EF_VI_EVENT_PTR(vi, 0));
/* Return true if at least `look_ahead`+1 events are waiting, by testing
** the slot `look_ahead` positions beyond the head.  NOTE(review): the
** negative offset is assumed to mean "further into the ring" (matching
** ef_eventq_iterate's use of -i) — confirm against EF_VI_EVENT_PTR in
** ef_vi_internal.h.  Enclosing braces are elided from this listing. */
327 int ef_eventq_has_many_events(ef_vi* vi, int look_ahead)
329 ef_assert_ge(look_ahead, 0);
330 return EF_VI_IS_EVENT(EF_VI_EVENT_PTR(vi, -look_ahead));
334 int ef_eventq_has_rx_event(ef_vi* vi)
339 for( i = 0; EF_VI_IS_EVENT(EF_VI_EVENT_PTR(vi, i)); --i ) {
340 ev = EF_VI_EVENT_PTR(vi, i);
341 if( EFVI_FALCON_EVENT_CODE(ev) == EF_EVENT_TYPE_RX ) n_evs++;