/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include "bfa_modules.h"
#include "bfa_cb_ioim.h"

BFA_TRC_FILE(HAL, FCPIM);
BFA_MODULE(fcpim);


#define bfa_fcpim_add_iostats(__l, __r, __stats)        \
        (__l->__stats += __r->__stats)
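
/*
 * Illustrative expansion: bfa_fcpim_add_iostats(lstats, rstats, total_ios)
 * becomes (lstats->total_ios += rstats->total_ios), since __stats is
 * pasted in as a member name. Both arguments are assumed to point at
 * struct bfa_itnim_iostats_s counters, as in bfa_fcpim_add_stats() below.
 */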

/**
 *  BFA ITNIM Related definitions
 */
static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);

#define BFA_ITNIM_FROM_TAG(_fcpim, _tag)                                \
        (((_fcpim)->itnim_arr + ((_tag) & ((_fcpim)->num_itnims - 1))))
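
/*
 * Note: the (_tag) & (num_itnims - 1) mask behaves like a modulo only
 * when num_itnims is a power of two, which the itnim array sizing is
 * assumed to guarantee.
 */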

#define bfa_fcpim_additn(__itnim)                                       \
        list_add_tail(&(__itnim)->qe, &(__itnim)->fcpim->itnim_q)
#define bfa_fcpim_delitn(__itnim)       do {                            \
        bfa_assert(bfa_q_is_on_q(&(__itnim)->fcpim->itnim_q, __itnim));      \
        bfa_itnim_update_del_itn_stats(__itnim);      \
        list_del(&(__itnim)->qe);      \
        bfa_assert(list_empty(&(__itnim)->io_q));      \
        bfa_assert(list_empty(&(__itnim)->io_cleanup_q));      \
        bfa_assert(list_empty(&(__itnim)->pending_q));      \
} while (0)

#define bfa_itnim_online_cb(__itnim) do {                               \
        if ((__itnim)->bfa->fcs)                                        \
                bfa_cb_itnim_online((__itnim)->ditn);      \
        else {                                                          \
                bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe,        \
                __bfa_cb_itnim_online, (__itnim));      \
        }                                                               \
} while (0)

#define bfa_itnim_offline_cb(__itnim) do {                              \
        if ((__itnim)->bfa->fcs)                                        \
                bfa_cb_itnim_offline((__itnim)->ditn);      \
        else {                                                          \
                bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe,        \
                __bfa_cb_itnim_offline, (__itnim));      \
        }                                                               \
} while (0)

#define bfa_itnim_sler_cb(__itnim) do {                                 \
        if ((__itnim)->bfa->fcs)                                        \
                bfa_cb_itnim_sler((__itnim)->ditn);      \
        else {                                                          \
                bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe,        \
                __bfa_cb_itnim_sler, (__itnim));      \
        }                                                               \
} while (0)

/**
 *  bfa_itnim_sm BFA itnim state machine
 */


enum bfa_itnim_event {
        BFA_ITNIM_SM_CREATE = 1,        /*  itnim is created */
        BFA_ITNIM_SM_ONLINE = 2,        /*  itnim is online */
        BFA_ITNIM_SM_OFFLINE = 3,       /*  itnim is offline */
        BFA_ITNIM_SM_FWRSP = 4,         /*  firmware response */
        BFA_ITNIM_SM_DELETE = 5,        /*  deleting an existing itnim */
        BFA_ITNIM_SM_CLEANUP = 6,       /*  IO cleanup completion */
        BFA_ITNIM_SM_SLER = 7,          /*  second level error recovery */
        BFA_ITNIM_SM_HWFAIL = 8,        /*  IOC h/w failure event */
        BFA_ITNIM_SM_QRESUME = 9,       /*  queue space available */
};
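
/*
 * Sketch of the common event flow through the itnim state machine below
 * (illustrative, not exhaustive):
 *
 *   SM_CREATE  -> created
 *   SM_ONLINE  -> fwcreate (or fwcreate_qfull when the request queue is full)
 *   SM_FWRSP   -> online
 *   SM_OFFLINE -> cleanup_offline, then fwdelete -> offline
 */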

/**
 *  BFA IOIM related definitions
 */
#define bfa_ioim_move_to_comp_q(__ioim) do {                            \
        list_del(&(__ioim)->qe);                                        \
        list_add_tail(&(__ioim)->qe, &(__ioim)->fcpim->ioim_comp_q);    \
} while (0)


#define bfa_ioim_cb_profile_comp(__fcpim, __ioim) do {                  \
        if ((__fcpim)->profile_comp)                                    \
                (__fcpim)->profile_comp(__ioim);                        \
} while (0)

#define bfa_ioim_cb_profile_start(__fcpim, __ioim) do {                 \
        if ((__fcpim)->profile_start)                                   \
                (__fcpim)->profile_start(__ioim);                       \
} while (0)

/**
 *  hal_ioim_sm
 */

/**
 * IO state machine events
 */
enum bfa_ioim_event {
        BFA_IOIM_SM_START       = 1,    /*  io start request from host */
        BFA_IOIM_SM_COMP_GOOD   = 2,    /*  io good comp, resource free */
        BFA_IOIM_SM_COMP        = 3,    /*  io comp, resource is free */
        BFA_IOIM_SM_COMP_UTAG   = 4,    /*  io comp, resource is free */
        BFA_IOIM_SM_DONE        = 5,    /*  io comp, resource not free */
        BFA_IOIM_SM_FREE        = 6,    /*  io resource is freed */
        BFA_IOIM_SM_ABORT       = 7,    /*  abort request from scsi stack */
        BFA_IOIM_SM_ABORT_COMP  = 8,    /*  abort from f/w */
        BFA_IOIM_SM_ABORT_DONE  = 9,    /*  abort completion from f/w */
        BFA_IOIM_SM_QRESUME     = 10,   /*  CQ space available to queue IO */
        BFA_IOIM_SM_SGALLOCED   = 11,   /*  SG page allocation successful */
        BFA_IOIM_SM_SQRETRY     = 12,   /*  sequence recovery retry */
        BFA_IOIM_SM_HCB         = 13,   /*  bfa callback complete */
        BFA_IOIM_SM_CLEANUP     = 14,   /*  IO cleanup from itnim */
        BFA_IOIM_SM_TMSTART     = 15,   /*  IO cleanup from tskim */
        BFA_IOIM_SM_TMDONE      = 16,   /*  IO cleanup from tskim */
        BFA_IOIM_SM_HWFAIL      = 17,   /*  IOC h/w failure event */
        BFA_IOIM_SM_IOTOV       = 18,   /*  ITN offline TOV */
};
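
/*
 * Sketch of the good-path IO sequence implied by these events
 * (illustrative): BFA_IOIM_SM_START issues the request to firmware,
 * BFA_IOIM_SM_COMP_GOOD signals a good completion with the IO resource
 * already free, and BFA_IOIM_SM_HCB closes out the bfa callback. Abort,
 * cleanup and qfull events interleave with this flow on error paths.
 */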

/**
 *  BFA TSKIM related definitions
 */

/**
 * task management completion handling
 */
#define bfa_tskim_qcomp(__tskim, __cbfn) do {                           \
        bfa_cb_queue((__tskim)->bfa, &(__tskim)->hcb_qe, __cbfn, (__tskim));\
        bfa_tskim_notify_comp(__tskim);      \
} while (0)

#define bfa_tskim_notify_comp(__tskim) do {                             \
        if ((__tskim)->notify)                                          \
                bfa_itnim_tskdone((__tskim)->itnim);      \
} while (0)


enum bfa_tskim_event {
        BFA_TSKIM_SM_START      = 1,    /*  TM command start            */
        BFA_TSKIM_SM_DONE       = 2,    /*  TM completion               */
        BFA_TSKIM_SM_QRESUME    = 3,    /*  resume after qfull          */
        BFA_TSKIM_SM_HWFAIL     = 5,    /*  IOC h/w failure event       */
        BFA_TSKIM_SM_HCB        = 6,    /*  BFA callback completion     */
        BFA_TSKIM_SM_IOS_DONE   = 7,    /*  IO and sub TM completions   */
        BFA_TSKIM_SM_CLEANUP    = 8,    /*  TM cleanup on ITN offline   */
        BFA_TSKIM_SM_CLEANUP_DONE = 9,  /*  TM abort completion         */
};
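
/*
 * Sketch of a normal task management flow (illustrative):
 * BFA_TSKIM_SM_START sends the TM command, BFA_TSKIM_SM_DONE carries the
 * firmware completion, BFA_TSKIM_SM_IOS_DONE accounts for the affected
 * IOs, and BFA_TSKIM_SM_HCB closes out the bfa callback.
 */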

/**
 * forward declaration for BFA ITNIM functions
 */
static void     bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim);
static bfa_boolean_t bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim);
static bfa_boolean_t bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim);
static void     bfa_itnim_cleanp_comp(void *itnim_cbarg);
static void     bfa_itnim_cleanup(struct bfa_itnim_s *itnim);
static void     __bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete);
static void     __bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete);
static void     __bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete);
static void     bfa_itnim_iotov_online(struct bfa_itnim_s *itnim);
static void     bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim);
static void     bfa_itnim_iotov(void *itnim_arg);
static void     bfa_itnim_iotov_start(struct bfa_itnim_s *itnim);
static void     bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim);
static void     bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim);

/**
 * forward declaration of ITNIM state machine
 */
static void     bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_created(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_online(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_sler(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_offline(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);

/**
 * forward declaration for BFA IOIM functions
 */
static bfa_boolean_t    bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim);
static bfa_boolean_t    bfa_ioim_sge_setup(struct bfa_ioim_s *ioim);
static void             bfa_ioim_sgpg_setup(struct bfa_ioim_s *ioim);
static bfa_boolean_t    bfa_ioim_send_abort(struct bfa_ioim_s *ioim);
static void             bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim);
static void __bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete);
static bfa_boolean_t    bfa_ioim_is_abortable(struct bfa_ioim_s *ioim);


/**
 * forward declaration of BFA IO state machine
 */
static void     bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_active(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_abort(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);

/**
 * forward declaration for BFA TSKIM functions
 */
static void     __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete);
static void     __bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete);
static bfa_boolean_t bfa_tskim_match_scope(struct bfa_tskim_s *tskim,
                                        lun_t lun);
static void     bfa_tskim_gather_ios(struct bfa_tskim_s *tskim);
static void     bfa_tskim_cleanp_comp(void *tskim_cbarg);
static void     bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim);
static bfa_boolean_t bfa_tskim_send(struct bfa_tskim_s *tskim);
static bfa_boolean_t bfa_tskim_send_abort(struct bfa_tskim_s *tskim);
static void     bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim);


/**
 * forward declaration of BFA TSKIM state machine
 */
static void     bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim,
                                        enum bfa_tskim_event event);
static void     bfa_tskim_sm_active(struct bfa_tskim_s *tskim,
                                        enum bfa_tskim_event event);
static void     bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim,
                                        enum bfa_tskim_event event);
static void     bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim,
                                        enum bfa_tskim_event event);
static void     bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim,
                                        enum bfa_tskim_event event);
static void     bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
                                        enum bfa_tskim_event event);
static void     bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim,
                                        enum bfa_tskim_event event);

/**
 *  hal_fcpim_mod BFA FCP Initiator Mode module
 */

/**
 *      Compute and return memory needed by FCP(im) module.
 */
static void
bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
                u32 *dm_len)
{
        bfa_itnim_meminfo(cfg, km_len, dm_len);

        /**
         * IO memory
         */
        if (cfg->fwcfg.num_ioim_reqs < BFA_IOIM_MIN)
                cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN;
        else if (cfg->fwcfg.num_ioim_reqs > BFA_IOIM_MAX)
                cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MAX;

        *km_len += cfg->fwcfg.num_ioim_reqs *
          (sizeof(struct bfa_ioim_s) + sizeof(struct bfa_ioim_sp_s));

        *dm_len += cfg->fwcfg.num_ioim_reqs * BFI_IOIM_SNSLEN;

        /**
         * task management command memory
         */
        if (cfg->fwcfg.num_tskim_reqs < BFA_TSKIM_MIN)
                cfg->fwcfg.num_tskim_reqs = BFA_TSKIM_MIN;
        *km_len += cfg->fwcfg.num_tskim_reqs * sizeof(struct bfa_tskim_s);
}
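
/*
 * Illustrative sizing, following the clamps above: a configuration
 * requesting fewer than BFA_IOIM_MIN IO requests is still sized for
 * BFA_IOIM_MIN of them; each IO request adds sizeof(struct bfa_ioim_s) +
 * sizeof(struct bfa_ioim_sp_s) to *km_len and BFI_IOIM_SNSLEN bytes of
 * DMA-able sense buffer to *dm_len.
 */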

static void
bfa_fcpim_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
                struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
        struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);

        bfa_trc(bfa, cfg->drvcfg.path_tov);
        bfa_trc(bfa, cfg->fwcfg.num_rports);
        bfa_trc(bfa, cfg->fwcfg.num_ioim_reqs);
        bfa_trc(bfa, cfg->fwcfg.num_tskim_reqs);

        fcpim->bfa              = bfa;
        fcpim->num_itnims       = cfg->fwcfg.num_rports;
        fcpim->num_ioim_reqs  = cfg->fwcfg.num_ioim_reqs;
        fcpim->num_tskim_reqs = cfg->fwcfg.num_tskim_reqs;
        fcpim->path_tov         = cfg->drvcfg.path_tov;
        fcpim->delay_comp       = cfg->drvcfg.delay_comp;
        fcpim->profile_comp = NULL;
        fcpim->profile_start = NULL;

        bfa_itnim_attach(fcpim, meminfo);
        bfa_tskim_attach(fcpim, meminfo);
        bfa_ioim_attach(fcpim, meminfo);
}

static void
bfa_fcpim_detach(struct bfa_s *bfa)
{
        struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);

        bfa_ioim_detach(fcpim);
        bfa_tskim_detach(fcpim);
}

static void
bfa_fcpim_start(struct bfa_s *bfa)
{
}

static void
bfa_fcpim_stop(struct bfa_s *bfa)
{
}

static void
bfa_fcpim_iocdisable(struct bfa_s *bfa)
{
        struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
        struct bfa_itnim_s *itnim;
        struct list_head *qe, *qen;

        list_for_each_safe(qe, qen, &fcpim->itnim_q) {
                itnim = (struct bfa_itnim_s *) qe;
                bfa_itnim_iocdisable(itnim);
        }
}

void
bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *lstats,
                struct bfa_itnim_iostats_s *rstats)
{
        bfa_fcpim_add_iostats(lstats, rstats, total_ios);
        bfa_fcpim_add_iostats(lstats, rstats, qresumes);
        bfa_fcpim_add_iostats(lstats, rstats, no_iotags);
        bfa_fcpim_add_iostats(lstats, rstats, io_aborts);
        bfa_fcpim_add_iostats(lstats, rstats, no_tskims);
        bfa_fcpim_add_iostats(lstats, rstats, iocomp_ok);
        bfa_fcpim_add_iostats(lstats, rstats, iocomp_underrun);
        bfa_fcpim_add_iostats(lstats, rstats, iocomp_overrun);
        bfa_fcpim_add_iostats(lstats, rstats, iocomp_aborted);
        bfa_fcpim_add_iostats(lstats, rstats, iocomp_timedout);
        bfa_fcpim_add_iostats(lstats, rstats, iocom_nexus_abort);
        bfa_fcpim_add_iostats(lstats, rstats, iocom_proto_err);
        bfa_fcpim_add_iostats(lstats, rstats, iocom_dif_err);
        bfa_fcpim_add_iostats(lstats, rstats, iocom_sqer_needed);
        bfa_fcpim_add_iostats(lstats, rstats, iocom_res_free);
        bfa_fcpim_add_iostats(lstats, rstats, iocom_hostabrts);
        bfa_fcpim_add_iostats(lstats, rstats, iocom_utags);
        bfa_fcpim_add_iostats(lstats, rstats, io_cleanups);
        bfa_fcpim_add_iostats(lstats, rstats, io_tmaborts);
        bfa_fcpim_add_iostats(lstats, rstats, onlines);
        bfa_fcpim_add_iostats(lstats, rstats, offlines);
        bfa_fcpim_add_iostats(lstats, rstats, creates);
        bfa_fcpim_add_iostats(lstats, rstats, deletes);
        bfa_fcpim_add_iostats(lstats, rstats, create_comps);
        bfa_fcpim_add_iostats(lstats, rstats, delete_comps);
        bfa_fcpim_add_iostats(lstats, rstats, sler_events);
        bfa_fcpim_add_iostats(lstats, rstats, fw_create);
        bfa_fcpim_add_iostats(lstats, rstats, fw_delete);
        bfa_fcpim_add_iostats(lstats, rstats, ioc_disabled);
        bfa_fcpim_add_iostats(lstats, rstats, cleanup_comps);
        bfa_fcpim_add_iostats(lstats, rstats, tm_cmnds);
        bfa_fcpim_add_iostats(lstats, rstats, tm_fw_rsps);
        bfa_fcpim_add_iostats(lstats, rstats, tm_success);
        bfa_fcpim_add_iostats(lstats, rstats, tm_failures);
        bfa_fcpim_add_iostats(lstats, rstats, tm_io_comps);
        bfa_fcpim_add_iostats(lstats, rstats, tm_qresumes);
        bfa_fcpim_add_iostats(lstats, rstats, tm_iocdowns);
        bfa_fcpim_add_iostats(lstats, rstats, tm_cleanups);
        bfa_fcpim_add_iostats(lstats, rstats, tm_cleanup_comps);
        bfa_fcpim_add_iostats(lstats, rstats, io_comps);
        bfa_fcpim_add_iostats(lstats, rstats, input_reqs);
        bfa_fcpim_add_iostats(lstats, rstats, output_reqs);
        bfa_fcpim_add_iostats(lstats, rstats, rd_throughput);
        bfa_fcpim_add_iostats(lstats, rstats, wr_throughput);
}

void
bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov)
{
        struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);

        fcpim->path_tov = path_tov * 1000;
        if (fcpim->path_tov > BFA_FCPIM_PATHTOV_MAX)
                fcpim->path_tov = BFA_FCPIM_PATHTOV_MAX;
}
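
/*
 * Illustrative usage (hypothetical value): bfa_fcpim_path_tov_set(bfa, 30)
 * stores 30000 milliseconds, capped at BFA_FCPIM_PATHTOV_MAX;
 * bfa_fcpim_path_tov_get() below converts back and returns 30.
 */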

u16
bfa_fcpim_path_tov_get(struct bfa_s *bfa)
{
        struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);

        return fcpim->path_tov / 1000;
}

bfa_status_t
bfa_fcpim_port_iostats(struct bfa_s *bfa, struct bfa_itnim_iostats_s *stats,
        u8 lp_tag)
{
        struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
        struct list_head *qe, *qen;
        struct bfa_itnim_s *itnim;

        /* accumulate IO stats from itnim */
        bfa_os_memset(stats, 0, sizeof(struct bfa_itnim_iostats_s));
        list_for_each_safe(qe, qen, &fcpim->itnim_q) {
                itnim = (struct bfa_itnim_s *) qe;
                if (itnim->rport->rport_info.lp_tag != lp_tag)
                        continue;
                bfa_fcpim_add_stats(stats, &(itnim->stats));
        }
        return BFA_STATUS_OK;
}

bfa_status_t
bfa_fcpim_get_modstats(struct bfa_s *bfa, struct bfa_itnim_iostats_s *modstats)
{
        struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
        struct list_head *qe, *qen;
        struct bfa_itnim_s *itnim;

        /* accumulate IO stats from itnim */
        bfa_os_memset(modstats, 0, sizeof(struct bfa_itnim_iostats_s));
        list_for_each_safe(qe, qen, &fcpim->itnim_q) {
                itnim = (struct bfa_itnim_s *) qe;
                bfa_fcpim_add_stats(modstats, &(itnim->stats));
        }
        return BFA_STATUS_OK;
}

bfa_status_t
bfa_fcpim_get_del_itn_stats(struct bfa_s *bfa,
         struct bfa_fcpim_del_itn_stats_s *modstats)
{
        struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);

        *modstats = fcpim->del_itn_stats;

        return BFA_STATUS_OK;
}


bfa_status_t
bfa_fcpim_profile_on(struct bfa_s *bfa, u32 time)
{
        struct bfa_itnim_s *itnim;
        struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
        struct list_head *qe, *qen;

        /* clear IO stats from all active itnims */
        list_for_each_safe(qe, qen, &fcpim->itnim_q) {
                itnim = (struct bfa_itnim_s *) qe;
                bfa_itnim_clear_stats(itnim);
        }
        fcpim->io_profile = BFA_TRUE;
        fcpim->io_profile_start_time = time;
        fcpim->profile_comp = bfa_ioim_profile_comp;
        fcpim->profile_start = bfa_ioim_profile_start;

        return BFA_STATUS_OK;
}

bfa_status_t
bfa_fcpim_profile_off(struct bfa_s *bfa)
{
        struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);

        fcpim->io_profile = BFA_FALSE;
        fcpim->io_profile_start_time = 0;
        fcpim->profile_comp = NULL;
        fcpim->profile_start = NULL;
        return BFA_STATUS_OK;
}

bfa_status_t
bfa_fcpim_port_clear_iostats(struct bfa_s *bfa, u8 lp_tag)
{
        struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
        struct list_head *qe, *qen;
        struct bfa_itnim_s *itnim;

        /* clear IO stats from all active itnims */
        list_for_each_safe(qe, qen, &fcpim->itnim_q) {
                itnim = (struct bfa_itnim_s *) qe;
                if (itnim->rport->rport_info.lp_tag != lp_tag)
                        continue;
                bfa_itnim_clear_stats(itnim);
        }
        return BFA_STATUS_OK;
}

bfa_status_t
bfa_fcpim_clr_modstats(struct bfa_s *bfa)
{
        struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
        struct list_head *qe, *qen;
        struct bfa_itnim_s *itnim;

        /* clear IO stats from all active itnims */
        list_for_each_safe(qe, qen, &fcpim->itnim_q) {
                itnim = (struct bfa_itnim_s *) qe;
                bfa_itnim_clear_stats(itnim);
        }
        bfa_os_memset(&fcpim->del_itn_stats, 0,
                sizeof(struct bfa_fcpim_del_itn_stats_s));

        return BFA_STATUS_OK;
}

void
bfa_fcpim_qdepth_set(struct bfa_s *bfa, u16 q_depth)
{
        struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);

        bfa_assert(q_depth <= BFA_IOCFC_QDEPTH_MAX);

        fcpim->q_depth = q_depth;
}

u16
bfa_fcpim_qdepth_get(struct bfa_s *bfa)
{
        struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);

        return fcpim->q_depth;
}

void
bfa_fcpim_update_ioredirect(struct bfa_s *bfa)
{
        bfa_boolean_t ioredirect;

        /*
         * IO redirection is turned off when QoS is enabled and vice versa
         */
        ioredirect = bfa_fcport_is_qos_enabled(bfa) ? BFA_FALSE : BFA_TRUE;
}

void
bfa_fcpim_set_ioredirect(struct bfa_s *bfa, bfa_boolean_t state)
{
        struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);

        fcpim->ioredirect = state;
}



/**
 *  BFA ITNIM module state machine functions
 */

/**
 *      Beginning/unallocated state - no events expected.
 */
static void
bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_CREATE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_created);
                itnim->is_online = BFA_FALSE;
                bfa_fcpim_additn(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/**
 *      Beginning state, only online event expected.
 */
static void
bfa_itnim_sm_created(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_ONLINE:
                if (bfa_itnim_send_fwcreate(itnim))
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
                else
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
                bfa_fcpim_delitn(itnim);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/**
 *      Waiting for itnim create response from firmware.
 */
static void
bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_FWRSP:
                bfa_sm_set_state(itnim, bfa_itnim_sm_online);
                itnim->is_online = BFA_TRUE;
                bfa_itnim_iotov_online(itnim);
                bfa_itnim_online_cb(itnim);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_delete_pending);
                break;

        case BFA_ITNIM_SM_OFFLINE:
                if (bfa_itnim_send_fwdelete(itnim))
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
                else
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

static void
bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
                        enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_QRESUME:
                bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
                bfa_itnim_send_fwcreate(itnim);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
                bfa_reqq_wcancel(&itnim->reqq_wait);
                bfa_fcpim_delitn(itnim);
                break;

        case BFA_ITNIM_SM_OFFLINE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_offline);
                bfa_reqq_wcancel(&itnim->reqq_wait);
                bfa_itnim_offline_cb(itnim);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                bfa_reqq_wcancel(&itnim->reqq_wait);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/**
 *      Waiting for itnim create response from firmware, a delete is pending.
 */
static void
bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
                                enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_FWRSP:
                if (bfa_itnim_send_fwdelete(itnim))
                        bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
                else
                        bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
                bfa_fcpim_delitn(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/**
 *      Online state - normal parking state.
 */
static void
bfa_itnim_sm_online(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_OFFLINE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline);
                itnim->is_online = BFA_FALSE;
                bfa_itnim_iotov_start(itnim);
                bfa_itnim_cleanup(itnim);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
                itnim->is_online = BFA_FALSE;
                bfa_itnim_cleanup(itnim);
                break;

        case BFA_ITNIM_SM_SLER:
                bfa_sm_set_state(itnim, bfa_itnim_sm_sler);
                itnim->is_online = BFA_FALSE;
                bfa_itnim_iotov_start(itnim);
                bfa_itnim_sler_cb(itnim);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                itnim->is_online = BFA_FALSE;
                bfa_itnim_iotov_start(itnim);
                bfa_itnim_iocdisable_cleanup(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/**
 *      Second level error recovery needed.
 */
static void
bfa_itnim_sm_sler(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_OFFLINE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline);
                bfa_itnim_cleanup(itnim);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
                bfa_itnim_cleanup(itnim);
                bfa_itnim_iotov_delete(itnim);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                bfa_itnim_iocdisable_cleanup(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/**
 *      Going offline. Waiting for active IO cleanup.
 */
static void
bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
                                 enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_CLEANUP:
                if (bfa_itnim_send_fwdelete(itnim))
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
                else
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
                bfa_itnim_iotov_delete(itnim);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                bfa_itnim_iocdisable_cleanup(itnim);
                bfa_itnim_offline_cb(itnim);
                break;

        case BFA_ITNIM_SM_SLER:
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/**
 *      Deleting itnim. Waiting for active IO cleanup.
 */
static void
bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
                                enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_CLEANUP:
                if (bfa_itnim_send_fwdelete(itnim))
                        bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
                else
                        bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                bfa_itnim_iocdisable_cleanup(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/**
 * Rport offline. Firmware itnim is being deleted - awaiting f/w response.
 */
static void
bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_FWRSP:
                bfa_sm_set_state(itnim, bfa_itnim_sm_offline);
                bfa_itnim_offline_cb(itnim);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                bfa_itnim_offline_cb(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

static void
bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
                        enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_QRESUME:
                bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
                bfa_itnim_send_fwdelete(itnim);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                bfa_reqq_wcancel(&itnim->reqq_wait);
                bfa_itnim_offline_cb(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/**
 *      Offline state.
 */
static void
bfa_itnim_sm_offline(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
                bfa_itnim_iotov_delete(itnim);
                bfa_fcpim_delitn(itnim);
                break;

        case BFA_ITNIM_SM_ONLINE:
                if (bfa_itnim_send_fwcreate(itnim))
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
                else
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/**
 *      IOC h/w failed state.
 */
static void
bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
                                enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
                bfa_itnim_iotov_delete(itnim);
                bfa_fcpim_delitn(itnim);
                break;

        case BFA_ITNIM_SM_OFFLINE:
                bfa_itnim_offline_cb(itnim);
                break;

        case BFA_ITNIM_SM_ONLINE:
                if (bfa_itnim_send_fwcreate(itnim))
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
                else
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/**
 *      Itnim is being deleted, waiting for the firmware delete response.
 */
static void
bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_FWRSP:
        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
                bfa_fcpim_delitn(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

static void
bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
                enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_QRESUME:
                bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
                bfa_itnim_send_fwdelete(itnim);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
                bfa_reqq_wcancel(&itnim->reqq_wait);
                bfa_fcpim_delitn(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/**
 *      Initiate cleanup of all IOs on an IOC failure.
 */
static void
bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim)
{
        struct bfa_tskim_s *tskim;
        struct bfa_ioim_s *ioim;
        struct list_head        *qe, *qen;

        list_for_each_safe(qe, qen, &itnim->tsk_q) {
                tskim = (struct bfa_tskim_s *) qe;
                bfa_tskim_iocdisable(tskim);
        }

        list_for_each_safe(qe, qen, &itnim->io_q) {
                ioim = (struct bfa_ioim_s *) qe;
                bfa_ioim_iocdisable(ioim);
        }

        /**
         * For IO requests in the pending queue, we pretend an early timeout.
         */
        list_for_each_safe(qe, qen, &itnim->pending_q) {
                ioim = (struct bfa_ioim_s *) qe;
                bfa_ioim_tov(ioim);
        }

        list_for_each_safe(qe, qen, &itnim->io_cleanup_q) {
                ioim = (struct bfa_ioim_s *) qe;
                bfa_ioim_iocdisable(ioim);
        }
}

/**
 *      IO cleanup completion
 */
static void
bfa_itnim_cleanp_comp(void *itnim_cbarg)
{
        struct bfa_itnim_s *itnim = itnim_cbarg;

        bfa_stats(itnim, cleanup_comps);
        bfa_sm_send_event(itnim, BFA_ITNIM_SM_CLEANUP);
}

/**
 *      Initiate cleanup of all IOs.
 */
static void
bfa_itnim_cleanup(struct bfa_itnim_s *itnim)
{
        struct bfa_ioim_s  *ioim;
        struct bfa_tskim_s *tskim;
        struct list_head        *qe, *qen;

        bfa_wc_init(&itnim->wc, bfa_itnim_cleanp_comp, itnim);

        list_for_each_safe(qe, qen, &itnim->io_q) {
                ioim = (struct bfa_ioim_s *) qe;

                /**
                 * Move the IO from the active queue to the cleanup queue
                 * so that a later TM will not pick up this IO.
                 */
                list_del(&ioim->qe);
                list_add_tail(&ioim->qe, &itnim->io_cleanup_q);

                bfa_wc_up(&itnim->wc);
                bfa_ioim_cleanup(ioim);
        }

        list_for_each_safe(qe, qen, &itnim->tsk_q) {
                tskim = (struct bfa_tskim_s *) qe;
                bfa_wc_up(&itnim->wc);
                bfa_tskim_cleanup(tskim);
        }

        bfa_wc_wait(&itnim->wc);
}
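
/*
 * Sketch of the wait-counter handshake above, as used in this file:
 * bfa_wc_init() registers bfa_itnim_cleanp_comp() as the completion,
 * each outstanding IO/TM adds a reference via bfa_wc_up(), and
 * bfa_itnim_iodone()/bfa_itnim_tskdone() drop it via bfa_wc_down();
 * when the count drains, the completion posts BFA_ITNIM_SM_CLEANUP.
 */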

static void
__bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete)
{
        struct bfa_itnim_s *itnim = cbarg;

        if (complete)
                bfa_cb_itnim_online(itnim->ditn);
}

static void
__bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete)
{
        struct bfa_itnim_s *itnim = cbarg;

        if (complete)
                bfa_cb_itnim_offline(itnim->ditn);
}

static void
__bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete)
{
        struct bfa_itnim_s *itnim = cbarg;

        if (complete)
                bfa_cb_itnim_sler(itnim->ditn);
}

/**
 * Call to resume any I/O requests waiting for room in the request queue.
 */
static void
bfa_itnim_qresume(void *cbarg)
{
        struct bfa_itnim_s *itnim = cbarg;

        bfa_sm_send_event(itnim, BFA_ITNIM_SM_QRESUME);
}




/**
 *  bfa_itnim_public
 */

void
bfa_itnim_iodone(struct bfa_itnim_s *itnim)
{
        bfa_wc_down(&itnim->wc);
}

void
bfa_itnim_tskdone(struct bfa_itnim_s *itnim)
{
        bfa_wc_down(&itnim->wc);
}

void
bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
                u32 *dm_len)
{
        /**
         * ITN memory
         */
        *km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itnim_s);
}

void
bfa_itnim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
{
        struct bfa_s    *bfa = fcpim->bfa;
        struct bfa_itnim_s *itnim;
        int     i, j;

        INIT_LIST_HEAD(&fcpim->itnim_q);

        itnim = (struct bfa_itnim_s *) bfa_meminfo_kva(minfo);
        fcpim->itnim_arr = itnim;

        for (i = 0; i < fcpim->num_itnims; i++, itnim++) {
                bfa_os_memset(itnim, 0, sizeof(struct bfa_itnim_s));
                itnim->bfa = bfa;
                itnim->fcpim = fcpim;
                itnim->reqq = BFA_REQQ_QOS_LO;
                itnim->rport = BFA_RPORT_FROM_TAG(bfa, i);
                itnim->iotov_active = BFA_FALSE;
                bfa_reqq_winit(&itnim->reqq_wait, bfa_itnim_qresume, itnim);

                INIT_LIST_HEAD(&itnim->io_q);
                INIT_LIST_HEAD(&itnim->io_cleanup_q);
                INIT_LIST_HEAD(&itnim->pending_q);
                INIT_LIST_HEAD(&itnim->tsk_q);
                INIT_LIST_HEAD(&itnim->delay_comp_q);
                for (j = 0; j < BFA_IOBUCKET_MAX; j++)
                        itnim->ioprofile.io_latency.min[j] = ~0;
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
        }

        bfa_meminfo_kva(minfo) = (u8 *) itnim;
}

void
bfa_itnim_iocdisable(struct bfa_itnim_s *itnim)
{
        bfa_stats(itnim, ioc_disabled);
        bfa_sm_send_event(itnim, BFA_ITNIM_SM_HWFAIL);
}

static bfa_boolean_t
bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim)
{
        struct bfi_itnim_create_req_s *m;

        itnim->msg_no++;

        /**
         * check for room in queue to send request now
         */
        m = bfa_reqq_next(itnim->bfa, itnim->reqq);
        if (!m) {
                bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait);
                return BFA_FALSE;
        }

        bfi_h2i_set(m->mh, BFI_MC_ITNIM, BFI_ITNIM_H2I_CREATE_REQ,
                        bfa_lpuid(itnim->bfa));
        m->fw_handle = itnim->rport->fw_handle;
        m->class = FC_CLASS_3;
        m->seq_rec = itnim->seq_rec;
        m->msg_no = itnim->msg_no;
        bfa_stats(itnim, fw_create);

        /**
         * queue I/O message to firmware
         */
        bfa_reqq_produce(itnim->bfa, itnim->reqq);
        return BFA_TRUE;
}
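
/*
 * Request-queue pattern shared by the fwcreate/fwdelete senders (sketch):
 * bfa_reqq_next() returns NULL when the queue is full, in which case the
 * itnim parks on reqq_wait and bfa_itnim_qresume() later posts
 * BFA_ITNIM_SM_QRESUME so the *_qfull states retry the send.
 */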

static bfa_boolean_t
bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim)
{
        struct bfi_itnim_delete_req_s *m;

        /**
         * check for room in queue to send request now
         */
        m = bfa_reqq_next(itnim->bfa, itnim->reqq);
        if (!m) {
                bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait);
                return BFA_FALSE;
        }

        bfi_h2i_set(m->mh, BFI_MC_ITNIM, BFI_ITNIM_H2I_DELETE_REQ,
                        bfa_lpuid(itnim->bfa));
        m->fw_handle = itnim->rport->fw_handle;
        bfa_stats(itnim, fw_delete);

        /**
         * queue I/O message to firmware
         */
        bfa_reqq_produce(itnim->bfa, itnim->reqq);
        return BFA_TRUE;
}

/**
 * Clean up all pending failed inflight requests.
 */
static void
bfa_itnim_delayed_comp(struct bfa_itnim_s *itnim, bfa_boolean_t iotov)
{
        struct bfa_ioim_s *ioim;
        struct list_head *qe, *qen;

        list_for_each_safe(qe, qen, &itnim->delay_comp_q) {
                ioim = (struct bfa_ioim_s *)qe;
                bfa_ioim_delayed_comp(ioim, iotov);
        }
}

/**
 * Start all pending IO requests.
 */
static void
bfa_itnim_iotov_online(struct bfa_itnim_s *itnim)
{
        struct bfa_ioim_s *ioim;

        bfa_itnim_iotov_stop(itnim);

        /**
         * Abort all inflight IO requests in the queue
         */
        bfa_itnim_delayed_comp(itnim, BFA_FALSE);

        /**
         * Start all pending IO requests.
         */
        while (!list_empty(&itnim->pending_q)) {
                bfa_q_deq(&itnim->pending_q, &ioim);
                list_add_tail(&ioim->qe, &itnim->io_q);
                bfa_ioim_start(ioim);
        }
}

/**
 * Fail all pending IO requests
 */
static void
bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim)
{
        struct bfa_ioim_s *ioim;

        /**
         * Fail all inflight IO requests in the queue
         */
        bfa_itnim_delayed_comp(itnim, BFA_TRUE);

        /**
         * Fail any pending IO requests.
         */
        while (!list_empty(&itnim->pending_q)) {
                bfa_q_deq(&itnim->pending_q, &ioim);
                list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
                bfa_ioim_tov(ioim);
        }
}

/**
 * IO TOV timer callback. Fail any pending IO requests.
 */
static void
bfa_itnim_iotov(void *itnim_arg)
{
        struct bfa_itnim_s *itnim = itnim_arg;

        itnim->iotov_active = BFA_FALSE;

        bfa_cb_itnim_tov_begin(itnim->ditn);
        bfa_itnim_iotov_cleanup(itnim);
        bfa_cb_itnim_tov(itnim->ditn);
}

/**
 * Start IO TOV timer for failing back pending IO requests in offline state.
 */
static void
bfa_itnim_iotov_start(struct bfa_itnim_s *itnim)
{
        if (itnim->fcpim->path_tov > 0) {

                itnim->iotov_active = BFA_TRUE;
                bfa_assert(bfa_itnim_hold_io(itnim));
                bfa_timer_start(itnim->bfa, &itnim->timer,
                        bfa_itnim_iotov, itnim, itnim->fcpim->path_tov);
        }
}

/**
 * Stop IO TOV timer.
 */
static void
bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim)
{
        if (itnim->iotov_active) {
                itnim->iotov_active = BFA_FALSE;
                bfa_timer_stop(&itnim->timer);
        }
}

/**
 * Itnim delete: stop the IO TOV timer and fail back any pending IO requests.
 */
1425 static void
1426 bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim)
1427 {
1428         bfa_boolean_t pathtov_active = BFA_FALSE;
1429
1430         if (itnim->iotov_active)
1431                 pathtov_active = BFA_TRUE;
1432
1433         bfa_itnim_iotov_stop(itnim);
1434         if (pathtov_active)
1435                 bfa_cb_itnim_tov_begin(itnim->ditn);
1436         bfa_itnim_iotov_cleanup(itnim);
1437         if (pathtov_active)
1438                 bfa_cb_itnim_tov(itnim->ditn);
1439 }
1440
1441 static void
1442 bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim)
1443 {
1444         struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(itnim->bfa);
1445         fcpim->del_itn_stats.del_itn_iocomp_aborted +=
1446                 itnim->stats.iocomp_aborted;
1447         fcpim->del_itn_stats.del_itn_iocomp_timedout +=
1448                 itnim->stats.iocomp_timedout;
1449         fcpim->del_itn_stats.del_itn_iocom_sqer_needed +=
1450                 itnim->stats.iocom_sqer_needed;
1451         fcpim->del_itn_stats.del_itn_iocom_res_free +=
1452                 itnim->stats.iocom_res_free;
1453         fcpim->del_itn_stats.del_itn_iocom_hostabrts +=
1454                 itnim->stats.iocom_hostabrts;
1455         fcpim->del_itn_stats.del_itn_total_ios += itnim->stats.total_ios;
1456         fcpim->del_itn_stats.del_io_iocdowns += itnim->stats.io_iocdowns;
1457         fcpim->del_itn_stats.del_tm_iocdowns += itnim->stats.tm_iocdowns;
1458 }
1459
1460
1461
1462 /**
1463  *  bfa_itnim_public
1464  */
1465
1466 /**
1467  *      Itnim interrupt processing.
1468  */
1469 void
1470 bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
1471 {
1472         struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
1473         union bfi_itnim_i2h_msg_u msg;
1474         struct bfa_itnim_s *itnim;
1475
1476         bfa_trc(bfa, m->mhdr.msg_id);
1477
1478         msg.msg = m;
1479
1480         switch (m->mhdr.msg_id) {
1481         case BFI_ITNIM_I2H_CREATE_RSP:
1482                 itnim = BFA_ITNIM_FROM_TAG(fcpim,
1483                                                 msg.create_rsp->bfa_handle);
1484                 bfa_assert(msg.create_rsp->status == BFA_STATUS_OK);
1485                 bfa_stats(itnim, create_comps);
1486                 bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
1487                 break;
1488
1489         case BFI_ITNIM_I2H_DELETE_RSP:
1490                 itnim = BFA_ITNIM_FROM_TAG(fcpim,
1491                                                 msg.delete_rsp->bfa_handle);
1492                 bfa_assert(msg.delete_rsp->status == BFA_STATUS_OK);
1493                 bfa_stats(itnim, delete_comps);
1494                 bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
1495                 break;
1496
1497         case BFI_ITNIM_I2H_SLER_EVENT:
1498                 itnim = BFA_ITNIM_FROM_TAG(fcpim,
1499                                                 msg.sler_event->bfa_handle);
1500                 bfa_stats(itnim, sler_events);
1501                 bfa_sm_send_event(itnim, BFA_ITNIM_SM_SLER);
1502                 break;
1503
1504         default:
1505                 bfa_trc(bfa, m->mhdr.msg_id);
1506                 bfa_assert(0);
1507         }
1508 }
1509
1510
1511
1512 /**
1513  *  bfa_itnim_api
1514  */
1515
1516 struct bfa_itnim_s *
1517 bfa_itnim_create(struct bfa_s *bfa, struct bfa_rport_s *rport, void *ditn)
1518 {
1519         struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
1520         struct bfa_itnim_s *itnim;
1521
1522         itnim = BFA_ITNIM_FROM_TAG(fcpim, rport->rport_tag);
1523         bfa_assert(itnim->rport == rport);
1524
1525         itnim->ditn = ditn;
1526
1527         bfa_stats(itnim, creates);
1528         bfa_sm_send_event(itnim, BFA_ITNIM_SM_CREATE);
1529
1530         return itnim;
1531 }
1532
1533 void
1534 bfa_itnim_delete(struct bfa_itnim_s *itnim)
1535 {
1536         bfa_stats(itnim, deletes);
1537         bfa_sm_send_event(itnim, BFA_ITNIM_SM_DELETE);
1538 }
1539
1540 void
1541 bfa_itnim_online(struct bfa_itnim_s *itnim, bfa_boolean_t seq_rec)
1542 {
1543         itnim->seq_rec = seq_rec;
1544         bfa_stats(itnim, onlines);
1545         bfa_sm_send_event(itnim, BFA_ITNIM_SM_ONLINE);
1546 }
1547
1548 void
1549 bfa_itnim_offline(struct bfa_itnim_s *itnim)
1550 {
1551         bfa_stats(itnim, offlines);
1552         bfa_sm_send_event(itnim, BFA_ITNIM_SM_OFFLINE);
1553 }
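
/*
 * Lifecycle sketch (illustrative only, not part of the driver): the order
 * in which a consumer of this API would typically drive an itnim. The
 * bfa/rport/ditn handles are assumed to come from the hypothetical caller.
 *
 *	itnim = bfa_itnim_create(bfa, rport, ditn);
 *	bfa_itnim_online(itnim, BFA_FALSE);	(sequence recovery off)
 *	...
 *	bfa_itnim_offline(itnim);
 *	bfa_itnim_delete(itnim);
 */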
1554
1555 /**
1556  * Return true if itnim is considered offline for holding off IO requests.
1557  * IO is not held if itnim is being deleted.
1558  */
1559 bfa_boolean_t
1560 bfa_itnim_hold_io(struct bfa_itnim_s *itnim)
1561 {
1562         return itnim->fcpim->path_tov && itnim->iotov_active &&
1563                 (bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwcreate) ||
1564                  bfa_sm_cmp_state(itnim, bfa_itnim_sm_sler) ||
1565                  bfa_sm_cmp_state(itnim, bfa_itnim_sm_cleanup_offline) ||
1566                  bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwdelete) ||
1567                  bfa_sm_cmp_state(itnim, bfa_itnim_sm_offline) ||
1568                  bfa_sm_cmp_state(itnim, bfa_itnim_sm_iocdisable));
1569 }
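
/*
 * Usage sketch (illustrative only): an IO submission path consults
 * bfa_itnim_hold_io() to decide between parking an IO and failing it with
 * path TOV status, exactly as bfa_ioim_sm_uninit() does below. The locals
 * are hypothetical.
 *
 *	if (!bfa_itnim_is_online(itnim)) {
 *		if (bfa_itnim_hold_io(itnim))
 *			park the IO on itnim->pending_q;
 *		else
 *			complete the IO with BFI_IOIM_STS_PATHTOV;
 *	}
 */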
1570
1571 bfa_status_t
1572 bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim,
1573                 struct bfa_itnim_ioprofile_s *ioprofile)
1574 {
1575         struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(itnim->bfa);
1576         if (!fcpim->io_profile)
1577                 return BFA_STATUS_IOPROFILE_OFF;
1578
1579         itnim->ioprofile.index = BFA_IOBUCKET_MAX;
1580         itnim->ioprofile.io_profile_start_time =
1581                 bfa_io_profile_start_time(itnim->bfa);
1582         itnim->ioprofile.clock_res_mul = bfa_io_lat_clock_res_mul;
1583         itnim->ioprofile.clock_res_div = bfa_io_lat_clock_res_div;
1584         *ioprofile = itnim->ioprofile;
1585
1586         return BFA_STATUS_OK;
1587 }
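
/*
 * Retrieval sketch (illustrative only; 'prof' is a hypothetical caller's
 * buffer): IO profiling must be enabled first, otherwise the call fails
 * with BFA_STATUS_IOPROFILE_OFF.
 *
 *	struct bfa_itnim_ioprofile_s prof;
 *
 *	if (bfa_itnim_get_ioprofile(itnim, &prof) == BFA_STATUS_OK)
 *		latency buckets are available in prof.io_latency;
 */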
1588
1589 void
1590 bfa_itnim_get_stats(struct bfa_itnim_s *itnim,
1591         struct bfa_itnim_iostats_s *stats)
1592 {
1593         *stats = itnim->stats;
1594 }
1595
1596 void
1597 bfa_itnim_clear_stats(struct bfa_itnim_s *itnim)
1598 {
1599         int j;
1600         bfa_os_memset(&itnim->stats, 0, sizeof(itnim->stats));
1601         bfa_os_memset(&itnim->ioprofile, 0, sizeof(itnim->ioprofile));
1602         for (j = 0; j < BFA_IOBUCKET_MAX; j++)
1603                 itnim->ioprofile.io_latency.min[j] = ~0;
1604 }
1605
1606 /**
1607  *  BFA IO module state machine functions
1608  */
1609
1610 /**
1611  *      IO is not started (unallocated).
1612  */
1613 static void
1614 bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1615 {
1616         bfa_trc_fp(ioim->bfa, ioim->iotag);
1617         bfa_trc_fp(ioim->bfa, event);
1618
1619         switch (event) {
1620         case BFA_IOIM_SM_START:
1621                 if (!bfa_itnim_is_online(ioim->itnim)) {
1622                         if (!bfa_itnim_hold_io(ioim->itnim)) {
1623                                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1624                                 list_del(&ioim->qe);
1625                                 list_add_tail(&ioim->qe,
1626                                         &ioim->fcpim->ioim_comp_q);
1627                                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
1628                                                 __bfa_cb_ioim_pathtov, ioim);
1629                         } else {
1630                                 list_del(&ioim->qe);
1631                                 list_add_tail(&ioim->qe,
1632                                         &ioim->itnim->pending_q);
1633                         }
1634                         break;
1635                 }
1636
1637                 if (ioim->nsges > BFI_SGE_INLINE) {
1638                         if (!bfa_ioim_sge_setup(ioim)) {
1639                                 bfa_sm_set_state(ioim, bfa_ioim_sm_sgalloc);
1640                                 return;
1641                         }
1642                 }
1643
1644                 if (!bfa_ioim_send_ioreq(ioim)) {
1645                         bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
1646                         break;
1647                 }
1648
1649                 bfa_sm_set_state(ioim, bfa_ioim_sm_active);
1650                 break;
1651
1652         case BFA_IOIM_SM_IOTOV:
1653                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1654                 bfa_ioim_move_to_comp_q(ioim);
1655                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
1656                                 __bfa_cb_ioim_pathtov, ioim);
1657                 break;
1658
1659         case BFA_IOIM_SM_ABORT:
1660                 /**
1661                  * An IO in the pending queue can get abort requests.
1662                  * Complete abort requests immediately.
1663                  */
1664                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1665                 bfa_assert(bfa_q_is_on_q(&ioim->itnim->pending_q, ioim));
1666                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
1667                                 __bfa_cb_ioim_abort, ioim);
1668                 break;
1669
1670         default:
1671                 bfa_sm_fault(ioim->bfa, event);
1672         }
1673 }
1674
1675 /**
1676  *      IO is waiting for SG pages.
1677  */
1678 static void
1679 bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1680 {
1681         bfa_trc(ioim->bfa, ioim->iotag);
1682         bfa_trc(ioim->bfa, event);
1683
1684         switch (event) {
1685         case BFA_IOIM_SM_SGALLOCED:
1686                 if (!bfa_ioim_send_ioreq(ioim)) {
1687                         bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
1688                         break;
1689                 }
1690                 bfa_sm_set_state(ioim, bfa_ioim_sm_active);
1691                 break;
1692
1693         case BFA_IOIM_SM_CLEANUP:
1694                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1695                 bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
1696                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
1697                               ioim);
1698                 bfa_ioim_notify_cleanup(ioim);
1699                 break;
1700
1701         case BFA_IOIM_SM_ABORT:
1702                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1703                 bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
1704                 bfa_ioim_move_to_comp_q(ioim);
1705                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1706                               ioim);
1707                 break;
1708
1709         case BFA_IOIM_SM_HWFAIL:
1710                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1711                 bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
1712                 bfa_ioim_move_to_comp_q(ioim);
1713                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
1714                               ioim);
1715                 break;
1716
1717         default:
1718                 bfa_sm_fault(ioim->bfa, event);
1719         }
1720 }
1721
1722 /**
1723  *      IO is active.
1724  */
1725 static void
1726 bfa_ioim_sm_active(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1727 {
1728         bfa_trc_fp(ioim->bfa, ioim->iotag);
1729         bfa_trc_fp(ioim->bfa, event);
1730
1731         switch (event) {
1732         case BFA_IOIM_SM_COMP_GOOD:
1733                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1734                 bfa_ioim_move_to_comp_q(ioim);
1735                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
1736                               __bfa_cb_ioim_good_comp, ioim);
1737                 break;
1738
1739         case BFA_IOIM_SM_COMP:
1740                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1741                 bfa_ioim_move_to_comp_q(ioim);
1742                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
1743                               ioim);
1744                 break;
1745
1746         case BFA_IOIM_SM_DONE:
1747                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
1748                 bfa_ioim_move_to_comp_q(ioim);
1749                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
1750                               ioim);
1751                 break;
1752
1753         case BFA_IOIM_SM_ABORT:
1754                 ioim->iosp->abort_explicit = BFA_TRUE;
1755                 ioim->io_cbfn = __bfa_cb_ioim_abort;
1756
1757                 if (bfa_ioim_send_abort(ioim))
1758                         bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
1759                 else {
1760                         bfa_sm_set_state(ioim, bfa_ioim_sm_abort_qfull);
1761                         bfa_stats(ioim->itnim, qwait);
1762                         bfa_reqq_wait(ioim->bfa, ioim->reqq,
1763                                           &ioim->iosp->reqq_wait);
1764                 }
1765                 break;
1766
1767         case BFA_IOIM_SM_CLEANUP:
1768                 ioim->iosp->abort_explicit = BFA_FALSE;
1769                 ioim->io_cbfn = __bfa_cb_ioim_failed;
1770
1771                 if (bfa_ioim_send_abort(ioim))
1772                         bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
1773                 else {
1774                         bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
1775                         bfa_stats(ioim->itnim, qwait);
1776                         bfa_reqq_wait(ioim->bfa, ioim->reqq,
1777                                           &ioim->iosp->reqq_wait);
1778                 }
1779                 break;
1780
1781         case BFA_IOIM_SM_HWFAIL:
1782                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1783                 bfa_ioim_move_to_comp_q(ioim);
1784                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
1785                               ioim);
1786                 break;
1787
1788         case BFA_IOIM_SM_SQRETRY:
1789                 if (bfa_ioim_get_iotag(ioim) != BFA_TRUE) {
1790                         /* max retries completed; free the IO */
1791                         bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
1792                         bfa_ioim_move_to_comp_q(ioim);
1793                         bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
1794                                         __bfa_cb_ioim_failed, ioim);
1795                         break;
1796                 }
1797                 /* waiting for the IO tag resource to be freed */
1798                 bfa_sm_set_state(ioim, bfa_ioim_sm_cmnd_retry);
1799                 break;
1800
1801         default:
1802                 bfa_sm_fault(ioim->bfa, event);
1803         }
1804 }
1805
1806 /**
1807  *      IO is retried with a new tag.
1808  */
1809 static void
1810 bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1811 {
1812         bfa_trc_fp(ioim->bfa, ioim->iotag);
1813         bfa_trc_fp(ioim->bfa, event);
1814
1815         switch (event) {
1816         case BFA_IOIM_SM_FREE:
1817                 /* ABTS and RRQ are done. Now retry the IO with a new tag */
1818                 if (!bfa_ioim_send_ioreq(ioim)) {
1819                         bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
1820                         break;
1821                 }
1822                 bfa_sm_set_state(ioim, bfa_ioim_sm_active);
1823         break;
1824
1825         case BFA_IOIM_SM_CLEANUP:
1826                 ioim->iosp->abort_explicit = BFA_FALSE;
1827                 ioim->io_cbfn = __bfa_cb_ioim_failed;
1828
1829                 if (bfa_ioim_send_abort(ioim))
1830                         bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
1831                 else {
1832                         bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
1833                         bfa_stats(ioim->itnim, qwait);
1834                         bfa_reqq_wait(ioim->bfa, ioim->reqq,
1835                                           &ioim->iosp->reqq_wait);
1836                 }
1837         break;
1838
1839         case BFA_IOIM_SM_HWFAIL:
1840                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1841                 bfa_ioim_move_to_comp_q(ioim);
1842                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
1843                          __bfa_cb_ioim_failed, ioim);
1844                 break;
1845
1846         case BFA_IOIM_SM_ABORT:
1847                 /** In this state the IO abort is already done.
1848                  * Waiting for the IO tag resource to be freed.
1849                  */
1850                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
1851                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1852                               ioim);
1853                 break;
1854
1855         default:
1856                 bfa_sm_fault(ioim->bfa, event);
1857         }
1858 }
1859
1860 /**
1861  *      IO is being aborted, waiting for completion from firmware.
1862  */
1863 static void
1864 bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1865 {
1866         bfa_trc(ioim->bfa, ioim->iotag);
1867         bfa_trc(ioim->bfa, event);
1868
1869         switch (event) {
1870         case BFA_IOIM_SM_COMP_GOOD:
1871         case BFA_IOIM_SM_COMP:
1872         case BFA_IOIM_SM_DONE:
1873         case BFA_IOIM_SM_FREE:
1874                 break;
1875
1876         case BFA_IOIM_SM_ABORT_DONE:
1877                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
1878                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1879                               ioim);
1880                 break;
1881
1882         case BFA_IOIM_SM_ABORT_COMP:
1883                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1884                 bfa_ioim_move_to_comp_q(ioim);
1885                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1886                               ioim);
1887                 break;
1888
1889         case BFA_IOIM_SM_COMP_UTAG:
1890                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1891                 bfa_ioim_move_to_comp_q(ioim);
1892                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1893                               ioim);
1894                 break;
1895
1896         case BFA_IOIM_SM_CLEANUP:
1897                 bfa_assert(ioim->iosp->abort_explicit == BFA_TRUE);
1898                 ioim->iosp->abort_explicit = BFA_FALSE;
1899
1900                 if (bfa_ioim_send_abort(ioim))
1901                         bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
1902                 else {
1903                         bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
1904                         bfa_stats(ioim->itnim, qwait);
1905                         bfa_reqq_wait(ioim->bfa, ioim->reqq,
1906                                           &ioim->iosp->reqq_wait);
1907                 }
1908                 break;
1909
1910         case BFA_IOIM_SM_HWFAIL:
1911                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1912                 bfa_ioim_move_to_comp_q(ioim);
1913                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
1914                               ioim);
1915                 break;
1916
1917         default:
1918                 bfa_sm_fault(ioim->bfa, event);
1919         }
1920 }
1921
1922 /**
1923  * IO is being cleaned up (implicit abort), waiting for completion from
1924  * firmware.
1925  */
1926 static void
1927 bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1928 {
1929         bfa_trc(ioim->bfa, ioim->iotag);
1930         bfa_trc(ioim->bfa, event);
1931
1932         switch (event) {
1933         case BFA_IOIM_SM_COMP_GOOD:
1934         case BFA_IOIM_SM_COMP:
1935         case BFA_IOIM_SM_DONE:
1936         case BFA_IOIM_SM_FREE:
1937                 break;
1938
1939         case BFA_IOIM_SM_ABORT:
1940                 /**
1941                  * IO is already being aborted implicitly
1942                  */
1943                 ioim->io_cbfn = __bfa_cb_ioim_abort;
1944                 break;
1945
1946         case BFA_IOIM_SM_ABORT_DONE:
1947                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
1948                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
1949                 bfa_ioim_notify_cleanup(ioim);
1950                 break;
1951
1952         case BFA_IOIM_SM_ABORT_COMP:
1953                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1954                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
1955                 bfa_ioim_notify_cleanup(ioim);
1956                 break;
1957
1958         case BFA_IOIM_SM_COMP_UTAG:
1959                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1960                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
1961                 bfa_ioim_notify_cleanup(ioim);
1962                 break;
1963
1964         case BFA_IOIM_SM_HWFAIL:
1965                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1966                 bfa_ioim_move_to_comp_q(ioim);
1967                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
1968                               ioim);
1969                 break;
1970
1971         case BFA_IOIM_SM_CLEANUP:
1972                 /**
1973                  * IO can be in cleanup state already due to TM command.
1974                  * 2nd cleanup request comes from ITN offline event.
1975                  */
1976                 break;
1977
1978         default:
1979                 bfa_sm_fault(ioim->bfa, event);
1980         }
1981 }
1982
1983 /**
1984  *      IO is waiting for room in request CQ.
1985  */
1986 static void
1987 bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1988 {
1989         bfa_trc(ioim->bfa, ioim->iotag);
1990         bfa_trc(ioim->bfa, event);
1991
1992         switch (event) {
1993         case BFA_IOIM_SM_QRESUME:
1994                 bfa_sm_set_state(ioim, bfa_ioim_sm_active);
1995                 bfa_ioim_send_ioreq(ioim);
1996                 break;
1997
1998         case BFA_IOIM_SM_ABORT:
1999                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
2000                 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
2001                 bfa_ioim_move_to_comp_q(ioim);
2002                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
2003                               ioim);
2004                 break;
2005
2006         case BFA_IOIM_SM_CLEANUP:
2007                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
2008                 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
2009                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
2010                               ioim);
2011                 bfa_ioim_notify_cleanup(ioim);
2012                 break;
2013
2014         case BFA_IOIM_SM_HWFAIL:
2015                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
2016                 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
2017                 bfa_ioim_move_to_comp_q(ioim);
2018                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
2019                               ioim);
2020                 break;
2021
2022         default:
2023                 bfa_sm_fault(ioim->bfa, event);
2024         }
2025 }
2026
2027 /**
2028  *      Active IO is being aborted, waiting for room in request CQ.
2029  */
2030 static void
2031 bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
2032 {
2033         bfa_trc(ioim->bfa, ioim->iotag);
2034         bfa_trc(ioim->bfa, event);
2035
2036         switch (event) {
2037         case BFA_IOIM_SM_QRESUME:
2038                 bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
2039                 bfa_ioim_send_abort(ioim);
2040                 break;
2041
2042         case BFA_IOIM_SM_CLEANUP:
2043                 bfa_assert(ioim->iosp->abort_explicit == BFA_TRUE);
2044                 ioim->iosp->abort_explicit = BFA_FALSE;
2045                 bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
2046                 break;
2047
2048         case BFA_IOIM_SM_COMP_GOOD:
2049         case BFA_IOIM_SM_COMP:
2050                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
2051                 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
2052                 bfa_ioim_move_to_comp_q(ioim);
2053                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
2054                               ioim);
2055                 break;
2056
2057         case BFA_IOIM_SM_DONE:
2058                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
2059                 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
2060                 bfa_ioim_move_to_comp_q(ioim);
2061                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
2062                               ioim);
2063                 break;
2064
2065         case BFA_IOIM_SM_HWFAIL:
2066                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
2067                 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
2068                 bfa_ioim_move_to_comp_q(ioim);
2069                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
2070                               ioim);
2071                 break;
2072
2073         default:
2074                 bfa_sm_fault(ioim->bfa, event);
2075         }
2076 }
2077
2078 /**
2079  *      Active IO is being cleaned up, waiting for room in request CQ.
2080  */
2081 static void
2082 bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
2083 {
2084         bfa_trc(ioim->bfa, ioim->iotag);
2085         bfa_trc(ioim->bfa, event);
2086
2087         switch (event) {
2088         case BFA_IOIM_SM_QRESUME:
2089                 bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
2090                 bfa_ioim_send_abort(ioim);
2091                 break;
2092
2093         case BFA_IOIM_SM_ABORT:
2094                 /**
2095                  * IO is already being cleaned up implicitly
2096                  */
2097                 ioim->io_cbfn = __bfa_cb_ioim_abort;
2098                 break;
2099
2100         case BFA_IOIM_SM_COMP_GOOD:
2101         case BFA_IOIM_SM_COMP:
2102                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
2103                 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
2104                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
2105                 bfa_ioim_notify_cleanup(ioim);
2106                 break;
2107
2108         case BFA_IOIM_SM_DONE:
2109                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
2110                 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
2111                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
2112                 bfa_ioim_notify_cleanup(ioim);
2113                 break;
2114
2115         case BFA_IOIM_SM_HWFAIL:
2116                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
2117                 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
2118                 bfa_ioim_move_to_comp_q(ioim);
2119                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
2120                               ioim);
2121                 break;
2122
2123         default:
2124                 bfa_sm_fault(ioim->bfa, event);
2125         }
2126 }
2127
2128 /**
2129  * IO bfa callback is pending.
2130  */
2131 static void
2132 bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
2133 {
2134         bfa_trc_fp(ioim->bfa, ioim->iotag);
2135         bfa_trc_fp(ioim->bfa, event);
2136
2137         switch (event) {
2138         case BFA_IOIM_SM_HCB:
2139                 bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
2140                 bfa_ioim_free(ioim);
2141                 break;
2142
2143         case BFA_IOIM_SM_CLEANUP:
2144                 bfa_ioim_notify_cleanup(ioim);
2145                 break;
2146
2147         case BFA_IOIM_SM_HWFAIL:
2148                 break;
2149
2150         default:
2151                 bfa_sm_fault(ioim->bfa, event);
2152         }
2153 }
2154
2155 /**
2156  * IO bfa callback is pending. IO resource cannot be freed.
2157  */
2158 static void
2159 bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
2160 {
2161         bfa_trc(ioim->bfa, ioim->iotag);
2162         bfa_trc(ioim->bfa, event);
2163
2164         switch (event) {
2165         case BFA_IOIM_SM_HCB:
2166                 bfa_sm_set_state(ioim, bfa_ioim_sm_resfree);
2167                 list_del(&ioim->qe);
2168                 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_resfree_q);
2169                 break;
2170
2171         case BFA_IOIM_SM_FREE:
2172                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
2173                 break;
2174
2175         case BFA_IOIM_SM_CLEANUP:
2176                 bfa_ioim_notify_cleanup(ioim);
2177                 break;
2178
2179         case BFA_IOIM_SM_HWFAIL:
2180                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
2181                 break;
2182
2183         default:
2184                 bfa_sm_fault(ioim->bfa, event);
2185         }
2186 }
2187
2188 /**
2189  * IO is completed, waiting for resource free from firmware.
2190  */
2191 static void
2192 bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
2193 {
2194         bfa_trc(ioim->bfa, ioim->iotag);
2195         bfa_trc(ioim->bfa, event);
2196
2197         switch (event) {
2198         case BFA_IOIM_SM_FREE:
2199                 bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
2200                 bfa_ioim_free(ioim);
2201                 break;
2202
2203         case BFA_IOIM_SM_CLEANUP:
2204                 bfa_ioim_notify_cleanup(ioim);
2205                 break;
2206
2207         case BFA_IOIM_SM_HWFAIL:
2208                 break;
2209
2210         default:
2211                 bfa_sm_fault(ioim->bfa, event);
2212         }
2213 }
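
/*
 * Summary of the common paths through the IO state machines above
 * (informational only):
 *
 *	uninit --START--> active --COMP_GOOD--> hcb --HCB--> uninit
 *
 * A DONE completion routes through hcb_free and resfree instead, so the
 * IO tag is not recycled until firmware reports the resource free.
 */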
2214
2215
2216
2217 /**
2218  *  hal_ioim_private
2219  */
2220
2221 static void
2222 __bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete)
2223 {
2224         struct bfa_ioim_s *ioim = cbarg;
2225
2226         if (!complete) {
2227                 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2228                 return;
2229         }
2230
2231         bfa_cb_ioim_good_comp(ioim->bfa->bfad, ioim->dio);
2232 }
2233
2234 static void
2235 __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete)
2236 {
2237         struct bfa_ioim_s       *ioim = cbarg;
2238         struct bfi_ioim_rsp_s *m;
2239         u8      *snsinfo = NULL;
2240         u8      sns_len = 0;
2241         s32     residue = 0;
2242
2243         if (!complete) {
2244                 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2245                 return;
2246         }
2247
2248         m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg;
2249         if (m->io_status == BFI_IOIM_STS_OK) {
2250                 /**
2251                  * set up sense information, if present
2252                  */
2253                 if ((m->scsi_status == SCSI_STATUS_CHECK_CONDITION) &&
2254                                         m->sns_len) {
2255                         sns_len = m->sns_len;
2256                         snsinfo = ioim->iosp->snsinfo;
2257                 }
2258
2259                 /**
2260                  * set up the residue value correctly for normal completions
2261                  */
2262                 if (m->resid_flags == FCP_RESID_UNDER) {
2263                         residue = bfa_os_ntohl(m->residue);
2264                         bfa_stats(ioim->itnim, iocomp_underrun);
2265                 }
2266                 if (m->resid_flags == FCP_RESID_OVER) {
2267                         residue = bfa_os_ntohl(m->residue);
2268                         residue = -residue;
2269                         bfa_stats(ioim->itnim, iocomp_overrun);
2270                 }
2271         }
2272
2273         bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, m->io_status,
2274                           m->scsi_status, sns_len, snsinfo, residue);
2275 }
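
/*
 * Residue convention used above (worked example): FCP_RESID_UNDER yields
 * a positive residue, FCP_RESID_OVER a negative one. A 4096-byte request
 * that transferred only 3072 bytes therefore completes with residue 1024.
 */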
2276
2277 static void
2278 __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete)
2279 {
2280         struct bfa_ioim_s *ioim = cbarg;
2281
2282         if (!complete) {
2283                 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2284                 return;
2285         }
2286
2287         bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_ABORTED,
2288                           0, 0, NULL, 0);
2289 }
2290
2291 static void
2292 __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete)
2293 {
2294         struct bfa_ioim_s *ioim = cbarg;
2295
2296         bfa_stats(ioim->itnim, path_tov_expired);
2297         if (!complete) {
2298                 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2299                 return;
2300         }
2301
2302         bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_PATHTOV,
2303                           0, 0, NULL, 0);
2304 }
2305
2306 static void
2307 __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete)
2308 {
2309         struct bfa_ioim_s *ioim = cbarg;
2310
2311         if (!complete) {
2312                 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2313                 return;
2314         }
2315
2316         bfa_cb_ioim_abort(ioim->bfa->bfad, ioim->dio);
2317 }
2318
2319 static void
2320 bfa_ioim_sgpg_alloced(void *cbarg)
2321 {
2322         struct bfa_ioim_s *ioim = cbarg;
2323
2324         ioim->nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
2325         list_splice_tail_init(&ioim->iosp->sgpg_wqe.sgpg_q, &ioim->sgpg_q);
2326         bfa_ioim_sgpg_setup(ioim);
2327         bfa_sm_send_event(ioim, BFA_IOIM_SM_SGALLOCED);
2328 }
2329
2330 /**
2331  * Send I/O request to firmware.
2332  */
2333 static  bfa_boolean_t
2334 bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
2335 {
2336         struct bfa_itnim_s *itnim = ioim->itnim;
2337         struct bfi_ioim_req_s *m;
2338         static struct fcp_cmnd_s cmnd_z0 = { 0 };
2339         struct bfi_sge_s      *sge;
2340         u32     pgdlen = 0;
2341         u32     fcp_dl;
2342         u64 addr;
2343         struct scatterlist *sg;
2344         struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;
2345
2346         /**
2347          * check for room in queue to send request now
2348          */
2349         m = bfa_reqq_next(ioim->bfa, ioim->reqq);
2350         if (!m) {
2351                 bfa_stats(ioim->itnim, qwait);
2352                 bfa_reqq_wait(ioim->bfa, ioim->reqq,
2353                                   &ioim->iosp->reqq_wait);
2354                 return BFA_FALSE;
2355         }
2356
2357         /**
2358          * build i/o request message next
2359          */
2360         m->io_tag = bfa_os_htons(ioim->iotag);
2361         m->rport_hdl = ioim->itnim->rport->fw_handle;
2362         m->io_timeout = bfa_cb_ioim_get_timeout(ioim->dio);
2363
2364         /**
2365          * build inline IO SG element here
2366          */
2367         sge = &m->sges[0];
2368         if (ioim->nsges) {
2369                 sg = (struct scatterlist *)scsi_sglist(cmnd);
2370                 addr = bfa_os_sgaddr(sg_dma_address(sg));
2371                 sge->sga = *(union bfi_addr_u *) &addr;
2372                 pgdlen = sg_dma_len(sg);
2373                 sge->sg_len = pgdlen;
2374                 sge->flags = (ioim->nsges > BFI_SGE_INLINE) ?
2375                                         BFI_SGE_DATA_CPL : BFI_SGE_DATA_LAST;
2376                 bfa_sge_to_be(sge);
2377                 sge++;
2378         }
2379
2380         if (ioim->nsges > BFI_SGE_INLINE) {
2381                 sge->sga = ioim->sgpg->sgpg_pa;
2382         } else {
2383                 sge->sga.a32.addr_lo = 0;
2384                 sge->sga.a32.addr_hi = 0;
2385         }
2386         sge->sg_len = pgdlen;
2387         sge->flags = BFI_SGE_PGDLEN;
2388         bfa_sge_to_be(sge);
2389
2390         /**
2391          * set up I/O command parameters
2392          */
2393         bfa_os_assign(m->cmnd, cmnd_z0);
2394         m->cmnd.lun = bfa_cb_ioim_get_lun(ioim->dio);
2395         m->cmnd.iodir = bfa_cb_ioim_get_iodir(ioim->dio);
2396         bfa_os_assign(m->cmnd.cdb,
2397                         *(scsi_cdb_t *)bfa_cb_ioim_get_cdb(ioim->dio));
2398         fcp_dl = bfa_cb_ioim_get_size(ioim->dio);
2399         m->cmnd.fcp_dl = bfa_os_htonl(fcp_dl);
2400
2401         /**
2402          * set up I/O message header
2403          */
2404         switch (m->cmnd.iodir) {
2405         case FCP_IODIR_READ:
2406                 bfi_h2i_set(m->mh, BFI_MC_IOIM_READ, 0, bfa_lpuid(ioim->bfa));
2407                 bfa_stats(itnim, input_reqs);
2408                 ioim->itnim->stats.rd_throughput += fcp_dl;
2409                 break;
2410         case FCP_IODIR_WRITE:
2411                 bfi_h2i_set(m->mh, BFI_MC_IOIM_WRITE, 0, bfa_lpuid(ioim->bfa));
2412                 bfa_stats(itnim, output_reqs);
2413                 ioim->itnim->stats.wr_throughput += fcp_dl;
2414                 break;
2415         case FCP_IODIR_RW:
2416                 bfa_stats(itnim, input_reqs);
2417                 bfa_stats(itnim, output_reqs);
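                /* fall through: RW requests use the generic IO opcode */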
2418         default:
2419                 bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa));
2420         }
2421         if (itnim->seq_rec ||
2422             (bfa_cb_ioim_get_size(ioim->dio) & (sizeof(u32) - 1)))
2423                 bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa));
2424
2425 #ifdef IOIM_ADVANCED
2426         m->cmnd.crn = bfa_cb_ioim_get_crn(ioim->dio);
2427         m->cmnd.priority = bfa_cb_ioim_get_priority(ioim->dio);
2428         m->cmnd.taskattr = bfa_cb_ioim_get_taskattr(ioim->dio);
2429
2430         /**
2431          * Handle large CDB (>16 bytes).
2432          */
2433         m->cmnd.addl_cdb_len = (bfa_cb_ioim_get_cdblen(ioim->dio) -
2434                                         FCP_CMND_CDB_LEN) / sizeof(u32);
2435         if (m->cmnd.addl_cdb_len) {
2436                 bfa_os_memcpy(&m->cmnd.cdb + 1, (scsi_cdb_t *)
2437                                 bfa_cb_ioim_get_cdb(ioim->dio) + 1,
2438                                 m->cmnd.addl_cdb_len * sizeof(u32));
2439                 fcp_cmnd_fcpdl(&m->cmnd) =
2440                                 bfa_os_htonl(bfa_cb_ioim_get_size(ioim->dio));
2441         }
2442 #endif
2443
2444         /**
2445          * queue I/O message to firmware
2446          */
2447         bfa_reqq_produce(ioim->bfa, ioim->reqq);
2448         return BFA_TRUE;
2449 }
2450
2451 /**
2452  * Set up any additional SG pages needed. The inline SG element is set up
2453  * at queuing time.
2454  */
2455 static bfa_boolean_t
2456 bfa_ioim_sge_setup(struct bfa_ioim_s *ioim)
2457 {
2458         u16     nsgpgs;
2459
2460         bfa_assert(ioim->nsges > BFI_SGE_INLINE);
2461
2462         /**
2463          * allocate SG pages needed
2464          */
2465         nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
2466         if (!nsgpgs)
2467                 return BFA_TRUE;
2468
2469         if (bfa_sgpg_malloc(ioim->bfa, &ioim->sgpg_q, nsgpgs)
2470             != BFA_STATUS_OK) {
2471                 bfa_sgpg_wait(ioim->bfa, &ioim->iosp->sgpg_wqe, nsgpgs);
2472                 return BFA_FALSE;
2473         }
2474
2475         ioim->nsgpgs = nsgpgs;
2476         bfa_ioim_sgpg_setup(ioim);
2477
2478         return BFA_TRUE;
2479 }
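
/*
 * Sizing note (worked example, assuming the BFI_* constants from the
 * firmware interface headers): an IO whose nsges fits BFI_SGE_INLINE
 * needs no SG pages at all; a larger IO allocates
 * BFA_SGPG_NPAGE(ioim->nsges) pages, and bfa_ioim_sgpg_setup() below
 * chains them with BFI_SGE_LINK elements, closing the last page with a
 * zeroed BFI_SGE_PGDLEN element.
 */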
2480
2481 static void
2482 bfa_ioim_sgpg_setup(struct bfa_ioim_s *ioim)
2483 {
2484         int             sgeid, nsges, i;
2485         struct bfi_sge_s      *sge;
2486         struct bfa_sgpg_s *sgpg;
2487         u32     pgcumsz;
2488         u64        addr;
2489         struct scatterlist *sg;
2490         struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;
2491
2492         sgeid = BFI_SGE_INLINE;
2493         ioim->sgpg = sgpg = bfa_q_first(&ioim->sgpg_q);
2494
2495         sg = scsi_sglist(cmnd);
2496         sg = sg_next(sg);
2497
2498         do {
2499                 sge = sgpg->sgpg->sges;
2500                 nsges = ioim->nsges - sgeid;
2501                 if (nsges > BFI_SGPG_DATA_SGES)
2502                         nsges = BFI_SGPG_DATA_SGES;
2503
2504                 pgcumsz = 0;
2505                 for (i = 0; i < nsges; i++, sge++, sgeid++, sg = sg_next(sg)) {
2506                         addr = bfa_os_sgaddr(sg_dma_address(sg));
2507                         sge->sga = *(union bfi_addr_u *) &addr;
2508                         sge->sg_len = sg_dma_len(sg);
2509                         pgcumsz += sge->sg_len;
2510
2511                         /**
2512                          * set flags
2513                          */
2514                         if (i < (nsges - 1))
2515                                 sge->flags = BFI_SGE_DATA;
2516                         else if (sgeid < (ioim->nsges - 1))
2517                                 sge->flags = BFI_SGE_DATA_CPL;
2518                         else
2519                                 sge->flags = BFI_SGE_DATA_LAST;
2520
2521                         bfa_sge_to_le(sge);
2522                 }
2523
2524                 sgpg = (struct bfa_sgpg_s *) bfa_q_next(sgpg);
2525
2526                 /**
2527                  * set the link element of each page
2528                  */
2529                 if (sgeid == ioim->nsges) {
2530                         sge->flags = BFI_SGE_PGDLEN;
2531                         sge->sga.a32.addr_lo = 0;
2532                         sge->sga.a32.addr_hi = 0;
2533                 } else {
2534                         sge->flags = BFI_SGE_LINK;
2535                         sge->sga = sgpg->sgpg_pa;
2536                 }
2537                 sge->sg_len = pgcumsz;
2538
2539                 bfa_sge_to_le(sge);
2540         } while (sgeid < ioim->nsges);
2541 }
2542
2543 /**
2544  * Send I/O abort request to firmware.
2545  */
2546 static  bfa_boolean_t
2547 bfa_ioim_send_abort(struct bfa_ioim_s *ioim)
2548 {
2549         struct bfi_ioim_abort_req_s *m;
2550         enum bfi_ioim_h2i       msgop;
2551
2552         /**
2553          * check for room in queue to send request now
2554          */
2555         m = bfa_reqq_next(ioim->bfa, ioim->reqq);
2556         if (!m)
2557                 return BFA_FALSE;
2558
2559         /**
2560          * build the abort request message next
2561          */
2562         if (ioim->iosp->abort_explicit)
2563                 msgop = BFI_IOIM_H2I_IOABORT_REQ;
2564         else
2565                 msgop = BFI_IOIM_H2I_IOCLEANUP_REQ;
2566
2567         bfi_h2i_set(m->mh, BFI_MC_IOIM, msgop, bfa_lpuid(ioim->bfa));
2568         m->io_tag    = bfa_os_htons(ioim->iotag);
2569         m->abort_tag = ++ioim->abort_tag;
2570
2571         /**
2572          * queue I/O message to firmware
2573          */
2574         bfa_reqq_produce(ioim->bfa, ioim->reqq);
2575         return BFA_TRUE;
2576 }
2577
2578 /**
2579  * Called to resume any I/O requests waiting for room in the request queue.
2580  */
2581 static void
2582 bfa_ioim_qresume(void *cbarg)
2583 {
2584         struct bfa_ioim_s *ioim = cbarg;
2585
2586         bfa_stats(ioim->itnim, qresumes);
2587         bfa_sm_send_event(ioim, BFA_IOIM_SM_QRESUME);
2588 }
2589
2590
2591 static void
2592 bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim)
2593 {
2594         /**
2595          * Move IO from itnim queue to fcpim global queue since itnim will be
2596          * freed.
2597          */
2598         list_del(&ioim->qe);
2599         list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
2600
2601         if (!ioim->iosp->tskim) {
2602                 if (ioim->fcpim->delay_comp && ioim->itnim->iotov_active) {
2603                         bfa_cb_dequeue(&ioim->hcb_qe);
2604                         list_del(&ioim->qe);
2605                         list_add_tail(&ioim->qe, &ioim->itnim->delay_comp_q);
2606                 }
2607                 bfa_itnim_iodone(ioim->itnim);
2608         } else
2609                 bfa_tskim_iodone(ioim->iosp->tskim);
2610 }
2611
2612 static bfa_boolean_t
2613 bfa_ioim_is_abortable(struct bfa_ioim_s *ioim)
2614 {
2615         if ((bfa_sm_cmp_state(ioim, bfa_ioim_sm_uninit) &&
2616             (!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim)))    ||
2617             (bfa_sm_cmp_state(ioim, bfa_ioim_sm_abort))         ||
2618             (bfa_sm_cmp_state(ioim, bfa_ioim_sm_abort_qfull))   ||
2619             (bfa_sm_cmp_state(ioim, bfa_ioim_sm_hcb))           ||
2620             (bfa_sm_cmp_state(ioim, bfa_ioim_sm_hcb_free))      ||
2621             (bfa_sm_cmp_state(ioim, bfa_ioim_sm_resfree)))
2622                 return BFA_FALSE;
2623
2624         return BFA_TRUE;
2625 }
2626
2627 /**
2628  *      Complete a delayed IO after path TOV expiry, or after the link comes back.
2629  */
2630 void
2631 bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov)
2632 {
2633         /**
2634          * If the path TOV timer expired, fail the IO back with PATHTOV
2635          * status - these IO requests are not normally retried by the IO stack.
2636          *
2637          * Otherwise the device came back online; fail the IO with normal
2638          * failed status so that the IO stack retries these failed requests.
2639          */
2640         if (iotov)
2641                 ioim->io_cbfn = __bfa_cb_ioim_pathtov;
2642         else {
2643                 ioim->io_cbfn = __bfa_cb_ioim_failed;
2644                 bfa_stats(ioim->itnim, iocom_nexus_abort);
2645         }
2646         bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
2647
2648         /**
2649          * Move IO to fcpim global queue since itnim will be
2650          * freed.
2651          */
2652         list_del(&ioim->qe);
2653         list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
2654 }
2655
2656
2657
2658 /**
2659  *  hal_ioim_friend
2660  */
2661
2662 /**
2663  * Memory allocation and initialization.
2664  */
2665 void
2666 bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
2667 {
2668         struct bfa_ioim_s               *ioim;
2669         struct bfa_ioim_sp_s    *iosp;
2670         u16             i;
2671         u8                      *snsinfo;
2672         u32             snsbufsz;
2673
2674         /**
2675          * claim memory first
2676          */
2677         ioim = (struct bfa_ioim_s *) bfa_meminfo_kva(minfo);
2678         fcpim->ioim_arr = ioim;
2679         bfa_meminfo_kva(minfo) = (u8 *) (ioim + fcpim->num_ioim_reqs);
2680
2681         iosp = (struct bfa_ioim_sp_s *) bfa_meminfo_kva(minfo);
2682         fcpim->ioim_sp_arr = iosp;
2683         bfa_meminfo_kva(minfo) = (u8 *) (iosp + fcpim->num_ioim_reqs);
2684
2685         /**
2686          * Claim DMA memory for per IO sense data.
2687          */
2688         snsbufsz = fcpim->num_ioim_reqs * BFI_IOIM_SNSLEN;
2689         fcpim->snsbase.pa  = bfa_meminfo_dma_phys(minfo);
2690         bfa_meminfo_dma_phys(minfo) += snsbufsz;
2691
2692         fcpim->snsbase.kva = bfa_meminfo_dma_virt(minfo);
2693         bfa_meminfo_dma_virt(minfo) += snsbufsz;
2694         snsinfo = fcpim->snsbase.kva;
2695         bfa_iocfc_set_snsbase(fcpim->bfa, fcpim->snsbase.pa);
2696
2697         /**
2698          * Initialize ioim free queues
2699          */
2700         INIT_LIST_HEAD(&fcpim->ioim_free_q);
2701         INIT_LIST_HEAD(&fcpim->ioim_resfree_q);
2702         INIT_LIST_HEAD(&fcpim->ioim_comp_q);
2703
2704         for (i = 0; i < fcpim->num_ioim_reqs;
2705              i++, ioim++, iosp++, snsinfo += BFI_IOIM_SNSLEN) {
2706                 /*
2707                  * initialize IOIM
2708                  */
2709                 bfa_os_memset(ioim, 0, sizeof(struct bfa_ioim_s));
2710                 ioim->iotag   = i;
2711                 ioim->bfa     = fcpim->bfa;
2712                 ioim->fcpim   = fcpim;
2713                 ioim->iosp    = iosp;
2714                 iosp->snsinfo = snsinfo;
2715                 INIT_LIST_HEAD(&ioim->sgpg_q);
2716                 bfa_reqq_winit(&ioim->iosp->reqq_wait,
2717                                    bfa_ioim_qresume, ioim);
2718                 bfa_sgpg_winit(&ioim->iosp->sgpg_wqe,
2719                                    bfa_ioim_sgpg_alloced, ioim);
2720                 bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
2721
2722                 list_add_tail(&ioim->qe, &fcpim->ioim_free_q);
2723         }
2724 }
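
/*
 * Sizing sketch (illustrative arithmetic): with num_ioim_reqs == N the
 * attach above claims N * sizeof(struct bfa_ioim_s) and
 * N * sizeof(struct bfa_ioim_sp_s) bytes of KVA plus N * BFI_IOIM_SNSLEN
 * bytes of DMA-able memory, handing each IO a private BFI_IOIM_SNSLEN
 * sense buffer slice through iosp->snsinfo.
 */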
2725
2726 /**
2727  * Driver detach time call.
2728  */
2729 void
2730 bfa_ioim_detach(struct bfa_fcpim_mod_s *fcpim)
2731 {
2732 }
2733
2734 void
2735 bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
2736 {
2737         struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
2738         struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
2739         struct bfa_ioim_s *ioim;
2740         u16     iotag;
2741         enum bfa_ioim_event evt = BFA_IOIM_SM_COMP;
2742
2743         iotag = bfa_os_ntohs(rsp->io_tag);
2744
2745         ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
2746         bfa_assert(ioim->iotag == iotag);
2747
2748         bfa_trc(ioim->bfa, ioim->iotag);
2749         bfa_trc(ioim->bfa, rsp->io_status);
2750         bfa_trc(ioim->bfa, rsp->reuse_io_tag);
2751
2752         if (bfa_sm_cmp_state(ioim, bfa_ioim_sm_active))
2753                 bfa_os_assign(ioim->iosp->comp_rspmsg, *m);
2754
2755         switch (rsp->io_status) {
2756         case BFI_IOIM_STS_OK:
2757                 bfa_stats(ioim->itnim, iocomp_ok);
2758                 if (rsp->reuse_io_tag == 0)
2759                         evt = BFA_IOIM_SM_DONE;
2760                 else
2761                         evt = BFA_IOIM_SM_COMP;
2762                 break;
2763
2764         case BFI_IOIM_STS_TIMEDOUT:
2765                 bfa_stats(ioim->itnim, iocomp_timedout);
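                /* fall through: a timed-out IO is reported as aborted */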
2766         case BFI_IOIM_STS_ABORTED:
2767                 rsp->io_status = BFI_IOIM_STS_ABORTED;
2768                 bfa_stats(ioim->itnim, iocomp_aborted);
2769                 if (rsp->reuse_io_tag == 0)
2770                         evt = BFA_IOIM_SM_DONE;
2771                 else
2772                         evt = BFA_IOIM_SM_COMP;
2773                 break;
2774
2775         case BFI_IOIM_STS_PROTO_ERR:
2776                 bfa_stats(ioim->itnim, iocom_proto_err);
2777                 bfa_assert(rsp->reuse_io_tag);
2778                 evt = BFA_IOIM_SM_COMP;
2779                 break;
2780
2781         case BFI_IOIM_STS_SQER_NEEDED:
2782                 bfa_stats(ioim->itnim, iocom_sqer_needed);
2783                 bfa_assert(rsp->reuse_io_tag == 0);
2784                 evt = BFA_IOIM_SM_SQRETRY;
2785                 break;
2786
2787         case BFI_IOIM_STS_RES_FREE:
2788                 bfa_stats(ioim->itnim, iocom_res_free);
2789                 evt = BFA_IOIM_SM_FREE;
2790                 break;
2791
2792         case BFI_IOIM_STS_HOST_ABORTED:
2793                 bfa_stats(ioim->itnim, iocom_hostabrts);
2794                 if (rsp->abort_tag != ioim->abort_tag) {
2795                         bfa_trc(ioim->bfa, rsp->abort_tag);
2796                         bfa_trc(ioim->bfa, ioim->abort_tag);
2797                         return;
2798                 }
2799
2800                 if (rsp->reuse_io_tag)
2801                         evt = BFA_IOIM_SM_ABORT_COMP;
2802                 else
2803                         evt = BFA_IOIM_SM_ABORT_DONE;
2804                 break;
2805
2806         case BFI_IOIM_STS_UTAG:
2807                 bfa_stats(ioim->itnim, iocom_utags);
2808                 evt = BFA_IOIM_SM_COMP_UTAG;
2809                 break;
2810
2811         default:
2812                 bfa_assert(0);
2813         }
2814
2815         bfa_sm_send_event(ioim, evt);
2816 }
2817
2818 void
2819 bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
2820 {
2821         struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
2822         struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
2823         struct bfa_ioim_s *ioim;
2824         u16     iotag;
2825
2826         iotag = bfa_os_ntohs(rsp->io_tag);
2827
2828         ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
2829         bfa_assert(ioim->iotag == iotag);
2830
2831         bfa_trc_fp(ioim->bfa, ioim->iotag);
2832         bfa_ioim_cb_profile_comp(fcpim, ioim);
2833
2834         bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
2835 }
2836
2837 void
2838 bfa_ioim_profile_start(struct bfa_ioim_s *ioim)
2839 {
2840         ioim->start_time = bfa_os_get_clock();
2841 }
2842
2843 void
2844 bfa_ioim_profile_comp(struct bfa_ioim_s *ioim)
2845 {
2846         u32 fcp_dl = bfa_cb_ioim_get_size(ioim->dio);
2847         u32 index = bfa_ioim_get_index(fcp_dl);
2848         u64 end_time = bfa_os_get_clock();
2849         struct bfa_itnim_latency_s *io_lat =
2850                         &(ioim->itnim->ioprofile.io_latency);
2851         u32 val = (u32)(end_time - ioim->start_time);
2852
2853         bfa_itnim_ioprofile_update(ioim->itnim, index);
2854
2855         io_lat->count[index]++;
2856         io_lat->min[index] = (io_lat->min[index] < val) ?
2857                 io_lat->min[index] : val;
2858         io_lat->max[index] = (io_lat->max[index] > val) ?
2859                 io_lat->max[index] : val;
2860         io_lat->avg[index] += val;
2861 }
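
/*
 * Note on the aggregates above: avg[index] accumulates the sum of sample
 * latencies, so a consumer derives the mean as avg[index] / count[index]
 * (an assumption based on the accumulation here), scaled by the
 * clock_res_mul/clock_res_div factors exported via the ioprofile.
 */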
2862 /**
2863  * Called by itnim to clean up IO while going offline.
2864  */
2865 void
2866 bfa_ioim_cleanup(struct bfa_ioim_s *ioim)
2867 {
2868         bfa_trc(ioim->bfa, ioim->iotag);
2869         bfa_stats(ioim->itnim, io_cleanups);
2870
2871         ioim->iosp->tskim = NULL;
2872         bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
2873 }
2874
2875 void
2876 bfa_ioim_cleanup_tm(struct bfa_ioim_s *ioim, struct bfa_tskim_s *tskim)
2877 {
2878         bfa_trc(ioim->bfa, ioim->iotag);
2879         bfa_stats(ioim->itnim, io_tmaborts);
2880
2881         ioim->iosp->tskim = tskim;
2882         bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
2883 }
2884
2885 /**
2886  * IOC failure handling.
2887  */
2888 void
2889 bfa_ioim_iocdisable(struct bfa_ioim_s *ioim)
2890 {
2891         bfa_trc(ioim->bfa, ioim->iotag);
2892         bfa_stats(ioim->itnim, io_iocdowns);
2893         bfa_sm_send_event(ioim, BFA_IOIM_SM_HWFAIL);
2894 }
2895
2896 /**
2897  * IO offline TOV popped. Fail the pending IO.
2898  */
2899 void
2900 bfa_ioim_tov(struct bfa_ioim_s *ioim)
2901 {
2902         bfa_trc(ioim->bfa, ioim->iotag);
2903         bfa_sm_send_event(ioim, BFA_IOIM_SM_IOTOV);
2904 }
2905
2906
2907
2908 /**
2909  *  hal_ioim_api
2910  */
2911
2912 /**
2913  * Allocate IOIM resource for initiator mode I/O request.
2914  */
2915 struct bfa_ioim_s *
2916 bfa_ioim_alloc(struct bfa_s *bfa, struct bfad_ioim_s *dio,
2917                 struct bfa_itnim_s *itnim, u16 nsges)
2918 {
2919         struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
2920         struct bfa_ioim_s *ioim;
2921
2922         /**
2923          * allocate IOIM resource
2924          */
2925         bfa_q_deq(&fcpim->ioim_free_q, &ioim);
2926         if (!ioim) {
2927                 bfa_stats(itnim, no_iotags);
2928                 return NULL;
2929         }
2930
2931         ioim->dio = dio;
2932         ioim->itnim = itnim;
2933         ioim->nsges = nsges;
2934         ioim->nsgpgs = 0;
2935
2936         bfa_stats(itnim, total_ios);
2937         fcpim->ios_active++;
2938
2939         list_add_tail(&ioim->qe, &itnim->io_q);
2940         bfa_trc_fp(ioim->bfa, ioim->iotag);
2941
2942         return ioim;
2943 }
2944
2945 void
2946 bfa_ioim_free(struct bfa_ioim_s *ioim)
2947 {
2948         struct bfa_fcpim_mod_s *fcpim = ioim->fcpim;
2949
2950         bfa_trc_fp(ioim->bfa, ioim->iotag);
2951         bfa_assert_fp(bfa_sm_cmp_state(ioim, bfa_ioim_sm_uninit));
2952
2953         bfa_assert_fp(list_empty(&ioim->sgpg_q) ||
2954                         (ioim->nsges > BFI_SGE_INLINE));
2955
2956         if (ioim->nsgpgs > 0)
2957                 bfa_sgpg_mfree(ioim->bfa, &ioim->sgpg_q, ioim->nsgpgs);
2958
2959         bfa_stats(ioim->itnim, io_comps);
2960         fcpim->ios_active--;
2961
2962         list_del(&ioim->qe);
2963         list_add_tail(&ioim->qe, &fcpim->ioim_free_q);
2964 }
2965
2966 void
2967 bfa_ioim_start(struct bfa_ioim_s *ioim)
2968 {
2969         bfa_trc_fp(ioim->bfa, ioim->iotag);
2970
2971         bfa_ioim_cb_profile_start(ioim->fcpim, ioim);
2972
2973         /**
2974          * Obtain the queue over which this request has to be issued
2975          */
2976         ioim->reqq = bfa_fcpim_ioredirect_enabled(ioim->bfa) ?
2977                         bfa_cb_ioim_get_reqq(ioim->dio) :
2978                         bfa_itnim_get_reqq(ioim);
2979
2980         bfa_sm_send_event(ioim, BFA_IOIM_SM_START);
2981 }
2982
2983 /**
2984  * Driver I/O abort request.
2985  */
2986 bfa_status_t
2987 bfa_ioim_abort(struct bfa_ioim_s *ioim)
2988 {
2989
2990         bfa_trc(ioim->bfa, ioim->iotag);
2991
2992         if (!bfa_ioim_is_abortable(ioim))
2993                 return BFA_STATUS_FAILED;
2994
2995         bfa_stats(ioim->itnim, io_aborts);
2996         bfa_sm_send_event(ioim, BFA_IOIM_SM_ABORT);
2997
2998         return BFA_STATUS_OK;
2999 }
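
/*
 * End-to-end sketch (illustrative only; dio, itnim and nsges come from a
 * hypothetical caller such as the Linux SCSI glue):
 *
 *	ioim = bfa_ioim_alloc(bfa, dio, itnim, nsges);
 *	if (!ioim)
 *		report the device as busy to the midlayer;
 *	bfa_ioim_start(ioim);
 *	...
 *	if (bfa_ioim_abort(ioim) != BFA_STATUS_OK)
 *		the IO is no longer in an abortable state;
 */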
3000
3001
3002 /**
3003  *  BFA TSKIM state machine functions
3004  */
3005
3006 /**
3007  *      Task management command beginning state.
3008  */
3009 static void
3010 bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3011 {
3012         bfa_trc(tskim->bfa, event);
3013
3014         switch (event) {
3015         case BFA_TSKIM_SM_START:
3016                 bfa_sm_set_state(tskim, bfa_tskim_sm_active);
3017                 bfa_tskim_gather_ios(tskim);
3018
3019                 /**
3020                  * If the device is offline, do not send the TM on the wire.
3021                  * Just clean up any pending IO requests and complete the TM request.
3022                  */
3023                 if (!bfa_itnim_is_online(tskim->itnim)) {
3024                         bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
3025                         tskim->tsk_status = BFI_TSKIM_STS_OK;
3026                         bfa_tskim_cleanup_ios(tskim);
3027                         return;
3028                 }
3029
3030                 if (!bfa_tskim_send(tskim)) {
3031                         bfa_sm_set_state(tskim, bfa_tskim_sm_qfull);
3032                         bfa_stats(tskim->itnim, tm_qwait);
3033                         bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
3034                                           &tskim->reqq_wait);
3035                 }
3036                 break;
3037
3038         default:
3039                 bfa_sm_fault(tskim->bfa, event);
3040         }
3041 }
3042
3043 /**
3045  *      TM command is active, awaiting completion from firmware to
3046  *      clean up IO requests in TM scope.
3047  */
3048 static void
3049 bfa_tskim_sm_active(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3050 {
3051         bfa_trc(tskim->bfa, event);
3052
3053         switch (event) {
3054         case BFA_TSKIM_SM_DONE:
3055                 bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
3056                 bfa_tskim_cleanup_ios(tskim);
3057                 break;
3058
3059         case BFA_TSKIM_SM_CLEANUP:
3060                 bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
3061                 if (!bfa_tskim_send_abort(tskim)) {
3062                         bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup_qfull);
3063                         bfa_stats(tskim->itnim, tm_qwait);
3064                         bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
3065                                 &tskim->reqq_wait);
3066                 }
3067                 break;
3068
3069         case BFA_TSKIM_SM_HWFAIL:
3070                 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
3071                 bfa_tskim_iocdisable_ios(tskim);
3072                 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
3073                 break;
3074
3075         default:
3076                 bfa_sm_fault(tskim->bfa, event);
3077         }
3078 }
3079
3080 /**
3081  *      An active TM is being cleaned up since ITN is offline. Awaiting cleanup
3082  *      completion event from firmware.
3083  */
3084 static void
3085 bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3086 {
3087         bfa_trc(tskim->bfa, event);
3088
3089         switch (event) {
3090         case BFA_TSKIM_SM_DONE:
3091                 /**
3092                  * Ignore and wait for ABORT completion from firmware.
3093                  */
3094                 break;
3095
3096         case BFA_TSKIM_SM_CLEANUP_DONE:
3097                 bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
3098                 bfa_tskim_cleanup_ios(tskim);
3099                 break;
3100
3101         case BFA_TSKIM_SM_HWFAIL:
3102                 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
3103                 bfa_tskim_iocdisable_ios(tskim);
3104                 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
3105                 break;
3106
3107         default:
3108                 bfa_sm_fault(tskim->bfa, event);
3109         }
3110 }
3111
static void
bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_IOS_DONE:
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_done);
		break;

	case BFA_TSKIM_SM_CLEANUP:
		/**
		 * Ignore, TM command completed on wire.
		 * Notify TM completion on IO cleanup completion.
		 */
		break;

	case BFA_TSKIM_SM_HWFAIL:
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}

/**
 *	Task management command is waiting for room in request CQ
 */
static void
bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_QRESUME:
		bfa_sm_set_state(tskim, bfa_tskim_sm_active);
		bfa_tskim_send(tskim);
		break;

	case BFA_TSKIM_SM_CLEANUP:
		/**
		 * No need to send TM on wire since ITN is offline.
		 */
		bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
		bfa_reqq_wcancel(&tskim->reqq_wait);
		bfa_tskim_cleanup_ios(tskim);
		break;

	case BFA_TSKIM_SM_HWFAIL:
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_reqq_wcancel(&tskim->reqq_wait);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}

/**
 *	Task management command is active, awaiting room in the request CQ
 *	to send a cleanup request.
 */
static void
bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
		enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_DONE:
		bfa_reqq_wcancel(&tskim->reqq_wait);
		/**
		 * Fall through !!!
		 */

	case BFA_TSKIM_SM_QRESUME:
		bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
		bfa_tskim_send_abort(tskim);
		break;

	case BFA_TSKIM_SM_HWFAIL:
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_reqq_wcancel(&tskim->reqq_wait);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}

/**
 *	BFA callback is pending
 */
static void
bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_HCB:
		bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);
		bfa_tskim_free(tskim);
		break;

	case BFA_TSKIM_SM_CLEANUP:
		bfa_tskim_notify_comp(tskim);
		break;

	case BFA_TSKIM_SM_HWFAIL:
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}


/**
 *  hal_tskim_private
 */

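/**
 * BFA callback for normal TM command completion.
 */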
static void
__bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_tskim_s *tskim = cbarg;

	if (!complete) {
		bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
		return;
	}

	bfa_stats(tskim->itnim, tm_success);
	bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk, tskim->tsk_status);
}

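/**
 * BFA callback for a TM command terminated by IOC h/w failure.
 */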
static void
__bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_tskim_s *tskim = cbarg;

	if (!complete) {
		bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
		return;
	}

	bfa_stats(tskim->itnim, tm_failures);
	bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk,
				BFI_TSKIM_STS_FAILED);
}

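/**
 * Check whether an IO on lun @lun falls within the scope of the TM
 * command: a target reset covers every lun on the ITN, while the other
 * TM commands are scoped to a single lun.
 */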
static bfa_boolean_t
bfa_tskim_match_scope(struct bfa_tskim_s *tskim, lun_t lun)
{
	switch (tskim->tm_cmnd) {
	case FCP_TM_TARGET_RESET:
		return BFA_TRUE;

	case FCP_TM_ABORT_TASK_SET:
	case FCP_TM_CLEAR_TASK_SET:
	case FCP_TM_LUN_RESET:
	case FCP_TM_CLEAR_ACA:
		return (tskim->lun == lun);

	default:
		bfa_assert(0);
	}

	return BFA_FALSE;
}

/**
 *	Gather affected IO requests and task management commands.
 */
static void
bfa_tskim_gather_ios(struct bfa_tskim_s *tskim)
{
	struct bfa_itnim_s *itnim = tskim->itnim;
	struct bfa_ioim_s *ioim;
	struct list_head	*qe, *qen;

	INIT_LIST_HEAD(&tskim->io_q);

	/**
	 * Gather any active IO requests first.
	 */
	list_for_each_safe(qe, qen, &itnim->io_q) {
		ioim = (struct bfa_ioim_s *) qe;
		if (bfa_tskim_match_scope
			(tskim, bfa_cb_ioim_get_lun(ioim->dio))) {
			list_del(&ioim->qe);
			list_add_tail(&ioim->qe, &tskim->io_q);
		}
	}

	/**
	 * Fail back any pending IO requests immediately.
	 */
	list_for_each_safe(qe, qen, &itnim->pending_q) {
		ioim = (struct bfa_ioim_s *) qe;
		if (bfa_tskim_match_scope
			(tskim, bfa_cb_ioim_get_lun(ioim->dio))) {
			list_del(&ioim->qe);
			list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
			bfa_ioim_tov(ioim);
		}
	}
}

/**
 *	IO cleanup completion
 */
static void
bfa_tskim_cleanp_comp(void *tskim_cbarg)
{
	struct bfa_tskim_s *tskim = tskim_cbarg;

	bfa_stats(tskim->itnim, tm_io_comps);
	bfa_sm_send_event(tskim, BFA_TSKIM_SM_IOS_DONE);
}

/**
 *	Cleanup all IO requests gathered within the scope of the TM command.
 */
static void
bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim)
{
	struct bfa_ioim_s *ioim;
	struct list_head	*qe, *qen;

	bfa_wc_init(&tskim->wc, bfa_tskim_cleanp_comp, tskim);

	list_for_each_safe(qe, qen, &tskim->io_q) {
		ioim = (struct bfa_ioim_s *) qe;
		bfa_wc_up(&tskim->wc);
		bfa_ioim_cleanup_tm(ioim, tskim);
	}

	bfa_wc_wait(&tskim->wc);
}

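/*
 * Note on the request queue handshake used below: bfa_reqq_next()
 * returns NULL when there is no room in the request CQ; on success the
 * message is built in place and handed to firmware with
 * bfa_reqq_produce().
 */
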
/**
 *	Send task management request to firmware.
 */
static bfa_boolean_t
bfa_tskim_send(struct bfa_tskim_s *tskim)
{
	struct bfa_itnim_s *itnim = tskim->itnim;
	struct bfi_tskim_req_s *m;

	/**
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(tskim->bfa, itnim->reqq);
	if (!m)
		return BFA_FALSE;

	/**
	 * build the TM request message next
	 */
	bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_TM_REQ,
			bfa_lpuid(tskim->bfa));

	m->tsk_tag = bfa_os_htons(tskim->tsk_tag);
	m->itn_fhdl = tskim->itnim->rport->fw_handle;
	m->t_secs = tskim->tsecs;
	m->lun = tskim->lun;
	m->tm_flags = tskim->tm_cmnd;

	/**
	 * queue the message to firmware
	 */
	bfa_reqq_produce(tskim->bfa, itnim->reqq);
	return BFA_TRUE;
}

/**
 *	Send an abort request to firmware to cleanup an active TM command.
 */
static bfa_boolean_t
bfa_tskim_send_abort(struct bfa_tskim_s *tskim)
{
	struct bfa_itnim_s	*itnim = tskim->itnim;
	struct bfi_tskim_abortreq_s	*m;

	/**
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(tskim->bfa, itnim->reqq);
	if (!m)
		return BFA_FALSE;

	/**
	 * build the abort request message next
	 */
	bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_ABORT_REQ,
			bfa_lpuid(tskim->bfa));

	m->tsk_tag  = bfa_os_htons(tskim->tsk_tag);

	/**
	 * queue the message to firmware
	 */
	bfa_reqq_produce(tskim->bfa, itnim->reqq);
	return BFA_TRUE;
}

/**
 *	Resume a task management command waiting for room in the request
 *	queue.
 */
static void
bfa_tskim_qresume(void *cbarg)
{
	struct bfa_tskim_s *tskim = cbarg;

	bfa_stats(tskim->itnim, tm_qresumes);
	bfa_sm_send_event(tskim, BFA_TSKIM_SM_QRESUME);
}

/**
 * Cleanup IOs associated with a task management command on IOC failures.
 */
static void
bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim)
{
	struct bfa_ioim_s *ioim;
	struct list_head	*qe, *qen;

	list_for_each_safe(qe, qen, &tskim->io_q) {
		ioim = (struct bfa_ioim_s *) qe;
		bfa_ioim_iocdisable(ioim);
	}
}


/**
 *  hal_tskim_friend
 */

/**
 * Notification on completions from related ioim.
 */
void
bfa_tskim_iodone(struct bfa_tskim_s *tskim)
{
	bfa_wc_down(&tskim->wc);
}

/**
 * Handle IOC h/w failure notification from itnim.
 */
void
bfa_tskim_iocdisable(struct bfa_tskim_s *tskim)
{
	tskim->notify = BFA_FALSE;
	bfa_stats(tskim->itnim, tm_iocdowns);
	bfa_sm_send_event(tskim, BFA_TSKIM_SM_HWFAIL);
}

/**
 * Cleanup TM command and associated IOs as part of ITNIM offline.
 */
void
bfa_tskim_cleanup(struct bfa_tskim_s *tskim)
{
	tskim->notify = BFA_TRUE;
	bfa_stats(tskim->itnim, tm_cleanups);
	bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP);
}

/**
 *	Memory allocation and initialization.
 */
void
bfa_tskim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
{
	struct bfa_tskim_s *tskim;
	u16	i;

	INIT_LIST_HEAD(&fcpim->tskim_free_q);

	tskim = (struct bfa_tskim_s *) bfa_meminfo_kva(minfo);
	fcpim->tskim_arr = tskim;

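	/*
	 * tsk_tag doubles as the index into tskim_arr; bfa_tskim_isr()
	 * relies on this when looking up the TSKIM for a firmware
	 * response.
	 */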
	for (i = 0; i < fcpim->num_tskim_reqs; i++, tskim++) {
		/*
		 * initialize TSKIM
		 */
		bfa_os_memset(tskim, 0, sizeof(struct bfa_tskim_s));
		tskim->tsk_tag = i;
		tskim->bfa	= fcpim->bfa;
		tskim->fcpim	= fcpim;
		tskim->notify	= BFA_FALSE;
		bfa_reqq_winit(&tskim->reqq_wait, bfa_tskim_qresume,
					tskim);
		bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);

		list_add_tail(&tskim->qe, &fcpim->tskim_free_q);
	}

	bfa_meminfo_kva(minfo) = (u8 *) tskim;
}

void
bfa_tskim_detach(struct bfa_fcpim_mod_s *fcpim)
{
	/**
	 * @todo
	 */
}

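/**
 * Handle TM response messages from firmware.
 */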
void
bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct bfi_tskim_rsp_s *rsp = (struct bfi_tskim_rsp_s *) m;
	struct bfa_tskim_s *tskim;
	u16	tsk_tag = bfa_os_ntohs(rsp->tsk_tag);

	tskim = BFA_TSKIM_FROM_TAG(fcpim, tsk_tag);
	bfa_assert(tskim->tsk_tag == tsk_tag);

	tskim->tsk_status = rsp->tsk_status;

	/**
	 * Firmware sends BFI_TSKIM_STS_ABORTED status for abort
	 * requests. All other statuses are for normal completions.
	 */
	if (rsp->tsk_status == BFI_TSKIM_STS_ABORTED) {
		bfa_stats(tskim->itnim, tm_cleanup_comps);
		bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP_DONE);
	} else {
		bfa_stats(tskim->itnim, tm_fw_rsps);
		bfa_sm_send_event(tskim, BFA_TSKIM_SM_DONE);
	}
}


/**
 *  hal_tskim_api
 */

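/**
 * Allocate a TM command instance from the free pool. Returns NULL if
 * the pool is exhausted.
 */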
struct bfa_tskim_s *
bfa_tskim_alloc(struct bfa_s *bfa, struct bfad_tskim_s *dtsk)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct bfa_tskim_s *tskim;

	bfa_q_deq(&fcpim->tskim_free_q, &tskim);

	if (tskim)
		tskim->dtsk = dtsk;

	return tskim;
}

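/**
 * Return a TM command instance to the free pool.
 */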
void
bfa_tskim_free(struct bfa_tskim_s *tskim)
{
	bfa_assert(bfa_q_is_on_q_func(&tskim->itnim->tsk_q, &tskim->qe));
	list_del(&tskim->qe);
	list_add_tail(&tskim->qe, &tskim->fcpim->tskim_free_q);
}

/**
 *	Start a task management command.
 *
 * @param[in]	tskim	BFA task management command instance
 * @param[in]	itnim	i-t nexus for the task management command
 * @param[in]	lun	lun, if applicable
 * @param[in]	tm_cmnd	Task management command code.
 * @param[in]	tsecs	Timeout in seconds
 *
 * @return None.
 */
void
bfa_tskim_start(struct bfa_tskim_s *tskim, struct bfa_itnim_s *itnim, lun_t lun,
			enum fcp_tm_cmnd tm_cmnd, u8 tsecs)
{
	tskim->itnim	= itnim;
	tskim->lun	= lun;
	tskim->tm_cmnd	= tm_cmnd;
	tskim->tsecs	= tsecs;
	tskim->notify	= BFA_FALSE;
	bfa_stats(itnim, tm_cmnds);

	list_add_tail(&tskim->qe, &itnim->tsk_q);
	bfa_sm_send_event(tskim, BFA_TSKIM_SM_START);
}
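
/*
 * Typical call sequence from the driver glue (a hypothetical sketch for
 * illustration; the real caller lives in the bfad layer):
 *
 *	struct bfa_tskim_s *tskim = bfa_tskim_alloc(bfa, dtsk);
 *
 *	if (tskim)
 *		bfa_tskim_start(tskim, itnim, lun, FCP_TM_LUN_RESET, 60);
 */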