1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2007 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
9 * This program is free software; you can redistribute it and/or *
10 * modify it under the terms of version 2 of the GNU General *
11 * Public License as published by the Free Software Foundation. *
12 * This program is distributed in the hope that it will be useful. *
13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17 * TO BE LEGALLY INVALID. See the GNU General Public License for *
18 * more details, a copy of which can be found in the file COPYING *
19 * included with this package. *
20 *******************************************************************/
22 #include <linux/blkdev.h>
23 #include <linux/pci.h>
24 #include <linux/interrupt.h>
25 #include <linux/delay.h>
27 #include <scsi/scsi.h>
28 #include <scsi/scsi_cmnd.h>
29 #include <scsi/scsi_device.h>
30 #include <scsi/scsi_host.h>
31 #include <scsi/scsi_transport_fc.h>
35 #include "lpfc_disc.h"
36 #include "lpfc_scsi.h"
38 #include "lpfc_crtn.h"
39 #include "lpfc_logmsg.h"
40 #include "lpfc_compat.h"
41 #include "lpfc_debugfs.h"
/*
 * Define macro to log: Mailbox command x%x cannot issue Data
 * This allows multiple uses of lpfc_msgBlk0311
 * w/o perturbing log msg utility.
 */
#define LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag) \
	lpfc_printf_log(phba, \
		KERN_ERR, \
		LOG_MBOX | LOG_SLI, \
		"(%d):0311 Mailbox command x%x cannot " \
		"issue Data: x%x x%x x%x\n", \
		pmbox->vport ? pmbox->vport->vpi : 0, \
		pmbox->mb.mbxCommand, \
		phba->pport->port_state, \
		psli->sli_flag, \
		flag)
/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
	LPFC_UNKNOWN_IOCB, LPFC_UNSOL_IOCB, LPFC_SOL_IOCB, LPFC_ABORT_IOCB
} lpfc_iocb_type;
/* SLI-2/SLI-3 provide different sized iocbs.  Given a pointer
 * to the start of the ring, and the slot number of the
 * desired iocb entry, calc a pointer to that entry.
 */
73 static inline IOCB_t *
74 lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
76 return (IOCB_t *) (((char *) pring->cmdringaddr) +
77 pring->cmdidx * phba->iocb_cmd_size);
80 static inline IOCB_t *
81 lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
83 return (IOCB_t *) (((char *) pring->rspringaddr) +
84 pring->rspidx * phba->iocb_rsp_size);
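/*
 * iocbq allocation helpers: __lpfc_sli_get_iocbq pulls a free iocbq off
 * phba->lpfc_iocb_list and expects the caller to hold hbalock;
 * lpfc_sli_get_iocbq is the locking wrapper around it.
 */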
87 static struct lpfc_iocbq *
88 __lpfc_sli_get_iocbq(struct lpfc_hba *phba)
90 struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
91 struct lpfc_iocbq * iocbq = NULL;
93 list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
98 lpfc_sli_get_iocbq(struct lpfc_hba *phba)
100 struct lpfc_iocbq * iocbq = NULL;
101 unsigned long iflags;
103 spin_lock_irqsave(&phba->hbalock, iflags);
104 iocbq = __lpfc_sli_get_iocbq(phba);
105 spin_unlock_irqrestore(&phba->hbalock, iflags);
110 __lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
112 size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
/*
 * Clean all volatile data fields, preserve iotag and node struct.
 */
117 memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
118 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
122 lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
124 unsigned long iflags;
/*
 * Clean all volatile data fields, preserve iotag and node struct.
 */
129 spin_lock_irqsave(&phba->hbalock, iflags);
130 __lpfc_sli_release_iocbq(phba, iocbq);
131 spin_unlock_irqrestore(&phba->hbalock, iflags);
/*
 * Translate the iocb command to an iocb command type used to decide the final
 * disposition of each completed IOCB.
 */
138 static lpfc_iocb_type
139 lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
141 lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;
143 if (iocb_cmnd > CMD_MAX_IOCB_CMD)
147 case CMD_XMIT_SEQUENCE_CR:
148 case CMD_XMIT_SEQUENCE_CX:
149 case CMD_XMIT_BCAST_CN:
150 case CMD_XMIT_BCAST_CX:
151 case CMD_ELS_REQUEST_CR:
152 case CMD_ELS_REQUEST_CX:
153 case CMD_CREATE_XRI_CR:
154 case CMD_CREATE_XRI_CX:
156 case CMD_XMIT_ELS_RSP_CX:
158 case CMD_FCP_IWRITE_CR:
159 case CMD_FCP_IWRITE_CX:
160 case CMD_FCP_IREAD_CR:
161 case CMD_FCP_IREAD_CX:
162 case CMD_FCP_ICMND_CR:
163 case CMD_FCP_ICMND_CX:
164 case CMD_FCP_TSEND_CX:
165 case CMD_FCP_TRSP_CX:
166 case CMD_FCP_TRECEIVE_CX:
167 case CMD_FCP_AUTO_TRSP_CX:
168 case CMD_ADAPTER_MSG:
169 case CMD_ADAPTER_DUMP:
170 case CMD_XMIT_SEQUENCE64_CR:
171 case CMD_XMIT_SEQUENCE64_CX:
172 case CMD_XMIT_BCAST64_CN:
173 case CMD_XMIT_BCAST64_CX:
174 case CMD_ELS_REQUEST64_CR:
175 case CMD_ELS_REQUEST64_CX:
176 case CMD_FCP_IWRITE64_CR:
177 case CMD_FCP_IWRITE64_CX:
178 case CMD_FCP_IREAD64_CR:
179 case CMD_FCP_IREAD64_CX:
180 case CMD_FCP_ICMND64_CR:
181 case CMD_FCP_ICMND64_CX:
182 case CMD_FCP_TSEND64_CX:
183 case CMD_FCP_TRSP64_CX:
184 case CMD_FCP_TRECEIVE64_CX:
185 case CMD_GEN_REQUEST64_CR:
186 case CMD_GEN_REQUEST64_CX:
187 case CMD_XMIT_ELS_RSP64_CX:
188 type = LPFC_SOL_IOCB;
190 case CMD_ABORT_XRI_CN:
191 case CMD_ABORT_XRI_CX:
192 case CMD_CLOSE_XRI_CN:
193 case CMD_CLOSE_XRI_CX:
194 case CMD_XRI_ABORTED_CX:
195 case CMD_ABORT_MXRI64_CN:
196 type = LPFC_ABORT_IOCB;
198 case CMD_RCV_SEQUENCE_CX:
199 case CMD_RCV_ELS_REQ_CX:
200 case CMD_RCV_SEQUENCE64_CX:
201 case CMD_RCV_ELS_REQ64_CX:
202 case CMD_ASYNC_STATUS:
203 case CMD_IOCB_RCV_SEQ64_CX:
204 case CMD_IOCB_RCV_ELS64_CX:
205 case CMD_IOCB_RCV_CONT64_CX:
206 type = LPFC_UNSOL_IOCB;
209 type = LPFC_UNKNOWN_IOCB;
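/*
 * lpfc_sli_ring_map issues a CONFIG_RING mailbox command, in polled mode,
 * for each SLI ring; any failure marks the HBA as errored before the
 * mailbox buffer is returned to the pool.
 */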
217 lpfc_sli_ring_map(struct lpfc_hba *phba)
219 struct lpfc_sli *psli = &phba->sli;
224 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
228 phba->link_state = LPFC_INIT_MBX_CMDS;
229 for (i = 0; i < psli->num_rings; i++) {
230 lpfc_config_ring(phba, i, pmb);
231 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
232 if (rc != MBX_SUCCESS) {
233 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
234 "0446 Adapter failed to init (%d), "
235 "mbxCmd x%x CFG_RING, mbxStatus x%x, "
237 rc, pmbox->mbxCommand,
238 pmbox->mbxStatus, i);
239 phba->link_state = LPFC_HBA_ERROR;
244 mempool_free(pmb, phba->mbox_mem_pool);
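/*
 * lpfc_sli_ringtxcmpl_put queues a command iocb on the ring's txcmplq to
 * await its completion; for ELS ring traffic (other than abort/close XRI)
 * it also rearms the vport's ELS timeout timer.
 */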
249 lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
250 struct lpfc_iocbq *piocb)
252 list_add_tail(&piocb->list, &pring->txcmplq);
253 pring->txcmplq_cnt++;
254 if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
255 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
256 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
260 mod_timer(&piocb->vport->els_tmofunc,
261 jiffies + HZ * (phba->fc_ratov << 1));
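/*
 * lpfc_sli_ringtx_get removes and returns the iocb at the head of the
 * ring's transmit queue (txq), or NULL when the queue is empty.
 */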
268 static struct lpfc_iocbq *
269 lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
271 struct lpfc_iocbq *cmd_iocb;
273 list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
	if (cmd_iocb != NULL)
		pring->txq_cnt--;
	return cmd_iocb;
}
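/*
 * lpfc_sli_next_iocb_slot returns a pointer to the next free command IOCB
 * entry in the ring, refreshing the local copy of the port's get index as
 * needed.  A NULL return means the ring is currently full; an out-of-range
 * port index is treated as a fatal adapter error.
 */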
280 lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
282 struct lpfc_pgp *pgp = (phba->sli_rev == 3) ?
283 &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
284 &phba->slim2p->mbx.us.s2.port[pring->ringno];
285 uint32_t max_cmd_idx = pring->numCiocb;
287 if ((pring->next_cmdidx == pring->cmdidx) &&
288 (++pring->next_cmdidx >= max_cmd_idx))
289 pring->next_cmdidx = 0;
291 if (unlikely(pring->local_getidx == pring->next_cmdidx)) {
293 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
295 if (unlikely(pring->local_getidx >= max_cmd_idx)) {
296 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
297 "0315 Ring %d issue: portCmdGet %d "
			"is bigger than cmd ring %d\n",
			pring->ringno,
			pring->local_getidx, max_cmd_idx);
302 phba->link_state = LPFC_HBA_ERROR;
/*
 * All error attention handlers are posted to
 * worker thread
 */
307 phba->work_ha |= HA_ERATT;
308 phba->work_hs = HS_FFER3;
310 /* hbalock should already be held */
312 lpfc_worker_wake_up(phba);
317 if (pring->local_getidx == pring->next_cmdidx)
321 return lpfc_cmd_iocb(phba, pring);
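/*
 * lpfc_sli_next_iotag assigns a unique iotag to the iocbq and records it in
 * psli->iocbq_lookup, growing the lookup array by
 * LPFC_IOCBQ_LOOKUP_INCREMENT entries when it is exhausted.  Returns the
 * iotag, or 0 if no tag could be allocated.
 */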
325 lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
327 struct lpfc_iocbq **new_arr;
328 struct lpfc_iocbq **old_arr;
330 struct lpfc_sli *psli = &phba->sli;
333 spin_lock_irq(&phba->hbalock);
334 iotag = psli->last_iotag;
335 if(++iotag < psli->iocbq_lookup_len) {
336 psli->last_iotag = iotag;
337 psli->iocbq_lookup[iotag] = iocbq;
338 spin_unlock_irq(&phba->hbalock);
339 iocbq->iotag = iotag;
341 } else if (psli->iocbq_lookup_len < (0xffff
342 - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
343 new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
344 spin_unlock_irq(&phba->hbalock);
345 new_arr = kzalloc(new_len * sizeof (struct lpfc_iocbq *),
348 spin_lock_irq(&phba->hbalock);
349 old_arr = psli->iocbq_lookup;
350 if (new_len <= psli->iocbq_lookup_len) {
/* highly improbable case */
353 iotag = psli->last_iotag;
354 if(++iotag < psli->iocbq_lookup_len) {
355 psli->last_iotag = iotag;
356 psli->iocbq_lookup[iotag] = iocbq;
357 spin_unlock_irq(&phba->hbalock);
358 iocbq->iotag = iotag;
361 spin_unlock_irq(&phba->hbalock);
364 if (psli->iocbq_lookup)
365 memcpy(new_arr, old_arr,
366 ((psli->last_iotag + 1) *
367 sizeof (struct lpfc_iocbq *)));
368 psli->iocbq_lookup = new_arr;
369 psli->iocbq_lookup_len = new_len;
370 psli->last_iotag = iotag;
371 psli->iocbq_lookup[iotag] = iocbq;
372 spin_unlock_irq(&phba->hbalock);
373 iocbq->iotag = iotag;
378 spin_unlock_irq(&phba->hbalock);
	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0318 Failed to allocate IOTAG. last IOTAG is %d\n",
			psli->last_iotag);

	return 0;
}
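/*
 * lpfc_sli_submit_iocb copies the command into the ring slot, tracks it on
 * the txcmplq when a completion handler is present (otherwise the iocbq is
 * released immediately), and advances the ring's command put index.
 */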
388 lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
389 IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
394 nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;
396 if (pring->ringno == LPFC_ELS_RING) {
397 lpfc_debugfs_slow_ring_trc(phba,
398 "IOCB cmd ring: wd4:x%08x wd6:x%08x wd7:x%08x",
399 *(((uint32_t *) &nextiocb->iocb) + 4),
400 *(((uint32_t *) &nextiocb->iocb) + 6),
401 *(((uint32_t *) &nextiocb->iocb) + 7));
/*
 * Issue iocb command to adapter
 */
407 lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
409 pring->stats.iocb_cmd++;
/*
 * If there is no completion routine to call, we can release the
 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
 * that have no rsp ring completion, iocb_cmpl MUST be NULL.
 */
416 if (nextiocb->iocb_cmpl)
417 lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
419 __lpfc_sli_release_iocbq(phba, nextiocb);
/*
 * Let the HBA know what IOCB slot will be the next one the
 * driver will put a command into.
 */
425 pring->cmdidx = pring->next_cmdidx;
426 writel(pring->cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
430 lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
432 int ringno = pring->ringno;
434 pring->flag |= LPFC_CALL_RING_AVAILABLE;
/*
 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
 * The HBA will tell us when an IOCB entry is available.
 */
442 writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
443 readl(phba->CAregaddr); /* flush */
445 pring->stats.iocb_cmd_full++;
449 lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
451 int ringno = pring->ringno;
/*
 * Tell the HBA that there is work to do in this ring.
 */
457 writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
458 readl(phba->CAregaddr); /* flush */
462 lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
465 struct lpfc_iocbq *nextiocb;
/*
 * Check to see if:
 *  (a) there is anything on the txq to send
 *  (b) link is up
 *  (c) link attention events can be processed (fcp ring only)
 *  (d) IOCB processing is not blocked by the outstanding mbox command.
 */
474 if (pring->txq_cnt &&
475 lpfc_is_link_up(phba) &&
476 (pring->ringno != phba->sli.fcp_ring ||
477 phba->sli.sli_flag & LPFC_PROCESS_LA)) {
479 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
480 (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
481 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
484 lpfc_sli_update_ring(phba, pring);
486 lpfc_sli_update_full_ring(phba, pring);
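/*
 * lpfc_sli_next_hbq_slot returns the next free entry of host buffer queue
 * 'hbqno', refreshing the local copy of the port's get index as needed;
 * NULL means the HBQ is currently full.
 */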
492 static struct lpfc_hbq_entry *
493 lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
495 struct hbq_s *hbqp = &phba->hbqs[hbqno];
497 if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
498 ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
499 hbqp->next_hbqPutIdx = 0;
501 if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
502 uint32_t raw_index = phba->hbq_get[hbqno];
503 uint32_t getidx = le32_to_cpu(raw_index);
505 hbqp->local_hbqGetIdx = getidx;
507 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
508 lpfc_printf_log(phba, KERN_ERR,
510 "1802 HBQ %d: local_hbqGetIdx "
				"%u is greater than hbqp->entry_count %u\n",
512 hbqno, hbqp->local_hbqGetIdx,
515 phba->link_state = LPFC_HBA_ERROR;
519 if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
523 return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
528 lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
530 struct lpfc_dmabuf *dmabuf, *next_dmabuf;
531 struct hbq_dmabuf *hbq_buf;
534 hbq_count = lpfc_sli_hbq_count();
535 /* Return all memory used by all HBQs */
536 for (i = 0; i < hbq_count; ++i) {
537 list_for_each_entry_safe(dmabuf, next_dmabuf,
538 &phba->hbqs[i].hbq_buffer_list, list) {
539 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
540 list_del(&hbq_buf->dbuf.list);
541 (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
543 phba->hbqs[i].buffer_count = 0;
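/*
 * lpfc_sli_hbq_to_firmware posts one receive buffer to the firmware: it
 * fills the next HBQ entry with the buffer's DMA address, size and tag,
 * then updates the HBQ put index.
 */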
547 static struct lpfc_hbq_entry *
548 lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
549 struct hbq_dmabuf *hbq_buf)
551 struct lpfc_hbq_entry *hbqe;
552 dma_addr_t physaddr = hbq_buf->dbuf.phys;
554 /* Get next HBQ entry slot to use */
555 hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
557 struct hbq_s *hbqp = &phba->hbqs[hbqno];
559 hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
560 hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr));
561 hbqe->bde.tus.f.bdeSize = hbq_buf->size;
562 hbqe->bde.tus.f.bdeFlags = 0;
563 hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
564 hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
566 hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
567 writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
569 readl(phba->hbq_put + hbqno);
570 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
575 static struct lpfc_hbq_init lpfc_els_hbq = {
580 .ring_mask = (1 << LPFC_ELS_RING),
586 static struct lpfc_hbq_init lpfc_extra_hbq = {
591 .ring_mask = (1 << LPFC_EXTRA_RING),
597 struct lpfc_hbq_init *lpfc_hbq_defs[] = {
603 lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
605 uint32_t i, start, end;
606 struct hbq_dmabuf *hbq_buffer;
608 if (!phba->hbqs[hbqno].hbq_alloc_buffer) {
612 start = phba->hbqs[hbqno].buffer_count;
614 if (end > lpfc_hbq_defs[hbqno]->entry_count) {
615 end = lpfc_hbq_defs[hbqno]->entry_count;
618 /* Populate HBQ entries */
619 for (i = start; i < end; i++) {
620 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
623 hbq_buffer->tag = (i | (hbqno << 16));
624 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
625 phba->hbqs[hbqno].buffer_count++;
627 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
633 lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
635 return(lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
636 lpfc_hbq_defs[qno]->add_count));
640 lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
642 return(lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
643 lpfc_hbq_defs[qno]->init_count));
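/*
 * lpfc_sli_hbqbuf_find looks up a posted HBQ buffer by its tag; the upper
 * 16 bits of the tag select the HBQ and the lower bits identify the buffer.
 */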
646 static struct hbq_dmabuf *
647 lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
649 struct lpfc_dmabuf *d_buf;
struct hbq_dmabuf *hbq_buf;
uint32_t hbqno = tag >> 16;

if (hbqno >= LPFC_MAX_HBQS)
	return NULL;
657 list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
658 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
659 if (hbq_buf->tag == tag) {
663 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
664 "1803 Bad hbq tag. Data: x%x x%x\n",
665 tag, phba->hbqs[tag >> 16].buffer_count);
670 lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
675 hbqno = hbq_buffer->tag >> 16;
676 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
677 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
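/*
 * lpfc_sli_chk_mbx_command validates a mailbox command code, flagging any
 * command the driver does not recognize so the completion path can treat
 * it as a fatal error.
 */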
683 lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
687 switch (mbxCommand) {
691 case MBX_WRITE_VPARMS:
692 case MBX_RUN_BIU_DIAG:
695 case MBX_CONFIG_LINK:
696 case MBX_CONFIG_RING:
698 case MBX_READ_CONFIG:
699 case MBX_READ_RCONFIG:
701 case MBX_READ_STATUS:
705 case MBX_READ_LNK_STAT:
707 case MBX_UNREG_LOGIN:
710 case MBX_DUMP_MEMORY:
711 case MBX_DUMP_CONTEXT:
716 case MBX_DEL_LD_ENTRY:
717 case MBX_RUN_PROGRAM:
722 case MBX_CONFIG_FARP:
725 case MBX_RUN_BIU_DIAG64:
726 case MBX_CONFIG_PORT:
727 case MBX_READ_SPARM64:
729 case MBX_REG_LOGIN64:
731 case MBX_FLASH_WR_ULA:
733 case MBX_LOAD_EXP_ROM:
734 case MBX_ASYNCEVT_ENABLE:
747 lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
749 wait_queue_head_t *pdone_q;
750 unsigned long drvr_flag;
/*
 * If pdone_q is empty, the driver thread gave up waiting and
 * continued running.
 */
756 pmboxq->mbox_flag |= LPFC_MBX_WAKE;
757 spin_lock_irqsave(&phba->hbalock, drvr_flag);
758 pdone_q = (wait_queue_head_t *) pmboxq->context1;
760 wake_up_interruptible(pdone_q);
761 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
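/*
 * lpfc_sli_def_mbox_cmpl is the default mailbox completion handler: it
 * frees any DMA buffer attached to the mailbox, issues an UNREG_LOGIN if a
 * REG_LOGIN64 completed after the node went away, and returns the mailbox
 * to the memory pool.
 */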
766 lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
768 struct lpfc_dmabuf *mp;
772 mp = (struct lpfc_dmabuf *) (pmb->context1);
775 lpfc_mbuf_free(phba, mp->virt, mp->phys);
/*
 * If a REG_LOGIN succeeded after the node was destroyed or the node
 * is in re-discovery, the driver needs to clean up the RPI.
 */
783 if (!(phba->pport->load_flag & FC_UNLOADING) &&
784 pmb->mb.mbxCommand == MBX_REG_LOGIN64 &&
785 !pmb->mb.mbxStatus) {
787 rpi = pmb->mb.un.varWords[0];
788 lpfc_unreg_login(phba, pmb->mb.un.varRegLogin.vpi, rpi, pmb);
789 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
790 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
791 if (rc != MBX_NOT_FINISHED)
795 mempool_free(pmb, phba->mbox_mem_pool);
800 lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
807 phba->sli.slistat.mbox_event++;
/* Get all completed mailbox buffers into the cmplq */
810 spin_lock_irq(&phba->hbalock);
811 list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
812 spin_unlock_irq(&phba->hbalock);
814 /* Get a Mailbox buffer to setup mailbox commands for callback */
816 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
822 if (pmbox->mbxCommand != MBX_HEARTBEAT) {
824 lpfc_debugfs_disc_trc(pmb->vport,
825 LPFC_DISC_TRC_MBOX_VPORT,
826 "MBOX cmpl vport: cmd:x%x mb:x%x x%x",
827 (uint32_t)pmbox->mbxCommand,
828 pmbox->un.varWords[0],
829 pmbox->un.varWords[1]);
832 lpfc_debugfs_disc_trc(phba->pport,
834 "MBOX cmpl: cmd:x%x mb:x%x x%x",
835 (uint32_t)pmbox->mbxCommand,
836 pmbox->un.varWords[0],
837 pmbox->un.varWords[1]);
/*
 * It is a fatal error if an unknown mailbox command completes.
 */
844 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
/* Unknown mailbox command completion */
847 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
848 "(%d):0323 Unknown Mailbox command "
850 pmb->vport ? pmb->vport->vpi : 0,
852 phba->link_state = LPFC_HBA_ERROR;
853 phba->work_hs = HS_FFER3;
854 lpfc_handle_eratt(phba);
858 if (pmbox->mbxStatus) {
859 phba->sli.slistat.mbox_stat_err++;
860 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
861 /* Mbox cmd cmpl error - RETRYing */
862 lpfc_printf_log(phba, KERN_INFO,
864 "(%d):0305 Mbox cmd cmpl "
865 "error - RETRYing Data: x%x "
867 pmb->vport ? pmb->vport->vpi :0,
870 pmbox->un.varWords[0],
871 pmb->vport->port_state);
872 pmbox->mbxStatus = 0;
873 pmbox->mbxOwner = OWN_HOST;
874 spin_lock_irq(&phba->hbalock);
875 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
876 spin_unlock_irq(&phba->hbalock);
877 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
878 if (rc == MBX_SUCCESS)
883 /* Mailbox cmd <cmd> Cmpl <cmpl> */
884 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
885 "(%d):0307 Mailbox cmd x%x Cmpl x%p "
886 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n",
887 pmb->vport ? pmb->vport->vpi : 0,
890 *((uint32_t *) pmbox),
891 pmbox->un.varWords[0],
892 pmbox->un.varWords[1],
893 pmbox->un.varWords[2],
894 pmbox->un.varWords[3],
895 pmbox->un.varWords[4],
896 pmbox->un.varWords[5],
897 pmbox->un.varWords[6],
898 pmbox->un.varWords[7]);
901 pmb->mbox_cmpl(phba,pmb);
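/*
 * lpfc_sli_replace_hbqbuff swaps a freshly allocated buffer into the HBQ in
 * place of the one identified by 'tag' and hands the filled buffer back to
 * the caller; if no replacement can be allocated, the original buffer is
 * returned instead.
 */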
906 static struct lpfc_dmabuf *
907 lpfc_sli_replace_hbqbuff(struct lpfc_hba *phba, uint32_t tag)
909 struct hbq_dmabuf *hbq_entry, *new_hbq_entry;
911 void *virt; /* virtual address ptr */
912 dma_addr_t phys; /* mapped address */
914 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
915 if (hbq_entry == NULL)
917 list_del(&hbq_entry->dbuf.list);
920 new_hbq_entry = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
921 if (new_hbq_entry == NULL)
922 return &hbq_entry->dbuf;
923 new_hbq_entry->tag = -1;
924 phys = new_hbq_entry->dbuf.phys;
925 virt = new_hbq_entry->dbuf.virt;
926 new_hbq_entry->dbuf.phys = hbq_entry->dbuf.phys;
927 new_hbq_entry->dbuf.virt = hbq_entry->dbuf.virt;
928 hbq_entry->dbuf.phys = phys;
929 hbq_entry->dbuf.virt = virt;
930 lpfc_sli_free_hbq(phba, hbq_entry);
931 return &new_hbq_entry->dbuf;
934 static struct lpfc_dmabuf *
935 lpfc_sli_get_buff(struct lpfc_hba *phba,
936 struct lpfc_sli_ring *pring,
939 if (tag & QUE_BUFTAG_BIT)
940 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
942 return lpfc_sli_replace_hbqbuff(phba, tag);
946 lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
947 struct lpfc_iocbq *saveq)
953 struct lpfc_iocbq *iocbq;
956 irsp = &(saveq->iocb);
958 if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
959 if (pring->lpfc_sli_rcv_async_status)
960 pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
962 lpfc_printf_log(phba,
965 "0316 Ring %d handler: unexpected "
966 "ASYNC_STATUS iocb received evt_code "
969 irsp->un.asyncstat.evt_code);
973 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX)
974 || (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX)
975 || (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)
976 || (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX)) {
981 (WORD5 *) & (saveq->iocb.un.
983 Rctl = w5p->hcsw.Rctl;
984 Type = w5p->hcsw.Type;
986 /* Firmware Workaround */
987 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
988 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
989 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
992 w5p->hcsw.Rctl = Rctl;
993 w5p->hcsw.Type = Type;
997 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
998 struct lpfc_hbq_entry *hbqe_1, *hbqe_2;
999 hbqe_1 = (struct lpfc_hbq_entry *) &saveq->iocb.un.ulpWord[0];
1000 hbqe_2 = (struct lpfc_hbq_entry *) &saveq->iocb.
1001 unsli3.sli3Words[4];
1003 if (irsp->ulpBdeCount != 0) {
1004 saveq->context2 = lpfc_sli_get_buff(phba, pring,
1005 irsp->un.ulpWord[3]);
1006 if (!saveq->context2)
1007 lpfc_printf_log(phba,
1010 "0341 Ring %d Cannot find buffer for "
1011 "an unsolicited iocb. tag 0x%x\n",
1013 irsp->un.ulpWord[3]);
1016 if (irsp->ulpBdeCount == 2) {
1017 saveq->context3 = lpfc_sli_get_buff(phba, pring,
1018 irsp->unsli3.sli3Words[7]);
1019 if (!saveq->context3)
1020 lpfc_printf_log(phba,
1023 "0342 Ring %d Cannot find buffer for an"
1024 " unsolicited iocb. tag 0x%x\n",
1026 irsp->unsli3.sli3Words[7]);
1028 list_for_each_entry(iocbq, &saveq->list, list) {
1029 hbqe_1 = (struct lpfc_hbq_entry *) &iocbq->iocb.
1031 hbqe_2 = (struct lpfc_hbq_entry *) &iocbq->iocb.
1032 unsli3.sli3Words[4];
1033 irsp = &(iocbq->iocb);
1035 if (irsp->ulpBdeCount != 0) {
1036 iocbq->context2 = lpfc_sli_get_buff(phba, pring,
1037 irsp->un.ulpWord[3]);
1038 if (!saveq->context2)
1039 lpfc_printf_log(phba,
1042 "0343 Ring %d Cannot find "
1043 "buffer for an unsolicited iocb"
1044 ". tag 0x%x\n", pring->ringno,
1045 irsp->un.ulpWord[3]);
1047 if (irsp->ulpBdeCount == 2) {
1048 iocbq->context3 = lpfc_sli_get_buff(phba, pring,
1049 irsp->unsli3.sli3Words[7]);
1050 if (!saveq->context3)
1051 lpfc_printf_log(phba,
1054 "0344 Ring %d Cannot find "
1055 "buffer for an unsolicited "
1058 irsp->unsli3.sli3Words[7]);
1063 /* unSolicited Responses */
1064 if (pring->prt[0].profile) {
1065 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
1066 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
1070 /* We must search, based on rctl / type
1071 for the right routine */
1072 for (i = 0; i < pring->num_mask;
1074 if ((pring->prt[i].rctl ==
1078 if (pring->prt[i].lpfc_sli_rcv_unsol_event)
1079 (pring->prt[i].lpfc_sli_rcv_unsol_event)
1080 (phba, pring, saveq);
1087 /* Unexpected Rctl / Type received */
1088 /* Ring <ringno> handler: unexpected
1089 Rctl <Rctl> Type <Type> received */
1090 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
1091 "0313 Ring %d handler: unexpected Rctl x%x "
1092 "Type x%x received\n",
1093 pring->ringno, Rctl, Type);
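/*
 * lpfc_sli_iocbq_lookup translates the iotag carried in a response iocb
 * back to the originating command iocbq, removing it from the txcmplq;
 * out-of-range iotags are logged and NULL is returned.
 */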
1098 static struct lpfc_iocbq *
1099 lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
1100 struct lpfc_sli_ring *pring,
1101 struct lpfc_iocbq *prspiocb)
1103 struct lpfc_iocbq *cmd_iocb = NULL;
1106 iotag = prspiocb->iocb.ulpIoTag;
1108 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
1109 cmd_iocb = phba->sli.iocbq_lookup[iotag];
1110 list_del_init(&cmd_iocb->list);
1111 pring->txcmplq_cnt--;
1115 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0317 iotag x%x is out of "
1117 "range: max iotag x%x wd0 x%x\n",
1118 iotag, phba->sli.last_iotag,
1119 *(((uint32_t *) &prspiocb->iocb) + 7));
1124 lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1125 struct lpfc_iocbq *saveq)
1127 struct lpfc_iocbq *cmdiocbp;
1129 unsigned long iflag;
1131 /* Based on the iotag field, get the cmd IOCB from the txcmplq */
1132 spin_lock_irqsave(&phba->hbalock, iflag);
1133 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
1134 spin_unlock_irqrestore(&phba->hbalock, iflag);
1137 if (cmdiocbp->iocb_cmpl) {
1139 * Post all ELS completions to the worker thread.
1140 * All other are passed to the completion callback.
1142 if (pring->ringno == LPFC_ELS_RING) {
1143 if (cmdiocbp->iocb_flag & LPFC_DRIVER_ABORTED) {
1144 cmdiocbp->iocb_flag &=
1145 ~LPFC_DRIVER_ABORTED;
1146 saveq->iocb.ulpStatus =
1147 IOSTAT_LOCAL_REJECT;
1148 saveq->iocb.un.ulpWord[4] =
/* Firmware could still be in progress
 * of DMAing payload, so don't free data
 * buffer till after a hbeat.
 */
1155 saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
1158 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
1160 lpfc_sli_release_iocbq(phba, cmdiocbp);
/*
 * Unknown initiating command based on the response iotag.
 * This could be the case on the ELS ring because of
 * lpfc_els_abort().
 */
1167 if (pring->ringno != LPFC_ELS_RING) {
/*
 * Ring <ringno> handler: unexpected completion IoTag
 * <IoTag>
 */
1172 lpfc_printf_vlog(cmdiocbp->vport, KERN_WARNING, LOG_SLI,
1173 "0322 Ring %d handler: "
1174 "unexpected completion IoTag x%x "
1175 "Data: x%x x%x x%x x%x\n",
1177 saveq->iocb.ulpIoTag,
1178 saveq->iocb.ulpStatus,
1179 saveq->iocb.un.ulpWord[4],
1180 saveq->iocb.ulpCommand,
1181 saveq->iocb.ulpContext);
1189 lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1191 struct lpfc_pgp *pgp = (phba->sli_rev == 3) ?
1192 &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
1193 &phba->slim2p->mbx.us.s2.port[pring->ringno];
/*
 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
 * rsp ring <portRspMax>
 */
1198 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1199 "0312 Ring %d handler: portRspPut %d "
			"is bigger than rsp ring %d\n",
			pring->ringno, le32_to_cpu(pgp->rspPutInx),
			pring->numRiocb);
1204 phba->link_state = LPFC_HBA_ERROR;
/*
 * All error attention handlers are posted to
 * worker thread
 */
1210 phba->work_ha |= HA_ERATT;
1211 phba->work_hs = HS_FFER3;
1213 /* hbalock should already be held */
1214 if (phba->work_wait)
1215 lpfc_worker_wake_up(phba);
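/*
 * lpfc_sli_poll_fcp_ring services FCP ring completions in polled mode,
 * copying each response entry out of the ring and invoking the matching
 * command's completion handler.
 */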
1220 void lpfc_sli_poll_fcp_ring(struct lpfc_hba *phba)
1222 struct lpfc_sli *psli = &phba->sli;
1223 struct lpfc_sli_ring *pring = &psli->ring[LPFC_FCP_RING];
1224 IOCB_t *irsp = NULL;
1225 IOCB_t *entry = NULL;
1226 struct lpfc_iocbq *cmdiocbq = NULL;
1227 struct lpfc_iocbq rspiocbq;
1228 struct lpfc_pgp *pgp;
1230 uint32_t portRspPut, portRspMax;
1232 uint32_t rsp_cmpl = 0;
1234 unsigned long iflags;
1236 pring->stats.iocb_event++;
1238 pgp = (phba->sli_rev == 3) ?
1239 &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
1240 &phba->slim2p->mbx.us.s2.port[pring->ringno];
/*
 * The next available response entry should never exceed the maximum
 * entries.  If it does, treat it as an adapter hardware error.
 */
1247 portRspMax = pring->numRiocb;
1248 portRspPut = le32_to_cpu(pgp->rspPutInx);
1249 if (unlikely(portRspPut >= portRspMax)) {
1250 lpfc_sli_rsp_pointers_error(phba, pring);
1255 while (pring->rspidx != portRspPut) {
1256 entry = lpfc_resp_iocb(phba, pring);
1257 if (++pring->rspidx >= portRspMax)
1260 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
1261 (uint32_t *) &rspiocbq.iocb,
1262 phba->iocb_rsp_size);
1263 irsp = &rspiocbq.iocb;
1264 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
1265 pring->stats.iocb_rsp++;
1268 if (unlikely(irsp->ulpStatus)) {
1269 /* Rsp ring <ringno> error: IOCB */
1270 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
1271 "0326 Rsp Ring %d error: IOCB Data: "
1272 "x%x x%x x%x x%x x%x x%x x%x x%x\n",
1274 irsp->un.ulpWord[0],
1275 irsp->un.ulpWord[1],
1276 irsp->un.ulpWord[2],
1277 irsp->un.ulpWord[3],
1278 irsp->un.ulpWord[4],
1279 irsp->un.ulpWord[5],
1280 *(((uint32_t *) irsp) + 6),
1281 *(((uint32_t *) irsp) + 7));
1285 case LPFC_ABORT_IOCB:
/*
 * Idle exchange closed via ABTS from port.  No iocb
 * resources need to be recovered.
 */
1291 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
1292 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
1293 "0314 IOCB cmd 0x%x "
1294 "processed. Skipping "
1300 spin_lock_irqsave(&phba->hbalock, iflags);
1301 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
1303 spin_unlock_irqrestore(&phba->hbalock, iflags);
1304 if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) {
1305 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
1310 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
1311 char adaptermsg[LPFC_MAX_ADPTMSG];
1312 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
1313 memcpy(&adaptermsg[0], (uint8_t *) irsp,
1315 dev_warn(&((phba->pcidev)->dev),
1317 phba->brd_no, adaptermsg);
1319 /* Unknown IOCB command */
1320 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1321 "0321 Unknown IOCB command "
1322 "Data: x%x, x%x x%x x%x x%x\n",
1323 type, irsp->ulpCommand,
/*
 * The response IOCB has been processed.  Update the ring
 * pointer in SLIM.  If the port response put pointer has not
 * been updated, sync the pgp->rspPutInx and fetch the new port
 * response put pointer.
 */
1337 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
1339 if (pring->rspidx == portRspPut)
1340 portRspPut = le32_to_cpu(pgp->rspPutInx);
1343 ha_copy = readl(phba->HAregaddr);
1344 ha_copy >>= (LPFC_FCP_RING * 4);
1346 if ((rsp_cmpl > 0) && (ha_copy & HA_R0RE_REQ)) {
1347 spin_lock_irqsave(&phba->hbalock, iflags);
1348 pring->stats.iocb_rsp_full++;
1349 status = ((CA_R0ATT | CA_R0RE_RSP) << (LPFC_FCP_RING * 4));
1350 writel(status, phba->CAregaddr);
1351 readl(phba->CAregaddr);
1352 spin_unlock_irqrestore(&phba->hbalock, iflags);
1354 if ((ha_copy & HA_R0CE_RSP) &&
1355 (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
1356 spin_lock_irqsave(&phba->hbalock, iflags);
1357 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
1358 pring->stats.iocb_cmd_empty++;
1360 /* Force update of the local copy of cmdGetInx */
1361 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
1362 lpfc_sli_resume_iocb(phba, pring);
1364 if ((pring->lpfc_sli_cmd_available))
1365 (pring->lpfc_sli_cmd_available) (phba, pring);
1367 spin_unlock_irqrestore(&phba->hbalock, iflags);
/*
 * This routine presumes LPFC_FCP_RING handling and doesn't bother
 * to check it explicitly.
 */
1378 lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
1379 struct lpfc_sli_ring *pring, uint32_t mask)
1381 struct lpfc_pgp *pgp = (phba->sli_rev == 3) ?
1382 &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
1383 &phba->slim2p->mbx.us.s2.port[pring->ringno];
1384 IOCB_t *irsp = NULL;
1385 IOCB_t *entry = NULL;
1386 struct lpfc_iocbq *cmdiocbq = NULL;
1387 struct lpfc_iocbq rspiocbq;
1389 uint32_t portRspPut, portRspMax;
1391 lpfc_iocb_type type;
1392 unsigned long iflag;
1393 uint32_t rsp_cmpl = 0;
1395 spin_lock_irqsave(&phba->hbalock, iflag);
1396 pring->stats.iocb_event++;
/*
 * The next available response entry should never exceed the maximum
 * entries.  If it does, treat it as an adapter hardware error.
 */
1402 portRspMax = pring->numRiocb;
1403 portRspPut = le32_to_cpu(pgp->rspPutInx);
1404 if (unlikely(portRspPut >= portRspMax)) {
1405 lpfc_sli_rsp_pointers_error(phba, pring);
1406 spin_unlock_irqrestore(&phba->hbalock, iflag);
1411 while (pring->rspidx != portRspPut) {
/*
 * Fetch an entry off the ring and copy it into a local data
 * structure.  The copy involves a byte-swap since the
 * network byte order and pci byte orders are different.
 */
1417 entry = lpfc_resp_iocb(phba, pring);
1418 phba->last_completion_time = jiffies;
1420 if (++pring->rspidx >= portRspMax)
1423 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
1424 (uint32_t *) &rspiocbq.iocb,
1425 phba->iocb_rsp_size);
1426 INIT_LIST_HEAD(&(rspiocbq.list));
1427 irsp = &rspiocbq.iocb;
1429 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
1430 pring->stats.iocb_rsp++;
1433 if (unlikely(irsp->ulpStatus)) {
/*
 * If resource errors reported from HBA, reduce
 * queuedepths of the SCSI device.
 */
1438 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
1439 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
1440 spin_unlock_irqrestore(&phba->hbalock, iflag);
1441 lpfc_adjust_queue_depth(phba);
1442 spin_lock_irqsave(&phba->hbalock, iflag);
1445 /* Rsp ring <ringno> error: IOCB */
1446 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
1447 "0336 Rsp Ring %d error: IOCB Data: "
1448 "x%x x%x x%x x%x x%x x%x x%x x%x\n",
1450 irsp->un.ulpWord[0],
1451 irsp->un.ulpWord[1],
1452 irsp->un.ulpWord[2],
1453 irsp->un.ulpWord[3],
1454 irsp->un.ulpWord[4],
1455 irsp->un.ulpWord[5],
1456 *(((uint32_t *) irsp) + 6),
1457 *(((uint32_t *) irsp) + 7));
1461 case LPFC_ABORT_IOCB:
/*
 * Idle exchange closed via ABTS from port.  No iocb
 * resources need to be recovered.
 */
1467 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
1468 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
1469 "0333 IOCB cmd 0x%x"
1470 " processed. Skipping"
1476 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
1478 if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) {
1479 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
1480 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
1483 spin_unlock_irqrestore(&phba->hbalock,
1485 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
1487 spin_lock_irqsave(&phba->hbalock,
1492 case LPFC_UNSOL_IOCB:
1493 spin_unlock_irqrestore(&phba->hbalock, iflag);
1494 lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
1495 spin_lock_irqsave(&phba->hbalock, iflag);
1498 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
1499 char adaptermsg[LPFC_MAX_ADPTMSG];
1500 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
1501 memcpy(&adaptermsg[0], (uint8_t *) irsp,
1503 dev_warn(&((phba->pcidev)->dev),
1505 phba->brd_no, adaptermsg);
1507 /* Unknown IOCB command */
1508 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1509 "0334 Unknown IOCB command "
1510 "Data: x%x, x%x x%x x%x x%x\n",
1511 type, irsp->ulpCommand,
/*
 * The response IOCB has been processed.  Update the ring
 * pointer in SLIM.  If the port response put pointer has not
 * been updated, sync the pgp->rspPutInx and fetch the new port
 * response put pointer.
 */
1525 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
1527 if (pring->rspidx == portRspPut)
1528 portRspPut = le32_to_cpu(pgp->rspPutInx);
1531 if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
1532 pring->stats.iocb_rsp_full++;
1533 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
1534 writel(status, phba->CAregaddr);
1535 readl(phba->CAregaddr);
1537 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
1538 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
1539 pring->stats.iocb_cmd_empty++;
1541 /* Force update of the local copy of cmdGetInx */
1542 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
1543 lpfc_sli_resume_iocb(phba, pring);
1545 if ((pring->lpfc_sli_cmd_available))
1546 (pring->lpfc_sli_cmd_available) (phba, pring);
1550 spin_unlock_irqrestore(&phba->hbalock, iflag);
1555 lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
1556 struct lpfc_sli_ring *pring, uint32_t mask)
1558 struct lpfc_pgp *pgp = (phba->sli_rev == 3) ?
1559 &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
1560 &phba->slim2p->mbx.us.s2.port[pring->ringno];
1562 IOCB_t *irsp = NULL;
1563 struct lpfc_iocbq *rspiocbp = NULL;
1564 struct lpfc_iocbq *next_iocb;
1565 struct lpfc_iocbq *cmdiocbp;
1566 struct lpfc_iocbq *saveq;
1567 uint8_t iocb_cmd_type;
1568 lpfc_iocb_type type;
1569 uint32_t status, free_saveq;
1570 uint32_t portRspPut, portRspMax;
1572 unsigned long iflag;
1574 spin_lock_irqsave(&phba->hbalock, iflag);
1575 pring->stats.iocb_event++;
/*
 * The next available response entry should never exceed the maximum
 * entries.  If it does, treat it as an adapter hardware error.
 */
1581 portRspMax = pring->numRiocb;
1582 portRspPut = le32_to_cpu(pgp->rspPutInx);
1583 if (portRspPut >= portRspMax) {
/*
 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
 * rsp ring <portRspMax>
 */
1588 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1589 "0303 Ring %d handler: portRspPut %d "
				"is bigger than rsp ring %d\n",
1591 pring->ringno, portRspPut, portRspMax);
1593 phba->link_state = LPFC_HBA_ERROR;
1594 spin_unlock_irqrestore(&phba->hbalock, iflag);
1596 phba->work_hs = HS_FFER3;
1597 lpfc_handle_eratt(phba);
1603 while (pring->rspidx != portRspPut) {
/*
 * Build a completion list and call the appropriate handler.
 * The process is to get the next available response iocb, get
 * a free iocb from the list, copy the response data into the
 * free iocb, insert to the continuation list, and update the
 * next response index to slim.  This process makes response
 * iocb's in the ring available to DMA as fast as possible but
 * pays a penalty for a copy operation.  Since the iocb is
 * only 32 bytes, this penalty is considered small relative to
 * the PCI reads for register values and a slim write.  When
 * the ulpLe field is set, the entire Command has been
 * received.
 */
1617 entry = lpfc_resp_iocb(phba, pring);
1619 phba->last_completion_time = jiffies;
1620 rspiocbp = __lpfc_sli_get_iocbq(phba);
1621 if (rspiocbp == NULL) {
1622 printk(KERN_ERR "%s: out of buffers! Failing "
1623 "completion.\n", __FUNCTION__);
1627 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
1628 phba->iocb_rsp_size);
1629 irsp = &rspiocbp->iocb;
1631 if (++pring->rspidx >= portRspMax)
1634 if (pring->ringno == LPFC_ELS_RING) {
1635 lpfc_debugfs_slow_ring_trc(phba,
1636 "IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x",
1637 *(((uint32_t *) irsp) + 4),
1638 *(((uint32_t *) irsp) + 6),
1639 *(((uint32_t *) irsp) + 7));
1642 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
1644 if (list_empty(&(pring->iocb_continueq))) {
1645 list_add(&rspiocbp->list, &(pring->iocb_continueq));
1647 list_add_tail(&rspiocbp->list,
1648 &(pring->iocb_continueq));
1651 pring->iocb_continueq_cnt++;
/*
 * By default, the driver expects to free all resources
 * associated with this iocb completion.
 */
free_saveq = 1;
1658 saveq = list_get_first(&pring->iocb_continueq,
1659 struct lpfc_iocbq, list);
1660 irsp = &(saveq->iocb);
1661 list_del_init(&pring->iocb_continueq);
1662 pring->iocb_continueq_cnt = 0;
1664 pring->stats.iocb_rsp++;
/*
 * If resource errors reported from HBA, reduce
 * queuedepths of the SCSI device.
 */
1670 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
1671 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
1672 spin_unlock_irqrestore(&phba->hbalock, iflag);
1673 lpfc_adjust_queue_depth(phba);
1674 spin_lock_irqsave(&phba->hbalock, iflag);
1677 if (irsp->ulpStatus) {
1678 /* Rsp ring <ringno> error: IOCB */
1679 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
1680 "0328 Rsp Ring %d error: "
1685 "x%x x%x x%x x%x\n",
1687 irsp->un.ulpWord[0],
1688 irsp->un.ulpWord[1],
1689 irsp->un.ulpWord[2],
1690 irsp->un.ulpWord[3],
1691 irsp->un.ulpWord[4],
1692 irsp->un.ulpWord[5],
1693 *(((uint32_t *) irsp) + 6),
1694 *(((uint32_t *) irsp) + 7),
1695 *(((uint32_t *) irsp) + 8),
1696 *(((uint32_t *) irsp) + 9),
1697 *(((uint32_t *) irsp) + 10),
1698 *(((uint32_t *) irsp) + 11),
1699 *(((uint32_t *) irsp) + 12),
1700 *(((uint32_t *) irsp) + 13),
1701 *(((uint32_t *) irsp) + 14),
1702 *(((uint32_t *) irsp) + 15));
/*
 * Fetch the IOCB command type and call the correct
 * completion routine.  Solicited and Unsolicited
 * IOCBs on the ELS ring get freed back to the
 * lpfc_iocb_list by the discovery kernel thread.
 */
1711 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
1712 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
1713 if (type == LPFC_SOL_IOCB) {
1714 spin_unlock_irqrestore(&phba->hbalock,
1716 rc = lpfc_sli_process_sol_iocb(phba, pring,
1718 spin_lock_irqsave(&phba->hbalock, iflag);
1719 } else if (type == LPFC_UNSOL_IOCB) {
1720 spin_unlock_irqrestore(&phba->hbalock,
1722 rc = lpfc_sli_process_unsol_iocb(phba, pring,
1724 spin_lock_irqsave(&phba->hbalock, iflag);
1725 } else if (type == LPFC_ABORT_IOCB) {
1726 if ((irsp->ulpCommand != CMD_XRI_ABORTED_CX) &&
1728 lpfc_sli_iocbq_lookup(phba, pring,
1730 /* Call the specified completion
1732 if (cmdiocbp->iocb_cmpl) {
1733 spin_unlock_irqrestore(
1736 (cmdiocbp->iocb_cmpl) (phba,
1742 __lpfc_sli_release_iocbq(phba,
1745 } else if (type == LPFC_UNKNOWN_IOCB) {
1746 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
1748 char adaptermsg[LPFC_MAX_ADPTMSG];
1750 memset(adaptermsg, 0,
1752 memcpy(&adaptermsg[0], (uint8_t *) irsp,
1754 dev_warn(&((phba->pcidev)->dev),
1756 phba->brd_no, adaptermsg);
1758 /* Unknown IOCB command */
1759 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1760 "0335 Unknown IOCB "
1761 "command Data: x%x "
1771 list_for_each_entry_safe(rspiocbp, next_iocb,
1772 &saveq->list, list) {
1773 list_del(&rspiocbp->list);
1774 __lpfc_sli_release_iocbq(phba,
1777 __lpfc_sli_release_iocbq(phba, saveq);
/*
 * If the port response put pointer has not been updated, sync
 * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
 * response put pointer.
 */
1787 if (pring->rspidx == portRspPut) {
1788 portRspPut = le32_to_cpu(pgp->rspPutInx);
1790 } /* while (pring->rspidx != portRspPut) */
1792 if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
1793 /* At least one response entry has been freed */
1794 pring->stats.iocb_rsp_full++;
1795 /* SET RxRE_RSP in Chip Att register */
1796 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
1797 writel(status, phba->CAregaddr);
1798 readl(phba->CAregaddr); /* flush */
1800 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
1801 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
1802 pring->stats.iocb_cmd_empty++;
1804 /* Force update of the local copy of cmdGetInx */
1805 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
1806 lpfc_sli_resume_iocb(phba, pring);
1808 if ((pring->lpfc_sli_cmd_available))
1809 (pring->lpfc_sli_cmd_available) (phba, pring);
1813 spin_unlock_irqrestore(&phba->hbalock, iflag);
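/*
 * lpfc_sli_abort_iocb_ring flushes a ring on error: everything still on the
 * txq is completed with IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED, and an abort
 * is issued for every command outstanding on the txcmplq.
 */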
1818 lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1820 LIST_HEAD(completions);
1821 struct lpfc_iocbq *iocb, *next_iocb;
1824 if (pring->ringno == LPFC_ELS_RING) {
1825 lpfc_fabric_abort_hba(phba);
/* Error everything on txq and txcmplq
 * First do the txq.
 */
1831 spin_lock_irq(&phba->hbalock);
1832 list_splice_init(&pring->txq, &completions);
1835 /* Next issue ABTS for everything on the txcmplq */
1836 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
1837 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
1839 spin_unlock_irq(&phba->hbalock);
1841 while (!list_empty(&completions)) {
1842 iocb = list_get_first(&completions, struct lpfc_iocbq, list);
1844 list_del_init(&iocb->list);
1846 if (!iocb->iocb_cmpl)
1847 lpfc_sli_release_iocbq(phba, iocb);
1849 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
1850 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
1851 (iocb->iocb_cmpl) (phba, iocb, iocb);
1857 lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
1863 /* Read the HBA Host Status Register */
1864 status = readl(phba->HSregaddr);
/*
 * Check status register every 100ms for 5 retries, then every
 * 500ms for 5, then every 2.5 sec for 5, then reset board and
 * every 2.5 sec for 4.
 * Break out of the loop if errors occurred during init.
 */
1872 while (((status & mask) != mask) &&
1873 !(status & HS_FFERM) &&
1885 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
1886 lpfc_sli_brdrestart(phba);
1888 /* Read the HBA Host Status Register */
1889 status = readl(phba->HSregaddr);
1892 /* Check to see if any errors occurred during init */
1893 if ((status & HS_FFERM) || (i >= 20)) {
1894 phba->link_state = LPFC_HBA_ERROR;
1901 #define BARRIER_TEST_PATTERN (0xdeadbeef)
1903 void lpfc_reset_barrier(struct lpfc_hba *phba)
1905 uint32_t __iomem *resp_buf;
1906 uint32_t __iomem *mbox_buf;
1907 volatile uint32_t mbox;
1912 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
1913 if (hdrtype != 0x80 ||
1914 (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
1915 FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
/*
 * Tell the other part of the chip to suspend temporarily all
 * its traffic
 */
1922 resp_buf = phba->MBslimaddr;
1924 /* Disable the error attention */
1925 hc_copy = readl(phba->HCregaddr);
1926 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
1927 readl(phba->HCregaddr); /* flush */
1928 phba->link_flag |= LS_IGNORE_ERATT;
1930 if (readl(phba->HAregaddr) & HA_ERATT) {
1931 /* Clear Chip error bit */
1932 writel(HA_ERATT, phba->HAregaddr);
1933 phba->pport->stopped = 1;
1937 ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
1938 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;
1940 writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
1941 mbox_buf = phba->MBslimaddr;
1942 writel(mbox, mbox_buf);
1945 readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN) && i < 50; i++)
1948 if (readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN)) {
1949 if (phba->sli.sli_flag & LPFC_SLI2_ACTIVE ||
1950 phba->pport->stopped)
1956 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
1957 for (i = 0; readl(resp_buf) != mbox && i < 500; i++)
1962 while (!(readl(phba->HAregaddr) & HA_ERATT) && ++i < 500)
1965 if (readl(phba->HAregaddr) & HA_ERATT) {
1966 writel(HA_ERATT, phba->HAregaddr);
1967 phba->pport->stopped = 1;
1971 phba->link_flag &= ~LS_IGNORE_ERATT;
1972 writel(hc_copy, phba->HCregaddr);
1973 readl(phba->HCregaddr); /* flush */
1977 lpfc_sli_brdkill(struct lpfc_hba *phba)
1979 struct lpfc_sli *psli;
1989 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
1990 "0329 Kill HBA Data: x%x x%x\n",
1991 phba->pport->port_state, psli->sli_flag);
1993 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1997 /* Disable the error attention */
1998 spin_lock_irq(&phba->hbalock);
1999 status = readl(phba->HCregaddr);
2000 status &= ~HC_ERINT_ENA;
2001 writel(status, phba->HCregaddr);
2002 readl(phba->HCregaddr); /* flush */
2003 phba->link_flag |= LS_IGNORE_ERATT;
2004 spin_unlock_irq(&phba->hbalock);
2006 lpfc_kill_board(phba, pmb);
2007 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2008 retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2010 if (retval != MBX_SUCCESS) {
2011 if (retval != MBX_BUSY)
2012 mempool_free(pmb, phba->mbox_mem_pool);
2013 spin_lock_irq(&phba->hbalock);
2014 phba->link_flag &= ~LS_IGNORE_ERATT;
2015 spin_unlock_irq(&phba->hbalock);
2019 psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
2021 mempool_free(pmb, phba->mbox_mem_pool);
/* There is no completion for a KILL_BOARD mbox cmd. Check for an error
 * attention every 100ms for 3 seconds. If we don't get ERATT after
 * 3 seconds we still set HBA_ERROR state because the status of the
 * board is now undefined.
 */
2028 ha_copy = readl(phba->HAregaddr);
2030 while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
2032 ha_copy = readl(phba->HAregaddr);
2035 del_timer_sync(&psli->mbox_tmo);
2036 if (ha_copy & HA_ERATT) {
2037 writel(HA_ERATT, phba->HAregaddr);
2038 phba->pport->stopped = 1;
2040 spin_lock_irq(&phba->hbalock);
2041 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2042 phba->link_flag &= ~LS_IGNORE_ERATT;
2043 spin_unlock_irq(&phba->hbalock);
2045 psli->mbox_active = NULL;
2046 lpfc_hba_down_post(phba);
2047 phba->link_state = LPFC_HBA_ERROR;
2049 return ha_copy & HA_ERATT ? 0 : 1;
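/*
 * lpfc_sli_brdreset performs a hardware reset of the HBA: discovery state
 * is cleared, INITFF is toggled in the Host Control register with PCI
 * parity/SERR checking temporarily disabled, and the ring indices are
 * reinitialized.
 */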
2053 lpfc_sli_brdreset(struct lpfc_hba *phba)
2055 struct lpfc_sli *psli;
2056 struct lpfc_sli_ring *pring;
2063 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
2064 "0325 Reset HBA Data: x%x x%x\n",
2065 phba->pport->port_state, psli->sli_flag);
2067 /* perform board reset */
2068 phba->fc_eventTag = 0;
2069 phba->pport->fc_myDID = 0;
2070 phba->pport->fc_prevDID = 0;
2072 /* Turn off parity checking and serr during the physical reset */
2073 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
2074 pci_write_config_word(phba->pcidev, PCI_COMMAND,
2076 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
2078 psli->sli_flag &= ~(LPFC_SLI2_ACTIVE | LPFC_PROCESS_LA);
2079 /* Now toggle INITFF bit in the Host Control Register */
2080 writel(HC_INITFF, phba->HCregaddr);
2082 readl(phba->HCregaddr); /* flush */
2083 writel(0, phba->HCregaddr);
2084 readl(phba->HCregaddr); /* flush */
2086 /* Restore PCI cmd register */
2087 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
2089 /* Initialize relevant SLI info */
2090 for (i = 0; i < psli->num_rings; i++) {
2091 pring = &psli->ring[i];
2094 pring->next_cmdidx = 0;
2095 pring->local_getidx = 0;
2097 pring->missbufcnt = 0;
2100 phba->link_state = LPFC_WARM_START;
2105 lpfc_sli_brdrestart(struct lpfc_hba *phba)
2108 struct lpfc_sli *psli;
2110 volatile uint32_t word0;
2111 void __iomem *to_slim;
2113 spin_lock_irq(&phba->hbalock);
2118 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
2119 "0337 Restart HBA Data: x%x x%x\n",
2120 phba->pport->port_state, psli->sli_flag);
2123 mb = (MAILBOX_t *) &word0;
2124 mb->mbxCommand = MBX_RESTART;
2127 lpfc_reset_barrier(phba);
2129 to_slim = phba->MBslimaddr;
2130 writel(*(uint32_t *) mb, to_slim);
2131 readl(to_slim); /* flush */
2133 /* Only skip post after fc_ffinit is completed */
2134 if (phba->pport->port_state) {
2136 word0 = 1; /* This is really setting up word1 */
2139 word0 = 0; /* This is really setting up word1 */
2141 to_slim = phba->MBslimaddr + sizeof (uint32_t);
2142 writel(*(uint32_t *) mb, to_slim);
2143 readl(to_slim); /* flush */
2145 lpfc_sli_brdreset(phba);
2146 phba->pport->stopped = 0;
2147 phba->link_state = LPFC_INIT_START;
2149 spin_unlock_irq(&phba->hbalock);
2151 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
2152 psli->stats_start = get_seconds();
2159 lpfc_hba_down_post(phba);
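/*
 * lpfc_sli_chipset_init waits for the chip to report ready (HS_FFRDY and
 * HS_MBRDY), restarting the board at increasing intervals if needed, then
 * clears all interrupt enable conditions and acknowledges any pending host
 * attention bits.
 */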
2165 lpfc_sli_chipset_init(struct lpfc_hba *phba)
2167 uint32_t status, i = 0;
2169 /* Read the HBA Host Status Register */
2170 status = readl(phba->HSregaddr);
2172 /* Check status register to see what current state is */
2174 while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
/* Check every 100ms for 5 retries, then every 500ms for 5, then
 * every 2.5 sec for 5, then reset board and every 2.5 sec for
 * 4.
 */
/* Adapter failed to init, timeout, status reg
   <status> */
2183 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2184 "0436 Adapter failed to init, "
2185 "timeout, status reg x%x\n", status);
2186 phba->link_state = LPFC_HBA_ERROR;
2190 /* Check to see if any errors occurred during init */
2191 if (status & HS_FFERM) {
2192 /* ERROR: During chipset initialization */
/* Adapter failed to init, chipset, status reg
   <status> */
2195 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2196 "0437 Adapter failed to init, "
2197 "chipset, status reg x%x\n", status);
2198 phba->link_state = LPFC_HBA_ERROR;
2204 } else if (i <= 10) {
2212 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
2213 lpfc_sli_brdrestart(phba);
2215 /* Read the HBA Host Status Register */
2216 status = readl(phba->HSregaddr);
2219 /* Check to see if any errors occurred during init */
2220 if (status & HS_FFERM) {
2221 /* ERROR: During chipset initialization */
2222 /* Adapter failed to init, chipset, status reg <status> */
2223 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2224 "0438 Adapter failed to init, chipset, "
2225 "status reg x%x\n", status);
2226 phba->link_state = LPFC_HBA_ERROR;
2230 /* Clear all interrupt enable conditions */
2231 writel(0, phba->HCregaddr);
2232 readl(phba->HCregaddr); /* flush */
2234 /* setup host attn register */
2235 writel(0xffffffff, phba->HAregaddr);
2236 readl(phba->HAregaddr); /* flush */
2241 lpfc_sli_hbq_count(void)
2243 return ARRAY_SIZE(lpfc_hbq_defs);
2247 lpfc_sli_hbq_entry_count(void)
2249 int hbq_count = lpfc_sli_hbq_count();
2253 for (i = 0; i < hbq_count; ++i)
2254 count += lpfc_hbq_defs[i]->entry_count;
2259 lpfc_sli_hbq_size(void)
2261 return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
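/*
 * lpfc_sli_hbq_setup configures the host buffer queues with lpfc_config_hbq
 * mailbox commands issued in polled mode, then seeds each HBQ with its
 * initial set of buffers.
 */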
2265 lpfc_sli_hbq_setup(struct lpfc_hba *phba)
2267 int hbq_count = lpfc_sli_hbq_count();
2271 uint32_t hbq_entry_index;
/* Get a Mailbox buffer to setup mailbox
 * commands for HBA initialization
 */
2276 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2283 /* Initialize the struct lpfc_sli_hbq structure for each hbq */
2284 phba->link_state = LPFC_INIT_MBX_CMDS;
2286 hbq_entry_index = 0;
2287 for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
2288 phba->hbqs[hbqno].next_hbqPutIdx = 0;
2289 phba->hbqs[hbqno].hbqPutIdx = 0;
2290 phba->hbqs[hbqno].local_hbqGetIdx = 0;
2291 phba->hbqs[hbqno].entry_count =
2292 lpfc_hbq_defs[hbqno]->entry_count;
2293 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
2294 hbq_entry_index, pmb);
2295 hbq_entry_index += phba->hbqs[hbqno].entry_count;
2297 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
2298 /* Adapter failed to init, mbxCmd <cmd> CFG_RING,
2299 mbxStatus <status>, ring <num> */
2301 lpfc_printf_log(phba, KERN_ERR,
2302 LOG_SLI | LOG_VPORT,
2303 "1805 Adapter failed to init. "
2304 "Data: x%x x%x x%x\n",
2306 pmbox->mbxStatus, hbqno);
2308 phba->link_state = LPFC_HBA_ERROR;
2309 mempool_free(pmb, phba->mbox_mem_pool);
2313 phba->hbq_count = hbq_count;
2315 mempool_free(pmb, phba->mbox_mem_pool);
2317 /* Initially populate or replenish the HBQs */
2318 for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
2319 if (lpfc_sli_hbqbuf_init_hbqs(phba, hbqno))
2326 lpfc_do_config_port(struct lpfc_hba *phba, int sli_mode)
2329 uint32_t resetcount = 0, rc = 0, done = 0;
2331 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2333 phba->link_state = LPFC_HBA_ERROR;
2337 phba->sli_rev = sli_mode;
2338 while (resetcount < 2 && !done) {
2339 spin_lock_irq(&phba->hbalock);
2340 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
2341 spin_unlock_irq(&phba->hbalock);
2342 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
2343 lpfc_sli_brdrestart(phba);
2345 rc = lpfc_sli_chipset_init(phba);
2349 spin_lock_irq(&phba->hbalock);
2350 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2351 spin_unlock_irq(&phba->hbalock);
2354 /* Call pre CONFIG_PORT mailbox command initialization. A
2355 * value of 0 means the call was successful. Any other
2356 * nonzero value is a failure, but if ERESTART is returned,
 * the driver may reset the HBA and try again.
 */
2359 rc = lpfc_config_port_prep(phba);
2360 if (rc == -ERESTART) {
2361 phba->link_state = LPFC_LINK_UNKNOWN;
2367 phba->link_state = LPFC_INIT_MBX_CMDS;
2368 lpfc_config_port(phba, pmb);
2369 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
2370 if (rc != MBX_SUCCESS) {
2371 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2372 "0442 Adapter failed to init, mbxCmd x%x "
2373 "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
2374 pmb->mb.mbxCommand, pmb->mb.mbxStatus, 0);
2375 spin_lock_irq(&phba->hbalock);
2376 phba->sli.sli_flag &= ~LPFC_SLI2_ACTIVE;
2377 spin_unlock_irq(&phba->hbalock);
2381 phba->max_vpi = (phba->max_vpi &&
2382 pmb->mb.un.varCfgPort.gmv) != 0
? pmb->mb.un.varCfgPort.max_vpi : 0;
2390 goto do_prep_failed;
2393 if ((pmb->mb.un.varCfgPort.sli_mode == 3) &&
2394 (!pmb->mb.un.varCfgPort.cMA)) {
2396 goto do_prep_failed;
2401 mempool_free(pmb, phba->mbox_mem_pool);
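/*
 * lpfc_sli_hba_setup is the top-level SLI bring-up path: it selects SLI-2
 * or SLI-3 mode through CONFIG_PORT (lpfc_do_config_port), maps the rings,
 * sets up HBQs when SLI-3 is active, and finishes with the post-CONFIG_PORT
 * initialization.
 */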
2406 lpfc_sli_hba_setup(struct lpfc_hba *phba)
2411 switch (lpfc_sli_mode) {
2413 if (phba->cfg_enable_npiv) {
2414 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
2415 "1824 NPIV enabled: Override lpfc_sli_mode "
2416 "parameter (%d) to auto (0).\n",
2426 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
2427 "1819 Unrecognized lpfc_sli_mode "
2428 "parameter: %d.\n", lpfc_sli_mode);
2433 rc = lpfc_do_config_port(phba, mode);
2434 if (rc && lpfc_sli_mode == 3)
2435 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
2436 "1820 Unable to select SLI-3. "
2437 "Not supported by adapter.\n");
2438 if (rc && mode != 2)
2439 rc = lpfc_do_config_port(phba, 2);
2441 goto lpfc_sli_hba_setup_error;
2443 if (phba->sli_rev == 3) {
2444 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
2445 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
2446 phba->sli3_options |= LPFC_SLI3_ENABLED;
2447 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
2450 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
2451 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
2452 phba->sli3_options = 0;
2455 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2456 "0444 Firmware in SLI %x mode. Max_vpi %d\n",
2457 phba->sli_rev, phba->max_vpi);
2458 rc = lpfc_sli_ring_map(phba);
2461 goto lpfc_sli_hba_setup_error;
2465 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
2466 rc = lpfc_sli_hbq_setup(phba);
2468 goto lpfc_sli_hba_setup_error;
2471 phba->sli.sli_flag |= LPFC_PROCESS_LA;
2473 rc = lpfc_config_port_post(phba);
2475 goto lpfc_sli_hba_setup_error;
2479 lpfc_sli_hba_setup_error:
2480 phba->link_state = LPFC_HBA_ERROR;
2481 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2482 "0445 Firmware initialization failed\n");
2486 /*! lpfc_mbox_timeout
2490 * \param ptr Pointer to the per-adapter struct lpfc_hba structure,
2491 * cast to an unsigned long for the timer callback.
2497 * This routine handles mailbox timeout events at timer interrupt context.
2500 lpfc_mbox_timeout(unsigned long ptr)
2502 struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
2503 unsigned long iflag;
2504 uint32_t tmo_posted;
2506 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
2507 tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
2509 phba->pport->work_port_events |= WORKER_MBOX_TMO;
2510 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
2513 spin_lock_irqsave(&phba->hbalock, iflag);
2514 if (phba->work_wait)
2515 lpfc_worker_wake_up(phba);
2516 spin_unlock_irqrestore(&phba->hbalock, iflag);
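/*
 * Editorial summary (not original documentation): lpfc_mbox_timeout_handler
 * below runs in the worker thread once lpfc_mbox_timeout has posted
 * WORKER_MBOX_TMO. It logs the timed-out command, marks the link state
 * unknown so outstanding I/O is failed back, aborts the FCP ring, and then
 * recovers by taking the port offline, restarting the board, and bringing
 * the port back online.
 */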
2521 lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
2523 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
2524 MAILBOX_t *mb = &pmbox->mb;
2525 struct lpfc_sli *psli = &phba->sli;
2526 struct lpfc_sli_ring *pring;
2528 if (!(phba->pport->work_port_events & WORKER_MBOX_TMO)) {
2532 /* Mbox cmd <mbxCommand> timeout */
2533 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2534 "0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
2536 phba->pport->port_state,
2538 phba->sli.mbox_active);
2540 /* Setting state unknown so lpfc_sli_abort_iocb_ring
2541 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
2542 * it to fail all outstanding SCSI IO.
2544 spin_lock_irq(&phba->pport->work_port_lock);
2545 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
2546 spin_unlock_irq(&phba->pport->work_port_lock);
2547 spin_lock_irq(&phba->hbalock);
2548 phba->link_state = LPFC_LINK_UNKNOWN;
2549 phba->pport->fc_flag |= FC_ESTABLISH_LINK;
2550 psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
2551 spin_unlock_irq(&phba->hbalock);
2553 pring = &psli->ring[psli->fcp_ring];
2554 lpfc_sli_abort_iocb_ring(phba, pring);
2556 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2557 "0345 Resetting board due to mailbox timeout\n");
2559 * lpfc_offline calls lpfc_sli_hba_down which will clean up
2560 * any outstanding mailbox commands.
2562 lpfc_offline_prep(phba);
2564 lpfc_sli_brdrestart(phba);
2565 if (lpfc_online(phba) == 0) /* Initialize the HBA */
2566 mod_timer(&phba->fc_estabtmo, jiffies + HZ * 60);
2567 lpfc_unblock_mgmt_io(phba);
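/*
 * Editorial usage sketch for lpfc_sli_issue_mbox below (added for
 * illustration; lpfc_read_rev() is assumed from the driver's mailbox helpers
 * and the error handling is minimal):
 *
 *	LPFC_MBOXQ_t *mbox;
 *
 *	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	if (!mbox)
 *		return -ENOMEM;
 *	lpfc_read_rev(phba, mbox);
 *	if (lpfc_sli_issue_mbox(phba, mbox, MBX_POLL) != MBX_SUCCESS) {
 *		mempool_free(mbox, phba->mbox_mem_pool);
 *		return -EIO;
 *	}
 *	... examine mbox->mb ...
 *	mempool_free(mbox, phba->mbox_mem_pool);
 *
 * With MBX_NOWAIT the command is queued (or MBX_BUSY is returned) and the
 * mbox_cmpl callback is responsible for freeing the mailbox instead.
 */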
2572 lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
2575 struct lpfc_sli *psli = &phba->sli;
2576 uint32_t status, evtctr;
2579 unsigned long drvr_flag = 0;
2580 volatile uint32_t word0, ldata;
2581 void __iomem *to_slim;
2583 if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
2584 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
2586 lpfc_printf_log(phba, KERN_ERR,
2587 LOG_MBOX | LOG_VPORT,
2588 "1806 Mbox x%x failed. No vport\n",
2589 pmbox->mb.mbxCommand);
2591 return MBXERR_ERROR;
2596 /* If the PCI channel is in offline state, do not post mbox. */
2597 if (unlikely(pci_channel_offline(phba->pcidev)))
2598 return MBX_NOT_FINISHED;
2600 spin_lock_irqsave(&phba->hbalock, drvr_flag);
2605 status = MBX_SUCCESS;
2607 if (phba->link_state == LPFC_HBA_ERROR) {
2608 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2610 /* Mbox command <mbxCommand> cannot issue */
2611 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag)
2612 return MBX_NOT_FINISHED;
2615 if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT &&
2616 !(readl(phba->HCregaddr) & HC_MBINT_ENA)) {
2617 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2618 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag)
2619 return MBX_NOT_FINISHED;
2622 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
2623 /* Polling for an mbox command when another one is already active
2624 * is not allowed in SLI. Also, the driver must have established
2625 * SLI2 mode to queue and process multiple mbox commands.
2628 if (flag & MBX_POLL) {
2629 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2631 /* Mbox command <mbxCommand> cannot issue */
2632 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
2633 return MBX_NOT_FINISHED;
2636 if (!(psli->sli_flag & LPFC_SLI2_ACTIVE)) {
2637 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2638 /* Mbox command <mbxCommand> cannot issue */
2639 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
2640 return MBX_NOT_FINISHED;
2643 /* Another mailbox command is still being processed, queue this
2644 * command to be processed later.
2646 lpfc_mbox_put(phba, pmbox);
2648 /* Mbox cmd issue - BUSY */
2649 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
2650 "(%d):0308 Mbox cmd issue - BUSY Data: "
2651 "x%x x%x x%x x%x\n",
2652 pmbox->vport ? pmbox->vport->vpi : 0xffffff,
2653 mb->mbxCommand, phba->pport->port_state,
2654 psli->sli_flag, flag);
2656 psli->slistat.mbox_busy++;
2657 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2660 lpfc_debugfs_disc_trc(pmbox->vport,
2661 LPFC_DISC_TRC_MBOX_VPORT,
2662 "MBOX Bsy vport: cmd:x%x mb:x%x x%x",
2663 (uint32_t)mb->mbxCommand,
2664 mb->un.varWords[0], mb->un.varWords[1]);
2667 lpfc_debugfs_disc_trc(phba->pport,
2669 "MBOX Bsy: cmd:x%x mb:x%x x%x",
2670 (uint32_t)mb->mbxCommand,
2671 mb->un.varWords[0], mb->un.varWords[1]);
2677 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
2679 /* If we are not polling, we MUST be in SLI2 mode */
2680 if (flag != MBX_POLL) {
2681 if (!(psli->sli_flag & LPFC_SLI2_ACTIVE) &&
2682 (mb->mbxCommand != MBX_KILL_BOARD)) {
2683 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2684 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2685 /* Mbox command <mbxCommand> cannot issue */
2686 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
2687 return MBX_NOT_FINISHED;
2689 /* Start the timeout timer for the active mbox command */
2690 mod_timer(&psli->mbox_tmo, (jiffies +
2691 (HZ * lpfc_mbox_tmo_val(phba, mb->mbxCommand))));
2694 /* Mailbox cmd <cmd> issue */
2695 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
2696 "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
2698 pmbox->vport ? pmbox->vport->vpi : 0,
2699 mb->mbxCommand, phba->pport->port_state,
2700 psli->sli_flag, flag);
2702 if (mb->mbxCommand != MBX_HEARTBEAT) {
2704 lpfc_debugfs_disc_trc(pmbox->vport,
2705 LPFC_DISC_TRC_MBOX_VPORT,
2706 "MBOX Send vport: cmd:x%x mb:x%x x%x",
2707 (uint32_t)mb->mbxCommand,
2708 mb->un.varWords[0], mb->un.varWords[1]);
2711 lpfc_debugfs_disc_trc(phba->pport,
2713 "MBOX Send: cmd:x%x mb:x%x x%x",
2714 (uint32_t)mb->mbxCommand,
2715 mb->un.varWords[0], mb->un.varWords[1]);
2719 psli->slistat.mbox_cmd++;
2720 evtctr = psli->slistat.mbox_event;
2722 /* Next, set the owner bit for the adapter and copy over the command word */
2723 mb->mbxOwner = OWN_CHIP;
2725 if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
2726 /* First copy command data to host SLIM area */
2727 lpfc_sli_pcimem_bcopy(mb, &phba->slim2p->mbx, MAILBOX_CMD_SIZE);
2729 if (mb->mbxCommand == MBX_CONFIG_PORT) {
2730 /* copy command data into host mbox for cmpl */
2731 lpfc_sli_pcimem_bcopy(mb, &phba->slim2p->mbx,
2735 /* First copy mbox command data to HBA SLIM, skipping past the first word */
2737 to_slim = phba->MBslimaddr + sizeof (uint32_t);
2738 lpfc_memcpy_to_slim(to_slim, &mb->un.varWords[0],
2739 MAILBOX_CMD_SIZE - sizeof (uint32_t));
2741 /* Next copy over first word, with mbxOwner set */
2742 ldata = *((volatile uint32_t *)mb);
2743 to_slim = phba->MBslimaddr;
2744 writel(ldata, to_slim);
2745 readl(to_slim); /* flush */
2747 if (mb->mbxCommand == MBX_CONFIG_PORT) {
2748 /* switch over to host mailbox */
2749 psli->sli_flag |= LPFC_SLI2_ACTIVE;
2754 /* Interrupt the board to process it right away */
2755 writel(CA_MBATT, phba->CAregaddr);
2756 readl(phba->CAregaddr); /* flush */
2760 /* Don't wait for it to finish, just return */
2761 psli->mbox_active = pmbox;
2765 psli->mbox_active = NULL;
2766 if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
2767 /* First read mbox status word */
2768 word0 = *((volatile uint32_t *)&phba->slim2p->mbx);
2769 word0 = le32_to_cpu(word0);
2771 /* First read mbox status word */
2772 word0 = readl(phba->MBslimaddr);
2775 /* Read the HBA Host Attention Register */
2776 ha_copy = readl(phba->HAregaddr);
2778 i = lpfc_mbox_tmo_val(phba, mb->mbxCommand);
2779 i *= 1000; /* Convert to ms */
2781 /* Wait for command to complete */
2782 while (((word0 & OWN_CHIP) == OWN_CHIP) ||
2783 (!(ha_copy & HA_MBATT) &&
2784 (phba->link_state > LPFC_WARM_START))) {
2786 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2787 spin_unlock_irqrestore(&phba->hbalock,
2789 return MBX_NOT_FINISHED;
2792 /* Check if we took a mbox interrupt while we were polling */
2794 if (((word0 & OWN_CHIP) != OWN_CHIP)
2795 && (evtctr != psli->slistat.mbox_event))
2798 spin_unlock_irqrestore(&phba->hbalock,
2803 spin_lock_irqsave(&phba->hbalock, drvr_flag);
2805 if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
2806 /* Re-read the mbox status word */
2807 word0 = *((volatile uint32_t *)
2808 &phba->slim2p->mbx);
2809 word0 = le32_to_cpu(word0);
2810 if (mb->mbxCommand == MBX_CONFIG_PORT) {
2812 volatile uint32_t slimword0;
2813 /* Check real SLIM for any errors */
2814 slimword0 = readl(phba->MBslimaddr);
2815 slimmb = (MAILBOX_t *) & slimword0;
2816 if (((slimword0 & OWN_CHIP) != OWN_CHIP)
2817 && slimmb->mbxStatus) {
2824 /* Re-read the mbox status word from SLIM */
2825 word0 = readl(phba->MBslimaddr);
2827 /* Read the HBA Host Attention Register */
2828 ha_copy = readl(phba->HAregaddr);
2831 if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
2832 /* copy results back to user */
2833 lpfc_sli_pcimem_bcopy(&phba->slim2p->mbx, mb,
2836 /* First copy command data */
2837 lpfc_memcpy_from_slim(mb, phba->MBslimaddr,
2839 if ((mb->mbxCommand == MBX_DUMP_MEMORY) &&
2841 lpfc_memcpy_from_slim((void *)pmbox->context2,
2842 phba->MBslimaddr + DMP_RSP_OFFSET,
2843 mb->un.varDmp.word_cnt);
2847 writel(HA_MBATT, phba->HAregaddr);
2848 readl(phba->HAregaddr); /* flush */
2850 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2851 status = mb->mbxStatus;
2854 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2859 * Caller needs to hold the hbalock.
2862 __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2863 struct lpfc_iocbq *piocb)
2865 /* Insert the caller's iocb in the txq tail for later processing. */
2866 list_add_tail(&piocb->list, &pring->txq);
2870 static struct lpfc_iocbq *
2871 lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2872 struct lpfc_iocbq **piocb)
2874 struct lpfc_iocbq * nextiocb;
2876 nextiocb = lpfc_sli_ringtx_get(phba, pring);
2886 * Lockless version of lpfc_sli_issue_iocb.
2889 __lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2890 struct lpfc_iocbq *piocb, uint32_t flag)
2892 struct lpfc_iocbq *nextiocb;
2895 if (piocb->iocb_cmpl && (!piocb->vport) &&
2896 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
2897 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
2898 lpfc_printf_log(phba, KERN_ERR,
2899 LOG_SLI | LOG_VPORT,
2900 "1807 IOCB x%x failed. No vport\n",
2901 piocb->iocb.ulpCommand);
2907 /* If the PCI channel is in offline state, do not post iocbs. */
2908 if (unlikely(pci_channel_offline(phba->pcidev)))
2912 * We should never get an IOCB if we are in a < LINK_DOWN state
2914 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
2918 * Check to see if we are blocking IOCB processing because of an
2919 * outstanding event.
2921 if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
2924 if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
2926 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
2927 * can be issued if the link is not up.
2929 switch (piocb->iocb.ulpCommand) {
2930 case CMD_QUE_RING_BUF_CN:
2931 case CMD_QUE_RING_BUF64_CN:
2933 * For IOCBs, like QUE_RING_BUF, that have no rsp ring
2934 * completion, iocb_cmpl MUST be 0.
2936 if (piocb->iocb_cmpl)
2937 piocb->iocb_cmpl = NULL;
2939 case CMD_CREATE_XRI_CR:
2940 case CMD_CLOSE_XRI_CN:
2941 case CMD_CLOSE_XRI_CX:
2948 * For FCP commands, we must be in a state where we can process link attention events.
2951 } else if (unlikely(pring->ringno == phba->sli.fcp_ring &&
2952 !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
2956 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
2957 (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
2958 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
2961 lpfc_sli_update_ring(phba, pring);
2963 lpfc_sli_update_full_ring(phba, pring);
2966 return IOCB_SUCCESS;
2971 pring->stats.iocb_cmd_delay++;
2975 if (!(flag & SLI_IOCB_RET_IOCB)) {
2976 __lpfc_sli_ringtx_put(phba, pring, piocb);
2977 return IOCB_SUCCESS;
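/*
 * Editorial usage sketch for lpfc_sli_issue_iocb below (added for
 * illustration; the completion handler name is hypothetical):
 *
 *	struct lpfc_iocbq *iocb;
 *
 *	iocb = lpfc_sli_get_iocbq(phba);
 *	if (!iocb)
 *		return IOCB_ERROR;
 *	iocb->vport = vport;
 *	... fill in iocb->iocb ...
 *	iocb->iocb_cmpl = my_cmpl_handler;      (hypothetical handler)
 *	if (lpfc_sli_issue_iocb(phba, pring, iocb, 0) == IOCB_ERROR)
 *		lpfc_sli_release_iocbq(phba, iocb);
 *
 * Without SLI_IOCB_RET_IOCB a busy ring simply queues the iocb on the txq;
 * note that an iocb_cmpl with no vport set is rejected with an error.
 */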
2985 lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2986 struct lpfc_iocbq *piocb, uint32_t flag)
2988 unsigned long iflags;
2991 spin_lock_irqsave(&phba->hbalock, iflags);
2992 rc = __lpfc_sli_issue_iocb(phba, pring, piocb, flag);
2993 spin_unlock_irqrestore(&phba->hbalock, iflags);
2999 lpfc_extra_ring_setup( struct lpfc_hba *phba)
3001 struct lpfc_sli *psli;
3002 struct lpfc_sli_ring *pring;
3006 /* Adjust cmd/rsp ring iocb entries more evenly */
3008 /* Take some away from the FCP ring */
3009 pring = &psli->ring[psli->fcp_ring];
3010 pring->numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
3011 pring->numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
3012 pring->numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
3013 pring->numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
3015 /* and give them to the extra ring */
3016 pring = &psli->ring[psli->extra_ring];
3018 pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
3019 pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
3020 pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
3021 pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
3023 /* Setup default profile for this ring */
3024 pring->iotag_max = 4096;
3025 pring->num_mask = 1;
3026 pring->prt[0].profile = 0; /* Mask 0 */
3027 pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
3028 pring->prt[0].type = phba->cfg_multi_ring_type;
3029 pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
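/*
 * Editorial summary (not original documentation): lpfc_sli_async_event_handler
 * below services ASYNC_STATUS iocbs on the ELS ring. Only the temperature
 * warning/safe event codes are handled; the temperature (Celsius, carried in
 * ulpContext) is logged and then forwarded to management applications as an
 * FC vendor-unique event via fc_host_post_vendor_event.
 */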
3034 lpfc_sli_async_event_handler(struct lpfc_hba * phba,
3035 struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq)
3040 struct temp_event temp_event_data;
3041 struct Scsi_Host *shost;
3043 icmd = &iocbq->iocb;
3044 evt_code = icmd->un.asyncstat.evt_code;
3045 temp = icmd->ulpContext;
3047 if ((evt_code != ASYNC_TEMP_WARN) &&
3048 (evt_code != ASYNC_TEMP_SAFE)) {
3049 lpfc_printf_log(phba,
3052 "0346 Ring %d handler: unexpected ASYNC_STATUS"
3055 icmd->un.asyncstat.evt_code);
3058 temp_event_data.data = (uint32_t)temp;
3059 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
3060 if (evt_code == ASYNC_TEMP_WARN) {
3061 temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
3062 lpfc_printf_log(phba,
3065 "0347 Adapter is very hot, please take "
3066 "corrective action. temperature : %d Celsius\n",
3069 if (evt_code == ASYNC_TEMP_SAFE) {
3070 temp_event_data.event_code = LPFC_NORMAL_TEMP;
3071 lpfc_printf_log(phba,
3074 "0340 Adapter temperature is OK now. "
3075 "temperature : %d Celsius\n",
3079 /* Send temperature change event to applications */
3080 shost = lpfc_shost_from_vport(phba->pport);
3081 fc_host_post_vendor_event(shost, fc_get_event_number(),
3082 sizeof(temp_event_data), (char *) &temp_event_data,
3083 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
3089 lpfc_sli_setup(struct lpfc_hba *phba)
3091 int i, totiocbsize = 0;
3092 struct lpfc_sli *psli = &phba->sli;
3093 struct lpfc_sli_ring *pring;
3095 psli->num_rings = MAX_CONFIGURED_RINGS;
3097 psli->fcp_ring = LPFC_FCP_RING;
3098 psli->next_ring = LPFC_FCP_NEXT_RING;
3099 psli->extra_ring = LPFC_EXTRA_RING;
3101 psli->iocbq_lookup = NULL;
3102 psli->iocbq_lookup_len = 0;
3103 psli->last_iotag = 0;
3105 for (i = 0; i < psli->num_rings; i++) {
3106 pring = &psli->ring[i];
3108 case LPFC_FCP_RING: /* ring 0 - FCP */
3109 /* numCiocb and numRiocb are used in config_port */
3110 pring->numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
3111 pring->numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
3112 pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
3113 pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
3114 pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
3115 pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
3116 pring->sizeCiocb = (phba->sli_rev == 3) ?
3117 SLI3_IOCB_CMD_SIZE :
3119 pring->sizeRiocb = (phba->sli_rev == 3) ?
3120 SLI3_IOCB_RSP_SIZE :
3122 pring->iotag_ctr = 0;
3124 (phba->cfg_hba_queue_depth * 2);
3125 pring->fast_iotag = pring->iotag_max;
3126 pring->num_mask = 0;
3128 case LPFC_EXTRA_RING: /* ring 1 - EXTRA */
3129 /* numCiocb and numRiocb are used in config_port */
3130 pring->numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
3131 pring->numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
3132 pring->sizeCiocb = (phba->sli_rev == 3) ?
3133 SLI3_IOCB_CMD_SIZE :
3135 pring->sizeRiocb = (phba->sli_rev == 3) ?
3136 SLI3_IOCB_RSP_SIZE :
3138 pring->iotag_max = phba->cfg_hba_queue_depth;
3139 pring->num_mask = 0;
3141 case LPFC_ELS_RING: /* ring 2 - ELS / CT */
3142 /* numCiocb and numRiocb are used in config_port */
3143 pring->numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
3144 pring->numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
3145 pring->sizeCiocb = (phba->sli_rev == 3) ?
3146 SLI3_IOCB_CMD_SIZE :
3148 pring->sizeRiocb = (phba->sli_rev == 3) ?
3149 SLI3_IOCB_RSP_SIZE :
3151 pring->fast_iotag = 0;
3152 pring->iotag_ctr = 0;
3153 pring->iotag_max = 4096;
3154 pring->lpfc_sli_rcv_async_status =
3155 lpfc_sli_async_event_handler;
3156 pring->num_mask = 4;
3157 pring->prt[0].profile = 0; /* Mask 0 */
3158 pring->prt[0].rctl = FC_ELS_REQ;
3159 pring->prt[0].type = FC_ELS_DATA;
3160 pring->prt[0].lpfc_sli_rcv_unsol_event =
3161 lpfc_els_unsol_event;
3162 pring->prt[1].profile = 0; /* Mask 1 */
3163 pring->prt[1].rctl = FC_ELS_RSP;
3164 pring->prt[1].type = FC_ELS_DATA;
3165 pring->prt[1].lpfc_sli_rcv_unsol_event =
3166 lpfc_els_unsol_event;
3167 pring->prt[2].profile = 0; /* Mask 2 */
3168 /* NameServer Inquiry */
3169 pring->prt[2].rctl = FC_UNSOL_CTL;
3171 pring->prt[2].type = FC_COMMON_TRANSPORT_ULP;
3172 pring->prt[2].lpfc_sli_rcv_unsol_event =
3173 lpfc_ct_unsol_event;
3174 pring->prt[3].profile = 0; /* Mask 3 */
3175 /* NameServer response */
3176 pring->prt[3].rctl = FC_SOL_CTL;
3178 pring->prt[3].type = FC_COMMON_TRANSPORT_ULP;
3179 pring->prt[3].lpfc_sli_rcv_unsol_event =
3180 lpfc_ct_unsol_event;
3183 totiocbsize += (pring->numCiocb * pring->sizeCiocb) +
3184 (pring->numRiocb * pring->sizeRiocb);
3186 if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
3187 /* Too many cmd / rsp ring entries in SLI2 SLIM */
3188 printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
3189 "SLI2 SLIM Data: x%x x%lx\n",
3190 phba->brd_no, totiocbsize,
3191 (unsigned long) MAX_SLIM_IOCB_SIZE);
3193 if (phba->cfg_multi_ring_support == 2)
3194 lpfc_extra_ring_setup(phba);
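/*
 * Editorial summary (not original documentation): lpfc_sli_queue_setup below
 * resets the per-ring indices and re-initializes the mailbox queues and each
 * ring's txq, txcmplq, iocb_continueq and postbufq list heads under the
 * hbalock, as part of bringing the SLI layer up.
 */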
3200 lpfc_sli_queue_setup(struct lpfc_hba *phba)
3202 struct lpfc_sli *psli;
3203 struct lpfc_sli_ring *pring;
3207 spin_lock_irq(&phba->hbalock);
3208 INIT_LIST_HEAD(&psli->mboxq);
3209 INIT_LIST_HEAD(&psli->mboxq_cmpl);
3210 /* Initialize list headers for txq and txcmplq as double linked lists */
3211 for (i = 0; i < psli->num_rings; i++) {
3212 pring = &psli->ring[i];
3214 pring->next_cmdidx = 0;
3215 pring->local_getidx = 0;
3217 INIT_LIST_HEAD(&pring->txq);
3218 INIT_LIST_HEAD(&pring->txcmplq);
3219 INIT_LIST_HEAD(&pring->iocb_continueq);
3220 INIT_LIST_HEAD(&pring->postbufq);
3222 spin_unlock_irq(&phba->hbalock);
3227 lpfc_sli_host_down(struct lpfc_vport *vport)
3229 LIST_HEAD(completions);
3230 struct lpfc_hba *phba = vport->phba;
3231 struct lpfc_sli *psli = &phba->sli;
3232 struct lpfc_sli_ring *pring;
3233 struct lpfc_iocbq *iocb, *next_iocb;
3235 unsigned long flags = 0;
3236 uint16_t prev_pring_flag;
3238 lpfc_cleanup_discovery_resources(vport);
3240 spin_lock_irqsave(&phba->hbalock, flags);
3241 for (i = 0; i < psli->num_rings; i++) {
3242 pring = &psli->ring[i];
3243 prev_pring_flag = pring->flag;
3244 if (pring->ringno == LPFC_ELS_RING) /* Only slow rings */
3245 pring->flag |= LPFC_DEFERRED_RING_EVENT;
3247 * Error everything on the txq since these iocbs have not been
3248 * given to the FW yet.
3250 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
3251 if (iocb->vport != vport)
3253 list_move_tail(&iocb->list, &completions);
3257 /* Next issue ABTS for everything on the txcmplq */
3258 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq,
3260 if (iocb->vport != vport)
3262 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3265 pring->flag = prev_pring_flag;
3268 spin_unlock_irqrestore(&phba->hbalock, flags);
3270 while (!list_empty(&completions)) {
3271 list_remove_head(&completions, iocb, struct lpfc_iocbq, list);
3273 if (!iocb->iocb_cmpl)
3274 lpfc_sli_release_iocbq(phba, iocb);
3276 iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
3277 iocb->iocb.un.ulpWord[4] = IOERR_SLI_DOWN;
3278 (iocb->iocb_cmpl) (phba, iocb, iocb);
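/*
 * Editorial summary (not original documentation): lpfc_sli_hba_down below is
 * the whole-adapter counterpart of lpfc_sli_host_down above. It flushes every
 * ring's txq, fails the flushed iocbs with IOSTAT_LOCAL_REJECT/IOERR_SLI_DOWN,
 * releases the deferred ELS buffers, cancels the mailbox timer, and completes
 * any active, pending or queued mailbox commands with MBX_NOT_FINISHED.
 */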
3285 lpfc_sli_hba_down(struct lpfc_hba *phba)
3287 LIST_HEAD(completions);
3288 struct lpfc_sli *psli = &phba->sli;
3289 struct lpfc_sli_ring *pring;
3290 struct lpfc_dmabuf *buf_ptr;
3292 struct lpfc_iocbq *iocb;
3295 unsigned long flags = 0;
3297 lpfc_hba_down_prep(phba);
3299 lpfc_fabric_abort_hba(phba);
3301 spin_lock_irqsave(&phba->hbalock, flags);
3302 for (i = 0; i < psli->num_rings; i++) {
3303 pring = &psli->ring[i];
3304 if (pring->ringno == LPFC_ELS_RING) /* Only slow rings */
3305 pring->flag |= LPFC_DEFERRED_RING_EVENT;
3308 * Error everything on the txq since these iocbs have not been
3309 * given to the FW yet.
3311 list_splice_init(&pring->txq, &completions);
3315 spin_unlock_irqrestore(&phba->hbalock, flags);
3317 while (!list_empty(&completions)) {
3318 list_remove_head(&completions, iocb, struct lpfc_iocbq, list);
3321 if (!iocb->iocb_cmpl)
3322 lpfc_sli_release_iocbq(phba, iocb);
3324 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
3325 cmd->un.ulpWord[4] = IOERR_SLI_DOWN;
3326 (iocb->iocb_cmpl) (phba, iocb, iocb);
3330 spin_lock_irqsave(&phba->hbalock, flags);
3331 list_splice_init(&phba->elsbuf, &completions);
3332 phba->elsbuf_cnt = 0;
3333 phba->elsbuf_prev_cnt = 0;
3334 spin_unlock_irqrestore(&phba->hbalock, flags);
3336 while (!list_empty(&completions)) {
3337 list_remove_head(&completions, buf_ptr,
3338 struct lpfc_dmabuf, list);
3339 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
3343 /* Return any active mbox cmds */
3344 del_timer_sync(&psli->mbox_tmo);
3345 spin_lock_irqsave(&phba->hbalock, flags);
3347 spin_lock(&phba->pport->work_port_lock);
3348 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
3349 spin_unlock(&phba->pport->work_port_lock);
3351 if (psli->mbox_active) {
3352 list_add_tail(&psli->mbox_active->list, &completions);
3353 psli->mbox_active = NULL;
3354 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
3357 /* Return any pending or completed mbox cmds */
3358 list_splice_init(&phba->sli.mboxq, &completions);
3359 list_splice_init(&phba->sli.mboxq_cmpl, &completions);
3360 INIT_LIST_HEAD(&psli->mboxq);
3361 INIT_LIST_HEAD(&psli->mboxq_cmpl);
3363 spin_unlock_irqrestore(&phba->hbalock, flags);
3365 while (!list_empty(&completions)) {
3366 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
3367 pmb->mb.mbxStatus = MBX_NOT_FINISHED;
3368 if (pmb->mbox_cmpl) {
3369 pmb->mbox_cmpl(phba,pmb);
3376 lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
3378 uint32_t *src = srcp;
3379 uint32_t *dest = destp;
3383 for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
3385 ldata = le32_to_cpu(ldata);
3393 lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3394 struct lpfc_dmabuf *mp)
3396 /* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up later */
3398 spin_lock_irq(&phba->hbalock);
3399 list_add_tail(&mp->list, &pring->postbufq);
3400 pring->postbufq_cnt++;
3401 spin_unlock_irq(&phba->hbalock);
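/*
 * Editorial note (added for clarity): buffers parked on the postbufq by
 * lpfc_sli_ringpostbuf_put are later identified either by the QUE_BUFTAG tag
 * returned from lpfc_sli_get_buffer_tag or by their physical address; the
 * lookup routines lpfc_sli_ring_taggedbuf_get and lpfc_sli_ringpostbuf_get
 * below recover the virtual address and unlink the entry from the queue.
 */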
3406 lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
3408 spin_lock_irq(&phba->hbalock);
3409 phba->buffer_tag_count++;
3411 * Always set the QUE_BUFTAG_BIT to distinguish this tag from
3412 * a tag assigned by HBQ.
3414 phba->buffer_tag_count |= QUE_BUFTAG_BIT;
3415 spin_unlock_irq(&phba->hbalock);
3416 return phba->buffer_tag_count;
3419 struct lpfc_dmabuf *
3420 lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3423 struct lpfc_dmabuf *mp, *next_mp;
3424 struct list_head *slp = &pring->postbufq;
3426 /* Search postbufq, from the beginning, looking for a match on tag */
3427 spin_lock_irq(&phba->hbalock);
3428 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
3429 if (mp->buffer_tag == tag) {
3430 list_del_init(&mp->list);
3431 pring->postbufq_cnt--;
3432 spin_unlock_irq(&phba->hbalock);
3437 spin_unlock_irq(&phba->hbalock);
3438 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3439 "0410 Cannot find virtual addr for buffer tag on "
3440 "ring %d Data x%lx x%p x%p x%x\n",
3441 pring->ringno, (unsigned long) tag,
3442 slp->next, slp->prev, pring->postbufq_cnt);
3447 struct lpfc_dmabuf *
3448 lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3451 struct lpfc_dmabuf *mp, *next_mp;
3452 struct list_head *slp = &pring->postbufq;
3454 /* Search postbufq, from the beginning, looking for a match on phys */
3455 spin_lock_irq(&phba->hbalock);
3456 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
3457 if (mp->phys == phys) {
3458 list_del_init(&mp->list);
3459 pring->postbufq_cnt--;
3460 spin_unlock_irq(&phba->hbalock);
3465 spin_unlock_irq(&phba->hbalock);
3466 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3467 "0410 Cannot find virtual addr for mapped buf on "
3468 "ring %d Data x%llx x%p x%p x%x\n",
3469 pring->ringno, (unsigned long long)phys,
3470 slp->next, slp->prev, pring->postbufq_cnt);
3475 lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3476 struct lpfc_iocbq *rspiocb)
3478 IOCB_t *irsp = &rspiocb->iocb;
3479 uint16_t abort_iotag, abort_context;
3480 struct lpfc_iocbq *abort_iocb;
3481 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
3485 if (irsp->ulpStatus) {
3486 abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
3487 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
3489 spin_lock_irq(&phba->hbalock);
3490 if (abort_iotag != 0 && abort_iotag <= phba->sli.last_iotag)
3491 abort_iocb = phba->sli.iocbq_lookup[abort_iotag];
3493 lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_SLI,
3494 "0327 Cannot abort els iocb %p "
3495 "with tag %x context %x, abort status %x, "
3497 abort_iocb, abort_iotag, abort_context,
3498 irsp->ulpStatus, irsp->un.ulpWord[4]);
3501 * make sure we have the right iocbq before taking it
3502 * off the txcmplq and trying to call its completion routine.
3505 abort_iocb->iocb.ulpContext != abort_context ||
3506 (abort_iocb->iocb_flag & LPFC_DRIVER_ABORTED) == 0)
3507 spin_unlock_irq(&phba->hbalock);
3509 list_del_init(&abort_iocb->list);
3510 pring->txcmplq_cnt--;
3511 spin_unlock_irq(&phba->hbalock);
3513 /* Firmware could still be in the process of DMAing the
3514 * payload, so don't free the data buffer now; flag it for deferred freeing. */
3517 abort_iocb->iocb_flag |= LPFC_DELAY_MEM_FREE;
3519 abort_iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
3520 abort_iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
3521 abort_iocb->iocb.un.ulpWord[4] = IOERR_SLI_ABORTED;
3522 (abort_iocb->iocb_cmpl)(phba, abort_iocb, abort_iocb);
3526 lpfc_sli_release_iocbq(phba, cmdiocb);
3531 lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3532 struct lpfc_iocbq *rspiocb)
3534 IOCB_t *irsp = &rspiocb->iocb;
3536 /* ELS cmd tag <ulpIoTag> completes */
3537 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
3538 "0133 Ignoring ELS cmd tag x%x completion Data: "
3540 irsp->ulpIoTag, irsp->ulpStatus,
3541 irsp->un.ulpWord[4], irsp->ulpTimeout);
3542 if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
3543 lpfc_ct_free_iocb(phba, cmdiocb);
3545 lpfc_els_free_iocb(phba, cmdiocb);
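/*
 * Editorial summary (not original documentation): lpfc_sli_issue_abort_iotag
 * below builds and issues an ABORT_XRI_CN (or CLOSE_XRI_CN when the link is
 * down) for the given command iocb, skipping commands that are themselves
 * aborts or are already being aborted. It is called with the hbalock held;
 * on IOCB_ERROR the caller is responsible for cleanup, since the routine no
 * longer completes the original iocb itself.
 */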
3550 lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3551 struct lpfc_iocbq *cmdiocb)
3553 struct lpfc_vport *vport = cmdiocb->vport;
3554 struct lpfc_iocbq *abtsiocbp;
3555 IOCB_t *icmd = NULL;
3556 IOCB_t *iabt = NULL;
3557 int retval = IOCB_ERROR;
3560 * There are certain command types we don't want to abort. And we
3561 * don't want to abort commands that are already in the process of being aborted.
3564 icmd = &cmdiocb->iocb;
3565 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
3566 icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
3567 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
3570 /* If we're unloading, don't abort iocb on the ELS ring, but change the
3571 * callback so that nothing happens when it finishes.
3573 if ((vport->load_flag & FC_UNLOADING) &&
3574 (pring->ringno == LPFC_ELS_RING)) {
3575 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
3576 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
3578 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
3579 goto abort_iotag_exit;
3582 /* issue ABTS for this IOCB based on iotag */
3583 abtsiocbp = __lpfc_sli_get_iocbq(phba);
3584 if (abtsiocbp == NULL)
3587 /* This signals the response to set the correct status
3588 * before calling the completion handler.
3590 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
3592 iabt = &abtsiocbp->iocb;
3593 iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
3594 iabt->un.acxri.abortContextTag = icmd->ulpContext;
3595 iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
3597 iabt->ulpClass = icmd->ulpClass;
3599 if (phba->link_state >= LPFC_LINK_UP)
3600 iabt->ulpCommand = CMD_ABORT_XRI_CN;
3602 iabt->ulpCommand = CMD_CLOSE_XRI_CN;
3604 abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
3606 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
3607 "0339 Abort xri x%x, original iotag x%x, "
3608 "abort cmd iotag x%x\n",
3609 iabt->un.acxri.abortContextTag,
3610 iabt->un.acxri.abortIoTag, abtsiocbp->iotag);
3611 retval = __lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0);
3615 * The caller of this routine should check for IOCB_ERROR
3616 * and handle it properly. This routine no longer removes the
3617 * iocb from the txcmplq or calls the completion handler on IOCB_ERROR.
3623 lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
3624 uint16_t tgt_id, uint64_t lun_id,
3625 lpfc_ctx_cmd ctx_cmd)
3627 struct lpfc_scsi_buf *lpfc_cmd;
3628 struct scsi_cmnd *cmnd;
3631 if (!(iocbq->iocb_flag & LPFC_IO_FCP))
3634 if (iocbq->vport != vport)
3637 lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
3638 cmnd = lpfc_cmd->pCmd;
3645 if ((cmnd->device->id == tgt_id) &&
3646 (cmnd->device->lun == lun_id))
3650 if (cmnd->device->id == tgt_id)
3657 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
3658 __FUNCTION__, ctx_cmd);
3666 lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
3667 lpfc_ctx_cmd ctx_cmd)
3669 struct lpfc_hba *phba = vport->phba;
3670 struct lpfc_iocbq *iocbq;
3673 for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
3674 iocbq = phba->sli.iocbq_lookup[i];
3676 if (lpfc_sli_validate_fcp_iocb (iocbq, vport, tgt_id, lun_id,
3685 lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3686 struct lpfc_iocbq *rspiocb)
3688 lpfc_sli_release_iocbq(phba, cmdiocb);
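/*
 * Editorial summary (not original documentation): lpfc_sli_abort_iocb below
 * walks the iotag lookup table and issues an abort (ABORT_XRI_CN, or
 * CLOSE_XRI_CN when the link is down) for every outstanding FCP iocb that
 * matches the vport and the LUN/target/host scope selected by abort_cmd;
 * lpfc_sli_sum_iocb above applies the same matching rules merely to count
 * them. The return value is the number of iocbs that could not be aborted.
 */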
3693 lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
3694 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd)
3696 struct lpfc_hba *phba = vport->phba;
3697 struct lpfc_iocbq *iocbq;
3698 struct lpfc_iocbq *abtsiocb;
3700 int errcnt = 0, ret_val = 0;
3703 for (i = 1; i <= phba->sli.last_iotag; i++) {
3704 iocbq = phba->sli.iocbq_lookup[i];
3706 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
3710 /* issue ABTS for this IOCB based on iotag */
3711 abtsiocb = lpfc_sli_get_iocbq(phba);
3712 if (abtsiocb == NULL) {
3718 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
3719 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
3720 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
3721 abtsiocb->iocb.ulpLe = 1;
3722 abtsiocb->iocb.ulpClass = cmd->ulpClass;
3723 abtsiocb->vport = phba->pport;
3725 if (lpfc_is_link_up(phba))
3726 abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
3728 abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
3730 /* Setup callback routine and issue the command. */
3731 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
3732 ret_val = lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0);
3733 if (ret_val == IOCB_ERROR) {
3734 lpfc_sli_release_iocbq(phba, abtsiocb);
3744 lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
3745 struct lpfc_iocbq *cmdiocbq,
3746 struct lpfc_iocbq *rspiocbq)
3748 wait_queue_head_t *pdone_q;
3749 unsigned long iflags;
3751 spin_lock_irqsave(&phba->hbalock, iflags);
3752 cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
3753 if (cmdiocbq->context2 && rspiocbq)
3754 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
3755 &rspiocbq->iocb, sizeof(IOCB_t));
3757 pdone_q = cmdiocbq->context_un.wait_queue;
3760 spin_unlock_irqrestore(&phba->hbalock, iflags);
3765 * Issue the caller's iocb and wait for its completion, but no longer than the
3766 * caller's timeout. Note that the LPFC_IO_WAKE flag is cleared from iocb_flag
3767 * before the lpfc_sli_issue_iocb call since the wake routine sets it and by
3768 * definition this is a wait function.
3772 lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
3773 struct lpfc_sli_ring *pring,
3774 struct lpfc_iocbq *piocb,
3775 struct lpfc_iocbq *prspiocbq,
3778 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
3779 long timeleft, timeout_req = 0;
3780 int retval = IOCB_SUCCESS;
3784 * If the caller has provided a response iocbq buffer, then context2
3785 * must be NULL or it is an error.
3788 if (piocb->context2)
3790 piocb->context2 = prspiocbq;
3793 piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
3794 piocb->context_un.wait_queue = &done_q;
3795 piocb->iocb_flag &= ~LPFC_IO_WAKE;
3797 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
3798 creg_val = readl(phba->HCregaddr);
3799 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
3800 writel(creg_val, phba->HCregaddr);
3801 readl(phba->HCregaddr); /* flush */
3804 retval = lpfc_sli_issue_iocb(phba, pring, piocb, 0);
3805 if (retval == IOCB_SUCCESS) {
3806 timeout_req = timeout * HZ;
3807 timeleft = wait_event_timeout(done_q,
3808 piocb->iocb_flag & LPFC_IO_WAKE,
3811 if (piocb->iocb_flag & LPFC_IO_WAKE) {
3812 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3813 "0331 IOCB wake signaled\n");
3814 } else if (timeleft == 0) {
3815 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3816 "0338 IOCB wait timeout error - no "
3817 "wake response Data x%x\n", timeout);
3818 retval = IOCB_TIMEDOUT;
3820 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3821 "0330 IOCB wake NOT set, "
3823 timeout, (timeleft / jiffies));
3824 retval = IOCB_TIMEDOUT;
3827 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3828 ":0332 IOCB wait issue failed, Data x%x\n",
3830 retval = IOCB_ERROR;
3833 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
3834 creg_val = readl(phba->HCregaddr);
3835 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
3836 writel(creg_val, phba->HCregaddr);
3837 readl(phba->HCregaddr); /* flush */
3841 piocb->context2 = NULL;
3843 piocb->context_un.wait_queue = NULL;
3844 piocb->iocb_cmpl = NULL;
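/*
 * Editorial usage sketch for lpfc_sli_issue_iocb_wait above (added for
 * illustration; the 30 second timeout is arbitrary and NULL checks are
 * omitted):
 *
 *	struct lpfc_iocbq *cmd = lpfc_sli_get_iocbq(phba);
 *	struct lpfc_iocbq *rsp = lpfc_sli_get_iocbq(phba);
 *	int rc;
 *
 *	... fill in cmd->iocb and cmd->vport ...
 *	rc = lpfc_sli_issue_iocb_wait(phba, pring, cmd, rsp, 30);
 *	if (rc == IOCB_SUCCESS)
 *		... rsp->iocb now holds the response, check ulpStatus ...
 *	else if (rc == IOCB_TIMEDOUT)
 *		... the command may still complete later, so do not free it
 *		    blindly ...
 *
 * The response is copied into rsp (passed via context2) by
 * lpfc_sli_wake_iocb_wait; context2 must be NULL on entry.
 */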
3849 lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
3852 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
3856 /* The caller must leave context1 empty. */
3857 if (pmboxq->context1)
3858 return MBX_NOT_FINISHED;
3860 /* Set up the wake routine as the mailbox completion callback */
3861 pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
3862 /* setup context field to pass wait_queue pointer to wake function */
3863 pmboxq->context1 = &done_q;
3865 /* now issue the command */
3866 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
3868 if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
3869 wait_event_interruptible_timeout(done_q,
3870 pmboxq->mbox_flag & LPFC_MBX_WAKE,
3873 spin_lock_irqsave(&phba->hbalock, flag);
3874 pmboxq->context1 = NULL;
3876 * If the LPFC_MBX_WAKE flag is set, the mailbox completed;
3877 * otherwise do not free the resources here.
3879 if (pmboxq->mbox_flag & LPFC_MBX_WAKE)
3880 retval = MBX_SUCCESS;
3882 retval = MBX_TIMEOUT;
3883 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
3885 spin_unlock_irqrestore(&phba->hbalock, flag);
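/*
 * Editorial summary (not original documentation): lpfc_sli_flush_mbox_queue
 * below busy-waits, apparently in 1 ms steps for up to LPFC_MBOX_TMO seconds,
 * for any active mailbox command to finish, servicing completed mailbox
 * events along the way; it returns 1 if a command is still active when the
 * wait expires and 0 otherwise.
 */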
3892 lpfc_sli_flush_mbox_queue(struct lpfc_hba * phba)
3894 struct lpfc_vport *vport = phba->pport;
3898 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE && !vport->stopped) {
3899 if (i++ > LPFC_MBOX_TMO * 1000)
3903 * Call lpfc_sli_handle_mb_event only if a mailbox cmd
3904 * did finish. This way we won't get the misleading
3905 * "Stray Mailbox Interrupt" message.
3907 spin_lock_irq(&phba->hbalock);
3908 ha_copy = phba->work_ha;
3909 phba->work_ha &= ~HA_MBATT;
3910 spin_unlock_irq(&phba->hbalock);
3912 if (ha_copy & HA_MBATT)
3913 if (lpfc_sli_handle_mb_event(phba) == 0)
3919 return (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) ? 1 : 0;
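/*
 * Editorial summary (not original documentation): lpfc_intr_handler below is
 * the adapter's interrupt service routine. It reads and clears the host
 * attention register, defers link attention, error attention and slow-ring
 * (ELS) work to the worker thread, completes or restarts mailbox commands
 * directly, and handles FCP (and, when configured, extra-ring) events inline
 * through lpfc_sli_handle_fast_ring_event.
 */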
3923 lpfc_intr_handler(int irq, void *dev_id)
3925 struct lpfc_hba *phba;
3927 uint32_t work_ha_copy;
3928 unsigned long status;
3931 MAILBOX_t *mbox, *pmbox;
3932 struct lpfc_vport *vport;
3933 struct lpfc_nodelist *ndlp;
3934 struct lpfc_dmabuf *mp;
3939 * Get the driver's phba structure from the dev_id and
3940 * assume the HBA is not interrupting.
3942 phba = (struct lpfc_hba *) dev_id;
3944 if (unlikely(!phba))
3947 /* If the pci channel is offline, ignore all the interrupts. */
3948 if (unlikely(pci_channel_offline(phba->pcidev)))
3951 phba->sli.slistat.sli_intr++;
3954 * Call the HBA to see if it is interrupting. If not, don't claim the interrupt.
3958 /* Ignore all interrupts during initialization. */
3959 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
3963 * Read the host attention register to determine the interrupt source.
3964 * Clear the attention sources, except Error Attention (to
3965 * preserve status) and Link Attention.
3967 spin_lock(&phba->hbalock);
3968 ha_copy = readl(phba->HAregaddr);
3969 /* If somebody is waiting to handle an eratt, don't process it
3970 * here. The brdkill function will do this.
3972 if (phba->link_flag & LS_IGNORE_ERATT)
3973 ha_copy &= ~HA_ERATT;
3974 writel((ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
3975 readl(phba->HAregaddr); /* flush */
3976 spin_unlock(&phba->hbalock);
3978 if (unlikely(!ha_copy))
3981 work_ha_copy = ha_copy & phba->work_ha_mask;
3983 if (unlikely(work_ha_copy)) {
3984 if (work_ha_copy & HA_LATT) {
3985 if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
3987 * Turn off Link Attention interrupts
3988 * until CLEAR_LA done
3990 spin_lock(&phba->hbalock);
3991 phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
3992 control = readl(phba->HCregaddr);
3993 control &= ~HC_LAINT_ENA;
3994 writel(control, phba->HCregaddr);
3995 readl(phba->HCregaddr); /* flush */
3996 spin_unlock(&phba->hbalock);
3999 work_ha_copy &= ~HA_LATT;
4002 if (work_ha_copy & ~(HA_ERATT|HA_MBATT|HA_LATT)) {
4004 * Turn off Slow Rings interrupts, LPFC_ELS_RING is
4005 * the only slow ring.
4007 status = (work_ha_copy &
4008 (HA_RXMASK << (4*LPFC_ELS_RING)));
4009 status >>= (4*LPFC_ELS_RING);
4010 if (status & HA_RXMASK) {
4011 spin_lock(&phba->hbalock);
4012 control = readl(phba->HCregaddr);
4014 lpfc_debugfs_slow_ring_trc(phba,
4015 "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x",
4017 (uint32_t)phba->sli.slistat.sli_intr);
4019 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
4020 lpfc_debugfs_slow_ring_trc(phba,
4022 "pwork:x%x hawork:x%x wait:x%x",
4023 phba->work_ha, work_ha_copy,
4024 (uint32_t)((unsigned long)
4028 ~(HC_R0INT_ENA << LPFC_ELS_RING);
4029 writel(control, phba->HCregaddr);
4030 readl(phba->HCregaddr); /* flush */
4033 lpfc_debugfs_slow_ring_trc(phba,
4034 "ISR slow ring: pwork:"
4035 "x%x hawork:x%x wait:x%x",
4036 phba->work_ha, work_ha_copy,
4037 (uint32_t)((unsigned long)
4040 spin_unlock(&phba->hbalock);
4044 if (work_ha_copy & HA_ERATT) {
4045 phba->link_state = LPFC_HBA_ERROR;
4047 * There was a link/board error. Read the
4048 * status register to retrieve the error event.
4051 phba->sli.slistat.err_attn_event++;
4052 /* Save status info */
4053 phba->work_hs = readl(phba->HSregaddr);
4054 phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
4055 phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
4057 /* Clear Chip error bit */
4058 writel(HA_ERATT, phba->HAregaddr);
4059 readl(phba->HAregaddr); /* flush */
4060 phba->pport->stopped = 1;
4063 if ((work_ha_copy & HA_MBATT) &&
4064 (phba->sli.mbox_active)) {
4065 pmb = phba->sli.mbox_active;
4067 mbox = &phba->slim2p->mbx;
4070 /* First check out the status word */
4071 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
4072 if (pmbox->mbxOwner != OWN_HOST) {
4074 * Stray Mailbox Interrupt, mbxCommand <cmd>
4075 * mbxStatus <status>
4077 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX |
4079 "(%d):0304 Stray Mailbox "
4080 "Interrupt mbxCommand x%x "
4082 (vport ? vport->vpi : 0),
4086 phba->last_completion_time = jiffies;
4087 del_timer_sync(&phba->sli.mbox_tmo);
4089 phba->sli.mbox_active = NULL;
4090 if (pmb->mbox_cmpl) {
4091 lpfc_sli_pcimem_bcopy(mbox, pmbox,
4094 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
4095 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
4097 lpfc_debugfs_disc_trc(vport,
4098 LPFC_DISC_TRC_MBOX_VPORT,
4099 "MBOX dflt rpi: : status:x%x rpi:x%x",
4100 (uint32_t)pmbox->mbxStatus,
4101 pmbox->un.varWords[0], 0);
4103 if ( !pmbox->mbxStatus) {
4104 mp = (struct lpfc_dmabuf *)
4106 ndlp = (struct lpfc_nodelist *)
4109 /* Reg_LOGIN of dflt RPI was successful.
4110 * Now let's get rid of the RPI using the
4113 lpfc_unreg_login(phba, vport->vpi,
4114 pmbox->un.varWords[0], pmb);
4115 pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
4117 pmb->context2 = ndlp;
4119 spin_lock(&phba->hbalock);
4120 phba->sli.sli_flag &=
4121 ~LPFC_SLI_MBOX_ACTIVE;
4122 spin_unlock(&phba->hbalock);
4123 goto send_current_mbox;
4126 spin_lock(&phba->pport->work_port_lock);
4127 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
4128 spin_unlock(&phba->pport->work_port_lock);
4129 lpfc_mbox_cmpl_put(phba, pmb);
4131 if ((work_ha_copy & HA_MBATT) &&
4132 (phba->sli.mbox_active == NULL)) {
4134 spin_lock(&phba->hbalock);
4135 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
4136 pmb = lpfc_mbox_get(phba);
4137 spin_unlock(&phba->hbalock);
4139 /* Process next mailbox command if there is one */
4141 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
4142 if (rc == MBX_NOT_FINISHED) {
4143 pmb->mb.mbxStatus = MBX_NOT_FINISHED;
4144 lpfc_mbox_cmpl_put(phba, pmb);
4145 goto send_next_mbox;
4151 spin_lock(&phba->hbalock);
4152 phba->work_ha |= work_ha_copy;
4153 if (phba->work_wait)
4154 lpfc_worker_wake_up(phba);
4155 spin_unlock(&phba->hbalock);
4158 ha_copy &= ~(phba->work_ha_mask);
4161 * Process all events on FCP ring. Take the optimized path for
4162 * FCP IO. Any other IO is slow path and is handled by
4163 * the worker thread.
4165 status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
4166 status >>= (4*LPFC_FCP_RING);
4167 if (status & HA_RXMASK)
4168 lpfc_sli_handle_fast_ring_event(phba,
4169 &phba->sli.ring[LPFC_FCP_RING],
4172 if (phba->cfg_multi_ring_support == 2) {
4174 * Process all events on extra ring. Take the optimized path
4175 * for extra ring IO. Any other IO is slow path and is handled
4176 * by the worker thread.
4178 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
4179 status >>= (4*LPFC_EXTRA_RING);
4180 if (status & HA_RXMASK) {
4181 lpfc_sli_handle_fast_ring_event(phba,
4182 &phba->sli.ring[LPFC_EXTRA_RING],
4188 } /* lpfc_intr_handler */