#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
+#include <linux/slab.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
+#include <linux/aer.h>
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
uint32_t);
static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
- uint8_t *, uint32_t *);
-
+ uint8_t *, uint32_t *);
+static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
+ struct lpfc_iocbq *);
+static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
+ struct hbq_dmabuf *);
static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
{
return -ENOMEM;
/* set consumption flag every once in a while */
if (!((q->host_index + 1) % LPFC_RELEASE_NOTIFICATION_INTERVAL))
- bf_set(lpfc_wqe_gen_wqec, &wqe->generic, 1);
+ bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
struct lpfc_eqe *eqe = q->qe[q->hba_index].eqe;
/* If the next EQE is not valid then we are done */
- if (!bf_get(lpfc_eqe_valid, eqe))
+ if (!bf_get_le32(lpfc_eqe_valid, eqe))
return NULL;
/* If the host has not yet processed the next entry then we are done */
if (((q->hba_index + 1) % q->entry_count) == q->host_index)
/* while there are valid entries */
while (q->hba_index != q->host_index) {
temp_eqe = q->qe[q->host_index].eqe;
- bf_set(lpfc_eqe_valid, temp_eqe, 0);
+ bf_set_le32(lpfc_eqe_valid, temp_eqe, 0);
released++;
q->host_index = ((q->host_index + 1) % q->entry_count);
}
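/* The _le32 bitfield accessors are used above because EQ entries are
 * DMAed into host memory in little-endian form; the plain
 * bf_get()/bf_set() variants assume native byte order and would
 * mis-read the valid bit on big-endian hosts.
 */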
bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
bf_set(lpfc_eqcq_doorbell_eqid, &doorbell, q->queue_id);
writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
+ /* PCI read to flush PCI pipeline on re-arming for INTx mode */
+ if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
+ readl(q->phba->sli4_hba.EQCQDBregaddr);
return released;
}
struct lpfc_cqe *cqe;
/* If the next CQE is not valid then we are done */
- if (!bf_get(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
+ if (!bf_get_le32(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
return NULL;
/* If the host has not yet processed the next entry then we are done */
if (((q->hba_index + 1) % q->entry_count) == q->host_index)
/* while there are valid entries */
while (q->hba_index != q->host_index) {
temp_qe = q->qe[q->host_index].cqe;
- bf_set(lpfc_cqe_valid, temp_qe, 0);
+ bf_set_le32(lpfc_cqe_valid, temp_qe, 0);
released++;
q->host_index = ((q->host_index + 1) % q->entry_count);
}
struct lpfc_iocbq * iocbq = NULL;
list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
+
+ if (iocbq)
+ phba->iocb_cnt++;
+ if (phba->iocb_cnt > phba->iocb_max)
+ phba->iocb_max = phba->iocb_cnt;
return iocbq;
}
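/* iocb_cnt/iocb_max appear to be simple allocation statistics: the
 * number of iocbqs currently outstanding and the high-water mark; the
 * matching decrement is in __lpfc_sli_release_iocbq() below.
 */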
*
 * Returns sglq pointer = success, NULL = Failure.
**/
-static struct lpfc_sglq *
+struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
uint16_t adj_xri;
struct lpfc_sglq *sglq = NULL;
uint16_t adj_xri;
list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list);
+ if (!sglq)
+ return NULL;
adj_xri = sglq->sli4_xritag - phba->sli4_hba.max_cfg_param.xri_base;
phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq;
+ sglq->state = SGL_ALLOCATED;
return sglq;
}
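/* A sketch of the sglq lifecycle implied by the SGL_* states used in
 * this section (state names taken from the surrounding code):
 *
 *   SGL_FREED (on lpfc_sgl_list)
 *     -> SGL_ALLOCATED (tracked via lpfc_sglq_active_list)
 *       -> back to SGL_FREED on a clean completion, or held on
 *          lpfc_abts_els_sgl_list while the exchange is busy until the
 *          port reports the XRI aborted (SGL_XRI_ABORTED).
 */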
{
struct lpfc_sglq *sglq;
size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
- unsigned long iflag;
+ unsigned long iflag = 0;
+ struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
if (iocbq->sli4_xritag == NO_XRI)
sglq = NULL;
else
sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_xritag);
if (sglq) {
- if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED
- || ((iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
- && (iocbq->iocb.un.ulpWord[4]
- == IOERR_SLI_ABORTED))) {
+ if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
+ (sglq->state != SGL_XRI_ABORTED)) {
spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock,
iflag);
list_add(&sglq->list,
&phba->sli4_hba.lpfc_abts_els_sgl_list);
spin_unlock_irqrestore(
&phba->sli4_hba.abts_sgl_list_lock, iflag);
- } else
+ } else {
+ sglq->state = SGL_FREED;
list_add(&sglq->list, &phba->sli4_hba.lpfc_sgl_list);
+
+ /* Check if TXQ queue needs to be serviced */
+ if (pring->txq_cnt)
+ lpfc_worker_wake_up(phba);
+ }
}
list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}
+
/**
* __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
* @phba: Pointer to HBA context object.
__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
phba->__lpfc_sli_release_iocbq(phba, iocbq);
+ phba->iocb_cnt--;
}
/**
case DSSCMD_IWRITE64_CX:
case DSSCMD_IREAD64_CR:
case DSSCMD_IREAD64_CX:
- case DSSCMD_INVALIDATE_DEK:
- case DSSCMD_SET_KEK:
- case DSSCMD_GET_KEK_ID:
- case DSSCMD_GEN_XFER:
type = LPFC_SOL_IOCB;
break;
case CMD_ABORT_XRI_CN:
case CMD_CLOSE_XRI_CX:
case CMD_XRI_ABORTED_CX:
case CMD_ABORT_MXRI64_CN:
+ case CMD_XMIT_BLS_RSP64_CX:
type = LPFC_ABORT_IOCB;
break;
case CMD_RCV_SEQUENCE_CX:
struct lpfc_iocbq *piocb)
{
list_add_tail(&piocb->list, &pring->txcmplq);
+ piocb->iocb_flag |= LPFC_IO_ON_Q;
pring->txcmplq_cnt++;
+ if (pring->txcmplq_cnt > pring->txcmplq_max)
+ pring->txcmplq_max = pring->txcmplq_cnt;
+
if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
(piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
(piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
* the txq, the function returns first iocb in the list after
* removing the iocb from the list, else it returns NULL.
**/
-static struct lpfc_iocbq *
+struct lpfc_iocbq *
lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
struct lpfc_iocbq *cmd_iocb;
} else
spin_unlock_irq(&phba->hbalock);
- lpfc_printf_log(phba, KERN_ERR,LOG_SLI,
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
"0318 Failed to allocate IOTAG.last IOTAG is %d\n",
psli->last_iotag);
/* HBQ for ELS and CT traffic. */
static struct lpfc_hbq_init lpfc_els_hbq = {
.rn = 1,
- .entry_count = 200,
+ .entry_count = 256,
.mask_count = 0,
.profile = 0,
.ring_mask = (1 << LPFC_ELS_RING),
int
lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
{
- return(lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
- lpfc_hbq_defs[qno]->add_count));
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ return 0;
+ else
+ return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
+ lpfc_hbq_defs[qno]->add_count);
}
/**
static int
lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
{
- return(lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
- lpfc_hbq_defs[qno]->init_count));
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
+ lpfc_hbq_defs[qno]->entry_count);
+ else
+ return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
+ lpfc_hbq_defs[qno]->init_count);
}
/**
case MBX_INIT_VPI:
case MBX_INIT_VFI:
case MBX_RESUME_RPI:
+ case MBX_READ_EVENT_LOG_STATUS:
+ case MBX_READ_EVENT_LOG:
+ case MBX_SECURITY_MGMT:
+ case MBX_AUTH_PORT:
ret = mbxCommand;
break;
default:
void
lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
+ struct lpfc_vport *vport = pmb->vport;
struct lpfc_dmabuf *mp;
+ struct lpfc_nodelist *ndlp;
+ struct Scsi_Host *shost;
uint16_t rpi, vpi;
int rc;
}
if ((pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) &&
- (phba->sli_rev == LPFC_SLI_REV4))
+ (phba->sli_rev == LPFC_SLI_REV4) &&
+ (pmb->u.mb.un.varUnregLogin.rsvd1 == 0x0))
lpfc_sli4_free_rpi(phba, pmb->u.mb.un.varUnregLogin.rpi);
/*
return;
}
+ if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
+ !(phba->pport->load_flag & FC_UNLOADING) &&
+ !pmb->u.mb.mbxStatus) {
+ shost = lpfc_shost_from_vport(vport);
+ spin_lock_irq(shost->host_lock);
+ vport->vpi_state |= LPFC_VPI_REGISTERED;
+ vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
+ spin_unlock_irq(shost->host_lock);
+ }
+
+ if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
+ ndlp = (struct lpfc_nodelist *)pmb->context2;
+ lpfc_nlp_put(ndlp);
+ pmb->context2 = NULL;
+ }
+
+ /* Check security permission status on INIT_LINK mailbox command */
+ if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
+ (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "2860 SLI authentication is required "
+ "for INIT_LINK but has not done yet\n");
+
if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
lpfc_sli4_mbox_cmd_free(phba, pmb);
else
*/
if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
MBX_SHUTDOWN) {
- /* Unknow mailbox command compl */
+ /* Unknown mailbox command compl */
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
"(%d):0323 Unknown Mailbox command "
"x%x (x%x) Cmpl\n",
if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
(irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
(irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
- Rctl = FC_ELS_REQ;
- Type = FC_ELS_DATA;
+ Rctl = FC_RCTL_ELS_REQ;
+ Type = FC_TYPE_ELS;
} else {
w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
Rctl = w5p->hcsw.Rctl;
if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
(irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
- Rctl = FC_ELS_REQ;
- Type = FC_ELS_DATA;
+ Rctl = FC_RCTL_ELS_REQ;
+ Type = FC_TYPE_ELS;
w5p->hcsw.Rctl = Rctl;
w5p->hcsw.Type = Type;
}
if (iotag != 0 && iotag <= phba->sli.last_iotag) {
cmd_iocb = phba->sli.iocbq_lookup[iotag];
list_del_init(&cmd_iocb->list);
- pring->txcmplq_cnt--;
+ if (cmd_iocb->iocb_flag & LPFC_IO_ON_Q) {
+ pring->txcmplq_cnt--;
+ cmd_iocb->iocb_flag &= ~LPFC_IO_ON_Q;
+ }
return cmd_iocb;
}
if (iotag != 0 && iotag <= phba->sli.last_iotag) {
cmd_iocb = phba->sli.iocbq_lookup[iotag];
list_del_init(&cmd_iocb->list);
- pring->txcmplq_cnt--;
+ if (cmd_iocb->iocb_flag & LPFC_IO_ON_Q) {
+ cmd_iocb->iocb_flag &= ~LPFC_IO_ON_Q;
+ pring->txcmplq_cnt--;
+ }
return cmd_iocb;
}
* All other are passed to the completion callback.
*/
if (pring->ringno == LPFC_ELS_RING) {
- if (cmdiocbp->iocb_flag & LPFC_DRIVER_ABORTED) {
+ if ((phba->sli_rev < LPFC_SLI_REV4) &&
+ (cmdiocbp->iocb_flag &
+ LPFC_DRIVER_ABORTED)) {
+ spin_lock_irqsave(&phba->hbalock,
+ iflag);
cmdiocbp->iocb_flag &=
~LPFC_DRIVER_ABORTED;
+ spin_unlock_irqrestore(&phba->hbalock,
+ iflag);
saveq->iocb.ulpStatus =
IOSTAT_LOCAL_REJECT;
saveq->iocb.un.ulpWord[4] =
* of DMAing payload, so don't free data
* buffer till after a hbeat.
*/
+ spin_lock_irqsave(&phba->hbalock,
+ iflag);
saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
+ spin_unlock_irqrestore(&phba->hbalock,
+ iflag);
+ }
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ if (saveq->iocb_flag &
+ LPFC_EXCHANGE_BUSY) {
+ /* Set cmdiocb flag for the
+ * exchange busy so sgl (xri)
+ * will not be released until
+ * the abort xri is received
+ * from hba.
+ */
+ spin_lock_irqsave(
+ &phba->hbalock, iflag);
+ cmdiocbp->iocb_flag |=
+ LPFC_EXCHANGE_BUSY;
+ spin_unlock_irqrestore(
+ &phba->hbalock, iflag);
+ }
+ if (cmdiocbp->iocb_flag &
+ LPFC_DRIVER_ABORTED) {
+ /*
+ * Clear LPFC_DRIVER_ABORTED
+ * bit in case it was driver
+ * initiated abort.
+ */
+ spin_lock_irqsave(
+ &phba->hbalock, iflag);
+ cmdiocbp->iocb_flag &=
+ ~LPFC_DRIVER_ABORTED;
+ spin_unlock_irqrestore(
+ &phba->hbalock, iflag);
+ cmdiocbp->iocb.ulpStatus =
+ IOSTAT_LOCAL_REJECT;
+ cmdiocbp->iocb.un.ulpWord[4] =
+ IOERR_ABORT_REQUESTED;
+ /*
+ * For SLI4, irsiocb contains
+ * NO_XRI in sli_xritag, it
+ * shall not affect releasing
+ * sgl (xri) process.
+ */
+ saveq->iocb.ulpStatus =
+ IOSTAT_LOCAL_REJECT;
+ saveq->iocb.un.ulpWord[4] =
+ IOERR_SLI_ABORTED;
+ spin_lock_irqsave(
+ &phba->hbalock, iflag);
+ saveq->iocb_flag |=
+ LPFC_DELAY_MEM_FREE;
+ spin_unlock_irqrestore(
+ &phba->hbalock, iflag);
+ }
}
}
(cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
return;
}
-/**
- * lpfc_sli_poll_fcp_ring - Handle FCP ring completion in polling mode
- * @phba: Pointer to HBA context object.
- *
- * This function is called from lpfc_queuecommand, lpfc_poll_timeout,
- * lpfc_abort_handler and lpfc_slave_configure when FCP_RING_POLLING
- * is enabled.
- *
- * The caller does not hold any lock.
- * The function processes each response iocb in the response ring until it
- * finds an iocb with LE bit set and chains all the iocbs upto the iocb with
- * LE bit set. The function will call the completion handler of the command iocb
- * if the response iocb indicates a completion for a command iocb or it is
- * an abort completion.
- **/
-void lpfc_sli_poll_fcp_ring(struct lpfc_hba *phba)
-{
- struct lpfc_sli *psli = &phba->sli;
- struct lpfc_sli_ring *pring = &psli->ring[LPFC_FCP_RING];
- IOCB_t *irsp = NULL;
- IOCB_t *entry = NULL;
- struct lpfc_iocbq *cmdiocbq = NULL;
- struct lpfc_iocbq rspiocbq;
- struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
- uint32_t status;
- uint32_t portRspPut, portRspMax;
- int type;
- uint32_t rsp_cmpl = 0;
- uint32_t ha_copy;
- unsigned long iflags;
-
- pring->stats.iocb_event++;
-
- /*
- * The next available response entry should never exceed the maximum
- * entries. If it does, treat it as an adapter hardware error.
- */
- portRspMax = pring->numRiocb;
- portRspPut = le32_to_cpu(pgp->rspPutInx);
- if (unlikely(portRspPut >= portRspMax)) {
- lpfc_sli_rsp_pointers_error(phba, pring);
- return;
- }
-
- rmb();
- while (pring->rspidx != portRspPut) {
- entry = lpfc_resp_iocb(phba, pring);
- if (++pring->rspidx >= portRspMax)
- pring->rspidx = 0;
-
- lpfc_sli_pcimem_bcopy((uint32_t *) entry,
- (uint32_t *) &rspiocbq.iocb,
- phba->iocb_rsp_size);
- irsp = &rspiocbq.iocb;
- type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
- pring->stats.iocb_rsp++;
- rsp_cmpl++;
-
- if (unlikely(irsp->ulpStatus)) {
- /* Rsp ring <ringno> error: IOCB */
- lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
- "0326 Rsp Ring %d error: IOCB Data: "
- "x%x x%x x%x x%x x%x x%x x%x x%x\n",
- pring->ringno,
- irsp->un.ulpWord[0],
- irsp->un.ulpWord[1],
- irsp->un.ulpWord[2],
- irsp->un.ulpWord[3],
- irsp->un.ulpWord[4],
- irsp->un.ulpWord[5],
- *(uint32_t *)&irsp->un1,
- *((uint32_t *)&irsp->un1 + 1));
- }
-
- switch (type) {
- case LPFC_ABORT_IOCB:
- case LPFC_SOL_IOCB:
- /*
- * Idle exchange closed via ABTS from port. No iocb
- * resources need to be recovered.
- */
- if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
- lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
- "0314 IOCB cmd 0x%x "
- "processed. Skipping "
- "completion",
- irsp->ulpCommand);
- break;
- }
-
- spin_lock_irqsave(&phba->hbalock, iflags);
- cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
- &rspiocbq);
- spin_unlock_irqrestore(&phba->hbalock, iflags);
- if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) {
- (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
- &rspiocbq);
- }
- break;
- default:
- if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
- char adaptermsg[LPFC_MAX_ADPTMSG];
- memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
- memcpy(&adaptermsg[0], (uint8_t *) irsp,
- MAX_MSG_DATA);
- dev_warn(&((phba->pcidev)->dev),
- "lpfc%d: %s\n",
- phba->brd_no, adaptermsg);
- } else {
- /* Unknown IOCB command */
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "0321 Unknown IOCB command "
- "Data: x%x, x%x x%x x%x x%x\n",
- type, irsp->ulpCommand,
- irsp->ulpStatus,
- irsp->ulpIoTag,
- irsp->ulpContext);
- }
- break;
- }
-
- /*
- * The response IOCB has been processed. Update the ring
- * pointer in SLIM. If the port response put pointer has not
- * been updated, sync the pgp->rspPutInx and fetch the new port
- * response put pointer.
- */
- writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
-
- if (pring->rspidx == portRspPut)
- portRspPut = le32_to_cpu(pgp->rspPutInx);
- }
-
- ha_copy = readl(phba->HAregaddr);
- ha_copy >>= (LPFC_FCP_RING * 4);
-
- if ((rsp_cmpl > 0) && (ha_copy & HA_R0RE_REQ)) {
- spin_lock_irqsave(&phba->hbalock, iflags);
- pring->stats.iocb_rsp_full++;
- status = ((CA_R0ATT | CA_R0RE_RSP) << (LPFC_FCP_RING * 4));
- writel(status, phba->CAregaddr);
- readl(phba->CAregaddr);
- spin_unlock_irqrestore(&phba->hbalock, iflags);
- }
- if ((ha_copy & HA_R0CE_RSP) &&
- (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
- spin_lock_irqsave(&phba->hbalock, iflags);
- pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
- pring->stats.iocb_cmd_empty++;
-
- /* Force update of the local copy of cmdGetInx */
- pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
- lpfc_sli_resume_iocb(phba, pring);
-
- if ((pring->lpfc_sli_cmd_available))
- (pring->lpfc_sli_cmd_available) (phba, pring);
-
- spin_unlock_irqrestore(&phba->hbalock, iflags);
- }
-
- return;
-}
/**
* lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
* an abort completion. The function will call lpfc_sli_process_unsol_iocb
* function if this is an unsolicited iocb.
* This routine presumes LPFC_FCP_RING handling and doesn't bother
- * to check it explicitly. This function always returns 1.
- **/
-static int
+ * to check it explicitly.
+ **/
+int
lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
struct lpfc_sli_ring *pring, uint32_t mask)
{
spin_unlock_irqrestore(&phba->hbalock, iflag);
return 1;
}
+ if (phba->fcp_ring_in_use) {
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ return 1;
+ } else
+ phba->fcp_ring_in_use = 1;
rmb();
while (pring->rspidx != portRspPut) {
cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
&rspiocbq);
- if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) {
- if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
- (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
- &rspiocbq);
- } else {
- spin_unlock_irqrestore(&phba->hbalock,
- iflag);
- (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
- &rspiocbq);
- spin_lock_irqsave(&phba->hbalock,
- iflag);
- }
+ if (unlikely(!cmdiocbq))
+ break;
+ if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
+ cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
+ if (cmdiocbq->iocb_cmpl) {
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
+ &rspiocbq);
+ spin_lock_irqsave(&phba->hbalock, iflag);
}
break;
case LPFC_UNSOL_IOCB:
}
+ phba->fcp_ring_in_use = 0;
spin_unlock_irqrestore(&phba->hbalock, iflag);
return rc;
}
struct lpfc_sli_ring *pring, uint32_t mask)
{
struct lpfc_iocbq *irspiocbq;
+ struct hbq_dmabuf *dmabuf;
+ struct lpfc_cq_event *cq_event;
unsigned long iflag;
- while (!list_empty(&phba->sli4_hba.sp_rspiocb_work_queue)) {
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
/* Get the response iocb from the head of work queue */
spin_lock_irqsave(&phba->hbalock, iflag);
- list_remove_head(&phba->sli4_hba.sp_rspiocb_work_queue,
- irspiocbq, struct lpfc_iocbq, list);
+ list_remove_head(&phba->sli4_hba.sp_queue_event,
+ cq_event, struct lpfc_cq_event, list);
spin_unlock_irqrestore(&phba->hbalock, iflag);
- /* Process the response iocb */
- lpfc_sli_sp_handle_rspiocb(phba, pring, irspiocbq);
+
+ switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
+ case CQE_CODE_COMPL_WQE:
+ irspiocbq = container_of(cq_event, struct lpfc_iocbq,
+ cq_event);
+ /* Translate ELS WCQE to response IOCBQ */
+ irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba,
+ irspiocbq);
+ if (irspiocbq)
+ lpfc_sli_sp_handle_rspiocb(phba, pring,
+ irspiocbq);
+ break;
+ case CQE_CODE_RECEIVE:
+ dmabuf = container_of(cq_event, struct hbq_dmabuf,
+ cq_event);
+ lpfc_sli4_handle_received_buffer(phba, dmabuf);
+ break;
+ default:
+ break;
+ }
}
}
/* Check to see if any errors occurred during init */
if ((status & HS_FFERM) || (i >= 20)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2751 Adapter failed to restart, "
+ "status reg x%x, FW Data: A8 x%x AC x%x\n",
+ status,
+ readl(phba->MBslimaddr + 0xa8),
+ readl(phba->MBslimaddr + 0xac));
phba->link_state = LPFC_HBA_ERROR;
retval = 1;
}
if (retval != MBX_SUCCESS) {
if (retval != MBX_BUSY)
mempool_free(pmb, phba->mbox_mem_pool);
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2752 KILL_BOARD command failed retval %d\n",
+ retval);
spin_lock_irq(&phba->hbalock);
phba->link_flag &= ~LS_IGNORE_ERATT;
spin_unlock_irq(&phba->hbalock);
/* perform board reset */
phba->fc_eventTag = 0;
+ phba->link_events = 0;
phba->pport->fc_myDID = 0;
phba->pport->fc_prevDID = 0;
/* perform board reset */
phba->fc_eventTag = 0;
+ phba->link_events = 0;
phba->pport->fc_myDID = 0;
phba->pport->fc_prevDID = 0;
list_del_init(&phba->sli4_hba.dat_rq->list);
list_del_init(&phba->sli4_hba.mbx_cq->list);
list_del_init(&phba->sli4_hba.els_cq->list);
- list_del_init(&phba->sli4_hba.rxq_cq->list);
for (qindx = 0; qindx < phba->cfg_fcp_wq_count; qindx++)
list_del_init(&phba->sli4_hba.fcp_wq[qindx]->list);
for (qindx = 0; qindx < phba->cfg_fcp_eq_count; qindx++)
struct lpfc_sli *psli;
volatile uint32_t word0;
void __iomem *to_slim;
+ uint32_t hba_aer_enabled;
spin_lock_irq(&phba->hbalock);
+ /* Take PCIe device Advanced Error Reporting (AER) state */
+ hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
+
psli = &phba->sli;
/* Restart HBA */
/* Give the INITFF and Post time to settle. */
mdelay(100);
+ /* Reset HBA AER if it was enabled, note hba_flag was reset above */
+ if (hba_aer_enabled)
+ pci_disable_pcie_error_reporting(phba->pcidev);
+
lpfc_hba_down_post(phba);
return 0;
lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
{
struct lpfc_sli *psli = &phba->sli;
-
+ uint32_t hba_aer_enabled;
/* Restart HBA */
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"0296 Restart HBA Data: x%x x%x\n",
phba->pport->port_state, psli->sli_flag);
+ /* Take PCIe device Advanced Error Reporting (AER) state */
+ hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
+
lpfc_sli4_brdreset(phba);
spin_lock_irq(&phba->hbalock);
memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
psli->stats_start = get_seconds();
+ /* Reset HBA AER if it was enabled, note hba_flag was reset above */
+ if (hba_aer_enabled)
+ pci_disable_pcie_error_reporting(phba->pcidev);
+
lpfc_hba_down_post(phba);
return 0;
i = 0;
while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
- /* Check every 100ms for 5 retries, then every 500ms for 5, then
- * every 2.5 sec for 5, then reset board and every 2.5 sec for
- * 4.
+ /* Check every 10ms for 10 retries, then every 100ms for 90
+		 * retries, then every 1 sec for 50 retries, for a total of
+		 * ~60 seconds before resetting the board again and checking
+		 * every 1 sec for another 50 retries. The up-to-60-second
+		 * wait is required for Falcon FIPS zeroization to complete;
+		 * any board reset in between restarts zeroization and
+		 * further delays board readiness.
*/
- if (i++ >= 20) {
+ if (i++ >= 200) {
/* Adapter failed to init, timeout, status reg
<status> */
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
return -EIO;
}
- if (i <= 5) {
+ if (i <= 10)
msleep(10);
- } else if (i <= 10) {
- msleep(500);
- } else {
- msleep(2500);
- }
+ else if (i <= 100)
+ msleep(100);
+ else
+ msleep(1000);
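+		/* Worked out: 10 * 10ms + 90 * 100ms + ~50 * 1s is roughly
+		 * 59.1s before the restart at i == 150, then up to 50 more
+		 * 1s polls before the final timeout at i == 200.
+		 */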
- if (i == 15) {
- /* Do post */
+ if (i == 150) {
+ /* Do post */
phba->pport->port_state = LPFC_VPORT_UNKNOWN;
lpfc_sli_brdrestart(phba);
}
phba->link_state = LPFC_HBA_ERROR;
mempool_free(pmb, phba->mbox_mem_pool);
- return ENXIO;
+ return -ENXIO;
}
}
phba->hbq_count = hbq_count;
phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
LPFC_SLI3_HBQ_ENABLED |
LPFC_SLI3_CRP_ENABLED |
- LPFC_SLI3_INB_ENABLED |
- LPFC_SLI3_BG_ENABLED);
+ LPFC_SLI3_BG_ENABLED |
+ LPFC_SLI3_DSS_ENABLED);
if (rc != MBX_SUCCESS) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0442 Adapter failed to init, mbxCmd x%x "
} else
phba->max_vpi = 0;
- if (pmb->u.mb.un.varCfgPort.gdss)
+ phba->fips_level = 0;
+ phba->fips_spec_rev = 0;
+ if (pmb->u.mb.un.varCfgPort.gdss) {
phba->sli3_options |= LPFC_SLI3_DSS_ENABLED;
+ phba->fips_level = pmb->u.mb.un.varCfgPort.fips_level;
+ phba->fips_spec_rev = pmb->u.mb.un.varCfgPort.fips_rev;
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "2850 Security Crypto Active. FIPS x%d "
+ "(Spec Rev: x%d)",
+ phba->fips_level, phba->fips_spec_rev);
+ }
+ if (pmb->u.mb.un.varCfgPort.sec_err) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2856 Config Port Security Crypto "
+ "Error: x%x ",
+ pmb->u.mb.un.varCfgPort.sec_err);
+ }
if (pmb->u.mb.un.varCfgPort.gerbm)
phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
if (pmb->u.mb.un.varCfgPort.gcrp)
phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
- if (pmb->u.mb.un.varCfgPort.ginb) {
- phba->sli3_options |= LPFC_SLI3_INB_ENABLED;
- phba->hbq_get = phba->mbox->us.s3_inb_pgp.hbq_get;
- phba->port_gp = phba->mbox->us.s3_inb_pgp.port;
- phba->inb_ha_copy = &phba->mbox->us.s3_inb_pgp.ha_copy;
- phba->inb_counter = &phba->mbox->us.s3_inb_pgp.counter;
- phba->inb_last_counter =
- phba->mbox->us.s3_inb_pgp.counter;
- } else {
- phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
- phba->port_gp = phba->mbox->us.s3_pgp.port;
- phba->inb_ha_copy = NULL;
- phba->inb_counter = NULL;
- }
+
+ phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
+ phba->port_gp = phba->mbox->us.s3_pgp.port;
if (phba->cfg_enable_bg) {
if (pmb->u.mb.un.varCfgPort.gbg)
} else {
phba->hbq_get = NULL;
phba->port_gp = phba->mbox->us.s2.port;
- phba->inb_ha_copy = NULL;
- phba->inb_counter = NULL;
phba->max_vpi = 0;
}
do_prep_failed:
if (rc)
goto lpfc_sli_hba_setup_error;
+ /* Enable PCIe device Advanced Error Reporting (AER) if configured */
+ if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
+ rc = pci_enable_pcie_error_reporting(phba->pcidev);
+ if (!rc) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "2709 This device supports "
+ "Advanced Error Reporting (AER)\n");
+ spin_lock_irq(&phba->hbalock);
+ phba->hba_flag |= HBA_AER_ENABLED;
+ spin_unlock_irq(&phba->hbalock);
+ } else {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "2708 This device does not support "
+ "Advanced Error Reporting (AER)\n");
+ phba->cfg_aer_support = 0;
+ }
+ }
+
if (phba->sli_rev == 3) {
phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
lpfc_sli_hba_setup_error:
phba->link_state = LPFC_HBA_ERROR;
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0445 Firmware initialization failed\n");
return rc;
}
return -EIO;
}
data_length = mqe->un.mb_words[5];
- if (data_length > DMP_FCOEPARAM_RGN_SIZE)
+ if (data_length > DMP_RGN23_SIZE) {
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
return -EIO;
+ }
lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
lpfc_mbuf_free(phba, mp->virt, mp->phys);
* addition, this routine gets the port vpd data.
*
* Return codes
- * 0 - sucessful
- * ENOMEM - could not allocated memory.
+ * 0 - successful
+ * 	-ENOMEM - could not allocate memory.
**/
static int
lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
if (rc) {
dma_free_coherent(&phba->pcidev->dev, dma_size,
dmabuf->virt, dmabuf->phys);
+ kfree(dmabuf);
return -EIO;
}
- lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
- "(%d):0380 Mailbox cmd x%x Status x%x "
- "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
- "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
- "CQ: x%x x%x x%x x%x\n",
- mboxq->vport ? mboxq->vport->vpi : 0,
- bf_get(lpfc_mqe_command, mqe),
- bf_get(lpfc_mqe_status, mqe),
- mqe->un.mb_words[0], mqe->un.mb_words[1],
- mqe->un.mb_words[2], mqe->un.mb_words[3],
- mqe->un.mb_words[4], mqe->un.mb_words[5],
- mqe->un.mb_words[6], mqe->un.mb_words[7],
- mqe->un.mb_words[8], mqe->un.mb_words[9],
- mqe->un.mb_words[10], mqe->un.mb_words[11],
- mqe->un.mb_words[12], mqe->un.mb_words[13],
- mqe->un.mb_words[14], mqe->un.mb_words[15],
- mqe->un.mb_words[16], mqe->un.mb_words[50],
- mboxq->mcqe.word0,
- mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
- mboxq->mcqe.trailer);
-
/*
* The available vpd length cannot be bigger than the
* DMA buffer passed to the port. Catch the less than
if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
*vpd_size = mqe->un.read_rev.avail_vpd_len;
- lpfc_sli_pcimem_bcopy(dmabuf->virt, vpd, *vpd_size);
+ memcpy(vpd, dmabuf->virt, *vpd_size);
+
dma_free_coherent(&phba->pcidev->dev, dma_size,
dmabuf->virt, dmabuf->phys);
kfree(dmabuf);
lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
- lpfc_sli4_cq_release(phba->sli4_hba.rxq_cq, LPFC_QUEUE_REARM);
for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++)
lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx],
LPFC_QUEUE_REARM);
*/
if (lpfc_sli4_read_fcoe_params(phba, mboxq))
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
- "2570 Failed to read FCoE parameters \n");
+ "2570 Failed to read FCoE parameters\n");
/* Issue READ_REV to collect vpd and FW information. */
- vpd_size = PAGE_SIZE;
+ vpd_size = SLI4_PAGE_SIZE;
vpd = kzalloc(vpd_size, GFP_KERNEL);
if (!vpd) {
rc = -ENOMEM;
goto out_free_vpd;
mqe = &mboxq->u.mqe;
- if ((bf_get(lpfc_mbx_rd_rev_sli_lvl,
- &mqe->un.read_rev) != LPFC_SLI_REV4) ||
- (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev) == 0)) {
+ phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
+ if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev))
+ phba->hba_flag |= HBA_FCOE_SUPPORT;
+
+ if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
+ LPFC_DCBX_CEE_MODE)
+ phba->hba_flag |= HBA_FIP_SUPPORT;
+ else
+ phba->hba_flag &= ~HBA_FIP_SUPPORT;
+
+ if (phba->sli_rev != LPFC_SLI_REV4 ||
+ !(phba->hba_flag & HBA_FCOE_SUPPORT)) {
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
"0376 READ_REV Error. SLI Level %d "
"FCoE enabled %d\n",
- bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev),
- bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev));
+ phba->sli_rev, phba->hba_flag & HBA_FCOE_SUPPORT);
rc = -EIO;
goto out_free_vpd;
}
- /* Single threaded at this point, no need for lock */
- spin_lock_irq(&phba->hbalock);
- phba->hba_flag |= HBA_FCOE_SUPPORT;
- spin_unlock_irq(&phba->hbalock);
/*
* Evaluate the read rev and vpd data. Populate the driver
* state with the results. If this routine fails, the failure
rc = 0;
}
- /* By now, we should determine the SLI revision, hard code for now */
- phba->sli_rev = LPFC_SLI_REV4;
+ /* Save information as VPD data */
+ phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
+ phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
+ phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
+ phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
+ &mqe->un.read_rev);
+ phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
+ &mqe->un.read_rev);
+ phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
+ &mqe->un.read_rev);
+ phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
+ &mqe->un.read_rev);
+ phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
+ memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
+ phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
+ memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
+ phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
+ memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
+ lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+ "(%d):0380 READ_REV Status x%x "
+ "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
+ mboxq->vport ? mboxq->vport->vpi : 0,
+ bf_get(lpfc_mqe_status, mqe),
+ phba->vpd.rev.opFwName,
+ phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
+ phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
/*
* Discover the port's supported feature set and match it against the
spin_unlock_irq(&phba->hbalock);
/* Read the port's service parameters. */
- lpfc_read_sparam(phba, mboxq, vport->vpi);
+ rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
+ if (rc) {
+ phba->link_state = LPFC_HBA_ERROR;
+ rc = -ENOMEM;
+ goto out_free_vpd;
+ }
+
mboxq->vport = vport;
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
mp = (struct lpfc_dmabuf *) mboxq->context1;
rc = lpfc_sli4_post_sgl_list(phba);
if (unlikely(rc)) {
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
- "0582 Error %d during sgl post operation", rc);
+ "0582 Error %d during sgl post operation\n",
+ rc);
rc = -ENODEV;
goto out_free_vpd;
}
rc = lpfc_sli4_repost_scsi_sgl_list(phba);
if (unlikely(rc)) {
lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
- "0383 Error %d during scsi sgl post opeation",
- rc);
+ "0383 Error %d during scsi sgl post "
+ "operation\n", rc);
/* Some Scsi buffers were moved to the abort scsi list */
/* A pci function reset will repost them */
rc = -ENODEV;
rc = -ENODEV;
goto out_free_vpd;
}
- /* Temporary initialization of lpfc_fip_flag to non-fip */
- bf_set(lpfc_fip_flag, &phba->sli4_hba.sli4_flags, 0);
/* Set up all the queues to the device */
rc = lpfc_sli4_queue_setup(phba);
/* Post receive buffers to the device */
lpfc_sli4_rb_setup(phba);
+ /* Reset HBA FCF states after HBA reset */
+ phba->fcf.fcf_flag = 0;
+ phba->fcf.current_rec.flag = 0;
+
/* Start the ELS watchdog timer */
- /*
- * The driver for SLI4 is not yet ready to process timeouts
- * or interrupts. Once it is, the comment bars can be removed.
- */
- /* mod_timer(&vport->els_tmofunc,
- * jiffies + HZ * (phba->fc_ratov*2)); */
+ mod_timer(&vport->els_tmofunc,
+ jiffies + HZ * (phba->fc_ratov * 2));
/* Start heart beat timer */
mod_timer(&phba->hb_tmofunc,
/* Start error attention (ERATT) polling timer */
mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
+ /* Enable PCIe device Advanced Error Reporting (AER) if configured */
+ if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
+ rc = pci_enable_pcie_error_reporting(phba->pcidev);
+ if (!rc) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "2829 This device supports "
+ "Advanced Error Reporting (AER)\n");
+ spin_lock_irq(&phba->hbalock);
+ phba->hba_flag |= HBA_AER_ENABLED;
+ spin_unlock_irq(&phba->hbalock);
+ } else {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "2830 This device does not support "
+ "Advanced Error Reporting (AER)\n");
+ phba->cfg_aer_support = 0;
+ }
+ }
+
/*
* The port is ready, set the host's link state to LINK_DOWN
* in preparation for link interrupts.
spin_lock_irqsave(&phba->hbalock, drvr_flag);
if (!pmbox) {
+ phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
/* processing mbox queue from intr_handler */
if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
return MBX_SUCCESS;
}
processing_queue = 1;
- phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
pmbox = lpfc_mbox_get(phba);
if (!pmbox) {
spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
mb->mbxOwner = OWN_CHIP;
if (psli->sli_flag & LPFC_SLI_ACTIVE) {
- /* First copy command data to host SLIM area */
+ /* Populate mbox extension offset word. */
+ if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
+ *(((uint32_t *)mb) + pmbox->mbox_offset_word)
+ = (uint8_t *)phba->mbox_ext
+ - (uint8_t *)phba->mbox;
+ }
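+		/* mbox_offset_word is evidently the word index within the
+		 * mailbox command at which the byte offset of the extension
+		 * area is recorded for the port.
+		 */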
+
+ /* Copy the mailbox extension data */
+ if (pmbox->in_ext_byte_len && pmbox->context2) {
+ lpfc_sli_pcimem_bcopy(pmbox->context2,
+ (uint8_t *)phba->mbox_ext,
+ pmbox->in_ext_byte_len);
+ }
+ /* Copy command data to host SLIM area */
lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
} else {
+ /* Populate mbox extension offset word. */
+ if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
+ *(((uint32_t *)mb) + pmbox->mbox_offset_word)
+ = MAILBOX_HBA_EXT_OFFSET;
+
+ /* Copy the mailbox extension data */
+ if (pmbox->in_ext_byte_len && pmbox->context2) {
+ lpfc_memcpy_to_slim(phba->MBslimaddr +
+ MAILBOX_HBA_EXT_OFFSET,
+ pmbox->context2, pmbox->in_ext_byte_len);
+
+ }
if (mb->mbxCommand == MBX_CONFIG_PORT) {
/* copy command data into host mbox for cmpl */
lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
if (psli->sli_flag & LPFC_SLI_ACTIVE) {
/* copy results back to user */
lpfc_sli_pcimem_bcopy(phba->mbox, mb, MAILBOX_CMD_SIZE);
+ /* Copy the mailbox extension data */
+ if (pmbox->out_ext_byte_len && pmbox->context2) {
+ lpfc_sli_pcimem_bcopy(phba->mbox_ext,
+ pmbox->context2,
+ pmbox->out_ext_byte_len);
+ }
} else {
/* First copy command data */
lpfc_memcpy_from_slim(mb, phba->MBslimaddr,
MAILBOX_CMD_SIZE);
- if ((mb->mbxCommand == MBX_DUMP_MEMORY) &&
- pmbox->context2) {
- lpfc_memcpy_from_slim((void *)pmbox->context2,
- phba->MBslimaddr + DMP_RSP_OFFSET,
- mb->un.varDmp.word_cnt);
+ /* Copy the mailbox extension data */
+ if (pmbox->out_ext_byte_len && pmbox->context2) {
+ lpfc_memcpy_from_slim(pmbox->context2,
+ phba->MBslimaddr +
+ MAILBOX_HBA_EXT_OFFSET,
+ pmbox->out_ext_byte_len);
}
}
}
/**
+ * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command
+ * @phba: Pointer to HBA context object.
+ *
+ * The function blocks the posting of SLI4 asynchronous mailbox commands from
+ * the driver internal pending mailbox queue. It will then try to wait out the
+ * possible outstanding mailbox command before returning.
+ *
+ * Returns:
+ *	0 - the outstanding mailbox command completed and posting is blocked;
+ *	1 - the wait for the outstanding mailbox command timed out.
+ **/
+static int
+lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
+{
+ struct lpfc_sli *psli = &phba->sli;
+ uint8_t actcmd = MBX_HEARTBEAT;
+ int rc = 0;
+ unsigned long timeout;
+
+ /* Mark the asynchronous mailbox command posting as blocked */
+ spin_lock_irq(&phba->hbalock);
+ psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
+ if (phba->sli.mbox_active)
+ actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
+ spin_unlock_irq(&phba->hbalock);
+ /* Determine how long we might wait for the active mailbox
+ * command to be gracefully completed by firmware.
+ */
+ timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) * 1000) +
+ jiffies;
+	/* Wait for the outstanding mailbox command to complete */
+ while (phba->sli.mbox_active) {
+ /* Check active mailbox complete status every 2ms */
+ msleep(2);
+ if (time_after(jiffies, timeout)) {
+			/* Timeout, mark the outstanding cmd as not complete */
+ rc = 1;
+ break;
+ }
+ }
+
+	/* Cannot cleanly block async mailbox commands, fail it */
+ if (rc) {
+ spin_lock_irq(&phba->hbalock);
+ psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
+ spin_unlock_irq(&phba->hbalock);
+ }
+ return rc;
+}
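+/* A minimal usage sketch, mirroring the MBX_POLL path added to the
+ * mailbox issue routine further below:
+ *
+ *	if (!lpfc_sli4_async_mbox_block(phba)) {
+ *		rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
+ *		lpfc_sli4_async_mbox_unblock(phba);
+ *	}
+ */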
+
+/**
+ * lpfc_sli4_async_mbox_unblock - Unblock posting SLI4 async mailbox command
+ * @phba: Pointer to HBA context object.
+ *
+ * The function unblocks and resumes posting of SLI4 asynchronous mailbox
+ * commands from the driver internal pending mailbox queue. Any outstanding
+ * synchronous mailbox command is guaranteed to have completed or timed out
+ * by the time this is called, so posting can be resumed right away and the
+ * worker thread is woken up to service the pending queue.
+ **/
+static void
+lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
+{
+ struct lpfc_sli *psli = &phba->sli;
+
+ spin_lock_irq(&phba->hbalock);
+ if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
+ /* Asynchronous mailbox posting is not blocked, do nothing */
+ spin_unlock_irq(&phba->hbalock);
+ return;
+ }
+
+	/* An outstanding synchronous mailbox command is guaranteed to
+	 * complete, either successfully or by timeout, and a timed-out
+	 * command is always removed, so just unblock posting of async
+	 * mailbox commands and resume
+ */
+ psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
+ spin_unlock_irq(&phba->hbalock);
+
+	/* wake up worker thread to post asynchronous mailbox commands */
+ lpfc_worker_wake_up(phba);
+}
+
+/**
* lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
* @phba: Pointer to HBA context object.
* @mboxq: Pointer to mailbox object.
if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
bf_set(lpfc_mqe_status, mb, LPFC_MBX_ERROR_RANGE | mcqe_status);
rc = MBXERR_ERROR;
- }
+ } else
+ lpfc_sli4_swap_str(phba, mboxq);
lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
"(%d):0356 Mailbox cmd x%x (x%x) Status x%x "
unsigned long iflags;
int rc;
+ rc = lpfc_mbox_dev_check(phba);
+ if (unlikely(rc)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "(%d):2544 Mailbox command x%x (x%x) "
+ "cannot issue Data: x%x x%x\n",
+ mboxq->vport ? mboxq->vport->vpi : 0,
+ mboxq->u.mb.mbxCommand,
+ lpfc_sli4_mbox_opcode_get(phba, mboxq),
+ psli->sli_flag, flag);
+ goto out_not_finished;
+ }
+
/* Detect polling mode and jump to a handler */
if (!phba->sli4_hba.intr_enable) {
if (flag == MBX_POLL)
psli->sli_flag, flag);
return rc;
} else if (flag == MBX_POLL) {
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
- "(%d):2542 Mailbox command x%x (x%x) "
- "cannot issue Data: x%x x%x\n",
+ lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
+ "(%d):2542 Try to issue mailbox command "
+ "x%x (x%x) synchronously ahead of async"
+ "mailbox command queue: x%x x%x\n",
mboxq->vport ? mboxq->vport->vpi : 0,
mboxq->u.mb.mbxCommand,
lpfc_sli4_mbox_opcode_get(phba, mboxq),
psli->sli_flag, flag);
- return -EIO;
+ /* Try to block the asynchronous mailbox posting */
+ rc = lpfc_sli4_async_mbox_block(phba);
+ if (!rc) {
+ /* Successfully blocked, now issue sync mbox cmd */
+ rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
+ if (rc != MBX_SUCCESS)
+ lpfc_printf_log(phba, KERN_ERR,
+ LOG_MBOX | LOG_SLI,
+ "(%d):2597 Mailbox command "
+ "x%x (x%x) cannot issue "
+ "Data: x%x x%x\n",
+ mboxq->vport ?
+ mboxq->vport->vpi : 0,
+ mboxq->u.mb.mbxCommand,
+ lpfc_sli4_mbox_opcode_get(phba,
+ mboxq),
+ psli->sli_flag, flag);
+ /* Unblock the async mailbox posting afterward */
+ lpfc_sli4_async_mbox_unblock(phba);
+ }
+ return rc;
}
	/* Now, interrupt mode asynchronous mailbox command */
psli->sli_flag, flag);
goto out_not_finished;
}
- rc = lpfc_mbox_dev_check(phba);
- if (unlikely(rc)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
- "(%d):2544 Mailbox command x%x (x%x) "
- "cannot issue Data: x%x x%x\n",
- mboxq->vport ? mboxq->vport->vpi : 0,
- mboxq->u.mb.mbxCommand,
- lpfc_sli4_mbox_opcode_get(phba, mboxq),
- psli->sli_flag, flag);
- goto out_not_finished;
- }
/* Put the mailbox command to the driver internal FIFO */
psli->slistat.mbox_busy++;
* iocb to the txq when SLI layer cannot submit the command iocb
* to the ring.
**/
-static void
+void
__lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *piocb)
{
case CMD_GEN_REQUEST64_CX:
if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
(piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
- FC_FCP_CMND) ||
+ FC_RCTL_DD_UNSOL_CMD) ||
(piocb->iocb.un.genreq64.w5.hcsw.Type !=
MENLO_TRANSPORT_TYPE))
for (i = 0; i < numBdes; i++) {
/* Should already be byte swapped. */
- sgl->addr_hi = bpl->addrHigh;
- sgl->addr_lo = bpl->addrLow;
- /* swap the size field back to the cpu so we
- * can assign it to the sgl.
- */
- bde.tus.w = le32_to_cpu(bpl->tus.w);
- bf_set(lpfc_sli4_sge_len, sgl, bde.tus.f.bdeSize);
+ sgl->addr_hi = bpl->addrHigh;
+ sgl->addr_lo = bpl->addrLow;
+
if ((i+1) == numBdes)
bf_set(lpfc_sli4_sge_last, sgl, 1);
else
bf_set(lpfc_sli4_sge_last, sgl, 0);
sgl->word2 = cpu_to_le32(sgl->word2);
- sgl->word3 = cpu_to_le32(sgl->word3);
+ /* swap the size field back to the cpu so we
+ * can assign it to the sgl.
+ */
+ bde.tus.w = le32_to_cpu(bpl->tus.w);
+ sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
bpl++;
sgl++;
}
cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
sgl->addr_lo =
cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
- bf_set(lpfc_sli4_sge_len, sgl,
- icmd->un.genreq64.bdl.bdeSize);
bf_set(lpfc_sli4_sge_last, sgl, 1);
sgl->word2 = cpu_to_le32(sgl->word2);
- sgl->word3 = cpu_to_le32(sgl->word3);
+ sgl->sge_len =
+ cpu_to_le32(icmd->un.genreq64.bdl.bdeSize);
}
return sglq->sli4_xritag;
}
/**
* lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution
* @phba: Pointer to HBA context object.
- * @piocb: Pointer to command iocb.
*
- * This routine performs a round robin SCSI command to SLI4 FCP WQ index
- * distribution.
+ * This routine performs a round-robin SCSI command to SLI4 FCP WQ index
+ * distribution. This is called by __lpfc_sli_issue_iocb_s4() with the hbalock
+ * held.
*
* Return: index into SLI4 fast-path FCP queue index.
**/
static uint32_t
-lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
+lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba)
{
- static uint32_t fcp_qidx;
+ ++phba->fcp_qidx;
+ if (phba->fcp_qidx >= phba->cfg_fcp_wq_count)
+ phba->fcp_qidx = 0;
- return fcp_qidx++ % phba->cfg_fcp_wq_count;
+ return phba->fcp_qidx;
}
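/* Moving the round-robin cursor from a function-local static into
 * phba->fcp_qidx gives each HBA its own distribution state instead of
 * one counter shared across adapters; the hbalock held by the caller
 * serializes the increment.
 */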
/**
lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
union lpfc_wqe *wqe)
{
- uint32_t payload_len = 0;
+ uint32_t xmit_len = 0, total_len = 0;
uint8_t ct = 0;
uint32_t fip;
uint32_t abort_tag;
uint8_t command_type = ELS_COMMAND_NON_FIP;
uint8_t cmnd;
uint16_t xritag;
+ uint16_t abrt_iotag;
+ struct lpfc_iocbq *abrtiocbq;
struct ulp_bde64 *bpl = NULL;
+ uint32_t els_id = LPFC_ELS_ID_DEFAULT;
+ int numBdes, i;
+ struct ulp_bde64 bde;
- fip = bf_get(lpfc_fip_flag, &phba->sli4_hba.sli4_flags);
+ fip = phba->hba_flag & HBA_FIP_SUPPORT;
/* The fcp commands will set command type */
- if ((!(iocbq->iocb_flag & LPFC_IO_FCP)) && (!fip))
- command_type = ELS_COMMAND_NON_FIP;
- else if (!(iocbq->iocb_flag & LPFC_IO_FCP))
- command_type = ELS_COMMAND_FIP;
- else if (iocbq->iocb_flag & LPFC_IO_FCP)
+ if (iocbq->iocb_flag & LPFC_IO_FCP)
command_type = FCP_COMMAND;
- else {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "2019 Invalid cmd 0x%x\n",
- iocbq->iocb.ulpCommand);
- return IOCB_ERROR;
- }
+ else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK))
+ command_type = ELS_COMMAND_FIP;
+ else
+ command_type = ELS_COMMAND_NON_FIP;
+
/* Some of the fields are in the right position already */
memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
abort_tag = (uint32_t) iocbq->iotag;
xritag = iocbq->sli4_xritag;
- wqe->words[7] = 0; /* The ct field has moved so reset */
+ wqe->generic.wqe_com.word7 = 0; /* The ct field has moved so reset */
/* words0-2 bpl convert bde */
if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
+ numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
+ sizeof(struct ulp_bde64);
bpl = (struct ulp_bde64 *)
((struct lpfc_dmabuf *)iocbq->context3)->virt;
if (!bpl)
* can assign it to the sgl.
*/
wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w);
- payload_len = wqe->generic.bde.tus.f.bdeSize;
+ xmit_len = wqe->generic.bde.tus.f.bdeSize;
+ total_len = 0;
+ for (i = 0; i < numBdes; i++) {
+ bde.tus.w = le32_to_cpu(bpl[i].tus.w);
+ total_len += bde.tus.f.bdeSize;
+ }
} else
- payload_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
+ xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
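+	/* xmit_len covers only the first BDE; total_len sums every BDE in
+	 * the BPL and is consumed by the CMD_XMIT_SEQUENCE64_CX case
+	 * below, where the entire sequence is transmitted.
+	 */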
iocbq->iocb.ulpIoTag = iocbq->iotag;
cmnd = iocbq->iocb.ulpCommand;
iocbq->iocb.ulpCommand);
return IOCB_ERROR;
}
- wqe->els_req.payload_len = payload_len;
+ wqe->els_req.payload_len = xmit_len;
/* Els_reguest64 has a TMO */
bf_set(wqe_tmo, &wqe->els_req.wqe_com,
iocbq->iocb.ulpTimeout);
* contains the FCFI and remote N_Port_ID is
* in word 5.
*/
-
ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
- bf_set(lpfc_wqe_gen_context, &wqe->generic,
- iocbq->iocb.ulpContext);
-
- if (iocbq->vport->fc_myDID != 0) {
- bf_set(els_req64_sid, &wqe->els_req,
- iocbq->vport->fc_myDID);
- bf_set(els_req64_sp, &wqe->els_req, 1);
- }
- bf_set(lpfc_wqe_gen_ct, &wqe->generic, ct);
- bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0);
+ bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
+ iocbq->iocb.ulpContext);
+ bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
+ bf_set(wqe_pu, &wqe->els_req.wqe_com, 0);
/* CCP CCPE PV PRI in word10 were set in the memcpy */
+ if (command_type == ELS_COMMAND_FIP) {
+ els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
+ >> LPFC_FIP_ELS_ID_SHIFT);
+ }
+ bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
+ bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
+ bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
+ bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
+ bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
+ bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
break;
+ case CMD_XMIT_SEQUENCE64_CX:
+ bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
+ iocbq->iocb.un.ulpWord[3]);
+ bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com,
+ iocbq->iocb.ulpContext);
+ /* The entire sequence is transmitted for this IOCB */
+ xmit_len = total_len;
+ cmnd = CMD_XMIT_SEQUENCE64_CR;
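+		/* Deliberate fall-through: after the _CX-specific context
+		 * tag and OX_ID setup above, the rest of the WQE is built
+		 * by the CMD_XMIT_SEQUENCE64_CR case below.
+		 */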
case CMD_XMIT_SEQUENCE64_CR:
- /* word3 iocb=io_tag32 wqe=payload_offset */
- /* payload offset used for multilpe outstanding
- * sequences on the same exchange
- */
- wqe->words[3] = 0;
+ /* word3 iocb=io_tag32 wqe=reserved */
+ wqe->xmit_sequence.rsvd3 = 0;
/* word4 relative_offset memcpy */
/* word5 r_ctl/df_ctl memcpy */
- bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0);
- wqe->xmit_sequence.xmit_len = payload_len;
+ bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
+ bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
+ bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
+ LPFC_WQE_IOD_WRITE);
+ bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
+ LPFC_WQE_LENLOC_WORD12);
+ bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
+ wqe->xmit_sequence.xmit_len = xmit_len;
+ command_type = OTHER_COMMAND;
break;
case CMD_XMIT_BCAST64_CN:
- /* word3 iocb=iotag32 wqe=payload_len */
- wqe->words[3] = 0; /* no definition for this in wqe */
+ /* word3 iocb=iotag32 wqe=seq_payload_len */
+ wqe->xmit_bcast64.seq_payload_len = xmit_len;
/* word4 iocb=rsvd wqe=rsvd */
/* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
/* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
- bf_set(lpfc_wqe_gen_ct, &wqe->generic,
+ bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com,
((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
+ bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1);
+ bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE);
+ bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com,
+ LPFC_WQE_LENLOC_WORD3);
+ bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0);
break;
case CMD_FCP_IWRITE64_CR:
command_type = FCP_COMMAND_DATA_OUT;
- /* The struct for wqe fcp_iwrite has 3 fields that are somewhat
- * confusing.
- * word3 is payload_len: byte offset to the sgl entry for the
- * fcp_command.
- * word4 is total xfer len, same as the IOCB->ulpParameter.
- * word5 is initial xfer len 0 = wait for xfer-ready
- */
-
- /* Always wait for xfer-ready before sending data */
- wqe->fcp_iwrite.initial_xfer_len = 0;
- /* word 4 (xfer length) should have been set on the memcpy */
-
- /* allow write to fall through to read */
+ /* word3 iocb=iotag wqe=payload_offset_len */
+ /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
+ wqe->fcp_iwrite.payload_offset_len =
+ xmit_len + sizeof(struct fcp_rsp);
+ /* word4 iocb=parameter wqe=total_xfer_length memcpy */
+ /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
+ bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com,
+ iocbq->iocb.ulpFCP2Rcvy);
+ bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS);
+ /* Always open the exchange */
+ bf_set(wqe_xc, &wqe->fcp_iwrite.wqe_com, 0);
+ bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
+ bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
+ bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
+ LPFC_WQE_LENLOC_WORD4);
+ bf_set(wqe_ebde_cnt, &wqe->fcp_iwrite.wqe_com, 0);
+ bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
+ break;
case CMD_FCP_IREAD64_CR:
- /* FCP_CMD is always the 1st sgl entry */
- wqe->fcp_iread.payload_len =
- payload_len + sizeof(struct fcp_rsp);
-
- /* word 4 (xfer length) should have been set on the memcpy */
-
- bf_set(lpfc_wqe_gen_erp, &wqe->generic,
- iocbq->iocb.ulpFCP2Rcvy);
- bf_set(lpfc_wqe_gen_lnk, &wqe->generic, iocbq->iocb.ulpXS);
- /* The XC bit and the XS bit are similar. The driver never
- * tracked whether or not the exchange was previouslly open.
- * XC = Exchange create, 0 is create. 1 is already open.
- * XS = link cmd: 1 do not close the exchange after command.
- * XS = 0 close exchange when command completes.
- * The only time we would not set the XC bit is when the XS bit
- * is set and we are sending our 2nd or greater command on
- * this exchange.
- */
-
- /* ALLOW read & write to fall through to ICMD64 */
- case CMD_FCP_ICMND64_CR:
+ /* word3 iocb=iotag wqe=payload_offset_len */
+ /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
+ wqe->fcp_iread.payload_offset_len =
+ xmit_len + sizeof(struct fcp_rsp);
+ /* word4 iocb=parameter wqe=total_xfer_length memcpy */
+ /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
+ bf_set(wqe_erp, &wqe->fcp_iread.wqe_com,
+ iocbq->iocb.ulpFCP2Rcvy);
+ bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS);
/* Always open the exchange */
bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0);
-
- wqe->words[10] &= 0xffff0000; /* zero out ebde count */
- bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU);
+ bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
+ bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
+ bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
+ LPFC_WQE_LENLOC_WORD4);
+ bf_set(wqe_ebde_cnt, &wqe->fcp_iread.wqe_com, 0);
+ bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
+ break;
+ case CMD_FCP_ICMND64_CR:
+ /* word3 iocb=IO_TAG wqe=reserved */
+ wqe->fcp_icmd.rsrvd3 = 0;
+ bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
+ /* Always open the exchange */
+ bf_set(wqe_xc, &wqe->fcp_icmd.wqe_com, 0);
+ bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1);
+ bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
+ bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
+ bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
+ LPFC_WQE_LENLOC_NONE);
+ bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0);
break;
case CMD_GEN_REQUEST64_CR:
- /* word3 command length is described as byte offset to the
- * rsp_data. Would always be 16, sizeof(struct sli4_sge)
- * sgl[0] = cmnd
- * sgl[1] = rsp.
- *
- */
- wqe->gen_req.command_len = payload_len;
- /* Word4 parameter copied in the memcpy */
- /* Word5 [rctl, type, df_ctl, la] copied in memcpy */
+ /* word3 iocb=IO_TAG wqe=request_payload_len */
+ wqe->gen_req.request_payload_len = xmit_len;
+ /* word4 iocb=parameter wqe=relative_offset memcpy */
+ /* word5 [rctl, type, df_ctl, la] copied in memcpy */
/* word6 context tag copied in memcpy */
if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) {
ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
ct, iocbq->iocb.ulpCommand);
return IOCB_ERROR;
}
- bf_set(lpfc_wqe_gen_ct, &wqe->generic, 0);
- bf_set(wqe_tmo, &wqe->gen_req.wqe_com,
- iocbq->iocb.ulpTimeout);
-
- bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU);
+ bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0);
+ bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout);
+ bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU);
+ bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
+ bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
+ bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
+ bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
+ bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
command_type = OTHER_COMMAND;
break;
case CMD_XMIT_ELS_RSP64_CX:
/* words0-2 BDE memcpy */
- /* word3 iocb=iotag32 wqe=rsvd */
- wqe->words[3] = 0;
+ /* word3 iocb=iotag32 wqe=response_payload_len */
+ wqe->xmit_els_rsp.response_payload_len = xmit_len;
/* word4 iocb=did wge=rsvd. */
- wqe->words[4] = 0;
+ wqe->xmit_els_rsp.rsvd4 = 0;
/* word5 iocb=rsvd wge=did */
bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
iocbq->iocb.un.elsreq64.remoteID);
-
- bf_set(lpfc_wqe_gen_ct, &wqe->generic,
- ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
-
- bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU);
- bf_set(wqe_rcvoxid, &wqe->generic, iocbq->iocb.ulpContext);
+ bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com,
+ ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
+ bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
+ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
+ iocbq->iocb.ulpContext);
if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
- bf_set(lpfc_wqe_gen_context, &wqe->generic,
+ bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
iocbq->vport->vpi + phba->vpi_base);
+ bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
+ bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
+ bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
+ bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
+ LPFC_WQE_LENLOC_WORD3);
+ bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
command_type = OTHER_COMMAND;
break;
case CMD_CLOSE_XRI_CN:
case CMD_ABORT_XRI_CX:
		/* words 0-2 are reserved; the memcpy should have left them 0 */
/* port will send abts */
- if (iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
+ abrt_iotag = iocbq->iocb.un.acxri.abortContextTag;
+ if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) {
+ abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag];
+ fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK;
+ } else
+ fip = 0;
+
+ if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip)
/*
- * The link is down so the fw does not need to send abts
+ * The link is down, or the command was ELS_FIP
+ * so the fw does not need to send abts
* on the wire.
*/
bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
else
bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
- abort_tag = iocbq->iocb.un.acxri.abortIoTag;
- wqe->words[5] = 0;
- bf_set(lpfc_wqe_gen_ct, &wqe->generic,
+ /* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */
+ wqe->abort_cmd.rsrvd5 = 0;
+ bf_set(wqe_ct, &wqe->abort_cmd.wqe_com,
((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
abort_tag = iocbq->iocb.un.acxri.abortIoTag;
- wqe->generic.abort_tag = abort_tag;
/*
* The abort handler will send us CMD_ABORT_XRI_CN or
* CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
*/
- bf_set(lpfc_wqe_gen_command, &wqe->generic, CMD_ABORT_XRI_CX);
+ bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
+ bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
+ bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com,
+ LPFC_WQE_LENLOC_NONE);
cmnd = CMD_ABORT_XRI_CX;
command_type = OTHER_COMMAND;
xritag = 0;
break;
+ case CMD_XMIT_BLS_RSP64_CX:
+ /* The BLS ABTS-ACC WQE differs substantially from other WQEs,
+ * so we reconstruct it here from scratch, using the
+ * information carried in the iocbq.
+ */
+ memset(wqe, 0, sizeof(union lpfc_wqe));
+ /* OX_ID is invariant regardless of who sent the ABTS */
+ bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
+ bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_acc));
+ if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_acc) ==
+ LPFC_ABTS_UNSOL_INT) {
+ /* ABTS sent by initiator to CT exchange, the
+ * RX_ID field will be filled with the newly
+ * allocated responder XRI.
+ */
+ bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
+ iocbq->sli4_xritag);
+ } else {
+ /* ABTS sent by responder to CT exchange, the
+ * RX_ID field will be filled with the responder
+ * RX_ID from ABTS.
+ */
+ bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
+ bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_acc));
+ }
+ bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
+ bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
+ bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
+ iocbq->iocb.ulpContext);
+ bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
+ bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
+ LPFC_WQE_LENLOC_NONE);
+ /* Overwrite the pre-set command type with OTHER_COMMAND */
+ command_type = OTHER_COMMAND;
+ break;
case CMD_XRI_ABORTED_CX:
case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
- /* words0-2 are all 0's no bde */
- /* word3 and word4 are rsvrd */
- wqe->words[3] = 0;
- wqe->words[4] = 0;
- /* word5 iocb=rsvd wge=did */
- /* There is no remote port id in the IOCB? */
- /* Let this fall through and fail */
case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
case CMD_FCP_TRSP64_CX: /* Target mode rcv */
default:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"2014 Invalid command 0x%x\n",
iocbq->iocb.ulpCommand);
return IOCB_ERROR;
break;
-
}
- bf_set(lpfc_wqe_gen_xri, &wqe->generic, xritag);
- bf_set(lpfc_wqe_gen_request_tag, &wqe->generic, iocbq->iotag);
- wqe->generic.abort_tag = abort_tag;
- bf_set(lpfc_wqe_gen_cmd_type, &wqe->generic, command_type);
- bf_set(lpfc_wqe_gen_command, &wqe->generic, cmnd);
- bf_set(lpfc_wqe_gen_class, &wqe->generic, iocbq->iocb.ulpClass);
- bf_set(lpfc_wqe_gen_cq_id, &wqe->generic, LPFC_WQE_CQ_ID_DEFAULT);
-
+ bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
+ bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
+ wqe->generic.wqe_com.abort_tag = abort_tag;
+ bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
+ bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd);
+ bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass);
+ bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
return 0;
}
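
/*
 * Usage sketch (condensed from the SLI4 issue-iocb path just below):
 * after lpfc_sli4_iocb2wqe() fills in the generic wqe_com fields, the
 * caller posts the WQE to the appropriate work queue:
 *
 *	union lpfc_wqe wqe;
 *
 *	if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
 *		return IOCB_ERROR;
 *	if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
 *		return IOCB_ERROR;
 */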
struct lpfc_iocbq *piocb, uint32_t flag)
{
struct lpfc_sglq *sglq;
- uint16_t xritag;
union lpfc_wqe wqe;
struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
- uint32_t fcp_wqidx;
if (piocb->sli4_xritag == NO_XRI) {
if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
- piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
+ piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
sglq = NULL;
else {
+ if (pring->txq_cnt) {
+ if (!(flag & SLI_IOCB_RET_IOCB)) {
+ __lpfc_sli_ringtx_put(phba,
+ pring, piocb);
+ return IOCB_SUCCESS;
+ } else {
+ return IOCB_BUSY;
+ }
+ } else {
sglq = __lpfc_sli_get_sglq(phba);
- if (!sglq)
- return IOCB_ERROR;
- piocb->sli4_xritag = sglq->sli4_xritag;
+ if (!sglq) {
+ if (!(flag & SLI_IOCB_RET_IOCB)) {
+ __lpfc_sli_ringtx_put(phba,
+ pring,
+ piocb);
+ return IOCB_SUCCESS;
+ } else
+ return IOCB_BUSY;
+ }
+ }
}
} else if (piocb->iocb_flag & LPFC_IO_FCP) {
sglq = NULL; /* These IO's already have an XRI and
}
if (sglq) {
- xritag = lpfc_sli4_bpl2sgl(phba, piocb, sglq);
- if (xritag != sglq->sli4_xritag)
+ piocb->sli4_xritag = sglq->sli4_xritag;
+
+ if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
return IOCB_ERROR;
}
if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
return IOCB_ERROR;
- if (piocb->iocb_flag & LPFC_IO_FCP) {
- fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba, piocb);
- if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[fcp_wqidx], &wqe))
+ if ((piocb->iocb_flag & LPFC_IO_FCP) ||
+ (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
+ /*
+ * For an FCP command IOCB, get a new WQ index to distribute
+ * WQEs across the WQs. An abort IOCB, on the other hand,
+ * carries the same WQ index as the original command
+ * IOCB.
+ */
+ if (piocb->iocb_flag & LPFC_IO_FCP)
+ piocb->fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba);
+ if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx],
+ &wqe))
return IOCB_ERROR;
} else {
if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
* IOCB_SUCCESS - Success
* IOCB_BUSY - Busy
**/
-static inline int
+int
__lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
struct lpfc_iocbq *piocb, uint32_t flag)
{
KERN_ERR,
LOG_SLI,
"0346 Ring %d handler: unexpected ASYNC_STATUS"
- " evt_code 0x%x \n"
+ " evt_code 0x%x\n"
"W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n"
"W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n"
"W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n"
pring->iotag_max = 4096;
pring->lpfc_sli_rcv_async_status =
lpfc_sli_async_event_handler;
- pring->num_mask = 4;
+ pring->num_mask = LPFC_MAX_RING_MASK;
pring->prt[0].profile = 0; /* Mask 0 */
- pring->prt[0].rctl = FC_ELS_REQ;
- pring->prt[0].type = FC_ELS_DATA;
+ pring->prt[0].rctl = FC_RCTL_ELS_REQ;
+ pring->prt[0].type = FC_TYPE_ELS;
pring->prt[0].lpfc_sli_rcv_unsol_event =
lpfc_els_unsol_event;
pring->prt[1].profile = 0; /* Mask 1 */
- pring->prt[1].rctl = FC_ELS_RSP;
- pring->prt[1].type = FC_ELS_DATA;
+ pring->prt[1].rctl = FC_RCTL_ELS_REP;
+ pring->prt[1].type = FC_TYPE_ELS;
pring->prt[1].lpfc_sli_rcv_unsol_event =
lpfc_els_unsol_event;
pring->prt[2].profile = 0; /* Mask 2 */
/* NameServer Inquiry */
- pring->prt[2].rctl = FC_UNSOL_CTL;
+ pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
/* NameServer */
- pring->prt[2].type = FC_COMMON_TRANSPORT_ULP;
+ pring->prt[2].type = FC_TYPE_CT;
pring->prt[2].lpfc_sli_rcv_unsol_event =
lpfc_ct_unsol_event;
pring->prt[3].profile = 0; /* Mask 3 */
/* NameServer response */
- pring->prt[3].rctl = FC_SOL_CTL;
+ pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
/* NameServer */
- pring->prt[3].type = FC_COMMON_TRANSPORT_ULP;
+ pring->prt[3].type = FC_TYPE_CT;
pring->prt[3].lpfc_sli_rcv_unsol_event =
lpfc_ct_unsol_event;
+ /* abort unsolicited sequence */
+ pring->prt[4].profile = 0; /* Mask 4 */
+ pring->prt[4].rctl = FC_RCTL_BA_ABTS;
+ pring->prt[4].type = FC_TYPE_BLS;
+ pring->prt[4].lpfc_sli_rcv_unsol_event =
+ lpfc_sli4_ct_abort_unsol_event;
break;
}
totiocbsize += (pring->numCiocb * pring->sizeCiocb) +
}
/**
- * lpfc_sli4_hba_down - PCI function resource cleanup for the SLI4 HBA
- * @phba: Pointer to HBA context object.
- *
- * This function cleans up all queues, iocb, buffers, mailbox commands while
- * shutting down the SLI4 HBA FCoE function. This function is called with no
- * lock held and always returns 1.
- *
- * This function does the following to cleanup driver FCoE function resources:
- * - Free discovery resources for each virtual port
- * - Cleanup any pending fabric iocbs
- * - Iterate through the iocb txq and free each entry in the list.
- * - Free up any buffer posted to the HBA.
- * - Clean up all the queue entries: WQ, RQ, MQ, EQ, CQ, etc.
- * - Free mailbox commands in the mailbox queue.
- **/
-int
-lpfc_sli4_hba_down(struct lpfc_hba *phba)
-{
- /* Stop the SLI4 device port */
- lpfc_stop_port(phba);
-
- /* Tear down the queues in the HBA */
- lpfc_sli4_queue_unset(phba);
-
- /* unregister default FCFI from the HBA */
- lpfc_sli4_fcfi_unreg(phba, phba->fcf.fcfi);
-
- return 1;
-}
-
-/**
* lpfc_sli_pcimem_bcopy - SLI memory copy function
* @srcp: Source memory pointer.
* @destp: Destination memory pointer.
/**
+ * lpfc_sli_bemem_bcopy - SLI memory copy function
+ * @srcp: Source memory pointer.
+ * @destp: Destination memory pointer.
+ * @cnt: Number of bytes to copy; must be a multiple of sizeof(uint32_t).
+ *
+ * This function is used to copy data from a buffer with big-endian
+ * representation into the local CPU endianness.
+ * This function can be called with or without lock.
+ **/
+void
+lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
+{
+ uint32_t *src = srcp;
+ uint32_t *dest = destp;
+ uint32_t ldata;
+ int i;
+
+ for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
+ ldata = *src;
+ ldata = be32_to_cpu(ldata);
+ *dest = ldata;
+ src++;
+ dest++;
+ }
+}
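+
+/*
+ * Illustrative sketch, not part of the driver: lpfc_sli_bemem_bcopy()
+ * converts each 32-bit word from big-endian wire order into CPU order;
+ * the byte count is assumed to be a multiple of sizeof(uint32_t):
+ *
+ *	uint32_t be_words[2] = { cpu_to_be32(0x01020304),
+ *				 cpu_to_be32(0xa0b0c0d0) };
+ *	uint32_t cpu_words[2];
+ *
+ *	lpfc_sli_bemem_bcopy(be_words, cpu_words, sizeof(be_words));
+ *	// cpu_words[0] == 0x01020304, cpu_words[1] == 0xa0b0c0d0
+ */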
+
+/**
* lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
* @phba: Pointer to HBA context object.
* @pring: Pointer to driver SLI ring object.
abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
spin_lock_irq(&phba->hbalock);
- if (abort_iotag != 0 && abort_iotag <= phba->sli.last_iotag)
- abort_iocb = phba->sli.iocbq_lookup[abort_iotag];
-
- lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_SLI,
- "0327 Cannot abort els iocb %p "
- "with tag %x context %x, abort status %x, "
- "abort code %x\n",
- abort_iocb, abort_iotag, abort_context,
- irsp->ulpStatus, irsp->un.ulpWord[4]);
+ if (phba->sli_rev < LPFC_SLI_REV4) {
+ if (abort_iotag != 0 &&
+ abort_iotag <= phba->sli.last_iotag)
+ abort_iocb =
+ phba->sli.iocbq_lookup[abort_iotag];
+ } else
+ /* For sli4 the abort_tag is the XRI,
+ * so the abort routine puts the iotag of the iocb
+ * being aborted in the context field of the abort
+ * IOCB.
+ */
+ abort_iocb = phba->sli.iocbq_lookup[abort_context];
/*
* If the iocb is not found in Firmware queue the iocb
* might have completed already. Do not free it again.
*/
if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
- spin_unlock_irq(&phba->hbalock);
- lpfc_sli_release_iocbq(phba, cmdiocb);
- return;
+ if (irsp->un.ulpWord[4] != IOERR_NO_XRI) {
+ spin_unlock_irq(&phba->hbalock);
+ lpfc_sli_release_iocbq(phba, cmdiocb);
+ return;
+ }
+ /* For SLI4 the ulpContext field for abort IOCB
+ * holds the iotag of the IOCB being aborted so
+ * the local abort_context needs to be reset to
+ * match the aborted IOCBs ulpContext.
+ */
+ if (abort_iocb && phba->sli_rev == LPFC_SLI_REV4)
+ abort_context = abort_iocb->iocb.ulpContext;
}
- /*
- * make sure we have the right iocbq before taking it
+
+ lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
+ "0327 Cannot abort els iocb %p "
+ "with tag %x context %x, abort status %x, "
+ "abort code %x\n",
+ abort_iocb, abort_iotag, abort_context,
+ irsp->ulpStatus, irsp->un.ulpWord[4]);
+ /*
+ * make sure we have the right iocbq before taking it
* off the txcmplq and try to call completion routine.
*/
if (!abort_iocb ||
abort_iocb->iocb.ulpContext != abort_context ||
(abort_iocb->iocb_flag & LPFC_DRIVER_ABORTED) == 0)
spin_unlock_irq(&phba->hbalock);
- else {
+ else if (phba->sli_rev < LPFC_SLI_REV4) {
+ /*
+ * leave the SLI4 aborted command on the txcmplq
+ * list and the command complete WCQE's XB bit
+ * will tell whether the SGL (XRI) can be released
+ * immediately or to the aborted SGL list for the
+ * following abort XRI from the HBA.
+ */
list_del_init(&abort_iocb->list);
- pring->txcmplq_cnt--;
- spin_unlock_irq(&phba->hbalock);
+ if (abort_iocb->iocb_flag & LPFC_IO_ON_Q) {
+ abort_iocb->iocb_flag &= ~LPFC_IO_ON_Q;
+ pring->txcmplq_cnt--;
+ }
/* Firmware could still be in progress of DMAing
* payload, so don't free data buffer till after
* a hbeat.
*/
abort_iocb->iocb_flag |= LPFC_DELAY_MEM_FREE;
-
abort_iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
+ spin_unlock_irq(&phba->hbalock);
+
abort_iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
- abort_iocb->iocb.un.ulpWord[4] = IOERR_SLI_ABORTED;
+ abort_iocb->iocb.un.ulpWord[4] = IOERR_ABORT_REQUESTED;
(abort_iocb->iocb_cmpl)(phba, abort_iocb, abort_iocb);
- }
+ } else
+ spin_unlock_irq(&phba->hbalock);
}
lpfc_sli_release_iocbq(phba, cmdiocb);
}
/**
- * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
+ * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb
* @phba: Pointer to HBA context object.
* @pring: Pointer to driver SLI ring object.
* @cmdiocb: Pointer to driver command iocb object.
*
- * This function issues an abort iocb for the provided command
- * iocb. This function is called with hbalock held.
- * The function returns 0 when it fails due to memory allocation
- * failure or when the command iocb is an abort request.
+ * This function issues an abort iocb for the provided command iocb down to
+ * the port. Unless the outstanding command iocb is itself an abort request,
+ * this function issues the abort unconditionally. This function is called
+ * with hbalock held. The function returns 0 when it fails due to memory
+ * allocation failure or when the command iocb is an abort request.
**/
-int
-lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+static int
+lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *cmdiocb)
{
struct lpfc_vport *vport = cmdiocb->vport;
struct lpfc_iocbq *abtsiocbp;
IOCB_t *icmd = NULL;
IOCB_t *iabt = NULL;
- int retval = IOCB_ERROR;
+ int retval;
/*
* There are certain command types we don't want to abort. And we
(cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
return 0;
- /* If we're unloading, don't abort iocb on the ELS ring, but change the
- * callback so that nothing happens when it finishes.
- */
- if ((vport->load_flag & FC_UNLOADING) &&
- (pring->ringno == LPFC_ELS_RING)) {
- if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
- cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
- else
- cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
- goto abort_iotag_exit;
- }
-
/* issue ABTS for this IOCB based on iotag */
abtsiocbp = __lpfc_sli_get_iocbq(phba);
if (abtsiocbp == NULL)
return 0;
/* This signals the response to set the correct status
- * before calling the completion handler.
+ * before calling the completion handler
*/
cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
iabt = &abtsiocbp->iocb;
iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
iabt->un.acxri.abortContextTag = icmd->ulpContext;
- if (phba->sli_rev == LPFC_SLI_REV4)
+ if (phba->sli_rev == LPFC_SLI_REV4) {
iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
+ iabt->un.acxri.abortContextTag = cmdiocb->iotag;
+ }
else
iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
iabt->ulpLe = 1;
iabt->ulpClass = icmd->ulpClass;
+ /* ABTS WQE must go to the same WQ as the WQE to be aborted */
+ abtsiocbp->fcp_wqidx = cmdiocb->fcp_wqidx;
+ if (cmdiocb->iocb_flag & LPFC_IO_FCP)
+ abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;
+
if (phba->link_state >= LPFC_LINK_UP)
iabt->ulpCommand = CMD_ABORT_XRI_CN;
else
lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
"0339 Abort xri x%x, original iotag x%x, "
"abort cmd iotag x%x\n",
+ iabt->un.acxri.abortIoTag,
iabt->un.acxri.abortContextTag,
- iabt->un.acxri.abortIoTag, abtsiocbp->iotag);
+ abtsiocbp->iotag);
retval = __lpfc_sli_issue_iocb(phba, pring->ringno, abtsiocbp, 0);
if (retval)
__lpfc_sli_release_iocbq(phba, abtsiocbp);
+
+ /*
+ * Caller to this routine should check for IOCB_ERROR
+ * and handle it properly. On IOCB_ERROR this routine no longer
+ * removes the iocb from the txcmplq or calls its completion handler.
+ */
+ return retval;
+}
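+
+/*
+ * Callers of lpfc_sli_abort_iotag_issue(): besides the unload-aware
+ * wrapper lpfc_sli_issue_abort_iotag() below, the ring-abort path calls
+ * it unconditionally for every outstanding iocb, e.g.:
+ *
+ *	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
+ *		lpfc_sli_abort_iotag_issue(phba, pring, iocb);
+ */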
+
+/**
+ * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @cmdiocb: Pointer to driver command iocb object.
+ *
+ * This function issues an abort iocb for the provided command iocb. When
+ * the driver is unloading, the abort iocb will not be issued for commands
+ * on the ELS ring; instead, the callback function of those commands is
+ * changed so that nothing happens when they finish. This function is called
+ * with hbalock held. The function returns 0 when the command iocb is an
+ * abort request.
+ **/
+int
+lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ struct lpfc_iocbq *cmdiocb)
+{
+ struct lpfc_vport *vport = cmdiocb->vport;
+ int retval = IOCB_ERROR;
+ IOCB_t *icmd = NULL;
+
+ /*
+ * There are certain command types we don't want to abort. And we
+ * don't want to abort commands that are already in the process of
+ * being aborted.
+ */
+ icmd = &cmdiocb->iocb;
+ if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
+ icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
+ (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
+ return 0;
+
+ /*
+ * If we're unloading, don't abort iocb on the ELS ring, but change
+ * the callback so that nothing happens when it finishes.
+ */
+ if ((vport->load_flag & FC_UNLOADING) &&
+ (pring->ringno == LPFC_ELS_RING)) {
+ if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
+ cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
+ else
+ cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
+ goto abort_iotag_exit;
+ }
+
+ /* Now, try to issue the abort for the outstanding cmdiocb */
+ retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb);
+
abort_iotag_exit:
/*
* Caller to this routine should check for IOCB_ERROR
}
/**
+ * lpfc_sli_iocb_ring_abort - Unconditionally abort all iocbs on an iocb ring
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ *
+ * This function aborts all iocbs in the given ring and frees all the iocb
+ * objects in txq. This function issues abort iocbs unconditionally for all
+ * the iocb commands in txcmplq. The iocbs in the txcmplq are not guaranteed
+ * to complete before the return of this function. The caller is not required
+ * to hold any locks.
+ **/
+static void
+lpfc_sli_iocb_ring_abort(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
+{
+ LIST_HEAD(completions);
+ struct lpfc_iocbq *iocb, *next_iocb;
+
+ if (pring->ringno == LPFC_ELS_RING)
+ lpfc_fabric_abort_hba(phba);
+
+ spin_lock_irq(&phba->hbalock);
+
+ /* Take off all the iocbs on txq for cancelling */
+ list_splice_init(&pring->txq, &completions);
+ pring->txq_cnt = 0;
+
+ /* Next issue ABTS for everything on the txcmplq */
+ list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
+ lpfc_sli_abort_iotag_issue(phba, pring, iocb);
+
+ spin_unlock_irq(&phba->hbalock);
+
+ /* Cancel all the IOCBs from the completions list */
+ lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
+ IOERR_SLI_ABORTED);
+}
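+
+/*
+ * Caller-side sketch (an assumption, not code from this patch): since
+ * lpfc_sli_iocb_ring_abort() only issues ABTS for the txcmplq entries,
+ * a caller that must wait for them to drain would poll afterwards:
+ *
+ *	lpfc_sli_iocb_ring_abort(phba, pring);
+ *	while (pring->txcmplq_cnt && --timeout)
+ *		msleep(10);
+ */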
+
+/**
+ * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
+ * @phba: pointer to lpfc HBA data structure.
+ *
+ * This routine will abort all pending and outstanding iocbs on an HBA.
+ **/
+void
+lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
+{
+ struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_sli_ring *pring;
+ int i;
+
+ for (i = 0; i < psli->num_rings; i++) {
+ pring = &psli->ring[i];
+ lpfc_sli_iocb_ring_abort(phba, pring);
+ }
+}
+
+/**
* lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN
* @iocbq: Pointer to driver iocb object.
* @vport: Pointer to driver virtual port object.
abtsiocb->iocb.ulpClass = cmd->ulpClass;
abtsiocb->vport = phba->pport;
+ /* ABTS WQE must go to the same WQ as the WQE to be aborted */
+ abtsiocb->fcp_wqidx = iocbq->fcp_wqidx;
+ if (iocbq->iocb_flag & LPFC_IO_FCP)
+ abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
+
if (lpfc_is_link_up(phba))
abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
else
{
wait_queue_head_t *pdone_q;
unsigned long iflags;
+ struct lpfc_scsi_buf *lpfc_cmd;
spin_lock_irqsave(&phba->hbalock, iflags);
cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
&rspiocbq->iocb, sizeof(IOCB_t));
+ /* Set the exchange busy flag for task management commands */
+ if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
+ !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
+ lpfc_cmd = container_of(cmdiocbq, struct lpfc_scsi_buf,
+ cur_iocbq);
+ lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY;
+ }
+
pdone_q = cmdiocbq->context_un.wait_queue;
if (pdone_q)
wake_up(pdone_q);
}
/**
+ * lpfc_chk_iocb_flg - Test IOCB flag with lock held.
+ * @phba: Pointer to HBA context object.
+ * @piocbq: Pointer to command iocb.
+ * @flag: Flag to test.
+ *
+ * This routine grabs the hbalock and then tests the iocb_flag to
+ * see if the passed in flag is set.
+ * Returns:
+ * 1 if flag is set.
+ * 0 if flag is not set.
+ **/
+static int
+lpfc_chk_iocb_flg(struct lpfc_hba *phba,
+ struct lpfc_iocbq *piocbq, uint32_t flag)
+{
+ unsigned long iflags;
+ int ret;
+
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ ret = piocbq->iocb_flag & flag;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ return ret;
+}
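+
+/*
+ * Usage sketch, mirroring lpfc_sli_issue_iocb_wait() below: the locked
+ * read pairs with the completion path that sets LPFC_IO_WAKE under
+ * hbalock, so a concurrent wake-up cannot be missed:
+ *
+ *	timeleft = wait_event_timeout(done_q,
+ *			lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
+ *			timeout_req);
+ */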
+
+/**
* lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
* @phba: Pointer to HBA context object..
* @pring: Pointer to sli ring.
long timeleft, timeout_req = 0;
int retval = IOCB_SUCCESS;
uint32_t creg_val;
-
+ struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
/*
* If the caller has provided a response iocbq buffer, then context2
* is NULL or its an error.
readl(phba->HCregaddr); /* flush */
}
- retval = lpfc_sli_issue_iocb(phba, ring_number, piocb, 0);
+ retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
+ SLI_IOCB_RET_IOCB);
if (retval == IOCB_SUCCESS) {
timeout_req = timeout * HZ;
timeleft = wait_event_timeout(done_q,
- piocb->iocb_flag & LPFC_IO_WAKE,
+ lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
timeout_req);
if (piocb->iocb_flag & LPFC_IO_WAKE) {
timeout, (timeleft / jiffies));
retval = IOCB_TIMEDOUT;
}
+ } else if (retval == IOCB_BUSY) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
+ phba->iocb_cnt, pring->txq_cnt, pring->txcmplq_cnt);
+ return retval;
} else {
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"0332 IOCB wait issue failed, Data x%x\n",
* if LPFC_MBX_WAKE flag is set the mailbox is completed
* else do not free the resources.
*/
- if (pmboxq->mbox_flag & LPFC_MBX_WAKE)
+ if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
retval = MBX_SUCCESS;
- else {
+ lpfc_sli4_swap_str(phba, pmboxq);
+ } else {
retval = MBX_TIMEOUT;
pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
}
/* Check if a deferred error condition is active */
if ((HS_FFER1 & phba->work_hs) &&
((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
- HS_FFER6 | HS_FFER7) & phba->work_hs)) {
- spin_lock_irq(&phba->hbalock);
+ HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
phba->hba_flag |= DEFER_ERATT;
- spin_unlock_irq(&phba->hbalock);
/* Clear all interrupt enable conditions */
writel(0, phba->HCregaddr);
readl(phba->HCregaddr);
}
/* Set the driver HA work bitmap */
- spin_lock_irq(&phba->hbalock);
phba->work_ha |= HA_ERATT;
/* Indicate polling handles this ERATT */
phba->hba_flag |= HBA_ERATT_HANDLED;
- spin_unlock_irq(&phba->hbalock);
return 1;
}
return 0;
lpfc_sli4_eratt_read(struct lpfc_hba *phba)
{
uint32_t uerr_sta_hi, uerr_sta_lo;
- uint32_t onlnreg0, onlnreg1;
/* For now, use the SLI4 device internal unrecoverable error
* registers for error attention. This can be changed later.
*/
- onlnreg0 = readl(phba->sli4_hba.ONLINE0regaddr);
- onlnreg1 = readl(phba->sli4_hba.ONLINE1regaddr);
- if ((onlnreg0 != LPFC_ONLINE_NERR) || (onlnreg1 != LPFC_ONLINE_NERR)) {
- uerr_sta_lo = readl(phba->sli4_hba.UERRLOregaddr);
- uerr_sta_hi = readl(phba->sli4_hba.UERRHIregaddr);
- if (uerr_sta_lo || uerr_sta_hi) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "1423 HBA Unrecoverable error: "
- "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
- "online0_reg=0x%x, online1_reg=0x%x\n",
- uerr_sta_lo, uerr_sta_hi,
- onlnreg0, onlnreg1);
- /* TEMP: as the driver error recover logic is not
- * fully developed, we just log the error message
- * and the device error attention action is now
- * temporarily disabled.
- */
- return 0;
- phba->work_status[0] = uerr_sta_lo;
- phba->work_status[1] = uerr_sta_hi;
- spin_lock_irq(&phba->hbalock);
- /* Set the driver HA work bitmap */
- phba->work_ha |= HA_ERATT;
- /* Indicate polling handles this ERATT */
- phba->hba_flag |= HBA_ERATT_HANDLED;
- spin_unlock_irq(&phba->hbalock);
- return 1;
- }
+ uerr_sta_lo = readl(phba->sli4_hba.UERRLOregaddr);
+ uerr_sta_hi = readl(phba->sli4_hba.UERRHIregaddr);
+ if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
+ (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1423 HBA Unrecoverable error: "
+ "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
+ "ue_mask_lo_reg=0x%x, ue_mask_hi_reg=0x%x\n",
+ uerr_sta_lo, uerr_sta_hi,
+ phba->sli4_hba.ue_mask_lo,
+ phba->sli4_hba.ue_mask_hi);
+ phba->work_status[0] = uerr_sta_lo;
+ phba->work_status[1] = uerr_sta_hi;
+ /* Set the driver HA work bitmap */
+ phba->work_ha |= HA_ERATT;
+ /* Indicate polling handles this ERATT */
+ phba->hba_flag |= HBA_ERATT_HANDLED;
+ return 1;
}
return 0;
}
lpfc_sli_sp_intr_handler(int irq, void *dev_id)
{
struct lpfc_hba *phba;
- uint32_t ha_copy;
+ uint32_t ha_copy, hc_copy;
uint32_t work_ha_copy;
unsigned long status;
unsigned long iflag;
}
/* Clear up only attention source related to slow-path */
+ hc_copy = readl(phba->HCregaddr);
+ writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
+ HC_LAINT_ENA | HC_ERINT_ENA),
+ phba->HCregaddr);
writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
phba->HAregaddr);
+ writel(hc_copy, phba->HCregaddr);
readl(phba->HAregaddr); /* flush */
spin_unlock_irqrestore(&phba->hbalock, iflag);
} else
*/
if ((HS_FFER1 & phba->work_hs) &&
((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
- HS_FFER6 | HS_FFER7) & phba->work_hs)) {
+ HS_FFER6 | HS_FFER7 | HS_FFER8) &
+ phba->work_hs)) {
phba->hba_flag |= DEFER_ERATT;
/* Clear all interrupt enable conditions */
writel(0, phba->HCregaddr);
if (pmb->mbox_cmpl) {
lpfc_sli_pcimem_bcopy(mbox, pmbox,
MAILBOX_CMD_SIZE);
+ if (pmb->out_ext_byte_len &&
+ pmb->context2)
+ lpfc_sli_pcimem_bcopy(
+ phba->mbox_ext,
+ pmb->context2,
+ pmb->out_ext_byte_len);
}
if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
KERN_ERR,
LOG_MBOX | LOG_SLI,
"0350 rc should have"
- "been MBX_BUSY");
+ "been MBX_BUSY\n");
if (rc != MBX_NOT_FINISHED)
goto send_current_mbox;
}
if (rc != MBX_SUCCESS)
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
LOG_SLI, "0349 rc should be "
- "MBX_SUCCESS");
+ "MBX_SUCCESS\n");
}
spin_lock_irqsave(&phba->hbalock, iflag);
struct lpfc_hba *phba;
irqreturn_t sp_irq_rc, fp_irq_rc;
unsigned long status1, status2;
+ uint32_t hc_copy;
/*
* Get the driver's phba structure from the dev_id and
* If there is deferred error attention, do not check for any interrupt.
*/
if (unlikely(phba->hba_flag & DEFER_ERATT)) {
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock(&phba->hbalock);
return IRQ_NONE;
}
/* Clear attention sources except link and error attentions */
+ hc_copy = readl(phba->HCregaddr);
+ writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
+ | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
+ phba->HCregaddr);
writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
+ writel(hc_copy, phba->HCregaddr);
readl(phba->HAregaddr); /* flush */
spin_unlock(&phba->hbalock);
}
}
+/**
+ * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn
+ * @phba: pointer to lpfc hba data structure
+ * @pIocbIn: pointer to the rspiocbq
+ * @pIocbOut: pointer to the cmdiocbq
+ * @wcqe: pointer to the completed WCQE
+ *
+ * This routine transfers the fields of a command iocbq to a response iocbq
+ * by copying all the IOCB fields from the command iocbq and filling in the
+ * completion status information from the completed WCQE.
+ **/
static void
-lpfc_sli4_iocb_param_transfer(struct lpfc_iocbq *pIocbIn,
+lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
+ struct lpfc_iocbq *pIocbIn,
struct lpfc_iocbq *pIocbOut,
struct lpfc_wcqe_complete *wcqe)
{
+ unsigned long iflags;
size_t offset = offsetof(struct lpfc_iocbq, iocb);
memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
sizeof(struct lpfc_iocbq) - offset);
- memset(&pIocbIn->sli4_info, 0,
- sizeof(struct lpfc_sli4_rspiocb_info));
/* Map WCQE parameters into irspiocb parameters */
pIocbIn->iocb.ulpStatus = bf_get(lpfc_wcqe_c_status, wcqe);
if (pIocbOut->iocb_flag & LPFC_IO_FCP)
wcqe->total_data_placed;
else
pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
- else
+ else {
pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
- /* Load in additional WCQE parameters */
- pIocbIn->sli4_info.hw_status = bf_get(lpfc_wcqe_c_hw_status, wcqe);
- pIocbIn->sli4_info.bfield = 0;
- if (bf_get(lpfc_wcqe_c_xb, wcqe))
- pIocbIn->sli4_info.bfield |= LPFC_XB;
- if (bf_get(lpfc_wcqe_c_pv, wcqe)) {
- pIocbIn->sli4_info.bfield |= LPFC_PV;
- pIocbIn->sli4_info.priority =
- bf_get(lpfc_wcqe_c_priority, wcqe);
+ pIocbIn->iocb.un.genreq64.bdl.bdeSize = wcqe->total_data_placed;
+ }
+
+ /* Pick up HBA exchange busy condition */
+ if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ }
+}
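+
+/*
+ * The LPFC_EXCHANGE_BUSY flag set above is consumed in the synchronous
+ * wait path earlier in this file, which propagates it to the scsi
+ * buffer; a condensed sketch of that consumer:
+ *
+ *	if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
+ *	    !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
+ *		lpfc_cmd = container_of(cmdiocbq, struct lpfc_scsi_buf,
+ *					cur_iocbq);
+ *		lpfc_cmd->exch_busy =
+ *			rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY;
+ *	}
+ */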
+
+/**
+ * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe
+ * @phba: Pointer to HBA context object.
+ * @wcqe: Pointer to work-queue completion queue entry.
+ *
+ * This routine handles an ELS work-queue completion event and constructs
+ * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
+ * discovery engine to handle.
+ *
+ * Return: Pointer to the response IOCBQ, NULL otherwise.
+ **/
+static struct lpfc_iocbq *
+lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
+ struct lpfc_iocbq *irspiocbq)
+{
+ struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
+ struct lpfc_iocbq *cmdiocbq;
+ struct lpfc_wcqe_complete *wcqe;
+ unsigned long iflags;
+
+ wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ pring->stats.iocb_event++;
+ /* Look up the ELS command IOCB and create pseudo response IOCB */
+ cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
+ bf_get(lpfc_wcqe_c_request_tag, wcqe));
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+ if (unlikely(!cmdiocbq)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "0386 ELS complete with no corresponding "
+ "cmdiocb: iotag (%d)\n",
+ bf_get(lpfc_wcqe_c_request_tag, wcqe));
+ lpfc_sli_release_iocbq(phba, irspiocbq);
+ return NULL;
}
+
+ /* Fake the irspiocbq and copy necessary response information */
+ lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);
+
+ return irspiocbq;
}
/**
lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba,
struct lpfc_wcqe_complete *wcqe)
{
- struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
- struct lpfc_iocbq *cmdiocbq;
struct lpfc_iocbq *irspiocbq;
unsigned long iflags;
- bool workposted = false;
-
- spin_lock_irqsave(&phba->hbalock, iflags);
- pring->stats.iocb_event++;
- /* Look up the ELS command IOCB and create pseudo response IOCB */
- cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
- bf_get(lpfc_wcqe_c_request_tag, wcqe));
- spin_unlock_irqrestore(&phba->hbalock, iflags);
-
- if (unlikely(!cmdiocbq)) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
- "0386 ELS complete with no corresponding "
- "cmdiocb: iotag (%d)\n",
- bf_get(lpfc_wcqe_c_request_tag, wcqe));
- return workposted;
- }
+ struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_FCP_RING];
- /* Fake the irspiocbq and copy necessary response information */
+ /* Get an irspiocbq for later ELS response processing use */
irspiocbq = lpfc_sli_get_iocbq(phba);
if (!irspiocbq) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "0387 Failed to allocate an iocbq\n");
- return workposted;
+ "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
+ "fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n",
+ pring->txq_cnt, phba->iocb_cnt,
+ phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt,
+ phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt);
+ return false;
}
- lpfc_sli4_iocb_param_transfer(irspiocbq, cmdiocbq, wcqe);
- /* Add the irspiocb to the response IOCB work list */
+ /* Save off the slow-path queue event for the work thread to process */
+ memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
spin_lock_irqsave(&phba->hbalock, iflags);
- list_add_tail(&irspiocbq->list, &phba->sli4_hba.sp_rspiocb_work_queue);
- /* Indicate ELS ring attention */
- phba->work_ha |= (HA_R0ATT << (4*LPFC_ELS_RING));
+ list_add_tail(&irspiocbq->cq_event.list,
+ &phba->sli4_hba.sp_queue_event);
+ phba->hba_flag |= HBA_SP_QUEUE_EVT;
spin_unlock_irqrestore(&phba->hbalock, iflags);
- workposted = true;
- return workposted;
+ return true;
}
/**
}
/**
- * lpfc_sli4_sp_handle_wcqe - Process a work-queue completion queue entry
+ * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
* @phba: Pointer to HBA context object.
- * @cq: Pointer to the completion queue.
- * @wcqe: Pointer to a completion queue entry.
+ * @rcqe: Pointer to receive-queue completion queue entry.
*
- * This routine process a slow-path work-queue completion queue entry.
+ * This routine processes a receive-queue completion queue entry.
*
* Return: true if work posted to worker thread, otherwise false.
**/
static bool
-lpfc_sli4_sp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
- struct lpfc_cqe *cqe)
+lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
{
- struct lpfc_wcqe_complete wcqe;
bool workposted = false;
+ struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
+ struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
+ struct hbq_dmabuf *dma_buf;
+ uint32_t status;
+ unsigned long iflags;
- /* Copy the work queue CQE and convert endian order if needed */
- lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
+ if (bf_get(lpfc_rcqe_rq_id, rcqe) != hrq->queue_id)
+ goto out;
- /* Check and process for different type of WCQE and dispatch */
- switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
- case CQE_CODE_COMPL_WQE:
- /* Process the WQ complete event */
- workposted = lpfc_sli4_sp_handle_els_wcqe(phba,
- (struct lpfc_wcqe_complete *)&wcqe);
- break;
- case CQE_CODE_RELEASE_WQE:
- /* Process the WQ release event */
- lpfc_sli4_sp_handle_rel_wcqe(phba,
- (struct lpfc_wcqe_release *)&wcqe);
- break;
- case CQE_CODE_XRI_ABORTED:
- /* Process the WQ XRI abort event */
- workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
- (struct sli4_wcqe_xri_aborted *)&wcqe);
- break;
- default:
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "0388 Not a valid WCQE code: x%x\n",
- bf_get(lpfc_wcqe_c_code, &wcqe));
- break;
- }
- return workposted;
-}
-
-/**
- * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
- * @phba: Pointer to HBA context object.
- * @rcqe: Pointer to receive-queue completion queue entry.
- *
- * This routine process a receive-queue completion queue entry.
- *
- * Return: true if work posted to worker thread, otherwise false.
- **/
-static bool
-lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
-{
- struct lpfc_rcqe rcqe;
- bool workposted = false;
- struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
- struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
- struct hbq_dmabuf *dma_buf;
- uint32_t status;
- unsigned long iflags;
-
- /* Copy the receive queue CQE and convert endian order if needed */
- lpfc_sli_pcimem_bcopy(cqe, &rcqe, sizeof(struct lpfc_rcqe));
- lpfc_sli4_rq_release(hrq, drq);
- if (bf_get(lpfc_rcqe_code, &rcqe) != CQE_CODE_RECEIVE)
- goto out;
- if (bf_get(lpfc_rcqe_rq_id, &rcqe) != hrq->queue_id)
- goto out;
-
- status = bf_get(lpfc_rcqe_status, &rcqe);
+ status = bf_get(lpfc_rcqe_status, rcqe);
switch (status) {
case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"2537 Receive Frame Truncated!!\n");
case FC_STATUS_RQ_SUCCESS:
+ lpfc_sli4_rq_release(hrq, drq);
spin_lock_irqsave(&phba->hbalock, iflags);
dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
if (!dma_buf) {
spin_unlock_irqrestore(&phba->hbalock, iflags);
goto out;
}
- memcpy(&dma_buf->rcqe, &rcqe, sizeof(rcqe));
+ memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
/* save off the frame for the work thread to process */
- list_add_tail(&dma_buf->dbuf.list, &phba->rb_pend_list);
+ list_add_tail(&dma_buf->cq_event.list,
+ &phba->sli4_hba.sp_queue_event);
/* Frame received */
- phba->hba_flag |= HBA_RECEIVE_BUFFER;
+ phba->hba_flag |= HBA_SP_QUEUE_EVT;
spin_unlock_irqrestore(&phba->hbalock, iflags);
workposted = true;
break;
}
out:
return workposted;
+}
+
+/**
+ * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
+ * @phba: Pointer to HBA context object.
+ * @cq: Pointer to the completion queue.
+ * @cqe: Pointer to a completion queue entry.
+ *
+ * This routine processes a slow-path work-queue or receive-queue completion
+ * queue entry.
+ *
+ * Return: true if work posted to worker thread, otherwise false.
+ **/
+static bool
+lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
+ struct lpfc_cqe *cqe)
+{
+ struct lpfc_cqe cqevt;
+ bool workposted = false;
+
+ /* Copy the slow-path CQE and convert endian order if needed */
+ lpfc_sli_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
+ /* Check the CQE code and dispatch to the matching handler */
+ switch (bf_get(lpfc_cqe_code, &cqevt)) {
+ case CQE_CODE_COMPL_WQE:
+ /* Process the WQ/RQ complete event */
+ phba->last_completion_time = jiffies;
+ workposted = lpfc_sli4_sp_handle_els_wcqe(phba,
+ (struct lpfc_wcqe_complete *)&cqevt);
+ break;
+ case CQE_CODE_RELEASE_WQE:
+ /* Process the WQ release event */
+ lpfc_sli4_sp_handle_rel_wcqe(phba,
+ (struct lpfc_wcqe_release *)&cqevt);
+ break;
+ case CQE_CODE_XRI_ABORTED:
+ /* Process the WQ XRI abort event */
+ phba->last_completion_time = jiffies;
+ workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
+ (struct sli4_wcqe_xri_aborted *)&cqevt);
+ break;
+ case CQE_CODE_RECEIVE:
+ /* Process the RQ event */
+ phba->last_completion_time = jiffies;
+ workposted = lpfc_sli4_sp_handle_rcqe(phba,
+ (struct lpfc_rcqe *)&cqevt);
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0388 Not a valid WCQE code: x%x\n",
+ bf_get(lpfc_cqe_code, &cqevt));
+ break;
+ }
+ return workposted;
}
/**
int ecount = 0;
uint16_t cqid;
- if (bf_get(lpfc_eqe_major_code, eqe) != 0 ||
- bf_get(lpfc_eqe_minor_code, eqe) != 0) {
+ if (bf_get_le32(lpfc_eqe_major_code, eqe) != 0) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0359 Not a valid slow-path completion "
"event: majorcode=x%x, minorcode=x%x\n",
- bf_get(lpfc_eqe_major_code, eqe),
- bf_get(lpfc_eqe_minor_code, eqe));
+ bf_get_le32(lpfc_eqe_major_code, eqe),
+ bf_get_le32(lpfc_eqe_minor_code, eqe));
return;
}
/* Get the reference to the corresponding CQ */
- cqid = bf_get(lpfc_eqe_resource_id, eqe);
+ cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
/* Search for completion queue pointer matching this cqid */
speq = phba->sli4_hba.sp_eq;
}
}
if (unlikely(!cq)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "0365 Slow-path CQ identifier (%d) does "
- "not exist\n", cqid);
+ if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0365 Slow-path CQ identifier "
+ "(%d) does not exist\n", cqid);
return;
}
break;
case LPFC_WCQ:
while ((cqe = lpfc_sli4_cq_get(cq))) {
- workposted |= lpfc_sli4_sp_handle_wcqe(phba, cq, cqe);
- if (!(++ecount % LPFC_GET_QE_REL_INT))
- lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
- }
- break;
- case LPFC_RCQ:
- while ((cqe = lpfc_sli4_cq_get(cq))) {
- workposted |= lpfc_sli4_sp_handle_rcqe(phba, cqe);
+ workposted |= lpfc_sli4_sp_handle_cqe(phba, cq, cqe);
if (!(++ecount % LPFC_GET_QE_REL_INT))
lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
}
}
/* Fake the irspiocb and copy necessary response information */
- lpfc_sli4_iocb_param_transfer(&irspiocbq, cmdiocbq, wcqe);
+ lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);
+
+ if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ }
/* Pass the cmd_iocb and the rsp state to the upper layer */
(cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
case CQE_CODE_COMPL_WQE:
/* Process the WQ complete event */
+ phba->last_completion_time = jiffies;
lpfc_sli4_fp_handle_fcp_wcqe(phba,
(struct lpfc_wcqe_complete *)&wcqe);
break;
break;
case CQE_CODE_XRI_ABORTED:
/* Process the WQ XRI abort event */
+ phba->last_completion_time = jiffies;
workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
(struct sli4_wcqe_xri_aborted *)&wcqe);
break;
uint16_t cqid;
int ecount = 0;
- if (unlikely(bf_get(lpfc_eqe_major_code, eqe) != 0) ||
- unlikely(bf_get(lpfc_eqe_minor_code, eqe) != 0)) {
+ if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0366 Not a valid fast-path completion "
"event: majorcode=x%x, minorcode=x%x\n",
- bf_get(lpfc_eqe_major_code, eqe),
- bf_get(lpfc_eqe_minor_code, eqe));
+ bf_get_le32(lpfc_eqe_major_code, eqe),
+ bf_get_le32(lpfc_eqe_minor_code, eqe));
return;
}
cq = phba->sli4_hba.fcp_cq[fcp_cqidx];
if (unlikely(!cq)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "0367 Fast-path completion queue does not "
- "exist\n");
+ if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0367 Fast-path completion queue "
+ "does not exist\n");
return;
}
/* Get the reference to the corresponding CQ */
- cqid = bf_get(lpfc_eqe_resource_id, eqe);
+ cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
if (unlikely(cqid != cq->queue_id)) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0368 Miss-matched fast-path completion "
* lpfc_sli4_queue_free - free a queue structure and associated memory
* @queue: The queue structure to free.
*
- * This function frees a queue structure and the DMAable memeory used for
+ * This function frees a queue structure and the DMAable memory used for
* the host resident queue. This function must be called after destroying the
* queue on the HBA.
**/
while (!list_empty(&queue->page_list)) {
list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
list);
- dma_free_coherent(&queue->phba->pcidev->dev, PAGE_SIZE,
+ dma_free_coherent(&queue->phba->pcidev->dev, SLI4_PAGE_SIZE,
dmabuf->virt, dmabuf->phys);
kfree(dmabuf);
}
struct lpfc_dmabuf *dmabuf;
int x, total_qe_count;
void *dma_pointer;
+ uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+ if (!phba->sli4_hba.pc_sli4_params.supported)
+ hw_page_size = SLI4_PAGE_SIZE;
queue = kzalloc(sizeof(struct lpfc_queue) +
(sizeof(union sli4_qe) * entry_count), GFP_KERNEL);
if (!queue)
return NULL;
- queue->page_count = (PAGE_ALIGN(entry_size * entry_count))/PAGE_SIZE;
+ queue->page_count = (ALIGN(entry_size * entry_count,
+ hw_page_size))/hw_page_size;
INIT_LIST_HEAD(&queue->list);
INIT_LIST_HEAD(&queue->page_list);
INIT_LIST_HEAD(&queue->child_list);
if (!dmabuf)
goto out_fail;
dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
- PAGE_SIZE, &dmabuf->phys,
+ hw_page_size, &dmabuf->phys,
GFP_KERNEL);
if (!dmabuf->virt) {
kfree(dmabuf);
goto out_fail;
}
+ memset(dmabuf->virt, 0, hw_page_size);
dmabuf->buffer_tag = x;
list_add_tail(&dmabuf->list, &queue->page_list);
/* initialize queue's entry array */
dma_pointer = dmabuf->virt;
for (; total_qe_count < entry_count &&
- dma_pointer < (PAGE_SIZE + dmabuf->virt);
+ dma_pointer < (hw_page_size + dmabuf->virt);
total_qe_count++, dma_pointer += entry_size) {
queue->qe[total_qe_count].address = dma_pointer;
}
* command to finish before continuing.
*
* On success this function will return a zero. If unable to allocate enough
- * memory this function will return ENOMEM. If the queue create mailbox command
- * fails this function will return ENXIO.
+ * memory this function will return -ENOMEM. If the queue create mailbox
+ * command fails this function will return -ENXIO.
**/
uint32_t
lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint16_t imax)
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
uint16_t dmult;
+ uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+
+ if (!phba->sli4_hba.pc_sli4_params.supported)
+ hw_page_size = SLI4_PAGE_SIZE;
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
break;
}
list_for_each_entry(dmabuf, &eq->page_list, list) {
+ memset(dmabuf->virt, 0, hw_page_size);
eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
putPaddrLow(dmabuf->phys);
eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
eq->host_index = 0;
eq->hba_index = 0;
- if (rc != MBX_TIMEOUT)
- mempool_free(mbox, phba->mbox_mem_pool);
+ mempool_free(mbox, phba->mbox_mem_pool);
return status;
}
* command to finish before continuing.
*
* On success this function will return a zero. If unable to allocate enough
- * memory this function will return ENOMEM. If the queue create mailbox command
- * fails this function will return ENXIO.
+ * memory this function will return -ENOMEM. If the queue create mailbox
+ * command fails this function will return -ENXIO.
**/
uint32_t
lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
int rc, length, status = 0;
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
+ uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+
+ if (!phba->sli4_hba.pc_sli4_params.supported)
+ hw_page_size = SLI4_PAGE_SIZE;
+
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
break;
}
list_for_each_entry(dmabuf, &cq->page_list, list) {
+ memset(dmabuf->virt, 0, hw_page_size);
cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
putPaddrLow(dmabuf->phys);
cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
cq->host_index = 0;
cq->hba_index = 0;
-out:
- if (rc != MBX_TIMEOUT)
- mempool_free(mbox, phba->mbox_mem_pool);
+out:
+ mempool_free(mbox, phba->mbox_mem_pool);
return status;
}
/**
+ * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration
+ * @phba: HBA structure that indicates port to create a queue on.
+ * @mq: The queue structure to use to create the mailbox queue.
+ * @mbox: An allocated pointer to type LPFC_MBOXQ_t
+ * @cq: The completion queue to associate with this mq.
+ *
+ * This function provides failback (fb) functionality when the
+ * mq_create_ext fails on older FW generations. Its purpose is otherwise
+ * identical to mq_create_ext.
+ *
+ * This routine cannot fail as all attributes were previously accessed and
+ * initialized in mq_create_ext.
+ **/
+static void
+lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
+ LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
+{
+ struct lpfc_mbx_mq_create *mq_create;
+ struct lpfc_dmabuf *dmabuf;
+ int length;
+
+ length = (sizeof(struct lpfc_mbx_mq_create) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_MQ_CREATE,
+ length, LPFC_SLI4_MBX_EMBED);
+ mq_create = &mbox->u.mqe.un.mq_create;
+ bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
+ mq->page_count);
+ bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
+ cq->queue_id);
+ bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
+ switch (mq->entry_count) {
+ case 16:
+ bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
+ LPFC_MQ_CNT_16);
+ break;
+ case 32:
+ bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
+ LPFC_MQ_CNT_32);
+ break;
+ case 64:
+ bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
+ LPFC_MQ_CNT_64);
+ break;
+ case 128:
+ bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
+ LPFC_MQ_CNT_128);
+ break;
+ }
+ list_for_each_entry(dmabuf, &mq->page_list, list) {
+ mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
+ putPaddrLow(dmabuf->phys);
+ mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
+ putPaddrHigh(dmabuf->phys);
+ }
+}
+
+/**
* lpfc_mq_create - Create a mailbox Queue on the HBA
* @phba: HBA structure that indicates port to create a queue on.
* @mq: The queue structure to use to create the mailbox queue.
+ * @cq: The completion queue to associate with this mq.
+ * @subtype: The queue's subtype.
*
* This function creates a mailbox queue, as detailed in @mq, on a port,
* described by @phba by sending a MQ_CREATE mailbox command to the HBA.
* command to finish before continuing.
*
* On success this function will return a zero. If unable to allocate enough
- * memory this function will return ENOMEM. If the queue create mailbox command
- * fails this function will return ENXIO.
+ * memory this function will return -ENOMEM. If the queue create mailbox
+ * command fails this function will return -ENXIO.
**/
-uint32_t
+int32_t
lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
struct lpfc_queue *cq, uint32_t subtype)
{
struct lpfc_mbx_mq_create *mq_create;
+ struct lpfc_mbx_mq_create_ext *mq_create_ext;
struct lpfc_dmabuf *dmabuf;
LPFC_MBOXQ_t *mbox;
int rc, length, status = 0;
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
+ uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+
+ if (!phba->sli4_hba.pc_sli4_params.supported)
+ hw_page_size = SLI4_PAGE_SIZE;
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
return -ENOMEM;
- length = (sizeof(struct lpfc_mbx_mq_create) -
+ length = (sizeof(struct lpfc_mbx_mq_create_ext) -
sizeof(struct lpfc_sli4_cfg_mhdr));
lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
- LPFC_MBOX_OPCODE_MQ_CREATE,
+ LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
length, LPFC_SLI4_MBX_EMBED);
- mq_create = &mbox->u.mqe.un.mq_create;
- bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
+
+ mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
+ bf_set(lpfc_mbx_mq_create_ext_num_pages, &mq_create_ext->u.request,
mq->page_count);
- bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
- cq->queue_id);
- bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
+ bf_set(lpfc_mbx_mq_create_ext_async_evt_link, &mq_create_ext->u.request,
+ 1);
+ bf_set(lpfc_mbx_mq_create_ext_async_evt_fcfste,
+ &mq_create_ext->u.request, 1);
+ bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
+ &mq_create_ext->u.request, 1);
+ bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
+ cq->queue_id);
+ bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
switch (mq->entry_count) {
default:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
return -EINVAL;
/* otherwise default to smallest count (drop through) */
case 16:
- bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
+ bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context,
LPFC_MQ_CNT_16);
break;
case 32:
- bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
+ bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context,
LPFC_MQ_CNT_32);
break;
case 64:
- bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
+ bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context,
LPFC_MQ_CNT_64);
break;
case 128:
- bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
+ bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context,
LPFC_MQ_CNT_128);
break;
}
list_for_each_entry(dmabuf, &mq->page_list, list) {
- mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
+ memset(dmabuf->virt, 0, hw_page_size);
+ mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
putPaddrLow(dmabuf->phys);
- mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
+ mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
putPaddrHigh(dmabuf->phys);
}
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+ shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
+ mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
+ &mq_create_ext->u.response);
+ if (rc != MBX_SUCCESS) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "2795 MQ_CREATE_EXT failed with "
+ "status x%x. Failback to MQ_CREATE.\n",
+ rc);
+ lpfc_mq_create_fb_init(phba, mq, mbox, cq);
+ mq_create = &mbox->u.mqe.un.mq_create;
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+ shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
+ mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
+ &mq_create->u.response);
+ }
+
/* The IOCTL status is embedded in the mailbox subheader. */
- shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
if (shdr_status || shdr_add_status || rc) {
status = -ENXIO;
goto out;
}
- mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, &mq_create->u.response);
if (mq->queue_id == 0xFFFF) {
status = -ENXIO;
goto out;
/* link the mq onto the parent cq child list */
list_add_tail(&mq->list, &cq->child_list);
out:
- if (rc != MBX_TIMEOUT)
- mempool_free(mbox, phba->mbox_mem_pool);
+ mempool_free(mbox, phba->mbox_mem_pool);
return status;
}
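+
+/*
+ * Minimal usage sketch (hypothetical pointers, error handling elided):
+ * mq and cq are assumed to have been allocated with
+ * lpfc_sli4_queue_alloc(), and LPFC_MBOX is assumed to be the mailbox
+ * queue subtype used by this driver:
+ *
+ *	if (lpfc_mq_create(phba, mq, cq, LPFC_MBOX))
+ *		goto out_destroy_cq;	// hypothetical error label
+ */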
* command to finish before continuing.
*
* On success this function will return a zero. If unable to allocate enough
- * memory this function will return ENOMEM. If the queue create mailbox command
- * fails this function will return ENXIO.
+ * memory this function will return -ENOMEM. If the queue create mailbox
+ * command fails this function will return -ENXIO.
**/
uint32_t
lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
int rc, length, status = 0;
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
+ uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+
+ if (!phba->sli4_hba.pc_sli4_params.supported)
+ hw_page_size = SLI4_PAGE_SIZE;
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
cq->queue_id);
list_for_each_entry(dmabuf, &wq->page_list, list) {
+ memset(dmabuf->virt, 0, hw_page_size);
wq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
putPaddrLow(dmabuf->phys);
wq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
/* link the wq onto the parent cq child list */
list_add_tail(&wq->list, &cq->child_list);
out:
- if (rc == MBX_TIMEOUT)
- mempool_free(mbox, phba->mbox_mem_pool);
+ mempool_free(mbox, phba->mbox_mem_pool);
return status;
}
* mailbox command to finish before continuing.
*
* On success this function will return a zero. If unable to allocate enough
- * memory this function will return ENOMEM. If the queue create mailbox command
- * fails this function will return ENXIO.
+ * memory this function will return -ENOMEM. If the queue create mailbox
+ * command fails this function will return -ENXIO.
**/
uint32_t
lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
int rc, length, status = 0;
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
+ uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+
+ if (!phba->sli4_hba.pc_sli4_params.supported)
+ hw_page_size = SLI4_PAGE_SIZE;
if (hrq->entry_count != drq->entry_count)
return -EINVAL;
bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
LPFC_HDR_BUF_SIZE);
list_for_each_entry(dmabuf, &hrq->page_list, list) {
+ memset(dmabuf->virt, 0, hw_page_size);
rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
putPaddrLow(dmabuf->phys);
rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
list_add_tail(&drq->list, &cq->child_list);
out:
- if (rc != MBX_TIMEOUT)
- mempool_free(mbox, phba->mbox_mem_pool);
+ mempool_free(mbox, phba->mbox_mem_pool);
return status;
}
* The @eq struct is used to get the queue ID of the queue to destroy.
*
* On success this function will return a zero. If the queue destroy mailbox
- * command fails this function will return ENXIO.
+ * command fails this function will return -ENXIO.
**/
uint32_t
lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
/* Remove eq from any list */
list_del_init(&eq->list);
- if (rc != MBX_TIMEOUT)
- mempool_free(mbox, eq->phba->mbox_mem_pool);
+ mempool_free(mbox, eq->phba->mbox_mem_pool);
return status;
}
* The @cq struct is used to get the queue ID of the queue to destroy.
*
* On success this function will return a zero. If the queue destroy mailbox
- * command fails this function will return ENXIO.
+ * command fails this function will return -ENXIO.
**/
uint32_t
lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
}
/* Remove cq from any list */
list_del_init(&cq->list);
- if (rc != MBX_TIMEOUT)
- mempool_free(mbox, cq->phba->mbox_mem_pool);
+ mempool_free(mbox, cq->phba->mbox_mem_pool);
return status;
}
* The @mq struct is used to get the queue ID of the queue to destroy.
*
* On success this function will return a zero. If the queue destroy mailbox
- * command fails this function will return ENXIO.
+ * command fails this function will return -ENXIO.
**/
uint32_t
lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
}
/* Remove mq from any list */
list_del_init(&mq->list);
- if (rc != MBX_TIMEOUT)
- mempool_free(mbox, mq->phba->mbox_mem_pool);
+ mempool_free(mbox, mq->phba->mbox_mem_pool);
return status;
}
* The @wq struct is used to get the queue ID of the queue to destroy.
*
* On success this function will return a zero. If the queue destroy mailbox
- * command fails this function will return ENXIO.
+ * command fails this function will return -ENXIO.
**/
uint32_t
lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
}
/* Remove wq from any list */
list_del_init(&wq->list);
- if (rc != MBX_TIMEOUT)
- mempool_free(mbox, wq->phba->mbox_mem_pool);
+ mempool_free(mbox, wq->phba->mbox_mem_pool);
return status;
}
* The @rq struct is used to get the queue ID of the queue to destroy.
*
* On success this function will return a zero. If the queue destroy mailbox
- * command fails this function will return ENXIO.
+ * command fails this function will return -ENXIO.
**/
uint32_t
lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
}
list_del_init(&hrq->list);
list_del_init(&drq->list);
- if (rc != MBX_TIMEOUT)
- mempool_free(mbox, hrq->phba->mbox_mem_pool);
+ mempool_free(mbox, hrq->phba->mbox_mem_pool);
return status;
}
}
return 0;
}
-/**
- * lpfc_sli4_remove_all_sgl_pages - Post scatter gather list for an XRI to HBA
- * @phba: The virtual port for which this call being executed.
- *
- * This routine will remove all of the sgl pages registered with the hba.
- *
- * Return codes:
- * 0 - Success
- * -ENXIO, -ENOMEM - Failure
- **/
-int
-lpfc_sli4_remove_all_sgl_pages(struct lpfc_hba *phba)
-{
- LPFC_MBOXQ_t *mbox;
- int rc;
- uint32_t shdr_status, shdr_add_status;
- union lpfc_sli4_cfg_shdr *shdr;
-
- mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- if (!mbox)
- return -ENOMEM;
-
- lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
- LPFC_MBOX_OPCODE_FCOE_REMOVE_SGL_PAGES, 0,
- LPFC_SLI4_MBX_EMBED);
- if (!phba->sli4_hba.intr_enable)
- rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
- else
- rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
- /* The IOCTL status is embedded in the mailbox subheader. */
- shdr = (union lpfc_sli4_cfg_shdr *)
- &mbox->u.mqe.un.sli4_config.header.cfg_shdr;
- shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
- shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
- if (rc != MBX_TIMEOUT)
- mempool_free(mbox, phba->mbox_mem_pool);
- if (shdr_status || shdr_add_status || rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2512 REMOVE_ALL_SGL_PAGES mailbox failed with "
- "status x%x add_status x%x, mbx status x%x\n",
- shdr_status, shdr_add_status, rc);
- rc = -ENXIO;
- }
- return rc;
-}
/**
* lpfc_sli4_next_xritag - Get an xritag for the io
return xritag;
}
spin_unlock_irq(&phba->hbalock);
-
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
"2004 Failed to allocate XRI.last XRITAG is %d"
" Max XRI is %d, Used XRI is %d\n",
phba->sli4_hba.next_xri,
reqlen = els_xri_cnt * sizeof(struct sgl_page_pairs) +
sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
- if (reqlen > PAGE_SIZE) {
+ if (reqlen > SLI4_PAGE_SIZE) {
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"2559 Block sgl registration required DMA "
"size (%d) great than a page\n", reqlen);
lpfc_sli4_mbox_cmd_free(phba, mbox);
return -ENOMEM;
}
-
/* Get the first SGE entry from the non-embedded DMA memory */
- if (unlikely(!mbox->sge_array)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
- "2525 Failed to get the non-embedded SGE "
- "virtual address\n");
- lpfc_sli4_mbox_cmd_free(phba, mbox);
- return -ENOMEM;
- }
viraddr = mbox->sge_array->addr[0];
/* Set up the SGL pages in the non-embedded DMA pages */
sgl_pg_pairs++;
}
bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
- pg_pairs = (pg_pairs > 0) ? (pg_pairs - 1) : pg_pairs;
- bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
+ bf_set(lpfc_post_sgl_pages_xricnt, sgl, els_xri_cnt);
/* Perform endian conversion if necessary */
sgl->word0 = cpu_to_le32(sgl->word0);
/* Calculate the requested length of the dma memory */
reqlen = cnt * sizeof(struct sgl_page_pairs) +
sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
- if (reqlen > PAGE_SIZE) {
+ if (reqlen > SLI4_PAGE_SIZE) {
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"0217 Block sgl registration required DMA "
"size (%d) great than a page\n", reqlen);
lpfc_sli4_mbox_cmd_free(phba, mbox);
return -ENOMEM;
}
-
/* Get the first SGE entry from the non-embedded DMA memory */
- if (unlikely(!mbox->sge_array)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
- "2565 Failed to get the non-embedded SGE "
- "virtual address\n");
- lpfc_sli4_mbox_cmd_free(phba, mbox);
- return -ENOMEM;
- }
viraddr = mbox->sge_array->addr[0];
/* Set up the SGL pages in the non-embedded DMA pages */
}
/**
- * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
- * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
+ * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp
+ * @vport: The vport to work on.
*
- * This function searches through the existing incomplete sequences that have
- * been sent to this @vport. If the frame matches one of the incomplete
- * sequences then the dbuf in the @dmabuf is added to the list of frames that
- * make up that sequence. If no sequence is found that matches this frame then
- * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list
- * This function returns a pointer to the first dmabuf in the sequence list that
- * the frame was linked to.
+ * This function updates the receive sequence time stamp for this vport. The
+ * receive sequence time stamp indicates the time that the last frame of the
+ * sequence that has been idle for the longest amount of time was received.
+ * The driver uses this time stamp to determine if any received sequences have
+ * timed out.
**/
-static struct hbq_dmabuf *
-lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
+void
+lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
{
- struct fc_frame_header *new_hdr;
- struct fc_frame_header *temp_hdr;
- struct lpfc_dmabuf *d_buf;
struct lpfc_dmabuf *h_buf;
- struct hbq_dmabuf *seq_dmabuf = NULL;
- struct hbq_dmabuf *temp_dmabuf = NULL;
+ struct hbq_dmabuf *dmabuf = NULL;
- new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
- /* Use the hdr_buf to find the sequence that this frame belongs to */
- list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
- temp_hdr = (struct fc_frame_header *)h_buf->virt;
+ /* get the oldest sequence on the rcv list */
+ h_buf = list_get_first(&vport->rcv_buffer_list,
+ struct lpfc_dmabuf, list);
+ if (!h_buf)
+ return;
+ dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
+ vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
+}
+
+/**
+ * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences.
+ * @vport: The vport that the received sequences were sent to.
+ *
+ * This function cleans up all outstanding received sequences. This is called
+ * by the driver when a link event or user action invalidates all the received
+ * sequences.
+ **/
+void
+lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
+{
+ struct lpfc_dmabuf *h_buf, *hnext;
+ struct lpfc_dmabuf *d_buf, *dnext;
+ struct hbq_dmabuf *dmabuf = NULL;
+
+ /* start with the oldest sequence on the rcv list */
+ list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
+ dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
+ list_del_init(&dmabuf->hbuf.list);
+ list_for_each_entry_safe(d_buf, dnext,
+ &dmabuf->dbuf.list, list) {
+ list_del_init(&d_buf->list);
+ lpfc_in_buf_free(vport->phba, d_buf);
+ }
+ lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
+ }
+}
+
+/**
+ * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences.
+ * @vport: The vport that the received sequences were sent to.
+ *
+ * This function determines whether any received sequences have timed out by
+ * first checking the vport's rcv_buffer_time_stamp. If this time_stamp
+ * indicates that there is at least one timed out sequence this routine will
+ * go through the received sequences one at a time from most inactive to most
+ * active to determine which ones need to be cleaned up. Once it has determined
+ * that a sequence needs to be cleaned up it will simply free up the resources
+ * without sending an abort.
+ **/
+void
+lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
+{
+ struct lpfc_dmabuf *h_buf, *hnext;
+ struct lpfc_dmabuf *d_buf, *dnext;
+ struct hbq_dmabuf *dmabuf = NULL;
+ unsigned long timeout;
+ int abort_count = 0;
+
+ timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
+ vport->rcv_buffer_time_stamp);
+ if (list_empty(&vport->rcv_buffer_list) ||
+ time_before(jiffies, timeout))
+ return;
+ /* start with the oldest sequence on the rcv list */
+ list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
+ dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
+ timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
+ dmabuf->time_stamp);
+ if (time_before(jiffies, timeout))
+ break;
+ abort_count++;
+ list_del_init(&dmabuf->hbuf.list);
+ list_for_each_entry_safe(d_buf, dnext,
+ &dmabuf->dbuf.list, list) {
+ list_del_init(&d_buf->list);
+ lpfc_in_buf_free(vport->phba, d_buf);
+ }
+ lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
+ }
+ if (abort_count)
+ lpfc_update_rcv_time_stamp(vport);
+}
+
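+/*
+ * A minimal usage sketch, not part of the driver proper: the timeout scan
+ * above is cheap when nothing has expired (one list_empty()/time_before()
+ * check), so it suits being driven from a periodic timer once per vport.
+ * The helper name below is a hypothetical illustration only.
+ */
+static void
+lpfc_example_rcv_seq_timer_scan(struct lpfc_hba *phba)
+{
+ struct lpfc_vport **vports;
+ int i;
+
+ vports = lpfc_create_vport_work_array(phba);
+ if (vports == NULL)
+ return;
+ for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
+ /* frees timed-out sequences without sending aborts */
+ lpfc_rcv_seq_check_edtov(vports[i]);
+ lpfc_destroy_vport_work_array(phba, vports);
+}
+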
+/**
+ * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
+ * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
+ *
+ * This function searches through the existing incomplete sequences that have
+ * been sent to this @vport. If the frame matches one of the incomplete
+ * sequences then the dbuf in the @dmabuf is added to the list of frames that
+ * make up that sequence. If no sequence is found that matches this frame then
+ * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list
+ * This function returns a pointer to the first dmabuf in the sequence list that
+ * the frame was linked to.
+ **/
+static struct hbq_dmabuf *
+lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
+{
+ struct fc_frame_header *new_hdr;
+ struct fc_frame_header *temp_hdr;
+ struct lpfc_dmabuf *d_buf;
+ struct lpfc_dmabuf *h_buf;
+ struct hbq_dmabuf *seq_dmabuf = NULL;
+ struct hbq_dmabuf *temp_dmabuf = NULL;
+
+ INIT_LIST_HEAD(&dmabuf->dbuf.list);
+ dmabuf->time_stamp = jiffies;
+ new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
+ /* Use the hdr_buf to find the sequence that this frame belongs to */
+ list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
+ temp_hdr = (struct fc_frame_header *)h_buf->virt;
if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
(temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
(memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
* Queue the buffer on the vport's rcv_buffer_list.
*/
list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
+ lpfc_update_rcv_time_stamp(vport);
return dmabuf;
}
temp_hdr = seq_dmabuf->hbuf.virt;
- if (new_hdr->fh_seq_cnt < temp_hdr->fh_seq_cnt) {
- list_add(&seq_dmabuf->dbuf.list, &dmabuf->dbuf.list);
+ if (be16_to_cpu(new_hdr->fh_seq_cnt) <
+ be16_to_cpu(temp_hdr->fh_seq_cnt)) {
+ list_del_init(&seq_dmabuf->hbuf.list);
+ list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
+ list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
+ lpfc_update_rcv_time_stamp(vport);
return dmabuf;
}
+ /* move this sequence to the tail to indicate a young sequence */
+ list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
+ seq_dmabuf->time_stamp = jiffies;
+ lpfc_update_rcv_time_stamp(vport);
+ if (list_empty(&seq_dmabuf->dbuf.list)) {
+ temp_hdr = dmabuf->hbuf.virt;
+ list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
+ return seq_dmabuf;
+ }
/* find the correct place in the sequence to insert this frame */
list_for_each_entry_reverse(d_buf, &seq_dmabuf->dbuf.list, list) {
temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
* If the frame's sequence count is greater than the frame on
* the list then insert the frame right after this frame
*/
- if (new_hdr->fh_seq_cnt > temp_hdr->fh_seq_cnt) {
+ if (be16_to_cpu(new_hdr->fh_seq_cnt) >
+ be16_to_cpu(temp_hdr->fh_seq_cnt)) {
list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
return seq_dmabuf;
}
}
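/*
* Note on the byte swapping above: fh_seq_cnt is carried big-endian on the
* wire, so both sides of each comparison are converted with be16_to_cpu()
* before frames are ordered; comparing the raw __be16 values would sort
* incorrectly on little-endian hosts. A minimal helper expressing the rule
* (hypothetical; the driver open-codes it above):
*/
static inline bool
lpfc_example_seq_cnt_after(struct fc_frame_header *a,
struct fc_frame_header *b)
{
/* true when frame 'a' comes later in the sequence than frame 'b' */
return be16_to_cpu(a->fh_seq_cnt) > be16_to_cpu(b->fh_seq_cnt);
}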
/**
+ * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
+ * @vport: pointer to a virtual port
+ * @dmabuf: pointer to a dmabuf that describes the FC sequence
+ *
+ * This function tries to abort the partially assembled sequence described
+ * by the information in the basic abort @dmabuf. It checks whether such a
+ * partially assembled sequence is held by the driver. If so, it frees up
+ * all the frames from the partially assembled sequence.
+ *
+ * Return
+ * true -- if a matching partially assembled sequence is present and all
+ * of its frames have been freed with the sequence;
+ * false -- if no matching partially assembled sequence is present, so
+ * nothing was aborted in the lower layer driver
+ **/
+static bool
+lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
+ struct hbq_dmabuf *dmabuf)
+{
+ struct fc_frame_header *new_hdr;
+ struct fc_frame_header *temp_hdr;
+ struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
+ struct hbq_dmabuf *seq_dmabuf = NULL;
+
+ /* Use the hdr_buf to find the sequence that matches this frame */
+ INIT_LIST_HEAD(&dmabuf->dbuf.list);
+ INIT_LIST_HEAD(&dmabuf->hbuf.list);
+ new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
+ list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
+ temp_hdr = (struct fc_frame_header *)h_buf->virt;
+ if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
+ (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
+ (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
+ continue;
+ /* found a pending sequence that matches this frame */
+ seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
+ break;
+ }
+
+ /* Free up all the frames from the partially assembled sequence */
+ if (seq_dmabuf) {
+ list_for_each_entry_safe(d_buf, n_buf,
+ &seq_dmabuf->dbuf.list, list) {
+ list_del_init(&d_buf->list);
+ lpfc_in_buf_free(vport->phba, d_buf);
+ }
+ return true;
+ }
+ return false;
+}
+
+/**
+ * lpfc_sli4_seq_abort_acc_cmpl - Accept seq abort iocb complete handler
+ * @phba: Pointer to HBA context object.
+ * @cmd_iocbq: pointer to the command iocbq structure.
+ * @rsp_iocbq: pointer to the response iocbq structure.
+ *
+ * This function handles the sequence abort accept iocb command complete
+ * event. It properly releases the memory allocated to the sequence abort
+ * accept iocb.
+ **/
+static void
+lpfc_sli4_seq_abort_acc_cmpl(struct lpfc_hba *phba,
+ struct lpfc_iocbq *cmd_iocbq,
+ struct lpfc_iocbq *rsp_iocbq)
+{
+ if (cmd_iocbq)
+ lpfc_sli_release_iocbq(phba, cmd_iocbq);
+}
+
+/**
+ * lpfc_sli4_seq_abort_acc - Accept sequence abort
+ * @phba: Pointer to HBA context object.
+ * @fc_hdr: pointer to a FC frame header.
+ *
+ * This function sends a basic accept (BA_ACC) in response to a previously
+ * received unsolicited sequence abort, after the sequence handling has been
+ * aborted.
+ **/
+static void
+lpfc_sli4_seq_abort_acc(struct lpfc_hba *phba,
+ struct fc_frame_header *fc_hdr)
+{
+ struct lpfc_iocbq *ctiocb = NULL;
+ struct lpfc_nodelist *ndlp;
+ uint16_t oxid, rxid;
+ uint32_t sid, fctl;
+ IOCB_t *icmd;
+
+ if (!lpfc_is_link_up(phba))
+ return;
+
+ sid = sli4_sid_from_fc_hdr(fc_hdr);
+ oxid = be16_to_cpu(fc_hdr->fh_ox_id);
+ rxid = be16_to_cpu(fc_hdr->fh_rx_id);
+
+ ndlp = lpfc_findnode_did(phba->pport, sid);
+ if (!ndlp) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
+ "1268 Find ndlp returned NULL for oxid:x%x "
+ "SID:x%x\n", oxid, sid);
+ return;
+ }
+
+ /* Allocate buffer for acc iocb */
+ ctiocb = lpfc_sli_get_iocbq(phba);
+ if (!ctiocb)
+ return;
+
+ /* Extract the F_CTL field from FC_HDR */
+ fctl = sli4_fctl_from_fc_hdr(fc_hdr);
+
+ icmd = &ctiocb->iocb;
+ icmd->un.xseq64.bdl.bdeSize = 0;
+ icmd->un.xseq64.bdl.ulpIoTag32 = 0;
+ icmd->un.xseq64.w5.hcsw.Dfctl = 0;
+ icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC;
+ icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS;
+
+ /* Fill in the rest of iocb fields */
+ icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX;
+ icmd->ulpBdeCount = 0;
+ icmd->ulpLe = 1;
+ icmd->ulpClass = CLASS3;
+ icmd->ulpContext = ndlp->nlp_rpi;
+
+ ctiocb->iocb_cmpl = NULL;
+ ctiocb->vport = phba->pport;
+ ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_acc_cmpl;
+
+ if (fctl & FC_FC_EX_CTX) {
+ /* ABTS sent by responder to CT exchange, construction
+ * of BA_ACC will use OX_ID from ABTS for the XRI_TAG
+ * field and RX_ID from ABTS for RX_ID field.
+ */
+ bf_set(lpfc_abts_orig, &icmd->un.bls_acc, LPFC_ABTS_UNSOL_RSP);
+ bf_set(lpfc_abts_rxid, &icmd->un.bls_acc, rxid);
+ ctiocb->sli4_xritag = oxid;
+ } else {
+ /* ABTS sent by initiator to CT exchange, construction
+ * of BA_ACC will need to allocate a new XRI as for the
+ * XRI_TAG and RX_ID fields.
+ */
+ bf_set(lpfc_abts_orig, &icmd->un.bls_acc, LPFC_ABTS_UNSOL_INT);
+ bf_set(lpfc_abts_rxid, &icmd->un.bls_acc, NO_XRI);
+ ctiocb->sli4_xritag = NO_XRI;
+ }
+ bf_set(lpfc_abts_oxid, &icmd->un.bls_acc, oxid);
+
+ /* Xmit CT abts accept on exchange <xid> */
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+ "1200 Xmit CT ABTS ACC on exchange x%x Data: x%x\n",
+ CMD_XMIT_BLS_RSP64_CX, phba->link_state);
+ lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
+}
+
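+/*
+ * For reference, the sli4_sid_from_fc_hdr() helper used above simply
+ * reassembles the 24-bit S_ID from its three big-endian header bytes; the
+ * driver used to open-code the same expression at its call sites. An
+ * equivalent sketch (illustrative name):
+ */
+static inline uint32_t
+lpfc_example_sid_from_fc_hdr(struct fc_frame_header *fc_hdr)
+{
+ return (fc_hdr->fh_s_id[0] << 16 |
+ fc_hdr->fh_s_id[1] << 8 |
+ fc_hdr->fh_s_id[2]);
+}
+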
+/**
+ * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event
+ * @vport: Pointer to the vport on which this sequence was received
+ * @dmabuf: pointer to a dmabuf that describes the FC sequence
+ *
+ * This function handles an SLI-4 unsolicited abort event. If the unsolicited
+ * receive sequence is only partially assembled by the driver, it shall abort
+ * the partially assembled frames for the sequence. Otherwise, if the
+ * unsolicited receive sequence has been completely assembled and passed to
+ * the Upper Layer Protocol (ULP), it then marks the per-oxid status to
+ * indicate that the unsolicited sequence has been aborted. After that, it
+ * will issue a basic accept to accept the abort.
+ **/
+void
+lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
+ struct hbq_dmabuf *dmabuf)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct fc_frame_header fc_hdr;
+ uint32_t fctl;
+ bool abts_par;
+
+ /* Make a copy of fc_hdr before the dmabuf being released */
+ memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
+ fctl = sli4_fctl_from_fc_hdr(&fc_hdr);
+
+ if (fctl & FC_FC_EX_CTX) {
+ /*
+ * ABTS sent by responder to exchange, just free the buffer
+ */
+ lpfc_in_buf_free(phba, &dmabuf->dbuf);
+ } else {
+ /*
+ * ABTS sent by initiator to exchange, need to do cleanup
+ */
+ /* Try to abort partially assembled seq */
+ abts_par = lpfc_sli4_abort_partial_seq(vport, dmabuf);
+
+ /* Send abort to ULP if partially seq abort failed */
+ if (abts_par == false)
+ lpfc_sli4_send_seq_to_ulp(vport, dmabuf);
+ else
+ lpfc_in_buf_free(phba, &dmabuf->dbuf);
+ }
+ /* Send basic accept (BA_ACC) to the abort requester */
+ lpfc_sli4_seq_abort_acc(phba, &fc_hdr);
+}
+
+/**
* lpfc_seq_complete - Indicates if a sequence is complete
* @dmabuf: pointer to a dmabuf that describes the FC sequence
*
seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
/* If there is a hole in the sequence count then fail. */
- if (++seq_count != hdr->fh_seq_cnt)
+ if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
return 0;
fctl = (hdr->fh_f_ctl[0] << 16 |
hdr->fh_f_ctl[1] << 8 |
struct lpfc_iocbq *first_iocbq, *iocbq;
struct fc_frame_header *fc_hdr;
uint32_t sid;
+ struct ulp_bde64 *pbde;
fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
/* remove from receive buffer list */
list_del_init(&seq_dmabuf->hbuf.list);
+ lpfc_update_rcv_time_stamp(vport);
/* get the Remote Port's SID */
- sid = (fc_hdr->fh_s_id[0] << 16 |
- fc_hdr->fh_s_id[1] << 8 |
- fc_hdr->fh_s_id[2]);
+ sid = sli4_sid_from_fc_hdr(fc_hdr);
/* Get an iocbq struct to fill in. */
first_iocbq = lpfc_sli_get_iocbq(vport->phba);
if (first_iocbq) {
/* Initialize the first IOCB. */
+ first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
first_iocbq->iocb.ulpContext = be16_to_cpu(fc_hdr->fh_ox_id);
first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
LPFC_DATA_BUF_SIZE;
first_iocbq->iocb.un.rcvels.remoteID = sid;
+ first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
+ bf_get(lpfc_rcqe_length,
+ &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
}
iocbq = first_iocbq;
/*
if (!iocbq->context3) {
iocbq->context3 = d_buf;
iocbq->iocb.ulpBdeCount++;
- iocbq->iocb.unsli3.rcvsli3.bde2.tus.f.bdeSize =
- LPFC_DATA_BUF_SIZE;
+ pbde = (struct ulp_bde64 *)
+ &iocbq->iocb.unsli3.sli3Words[4];
+ pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
+ first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
+ bf_get(lpfc_rcqe_length,
+ &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
} else {
iocbq = lpfc_sli_get_iocbq(vport->phba);
if (!iocbq) {
iocbq->iocb.ulpBdeCount = 1;
iocbq->iocb.un.cont64[0].tus.f.bdeSize =
LPFC_DATA_BUF_SIZE;
+ first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
+ bf_get(lpfc_rcqe_length,
+ &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
iocbq->iocb.un.rcvels.remoteID = sid;
list_add_tail(&iocbq->list, &first_iocbq->list);
}
return first_iocbq;
}
+static void
+lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
+ struct hbq_dmabuf *seq_dmabuf)
+{
+ struct fc_frame_header *fc_hdr;
+ struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
+ struct lpfc_hba *phba = vport->phba;
+
+ fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
+ iocbq = lpfc_prep_seq(vport, seq_dmabuf);
+ if (!iocbq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2707 Ring %d handler: Failed to allocate "
+ "iocb Rctl x%x Type x%x received\n",
+ LPFC_ELS_RING,
+ fc_hdr->fh_r_ctl, fc_hdr->fh_type);
+ return;
+ }
+ if (!lpfc_complete_unsol_iocb(phba,
+ &phba->sli.ring[LPFC_ELS_RING],
+ iocbq, fc_hdr->fh_r_ctl,
+ fc_hdr->fh_type))
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "2540 Ring %d handler: unexpected Rctl "
+ "x%x Type x%x received\n",
+ LPFC_ELS_RING,
+ fc_hdr->fh_r_ctl, fc_hdr->fh_type);
+
+ /* Free iocb created in lpfc_prep_seq */
+ list_for_each_entry_safe(curr_iocb, next_iocb,
+ &iocbq->list, list) {
+ list_del_init(&curr_iocb->list);
+ lpfc_sli_release_iocbq(phba, curr_iocb);
+ }
+ lpfc_sli_release_iocbq(phba, iocbq);
+}
+
/**
* lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
* @phba: Pointer to HBA context object.
* Worker thread calls lpfc_sli4_handle_received_buffer, which will call the
* appropriate receive function when the final frame in a sequence is received.
**/
-int
-lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba)
+void
+lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
+ struct hbq_dmabuf *dmabuf)
{
- LIST_HEAD(cmplq);
- struct hbq_dmabuf *dmabuf, *seq_dmabuf;
+ struct hbq_dmabuf *seq_dmabuf;
struct fc_frame_header *fc_hdr;
struct lpfc_vport *vport;
uint32_t fcfi;
- struct lpfc_iocbq *iocbq;
-
- /* Clear hba flag and get all received buffers into the cmplq */
- spin_lock_irq(&phba->hbalock);
- phba->hba_flag &= ~HBA_RECEIVE_BUFFER;
- list_splice_init(&phba->rb_pend_list, &cmplq);
- spin_unlock_irq(&phba->hbalock);
/* Process each received buffer */
- while ((dmabuf = lpfc_sli_hbqbuf_get(&cmplq)) != NULL) {
- fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
- /* check to see if this a valid type of frame */
- if (lpfc_fc_frame_check(phba, fc_hdr)) {
- lpfc_in_buf_free(phba, &dmabuf->dbuf);
- continue;
- }
- fcfi = bf_get(lpfc_rcqe_fcf_id, &dmabuf->rcqe);
- vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi);
- if (!vport) {
- /* throw out the frame */
- lpfc_in_buf_free(phba, &dmabuf->dbuf);
- continue;
- }
- /* Link this frame */
- seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
- if (!seq_dmabuf) {
- /* unable to add frame to vport - throw it out */
- lpfc_in_buf_free(phba, &dmabuf->dbuf);
- continue;
- }
- /* If not last frame in sequence continue processing frames. */
- if (!lpfc_seq_complete(seq_dmabuf)) {
- /*
- * When saving off frames post a new one and mark this
- * frame to be freed when it is finished.
- **/
- lpfc_sli_hbqbuf_fill_hbqs(phba, LPFC_ELS_HBQ, 1);
- dmabuf->tag = -1;
- continue;
- }
- fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
- iocbq = lpfc_prep_seq(vport, seq_dmabuf);
- if (!lpfc_complete_unsol_iocb(phba,
- &phba->sli.ring[LPFC_ELS_RING],
- iocbq, fc_hdr->fh_r_ctl,
- fc_hdr->fh_type))
- lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
- "2540 Ring %d handler: unexpected Rctl "
- "x%x Type x%x received\n",
- LPFC_ELS_RING,
- fc_hdr->fh_r_ctl, fc_hdr->fh_type);
- };
- return 0;
+ fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
+ /* check to see if this a valid type of frame */
+ if (lpfc_fc_frame_check(phba, fc_hdr)) {
+ lpfc_in_buf_free(phba, &dmabuf->dbuf);
+ return;
+ }
+ fcfi = bf_get(lpfc_rcqe_fcf_id, &dmabuf->cq_event.cqe.rcqe_cmpl);
+ vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi);
+ if (!vport || !(vport->vpi_state & LPFC_VPI_REGISTERED)) {
+ /* throw out the frame */
+ lpfc_in_buf_free(phba, &dmabuf->dbuf);
+ return;
+ }
+ /* Handle the basic abort sequence (BA_ABTS) event */
+ if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
+ lpfc_sli4_handle_unsol_abort(vport, dmabuf);
+ return;
+ }
+
+ /* Link this frame */
+ seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
+ if (!seq_dmabuf) {
+ /* unable to add frame to vport - throw it out */
+ lpfc_in_buf_free(phba, &dmabuf->dbuf);
+ return;
+ }
+ /* If not last frame in sequence continue processing frames. */
+ if (!lpfc_seq_complete(seq_dmabuf))
+ return;
+
+ /* Send the complete sequence to the upper layer protocol */
+ lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
}
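/*
* Ownership sketch for the routine above (hypothetical caller, shown for
* illustration only): lpfc_sli4_handle_received_buffer() consumes the
* buffer on every path -- it frees it on any error and otherwise links it
* into a pending sequence -- so the event handler that dequeues a receive
* completion can hand the buffer off unconditionally.
*/
static void
lpfc_example_dispatch_rcv_buffer(struct lpfc_hba *phba,
struct hbq_dmabuf *dmabuf)
{
/* no error to propagate; the handler owns dmabuf from here on */
lpfc_sli4_handle_received_buffer(phba, dmabuf);
}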
/**
*
* This routine is invoked to post rpi header templates to the
* HBA consistent with the SLI-4 interface spec. This routine
- * posts a PAGE_SIZE memory region to the port to hold up to
- * PAGE_SIZE modulo 64 rpi context headers.
+ * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
+ * SLI4_PAGE_SIZE modulo 64 rpi context headers.
*
* This routine does not require any locks. Its usage is expected
* to be driver load or reset recovery when the driver is
* sequential.
*
* Return codes
- * 0 - sucessful
- * EIO - The mailbox failed to complete successfully.
+ * 0 - successful
+ * -EIO - The mailbox failed to complete successfully.
* When this error occurs, the driver is not guaranteed
* to have any rpi regions posted to the device and
* must either attempt to repost the regions or take a
* maps up to 64 rpi context regions.
*
* Return codes
- * 0 - sucessful
- * ENOMEM - No available memory
- * EIO - The mailbox failed to complete successfully.
+ * 0 - successful
+ * -ENOMEM - No available memory
+ * -EIO - The mailbox failed to complete successfully.
**/
int
lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
rpi_page->start_rpi);
hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
- if (!phba->sli4_hba.intr_enable)
- rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
- else
- rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
*
* This routine is invoked to post rpi header templates to the
* HBA consistent with the SLI-4 interface spec. This routine
- * posts a PAGE_SIZE memory region to the port to hold up to
- * PAGE_SIZE modulo 64 rpi context headers.
+ * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
+ * SLI4_PAGE_SIZE modulo 64 rpi context headers.
*
* Returns
- * A nonzero rpi defined as rpi_base <= rpi < max_rpi if sucessful
+ * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
* LPFC_RPI_ALLOC_ERROR if no rpis are available.
**/
int
* available rpis maintained by the driver.
**/
void
+__lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
+{
+ if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
+ phba->sli4_hba.rpi_count--;
+ phba->sli4_hba.max_cfg_param.rpi_used--;
+ }
+}
+
+/**
+ * lpfc_sli4_free_rpi - Release an rpi for reuse.
+ * @phba: pointer to lpfc hba data structure.
+ * @rpi: the rpi to release.
+ *
+ * This routine is invoked to release an rpi to the pool of
+ * available rpis maintained by the driver.
+ **/
+void
lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
{
spin_lock_irq(&phba->hbalock);
- clear_bit(rpi, phba->sli4_hba.rpi_bmask);
- phba->sli4_hba.rpi_count--;
- phba->sli4_hba.max_cfg_param.rpi_used--;
+ __lpfc_sli4_free_rpi(phba, rpi);
spin_unlock_irq(&phba->hbalock);
}
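/*
* The pair above follows the driver's locked/unlocked naming convention:
* the double-underscore __lpfc_sli4_free_rpi() assumes the caller already
* holds hbalock (see lpfc_cleanup_pending_mbox() below), while
* lpfc_sli4_free_rpi() takes the lock itself. A sketch of the two calling
* patterns (a real caller uses one or the other, never both):
*/
static void
lpfc_example_rpi_free_patterns(struct lpfc_hba *phba, int rpi)
{
/* caller already inside an hbalock critical section */
spin_lock_irq(&phba->hbalock);
__lpfc_sli4_free_rpi(phba, rpi);
spin_unlock_irq(&phba->hbalock);

/* plain context; let the wrapper handle the locking */
lpfc_sli4_free_rpi(phba, rpi);
}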
{
LPFC_MBOXQ_t *mboxq;
int rc = 0;
+ int retval = MBX_SUCCESS;
uint32_t mbox_tmo;
if (vpi == 0)
mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mboxq)
return -ENOMEM;
- lpfc_init_vpi(mboxq, vpi);
+ lpfc_init_vpi(phba, mboxq, vpi);
mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_INIT_VPI);
rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
- if (rc != MBX_TIMEOUT)
- mempool_free(mboxq, phba->mbox_mem_pool);
if (rc != MBX_SUCCESS) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"2022 INIT VPI Mailbox failed "
"status %d, mbxStatus x%x\n", rc,
bf_get(lpfc_mqe_status, &mboxq->u.mqe));
- rc = -EIO;
+ retval = -EIO;
}
- return rc;
+ if (rc != MBX_TIMEOUT)
+ mempool_free(mboxq, phba->mbox_mem_pool);
+
+ return retval;
}
/**
*/
lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
- if (unlikely(!mboxq->sge_array)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
- "2526 Failed to get the non-embedded SGE "
- "virtual address\n");
- lpfc_sli4_mbox_cmd_free(phba, mboxq);
- return -ENOMEM;
- }
virt_addr = mboxq->sge_array->addr[0];
/*
* Configure the FCF record for FCFI 0. This is the driver's
bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
+ bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
LPFC_FCF_FPMA | LPFC_FCF_SPMA);
}
/**
- * lpfc_sli4_read_fcf_record - Read the driver's default FCF Record.
+ * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
* @phba: pointer to lpfc hba data structure.
* @fcf_index: FCF table entry offset.
*
- * This routine is invoked to read up to @fcf_num of FCF record from the
- * device starting with the given @fcf_index.
+ * This routine is invoked to scan the entire FCF table by reading FCF
+ * records and processing them one at a time starting from the @fcf_index
+ * for initial FCF discovery or fast FCF failover rediscovery.
+ *
+ * Return 0 if the mailbox command is submitted successfully, nonzero
+ * otherwise.
**/
int
-lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index)
+lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
{
int rc = 0, error;
LPFC_MBOXQ_t *mboxq;
- void *virt_addr;
- dma_addr_t phys_addr;
- uint8_t *bytep;
- struct lpfc_mbx_sge sge;
- uint32_t alloc_len, req_len;
- struct lpfc_mbx_read_fcf_tbl *read_fcf;
+ phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mboxq) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2000 Failed to allocate mbox for "
"READ_FCF cmd\n");
- return -ENOMEM;
+ error = -ENOMEM;
+ goto fail_fcf_scan;
}
-
- req_len = sizeof(struct fcf_record) +
- sizeof(union lpfc_sli4_cfg_shdr) + 2 * sizeof(uint32_t);
-
- /* Set up READ_FCF SLI4_CONFIG mailbox-ioctl command */
- alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
- LPFC_MBOX_OPCODE_FCOE_READ_FCF_TABLE, req_len,
- LPFC_SLI4_MBX_NEMBED);
-
- if (alloc_len < req_len) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0291 Allocated DMA memory size (x%x) is "
- "less than the requested DMA memory "
- "size (x%x)\n", alloc_len, req_len);
- lpfc_sli4_mbox_cmd_free(phba, mboxq);
- return -ENOMEM;
+ /* Construct the read FCF record mailbox command */
+ rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
+ if (rc) {
+ error = -EINVAL;
+ goto fail_fcf_scan;
}
+ /* Issue the mailbox command asynchronously */
+ mboxq->vport = phba->pport;
+ mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
- /* Get the first SGE entry from the non-embedded DMA memory. This
- * routine only uses a single SGE.
- */
- lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
- phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
- if (unlikely(!mboxq->sge_array)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
- "2527 Failed to get the non-embedded SGE "
- "virtual address\n");
- lpfc_sli4_mbox_cmd_free(phba, mboxq);
- return -ENOMEM;
- }
- virt_addr = mboxq->sge_array->addr[0];
- read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
+ spin_lock_irq(&phba->hbalock);
+ phba->hba_flag |= FCF_TS_INPROG;
+ spin_unlock_irq(&phba->hbalock);
- /* Set up command fields */
- bf_set(lpfc_mbx_read_fcf_tbl_indx, &read_fcf->u.request, fcf_index);
- /* Perform necessary endian conversion */
- bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
- lpfc_sli_pcimem_bcopy(bytep, bytep, sizeof(uint32_t));
- mboxq->vport = phba->pport;
- mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_record;
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
- if (rc == MBX_NOT_FINISHED) {
- lpfc_sli4_mbox_cmd_free(phba, mboxq);
+ if (rc == MBX_NOT_FINISHED)
error = -EIO;
- } else
+ else {
+ /* Reset eligible FCF count for new scan */
+ if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
+ phba->fcf.eligible_fcf_cnt = 0;
error = 0;
+ }
+fail_fcf_scan:
+ if (error) {
+ if (mboxq)
+ lpfc_sli4_mbox_cmd_free(phba, mboxq);
+ /* FCF scan failed, clear FCF_TS_INPROG flag */
+ spin_lock_irq(&phba->hbalock);
+ phba->hba_flag &= ~FCF_TS_INPROG;
+ spin_unlock_irq(&phba->hbalock);
+ }
return error;
}
+
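+/*
+ * The three READ_FCF issuers in this area share one asynchronous mailbox
+ * shape: build the command, point mbox_cmpl at a completion handler,
+ * submit with MBX_NOWAIT, and free the mailbox only when submission
+ * itself fails -- on success the completion handler owns it. A condensed
+ * sketch of that shape (the helper name is a placeholder):
+ */
+static int
+lpfc_example_read_fcf_async(struct lpfc_hba *phba, uint16_t fcf_index,
+ void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *))
+{
+ LPFC_MBOXQ_t *mboxq;
+
+ mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mboxq)
+ return -ENOMEM;
+ if (lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index)) {
+ lpfc_sli4_mbox_cmd_free(phba, mboxq);
+ return -EINVAL;
+ }
+ mboxq->vport = phba->pport;
+ mboxq->mbox_cmpl = cmpl;
+ if (lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT) == MBX_NOT_FINISHED) {
+ lpfc_sli4_mbox_cmd_free(phba, mboxq);
+ return -EIO;
+ }
+ return 0;
+}
+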
+/**
+ * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf.
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_index: FCF table entry offset.
+ *
+ * This routine is invoked to read an FCF record indicated by @fcf_index
+ * and to use it for FLOGI roundrobin FCF failover.
+ *
+ * Return 0 if the mailbox command is submitted successfully, nonzero
+ * otherwise.
+ **/
+int
+lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
+{
+ int rc = 0, error;
+ LPFC_MBOXQ_t *mboxq;
+
+ mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mboxq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
+ "2763 Failed to allocate mbox for "
+ "READ_FCF cmd\n");
+ error = -ENOMEM;
+ goto fail_fcf_read;
+ }
+ /* Construct the read FCF record mailbox command */
+ rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
+ if (rc) {
+ error = -EINVAL;
+ goto fail_fcf_read;
+ }
+ /* Issue the mailbox command asynchronously */
+ mboxq->vport = phba->pport;
+ mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED)
+ error = -EIO;
+ else
+ error = 0;
+
+fail_fcf_read:
+ if (error && mboxq)
+ lpfc_sli4_mbox_cmd_free(phba, mboxq);
+ return error;
+}
+
+/**
+ * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask.
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_index: FCF table entry offset.
+ *
+ * This routine is invoked to read an FCF record indicated by @fcf_index to
+ * determine whether it's eligible for the FLOGI roundrobin failover list.
+ *
+ * Return 0 if the mailbox command is submitted successfully, nonzero
+ * otherwise.
+ **/
+int
+lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
+{
+ int rc = 0, error;
+ LPFC_MBOXQ_t *mboxq;
+
+ mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mboxq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
+ "2758 Failed to allocate mbox for "
+ "READ_FCF cmd\n");
+ error = -ENOMEM;
+ goto fail_fcf_read;
+ }
+ /* Construct the read FCF record mailbox command */
+ rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
+ if (rc) {
+ error = -EINVAL;
+ goto fail_fcf_read;
+ }
+ /* Issue the mailbox command asynchronously */
+ mboxq->vport = phba->pport;
+ mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED)
+ error = -EIO;
+ else
+ error = 0;
+
+fail_fcf_read:
+ if (error && mboxq)
+ lpfc_sli4_mbox_cmd_free(phba, mboxq);
+ return error;
+}
+
+/**
+ * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to get the next eligible FCF record index in a
+ * roundrobin fashion. If the next eligible FCF record index equals the
+ * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
+ * shall be returned, otherwise, the next eligible FCF record's index
+ * shall be returned.
+ **/
+uint16_t
+lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
+{
+ uint16_t next_fcf_index;
+
+ /* Search start from next bit of currently registered FCF index */
+ next_fcf_index = (phba->fcf.current_rec.fcf_indx + 1) %
+ LPFC_SLI4_FCF_TBL_INDX_MAX;
+ next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
+ LPFC_SLI4_FCF_TBL_INDX_MAX,
+ next_fcf_index);
+
+ /* Wrap around condition on phba->fcf.fcf_rr_bmask */
+ if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
+ next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
+ LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
+
+ /* Check roundrobin failover list empty condition */
+ if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
+ "2844 No roundrobin failover FCF available\n");
+ return LPFC_FCOE_FCF_NEXT_NONE;
+ }
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "2845 Get next roundrobin failover FCF (x%x)\n",
+ next_fcf_index);
+
+ return next_fcf_index;
+}
+
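+/*
+ * Worked example of the wraparound search above: with eligible bits
+ * {3, 7} set in fcf_rr_bmask and current_rec.fcf_indx == 7, the first
+ * find_next_bit() starts at index 8, runs off the end of the table, and
+ * the wraparound call starting at 0 returns 3. Only when both passes run
+ * past LPFC_SLI4_FCF_TBL_INDX_MAX is the failover list empty and
+ * LPFC_FCOE_FCF_NEXT_NONE returned.
+ */
+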
+/**
+ * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_index: index into the FCF table to mark as eligible.
+ *
+ * This routine sets the FCF record index in to the eligible bmask for
+ * roundrobin failover search. It checks to make sure that the index
+ * does not go beyond the range of the driver allocated bmask dimension
+ * before setting the bit.
+ *
+ * Returns 0 if the index bit successfully set, otherwise, it returns
+ * -EINVAL.
+ **/
+int
+lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
+{
+ if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
+ "2610 FCF (x%x) reached driver's book "
+ "keeping dimension:x%x\n",
+ fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
+ return -EINVAL;
+ }
+ /* Set the eligible FCF record index bmask */
+ set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "2790 Set FCF (x%x) to roundrobin FCF failover "
+ "bmask\n", fcf_index);
+
+ return 0;
+}
+
+/**
+ * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_index: index into the FCF table to clear.
+ *
+ * This routine clears the FCF record index from the eligible bmask for
+ * roundrobin failover search. It checks to make sure that the index
+ * does not go beyond the range of the driver allocated bmask dimension
+ * before clearing the bit.
+ **/
+void
+lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
+{
+ if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
+ "2762 FCF (x%x) reached driver's book "
+ "keeping dimension:x%x\n",
+ fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
+ return;
+ }
+ /* Clear the eligible FCF record index bmask */
+ clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "2791 Clear FCF (x%x) from roundrobin failover "
+ "bmask\n", fcf_index);
+}
+
+/**
+ * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
+ * @phba: pointer to lpfc hba data structure.
+ * @mbox: pointer to the rediscover FCF table mailbox object.
+ *
+ * This routine is the completion routine for the rediscover FCF table mailbox
+ * command. If the mailbox command returned failure, it will try to stop the
+ * FCF rediscover wait timer.
+ **/
+void
+lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
+{
+ struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
+ uint32_t shdr_status, shdr_add_status;
+
+ redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
+
+ shdr_status = bf_get(lpfc_mbox_hdr_status,
+ &redisc_fcf->header.cfg_shdr.response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
+ &redisc_fcf->header.cfg_shdr.response);
+ if (shdr_status || shdr_add_status) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
+ "2746 Requesting for FCF rediscovery failed "
+ "status x%x add_status x%x\n",
+ shdr_status, shdr_add_status);
+ if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
+ spin_lock_irq(&phba->hbalock);
+ phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
+ spin_unlock_irq(&phba->hbalock);
+ /*
+ * CVL event triggered FCF rediscover request failed,
+ * last resort to re-try current registered FCF entry.
+ */
+ lpfc_retry_pport_discovery(phba);
+ } else {
+ spin_lock_irq(&phba->hbalock);
+ phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
+ spin_unlock_irq(&phba->hbalock);
+ /*
+ * DEAD FCF event triggered FCF rediscover request
+ * failed, last resort to fail over as a link down
+ * to FCF registration.
+ */
+ lpfc_sli4_fcf_dead_failthrough(phba);
+ }
+ } else {
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "2775 Start FCF rediscover quiescent timer\n");
+ /*
+ * Start FCF rediscovery wait timer for pending FCF
+ * before rescanning the FCF record table.
+ */
+ lpfc_fcf_redisc_wait_start_timer(phba);
+ }
+
+ mempool_free(mbox, phba->mbox_mem_pool);
+}
+
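+/*
+ * The shdr_status/shdr_add_status extraction above is the generic way
+ * SLI4 config-mailbox results are validated throughout this file. The
+ * same check, isolated as a minimal sketch (illustrative helper name):
+ */
+static int
+lpfc_example_check_cfg_shdr(union lpfc_sli4_cfg_shdr *shdr)
+{
+ uint32_t shdr_status, shdr_add_status;
+
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+ /* nonzero in either field means the port rejected the command */
+ return (shdr_status || shdr_add_status) ? -ENXIO : 0;
+}
+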
+/**
+ * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to request rediscovery of the entire FCF table
+ * by the port.
+ **/
+int
+lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
+{
+ LPFC_MBOXQ_t *mbox;
+ struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
+ int rc, length;
+
+ /* Cancel retry delay timers to all vports before FCF rediscover */
+ lpfc_cancel_all_vport_retry_delay_timer(phba);
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2745 Failed to allocate mbox for "
+ "requesting FCF rediscover.\n");
+ return -ENOMEM;
+ }
+
+ length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
+ LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
+ length, LPFC_SLI4_MBX_EMBED);
+
+ redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
+ /* Set count to 0 for invalidating the entire FCF database */
+ bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);
+
+ /* Issue the mailbox command asynchronously */
+ mbox->vport = phba->pport;
+ mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+
+ if (rc == MBX_NOT_FINISHED) {
+ mempool_free(mbox, phba->mbox_mem_pool);
+ return -EIO;
+ }
+ return 0;
+}
+
+/**
+ * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This function is the failover routine as a last resort to the FCF DEAD
+ * event when driver failed to perform fast FCF failover.
+ **/
+void
+lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
+{
+ uint32_t link_state;
+
+ /*
+ * Last resort as FCF DEAD event failover will treat this as
+ * a link down, but save the link state because we don't want
+ * it to be changed to Link Down unless it is already down.
+ */
+ link_state = phba->link_state;
+ lpfc_linkdown(phba);
+ phba->link_state = link_state;
+
+ /* Unregister FCF if no devices connected to it */
+ lpfc_unregister_unused_fcf(phba);
+}
+
+/**
+ * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This function reads region 23 and parses the TLV for port status to
+ * decide if the user disabled the port. If the TLV indicates the port
+ * is disabled, the hba_flag is set accordingly.
+ **/
+void
+lpfc_sli_read_link_ste(struct lpfc_hba *phba)
+{
+ LPFC_MBOXQ_t *pmb = NULL;
+ MAILBOX_t *mb;
+ uint8_t *rgn23_data = NULL;
+ uint32_t offset = 0, data_size, sub_tlv_len, tlv_offset;
+ int rc;
+
+ pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmb) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2600 lpfc_sli_read_serdes_param failed to"
+ " allocate mailbox memory\n");
+ goto out;
+ }
+ mb = &pmb->u.mb;
+
+ /* Get adapter Region 23 data */
+ rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
+ if (!rgn23_data)
+ goto out;
+
+ do {
+ lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
+ rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
+
+ if (rc != MBX_SUCCESS) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "2601 lpfc_sli_read_link_ste failed to"
+ " read config region 23 rc 0x%x Status 0x%x\n",
+ rc, mb->mbxStatus);
+ mb->un.varDmp.word_cnt = 0;
+ }
+ /*
+ * dump mem may return a zero when finished or we got a
+ * mailbox error, either way we are done.
+ */
+ if (mb->un.varDmp.word_cnt == 0)
+ break;
+ if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
+ mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;
+
+ lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
+ rgn23_data + offset,
+ mb->un.varDmp.word_cnt);
+ offset += mb->un.varDmp.word_cnt;
+ } while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);
+
+ data_size = offset;
+ offset = 0;
+
+ if (!data_size)
+ goto out;
+
+ /* Check the region signature first */
+ if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2619 Config region 23 has bad signature\n");
+ goto out;
+ }
+ offset += 4;
+
+ /* Check the data structure version */
+ if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2620 Config region 23 has bad version\n");
+ goto out;
+ }
+ offset += 4;
+
+ /* Parse TLV entries in the region */
+ while (offset < data_size) {
+ if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
+ break;
+ /*
+ * If the TLV is not driver specific TLV or driver id is
+ * not linux driver id, skip the record.
+ */
+ if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
+ (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
+ (rgn23_data[offset + 3] != 0)) {
+ offset += rgn23_data[offset + 1] * 4 + 4;
+ continue;
+ }
+
+ /* Driver found a driver specific TLV in the config region */
+ sub_tlv_len = rgn23_data[offset + 1] * 4;
+ offset += 4;
+ tlv_offset = 0;
+
+ /*
+ * Search for configured port state sub-TLV.
+ */
+ while ((offset < data_size) &&
+ (tlv_offset < sub_tlv_len)) {
+ if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
+ offset += 4;
+ tlv_offset += 4;
+ break;
+ }
+ if (rgn23_data[offset] != PORT_STE_TYPE) {
+ tlv_offset += rgn23_data[offset + 1] * 4 + 4;
+ offset += rgn23_data[offset + 1] * 4 + 4;
+ continue;
+ }
+
+ /* This HBA contains PORT_STE configured */
+ if (!rgn23_data[offset + 2])
+ phba->hba_flag |= LINK_DISABLED;
+
+ goto out;
+ }
+ }
+out:
+ if (pmb)
+ mempool_free(pmb, phba->mbox_mem_pool);
+ kfree(rgn23_data);
+ return;
+}
+
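+/*
+ * Region 23 records parsed above are simple TLVs: one type byte, one
+ * length byte counting 32-bit payload words, then the payload, with
+ * LPFC_REGION23_LAST_REC terminating the list. A stripped-down sketch of
+ * the walk, assuming the signature and version (8 bytes total) have
+ * already been validated as above:
+ */
+static uint8_t *
+lpfc_example_find_rgn23_tlv(uint8_t *rgn23_data, uint32_t data_size,
+ uint8_t wanted_type)
+{
+ uint32_t offset = 8; /* skip 4-byte signature + 4-byte version */
+
+ while (offset < data_size) {
+ if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
+ break;
+ if (rgn23_data[offset] == wanted_type)
+ return &rgn23_data[offset];
+ /* advance past the 4-byte header plus payload words */
+ offset += rgn23_data[offset + 1] * 4 + 4;
+ }
+ return NULL;
+}
+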
+/**
+ * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
+ * @vport: pointer to vport data structure.
+ *
+ * This function iterates through the mailboxq and cleans up all REG_LOGIN
+ * and REG_VPI mailbox commands associated with the vport. This function
+ * is called when the driver wants to restart discovery of the vport due to
+ * a Clear Virtual Link event.
+ **/
+void
+lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
+{
+ struct lpfc_hba *phba = vport->phba;
+ LPFC_MBOXQ_t *mb, *nextmb;
+ struct lpfc_dmabuf *mp;
+ struct lpfc_nodelist *ndlp;
+ struct lpfc_nodelist *act_mbx_ndlp = NULL;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ LIST_HEAD(mbox_cmd_list);
+
+ /* Clean up internally queued mailbox commands with the vport */
+ spin_lock_irq(&phba->hbalock);
+ list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
+ if (mb->vport != vport)
+ continue;
+
+ if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
+ (mb->u.mb.mbxCommand != MBX_REG_VPI))
+ continue;
+
+ list_del(&mb->list);
+ list_add_tail(&mb->list, &mbox_cmd_list);
+ }
+ /* Clean up active mailbox command with the vport */
+ mb = phba->sli.mbox_active;
+ if (mb && (mb->vport == vport)) {
+ if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
+ (mb->u.mb.mbxCommand == MBX_REG_VPI))
+ mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
+ act_mbx_ndlp = (struct lpfc_nodelist *)mb->context2;
+ /* Put reference count for delayed processing */
+ act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
+ /* Unregister the RPI when mailbox complete */
+ mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
+ }
+ }
+ spin_unlock_irq(&phba->hbalock);
+
+ /* Release the cleaned-up mailbox commands */
+ while (!list_empty(&mbox_cmd_list)) {
+ list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
+ if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ __lpfc_sli4_free_rpi(phba,
+ mb->u.mb.un.varRegLogin.rpi);
+ mp = (struct lpfc_dmabuf *) (mb->context1);
+ if (mp) {
+ __lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ }
+ ndlp = (struct lpfc_nodelist *) mb->context2;
+ mb->context2 = NULL;
+ if (ndlp) {
+ spin_lock(shost->host_lock);
+ ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
+ spin_unlock(shost->host_lock);
+ lpfc_nlp_put(ndlp);
+ }
+ }
+ mempool_free(mb, phba->mbox_mem_pool);
+ }
+
+ /* Release the ndlp with the cleaned-up active mailbox command */
+ if (act_mbx_ndlp) {
+ spin_lock(shost->host_lock);
+ act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
+ spin_unlock(shost->host_lock);
+ lpfc_nlp_put(act_mbx_ndlp);
+ }
+}
+
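+/*
+ * The routine above is an instance of the "collect under lock, release
+ * after unlock" shape: matching entries move from the driver-owned mboxq
+ * onto a private list while hbalock is held, and the heavier teardown
+ * (mempool_free, lpfc_nlp_put) happens outside the critical section.
+ * Skeleton of the pattern (illustrative helper name):
+ */
+static void
+lpfc_example_drain_list(struct lpfc_hba *phba, struct list_head *src)
+{
+ LIST_HEAD(local);
+ LPFC_MBOXQ_t *mb;
+
+ /* detach everything while the lock protects the source list */
+ spin_lock_irq(&phba->hbalock);
+ list_splice_init(src, &local);
+ spin_unlock_irq(&phba->hbalock);
+
+ /* tear the entries down without holding the lock */
+ while (!list_empty(&local)) {
+ list_remove_head(&local, mb, LPFC_MBOXQ_t, list);
+ mempool_free(mb, phba->mbox_mem_pool);
+ }
+}
+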
+/**
+ * lpfc_drain_txq - Drain the txq
+ * @phba: Pointer to HBA context object.
+ *
+ * This function attempts to submit IOCBs on the txq
+ * to the adapter. For SLI4 adapters, the txq contains
+ * ELS IOCBs that have been deferred because there
+ * are no SGLs. This congestion can occur with large
+ * vport counts during node discovery.
+ **/
+
+uint32_t
+lpfc_drain_txq(struct lpfc_hba *phba)
+{
+ LIST_HEAD(completions);
+ struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
+ struct lpfc_iocbq *piocbq = NULL;
+ unsigned long iflags = 0;
+ char *fail_msg = NULL;
+ struct lpfc_sglq *sglq;
+ union lpfc_wqe wqe;
+
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ if (pring->txq_cnt > pring->txq_max)
+ pring->txq_max = pring->txq_cnt;
+
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+ while (pring->txq_cnt) {
+ spin_lock_irqsave(&phba->hbalock, iflags);
+
+ sglq = __lpfc_sli_get_sglq(phba);
+ if (!sglq) {
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ break;
+ } else {
+ piocbq = lpfc_sli_ringtx_get(phba, pring);
+ if (!piocbq) {
+ /* The txq_cnt out of sync. This should
+ * never happen
+ */
+ sglq = __lpfc_clear_active_sglq(phba,
+ sglq->sli4_xritag);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2823 txq empty and txq_cnt is %d\n ",
+ pring->txq_cnt);
+ break;
+ }
+ }
+
+ /* The xri and iocb resources secured,
+ * attempt to issue request
+ */
+ piocbq->sli4_xritag = sglq->sli4_xritag;
+ if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
+ fail_msg = "to convert bpl to sgl";
+ else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe))
+ fail_msg = "to convert iocb to wqe";
+ else if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
+ fail_msg = " - Wq is full";
+ else
+ lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);
+
+ if (fail_msg) {
+ /* Failed means we can't issue and need to cancel */
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2822 IOCB failed %s iotag 0x%x "
+ "xri 0x%x\n",
+ fail_msg,
+ piocbq->iotag, piocbq->sli4_xritag);
+ list_add_tail(&piocbq->list, &completions);
+ }
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ }
+
+ /* Cancel all the IOCBs that cannot be issued */
+ lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
+ IOERR_SLI_ABORTED);
+
+ return pring->txq_cnt;
+}
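+
+/*
+ * Usage note (illustrative, not prescriptive): a natural point to drain
+ * the txq is right after SGL resources are returned to the pool, e.g.
+ *
+ * if (pring->txq_cnt)
+ * lpfc_drain_txq(phba);
+ *
+ * The returned value is the remaining txq depth, letting the caller
+ * judge whether another pass is worthwhile.
+ */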