}
if (m->hw_handler_name) {
- r = scsi_dh_attach(bdev_get_queue(p->path.dev->bdev),
- m->hw_handler_name);
+ struct request_queue *q = bdev_get_queue(p->path.dev->bdev);
+
+ r = scsi_dh_attach(q, m->hw_handler_name);
+ if (r == -EBUSY) {
+ /*
+ * Already attached to a different hw_handler,
+ * try to reattach with the correct one.
+ */
+ scsi_dh_detach(q);
+ r = scsi_dh_attach(q, m->hw_handler_name);
+ }
if (r < 0) {
+ ti->error = "error attaching hardware handler";
dm_put_device(ti, p->path.dev);
goto bad;
}
struct list_head node;
char vendor[9];
char model[17];
+ char tgps;
struct scsi_device_handler *handler;
};
spin_lock(&list_lock);
list_for_each_entry(tmp, &scsi_dh_dev_list, node) {
if (!strncmp(sdev->vendor, tmp->vendor, strlen(tmp->vendor)) &&
- !strncmp(sdev->model, tmp->model, strlen(tmp->model))) {
+ !strncmp(sdev->model, tmp->model, strlen(tmp->model)) &&
+ (!tmp->tgps || (sdev->tgps & tmp->tgps) != 0)) {
found_dh = tmp->handler;
break;
}
if (!strncmp(sdev->vendor, scsi_dh->devlist[i].vendor,
strlen(scsi_dh->devlist[i].vendor)) &&
!strncmp(sdev->model, scsi_dh->devlist[i].model,
- strlen(scsi_dh->devlist[i].model))) {
+ strlen(scsi_dh->devlist[i].model)) &&
+ (!scsi_dh->devlist[i].tgps ||
+ (sdev->tgps & scsi_dh->devlist[i].tgps) != 0)) {
found = 1;
break;
}
strncpy(tmp->model, sdev->model, 16);
tmp->vendor[8] = '\0';
tmp->model[16] = '\0';
+ tmp->tgps = sdev->tgps;
tmp->handler = found_dh;
spin_lock(&list_lock);
list_add(&tmp->node, &scsi_dh_dev_list);
if (!sdev)
return;
- if (sdev->scsi_dh_data) {
- /* if sdev is not on internal list, detach */
- scsi_dh = sdev->scsi_dh_data->scsi_dh;
- if (!device_handler_match(scsi_dh, sdev))
- scsi_dh_handler_detach(sdev, scsi_dh);
- }
+ if (sdev->scsi_dh_data)
+ scsi_dh_handler_detach(sdev, sdev->scsi_dh_data->scsi_dh);
+
put_device(&sdev->sdev_gendev);
}
EXPORT_SYMBOL_GPL(scsi_dh_detach);
}
/*
- * submit_std_inquiry - Issue a standard INQUIRY command
- * @sdev: sdev the command should be send to
- */
-static int submit_std_inquiry(struct scsi_device *sdev, struct alua_dh_data *h)
-{
- struct request *rq;
- int err = SCSI_DH_RES_TEMP_UNAVAIL;
-
- rq = get_alua_req(sdev, h->inq, ALUA_INQUIRY_SIZE, READ);
- if (!rq)
- goto done;
-
- /* Prepare the command. */
- rq->cmd[0] = INQUIRY;
- rq->cmd[1] = 0;
- rq->cmd[2] = 0;
- rq->cmd[4] = ALUA_INQUIRY_SIZE;
- rq->cmd_len = COMMAND_SIZE(INQUIRY);
-
- rq->sense = h->sense;
- memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
- rq->sense_len = h->senselen = 0;
-
- err = blk_execute_rq(rq->q, NULL, rq, 1);
- if (err == -EIO) {
- sdev_printk(KERN_INFO, sdev,
- "%s: std inquiry failed with %x\n",
- ALUA_DH_NAME, rq->errors);
- h->senselen = rq->sense_len;
- err = SCSI_DH_IO;
- }
- blk_put_request(rq);
-done:
- return err;
-}
-
-/*
* submit_vpd_inquiry - Issue an INQUIRY VPD page 0x83 command
* @sdev: sdev the command should be sent to
*/
}
/*
- * alua_std_inquiry - Evaluate standard INQUIRY command
+ * alua_check_tgps - Evaluate TGPS setting
* @sdev: device to be checked
*
- * Just extract the TPGS setting to find out if ALUA
+ * Just examine the TPGS setting of the device to find out if ALUA
* is supported.
*/
-static int alua_std_inquiry(struct scsi_device *sdev, struct alua_dh_data *h)
+static int alua_check_tgps(struct scsi_device *sdev, struct alua_dh_data *h)
{
int err;
- err = submit_std_inquiry(sdev, h);
-
- if (err != SCSI_DH_OK)
- return err;
-
/* Check TPGS setting */
- h->tpgs = (h->inq[5] >> 4) & 0x3;
+ h->tpgs = sdev->tgps;
+
switch (h->tpgs) {
case TPGS_MODE_EXPLICIT|TPGS_MODE_IMPLICIT:
sdev_printk(KERN_INFO, sdev,
return SUCCESS;
break;
case UNIT_ATTENTION:
- if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00)
- /*
- * Power On, Reset, or Bus Device Reset, just retry.
- */
- return ADD_TO_MLQUEUE;
- if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x06) {
- /*
- * ALUA state changed
- */
- return ADD_TO_MLQUEUE;
- }
- if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x07) {
- /*
- * Implicit ALUA state transition failed
- */
- return ADD_TO_MLQUEUE;
- }
- break;
+ /*
+ * Just retry for UNIT_ATTENTION
+ */
+ return ADD_TO_MLQUEUE;
}
return SCSI_RETURN_NOT_HANDLED;
{
int err;
- err = alua_std_inquiry(sdev, h);
+ err = alua_check_tgps(sdev, h);
if (err != SCSI_DH_OK)
goto out;
}
static const struct scsi_dh_devlist alua_dev_list[] = {
- {"HP", "MSA VOLUME" },
- {"HP", "HSV101" },
- {"HP", "HSV111" },
- {"HP", "HSV200" },
- {"HP", "HSV210" },
- {"HP", "HSV300" },
- {"IBM", "2107900" },
- {"IBM", "2145" },
- {"Pillar", "Axiom" },
- {NULL, NULL}
+ {"", "", 3 },
+ {NULL, NULL, 0}
};
static int alua_bus_attach(struct scsi_device *sdev);
}
static const struct scsi_dh_devlist clariion_dev_list[] = {
- {"DGC", "RAID"},
- {"DGC", "DISK"},
- {"DGC", "VRAID"},
- {NULL, NULL},
+ {"DGC", "RAID", 0},
+ {"DGC", "DISK", 0},
+ {"DGC", "VRAID", 0},
+ {NULL, NULL, 0},
};
static int clariion_bus_attach(struct scsi_device *sdev);
}
static const struct scsi_dh_devlist hp_sw_dh_data_list[] = {
- {"COMPAQ", "MSA1000 VOLUME"},
- {"COMPAQ", "HSV110"},
- {"HP", "HSV100"},
- {"DEC", "HSG80"},
- {NULL, NULL},
+ {"COMPAQ", "MSA1000 VOLUME", 0},
+ {"COMPAQ", "HSV110", 0},
+ {"HP", "HSV100", 0},
+ {"DEC", "HSG80", 0},
+ {NULL, NULL, 0},
};
static int hp_sw_bus_attach(struct scsi_device *sdev);
}
static const struct scsi_dh_devlist rdac_dev_list[] = {
- {"IBM", "1722"},
- {"IBM", "1724"},
- {"IBM", "1726"},
- {"IBM", "1742"},
- {"IBM", "1814"},
- {"IBM", "1815"},
- {"IBM", "1818"},
- {"IBM", "3526"},
- {"SGI", "TP9400"},
- {"SGI", "TP9500"},
- {"SGI", "IS"},
- {"STK", "OPENstorage D280"},
- {"SUN", "CSM200_R"},
- {"SUN", "LCSM100_F"},
- {"DELL", "MD3000"},
- {"DELL", "MD3000i"},
- {NULL, NULL},
+ {"IBM", "1722", 0},
+ {"IBM", "1724", 0},
+ {"IBM", "1726", 0},
+ {"IBM", "1742", 0},
+ {"IBM", "1814", 0},
+ {"IBM", "1815", 0},
+ {"IBM", "1818", 0},
+ {"IBM", "3526", 0},
+ {"SGI", "TP9400", 0},
+ {"SGI", "TP9500", 0},
+ {"SGI", "IS", 0},
+ {"STK", "OPENstorage D280", 0},
+ {"SUN", "CSM200_R", 0},
+ {"SUN", "LCSM100_F", 0},
+ {"DELL", "MD3000", 0},
+ {"DELL", "MD3000i", 0},
+ {NULL, NULL, 0},
};
static int rdac_bus_attach(struct scsi_device *sdev);
/* Number of MSI-X vectors the driver uses */
#define LPFC_MSIX_VECTORS 2
+/* Active interrupt test threshold */
+#define LPFC_INTR_THRESHOLD 1
+
/* lpfc wait event data ready flag */
#define LPFC_DATA_READY (1<<0)
uint32_t cfg_hba_queue_depth;
uint32_t cfg_enable_hba_reset;
uint32_t cfg_enable_hba_heartbeat;
+ uint32_t cfg_pci_max_read;
lpfc_vpd_t vpd; /* vital product data */
struct fc_host_statistics link_stats;
enum intr_type_t intr_type;
+ uint32_t intr_mode;
+#define LPFC_INTR_ERROR 0xFFFFFFFF
struct msix_entry msix_entries[LPFC_MSIX_VECTORS];
struct lpfcdfc_host *dfc_host;
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_auth_access.h"
+#include "lpfc_security.h"
#define LPFC_DEF_DEVLOSS_TMO 30
#define LPFC_MIN_DEVLOSS_TMO 1
*/
LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_DEFAULT_SG_SEG_CNT,
LPFC_MAX_SG_SEG_CNT, "Max Scatter Gather Segment Count");
+/*
+ * lpfc_pci_max_read: Maximum DMA read byte count. This parameter can have
+ * values 512, 1024, 2048, 4096. Default value is 2048.
+ */
+static int lpfc_pci_max_read = 2048;
+module_param(lpfc_pci_max_read, int, 0);
+MODULE_PARM_DESC(lpfc_pci_max_read,
+ "Maximum DMA read byte count. Allowed values:"
+ " 512,1024,2048,4096.");
+static int
+lpfc_pci_max_read_init(struct lpfc_hba *phba, int val)
+{
+ phba->cfg_pci_max_read = 2048;
+ if ((val == 512) || (val == 1024) || (val == 2048) || (val == 4096))
+ phba->cfg_pci_max_read = val;
+ return 0;
+}
+
+lpfc_param_show(pci_max_read)
+static DEVICE_ATTR(lpfc_pci_max_read, S_IRUGO,
+ lpfc_pci_max_read_show, NULL);
struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_info,
&dev_attr_lpfc_fdmi_on,
&dev_attr_lpfc_max_luns,
&dev_attr_lpfc_enable_npiv,
+ &dev_attr_lpfc_pci_max_read,
&dev_attr_nport_evt_cnt,
&dev_attr_board_mode,
&dev_attr_max_vpi,
if (error)
goto out_remove_ctlreg_attr;
+ error = sysfs_create_bin_file(&shost->shost_dev.kobj,
+ &sysfs_menlo_attr);
+ if (error)
+ goto out_remove_menlo_attr;
+
+
return 0;
out_remove_ctlreg_attr:
sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr);
out_remove_stat_attr:
sysfs_remove_bin_file(&shost->shost_dev.kobj,
&sysfs_drvr_stat_data_attr);
+out_remove_menlo_attr:
+ sysfs_remove_bin_file(&shost->shost_dev.kobj,
+ &sysfs_menlo_attr);
+
out:
return error;
}
if (vport->fc_flag & FC_OFFLINE_MODE)
fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
else {
+ if ((vport->cfg_enable_auth) &&
+ (lpfc_security_service_state == SECURITY_OFFLINE)) {
+ fc_host_port_state(shost) = FC_PORTSTATE_ERROR;
+ spin_unlock_irq(shost->host_lock);
+ return;
+ }
+
switch (phba->link_state) {
case LPFC_LINK_UNKNOWN:
case LPFC_LINK_DOWN:
lpfc_link_speed_init(phba, lpfc_link_speed);
lpfc_poll_tmo_init(phba, lpfc_poll_tmo);
lpfc_enable_npiv_init(phba, lpfc_enable_npiv);
+ lpfc_pci_max_read_init(phba, lpfc_pci_max_read);
lpfc_use_msi_init(phba, lpfc_use_msi);
lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset);
lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat);
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2006-2007 Emulex. All rights reserved. *
+ * Copyright (C) 2006-2008 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
fc_sc_req->tran_id = seq;
len = sizeof(struct fc_nl_sc_message) + auth_req_len;
- fc_nl_sc_msg = kzalloc(sizeof(struct fc_nl_sc_message) + auth_req_len,
- GFP_KERNEL);
- if (!fc_nl_sc_msg)
+ fc_nl_sc_msg = kzalloc(len, GFP_KERNEL);
+ if (!fc_nl_sc_msg) {
+ kfree(fc_sc_req);
return -ENOMEM;
+ }
fc_nl_sc_msg->msgtype = msg_type;
fc_nl_sc_msg->data_len = auth_req_len;
memcpy(fc_nl_sc_msg->data, auth_req, auth_req_len);
scsi_nl_send_vendor_msg(fc_service_pid, shost->host_no,
(SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX),
(char *) fc_nl_sc_msg, len);
+ kfree(fc_nl_sc_msg);
lpfc_fc_sc_add_timer(fc_sc_req, FC_SC_REQ_TIMEOUT,
lpfc_fc_sc_req_times_out);
return 0;
void lpfc_read_lnk_stat(struct lpfc_hba *, LPFC_MBOXQ_t *);
int lpfc_reg_login(struct lpfc_hba *, uint16_t, uint32_t, uint8_t *,
LPFC_MBOXQ_t *, uint32_t);
+void lpfc_set_var(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
void lpfc_unreg_login(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
void lpfc_unreg_did(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
void lpfc_reg_vpi(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
int lpfc_sli_setup(struct lpfc_hba *);
int lpfc_sli_queue_setup(struct lpfc_hba *);
+int lpfc_sli_set_dma_length(struct lpfc_hba *, uint32_t);
+
void lpfc_handle_eratt(struct lpfc_hba *);
void lpfc_handle_latt(struct lpfc_hba *);
irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_ns_retry);
/* Don't bother processing response if vport is being torn down. */
- if (vport->load_flag & FC_UNLOADING)
+ if (vport->load_flag & FC_UNLOADING) {
+ if (vport->fc_flag & FC_RSCN_MODE)
+ lpfc_els_flush_rscn(vport);
goto out;
+ }
if (lpfc_els_chk_latt(vport)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0216 Link event during NS query\n");
+ if (vport->fc_flag & FC_RSCN_MODE)
+ lpfc_els_flush_rscn(vport);
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
goto out;
}
if (lpfc_error_lost_link(irsp)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0226 NS query failed due to link event\n");
+ if (vport->fc_flag & FC_RSCN_MODE)
+ lpfc_els_flush_rscn(vport);
goto out;
}
if (irsp->ulpStatus) {
if (rc == 0)
goto out;
}
+ if (vport->fc_flag & FC_RSCN_MODE)
+ lpfc_els_flush_rscn(vport);
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
"0257 GID_FT Query error: 0x%x 0x%x\n",
return elsiocb;
els_iocb_free_pbuf_exit:
- lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
+ if (expectRsp)
+ lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
kfree(pbuflist);
els_iocb_free_prsp_exit:
struct lpfc_hba *phba = vport->phba;
struct lpfc_nodelist *ndlp;
+ if ((vport->cfg_enable_auth) &&
+ (lpfc_security_service_state == SECURITY_OFFLINE))
+ return 1;
+
vport->port_state = LPFC_FLOGI;
lpfc_set_disctmo(vport);
uint32_t timeout;
uint32_t remote_ID = 0xffffffff;
- /* If the timer is already canceled do nothing */
- if ((vport->work_port_events & WORKER_ELS_TMO) == 0) {
- return;
- }
spin_lock_irq(&phba->hbalock);
timeout = (uint32_t)(phba->fc_ratov << 1);
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"
+#include "lpfc_security.h"
/* AlpaArray for assignment of scsid for scan-down and bind_method */
static uint8_t lpfcAlpaArray[] = {
/* Start discovery by sending a FLOGI. port_state is identically
* LPFC_FLOGI while waiting for FLOGI cmpl
*/
- if (vport->port_state != LPFC_FLOGI) {
+ if ((vport->cfg_enable_auth) &&
+ (lpfc_security_service_state == SECURITY_OFFLINE))
+ lpfc_issue_clear_la(phba, vport);
+ else if (vport->port_state != LPFC_FLOGI)
lpfc_initial_flogi(vport);
- }
+
return;
out:
uint32_t rsvd1;
} CLEAR_LA_VAR;
+/* Structure for MB Command SET_SLIM (33) */
+/* Values needed to set MAX_DMA_LENGTH parameter */
+#define SLIM_VAR_MAX_DMA_LENGTH 0x100506
+#define SLIM_VAL_MAX_DMA_512 0x0
+#define SLIM_VAL_MAX_DMA_1024 0x1
+#define SLIM_VAL_MAX_DMA_2048 0x2
+#define SLIM_VAL_MAX_DMA_4096 0x3
+
/* Structure for MB Command DUMP */
typedef struct {
uint32_t pcbLow; /* bit 31:0 of memory based port config block */
uint32_t pcbHigh; /* bit 63:32 of memory based port config block */
- uint32_t hbainit[6];
+ uint32_t hbainit[5];
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t hps : 1; /* bit 31 word9 Host Pointer in slim */
+ uint32_t rsvd : 31; /* least significant 31 bits of word 9 */
+#else /* __LITTLE_ENDIAN */
+ uint32_t rsvd : 31; /* least significant 31 bits of word 9 */
+ uint32_t hps : 1; /* bit 31 word9 Host Pointer in slim */
+#endif
#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t rsvd : 24; /* Reserved */
+ uint32_t rsvd1 : 24; /* Reserved */
uint32_t cmv : 1; /* Configure Max VPIs */
uint32_t ccrp : 1; /* Config Command Ring Polling */
uint32_t csah : 1; /* Configure Synchronous Abort Handling */
uint32_t csah : 1; /* Configure Synchronous Abort Handling */
uint32_t ccrp : 1; /* Config Command Ring Polling */
uint32_t cmv : 1; /* Configure Max VPIs */
- uint32_t rsvd : 24; /* Reserved */
+ uint32_t rsvd1 : 24; /* Reserved */
#endif
#ifdef __BIG_ENDIAN_BITFIELD
uint32_t rsvd2 : 24; /* Reserved */
/* character array used for decoding dist type. */
char dist_char[] = "nabx";
- if (pmboxq->mb.mbxStatus != MBX_SUCCESS)
+ if (pmboxq->mb.mbxStatus != MBX_SUCCESS) {
+ mempool_free(pmboxq, phba->mbox_mem_pool);
return;
+ }
prg = (struct prog_id *) &prog_id_word;
sprintf(phba->OptionROMVersion, "%d.%d%d%c%d",
prg->ver, prg->rev, prg->lev,
dist, prg->num);
+ mempool_free(pmboxq, phba->mbox_mem_pool);
return;
}
/* Set up error attention (ERATT) polling timer */
mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
+ /* Use the existing MBOX buffer, it will be freed in mbox compl */
+ lpfc_config_async(phba, pmb, LPFC_ELS_RING);
+ pmb->mbox_cmpl = lpfc_config_async_cmpl;
+ pmb->vport = phba->pport;
+ rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
+ if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0456 Adapter failed to issue "
+ "ASYNCEVT_ENABLE mbox status x%x \n.", rc);
+ mempool_free(pmb, phba->mbox_mem_pool);
+ }
+
+ /* Allocate new MBOX buffer, it will be freed in mbox compl */
+ pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ lpfc_dump_wakeup_param(phba, pmb);
+ pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
+ pmb->vport = phba->pport;
+ rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
+ if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0435 Adapter failed to get Option "
+ "ROM version status x%x\n.", rc);
+ mempool_free(pmb, phba->mbox_mem_pool);
+ }
+
if (vport->cfg_enable_auth) {
if (lpfc_security_service_state == SECURITY_OFFLINE) {
lpfc_printf_log(vport->phba, KERN_ERR, LOG_SECURITY,
"1000 Authentication is enabled but "
"authentication service is not running\n");
vport->auth.auth_mode = FC_AUTHMODE_UNKNOWN;
- phba->link_state = LPFC_HBA_ERROR;
- mempool_free(pmb, phba->mbox_mem_pool);
- return 0;
}
}
+ /* Allocate new MBOX buffer, will be freed in mbox compl */
+ pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed);
pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
- if (rc != MBX_SUCCESS) {
+ if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0454 Adapter failed to init, mbxCmd x%x "
"INIT_LINK, mbxStatus x%x\n",
readl(phba->HAregaddr); /* flush */
phba->link_state = LPFC_HBA_ERROR;
- if (rc != MBX_BUSY)
- mempool_free(pmb, phba->mbox_mem_pool);
- return -EIO;
- }
- /* MBOX buffer will be freed in mbox compl */
- pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- lpfc_config_async(phba, pmb, LPFC_ELS_RING);
- pmb->mbox_cmpl = lpfc_config_async_cmpl;
- pmb->vport = phba->pport;
- rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
-
- if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
- lpfc_printf_log(phba,
- KERN_ERR,
- LOG_INIT,
- "0456 Adapter failed to issue "
- "ASYNCEVT_ENABLE mbox status x%x \n.",
- rc);
- mempool_free(pmb, phba->mbox_mem_pool);
- }
-
- /* Get Option rom version */
- pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- lpfc_dump_wakeup_param(phba, pmb);
- pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
- pmb->vport = phba->pport;
- rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
-
- if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
- "to get Option ROM version status x%x\n.", rc);
mempool_free(pmb, phba->mbox_mem_pool);
+ return -EIO;
}
-
return 0;
}
return;
spin_lock_irq(&phba->pport->work_port_lock);
- /* If the timer is already canceled do nothing */
- if (!(phba->pport->work_port_events & WORKER_HB_TMO)) {
- spin_unlock_irq(&phba->pport->work_port_lock);
- return;
- }
if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ,
jiffies)) {
}
/**
+ * lpfc_setup_max_dma_length: Check the host's chipset and adjust HBA's
+ * max DMA length.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to test the machine's chipsets. Some of Emulex's
+ * HBA models expose bugs in these chipsets. To work around these bugs we
+ * tell the HBA to use a smaller maximum DMA length.
+ * This routine is only called during module init. The DMA length is passed
+ * to the driver as a module parameter (lpfc_pci_max_read).
+ *
+ * return: NONE.
+ **/
+void
+lpfc_setup_max_dma_length(struct lpfc_hba *phba)
+{
+ struct pci_dev *pdev = phba->pcidev;
+ struct pci_bus *bus = pdev->bus;
+ uint8_t rev;
+
+ while (bus) {
+ /*
+ * 0x7450 == PCI_DEVICE_ID_AMD_8131_BRIDGE for 2.6 kernels
+ * 0x7450 == PCI_DEVICE_ID_AMD_8131_APIC for 2.4 kernels
+ */
+ if (bus->self &&
+ (bus->self->vendor == PCI_VENDOR_ID_AMD) &&
+ (bus->self->device == 0x7450)) {
+ pci_read_config_byte(bus->self, 0x08, &rev);
+ if (rev == 0x13) {
+ /*
+ * If a value was set via the module parameter,
+ * use that value.
+ */
+ if (phba->cfg_pci_max_read == 2048)
+ phba->cfg_pci_max_read = 1024;
+ return;
+ }
+ }
+ bus = bus->parent;
+ }
+ return;
+}
+
+
+/**
* lpfc_enable_msix: Enable MSI-X interrupt mode.
* @phba: pointer to lpfc hba data structure.
*
ARRAY_SIZE(phba->msix_entries));
if (rc) {
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "0420 Enable MSI-X failed (%d), continuing "
- "with MSI\n", rc);
+ "0420 PCI enable MSI-X failed (%d)\n", rc);
goto msi_fail_out;
} else
for (i = 0; i < LPFC_MSIX_VECTORS; i++)
rc = request_irq(phba->msix_entries[0].vector, &lpfc_sp_intr_handler,
IRQF_SHARED, LPFC_SP_DRIVER_HANDLER_NAME, phba);
if (rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"0421 MSI-X slow-path request_irq failed "
- "(%d), continuing with MSI\n", rc);
+ "(%d)\n", rc);
goto msi_fail_out;
}
IRQF_SHARED, LPFC_FP_DRIVER_HANDLER_NAME, phba);
if (rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"0429 MSI-X fast-path request_irq failed "
- "(%d), continuing with MSI\n", rc);
+ "(%d)\n", rc);
goto irq_fail_out;
}
goto mbx_fail_out;
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
if (rc != MBX_SUCCESS) {
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+ lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
"0351 Config MSI mailbox command failed, "
"mbxCmd x%x, mbxStatus x%x\n",
pmb->mb.mbxCommand, pmb->mb.mbxStatus);
}
/**
+ * lpfc_enable_msi: Enable MSI interrupt mode.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to enable the MSI interrupt mode. The kernel
+ * function pci_enable_msi() is called to enable the MSI vector. The
+ * device driver is responsible for calling request_irq() to register the
+ * MSI vector with an interrupt handler, which is done in this function.
+ *
+ * Return codes
+ * 0 - successful
+ * other values - error
+ */
+static int
+lpfc_enable_msi(struct lpfc_hba *phba)
+{
+ int rc;
+
+ rc = pci_enable_msi(phba->pcidev);
+ if (!rc)
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "0462 PCI enable MSI mode success.\n");
+ else {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "0471 PCI enable MSI mode failed (%d)\n", rc);
+ return rc;
+ }
+
+ rc = request_irq(phba->pcidev->irq, lpfc_intr_handler,
+ IRQF_SHARED, LPFC_DRIVER_NAME, phba);
+ if (rc) {
+ pci_disable_msi(phba->pcidev);
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "0478 MSI request_irq failed (%d)\n", rc);
+ }
+ return rc;
+}
+
+/**
+ * lpfc_disable_msi: Disable MSI interrupt mode.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to disable the MSI interrupt mode. The driver
+ * calls free_irq() on the MSI vector it has done request_irq() on before
+ * calling pci_disable_msi(). Failure to do so results in a BUG_ON() and
+ * the device being left with MSI enabled, leaking its vector.
+ */
+
+static void
+lpfc_disable_msi(struct lpfc_hba *phba)
+{
+ free_irq(phba->pcidev->irq, phba);
+ pci_disable_msi(phba->pcidev);
+ return;
+}
+
+/**
+ * lpfc_log_intr_mode: Log the active interrupt mode
+ * @phba: pointer to lpfc hba data structure.
+ * @intr_mode: active interrupt mode adopted.
+ *
+ * This routine is invoked to log the currently used active interrupt mode
+ * to the device.
+ */
+static void
+lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
+{
+ switch (intr_mode) {
+ case 0:
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "0470 Enable INTx interrupt mode.\n");
+ break;
+ case 1:
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "0481 Enabled MSI interrupt mode.\n");
+ break;
+ case 2:
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "0480 Enabled MSI-X interrupt mode.\n");
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0482 Illegal interrupt mode.\n");
+ break;
+ }
+ return;
+}
+
+static void
+lpfc_stop_port(struct lpfc_hba *phba)
+{
+ /* Clear all interrupt enable conditions */
+ writel(0, phba->HCregaddr);
+ readl(phba->HCregaddr); /* flush */
+ /* Clear all pending interrupts */
+ writel(0xffffffff, phba->HAregaddr);
+ readl(phba->HAregaddr); /* flush */
+
+ /* Reset some HBA SLI setup states */
+ lpfc_stop_phba_timers(phba);
+ phba->pport->work_port_events = 0;
+
+ return;
+}
+
+/**
* lpfc_enable_intr: Enable device interrupt.
* @phba: pointer to lpfc hba data structure.
*
* 0 - sucessful
* other values - error
**/
-static int
-lpfc_enable_intr(struct lpfc_hba *phba)
+static uint32_t
+lpfc_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
{
- int retval = 0;
-
- /* Starting point of configuring interrupt method */
- phba->intr_type = NONE;
+ uint32_t intr_mode = LPFC_INTR_ERROR;
+ int retval;
- if (phba->cfg_use_msi == 2) {
+ if (cfg_mode == 2) {
/* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
retval = lpfc_sli_config_port(phba, 3);
- if (retval)
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "0478 Firmware not capable of SLI 3 mode.\n");
- else {
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "0479 Firmware capable of SLI 3 mode.\n");
+ if (!retval) {
/* Now, try to enable MSI-X interrupt mode */
retval = lpfc_enable_msix(phba);
if (!retval) {
+ /* Indicate initialization to MSI-X mode */
phba->intr_type = MSIX;
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "0480 enable MSI-X mode.\n");
+ intr_mode = 2;
}
}
}
/* Fallback to MSI if MSI-X initialization failed */
- if (phba->cfg_use_msi >= 1 && phba->intr_type == NONE) {
- retval = pci_enable_msi(phba->pcidev);
+ if (cfg_mode >= 1 && phba->intr_type == NONE) {
+ retval = lpfc_enable_msi(phba);
if (!retval) {
+ /* Indicate initialization to MSI mode */
phba->intr_type = MSI;
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "0481 enable MSI mode.\n");
- } else
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "0470 enable IRQ mode.\n");
+ intr_mode = 1;
+ }
}
- /* MSI-X is the only case the doesn't need to call request_irq */
- if (phba->intr_type != MSIX) {
+ /* Fallback to INTx if both MSI-X/MSI initalization failed */
+ if (phba->intr_type == NONE) {
retval = request_irq(phba->pcidev->irq, lpfc_intr_handler,
IRQF_SHARED, LPFC_DRIVER_NAME, phba);
- if (retval) {
- if (phba->intr_type == MSI)
- pci_disable_msi(phba->pcidev);
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0471 Enable interrupt handler "
- "failed\n");
- } else if (phba->intr_type != MSI)
+ if (!retval) {
+ /* Indicate initialization to INTx mode */
phba->intr_type = INTx;
+ intr_mode = 0;
+ }
}
-
- return retval;
+ return intr_mode;
}
/**
static void
lpfc_disable_intr(struct lpfc_hba *phba)
{
+ /* Disable the currently initialized interrupt mode */
if (phba->intr_type == MSIX)
lpfc_disable_msix(phba);
- else {
+ else if (phba->intr_type == MSI)
+ lpfc_disable_msi(phba);
+ else if (phba->intr_type == INTx)
free_irq(phba->pcidev->irq, phba);
- if (phba->intr_type == MSI)
- pci_disable_msi(phba->pcidev);
- }
+
+ /* Reset interrupt management states */
+ phba->intr_type = NONE;
+ phba->sli.slistat.sli_intr = 0;
+
return;
}
int error = -ENODEV, retval;
int i, hbq_count;
uint16_t iotag;
+ uint32_t cfg_mode, intr_mode;
int bars = pci_select_bars(pdev, IORESOURCE_MEM);
struct lpfc_adapter_event_header adapter_event;
* establish the host.
*/
lpfc_get_cfgparam(phba);
+ /* Check if we need to change the DMA length */
+ lpfc_setup_max_dma_length(phba);
+
phba->max_vpi = lpfc_hba_max_vpi(phba->pcidev->device);
/* Initialize timers used by driver */
phba->eratt_poll.data = (unsigned long) phba;
pci_set_master(pdev);
+ pci_save_state(pdev);
pci_try_set_mwi(pdev);
if (pci_set_dma_mask(phba->pcidev, DMA_64BIT_MASK) != 0)
if ((lpfc_get_security_enabled)(shost)) {
unsigned long flags;
- /* Triggers fcauthd to register if it is running */
- fc_host_post_event(shost, fc_get_event_number(),
- FCH_EVT_PORT_ONLINE, shost->host_no);
spin_lock_irqsave(&fc_security_user_lock, flags);
list_add_tail(&vport->sc_users, &fc_security_user_list);
spin_unlock_irqrestore(&fc_security_user_lock, flags);
lpfc_fc_queue_security_work(vport,
&vport->sc_online_work);
}
+ /* Triggers fcauthd to register if it is running */
+ fc_host_post_event(shost, fc_get_event_number(),
+ FCH_EVT_PORT_ONLINE, shost->host_no);
}
phba->pport = vport;
lpfc_debugfs_initialize(vport);
phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
- /* Configure and enable interrupt */
- error = lpfc_enable_intr(phba);
- if (error) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0426 Failed to enable interrupt.\n");
- goto out_destroy_port;
- }
-
+ /* Configure sysfs attributes */
phba->dfc_host = lpfcdfc_host_add(pdev, shost, phba);
if (!phba->dfc_host) {
lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
"1201 Failed to allocate dfc_host \n");
error = -ENOMEM;
- goto out_free_irq;
+ goto out_destroy_port;
}
if (lpfc_alloc_sysfs_attr(vport)) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"1476 Failed to allocate sysfs attr\n");
error = -ENOMEM;
- goto out_free_irq;
+ goto out_del_dfc_host;
}
- if (lpfc_sli_hba_setup(phba)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "1477 Failed to set up hba\n");
- error = -ENODEV;
- goto out_remove_device;
+ cfg_mode = phba->cfg_use_msi;
+ while (true) {
+ /* Configure and enable interrupt */
+ intr_mode = lpfc_enable_intr(phba, cfg_mode);
+ if (intr_mode == LPFC_INTR_ERROR) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0426 Failed to enable interrupt.\n");
+ goto out_free_sysfs_attr;
+ }
+ /* HBA SLI setup */
+ if (lpfc_sli_hba_setup(phba)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1477 Failed to set up hba\n");
+ error = -ENODEV;
+ goto out_remove_device;
+ }
+
+ /* Wait 50ms for the interrupts of previous mailbox commands */
+ msleep(50);
+ /* Check active interrupts received */
+ if (phba->sli.slistat.sli_intr > LPFC_INTR_THRESHOLD) {
+ /* Log the current active interrupt mode */
+ phba->intr_mode = intr_mode;
+ lpfc_log_intr_mode(phba, intr_mode);
+ break;
+ } else {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "0451 Configure interrupt mode (%d) "
+ "failed active interrupt test.\n",
+ intr_mode);
+ if (intr_mode == 0) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0479 Failed to enable "
+ "interrupt.\n");
+ error = -ENODEV;
+ goto out_remove_device;
+ }
+ /* Stop HBA SLI setups */
+ lpfc_stop_port(phba);
+ /* Disable the current interrupt mode */
+ lpfc_disable_intr(phba);
+ /* Try next level of interrupt mode */
+ cfg_mode = --intr_mode;
+ }
}
/*
return 0;
out_remove_device:
- lpfc_free_sysfs_attr(vport);
spin_lock_irq(shost->host_lock);
vport->load_flag |= FC_UNLOADING;
spin_unlock_irq(shost->host_lock);
-out_free_irq:
- if (phba->dfc_host)
- lpfcdfc_host_del(phba->dfc_host);
lpfc_stop_phba_timers(phba);
phba->pport->work_port_events = 0;
lpfc_disable_intr(phba);
+ lpfc_sli_hba_down(phba);
+ lpfc_sli_brdrestart(phba);
+out_free_sysfs_attr:
+ lpfc_free_sysfs_attr(vport);
+out_del_dfc_host:
+ if (phba->dfc_host)
+ lpfcdfc_host_del(phba->dfc_host);
out_destroy_port:
destroy_port(vport);
out_kthread_stop:
{
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+ uint32_t intr_mode;
int error;
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
return error;
}
- /* Enable interrupt from device */
- error = lpfc_enable_intr(phba);
- if (error) {
+ /* Configure and enable interrupt */
+ intr_mode = lpfc_enable_intr(phba, phba->intr_mode);
+ if (intr_mode == LPFC_INTR_ERROR) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0430 PM resume Failed to enable interrupt: "
- "error=x%x.\n", error);
- return error;
- }
+ "0430 PM resume Failed to enable interrupt\n");
+ return -EIO;
+ } else
+ phba->intr_mode = intr_mode;
/* Restart HBA and bring it online */
lpfc_sli_brdrestart(phba);
lpfc_online(phba);
+ /* Log the current active interrupt mode */
+ lpfc_log_intr_mode(phba, phba->intr_mode);
+
return 0;
}
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
struct lpfc_sli *psli = &phba->sli;
- int error;
+ uint32_t intr_mode;
dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
if (pci_enable_device_mem(pdev)) {
return PCI_ERS_RESULT_DISCONNECT;
}
- pci_set_master(pdev);
+ pci_restore_state(pdev);
+ if (pdev->is_busmaster)
+ pci_set_master(pdev);
spin_lock_irq(&phba->hbalock);
psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
spin_unlock_irq(&phba->hbalock);
- /* Enable configured interrupt method */
- error = lpfc_enable_intr(phba);
- if (error) {
+ /* Configure and enable interrupt */
+ intr_mode = lpfc_enable_intr(phba, phba->intr_mode);
+ if (intr_mode == LPFC_INTR_ERROR) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0427 Cannot re-enable interrupt after "
"slot reset.\n");
return PCI_ERS_RESULT_DISCONNECT;
- }
+ } else
+ phba->intr_mode = intr_mode;
/* Take device offline; this will perform cleanup */
lpfc_offline(phba);
lpfc_sli_brdrestart(phba);
+ /* Log the current active interrupt mode */
+ lpfc_log_intr_mode(phba, phba->intr_mode);
+
return PCI_ERS_RESULT_RECOVERED;
}
for (i0 = 0;
i0 < 10 && (pndl->nlp_flag & NLP_ELS_SND_MASK) == NLP_RNID_SND;
i0++) {
- mdelay(1000);
+ msleep(1000);
}
if (i0 == 10) {
outdmp = dfc_cmd_data_alloc(phba, NULL, bpl, snsbfrcnt);
if (!outdmp) {
rc = ENOMEM;
- spin_lock_irq(shost->host_lock);
goto send_mgmt_cmd_free_indmp;
}
if (i++ > 500) /* wait up to 5 seconds */
break;
- mdelay(10);
+ msleep(10);
}
memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));
}
/**
+ * lpfc_set_var: Prepare a mailbox command to write slim.
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ * @addr: This the set variable number that identifies the variable.
+ * @value:The value that we are setting the parameter to.
+ *
+ * The routine just sets the addr and value in the set variable mailbox
+ * command structure.
+ * returns: NONE.
+ **/
+void
+lpfc_set_var(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, uint32_t addr,
+ uint32_t value)
+{
+ MAILBOX_t *mb;
+
+ mb = &pmb->mb;
+ /* mb aliases into pmb; clear the whole queue element before filling */
+ memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
+
+ /*
+ * Always turn on DELAYED ABTS for ELS timeouts
+ * (NOTE(review): 0x052198 presumably is the delayed-ABTS variable
+ * address in SLIM - confirm against the SLIM variable map.)
+ */
+ if ((addr == 0x052198) && (value == 0))
+ value = 1;
+
+ mb->un.varWords[0] = addr;
+ mb->un.varWords[1] = value;
+
+ mb->mbxCommand = MBX_SET_VARIABLE;
+ mb->mbxOwner = OWN_HOST;
+ return;
+}
+
+/**
* lpfc_read_rev: Prepare a mailbox command for reading HBA revision.
* @phba: pointer to lpfc hba data structure.
* @pmb: pointer to the driver internal queue element for mailbox command.
mb->un.varCfgPort.pcbLow = putPaddrLow(pdma_addr);
mb->un.varCfgPort.pcbHigh = putPaddrHigh(pdma_addr);
+ /* Always Host Group Pointer is in SLIM */
+ mb->un.varCfgPort.hps = 1;
+
/* If HBA supports SLI=3 ask for it */
if (phba->sli_rev == 3 && phba->vpd.sli3Feat.cerbm) {
sizeof(*phba->host_gp));
}
- /* Setup Port Group ring pointer */
- if (phba->sli3_options & LPFC_SLI3_INB_ENABLED) {
- pgp_offset = offsetof(struct lpfc_sli2_slim,
- mbx.us.s3_inb_pgp.port);
- phba->hbq_get = phba->mbox->us.s3_inb_pgp.hbq_get;
- } else if (phba->sli_rev == 3) {
+ /* Setup Port Group offset */
+ if (phba->sli_rev == 3)
pgp_offset = offsetof(struct lpfc_sli2_slim,
mbx.us.s3_pgp.port);
- phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
- } else
+ else
pgp_offset = offsetof(struct lpfc_sli2_slim, mbx.us.s2.port);
pdma_addr = phba->slim2p.phys + pgp_offset;
phba->pcb->pgpAddrHigh = putPaddrHigh(pdma_addr);
sysfs_menlo_idle(struct lpfc_hba *phba,
struct lpfc_sysfs_menlo *sysfs_menlo)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
spin_lock_irq(&phba->hbalock);
list_del_init(&sysfs_menlo->list);
spin_unlock_irq(&phba->hbalock);
- spin_lock_irq(shost->host_lock);
if (sysfs_menlo->cr.cmdiocbq)
sysfs_menlo_genreq_free(phba, &sysfs_menlo->cr);
if (sysfs_menlo->cx.cmdiocbq)
sysfs_menlo_genreq_free(phba, &sysfs_menlo->cx);
- spin_unlock_irq(shost->host_lock);
kfree(sysfs_menlo);
}
}
if ((count + sysfs_menlo->cr.offset) > sysfs_menlo->cmdhdr.cmdsize) {
- if ( sysfs_menlo->cmdhdr.cmdsize != 4) {
- lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ if (sysfs_menlo->cmdhdr.cmdsize >=
+ sizeof(struct lpfc_sysfs_menlo_hdr)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
"1213 FCoE cmd overflow: off %d + cnt %d > cmdsz %d\n",
- (int)sysfs_menlo->cr.offset,
- (int)count,
- (int)sysfs_menlo->cmdhdr.cmdsize);
- sysfs_menlo_idle(phba, sysfs_menlo);
- return -ERANGE;
+ (int)sysfs_menlo->cr.offset,
+ (int)count,
+ (int)sysfs_menlo->cmdhdr.cmdsize);
+ sysfs_menlo_idle(phba, sysfs_menlo);
+ return -ERANGE;
}
}
if (vport->fc_flag & FC_RSCN_DEFERRED)
return ndlp->nlp_state;
+ lpfc_cancel_retry_delay_tmo(vport, ndlp);
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
spin_unlock_irq(shost->host_lock);
- lpfc_cancel_retry_delay_tmo(vport, ndlp);
return ndlp->nlp_state;
}
if (cmd->result)
return;
+ latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time);
+
spin_lock_irqsave(shost->host_lock, flags);
if (!vport->stat_data_enabled ||
vport->stat_data_blocked ||
spin_unlock_irqrestore(shost->host_lock, flags);
return;
}
- latency = jiffies_to_msecs(jiffies - lpfc_cmd->start_time);
if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
i = (latency + phba->bucket_step - 1 - phba->bucket_base)/
phba->bucket_step;
- if (i >= LPFC_MAX_BUCKET_COUNT)
- i = LPFC_MAX_BUCKET_COUNT;
+ /* check array subscript bounds */
+ if (i < 0)
+ i = 0;
+ else if (i >= LPFC_MAX_BUCKET_COUNT)
+ i = LPFC_MAX_BUCKET_COUNT - 1;
} else {
for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++)
if (latency <= (phba->bucket_base +
bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
- bpl[0].tus.w = le32_to_cpu(bpl->tus.w);
+ bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);
/* Setup the physical region for the FCP RSP */
bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
- bpl[1].tus.w = le32_to_cpu(bpl->tus.w);
+ bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);
/*
* Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf,
if (!pnode || !NLP_CHK_NODE_ACT(pnode)
|| (pnode->nlp_state != NLP_STE_MAPPED_NODE))
cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED,
- SAM_STAT_BUSY);
+ SAM_STAT_BUSY);
} else {
cmd->result = ScsiResult(DID_OK, 0);
}
lpfc_security_service_state = SECURITY_ONLINE;
if (vport->cfg_enable_auth &&
- vport->auth.auth_mode == FC_AUTHMODE_UNKNOWN &&
- vport->phba->link_state == LPFC_HBA_ERROR)
+ vport->auth.auth_mode == FC_AUTHMODE_UNKNOWN)
lpfc_selective_reset(vport->phba);
}
}
/**
- * lpfc_sli_replace_hbqbuff: Replace the HBQ buffer with a new buffer.
- * @phba: Pointer to HBA context object.
- * @tag: Tag for the HBQ buffer.
- *
- * This function is called from unsolicited event handler code path to get the
- * HBQ buffer associated with an unsolicited iocb. This function is called with
- * no lock held. It returns the buffer associated with the given tag and posts
- * another buffer to the firmware. Note that the new buffer must be allocated
- * before taking the hbalock and that the hba lock must be held until it is
- * finished with the hbq entry swap.
- **/
-static struct lpfc_dmabuf *
-lpfc_sli_replace_hbqbuff(struct lpfc_hba *phba, uint32_t tag)
-{
- struct hbq_dmabuf *hbq_entry, *new_hbq_entry;
- uint32_t hbqno;
- void *virt; /* virtual address ptr */
- dma_addr_t phys; /* mapped address */
- unsigned long flags;
-
- hbqno = tag >> 16;
- new_hbq_entry = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
- /* Check whether HBQ is still in use */
- spin_lock_irqsave(&phba->hbalock, flags);
- if (!phba->hbq_in_use) {
- if (new_hbq_entry)
- (phba->hbqs[hbqno].hbq_free_buffer)(phba,
- new_hbq_entry);
- spin_unlock_irqrestore(&phba->hbalock, flags);
- return NULL;
- }
-
- hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
- if (hbq_entry == NULL) {
- if (new_hbq_entry)
- (phba->hbqs[hbqno].hbq_free_buffer)(phba,
- new_hbq_entry);
- spin_unlock_irqrestore(&phba->hbalock, flags);
- return NULL;
- }
- list_del(&hbq_entry->dbuf.list);
-
- if (new_hbq_entry == NULL) {
- list_add_tail(&hbq_entry->dbuf.list, &phba->hbqbuf_in_list);
- spin_unlock_irqrestore(&phba->hbalock, flags);
- return &hbq_entry->dbuf;
- }
- new_hbq_entry->tag = -1;
- phys = new_hbq_entry->dbuf.phys;
- virt = new_hbq_entry->dbuf.virt;
- new_hbq_entry->dbuf.phys = hbq_entry->dbuf.phys;
- new_hbq_entry->dbuf.virt = hbq_entry->dbuf.virt;
- hbq_entry->dbuf.phys = phys;
- hbq_entry->dbuf.virt = virt;
- lpfc_sli_free_hbq(phba, hbq_entry);
- list_add_tail(&new_hbq_entry->dbuf.list, &phba->hbqbuf_in_list);
- spin_unlock_irqrestore(&phba->hbalock, flags);
-
- return &new_hbq_entry->dbuf;
-}
-
-/**
* lpfc_sli_get_buff: Get the buffer associated with the buffer tag.
* @phba: Pointer to HBA context object.
* @pring: Pointer to driver SLI ring object.
**/
static struct lpfc_dmabuf *
lpfc_sli_get_buff(struct lpfc_hba *phba,
- struct lpfc_sli_ring *pring,
- uint32_t tag)
+ struct lpfc_sli_ring *pring,
+ uint32_t tag)
{
+ struct hbq_dmabuf *hbq_entry;
+
if (tag & QUE_BUFTAG_BIT)
return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
- else
- return lpfc_sli_replace_hbqbuff(phba, tag);
+ hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
+ if (!hbq_entry)
+ return NULL;
+ return &hbq_entry->dbuf;
}
match = 0;
irsp = &(saveq->iocb);
- if (irsp->ulpStatus == IOSTAT_NEED_BUFFER)
- return 1;
if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
if (pring->lpfc_sli_rcv_async_status)
pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
}
/**
+ * lpfc_sli_set_dma_length: Set the HBA's max DMA length.
+ * @phba: Pointer to HBA context object.
+ * @polling: flag that indicates if interrupts are enabled.
+ *
+ * This function sets the HBA's max dma length by issuing a set variable
+ * mailbox command. The dma length is taking from the cfg_pci_max_read
+ * configuration parameter. This parameter is passed as a module parameter
+ * during the driver load. If the HBA does not support this set variable
+ * mbox command the failure status will reset the cfg_pci_max_read to the
+ * default(2048).
+ * If interrupts are not enabled yet then the polling flag = 1 should
+ * be used so that the right mailbox routine is called.
+ * This function returns 0 for success, non 0 returned for failure.
+ **/
+int
+lpfc_sli_set_dma_length(struct lpfc_hba *phba, uint32_t polling)
+{
+ uint32_t dma_length;
+ LPFC_MBOXQ_t *mbox;
+ int ret = 0;
+
+ /* Translate the configured byte count into the firmware encoding */
+ switch (phba->cfg_pci_max_read) {
+ case 512:
+ dma_length = SLIM_VAL_MAX_DMA_512;
+ break;
+ case 1024:
+ dma_length = SLIM_VAL_MAX_DMA_1024;
+ break;
+ case 2048:
+ dma_length = SLIM_VAL_MAX_DMA_2048;
+ break;
+ case 4096:
+ dma_length = SLIM_VAL_MAX_DMA_4096;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ goto failed;
+
+ lpfc_set_var(phba, mbox, SLIM_VAR_MAX_DMA_LENGTH, dma_length);
+
+ if (polling)
+ ret = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+ else
+ ret = lpfc_sli_issue_mbox_wait(phba, mbox,
+ LPFC_MBOX_TMO * 2);
+
+ if (ret != MBX_SUCCESS) {
+ if (mbox->mb.mbxStatus != MBXERR_UNKNOWN_CMD)
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "%d:0443 Adapter failed to set maximum"
+ " DMA length mbxStatus x%x \n",
+ phba->brd_no, mbox->mb.mbxStatus);
+ else
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "%d:0447 Adapter failed to set maximum"
+ " DMA length mbxStatus x%x \n",
+ phba->brd_no, mbox->mb.mbxStatus);
+ goto failed;
+ }
+
+ mempool_free(mbox, phba->mbox_mem_pool);
+ return 0;
+
+failed:
+ /* If mailbox command failed, reset the value to default value */
+ phba->cfg_pci_max_read = 2048;
+ /*
+ * On MBX_TIMEOUT the mailbox may still complete later; hand it to
+ * the default completion handler instead of freeing it here.
+ * (mbox is NULL here only on allocation failure, where ret is 0.)
+ */
+ if (ret == MBX_TIMEOUT) {
+ mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ return -EPERM;
+ } else if (mbox) {
+ mempool_free(mbox, phba->mbox_mem_pool);
+ return -EPERM;
+ } else
+ return -ENOMEM;
+}
+/**
* lpfc_sli_brdrestart: Restart the HBA.
* @phba: Pointer to HBA context object.
*
phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
if (pmb->mb.un.varCfgPort.ginb) {
phba->sli3_options |= LPFC_SLI3_INB_ENABLED;
+ phba->hbq_get = phba->mbox->us.s3_inb_pgp.hbq_get;
phba->port_gp = phba->mbox->us.s3_inb_pgp.port;
phba->inb_ha_copy = &phba->mbox->us.s3_inb_pgp.ha_copy;
phba->inb_counter = &phba->mbox->us.s3_inb_pgp.counter;
phba->inb_last_counter =
phba->mbox->us.s3_inb_pgp.counter;
} else {
+ phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
phba->port_gp = phba->mbox->us.s3_pgp.port;
phba->inb_ha_copy = NULL;
phba->inb_counter = NULL;
}
} else {
+ phba->hbq_get = NULL;
phba->port_gp = phba->mbox->us.s2.port;
phba->inb_ha_copy = NULL;
phba->inb_counter = NULL;
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"0444 Firmware in SLI %x mode. Max_vpi %d\n",
phba->sli_rev, phba->max_vpi);
+
+ lpfc_sli_set_dma_length(phba, 1);
+
rc = lpfc_sli_ring_map(phba);
if (rc)
struct lpfc_sli *psli = &phba->sli;
struct lpfc_sli_ring *pring;
- if (!(phba->pport->work_port_events & WORKER_MBOX_TMO)) {
- return;
- }
-
/* Mbox cmd <mbxCommand> timeout */
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
"0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
uint32_t ha_copy;
uint32_t work_ha_copy;
unsigned long status;
+ unsigned long iflag;
uint32_t control;
MAILBOX_t *mbox, *pmbox;
if (unlikely(phba->link_state < LPFC_LINK_DOWN))
return IRQ_NONE;
/* Need to read HA REG for slow-path events */
- spin_lock(&phba->hbalock);
+ spin_lock_irqsave(&phba->hbalock, iflag);
ha_copy = readl(phba->HAregaddr);
/* If somebody is waiting to handle an eratt don't process it
* here. The brdkill function will do this.
writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
phba->HAregaddr);
readl(phba->HAregaddr); /* flush */
- spin_unlock(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
} else
ha_copy = phba->ha_copy;
* Turn off Link Attention interrupts
* until CLEAR_LA done
*/
- spin_lock(&phba->hbalock);
+ spin_lock_irqsave(&phba->hbalock, iflag);
phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
control = readl(phba->HCregaddr);
control &= ~HC_LAINT_ENA;
writel(control, phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
- spin_unlock(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
}
else
work_ha_copy &= ~HA_LATT;
(HA_RXMASK << (4*LPFC_ELS_RING)));
status >>= (4*LPFC_ELS_RING);
if (status & HA_RXMASK) {
- spin_lock(&phba->hbalock);
+ spin_lock_irqsave(&phba->hbalock, iflag);
control = readl(phba->HCregaddr);
lpfc_debugfs_slow_ring_trc(phba,
(uint32_t)((unsigned long)
&phba->work_waitq));
}
- spin_unlock(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
}
}
- spin_lock(&phba->hbalock);
+ spin_lock_irqsave(&phba->hbalock, iflag);
if (work_ha_copy & HA_ERATT)
lpfc_sli_read_hs(phba);
if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
/* First check out the status word */
lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
if (pmbox->mbxOwner != OWN_HOST) {
- spin_unlock(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
/*
* Stray Mailbox Interrupt, mbxCommand <cmd>
* mbxStatus <status>
work_ha_copy &= ~HA_MBATT;
} else {
phba->sli.mbox_active = NULL;
- spin_unlock(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
phba->last_completion_time = jiffies;
del_timer(&phba->sli.mbox_tmo);
if (pmb->mbox_cmpl) {
goto send_current_mbox;
}
}
- spin_lock(&phba->pport->work_port_lock);
+ spin_lock_irqsave(
+ &phba->pport->work_port_lock,
+ iflag);
phba->pport->work_port_events &=
~WORKER_MBOX_TMO;
- spin_unlock(&phba->pport->work_port_lock);
+ spin_unlock_irqrestore(
+ &phba->pport->work_port_lock,
+ iflag);
lpfc_mbox_cmpl_put(phba, pmb);
}
} else
- spin_unlock(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
if ((work_ha_copy & HA_MBATT) &&
(phba->sli.mbox_active == NULL)) {
"MBX_SUCCESS");
}
- spin_lock(&phba->hbalock);
+ spin_lock_irqsave(&phba->hbalock, iflag);
phba->work_ha |= work_ha_copy;
- spin_unlock(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
lpfc_worker_wake_up(phba);
}
return IRQ_HANDLED;
struct lpfc_hba *phba;
uint32_t ha_copy;
unsigned long status;
+ unsigned long iflag;
/* Get the driver's phba structure from the dev_id and
* assume the HBA is not interrupting.
/* Need to read HA REG for FCP ring and other ring events */
ha_copy = readl(phba->HAregaddr);
/* Clear up only attention source related to fast-path */
- spin_lock(&phba->hbalock);
+ spin_lock_irqsave(&phba->hbalock, iflag);
writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
phba->HAregaddr);
readl(phba->HAregaddr); /* flush */
- spin_unlock(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
} else
ha_copy = phba->ha_copy;
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "8.2.8.4"
+#define LPFC_DRIVER_VERSION "8.2.8.7"
#define LPFC_DRIVER_NAME "lpfc"
#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
#include <scsi/scsi_dbg.h>
-static void qla4xxx_print_srb_info(struct srb * srb)
-{
- printk("%s: srb = 0x%p, flags=0x%02x\n", __func__, srb, srb->flags);
- printk("%s: cmd = 0x%p, saved_dma_handle = 0x%lx\n",
- __func__, srb->cmd, (unsigned long) srb->dma_handle);
- printk("%s: fw_ddb_index = %d, lun = %d\n",
- __func__, srb->fw_ddb_index, srb->cmd->device->lun);
- printk("%s: iocb_tov = %d\n",
- __func__, srb->iocb_tov);
- printk("%s: cc_stat = 0x%x\n", __func__, srb->cc_stat);
-}
-
-void qla4xxx_print_scsi_cmd(struct scsi_cmnd *cmd)
-{
- printk("SCSI Command = 0x%p, Handle=0x%p\n", cmd, cmd->host_scribble);
- printk(" b=%d, t=%02xh, l=%02xh, cmd_len = %02xh\n",
- cmd->device->channel, cmd->device->id, cmd->device->lun,
- cmd->cmd_len);
- scsi_print_command(cmd);
- qla4xxx_print_srb_info((struct srb *) cmd->SCp.ptr);
-}
-
void __dump_registers(struct scsi_qla_host *ha)
{
uint8_t i;
}
}
-void qla4xxx_dump_mbox_registers(struct scsi_qla_host *ha)
-{
- unsigned long flags = 0;
- int i = 0;
- spin_lock_irqsave(&ha->hardware_lock, flags);
- for (i = 1; i < MBOX_REG_COUNT; i++)
- printk(KERN_INFO " Mailbox[%d] = %08x\n", i,
- readw(&ha->reg->mailbox[i]));
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
-}
-
void qla4xxx_dump_registers(struct scsi_qla_host *ha)
{
unsigned long flags = 0;
else
printk(KERN_DEBUG " ");
}
- if (cnt % 16)
- printk(KERN_DEBUG "\n");
+ printk(KERN_DEBUG "\n");
}
/* #define QL_DEBUG_LEVEL_5 */
/* #define QL_DEBUG_LEVEL_6 */
/* #define QL_DEBUG_LEVEL_9 */
+#ifndef _QL4_DBG_
+#define _QL4_DBG_
#define QL_DEBUG_LEVEL_2 /* ALways enable error messagess */
#if defined(QL_DEBUG)
-#define DEBUG(x) do {x;} while (0);
+#define DEBUG(x) do {if(extended_error_logging & 0x01) x;} while (0);
#else
-#define DEBUG(x) do {} while (0);
+#define DEBUG(x)
#endif
#if defined(QL_DEBUG_LEVEL_2)
-#define DEBUG2(x) do {if(extended_error_logging == 2) x;} while (0);
-#define DEBUG2_3(x) do {x;} while (0);
-#else /* */
-#define DEBUG2(x) do {} while (0);
-#endif /* */
+#define DEBUG2(x) do {if(extended_error_logging & 0x02) x;} while (0);
+#else
+#define DEBUG2(x)
+#endif
#if defined(QL_DEBUG_LEVEL_3)
-#define DEBUG3(x) do {if(extended_error_logging == 3) x;} while (0);
-#else /* */
-#define DEBUG3(x) do {} while (0);
-#if !defined(QL_DEBUG_LEVEL_2)
-#define DEBUG2_3(x) do {} while (0);
-#endif /* */
-#endif /* */
+#define DEBUG3(x) do {if(extended_error_logging & 0x04) x;} while (0);
+#else
+#define DEBUG3(x)
+#endif
+
#if defined(QL_DEBUG_LEVEL_4)
-#define DEBUG4(x) do {x;} while (0);
-#else /* */
-#define DEBUG4(x) do {} while (0);
-#endif /* */
+#define DEBUG4(x) do {if(extended_error_logging & 0x08) x;} while (0);
+#else
+#define DEBUG4(x)
+#endif
#if defined(QL_DEBUG_LEVEL_5)
-#define DEBUG5(x) do {x;} while (0);
-#else /* */
-#define DEBUG5(x) do {} while (0);
-#endif /* */
+#define DEBUG5(x) do {if(extended_error_logging & 0x10) x;} while (0);
+#else
+#define DEBUG5(x)
+#endif
#if defined(QL_DEBUG_LEVEL_6)
-#define DEBUG6(x) do {x;} while (0);
-#else /* */
-#define DEBUG6(x) do {} while (0);
-#endif /* */
-
-#if defined(QL_DEBUG_LEVEL_9)
-#define DEBUG9(x) do {x;} while (0);
-#else /* */
-#define DEBUG9(x) do {} while (0);
-#endif /* */
+#define DEBUG6(x) do {if(extended_error_logging & 0x20) x;} while (0);
+#else
+#define DEBUG6(x)
+#endif
+
+#endif /*_QL4_DBG_*/
#define RESET_FIRMWARE_TOV 30
#define LOGOUT_TOV 10
#define IOCB_TOV_MARGIN 10
-#define RELOGIN_TOV 18
#define ISNS_DEREG_TOV 5
#define MAX_RESET_HA_RETRIES 2
#define DF_NO_RELOGIN 1 /* Do not relogin if IOCTL
* logged it out */
#define DF_SCAN_ISSUED 2
+#define DF_OFFLINE 3 /* Offline Device */
+#define DF_DELETED 4 /* Device has been removed */
/*
* Asynchronous Event Queue structure
uint32_t tot_ddbs;
unsigned long flags;
-#define AF_ISNS_CMD_DONE 13 /* 0x00002000 */
#define AF_ONLINE 0 /* 0x00000001 */
#define AF_INIT_DONE 1 /* 0x00000002 */
#define AF_MBOX_COMMAND 2 /* 0x00000004 */
#define AF_INTERRUPTS_ON 6 /* 0x00000040 Not Used */
#define AF_GET_CRASH_RECORD 7 /* 0x00000080 */
#define AF_LINK_UP 8 /* 0x00000100 */
-#define AF_TOPCAT_CHIP_PRESENT 9 /* 0x00000200 */
#define AF_IRQ_ATTACHED 10 /* 0x00000400 */
#define AF_DISABLE_ACB_COMPLETE 11 /* 0x00000800 */
+#define AF_OS_INDEX_VALID 12 /* 0x00001000 */
unsigned long dpc_flags;
#define DPC_ISNS_RESTART 7 /* 0x00000080 */
#define DPC_AEN 9 /* 0x00000200 */
#define DPC_GET_DHCP_IP_ADDR 15 /* 0x00008000 */
+#define DPC_OFFLINE_DEVICE 16 /* 0x00010000 */
+#define DPC_DELETE_DEVICE 17 /* 0x00020000 */
uint16_t iocb_cnt;
uint16_t iocb_hiwat;
void (*ql4getaenlog)(struct scsi_qla_host *ha, struct ql4_aen_log *aenl);
#define QL_INDICES_PER_ENTRY 32
#define QL_OSINDEX_ENTRIES (MAX_DDB_ENTRIES/QL_INDICES_PER_ENTRY)
- volatile uint32_t os_map[QL_OSINDEX_ENTRIES];
+ volatile unsigned long os_map[QL_OSINDEX_ENTRIES];
};
static inline int is_qla4010(struct scsi_qla_host *ha)
return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP4010;
}
+
static inline int is_qla4022(struct scsi_qla_host *ha)
{
return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP4022;
struct iscsi_cls_conn;
-void qla4xxx_hw_reset(struct scsi_qla_host *ha);
+void qla4xxx_hw_reset(struct scsi_qla_host *ha, int hw_lock);
int ql4xxx_lock_drvr_wait(struct scsi_qla_host *a);
int qla4xxx_conn_start(struct iscsi_cls_conn *conn);
int qla4xxx_send_tgts(struct scsi_qla_host *ha, char *ip, uint16_t port);
int qla4xxx_initialize_adapter(struct scsi_qla_host * ha,
uint8_t renew_ddb_list);
int qla4xxx_soft_reset(struct scsi_qla_host *ha);
-irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id);
void qla4xxx_free_ddb_list(struct scsi_qla_host * ha);
+void qla4xxx_free_ddb(struct scsi_qla_host *, struct ddb_entry *);
void qla4xxx_process_aen(struct scsi_qla_host * ha, uint8_t process_aen);
int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host * ha);
uint16_t *tcp_source_port_num,
uint16_t *connection_id);
+struct ddb_entry * qla4xxx_alloc_ddb(struct scsi_qla_host * ha,
+ uint32_t fw_ddb_index);
int qla4xxx_set_ddb_entry(struct scsi_qla_host * ha, uint16_t fw_ddb_index,
dma_addr_t fw_ddb_entry_dma);
uint32_t fw_ddb_index, uint32_t state, uint32_t probe);
int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
- uint8_t outCount, uint32_t *mbx_cmd, uint32_t *mbx_sts);
-int qla4xxx_send_marker_iocb(struct scsi_qla_host *ha,
- struct ddb_entry *ddb_entry, int lun);
-
-
+ uint8_t outCount, uint32_t *mbx_cmd, uint32_t *mbx_sts);
extern int extended_error_logging;
extern int ql4xdiscoverywait;
#include "ql4_glbl.h"
#include "ql4_dbg.h"
#include "ql4_inline.h"
+#include "ql4_os.h"
/* link auto negotiation normally takes roughly 2s. */
/* If we don't have link in 3 times that period quit. */
* QLogic ISP4xxx Hardware Support Function Prototypes.
*/
-static struct ddb_entry * qla4xxx_alloc_ddb(struct scsi_qla_host *ha,
- uint32_t fw_ddb_index);
-
static void ql4xxx_set_mac_number(struct scsi_qla_host *ha)
{
uint32_t value;
}
/**
- * qla4xxx_free_ddb - deallocate ddb
+ * qla4xxx_free_ddb - deallocate ddb
* @ha: pointer to host adapter structure.
* @ddb_entry: pointer to device database entry
*
* This routine deallocates and unlinks the specified ddb_entry from the
* adapter's
**/
-static void qla4xxx_free_ddb(struct scsi_qla_host *ha,
- struct ddb_entry *ddb_entry)
+void qla4xxx_free_ddb(struct scsi_qla_host *ha, struct ddb_entry *ddb_entry)
{
/* Remove device entry from list */
list_del_init(&ddb_entry->list);
* This routine allocates a ddb_entry, ititializes some values, and
* inserts it into the ddb list.
**/
-static struct ddb_entry * qla4xxx_alloc_ddb(struct scsi_qla_host *ha,
- uint32_t fw_ddb_index)
+struct ddb_entry * qla4xxx_alloc_ddb(struct scsi_qla_host *ha,
+ uint32_t fw_ddb_index)
{
struct ddb_entry *ddb_entry;
(strlen(fw_ddb_entry->iscsi_name) != 0)){
ddb_entry = qla4xxx_alloc_ddb(ha, fw_ddb_index);
if (ddb_entry == NULL) {
- DEBUG2(dev_info(&ha->pdev->dev,"%s alloc_ddb %d "
+ DEBUG2(dev_info(&ha->pdev->dev,"%s alloc_ddb %d "
"failed\n", __func__, fw_ddb_index));
goto exit_ddb_list;
}
ddb_entry->connection_id = conn_id;
qla4xxx_fill_ddb(ddb_entry, fw_ddb_entry);
ddb_entry->fw_ddb_device_state = ddb_state;
-
+
if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) {
atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
dev_info(&ha->pdev->dev,
qla4xxx_flush_AENS(ha);
+ /* Wait for an AEN */
+ qla4xxx_devices_ready(ha);
/*
* First perform device discovery for active
if ((status = qla4xxx_build_ddb_list(ha)) == QLA_ERROR)
return status;
- /* Wait for an AEN */
- qla4xxx_devices_ready(ha);
-
/*
* Targets can come online after the inital discovery, so processing
* the aens here will catch them.
qla4xxx_fill_ddb(ddb_entry, fw_ddb_entry);
- if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) {
+ if (ddb_entry->fw_ddb_device_state ==
+ DDB_DS_SESSION_ACTIVE) {
atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
dev_info(&ha->pdev->dev,
- "scsi%ld: %s: ddb[%d] os[%d] marked ONLINE\n",
- ha->host_no, __func__, ddb_entry->fw_ddb_index,
+ "%s: ddb[%d] os[%d] marked ONLINE\n",
+ __func__, ddb_entry->fw_ddb_index,
ddb_entry->os_target_id);
- } else if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE)
+ } else if (atomic_read(&ddb_entry->state) ==
+ DDB_STATE_ONLINE)
qla4xxx_mark_device_missing(ha, ddb_entry);
}
}
writel(set_rmask(NVR_WRITE_ENABLE),
&ha->reg->u1.isp4022.nvram);
- writel(2, &ha->reg->mailbox[6]);
- readl(&ha->reg->mailbox[6]);
+ writel(2, &ha->reg->mailbox[6]);
+ readl(&ha->reg->mailbox[6]);
writel(set_rmask(CSR_BOOT_ENABLE), &ha->reg->ctrl_status);
readl(&ha->reg->ctrl_status);
}
config_chip = 1;
- /* Reset clears the semaphore, so acquire again */
+ /* Reset clears the semaphore, so acquire again */
if (ql4xxx_lock_drvr_wait(ha) != QLA_SUCCESS)
return QLA_ERROR;
}
* @renew_ddb_list: Indicates what to do with the adapter's ddb list
* after adapter recovery has completed.
* 0=preserve ddb list, 1=destroy and rebuild ddb list
- *
+ *
* This routine parforms all of the steps necessary to initialize the adapter.
*
**/
* followed by 0x8014 aen" to trigger the tgt discovery process.
*/
if (ha->firmware_state & FW_STATE_DHCP_IN_PROGRESS)
- goto exit_init_online;
+ goto exit_init_hba0;
/* Skip device discovery if ip and subnet is zero */
if (memcmp(ha->ip_address, ip_address, IP_ADDR_LEN) == 0 ||
memcmp(ha->subnet_mask, ip_address, IP_ADDR_LEN) == 0)
- goto exit_init_online;
+ goto exit_init_hba0;
if (renew_ddb_list == PRESERVE_DDB_LIST) {
/*
ha->host_no));
}
-exit_init_online:
+exit_init_hba0:
set_bit(AF_ONLINE, &ha->flags);
+ dev_info(&ha->pdev->dev, "%s: adapter ONLINE\n", __func__);
+
exit_init_hba:
return status;
}
}
}
- if (!found)
- ddb_entry = qla4xxx_alloc_ddb(ha, fw_ddb_index);
- else if (ddb_entry->fw_ddb_index != fw_ddb_index) {
- /* Target has been bound to a new fw_ddb_index */
- qla4xxx_free_ddb(ha, ddb_entry);
+ if (!found) {
ddb_entry = qla4xxx_alloc_ddb(ha, fw_ddb_index);
- }
- if (ddb_entry == NULL) {
- DEBUG2(dev_info(&ha->pdev->dev, "%s NULL DDB %d\n",
- __func__, fw_ddb_index));
- goto exit_dyn_add;
- }
+ if (ddb_entry == NULL) {
+ DEBUG2(dev_info(&ha->pdev->dev, "%s NULL DDB %d\n",
+ __func__, fw_ddb_index));
+ goto exit_dyn_add;
+ }
- ddb_entry->fw_ddb_index = fw_ddb_index;
- ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
- ddb_entry->tcp_source_port_num = src_port;
- ddb_entry->connection_id = conn_id;
- qla4xxx_fill_ddb(ddb_entry, fw_ddb_entry);
- ddb_entry->fw_ddb_device_state = ddb_state;
+ ddb_entry->fw_ddb_index = fw_ddb_index;
+ ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
+ ddb_entry->tcp_source_port_num = src_port;
+ ddb_entry->connection_id = conn_id;
+ qla4xxx_fill_ddb(ddb_entry, fw_ddb_entry);
+ ddb_entry->fw_ddb_device_state = ddb_state;
+
+ if (probe)
+ goto exit_dyn_add;
- if (!probe) {
if (qla4xxx_add_sess(ddb_entry, 1)) {
- DEBUG2(printk(KERN_WARNING
- "scsi%ld: failed to add new device at index "
- "[%d]\n Unable to add connection and session\n",
- ha->host_no, fw_ddb_index));
+ DEBUG2(dev_info(&ha->pdev->dev,
+ "%s: failed to add new ddb %d\n",
+ __func__, fw_ddb_index));
qla4xxx_free_ddb(ha, ddb_entry);
+ } else {
+ DEBUG6(dev_info(&ha->pdev->dev,
+ "%s added ddb 0x%p sess 0x%p"
+ " conn 0x%p state 0x%x\n",
+ __func__, ddb_entry,
+ ddb_entry->sess, ddb_entry->conn,
+ ddb_entry->state));
}
- }
+ } else if (ddb_entry->fw_ddb_index != fw_ddb_index) {
+ /* Target has been bound to a new fw_ddb_index */
+ ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = NULL;
+ ddb_entry->fw_ddb_index = fw_ddb_index;
+ ddb_entry->fw_ddb_device_state = ddb_state;
+ ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
+ atomic_set(&ddb_entry->port_down_timer,
+ ha->port_down_retry_count);
+ atomic_set(&ddb_entry->relogin_retry_count, 0);
+ atomic_set(&ddb_entry->relogin_timer, 0);
+ clear_bit(DF_RELOGIN, &ddb_entry->flags);
+ clear_bit(DF_NO_RELOGIN, &ddb_entry->flags);
+ atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
- DEBUG6(dev_info(&ha->pdev->dev, "%s added ddb 0x%p sess 0x%p conn 0x%p"
- " state 0x%x\n", __func__, ddb_entry, ddb_entry->sess,
- ddb_entry->conn, ddb_entry->state));
+ dev_info(&ha->pdev->dev,
+ "scsi%ld: %s: ddb[%d] os[%d] marked ONLINE sess:%p conn:%p\n",
+ ha->host_no, __func__, ddb_entry->fw_ddb_index,
+ ddb_entry->os_target_id, ddb_entry->sess, ddb_entry->conn);
+
+ if (!probe)
+ qla4xxx_conn_start(ddb_entry->conn);
+ DEBUG6(dev_info(&ha->pdev->dev, "%s calling conn_start ddb 0x%p sess 0x%p"
+ " conn 0x%p state 0x%x\n", __func__, ddb_entry, ddb_entry->sess,
+ ddb_entry->conn, ddb_entry->state));
+ }
exit_dyn_add:
dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), fw_ddb_entry,
fw_ddb_entry_dma);
qla4xxx_add_device_dynamically(ha, fw_ddb_index, probe);
return QLA_SUCCESS;
}
- DEBUG6(dev_info(&ha->pdev->dev, "%s ddb_entry 0x%p ostate 0x%x"
- " sess 0x%p conn 0x%p\n", __func__, ddb_entry,
- ddb_entry->state, ddb_entry->sess, ddb_entry->conn));
+ DEBUG6(dev_info(&ha->pdev->dev, "%s ddb[%d] os[%d] ostate 0x%x"
+ " sess 0x%p conn 0x%p o_fwstate 0x%x n_fwstate ox%x \n",
+ __func__, ddb_entry->fw_ddb_index, ddb_entry->os_target_id,
+ ddb_entry->state, ddb_entry->sess, ddb_entry->conn,
+ ddb_entry->fw_ddb_device_state, state));
/* Device already exists in our database. */
old_fw_ddb_device_state = ddb_entry->fw_ddb_device_state;
- DEBUG2(printk("scsi%ld: %s DDB - old state= 0x%x, new state=0x%x for "
- "index [%d]\n", ha->host_no, __func__,
- ddb_entry->fw_ddb_device_state, state, fw_ddb_index));
+
if (old_fw_ddb_device_state == state &&
state == DDB_DS_SESSION_ACTIVE) {
/* Do nothing, state not changed. */
atomic_set(&ddb_entry->port_down_timer,
ha->port_down_retry_count);
atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
+ dev_info(&ha->pdev->dev,
+ "%s: ddb[%d] os[%d] marked ONLINE\n",
+ __func__, ddb_entry->fw_ddb_index,
+ ddb_entry->os_target_id);
+
atomic_set(&ddb_entry->relogin_retry_count, 0);
atomic_set(&ddb_entry->relogin_timer, 0);
clear_bit(DF_RELOGIN, &ddb_entry->flags);
clear_bit(DF_NO_RELOGIN, &ddb_entry->flags);
- DEBUG6(dev_info(&ha->pdev->dev, "%s conn startddb_entry 0x%p"
- " sess 0x%p conn 0x%p\n",
- __func__, ddb_entry, ddb_entry->sess, ddb_entry->conn));
+ if (ddb_entry->conn) {
+ DEBUG6(dev_info(&ha->pdev->dev,
+ "%s conn startddb_entry 0x%p"
+ " sess 0x%p conn 0x%p\n",
+ __func__,
+ ddb_entry, ddb_entry->sess, ddb_entry->conn));
- qla4xxx_conn_start(ddb_entry->conn);
+ qla4xxx_conn_start(ddb_entry->conn);
- DEBUG6(dev_info(&ha->pdev->dev, "%s conn start done "
- "ddb_entry 0x%p sess 0x%p conn 0x%p\n",
- __func__, ddb_entry, ddb_entry->sess, ddb_entry->conn));
+ DEBUG6(dev_info(&ha->pdev->dev, "%s conn start done "
+ "ddb_entry 0x%p sess 0x%p conn 0x%p\n",
+ __func__, ddb_entry, ddb_entry->sess, ddb_entry->conn));
- if (!test_bit(DF_SCAN_ISSUED, &ddb_entry->flags)) {
- scsi_scan_target(&ddb_entry->sess->dev, 0,
- ddb_entry->sess->target_id,
- SCAN_WILD_CARD, 0);
- set_bit(DF_SCAN_ISSUED, &ddb_entry->flags);
+ if (!test_bit(DF_SCAN_ISSUED, &ddb_entry->flags)) {
+ qla4xxx_scan_target(ddb_entry);
+ set_bit(DF_SCAN_ISSUED, &ddb_entry->flags);
+ }
}
} else {
/* Device went away, try to relogin. */
return ddb_entry;
}
+/*
+ * The MBOX_CMD_CLEAR_DATABASE_ENTRY (0x31) mailbox command does not
+ * result in an AEN, so we need to process it separately.
+ */
+static inline void qla4xxx_check_for_clear_ddb(struct scsi_qla_host *ha,
+ uint32_t *mbox_cmd)
+{
+ uint32_t fw_ddb_index;
+ struct ddb_entry *ddb_entry = NULL;
+
+ if (mbox_cmd[0] == MBOX_CMD_CLEAR_DATABASE_ENTRY) {
+
+ fw_ddb_index = mbox_cmd[1];
+
+ /* Bounds-check the firmware-supplied index before lookup */
+ if (fw_ddb_index < MAX_DDB_ENTRIES)
+ ddb_entry = ha->fw_ddb_index_map[fw_ddb_index];
+
+ if (ddb_entry) {
+ dev_info(&ha->pdev->dev, "%s: ddb[%d] os[%d] freed\n",
+ __func__, ddb_entry->fw_ddb_index,
+ ddb_entry->os_target_id);
+ /* Defer the actual ddb teardown to the DPC thread */
+ set_bit(DF_DELETED, &ddb_entry->flags);
+ set_bit(DPC_DELETE_DEVICE, &ha->dpc_flags);
+ queue_work(ha->dpc_thread, &ha->dpc_work);
+ }
+ }
+}
+
static inline void
__qla4xxx_enable_intrs(struct scsi_qla_host *ha)
{
__qla4xxx_disable_intrs(ha);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
+
+/**
+ * qla4xxx_get_req_pkt - returns a valid entry in request queue.
+ * @ha: Pointer to host adapter structure.
+ * @queue_entry: Pointer to pointer to queue entry structure
+ *
+ * This routine performs the following tasks:
+ *	- returns the current request_in pointer (if queue not full)
+ *	- advances the request_in pointer
+ *	- checks for queue full
+ **/
+static inline int qla4xxx_get_req_pkt(struct scsi_qla_host *ha,
+	struct queue_entry **queue_entry)
+{
+	uint16_t request_in;
+	uint8_t status = QLA_SUCCESS;
+
+	*queue_entry = ha->request_ptr;
+
+	/* get the latest request_in and request_out index */
+	request_in = ha->request_in;
+	ha->request_out = (uint16_t) le32_to_cpu(ha->shadow_regs->req_q_out);
+
+	/* Advance request queue pointer and check for queue full */
+	if (request_in == (REQUEST_QUEUE_DEPTH - 1)) {
+		request_in = 0;
+		ha->request_ptr = ha->request_ring;
+	} else {
+		request_in++;
+		ha->request_ptr++;
+	}
+
+	/* request queue is full, try again later */
+	if ((ha->iocb_cnt + 1) >= ha->iocb_hiwat) {
+		/* restore request pointer */
+		ha->request_ptr = *queue_entry;
+		status = QLA_ERROR;
+	} else {
+		ha->request_in = request_in;
+		memset(*queue_entry, 0, sizeof(**queue_entry));
+	}
+
+	return status;
+}
+
+/**
+ * qla4xxx_send_marker_iocb - issues marker iocb to HBA
+ * @ha: Pointer to host adapter structure.
+ * @ddb_entry: Pointer to device database entry
+ * @lun: SCSI LUN
+ *
+ * This routine issues a LUN-reset (MM_LUN_RESET) marker IOCB while
+ * holding hardware_lock.  Returns QLA_SUCCESS, or QLA_ERROR when no
+ * request-queue entry is available.
+ **/
+static inline int qla4xxx_send_marker_iocb(struct scsi_qla_host *ha,
+	struct ddb_entry *ddb_entry, int lun)
+{
+	struct marker_entry *marker_entry;
+	unsigned long flags = 0;
+	uint8_t status = QLA_SUCCESS;
+
+	/* Acquire hardware specific lock */
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+
+	/* Get pointer to the queue entry for the marker */
+	if (qla4xxx_get_req_pkt(ha, (struct queue_entry **) &marker_entry) !=
+	    QLA_SUCCESS) {
+		status = QLA_ERROR;
+		goto exit_send_marker;
+	}
+
+	/* Put the marker in the request queue */
+	marker_entry->hdr.entryType = ET_MARKER;
+	marker_entry->hdr.entryCount = 1;
+	marker_entry->target = cpu_to_le16(ddb_entry->fw_ddb_index);
+	marker_entry->modifier = cpu_to_le16(MM_LUN_RESET);
+	int_to_scsilun(lun, &marker_entry->lun);
+	wmb();
+
+	/* Tell ISP it's got a new I/O request */
+	writel(ha->request_in, &ha->reg->req_q_in);
+	readl(&ha->reg->req_q_in);
+
+exit_send_marker:
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+	return status;
+}
+
+/*
+ * qla4xxx_alloc_cont_entry - take a continuation IOCB off the request ring
+ * @ha: Pointer to host adapter structure.
+ *
+ * Advances the request-queue in-pointer (wrapping at REQUEST_QUEUE_DEPTH)
+ * and initializes the continuation-entry header.  NOTE(review): unlike
+ * qla4xxx_get_req_pkt() there is no queue-full check here; the caller is
+ * presumably expected to have reserved enough entries up front -- confirm.
+ */
+static inline struct continuation_t1_entry* qla4xxx_alloc_cont_entry(
+	struct scsi_qla_host *ha)
+{
+	struct continuation_t1_entry *cont_entry;
+
+	cont_entry = (struct continuation_t1_entry *)ha->request_ptr;
+
+	/* Advance request queue pointer */
+	if (ha->request_in == (REQUEST_QUEUE_DEPTH - 1)) {
+		ha->request_in = 0;
+		ha->request_ptr = ha->request_ring;
+	} else {
+		ha->request_in++;
+		ha->request_ptr++;
+	}
+
+	/* Load packet defaults */
+	cont_entry->hdr.entryType = ET_CONTINUE;
+	cont_entry->hdr.entryCount = 1;
+	cont_entry->hdr.systemDefined = (uint8_t) cpu_to_le16(ha->request_in);
+
+	return cont_entry;
+}
+
+/*
+ * qla4xxx_calc_request_entries - IOCB entries needed for @dsds segments
+ *
+ * One command entry holds up to COMMAND_SEG data-segment descriptors;
+ * each additional continuation entry holds CONTINUE_SEG more, with a
+ * final partially-filled continuation entry if there is a remainder.
+ */
+static inline uint16_t qla4xxx_calc_request_entries(uint16_t dsds)
+{
+	uint16_t iocbs;
+
+	iocbs = 1;
+	if (dsds > COMMAND_SEG) {
+		iocbs += (dsds - COMMAND_SEG) / CONTINUE_SEG;
+		if ((dsds - COMMAND_SEG) % CONTINUE_SEG)
+			iocbs++;
+	}
+	return iocbs;
+}
+
#include "ql4_dbg.h"
#include "ql4_inline.h"
-#define VMWARE_CMD_TIMEOUT 30
#include <scsi/scsi_tcq.h>
-/**
- * qla4xxx_get_req_pkt - returns a valid entry in request queue.
- * @ha: Pointer to host adapter structure.
- * @queue_entry: Pointer to pointer to queue entry structure
- *
- * This routine performs the following tasks:
- * - returns the current request_in pointer (if queue not full)
- * - advances the request_in pointer
- * - checks for queue full
- **/
-static int qla4xxx_get_req_pkt(struct scsi_qla_host *ha,
- struct queue_entry **queue_entry)
-{
- uint16_t request_in;
- uint8_t status = QLA_SUCCESS;
-
- *queue_entry = ha->request_ptr;
-
- /* get the latest request_in and request_out index */
- request_in = ha->request_in;
- ha->request_out = (uint16_t) le32_to_cpu(ha->shadow_regs->req_q_out);
-
- /* Advance request queue pointer and check for queue full */
- if (request_in == (REQUEST_QUEUE_DEPTH - 1)) {
- request_in = 0;
- ha->request_ptr = ha->request_ring;
- } else {
- request_in++;
- ha->request_ptr++;
- }
-
- /* request queue is full, try again later */
- if ((ha->iocb_cnt + 1) >= ha->iocb_hiwat) {
- /* restore request pointer */
- ha->request_ptr = *queue_entry;
- status = QLA_ERROR;
- } else {
- ha->request_in = request_in;
- memset(*queue_entry, 0, sizeof(**queue_entry));
- }
-
- return status;
-}
-
-/**
- * qla4xxx_send_marker_iocb - issues marker iocb to HBA
- * @ha: Pointer to host adapter structure.
- * @ddb_entry: Pointer to device database entry
- * @lun: SCSI LUN
- * @marker_type: marker identifier
- *
- * This routine issues a marker IOCB.
- **/
-int qla4xxx_send_marker_iocb(struct scsi_qla_host *ha,
- struct ddb_entry *ddb_entry, int lun)
-{
- struct marker_entry *marker_entry;
- unsigned long flags = 0;
- uint8_t status = QLA_SUCCESS;
-
- /* Acquire hardware specific lock */
- spin_lock_irqsave(&ha->hardware_lock, flags);
-
- /* Get pointer to the queue entry for the marker */
- if (qla4xxx_get_req_pkt(ha, (struct queue_entry **) &marker_entry) !=
- QLA_SUCCESS) {
- status = QLA_ERROR;
- goto exit_send_marker;
- }
-
- /* Put the marker in the request queue */
- marker_entry->hdr.entryType = ET_MARKER;
- marker_entry->hdr.entryCount = 1;
- marker_entry->target = cpu_to_le16(ddb_entry->fw_ddb_index);
- marker_entry->modifier = cpu_to_le16(MM_LUN_RESET);
- int_to_scsilun(lun, &marker_entry->lun);
- wmb();
-
- /* Tell ISP it's got a new I/O request */
- writel(ha->request_in, &ha->reg->req_q_in);
- readl(&ha->reg->req_q_in);
-
-exit_send_marker:
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
- return status;
-}
-
-static struct continuation_t1_entry* qla4xxx_alloc_cont_entry(
- struct scsi_qla_host *ha)
-{
- struct continuation_t1_entry *cont_entry;
-
- cont_entry = (struct continuation_t1_entry *)ha->request_ptr;
-
- /* Advance request queue pointer */
- if (ha->request_in == (REQUEST_QUEUE_DEPTH - 1)) {
- ha->request_in = 0;
- ha->request_ptr = ha->request_ring;
- } else {
- ha->request_in++;
- ha->request_ptr++;
- }
-
- /* Load packet defaults */
- cont_entry->hdr.entryType = ET_CONTINUE;
- cont_entry->hdr.entryCount = 1;
- cont_entry->hdr.systemDefined = (uint8_t) cpu_to_le16(ha->request_in);
-
- return cont_entry;
-}
-
-static uint16_t qla4xxx_calc_request_entries(uint16_t dsds)
-{
- uint16_t iocbs;
-
- iocbs = 1;
- if (dsds > COMMAND_SEG) {
- iocbs += (dsds - COMMAND_SEG) / CONTINUE_SEG;
- if ((dsds - COMMAND_SEG) % CONTINUE_SEG)
- iocbs++;
- }
- return iocbs;
-}
-
static void qla4xxx_build_scsi_iocbs(struct srb *srb,
struct command_t3_entry *cmd_entry,
uint16_t tot_dsds)
/* Acquire hardware specific lock */
spin_lock_irqsave(&ha->hardware_lock, flags);
+ //index = (uint32_t)cmd->request->tag;
index = ha->current_active_index;
for (i = 0; i < MAX_SRBS; i++) {
index++;
}
/* Calculate the number of request entries needed. */
- nseg = scsi_dma_map(cmd);
- if (nseg < 0)
- goto queuing_error;
- tot_dsds = nseg;
+ if (srb->flags & SRB_SCSI_PASSTHRU)
+ tot_dsds = 1;
+ else {
+ nseg = scsi_dma_map(cmd);
+ if (nseg < 0)
+ goto queuing_error;
+ tot_dsds = nseg;
+ }
req_cnt = qla4xxx_calc_request_entries(tot_dsds);
cmd_entry->hdr.entryCount = req_cnt;
/* Set data transfer direction control flags
- * NOTE: Look at data_direction bits iff there is data to be
- * transferred, as the data direction bit is sometimed filled
- * in when there is no data to be transferred */
+ * NOTE: Look at data_direction bits iff there is data to be
+	 * transferred, as the data direction bit is sometimes filled
+ * in when there is no data to be transferred */
cmd_entry->control_flags = CF_NO_DATA;
if (scsi_bufflen(cmd)) {
if (cmd->sc_data_direction == DMA_TO_DEVICE)
/*
* Check to see if adapter is online before placing request on
- * request queue. If a reset occurs and a request is in the queue,
- * the firmware will still attempt to process the request, retrieving
- * garbage for pointers.
- */
+ * request queue. If a reset occurs and a request is in the queue,
+ * the firmware will still attempt to process the request, retrieving
+ * garbage for pointers.
+ */
if (!test_bit(AF_ONLINE, &ha->flags)) {
DEBUG2(printk("scsi%ld: %s: Adapter OFFLINE! "
"Do not issue command.\n",
return QLA_SUCCESS;
queuing_error:
- if (srb->flags & SRB_SCSI_PASSTHRU)
- return QLA_ERROR;
-
- if (tot_dsds)
- scsi_dma_unmap(cmd);
+ if (!(srb->flags & SRB_SCSI_PASSTHRU))
+ if (tot_dsds)
+ scsi_dma_unmap(cmd);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return QLA_ERROR;
}
+
#include "ql4_glbl.h"
#include "ql4_dbg.h"
#include "ql4_inline.h"
+#include "ql4_os.h"
/**
* qla4xxx_status_entry - processes status IOCBs
break;
}
if (sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_UNDER) {
- scsi_set_resid(cmd, residual);
- if (!scsi_status && ((scsi_bufflen(cmd) - residual) <
+ QL_SET_SCSI_RESID(cmd, residual);
+ if (!scsi_status && ((QL_SCSI_BUFFLEN(cmd) - residual) <
cmd->underflow)) {
cmd->result = DID_ERROR << 16;
break;
break;
}
- scsi_set_resid(cmd, residual);
+ QL_SET_SCSI_RESID(cmd, residual);
/*
* If there is scsi_status, it takes precedense over
if ((sts_entry->iscsiFlags &
ISCSI_FLAG_RESIDUAL_UNDER) == 0) {
cmd->result = DID_BUS_BUSY << 16;
- } else if ((scsi_bufflen(cmd) - residual) <
+ } else if ((QL_SCSI_BUFFLEN(cmd) - residual) <
cmd->underflow) {
/*
* Handle mid-layer underflow???
"resid = 0x%x, compstat = 0x%x\n",
ha->host_no, cmd->device->channel,
cmd->device->id, cmd->device->lun,
- __func__, scsi_bufflen(cmd),
+ __func__, QL_SCSI_BUFFLEN(cmd),
residual,
sts_entry->completionStatus));
/* Immediately process the AENs that don't require much work.
* Only queue the database_changed AENs */
- dev_info(&ha->pdev->dev, "%s mbx0 0x%08x mbx1 0x%08x"
+ DEBUG6(dev_info(&ha->pdev->dev, "%s mbx0 0x%08x mbx1 0x%08x"
" mbx2 0x%08x mbx3 0x%08x mbx4 0x%08x mbx5 0x%08x "
"mbx6 0x%08x mbx7 0x%08x\n", __func__,
readl(&ha->reg->mailbox[0]), readl(&ha->reg->mailbox[1]),
readl(&ha->reg->mailbox[2]), readl(&ha->reg->mailbox[3]),
readl(&ha->reg->mailbox[4]), readl(&ha->reg->mailbox[5]),
- readl(&ha->reg->mailbox[6]), readl(&ha->reg->mailbox[7]));
+ readl(&ha->reg->mailbox[6]), readl(&ha->reg->mailbox[7])));
if (ha->aen_log.count < MAX_AEN_ENTRIES) {
for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
}
switch (mbox_status) {
case MBOX_ASTS_SYSTEM_ERROR:
+ dev_info(&ha->pdev->dev, "%s: System Err\n", __func__);
/* Log Mailbox registers */
if (ql4xdontresethba) {
DEBUG2(printk("%s:Dont Reset HBA\n",
__func__));
} else {
+ qla4xxx_hw_reset(ha, 0);
set_bit(AF_GET_CRASH_RECORD, &ha->flags);
set_bit(DPC_RESET_HA, &ha->dpc_flags);
}
break;
case MBOX_ASTS_LINK_UP:
- DEBUG2(printk("scsi%ld: AEN %04x Adapter LINK UP\n",
- ha->host_no, mbox_status));
set_bit(AF_LINK_UP, &ha->flags);
+ dev_info(&ha->pdev->dev, "%s: LINK UP\n", __func__);
break;
case MBOX_ASTS_LINK_DOWN:
- DEBUG2(printk("scsi%ld: AEN %04x Adapter LINK DOWN\n",
- ha->host_no, mbox_status));
clear_bit(AF_LINK_UP, &ha->flags);
+ dev_info(&ha->pdev->dev, "%s: LINK DOWN\n", __func__);
break;
case MBOX_ASTS_HEARTBEAT:
mbox_stat2 = readl(&ha->reg->mailbox[2]);
mbox_stat3 = readl(&ha->reg->mailbox[3]);
- if ((mbox_stat3 == 5) && (mbox_stat2 == 3))
+ if ((mbox_stat3 == 5) && (mbox_stat2 == 3))
set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags);
- else if ((mbox_stat3 == 2) && (mbox_stat2 == 5))
+ else if ((mbox_stat3 == 2) && (mbox_stat2 == 5))
set_bit(DPC_RESET_HA, &ha->dpc_flags);
break;
* qla4xxx_intr_handler - hardware interrupt handler.
* @irq: Unused
* @dev_id: Pointer to host adapter structure
+ * @regs: Unused
**/
-irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id)
+QL_DECLARE_INTR_HANDLER(qla4xxx_intr_handler, irq, dev_id, regs)
{
-
struct scsi_qla_host *ha;
uint32_t intr_status;
unsigned long flags = 0;
intr_status = readl(&ha->reg->ctrl_status);
if ((intr_status &
- (CSR_SCSI_RESET_INTR|CSR_FATAL_ERROR|INTR_PENDING)) ==
- 0) {
+ (CSR_SCSI_RESET_INTR|CSR_FATAL_ERROR|INTR_PENDING)) == 0) {
if (reqs_count == 0)
ha->spurious_int_count++;
break;
break;
} else if (intr_status & CSR_SCSI_RESET_INTR) {
clear_bit(AF_ONLINE, &ha->flags);
+ dev_info(&ha->pdev->dev,"%s: adapter OFFLINE\n",
+ __func__);
__qla4xxx_disable_intrs(ha);
writel(set_rmask(CSR_SCSI_RESET_INTR),
#include "ql4_glbl.h"
#include "ql4_dbg.h"
#include "ql4_inline.h"
+#include "ql4_os.h"
/**
/* Mailbox code active */
wait_count = MBOX_TOV * 100;
+
while (wait_count--) {
mutex_lock(&ha->mbox_sem);
if (!test_bit(AF_MBOX_COMMAND, &ha->flags)) {
spin_unlock_irqrestore(&ha->hardware_lock, flags);
mbox_exit:
+ if (status == QLA_SUCCESS)
+ qla4xxx_check_for_clear_ddb(ha, mbx_cmd);
mutex_lock(&ha->mbox_sem);
clear_bit(AF_MBOX_COMMAND, &ha->flags);
mutex_unlock(&ha->mbox_sem);
* qla4xxx_get_fw_version - gets firmware version
* @ha: Pointer to host adapter structure.
*
- * Retrieves the firmware version on HBA. In QLA4010, mailboxes 2 & 3 may
- * hold an address for data. Make sure that we write 0 to those mailboxes,
+ * Retrieves the firmware version on HBA. In QLA4010, mailboxes 2 & 3 may
+ * hold an address for data. Make sure that we write 0 to those mailboxes,
* if unused.
**/
int qla4xxx_get_fw_version(struct scsi_qla_host * ha)
return QLA_SUCCESS;
}
-static int qla4xxx_get_default_ddb(struct scsi_qla_host *ha,
- dma_addr_t dma_addr)
+int qla4xxx_get_default_ddb(struct scsi_qla_host *ha, dma_addr_t dma_addr)
{
uint32_t mbox_cmd[MBOX_REG_COUNT];
uint32_t mbox_sts[MBOX_REG_COUNT];
#include "ql4_glbl.h"
#include "ql4_dbg.h"
#include "ql4_inline.h"
+#include "ql4_os.h"
+
/*
* Driver version
"Option to enable extended error logging, "
"Default is 0 - no logging, 1 - debug logging");
+/* Command Timeout before ddb state goes to MISSING */
+int cmd_timeout = IOCB_CMD_TIMEOUT;
+module_param(cmd_timeout, int, S_IRUGO | S_IRUSR);
+MODULE_PARM_DESC(cmd_timeout, "Command Timeout");
+
+/* Timeout before ddb state MISSING goes DEAD */
+int recovery_tmo = RECOVERY_TIMEOUT;
+module_param(recovery_tmo, int, S_IRUGO | S_IRUSR);
+MODULE_PARM_DESC(recovery_tmo, "Recovery Timeout");
+
int ql4_mod_unload = 0;
/*
* SCSI host template entry points
*/
-static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha);
+
+void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha);
/*
* iSCSI template entry points
*/
-static int qla4xxx_tgt_dscvr(struct Scsi_Host *shost,
- enum iscsi_tgt_dscvr type, uint32_t enable,
- struct sockaddr *dst_addr);
+static int qla4xxx_host_get_param(struct Scsi_Host *,
+ enum iscsi_host_param, char *);
static int qla4xxx_conn_get_param(struct iscsi_cls_conn *conn,
enum iscsi_param param, char *buf);
static int qla4xxx_sess_get_param(struct iscsi_cls_session *sess,
enum iscsi_param param, char *buf);
-static int qla4xxx_host_get_param(struct Scsi_Host *shost,
- enum iscsi_host_param param, char *buf);
static void qla4xxx_recovery_timedout(struct iscsi_cls_session *session);
/*
static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd);
static int qla4xxx_slave_alloc(struct scsi_device *device);
static int qla4xxx_slave_configure(struct scsi_device *device);
+static void qla4xxx_slave_destroy(struct scsi_device *device);
static struct scsi_host_template qla4xxx_driver_template = {
.module = THIS_MODULE,
.name = DRIVER_NAME,
.proc_name = DRIVER_NAME,
+ .proc_info = qla4xxx_proc_info,
.queuecommand = qla4xxx_queuecommand,
.eh_device_reset_handler = qla4xxx_eh_device_reset,
.slave_configure = qla4xxx_slave_configure,
.slave_alloc = qla4xxx_slave_alloc,
+ .slave_destroy = qla4xxx_slave_destroy,
.this_id = -1,
.cmd_per_lun = 3,
ISCSI_CONN_ADDRESS |
ISCSI_TARGET_NAME |
ISCSI_TPGT,
- .tgt_dscvr = qla4xxx_tgt_dscvr,
+
+ QL_INIT_SESSION_DATASIZE(sessiondata_size)
+ QL_INIT_HOST_TEMPLATE(host_template)
+
+ .get_host_param = qla4xxx_host_get_param,
.get_conn_param = qla4xxx_conn_get_param,
.get_session_param = qla4xxx_sess_get_param,
- .get_host_param = qla4xxx_host_get_param,
.session_recovery_timedout = qla4xxx_recovery_timedout,
};
ddb_entry->fw_ddb_index, ddb_entry->os_target_id,
ha->port_down_retry_count);
- DEBUG2(printk("scsi%ld: %s: scheduling dpc routine - dpc flags = "
- "0x%lx\n", ha->host_no, __func__, ha->dpc_flags));
- queue_work(ha->dpc_thread, &ha->dpc_work);
-}
-
-static int qla4xxx_host_get_param(struct Scsi_Host *shost,
- enum iscsi_host_param param, char *buf)
-{
- struct scsi_qla_host *ha = to_qla_host(shost);
- int len;
-
- switch (param) {
- case ISCSI_HOST_PARAM_IPADDRESS:
- len = sprintf(buf, "%d.%d.%d.%d", ha->ip_address[0],
- ha->ip_address[1], ha->ip_address[2],
- ha->ip_address[3]);
- break;
- case ISCSI_HOST_PARAM_INITIATOR_NAME:
- len = sprintf(buf, "%s", ha->name_string);
- break;
- default:
- return -ENOSYS;
- }
+ QL_SET_DDB_OFFLINE(ha, ddb_entry);
- return len;
+ queue_work(ha->dpc_thread, &ha->dpc_work);
}
int qla4xxx_conn_start(struct iscsi_cls_conn *conn)
struct iscsi_cls_session *session;
struct ddb_entry *ddb_entry;
- session = iscsi_dev_to_session(conn->dev.parent);
+ session = QL_ISCSI_CONN_TO_SESS(conn);
+
ddb_entry = session->dd_data;
DEBUG2(printk("scsi%ld: %s: index [%d] starting conn\n",
struct iscsi_cls_session *session;
struct ddb_entry *ddb_entry;
- session = iscsi_dev_to_session(conn->dev.parent);
+ session = QL_ISCSI_CONN_TO_SESS(conn);
+
ddb_entry = session->dd_data;
DEBUG2(printk("scsi%ld: %s: index [%d] stopping conn\n",
case ISCSI_PARAM_TPGT:
len = sprintf(buf, "%u", ddb_entry->tpgt);
break;
+
+#ifdef ISCSI_ISID
+ case ISCSI_PARAM_ISID:
+ len = sprintf(buf, "%u", QL_ISCSI_SESSION_ID(ddb_entry));
+ break;
+#endif
default:
return -ENOSYS;
}
struct ddb_entry *ddb_entry;
int len;
- session = iscsi_dev_to_session(conn->dev.parent);
+ session = QL_ISCSI_CONN_TO_SESS(conn);
+
ddb_entry = session->dd_data;
switch (param) {
return len;
}
-static int qla4xxx_tgt_dscvr(struct Scsi_Host *shost,
- enum iscsi_tgt_dscvr type, uint32_t enable,
- struct sockaddr *dst_addr)
+static int qla4xxx_host_get_param(struct Scsi_Host *shost,
+ enum iscsi_host_param param, char *buf)
{
- struct scsi_qla_host *ha;
- struct sockaddr_in *addr;
- struct sockaddr_in6 *addr6;
- int ret = 0;
-
- ha = (struct scsi_qla_host *) shost->hostdata;
-
- switch (type) {
- case ISCSI_TGT_DSCVR_SEND_TARGETS:
- if (dst_addr->sa_family == AF_INET) {
- addr = (struct sockaddr_in *)dst_addr;
- if (qla4xxx_send_tgts(ha, (char *)&addr->sin_addr,
- addr->sin_port) != QLA_SUCCESS)
- ret = -EIO;
- } else if (dst_addr->sa_family == AF_INET6) {
- /*
- * TODO: fix qla4xxx_send_tgts
- */
- addr6 = (struct sockaddr_in6 *)dst_addr;
- if (qla4xxx_send_tgts(ha, (char *)&addr6->sin6_addr,
- addr6->sin6_port) != QLA_SUCCESS)
- ret = -EIO;
- } else
- ret = -ENOSYS;
+ struct scsi_qla_host *ha = to_qla_host(shost);
+ int len;
+
+ switch (param) {
+ case ISCSI_HOST_PARAM_IPADDRESS:
+ len = sprintf(buf, "%d.%d.%d.%d", ha->ip_address[0],
+ ha->ip_address[1], ha->ip_address[2],
+ ha->ip_address[3]);
+ break;
+ case ISCSI_HOST_PARAM_INITIATOR_NAME:
+ len = sprintf(buf, "%s", ha->name_string);
break;
default:
- ret = -ENOSYS;
+ return -ENOSYS;
}
- return ret;
+
+ return len;
}
+static int ql_alloc_osindex(struct scsi_qla_host *ha)
+{
+ unsigned int idx;
+
+ for (idx = 0; idx < MAX_DDB_ENTRIES; idx++)
+ if (test_and_set_bit((idx & 0x1F), &ha->os_map[(idx >> 5)]) == 0)
+ return idx;
+ return -1;
+}
+
+static void free_osindex(struct scsi_qla_host *ha, uint32_t idx)
+{
+ clear_bit((idx & 0x1F), &ha->os_map[idx >> 5]);
+}
+
+
void qla4xxx_destroy_sess(struct ddb_entry *ddb_entry)
{
if (!ddb_entry->sess)
return;
+ free_osindex(ddb_entry->ha, ddb_entry->os_target_id);
if (ddb_entry->conn) {
+ QL_ISCSI_IF_DESTROY_SESSION_DONE(ddb_entry);
+ QL_ISCSI_DESTROY_CONN(ddb_entry);
iscsi_remove_session(ddb_entry->sess);
}
iscsi_free_session(ddb_entry->sess);
{
int err;
- err = iscsi_add_session(ddb_entry->sess, ddb_entry->fw_ddb_index);
+ err = QL_ISCSI_ADD_SESS(ddb_entry);
+
if (err) {
DEBUG2(printk(KERN_ERR "Could not add session.\n"));
return err;
}
- ddb_entry->conn = iscsi_create_conn(ddb_entry->sess, 0, 0);
+ ddb_entry->conn = QL_ISCSI_CREATE_CONN(ddb_entry);
+
if (!ddb_entry->conn) {
iscsi_remove_session(ddb_entry->sess);
DEBUG2(printk(KERN_ERR "Could not add connection.\n"));
return -ENOMEM;
}
- ddb_entry->sess->recovery_tmo = ddb_entry->ha->port_down_retry_count;
- if (scan)
- scsi_scan_target(&ddb_entry->sess->dev, 0,
- ddb_entry->sess->target_id,
- SCAN_WILD_CARD, 0);
- iscsi_unblock_session(ddb_entry->sess);
+ ddb_entry->sess->recovery_tmo = QL_SESS_RECOVERY_TO(ddb_entry);
+
+ qla4xxx_scan_target(ddb_entry);
+
+ QL_ISCSI_CREATE_SESS_DONE(ddb_entry);
+
return 0;
}
struct ddb_entry *ddb_entry;
struct iscsi_cls_session *sess;
- sess = iscsi_alloc_session(ha->host, &qla4xxx_iscsi_transport,
- sizeof(struct ddb_entry));
- if (!sess)
+ int os_idx;
+
+ if ((os_idx = ql_alloc_osindex(ha)) >= MAX_DDB_ENTRIES)
+ return NULL;
+
+ sess = QL_ISCSI_ALLOC_SESSION(ha, &qla4xxx_iscsi_transport);
+ if (!sess) {
+ free_osindex(ha, os_idx);
return NULL;
+ }
ddb_entry = sess->dd_data;
memset(ddb_entry, 0, sizeof(*ddb_entry));
+ ddb_entry->os_target_id = os_idx;
ddb_entry->ha = ha;
ddb_entry->sess = sess;
return ddb_entry;
}
static struct srb* qla4xxx_get_new_srb(struct scsi_qla_host *ha,
- struct ddb_entry *ddb_entry,
- struct scsi_cmnd *cmd,
- void (*done)(struct scsi_cmnd *))
+ struct ddb_entry *ddb_entry,
+ struct scsi_cmnd *cmd,
+ void (*done)(struct scsi_cmnd *))
{
struct srb *srb;
return srb;
}
-static void qla4xxx_srb_free_dma(struct scsi_qla_host *ha, struct srb *srb)
-{
- struct scsi_cmnd *cmd = srb->cmd;
-
- if (srb->flags & SRB_DMA_VALID) {
- scsi_dma_unmap(cmd);
- srb->flags &= ~SRB_DMA_VALID;
- }
- cmd->SCp.ptr = NULL;
-}
-
void qla4xxx_srb_compl(struct scsi_qla_host *ha, struct srb *srb)
{
struct scsi_cmnd *cmd = srb->cmd;
* qla4xxx_queuecommand - scsi layer issues scsi command to driver.
* @cmd: Pointer to Linux's SCSI command structure
* @done_fn: Function that the driver calls to notify the SCSI mid-layer
- * that the command has been processed.
+ * that the command has been processed.
*
* Remarks:
* This routine is invoked by Linux to send a SCSI command to the driver.
* The mid-level driver tries to ensure that queuecommand never gets
* invoked concurrently with itself or the interrupt handler (although
* the interrupt handler may call this routine as part of request-
- * completion handling). Unfortunely, it sometimes calls the scheduler
+ * completion handling). Unfortunely, it sometimes calls the scheduler
* in interrupt context which is a big NO! NO!.
**/
static int qla4xxx_queuecommand(struct scsi_cmnd *cmd,
align = 0;
if ((unsigned long)ha->queues_dma & (MEM_ALIGN_VALUE - 1))
align = MEM_ALIGN_VALUE - ((unsigned long)ha->queues_dma &
- (MEM_ALIGN_VALUE - 1));
+ (MEM_ALIGN_VALUE - 1));
/* Update request and response queue pointers. */
ha->request_dma = ha->queues_dma + align;
ha->response_dma = ha->queues_dma + align +
(REQUEST_QUEUE_DEPTH * QUEUE_SIZE);
ha->response_ring = (struct queue_entry *) (ha->queues + align +
- (REQUEST_QUEUE_DEPTH *
- QUEUE_SIZE));
+ (REQUEST_QUEUE_DEPTH *
+ QUEUE_SIZE));
ha->shadow_regs_dma = ha->queues_dma + align +
(REQUEST_QUEUE_DEPTH * QUEUE_SIZE) +
(RESPONSE_QUEUE_DEPTH * QUEUE_SIZE);
ha->shadow_regs = (struct shadow_regs *) (ha->queues + align +
(REQUEST_QUEUE_DEPTH *
- QUEUE_SIZE) +
+ QUEUE_SIZE) +
(RESPONSE_QUEUE_DEPTH *
- QUEUE_SIZE));
+ QUEUE_SIZE));
/* Allocate memory for srb pool. */
ha->srb_mempool = mempool_create(SRB_MIN_REQ, mempool_alloc_slab,
list_for_each_entry_safe(ddb_entry, dtemp, &ha->ddb_list, list) {
/* Count down time between sending relogins */
if (adapter_up(ha) &&
- !test_bit(DF_RELOGIN, &ddb_entry->flags) &&
- atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE) {
+ !test_bit(DF_RELOGIN, &ddb_entry->flags) &&
+ atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE) {
if (atomic_read(&ddb_entry->retry_relogin_timer) !=
- INVALID_ENTRY) {
+ INVALID_ENTRY) {
if (atomic_read(&ddb_entry->retry_relogin_timer)
- == 0) {
+ == 0) {
atomic_set(&ddb_entry->
retry_relogin_timer,
INVALID_ENTRY);
&ha->dpc_flags);
set_bit(DF_RELOGIN, &ddb_entry->flags);
DEBUG2(printk("scsi%ld: %s: index [%d]"
- " login device\n",
- ha->host_no, __func__,
- ddb_entry->fw_ddb_index));
+ " login device\n",
+ ha->host_no, __func__,
+ ddb_entry->fw_ddb_index));
} else
atomic_dec(&ddb_entry->
retry_relogin_timer);
/* Wait for relogin to timeout */
if (atomic_read(&ddb_entry->relogin_timer) &&
- (atomic_dec_and_test(&ddb_entry->relogin_timer) != 0)) {
+ (atomic_dec_and_test(&ddb_entry->relogin_timer) != 0)) {
/*
* If the relogin times out and the device is
* still NOT ONLINE then try and relogin again.
*/
if (atomic_read(&ddb_entry->state) !=
- DDB_STATE_ONLINE &&
- ddb_entry->fw_ddb_device_state ==
- DDB_DS_SESSION_FAILED) {
+ DDB_STATE_ONLINE &&
+ ddb_entry->fw_ddb_device_state ==
+ DDB_DS_SESSION_FAILED) {
/* Reset retry relogin timer */
atomic_inc(&ddb_entry->relogin_retry_count);
DEBUG2(printk("scsi%ld: index[%d] relogin"
- " timed out-retrying"
- " relogin (%d)\n",
- ha->host_no,
- ddb_entry->fw_ddb_index,
- atomic_read(&ddb_entry->
+ " timed out-retrying"
+ " relogin (%d)\n",
+ ha->host_no,
+ ddb_entry->fw_ddb_index,
+ atomic_read(&ddb_entry->
relogin_retry_count))
);
start_dpc++;
DEBUG(printk("scsi%ld:%d:%d: index [%d] "
- "initate relogin after"
- " %d seconds\n",
- ha->host_no, ddb_entry->bus,
- ddb_entry->target,
- ddb_entry->fw_ddb_index,
- ddb_entry->default_time2wait + 4)
- );
+ "initate relogin after"
+ " %d seconds\n",
+ ha->host_no, ddb_entry->bus,
+ ddb_entry->target,
+ ddb_entry->fw_ddb_index,
+ ddb_entry->default_time2wait + 4));
atomic_set(&ddb_entry->retry_relogin_timer,
- ddb_entry->default_time2wait + 4);
+ ddb_entry->default_time2wait + 4);
}
}
}
/* Check for heartbeat interval. */
if (ha->firmware_options & FWOPT_HEARTBEAT_ENABLE &&
- ha->heartbeat_interval != 0) {
+ ha->heartbeat_interval != 0) {
ha->seconds_since_last_heartbeat++;
if (ha->seconds_since_last_heartbeat >
- ha->heartbeat_interval + 2)
+ ha->heartbeat_interval + 2)
set_bit(DPC_RESET_HA, &ha->dpc_flags);
}
/* Wakeup the dpc routine for this adapter, if needed. */
if ((start_dpc ||
- test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
- test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags) ||
- test_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags) ||
- test_bit(DPC_RESET_HA_DESTROY_DDB_LIST, &ha->dpc_flags) ||
- test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
- test_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags) ||
- test_bit(DPC_AEN, &ha->dpc_flags)) &&
- ha->dpc_thread) {
+ test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
+ test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags) ||
+ test_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags) ||
+ test_bit(DPC_RESET_HA_DESTROY_DDB_LIST, &ha->dpc_flags) ||
+ test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
+ test_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags) ||
+ QL_DPC_OFFLINE_SET(ha) ||
+ test_bit(DPC_AEN, &ha->dpc_flags)) &&
+ ha->dpc_thread) {
DEBUG2(printk("scsi%ld: %s: scheduling dpc routine"
- " - dpc flags = 0x%lx\n",
- ha->host_no, __func__, ha->dpc_flags));
+ " - dpc flags = 0x%lx\n",
+ ha->host_no, __func__, ha->dpc_flags));
queue_work(ha->dpc_thread, &ha->dpc_work);
}
return stat;
}
-void qla4xxx_hw_reset(struct scsi_qla_host *ha)
+void qla4xxx_hw_reset(struct scsi_qla_host *ha, int hw_lock)
{
uint32_t ctrl_status;
unsigned long flags = 0;
DEBUG2(printk(KERN_ERR "scsi%ld: %s\n", ha->host_no, __func__));
- spin_lock_irqsave(&ha->hardware_lock, flags);
+ if (hw_lock)
+ spin_lock_irqsave(&ha->hardware_lock, flags);
/*
* If the SCSI Reset Interrupt bit is set, clear it.
* Otherwise, the Soft Reset won't work.
writel(set_rmask(CSR_SOFT_RESET), &ha->reg->ctrl_status);
readl(&ha->reg->ctrl_status);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ if (hw_lock)
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
/**
int status = QLA_ERROR;
uint32_t ctrl_status;
- qla4xxx_hw_reset(ha);
+ qla4xxx_hw_reset(ha, 1);
/* Wait until the Network Reset Intr bit is cleared */
max_wait_time = RESET_INTR_TOV;
if ((ctrl_status & CSR_NET_RESET_INTR) != 0) {
DEBUG2(printk(KERN_WARNING
- "scsi%ld: Network Reset Intr not cleared by "
- "Network function, clearing it now!\n",
- ha->host_no));
+ "scsi%ld: Network Reset Intr not cleared by "
+ "Network function, clearing it now!\n",
+ ha->host_no));
spin_lock_irqsave(&ha->hardware_lock, flags);
writel(set_rmask(CSR_NET_RESET_INTR), &ha->reg->ctrl_status);
readl(&ha->reg->ctrl_status);
* qla4xxx_recover_adapter - recovers adapter after a fatal error
* @ha: Pointer to host adapter structure.
**/
-static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
+static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
{
int status;
/* Stall incoming I/O until we are done */
clear_bit(AF_ONLINE, &ha->flags);
- DEBUG2(printk("scsi%ld: %s calling qla4xxx_cmd_wait\n", ha->host_no,
- __func__));
+ dev_info(&ha->pdev->dev, "%s: adapter OFFLINE\n", __func__);
/* Wait for outstanding commands to complete.
* Stalls the driver for max 30 secs
*/
if (status == QLA_SUCCESS) {
DEBUG2(printk(KERN_ERR "scsi%ld: %s - Performing soft reset..\n",
- ha->host_no, __func__));
+ ha->host_no, __func__));
qla4xxx_flush_active_srbs(ha);
if (ql4xxx_lock_drvr_wait(ha) == QLA_SUCCESS)
status = qla4xxx_soft_reset(ha);
* with ISP interrupts enabled */
if (status == QLA_SUCCESS) {
DEBUG2(printk("scsi%ld: %s - Initializing adapter..\n",
- ha->host_no, __func__));
+ ha->host_no, __func__));
/* If successful, AF_ONLINE flag set in
* qla4xxx_initialize_adapter */
+ if (ha->mac_index == 3)
+ ssleep(6);
status = qla4xxx_initialize_adapter(ha, PRESERVE_DDB_LIST);
}
/* Failed adapter initialization?
* Retry reset_ha only if invoked via DPC (DPC_RESET_HA) */
if ((test_bit(AF_ONLINE, &ha->flags) == 0) &&
- (test_bit(DPC_RESET_HA, &ha->dpc_flags))) {
+ (test_bit(DPC_RESET_HA, &ha->dpc_flags))) {
/* Adapter initialization failed, see if we can retry
* resetting the ha */
if (!test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags)) {
ha->retry_reset_ha_cnt = MAX_RESET_HA_RETRIES;
DEBUG2(printk("scsi%ld: recover adapter - retrying "
- "(%d) more times\n", ha->host_no,
- ha->retry_reset_ha_cnt));
+ "(%d) more times\n", ha->host_no,
+ ha->retry_reset_ha_cnt));
set_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
status = QLA_ERROR;
} else {
/* Schedule another Reset HA--DPC will retry */
ha->retry_reset_ha_cnt--;
DEBUG2(printk("scsi%ld: recover adapter - "
- "retry remaining %d\n",
- ha->host_no,
- ha->retry_reset_ha_cnt));
+ "retry remaining %d\n",
+ ha->host_no,
+ ha->retry_reset_ha_cnt));
status = QLA_ERROR;
}
/* Recover adapter retries have been exhausted.
* Adapter DEAD */
DEBUG2(printk("scsi%ld: recover adapter "
- "failed - board disabled\n",
- ha->host_no));
+ "failed - board disabled\n",
+ ha->host_no));
qla4xxx_flush_active_srbs(ha);
clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
clear_bit(DPC_RESET_HA, &ha->dpc_flags);
* the mid-level tries to sleep when it reaches the driver threshold
* "host->can_queue". This can cause a panic if we were in our interrupt code.
**/
-static void qla4xxx_do_dpc(struct work_struct *work)
+static QL_DECLARE_DPC(qla4xxx_do_dpc, data)
{
- struct scsi_qla_host *ha =
- container_of(work, struct scsi_qla_host, dpc_work);
+ struct scsi_qla_host *ha = QL_DPC_DATA_TO_HA(data);
struct ddb_entry *ddb_entry, *dtemp;
int status = QLA_ERROR;
return;
if (adapter_up(ha) ||
- test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
- test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
- test_bit(DPC_RESET_HA_DESTROY_DDB_LIST, &ha->dpc_flags)) {
+ test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
+ test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
+ test_bit(DPC_RESET_HA_DESTROY_DDB_LIST, &ha->dpc_flags)) {
if (test_bit(DPC_RESET_HA_DESTROY_DDB_LIST, &ha->dpc_flags) ||
- test_bit(DPC_RESET_HA, &ha->dpc_flags))
+ test_bit(DPC_RESET_HA, &ha->dpc_flags))
qla4xxx_recover_adapter(ha);
if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
break;
msleep(1000);
}
+
if (wait_time == 0)
DEBUG2(printk("scsi%ld: %s: SR|FSR "
- "bit not cleared-- resetting\n",
- ha->host_no, __func__));
+ "bit not cleared-- resetting\n",
+ ha->host_no, __func__));
qla4xxx_flush_active_srbs(ha);
if (ql4xxx_lock_drvr_wait(ha) == QLA_SUCCESS) {
qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
- status = qla4xxx_initialize_adapter(ha,
+ status = qla4xxx_initialize_adapter(ha,
PRESERVE_DDB_LIST);
}
clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);
if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags))
qla4xxx_get_dhcp_ip_address(ha);
+ qla4xxx_check_dev_offline(ha);
+
+ if (test_and_clear_bit(DPC_DELETE_DEVICE, &ha->dpc_flags)) {
+ list_for_each_entry_safe(ddb_entry, dtemp,
+ &ha->ddb_list, list) {
+ if (test_and_clear_bit(DF_DELETED,
+ &ddb_entry->flags)) {
+ if (atomic_read(&ddb_entry->state) ==
+ DDB_STATE_DEAD) {
+ dev_info(&ha->pdev->dev,
+ "%s: ddb[%d] os[%d] - "
+ "delete\n",
+ __func__,
+ ddb_entry->fw_ddb_index,
+ ddb_entry->os_target_id);
+ } else {
+ dev_info(&ha->pdev->dev,
+ "%s: ddb[%d] os[%d] - "
+ "ddb state not dead but"
+ " marked for delete\n",
+ __func__,
+ ddb_entry->fw_ddb_index,
+ ddb_entry->os_target_id);
+ }
+ }
+ }
+ }
+
/* ---- relogin device? --- */
if (adapter_up(ha) &&
- test_and_clear_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags)) {
+ test_and_clear_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags)) {
list_for_each_entry_safe(ddb_entry, dtemp,
&ha->ddb_list, list) {
if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags) &&
- atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE)
+ (atomic_read(&ddb_entry->state) !=
+ DDB_STATE_ONLINE))
qla4xxx_relogin_device(ha, ddb_entry);
/*
/* Issue Soft Reset to put firmware in unknown state */
if (ql4xxx_lock_drvr_wait(ha) == QLA_SUCCESS)
- qla4xxx_hw_reset(ha);
+ qla4xxx_hw_reset(ha, 1);
/* Remove timer thread, if present */
if (ha->timer_active)
if (!(mmio_flags & IORESOURCE_MEM)) {
dev_err(&ha->pdev->dev,
"region #0 not an MMIO resource, aborting\n");
+
goto iospace_error_exit;
}
if (mmio_len < MIN_IOBASE_LEN) {
return -ENOMEM;
}
-static void ql4_get_aen_log(struct scsi_qla_host *ha, struct ql4_aen_log *aenl)
-{
- if (aenl) {
- memcpy(aenl, &ha->aen_log, sizeof (ha->aen_log));
- ha->aen_log.count = 0;
- }
-}
-
/**
* qla4xxx_probe_adapter - callback function to probe HBA
* @pdev: pointer to pci_dev structure
* the driver.
**/
static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+ const struct pci_device_id *ent)
{
int ret = -ENODEV, status;
struct Scsi_Host *host;
host = scsi_host_alloc(&qla4xxx_driver_template, sizeof(*ha));
if (host == NULL) {
printk(KERN_WARNING
- "qla4xxx: Couldn't allocate host from scsi layer!\n");
+ "qla4xxx: Couldn't allocate host from scsi layer!\n");
goto probe_disable_device;
}
ha->pdev = pdev;
ha->host = host;
ha->host_no = host->host_no;
-
- ha->ql4mbx = qla4xxx_mailbox_command;
- ha->ql4cmd = qla4xxx_send_command_to_isp;
- ha->ql4getaenlog = ql4_get_aen_log;
+ set_bit(AF_OS_INDEX_VALID, &ha->flags);
/* Configure PCI I/O space. */
ret = qla4xxx_iospace_config(ha);
goto probe_failed;
dev_info(&ha->pdev->dev, "Found an ISP%04x, irq %d, iobase 0x%p\n",
- pdev->device, pdev->irq, ha->reg);
+ pdev->device, pdev->irq, ha->reg);
qla4xxx_config_dma_addressing(ha);
/* Allocate dma buffers */
if (qla4xxx_mem_alloc(ha)) {
dev_warn(&ha->pdev->dev,
- "[ERROR] Failed to allocate memory for adapter\n");
+ "[ERROR] Failed to allocate memory for adapter\n");
ret = -ENOMEM;
goto probe_failed;
*/
status = qla4xxx_initialize_adapter(ha, REBUILD_DDB_LIST);
while (status == QLA_ERROR && init_retry_count++ < MAX_INIT_RETRIES) {
- DEBUG2(printk(KERN_ERR "scsi%ld: %s: retrying adapter initialization "
- "(%d)\n", ha->host_no, __func__, init_retry_count));
+ DEBUG2(dev_info(&ha->pdev->dev, "%s: retry adapter init %d\n",
+ __func__, init_retry_count));
qla4xxx_soft_reset(ha);
status = qla4xxx_initialize_adapter(ha, REBUILD_DDB_LIST);
}
/* Startup the kernel thread for this host adapter. */
DEBUG2(printk("scsi: %s: Starting kernel thread for "
- "qla4xxx_dpc\n", __func__));
+ "qla4xxx_dpc\n", __func__));
sprintf(buf, "qla4xxx_%lu_dpc", ha->host_no);
ha->dpc_thread = create_singlethread_workqueue(buf);
if (!ha->dpc_thread) {
ret = -ENODEV;
goto probe_failed;
}
- INIT_WORK(&ha->dpc_work, qla4xxx_do_dpc);
+ QL_INIT_WORK(ha, qla4xxx_do_dpc);
+
ret = request_irq(pdev->irq, qla4xxx_intr_handler,
- IRQF_DISABLED | IRQF_SHARED, "qla4xxx", ha);
+ QL_REQ_IRQ_FLAGS, "qla4xxx", ha);
+
if (ret) {
dev_warn(&ha->pdev->dev, "Failed to reserve interrupt %d"
" already in use.\n", pdev->irq);
}
set_bit(AF_IRQ_ATTACHED, &ha->flags);
host->irq = pdev->irq;
- DEBUG(printk("scsi%d: irq %d attached\n", ha->host_no, ha->pdev->irq));
+ dev_info(&ha->pdev->dev, "irq %d attached\n", ha->pdev->irq);
qla4xxx_enable_intrs(ha);
if (ret)
goto probe_failed;
+ if ((ret = QL_ISCSI_REGISTER_HOST(host, qla4xxx_scsi_transport)))
+ goto remove_host;
+
/* Update transport device information for all devices. */
list_for_each_entry_safe(ddb_entry, ddbtemp, &ha->ddb_list, list) {
set_bit(DF_SCAN_ISSUED, &ddb_entry->flags);
if (qla4xxx_add_sess(ddb_entry,
- test_bit(DF_SCAN_ISSUED, &ddb_entry->flags)))
+ test_bit(DF_SCAN_ISSUED, &ddb_entry->flags))) {
+ QL_ISCSI_UNREGISTER_HOST(host, qla4xxx_scsi_transport);
goto remove_host;
+ }
if (!test_bit(DF_SCAN_ISSUED, &ddb_entry->flags))
qla4xxx_mark_device_missing(ha, ddb_entry);
}
- printk(KERN_INFO
- " QLogic iSCSI HBA Driver version: %s\n"
- " QLogic ISP%04x @ %s, pdev = %p host#=%ld, fw=%02d.%02d.%02d.%02d\n",
- qla4xxx_version_str, ha->pdev->device, pci_name(ha->pdev), pdev,
- ha->host_no, ha->firmware_version[0], ha->firmware_version[1],
- ha->patch_number, ha->build_number);
+ dev_info(&ha->pdev->dev, " QLogic iSCSI HBA Driver version: %s\n"
+ " QLogic ISP%04x @ %s, pdev = %p host#=%ld,"
+ " fw=%02d.%02d.%02d.%02d\n", qla4xxx_version_str,
+ ha->pdev->device, pci_name(ha->pdev), pdev,
+ ha->host_no, ha->firmware_version[0], ha->firmware_version[1],
+ ha->patch_number, ha->build_number);
/* Insert new entry into the list of adapters. */
klist_add_tail(&ha->node, &qla4xxx_hostlist);
ha->instance = atomic_inc_return(&qla4xxx_hba_count) - 1;
- DEBUG2(printk("qla4xxx: listhead=%p, done adding ha=%p i=%d\n",
- &qla4xxx_hostlist, &ha->node, ha->instance));
+ if (qla4xxx_ioctl_init(ha)) {
+ dev_info(&ha->pdev->dev, "ioctl init failed\n");
+ QL_ISCSI_UNREGISTER_HOST(host, qla4xxx_scsi_transport);
+ goto remove_host;
+ }
+
+ DEBUG2(dev_info(&ha->pdev->dev, "listhead=%p, done adding ha=%p i=%d\n",
+ &qla4xxx_hostlist, &ha->node, ha->instance));
+
+// set_bit(AF_INIT_DONE, &ha->flags);
+ dev_info(&ha->pdev->dev, "%s: AF_INIT_DONE\n", __func__);
return 0;
/* remove devs from iscsi_sessions to scsi_devices */
qla4xxx_free_ddb_list(ha);
+ QL_ISCSI_UNREGISTER_HOST(ha->host, qla4xxx_scsi_transport);
+
scsi_remove_host(ha->host);
+ qla4xxx_ioctl_exit(ha);
+
qla4xxx_free_adapter(ha);
scsi_host_put(ha->host);
* At exit, the @ha's flags.enable_64bit_addressing set to indicated
* supported addressing method.
*/
-static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha)
+void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha)
{
int retval;
if (pci_set_consistent_dma_mask(ha->pdev, DMA_64BIT_MASK)) {
dev_dbg(&ha->pdev->dev,
"Failed to set 64 bit PCI consistent mask; "
- "using 32 bit.\n");
+ "using 32 bit.\n");
retval = pci_set_consistent_dma_mask(ha->pdev,
- DMA_32BIT_MASK);
+ DMA_32BIT_MASK);
}
} else
retval = pci_set_dma_mask(ha->pdev, DMA_32BIT_MASK);
static int qla4xxx_slave_alloc(struct scsi_device *sdev)
{
- struct iscsi_cls_session *sess = starget_to_session(sdev->sdev_target);
+ struct iscsi_cls_session *sess = QL_ISCSI_SDEV_TO_SESS(sdev);
if (sess) {
sdev->hostdata = sess->dd_data;
return 0;
}
+static void qla4xxx_slave_destroy(struct scsi_device *sdev)
+{
+ sdev->hostdata = NULL;
+}
+
/**
* qla4xxx_del_from_active_array - returns an active srb
* @ha: Pointer to host adapter structure.
* for some max time.
**/
static int qla4xxx_eh_wait_on_command(struct scsi_qla_host *ha,
- struct scsi_cmnd *cmd)
+ struct scsi_cmnd *cmd)
{
int done = 0;
struct srb *rp;
return ret;
dev_info(&ha->pdev->dev,
- "scsi%ld:%d:%d:%d: DEVICE RESET ISSUED.\n", ha->host_no,
- cmd->device->channel, cmd->device->id, cmd->device->lun);
+ "%s: %d:%d:%d: DEVICE RESET ISSUED.\n", __func__,
+ cmd->device->channel, cmd->device->id, cmd->device->lun);
if (qla4xxx_wait_for_hba_online(ha) != QLA_SUCCESS) {
dev_info(&ha->pdev->dev, "%s: HBA OFFLINE: FAILED\n", __func__);
*/
if (cmd->device->host->shost_state == SHOST_RECOVERY) {
if (qla4xxx_eh_wait_for_active_target_commands(ha,
- cmd->device->id,
- cmd->device->
- lun)) {
+ cmd->device->id,
+ cmd->device->
+ lun)) {
dev_info(&ha->pdev->dev,
- "DEVICE RESET FAILED - waiting for "
- "commands.\n");
+ "DEVICE RESET FAILED - waiting for "
+ "commands.\n");
goto eh_dev_reset_done;
}
}
goto eh_dev_reset_done;
dev_info(&ha->pdev->dev,
- "scsi(%ld:%d:%d:%d): DEVICE RESET SUCCEEDED.\n",
- ha->host_no, cmd->device->channel, cmd->device->id,
- cmd->device->lun);
+ "scsi(%ld:%d:%d:%d): DEVICE RESET SUCCEEDED.\n",
+ ha->host_no, cmd->device->channel, cmd->device->id,
+ cmd->device->lun);
ret = SUCCESS;
}
dev_info(&ha->pdev->dev, "HOST RESET %s.\n",
- return_status == FAILED ? "FAILED" : "SUCCEDED");
+ return_status == FAILED ? "FAILED" : "SUCCEDED");
return return_status;
}
};
MODULE_DEVICE_TABLE(pci, qla4xxx_pci_tbl);
-static struct pci_driver qla4xxx_pci_driver = {
+struct pci_driver qla4xxx_pci_driver = {
.name = DRIVER_NAME,
.id_table = qla4xxx_pci_tbl,
.probe = qla4xxx_probe_adapter,
atomic_set(&qla4xxx_hba_count, 0);
klist_init(&qla4xxx_hostlist, NULL, NULL);
/* Allocate cache for SRBs. */
- srb_cachep = kmem_cache_create("qla4xxx_srbs", sizeof(struct srb), 0,
- SLAB_HWCACHE_ALIGN, NULL);
+ srb_cachep = ql_kmem_cache_create();
+
if (srb_cachep == NULL) {
- printk(KERN_ERR
- "%s: Unable to allocate SRB cache..."
- "Failing load!\n", DRIVER_NAME);
+ printk(KERN_ERR "%s: Unable to allocate SRB cache..."
+ "Failing load!\n", DRIVER_NAME);
ret = -ENOMEM;
goto no_srp_cache;
}
goto release_srb_cache;
}
+ ret = QL_MISC_INIT;
+ if (ret) {
+ printk(KERN_INFO "QLogic iSCSI HBA Driver ioctl init failed\n");
+ goto unregister_transport;
+ }
+
printk(KERN_INFO "QLogic iSCSI HBA Driver\n");
+
ret = pci_register_driver(&qla4xxx_pci_driver);
if (ret)
goto unregister_transport;
static void __exit qla4xxx_module_exit(void)
{
ql4_mod_unload = 1;
+
+ QL_MISC_EXIT;
+
pci_unregister_driver(&qla4xxx_pci_driver);
iscsi_unregister_transport(&qla4xxx_iscsi_transport);
kmem_cache_destroy(srb_cachep);
--- /dev/null
+/*
+ * QLogic iSCSI HBA Driver
+ * Copyright (c) 2003-2006 QLogic Corporation
+ *
+ * See LICENSE.qla4xxx for copyright and licensing details.
+ */
+
+/*
+ * This file encapsulates RHEL5 Specific Code
+ */
+
+#ifndef __QLA4x_OS_H
+#define __QLA4x_OS_H
+
+/* Common across all O.S platforms */
+#define IOCB_CMD_TIMEOUT 30
+#define RELOGIN_TOV 18
+#define RECOVERY_TIMEOUT 20 /* ddb state MISSING -> DEAD */
+
+#define QL_IOCB_CMD_TIMEOUT(cmd)
+
+#define QL_SET_DDB_OFFLINE(ha, ddb_entry)
+
+#define QL_SESS_RECOVERY_TO(ddb_entry) ddb_entry->ha->port_down_retry_count
+
+#define QL_DPC_OFFLINE_SET(ha) 0
+
+#define QL_ISCSI_CONN_TO_SESS(conn) iscsi_dev_to_session(conn->dev.parent)
+
+#define QL_ISCSI_SDEV_TO_SESS(sdev) starget_to_session(sdev->sdev_target)
+
+#define QL_ISCSI_ADD_SESS(ddb_entry) \
+ iscsi_add_session(ddb_entry->sess, ddb_entry->os_target_id)
+
+#define QL_ISCSI_REGISTER_HOST(host, trans) 0
+#define QL_ISCSI_UNREGISTER_HOST(host, trans)
+
+#define QL_ISCSI_SESSION_ID(ddb_entry) ddb_entry->sess->sid
+#define QL_ISCSI_IF_DESTROY_SESSION_DONE(ddb_entry)
+#define QL_ISCSI_DESTROY_CONN(ddb_entry)
+#define QL_ISCSI_CREATE_CONN(ddb_entry) \
+ iscsi_create_conn(ddb_entry->sess, 0, 0)
+#define QL_ISCSI_CREATE_SESS_DONE(ddb_entry) \
+ iscsi_unblock_session(ddb_entry->sess)
+#define QL_ISCSI_ALLOC_SESSION(ha, trans) \
+ iscsi_alloc_session(ha->host, trans, sizeof(struct ddb_entry))
+
+
+#define QL_MISC_INIT 0
+#define QL_MISC_EXIT
+
+#define qla4xxx_check_dev_offline(ha)
+#define qla4xxx_proc_info NULL
+
+#define QL_SET_SCSI_RESID(cmd, residual) scsi_set_resid(cmd, residual)
+#define QL_SCSI_BUFFLEN(cmd) scsi_bufflen(cmd)
+
+#define QL_DPC_DATA_TO_HA(work) \
+ container_of((struct work_struct *)work, struct scsi_qla_host, dpc_work)
+
+#define QL_INIT_WORK(ha, dpc_func) INIT_WORK(&ha->dpc_work, dpc_func)
+
+#define QL_REQ_IRQ_FLAGS (IRQF_DISABLED | IRQF_SHARED)
+
+#define QL_DECLARE_INTR_HANDLER(intr_func, irq, dev_id, regs) \
+ irqreturn_t intr_func(int irq, void *dev_id)
+
+#define QL_DECLARE_DPC(dpc_func, data) \
+ void dpc_func(struct work_struct *data)
+
+#define QL_INIT_SESSION_DATASIZE(sessiondata_size)
+// .sessiondata_size = sizeof(struct ddb_entry),
+
+#define QL_INIT_HOST_TEMPLATE(host_template)
+// .host_template = &qla4xxx_driver_template,
+
+QL_DECLARE_INTR_HANDLER(qla4xxx_intr_handler, irq, dev_id, regs);
+
+/*
+ * Allocate the slab cache used for SRB allocations, cacheline-aligned.
+ * Returns the cache pointer, or NULL on failure (caller checks).
+ */
+static inline struct kmem_cache *ql_kmem_cache_create(void)
+{
+	/* return is not a function: no parentheses around the expression */
+	return kmem_cache_create("qla4xxx_srbs", sizeof(struct srb), 0,
+				 SLAB_HWCACHE_ALIGN, NULL);
+}
+
+/*
+ * Ask the SCSI midlayer to scan every LUN (SCAN_WILD_CARD) on the
+ * target backing this ddb's iSCSI session.
+ */
+static inline void qla4xxx_scan_target(struct ddb_entry * ddb_entry)
+{
+ scsi_scan_target(&ddb_entry->sess->dev, 0,
+ ddb_entry->sess->target_id, SCAN_WILD_CARD, 0);
+}
+
+/*
+ * Copy the adapter's accumulated AEN log into @aenl (if non-NULL) and
+ * reset the running count so subsequent events start a fresh log.
+ *
+ * Must be "static inline" like the other helpers in this header:
+ * a plain "static" definition produces a defined-but-unused warning
+ * (and a dead copy) in every TU that includes the header without
+ * calling it.
+ */
+static inline void ql4_get_aen_log(struct scsi_qla_host *ha,
+				   struct ql4_aen_log *aenl)
+{
+	if (aenl) {
+		memcpy(aenl, &ha->aen_log, sizeof(ha->aen_log));
+		ha->aen_log.count = 0;
+	}
+}
+
+/*
+ * Wire up the entry points the QLogic ioctl interface uses to reach
+ * the driver (mailbox, passthru command, AEN-log snapshot).
+ * Always succeeds in this O.S. variant, hence the constant 0.
+ */
+static inline int qla4xxx_ioctl_init(struct scsi_qla_host *ha)
+{
+ ha->ql4mbx = qla4xxx_mailbox_command;
+ ha->ql4cmd = qla4xxx_send_command_to_isp;
+ ha->ql4getaenlog = ql4_get_aen_log;
+ return 0;
+}
+
+/* No per-adapter ioctl state to tear down in this O.S. variant. */
+static inline void qla4xxx_ioctl_exit(struct scsi_qla_host *ha)
+{
+}
+
+/*
+ * Release the DMA mapping held by @srb, if one was established, and
+ * clear the scsi_cmnd back-pointer so the command can no longer reach
+ * this srb after it is freed.
+ */
+static inline void qla4xxx_srb_free_dma(struct scsi_qla_host *ha,
+ struct srb *srb)
+{
+ struct scsi_cmnd *cmd = srb->cmd;
+
+ /* SRB_DMA_VALID guards against unmapping a never-mapped command. */
+ if (srb->flags & SRB_DMA_VALID) {
+ scsi_dma_unmap(cmd);
+ srb->flags &= ~SRB_DMA_VALID;
+ }
+
+ cmd->SCp.ptr = NULL;
+}
+
+#endif /* __QLA4x_OS_H */
* See LICENSE.qla4xxx for copyright and licensing details.
*/
-#define QLA4XXX_DRIVER_VERSION "5.01.00-k8_sles11-01"
-
+#define QLA4XXX_DRIVER_VERSION "5.01.00-k8_sles11-03"
sdev->inq_periph_qual = (inq_result[0] >> 5) & 7;
sdev->lockable = sdev->removable;
sdev->soft_reset = (inq_result[7] & 1) && ((inq_result[3] & 7) == 2);
+ sdev->tgps = (inq_result[5] >> 4) & 3;
if (sdev->scsi_level >= SCSI_3 ||
(sdev->inquiry_len > 56 && inq_result[56] & 0x04))
sdev_rd_attr (vendor, "%.8s\n");
sdev_rd_attr (model, "%.16s\n");
sdev_rd_attr (rev, "%.4s\n");
+sdev_rd_attr (tgps, "%d\n");
/*
* TODO: can we make these symlinks to the block layer ones?
void *hostdata; /* available to low-level driver */
char type;
char scsi_level;
- char inq_periph_qual; /* PQ from INQUIRY data */
+ char inq_periph_qual; /* PQ from INQUIRY data */
+ char tgps; /* Target port group support */
unsigned char inquiry_len; /* valid bytes in 'inquiry' */
unsigned char * inquiry; /* INQUIRY response data */
const char * vendor; /* [back_compat] point into 'inquiry' ... */
struct scsi_dh_devlist {
char *vendor;
char *model;
+ char tgps;
};
struct scsi_device_handler {