SCSI misc on 20240928

These are mostly minor updates.  There are two drivers (lpfc and
 mpi3mr) which missed the initial pull and a core change to retry a
 start/stop unit which affects suspend/resume.
 
 Signed-off-by: James E.J. Bottomley <James.Bottomley@HansenPartnership.com>
 -----BEGIN PGP SIGNATURE-----
 
 iJwEABMIAEQWIQTnYEDbdso9F2cI+arnQslM7pishQUCZvh4QiYcamFtZXMuYm90
 dG9tbGV5QGhhbnNlbnBhcnRuZXJzaGlwLmNvbQAKCRDnQslM7pishQajAQDx561I
 cgbPZjZkSOYp+qJowaphyySZ1SS8pfMlVAIiXQEAs4SqhIN8e9QWpgI0bA7X7xtB
 UiOUsIHPHM+BFU6kbJQ=
 =qjsp
 -----END PGP SIGNATURE-----

Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull more SCSI updates from James Bottomley:
 "These are mostly minor updates.

  There are two drivers (lpfc and mpi3mr) which missed the initial
   pull and a core change to retry a start/stop unit which affects
  suspend/resume"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (32 commits)
  scsi: lpfc: Update lpfc version to 14.4.0.5
  scsi: lpfc: Support loopback tests with VMID enabled
  scsi: lpfc: Revise TRACE_EVENT log flag severities from KERN_ERR to KERN_WARNING
  scsi: lpfc: Ensure DA_ID handling completion before deleting an NPIV instance
  scsi: lpfc: Fix kref imbalance on fabric ndlps from dev_loss_tmo handler
  scsi: lpfc: Restrict support for 32 byte CDBs to specific HBAs
  scsi: lpfc: Update phba link state conditional before sending CMF_SYNC_WQE
  scsi: lpfc: Add ELS_RSP cmd to the list of WQEs to flush in lpfc_els_flush_cmd()
  scsi: mpi3mr: Update driver version to 8.12.0.0.50
  scsi: mpi3mr: Improve wait logic while controller transitions to READY state
  scsi: mpi3mr: Update MPI Headers to revision 34
  scsi: mpi3mr: Use firmware-provided timestamp update interval
  scsi: mpi3mr: Enhance the Enable Controller retry logic
  scsi: sd: Fix off-by-one error in sd_read_block_characteristics()
  scsi: pm8001: Do not overwrite PCI queue mapping
  scsi: scsi_debug: Remove a useless memset()
  scsi: pmcraid: Convert comma to semicolon
  scsi: sd: Retry START STOP UNIT commands
  scsi: mpi3mr: A performance fix
  scsi: ufs: qcom: Update MODE_MAX cfg_bw value
  ...
Committed by Linus Torvalds on 2024-09-29 09:22:34 -07:00 (commit 3ed7df0852)
34 changed files with 415 additions and 154 deletions

View file

@@ -1609,7 +1609,7 @@ mptctl_eventreport (MPT_ADAPTER *ioc, unsigned long arg)
 maxEvents = numBytes/sizeof(MPT_IOCTL_EVENTS);
-max = MPTCTL_EVENT_LOG_SIZE < maxEvents ? MPTCTL_EVENT_LOG_SIZE : maxEvents;
+max = min(maxEvents, MPTCTL_EVENT_LOG_SIZE);
 /* If fewer than 1 event is requested, there must have
 * been some type of error.

View file

@@ -485,7 +485,6 @@ struct cxgbi_device {
 unsigned char nmtus;
 unsigned char nports;
 struct pci_dev *pdev;
-struct dentry *debugfs_root;
 struct iscsi_transport *itp;
 struct module *owner;
@@ -499,7 +498,6 @@ struct cxgbi_device {
 unsigned int rxq_idx_cntr;
 struct cxgbi_ports_map pmap;
-void (*dev_ddp_cleanup)(struct cxgbi_device *);
 struct cxgbi_ppm* (*cdev2ppm)(struct cxgbi_device *);
 int (*csk_ddp_set_map)(struct cxgbi_ppm *, struct cxgbi_sock *,
 struct cxgbi_task_tag_info *);
@@ -512,7 +510,6 @@ struct cxgbi_device {
 unsigned int, int);
 void (*csk_release_offload_resources)(struct cxgbi_sock *);
-int (*csk_rx_pdu_ready)(struct cxgbi_sock *, struct sk_buff *);
 u32 (*csk_send_rx_credits)(struct cxgbi_sock *, u32);
 int (*csk_push_tx_frames)(struct cxgbi_sock *, int);
 void (*csk_send_abort_req)(struct cxgbi_sock *);

View file

@@ -2421,7 +2421,7 @@ static void slot_complete_v3_hw(struct hisi_hba *hisi_hba,
 spin_lock_irqsave(&device->done_lock, flags);
 if (test_bit(SAS_HA_FROZEN, &ha->state)) {
 spin_unlock_irqrestore(&device->done_lock, flags);
-dev_info(dev, "slot complete: task(%pK) ignored\n ",
+dev_info(dev, "slot complete: task(%pK) ignored\n",
 task);
 return;
 }

View file

@@ -37,6 +37,7 @@ static unsigned int default_timeout = IBMVFC_DEFAULT_TIMEOUT;
 static u64 max_lun = IBMVFC_MAX_LUN;
 static unsigned int max_targets = IBMVFC_MAX_TARGETS;
 static unsigned int max_requests = IBMVFC_MAX_REQUESTS_DEFAULT;
+static u16 max_sectors = IBMVFC_MAX_SECTORS;
 static u16 scsi_qdepth = IBMVFC_SCSI_QDEPTH;
 static unsigned int disc_threads = IBMVFC_MAX_DISC_THREADS;
 static unsigned int ibmvfc_debug = IBMVFC_DEBUG;
@@ -83,6 +84,9 @@ MODULE_PARM_DESC(default_timeout,
 module_param_named(max_requests, max_requests, uint, S_IRUGO);
 MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter. "
 "[Default=" __stringify(IBMVFC_MAX_REQUESTS_DEFAULT) "]");
+module_param_named(max_sectors, max_sectors, ushort, S_IRUGO);
+MODULE_PARM_DESC(max_sectors, "Maximum sectors for this adapter. "
+"[Default=" __stringify(IBMVFC_MAX_SECTORS) "]");
 module_param_named(scsi_qdepth, scsi_qdepth, ushort, S_IRUGO);
 MODULE_PARM_DESC(scsi_qdepth, "Maximum scsi command depth per adapter queue. "
 "[Default=" __stringify(IBMVFC_SCSI_QDEPTH) "]");
@@ -1494,7 +1498,7 @@ static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
 memset(login_info, 0, sizeof(*login_info));
 login_info->ostype = cpu_to_be32(IBMVFC_OS_LINUX);
-login_info->max_dma_len = cpu_to_be64(IBMVFC_MAX_SECTORS << 9);
+login_info->max_dma_len = cpu_to_be64(max_sectors << 9);
 login_info->max_payload = cpu_to_be32(sizeof(struct ibmvfc_fcp_cmd_iu));
 login_info->max_response = cpu_to_be32(sizeof(struct ibmvfc_fcp_rsp));
 login_info->partition_num = cpu_to_be32(vhost->partition_number);
@@ -5230,7 +5234,7 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
 }
 vhost->logged_in = 1;
-npiv_max_sectors = min((uint)(be64_to_cpu(rsp->max_dma_len) >> 9), IBMVFC_MAX_SECTORS);
+npiv_max_sectors = min((uint)(be64_to_cpu(rsp->max_dma_len) >> 9), max_sectors);
 dev_info(vhost->dev, "Host partition: %s, device: %s %s %s max sectors %u\n",
 rsp->partition_name, rsp->device_name, rsp->port_loc_code,
 rsp->drc_name, npiv_max_sectors);
@@ -6329,7 +6333,7 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 shost->can_queue = scsi_qdepth;
 shost->max_lun = max_lun;
 shost->max_id = max_targets;
-shost->max_sectors = IBMVFC_MAX_SECTORS;
+shost->max_sectors = max_sectors;
 shost->max_cmd_len = IBMVFC_MAX_CDB_LEN;
 shost->unique_id = shost->host_no;
 shost->nr_hw_queues = mq_enabled ? min(max_scsi_queues, nr_scsi_hw_queues) : 1;
@@ -6556,6 +6560,7 @@ static struct fc_function_template ibmvfc_transport_functions = {
 **/
 static int __init ibmvfc_module_init(void)
 {
+int min_max_sectors = PAGE_SIZE >> 9;
 int rc;
 if (!firmware_has_feature(FW_FEATURE_VIO))
@@ -6564,6 +6569,16 @@ static int __init ibmvfc_module_init(void)
 printk(KERN_INFO IBMVFC_NAME": IBM Virtual Fibre Channel Driver version: %s %s\n",
 IBMVFC_DRIVER_VERSION, IBMVFC_DRIVER_DATE);
+/*
+* Range check the max_sectors module parameter. The upper bounds is
+* implicity checked since the parameter is a ushort.
+*/
+if (max_sectors < min_max_sectors) {
+printk(KERN_ERR IBMVFC_NAME ": max_sectors must be at least %d.\n",
+min_max_sectors);
+max_sectors = min_max_sectors;
+}
 ibmvfc_transport_template = fc_attach_transport(&ibmvfc_transport_functions);
 if (!ibmvfc_transport_template)
 return -ENOMEM;

View file

@@ -32,7 +32,7 @@
 #define IBMVFC_DEBUG 0
 #define IBMVFC_MAX_TARGETS 1024
 #define IBMVFC_MAX_LUN 0xffffffff
-#define IBMVFC_MAX_SECTORS 0xffffu
+#define IBMVFC_MAX_SECTORS 2048
 #define IBMVFC_MAX_DISC_THREADS 4
 #define IBMVFC_TGT_MEMPOOL_SZ 64
 #define IBMVFC_MAX_CMDS_PER_LUN 64

View file

@@ -3208,6 +3208,9 @@ lpfc_bsg_diag_loopback_run(struct bsg_job *job)
 cmdiocbq->num_bdes = num_bde;
 cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC;
 cmdiocbq->cmd_flag |= LPFC_IO_LOOPBACK;
+if (phba->cfg_vmid_app_header)
+cmdiocbq->cmd_flag |= LPFC_IO_VMID;
 cmdiocbq->vport = phba->pport;
 cmdiocbq->cmd_cmpl = NULL;
 cmdiocbq->bpl_dmabuf = txbmp;

View file

@@ -1572,8 +1572,8 @@ lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 }
 }
 } else
-lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
-"3065 GFT_ID failed x%08x\n", ulp_status);
+lpfc_vlog_msg(vport, KERN_WARNING, LOG_DISCOVERY,
+"3065 GFT_ID status x%08x\n", ulp_status);
 out:
 lpfc_ct_free_iocb(phba, cmdiocb);
@@ -1647,6 +1647,18 @@ lpfc_cmpl_ct(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 }
 out:
+/* If the caller wanted a synchronous DA_ID completion, signal the
+* wait obj and clear flag to reset the vport.
+*/
+if (ndlp->save_flags & NLP_WAIT_FOR_DA_ID) {
+if (ndlp->da_id_waitq)
+wake_up(ndlp->da_id_waitq);
+}
+spin_lock_irq(&ndlp->lock);
+ndlp->save_flags &= ~NLP_WAIT_FOR_DA_ID;
+spin_unlock_irq(&ndlp->lock);
 lpfc_ct_free_iocb(phba, cmdiocb);
 lpfc_nlp_put(ndlp);
 return;
@@ -2246,7 +2258,7 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 }
 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
-"0229 FDMI cmd %04x failed, latt = %d "
+"0229 FDMI cmd %04x latt = %d "
 "ulp_status: x%x, rid x%x\n",
 be16_to_cpu(fdmi_cmd), latt, ulp_status,
 ulp_word4);
@@ -2263,9 +2275,9 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 /* Check for a CT LS_RJT response */
 cmd = be16_to_cpu(fdmi_cmd);
 if (be16_to_cpu(fdmi_rsp) == SLI_CT_RESPONSE_FS_RJT) {
-/* FDMI rsp failed */
+/* Log FDMI reject */
 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY | LOG_ELS,
-"0220 FDMI cmd failed FS_RJT Data: x%x", cmd);
+"0220 FDMI cmd FS_RJT Data: x%x", cmd);
 /* Should we fallback to FDMI-2 / FDMI-1 ? */
 switch (cmd) {

View file

@@ -90,6 +90,8 @@ enum lpfc_nlp_save_flags {
 NLP_IN_RECOV_POST_DEV_LOSS = 0x1,
 /* wait for outstanding LOGO to cmpl */
 NLP_WAIT_FOR_LOGO = 0x2,
+/* wait for outstanding DA_ID to finish */
+NLP_WAIT_FOR_DA_ID = 0x4
 };
 struct lpfc_nodelist {
@@ -159,7 +161,12 @@ struct lpfc_nodelist {
 uint32_t nvme_fb_size; /* NVME target's supported byte cnt */
 #define NVME_FB_BIT_SHIFT 9 /* PRLI Rsp first burst in 512B units. */
 uint32_t nlp_defer_did;
+/* These wait objects are NPIV specific. These IOs must complete
+* synchronously.
+*/
 wait_queue_head_t *logo_waitq;
+wait_queue_head_t *da_id_waitq;
 };
 struct lpfc_node_rrq {

View file

@@ -979,7 +979,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 phba->fcoe_cvl_eventtag_attn =
 phba->fcoe_cvl_eventtag;
 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
-"2611 FLOGI failed on FCF (x%x), "
+"2611 FLOGI FCF (x%x), "
 "status:x%x/x%x, tmo:x%x, perform "
 "roundrobin FCF failover\n",
 phba->fcf.current_rec.fcf_indx,
@@ -997,11 +997,11 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 if (!(ulp_status == IOSTAT_LOCAL_REJECT &&
 ((ulp_word4 & IOERR_PARAM_MASK) ==
 IOERR_LOOP_OPEN_FAILURE)))
-lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
-"2858 FLOGI failure Status:x%x/x%x TMO"
+lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS,
+"2858 FLOGI Status:x%x/x%x TMO"
 ":x%x Data x%lx x%x\n",
 ulp_status, ulp_word4, tmo,
 phba->hba_flag, phba->fcf.fcf_flag);
 /* Check for retry */
 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
@@ -1023,7 +1023,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 lpfc_nlp_put(ndlp);
 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
-"0150 FLOGI failure Status:x%x/x%x "
+"0150 FLOGI Status:x%x/x%x "
 "xri x%x TMO:x%x refcnt %d\n",
 ulp_status, ulp_word4, cmdiocb->sli4_xritag,
 tmo, kref_read(&ndlp->kref));
@@ -1032,11 +1032,11 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 if (!(ulp_status == IOSTAT_LOCAL_REJECT &&
 ((ulp_word4 & IOERR_PARAM_MASK) ==
 IOERR_LOOP_OPEN_FAILURE))) {
-/* FLOGI failure */
-lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
-"0100 FLOGI failure Status:x%x/x%x "
+/* Warn FLOGI status */
+lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS,
+"0100 FLOGI Status:x%x/x%x "
 "TMO:x%x\n",
 ulp_status, ulp_word4, tmo);
 goto flogifail;
 }
@@ -1964,16 +1964,16 @@ lpfc_cmpl_els_rrq(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 if (ulp_status) {
 /* Check for retry */
-/* RRQ failed Don't print the vport to vport rjts */
+/* Warn RRQ status Don't print the vport to vport rjts */
 if (ulp_status != IOSTAT_LS_RJT ||
 (((ulp_word4) >> 16 != LSRJT_INVALID_CMD) &&
 ((ulp_word4) >> 16 != LSRJT_UNABLE_TPC)) ||
 (phba)->pport->cfg_log_verbose & LOG_ELS)
-lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
-"2881 RRQ failure DID:%06X Status:"
+lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS,
+"2881 RRQ DID:%06X Status:"
 "x%x/x%x\n",
 ndlp->nlp_DID, ulp_status,
 ulp_word4);
 }
 lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
@@ -2077,16 +2077,16 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 }
 goto out;
 }
-/* PLOGI failed Don't print the vport to vport rjts */
+/* Warn PLOGI status Don't print the vport to vport rjts */
 if (ulp_status != IOSTAT_LS_RJT ||
 (((ulp_word4) >> 16 != LSRJT_INVALID_CMD) &&
 ((ulp_word4) >> 16 != LSRJT_UNABLE_TPC)) ||
 (phba)->pport->cfg_log_verbose & LOG_ELS)
-lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
-"2753 PLOGI failure DID:%06X "
+lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS,
+"2753 PLOGI DID:%06X "
 "Status:x%x/x%x\n",
 ndlp->nlp_DID, ulp_status,
 ulp_word4);
 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
 if (!lpfc_error_lost_link(vport, ulp_status, ulp_word4))
@@ -2323,7 +2323,6 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 struct lpfc_vport *vport = cmdiocb->vport;
 struct lpfc_nodelist *ndlp;
 char *mode;
-u32 loglevel;
 u32 ulp_status;
 u32 ulp_word4;
 bool release_node = false;
@@ -2372,17 +2371,14 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 * could be expected.
 */
 if (test_bit(FC_FABRIC, &vport->fc_flag) ||
-vport->cfg_enable_fc4_type != LPFC_ENABLE_BOTH) {
-mode = KERN_ERR;
-loglevel = LOG_TRACE_EVENT;
-} else {
+vport->cfg_enable_fc4_type != LPFC_ENABLE_BOTH)
+mode = KERN_WARNING;
+else
 mode = KERN_INFO;
-loglevel = LOG_ELS;
-}
-/* PRLI failed */
-lpfc_printf_vlog(vport, mode, loglevel,
-"2754 PRLI failure DID:%06X Status:x%x/x%x, "
+/* Warn PRLI status */
+lpfc_printf_vlog(vport, mode, LOG_ELS,
+"2754 PRLI DID:%06X Status:x%x/x%x, "
 "data: x%x x%x x%x\n",
 ndlp->nlp_DID, ulp_status,
 ulp_word4, ndlp->nlp_state,
@@ -2854,11 +2850,11 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 }
 goto out;
 }
-/* ADISC failed */
-lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
-"2755 ADISC failure DID:%06X Status:x%x/x%x\n",
+/* Warn ADISC status */
+lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS,
+"2755 ADISC DID:%06X Status:x%x/x%x\n",
 ndlp->nlp_DID, ulp_status,
 ulp_word4);
 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
 NLP_EVT_CMPL_ADISC);
@@ -3045,12 +3041,12 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 * discovery. The PLOGI will retry.
 */
 if (ulp_status) {
-/* LOGO failed */
-lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
-"2756 LOGO failure, No Retry DID:%06X "
+/* Warn LOGO status */
+lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS,
+"2756 LOGO, No Retry DID:%06X "
 "Status:x%x/x%x\n",
 ndlp->nlp_DID, ulp_status,
 ulp_word4);
 if (lpfc_error_lost_link(vport, ulp_status, ulp_word4))
 skip_recovery = 1;
@@ -4837,11 +4833,10 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
 (cmd == ELS_CMD_FDISC) &&
 (stat.un.b.lsRjtRsnCodeExp == LSEXP_OUT_OF_RESOURCE)){
-lpfc_printf_vlog(vport, KERN_ERR,
-LOG_TRACE_EVENT,
-"0125 FDISC Failed (x%x). "
-"Fabric out of resources\n",
-stat.un.lsRjtError);
+lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS,
+"0125 FDISC (x%x). "
+"Fabric out of resources\n",
+stat.un.lsRjtError);
 lpfc_vport_set_state(vport,
 FC_VPORT_NO_FABRIC_RSCS);
 }
@@ -4877,11 +4872,10 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 LSEXP_NOTHING_MORE) {
 vport->fc_sparam.cmn.bbRcvSizeMsb &= 0xf;
 retry = 1;
-lpfc_printf_vlog(vport, KERN_ERR,
-LOG_TRACE_EVENT,
-"0820 FLOGI Failed (x%x). "
-"BBCredit Not Supported\n",
-stat.un.lsRjtError);
+lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS,
+"0820 FLOGI (x%x). "
+"BBCredit Not Supported\n",
+stat.un.lsRjtError);
 }
 break;
@@ -4891,11 +4885,10 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 ((stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_PNAME) ||
 (stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID))
 ) {
-lpfc_printf_vlog(vport, KERN_ERR,
-LOG_TRACE_EVENT,
-"0122 FDISC Failed (x%x). "
-"Fabric Detected Bad WWN\n",
-stat.un.lsRjtError);
+lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS,
+"0122 FDISC (x%x). "
+"Fabric Detected Bad WWN\n",
+stat.un.lsRjtError);
 lpfc_vport_set_state(vport,
 FC_VPORT_FABRIC_REJ_WWN);
 }
@@ -5355,8 +5348,8 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 u32 ulp_status, ulp_word4, tmo, did, iotag;
 if (!vport) {
-lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
-"3177 ELS response failed\n");
+lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
+"3177 null vport in ELS rsp\n");
 goto out;
 }
 if (cmdiocb->context_un.mbox)
@@ -9658,11 +9651,12 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
 if (piocb->cmd_flag & LPFC_DRIVER_ABORTED && !mbx_tmo_err)
 continue;
-/* On the ELS ring we can have ELS_REQUESTs or
-* GEN_REQUESTs waiting for a response.
+/* On the ELS ring we can have ELS_REQUESTs, ELS_RSPs,
+* or GEN_REQUESTs waiting for a CQE response.
 */
 ulp_command = get_job_cmnd(phba, piocb);
-if (ulp_command == CMD_ELS_REQUEST64_CR) {
+if (ulp_command == CMD_ELS_REQUEST64_WQE ||
+ulp_command == CMD_XMIT_ELS_RSP64_WQE) {
 list_add_tail(&piocb->dlist, &abort_list);
 /* If the link is down when flushing ELS commands
@@ -11327,10 +11321,10 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 /* Check for retry */
 if (lpfc_els_retry(phba, cmdiocb, rspiocb))
 goto out;
-/* FDISC failed */
-lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
-"0126 FDISC failed. (x%x/x%x)\n",
+/* Warn FDISC status */
+lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS,
+"0126 FDISC cmpl status: x%x/x%x)\n",
 ulp_status, ulp_word4);
 goto fdisc_failed;
 }

View file

@@ -527,6 +527,9 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
 * the following lpfc_nlp_put is necessary after fabric node is
 * recovered.
 */
+spin_lock_irqsave(&ndlp->lock, iflags);
+ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS;
+spin_unlock_irqrestore(&ndlp->lock, iflags);
 if (recovering) {
 lpfc_printf_vlog(vport, KERN_INFO,
 LOG_DISCOVERY | LOG_NODE,
@@ -539,6 +542,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
 spin_lock_irqsave(&ndlp->lock, iflags);
 ndlp->save_flags |= NLP_IN_RECOV_POST_DEV_LOSS;
 spin_unlock_irqrestore(&ndlp->lock, iflags);
+return fcf_inuse;
 } else if (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
 /* Fabric node fully recovered before this dev_loss_tmo
 * queue work is processed. Thus, ignore the
@@ -552,15 +556,9 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
 ndlp->nlp_DID, kref_read(&ndlp->kref),
 ndlp, ndlp->nlp_flag,
 vport->port_state);
-spin_lock_irqsave(&ndlp->lock, iflags);
-ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS;
-spin_unlock_irqrestore(&ndlp->lock, iflags);
 return fcf_inuse;
 }
-spin_lock_irqsave(&ndlp->lock, iflags);
-ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS;
-spin_unlock_irqrestore(&ndlp->lock, iflags);
 lpfc_nlp_put(ndlp);
 return fcf_inuse;
 }

View file

@@ -561,6 +561,27 @@ struct fc_vft_header {
 #include <uapi/scsi/fc/fc_els.h>
+/*
+* Application Header
+*/
+struct fc_app_header {
+uint32_t dst_app_id;
+uint32_t src_app_id;
+#define LOOPBACK_SRC_APPID 0x4321
+uint32_t word2;
+uint32_t word3;
+};
+/*
+* dfctl optional header definition
+*/
+enum lpfc_fc_dfctl {
+LPFC_FC_NO_DEVICE_HEADER,
+LPFC_FC_16B_DEVICE_HEADER,
+LPFC_FC_32B_DEVICE_HEADER,
+LPFC_FC_64B_DEVICE_HEADER,
+};
 /*
 * Extended Link Service LS_COMMAND codes (Payload Word 0)
 */

View file

@@ -4847,6 +4847,7 @@ struct fcp_iwrite64_wqe {
 #define cmd_buff_len_SHIFT 16
 #define cmd_buff_len_MASK 0x00000ffff
 #define cmd_buff_len_WORD word3
+/* Note: payload_offset_len field depends on ASIC support */
 #define payload_offset_len_SHIFT 0
 #define payload_offset_len_MASK 0x0000ffff
 #define payload_offset_len_WORD word3
@@ -4863,6 +4864,7 @@ struct fcp_iread64_wqe {
 #define cmd_buff_len_SHIFT 16
 #define cmd_buff_len_MASK 0x00000ffff
 #define cmd_buff_len_WORD word3
+/* Note: payload_offset_len field depends on ASIC support */
 #define payload_offset_len_SHIFT 0
 #define payload_offset_len_MASK 0x0000ffff
 #define payload_offset_len_WORD word3
@@ -4879,6 +4881,7 @@ struct fcp_icmnd64_wqe {
 #define cmd_buff_len_SHIFT 16
 #define cmd_buff_len_MASK 0x00000ffff
 #define cmd_buff_len_WORD word3
+/* Note: payload_offset_len field depends on ASIC support */
 #define payload_offset_len_SHIFT 0
 #define payload_offset_len_MASK 0x0000ffff
 #define payload_offset_len_WORD word3

View file

@@ -4699,6 +4699,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
 uint64_t wwn;
 bool use_no_reset_hba = false;
 int rc;
+u8 if_type;
 if (lpfc_no_hba_reset_cnt) {
 if (phba->sli_rev < LPFC_SLI_REV4 &&
@@ -4773,10 +4774,24 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
 shost->max_id = LPFC_MAX_TARGET;
 shost->max_lun = vport->cfg_max_luns;
 shost->this_id = -1;
-if (phba->sli_rev == LPFC_SLI_REV4)
-shost->max_cmd_len = LPFC_FCP_CDB_LEN_32;
-else
+/* Set max_cmd_len applicable to ASIC support */
+if (phba->sli_rev == LPFC_SLI_REV4) {
+if_type = bf_get(lpfc_sli_intf_if_type,
+&phba->sli4_hba.sli_intf);
+switch (if_type) {
+case LPFC_SLI_INTF_IF_TYPE_2:
+fallthrough;
+case LPFC_SLI_INTF_IF_TYPE_6:
+shost->max_cmd_len = LPFC_FCP_CDB_LEN_32;
+break;
+default:
+shost->max_cmd_len = LPFC_FCP_CDB_LEN;
+break;
+}
+} else {
 shost->max_cmd_len = LPFC_FCP_CDB_LEN;
+}
 if (phba->sli_rev == LPFC_SLI_REV4) {
 if (!phba->cfg_fcp_mq_threshold ||
@@ -10436,6 +10451,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
 struct lpfc_vector_map_info *cpup;
 struct lpfc_vector_map_info *eqcpup;
 struct lpfc_eq_intr_info *eqi;
+u32 wqesize;
 /*
 * Create HBA Record arrays.
@@ -10655,9 +10671,15 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
 * Create ELS Work Queues
 */
-/* Create slow-path ELS Work Queue */
+/*
+* Create slow-path ELS Work Queue.
+* Increase the ELS WQ size when WQEs contain an embedded cdb
+*/
+wqesize = (phba->fcp_embed_io) ?
+LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
-phba->sli4_hba.wq_esize,
+wqesize,
 phba->sli4_hba.wq_ecount, cpu);
 if (!qdesc) {
 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,

View file

@@ -4760,7 +4760,7 @@ static int lpfc_scsi_prep_cmnd_buf_s4(struct lpfc_vport *vport,
 /* Word 3 */
 bf_set(payload_offset_len, &wqe->fcp_icmd,
-sizeof(struct fcp_cmnd32) + sizeof(struct fcp_rsp));
+sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
 /* Word 6 */
 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,

View file

@@ -1940,12 +1940,15 @@ lpfc_issue_cmf_sync_wqe(struct lpfc_hba *phba, u32 ms, u64 total)
 atot = atomic_xchg(&phba->cgn_sync_alarm_cnt, 0);
 wtot = atomic_xchg(&phba->cgn_sync_warn_cnt, 0);
+spin_lock_irqsave(&phba->hbalock, iflags);
 /* ONLY Managed mode will send the CMF_SYNC_WQE to the HBA */
 if (phba->cmf_active_mode != LPFC_CFG_MANAGED ||
-phba->link_state == LPFC_LINK_DOWN)
-return 0;
+phba->link_state < LPFC_LINK_UP) {
+ret_val = 0;
+goto out_unlock;
+}
-spin_lock_irqsave(&phba->hbalock, iflags);
 sync_buf = __lpfc_sli_get_iocbq(phba);
 if (!sync_buf) {
 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT,
@@ -8818,7 +8821,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
 rc = lpfc_sli4_queue_setup(phba);
 if (unlikely(rc)) {
 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
-"0381 Error %d during queue setup.\n ", rc);
+"0381 Error %d during queue setup.\n", rc);
 goto out_stop_timers;
 }
 /* Initialize the driver internal SLI layer lists. */
@@ -11090,9 +11093,17 @@ __lpfc_sli_prep_xmit_seq64_s4(struct lpfc_iocbq *cmdiocbq,
 /* Word 9 */
 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ox_id);
-/* Word 12 */
-if (cmdiocbq->cmd_flag & (LPFC_IO_LIBDFC | LPFC_IO_LOOPBACK))
+if (cmdiocbq->cmd_flag & (LPFC_IO_LIBDFC | LPFC_IO_LOOPBACK)) {
+/* Word 10 */
+if (cmdiocbq->cmd_flag & LPFC_IO_VMID) {
+bf_set(wqe_appid, &wqe->xmit_sequence.wqe_com, 1);
+bf_set(wqe_wqes, &wqe->xmit_sequence.wqe_com, 1);
+wqe->words[31] = LOOPBACK_SRC_APPID;
+}
+/* Word 12 */
 wqe->xmit_sequence.xmit_len = full_size;
+}
 else
 wqe->xmit_sequence.xmit_len =
 wqe->xmit_sequence.bde.tus.f.bdeSize;
@@ -18431,6 +18442,7 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
 {
 /* make rctl_names static to save stack space */
 struct fc_vft_header *fc_vft_hdr;
+struct fc_app_header *fc_app_hdr;
 uint32_t *header = (uint32_t *) fc_hdr;
 #define FC_RCTL_MDS_DIAGS 0xF4
@@ -18486,6 +18498,32 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
 goto drop;
 }
+if (unlikely(phba->link_flag == LS_LOOPBACK_MODE &&
+phba->cfg_vmid_app_header)) {
+/* Application header is 16B device header */
+if (fc_hdr->fh_df_ctl & LPFC_FC_16B_DEVICE_HEADER) {
+fc_app_hdr = (struct fc_app_header *) (fc_hdr + 1);
+if (be32_to_cpu(fc_app_hdr->src_app_id) !=
+LOOPBACK_SRC_APPID) {
+lpfc_printf_log(phba, KERN_WARNING,
+LOG_ELS | LOG_LIBDFC,
+"1932 Loopback src app id "
+"not matched, app_id:x%x\n",
+be32_to_cpu(fc_app_hdr->src_app_id));
+goto drop;
+}
+} else {
+lpfc_printf_log(phba, KERN_WARNING,
+LOG_ELS | LOG_LIBDFC,
+"1933 Loopback df_ctl bit not set, "
+"df_ctl:x%x\n",
+fc_hdr->fh_df_ctl);
+goto drop;
+}
+}
 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
 "2538 Received frame rctl:x%x, type:x%x, "
 "frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
@@ -21149,7 +21187,7 @@ lpfc_drain_txq(struct lpfc_hba *phba)
 if (!piocbq) {
 spin_unlock_irqrestore(&pring->ring_lock, iflags);
 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
-"2823 txq empty and txq_cnt is %d\n ",
+"2823 txq empty and txq_cnt is %d\n",
 txq_cnt);
 break;
 }

View file

@@ -20,7 +20,7 @@
 * included with this package. *
 *******************************************************************/
-#define LPFC_DRIVER_VERSION "14.4.0.4"
+#define LPFC_DRIVER_VERSION "14.4.0.5"
 #define LPFC_DRIVER_NAME "lpfc"
 /* Used for SLI 2/3 */

View file

@@ -626,6 +626,7 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 struct lpfc_hba *phba = vport->phba;
 int rc;
+DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
 if (vport->port_type == LPFC_PHYSICAL_PORT) {
 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
@@ -679,21 +680,49 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
 if (!ndlp)
 goto skip_logo;
+/* Send the DA_ID and Fabric LOGO to cleanup the NPIV fabric entries. */
 if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE &&
 phba->link_state >= LPFC_LINK_UP &&
 phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
 if (vport->cfg_enable_da_id) {
-/* Send DA_ID and wait for a completion. */
+/* Send DA_ID and wait for a completion. This is best
+* effort. If the DA_ID fails, likely the fabric will
+* "leak" NportIDs but at least the driver issued the
+* command.
+*/
+ndlp = lpfc_findnode_did(vport, NameServer_DID);
+if (!ndlp)
+goto issue_logo;
+spin_lock_irq(&ndlp->lock);
+ndlp->da_id_waitq = &waitq;
+ndlp->save_flags |= NLP_WAIT_FOR_DA_ID;
+spin_unlock_irq(&ndlp->lock);
 rc = lpfc_ns_cmd(vport, SLI_CTNS_DA_ID, 0, 0);
-if (rc) {
-lpfc_printf_log(vport->phba, KERN_WARNING,
-LOG_VPORT,
-"1829 CT command failed to "
-"delete objects on fabric, "
-"rc %d\n", rc);
+if (!rc) {
+wait_event_timeout(waitq,
+!(ndlp->save_flags & NLP_WAIT_FOR_DA_ID),
+msecs_to_jiffies(phba->fc_ratov * 2000));
 }
+lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT | LOG_ELS,
+"1829 DA_ID issue status %d. "
+"SFlag x%x NState x%x, NFlag x%x "
+"Rpi x%x\n",
+rc, ndlp->save_flags, ndlp->nlp_state,
+ndlp->nlp_flag, ndlp->nlp_rpi);
+/* Remove the waitq and save_flags. It no
+* longer matters if the wake happened.
+*/
+spin_lock_irq(&ndlp->lock);
+ndlp->da_id_waitq = NULL;
+ndlp->save_flags &= ~NLP_WAIT_FOR_DA_ID;
+spin_unlock_irq(&ndlp->lock);
 }
+issue_logo:
 /*
 * If the vpi is not registered, then a valid FDISC doesn't
 * exist and there is no need for a ELS LOGO. Just cleanup

View file

@@ -6380,7 +6380,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
 GFP_KERNEL);
 if (!fusion->stream_detect_by_ld[i]) {
 dev_err(&instance->pdev->dev,
-"unable to allocate stream detect by LD\n ");
+"unable to allocate stream detect by LD\n");
 for (j = 0; j < i; ++j)
 kfree(fusion->stream_detect_by_ld[j]);
 kfree(fusion->stream_detect_by_ld);

View file

@@ -67,6 +67,7 @@
 #define MPI3_SECURITY_PGAD_SLOT_GROUP_MASK (0x0000ff00)
 #define MPI3_SECURITY_PGAD_SLOT_GROUP_SHIFT (8)
 #define MPI3_SECURITY_PGAD_SLOT_MASK (0x000000ff)
+#define MPI3_INSTANCE_PGAD_INSTANCE_MASK (0x0000ffff)
 struct mpi3_config_request {
 __le16 host_tag;
 u8 ioc_use_only02;
@@ -75,7 +76,8 @@ struct mpi3_config_request {
 u8 ioc_use_only06;
 u8 msg_flags;
 __le16 change_count;
-__le16 reserved0a;
+u8 proxy_ioc_number;
+u8 reserved0b;
 u8 page_version;
 u8 page_number;
 u8 page_type;
@@ -206,6 +208,9 @@ struct mpi3_config_page_header {
 #define MPI3_MFGPAGE_DEVID_SAS5116_MPI_MGMT (0x00b5)
 #define MPI3_MFGPAGE_DEVID_SAS5116_NVME_MGMT (0x00b6)
 #define MPI3_MFGPAGE_DEVID_SAS5116_PCIE_SWITCH (0x00b8)
+#define MPI3_MFGPAGE_DEVID_SAS5248_MPI (0x00f0)
+#define MPI3_MFGPAGE_DEVID_SAS5248_MPI_NS (0x00f1)
+#define MPI3_MFGPAGE_DEVID_SAS5248_PCIE_SWITCH (0x00f2)
 struct mpi3_man_page0 {
 struct mpi3_config_page_header header;
 u8 chip_revision[8];
@@ -1074,6 +1079,8 @@ struct mpi3_io_unit_page8 {
 #define MPI3_IOUNIT8_SBSTATE_SVN_UPDATE_PENDING (0x04)
 #define MPI3_IOUNIT8_SBSTATE_KEY_UPDATE_PENDING (0x02)
 #define MPI3_IOUNIT8_SBSTATE_SECURE_BOOT_ENABLED (0x01)
+#define MPI3_IOUNIT8_SBMODE_CURRENT_KEY_IOUNIT17 (0x10)
+#define MPI3_IOUNIT8_SBMODE_HARD_SECURE_RECERTIFIED (0x08)
 struct mpi3_io_unit_page9 {
 struct mpi3_config_page_header header;
 __le32 flags;
@@ -1089,6 +1096,8 @@ struct mpi3_io_unit_page9 {
 #define MPI3_IOUNIT9_FLAGS_UBM_ENCLOSURE_ORDER_BACKPLANE_TYPE (0x00000004)
 #define MPI3_IOUNIT9_FLAGS_VDFIRST_ENABLED (0x00000001)
 #define MPI3_IOUNIT9_FIRSTDEVICE_UNKNOWN (0xffff)
+#define MPI3_IOUNIT9_FIRSTDEVICE_IN_DRIVER_PAGE_0 (0xfffe)
 struct mpi3_io_unit_page10 {
 struct mpi3_config_page_header header;
 u8 flags;
@@ -1224,6 +1233,19 @@ struct mpi3_io_unit_page15 {
 #define MPI3_IOUNIT15_FLAGS_EPRSUPPORT_WITHOUT_POWER_BRAKE_GPIO (0x01)
 #define MPI3_IOUNIT15_FLAGS_EPRSUPPORT_WITH_POWER_BRAKE_GPIO (0x02)
 #define MPI3_IOUNIT15_NUMPOWERBUDGETDATA_POWER_BUDGETING_DISABLED (0x00)
+struct mpi3_io_unit_page17 {
+struct mpi3_config_page_header header;
+u8 num_instances;
+u8 instance;
+__le16 reserved0a;
+__le32 reserved0c[4];
+__le16 key_length;
+u8 encryption_algorithm;
+u8 reserved1f;
+__le32 current_key[];
+};
+#define MPI3_IOUNIT17_PAGEVERSION (0x00)
 struct mpi3_ioc_page0 {
 struct mpi3_config_page_header header;
 __le32 reserved08;
@@ -1311,7 +1333,7 @@ struct mpi3_driver_page0 {
 u8 tur_interval;
 u8 reserved10;
 u8 security_key_timeout;
-__le16 reserved12;
+__le16 first_device;
 __le32 reserved14;
 __le32 reserved18;
 };
@@ -1324,10 +1346,13 @@ struct mpi3_driver_page0 {
 #define MPI3_DRIVER0_BSDOPTS_REGISTRATION_IOC_AND_DEVS (0x00000000)
 #define MPI3_DRIVER0_BSDOPTS_REGISTRATION_IOC_ONLY (0x00000001)
 #define MPI3_DRIVER0_BSDOPTS_REGISTRATION_IOC_AND_INTERNAL_DEVS (0x00000002)
+#define MPI3_DRIVER0_FIRSTDEVICE_IGNORE1 (0x0000)
+#define MPI3_DRIVER0_FIRSTDEVICE_IGNORE2 (0xffff)
 struct mpi3_driver_page1 {
 struct mpi3_config_page_header header;
 __le32 flags;
-__le32 reserved0c;
+u8 time_stamp_update;
+u8 reserved0d[3];
 __le16 host_diag_trace_max_size;
 __le16 host_diag_trace_min_size;
 __le16 host_diag_trace_decrement_size;
@@ -2347,6 +2372,10 @@ struct mpi3_device0_vd_format {
 #define MPI3_DEVICE0_VD_DEVICE_INFO_SAS (0x0001)
 #define MPI3_DEVICE0_VD_FLAGS_IO_THROTTLE_GROUP_QD_MASK (0xf000)
 #define MPI3_DEVICE0_VD_FLAGS_IO_THROTTLE_GROUP_QD_SHIFT (12)
+#define MPI3_DEVICE0_VD_FLAGS_OSEXPOSURE_MASK (0x0003)
+#define MPI3_DEVICE0_VD_FLAGS_OSEXPOSURE_HDD (0x0000)
+#define MPI3_DEVICE0_VD_FLAGS_OSEXPOSURE_SSD (0x0001)
+#define MPI3_DEVICE0_VD_FLAGS_OSEXPOSURE_NO_GUIDANCE (0x0002)
 union mpi3_device0_dev_spec_format {
 struct mpi3_device0_sas_sata_format sas_sata_format;
 struct mpi3_device0_pcie_format pcie_format;

View file

@@ -205,13 +205,14 @@ struct mpi3_encrypted_hash_entry {
 u8 hash_image_type;
 u8 hash_algorithm;
 u8 encryption_algorithm;
-u8 reserved03;
+u8 flags;
 __le16 public_key_size;
 __le16 signature_size;
 __le32 public_key[MPI3_PUBLIC_KEY_MAX];
 };
-#define MPI3_HASH_IMAGE_TYPE_KEY_WITH_SIGNATURE (0x03)
+#define MPI3_HASH_IMAGE_TYPE_KEY_WITH_HASH (0x03)
+#define MPI3_HASH_IMAGE_TYPE_KEY_WITH_HASH_1_OF_2 (0x04)
+#define MPI3_HASH_IMAGE_TYPE_KEY_WITH_HASH_2_OF_2 (0x05)
 #define MPI3_HASH_ALGORITHM_VERSION_MASK (0xe0)
 #define MPI3_HASH_ALGORITHM_VERSION_NONE (0x00)
 #define MPI3_HASH_ALGORITHM_VERSION_SHA1 (0x20)
@@ -230,6 +231,12 @@ struct mpi3_encrypted_hash_entry {
 #define MPI3_ENCRYPTION_ALGORITHM_RSA4096 (0x05)
 #define MPI3_ENCRYPTION_ALGORITHM_RSA3072 (0x06)
+/* hierarchical signature system (hss) */
+#define MPI3_ENCRYPTION_ALGORITHM_ML_DSA_87 (0x0b)
+#define MPI3_ENCRYPTION_ALGORITHM_ML_DSA_65 (0x0c)
+#define MPI3_ENCRYPTION_ALGORITHM_ML_DSA_44 (0x0d)
+#define MPI3_ENCRYPTED_HASH_ENTRY_FLAGS_PAIRED_KEY_MASK (0x0f)
 #ifndef MPI3_ENCRYPTED_HASH_ENTRY_MAX
 #define MPI3_ENCRYPTED_HASH_ENTRY_MAX (1)
 #endif

View file

@@ -39,6 +39,12 @@ struct mpi3_ioc_init_request {
 #define MPI3_WHOINIT_HOST_DRIVER (0x03)
 #define MPI3_WHOINIT_MANUFACTURER (0x04)
+#define MPI3_IOCINIT_DRIVERCAP_OSEXPOSURE_MASK (0x00000003)
+#define MPI3_IOCINIT_DRIVERCAP_OSEXPOSURE_NO_GUIDANCE (0x00000000)
+#define MPI3_IOCINIT_DRIVERCAP_OSEXPOSURE_NO_SPECIAL (0x00000001)
+#define MPI3_IOCINIT_DRIVERCAP_OSEXPOSURE_REPORT_AS_HDD (0x00000002)
+#define MPI3_IOCINIT_DRIVERCAP_OSEXPOSURE_REPORT_AS_SSD (0x00000003)
 struct mpi3_ioc_facts_request {
 __le16 host_tag;
 u8 ioc_use_only02;
@@ -140,6 +146,8 @@ struct mpi3_ioc_facts_data {
 #define MPI3_IOCFACTS_EXCEPT_MANUFACT_CHECKSUM_FAIL (0x0020)
 #define MPI3_IOCFACTS_EXCEPT_FW_CHECKSUM_FAIL (0x0010)
 #define MPI3_IOCFACTS_EXCEPT_CONFIG_CHECKSUM_FAIL (0x0008)
+#define MPI3_IOCFACTS_EXCEPT_BLOCKING_BOOT_EVENT (0x0004)
+#define MPI3_IOCFACTS_EXCEPT_SECURITY_SELFTEST_FAILURE (0x0002)
 #define MPI3_IOCFACTS_EXCEPT_BOOTSTAT_MASK (0x0001)
 #define MPI3_IOCFACTS_EXCEPT_BOOTSTAT_PRIMARY (0x0000)
 #define MPI3_IOCFACTS_EXCEPT_BOOTSTAT_SECONDARY (0x0001)

View file

@@ -18,7 +18,7 @@ union mpi3_version_union {
 #define MPI3_VERSION_MAJOR (3)
 #define MPI3_VERSION_MINOR (0)
-#define MPI3_VERSION_UNIT (31)
+#define MPI3_VERSION_UNIT (34)
 #define MPI3_VERSION_DEV (0)
 #define MPI3_DEVHANDLE_INVALID (0xffff)
 struct mpi3_sysif_oper_queue_indexes {
@@ -158,6 +158,7 @@ struct mpi3_sysif_registers {
 #define MPI3_SYSIF_FAULT_CODE_SOFT_RESET_NEEDED (0x0000f004)
 #define MPI3_SYSIF_FAULT_CODE_POWER_CYCLE_REQUIRED (0x0000f005)
 #define MPI3_SYSIF_FAULT_CODE_TEMP_THRESHOLD_EXCEEDED (0x0000f006)
+#define MPI3_SYSIF_FAULT_CODE_INSUFFICIENT_PCI_SLOT_POWER (0x0000f007)
 #define MPI3_SYSIF_FAULT_INFO0_OFFSET (0x00001c14)
 #define MPI3_SYSIF_FAULT_INFO1_OFFSET (0x00001c18)
 #define MPI3_SYSIF_FAULT_INFO2_OFFSET (0x00001c1c)
@@ -410,6 +411,7 @@ struct mpi3_default_reply {
 #define MPI3_IOCSTATUS_INSUFFICIENT_RESOURCES (0x0006)
 #define MPI3_IOCSTATUS_INVALID_FIELD (0x0007)
 #define MPI3_IOCSTATUS_INVALID_STATE (0x0008)
+#define MPI3_IOCSTATUS_SHUTDOWN_ACTIVE (0x0009)
 #define MPI3_IOCSTATUS_INSUFFICIENT_POWER (0x000a)
 #define MPI3_IOCSTATUS_INVALID_CHANGE_COUNT (0x000b)
 #define MPI3_IOCSTATUS_ALLOWED_CMD_BLOCK (0x000c)

View file

@@ -57,8 +57,8 @@ extern struct list_head mrioc_list;
 extern int prot_mask;
 extern atomic64_t event_counter;
-#define MPI3MR_DRIVER_VERSION "8.10.0.5.50"
-#define MPI3MR_DRIVER_RELDATE "08-Aug-2024"
+#define MPI3MR_DRIVER_VERSION "8.12.0.0.50"
+#define MPI3MR_DRIVER_RELDATE "05-Sept-2024"
 #define MPI3MR_DRIVER_NAME "mpi3mr"
 #define MPI3MR_DRIVER_LICENSE "GPL"
@@ -178,7 +178,7 @@ extern atomic64_t event_counter;
 #define MPI3MR_DEFAULT_SDEV_QD 32
 /* Definitions for Threaded IRQ poll*/
-#define MPI3MR_IRQ_POLL_SLEEP 2
+#define MPI3MR_IRQ_POLL_SLEEP 20
 #define MPI3MR_IRQ_POLL_TRIGGER_IOCOUNT 8
 /* Definitions for the controller security status*/
@@ -1090,6 +1090,7 @@ struct scmd_priv {
 * @evtack_cmds_bitmap: Event Ack bitmap
 * @delayed_evtack_cmds_list: Delayed event acknowledgment list
 * @ts_update_counter: Timestamp update counter
+* @ts_update_interval: Timestamp update interval
 * @reset_in_progress: Reset in progress flag
 * @unrecoverable: Controller unrecoverable flag
 * @prev_reset_result: Result of previous reset
@@ -1277,7 +1278,8 @@ struct mpi3mr_ioc {
 unsigned long *evtack_cmds_bitmap;
 struct list_head delayed_evtack_cmds_list;
-u32 ts_update_counter;
+u16 ts_update_counter;
+u16 ts_update_interval;
 u8 reset_in_progress;
 u8 unrecoverable;
 int prev_reset_result;

View file

@ -728,7 +728,7 @@ static irqreturn_t mpi3mr_isr_poll(int irq, void *privdata)
mpi3mr_process_op_reply_q(mrioc, mpi3mr_process_op_reply_q(mrioc,
intr_info->op_reply_q); intr_info->op_reply_q);
usleep_range(MPI3MR_IRQ_POLL_SLEEP, 10 * MPI3MR_IRQ_POLL_SLEEP); usleep_range(MPI3MR_IRQ_POLL_SLEEP, MPI3MR_IRQ_POLL_SLEEP + 1);
} while (atomic_read(&intr_info->op_reply_q->pend_ios) && } while (atomic_read(&intr_info->op_reply_q->pend_ios) &&
(num_op_reply < mrioc->max_host_ios)); (num_op_reply < mrioc->max_host_ios));
@ -1362,6 +1362,10 @@ static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc)
int retval = 0; int retval = 0;
enum mpi3mr_iocstate ioc_state; enum mpi3mr_iocstate ioc_state;
u64 base_info; u64 base_info;
u8 retry = 0;
u64 start_time, elapsed_time_sec;
retry_bring_ioc_ready:
ioc_status = readl(&mrioc->sysif_regs->ioc_status); ioc_status = readl(&mrioc->sysif_regs->ioc_status);
ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
@ -1380,26 +1384,23 @@ static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc)
ioc_info(mrioc, "controller is in %s state during detection\n", ioc_info(mrioc, "controller is in %s state during detection\n",
mpi3mr_iocstate_name(ioc_state)); mpi3mr_iocstate_name(ioc_state));
if (ioc_state == MRIOC_STATE_BECOMING_READY || timeout = mrioc->ready_timeout * 10;
ioc_state == MRIOC_STATE_RESET_REQUESTED) {
timeout = mrioc->ready_timeout * 10; do {
do { ioc_state = mpi3mr_get_iocstate(mrioc);
msleep(100);
} while (--timeout); if (ioc_state != MRIOC_STATE_BECOMING_READY &&
ioc_state != MRIOC_STATE_RESET_REQUESTED)
break;
if (!pci_device_is_present(mrioc->pdev)) { if (!pci_device_is_present(mrioc->pdev)) {
mrioc->unrecoverable = 1; mrioc->unrecoverable = 1;
ioc_err(mrioc, ioc_err(mrioc, "controller is not present while waiting to reset\n");
"controller is not present while waiting to reset\n");
retval = -1;
goto out_device_not_present; goto out_device_not_present;
} }
ioc_state = mpi3mr_get_iocstate(mrioc); msleep(100);
ioc_info(mrioc, } while (--timeout);
"controller is in %s state after waiting to reset\n",
mpi3mr_iocstate_name(ioc_state));
}
if (ioc_state == MRIOC_STATE_READY) { if (ioc_state == MRIOC_STATE_READY) {
ioc_info(mrioc, "issuing message unit reset (MUR) to bring to reset state\n"); ioc_info(mrioc, "issuing message unit reset (MUR) to bring to reset state\n");
@ -1460,6 +1461,9 @@ static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc)
ioc_config |= MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC; ioc_config |= MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
writel(ioc_config, &mrioc->sysif_regs->ioc_configuration); writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
if (retry == 0)
start_time = jiffies;
timeout = mrioc->ready_timeout * 10; timeout = mrioc->ready_timeout * 10;
do { do {
ioc_state = mpi3mr_get_iocstate(mrioc); ioc_state = mpi3mr_get_iocstate(mrioc);
@ -1469,6 +1473,12 @@ static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc)
mpi3mr_iocstate_name(ioc_state)); mpi3mr_iocstate_name(ioc_state));
return 0; return 0;
} }
ioc_status = readl(&mrioc->sysif_regs->ioc_status);
if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) ||
(ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) {
mpi3mr_print_fault_info(mrioc);
goto out_failed;
}
if (!pci_device_is_present(mrioc->pdev)) { if (!pci_device_is_present(mrioc->pdev)) {
mrioc->unrecoverable = 1; mrioc->unrecoverable = 1;
ioc_err(mrioc, ioc_err(mrioc,
@ -1477,9 +1487,19 @@ static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc)
goto out_device_not_present; goto out_device_not_present;
} }
msleep(100); msleep(100);
} while (--timeout); elapsed_time_sec = jiffies_to_msecs(jiffies - start_time)/1000;
} while (elapsed_time_sec < mrioc->ready_timeout);
out_failed: out_failed:
elapsed_time_sec = jiffies_to_msecs(jiffies - start_time)/1000;
if ((retry < 2) && (elapsed_time_sec < (mrioc->ready_timeout - 60))) {
retry++;
ioc_warn(mrioc, "retrying to bring IOC ready, retry_count:%d\n"
" elapsed time =%llu\n", retry, elapsed_time_sec);
goto retry_bring_ioc_ready;
}
ioc_state = mpi3mr_get_iocstate(mrioc); ioc_state = mpi3mr_get_iocstate(mrioc);
ioc_err(mrioc, ioc_err(mrioc,
"failed to bring to ready state, current state: %s\n", "failed to bring to ready state, current state: %s\n",
@ -2671,7 +2691,7 @@ static void mpi3mr_watchdog_work(struct work_struct *work)
return; return;
} }
if (mrioc->ts_update_counter++ >= MPI3MR_TSUPDATE_INTERVAL) { if (mrioc->ts_update_counter++ >= mrioc->ts_update_interval) {
mrioc->ts_update_counter = 0; mrioc->ts_update_counter = 0;
mpi3mr_sync_timestamp(mrioc); mpi3mr_sync_timestamp(mrioc);
} }
@@ -3844,6 +3864,29 @@ static int mpi3mr_repost_diag_bufs(struct mpi3mr_ioc *mrioc)
     return retval;
 }
 
+/**
+ * mpi3mr_read_tsu_interval - Update time stamp interval
+ * @mrioc: Adapter instance reference
+ *
+ * Update time stamp interval if its defined in driver page 1,
+ * otherwise use default value.
+ *
+ * Return: Nothing
+ */
+static void
+mpi3mr_read_tsu_interval(struct mpi3mr_ioc *mrioc)
+{
+    struct mpi3_driver_page1 driver_pg1;
+    u16 pg_sz = sizeof(driver_pg1);
+    int retval = 0;
+
+    mrioc->ts_update_interval = MPI3MR_TSUPDATE_INTERVAL;
+
+    retval = mpi3mr_cfg_get_driver_pg1(mrioc, &driver_pg1, pg_sz);
+    if (!retval && driver_pg1.time_stamp_update)
+        mrioc->ts_update_interval = (driver_pg1.time_stamp_update * 60);
+}
+
 /**
  * mpi3mr_print_ioc_info - Display controller information
  * @mrioc: Adapter instance reference
@@ -4140,6 +4183,7 @@ int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc)
         goto out_failed_noretry;
     }
 
+    mpi3mr_read_tsu_interval(mrioc);
     mpi3mr_print_ioc_info(mrioc);
 
     if (!mrioc->cfg_page) {
@@ -4321,6 +4365,7 @@ int mpi3mr_reinit_ioc(struct mpi3mr_ioc *mrioc, u8 is_resume)
         goto out_failed_noretry;
     }
 
+    mpi3mr_read_tsu_interval(mrioc);
     mpi3mr_print_ioc_info(mrioc);
 
     if (is_resume) {
@@ -8898,9 +8898,8 @@ _base_check_ioc_facts_changes(struct MPT3SAS_ADAPTER *ioc)
             ioc->device_remove_in_progress, pd_handles_sz, GFP_KERNEL);
         if (!device_remove_in_progress) {
             ioc_info(ioc,
-                "Unable to allocate the memory for "
-                "device_remove_in_progress of sz: %d\n "
-                , pd_handles_sz);
+                "Unable to allocate the memory for device_remove_in_progress of sz: %d\n",
+                pd_handles_sz);
             return -ENOMEM;
         }
         memset(device_remove_in_progress +
@@ -100,10 +100,12 @@ static void pm8001_map_queues(struct Scsi_Host *shost)
     struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
     struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
 
-    if (pm8001_ha->number_of_intr > 1)
+    if (pm8001_ha->number_of_intr > 1) {
         blk_mq_pci_map_queues(qmap, pm8001_ha->pdev, 1);
+        return;
+    }
 
-    return blk_mq_map_queues(qmap);
+    blk_mq_map_queues(qmap);
 }
 
 /*
@@ -2037,7 +2037,7 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
             atomic_dec(&pm8001_dev->running_req);
             break;
         }
-        pm8001_dbg(pm8001_ha, IO, "scsi_status = 0x%x\n ",
+        pm8001_dbg(pm8001_ha, IO, "scsi_status = 0x%x\n",
             psspPayload->ssp_resp_iu.status);
         spin_lock_irqsave(&t->task_state_lock, flags);
         t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
@@ -1946,7 +1946,7 @@ static void pmcraid_soft_reset(struct pmcraid_cmd *cmd)
     }
 
     iowrite32(doorbell, pinstance->int_regs.host_ioa_interrupt_reg);
-    ioread32(pinstance->int_regs.host_ioa_interrupt_reg),
+    ioread32(pinstance->int_regs.host_ioa_interrupt_reg);
     int_reg = ioread32(pinstance->int_regs.ioa_host_interrupt_reg);
 
     pmcraid_info("Waiting for IOA to become operational %x:%x\n",
@@ -310,7 +310,7 @@ struct qedf_ioreq *qedf_alloc_cmd(struct qedf_rport *fcport, u8 cmd_type)
     if (!free_sqes) {
         QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
-            "Returning NULL, free_sqes=%d.\n ",
+            "Returning NULL, free_sqes=%d.\n",
             free_sqes);
         goto out_failed;
     }
 
@@ -2760,7 +2760,6 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
     else
         bd_len = 0;
     alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
-    memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
     if (0x3 == pcontrol) {  /* Saving values not supported */
         mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
         return check_condition_result;
@@ -38,7 +38,6 @@
 #include <linux/fs.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
-#include <linux/bio-integrity.h>
 #include <linux/hdreg.h>
 #include <linux/errno.h>
 #include <linux/idr.h>
@@ -3404,7 +3403,7 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp,
 
     rcu_read_lock();
     vpd = rcu_dereference(sdkp->device->vpd_pgb1);
-    if (!vpd || vpd->len < 8) {
+    if (!vpd || vpd->len <= 8) {
         rcu_read_unlock();
         return;
     }
@@ -4093,9 +4092,38 @@ static int sd_start_stop_device(struct scsi_disk *sdkp, int start)
 {
     unsigned char cmd[6] = { START_STOP };  /* START_VALID */
     struct scsi_sense_hdr sshdr;
+    struct scsi_failure failure_defs[] = {
+        {
+            /* Power on, reset, or bus device reset occurred */
+            .sense = UNIT_ATTENTION,
+            .asc = 0x29,
+            .ascq = 0,
+            .result = SAM_STAT_CHECK_CONDITION,
+        },
+        {
+            /* Power on occurred */
+            .sense = UNIT_ATTENTION,
+            .asc = 0x29,
+            .ascq = 1,
+            .result = SAM_STAT_CHECK_CONDITION,
+        },
+        {
+            /* SCSI bus reset */
+            .sense = UNIT_ATTENTION,
+            .asc = 0x29,
+            .ascq = 2,
+            .result = SAM_STAT_CHECK_CONDITION,
+        },
+        {}
+    };
+    struct scsi_failures failures = {
+        .total_allowed = 3,
+        .failure_definitions = failure_defs,
+    };
     const struct scsi_exec_args exec_args = {
         .sshdr = &sshdr,
         .req_flags = BLK_MQ_REQ_PM,
+        .failures = &failures,
     };
     struct scsi_device *sdp = sdkp->device;
     int res;
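
The failure_defs[] table added above uses the midlayer's declarative retry support: each struct scsi_failure describes a sense key/ASC/ASCQ and SCSI result to match, struct scsi_failures caps the total number of retries, and the table reaches the command through scsi_exec_args.failures. A minimal sketch of the same mechanism for a different command follows; the TEST UNIT READY opcode, the 10-second timeout and the helper name example_tur() are illustrative and not part of this patch:

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_proto.h>

/* Sketch only: retry TEST UNIT READY when a power-on/reset UNIT ATTENTION
 * is reported; only the scsi_failure/scsi_failures/scsi_exec_args usage
 * mirrors the change above. */
static int example_tur(struct scsi_device *sdev)
{
    unsigned char cmd[6] = { TEST_UNIT_READY };
    struct scsi_sense_hdr sshdr;
    struct scsi_failure failure_defs[] = {
        {
            /* Power on, reset, or bus device reset occurred */
            .sense = UNIT_ATTENTION,
            .asc = 0x29,
            .ascq = 0,
            .result = SAM_STAT_CHECK_CONDITION,
        },
        {}  /* the table is terminated by an empty entry */
    };
    struct scsi_failures failures = {
        .total_allowed = 3,
        .failure_definitions = failure_defs,
    };
    const struct scsi_exec_args exec_args = {
        .sshdr = &sshdr,
        .failures = &failures,
    };

    /* The retries argument stays 0; the failures table drives the retries. */
    return scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, NULL, 0, 10 * HZ, 0,
                &exec_args);
}
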
@@ -834,6 +834,9 @@ static int flush_buffer(struct scsi_tape *STp, int seek_next)
     int backspace, result;
     struct st_partstat *STps;
 
+    if (STp->ready != ST_READY)
+        return 0;
+
     /*
      * If there was a bus reset, block further access
      * to this device.
@@ -841,8 +844,6 @@ static int flush_buffer(struct scsi_tape *STp, int seek_next)
     if (STp->pos_unknown)
         return (-EIO);
 
-    if (STp->ready != ST_READY)
-        return 0;
     STps = &(STp->ps[STp->partition]);
     if (STps->rw == ST_WRITING) /* Writing */
         return st_flush_write_buffer(STp);
@@ -139,7 +139,7 @@ zalon_probe(struct parisc_device *dev)
         return -ENODEV;
 
     if (request_irq(dev->irq, ncr53c8xx_intr, IRQF_SHARED, "zalon", host)) {
-        dev_printk(KERN_ERR, &dev->dev, "irq problem with %d, detaching\n ",
+        dev_printk(KERN_ERR, &dev->dev, "irq problem with %d, detaching\n",
             dev->irq);
         goto fail;
     }
@@ -93,7 +93,7 @@ static const struct __ufs_qcom_bw_table {
     [MODE_HS_RB][UFS_HS_G3][UFS_LANE_2] = { 1492582, 204800 },
     [MODE_HS_RB][UFS_HS_G4][UFS_LANE_2] = { 2915200, 409600 },
     [MODE_HS_RB][UFS_HS_G5][UFS_LANE_2] = { 5836800, 819200 },
-    [MODE_MAX][0][0] = { 7643136, 307200 },
+    [MODE_MAX][0][0] = { 7643136, 819200 },
 };
 
 static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host);