author	Linus Torvalds	2017-03-19 18:06:31 -0700
committer	Linus Torvalds	2017-03-19 18:06:31 -0700
commit	8aa3417255fababc0cab7128dd7520d3af344ab8 (patch)
tree	521faa00d95dcf17c69482136c86b427beb826ca /drivers
parent	1b8df61908bde12946877b4c079bb73fc0bd3409 (diff)
parent	6c611d18f386d37cce3afbd921568e2a895bd86e (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending
Pull SCSI target fixes from Nicholas Bellinger:
"The bulk of the changes are in qla2xxx target driver code to address
various issues found during Cavium/QLogic's internal testing (stable
CC's included), along with a few other stability and smaller
miscellaneous improvements.
There are also a couple of different patch sets from Mike Christie,
which have been a result of his work to use target-core ALUA logic
together with tcm-user backend driver.
Finally, a patch to address some long standing issues with
pass-through SCSI export of TYPE_TAPE + TYPE_MEDIUM_CHANGER devices,
which will make folks using physical (or virtual) magnetic tape happy"
* git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending: (28 commits)
qla2xxx: Update driver version to 9.00.00.00-k
qla2xxx: Fix delayed response to command for loop mode/direct connect.
qla2xxx: Change scsi host lookup method.
qla2xxx: Add DebugFS node to display Port Database
qla2xxx: Use IOCB interface to submit non-critical MBX.
qla2xxx: Add async new target notification
qla2xxx: Export DIF stats via debugfs
qla2xxx: Improve T10-DIF/PI handling in driver.
qla2xxx: Allow relogin to proceed if remote login did not finish
qla2xxx: Fix sess_lock & hardware_lock lock order problem.
qla2xxx: Fix inadequate lock protection for ABTS.
qla2xxx: Fix request queue corruption.
qla2xxx: Fix memory leak for abts processing
qla2xxx: Allow vref count to timeout on vport delete.
tcmu: Convert cmd_time_out into backend device attribute
tcmu: make cmd timeout configurable
tcmu: add helper to check if dev was configured
target: fix race during implicit transition work flushes
target: allow userspace to set state to transitioning
target: fix ALUA transition timeout handling
...
Diffstat (limited to 'drivers')
23 files changed, 1267 insertions, 546 deletions
diff --git a/drivers/scsi/qla2xxx/Kconfig b/drivers/scsi/qla2xxx/Kconfig index 67c0d5aa3212..de952935b5d2 100644 --- a/drivers/scsi/qla2xxx/Kconfig +++ b/drivers/scsi/qla2xxx/Kconfig @@ -3,6 +3,7 @@ config SCSI_QLA_FC depends on PCI && SCSI depends on SCSI_FC_ATTRS select FW_LOADER + select BTREE ---help--- This qla2xxx driver supports all QLogic Fibre Channel PCI and PCIe host adapters. diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index f610103994af..435ff7fd6384 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -2154,8 +2154,6 @@ qla24xx_vport_delete(struct fc_vport *fc_vport) "Timer for the VP[%d] has stopped\n", vha->vp_idx); } - BUG_ON(atomic_read(&vha->vref_count)); - qla2x00_free_fcports(vha); mutex_lock(&ha->vport_lock); @@ -2166,7 +2164,7 @@ qla24xx_vport_delete(struct fc_vport *fc_vport) dma_free_coherent(&ha->pdev->dev, vha->gnl.size, vha->gnl.l, vha->gnl.ldma); - if (vha->qpair->vp_idx == vha->vp_idx) { + if (vha->qpair && vha->qpair->vp_idx == vha->vp_idx) { if (qla2xxx_delete_qpair(vha, vha->qpair) != QLA_SUCCESS) ql_log(ql_log_warn, vha, 0x7087, "Queue Pair delete failed.\n"); diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h index e1fc4e66966a..c6bffe929fe7 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.h +++ b/drivers/scsi/qla2xxx/qla_dbg.h @@ -348,6 +348,7 @@ ql_log_pci(uint32_t, struct pci_dev *pdev, int32_t, const char *fmt, ...); #define ql_dbg_tgt 0x00004000 /* Target mode */ #define ql_dbg_tgt_mgt 0x00002000 /* Target mode management */ #define ql_dbg_tgt_tmr 0x00001000 /* Target mode task management */ +#define ql_dbg_tgt_dif 0x00000800 /* Target mode dif */ extern int qla27xx_dump_mpi_ram(struct qla_hw_data *, uint32_t, uint32_t *, uint32_t, void **); diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 625d438e3cce..ae119018dfaa 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -25,6 +25,7 @@ #include <linux/firmware.h> #include <linux/aer.h> #include <linux/mutex.h> +#include <linux/btree.h> #include <scsi/scsi.h> #include <scsi/scsi_host.h> @@ -395,11 +396,15 @@ struct srb_iocb { struct completion comp; } abt; struct ct_arg ctarg; +#define MAX_IOCB_MB_REG 28 +#define SIZEOF_IOCB_MB_REG (MAX_IOCB_MB_REG * sizeof(uint16_t)) struct { - __le16 in_mb[28]; /* fr fw */ - __le16 out_mb[28]; /* to fw */ + __le16 in_mb[MAX_IOCB_MB_REG]; /* from FW */ + __le16 out_mb[MAX_IOCB_MB_REG]; /* to FW */ void *out, *in; dma_addr_t out_dma, in_dma; + struct completion comp; + int rc; } mbx; struct { struct imm_ntfy_from_isp *ntfy; @@ -437,7 +442,7 @@ typedef struct srb { uint32_t handle; uint16_t flags; uint16_t type; - char *name; + const char *name; int iocbs; struct qla_qpair *qpair; u32 gen1; /* scratch */ @@ -2300,6 +2305,8 @@ typedef struct fc_port { struct ct_sns_desc ct_desc; enum discovery_state disc_state; enum login_state fw_login_state; + unsigned long plogi_nack_done_deadline; + u32 login_gen, last_login_gen; u32 rscn_gen, last_rscn_gen; u32 chip_reset; @@ -3106,6 +3113,16 @@ struct qla_chip_state_84xx { uint32_t gold_fw_version; }; +struct qla_dif_statistics { + uint64_t dif_input_bytes; + uint64_t dif_output_bytes; + uint64_t dif_input_requests; + uint64_t dif_output_requests; + uint32_t dif_guard_err; + uint32_t dif_ref_tag_err; + uint32_t dif_app_tag_err; +}; + struct qla_statistics { uint32_t total_isp_aborts; uint64_t input_bytes; @@ -3118,6 +3135,8 @@ struct qla_statistics { uint32_t stat_max_pend_cmds; 
uint32_t stat_max_qfull_cmds_alloc; uint32_t stat_max_qfull_cmds_dropped; + + struct qla_dif_statistics qla_dif_stats; }; struct bidi_statistics { @@ -3125,6 +3144,16 @@ struct bidi_statistics { unsigned long long transfer_bytes; }; +struct qla_tc_param { + struct scsi_qla_host *vha; + uint32_t blk_sz; + uint32_t bufflen; + struct scatterlist *sg; + struct scatterlist *prot_sg; + struct crc_context *ctx; + uint8_t *ctx_dsd_alloced; +}; + /* Multi queue support */ #define MBC_INITIALIZE_MULTIQ 0x1f #define QLA_QUE_PAGE 0X1000 @@ -3272,6 +3301,8 @@ struct qlt_hw_data { uint8_t tgt_node_name[WWN_SIZE]; struct dentry *dfs_tgt_sess; + struct dentry *dfs_tgt_port_database; + struct list_head q_full_list; uint32_t num_pend_cmds; uint32_t num_qfull_cmds_alloc; @@ -3281,6 +3312,7 @@ struct qlt_hw_data { spinlock_t sess_lock; int rspq_vector_cpuid; spinlock_t atio_lock ____cacheline_aligned; + struct btree_head32 host_map; }; #define MAX_QFULL_CMDS_ALLOC 8192 @@ -3290,6 +3322,10 @@ struct qlt_hw_data { #define LEAK_EXCHG_THRESH_HOLD_PERCENT 75 /* 75 percent */ +#define QLA_EARLY_LINKUP(_ha) \ + ((_ha->flags.n2n_ae || _ha->flags.lip_ae) && \ + _ha->flags.fw_started && !_ha->flags.fw_init_done) + /* * Qlogic host adapter specific data structure. */ @@ -3339,7 +3375,11 @@ struct qla_hw_data { uint32_t fawwpn_enabled:1; uint32_t exlogins_enabled:1; uint32_t exchoffld_enabled:1; - /* 35 bits */ + + uint32_t lip_ae:1; + uint32_t n2n_ae:1; + uint32_t fw_started:1; + uint32_t fw_init_done:1; } flags; /* This spinlock is used to protect "io transactions", you must @@ -3432,7 +3472,6 @@ struct qla_hw_data { #define P2P_LOOP 3 uint8_t interrupts_on; uint32_t isp_abort_cnt; - #define PCI_DEVICE_ID_QLOGIC_ISP2532 0x2532 #define PCI_DEVICE_ID_QLOGIC_ISP8432 0x8432 #define PCI_DEVICE_ID_QLOGIC_ISP8001 0x8001 @@ -3913,6 +3952,7 @@ typedef struct scsi_qla_host { struct list_head vp_fcports; /* list of fcports */ struct list_head work_list; spinlock_t work_lock; + struct work_struct iocb_work; /* Commonly used flags and state information. 
*/ struct Scsi_Host *host; @@ -4076,6 +4116,7 @@ typedef struct scsi_qla_host { /* Count of active session/fcport */ int fcport_count; wait_queue_head_t fcport_waitQ; + wait_queue_head_t vref_waitq; } scsi_qla_host_t; struct qla27xx_image_status { @@ -4131,14 +4172,17 @@ struct qla2_sgx { mb(); \ if (__vha->flags.delete_progress) { \ atomic_dec(&__vha->vref_count); \ + wake_up(&__vha->vref_waitq); \ __bail = 1; \ } else { \ __bail = 0; \ } \ } while (0) -#define QLA_VHA_MARK_NOT_BUSY(__vha) \ +#define QLA_VHA_MARK_NOT_BUSY(__vha) do { \ atomic_dec(&__vha->vref_count); \ + wake_up(&__vha->vref_waitq); \ +} while (0) \ #define QLA_QPAIR_MARK_BUSY(__qpair, __bail) do { \ atomic_inc(&__qpair->ref_count); \ diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c index b48cce696bac..989e17b0758c 100644 --- a/drivers/scsi/qla2xxx/qla_dfs.c +++ b/drivers/scsi/qla2xxx/qla_dfs.c @@ -19,11 +19,11 @@ qla2x00_dfs_tgt_sess_show(struct seq_file *s, void *unused) struct qla_hw_data *ha = vha->hw; unsigned long flags; struct fc_port *sess = NULL; - struct qla_tgt *tgt= vha->vha_tgt.qla_tgt; + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; - seq_printf(s, "%s\n",vha->host_str); + seq_printf(s, "%s\n", vha->host_str); if (tgt) { - seq_printf(s, "Port ID Port Name Handle\n"); + seq_puts(s, "Port ID Port Name Handle\n"); spin_lock_irqsave(&ha->tgt.sess_lock, flags); list_for_each_entry(sess, &vha->vp_fcports, list) @@ -44,7 +44,6 @@ qla2x00_dfs_tgt_sess_open(struct inode *inode, struct file *file) return single_open(file, qla2x00_dfs_tgt_sess_show, vha); } - static const struct file_operations dfs_tgt_sess_ops = { .open = qla2x00_dfs_tgt_sess_open, .read = seq_read, @@ -53,6 +52,78 @@ static const struct file_operations dfs_tgt_sess_ops = { }; static int +qla2x00_dfs_tgt_port_database_show(struct seq_file *s, void *unused) +{ + scsi_qla_host_t *vha = s->private; + struct qla_hw_data *ha = vha->hw; + struct gid_list_info *gid_list; + dma_addr_t gid_list_dma; + fc_port_t fc_port; + char *id_iter; + int rc, i; + uint16_t entries, loop_id; + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; + + seq_printf(s, "%s\n", vha->host_str); + if (tgt) { + gid_list = dma_alloc_coherent(&ha->pdev->dev, + qla2x00_gid_list_size(ha), + &gid_list_dma, GFP_KERNEL); + if (!gid_list) { + ql_dbg(ql_dbg_user, vha, 0x705c, + "DMA allocation failed for %u\n", + qla2x00_gid_list_size(ha)); + return 0; + } + + rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma, + &entries); + if (rc != QLA_SUCCESS) + goto out_free_id_list; + + id_iter = (char *)gid_list; + + seq_puts(s, "Port Name Port ID Loop ID\n"); + + for (i = 0; i < entries; i++) { + struct gid_list_info *gid = + (struct gid_list_info *)id_iter; + loop_id = le16_to_cpu(gid->loop_id); + memset(&fc_port, 0, sizeof(fc_port_t)); + + fc_port.loop_id = loop_id; + + rc = qla24xx_gpdb_wait(vha, &fc_port, 0); + seq_printf(s, "%8phC %02x%02x%02x %d\n", + fc_port.port_name, fc_port.d_id.b.domain, + fc_port.d_id.b.area, fc_port.d_id.b.al_pa, + fc_port.loop_id); + id_iter += ha->gid_list_info_size; + } +out_free_id_list: + dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha), + gid_list, gid_list_dma); + } + + return 0; +} + +static int +qla2x00_dfs_tgt_port_database_open(struct inode *inode, struct file *file) +{ + scsi_qla_host_t *vha = inode->i_private; + + return single_open(file, qla2x00_dfs_tgt_port_database_show, vha); +} + +static const struct file_operations dfs_tgt_port_database_ops = { + .open = qla2x00_dfs_tgt_port_database_open, + .read = seq_read, + .llseek = 
seq_lseek, + .release = single_release, +}; + +static int qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused) { struct scsi_qla_host *vha = s->private; @@ -114,6 +185,21 @@ qla_dfs_tgt_counters_show(struct seq_file *s, void *unused) seq_printf(s, "num Q full sent = %lld\n", vha->tgt_counters.num_q_full_sent); + /* DIF stats */ + seq_printf(s, "DIF Inp Bytes = %lld\n", + vha->qla_stats.qla_dif_stats.dif_input_bytes); + seq_printf(s, "DIF Outp Bytes = %lld\n", + vha->qla_stats.qla_dif_stats.dif_output_bytes); + seq_printf(s, "DIF Inp Req = %lld\n", + vha->qla_stats.qla_dif_stats.dif_input_requests); + seq_printf(s, "DIF Outp Req = %lld\n", + vha->qla_stats.qla_dif_stats.dif_output_requests); + seq_printf(s, "DIF Guard err = %d\n", + vha->qla_stats.qla_dif_stats.dif_guard_err); + seq_printf(s, "DIF Ref tag err = %d\n", + vha->qla_stats.qla_dif_stats.dif_ref_tag_err); + seq_printf(s, "DIF App tag err = %d\n", + vha->qla_stats.qla_dif_stats.dif_app_tag_err); return 0; } @@ -281,6 +367,14 @@ create_nodes: goto out; } + ha->tgt.dfs_tgt_port_database = debugfs_create_file("tgt_port_database", + S_IRUSR, ha->dfs_dir, vha, &dfs_tgt_port_database_ops); + if (!ha->tgt.dfs_tgt_port_database) { + ql_log(ql_log_warn, vha, 0xffff, + "Unable to create debugFS tgt_port_database node.\n"); + goto out; + } + ha->dfs_fce = debugfs_create_file("fce", S_IRUSR, ha->dfs_dir, vha, &dfs_fce_ops); if (!ha->dfs_fce) { @@ -311,6 +405,11 @@ qla2x00_dfs_remove(scsi_qla_host_t *vha) ha->tgt.dfs_tgt_sess = NULL; } + if (ha->tgt.dfs_tgt_port_database) { + debugfs_remove(ha->tgt.dfs_tgt_port_database); + ha->tgt.dfs_tgt_port_database = NULL; + } + if (ha->dfs_fw_resource_cnt) { debugfs_remove(ha->dfs_fw_resource_cnt); ha->dfs_fw_resource_cnt = NULL; diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index b3d6441d1d90..5b2451745e9f 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -193,6 +193,7 @@ extern int qla24xx_post_upd_fcport_work(struct scsi_qla_host *, fc_port_t *); void qla2x00_handle_login_done_event(struct scsi_qla_host *, fc_port_t *, uint16_t *); int qla24xx_post_gnl_work(struct scsi_qla_host *, fc_port_t *); +int qla24xx_async_abort_cmd(srb_t *); /* * Global Functions in qla_mid.c source file. 
@@ -256,11 +257,11 @@ extern unsigned long qla2x00_get_async_timeout(struct scsi_qla_host *); extern void *qla2x00_alloc_iocbs(scsi_qla_host_t *, srb_t *); extern int qla2x00_issue_marker(scsi_qla_host_t *, int); extern int qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *, srb_t *, - uint32_t *, uint16_t, struct qla_tgt_cmd *); + uint32_t *, uint16_t, struct qla_tc_param *); extern int qla24xx_walk_and_build_sglist(struct qla_hw_data *, srb_t *, - uint32_t *, uint16_t, struct qla_tgt_cmd *); + uint32_t *, uint16_t, struct qla_tc_param *); extern int qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *, srb_t *, - uint32_t *, uint16_t, struct qla_tgt_cmd *); + uint32_t *, uint16_t, struct qla_tc_param *); extern int qla24xx_get_one_block_sg(uint32_t, struct qla2_sgx *, uint32_t *); extern int qla24xx_configure_prot_mode(srb_t *, uint16_t *); extern int qla24xx_build_scsi_crc_2_iocbs(srb_t *, @@ -368,7 +369,7 @@ qla2x00_get_link_status(scsi_qla_host_t *, uint16_t, struct link_statistics *, extern int qla24xx_get_isp_stats(scsi_qla_host_t *, struct link_statistics *, - dma_addr_t, uint); + dma_addr_t, uint16_t); extern int qla24xx_abort_command(srb_t *); extern int qla24xx_async_abort_command(srb_t *); @@ -472,6 +473,13 @@ qla2x00_dump_mctp_data(scsi_qla_host_t *, dma_addr_t, uint32_t, uint32_t); extern int qla26xx_dport_diagnostics(scsi_qla_host_t *, void *, uint, uint); +int qla24xx_send_mb_cmd(struct scsi_qla_host *, mbx_cmd_t *); +int qla24xx_gpdb_wait(struct scsi_qla_host *, fc_port_t *, u8); +int qla24xx_gidlist_wait(struct scsi_qla_host *, void *, dma_addr_t, + uint16_t *); +int __qla24xx_parse_gpdb(struct scsi_qla_host *, fc_port_t *, + struct port_database_24xx *); + /* * Global Function Prototypes in qla_isr.c source file. */ @@ -846,5 +854,7 @@ extern struct fc_port *qlt_find_sess_invalidate_other(scsi_qla_host_t *, uint64_t wwn, port_id_t port_id, uint16_t loop_id, struct fc_port **); void qla24xx_delete_sess_fn(struct work_struct *); void qlt_unknown_atio_work_fn(struct work_struct *); +void qlt_update_host_map(struct scsi_qla_host *, port_id_t); +void qlt_remove_target_resources(struct qla_hw_data *); #endif /* _QLA_GBL_H */ diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 32fb9007f137..f9d2fe7b1ade 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -629,7 +629,6 @@ void qla24xx_async_gpdb_sp_done(void *s, int res) struct srb *sp = s; struct scsi_qla_host *vha = sp->vha; struct qla_hw_data *ha = vha->hw; - uint64_t zero = 0; struct port_database_24xx *pd; fc_port_t *fcport = sp->fcport; u16 *mb = sp->u.iocb_cmd.u.mbx.in_mb; @@ -649,48 +648,7 @@ void qla24xx_async_gpdb_sp_done(void *s, int res) pd = (struct port_database_24xx *)sp->u.iocb_cmd.u.mbx.in; - /* Check for logged in state. */ - if (pd->current_login_state != PDS_PRLI_COMPLETE && - pd->last_login_state != PDS_PRLI_COMPLETE) { - ql_dbg(ql_dbg_mbx, vha, 0xffff, - "Unable to verify login-state (%x/%x) for " - "loop_id %x.\n", pd->current_login_state, - pd->last_login_state, fcport->loop_id); - rval = QLA_FUNCTION_FAILED; - goto gpd_error_out; - } - - if (fcport->loop_id == FC_NO_LOOP_ID || - (memcmp(fcport->port_name, (uint8_t *)&zero, 8) && - memcmp(fcport->port_name, pd->port_name, 8))) { - /* We lost the device mid way. */ - rval = QLA_NOT_LOGGED_IN; - goto gpd_error_out; - } - - /* Names are little-endian. */ - memcpy(fcport->node_name, pd->node_name, WWN_SIZE); - - /* Get port_id of device. 
*/ - fcport->d_id.b.domain = pd->port_id[0]; - fcport->d_id.b.area = pd->port_id[1]; - fcport->d_id.b.al_pa = pd->port_id[2]; - fcport->d_id.b.rsvd_1 = 0; - - /* If not target must be initiator or unknown type. */ - if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0) - fcport->port_type = FCT_INITIATOR; - else - fcport->port_type = FCT_TARGET; - - /* Passback COS information. */ - fcport->supported_classes = (pd->flags & PDF_CLASS_2) ? - FC_COS_CLASS2 : FC_COS_CLASS3; - - if (pd->prli_svc_param_word_3[0] & BIT_7) { - fcport->flags |= FCF_CONF_COMP_SUPPORTED; - fcport->conf_compl_supported = 1; - } + rval = __qla24xx_parse_gpdb(vha, fcport, pd); gpd_error_out: memset(&ea, 0, sizeof(ea)); @@ -876,10 +834,14 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport) fcport->login_retry--; if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) || - (fcport->fw_login_state == DSC_LS_PLOGI_COMP) || (fcport->fw_login_state == DSC_LS_PRLI_PEND)) return 0; + if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) { + if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline)) + return 0; + } + /* for pure Target Mode. Login will not be initiated */ if (vha->host->active_mode == MODE_TARGET) return 0; @@ -1041,10 +1003,14 @@ void qla24xx_handle_relogin_event(scsi_qla_host_t *vha, fcport->flags); if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) || - (fcport->fw_login_state == DSC_LS_PLOGI_COMP) || (fcport->fw_login_state == DSC_LS_PRLI_PEND)) return; + if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) { + if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline)) + return; + } + if (fcport->flags & FCF_ASYNC_SENT) { fcport->login_retry++; set_bit(RELOGIN_NEEDED, &vha->dpc_flags); @@ -1258,7 +1224,7 @@ qla24xx_abort_sp_done(void *ptr, int res) complete(&abt->u.abt.comp); } -static int +int qla24xx_async_abort_cmd(srb_t *cmd_sp) { scsi_qla_host_t *vha = cmd_sp->vha; @@ -3212,6 +3178,7 @@ next_check: } else { ql_dbg(ql_dbg_init, vha, 0x00d3, "Init Firmware -- success.\n"); + ha->flags.fw_started = 1; } return (rval); @@ -3374,8 +3341,8 @@ qla2x00_configure_hba(scsi_qla_host_t *vha) uint8_t domain; char connect_type[22]; struct qla_hw_data *ha = vha->hw; - unsigned long flags; scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); + port_id_t id; /* Get host addresses. */ rval = qla2x00_get_adapter_id(vha, @@ -3453,13 +3420,11 @@ qla2x00_configure_hba(scsi_qla_host_t *vha) /* Save Host port and loop ID. 
*/ /* byte order - Big Endian */ - vha->d_id.b.domain = domain; - vha->d_id.b.area = area; - vha->d_id.b.al_pa = al_pa; - - spin_lock_irqsave(&ha->vport_slock, flags); - qlt_update_vp_map(vha, SET_AL_PA); - spin_unlock_irqrestore(&ha->vport_slock, flags); + id.b.domain = domain; + id.b.area = area; + id.b.al_pa = al_pa; + id.b.rsvd_1 = 0; + qlt_update_host_map(vha, id); if (!vha->flags.init_done) ql_log(ql_log_info, vha, 0x2010, @@ -4036,6 +4001,7 @@ qla2x00_configure_loop(scsi_qla_host_t *vha) atomic_set(&vha->loop_state, LOOP_READY); ql_dbg(ql_dbg_disc, vha, 0x2069, "LOOP READY.\n"); + ha->flags.fw_init_done = 1; /* * Process any ATIO queue entries that came in @@ -5148,6 +5114,7 @@ qla2x00_update_fcports(scsi_qla_host_t *base_vha) } } atomic_dec(&vha->vref_count); + wake_up(&vha->vref_waitq); } spin_unlock_irqrestore(&ha->vport_slock, flags); } @@ -5526,6 +5493,11 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha) if (!(IS_P3P_TYPE(ha))) ha->isp_ops->reset_chip(vha); + ha->flags.n2n_ae = 0; + ha->flags.lip_ae = 0; + ha->current_topology = 0; + ha->flags.fw_started = 0; + ha->flags.fw_init_done = 0; ha->chip_reset++; atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); @@ -6802,6 +6774,8 @@ qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha) return; if (!ha->fw_major_version) return; + if (!ha->flags.fw_started) + return; ret = qla2x00_stop_firmware(vha); for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT && @@ -6815,6 +6789,9 @@ qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha) "Attempting retry of stop-firmware command.\n"); ret = qla2x00_stop_firmware(vha); } + + ha->flags.fw_started = 0; + ha->flags.fw_init_done = 0; } int diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index 535079280288..ea027f6a7fd4 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -889,7 +889,7 @@ qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx, int qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp, - uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc) + uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc) { void *next_dsd; uint8_t avail_dsds = 0; @@ -898,7 +898,6 @@ qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp, struct scatterlist *sg_prot; uint32_t *cur_dsd = dsd; uint16_t used_dsds = tot_dsds; - uint32_t prot_int; /* protection interval */ uint32_t partial; struct qla2_sgx sgx; @@ -966,7 +965,7 @@ alloc_and_fill: } else { list_add_tail(&dsd_ptr->list, &(tc->ctx->dsd_list)); - tc->ctx_dsd_alloced = 1; + *tc->ctx_dsd_alloced = 1; } @@ -1005,7 +1004,7 @@ alloc_and_fill: int qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd, - uint16_t tot_dsds, struct qla_tgt_cmd *tc) + uint16_t tot_dsds, struct qla_tc_param *tc) { void *next_dsd; uint8_t avail_dsds = 0; @@ -1066,7 +1065,7 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd, } else { list_add_tail(&dsd_ptr->list, &(tc->ctx->dsd_list)); - tc->ctx_dsd_alloced = 1; + *tc->ctx_dsd_alloced = 1; } /* add new list to cmd iocb or last list */ @@ -1092,7 +1091,7 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd, int qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp, - uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc) + uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc) { void *next_dsd; uint8_t avail_dsds = 0; @@ -1158,7 +1157,7 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, 
srb_t *sp, } else { list_add_tail(&dsd_ptr->list, &(tc->ctx->dsd_list)); - tc->ctx_dsd_alloced = 1; + *tc->ctx_dsd_alloced = 1; } /* add new list to cmd iocb or last list */ diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 3c66ea29de27..3203367a4f42 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -708,6 +708,8 @@ skip_rio: "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx); ha->isp_ops->fw_dump(vha, 1); + ha->flags.fw_init_done = 0; + ha->flags.fw_started = 0; if (IS_FWI2_CAPABLE(ha)) { if (mb[1] == 0 && mb[2] == 0) { @@ -761,6 +763,9 @@ skip_rio: break; case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */ + ha->flags.lip_ae = 1; + ha->flags.n2n_ae = 0; + ql_dbg(ql_dbg_async, vha, 0x5009, "LIP occurred (%x).\n", mb[1]); @@ -797,6 +802,10 @@ skip_rio: break; case MBA_LOOP_DOWN: /* Loop Down Event */ + ha->flags.n2n_ae = 0; + ha->flags.lip_ae = 0; + ha->current_topology = 0; + mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha)) ? RD_REG_WORD(®24->mailbox4) : 0; mbx = (IS_P3P_TYPE(ha)) ? RD_REG_WORD(®82->mailbox_out[4]) @@ -866,6 +875,9 @@ skip_rio: /* case MBA_DCBX_COMPLETE: */ case MBA_POINT_TO_POINT: /* Point-to-Point */ + ha->flags.lip_ae = 0; + ha->flags.n2n_ae = 1; + if (IS_QLA2100(ha)) break; @@ -1620,9 +1632,9 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req, QLA_LOGIO_LOGIN_RETRIED : 0; if (logio->entry_status) { ql_log(ql_log_warn, fcport->vha, 0x5034, - "Async-%s error entry - hdl=%x" + "Async-%s error entry - %8phC hdl=%x" "portid=%02x%02x%02x entry-status=%x.\n", - type, sp->handle, fcport->d_id.b.domain, + type, fcport->port_name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa, logio->entry_status); ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d, @@ -1633,8 +1645,9 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req, if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) { ql_dbg(ql_dbg_async, fcport->vha, 0x5036, - "Async-%s complete - hdl=%x portid=%02x%02x%02x " - "iop0=%x.\n", type, sp->handle, fcport->d_id.b.domain, + "Async-%s complete - %8phC hdl=%x portid=%02x%02x%02x " + "iop0=%x.\n", type, fcport->port_name, sp->handle, + fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa, le32_to_cpu(logio->io_parameter[0])); @@ -1674,6 +1687,17 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req, case LSC_SCODE_NPORT_USED: data[0] = MBS_LOOP_ID_USED; break; + case LSC_SCODE_CMD_FAILED: + if (iop[1] == 0x0606) { + /* + * PLOGI/PRLI Completed. We must have Recv PLOGI/PRLI, + * Target side acked. 
+ */ + data[0] = MBS_COMMAND_COMPLETE; + goto logio_done; + } + data[0] = MBS_COMMAND_ERROR; + break; case LSC_SCODE_NOXCB: vha->hw->exch_starvation++; if (vha->hw->exch_starvation > 5) { @@ -1695,8 +1719,9 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req, } ql_dbg(ql_dbg_async, fcport->vha, 0x5037, - "Async-%s failed - hdl=%x portid=%02x%02x%02x comp=%x " - "iop0=%x iop1=%x.\n", type, sp->handle, fcport->d_id.b.domain, + "Async-%s failed - %8phC hdl=%x portid=%02x%02x%02x comp=%x " + "iop0=%x iop1=%x.\n", type, fcport->port_name, + sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa, le16_to_cpu(logio->comp_status), le32_to_cpu(logio->io_parameter[0]), @@ -2679,7 +2704,7 @@ qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, return; abt = &sp->u.iocb_cmd; - abt->u.abt.comp_status = le32_to_cpu(pkt->nport_handle); + abt->u.abt.comp_status = le16_to_cpu(pkt->nport_handle); sp->done(sp, 0); } @@ -2693,7 +2718,7 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha, struct sts_entry_24xx *pkt; struct qla_hw_data *ha = vha->hw; - if (!vha->flags.online) + if (!ha->flags.fw_started) return; while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) { diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 35079f417417..a113ab3592a7 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -10,6 +10,28 @@ #include <linux/delay.h> #include <linux/gfp.h> +static struct mb_cmd_name { + uint16_t cmd; + const char *str; +} mb_str[] = { + {MBC_GET_PORT_DATABASE, "GPDB"}, + {MBC_GET_ID_LIST, "GIDList"}, + {MBC_GET_LINK_PRIV_STATS, "Stats"}, +}; + +static const char *mb_to_str(uint16_t cmd) +{ + int i; + struct mb_cmd_name *e; + + for (i = 0; i < ARRAY_SIZE(mb_str); i++) { + e = mb_str + i; + if (cmd == e->cmd) + return e->str; + } + return "unknown"; +} + static struct rom_cmd { uint16_t cmd; } rom_cmds[] = { @@ -2818,7 +2840,7 @@ qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id, int qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats, - dma_addr_t stats_dma, uint options) + dma_addr_t stats_dma, uint16_t options) { int rval; mbx_cmd_t mc; @@ -2828,19 +2850,17 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats, ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088, "Entered %s.\n", __func__); - mcp->mb[0] = MBC_GET_LINK_PRIV_STATS; - mcp->mb[2] = MSW(stats_dma); - mcp->mb[3] = LSW(stats_dma); - mcp->mb[6] = MSW(MSD(stats_dma)); - mcp->mb[7] = LSW(MSD(stats_dma)); - mcp->mb[8] = sizeof(struct link_statistics) / 4; - mcp->mb[9] = vha->vp_idx; - mcp->mb[10] = options; - mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; - mcp->in_mb = MBX_2|MBX_1|MBX_0; - mcp->tov = MBX_TOV_SECONDS; - mcp->flags = IOCTL_CMD; - rval = qla2x00_mailbox_command(vha, mcp); + memset(&mc, 0, sizeof(mc)); + mc.mb[0] = MBC_GET_LINK_PRIV_STATS; + mc.mb[2] = MSW(stats_dma); + mc.mb[3] = LSW(stats_dma); + mc.mb[6] = MSW(MSD(stats_dma)); + mc.mb[7] = LSW(MSD(stats_dma)); + mc.mb[8] = sizeof(struct link_statistics) / 4; + mc.mb[9] = cpu_to_le16(vha->vp_idx); + mc.mb[10] = cpu_to_le16(options); + + rval = qla24xx_send_mb_cmd(vha, &mc); if (rval == QLA_SUCCESS) { if (mcp->mb[0] != MBS_COMMAND_COMPLETE) { @@ -3603,6 +3623,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, scsi_qla_host_t *vp = NULL; unsigned long flags; int found; + port_id_t id; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6, "Entered %s.\n", __func__); @@ -3610,28 +3631,27 @@ 
qla24xx_report_id_acquisition(scsi_qla_host_t *vha, if (rptid_entry->entry_status != 0) return; + id.b.domain = rptid_entry->port_id[2]; + id.b.area = rptid_entry->port_id[1]; + id.b.al_pa = rptid_entry->port_id[0]; + id.b.rsvd_1 = 0; + if (rptid_entry->format == 0) { /* loop */ - ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b7, + ql_dbg(ql_dbg_async, vha, 0x10b7, "Format 0 : Number of VPs setup %d, number of " "VPs acquired %d.\n", rptid_entry->vp_setup, rptid_entry->vp_acquired); - ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b8, + ql_dbg(ql_dbg_async, vha, 0x10b8, "Primary port id %02x%02x%02x.\n", rptid_entry->port_id[2], rptid_entry->port_id[1], rptid_entry->port_id[0]); - vha->d_id.b.domain = rptid_entry->port_id[2]; - vha->d_id.b.area = rptid_entry->port_id[1]; - vha->d_id.b.al_pa = rptid_entry->port_id[0]; - - spin_lock_irqsave(&ha->vport_slock, flags); - qlt_update_vp_map(vha, SET_AL_PA); - spin_unlock_irqrestore(&ha->vport_slock, flags); + qlt_update_host_map(vha, id); } else if (rptid_entry->format == 1) { /* fabric */ - ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b9, + ql_dbg(ql_dbg_async, vha, 0x10b9, "Format 1: VP[%d] enabled - status %d - with " "port id %02x%02x%02x.\n", rptid_entry->vp_idx, rptid_entry->vp_status, @@ -3653,12 +3673,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, WWN_SIZE); } - vha->d_id.b.domain = rptid_entry->port_id[2]; - vha->d_id.b.area = rptid_entry->port_id[1]; - vha->d_id.b.al_pa = rptid_entry->port_id[0]; - spin_lock_irqsave(&ha->vport_slock, flags); - qlt_update_vp_map(vha, SET_AL_PA); - spin_unlock_irqrestore(&ha->vport_slock, flags); + qlt_update_host_map(vha, id); } fc_host_port_name(vha->host) = @@ -3694,12 +3709,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, if (!found) return; - vp->d_id.b.domain = rptid_entry->port_id[2]; - vp->d_id.b.area = rptid_entry->port_id[1]; - vp->d_id.b.al_pa = rptid_entry->port_id[0]; - spin_lock_irqsave(&ha->vport_slock, flags); - qlt_update_vp_map(vp, SET_AL_PA); - spin_unlock_irqrestore(&ha->vport_slock, flags); + qlt_update_host_map(vp, id); /* * Cannot configure here as we are still sitting on the @@ -5827,3 +5837,225 @@ qla26xx_dport_diagnostics(scsi_qla_host_t *vha, return rval; } + +static void qla2x00_async_mb_sp_done(void *s, int res) +{ + struct srb *sp = s; + + sp->u.iocb_cmd.u.mbx.rc = res; + + complete(&sp->u.iocb_cmd.u.mbx.comp); + /* don't free sp here. Let the caller do the free */ +} + +/* + * This mailbox uses the iocb interface to send MB command. + * This allows non-critial (non chip setup) command to go + * out in parrallel. + */ +int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp) +{ + int rval = QLA_FUNCTION_FAILED; + srb_t *sp; + struct srb_iocb *c; + + if (!vha->hw->flags.fw_started) + goto done; + + sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL); + if (!sp) + goto done; + + sp->type = SRB_MB_IOCB; + sp->name = mb_to_str(mcp->mb[0]); + + qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); + + memcpy(sp->u.iocb_cmd.u.mbx.out_mb, mcp->mb, SIZEOF_IOCB_MB_REG); + + c = &sp->u.iocb_cmd; + c->timeout = qla2x00_async_iocb_timeout; + init_completion(&c->u.mbx.comp); + + sp->done = qla2x00_async_mb_sp_done; + + rval = qla2x00_start_sp(sp); + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0xffff, + "%s: %s Failed submission. 
%x.\n", + __func__, sp->name, rval); + goto done_free_sp; + } + + ql_dbg(ql_dbg_mbx, vha, 0xffff, "MB:%s hndl %x submitted\n", + sp->name, sp->handle); + + wait_for_completion(&c->u.mbx.comp); + memcpy(mcp->mb, sp->u.iocb_cmd.u.mbx.in_mb, SIZEOF_IOCB_MB_REG); + + rval = c->u.mbx.rc; + switch (rval) { + case QLA_FUNCTION_TIMEOUT: + ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s: %s Timeout. %x.\n", + __func__, sp->name, rval); + break; + case QLA_SUCCESS: + ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s: %s done.\n", + __func__, sp->name); + sp->free(sp); + break; + default: + ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s: %s Failed. %x.\n", + __func__, sp->name, rval); + sp->free(sp); + break; + } + + return rval; + +done_free_sp: + sp->free(sp); +done: + return rval; +} + +/* + * qla24xx_gpdb_wait + * NOTE: Do not call this routine from DPC thread + */ +int qla24xx_gpdb_wait(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt) +{ + int rval = QLA_FUNCTION_FAILED; + dma_addr_t pd_dma; + struct port_database_24xx *pd; + struct qla_hw_data *ha = vha->hw; + mbx_cmd_t mc; + + if (!vha->hw->flags.fw_started) + goto done; + + pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma); + if (pd == NULL) { + ql_log(ql_log_warn, vha, 0xffff, + "Failed to allocate port database structure.\n"); + goto done_free_sp; + } + memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE)); + + memset(&mc, 0, sizeof(mc)); + mc.mb[0] = MBC_GET_PORT_DATABASE; + mc.mb[1] = cpu_to_le16(fcport->loop_id); + mc.mb[2] = MSW(pd_dma); + mc.mb[3] = LSW(pd_dma); + mc.mb[6] = MSW(MSD(pd_dma)); + mc.mb[7] = LSW(MSD(pd_dma)); + mc.mb[9] = cpu_to_le16(vha->vp_idx); + mc.mb[10] = cpu_to_le16((uint16_t)opt); + + rval = qla24xx_send_mb_cmd(vha, &mc); + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0xffff, + "%s: %8phC fail\n", __func__, fcport->port_name); + goto done_free_sp; + } + + rval = __qla24xx_parse_gpdb(vha, fcport, pd); + + ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s: %8phC done\n", + __func__, fcport->port_name); + +done_free_sp: + if (pd) + dma_pool_free(ha->s_dma_pool, pd, pd_dma); +done: + return rval; +} + +int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, + struct port_database_24xx *pd) +{ + int rval = QLA_SUCCESS; + uint64_t zero = 0; + + /* Check for logged in state. */ + if (pd->current_login_state != PDS_PRLI_COMPLETE && + pd->last_login_state != PDS_PRLI_COMPLETE) { + ql_dbg(ql_dbg_mbx, vha, 0xffff, + "Unable to verify login-state (%x/%x) for " + "loop_id %x.\n", pd->current_login_state, + pd->last_login_state, fcport->loop_id); + rval = QLA_FUNCTION_FAILED; + goto gpd_error_out; + } + + if (fcport->loop_id == FC_NO_LOOP_ID || + (memcmp(fcport->port_name, (uint8_t *)&zero, 8) && + memcmp(fcport->port_name, pd->port_name, 8))) { + /* We lost the device mid way. */ + rval = QLA_NOT_LOGGED_IN; + goto gpd_error_out; + } + + /* Names are little-endian. */ + memcpy(fcport->node_name, pd->node_name, WWN_SIZE); + memcpy(fcport->port_name, pd->port_name, WWN_SIZE); + + /* Get port_id of device. */ + fcport->d_id.b.domain = pd->port_id[0]; + fcport->d_id.b.area = pd->port_id[1]; + fcport->d_id.b.al_pa = pd->port_id[2]; + fcport->d_id.b.rsvd_1 = 0; + + /* If not target must be initiator or unknown type. */ + if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0) + fcport->port_type = FCT_INITIATOR; + else + fcport->port_type = FCT_TARGET; + + /* Passback COS information. */ + fcport->supported_classes = (pd->flags & PDF_CLASS_2) ? 
+ FC_COS_CLASS2 : FC_COS_CLASS3; + + if (pd->prli_svc_param_word_3[0] & BIT_7) { + fcport->flags |= FCF_CONF_COMP_SUPPORTED; + fcport->conf_compl_supported = 1; + } + +gpd_error_out: + return rval; +} + +/* + * qla24xx_gidlist__wait + * NOTE: don't call this routine from DPC thread. + */ +int qla24xx_gidlist_wait(struct scsi_qla_host *vha, + void *id_list, dma_addr_t id_list_dma, uint16_t *entries) +{ + int rval = QLA_FUNCTION_FAILED; + mbx_cmd_t mc; + + if (!vha->hw->flags.fw_started) + goto done; + + memset(&mc, 0, sizeof(mc)); + mc.mb[0] = MBC_GET_ID_LIST; + mc.mb[2] = MSW(id_list_dma); + mc.mb[3] = LSW(id_list_dma); + mc.mb[6] = MSW(MSD(id_list_dma)); + mc.mb[7] = LSW(MSD(id_list_dma)); + mc.mb[8] = 0; + mc.mb[9] = cpu_to_le16(vha->vp_idx); + + rval = qla24xx_send_mb_cmd(vha, &mc); + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0xffff, + "%s: fail\n", __func__); + } else { + *entries = mc.mb[1]; + ql_dbg(ql_dbg_mbx, vha, 0xffff, + "%s: done\n", __func__); + } +done: + return rval; +} diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c index c6d6f0d912ff..09a490c98763 100644 --- a/drivers/scsi/qla2xxx/qla_mid.c +++ b/drivers/scsi/qla2xxx/qla_mid.c @@ -74,13 +74,14 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha) * ensures no active vp_list traversal while the vport is removed * from the queue) */ - spin_lock_irqsave(&ha->vport_slock, flags); - while (atomic_read(&vha->vref_count)) { - spin_unlock_irqrestore(&ha->vport_slock, flags); - - msleep(500); + wait_event_timeout(vha->vref_waitq, atomic_read(&vha->vref_count), + 10*HZ); - spin_lock_irqsave(&ha->vport_slock, flags); + spin_lock_irqsave(&ha->vport_slock, flags); + if (atomic_read(&vha->vref_count)) { + ql_dbg(ql_dbg_vport, vha, 0xfffa, + "vha->vref_count=%u timeout\n", vha->vref_count.counter); + vha->vref_count = (atomic_t)ATOMIC_INIT(0); } list_del(&vha->list); qlt_update_vp_map(vha, RESET_VP_IDX); @@ -269,6 +270,7 @@ qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb) spin_lock_irqsave(&ha->vport_slock, flags); atomic_dec(&vha->vref_count); + wake_up(&vha->vref_waitq); } i++; } diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 1fed235a1b4a..41d5b09f7326 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -2560,6 +2560,20 @@ qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time) return atomic_read(&vha->loop_state) == LOOP_READY; } +static void qla2x00_iocb_work_fn(struct work_struct *work) +{ + struct scsi_qla_host *vha = container_of(work, + struct scsi_qla_host, iocb_work); + int cnt = 0; + + while (!list_empty(&vha->work_list)) { + qla2x00_do_work(vha); + cnt++; + if (cnt > 10) + break; + } +} + /* * PCI driver interface */ @@ -3078,6 +3092,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) */ qla2xxx_wake_dpc(base_vha); + INIT_WORK(&base_vha->iocb_work, qla2x00_iocb_work_fn); INIT_WORK(&ha->board_disable, qla2x00_disable_board_on_pci_error); if (IS_QLA8031(ha) || IS_MCTP_CAPABLE(ha)) { @@ -3469,6 +3484,7 @@ qla2x00_remove_one(struct pci_dev *pdev) qla2x00_free_sysfs_attr(base_vha, true); fc_remove_host(base_vha->host); + qlt_remove_target_resources(ha); scsi_remove_host(base_vha->host); @@ -4268,6 +4284,7 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht, spin_lock_init(&vha->work_lock); spin_lock_init(&vha->cmd_list_lock); init_waitqueue_head(&vha->fcport_waitQ); + init_waitqueue_head(&vha->vref_waitq); vha->gnl.size = sizeof(struct get_name_list_extended) * 
(ha->max_loop_id + 1); @@ -4319,7 +4336,11 @@ qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e) spin_lock_irqsave(&vha->work_lock, flags); list_add_tail(&e->list, &vha->work_list); spin_unlock_irqrestore(&vha->work_lock, flags); - qla2xxx_wake_dpc(vha); + + if (QLA_EARLY_LINKUP(vha->hw)) + schedule_work(&vha->iocb_work); + else + qla2xxx_wake_dpc(vha); return QLA_SUCCESS; } diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 45f5077684f0..0e03ca2ab3e5 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -130,6 +130,9 @@ static void qlt_send_term_imm_notif(struct scsi_qla_host *vha, static struct fc_port *qlt_create_sess(struct scsi_qla_host *vha, fc_port_t *fcport, bool local); void qlt_unreg_sess(struct fc_port *sess); +static void qlt_24xx_handle_abts(struct scsi_qla_host *, + struct abts_recv_from_24xx *); + /* * Global Variables */ @@ -140,6 +143,20 @@ static struct workqueue_struct *qla_tgt_wq; static DEFINE_MUTEX(qla_tgt_mutex); static LIST_HEAD(qla_tgt_glist); +static const char *prot_op_str(u32 prot_op) +{ + switch (prot_op) { + case TARGET_PROT_NORMAL: return "NORMAL"; + case TARGET_PROT_DIN_INSERT: return "DIN_INSERT"; + case TARGET_PROT_DOUT_INSERT: return "DOUT_INSERT"; + case TARGET_PROT_DIN_STRIP: return "DIN_STRIP"; + case TARGET_PROT_DOUT_STRIP: return "DOUT_STRIP"; + case TARGET_PROT_DIN_PASS: return "DIN_PASS"; + case TARGET_PROT_DOUT_PASS: return "DOUT_PASS"; + default: return "UNKNOWN"; + } +} + /* This API intentionally takes dest as a parameter, rather than returning * int value to avoid caller forgetting to issue wmb() after the store */ void qlt_do_generation_tick(struct scsi_qla_host *vha, int *dest) @@ -170,21 +187,23 @@ static inline struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha, uint8_t *d_id) { - struct qla_hw_data *ha = vha->hw; - uint8_t vp_idx; - - if ((vha->d_id.b.area != d_id[1]) || (vha->d_id.b.domain != d_id[0])) - return NULL; + struct scsi_qla_host *host; + uint32_t key = 0; - if (vha->d_id.b.al_pa == d_id[2]) + if ((vha->d_id.b.area == d_id[1]) && (vha->d_id.b.domain == d_id[0]) && + (vha->d_id.b.al_pa == d_id[2])) return vha; - BUG_ON(ha->tgt.tgt_vp_map == NULL); - vp_idx = ha->tgt.tgt_vp_map[d_id[2]].idx; - if (likely(test_bit(vp_idx, ha->vp_idx_map))) - return ha->tgt.tgt_vp_map[vp_idx].vha; + key = (uint32_t)d_id[0] << 16; + key |= (uint32_t)d_id[1] << 8; + key |= (uint32_t)d_id[2]; - return NULL; + host = btree_lookup32(&vha->hw->tgt.host_map, key); + if (!host) + ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff, + "Unable to find host %06x\n", key); + + return host; } static inline @@ -389,6 +408,8 @@ static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha, (struct abts_recv_from_24xx *)atio; struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha, entry->vp_index); + unsigned long flags; + if (unlikely(!host)) { ql_dbg(ql_dbg_tgt, vha, 0xffff, "qla_target(%d): Response pkt (ABTS_RECV_24XX) " @@ -396,9 +417,12 @@ static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha, vha->vp_idx, entry->vp_index); break; } - qlt_response_pkt(host, (response_t *)atio); + if (!ha_locked) + spin_lock_irqsave(&host->hw->hardware_lock, flags); + qlt_24xx_handle_abts(host, (struct abts_recv_from_24xx *)atio); + if (!ha_locked) + spin_unlock_irqrestore(&host->hw->hardware_lock, flags); break; - } /* case PUREX_IOCB_TYPE: ql2xmvasynctoatio */ @@ -554,6 +578,7 @@ void qla2x00_async_nack_sp_done(void *s, int res) sp->fcport->login_gen++; 
sp->fcport->fw_login_state = DSC_LS_PLOGI_COMP; sp->fcport->logout_on_delete = 1; + sp->fcport->plogi_nack_done_deadline = jiffies + HZ; break; case SRB_NACK_PRLI: @@ -613,6 +638,7 @@ int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport, break; case SRB_NACK_PRLI: fcport->fw_login_state = DSC_LS_PRLI_PEND; + fcport->deleted = 0; c = "PRLI"; break; case SRB_NACK_LOGO: @@ -1215,7 +1241,7 @@ static int qla24xx_get_loop_id(struct scsi_qla_host *vha, const uint8_t *s_id, } /* Get list of logged in devices */ - rc = qla2x00_get_id_list(vha, gid_list, gid_list_dma, &entries); + rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma, &entries); if (rc != QLA_SUCCESS) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045, "qla_target(%d): get_id_list() failed: %x\n", @@ -1551,6 +1577,9 @@ static void qlt_send_notify_ack(struct scsi_qla_host *vha, request_t *pkt; struct nack_to_isp *nack; + if (!ha->flags.fw_started) + return; + ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha); /* Send marker if required */ @@ -2013,6 +2042,70 @@ void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd) } EXPORT_SYMBOL(qlt_free_mcmd); +/* + * ha->hardware_lock supposed to be held on entry. Might drop it, then + * reacquire + */ +void qlt_send_resp_ctio(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd, + uint8_t scsi_status, uint8_t sense_key, uint8_t asc, uint8_t ascq) +{ + struct atio_from_isp *atio = &cmd->atio; + struct ctio7_to_24xx *ctio; + uint16_t temp; + + ql_dbg(ql_dbg_tgt_dif, vha, 0x3066, + "Sending response CTIO7 (vha=%p, atio=%p, scsi_status=%02x, " + "sense_key=%02x, asc=%02x, ascq=%02x", + vha, atio, scsi_status, sense_key, asc, ascq); + + ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(vha, NULL); + if (!ctio) { + ql_dbg(ql_dbg_async, vha, 0x3067, + "qla2x00t(%ld): %s failed: unable to allocate request packet", + vha->host_no, __func__); + goto out; + } + + ctio->entry_type = CTIO_TYPE7; + ctio->entry_count = 1; + ctio->handle = QLA_TGT_SKIP_HANDLE; + ctio->nport_handle = cmd->sess->loop_id; + ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); + ctio->vp_index = vha->vp_idx; + ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; + ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; + ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; + ctio->exchange_addr = atio->u.isp24.exchange_addr; + ctio->u.status1.flags = (atio->u.isp24.attr << 9) | + cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS); + temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id); + ctio->u.status1.ox_id = cpu_to_le16(temp); + ctio->u.status1.scsi_status = + cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID | scsi_status); + ctio->u.status1.response_len = cpu_to_le16(18); + ctio->u.status1.residual = cpu_to_le32(get_datalen_for_atio(atio)); + + if (ctio->u.status1.residual != 0) + ctio->u.status1.scsi_status |= + cpu_to_le16(SS_RESIDUAL_UNDER); + + /* Response code and sense key */ + put_unaligned_le32(((0x70 << 24) | (sense_key << 8)), + (&ctio->u.status1.sense_data)[0]); + /* Additional sense length */ + put_unaligned_le32(0x0a, (&ctio->u.status1.sense_data)[1]); + /* ASC and ASCQ */ + put_unaligned_le32(((asc << 24) | (ascq << 16)), + (&ctio->u.status1.sense_data)[3]); + + /* Memory Barrier */ + wmb(); + + qla2x00_start_iocbs(vha, vha->req); +out: + return; +} + /* callback from target fabric module code */ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd) { @@ -2261,7 +2354,7 @@ static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm, */ return -EAGAIN; } else - ha->tgt.cmds[h-1] = prm->cmd; + ha->tgt.cmds[h - 1] = 
prm->cmd; pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK; pkt->nport_handle = prm->cmd->loop_id; @@ -2391,6 +2484,50 @@ static inline int qlt_has_data(struct qla_tgt_cmd *cmd) return cmd->bufflen > 0; } +static void qlt_print_dif_err(struct qla_tgt_prm *prm) +{ + struct qla_tgt_cmd *cmd; + struct scsi_qla_host *vha; + + /* asc 0x10=dif error */ + if (prm->sense_buffer && (prm->sense_buffer[12] == 0x10)) { + cmd = prm->cmd; + vha = cmd->vha; + /* ASCQ */ + switch (prm->sense_buffer[13]) { + case 1: + ql_dbg(ql_dbg_tgt_dif, vha, 0xffff, + "BE detected Guard TAG ERR: lba[0x%llx|%lld] len[0x%x] " + "se_cmd=%p tag[%x]", + cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd, + cmd->atio.u.isp24.exchange_addr); + break; + case 2: + ql_dbg(ql_dbg_tgt_dif, vha, 0xffff, + "BE detected APP TAG ERR: lba[0x%llx|%lld] len[0x%x] " + "se_cmd=%p tag[%x]", + cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd, + cmd->atio.u.isp24.exchange_addr); + break; + case 3: + ql_dbg(ql_dbg_tgt_dif, vha, 0xffff, + "BE detected REF TAG ERR: lba[0x%llx|%lld] len[0x%x] " + "se_cmd=%p tag[%x]", + cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd, + cmd->atio.u.isp24.exchange_addr); + break; + default: + ql_dbg(ql_dbg_tgt_dif, vha, 0xffff, + "BE detected Dif ERR: lba[%llx|%lld] len[%x] " + "se_cmd=%p tag[%x]", + cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd, + cmd->atio.u.isp24.exchange_addr); + break; + } + ql_dump_buffer(ql_dbg_tgt_dif, vha, 0xffff, cmd->cdb, 16); + } +} + /* * Called without ha->hardware_lock held */ @@ -2512,18 +2649,9 @@ skip_explict_conf: for (i = 0; i < prm->sense_buffer_len/4; i++) ((uint32_t *)ctio->u.status1.sense_data)[i] = cpu_to_be32(((uint32_t *)prm->sense_buffer)[i]); -#if 0 - if (unlikely((prm->sense_buffer_len % 4) != 0)) { - static int q; - if (q < 10) { - ql_dbg(ql_dbg_tgt, vha, 0xe04f, - "qla_target(%d): %d bytes of sense " - "lost", prm->tgt->ha->vp_idx, - prm->sense_buffer_len % 4); - q++; - } - } -#endif + + qlt_print_dif_err(prm); + } else { ctio->u.status1.flags &= ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0); @@ -2537,19 +2665,9 @@ skip_explict_conf: /* Sense with len > 24, is it possible ??? */ } - - -/* diff */ static inline int qlt_hba_err_chk_enabled(struct se_cmd *se_cmd) { - /* - * Uncomment when corresponding SCSI changes are done. 
- * - if (!sp->cmd->prot_chk) - return 0; - * - */ switch (se_cmd->prot_op) { case TARGET_PROT_DOUT_INSERT: case TARGET_PROT_DIN_STRIP: @@ -2570,16 +2688,38 @@ qlt_hba_err_chk_enabled(struct se_cmd *se_cmd) return 0; } +static inline int +qla_tgt_ref_mask_check(struct se_cmd *se_cmd) +{ + switch (se_cmd->prot_op) { + case TARGET_PROT_DIN_INSERT: + case TARGET_PROT_DOUT_INSERT: + case TARGET_PROT_DIN_STRIP: + case TARGET_PROT_DOUT_STRIP: + case TARGET_PROT_DIN_PASS: + case TARGET_PROT_DOUT_PASS: + return 1; + default: + return 0; + } + return 0; +} + /* - * qla24xx_set_t10dif_tags_from_cmd - Extract Ref and App tags from SCSI command - * + * qla_tgt_set_dif_tags - Extract Ref and App tags from SCSI command */ -static inline void -qlt_set_t10dif_tags(struct se_cmd *se_cmd, struct crc_context *ctx) +static void +qla_tgt_set_dif_tags(struct qla_tgt_cmd *cmd, struct crc_context *ctx, + uint16_t *pfw_prot_opts) { + struct se_cmd *se_cmd = &cmd->se_cmd; uint32_t lba = 0xffffffff & se_cmd->t_task_lba; + scsi_qla_host_t *vha = cmd->tgt->vha; + struct qla_hw_data *ha = vha->hw; + uint32_t t32 = 0; - /* wait til Mode Sense/Select cmd, modepage Ah, subpage 2 + /* + * wait till Mode Sense/Select cmd, modepage Ah, subpage 2 * have been immplemented by TCM, before AppTag is avail. * Look for modesense_handlers[] */ @@ -2587,65 +2727,73 @@ qlt_set_t10dif_tags(struct se_cmd *se_cmd, struct crc_context *ctx) ctx->app_tag_mask[0] = 0x0; ctx->app_tag_mask[1] = 0x0; + if (IS_PI_UNINIT_CAPABLE(ha)) { + if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) || + (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT)) + *pfw_prot_opts |= PO_DIS_VALD_APP_ESC; + else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT) + *pfw_prot_opts |= PO_DIS_VALD_APP_REF_ESC; + } + + t32 = ha->tgt.tgt_ops->get_dif_tags(cmd, pfw_prot_opts); + switch (se_cmd->prot_type) { case TARGET_DIF_TYPE0_PROT: /* - * No check for ql2xenablehba_err_chk, as it would be an - * I/O error if hba tag generation is not done. + * No check for ql2xenablehba_err_chk, as it + * would be an I/O error if hba tag generation + * is not done. */ ctx->ref_tag = cpu_to_le32(lba); - - if (!qlt_hba_err_chk_enabled(se_cmd)) - break; - /* enable ALL bytes of the ref tag */ ctx->ref_tag_mask[0] = 0xff; ctx->ref_tag_mask[1] = 0xff; ctx->ref_tag_mask[2] = 0xff; ctx->ref_tag_mask[3] = 0xff; break; - /* - * For TYpe 1 protection: 16 bit GUARD tag, 32 bit REF tag, and - * 16 bit app tag. - */ case TARGET_DIF_TYPE1_PROT: - ctx->ref_tag = cpu_to_le32(lba); - - if (!qlt_hba_err_chk_enabled(se_cmd)) - break; - - /* enable ALL bytes of the ref tag */ - ctx->ref_tag_mask[0] = 0xff; - ctx->ref_tag_mask[1] = 0xff; - ctx->ref_tag_mask[2] = 0xff; - ctx->ref_tag_mask[3] = 0xff; - break; - /* - * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to - * match LBA in CDB + N - */ + /* + * For TYPE 1 protection: 16 bit GUARD tag, 32 bit + * REF tag, and 16 bit app tag. 
+ */ + ctx->ref_tag = cpu_to_le32(lba); + if (!qla_tgt_ref_mask_check(se_cmd) || + !(ha->tgt.tgt_ops->chk_dif_tags(t32))) { + *pfw_prot_opts |= PO_DIS_REF_TAG_VALD; + break; + } + /* enable ALL bytes of the ref tag */ + ctx->ref_tag_mask[0] = 0xff; + ctx->ref_tag_mask[1] = 0xff; + ctx->ref_tag_mask[2] = 0xff; + ctx->ref_tag_mask[3] = 0xff; + break; case TARGET_DIF_TYPE2_PROT: - ctx->ref_tag = cpu_to_le32(lba); - - if (!qlt_hba_err_chk_enabled(se_cmd)) - break; - - /* enable ALL bytes of the ref tag */ - ctx->ref_tag_mask[0] = 0xff; - ctx->ref_tag_mask[1] = 0xff; - ctx->ref_tag_mask[2] = 0xff; - ctx->ref_tag_mask[3] = 0xff; - break; - - /* For Type 3 protection: 16 bit GUARD only */ + /* + * For TYPE 2 protection: 16 bit GUARD + 32 bit REF + * tag has to match LBA in CDB + N + */ + ctx->ref_tag = cpu_to_le32(lba); + if (!qla_tgt_ref_mask_check(se_cmd) || + !(ha->tgt.tgt_ops->chk_dif_tags(t32))) { + *pfw_prot_opts |= PO_DIS_REF_TAG_VALD; + break; + } + /* enable ALL bytes of the ref tag */ + ctx->ref_tag_mask[0] = 0xff; + ctx->ref_tag_mask[1] = 0xff; + ctx->ref_tag_mask[2] = 0xff; + ctx->ref_tag_mask[3] = 0xff; + break; case TARGET_DIF_TYPE3_PROT: - ctx->ref_tag_mask[0] = ctx->ref_tag_mask[1] = - ctx->ref_tag_mask[2] = ctx->ref_tag_mask[3] = 0x00; - break; + /* For TYPE 3 protection: 16 bit GUARD only */ + *pfw_prot_opts |= PO_DIS_REF_TAG_VALD; + ctx->ref_tag_mask[0] = ctx->ref_tag_mask[1] = + ctx->ref_tag_mask[2] = ctx->ref_tag_mask[3] = 0x00; + break; } } - static inline int qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha) { @@ -2664,6 +2812,7 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha) struct se_cmd *se_cmd = &cmd->se_cmd; uint32_t h; struct atio_from_isp *atio = &prm->cmd->atio; + struct qla_tc_param tc; uint16_t t16; ha = vha->hw; @@ -2689,16 +2838,15 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha) case TARGET_PROT_DIN_INSERT: case TARGET_PROT_DOUT_STRIP: transfer_length = data_bytes; - data_bytes += dif_bytes; + if (cmd->prot_sg_cnt) + data_bytes += dif_bytes; break; - case TARGET_PROT_DIN_STRIP: case TARGET_PROT_DOUT_INSERT: case TARGET_PROT_DIN_PASS: case TARGET_PROT_DOUT_PASS: transfer_length = data_bytes + dif_bytes; break; - default: BUG(); break; @@ -2734,7 +2882,6 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha) break; } - /* ---- PKT ---- */ /* Update entry type to indicate Command Type CRC_2 IOCB */ pkt->entry_type = CTIO_CRC2; @@ -2752,9 +2899,8 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha) } else ha->tgt.cmds[h-1] = prm->cmd; - pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK; - pkt->nport_handle = prm->cmd->loop_id; + pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id); pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; @@ -2775,12 +2921,10 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha) else if (cmd->dma_data_direction == DMA_FROM_DEVICE) pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_OUT); - pkt->dseg_count = prm->tot_dsds; /* Fibre channel byte count */ pkt->transfer_length = cpu_to_le32(transfer_length); - /* ----- CRC context -------- */ /* Allocate CRC context from global pool */ @@ -2800,13 +2944,12 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha) /* Set handle */ crc_ctx_pkt->handle = pkt->handle; - qlt_set_t10dif_tags(se_cmd, crc_ctx_pkt); + qla_tgt_set_dif_tags(cmd, crc_ctx_pkt, &fw_prot_opts); 
pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma)); pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma)); pkt->crc_context_len = CRC_CONTEXT_LEN_FW; - if (!bundling) { cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address; } else { @@ -2827,16 +2970,24 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha) crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes); crc_ctx_pkt->guard_seed = cpu_to_le16(0); + memset((uint8_t *)&tc, 0 , sizeof(tc)); + tc.vha = vha; + tc.blk_sz = cmd->blk_sz; + tc.bufflen = cmd->bufflen; + tc.sg = cmd->sg; + tc.prot_sg = cmd->prot_sg; + tc.ctx = crc_ctx_pkt; + tc.ctx_dsd_alloced = &cmd->ctx_dsd_alloced; /* Walks data segments */ pkt->flags |= cpu_to_le16(CTIO7_FLAGS_DSD_PTR); if (!bundling && prm->prot_seg_cnt) { if (qla24xx_walk_and_build_sglist_no_difb(ha, NULL, cur_dsd, - prm->tot_dsds, cmd)) + prm->tot_dsds, &tc)) goto crc_queuing_error; } else if (qla24xx_walk_and_build_sglist(ha, NULL, cur_dsd, - (prm->tot_dsds - prm->prot_seg_cnt), cmd)) + (prm->tot_dsds - prm->prot_seg_cnt), &tc)) goto crc_queuing_error; if (bundling && prm->prot_seg_cnt) { @@ -2845,18 +2996,18 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha) cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address; if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd, - prm->prot_seg_cnt, cmd)) + prm->prot_seg_cnt, &tc)) goto crc_queuing_error; } return QLA_SUCCESS; crc_queuing_error: /* Cleanup will be performed by the caller */ + vha->hw->tgt.cmds[h - 1] = NULL; return QLA_FUNCTION_FAILED; } - /* * Callback to setup response of xmit_type of QLA_TGT_XMIT_DATA and * * QLA_TGT_XMIT_STATUS for >= 24xx silicon @@ -2906,7 +3057,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, else vha->tgt_counters.core_qla_que_buf++; - if (!vha->flags.online || cmd->reset_count != ha->chip_reset) { + if (!ha->flags.fw_started || cmd->reset_count != ha->chip_reset) { /* * Either the port is not online or this request was from * previous life, just abort the processing. @@ -3047,7 +3198,7 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd) spin_lock_irqsave(&ha->hardware_lock, flags); - if (!vha->flags.online || (cmd->reset_count != ha->chip_reset) || + if (!ha->flags.fw_started || (cmd->reset_count != ha->chip_reset) || (cmd->sess && cmd->sess->deleted)) { /* * Either the port is not online or this request was from @@ -3104,139 +3255,113 @@ EXPORT_SYMBOL(qlt_rdy_to_xfer); /* - * Checks the guard or meta-data for the type of error - * detected by the HBA. + * it is assumed either hardware_lock or qpair lock is held. 
*/ -static inline int +static void qlt_handle_dif_error(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd, - struct ctio_crc_from_fw *sts) + struct ctio_crc_from_fw *sts) { uint8_t *ap = &sts->actual_dif[0]; uint8_t *ep = &sts->expected_dif[0]; - uint32_t e_ref_tag, a_ref_tag; - uint16_t e_app_tag, a_app_tag; - uint16_t e_guard, a_guard; uint64_t lba = cmd->se_cmd.t_task_lba; + uint8_t scsi_status, sense_key, asc, ascq; + unsigned long flags; - a_guard = be16_to_cpu(*(uint16_t *)(ap + 0)); - a_app_tag = be16_to_cpu(*(uint16_t *)(ap + 2)); - a_ref_tag = be32_to_cpu(*(uint32_t *)(ap + 4)); - - e_guard = be16_to_cpu(*(uint16_t *)(ep + 0)); - e_app_tag = be16_to_cpu(*(uint16_t *)(ep + 2)); - e_ref_tag = be32_to_cpu(*(uint32_t *)(ep + 4)); - - ql_dbg(ql_dbg_tgt, vha, 0xe075, - "iocb(s) %p Returned STATUS.\n", sts); - - ql_dbg(ql_dbg_tgt, vha, 0xf075, - "dif check TGT cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x]\n", - cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba, - a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, a_guard, e_guard); - - /* - * Ignore sector if: - * For type 3: ref & app tag is all 'f's - * For type 0,1,2: app tag is all 'f's - */ - if ((a_app_tag == 0xffff) && - ((cmd->se_cmd.prot_type != TARGET_DIF_TYPE3_PROT) || - (a_ref_tag == 0xffffffff))) { - uint32_t blocks_done; - - /* 2TB boundary case covered automatically with this */ - blocks_done = e_ref_tag - (uint32_t)lba + 1; - cmd->se_cmd.bad_sector = e_ref_tag; - cmd->se_cmd.pi_err = 0; - ql_dbg(ql_dbg_tgt, vha, 0xf074, - "need to return scsi good\n"); - - /* Update protection tag */ - if (cmd->prot_sg_cnt) { - uint32_t i, k = 0, num_ent; - struct scatterlist *sg, *sgl; - - - sgl = cmd->prot_sg; - - /* Patch the corresponding protection tags */ - for_each_sg(sgl, sg, cmd->prot_sg_cnt, i) { - num_ent = sg_dma_len(sg) / 8; - if (k + num_ent < blocks_done) { - k += num_ent; - continue; - } - k = blocks_done; - break; - } + cmd->trc_flags |= TRC_DIF_ERR; - if (k != blocks_done) { - ql_log(ql_log_warn, vha, 0xf076, - "unexpected tag values tag:lba=%u:%llu)\n", - e_ref_tag, (unsigned long long)lba); - goto out; - } + cmd->a_guard = be16_to_cpu(*(uint16_t *)(ap + 0)); + cmd->a_app_tag = be16_to_cpu(*(uint16_t *)(ap + 2)); + cmd->a_ref_tag = be32_to_cpu(*(uint32_t *)(ap + 4)); -#if 0 - struct sd_dif_tuple *spt; - /* TODO: - * This section came from initiator. Is it valid here? - * should ulp be override with actual val??? 
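The actual/expected values captured here come from the 8-byte T10 PI tuple the firmware returns in the CTIO status: a big-endian 16-bit guard (CRC of the data block) at offset 0, a 16-bit application tag at offset 2, and a 32-bit reference tag at offset 4, which is why ap and ep are decoded with be16_to_cpu()/be32_to_cpu() at exactly those offsets. The same decode in a freestanding sketch:

#include <stdint.h>

/* decode one big-endian T10 PI tuple: guard | app tag | ref tag */
static void decode_pi_tuple(const uint8_t *p, uint16_t *guard,
			    uint16_t *app_tag, uint32_t *ref_tag)
{
	*guard   = ((uint16_t)p[0] << 8) | p[1];
	*app_tag = ((uint16_t)p[2] << 8) | p[3];
	*ref_tag = ((uint32_t)p[4] << 24) | ((uint32_t)p[5] << 16) |
		   ((uint32_t)p[6] << 8) | p[7];
}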
- */ - spt = page_address(sg_page(sg)) + sg->offset; - spt += j; + cmd->e_guard = be16_to_cpu(*(uint16_t *)(ep + 0)); + cmd->e_app_tag = be16_to_cpu(*(uint16_t *)(ep + 2)); + cmd->e_ref_tag = be32_to_cpu(*(uint32_t *)(ep + 4)); - spt->app_tag = 0xffff; - if (cmd->se_cmd.prot_type == SCSI_PROT_DIF_TYPE3) - spt->ref_tag = 0xffffffff; -#endif - } + ql_dbg(ql_dbg_tgt_dif, vha, 0xf075, + "%s: aborted %d state %d\n", __func__, cmd->aborted, cmd->state); - return 0; - } + scsi_status = sense_key = asc = ascq = 0; - /* check guard */ - if (e_guard != a_guard) { - cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED; - cmd->se_cmd.bad_sector = cmd->se_cmd.t_task_lba; - - ql_log(ql_log_warn, vha, 0xe076, - "Guard ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n", - cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba, - a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, - a_guard, e_guard, cmd); - goto out; + /* check appl tag */ + if (cmd->e_app_tag != cmd->a_app_tag) { + ql_dbg(ql_dbg_tgt_dif, vha, 0xffff, + "App Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] " + "Ref[%x|%x], App[%x|%x], " + "Guard [%x|%x] cmd=%p ox_id[%04x]", + cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks, + cmd->a_ref_tag, cmd->e_ref_tag, + cmd->a_app_tag, cmd->e_app_tag, + cmd->a_guard, cmd->e_guard, + cmd, cmd->atio.u.isp24.fcp_hdr.ox_id); + + cmd->dif_err_code = DIF_ERR_APP; + scsi_status = SAM_STAT_CHECK_CONDITION; + sense_key = ABORTED_COMMAND; + asc = 0x10; + ascq = 0x2; } /* check ref tag */ - if (e_ref_tag != a_ref_tag) { - cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED; - cmd->se_cmd.bad_sector = e_ref_tag; - - ql_log(ql_log_warn, vha, 0xe077, - "Ref Tag ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n", - cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba, - a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, - a_guard, e_guard, cmd); + if (cmd->e_ref_tag != cmd->a_ref_tag) { + ql_dbg(ql_dbg_tgt_dif, vha, 0xffff, + "Ref Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] " + "Ref[%x|%x], App[%x|%x], " + "Guard[%x|%x] cmd=%p ox_id[%04x] ", + cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks, + cmd->a_ref_tag, cmd->e_ref_tag, + cmd->a_app_tag, cmd->e_app_tag, + cmd->a_guard, cmd->e_guard, + cmd, cmd->atio.u.isp24.fcp_hdr.ox_id); + + cmd->dif_err_code = DIF_ERR_REF; + scsi_status = SAM_STAT_CHECK_CONDITION; + sense_key = ABORTED_COMMAND; + asc = 0x10; + ascq = 0x3; goto out; } - /* check appl tag */ - if (e_app_tag != a_app_tag) { - cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED; - cmd->se_cmd.bad_sector = cmd->se_cmd.t_task_lba; - - ql_log(ql_log_warn, vha, 0xe078, - "App Tag ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n", - cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba, - a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, - a_guard, e_guard, cmd); - goto out; + /* check guard */ + if (cmd->e_guard != cmd->a_guard) { + ql_dbg(ql_dbg_tgt_dif, vha, 0xffff, + "Guard ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] " + "Ref[%x|%x], App[%x|%x], " + "Guard [%x|%x] cmd=%p ox_id[%04x]", + cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks, + cmd->a_ref_tag, cmd->e_ref_tag, + cmd->a_app_tag, cmd->e_app_tag, + cmd->a_guard, cmd->e_guard, + cmd, cmd->atio.u.isp24.fcp_hdr.ox_id); + cmd->dif_err_code = DIF_ERR_GRD; + scsi_status = SAM_STAT_CHECK_CONDITION; + sense_key = ABORTED_COMMAND; + asc = 0x10; + ascq = 0x1; } out: - return 
1; -} + switch (cmd->state) { + case QLA_TGT_STATE_NEED_DATA: + /* handle_data will load DIF error code */ + cmd->state = QLA_TGT_STATE_DATA_IN; + vha->hw->tgt.tgt_ops->handle_data(cmd); + break; + default: + spin_lock_irqsave(&cmd->cmd_lock, flags); + if (cmd->aborted) { + spin_unlock_irqrestore(&cmd->cmd_lock, flags); + vha->hw->tgt.tgt_ops->free_cmd(cmd); + break; + } + spin_unlock_irqrestore(&cmd->cmd_lock, flags); + qlt_send_resp_ctio(vha, cmd, scsi_status, sense_key, asc, ascq); + /* assume scsi status gets out on the wire. + * Will not wait for completion. + */ + vha->hw->tgt.tgt_ops->free_cmd(cmd); + break; + } +} /* If hardware_lock held on entry, might drop it, then reaquire */ /* This function sends the appropriate CTIO to ISP 2xxx or 24xx */ @@ -3251,7 +3376,7 @@ static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha, ql_dbg(ql_dbg_tgt_tmr, vha, 0xe01c, "Sending TERM ELS CTIO (ha=%p)\n", ha); - pkt = (request_t *)qla2x00_alloc_iocbs_ready(vha, NULL); + pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL); if (pkt == NULL) { ql_dbg(ql_dbg_tgt, vha, 0xe080, "qla_target(%d): %s failed: unable to allocate " @@ -3543,6 +3668,16 @@ static int qlt_term_ctio_exchange(struct scsi_qla_host *vha, void *ctio, { int term = 0; + if (cmd->se_cmd.prot_op) + ql_dbg(ql_dbg_tgt_dif, vha, 0xffff, + "Term DIF cmd: lba[0x%llx|%lld] len[0x%x] " + "se_cmd=%p tag[%x] op %#x/%s", + cmd->lba, cmd->lba, + cmd->num_blks, &cmd->se_cmd, + cmd->atio.u.isp24.exchange_addr, + cmd->se_cmd.prot_op, + prot_op_str(cmd->se_cmd.prot_op)); + if (ctio != NULL) { struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio; term = !(c->flags & @@ -3760,32 +3895,15 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle, struct ctio_crc_from_fw *crc = (struct ctio_crc_from_fw *)ctio; ql_dbg(ql_dbg_tgt_mgt, vha, 0xf073, - "qla_target(%d): CTIO with DIF_ERROR status %x received (state %x, se_cmd %p) actual_dif[0x%llx] expect_dif[0x%llx]\n", + "qla_target(%d): CTIO with DIF_ERROR status %x " + "received (state %x, ulp_cmd %p) actual_dif[0x%llx] " + "expect_dif[0x%llx]\n", vha->vp_idx, status, cmd->state, se_cmd, *((u64 *)&crc->actual_dif[0]), *((u64 *)&crc->expected_dif[0])); - if (qlt_handle_dif_error(vha, cmd, ctio)) { - if (cmd->state == QLA_TGT_STATE_NEED_DATA) { - /* scsi Write/xfer rdy complete */ - goto skip_term; - } else { - /* scsi read/xmit respond complete - * call handle dif to send scsi status - * rather than terminate exchange. - */ - cmd->state = QLA_TGT_STATE_PROCESSED; - ha->tgt.tgt_ops->handle_dif_err(cmd); - return; - } - } else { - /* Need to generate a SCSI good completion. - * because FW did not send scsi status. - */ - status = 0; - goto skip_term; - } - break; + qlt_handle_dif_error(vha, cmd, ctio); + return; } default: ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b, @@ -3808,7 +3926,6 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle, return; } } -skip_term: if (cmd->state == QLA_TGT_STATE_PROCESSED) { cmd->trc_flags |= TRC_CTIO_DONE; @@ -4584,7 +4701,8 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha, } if (sess != NULL) { - if (sess->fw_login_state == DSC_LS_PLOGI_PEND) { + if (sess->fw_login_state != DSC_LS_PLOGI_PEND && + sess->fw_login_state != DSC_LS_PLOGI_COMP) { /* * Impatient initiator sent PRLI before last * PLOGI could finish. 
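Each tag-mismatch branch above fills in the same CHECK CONDITION scaffolding and differs only in dif_err_code and the additional sense code qualifier: ABORTED COMMAND, ASC 0x10, with ASCQ 1, 2 or 3 selecting the guard, application-tag or reference-tag failure, matching SBC's LOGICAL BLOCK ... CHECK FAILED sense data. The mapping restated as a lookup table (a sketch; the driver spells it out branch by branch):

#include <stdint.h>

enum { DIF_ERR_NONE, DIF_ERR_GRD, DIF_ERR_REF, DIF_ERR_APP };

struct dif_sense { uint8_t sense_key, asc, ascq; };

/* ABORTED COMMAND (0x0b), ASC 0x10; ASCQ picks the failed tag check */
static const struct dif_sense dif_sense_tbl[] = {
	[DIF_ERR_GRD] = { 0x0b, 0x10, 0x01 },	/* guard check failed */
	[DIF_ERR_APP] = { 0x0b, 0x10, 0x02 },	/* application tag check failed */
	[DIF_ERR_REF] = { 0x0b, 0x10, 0x03 },	/* reference tag check failed */
};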
Will force him to re-try, @@ -4623,15 +4741,23 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha, /* Make session global (not used in fabric mode) */ if (ha->current_topology != ISP_CFG_F) { - set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); - set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); - qla2xxx_wake_dpc(vha); + if (sess) { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post nack\n", + __func__, __LINE__, sess->port_name); + qla24xx_post_nack_work(vha, sess, iocb, + SRB_NACK_PRLI); + res = 0; + } else { + set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); + set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + } } else { if (sess) { ql_dbg(ql_dbg_disc, vha, 0xffff, - "%s %d %8phC post nack\n", - __func__, __LINE__, sess->port_name); - + "%s %d %8phC post nack\n", + __func__, __LINE__, sess->port_name); qla24xx_post_nack_work(vha, sess, iocb, SRB_NACK_PRLI); res = 0; @@ -4639,7 +4765,6 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha, } break; - case ELS_TPRLO: if (le16_to_cpu(iocb->u.isp24.flags) & NOTIFY24XX_FLAGS_GLOBAL_TPRLO) { @@ -5079,16 +5204,22 @@ qlt_send_busy(struct scsi_qla_host *vha, static int qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha, - struct atio_from_isp *atio) + struct atio_from_isp *atio, bool ha_locked) { struct qla_hw_data *ha = vha->hw; uint16_t status; + unsigned long flags; if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha)) return 0; + if (!ha_locked) + spin_lock_irqsave(&ha->hardware_lock, flags); status = temp_sam_status; qlt_send_busy(vha, atio, status); + if (!ha_locked) + spin_unlock_irqrestore(&ha->hardware_lock, flags); + return 1; } @@ -5103,7 +5234,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha, unsigned long flags; if (unlikely(tgt == NULL)) { - ql_dbg(ql_dbg_io, vha, 0x3064, + ql_dbg(ql_dbg_tgt, vha, 0x3064, "ATIO pkt, but no tgt (ha %p)", ha); return; } @@ -5133,7 +5264,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha, if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) { - rc = qlt_chk_qfull_thresh_hold(vha, atio); + rc = qlt_chk_qfull_thresh_hold(vha, atio, ha_locked); if (rc != 0) { tgt->atio_irq_cmd_count--; return; @@ -5256,7 +5387,7 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt) break; } - rc = qlt_chk_qfull_thresh_hold(vha, atio); + rc = qlt_chk_qfull_thresh_hold(vha, atio, true); if (rc != 0) { tgt->irq_cmd_count--; return; @@ -5531,7 +5662,7 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha, fcport->loop_id = loop_id; - rc = qla2x00_get_port_database(vha, fcport, 0); + rc = qla24xx_gpdb_wait(vha, fcport, 0); if (rc != QLA_SUCCESS) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070, "qla_target(%d): Failed to retrieve fcport " @@ -5713,30 +5844,23 @@ static void qlt_abort_work(struct qla_tgt *tgt, } } - spin_lock_irqsave(&ha->hardware_lock, flags); - - if (tgt->tgt_stop) - goto out_term; - rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess); + ha->tgt.tgt_ops->put_sess(sess); + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2); + if (rc != 0) goto out_term; - spin_unlock_irqrestore(&ha->hardware_lock, flags); - if (sess) - ha->tgt.tgt_ops->put_sess(sess); - spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2); return; out_term2: - spin_lock_irqsave(&ha->hardware_lock, flags); + if (sess) + ha->tgt.tgt_ops->put_sess(sess); + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2); out_term: + spin_lock_irqsave(&ha->hardware_lock, flags); qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false); spin_unlock_irqrestore(&ha->hardware_lock, 
flags); - - if (sess) - ha->tgt.tgt_ops->put_sess(sess); - spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2); } static void qlt_tmr_work(struct qla_tgt *tgt, @@ -5756,7 +5880,7 @@ static void qlt_tmr_work(struct qla_tgt *tgt, spin_lock_irqsave(&ha->tgt.sess_lock, flags); if (tgt->tgt_stop) - goto out_term; + goto out_term2; s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id; sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id); @@ -5768,11 +5892,11 @@ static void qlt_tmr_work(struct qla_tgt *tgt, spin_lock_irqsave(&ha->tgt.sess_lock, flags); if (!sess) - goto out_term; + goto out_term2; } else { if (sess->deleted) { sess = NULL; - goto out_term; + goto out_term2; } if (!kref_get_unless_zero(&sess->sess_kref)) { @@ -5780,7 +5904,7 @@ static void qlt_tmr_work(struct qla_tgt *tgt, "%s: kref_get fail %8phC\n", __func__, sess->port_name); sess = NULL; - goto out_term; + goto out_term2; } } @@ -5790,17 +5914,19 @@ static void qlt_tmr_work(struct qla_tgt *tgt, unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun); rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0); - if (rc != 0) - goto out_term; - ha->tgt.tgt_ops->put_sess(sess); spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); + + if (rc != 0) + goto out_term; return; +out_term2: + if (sess) + ha->tgt.tgt_ops->put_sess(sess); + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); out_term: qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1, 0); - ha->tgt.tgt_ops->put_sess(sess); - spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); } static void qlt_sess_work_fn(struct work_struct *work) @@ -5893,13 +6019,13 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha) tgt->datasegs_per_cmd = QLA_TGT_DATASEGS_PER_CMD_24XX; tgt->datasegs_per_cont = QLA_TGT_DATASEGS_PER_CONT_24XX; - if (base_vha->fc_vport) - return 0; - mutex_lock(&qla_tgt_mutex); list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist); mutex_unlock(&qla_tgt_mutex); + if (ha->tgt.tgt_ops && ha->tgt.tgt_ops->add_target) + ha->tgt.tgt_ops->add_target(base_vha); + return 0; } @@ -5928,6 +6054,17 @@ int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha) return 0; } +void qlt_remove_target_resources(struct qla_hw_data *ha) +{ + struct scsi_qla_host *node; + u32 key = 0; + + btree_for_each_safe32(&ha->tgt.host_map, key, node) + btree_remove32(&ha->tgt.host_map, key); + + btree_destroy32(&ha->tgt.host_map); +} + static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn, unsigned char *b) { @@ -6234,7 +6371,7 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked) struct atio_from_isp *pkt; int cnt, i; - if (!vha->flags.online) + if (!ha->flags.fw_started) return; while ((ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) || @@ -6581,6 +6718,8 @@ qlt_modify_vp_config(struct scsi_qla_host *vha, void qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha) { + int rc; + if (!QLA_TGT_MODE_ENABLED()) return; @@ -6600,6 +6739,13 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha) qlt_unknown_atio_work_fn); qlt_clear_mode(base_vha); + + rc = btree_init32(&ha->tgt.host_map); + if (rc) + ql_log(ql_log_info, base_vha, 0xffff, + "Unable to initialize ha->host_map btree\n"); + + qlt_update_vp_map(base_vha, SET_VP_IDX); } irqreturn_t @@ -6642,6 +6788,8 @@ qlt_handle_abts_recv_work(struct work_struct *work) spin_lock_irqsave(&ha->hardware_lock, flags); qlt_response_pkt_all_vps(vha, (response_t *)&op->atio); spin_unlock_irqrestore(&ha->hardware_lock, flags); + + kfree(op); } void @@ -6706,25 
+6854,69 @@ qlt_mem_free(struct qla_hw_data *ha) void qlt_update_vp_map(struct scsi_qla_host *vha, int cmd) { + void *slot; + u32 key; + int rc; + if (!QLA_TGT_MODE_ENABLED()) return; + key = vha->d_id.b24; + switch (cmd) { case SET_VP_IDX: vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha; break; case SET_AL_PA: - vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = vha->vp_idx; + slot = btree_lookup32(&vha->hw->tgt.host_map, key); + if (!slot) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff, + "Save vha in host_map %p %06x\n", vha, key); + rc = btree_insert32(&vha->hw->tgt.host_map, + key, vha, GFP_ATOMIC); + if (rc) + ql_log(ql_log_info, vha, 0xffff, + "Unable to insert s_id into host_map: %06x\n", + key); + return; + } + ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff, + "replace existing vha in host_map %p %06x\n", vha, key); + btree_update32(&vha->hw->tgt.host_map, key, vha); break; case RESET_VP_IDX: vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL; break; case RESET_AL_PA: - vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = 0; + ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff, + "clear vha in host_map %p %06x\n", vha, key); + slot = btree_lookup32(&vha->hw->tgt.host_map, key); + if (slot) + btree_remove32(&vha->hw->tgt.host_map, key); + vha->d_id.b24 = 0; break; } } +void qlt_update_host_map(struct scsi_qla_host *vha, port_id_t id) +{ + unsigned long flags; + struct qla_hw_data *ha = vha->hw; + + if (!vha->d_id.b24) { + spin_lock_irqsave(&ha->vport_slock, flags); + vha->d_id = id; + qlt_update_vp_map(vha, SET_AL_PA); + spin_unlock_irqrestore(&ha->vport_slock, flags); + } else if (vha->d_id.b24 != id.b24) { + spin_lock_irqsave(&ha->vport_slock, flags); + qlt_update_vp_map(vha, RESET_AL_PA); + vha->d_id = id; + qlt_update_vp_map(vha, SET_AL_PA); + spin_unlock_irqrestore(&ha->vport_slock, flags); + } +} + static int __init qlt_parse_ini_mode(void) { if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0) diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h index a7f90dcaae37..d64420251194 100644 --- a/drivers/scsi/qla2xxx/qla_target.h +++ b/drivers/scsi/qla2xxx/qla_target.h @@ -378,6 +378,14 @@ static inline void adjust_corrupted_atio(struct atio_from_isp *atio) atio->u.isp24.fcp_cmnd.add_cdb_len = 0; } +static inline int get_datalen_for_atio(struct atio_from_isp *atio) +{ + int len = atio->u.isp24.fcp_cmnd.add_cdb_len; + + return (be32_to_cpu(get_unaligned((uint32_t *) + &atio->u.isp24.fcp_cmnd.add_cdb[len * 4]))); +} + #define CTIO_TYPE7 0x12 /* Continue target I/O entry (for 24xx) */ /* @@ -667,7 +675,6 @@ struct qla_tgt_func_tmpl { int (*handle_cmd)(struct scsi_qla_host *, struct qla_tgt_cmd *, unsigned char *, uint32_t, int, int, int); void (*handle_data)(struct qla_tgt_cmd *); - void (*handle_dif_err)(struct qla_tgt_cmd *); int (*handle_tmr)(struct qla_tgt_mgmt_cmd *, uint32_t, uint16_t, uint32_t); void (*free_cmd)(struct qla_tgt_cmd *); @@ -684,6 +691,9 @@ struct qla_tgt_func_tmpl { void (*clear_nacl_from_fcport_map)(struct fc_port *); void (*put_sess)(struct fc_port *); void (*shutdown_sess)(struct fc_port *); + int (*get_dif_tags)(struct qla_tgt_cmd *cmd, uint16_t *pfw_prot_opts); + int (*chk_dif_tags)(uint32_t tag); + void (*add_target)(struct scsi_qla_host *); }; int qla2x00_wait_for_hba_online(struct scsi_qla_host *); @@ -720,8 +730,8 @@ int qla2x00_wait_for_hba_online(struct scsi_qla_host *); #define QLA_TGT_ABORT_ALL 0xFFFE #define QLA_TGT_NEXUS_LOSS_SESS 0xFFFD #define QLA_TGT_NEXUS_LOSS 0xFFFC -#define QLA_TGT_ABTS 0xFFFB -#define QLA_TGT_2G_ABORT_TASK 0xFFFA +#define 
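The host_map introduced above replaces the old tgt_vp_map[al_pa] indexing for the fabric case: the full 24-bit S_ID (d_id.b24) becomes the key of a 32-bit btree from lib/btree, so lookups still work when ports differ in more than the AL_PA byte. The lifecycle is init at probe (btree_init32), insert-or-update when a port id is assigned, remove on reset, and drain-and-destroy at teardown. A condensed sketch of that usage, with the surrounding driver types elided:

#include <linux/btree.h>

static struct btree_head32 host_map;	/* keyed by 24-bit FC port id */

static int host_map_set(u32 d_id_b24, void *vha)
{
	if (!btree_lookup32(&host_map, d_id_b24))
		return btree_insert32(&host_map, d_id_b24, vha, GFP_ATOMIC);
	return btree_update32(&host_map, d_id_b24, vha);	/* replace in place */
}

static void host_map_teardown(void)
{
	void *node;
	u32 key = 0;

	btree_for_each_safe32(&host_map, key, node)
		btree_remove32(&host_map, key);
	btree_destroy32(&host_map);
}

(btree_init32(&host_map) must have succeeded first, as in qlt_probe_one_stage1() above.)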
QLA_TGT_ABTS 0xFFFB +#define QLA_TGT_2G_ABORT_TASK 0xFFFA /* Notify Acknowledge flags */ #define NOTIFY_ACK_RES_COUNT BIT_8 @@ -845,6 +855,7 @@ enum trace_flags { TRC_CMD_FREE = BIT_17, TRC_DATA_IN = BIT_18, TRC_ABORT = BIT_19, + TRC_DIF_ERR = BIT_20, }; struct qla_tgt_cmd { @@ -862,7 +873,6 @@ struct qla_tgt_cmd { unsigned int sg_mapped:1; unsigned int free_sg:1; unsigned int write_data_transferred:1; - unsigned int ctx_dsd_alloced:1; unsigned int q_full:1; unsigned int term_exchg:1; unsigned int cmd_sent_to_fw:1; @@ -885,11 +895,25 @@ struct qla_tgt_cmd { struct list_head cmd_list; struct atio_from_isp atio; - /* t10dif */ + + uint8_t ctx_dsd_alloced; + + /* T10-DIF */ +#define DIF_ERR_NONE 0 +#define DIF_ERR_GRD 1 +#define DIF_ERR_REF 2 +#define DIF_ERR_APP 3 + int8_t dif_err_code; struct scatterlist *prot_sg; uint32_t prot_sg_cnt; - uint32_t blk_sz; + uint32_t blk_sz, num_blks; + uint8_t scsi_status, sense_key, asc, ascq; + struct crc_context *ctx; + uint8_t *cdb; + uint64_t lba; + uint16_t a_guard, e_guard, a_app_tag, e_app_tag; + uint32_t a_ref_tag, e_ref_tag; uint64_t jiffies_at_alloc; uint64_t jiffies_at_free; @@ -1053,4 +1077,7 @@ extern int qlt_free_qfull_cmds(struct scsi_qla_host *); extern void qlt_logo_completion_handler(fc_port_t *, int); extern void qlt_do_generation_tick(struct scsi_qla_host *, int *); +void qlt_send_resp_ctio(scsi_qla_host_t *, struct qla_tgt_cmd *, uint8_t, + uint8_t, uint8_t, uint8_t); + #endif /* __QLA_TARGET_H */ diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h index 3cb1964b7786..45bc84e8e3bf 100644 --- a/drivers/scsi/qla2xxx/qla_version.h +++ b/drivers/scsi/qla2xxx/qla_version.h @@ -7,9 +7,9 @@ /* * Driver version */ -#define QLA2XXX_VERSION "8.07.00.38-k" +#define QLA2XXX_VERSION "9.00.00.00-k" -#define QLA_DRIVER_MAJOR_VER 8 -#define QLA_DRIVER_MINOR_VER 7 +#define QLA_DRIVER_MAJOR_VER 9 +#define QLA_DRIVER_MINOR_VER 0 #define QLA_DRIVER_PATCH_VER 0 #define QLA_DRIVER_BETA_VER 0 diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index 8e8ab0fa9672..7443e4efa3ae 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c @@ -531,6 +531,24 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work) return; } + switch (cmd->dif_err_code) { + case DIF_ERR_GRD: + cmd->se_cmd.pi_err = + TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED; + break; + case DIF_ERR_REF: + cmd->se_cmd.pi_err = + TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED; + break; + case DIF_ERR_APP: + cmd->se_cmd.pi_err = + TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED; + break; + case DIF_ERR_NONE: + default: + break; + } + if (cmd->se_cmd.pi_err) transport_generic_request_failure(&cmd->se_cmd, cmd->se_cmd.pi_err); @@ -555,25 +573,23 @@ static void tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd) queue_work_on(smp_processor_id(), tcm_qla2xxx_free_wq, &cmd->work); } -static void tcm_qla2xxx_handle_dif_work(struct work_struct *work) +static int tcm_qla2xxx_chk_dif_tags(uint32_t tag) { - struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); - - /* take an extra kref to prevent cmd free too early. - * need to wait for SCSI status/check condition to - * finish responding generate by transport_generic_request_failure. 
- */ - kref_get(&cmd->se_cmd.cmd_kref); - transport_generic_request_failure(&cmd->se_cmd, cmd->se_cmd.pi_err); + return 0; } -/* - * Called from qla_target.c:qlt_do_ctio_completion() - */ -static void tcm_qla2xxx_handle_dif_err(struct qla_tgt_cmd *cmd) +static int tcm_qla2xxx_dif_tags(struct qla_tgt_cmd *cmd, + uint16_t *pfw_prot_opts) { - INIT_WORK(&cmd->work, tcm_qla2xxx_handle_dif_work); - queue_work(tcm_qla2xxx_free_wq, &cmd->work); + struct se_cmd *se_cmd = &cmd->se_cmd; + + if (!(se_cmd->prot_checks & TARGET_DIF_CHECK_GUARD)) + *pfw_prot_opts |= PO_DISABLE_GUARD_CHECK; + + if (!(se_cmd->prot_checks & TARGET_DIF_CHECK_APPTAG)) + *pfw_prot_opts |= PO_DIS_APP_TAG_VALD; + + return 0; } /* @@ -1610,7 +1626,6 @@ static void tcm_qla2xxx_update_sess(struct fc_port *sess, port_id_t s_id, static struct qla_tgt_func_tmpl tcm_qla2xxx_template = { .handle_cmd = tcm_qla2xxx_handle_cmd, .handle_data = tcm_qla2xxx_handle_data, - .handle_dif_err = tcm_qla2xxx_handle_dif_err, .handle_tmr = tcm_qla2xxx_handle_tmr, .free_cmd = tcm_qla2xxx_free_cmd, .free_mcmd = tcm_qla2xxx_free_mcmd, @@ -1622,6 +1637,8 @@ static struct qla_tgt_func_tmpl tcm_qla2xxx_template = { .clear_nacl_from_fcport_map = tcm_qla2xxx_clear_nacl_from_fcport_map, .put_sess = tcm_qla2xxx_put_sess, .shutdown_sess = tcm_qla2xxx_shutdown_sess, + .get_dif_tags = tcm_qla2xxx_dif_tags, + .chk_dif_tags = tcm_qla2xxx_chk_dif_tags, }; static int tcm_qla2xxx_init_lport(struct tcm_qla2xxx_lport *lport) diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c index f5e330099bfc..fd7c16a7ca6e 100644 --- a/drivers/target/target_core_alua.c +++ b/drivers/target/target_core_alua.c @@ -43,7 +43,7 @@ #include "target_core_ua.h" static sense_reason_t core_alua_check_transition(int state, int valid, - int *primary); + int *primary, int explicit); static int core_alua_set_tg_pt_secondary_state( struct se_lun *lun, int explicit, int offline); @@ -335,8 +335,8 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd) * the state is a primary or secondary target port asymmetric * access state. */ - rc = core_alua_check_transition(alua_access_state, - valid_states, &primary); + rc = core_alua_check_transition(alua_access_state, valid_states, + &primary, 1); if (rc) { /* * If the SET TARGET PORT GROUPS attempts to establish @@ -691,7 +691,7 @@ target_alua_state_check(struct se_cmd *cmd) if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE) return 0; - if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) + if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA) return 0; /* @@ -762,7 +762,7 @@ target_alua_state_check(struct se_cmd *cmd) * Check implicit and explicit ALUA state change request. */ static sense_reason_t -core_alua_check_transition(int state, int valid, int *primary) +core_alua_check_transition(int state, int valid, int *primary, int explicit) { /* * OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are @@ -804,11 +804,14 @@ core_alua_check_transition(int state, int valid, int *primary) *primary = 0; break; case ALUA_ACCESS_STATE_TRANSITION: - /* - * Transitioning is set internally, and - * cannot be selected manually. - */ - goto not_supported; + if (!(valid & ALUA_T_SUP) || explicit) + /* + * Transitioning is set internally and by tcmu daemon, + * and cannot be selected through a STPG. 
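The net effect of this relaxation is that TRANSITIONING becomes a requestable primary state for implicit (daemon-driven) transitions, but only when the device advertises T_SUP among its supported ALUA states; an explicit SET TARGET PORT GROUPS from an initiator is still refused. Condensed into one predicate (ALUA_T_SUP is the real supported-states bit; the function name is illustrative):

static bool transition_selectable(int valid_states, bool explicit_stpg)
{
	/* tcmu-style daemons may set TRANSITIONING implicitly when the
	 * device advertises T_SUP; initiator STPGs may never select it. */
	return (valid_states & ALUA_T_SUP) && !explicit_stpg;
}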
+ */ + goto not_supported; + *primary = 0; + break; default: pr_err("Unknown ALUA access state: 0x%02x\n", state); return TCM_INVALID_PARAMETER_LIST; @@ -1013,7 +1016,7 @@ static void core_alua_queue_state_change_ua(struct t10_alua_tg_pt_gp *tg_pt_gp) static void core_alua_do_transition_tg_pt_work(struct work_struct *work) { struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(work, - struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work.work); + struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work); struct se_device *dev = tg_pt_gp->tg_pt_gp_dev; bool explicit = (tg_pt_gp->tg_pt_gp_alua_access_status == ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG); @@ -1070,32 +1073,19 @@ static int core_alua_do_transition_tg_pt( if (atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) == new_state) return 0; - if (new_state == ALUA_ACCESS_STATE_TRANSITION) + if (explicit && new_state == ALUA_ACCESS_STATE_TRANSITION) return -EAGAIN; /* * Flush any pending transitions */ - if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs && - atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) == - ALUA_ACCESS_STATE_TRANSITION) { - /* Just in case */ - tg_pt_gp->tg_pt_gp_alua_pending_state = new_state; - tg_pt_gp->tg_pt_gp_transition_complete = &wait; - flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work); - wait_for_completion(&wait); - tg_pt_gp->tg_pt_gp_transition_complete = NULL; - return 0; - } + if (!explicit) + flush_work(&tg_pt_gp->tg_pt_gp_transition_work); /* * Save the old primary ALUA access state, and set the current state * to ALUA_ACCESS_STATE_TRANSITION. */ - tg_pt_gp->tg_pt_gp_alua_previous_state = - atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state); - tg_pt_gp->tg_pt_gp_alua_pending_state = new_state; - atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, ALUA_ACCESS_STATE_TRANSITION); tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ? 
@@ -1104,6 +1094,13 @@ static int core_alua_do_transition_tg_pt( core_alua_queue_state_change_ua(tg_pt_gp); + if (new_state == ALUA_ACCESS_STATE_TRANSITION) + return 0; + + tg_pt_gp->tg_pt_gp_alua_previous_state = + atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state); + tg_pt_gp->tg_pt_gp_alua_pending_state = new_state; + /* * Check for the optional ALUA primary state transition delay */ @@ -1117,17 +1114,9 @@ static int core_alua_do_transition_tg_pt( atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt); spin_unlock(&dev->t10_alua.tg_pt_gps_lock); - if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs) { - unsigned long transition_tmo; - - transition_tmo = tg_pt_gp->tg_pt_gp_implicit_trans_secs * HZ; - queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq, - &tg_pt_gp->tg_pt_gp_transition_work, - transition_tmo); - } else { + schedule_work(&tg_pt_gp->tg_pt_gp_transition_work); + if (explicit) { tg_pt_gp->tg_pt_gp_transition_complete = &wait; - queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq, - &tg_pt_gp->tg_pt_gp_transition_work, 0); wait_for_completion(&wait); tg_pt_gp->tg_pt_gp_transition_complete = NULL; } @@ -1149,8 +1138,12 @@ int core_alua_do_port_transition( struct t10_alua_tg_pt_gp *tg_pt_gp; int primary, valid_states, rc = 0; + if (l_dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA) + return -ENODEV; + valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states; - if (core_alua_check_transition(new_state, valid_states, &primary) != 0) + if (core_alua_check_transition(new_state, valid_states, &primary, + explicit) != 0) return -EINVAL; local_lu_gp_mem = l_dev->dev_alua_lu_gp_mem; @@ -1695,8 +1688,8 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev, mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex); spin_lock_init(&tg_pt_gp->tg_pt_gp_lock); atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0); - INIT_DELAYED_WORK(&tg_pt_gp->tg_pt_gp_transition_work, - core_alua_do_transition_tg_pt_work); + INIT_WORK(&tg_pt_gp->tg_pt_gp_transition_work, + core_alua_do_transition_tg_pt_work); tg_pt_gp->tg_pt_gp_dev = dev; atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED); @@ -1804,7 +1797,7 @@ void core_alua_free_tg_pt_gp( dev->t10_alua.alua_tg_pt_gps_counter--; spin_unlock(&dev->t10_alua.tg_pt_gps_lock); - flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work); + flush_work(&tg_pt_gp->tg_pt_gp_transition_work); /* * Allow a struct t10_alua_tg_pt_gp_member * referenced by @@ -1973,7 +1966,7 @@ ssize_t core_alua_store_tg_pt_gp_info( unsigned char buf[TG_PT_GROUP_NAME_BUF]; int move = 0; - if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH || + if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA || (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) return -ENODEV; @@ -2230,7 +2223,7 @@ ssize_t core_alua_store_offline_bit( unsigned long tmp; int ret; - if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH || + if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA || (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) return -ENODEV; @@ -2316,7 +2309,8 @@ ssize_t core_alua_store_secondary_write_metadata( int core_setup_alua(struct se_device *dev) { - if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) && + if (!(dev->transport->transport_flags & + TRANSPORT_FLAG_PASSTHROUGH_ALUA) && !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) { struct t10_alua_lu_gp_member *lu_gp_mem; diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index 
54b36c9835be..38b5025e4c7a 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -421,6 +421,10 @@ static int target_fabric_tf_ops_check(const struct target_core_fabric_ops *tfo) pr_err("Missing tfo->aborted_task()\n"); return -EINVAL; } + if (!tfo->check_stop_free) { + pr_err("Missing tfo->check_stop_free()\n"); + return -EINVAL; + } /* * We at least require tfo->fabric_make_wwn(), tfo->fabric_drop_wwn() * tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index a8f8e53f2f57..94cda7991e80 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c @@ -154,7 +154,7 @@ static void pscsi_tape_read_blocksize(struct se_device *dev, buf = kzalloc(12, GFP_KERNEL); if (!buf) - return; + goto out_free; memset(cdb, 0, MAX_COMMAND_SIZE); cdb[0] = MODE_SENSE; @@ -169,9 +169,10 @@ static void pscsi_tape_read_blocksize(struct se_device *dev, * If MODE_SENSE still returns zero, set the default value to 1024. */ sdev->sector_size = (buf[9] << 16) | (buf[10] << 8) | (buf[11]); +out_free: if (!sdev->sector_size) sdev->sector_size = 1024; -out_free: + kfree(buf); } @@ -314,9 +315,10 @@ static int pscsi_add_device_to_list(struct se_device *dev, sd->lun, sd->queue_depth); } - dev->dev_attrib.hw_block_size = sd->sector_size; + dev->dev_attrib.hw_block_size = + min_not_zero((int)sd->sector_size, 512); dev->dev_attrib.hw_max_sectors = - min_t(int, sd->host->max_sectors, queue_max_hw_sectors(q)); + min_not_zero(sd->host->max_sectors, queue_max_hw_sectors(q)); dev->dev_attrib.hw_queue_depth = sd->queue_depth; /* @@ -339,8 +341,10 @@ static int pscsi_add_device_to_list(struct se_device *dev, /* * For TYPE_TAPE, attempt to determine blocksize with MODE_SENSE. */ - if (sd->type == TYPE_TAPE) + if (sd->type == TYPE_TAPE) { pscsi_tape_read_blocksize(dev, sd); + dev->dev_attrib.hw_block_size = sd->sector_size; + } return 0; } @@ -406,7 +410,7 @@ static int pscsi_create_type_disk(struct se_device *dev, struct scsi_device *sd) /* * Called with struct Scsi_Host->host_lock called. */ -static int pscsi_create_type_rom(struct se_device *dev, struct scsi_device *sd) +static int pscsi_create_type_nondisk(struct se_device *dev, struct scsi_device *sd) __releases(sh->host_lock) { struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr; @@ -433,28 +437,6 @@ static int pscsi_create_type_rom(struct se_device *dev, struct scsi_device *sd) return 0; } -/* - * Called with struct Scsi_Host->host_lock called. 
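The two min_not_zero() call sites above are the heart of the pscsi attribute fix: min_not_zero() returns the smaller of its arguments while treating zero as "unset", so a device that has not reported a sector size yet yields hw_block_size = 512 instead of 0, and a zero from either the host template or the request queue no longer clamps hw_max_sectors to nothing. For TYPE_TAPE, the MODE SENSE probe then overwrites hw_block_size with the real value. Behaviour by example (the macro comes from linux/kernel.h in this era):

#include <linux/kernel.h>

static void min_not_zero_examples(void)
{
	u32 a = min_not_zero(0u, 512u);		/* 512: the zero side is ignored */
	u32 b = min_not_zero(256u, 512u);	/* 256: behaves as a plain min() */
	u32 c = min_not_zero(4096u, 512u);	/* 512: still a plain min()      */
	(void)a; (void)b; (void)c;
}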
- */ -static int pscsi_create_type_other(struct se_device *dev, - struct scsi_device *sd) - __releases(sh->host_lock) -{ - struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr; - struct Scsi_Host *sh = sd->host; - int ret; - - spin_unlock_irq(sh->host_lock); - ret = pscsi_add_device_to_list(dev, sd); - if (ret) - return ret; - - pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%llu\n", - phv->phv_host_id, scsi_device_type(sd->type), sh->host_no, - sd->channel, sd->id, sd->lun); - return 0; -} - static int pscsi_configure_device(struct se_device *dev) { struct se_hba *hba = dev->se_hba; @@ -542,11 +524,8 @@ static int pscsi_configure_device(struct se_device *dev) case TYPE_DISK: ret = pscsi_create_type_disk(dev, sd); break; - case TYPE_ROM: - ret = pscsi_create_type_rom(dev, sd); - break; default: - ret = pscsi_create_type_other(dev, sd); + ret = pscsi_create_type_nondisk(dev, sd); break; } @@ -611,8 +590,7 @@ static void pscsi_free_device(struct se_device *dev) else if (pdv->pdv_lld_host) scsi_host_put(pdv->pdv_lld_host); - if ((sd->type == TYPE_DISK) || (sd->type == TYPE_ROM)) - scsi_device_put(sd); + scsi_device_put(sd); pdv->pdv_sd = NULL; } @@ -1064,7 +1042,6 @@ static sector_t pscsi_get_blocks(struct se_device *dev) if (pdv->pdv_bd && pdv->pdv_bd->bd_part) return pdv->pdv_bd->bd_part->nr_sects; - dump_stack(); return 0; } @@ -1103,7 +1080,8 @@ static void pscsi_req_done(struct request *req, int uptodate) static const struct target_backend_ops pscsi_ops = { .name = "pscsi", .owner = THIS_MODULE, - .transport_flags = TRANSPORT_FLAG_PASSTHROUGH, + .transport_flags = TRANSPORT_FLAG_PASSTHROUGH | + TRANSPORT_FLAG_PASSTHROUGH_ALUA, .attach_hba = pscsi_attach_hba, .detach_hba = pscsi_detach_hba, .pmode_enable_hba = pscsi_pmode_enable_hba, diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index 68d8aef7ab78..c194063f169b 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c @@ -1105,9 +1105,15 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) return ret; break; case VERIFY: + case VERIFY_16: size = 0; - sectors = transport_get_sectors_10(cdb); - cmd->t_task_lba = transport_lba_32(cdb); + if (cdb[0] == VERIFY) { + sectors = transport_get_sectors_10(cdb); + cmd->t_task_lba = transport_lba_32(cdb); + } else { + sectors = transport_get_sectors_16(cdb); + cmd->t_task_lba = transport_lba_64(cdb); + } cmd->execute_cmd = sbc_emulate_noop; goto check_lba; case REZERO_UNIT: diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c index c0dbfa016575..6fb191914f45 100644 --- a/drivers/target/target_core_tpg.c +++ b/drivers/target/target_core_tpg.c @@ -602,7 +602,8 @@ int core_tpg_add_lun( if (ret) goto out_kill_ref; - if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) && + if (!(dev->transport->transport_flags & + TRANSPORT_FLAG_PASSTHROUGH_ALUA) && !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) target_attach_tg_pt_gp(lun, dev->t10_alua.default_tg_pt_gp); diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 434d9d693989..b1a3cdb29468 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -636,8 +636,7 @@ static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd) * Fabric modules are expected to return '1' here if the se_cmd being * passed is released at this point, or zero if not being released. */ - return cmd->se_tfo->check_stop_free ? 
cmd->se_tfo->check_stop_free(cmd) - : 0; + return cmd->se_tfo->check_stop_free(cmd); } static void transport_lun_remove_cmd(struct se_cmd *cmd) diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index c3adefe95e50..c6874c38a10b 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -28,6 +28,7 @@ #include <linux/stringify.h> #include <linux/bitops.h> #include <linux/highmem.h> +#include <linux/configfs.h> #include <net/genetlink.h> #include <scsi/scsi_common.h> #include <scsi/scsi_proto.h> @@ -112,6 +113,7 @@ struct tcmu_dev { spinlock_t commands_lock; struct timer_list timeout; + unsigned int cmd_time_out; char dev_config[TCMU_CONFIG_LEN]; }; @@ -172,7 +174,9 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd) tcmu_cmd->se_cmd = se_cmd; tcmu_cmd->tcmu_dev = udev; - tcmu_cmd->deadline = jiffies + msecs_to_jiffies(TCMU_TIME_OUT); + if (udev->cmd_time_out) + tcmu_cmd->deadline = jiffies + + msecs_to_jiffies(udev->cmd_time_out); idr_preload(GFP_KERNEL); spin_lock_irq(&udev->commands_lock); @@ -451,7 +455,11 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd) pr_debug("sleeping for ring space\n"); spin_unlock_irq(&udev->cmdr_lock); - ret = schedule_timeout(msecs_to_jiffies(TCMU_TIME_OUT)); + if (udev->cmd_time_out) + ret = schedule_timeout( + msecs_to_jiffies(udev->cmd_time_out)); + else + ret = schedule_timeout(msecs_to_jiffies(TCMU_TIME_OUT)); finish_wait(&udev->wait_cmdr, &__wait); if (!ret) { pr_warn("tcmu: command timed out\n"); @@ -526,8 +534,9 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd) /* TODO: only if FLUSH and FUA? */ uio_event_notify(&udev->uio_info); - mod_timer(&udev->timeout, - round_jiffies_up(jiffies + msecs_to_jiffies(TCMU_TIME_OUT))); + if (udev->cmd_time_out) + mod_timer(&udev->timeout, round_jiffies_up(jiffies + + msecs_to_jiffies(udev->cmd_time_out))); return TCM_NO_SENSE; } @@ -742,6 +751,7 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name) } udev->hba = hba; + udev->cmd_time_out = TCMU_TIME_OUT; init_waitqueue_head(&udev->wait_cmdr); spin_lock_init(&udev->cmdr_lock); @@ -960,7 +970,8 @@ static int tcmu_configure_device(struct se_device *dev) if (dev->dev_attrib.hw_block_size == 0) dev->dev_attrib.hw_block_size = 512; /* Other attributes can be configured in userspace */ - dev->dev_attrib.hw_max_sectors = 128; + if (!dev->dev_attrib.hw_max_sectors) + dev->dev_attrib.hw_max_sectors = 128; dev->dev_attrib.hw_queue_depth = 128; ret = tcmu_netlink_event(TCMU_CMD_ADDED_DEVICE, udev->uio_info.name, @@ -997,6 +1008,11 @@ static void tcmu_dev_call_rcu(struct rcu_head *p) kfree(udev); } +static bool tcmu_dev_configured(struct tcmu_dev *udev) +{ + return udev->uio_info.uio_dev ? 
true : false; +} + static void tcmu_free_device(struct se_device *dev) { struct tcmu_dev *udev = TCMU_DEV(dev); @@ -1018,8 +1034,7 @@ static void tcmu_free_device(struct se_device *dev) spin_unlock_irq(&udev->commands_lock); WARN_ON(!all_expired); - /* Device was configured */ - if (udev->uio_info.uio_dev) { + if (tcmu_dev_configured(udev)) { tcmu_netlink_event(TCMU_CMD_REMOVED_DEVICE, udev->uio_info.name, udev->uio_info.uio_dev->minor); @@ -1031,16 +1046,42 @@ static void tcmu_free_device(struct se_device *dev) } enum { - Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_err, + Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_hw_max_sectors, + Opt_err, }; static match_table_t tokens = { {Opt_dev_config, "dev_config=%s"}, {Opt_dev_size, "dev_size=%u"}, {Opt_hw_block_size, "hw_block_size=%u"}, + {Opt_hw_max_sectors, "hw_max_sectors=%u"}, {Opt_err, NULL} }; +static int tcmu_set_dev_attrib(substring_t *arg, u32 *dev_attrib) +{ + unsigned long tmp_ul; + char *arg_p; + int ret; + + arg_p = match_strdup(arg); + if (!arg_p) + return -ENOMEM; + + ret = kstrtoul(arg_p, 0, &tmp_ul); + kfree(arg_p); + if (ret < 0) { + pr_err("kstrtoul() failed for dev attrib\n"); + return ret; + } + if (!tmp_ul) { + pr_err("dev attrib must be nonzero\n"); + return -EINVAL; + } + *dev_attrib = tmp_ul; + return 0; +} + static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev, const char *page, ssize_t count) { @@ -1048,7 +1089,6 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev, char *orig, *ptr, *opts, *arg_p; substring_t args[MAX_OPT_ARGS]; int ret = 0, token; - unsigned long tmp_ul; opts = kstrdup(page, GFP_KERNEL); if (!opts) @@ -1082,26 +1122,19 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev, pr_err("kstrtoul() failed for dev_size=\n"); break; case Opt_hw_block_size: - arg_p = match_strdup(&args[0]); - if (!arg_p) { - ret = -ENOMEM; - break; - } - ret = kstrtoul(arg_p, 0, &tmp_ul); - kfree(arg_p); - if (ret < 0) { - pr_err("kstrtoul() failed for hw_block_size=\n"); - break; - } - if (!tmp_ul) { - pr_err("hw_block_size must be nonzero\n"); - break; - } - dev->dev_attrib.hw_block_size = tmp_ul; + ret = tcmu_set_dev_attrib(&args[0], + &(dev->dev_attrib.hw_block_size)); + break; + case Opt_hw_max_sectors: + ret = tcmu_set_dev_attrib(&args[0], + &(dev->dev_attrib.hw_max_sectors)); break; default: break; } + + if (ret) + break; } kfree(orig); @@ -1134,7 +1167,48 @@ tcmu_parse_cdb(struct se_cmd *cmd) return passthrough_parse_cdb(cmd, tcmu_queue_cmd); } -static const struct target_backend_ops tcmu_ops = { +static ssize_t tcmu_cmd_time_out_show(struct config_item *item, char *page) +{ + struct se_dev_attrib *da = container_of(to_config_group(item), + struct se_dev_attrib, da_group); + struct tcmu_dev *udev = container_of(da->da_dev, + struct tcmu_dev, se_dev); + + return snprintf(page, PAGE_SIZE, "%lu\n", udev->cmd_time_out / MSEC_PER_SEC); +} + +static ssize_t tcmu_cmd_time_out_store(struct config_item *item, const char *page, + size_t count) +{ + struct se_dev_attrib *da = container_of(to_config_group(item), + struct se_dev_attrib, da_group); + struct tcmu_dev *udev = container_of(da->da_dev, + struct tcmu_dev, se_dev); + u32 val; + int ret; + + if (da->da_dev->export_count) { + pr_err("Unable to set tcmu cmd_time_out while exports exist\n"); + return -EINVAL; + } + + ret = kstrtou32(page, 0, &val); + if (ret < 0) + return ret; + + if (!val) { + pr_err("Illegal value for cmd_time_out\n"); + return -EINVAL; + } + + udev->cmd_time_out = val * MSEC_PER_SEC; + return 
count; +} +CONFIGFS_ATTR(tcmu_, cmd_time_out); + +static struct configfs_attribute **tcmu_attrs; + +static struct target_backend_ops tcmu_ops = { .name = "user", .owner = THIS_MODULE, .transport_flags = TRANSPORT_FLAG_PASSTHROUGH, @@ -1148,12 +1222,12 @@ static const struct target_backend_ops tcmu_ops = { .show_configfs_dev_params = tcmu_show_configfs_dev_params, .get_device_type = sbc_get_device_type, .get_blocks = tcmu_get_blocks, - .tb_dev_attrib_attrs = passthrough_attrib_attrs, + .tb_dev_attrib_attrs = NULL, }; static int __init tcmu_module_init(void) { - int ret; + int ret, i, len = 0; BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0); @@ -1175,12 +1249,31 @@ static int __init tcmu_module_init(void) goto out_unreg_device; } + for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) { + len += sizeof(struct configfs_attribute *); + } + len += sizeof(struct configfs_attribute *) * 2; + + tcmu_attrs = kzalloc(len, GFP_KERNEL); + if (!tcmu_attrs) { + ret = -ENOMEM; + goto out_unreg_genl; + } + + for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) { + tcmu_attrs[i] = passthrough_attrib_attrs[i]; + } + tcmu_attrs[i] = &tcmu_attr_cmd_time_out; + tcmu_ops.tb_dev_attrib_attrs = tcmu_attrs; + ret = transport_backend_register(&tcmu_ops); if (ret) - goto out_unreg_genl; + goto out_attrs; return 0; +out_attrs: + kfree(tcmu_attrs); out_unreg_genl: genl_unregister_family(&tcmu_genl_family); out_unreg_device: @@ -1194,6 +1287,7 @@ out_free_cache: static void __exit tcmu_module_exit(void) { target_backend_unregister(&tcmu_ops); + kfree(tcmu_attrs); genl_unregister_family(&tcmu_genl_family); root_device_unregister(tcmu_root_device); kmem_cache_destroy(tcmu_cmd_cache); |
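With a device-specific cmd_time_out attribute, tcmu can no longer point tb_dev_attrib_attrs at the shared passthrough_attrib_attrs array verbatim; module init instead sizes a fresh array as "every passthrough attribute, plus one extra, plus the NULL terminator" (the "len += sizeof(...) * 2" above covers the last two), copies the pointers across, and appends tcmu_attr_cmd_time_out ahead of the NULL that kzalloc() left in place. The same pattern as a freestanding helper (names invented for illustration):

#include <linux/configfs.h>
#include <linux/slab.h>

/* hypothetical: clone a NULL-terminated attribute array and append one entry */
static struct configfs_attribute **
clone_attrs_plus_one(struct configfs_attribute **base,
		     struct configfs_attribute *extra)
{
	struct configfs_attribute **attrs;
	int i, n = 0;

	while (base[n])
		n++;
	/* n existing + 1 extra + NULL terminator (zeroed by kcalloc()) */
	attrs = kcalloc(n + 2, sizeof(*attrs), GFP_KERNEL);
	if (!attrs)
		return NULL;
	for (i = 0; i < n; i++)
		attrs[i] = base[i];
	attrs[n] = extra;
	return attrs;
}

Freeing the array on module exit, as tcmu_module_exit() does above, keeps repeated module load/unload cycles from leaking it.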