Diffstat (limited to 'drivers/scsi'): 97 files changed, 3526 insertions(+), 2059 deletions(-)
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c index 3ddc85e6efd6..95e32a47face 100644 --- a/drivers/scsi/53c700.c +++ b/drivers/scsi/53c700.c @@ -1120,9 +1120,9 @@ process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp, "reselection is tag %d, slot %p(%d)\n", hostdata->msgin[2], slot, slot->tag); } else { - struct scsi_cmnd *SCp; + struct NCR_700_Device_Parameters *p = SDp->hostdata; + struct scsi_cmnd *SCp = p->current_cmnd; - SCp = SDp->current_cmnd; if(unlikely(SCp == NULL)) { sdev_printk(KERN_ERR, SDp, "no saved request for untagged cmd\n"); @@ -1825,9 +1825,11 @@ NCR_700_queuecommand_lck(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *) CDEBUG(KERN_DEBUG, SCp, "sending out tag %d, slot %p\n", slot->tag, slot); } else { + struct NCR_700_Device_Parameters *p = SCp->device->hostdata; + slot->tag = SCSI_NO_TAG; /* save current command for reselection */ - SCp->device->current_cmnd = SCp; + p->current_cmnd = SCp; } /* sanity check: some of the commands generated by the mid-layer * have an eccentric idea of their sc_data_direction */ @@ -1892,7 +1894,7 @@ NCR_700_queuecommand_lck(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *) slot->SG[i].ins = bS_to_host(SCRIPT_RETURN); slot->SG[i].pAddr = 0; dma_cache_sync(hostdata->dev, slot->SG, sizeof(slot->SG), DMA_TO_DEVICE); - DEBUG((" SETTING %08lx to %x\n", + DEBUG((" SETTING %p to %x\n", (&slot->pSG[i].ins), slot->SG[i].ins)); } diff --git a/drivers/scsi/53c700.h b/drivers/scsi/53c700.h index e06bdfeab420..f34c916b95bc 100644 --- a/drivers/scsi/53c700.h +++ b/drivers/scsi/53c700.h @@ -82,6 +82,7 @@ struct NCR_700_Device_Parameters { * cmnd[1], this could be in static storage */ unsigned char cmnd[MAX_COMMAND_SIZE]; __u8 depth; + struct scsi_cmnd *current_cmnd; /* currently active command */ }; @@ -423,23 +424,25 @@ struct NCR_700_Host_Parameters { #define script_patch_32(dev, script, symbol, value) \ { \ int i; \ + dma_addr_t da = value; \ for(i=0; i< (sizeof(A_##symbol##_used) / sizeof(__u32)); i++) { \ - __u32 val = bS_to_cpu((script)[A_##symbol##_used[i]]) + value; \ + __u32 val = bS_to_cpu((script)[A_##symbol##_used[i]]) + da; \ (script)[A_##symbol##_used[i]] = bS_to_host(val); \ dma_cache_sync((dev), &(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \ - DEBUG((" script, patching %s at %d to 0x%lx\n", \ - #symbol, A_##symbol##_used[i], (value))); \ + DEBUG((" script, patching %s at %d to %pad\n", \ + #symbol, A_##symbol##_used[i], &da)); \ } \ } #define script_patch_32_abs(dev, script, symbol, value) \ { \ int i; \ + dma_addr_t da = value; \ for(i=0; i< (sizeof(A_##symbol##_used) / sizeof(__u32)); i++) { \ - (script)[A_##symbol##_used[i]] = bS_to_host(value); \ + (script)[A_##symbol##_used[i]] = bS_to_host(da); \ dma_cache_sync((dev), &(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \ - DEBUG((" script, patching %s at %d to 0x%lx\n", \ - #symbol, A_##symbol##_used[i], (value))); \ + DEBUG((" script, patching %s at %d to %pad\n", \ + #symbol, A_##symbol##_used[i], &da)); \ } \ } diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 98e5d51a3346..1918f5483b23 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -1433,7 +1433,7 @@ config SCSI_U14_34F_MAX_TAGS config SCSI_ULTRASTOR tristate "UltraStor SCSI support" - depends on X86 && ISA && SCSI + depends on X86 && ISA && SCSI && ISA_DMA_API ---help--- This is support for the UltraStor 14F, 24F and 34F SCSI-2 host adapter family. 
This driver is explained in section 3.12 of the diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c index 4b3bb52b5108..b381b3718a98 100644 --- a/drivers/scsi/aacraid/commctrl.c +++ b/drivers/scsi/aacraid/commctrl.c @@ -635,15 +635,14 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) } } else { struct user_sgmap* usg; - usg = kmalloc(actual_fibsize - sizeof(struct aac_srb) - + sizeof(struct sgmap), GFP_KERNEL); + usg = kmemdup(upsg, + actual_fibsize - sizeof(struct aac_srb) + + sizeof(struct sgmap), GFP_KERNEL); if (!usg) { dprintk((KERN_DEBUG"aacraid: Allocation error in Raw SRB command\n")); rcode = -ENOMEM; goto cleanup; } - memcpy (usg, upsg, actual_fibsize - sizeof(struct aac_srb) - + sizeof(struct sgmap)); actual_fibsize = actual_fibsize64; for (i = 0; i < usg->count; i++) { diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c index a1881993982c..a5052dd8d7e6 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c +++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c @@ -57,7 +57,7 @@ static struct scsi_host_template bnx2fc_shost_template; static struct fc_function_template bnx2fc_transport_function; static struct fcoe_sysfs_function_template bnx2fc_fcoe_sysfs_templ; static struct fc_function_template bnx2fc_vport_xport_function; -static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode); +static int bnx2fc_create(struct net_device *netdev, enum fip_mode fip_mode); static void __bnx2fc_destroy(struct bnx2fc_interface *interface); static int bnx2fc_destroy(struct net_device *net_device); static int bnx2fc_enable(struct net_device *netdev); @@ -486,7 +486,7 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev, __skb_queue_tail(&bg->fcoe_rx_list, skb); if (bg->fcoe_rx_list.qlen == 1) - wake_up_process(bg->thread); + wake_up_process(bg->kthread); spin_unlock(&bg->fcoe_rx_list.lock); @@ -2260,7 +2260,7 @@ enum bnx2fc_create_link_state { * Returns: 0 for success */ static int _bnx2fc_create(struct net_device *netdev, - enum fip_state fip_mode, + enum fip_mode fip_mode, enum bnx2fc_create_link_state link_state) { struct fcoe_ctlr_device *cdev; @@ -2412,7 +2412,7 @@ mod_err: * * Returns: 0 for success */ -static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode) +static int bnx2fc_create(struct net_device *netdev, enum fip_mode fip_mode) { return _bnx2fc_create(netdev, fip_mode, BNX2FC_CREATE_LINK_UP); } @@ -2715,7 +2715,7 @@ static int __init bnx2fc_mod_init(void) } wake_up_process(l2_thread); spin_lock_bh(&bg->fcoe_rx_list.lock); - bg->thread = l2_thread; + bg->kthread = l2_thread; spin_unlock_bh(&bg->fcoe_rx_list.lock); for_each_possible_cpu(cpu) { @@ -2788,8 +2788,8 @@ static void __exit bnx2fc_mod_exit(void) /* Destroy global thread */ bg = &bnx2fc_global; spin_lock_bh(&bg->fcoe_rx_list.lock); - l2_thread = bg->thread; - bg->thread = NULL; + l2_thread = bg->kthread; + bg->kthread = NULL; while ((skb = __skb_dequeue(&bg->fcoe_rx_list)) != NULL) kfree_skb(skb); diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c index 026f394a3851..8f24d60f09d7 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_io.c +++ b/drivers/scsi/bnx2fc/bnx2fc_io.c @@ -1758,7 +1758,7 @@ static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req, if ((fcp_rsp_len == 4) || (fcp_rsp_len == 8)) { /* Only for task management function */ io_req->fcp_rsp_code = rq_data[3]; - printk(KERN_ERR PFX "fcp_rsp_code = %d\n", + BNX2FC_IO_DBG(io_req, "fcp_rsp_code = %d\n", io_req->fcp_rsp_code); } diff --git 
a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c index fb072cc5e9fd..42921dbba927 100644 --- a/drivers/scsi/bnx2i/bnx2i_hwi.c +++ b/drivers/scsi/bnx2i/bnx2i_hwi.c @@ -2417,7 +2417,7 @@ static void bnx2i_process_conn_destroy_cmpl(struct bnx2i_hba *hba, ep = bnx2i_find_ep_in_destroy_list(hba, conn_destroy->iscsi_conn_id); if (!ep) { printk(KERN_ALERT "bnx2i_conn_destroy_cmpl: no pending " - "offload request, unexpected complection\n"); + "offload request, unexpected completion\n"); return; } diff --git a/drivers/scsi/cxgbi/Makefile b/drivers/scsi/cxgbi/Makefile index 86007e344955..a73781ac1800 100644 --- a/drivers/scsi/cxgbi/Makefile +++ b/drivers/scsi/cxgbi/Makefile @@ -1,2 +1,4 @@ +ccflags-y += -Idrivers/net/ethernet/chelsio/libcxgb + obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libcxgbi.o cxgb3i/ obj-$(CONFIG_SCSI_CXGB4_ISCSI) += libcxgbi.o cxgb4i/ diff --git a/drivers/scsi/cxgbi/cxgb3i/Kbuild b/drivers/scsi/cxgbi/cxgb3i/Kbuild index 961a12f6d318..663c52e05d81 100644 --- a/drivers/scsi/cxgbi/cxgb3i/Kbuild +++ b/drivers/scsi/cxgbi/cxgb3i/Kbuild @@ -1,3 +1,4 @@ ccflags-y += -I$(srctree)/drivers/net/ethernet/chelsio/cxgb3 +ccflags-y += -I$(srctree)/drivers/net/ethernet/chelsio/libcxgb obj-$(CONFIG_SCSI_CXGB3_ISCSI) += cxgb3i.o diff --git a/drivers/scsi/cxgbi/cxgb3i/Kconfig b/drivers/scsi/cxgbi/cxgb3i/Kconfig index e4603985dce3..f68c871b16ca 100644 --- a/drivers/scsi/cxgbi/cxgb3i/Kconfig +++ b/drivers/scsi/cxgbi/cxgb3i/Kconfig @@ -5,6 +5,7 @@ config SCSI_CXGB3_ISCSI select ETHERNET select NET_VENDOR_CHELSIO select CHELSIO_T3 + select CHELSIO_LIB select SCSI_ISCSI_ATTRS ---help--- This driver supports iSCSI offload for the Chelsio T3 devices. diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c index e22a268fd311..33e83464e091 100644 --- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c +++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c @@ -1028,7 +1028,7 @@ cxgb3_cpl_handler_func cxgb3i_cpl_handlers[NUM_CPL_CMDS] = { * cxgb3i_ofld_init - allocate and initialize resources for each adapter found * @cdev: cxgbi adapter */ -int cxgb3i_ofld_init(struct cxgbi_device *cdev) +static int cxgb3i_ofld_init(struct cxgbi_device *cdev) { struct t3cdev *t3dev = (struct t3cdev *)cdev->lldev; struct adap_ports port; @@ -1076,64 +1076,69 @@ static inline void ulp_mem_io_set_hdr(struct sk_buff *skb, unsigned int addr) req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_BYPASS)); req->cmd_lock_addr = htonl(V_ULP_MEMIO_ADDR(addr >> 5) | V_ULPTX_CMD(ULP_MEM_WRITE)); - req->len = htonl(V_ULP_MEMIO_DATA_LEN(PPOD_SIZE >> 5) | - V_ULPTX_NFLITS((PPOD_SIZE >> 3) + 1)); + req->len = htonl(V_ULP_MEMIO_DATA_LEN(IPPOD_SIZE >> 5) | + V_ULPTX_NFLITS((IPPOD_SIZE >> 3) + 1)); } -static int ddp_set_map(struct cxgbi_sock *csk, struct cxgbi_pagepod_hdr *hdr, - unsigned int idx, unsigned int npods, - struct cxgbi_gather_list *gl) +static struct cxgbi_ppm *cdev2ppm(struct cxgbi_device *cdev) { - struct cxgbi_device *cdev = csk->cdev; - struct cxgbi_ddp_info *ddp = cdev->ddp; - unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ddp->llimit; - int i; + return ((struct t3cdev *)cdev->lldev)->ulp_iscsi; +} - log_debug(1 << CXGBI_DBG_DDP, - "csk 0x%p, idx %u, npods %u, gl 0x%p.\n", - csk, idx, npods, gl); +static int ddp_set_map(struct cxgbi_ppm *ppm, struct cxgbi_sock *csk, + struct cxgbi_task_tag_info *ttinfo) +{ + unsigned int idx = ttinfo->idx; + unsigned int npods = ttinfo->npods; + struct scatterlist *sg = ttinfo->sgl; + struct cxgbi_pagepod *ppod; + struct ulp_mem_io *req; + unsigned int sg_off; + unsigned int pm_addr = (idx 
<< PPOD_SIZE_SHIFT) + ppm->llimit; + int i; - for (i = 0; i < npods; i++, idx++, pm_addr += PPOD_SIZE) { + for (i = 0; i < npods; i++, idx++, pm_addr += IPPOD_SIZE) { struct sk_buff *skb = alloc_wr(sizeof(struct ulp_mem_io) + - PPOD_SIZE, 0, GFP_ATOMIC); + IPPOD_SIZE, 0, GFP_ATOMIC); if (!skb) return -ENOMEM; - ulp_mem_io_set_hdr(skb, pm_addr); - cxgbi_ddp_ppod_set((struct cxgbi_pagepod *)(skb->head + - sizeof(struct ulp_mem_io)), - hdr, gl, i * PPOD_PAGES_MAX); + req = (struct ulp_mem_io *)skb->head; + ppod = (struct cxgbi_pagepod *)(req + 1); + sg_off = i * PPOD_PAGES_MAX; + cxgbi_ddp_set_one_ppod(ppod, ttinfo, &sg, + &sg_off); skb->priority = CPL_PRIORITY_CONTROL; - cxgb3_ofld_send(cdev->lldev, skb); + cxgb3_ofld_send(ppm->lldev, skb); } return 0; } -static void ddp_clear_map(struct cxgbi_hba *chba, unsigned int tag, - unsigned int idx, unsigned int npods) +static void ddp_clear_map(struct cxgbi_device *cdev, struct cxgbi_ppm *ppm, + struct cxgbi_task_tag_info *ttinfo) { - struct cxgbi_device *cdev = chba->cdev; - struct cxgbi_ddp_info *ddp = cdev->ddp; - unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ddp->llimit; + unsigned int idx = ttinfo->idx; + unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ppm->llimit; + unsigned int npods = ttinfo->npods; int i; log_debug(1 << CXGBI_DBG_DDP, - "cdev 0x%p, idx %u, npods %u, tag 0x%x.\n", - cdev, idx, npods, tag); + "cdev 0x%p, clear idx %u, npods %u.\n", + cdev, idx, npods); - for (i = 0; i < npods; i++, idx++, pm_addr += PPOD_SIZE) { + for (i = 0; i < npods; i++, idx++, pm_addr += IPPOD_SIZE) { struct sk_buff *skb = alloc_wr(sizeof(struct ulp_mem_io) + - PPOD_SIZE, 0, GFP_ATOMIC); + IPPOD_SIZE, 0, GFP_ATOMIC); if (!skb) { - pr_err("tag 0x%x, 0x%x, %d/%u, skb OOM.\n", - tag, idx, i, npods); + pr_err("cdev 0x%p, clear ddp, %u,%d/%u, skb OOM.\n", + cdev, idx, i, npods); continue; } ulp_mem_io_set_hdr(skb, pm_addr); skb->priority = CPL_PRIORITY_CONTROL; - cxgb3_ofld_send(cdev->lldev, skb); + cxgb3_ofld_send(ppm->lldev, skb); } } @@ -1203,82 +1208,68 @@ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid, } /** - * t3_ddp_cleanup - release the cxgb3 adapter's ddp resource - * @cdev: cxgb3i adapter - * release all the resource held by the ddp pagepod manager for a given - * adapter if needed - */ - -static void t3_ddp_cleanup(struct cxgbi_device *cdev) -{ - struct t3cdev *tdev = (struct t3cdev *)cdev->lldev; - - if (cxgbi_ddp_cleanup(cdev)) { - pr_info("t3dev 0x%p, ulp_iscsi no more user.\n", tdev); - tdev->ulp_iscsi = NULL; - } -} - -/** - * ddp_init - initialize the cxgb3 adapter's ddp resource + * cxgb3i_ddp_init - initialize the cxgb3 adapter's ddp resource * @cdev: cxgb3i adapter * initialize the ddp pagepod manager for a given adapter */ static int cxgb3i_ddp_init(struct cxgbi_device *cdev) { struct t3cdev *tdev = (struct t3cdev *)cdev->lldev; - struct cxgbi_ddp_info *ddp = tdev->ulp_iscsi; + struct net_device *ndev = cdev->ports[0]; + struct cxgbi_tag_format tformat; + unsigned int ppmax, tagmask = 0; struct ulp_iscsi_info uinfo; - unsigned int pgsz_factor[4]; int i, err; - if (ddp) { - kref_get(&ddp->refcnt); - pr_warn("t3dev 0x%p, ddp 0x%p already set up.\n", - tdev, tdev->ulp_iscsi); - cdev->ddp = ddp; - return -EALREADY; - } - err = tdev->ctl(tdev, ULP_ISCSI_GET_PARAMS, &uinfo); if (err < 0) { - pr_err("%s, failed to get iscsi param err=%d.\n", - tdev->name, err); + pr_err("%s, failed to get iscsi param %d.\n", + ndev->name, err); return err; } + if (uinfo.llimit >= uinfo.ulimit) { + pr_warn("T3 %s, iscsi NOT 
enabled %u ~ %u!\n", + ndev->name, uinfo.llimit, uinfo.ulimit); + return -EACCES; + } - err = cxgbi_ddp_init(cdev, uinfo.llimit, uinfo.ulimit, - uinfo.max_txsz, uinfo.max_rxsz); - if (err < 0) - return err; + ppmax = (uinfo.ulimit - uinfo.llimit + 1) >> PPOD_SIZE_SHIFT; + tagmask = cxgbi_tagmask_set(ppmax); - ddp = cdev->ddp; + pr_info("T3 %s: 0x%x~0x%x, 0x%x, tagmask 0x%x -> 0x%x.\n", + ndev->name, uinfo.llimit, uinfo.ulimit, ppmax, uinfo.tagmask, + tagmask); - uinfo.tagmask = ddp->idx_mask << PPOD_IDX_SHIFT; - cxgbi_ddp_page_size_factor(pgsz_factor); + memset(&tformat, 0, sizeof(struct cxgbi_tag_format)); for (i = 0; i < 4; i++) - uinfo.pgsz_factor[i] = pgsz_factor[i]; - uinfo.ulimit = uinfo.llimit + (ddp->nppods << PPOD_SIZE_SHIFT); + tformat.pgsz_order[i] = uinfo.pgsz_factor[i]; + cxgbi_tagmask_check(tagmask, &tformat); - err = tdev->ctl(tdev, ULP_ISCSI_SET_PARAMS, &uinfo); - if (err < 0) { - pr_warn("%s unable to set iscsi param err=%d, ddp disabled.\n", - tdev->name, err); - cxgbi_ddp_cleanup(cdev); - return err; + cxgbi_ddp_ppm_setup(&tdev->ulp_iscsi, cdev, &tformat, ppmax, + uinfo.llimit, uinfo.llimit, 0); + if (!(cdev->flags & CXGBI_FLAG_DDP_OFF)) { + uinfo.tagmask = tagmask; + uinfo.ulimit = uinfo.llimit + (ppmax << PPOD_SIZE_SHIFT); + + err = tdev->ctl(tdev, ULP_ISCSI_SET_PARAMS, &uinfo); + if (err < 0) { + pr_err("T3 %s fail to set iscsi param %d.\n", + ndev->name, err); + cdev->flags |= CXGBI_FLAG_DDP_OFF; + } + err = 0; } - tdev->ulp_iscsi = ddp; cdev->csk_ddp_setup_digest = ddp_setup_conn_digest; cdev->csk_ddp_setup_pgidx = ddp_setup_conn_pgidx; - cdev->csk_ddp_set = ddp_set_map; - cdev->csk_ddp_clear = ddp_clear_map; - - pr_info("tdev 0x%p, nppods %u, bits %u, mask 0x%x,0x%x pkt %u/%u, " - "%u/%u.\n", - tdev, ddp->nppods, ddp->idx_bits, ddp->idx_mask, - ddp->rsvd_tag_mask, ddp->max_txsz, uinfo.max_txsz, - ddp->max_rxsz, uinfo.max_rxsz); + cdev->csk_ddp_set_map = ddp_set_map; + cdev->csk_ddp_clear_map = ddp_clear_map; + cdev->cdev2ppm = cdev2ppm; + cdev->tx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD, + uinfo.max_txsz - ISCSI_PDU_NONPAYLOAD_LEN); + cdev->rx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD, + uinfo.max_rxsz - ISCSI_PDU_NONPAYLOAD_LEN); + return 0; } @@ -1325,7 +1316,6 @@ static void cxgb3i_dev_open(struct t3cdev *t3dev) cdev->rx_credit_thres = cxgb3i_rx_credit_thres; cdev->skb_tx_rsvd = CXGB3I_TX_HEADER_LEN; cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr_norss); - cdev->dev_ddp_cleanup = t3_ddp_cleanup; cdev->itp = &cxgb3i_iscsi_transport; err = cxgb3i_ddp_init(cdev); diff --git a/drivers/scsi/cxgbi/cxgb4i/Kbuild b/drivers/scsi/cxgbi/cxgb4i/Kbuild index 37458643749b..38e03c280417 100644 --- a/drivers/scsi/cxgbi/cxgb4i/Kbuild +++ b/drivers/scsi/cxgbi/cxgb4i/Kbuild @@ -1,3 +1,4 @@ ccflags-y += -I$(srctree)/drivers/net/ethernet/chelsio/cxgb4 +ccflags-y += -I$(srctree)/drivers/net/ethernet/chelsio/libcxgb obj-$(CONFIG_SCSI_CXGB4_ISCSI) += cxgb4i.o diff --git a/drivers/scsi/cxgbi/cxgb4i/Kconfig b/drivers/scsi/cxgbi/cxgb4i/Kconfig index 8c4e423037b6..594f593c8821 100644 --- a/drivers/scsi/cxgbi/cxgb4i/Kconfig +++ b/drivers/scsi/cxgbi/cxgb4i/Kconfig @@ -5,6 +5,7 @@ config SCSI_CXGB4_ISCSI select ETHERNET select NET_VENDOR_CHELSIO select CHELSIO_T4 + select CHELSIO_LIB select SCSI_ISCSI_ATTRS ---help--- This driver supports iSCSI offload for the Chelsio T4 devices. 
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c index 339f6b7f4803..e4ba2d2616cd 100644 --- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c +++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c @@ -1503,7 +1503,7 @@ rel_resource_without_clip: return -EINVAL; } -cxgb4i_cplhandler_func cxgb4i_cplhandlers[NUM_CPL_CMDS] = { +static cxgb4i_cplhandler_func cxgb4i_cplhandlers[NUM_CPL_CMDS] = { [CPL_ACT_ESTABLISH] = do_act_establish, [CPL_ACT_OPEN_RPL] = do_act_open_rpl, [CPL_PEER_CLOSE] = do_peer_close, @@ -1519,7 +1519,7 @@ cxgb4i_cplhandler_func cxgb4i_cplhandlers[NUM_CPL_CMDS] = { [CPL_RX_DATA] = do_rx_data, }; -int cxgb4i_ofld_init(struct cxgbi_device *cdev) +static int cxgb4i_ofld_init(struct cxgbi_device *cdev) { int rc; @@ -1543,24 +1543,22 @@ int cxgb4i_ofld_init(struct cxgbi_device *cdev) return 0; } -/* - * functions to program the pagepod in h/w - */ -#define ULPMEM_IDATA_MAX_NPPODS 4 /* 256/PPOD_SIZE */ -static inline void ulp_mem_io_set_hdr(struct cxgb4_lld_info *lldi, - struct ulp_mem_io *req, - unsigned int wr_len, unsigned int dlen, - unsigned int pm_addr) +static inline void +ulp_mem_io_set_hdr(struct cxgbi_device *cdev, + struct ulp_mem_io *req, + unsigned int wr_len, unsigned int dlen, + unsigned int pm_addr, + int tid) { + struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); struct ulptx_idata *idata = (struct ulptx_idata *)(req + 1); - INIT_ULPTX_WR(req, wr_len, 0, 0); - if (is_t4(lldi->adapter_type)) - req->cmd = htonl(ULPTX_CMD_V(ULP_TX_MEM_WRITE) | - (ULP_MEMIO_ORDER_F)); - else - req->cmd = htonl(ULPTX_CMD_V(ULP_TX_MEM_WRITE) | - (T5_ULP_MEMIO_IMM_F)); + INIT_ULPTX_WR(req, wr_len, 0, tid); + req->wr.wr_hi = htonl(FW_WR_OP_V(FW_ULPTX_WR) | + FW_WR_ATOMIC_V(0)); + req->cmd = htonl(ULPTX_CMD_V(ULP_TX_MEM_WRITE) | + ULP_MEMIO_ORDER_V(is_t4(lldi->adapter_type)) | + T5_ULP_MEMIO_IMM_V(!is_t4(lldi->adapter_type))); req->dlen = htonl(ULP_MEMIO_DATA_LEN_V(dlen >> 5)); req->lock_addr = htonl(ULP_MEMIO_ADDR_V(pm_addr >> 5)); req->len16 = htonl(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16)); @@ -1569,82 +1567,89 @@ static inline void ulp_mem_io_set_hdr(struct cxgb4_lld_info *lldi, idata->len = htonl(dlen); } -static int ddp_ppod_write_idata(struct cxgbi_device *cdev, unsigned int port_id, - struct cxgbi_pagepod_hdr *hdr, unsigned int idx, - unsigned int npods, - struct cxgbi_gather_list *gl, - unsigned int gl_pidx) +static struct sk_buff * +ddp_ppod_init_idata(struct cxgbi_device *cdev, + struct cxgbi_ppm *ppm, + unsigned int idx, unsigned int npods, + unsigned int tid) { - struct cxgbi_ddp_info *ddp = cdev->ddp; - struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); - struct sk_buff *skb; - struct ulp_mem_io *req; - struct ulptx_idata *idata; - struct cxgbi_pagepod *ppod; - unsigned int pm_addr = idx * PPOD_SIZE + ddp->llimit; - unsigned int dlen = PPOD_SIZE * npods; + unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ppm->llimit; + unsigned int dlen = npods << PPOD_SIZE_SHIFT; unsigned int wr_len = roundup(sizeof(struct ulp_mem_io) + sizeof(struct ulptx_idata) + dlen, 16); - unsigned int i; + struct sk_buff *skb = alloc_wr(wr_len, 0, GFP_ATOMIC); - skb = alloc_wr(wr_len, 0, GFP_ATOMIC); if (!skb) { - pr_err("cdev 0x%p, idx %u, npods %u, OOM.\n", - cdev, idx, npods); - return -ENOMEM; + pr_err("%s: %s idx %u, npods %u, OOM.\n", + __func__, ppm->ndev->name, idx, npods); + return NULL; } - req = (struct ulp_mem_io *)skb->head; - set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0); - ulp_mem_io_set_hdr(lldi, req, wr_len, dlen, pm_addr); + ulp_mem_io_set_hdr(cdev, (struct ulp_mem_io 
*)skb->head, wr_len, dlen, + pm_addr, tid); + + return skb; +} + +static int ddp_ppod_write_idata(struct cxgbi_ppm *ppm, struct cxgbi_sock *csk, + struct cxgbi_task_tag_info *ttinfo, + unsigned int idx, unsigned int npods, + struct scatterlist **sg_pp, + unsigned int *sg_off) +{ + struct cxgbi_device *cdev = csk->cdev; + struct sk_buff *skb = ddp_ppod_init_idata(cdev, ppm, idx, npods, + csk->tid); + struct ulp_mem_io *req; + struct ulptx_idata *idata; + struct cxgbi_pagepod *ppod; + int i; + + if (!skb) + return -ENOMEM; + + req = (struct ulp_mem_io *)skb->head; idata = (struct ulptx_idata *)(req + 1); ppod = (struct cxgbi_pagepod *)(idata + 1); - for (i = 0; i < npods; i++, ppod++, gl_pidx += PPOD_PAGES_MAX) { - if (!hdr && !gl) - cxgbi_ddp_ppod_clear(ppod); - else - cxgbi_ddp_ppod_set(ppod, hdr, gl, gl_pidx); - } + for (i = 0; i < npods; i++, ppod++) + cxgbi_ddp_set_one_ppod(ppod, ttinfo, sg_pp, sg_off); + + cxgbi_skcb_set_flag(skb, SKCBF_TX_MEM_WRITE); + cxgbi_skcb_set_flag(skb, SKCBF_TX_FLAG_COMPL); + set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id); + + spin_lock_bh(&csk->lock); + cxgbi_sock_skb_entail(csk, skb); + spin_unlock_bh(&csk->lock); - cxgb4_ofld_send(cdev->ports[port_id], skb); return 0; } -static int ddp_set_map(struct cxgbi_sock *csk, struct cxgbi_pagepod_hdr *hdr, - unsigned int idx, unsigned int npods, - struct cxgbi_gather_list *gl) +static int ddp_set_map(struct cxgbi_ppm *ppm, struct cxgbi_sock *csk, + struct cxgbi_task_tag_info *ttinfo) { + unsigned int pidx = ttinfo->idx; + unsigned int npods = ttinfo->npods; unsigned int i, cnt; int err = 0; + struct scatterlist *sg = ttinfo->sgl; + unsigned int offset = 0; - for (i = 0; i < npods; i += cnt, idx += cnt) { - cnt = npods - i; - if (cnt > ULPMEM_IDATA_MAX_NPPODS) - cnt = ULPMEM_IDATA_MAX_NPPODS; - err = ddp_ppod_write_idata(csk->cdev, csk->port_id, hdr, - idx, cnt, gl, 4 * i); - if (err < 0) - break; - } - return err; -} - -static void ddp_clear_map(struct cxgbi_hba *chba, unsigned int tag, - unsigned int idx, unsigned int npods) -{ - unsigned int i, cnt; - int err; + ttinfo->cid = csk->port_id; - for (i = 0; i < npods; i += cnt, idx += cnt) { + for (i = 0; i < npods; i += cnt, pidx += cnt) { cnt = npods - i; + if (cnt > ULPMEM_IDATA_MAX_NPPODS) cnt = ULPMEM_IDATA_MAX_NPPODS; - err = ddp_ppod_write_idata(chba->cdev, chba->port_id, NULL, - idx, cnt, NULL, 0); + err = ddp_ppod_write_idata(ppm, csk, ttinfo, pidx, cnt, + &sg, &offset); if (err < 0) break; } + + return err; } static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid, @@ -1710,48 +1715,46 @@ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid, return 0; } +static struct cxgbi_ppm *cdev2ppm(struct cxgbi_device *cdev) +{ + return (struct cxgbi_ppm *)(*((struct cxgb4_lld_info *) + (cxgbi_cdev_priv(cdev)))->iscsi_ppm); +} + static int cxgb4i_ddp_init(struct cxgbi_device *cdev) { struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); - struct cxgbi_ddp_info *ddp = cdev->ddp; - unsigned int tagmask, pgsz_factor[4]; - int err; - - if (ddp) { - kref_get(&ddp->refcnt); - pr_warn("cdev 0x%p, ddp 0x%p already set up.\n", - cdev, cdev->ddp); - return -EALREADY; + struct net_device *ndev = cdev->ports[0]; + struct cxgbi_tag_format tformat; + unsigned int ppmax; + int i; + + if (!lldi->vr->iscsi.size) { + pr_warn("%s, iscsi NOT enabled, check config!\n", ndev->name); + return -EACCES; } - err = cxgbi_ddp_init(cdev, lldi->vr->iscsi.start, - lldi->vr->iscsi.start + lldi->vr->iscsi.size - 1, - lldi->iscsi_iolen, lldi->iscsi_iolen); - if 
(err < 0) - return err; + cdev->flags |= CXGBI_FLAG_USE_PPOD_OFLDQ; + ppmax = lldi->vr->iscsi.size >> PPOD_SIZE_SHIFT; - ddp = cdev->ddp; + memset(&tformat, 0, sizeof(struct cxgbi_tag_format)); + for (i = 0; i < 4; i++) + tformat.pgsz_order[i] = (lldi->iscsi_pgsz_order >> (i << 3)) + & 0xF; + cxgbi_tagmask_check(lldi->iscsi_tagmask, &tformat); - tagmask = ddp->idx_mask << PPOD_IDX_SHIFT; - cxgbi_ddp_page_size_factor(pgsz_factor); - cxgb4_iscsi_init(lldi->ports[0], tagmask, pgsz_factor); + cxgbi_ddp_ppm_setup(lldi->iscsi_ppm, cdev, &tformat, ppmax, + lldi->iscsi_llimit, lldi->vr->iscsi.start, 2); cdev->csk_ddp_setup_digest = ddp_setup_conn_digest; cdev->csk_ddp_setup_pgidx = ddp_setup_conn_pgidx; - cdev->csk_ddp_set = ddp_set_map; - cdev->csk_ddp_clear = ddp_clear_map; - - pr_info("cxgb4i 0x%p tag: sw %u, rsvd %u,%u, mask 0x%x.\n", - cdev, cdev->tag_format.sw_bits, cdev->tag_format.rsvd_bits, - cdev->tag_format.rsvd_shift, cdev->tag_format.rsvd_mask); - pr_info("cxgb4i 0x%p, nppods %u, bits %u, mask 0x%x,0x%x pkt %u/%u, " - " %u/%u.\n", - cdev, ddp->nppods, ddp->idx_bits, ddp->idx_mask, - ddp->rsvd_tag_mask, ddp->max_txsz, lldi->iscsi_iolen, - ddp->max_rxsz, lldi->iscsi_iolen); - pr_info("cxgb4i 0x%p max payload size: %u/%u, %u/%u.\n", - cdev, cdev->tx_max_size, ddp->max_txsz, cdev->rx_max_size, - ddp->max_rxsz); + cdev->csk_ddp_set_map = ddp_set_map; + cdev->tx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD, + lldi->iscsi_iolen - ISCSI_PDU_NONPAYLOAD_LEN); + cdev->rx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD, + lldi->iscsi_iolen - ISCSI_PDU_NONPAYLOAD_LEN); + cdev->cdev2ppm = cdev2ppm; + return 0; } diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c index ead83a24bcd1..d1421139e6ea 100644 --- a/drivers/scsi/cxgbi/libcxgbi.c +++ b/drivers/scsi/cxgbi/libcxgbi.c @@ -64,6 +64,14 @@ static DEFINE_MUTEX(cdev_mutex); static LIST_HEAD(cdev_rcu_list); static DEFINE_SPINLOCK(cdev_rcu_lock); +static inline void cxgbi_decode_sw_tag(u32 sw_tag, int *idx, int *age) +{ + if (age) + *age = sw_tag & 0x7FFF; + if (idx) + *idx = (sw_tag >> 16) & 0x7FFF; +} + int cxgbi_device_portmap_create(struct cxgbi_device *cdev, unsigned int base, unsigned int max_conn) { @@ -113,12 +121,7 @@ static inline void cxgbi_device_destroy(struct cxgbi_device *cdev) "cdev 0x%p, p# %u.\n", cdev, cdev->nports); cxgbi_hbas_remove(cdev); cxgbi_device_portmap_cleanup(cdev); - if (cdev->dev_ddp_cleanup) - cdev->dev_ddp_cleanup(cdev); - else - cxgbi_ddp_cleanup(cdev); - if (cdev->ddp) - cxgbi_ddp_cleanup(cdev); + cxgbi_ppm_release(cdev->cdev2ppm(cdev)); if (cdev->pmap.max_connect) cxgbi_free_big_mem(cdev->pmap.port_csk); kfree(cdev); @@ -1182,525 +1185,245 @@ out_err: goto done; } -/* - * Direct Data Placement - - * Directly place the iSCSI Data-In or Data-Out PDU's payload into pre-posted - * final destination host-memory buffers based on the Initiator Task Tag (ITT) - * in Data-In or Target Task Tag (TTT) in Data-Out PDUs. - * The host memory address is programmed into h/w in the format of pagepod - * entries. - * The location of the pagepod entry is encoded into ddp tag which is used as - * the base for ITT/TTT. 
- */ - -static unsigned char ddp_page_order[DDP_PGIDX_MAX] = {0, 1, 2, 4}; -static unsigned char ddp_page_shift[DDP_PGIDX_MAX] = {12, 13, 14, 16}; -static unsigned char page_idx = DDP_PGIDX_MAX; - -static unsigned char sw_tag_idx_bits; -static unsigned char sw_tag_age_bits; - -/* - * Direct-Data Placement page size adjustment - */ -static int ddp_adjust_page_table(void) +static inline void +scmd_get_params(struct scsi_cmnd *sc, struct scatterlist **sgl, + unsigned int *sgcnt, unsigned int *dlen, + unsigned int prot) { - int i; - unsigned int base_order, order; - - if (PAGE_SIZE < (1UL << ddp_page_shift[0])) { - pr_info("PAGE_SIZE 0x%lx too small, min 0x%lx\n", - PAGE_SIZE, 1UL << ddp_page_shift[0]); - return -EINVAL; - } - - base_order = get_order(1UL << ddp_page_shift[0]); - order = get_order(1UL << PAGE_SHIFT); + struct scsi_data_buffer *sdb = prot ? scsi_prot(sc) : scsi_out(sc); - for (i = 0; i < DDP_PGIDX_MAX; i++) { - /* first is the kernel page size, then just doubling */ - ddp_page_order[i] = order - base_order + i; - ddp_page_shift[i] = PAGE_SHIFT + i; - } - return 0; + *sgl = sdb->table.sgl; + *sgcnt = sdb->table.nents; + *dlen = sdb->length; + /* Caution: for protection sdb, sdb->length is invalid */ } -static int ddp_find_page_index(unsigned long pgsz) +void cxgbi_ddp_set_one_ppod(struct cxgbi_pagepod *ppod, + struct cxgbi_task_tag_info *ttinfo, + struct scatterlist **sg_pp, unsigned int *sg_off) { + struct scatterlist *sg = sg_pp ? *sg_pp : NULL; + unsigned int offset = sg_off ? *sg_off : 0; + dma_addr_t addr = 0UL; + unsigned int len = 0; int i; - for (i = 0; i < DDP_PGIDX_MAX; i++) { - if (pgsz == (1UL << ddp_page_shift[i])) - return i; - } - pr_info("ddp page size %lu not supported.\n", pgsz); - return DDP_PGIDX_MAX; -} + memcpy(ppod, &ttinfo->hdr, sizeof(struct cxgbi_pagepod_hdr)); -static void ddp_setup_host_page_size(void) -{ - if (page_idx == DDP_PGIDX_MAX) { - page_idx = ddp_find_page_index(PAGE_SIZE); + if (sg) { + addr = sg_dma_address(sg); + len = sg_dma_len(sg); + } - if (page_idx == DDP_PGIDX_MAX) { - pr_info("system PAGE %lu, update hw.\n", PAGE_SIZE); - if (ddp_adjust_page_table() < 0) { - pr_info("PAGE %lu, disable ddp.\n", PAGE_SIZE); - return; + for (i = 0; i < PPOD_PAGES_MAX; i++) { + if (sg) { + ppod->addr[i] = cpu_to_be64(addr + offset); + offset += PAGE_SIZE; + if (offset == (len + sg->offset)) { + offset = 0; + sg = sg_next(sg); + if (sg) { + addr = sg_dma_address(sg); + len = sg_dma_len(sg); + } } - page_idx = ddp_find_page_index(PAGE_SIZE); + } else { + ppod->addr[i] = 0ULL; } - pr_info("system PAGE %lu, ddp idx %u.\n", PAGE_SIZE, page_idx); } -} - -void cxgbi_ddp_page_size_factor(int *pgsz_factor) -{ - int i; - - for (i = 0; i < DDP_PGIDX_MAX; i++) - pgsz_factor[i] = ddp_page_order[i]; -} -EXPORT_SYMBOL_GPL(cxgbi_ddp_page_size_factor); - -/* - * DDP setup & teardown - */ - -void cxgbi_ddp_ppod_set(struct cxgbi_pagepod *ppod, - struct cxgbi_pagepod_hdr *hdr, - struct cxgbi_gather_list *gl, unsigned int gidx) -{ - int i; - - memcpy(ppod, hdr, sizeof(*hdr)); - for (i = 0; i < (PPOD_PAGES_MAX + 1); i++, gidx++) { - ppod->addr[i] = gidx < gl->nelem ? 
- cpu_to_be64(gl->phys_addr[gidx]) : 0ULL; - } -} -EXPORT_SYMBOL_GPL(cxgbi_ddp_ppod_set); - -void cxgbi_ddp_ppod_clear(struct cxgbi_pagepod *ppod) -{ - memset(ppod, 0, sizeof(*ppod)); -} -EXPORT_SYMBOL_GPL(cxgbi_ddp_ppod_clear); - -static inline int ddp_find_unused_entries(struct cxgbi_ddp_info *ddp, - unsigned int start, unsigned int max, - unsigned int count, - struct cxgbi_gather_list *gl) -{ - unsigned int i, j, k; - /* not enough entries */ - if ((max - start) < count) { - log_debug(1 << CXGBI_DBG_DDP, - "NOT enough entries %u+%u < %u.\n", start, count, max); - return -EBUSY; + /* + * the fifth address needs to be repeated in the next ppod, so do + * not move sg + */ + if (sg_pp) { + *sg_pp = sg; + *sg_off = offset; } - max -= count; - spin_lock(&ddp->map_lock); - for (i = start; i < max;) { - for (j = 0, k = i; j < count; j++, k++) { - if (ddp->gl_map[k]) - break; - } - if (j == count) { - for (j = 0, k = i; j < count; j++, k++) - ddp->gl_map[k] = gl; - spin_unlock(&ddp->map_lock); - return i; + if (offset == len) { + offset = 0; + sg = sg_next(sg); + if (sg) { + addr = sg_dma_address(sg); + len = sg_dma_len(sg); } - i += j + 1; } - spin_unlock(&ddp->map_lock); - log_debug(1 << CXGBI_DBG_DDP, - "NO suitable entries %u available.\n", count); - return -EBUSY; -} - -static inline void ddp_unmark_entries(struct cxgbi_ddp_info *ddp, - int start, int count) -{ - spin_lock(&ddp->map_lock); - memset(&ddp->gl_map[start], 0, - count * sizeof(struct cxgbi_gather_list *)); - spin_unlock(&ddp->map_lock); + ppod->addr[i] = sg ? cpu_to_be64(addr + offset) : 0ULL; } +EXPORT_SYMBOL_GPL(cxgbi_ddp_set_one_ppod); -static inline void ddp_gl_unmap(struct pci_dev *pdev, - struct cxgbi_gather_list *gl) -{ - int i; +/* + * APIs interacting with open-iscsi libraries + */ - for (i = 0; i < gl->nelem; i++) - dma_unmap_page(&pdev->dev, gl->phys_addr[i], PAGE_SIZE, - PCI_DMA_FROMDEVICE); -} +static unsigned char padding[4]; -static inline int ddp_gl_map(struct pci_dev *pdev, - struct cxgbi_gather_list *gl) +void cxgbi_ddp_ppm_setup(void **ppm_pp, struct cxgbi_device *cdev, + struct cxgbi_tag_format *tformat, unsigned int ppmax, + unsigned int llimit, unsigned int start, + unsigned int rsvd_factor) { - int i; + int err = cxgbi_ppm_init(ppm_pp, cdev->ports[0], cdev->pdev, + cdev->lldev, tformat, ppmax, llimit, start, + rsvd_factor); - for (i = 0; i < gl->nelem; i++) { - gl->phys_addr[i] = dma_map_page(&pdev->dev, gl->pages[i], 0, - PAGE_SIZE, - PCI_DMA_FROMDEVICE); - if (unlikely(dma_mapping_error(&pdev->dev, gl->phys_addr[i]))) { - log_debug(1 << CXGBI_DBG_DDP, - "page %d 0x%p, 0x%p dma mapping err.\n", - i, gl->pages[i], pdev); - goto unmap; - } - } - return i; -unmap: - if (i) { - unsigned int nelem = gl->nelem; + if (err >= 0) { + struct cxgbi_ppm *ppm = (struct cxgbi_ppm *)(*ppm_pp); - gl->nelem = i; - ddp_gl_unmap(pdev, gl); - gl->nelem = nelem; + if (ppm->ppmax < 1024 || + ppm->tformat.pgsz_idx_dflt >= DDP_PGIDX_MAX) + cdev->flags |= CXGBI_FLAG_DDP_OFF; + err = 0; + } else { + cdev->flags |= CXGBI_FLAG_DDP_OFF; } - return -EINVAL; -} - -static void ddp_release_gl(struct cxgbi_gather_list *gl, - struct pci_dev *pdev) -{ - ddp_gl_unmap(pdev, gl); - kfree(gl); } +EXPORT_SYMBOL_GPL(cxgbi_ddp_ppm_setup); -static struct cxgbi_gather_list *ddp_make_gl(unsigned int xferlen, - struct scatterlist *sgl, - unsigned int sgcnt, - struct pci_dev *pdev, - gfp_t gfp) +static int cxgbi_ddp_sgl_check(struct scatterlist *sgl, int nents) { - struct cxgbi_gather_list *gl; + int i; + int last_sgidx = nents - 1; struct scatterlist 
*sg = sgl; - struct page *sgpage = sg_page(sg); - unsigned int sglen = sg->length; - unsigned int sgoffset = sg->offset; - unsigned int npages = (xferlen + sgoffset + PAGE_SIZE - 1) >> - PAGE_SHIFT; - int i = 1, j = 0; - - if (xferlen < DDP_THRESHOLD) { - log_debug(1 << CXGBI_DBG_DDP, - "xfer %u < threshold %u, no ddp.\n", - xferlen, DDP_THRESHOLD); - return NULL; - } - - gl = kzalloc(sizeof(struct cxgbi_gather_list) + - npages * (sizeof(dma_addr_t) + - sizeof(struct page *)), gfp); - if (!gl) { - log_debug(1 << CXGBI_DBG_DDP, - "xfer %u, %u pages, OOM.\n", xferlen, npages); - return NULL; - } - log_debug(1 << CXGBI_DBG_DDP, - "xfer %u, sgl %u, gl max %u.\n", xferlen, sgcnt, npages); - - gl->pages = (struct page **)&gl->phys_addr[npages]; - gl->nelem = npages; - gl->length = xferlen; - gl->offset = sgoffset; - gl->pages[0] = sgpage; - - for (i = 1, sg = sg_next(sgl), j = 0; i < sgcnt; - i++, sg = sg_next(sg)) { - struct page *page = sg_page(sg); - - if (sgpage == page && sg->offset == sgoffset + sglen) - sglen += sg->length; - else { - /* make sure the sgl is fit for ddp: - * each has the same page size, and - * all of the middle pages are used completely - */ - if ((j && sgoffset) || ((i != sgcnt - 1) && - ((sglen + sgoffset) & ~PAGE_MASK))) { - log_debug(1 << CXGBI_DBG_DDP, - "page %d/%u, %u + %u.\n", - i, sgcnt, sgoffset, sglen); - goto error_out; - } + for (i = 0; i < nents; i++, sg = sg_next(sg)) { + unsigned int len = sg->length + sg->offset; - j++; - if (j == gl->nelem || sg->offset) { - log_debug(1 << CXGBI_DBG_DDP, - "page %d/%u, offset %u.\n", - j, gl->nelem, sg->offset); - goto error_out; - } - gl->pages[j] = page; - sglen = sg->length; - sgoffset = sg->offset; - sgpage = page; - } - } - gl->nelem = ++j; - - if (ddp_gl_map(pdev, gl) < 0) - goto error_out; - - return gl; - -error_out: - kfree(gl); - return NULL; -} - -static void ddp_tag_release(struct cxgbi_hba *chba, u32 tag) -{ - struct cxgbi_device *cdev = chba->cdev; - struct cxgbi_ddp_info *ddp = cdev->ddp; - u32 idx; - - idx = (tag >> PPOD_IDX_SHIFT) & ddp->idx_mask; - if (idx < ddp->nppods) { - struct cxgbi_gather_list *gl = ddp->gl_map[idx]; - unsigned int npods; - - if (!gl || !gl->nelem) { - pr_warn("tag 0x%x, idx %u, gl 0x%p, %u.\n", - tag, idx, gl, gl ? 
gl->nelem : 0); - return; - } - npods = (gl->nelem + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT; - log_debug(1 << CXGBI_DBG_DDP, - "tag 0x%x, release idx %u, npods %u.\n", - tag, idx, npods); - cdev->csk_ddp_clear(chba, tag, idx, npods); - ddp_unmark_entries(ddp, idx, npods); - ddp_release_gl(gl, ddp->pdev); - } else - pr_warn("tag 0x%x, idx %u > max %u.\n", tag, idx, ddp->nppods); -} - -static int ddp_tag_reserve(struct cxgbi_sock *csk, unsigned int tid, - u32 sw_tag, u32 *tagp, struct cxgbi_gather_list *gl, - gfp_t gfp) -{ - struct cxgbi_device *cdev = csk->cdev; - struct cxgbi_ddp_info *ddp = cdev->ddp; - struct cxgbi_tag_format *tformat = &cdev->tag_format; - struct cxgbi_pagepod_hdr hdr; - unsigned int npods; - int idx = -1; - int err = -ENOMEM; - u32 tag; - - npods = (gl->nelem + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT; - if (ddp->idx_last == ddp->nppods) - idx = ddp_find_unused_entries(ddp, 0, ddp->nppods, - npods, gl); - else { - idx = ddp_find_unused_entries(ddp, ddp->idx_last + 1, - ddp->nppods, npods, - gl); - if (idx < 0 && ddp->idx_last >= npods) { - idx = ddp_find_unused_entries(ddp, 0, - min(ddp->idx_last + npods, ddp->nppods), - npods, gl); + if ((sg->offset & 0x3) || (i && sg->offset) || + ((i != last_sgidx) && len != PAGE_SIZE)) { + log_debug(1 << CXGBI_DBG_DDP, + "sg %u/%u, %u,%u, not aligned.\n", + i, nents, sg->offset, sg->length); + goto err_out; } } - if (idx < 0) { - log_debug(1 << CXGBI_DBG_DDP, - "xferlen %u, gl %u, npods %u NO DDP.\n", - gl->length, gl->nelem, npods); - return idx; - } - - tag = cxgbi_ddp_tag_base(tformat, sw_tag); - tag |= idx << PPOD_IDX_SHIFT; - - hdr.rsvd = 0; - hdr.vld_tid = htonl(PPOD_VALID_FLAG | PPOD_TID(tid)); - hdr.pgsz_tag_clr = htonl(tag & ddp->rsvd_tag_mask); - hdr.max_offset = htonl(gl->length); - hdr.page_offset = htonl(gl->offset); - - err = cdev->csk_ddp_set(csk, &hdr, idx, npods, gl); - if (err < 0) - goto unmark_entries; - - ddp->idx_last = idx; - log_debug(1 << CXGBI_DBG_DDP, - "xfer %u, gl %u,%u, tid 0x%x, tag 0x%x->0x%x(%u,%u).\n", - gl->length, gl->nelem, gl->offset, tid, sw_tag, tag, idx, - npods); - *tagp = tag; return 0; - -unmark_entries: - ddp_unmark_entries(ddp, idx, npods); - return err; +err_out: + return -EINVAL; } -int cxgbi_ddp_reserve(struct cxgbi_sock *csk, unsigned int *tagp, - unsigned int sw_tag, unsigned int xferlen, - struct scatterlist *sgl, unsigned int sgcnt, gfp_t gfp) +static int cxgbi_ddp_reserve(struct cxgbi_conn *cconn, + struct cxgbi_task_data *tdata, u32 sw_tag, + unsigned int xferlen) { + struct cxgbi_sock *csk = cconn->cep->csk; struct cxgbi_device *cdev = csk->cdev; - struct cxgbi_tag_format *tformat = &cdev->tag_format; - struct cxgbi_gather_list *gl; + struct cxgbi_ppm *ppm = cdev->cdev2ppm(cdev); + struct cxgbi_task_tag_info *ttinfo = &tdata->ttinfo; + struct scatterlist *sgl = ttinfo->sgl; + unsigned int sgcnt = ttinfo->nents; + unsigned int sg_offset = sgl->offset; int err; - if (page_idx >= DDP_PGIDX_MAX || !cdev->ddp || - xferlen < DDP_THRESHOLD) { + if (cdev->flags & CXGBI_FLAG_DDP_OFF) { log_debug(1 << CXGBI_DBG_DDP, - "pgidx %u, xfer %u, NO ddp.\n", page_idx, xferlen); + "cdev 0x%p DDP off.\n", cdev); return -EINVAL; } - if (!cxgbi_sw_tag_usable(tformat, sw_tag)) { + if (!ppm || xferlen < DDP_THRESHOLD || !sgcnt || + ppm->tformat.pgsz_idx_dflt >= DDP_PGIDX_MAX) { log_debug(1 << CXGBI_DBG_DDP, - "sw_tag 0x%x NOT usable.\n", sw_tag); + "ppm 0x%p, pgidx %u, xfer %u, sgcnt %u, NO ddp.\n", + ppm, ppm ? 
ppm->tformat.pgsz_idx_dflt : DDP_PGIDX_MAX, + xferlen, ttinfo->nents); return -EINVAL; } - gl = ddp_make_gl(xferlen, sgl, sgcnt, cdev->pdev, gfp); - if (!gl) - return -ENOMEM; + /* make sure the buffer is suitable for ddp */ + if (cxgbi_ddp_sgl_check(sgl, sgcnt) < 0) + return -EINVAL; - err = ddp_tag_reserve(csk, csk->tid, sw_tag, tagp, gl, gfp); - if (err < 0) - ddp_release_gl(gl, cdev->pdev); + ttinfo->nr_pages = (xferlen + sgl->offset + (1 << PAGE_SHIFT) - 1) >> + PAGE_SHIFT; - return err; -} + /* + * the ddp tag will be used for the itt in the outgoing pdu, + * the itt genrated by libiscsi is saved in the ppm and can be + * retrieved via the ddp tag + */ + err = cxgbi_ppm_ppods_reserve(ppm, ttinfo->nr_pages, 0, &ttinfo->idx, + &ttinfo->tag, (unsigned long)sw_tag); + if (err < 0) { + cconn->ddp_full++; + return err; + } + ttinfo->npods = err; -static void ddp_destroy(struct kref *kref) -{ - struct cxgbi_ddp_info *ddp = container_of(kref, - struct cxgbi_ddp_info, - refcnt); - struct cxgbi_device *cdev = ddp->cdev; - int i = 0; + /* setup dma from scsi command sgl */ + sgl->offset = 0; + err = dma_map_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE); + sgl->offset = sg_offset; + if (err == 0) { + pr_info("%s: 0x%x, xfer %u, sgl %u dma mapping err.\n", + __func__, sw_tag, xferlen, sgcnt); + goto rel_ppods; + } + if (err != ttinfo->nr_pages) { + log_debug(1 << CXGBI_DBG_DDP, + "%s: sw tag 0x%x, xfer %u, sgl %u, dma count %d.\n", + __func__, sw_tag, xferlen, sgcnt, err); + } - pr_info("kref 0, destroy ddp 0x%p, cdev 0x%p.\n", ddp, cdev); + ttinfo->flags |= CXGBI_PPOD_INFO_FLAG_MAPPED; + ttinfo->cid = csk->port_id; - while (i < ddp->nppods) { - struct cxgbi_gather_list *gl = ddp->gl_map[i]; + cxgbi_ppm_make_ppod_hdr(ppm, ttinfo->tag, csk->tid, sgl->offset, + xferlen, &ttinfo->hdr); - if (gl) { - int npods = (gl->nelem + PPOD_PAGES_MAX - 1) - >> PPOD_PAGES_SHIFT; - pr_info("cdev 0x%p, ddp %d + %d.\n", cdev, i, npods); - kfree(gl); - i += npods; - } else - i++; + if (cdev->flags & CXGBI_FLAG_USE_PPOD_OFLDQ) { + /* write ppod from xmit_pdu (of iscsi_scsi_command pdu) */ + ttinfo->flags |= CXGBI_PPOD_INFO_FLAG_VALID; + } else { + /* write ppod from control queue now */ + err = cdev->csk_ddp_set_map(ppm, csk, ttinfo); + if (err < 0) + goto rel_ppods; } - cxgbi_free_big_mem(ddp); -} - -int cxgbi_ddp_cleanup(struct cxgbi_device *cdev) -{ - struct cxgbi_ddp_info *ddp = cdev->ddp; - log_debug(1 << CXGBI_DBG_DDP, - "cdev 0x%p, release ddp 0x%p.\n", cdev, ddp); - cdev->ddp = NULL; - if (ddp) - return kref_put(&ddp->refcnt, ddp_destroy); return 0; -} -EXPORT_SYMBOL_GPL(cxgbi_ddp_cleanup); -int cxgbi_ddp_init(struct cxgbi_device *cdev, - unsigned int llimit, unsigned int ulimit, - unsigned int max_txsz, unsigned int max_rxsz) -{ - struct cxgbi_ddp_info *ddp; - unsigned int ppmax, bits; +rel_ppods: + cxgbi_ppm_ppod_release(ppm, ttinfo->idx); - ppmax = (ulimit - llimit + 1) >> PPOD_SIZE_SHIFT; - bits = __ilog2_u32(ppmax) + 1; - if (bits > PPOD_IDX_MAX_SIZE) - bits = PPOD_IDX_MAX_SIZE; - ppmax = (1 << (bits - 1)) - 1; - - ddp = cxgbi_alloc_big_mem(sizeof(struct cxgbi_ddp_info) + - ppmax * (sizeof(struct cxgbi_gather_list *) + - sizeof(struct sk_buff *)), - GFP_KERNEL); - if (!ddp) { - pr_warn("cdev 0x%p, ddp ppmax %u OOM.\n", cdev, ppmax); - return -ENOMEM; + if (ttinfo->flags & CXGBI_PPOD_INFO_FLAG_MAPPED) { + ttinfo->flags &= ~CXGBI_PPOD_INFO_FLAG_MAPPED; + dma_unmap_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE); } - ddp->gl_map = (struct cxgbi_gather_list **)(ddp + 1); - cdev->ddp = ddp; - - 
spin_lock_init(&ddp->map_lock); - kref_init(&ddp->refcnt); - - ddp->cdev = cdev; - ddp->pdev = cdev->pdev; - ddp->llimit = llimit; - ddp->ulimit = ulimit; - ddp->max_txsz = min_t(unsigned int, max_txsz, ULP2_MAX_PKT_SIZE); - ddp->max_rxsz = min_t(unsigned int, max_rxsz, ULP2_MAX_PKT_SIZE); - ddp->nppods = ppmax; - ddp->idx_last = ppmax; - ddp->idx_bits = bits; - ddp->idx_mask = (1 << bits) - 1; - ddp->rsvd_tag_mask = (1 << (bits + PPOD_IDX_SHIFT)) - 1; - - cdev->tag_format.sw_bits = sw_tag_idx_bits + sw_tag_age_bits; - cdev->tag_format.rsvd_bits = ddp->idx_bits; - cdev->tag_format.rsvd_shift = PPOD_IDX_SHIFT; - cdev->tag_format.rsvd_mask = (1 << cdev->tag_format.rsvd_bits) - 1; - - pr_info("%s tag format, sw %u, rsvd %u,%u, mask 0x%x.\n", - cdev->ports[0]->name, cdev->tag_format.sw_bits, - cdev->tag_format.rsvd_bits, cdev->tag_format.rsvd_shift, - cdev->tag_format.rsvd_mask); - - cdev->tx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD, - ddp->max_txsz - ISCSI_PDU_NONPAYLOAD_LEN); - cdev->rx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD, - ddp->max_rxsz - ISCSI_PDU_NONPAYLOAD_LEN); - - log_debug(1 << CXGBI_DBG_DDP, - "%s max payload size: %u/%u, %u/%u.\n", - cdev->ports[0]->name, cdev->tx_max_size, ddp->max_txsz, - cdev->rx_max_size, ddp->max_rxsz); - return 0; + return -EINVAL; } -EXPORT_SYMBOL_GPL(cxgbi_ddp_init); - -/* - * APIs interacting with open-iscsi libraries - */ - -static unsigned char padding[4]; static void task_release_itt(struct iscsi_task *task, itt_t hdr_itt) { struct scsi_cmnd *sc = task->sc; struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data; struct cxgbi_conn *cconn = tcp_conn->dd_data; - struct cxgbi_hba *chba = cconn->chba; - struct cxgbi_tag_format *tformat = &chba->cdev->tag_format; + struct cxgbi_device *cdev = cconn->chba->cdev; + struct cxgbi_ppm *ppm = cdev->cdev2ppm(cdev); u32 tag = ntohl((__force u32)hdr_itt); log_debug(1 << CXGBI_DBG_DDP, - "cdev 0x%p, release tag 0x%x.\n", chba->cdev, tag); + "cdev 0x%p, task 0x%p, release tag 0x%x.\n", + cdev, task, tag); if (sc && (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_FROM_DEVICE) && - cxgbi_is_ddp_tag(tformat, tag)) - ddp_tag_release(chba, tag); + cxgbi_ppm_is_ddp_tag(ppm, tag)) { + struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task); + struct cxgbi_task_tag_info *ttinfo = &tdata->ttinfo; + + if (!(cdev->flags & CXGBI_FLAG_USE_PPOD_OFLDQ)) + cdev->csk_ddp_clear_map(cdev, ppm, ttinfo); + cxgbi_ppm_ppod_release(ppm, ttinfo->idx); + dma_unmap_sg(&ppm->pdev->dev, ttinfo->sgl, ttinfo->nents, + DMA_FROM_DEVICE); + } +} + +static inline u32 cxgbi_build_sw_tag(u32 idx, u32 age) +{ + /* assume idx and age both are < 0x7FFF (32767) */ + return (idx << 16) | age; } static int task_reserve_itt(struct iscsi_task *task, itt_t *hdr_itt) @@ -1710,34 +1433,41 @@ static int task_reserve_itt(struct iscsi_task *task, itt_t *hdr_itt) struct iscsi_session *sess = conn->session; struct iscsi_tcp_conn *tcp_conn = conn->dd_data; struct cxgbi_conn *cconn = tcp_conn->dd_data; - struct cxgbi_hba *chba = cconn->chba; - struct cxgbi_tag_format *tformat = &chba->cdev->tag_format; - u32 sw_tag = (sess->age << cconn->task_idx_bits) | task->itt; + struct cxgbi_device *cdev = cconn->chba->cdev; + struct cxgbi_ppm *ppm = cdev->cdev2ppm(cdev); + u32 sw_tag = cxgbi_build_sw_tag(task->itt, sess->age); u32 tag = 0; int err = -EINVAL; if (sc && - (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_FROM_DEVICE)) { - err = cxgbi_ddp_reserve(cconn->cep->csk, &tag, sw_tag, - scsi_in(sc)->length, - scsi_in(sc)->table.sgl, - 
scsi_in(sc)->table.nents, - GFP_ATOMIC); - if (err < 0) - log_debug(1 << CXGBI_DBG_DDP, - "csk 0x%p, R task 0x%p, %u,%u, no ddp.\n", - cconn->cep->csk, task, scsi_in(sc)->length, - scsi_in(sc)->table.nents); + (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_FROM_DEVICE) + ) { + struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task); + struct cxgbi_task_tag_info *ttinfo = &tdata->ttinfo; + + scmd_get_params(sc, &ttinfo->sgl, &ttinfo->nents, + &tdata->dlen, 0); + err = cxgbi_ddp_reserve(cconn, tdata, sw_tag, tdata->dlen); + if (!err) + tag = ttinfo->tag; + else + log_debug(1 << CXGBI_DBG_DDP, + "csk 0x%p, R task 0x%p, %u,%u, no ddp.\n", + cconn->cep->csk, task, tdata->dlen, + ttinfo->nents); } - if (err < 0) - tag = cxgbi_set_non_ddp_tag(tformat, sw_tag); + if (err < 0) { + err = cxgbi_ppm_make_non_ddp_tag(ppm, sw_tag, &tag); + if (err < 0) + return err; + } /* the itt need to sent in big-endian order */ *hdr_itt = (__force itt_t)htonl(tag); log_debug(1 << CXGBI_DBG_DDP, - "cdev 0x%p, task 0x%p, 0x%x(0x%x,0x%x)->0x%x/0x%x.\n", - chba->cdev, task, sw_tag, task->itt, sess->age, tag, *hdr_itt); + "cdev 0x%p, task 0x%p, 0x%x(0x%x,0x%x)->0x%x/0x%x.\n", + cdev, task, sw_tag, task->itt, sess->age, tag, *hdr_itt); return 0; } @@ -1746,19 +1476,24 @@ void cxgbi_parse_pdu_itt(struct iscsi_conn *conn, itt_t itt, int *idx, int *age) struct iscsi_tcp_conn *tcp_conn = conn->dd_data; struct cxgbi_conn *cconn = tcp_conn->dd_data; struct cxgbi_device *cdev = cconn->chba->cdev; - u32 tag = ntohl((__force u32) itt); + struct cxgbi_ppm *ppm = cdev->cdev2ppm(cdev); + u32 tag = ntohl((__force u32)itt); u32 sw_bits; - sw_bits = cxgbi_tag_nonrsvd_bits(&cdev->tag_format, tag); - if (idx) - *idx = sw_bits & ((1 << cconn->task_idx_bits) - 1); - if (age) - *age = (sw_bits >> cconn->task_idx_bits) & ISCSI_AGE_MASK; + if (ppm) { + if (cxgbi_ppm_is_ddp_tag(ppm, tag)) + sw_bits = cxgbi_ppm_get_tag_caller_data(ppm, tag); + else + sw_bits = cxgbi_ppm_decode_non_ddp_tag(ppm, tag); + } else { + sw_bits = tag; + } + cxgbi_decode_sw_tag(sw_bits, idx, age); log_debug(1 << CXGBI_DBG_DDP, - "cdev 0x%p, tag 0x%x/0x%x, -> 0x%x(0x%x,0x%x).\n", - cdev, tag, itt, sw_bits, idx ? *idx : 0xFFFFF, - age ? *age : 0xFF); + "cdev 0x%p, tag 0x%x/0x%x, -> 0x%x(0x%x,0x%x).\n", + cdev, tag, itt, sw_bits, idx ? *idx : 0xFFFFF, + age ? *age : 0xFF); } EXPORT_SYMBOL_GPL(cxgbi_parse_pdu_itt); @@ -2260,7 +1995,9 @@ int cxgbi_conn_xmit_pdu(struct iscsi_task *task) struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data; struct cxgbi_conn *cconn = tcp_conn->dd_data; struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task); + struct cxgbi_task_tag_info *ttinfo = &tdata->ttinfo; struct sk_buff *skb = tdata->skb; + struct cxgbi_sock *csk = NULL; unsigned int datalen; int err; @@ -2270,8 +2007,28 @@ int cxgbi_conn_xmit_pdu(struct iscsi_task *task) return 0; } + if (cconn && cconn->cep) + csk = cconn->cep->csk; + if (!csk) { + log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX, + "task 0x%p, csk gone.\n", task); + return -EPIPE; + } + datalen = skb->data_len; tdata->skb = NULL; + + /* write ppod first if using ofldq to write ppod */ + if (ttinfo->flags & CXGBI_PPOD_INFO_FLAG_VALID) { + struct cxgbi_ppm *ppm = csk->cdev->cdev2ppm(csk->cdev); + + ttinfo->flags &= ~CXGBI_PPOD_INFO_FLAG_VALID; + if (csk->cdev->csk_ddp_set_map(ppm, csk, ttinfo) < 0) + pr_err("task 0x%p, ppod writing using ofldq failed.\n", + task); + /* continue. 
Let fl get the data */ + } + err = cxgbi_sock_send_pdus(cconn->cep->csk, skb); if (err > 0) { int pdulen = err; @@ -2313,12 +2070,14 @@ EXPORT_SYMBOL_GPL(cxgbi_conn_xmit_pdu); void cxgbi_cleanup_task(struct iscsi_task *task) { + struct iscsi_tcp_task *tcp_task = task->dd_data; struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task); log_debug(1 << CXGBI_DBG_ISCSI, "task 0x%p, skb 0x%p, itt 0x%x.\n", task, tdata->skb, task->hdr_itt); + tcp_task->dd_data = NULL; /* never reached the xmit task callout */ if (tdata->skb) __kfree_skb(tdata->skb); @@ -2528,6 +2287,7 @@ int cxgbi_bind_conn(struct iscsi_cls_session *cls_session, struct iscsi_conn *conn = cls_conn->dd_data; struct iscsi_tcp_conn *tcp_conn = conn->dd_data; struct cxgbi_conn *cconn = tcp_conn->dd_data; + struct cxgbi_ppm *ppm; struct iscsi_endpoint *ep; struct cxgbi_endpoint *cep; struct cxgbi_sock *csk; @@ -2540,7 +2300,10 @@ int cxgbi_bind_conn(struct iscsi_cls_session *cls_session, /* setup ddp pagesize */ cep = ep->dd_data; csk = cep->csk; - err = csk->cdev->csk_ddp_setup_pgidx(csk, csk->tid, page_idx, 0); + + ppm = csk->cdev->cdev2ppm(csk->cdev); + err = csk->cdev->csk_ddp_setup_pgidx(csk, csk->tid, + ppm->tformat.pgsz_idx_dflt, 0); if (err < 0) return err; @@ -2915,16 +2678,7 @@ EXPORT_SYMBOL_GPL(cxgbi_attr_is_visible); static int __init libcxgbi_init_module(void) { - sw_tag_idx_bits = (__ilog2_u32(ISCSI_ITT_MASK)) + 1; - sw_tag_age_bits = (__ilog2_u32(ISCSI_AGE_MASK)) + 1; - pr_info("%s", version); - - pr_info("tag itt 0x%x, %u bits, age 0x%x, %u bits.\n", - ISCSI_ITT_MASK, sw_tag_idx_bits, - ISCSI_AGE_MASK, sw_tag_age_bits); - - ddp_setup_host_page_size(); return 0; } diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h index 9842301f7980..e7802738f5d2 100644 --- a/drivers/scsi/cxgbi/libcxgbi.h +++ b/drivers/scsi/cxgbi/libcxgbi.h @@ -24,9 +24,12 @@ #include <linux/scatterlist.h> #include <linux/skbuff.h> #include <linux/vmalloc.h> +#include <linux/version.h> #include <scsi/scsi_device.h> #include <scsi/libiscsi_tcp.h> +#include <libcxgb_ppm.h> + enum cxgbi_dbg_flag { CXGBI_DBG_ISCSI, CXGBI_DBG_DDP, @@ -84,92 +87,11 @@ static inline unsigned int cxgbi_ulp_extra_len(int submode) return ulp2_extra_len[submode & 3]; } -/* - * struct pagepod_hdr, pagepod - pagepod format - */ - #define CPL_RX_DDP_STATUS_DDP_SHIFT 16 /* ddp'able */ #define CPL_RX_DDP_STATUS_PAD_SHIFT 19 /* pad error */ #define CPL_RX_DDP_STATUS_HCRC_SHIFT 20 /* hcrc error */ #define CPL_RX_DDP_STATUS_DCRC_SHIFT 21 /* dcrc error */ -struct cxgbi_pagepod_hdr { - u32 vld_tid; - u32 pgsz_tag_clr; - u32 max_offset; - u32 page_offset; - u64 rsvd; -}; - -#define PPOD_PAGES_MAX 4 -struct cxgbi_pagepod { - struct cxgbi_pagepod_hdr hdr; - u64 addr[PPOD_PAGES_MAX + 1]; -}; - -struct cxgbi_tag_format { - unsigned char sw_bits; - unsigned char rsvd_bits; - unsigned char rsvd_shift; - unsigned char filler[1]; - u32 rsvd_mask; -}; - -struct cxgbi_gather_list { - unsigned int tag; - unsigned int length; - unsigned int offset; - unsigned int nelem; - struct page **pages; - dma_addr_t phys_addr[0]; -}; - -struct cxgbi_ddp_info { - struct kref refcnt; - struct cxgbi_device *cdev; - struct pci_dev *pdev; - unsigned int max_txsz; - unsigned int max_rxsz; - unsigned int llimit; - unsigned int ulimit; - unsigned int nppods; - unsigned int idx_last; - unsigned char idx_bits; - unsigned char filler[3]; - unsigned int idx_mask; - unsigned int rsvd_tag_mask; - spinlock_t map_lock; - struct cxgbi_gather_list **gl_map; -}; - -#define DDP_PGIDX_MAX 4 -#define 
DDP_THRESHOLD 2048 - -#define PPOD_PAGES_SHIFT 2 /* 4 pages per pod */ - -#define PPOD_SIZE sizeof(struct cxgbi_pagepod) /* 64 */ -#define PPOD_SIZE_SHIFT 6 - -#define ULPMEM_DSGL_MAX_NPPODS 16 /* 1024/PPOD_SIZE */ -#define ULPMEM_IDATA_MAX_NPPODS 4 /* 256/PPOD_SIZE */ -#define PCIE_MEMWIN_MAX_NPPODS 16 /* 1024/PPOD_SIZE */ - -#define PPOD_COLOR_SHIFT 0 -#define PPOD_COLOR(x) ((x) << PPOD_COLOR_SHIFT) - -#define PPOD_IDX_SHIFT 6 -#define PPOD_IDX_MAX_SIZE 24 - -#define PPOD_TID_SHIFT 0 -#define PPOD_TID(x) ((x) << PPOD_TID_SHIFT) - -#define PPOD_TAG_SHIFT 6 -#define PPOD_TAG(x) ((x) << PPOD_TAG_SHIFT) - -#define PPOD_VALID_SHIFT 24 -#define PPOD_VALID(x) ((x) << PPOD_VALID_SHIFT) -#define PPOD_VALID_FLAG PPOD_VALID(1U) - /* * sge_opaque_hdr - * Opaque version of structure the SGE stores at skb->head of TX_DATA packets @@ -279,6 +201,8 @@ struct cxgbi_skb_tx_cb { enum cxgbi_skcb_flags { SKCBF_TX_NEED_HDR, /* packet needs a header */ + SKCBF_TX_MEM_WRITE, /* memory write */ + SKCBF_TX_FLAG_COMPL, /* wr completion flag */ SKCBF_RX_COALESCED, /* received whole pdu */ SKCBF_RX_HDR, /* received pdu header */ SKCBF_RX_DATA, /* received pdu payload */ @@ -527,6 +451,9 @@ struct cxgbi_ports_map { #define CXGBI_FLAG_DEV_T4 0x2 #define CXGBI_FLAG_ADAPTER_RESET 0x4 #define CXGBI_FLAG_IPV4_SET 0x10 +#define CXGBI_FLAG_USE_PPOD_OFLDQ 0x40 +#define CXGBI_FLAG_DDP_OFF 0x100 + struct cxgbi_device { struct list_head list_head; struct list_head rcu_node; @@ -548,15 +475,14 @@ struct cxgbi_device { unsigned int tx_max_size; unsigned int rx_max_size; struct cxgbi_ports_map pmap; - struct cxgbi_tag_format tag_format; - struct cxgbi_ddp_info *ddp; void (*dev_ddp_cleanup)(struct cxgbi_device *); - int (*csk_ddp_set)(struct cxgbi_sock *, struct cxgbi_pagepod_hdr *, - unsigned int, unsigned int, - struct cxgbi_gather_list *); - void (*csk_ddp_clear)(struct cxgbi_hba *, - unsigned int, unsigned int, unsigned int); + struct cxgbi_ppm* (*cdev2ppm)(struct cxgbi_device *); + int (*csk_ddp_set_map)(struct cxgbi_ppm *, struct cxgbi_sock *, + struct cxgbi_task_tag_info *); + void (*csk_ddp_clear_map)(struct cxgbi_device *cdev, + struct cxgbi_ppm *, + struct cxgbi_task_tag_info *); int (*csk_ddp_setup_digest)(struct cxgbi_sock *, unsigned int, int, int, int); int (*csk_ddp_setup_pgidx)(struct cxgbi_sock *, @@ -580,6 +506,8 @@ struct cxgbi_conn { struct iscsi_conn *iconn; struct cxgbi_hba *chba; u32 task_idx_bits; + unsigned int ddp_full; + unsigned int ddp_tag_full; }; struct cxgbi_endpoint { @@ -593,85 +521,15 @@ struct cxgbi_task_data { unsigned short nr_frags; struct page_frag frags[MAX_PDU_FRAGS]; struct sk_buff *skb; + unsigned int dlen; unsigned int offset; unsigned int count; unsigned int sgoffset; + struct cxgbi_task_tag_info ttinfo; }; #define iscsi_task_cxgbi_data(task) \ ((task)->dd_data + sizeof(struct iscsi_tcp_task)) -static inline int cxgbi_is_ddp_tag(struct cxgbi_tag_format *tformat, u32 tag) -{ - return !(tag & (1 << (tformat->rsvd_bits + tformat->rsvd_shift - 1))); -} - -static inline int cxgbi_sw_tag_usable(struct cxgbi_tag_format *tformat, - u32 sw_tag) -{ - sw_tag >>= (32 - tformat->rsvd_bits); - return !sw_tag; -} - -static inline u32 cxgbi_set_non_ddp_tag(struct cxgbi_tag_format *tformat, - u32 sw_tag) -{ - unsigned char shift = tformat->rsvd_bits + tformat->rsvd_shift - 1; - u32 mask = (1 << shift) - 1; - - if (sw_tag && (sw_tag & ~mask)) { - u32 v1 = sw_tag & ((1 << shift) - 1); - u32 v2 = (sw_tag >> (shift - 1)) << shift; - - return v2 | v1 | 1 << shift; - } - - return sw_tag | 1 << shift; -} - 
-static inline u32 cxgbi_ddp_tag_base(struct cxgbi_tag_format *tformat, - u32 sw_tag) -{ - u32 mask = (1 << tformat->rsvd_shift) - 1; - - if (sw_tag && (sw_tag & ~mask)) { - u32 v1 = sw_tag & mask; - u32 v2 = sw_tag >> tformat->rsvd_shift; - - v2 <<= tformat->rsvd_bits + tformat->rsvd_shift; - - return v2 | v1; - } - - return sw_tag; -} - -static inline u32 cxgbi_tag_rsvd_bits(struct cxgbi_tag_format *tformat, - u32 tag) -{ - if (cxgbi_is_ddp_tag(tformat, tag)) - return (tag >> tformat->rsvd_shift) & tformat->rsvd_mask; - - return 0; -} - -static inline u32 cxgbi_tag_nonrsvd_bits(struct cxgbi_tag_format *tformat, - u32 tag) -{ - unsigned char shift = tformat->rsvd_bits + tformat->rsvd_shift - 1; - u32 v1, v2; - - if (cxgbi_is_ddp_tag(tformat, tag)) { - v1 = tag & ((1 << tformat->rsvd_shift) - 1); - v2 = (tag >> (shift + 1)) << tformat->rsvd_shift; - } else { - u32 mask = (1 << shift) - 1; - tag &= ~(1 << shift); - v1 = tag & mask; - v2 = (tag >> 1) & ~mask; - } - return v1 | v2; -} - static inline void *cxgbi_alloc_big_mem(unsigned int size, gfp_t gfp) { @@ -749,7 +607,11 @@ int cxgbi_ddp_init(struct cxgbi_device *, unsigned int, unsigned int, unsigned int, unsigned int); int cxgbi_ddp_cleanup(struct cxgbi_device *); void cxgbi_ddp_page_size_factor(int *); -void cxgbi_ddp_ppod_clear(struct cxgbi_pagepod *); -void cxgbi_ddp_ppod_set(struct cxgbi_pagepod *, struct cxgbi_pagepod_hdr *, - struct cxgbi_gather_list *, unsigned int); +void cxgbi_ddp_set_one_ppod(struct cxgbi_pagepod *, + struct cxgbi_task_tag_info *, + struct scatterlist **sg_pp, unsigned int *sg_off); +void cxgbi_ddp_ppm_setup(void **ppm_pp, struct cxgbi_device *, + struct cxgbi_tag_format *, unsigned int ppmax, + unsigned int llimit, unsigned int start, + unsigned int rsvd_factor); #endif /*__LIBCXGBI_H__*/ diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c index 8fb9643fe6e3..860008d42466 100644 --- a/drivers/scsi/cxlflash/main.c +++ b/drivers/scsi/cxlflash/main.c @@ -765,6 +765,67 @@ static void term_afu(struct cxlflash_cfg *cfg) } /** + * notify_shutdown() - notifies device of pending shutdown + * @cfg: Internal structure associated with the host. + * @wait: Whether to wait for shutdown processing to complete. + * + * This function will notify the AFU that the adapter is being shutdown + * and will wait for shutdown processing to complete if wait is true. + * This notification should flush pending I/Os to the device and halt + * further I/Os until the next AFU reset is issued and device restarted. 
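The wait loop in notify_shutdown() below polls a status register and backs off a little longer after each miss (msleep(100 * retry_cnt)). A standalone sketch of that bounded, growing-delay poll; the retry limit and the check_done() stub are invented for illustration and are not the driver's values:

    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    #define RETRY_LIMIT 5   /* illustrative bound only */

    /* stand-in for reading the hardware status register */
    static bool check_done(void)
    {
        static int calls;
        return ++calls >= 3;    /* pretend the device finishes on the 3rd poll */
    }

    static void msleep_ms(long ms)
    {
        struct timespec ts = { .tv_sec = ms / 1000,
                               .tv_nsec = (ms % 1000) * 1000000L };
        nanosleep(&ts, NULL);
    }

    int main(void)
    {
        int retry_cnt = 0;

        while (true) {
            if (check_done()) {
                puts("shutdown processing complete");
                break;
            }
            if (++retry_cnt >= RETRY_LIMIT) {
                fprintf(stderr, "shutdown processing not yet completed\n");
                break;
            }
            msleep_ms(100L * retry_cnt);   /* grow the delay each retry */
        }
        return 0;
    }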
+ */ +static void notify_shutdown(struct cxlflash_cfg *cfg, bool wait) +{ + struct afu *afu = cfg->afu; + struct device *dev = &cfg->dev->dev; + struct sisl_global_map __iomem *global = &afu->afu_map->global; + struct dev_dependent_vals *ddv; + u64 reg, status; + int i, retry_cnt = 0; + + ddv = (struct dev_dependent_vals *)cfg->dev_id->driver_data; + if (!(ddv->flags & CXLFLASH_NOTIFY_SHUTDOWN)) + return; + + /* Notify AFU */ + for (i = 0; i < NUM_FC_PORTS; i++) { + reg = readq_be(&global->fc_regs[i][FC_CONFIG2 / 8]); + reg |= SISL_FC_SHUTDOWN_NORMAL; + writeq_be(reg, &global->fc_regs[i][FC_CONFIG2 / 8]); + } + + if (!wait) + return; + + /* Wait up to 1.5 seconds for shutdown processing to complete */ + for (i = 0; i < NUM_FC_PORTS; i++) { + retry_cnt = 0; + while (true) { + status = readq_be(&global->fc_regs[i][FC_STATUS / 8]); + if (status & SISL_STATUS_SHUTDOWN_COMPLETE) + break; + if (++retry_cnt >= MC_RETRY_CNT) { + dev_dbg(dev, "%s: port %d shutdown processing " + "not yet completed\n", __func__, i); + break; + } + msleep(100 * retry_cnt); + } + } +} + +/** + * cxlflash_shutdown() - shutdown handler + * @pdev: PCI device associated with the host. + */ +static void cxlflash_shutdown(struct pci_dev *pdev) +{ + struct cxlflash_cfg *cfg = pci_get_drvdata(pdev); + + notify_shutdown(cfg, false); +} + +/** * cxlflash_remove() - PCI entry point to tear down host * @pdev: PCI device associated with the host. * @@ -785,6 +846,9 @@ static void cxlflash_remove(struct pci_dev *pdev) cfg->tmf_slock); spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags); + /* Notify AFU and wait for shutdown processing to complete */ + notify_shutdown(cfg, true); + cfg->state = STATE_FAILTERM; cxlflash_stop_term_user_contexts(cfg); @@ -1916,6 +1980,19 @@ static int afu_reset(struct cxlflash_cfg *cfg) } /** + * drain_ioctls() - wait until all currently executing ioctls have completed + * @cfg: Internal structure associated with the host. + * + * Obtain write access to read/write semaphore that wraps ioctl + * handling to 'drain' ioctls currently executing. + */ +static void drain_ioctls(struct cxlflash_cfg *cfg) +{ + down_write(&cfg->ioctl_rwsem); + up_write(&cfg->ioctl_rwsem); +} + +/** * cxlflash_eh_device_reset_handler() - reset a single LUN * @scp: SCSI command to send. * @@ -1986,6 +2063,7 @@ static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp) switch (cfg->state) { case STATE_NORMAL: cfg->state = STATE_RESET; + drain_ioctls(cfg); cxlflash_mark_contexts_error(cfg); rcr = afu_reset(cfg); if (rcr) { @@ -2319,8 +2397,10 @@ static struct scsi_host_template driver_template = { /* * Device dependent values */ -static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS }; -static struct dev_dependent_vals dev_flash_gt_vals = { CXLFLASH_MAX_SECTORS }; +static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS, + 0ULL }; +static struct dev_dependent_vals dev_flash_gt_vals = { CXLFLASH_MAX_SECTORS, + CXLFLASH_NOTIFY_SHUTDOWN }; /* * PCI device binding table @@ -2504,19 +2584,6 @@ out_remove: } /** - * drain_ioctls() - wait until all currently executing ioctls have completed - * @cfg: Internal structure associated with the host. - * - * Obtain write access to read/write semaphore that wraps ioctl - * handling to 'drain' ioctls currently executing. - */ -static void drain_ioctls(struct cxlflash_cfg *cfg) -{ - down_write(&cfg->ioctl_rwsem); - up_write(&cfg->ioctl_rwsem); -} - -/** * cxlflash_pci_error_detected() - called when a PCI error is detected * @pdev: PCI device struct. 
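drain_ioctls() in the cxlflash hunk above exploits a property of reader/writer semaphores: a writer cannot enter until every reader that already entered has left, so taking and immediately dropping the semaphore for writing "drains" in-flight readers. A userspace analogue of that idiom with POSIX rwlocks; the handler body is a placeholder:

    #include <pthread.h>

    static pthread_rwlock_t ioctl_rwsem = PTHREAD_RWLOCK_INITIALIZER;

    /* each "ioctl" runs as a reader */
    static void handle_ioctl(void)
    {
        pthread_rwlock_rdlock(&ioctl_rwsem);
        /* ... do the work ... */
        pthread_rwlock_unlock(&ioctl_rwsem);
    }

    /* wait until every ioctl that already started has finished */
    static void drain_ioctls(void)
    {
        pthread_rwlock_wrlock(&ioctl_rwsem);   /* blocks while readers are inside */
        pthread_rwlock_unlock(&ioctl_rwsem);
    }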
* @state: PCI channel state. @@ -2610,6 +2677,7 @@ static struct pci_driver cxlflash_driver = { .id_table = cxlflash_pci_table, .probe = cxlflash_probe, .remove = cxlflash_remove, + .shutdown = cxlflash_shutdown, .err_handler = &cxlflash_err_handler, }; diff --git a/drivers/scsi/cxlflash/main.h b/drivers/scsi/cxlflash/main.h index eb9d8f730b38..e43545c86bcf 100644 --- a/drivers/scsi/cxlflash/main.h +++ b/drivers/scsi/cxlflash/main.h @@ -88,6 +88,8 @@ enum undo_level { struct dev_dependent_vals { u64 max_sectors; + u64 flags; +#define CXLFLASH_NOTIFY_SHUTDOWN 0x0000000000000001ULL }; struct asyc_intr_info { @@ -100,8 +102,4 @@ struct asyc_intr_info { #define SCAN_HOST 0x04 }; -#ifndef CONFIG_CXL_EEH -#define cxl_perst_reloads_same_image(_a, _b) do { } while (0) -#endif - #endif /* _CXLFLASH_MAIN_H */ diff --git a/drivers/scsi/cxlflash/sislite.h b/drivers/scsi/cxlflash/sislite.h index 0b3366f5e6f6..347fc1671975 100644 --- a/drivers/scsi/cxlflash/sislite.h +++ b/drivers/scsi/cxlflash/sislite.h @@ -311,6 +311,12 @@ struct sisl_global_regs { #define SISL_FC_INTERNAL_MASK ~(SISL_FC_INTERNAL_UNMASK) #define SISL_FC_INTERNAL_SHIFT 32 +#define SISL_FC_SHUTDOWN_NORMAL 0x0000000000000010ULL +#define SISL_FC_SHUTDOWN_ABRUPT 0x0000000000000020ULL + +#define SISL_STATUS_SHUTDOWN_ACTIVE 0x0000000000000010ULL +#define SISL_STATUS_SHUTDOWN_COMPLETE 0x0000000000000020ULL + #define SISL_ASTATUS_UNMASK 0xFFFFULL /* 1 means unmasked */ #define SISL_ASTATUS_MASK ~(SISL_ASTATUS_UNMASK) /* 1 means masked */ diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 0efe7112fc1f..c8a4305c7662 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -67,9 +67,6 @@ static DEFINE_MUTEX(fcoe_config_mutex); static struct workqueue_struct *fcoe_wq; -/* fcoe_percpu_clean completion. 
Waiter protected by fcoe_create_mutex */ -static DECLARE_COMPLETION(fcoe_flush_completion); - /* fcoe host list */ /* must only by accessed under the RTNL mutex */ static LIST_HEAD(fcoe_hostlist); @@ -80,7 +77,6 @@ static int fcoe_reset(struct Scsi_Host *); static int fcoe_xmit(struct fc_lport *, struct fc_frame *); static int fcoe_rcv(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *); -static int fcoe_percpu_receive_thread(void *); static void fcoe_percpu_clean(struct fc_lport *); static int fcoe_link_ok(struct fc_lport *); @@ -107,12 +103,11 @@ static int fcoe_ddp_setup(struct fc_lport *, u16, struct scatterlist *, static int fcoe_ddp_done(struct fc_lport *, u16); static int fcoe_ddp_target(struct fc_lport *, u16, struct scatterlist *, unsigned int); -static int fcoe_cpu_callback(struct notifier_block *, unsigned long, void *); static int fcoe_dcb_app_notification(struct notifier_block *notifier, ulong event, void *ptr); static bool fcoe_match(struct net_device *netdev); -static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode); +static int fcoe_create(struct net_device *netdev, enum fip_mode fip_mode); static int fcoe_destroy(struct net_device *netdev); static int fcoe_enable(struct net_device *netdev); static int fcoe_disable(struct net_device *netdev); @@ -120,7 +115,7 @@ static int fcoe_disable(struct net_device *netdev); /* fcoe_syfs control interface handlers */ static int fcoe_ctlr_alloc(struct net_device *netdev); static int fcoe_ctlr_enabled(struct fcoe_ctlr_device *cdev); - +static void fcoe_ctlr_mode(struct fcoe_ctlr_device *ctlr_dev); static struct fc_seq *fcoe_elsct_send(struct fc_lport *, u32 did, struct fc_frame *, @@ -136,11 +131,6 @@ static struct notifier_block fcoe_notifier = { .notifier_call = fcoe_device_notification, }; -/* notification function for CPU hotplug events */ -static struct notifier_block fcoe_cpu_notifier = { - .notifier_call = fcoe_cpu_callback, -}; - /* notification function for DCB events */ static struct notifier_block dcb_notifier = { .notifier_call = fcoe_dcb_app_notification, @@ -156,8 +146,9 @@ static void fcoe_set_vport_symbolic_name(struct fc_vport *); static void fcoe_set_port_id(struct fc_lport *, u32, struct fc_frame *); static void fcoe_fcf_get_vlan_id(struct fcoe_fcf_device *); + static struct fcoe_sysfs_function_template fcoe_sysfs_templ = { - .set_fcoe_ctlr_mode = fcoe_ctlr_set_fip_mode, + .set_fcoe_ctlr_mode = fcoe_ctlr_mode, .set_fcoe_ctlr_enabled = fcoe_ctlr_enabled, .get_fcoe_ctlr_link_fail = fcoe_ctlr_get_lesb, .get_fcoe_ctlr_vlink_fail = fcoe_ctlr_get_lesb, @@ -682,6 +673,12 @@ static int fcoe_netdev_config(struct fc_lport *lport, struct net_device *netdev) fcoe = port->priv; ctlr = fcoe_to_ctlr(fcoe); + /* Figure out the VLAN ID, if any */ + if (netdev->priv_flags & IFF_802_1Q_VLAN) + lport->vlan = vlan_dev_vlan_id(netdev); + else + lport->vlan = 0; + /* * Determine max frame size based on underlying device and optional * user-configured limit. 
If the MFS is too low, fcoe_link_ok() @@ -780,9 +777,6 @@ static void fcoe_fdmi_info(struct fc_lport *lport, struct net_device *netdev) fcoe = port->priv; realdev = fcoe->realdev; - if (!realdev) - return; - /* No FDMI state m/c for NPIV ports */ if (lport->vport) return; @@ -1245,152 +1239,21 @@ static int __exit fcoe_if_exit(void) return 0; } -/** - * fcoe_percpu_thread_create() - Create a receive thread for an online CPU - * @cpu: The CPU index of the CPU to create a receive thread for - */ -static void fcoe_percpu_thread_create(unsigned int cpu) +static void fcoe_thread_cleanup_local(unsigned int cpu) { - struct fcoe_percpu_s *p; - struct task_struct *thread; - - p = &per_cpu(fcoe_percpu, cpu); - - thread = kthread_create_on_node(fcoe_percpu_receive_thread, - (void *)p, cpu_to_node(cpu), - "fcoethread/%d", cpu); - - if (likely(!IS_ERR(thread))) { - kthread_bind(thread, cpu); - wake_up_process(thread); - - spin_lock_bh(&p->fcoe_rx_list.lock); - p->thread = thread; - spin_unlock_bh(&p->fcoe_rx_list.lock); - } -} - -/** - * fcoe_percpu_thread_destroy() - Remove the receive thread of a CPU - * @cpu: The CPU index of the CPU whose receive thread is to be destroyed - * - * Destroys a per-CPU Rx thread. Any pending skbs are moved to the - * current CPU's Rx thread. If the thread being destroyed is bound to - * the CPU processing this context the skbs will be freed. - */ -static void fcoe_percpu_thread_destroy(unsigned int cpu) -{ - struct fcoe_percpu_s *p; - struct task_struct *thread; struct page *crc_eof; - struct sk_buff *skb; -#ifdef CONFIG_SMP - struct fcoe_percpu_s *p0; - unsigned targ_cpu = get_cpu(); -#endif /* CONFIG_SMP */ - - FCOE_DBG("Destroying receive thread for CPU %d\n", cpu); + struct fcoe_percpu_s *p; - /* Prevent any new skbs from being queued for this CPU. */ - p = &per_cpu(fcoe_percpu, cpu); + p = per_cpu_ptr(&fcoe_percpu, cpu); spin_lock_bh(&p->fcoe_rx_list.lock); - thread = p->thread; - p->thread = NULL; crc_eof = p->crc_eof_page; p->crc_eof_page = NULL; p->crc_eof_offset = 0; spin_unlock_bh(&p->fcoe_rx_list.lock); -#ifdef CONFIG_SMP - /* - * Don't bother moving the skb's if this context is running - * on the same CPU that is having its thread destroyed. This - * can easily happen when the module is removed. - */ - if (cpu != targ_cpu) { - p0 = &per_cpu(fcoe_percpu, targ_cpu); - spin_lock_bh(&p0->fcoe_rx_list.lock); - if (p0->thread) { - FCOE_DBG("Moving frames from CPU %d to CPU %d\n", - cpu, targ_cpu); - - while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL) - __skb_queue_tail(&p0->fcoe_rx_list, skb); - spin_unlock_bh(&p0->fcoe_rx_list.lock); - } else { - /* - * The targeted CPU is not initialized and cannot accept - * new skbs. Unlock the targeted CPU and drop the skbs - * on the CPU that is going offline. - */ - while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL) - kfree_skb(skb); - spin_unlock_bh(&p0->fcoe_rx_list.lock); - } - } else { - /* - * This scenario occurs when the module is being removed - * and all threads are being destroyed. skbs will continue - * to be shifted from the CPU thread that is being removed - * to the CPU thread associated with the CPU that is processing - * the module removal. Once there is only one CPU Rx thread it - * will reach this case and we will drop all skbs and later - * stop the thread. 
- */ - spin_lock_bh(&p->fcoe_rx_list.lock); - while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL) - kfree_skb(skb); - spin_unlock_bh(&p->fcoe_rx_list.lock); - } - put_cpu(); -#else - /* - * This a non-SMP scenario where the singular Rx thread is - * being removed. Free all skbs and stop the thread. - */ - spin_lock_bh(&p->fcoe_rx_list.lock); - while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL) - kfree_skb(skb); - spin_unlock_bh(&p->fcoe_rx_list.lock); -#endif - - if (thread) - kthread_stop(thread); - if (crc_eof) put_page(crc_eof); -} - -/** - * fcoe_cpu_callback() - Handler for CPU hotplug events - * @nfb: The callback data block - * @action: The event triggering the callback - * @hcpu: The index of the CPU that the event is for - * - * This creates or destroys per-CPU data for fcoe - * - * Returns NOTIFY_OK always. - */ -static int fcoe_cpu_callback(struct notifier_block *nfb, - unsigned long action, void *hcpu) -{ - unsigned cpu = (unsigned long)hcpu; - - switch (action) { - case CPU_ONLINE: - case CPU_ONLINE_FROZEN: - FCOE_DBG("CPU %x online: Create Rx thread\n", cpu); - fcoe_percpu_thread_create(cpu); - break; - case CPU_DEAD: - case CPU_DEAD_FROZEN: - FCOE_DBG("CPU %x offline: Remove Rx thread\n", cpu); - fcoe_percpu_thread_destroy(cpu); - break; - default: - break; - } - return NOTIFY_OK; + flush_work(&p->work); } /** @@ -1509,26 +1372,6 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev, fps = &per_cpu(fcoe_percpu, cpu); spin_lock(&fps->fcoe_rx_list.lock); - if (unlikely(!fps->thread)) { - /* - * The targeted CPU is not ready, let's target - * the first CPU now. For non-SMP systems this - * will check the same CPU twice. - */ - FCOE_NETDEV_DBG(netdev, "CPU is online, but no receive thread " - "ready for incoming skb- using first online " - "CPU.\n"); - - spin_unlock(&fps->fcoe_rx_list.lock); - cpu = cpumask_first(cpu_online_mask); - fps = &per_cpu(fcoe_percpu, cpu); - spin_lock(&fps->fcoe_rx_list.lock); - if (!fps->thread) { - spin_unlock(&fps->fcoe_rx_list.lock); - goto err; - } - } - /* * We now have a valid CPU that we're targeting for * this skb. We also have this receive thread locked, @@ -1543,8 +1386,7 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev, * in softirq context. */ __skb_queue_tail(&fps->fcoe_rx_list, skb); - if (fps->thread->state == TASK_INTERRUPTIBLE) - wake_up_process(fps->thread); + schedule_work_on(cpu, &fps->work); spin_unlock(&fps->fcoe_rx_list.lock); return NET_RX_SUCCESS; @@ -1713,15 +1555,6 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp) } /** - * fcoe_percpu_flush_done() - Indicate per-CPU queue flush completion - * @skb: The completed skb (argument required by destructor) - */ -static void fcoe_percpu_flush_done(struct sk_buff *skb) -{ - complete(&fcoe_flush_completion); -} - -/** * fcoe_filter_frames() - filter out bad fcoe frames, i.e. 
bad CRC * @lport: The local port the frame was received on * @fp: The received frame @@ -1792,8 +1625,7 @@ static void fcoe_recv_frame(struct sk_buff *skb) fr = fcoe_dev_from_skb(skb); lport = fr->fr_dev; if (unlikely(!lport)) { - if (skb->destructor != fcoe_percpu_flush_done) - FCOE_NETDEV_DBG(skb->dev, "NULL lport in skb\n"); + FCOE_NETDEV_DBG(skb->dev, "NULL lport in skb\n"); kfree_skb(skb); return; } @@ -1857,40 +1689,28 @@ drop: } /** - * fcoe_percpu_receive_thread() - The per-CPU packet receive thread - * @arg: The per-CPU context + * fcoe_receive_work() - The per-CPU worker + * @work: The work struct * - * Return: 0 for success */ -static int fcoe_percpu_receive_thread(void *arg) +static void fcoe_receive_work(struct work_struct *work) { - struct fcoe_percpu_s *p = arg; + struct fcoe_percpu_s *p; struct sk_buff *skb; struct sk_buff_head tmp; + p = container_of(work, struct fcoe_percpu_s, work); skb_queue_head_init(&tmp); - set_user_nice(current, MIN_NICE); - - while (!kthread_should_stop()) { - - spin_lock_bh(&p->fcoe_rx_list.lock); - skb_queue_splice_init(&p->fcoe_rx_list, &tmp); - - if (!skb_queue_len(&tmp)) { - set_current_state(TASK_INTERRUPTIBLE); - spin_unlock_bh(&p->fcoe_rx_list.lock); - schedule(); - continue; - } - - spin_unlock_bh(&p->fcoe_rx_list.lock); + spin_lock_bh(&p->fcoe_rx_list.lock); + skb_queue_splice_init(&p->fcoe_rx_list, &tmp); + spin_unlock_bh(&p->fcoe_rx_list.lock); - while ((skb = __skb_dequeue(&tmp)) != NULL) - fcoe_recv_frame(skb); + if (!skb_queue_len(&tmp)) + return; - } - return 0; + while ((skb = __skb_dequeue(&tmp))) + fcoe_recv_frame(skb); } /** @@ -2163,6 +1983,32 @@ static int fcoe_ctlr_enabled(struct fcoe_ctlr_device *cdev) } /** + * fcoe_ctlr_mode() - Switch FIP mode + * @cdev: The FCoE Controller that is being modified + * + * When the FIP mode has been changed we need to update + * the multicast addresses to ensure we get the correct + * frames. + */ +static void fcoe_ctlr_mode(struct fcoe_ctlr_device *ctlr_dev) +{ + struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev); + struct fcoe_interface *fcoe = fcoe_ctlr_priv(ctlr); + + if (ctlr_dev->mode == FIP_CONN_TYPE_VN2VN && + ctlr->mode != FIP_MODE_VN2VN) { + dev_mc_del(fcoe->netdev, FIP_ALL_ENODE_MACS); + dev_mc_add(fcoe->netdev, FIP_ALL_VN2VN_MACS); + dev_mc_add(fcoe->netdev, FIP_ALL_P2P_MACS); + } else if (ctlr->mode != FIP_MODE_FABRIC) { + dev_mc_del(fcoe->netdev, FIP_ALL_VN2VN_MACS); + dev_mc_del(fcoe->netdev, FIP_ALL_P2P_MACS); + dev_mc_add(fcoe->netdev, FIP_ALL_ENODE_MACS); + } + fcoe_ctlr_set_fip_mode(ctlr_dev); +} + +/** * fcoe_destroy() - Destroy a FCoE interface * @netdev : The net_device object the Ethernet interface to create on * @@ -2317,7 +2163,7 @@ enum fcoe_create_link_state { * consolidation of code can be done when that interface is * removed. */ -static int _fcoe_create(struct net_device *netdev, enum fip_state fip_mode, +static int _fcoe_create(struct net_device *netdev, enum fip_mode fip_mode, enum fcoe_create_link_state link_state) { int rc = 0; @@ -2406,7 +2252,7 @@ out: * * Returns: 0 for success */ -static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode) +static int fcoe_create(struct net_device *netdev, enum fip_mode fip_mode) { return _fcoe_create(netdev, fip_mode, FCOE_CREATE_LINK_UP); } @@ -2450,36 +2296,19 @@ static int fcoe_link_ok(struct fc_lport *lport) * * Must be called with fcoe_create_mutex held to single-thread completion. * - * This flushes the pending skbs by adding a new skb to each queue and - * waiting until they are all freed. 
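fcoe_receive_work() above follows a common queue-consumer pattern: hold the lock only long enough to splice the shared receive list onto a private list, then walk the private list with the lock dropped. A minimal userspace sketch of the same idea; the item type and the printf stand in for real frame handling:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct item {
        struct item *next;
        int payload;
    };

    static pthread_mutex_t rx_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct item *rx_list;    /* shared list, protected by rx_lock */

    static void receive_work(void)
    {
        struct item *local, *it;

        /* splice: steal the whole list in one short critical section */
        pthread_mutex_lock(&rx_lock);
        local = rx_list;
        rx_list = NULL;
        pthread_mutex_unlock(&rx_lock);

        /* process the private copy with no lock held */
        while ((it = local) != NULL) {
            local = it->next;
            printf("handled item %d\n", it->payload);
            free(it);
        }
    }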
This assures us that not only are - * there no packets that will be handled by the lport, but also that any - * threads already handling packet have returned. + * This flushes the pending skbs by flush the work item for each CPU. The work + * item on each possible CPU is flushed because we may have used the per-CPU + * struct of an offline CPU. */ static void fcoe_percpu_clean(struct fc_lport *lport) { struct fcoe_percpu_s *pp; - struct sk_buff *skb; unsigned int cpu; for_each_possible_cpu(cpu) { pp = &per_cpu(fcoe_percpu, cpu); - if (!pp->thread || !cpu_online(cpu)) - continue; - - skb = dev_alloc_skb(0); - if (!skb) - continue; - - skb->destructor = fcoe_percpu_flush_done; - - spin_lock_bh(&pp->fcoe_rx_list.lock); - __skb_queue_tail(&pp->fcoe_rx_list, skb); - if (pp->fcoe_rx_list.qlen == 1) - wake_up_process(pp->thread); - spin_unlock_bh(&pp->fcoe_rx_list.lock); - - wait_for_completion(&fcoe_flush_completion); + flush_work(&pp->work); } } @@ -2625,22 +2454,11 @@ static int __init fcoe_init(void) mutex_lock(&fcoe_config_mutex); for_each_possible_cpu(cpu) { - p = &per_cpu(fcoe_percpu, cpu); + p = per_cpu_ptr(&fcoe_percpu, cpu); + INIT_WORK(&p->work, fcoe_receive_work); skb_queue_head_init(&p->fcoe_rx_list); } - cpu_notifier_register_begin(); - - for_each_online_cpu(cpu) - fcoe_percpu_thread_create(cpu); - - /* Initialize per CPU interrupt thread */ - rc = __register_hotcpu_notifier(&fcoe_cpu_notifier); - if (rc) - goto out_free; - - cpu_notifier_register_done(); - /* Setup link change notification */ fcoe_dev_setup(); @@ -2652,12 +2470,6 @@ static int __init fcoe_init(void) return 0; out_free: - for_each_online_cpu(cpu) { - fcoe_percpu_thread_destroy(cpu); - } - - cpu_notifier_register_done(); - mutex_unlock(&fcoe_config_mutex); destroy_workqueue(fcoe_wq); return rc; @@ -2690,14 +2502,8 @@ static void __exit fcoe_exit(void) } rtnl_unlock(); - cpu_notifier_register_begin(); - - for_each_online_cpu(cpu) - fcoe_percpu_thread_destroy(cpu); - - __unregister_hotcpu_notifier(&fcoe_cpu_notifier); - - cpu_notifier_register_done(); + for_each_possible_cpu(cpu) + fcoe_thread_cleanup_local(cpu); mutex_unlock(&fcoe_config_mutex); diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c index 3e83d485f743..a569c65f22b1 100644 --- a/drivers/scsi/fcoe/fcoe_ctlr.c +++ b/drivers/scsi/fcoe/fcoe_ctlr.c @@ -59,6 +59,8 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *, struct sk_buff *); static void fcoe_ctlr_vn_timeout(struct fcoe_ctlr *); static int fcoe_ctlr_vn_lookup(struct fcoe_ctlr *, u32, u8 *); +static int fcoe_ctlr_vlan_recv(struct fcoe_ctlr *, struct sk_buff *); + static u8 fcoe_all_fcfs[ETH_ALEN] = FIP_ALL_FCF_MACS; static u8 fcoe_all_enode[ETH_ALEN] = FIP_ALL_ENODE_MACS; static u8 fcoe_all_vn2vn[ETH_ALEN] = FIP_ALL_VN2VN_MACS; @@ -149,6 +151,7 @@ void fcoe_ctlr_init(struct fcoe_ctlr *fip, enum fip_state mode) { fcoe_ctlr_set_state(fip, FIP_ST_LINK_WAIT); fip->mode = mode; + fip->fip_resp = false; INIT_LIST_HEAD(&fip->fcfs); mutex_init(&fip->ctlr_mutex); spin_lock_init(&fip->ctlr_lock); @@ -991,7 +994,7 @@ static int fcoe_ctlr_parse_adv(struct fcoe_ctlr *fip, LIBFCOE_FIP_DBG(fip, "unexpected descriptor type %x " "in FIP adv\n", desc->fip_dtype); /* standard says ignore unknown descriptors >= 128 */ - if (desc->fip_dtype < FIP_DT_VENDOR_BASE) + if (desc->fip_dtype < FIP_DT_NON_CRITICAL) return -EINVAL; break; } @@ -1232,7 +1235,7 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb) LIBFCOE_FIP_DBG(fip, "unexpected descriptor type %x " "in FIP adv\n", 
desc->fip_dtype); /* standard says ignore unknown descriptors >= 128 */ - if (desc->fip_dtype < FIP_DT_VENDOR_BASE) + if (desc->fip_dtype < FIP_DT_NON_CRITICAL) goto drop; if (desc_cnt <= 2) { LIBFCOE_FIP_DBG(fip, "FIP descriptors " @@ -1410,7 +1413,7 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip, break; default: /* standard says ignore unknown descriptors >= 128 */ - if (desc->fip_dtype < FIP_DT_VENDOR_BASE) + if (desc->fip_dtype < FIP_DT_NON_CRITICAL) goto err; break; } @@ -1513,6 +1516,7 @@ static int fcoe_ctlr_recv_handler(struct fcoe_ctlr *fip, struct sk_buff *skb) struct fip_header *fiph; struct ethhdr *eh; enum fip_state state; + bool fip_vlan_resp = false; u16 op; u8 sub; @@ -1546,11 +1550,17 @@ static int fcoe_ctlr_recv_handler(struct fcoe_ctlr *fip, struct sk_buff *skb) state = FIP_ST_ENABLED; LIBFCOE_FIP_DBG(fip, "Using FIP mode\n"); } + fip_vlan_resp = fip->fip_resp; mutex_unlock(&fip->ctlr_mutex); if (fip->mode == FIP_MODE_VN2VN && op == FIP_OP_VN2VN) return fcoe_ctlr_vn_recv(fip, skb); + if (fip_vlan_resp && op == FIP_OP_VLAN) { + LIBFCOE_FIP_DBG(fip, "fip vlan discovery\n"); + return fcoe_ctlr_vlan_recv(fip, skb); + } + if (state != FIP_ST_ENABLED && state != FIP_ST_VNMP_UP && state != FIP_ST_VNMP_CLAIM) goto drop; @@ -1989,7 +1999,7 @@ static void fcoe_ctlr_vn_send(struct fcoe_ctlr *fip, const u8 *dest, size_t min_len) { struct sk_buff *skb; - struct fip_frame { + struct fip_vn2vn_probe_frame { struct ethhdr eth; struct fip_header fip; struct fip_mac_desc mac; @@ -2016,7 +2026,7 @@ static void fcoe_ctlr_vn_send(struct fcoe_ctlr *fip, if (!skb) return; - frame = (struct fip_frame *)skb->data; + frame = (struct fip_vn2vn_probe_frame *)skb->data; memset(frame, 0, len); memcpy(frame->eth.h_dest, dest, ETH_ALEN); @@ -2338,7 +2348,7 @@ static int fcoe_ctlr_vn_parse(struct fcoe_ctlr *fip, LIBFCOE_FIP_DBG(fip, "unexpected descriptor type %x " "in FIP probe\n", dtype); /* standard says ignore unknown descriptors >= 128 */ - if (dtype < FIP_DT_VENDOR_BASE) + if (dtype < FIP_DT_NON_CRITICAL) return -EINVAL; break; } @@ -2496,14 +2506,13 @@ static int fcoe_ctlr_vn_lookup(struct fcoe_ctlr *fip, u32 port_id, u8 *mac) struct fcoe_rport *frport; int ret = -1; - rcu_read_lock(); rdata = lport->tt.rport_lookup(lport, port_id); if (rdata) { frport = fcoe_ctlr_rport(rdata); memcpy(mac, frport->enode_mac, ETH_ALEN); ret = 0; + kref_put(&rdata->kref, lport->tt.rport_destroy); } - rcu_read_unlock(); return ret; } @@ -2585,11 +2594,7 @@ static void fcoe_ctlr_vn_beacon(struct fcoe_ctlr *fip, fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0); return; } - mutex_lock(&lport->disc.disc_mutex); rdata = lport->tt.rport_lookup(lport, new->ids.port_id); - if (rdata) - kref_get(&rdata->kref); - mutex_unlock(&lport->disc.disc_mutex); if (rdata) { if (rdata->ids.node_name == new->ids.node_name && rdata->ids.port_name == new->ids.port_name) { @@ -2709,6 +2714,220 @@ drop: } /** + * fcoe_ctlr_vlan_parse - parse vlan discovery request or response + * @fip: The FCoE controller + * @skb: incoming packet + * @rdata: buffer for resulting parsed VLAN entry plus fcoe_rport + * + * Returns non-zero error number on error. + * Does not consume the packet. 
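fcoe_ctlr_vlan_parse() below walks the FIP payload as a sequence of typed descriptors whose length field is counted in 4-byte words, rejecting any descriptor whose claimed length does not fit what is left of the buffer. A stripped-down sketch of that kind of TLV walk; the descriptor layout and type values here are simplified stand-ins, not the on-wire FIP format:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* simplified descriptor header: type plus length in 4-byte words */
    struct desc {
        uint8_t type;
        uint8_t dlen_words;
        /* payload follows */
    };

    #define BPW 4   /* bytes per word, as in FIP_BPW */

    static int walk_descriptors(const uint8_t *buf, size_t rlen)
    {
        while (rlen > 0) {
            const struct desc *d = (const struct desc *)buf;
            size_t dlen;

            if (rlen < sizeof(*d))
                return -1;                  /* truncated header */
            dlen = (size_t)d->dlen_words * BPW;
            if (dlen < sizeof(*d) || dlen > rlen)
                return -1;                  /* bogus length */

            switch (d->type) {
            case 2:                         /* a MAC-like descriptor in this sketch */
                printf("MAC descriptor, %zu bytes\n", dlen);
                break;
            default:
                printf("ignoring descriptor type %u\n", d->type);
                break;
            }
            buf  += dlen;
            rlen -= dlen;
        }
        return 0;
    }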
+ */ +static int fcoe_ctlr_vlan_parse(struct fcoe_ctlr *fip, + struct sk_buff *skb, + struct fc_rport_priv *rdata) +{ + struct fip_header *fiph; + struct fip_desc *desc = NULL; + struct fip_mac_desc *macd = NULL; + struct fip_wwn_desc *wwn = NULL; + struct fcoe_rport *frport; + size_t rlen; + size_t dlen; + u32 desc_mask = 0; + u32 dtype; + u8 sub; + + memset(rdata, 0, sizeof(*rdata) + sizeof(*frport)); + frport = fcoe_ctlr_rport(rdata); + + fiph = (struct fip_header *)skb->data; + frport->flags = ntohs(fiph->fip_flags); + + sub = fiph->fip_subcode; + switch (sub) { + case FIP_SC_VL_REQ: + desc_mask = BIT(FIP_DT_MAC) | BIT(FIP_DT_NAME); + break; + default: + LIBFCOE_FIP_DBG(fip, "vn_parse unknown subcode %u\n", sub); + return -EINVAL; + } + + rlen = ntohs(fiph->fip_dl_len) * 4; + if (rlen + sizeof(*fiph) > skb->len) + return -EINVAL; + + desc = (struct fip_desc *)(fiph + 1); + while (rlen > 0) { + dlen = desc->fip_dlen * FIP_BPW; + if (dlen < sizeof(*desc) || dlen > rlen) + return -EINVAL; + + dtype = desc->fip_dtype; + if (dtype < 32) { + if (!(desc_mask & BIT(dtype))) { + LIBFCOE_FIP_DBG(fip, + "unexpected or duplicated desc " + "desc type %u in " + "FIP VN2VN subtype %u\n", + dtype, sub); + return -EINVAL; + } + desc_mask &= ~BIT(dtype); + } + + switch (dtype) { + case FIP_DT_MAC: + if (dlen != sizeof(struct fip_mac_desc)) + goto len_err; + macd = (struct fip_mac_desc *)desc; + if (!is_valid_ether_addr(macd->fd_mac)) { + LIBFCOE_FIP_DBG(fip, + "Invalid MAC addr %pM in FIP VN2VN\n", + macd->fd_mac); + return -EINVAL; + } + memcpy(frport->enode_mac, macd->fd_mac, ETH_ALEN); + break; + case FIP_DT_NAME: + if (dlen != sizeof(struct fip_wwn_desc)) + goto len_err; + wwn = (struct fip_wwn_desc *)desc; + rdata->ids.node_name = get_unaligned_be64(&wwn->fd_wwn); + break; + default: + LIBFCOE_FIP_DBG(fip, "unexpected descriptor type %x " + "in FIP probe\n", dtype); + /* standard says ignore unknown descriptors >= 128 */ + if (dtype < FIP_DT_NON_CRITICAL) + return -EINVAL; + break; + } + desc = (struct fip_desc *)((char *)desc + dlen); + rlen -= dlen; + } + return 0; + +len_err: + LIBFCOE_FIP_DBG(fip, "FIP length error in descriptor type %x len %zu\n", + dtype, dlen); + return -EINVAL; +} + +/** + * fcoe_ctlr_vlan_send() - Send a FIP VLAN Notification + * @fip: The FCoE controller + * @sub: sub-opcode for vlan notification or vn2vn vlan notification + * @dest: The destination Ethernet MAC address + * @min_len: minimum size of the Ethernet payload to be sent + */ +static void fcoe_ctlr_vlan_send(struct fcoe_ctlr *fip, + enum fip_vlan_subcode sub, + const u8 *dest) +{ + struct sk_buff *skb; + struct fip_vlan_notify_frame { + struct ethhdr eth; + struct fip_header fip; + struct fip_mac_desc mac; + struct fip_vlan_desc vlan; + } __packed * frame; + size_t len; + size_t dlen; + + len = sizeof(*frame); + dlen = sizeof(frame->mac) + sizeof(frame->vlan); + len = max(len, sizeof(struct ethhdr)); + + skb = dev_alloc_skb(len); + if (!skb) + return; + + LIBFCOE_FIP_DBG(fip, "fip %s vlan notification, vlan %d\n", + fip->mode == FIP_MODE_VN2VN ? 
"vn2vn" : "fcf", + fip->lp->vlan); + + frame = (struct fip_vlan_notify_frame *)skb->data; + memset(frame, 0, len); + memcpy(frame->eth.h_dest, dest, ETH_ALEN); + + memcpy(frame->eth.h_source, fip->ctl_src_addr, ETH_ALEN); + frame->eth.h_proto = htons(ETH_P_FIP); + + frame->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER); + frame->fip.fip_op = htons(FIP_OP_VLAN); + frame->fip.fip_subcode = sub; + frame->fip.fip_dl_len = htons(dlen / FIP_BPW); + + frame->mac.fd_desc.fip_dtype = FIP_DT_MAC; + frame->mac.fd_desc.fip_dlen = sizeof(frame->mac) / FIP_BPW; + memcpy(frame->mac.fd_mac, fip->ctl_src_addr, ETH_ALEN); + + frame->vlan.fd_desc.fip_dtype = FIP_DT_VLAN; + frame->vlan.fd_desc.fip_dlen = sizeof(frame->vlan) / FIP_BPW; + put_unaligned_be16(fip->lp->vlan, &frame->vlan.fd_vlan); + + skb_put(skb, len); + skb->protocol = htons(ETH_P_FIP); + skb->priority = fip->priority; + skb_reset_mac_header(skb); + skb_reset_network_header(skb); + + fip->send(fip, skb); +} + +/** + * fcoe_ctlr_vlan_disk_reply() - send FIP VLAN Discovery Notification. + * @fip: The FCoE controller + * + * Called with ctlr_mutex held. + */ +static void fcoe_ctlr_vlan_disc_reply(struct fcoe_ctlr *fip, + struct fc_rport_priv *rdata) +{ + struct fcoe_rport *frport = fcoe_ctlr_rport(rdata); + enum fip_vlan_subcode sub = FIP_SC_VL_NOTE; + + if (fip->mode == FIP_MODE_VN2VN) + sub = FIP_SC_VL_VN2VN_NOTE; + + fcoe_ctlr_vlan_send(fip, sub, frport->enode_mac); +} + +/** + * fcoe_ctlr_vlan_recv - vlan request receive handler for VN2VN mode. + * @lport: The local port + * @fp: The received frame + * + */ +static int fcoe_ctlr_vlan_recv(struct fcoe_ctlr *fip, struct sk_buff *skb) +{ + struct fip_header *fiph; + enum fip_vlan_subcode sub; + struct { + struct fc_rport_priv rdata; + struct fcoe_rport frport; + } buf; + int rc; + + fiph = (struct fip_header *)skb->data; + sub = fiph->fip_subcode; + rc = fcoe_ctlr_vlan_parse(fip, skb, &buf.rdata); + if (rc) { + LIBFCOE_FIP_DBG(fip, "vlan_recv vlan_parse error %d\n", rc); + goto drop; + } + mutex_lock(&fip->ctlr_mutex); + if (sub == FIP_SC_VL_REQ) + fcoe_ctlr_vlan_disc_reply(fip, &buf.rdata); + mutex_unlock(&fip->ctlr_mutex); + +drop: + kfree(skb); + return rc; +} + +/** * fcoe_ctlr_disc_recv - discovery receive handler for VN2VN mode. * @lport: The local port * @fp: The received frame @@ -2869,7 +3088,7 @@ unlock: * when nothing is happening. 
*/ static void fcoe_ctlr_mode_set(struct fc_lport *lport, struct fcoe_ctlr *fip, - enum fip_state fip_mode) + enum fip_mode fip_mode) { void *priv; diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c index 045c4e11ee54..0675fd128734 100644 --- a/drivers/scsi/fcoe/fcoe_sysfs.c +++ b/drivers/scsi/fcoe/fcoe_sysfs.c @@ -385,6 +385,44 @@ static FCOE_DEVICE_ATTR(ctlr, enabled, S_IRUGO | S_IWUSR, show_ctlr_enabled_state, store_ctlr_enabled); +static ssize_t store_ctlr_fip_resp(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev); + struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr); + + mutex_lock(&fip->ctlr_mutex); + if ((buf[1] == '\0') || ((buf[1] == '\n') && (buf[2] == '\0'))) { + if (buf[0] == '1') { + fip->fip_resp = 1; + mutex_unlock(&fip->ctlr_mutex); + return count; + } + if (buf[0] == '0') { + fip->fip_resp = 0; + mutex_unlock(&fip->ctlr_mutex); + return count; + } + } + mutex_unlock(&fip->ctlr_mutex); + return -EINVAL; +} + +static ssize_t show_ctlr_fip_resp(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev); + struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr); + + return sprintf(buf, "%d\n", fip->fip_resp ? 1 : 0); +} + +static FCOE_DEVICE_ATTR(ctlr, fip_vlan_responder, S_IRUGO | S_IWUSR, + show_ctlr_fip_resp, + store_ctlr_fip_resp); + static ssize_t store_private_fcoe_ctlr_fcf_dev_loss_tmo(struct device *dev, struct device_attribute *attr, @@ -467,6 +505,7 @@ static struct attribute_group fcoe_ctlr_lesb_attr_group = { }; static struct attribute *fcoe_ctlr_attrs[] = { + &device_attr_fcoe_ctlr_fip_vlan_responder.attr, &device_attr_fcoe_ctlr_fcf_dev_loss_tmo.attr, &device_attr_fcoe_ctlr_enabled.attr, &device_attr_fcoe_ctlr_mode.attr, diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c index 641c60e8fda3..7028dd37e5dd 100644 --- a/drivers/scsi/fcoe/fcoe_transport.c +++ b/drivers/scsi/fcoe/fcoe_transport.c @@ -133,10 +133,10 @@ int fcoe_link_speed_update(struct fc_lport *lport) case SPEED_10000: lport->link_speed = FC_PORTSPEED_10GBIT; break; - case 20000: + case SPEED_20000: lport->link_speed = FC_PORTSPEED_20GBIT; break; - case 40000: + case SPEED_40000: lport->link_speed = FC_PORTSPEED_40GBIT; break; default: diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c index 67669a9e73c1..3b7da66e2771 100644 --- a/drivers/scsi/fnic/fnic_fcs.c +++ b/drivers/scsi/fnic/fnic_fcs.c @@ -359,7 +359,7 @@ static void fnic_fcoe_send_vlan_req(struct fnic *fnic) vlan->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER); vlan->fip.fip_op = htons(FIP_OP_VLAN); - vlan->fip.fip_subcode = FIP_SC_VL_REQ; + vlan->fip.fip_subcode = FIP_SC_VL_NOTE; vlan->fip.fip_dl_len = htons(sizeof(vlan->desc) / FIP_BPW); vlan->desc.mac.fd_desc.fip_dtype = FIP_DT_MAC; @@ -551,7 +551,7 @@ static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb) goto drop; /* pass it on to fcoe */ ret = 1; - } else if (op == FIP_OP_VLAN && sub == FIP_SC_VL_REP) { + } else if (op == FIP_OP_VLAN && sub == FIP_SC_VL_NOTE) { /* set the vlan as used */ fnic_fcoe_process_vlan_resp(fnic, skb); ret = 0; @@ -954,8 +954,8 @@ int fnic_alloc_rq_frame(struct vnic_rq *rq) skb_put(skb, len); pa = pci_map_single(fnic->pdev, skb->data, len, PCI_DMA_FROMDEVICE); - r = pci_dma_mapping_error(fnic->pdev, pa); - if (r) { + if (pci_dma_mapping_error(fnic->pdev, pa)) { + r = -ENOMEM; printk(KERN_ERR "PCI mapping failed with error 
%d\n", r); goto free_skb; } @@ -1093,8 +1093,8 @@ static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp) pa = pci_map_single(fnic->pdev, eth_hdr, tot_len, PCI_DMA_TODEVICE); - ret = pci_dma_mapping_error(fnic->pdev, pa); - if (ret) { + if (pci_dma_mapping_error(fnic->pdev, pa)) { + ret = -ENOMEM; printk(KERN_ERR "DMA map failed with error %d\n", ret); goto free_skb_on_err; } @@ -1308,7 +1308,7 @@ void fnic_handle_fip_timer(struct fnic *fnic) } spin_unlock_irqrestore(&fnic->fnic_lock, flags); - if (fnic->ctlr.mode == FIP_ST_NON_FIP) + if (fnic->ctlr.mode == FIP_MODE_NON_FIP) return; spin_lock_irqsave(&fnic->vlans_lock, flags); diff --git a/drivers/scsi/fnic/fnic_fip.h b/drivers/scsi/fnic/fnic_fip.h index 87e74c2ab971..7761f33ab5d4 100644 --- a/drivers/scsi/fnic/fnic_fip.h +++ b/drivers/scsi/fnic/fnic_fip.h @@ -26,14 +26,6 @@ #define FINC_MAX_FLOGI_REJECTS 8 -/* - * FIP_DT_VLAN descriptor. - */ -struct fip_vlan_desc { - struct fip_desc fd_desc; - __be16 fd_vlan; -} __attribute__((packed)); - struct vlan { __be16 vid; __be16 type; diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h index d7cab724f203..4731d3241323 100644 --- a/drivers/scsi/hisi_sas/hisi_sas.h +++ b/drivers/scsi/hisi_sas/hisi_sas.h @@ -23,7 +23,7 @@ #include <scsi/sas_ata.h> #include <scsi/libsas.h> -#define DRV_VERSION "v1.4" +#define DRV_VERSION "v1.5" #define HISI_SAS_MAX_PHYS 9 #define HISI_SAS_MAX_QUEUES 32 diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c index bd20c5488768..f96560431cf1 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c @@ -721,30 +721,41 @@ static int reset_hw_v2_hw(struct hisi_hba *hisi_hba) return -EIO; } - /* reset and disable clock*/ - regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_reset_reg, - reset_val); - regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_clock_ena_reg + 4, - reset_val); - msleep(1); - regmap_read(hisi_hba->ctrl, hisi_hba->ctrl_reset_sts_reg, &val); - if (reset_val != (val & reset_val)) { - dev_err(dev, "SAS reset fail.\n"); - return -EIO; - } + if (ACPI_HANDLE(dev)) { + acpi_status s; - /* De-reset and enable clock*/ - regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_reset_reg + 4, - reset_val); - regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_clock_ena_reg, - reset_val); - msleep(1); - regmap_read(hisi_hba->ctrl, hisi_hba->ctrl_reset_sts_reg, - &val); - if (val & reset_val) { - dev_err(dev, "SAS de-reset fail.\n"); - return -EIO; - } + s = acpi_evaluate_object(ACPI_HANDLE(dev), "_RST", NULL, NULL); + if (ACPI_FAILURE(s)) { + dev_err(dev, "Reset failed\n"); + return -EIO; + } + } else if (hisi_hba->ctrl) { + /* reset and disable clock*/ + regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_reset_reg, + reset_val); + regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_clock_ena_reg + 4, + reset_val); + msleep(1); + regmap_read(hisi_hba->ctrl, hisi_hba->ctrl_reset_sts_reg, &val); + if (reset_val != (val & reset_val)) { + dev_err(dev, "SAS reset fail.\n"); + return -EIO; + } + + /* De-reset and enable clock*/ + regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_reset_reg + 4, + reset_val); + regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_clock_ena_reg, + reset_val); + msleep(1); + regmap_read(hisi_hba->ctrl, hisi_hba->ctrl_reset_sts_reg, + &val); + if (val & reset_val) { + dev_err(dev, "SAS de-reset fail.\n"); + return -EIO; + } + } else + dev_warn(dev, "no reset method\n"); return 0; } @@ -752,13 +763,12 @@ static int reset_hw_v2_hw(struct hisi_hba *hisi_hba) static void init_reg_v2_hw(struct 
hisi_hba *hisi_hba) { struct device *dev = &hisi_hba->pdev->dev; - struct device_node *np = dev->of_node; int i; /* Global registers init */ /* Deal with am-max-transmissions quirk */ - if (of_get_property(np, "hip06-sas-v2-quirk-amt", NULL)) { + if (device_property_present(dev, "hip06-sas-v2-quirk-amt")) { hisi_sas_write32(hisi_hba, AM_CFG_MAX_TRANS, 0x2020); hisi_sas_write32(hisi_hba, AM_CFG_SINGLE_PORT_MAX_TRANS, 0x2020); @@ -1902,14 +1912,9 @@ static void phy_bcast_v2_hw(int phy_no, struct hisi_hba *hisi_hba) struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; struct asd_sas_phy *sas_phy = &phy->sas_phy; struct sas_ha_struct *sas_ha = &hisi_hba->sha; - unsigned long flags; hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 1); - - spin_lock_irqsave(&hisi_hba->lock, flags); sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD); - spin_unlock_irqrestore(&hisi_hba->lock, flags); - hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, CHL_INT0_SL_RX_BCST_ACK_MSK); hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 0); @@ -2260,12 +2265,20 @@ static const struct of_device_id sas_v2_of_match[] = { }; MODULE_DEVICE_TABLE(of, sas_v2_of_match); +static const struct acpi_device_id sas_v2_acpi_match[] = { + { "HISI0162", 0 }, + { } +}; + +MODULE_DEVICE_TABLE(acpi, sas_v2_acpi_match); + static struct platform_driver hisi_sas_v2_driver = { .probe = hisi_sas_v2_probe, .remove = hisi_sas_v2_remove, .driver = { .name = DRV_NAME, .of_match_table = sas_v2_of_match, + .acpi_match_table = ACPI_PTR(sas_v2_acpi_match), }, }; diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c index 1547bd93c70b..ba9af4a2bd2a 100644 --- a/drivers/scsi/hosts.c +++ b/drivers/scsi/hosts.c @@ -486,8 +486,6 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize) else shost->dma_boundary = 0xffffffff; - shost->use_blk_mq = scsi_use_blk_mq && !shost->hostt->disable_blk_mq; - device_initialize(&shost->shost_gendev); dev_set_name(&shost->shost_gendev, "host%d", shost->host_no); shost->shost_gendev.bus = &scsi_bus_type; diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index ff8dcd5b0631..030d0023e1d2 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -4105,6 +4105,70 @@ static int hpsa_set_local_logical_count(struct ctlr_info *h, return rc; } +static bool hpsa_is_disk_spare(struct ctlr_info *h, u8 *lunaddrbytes) +{ + struct bmic_identify_physical_device *id_phys; + bool is_spare = false; + int rc; + + id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL); + if (!id_phys) + return false; + + rc = hpsa_bmic_id_physical_device(h, + lunaddrbytes, + GET_BMIC_DRIVE_NUMBER(lunaddrbytes), + id_phys, sizeof(*id_phys)); + if (rc == 0) + is_spare = (id_phys->more_flags >> 6) & 0x01; + + kfree(id_phys); + return is_spare; +} + +#define RPL_DEV_FLAG_NON_DISK 0x1 +#define RPL_DEV_FLAG_UNCONFIG_DISK_REPORTING_SUPPORTED 0x2 +#define RPL_DEV_FLAG_UNCONFIG_DISK 0x4 + +#define BMIC_DEVICE_TYPE_ENCLOSURE 6 + +static bool hpsa_skip_device(struct ctlr_info *h, u8 *lunaddrbytes, + struct ext_report_lun_entry *rle) +{ + u8 device_flags; + u8 device_type; + + if (!MASKED_DEVICE(lunaddrbytes)) + return false; + + device_flags = rle->device_flags; + device_type = rle->device_type; + + if (device_flags & RPL_DEV_FLAG_NON_DISK) { + if (device_type == BMIC_DEVICE_TYPE_ENCLOSURE) + return false; + return true; + } + + if (!(device_flags & RPL_DEV_FLAG_UNCONFIG_DISK_REPORTING_SUPPORTED)) + return false; + + if (device_flags & RPL_DEV_FLAG_UNCONFIG_DISK) + return false; + + /* + * Spares may be spun down, we 
do not want to + * do an Inquiry to a RAID set spare drive as + * that would have them spun up, that is a + * performance hit because I/O to the RAID device + * stops while the spin up occurs which can take + * over 50 seconds. + */ + if (hpsa_is_disk_spare(h, lunaddrbytes)) + return true; + + return false; +} static void hpsa_update_scsi_devices(struct ctlr_info *h) { @@ -4198,6 +4262,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h) u8 *lunaddrbytes, is_OBDR = 0; int rc = 0; int phys_dev_index = i - (raid_ctlr_position == 0); + bool skip_device = false; physical_device = i < nphysicals + (raid_ctlr_position == 0); @@ -4205,11 +4270,15 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h) lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position, i, nphysicals, nlogicals, physdev_list, logdev_list); - /* skip masked non-disk devices */ - if (MASKED_DEVICE(lunaddrbytes) && physical_device && - (physdev_list->LUN[phys_dev_index].device_type != 0x06) && - (physdev_list->LUN[phys_dev_index].device_flags & 0x01)) - continue; + /* + * Skip over some devices such as a spare. + */ + if (!tmpdevice->external && physical_device) { + skip_device = hpsa_skip_device(h, lunaddrbytes, + &physdev_list->LUN[phys_dev_index]); + if (skip_device) + continue; + } /* Get device type, vendor, model, device id */ rc = hpsa_update_device_info(h, lunaddrbytes, tmpdevice, @@ -6455,7 +6524,7 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp) c->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* not chaining */ } rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, - DEFAULT_TIMEOUT); + NO_TIMEOUT); if (iocommand.buf_size > 0) hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL); check_ioctl_unit_attention(h, c); @@ -6588,7 +6657,7 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp) c->SG[--i].Ext = cpu_to_le32(HPSA_SG_LAST); } status = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, - DEFAULT_TIMEOUT); + NO_TIMEOUT); if (sg_used) hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL); check_ioctl_unit_attention(h, c); diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index fc523c3e5019..ab67ec4b6bd6 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c @@ -4722,6 +4722,8 @@ static void ibmvfc_rport_add_thread(struct work_struct *work) tgt_dbg(tgt, "Setting rport roles\n"); fc_remote_port_rolechg(rport, tgt->ids.roles); put_device(&rport->dev); + } else { + spin_unlock_irqrestore(vhost->host->host_lock, flags); } kref_put(&tgt->kref, ibmvfc_release_tgt); diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index d6a691e27d33..1f539c288ae8 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -98,7 +98,7 @@ static unsigned int ipr_transop_timeout = 0; static unsigned int ipr_debug = 0; static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS; static unsigned int ipr_dual_ioa_raid = 1; -static unsigned int ipr_number_of_msix = 2; +static unsigned int ipr_number_of_msix = 16; static unsigned int ipr_fast_reboot; static DEFINE_SPINLOCK(ipr_driver_lock); @@ -194,7 +194,8 @@ static const struct ipr_chip_t ipr_chip[] = { { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] }, { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] }, { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }, - { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, 
IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] } + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] } }; static int ipr_max_bus_speeds[] = { @@ -221,7 +222,7 @@ module_param_named(max_devs, ipr_max_devs, int, 0); MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. " "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]"); module_param_named(number_of_msix, ipr_number_of_msix, int, 0); -MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16). (default:2)"); +MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16). (default:16)"); module_param_named(fast_reboot, ipr_fast_reboot, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(fast_reboot, "Skip adapter shutdown during reboot. Set to 1 to enable. (default: 0)"); MODULE_LICENSE("GPL"); @@ -10093,6 +10094,7 @@ static int ipr_probe_ioa(struct pci_dev *pdev, ioa_cfg->intr_flag = IPR_USE_MSI; else { ioa_cfg->intr_flag = IPR_USE_LSI; + ioa_cfg->clear_isr = 1; ioa_cfg->nvectors = 1; dev_info(&pdev->dev, "Cannot enable MSI.\n"); } @@ -10564,6 +10566,10 @@ static struct pci_device_id ipr_pci_table[] = { PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 }, { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580A, 0, 0, 0 }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580B, 0, 0, 0 }, { } }; MODULE_DEVICE_TABLE(pci, ipr_pci_table); diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h index 56c57068300a..1d42c7464dfc 100644 --- a/drivers/scsi/ipr.h +++ b/drivers/scsi/ipr.h @@ -60,6 +60,7 @@ #define PCI_DEVICE_ID_IBM_CROC_FPGA_E2 0x033D #define PCI_DEVICE_ID_IBM_CROCODILE 0x034A +#define PCI_DEVICE_ID_IBM_RATTLESNAKE 0x04DA #define IPR_SUBS_DEV_ID_2780 0x0264 #define IPR_SUBS_DEV_ID_5702 0x0266 @@ -111,6 +112,8 @@ #define IPR_SUBS_DEV_ID_2CCA 0x04C7 #define IPR_SUBS_DEV_ID_2CD2 0x04C8 #define IPR_SUBS_DEV_ID_2CCD 0x04C9 +#define IPR_SUBS_DEV_ID_580A 0x04FC +#define IPR_SUBS_DEV_ID_580B 0x04FB #define IPR_NAME "ipr" /* diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index 30f9ef0c0d4f..e72673b0a8fb 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c @@ -908,9 +908,17 @@ static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid) { struct fc_exch_pool *pool; struct fc_exch *ep = NULL; + u16 cpu = xid & fc_cpu_mask; + + if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) { + printk_ratelimited(KERN_ERR + "libfc: lookup request for XID = %d, " + "indicates invalid CPU %d\n", xid, cpu); + return NULL; + } if ((xid >= mp->min_xid) && (xid <= mp->max_xid)) { - pool = per_cpu_ptr(mp->pool, xid & fc_cpu_mask); + pool = per_cpu_ptr(mp->pool, cpu); spin_lock_bh(&pool->lock); ep = fc_exch_ptr_get(pool, (xid - mp->min_xid) >> fc_cpu_order); if (ep) { diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index e01a29863c38..04ce7cfb6d1b 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -301,7 +301,6 @@ struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost) { struct fc_host_statistics *fc_stats; struct fc_lport *lport = shost_priv(shost); - struct timespec v0, v1; unsigned int cpu; u64 
fcp_in_bytes = 0; u64 fcp_out_bytes = 0; @@ -309,9 +308,7 @@ struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost) fc_stats = &lport->host_stats; memset(fc_stats, 0, sizeof(struct fc_host_statistics)); - jiffies_to_timespec(jiffies, &v0); - jiffies_to_timespec(lport->boot_time, &v1); - fc_stats->seconds_since_last_reset = (v0.tv_sec - v1.tv_sec); + fc_stats->seconds_since_last_reset = (lport->boot_time - jiffies) / HZ; for_each_possible_cpu(cpu) { struct fc_stats *stats; @@ -2090,7 +2087,7 @@ int fc_lport_bsg_request(struct fc_bsg_job *job) struct fc_rport *rport; struct fc_rport_priv *rdata; int rc = -EINVAL; - u32 did; + u32 did, tov; job->reply->reply_payload_rcv_len = 0; if (rsp) @@ -2121,15 +2118,20 @@ int fc_lport_bsg_request(struct fc_bsg_job *job) case FC_BSG_HST_CT: did = ntoh24(job->request->rqst_data.h_ct.port_id); - if (did == FC_FID_DIR_SERV) + if (did == FC_FID_DIR_SERV) { rdata = lport->dns_rdata; - else + if (!rdata) + break; + tov = rdata->e_d_tov; + } else { rdata = lport->tt.rport_lookup(lport, did); + if (!rdata) + break; + tov = rdata->e_d_tov; + kref_put(&rdata->kref, lport->tt.rport_destroy); + } - if (!rdata) - break; - - rc = fc_lport_ct_request(job, lport, did, rdata->e_d_tov); + rc = fc_lport_ct_request(job, lport, did, tov); break; case FC_BSG_HST_ELS_NOLOGIN: diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index 589ff9aedd31..93f596182145 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -95,17 +95,23 @@ static const char *fc_rport_state_names[] = { * @lport: The local port to lookup the remote port on * @port_id: The remote port ID to look up * - * The caller must hold either disc_mutex or rcu_read_lock(). + * The reference count of the fc_rport_priv structure is + * increased by one. 
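The reworked fc_rport_lookup() below only returns an rport after winning kref_get_unless_zero(), so a structure whose count has already dropped to zero is never handed out again. A userspace sketch of that rule using C11 atomics; the type and helper name are invented for illustration:

    #include <stdatomic.h>
    #include <stdbool.h>

    struct obj {
        atomic_uint refcount;
        /* ... payload ... */
    };

    /* succeed only if the count was still non-zero; never revive a dying object */
    static bool get_unless_zero(struct obj *o)
    {
        unsigned int cur = atomic_load(&o->refcount);

        while (cur != 0) {
            if (atomic_compare_exchange_weak(&o->refcount, &cur, cur + 1))
                return true;
            /* cur was reloaded by the failed CAS; loop and retry */
        }
        return false;
    }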
*/ static struct fc_rport_priv *fc_rport_lookup(const struct fc_lport *lport, u32 port_id) { - struct fc_rport_priv *rdata; + struct fc_rport_priv *rdata = NULL, *tmp_rdata; - list_for_each_entry_rcu(rdata, &lport->disc.rports, peers) - if (rdata->ids.port_id == port_id) - return rdata; - return NULL; + rcu_read_lock(); + list_for_each_entry_rcu(tmp_rdata, &lport->disc.rports, peers) + if (tmp_rdata->ids.port_id == port_id && + kref_get_unless_zero(&tmp_rdata->kref)) { + rdata = tmp_rdata; + break; + } + rcu_read_unlock(); + return rdata; } /** @@ -340,7 +346,6 @@ static void fc_rport_work(struct work_struct *work) fc_remote_port_delete(rport); } - mutex_lock(&lport->disc.disc_mutex); mutex_lock(&rdata->rp_mutex); if (rdata->rp_state == RPORT_ST_DELETE) { if (port_id == FC_FID_DIR_SERV) { @@ -370,7 +375,6 @@ static void fc_rport_work(struct work_struct *work) fc_rport_enter_ready(rdata); mutex_unlock(&rdata->rp_mutex); } - mutex_unlock(&lport->disc.disc_mutex); break; default: @@ -702,7 +706,7 @@ out: err: mutex_unlock(&rdata->rp_mutex); put: - kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy); + kref_put(&rdata->kref, lport->tt.rport_destroy); return; bad: FC_RPORT_DBG(rdata, "Bad FLOGI response\n"); @@ -762,8 +766,6 @@ static void fc_rport_recv_flogi_req(struct fc_lport *lport, FC_RPORT_ID_DBG(lport, sid, "Received FLOGI request\n"); disc = &lport->disc; - mutex_lock(&disc->disc_mutex); - if (!lport->point_to_multipoint) { rjt_data.reason = ELS_RJT_UNSUP; rjt_data.explan = ELS_EXPL_NONE; @@ -808,7 +810,7 @@ static void fc_rport_recv_flogi_req(struct fc_lport *lport, mutex_unlock(&rdata->rp_mutex); rjt_data.reason = ELS_RJT_FIP; rjt_data.explan = ELS_EXPL_NOT_NEIGHBOR; - goto reject; + goto reject_put; case RPORT_ST_FLOGI: case RPORT_ST_PLOGI_WAIT: case RPORT_ST_PLOGI: @@ -825,13 +827,13 @@ static void fc_rport_recv_flogi_req(struct fc_lport *lport, mutex_unlock(&rdata->rp_mutex); rjt_data.reason = ELS_RJT_BUSY; rjt_data.explan = ELS_EXPL_NONE; - goto reject; + goto reject_put; } if (fc_rport_login_complete(rdata, fp)) { mutex_unlock(&rdata->rp_mutex); rjt_data.reason = ELS_RJT_LOGIC; rjt_data.explan = ELS_EXPL_NONE; - goto reject; + goto reject_put; } fp = fc_frame_alloc(lport, sizeof(*flp)); @@ -851,12 +853,13 @@ static void fc_rport_recv_flogi_req(struct fc_lport *lport, fc_rport_state_enter(rdata, RPORT_ST_PLOGI_WAIT); out: mutex_unlock(&rdata->rp_mutex); - mutex_unlock(&disc->disc_mutex); + kref_put(&rdata->kref, lport->tt.rport_destroy); fc_frame_free(rx_fp); return; +reject_put: + kref_put(&rdata->kref, lport->tt.rport_destroy); reject: - mutex_unlock(&disc->disc_mutex); lport->tt.seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data); fc_frame_free(rx_fp); } @@ -923,7 +926,7 @@ out: fc_frame_free(fp); err: mutex_unlock(&rdata->rp_mutex); - kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy); + kref_put(&rdata->kref, lport->tt.rport_destroy); } static bool @@ -1477,14 +1480,11 @@ static void fc_rport_recv_els_req(struct fc_lport *lport, struct fc_frame *fp) struct fc_rport_priv *rdata; struct fc_seq_els_data els_data; - mutex_lock(&lport->disc.disc_mutex); rdata = lport->tt.rport_lookup(lport, fc_frame_sid(fp)); - if (!rdata) { - mutex_unlock(&lport->disc.disc_mutex); + if (!rdata) goto reject; - } + mutex_lock(&rdata->rp_mutex); - mutex_unlock(&lport->disc.disc_mutex); switch (rdata->rp_state) { case RPORT_ST_PRLI: @@ -1494,6 +1494,7 @@ static void fc_rport_recv_els_req(struct fc_lport *lport, struct fc_frame *fp) break; default: mutex_unlock(&rdata->rp_mutex); + 
kref_put(&rdata->kref, lport->tt.rport_destroy); goto reject; } @@ -1524,6 +1525,7 @@ static void fc_rport_recv_els_req(struct fc_lport *lport, struct fc_frame *fp) } mutex_unlock(&rdata->rp_mutex); + kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy); return; reject: @@ -1907,7 +1909,6 @@ static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp) sid = fc_frame_sid(fp); - mutex_lock(&lport->disc.disc_mutex); rdata = lport->tt.rport_lookup(lport, sid); if (rdata) { mutex_lock(&rdata->rp_mutex); @@ -1916,10 +1917,10 @@ static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp) fc_rport_enter_delete(rdata, RPORT_EV_LOGO); mutex_unlock(&rdata->rp_mutex); + kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy); } else FC_RPORT_ID_DBG(lport, sid, "Received LOGO from non-logged-in port\n"); - mutex_unlock(&lport->disc.disc_mutex); fc_frame_free(fp); } diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c index 935c43095109..763f012fdeca 100644 --- a/drivers/scsi/libsas/sas_ata.c +++ b/drivers/scsi/libsas/sas_ata.c @@ -233,15 +233,8 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc) task->task_state_flags = SAS_TASK_STATE_PENDING; qc->lldd_task = task; - switch (qc->tf.protocol) { - case ATA_PROT_NCQ: - task->ata_task.use_ncq = 1; - /* fall through */ - case ATAPI_PROT_DMA: - case ATA_PROT_DMA: - task->ata_task.dma_xfer = 1; - break; - } + task->ata_task.use_ncq = ata_is_ncq(qc->tf.protocol); + task->ata_task.dma_xfer = ata_is_dma(qc->tf.protocol); if (qc->scsicmd) ASSIGN_SAS_TASK(qc->scsicmd, task); @@ -253,6 +246,7 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc) if (qc->scsicmd) ASSIGN_SAS_TASK(qc->scsicmd, NULL); sas_free_task(task); + qc->lldd_task = NULL; ret = AC_ERR_SYSTEM; } diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index d5bd420595c1..b484859464f6 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -647,6 +647,7 @@ struct lpfc_hba { #define HBA_RRQ_ACTIVE 0x4000 /* process the rrq active list */ #define HBA_FCP_IOQ_FLUSH 0x8000 /* FCP I/O queues being flushed */ #define HBA_FW_DUMP_OP 0x10000 /* Skips fn reset before FW dump */ +#define HBA_RECOVERABLE_UE 0x20000 /* Firmware supports recoverable UE */ uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/ struct lpfc_dmabuf slim2p; @@ -694,7 +695,8 @@ struct lpfc_hba { uint8_t wwnn[8]; uint8_t wwpn[8]; uint32_t RandomData[7]; - uint32_t fcp_embed_io; + uint8_t fcp_embed_io; + uint8_t mds_diags_support; /* HBA Config Parameters */ uint32_t cfg_ack0; @@ -741,6 +743,7 @@ struct lpfc_hba { #define OAS_FIND_ANY_VPORT 0x01 #define OAS_FIND_ANY_TARGET 0x02 #define OAS_LUN_VALID 0x04 + uint32_t cfg_oas_priority; uint32_t cfg_XLanePriority; uint32_t cfg_enable_bg; uint32_t cfg_hostmem_hgp; @@ -751,6 +754,8 @@ struct lpfc_hba { uint32_t cfg_iocb_cnt; uint32_t cfg_suppress_link_up; uint32_t cfg_rrq_xri_bitmap_sz; + uint32_t cfg_delay_discovery; + uint32_t cfg_sli_mode; #define LPFC_INITIALIZE_LINK 0 /* do normal init_link mbox */ #define LPFC_DELAY_INIT_LINK 1 /* layered driver hold off */ #define LPFC_DELAY_INIT_LINK_INDEFINITELY 2 /* wait, manual intervention */ @@ -759,6 +764,7 @@ struct lpfc_hba { #define LPFC_FDMI_NO_SUPPORT 0 /* FDMI not supported */ #define LPFC_FDMI_SUPPORT 1 /* FDMI supported? 
*/ uint32_t cfg_enable_SmartSAN; + uint32_t cfg_enable_mds_diags; lpfc_vpd_t vpd; /* vital product data */ struct pci_dev *pcidev; @@ -779,9 +785,9 @@ struct lpfc_hba { atomic_t fcp_qidx; /* next work queue to post work to */ - unsigned long pci_bar0_map; /* Physical address for PCI BAR0 */ - unsigned long pci_bar1_map; /* Physical address for PCI BAR1 */ - unsigned long pci_bar2_map; /* Physical address for PCI BAR2 */ + phys_addr_t pci_bar0_map; /* Physical address for PCI BAR0 */ + phys_addr_t pci_bar1_map; /* Physical address for PCI BAR1 */ + phys_addr_t pci_bar2_map; /* Physical address for PCI BAR2 */ void __iomem *slim_memmap_p; /* Kernel memory mapped address for PCI BAR0 */ void __iomem *ctrl_regs_memmap_p;/* Kernel memory mapped address for @@ -827,6 +833,7 @@ struct lpfc_hba { struct timer_list fcp_poll_timer; struct timer_list eratt_poll; + uint32_t eratt_poll_interval; /* * stat counters @@ -999,6 +1006,18 @@ struct lpfc_hba { spinlock_t devicelock; /* lock for luns list */ mempool_t *device_data_mem_pool; struct list_head luns; +#define LPFC_TRANSGRESSION_HIGH_TEMPERATURE 0x0080 +#define LPFC_TRANSGRESSION_LOW_TEMPERATURE 0x0040 +#define LPFC_TRANSGRESSION_HIGH_VOLTAGE 0x0020 +#define LPFC_TRANSGRESSION_LOW_VOLTAGE 0x0010 +#define LPFC_TRANSGRESSION_HIGH_TXBIAS 0x0008 +#define LPFC_TRANSGRESSION_LOW_TXBIAS 0x0004 +#define LPFC_TRANSGRESSION_HIGH_TXPOWER 0x0002 +#define LPFC_TRANSGRESSION_LOW_TXPOWER 0x0001 +#define LPFC_TRANSGRESSION_HIGH_RXPOWER 0x8000 +#define LPFC_TRANSGRESSION_LOW_RXPOWER 0x4000 + uint16_t sfp_alarm; + uint16_t sfp_warning; }; static inline struct Scsi_Host * diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index cfec2eca4dd3..f1019908800e 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -48,6 +48,7 @@ #include "lpfc_compat.h" #include "lpfc_crtn.h" #include "lpfc_vport.h" +#include "lpfc_attr.h" #define LPFC_DEF_DEVLOSS_TMO 30 #define LPFC_MIN_DEVLOSS_TMO 1 @@ -1620,6 +1621,11 @@ lpfc_sriov_hw_max_virtfn_show(struct device *dev, return snprintf(buf, PAGE_SIZE, "%d\n", max_nr_virtfn); } +static inline bool lpfc_rangecheck(uint val, uint min, uint max) +{ + return val >= min && val <= max; +} + /** * lpfc_param_show - Return a cfg attribute value in decimal * @@ -1697,7 +1703,7 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \ static int \ lpfc_##attr##_init(struct lpfc_hba *phba, uint val) \ { \ - if (val >= minval && val <= maxval) {\ + if (lpfc_rangecheck(val, minval, maxval)) {\ phba->cfg_##attr = val;\ return 0;\ }\ @@ -1732,7 +1738,7 @@ lpfc_##attr##_init(struct lpfc_hba *phba, uint val) \ static int \ lpfc_##attr##_set(struct lpfc_hba *phba, uint val) \ { \ - if (val >= minval && val <= maxval) {\ + if (lpfc_rangecheck(val, minval, maxval)) {\ lpfc_printf_log(phba, KERN_ERR, LOG_INIT, \ "3052 lpfc_" #attr " changed from %d to %d\n", \ phba->cfg_##attr, val); \ @@ -1856,7 +1862,7 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \ static int \ lpfc_##attr##_init(struct lpfc_vport *vport, uint val) \ { \ - if (val >= minval && val <= maxval) {\ + if (lpfc_rangecheck(val, minval, maxval)) {\ vport->cfg_##attr = val;\ return 0;\ }\ @@ -1888,7 +1894,7 @@ lpfc_##attr##_init(struct lpfc_vport *vport, uint val) \ static int \ lpfc_##attr##_set(struct lpfc_vport *vport, uint val) \ { \ - if (val >= minval && val <= maxval) {\ + if (lpfc_rangecheck(val, minval, maxval)) {\ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \ "3053 lpfc_" #attr \ " changed from 
%d (x%x) to %d (x%x)\n", \ @@ -1939,102 +1945,6 @@ lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \ } -#define LPFC_ATTR(name, defval, minval, maxval, desc) \ -static uint lpfc_##name = defval;\ -module_param(lpfc_##name, uint, S_IRUGO);\ -MODULE_PARM_DESC(lpfc_##name, desc);\ -lpfc_param_init(name, defval, minval, maxval) - -#define LPFC_ATTR_R(name, defval, minval, maxval, desc) \ -static uint lpfc_##name = defval;\ -module_param(lpfc_##name, uint, S_IRUGO);\ -MODULE_PARM_DESC(lpfc_##name, desc);\ -lpfc_param_show(name)\ -lpfc_param_init(name, defval, minval, maxval)\ -static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL) - -#define LPFC_ATTR_RW(name, defval, minval, maxval, desc) \ -static uint lpfc_##name = defval;\ -module_param(lpfc_##name, uint, S_IRUGO);\ -MODULE_PARM_DESC(lpfc_##name, desc);\ -lpfc_param_show(name)\ -lpfc_param_init(name, defval, minval, maxval)\ -lpfc_param_set(name, defval, minval, maxval)\ -lpfc_param_store(name)\ -static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\ - lpfc_##name##_show, lpfc_##name##_store) - -#define LPFC_ATTR_HEX_R(name, defval, minval, maxval, desc) \ -static uint lpfc_##name = defval;\ -module_param(lpfc_##name, uint, S_IRUGO);\ -MODULE_PARM_DESC(lpfc_##name, desc);\ -lpfc_param_hex_show(name)\ -lpfc_param_init(name, defval, minval, maxval)\ -static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL) - -#define LPFC_ATTR_HEX_RW(name, defval, minval, maxval, desc) \ -static uint lpfc_##name = defval;\ -module_param(lpfc_##name, uint, S_IRUGO);\ -MODULE_PARM_DESC(lpfc_##name, desc);\ -lpfc_param_hex_show(name)\ -lpfc_param_init(name, defval, minval, maxval)\ -lpfc_param_set(name, defval, minval, maxval)\ -lpfc_param_store(name)\ -static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\ - lpfc_##name##_show, lpfc_##name##_store) - -#define LPFC_VPORT_ATTR(name, defval, minval, maxval, desc) \ -static uint lpfc_##name = defval;\ -module_param(lpfc_##name, uint, S_IRUGO);\ -MODULE_PARM_DESC(lpfc_##name, desc);\ -lpfc_vport_param_init(name, defval, minval, maxval) - -#define LPFC_VPORT_ATTR_R(name, defval, minval, maxval, desc) \ -static uint lpfc_##name = defval;\ -module_param(lpfc_##name, uint, S_IRUGO);\ -MODULE_PARM_DESC(lpfc_##name, desc);\ -lpfc_vport_param_show(name)\ -lpfc_vport_param_init(name, defval, minval, maxval)\ -static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL) - -#define LPFC_VPORT_ULL_ATTR_R(name, defval, minval, maxval, desc) \ -static uint64_t lpfc_##name = defval;\ -module_param(lpfc_##name, ullong, S_IRUGO);\ -MODULE_PARM_DESC(lpfc_##name, desc);\ -lpfc_vport_param_show(name)\ -lpfc_vport_param_init(name, defval, minval, maxval)\ -static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL) - -#define LPFC_VPORT_ATTR_RW(name, defval, minval, maxval, desc) \ -static uint lpfc_##name = defval;\ -module_param(lpfc_##name, uint, S_IRUGO);\ -MODULE_PARM_DESC(lpfc_##name, desc);\ -lpfc_vport_param_show(name)\ -lpfc_vport_param_init(name, defval, minval, maxval)\ -lpfc_vport_param_set(name, defval, minval, maxval)\ -lpfc_vport_param_store(name)\ -static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\ - lpfc_##name##_show, lpfc_##name##_store) - -#define LPFC_VPORT_ATTR_HEX_R(name, defval, minval, maxval, desc) \ -static uint lpfc_##name = defval;\ -module_param(lpfc_##name, uint, S_IRUGO);\ -MODULE_PARM_DESC(lpfc_##name, desc);\ -lpfc_vport_param_hex_show(name)\ -lpfc_vport_param_init(name, defval, minval, maxval)\ -static DEVICE_ATTR(lpfc_##name, S_IRUGO , 
lpfc_##name##_show, NULL) - -#define LPFC_VPORT_ATTR_HEX_RW(name, defval, minval, maxval, desc) \ -static uint lpfc_##name = defval;\ -module_param(lpfc_##name, uint, S_IRUGO);\ -MODULE_PARM_DESC(lpfc_##name, desc);\ -lpfc_vport_param_hex_show(name)\ -lpfc_vport_param_init(name, defval, minval, maxval)\ -lpfc_vport_param_set(name, defval, minval, maxval)\ -lpfc_vport_param_store(name)\ -static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\ - lpfc_##name##_show, lpfc_##name##_store) - static DEVICE_ATTR(bg_info, S_IRUGO, lpfc_bg_info_show, NULL); static DEVICE_ATTR(bg_guard_err, S_IRUGO, lpfc_bg_guard_err_show, NULL); static DEVICE_ATTR(bg_apptag_err, S_IRUGO, lpfc_bg_apptag_err_show, NULL); @@ -2401,6 +2311,69 @@ static DEVICE_ATTR(lpfc_xlane_tgt, S_IRUGO | S_IWUSR, lpfc_oas_tgt_show, lpfc_oas_tgt_store); /** + * lpfc_oas_priority_show - Return the I/O priority used for luns enabled for + * Optimized Access Storage (OAS) operations. + * @dev: class device that is converted into a Scsi_host. + * @attr: device attribute, not used. + * @buf: buffer for passing information. + * + * Returns: + * size of formatted string. + **/ +static ssize_t +lpfc_oas_priority_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + + return snprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_priority); +} + +/** + * lpfc_oas_priority_store - Store the I/O priority to use for luns enabled for + * Optimized Access Storage (OAS) operations. + * @dev: class device that is converted into a Scsi_host. + * @attr: device attribute, not used. + * @buf: buffer for passing information. + * @count: Size of the data buffer. + * + * Returns: + * -EINVAL count is invalid or priority is out of range + * -EPERM oas is not supported by hba + * value of count on success + **/ +static ssize_t +lpfc_oas_priority_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + unsigned int cnt = count; + unsigned long val; + int ret; + + if (!phba->cfg_fof) + return -EPERM; + + /* count may include a LF at end of string */ + if (buf[cnt-1] == '\n') + cnt--; + + ret = kstrtoul(buf, 0, &val); + if (ret || (val > 0x7f)) + return -EINVAL; + + if (val) + phba->cfg_oas_priority = (uint8_t)val; + else + phba->cfg_oas_priority = phba->cfg_XLanePriority; + return count; +} +static DEVICE_ATTR(lpfc_xlane_priority, S_IRUGO | S_IWUSR, + lpfc_oas_priority_show, lpfc_oas_priority_store); + +/** + * lpfc_oas_vpt_show - Return wwpn of vport whose targets maybe enabled + * for Optimized Access Storage (OAS) operations. + * @dev: class device that is converted into a Scsi_host. 
@@ -2462,6 +2435,7 @@ lpfc_oas_vpt_store(struct device *dev, struct device_attribute *attr, else phba->cfg_oas_flags &= ~OAS_FIND_ANY_VPORT; phba->cfg_oas_flags &= ~OAS_LUN_VALID; + phba->cfg_oas_priority = phba->cfg_XLanePriority; phba->sli4_hba.oas_next_lun = FIND_FIRST_OAS_LUN; return count; } @@ -2524,7 +2498,6 @@ lpfc_oas_lun_state_store(struct device *dev, struct device_attribute *attr, return -EINVAL; phba->cfg_oas_lun_state = val; - return strlen(buf); } static DEVICE_ATTR(lpfc_xlane_lun_state, S_IRUGO | S_IWUSR, @@ -2572,7 +2545,8 @@ static DEVICE_ATTR(lpfc_xlane_lun_status, S_IRUGO, */ static size_t lpfc_oas_lun_state_set(struct lpfc_hba *phba, uint8_t vpt_wwpn[], - uint8_t tgt_wwpn[], uint64_t lun, uint32_t oas_state) + uint8_t tgt_wwpn[], uint64_t lun, + uint32_t oas_state, uint8_t pri) { int rc = 0; @@ -2582,7 +2556,8 @@ lpfc_oas_lun_state_set(struct lpfc_hba *phba, uint8_t vpt_wwpn[], if (oas_state) { if (!lpfc_enable_oas_lun(phba, (struct lpfc_name *)vpt_wwpn, - (struct lpfc_name *)tgt_wwpn, lun)) + (struct lpfc_name *)tgt_wwpn, + lun, pri)) rc = -ENOMEM; } else { lpfc_disable_oas_lun(phba, (struct lpfc_name *)vpt_wwpn, @@ -2648,13 +2623,13 @@ lpfc_oas_lun_get_next(struct lpfc_hba *phba, uint8_t vpt_wwpn[], static ssize_t lpfc_oas_lun_state_change(struct lpfc_hba *phba, uint8_t vpt_wwpn[], uint8_t tgt_wwpn[], uint64_t lun, - uint32_t oas_state) + uint32_t oas_state, uint8_t pri) { int rc; rc = lpfc_oas_lun_state_set(phba, vpt_wwpn, tgt_wwpn, lun, - oas_state); + oas_state, pri); return rc; } @@ -2744,16 +2719,16 @@ lpfc_oas_lun_store(struct device *dev, struct device_attribute *attr, return -EINVAL; lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "3372 Try to set vport 0x%llx target 0x%llx lun:%lld " - "with oas set to %d\n", + "3372 Try to set vport 0x%llx target 0x%llx lun:0x%llx " + "priority 0x%x with oas state %d\n", wwn_to_u64(phba->cfg_oas_vpt_wwpn), wwn_to_u64(phba->cfg_oas_tgt_wwpn), scsi_lun, - phba->cfg_oas_lun_state); + phba->cfg_oas_priority, phba->cfg_oas_lun_state); rc = lpfc_oas_lun_state_change(phba, phba->cfg_oas_vpt_wwpn, - phba->cfg_oas_tgt_wwpn, scsi_lun, - phba->cfg_oas_lun_state); - + phba->cfg_oas_tgt_wwpn, scsi_lun, + phba->cfg_oas_lun_state, + phba->cfg_oas_priority); if (rc) return rc; @@ -2772,19 +2747,14 @@ MODULE_PARM_DESC(lpfc_poll, "FCP ring polling mode control:" static DEVICE_ATTR(lpfc_poll, S_IRUGO | S_IWUSR, lpfc_poll_show, lpfc_poll_store); -int lpfc_sli_mode = 0; -module_param(lpfc_sli_mode, int, S_IRUGO); -MODULE_PARM_DESC(lpfc_sli_mode, "SLI mode selector:" - " 0 - auto (SLI-3 if supported)," - " 2 - select SLI-2 even on SLI-3 capable HBAs," - " 3 - select SLI-3"); +LPFC_ATTR(sli_mode, 0, 0, 3, + "SLI mode selector:" + " 0 - auto (SLI-3 if supported)," + " 2 - select SLI-2 even on SLI-3 capable HBAs," + " 3 - select SLI-3"); -int lpfc_enable_npiv = 1; -module_param(lpfc_enable_npiv, int, S_IRUGO); -MODULE_PARM_DESC(lpfc_enable_npiv, "Enable NPIV functionality"); -lpfc_param_show(enable_npiv); -lpfc_param_init(enable_npiv, 1, 0, 1); -static DEVICE_ATTR(lpfc_enable_npiv, S_IRUGO, lpfc_enable_npiv_show, NULL); +LPFC_ATTR_R(enable_npiv, 1, 0, 1, + "Enable NPIV functionality"); LPFC_ATTR_R(fcf_failover_policy, 1, 1, 2, "FCF Fast failover=1 Priority failover=2"); @@ -4754,11 +4724,8 @@ MODULE_PARM_DESC(lpfc_prot_guard, "host protection guard type"); * accept and FCID/Fabric name/Fabric portname is changed. * Default value is 0. 
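The lpfc_rangecheck() helper and the reworked init/set macros above reduce every parameter update to the same validate-then-assign step, with out-of-range values rejected. A small, self-contained illustration of that shape, using a hypothetical cfg structure and the 0-0x7f bound that lpfc_oas_priority_store enforces:

#include <stdbool.h>
#include <stdio.h>

static inline bool range_check(unsigned int val, unsigned int min,
			       unsigned int max)
{
	return val >= min && val <= max;
}

struct cfg {
	unsigned int oas_priority;	/* valid range 0..0x7f */
};

static int cfg_oas_priority_set(struct cfg *cfg, unsigned int val)
{
	if (!range_check(val, 0, 0x7f))
		return -1;		/* mirrors the driver's -EINVAL path */
	cfg->oas_priority = val;
	return 0;
}

int main(void)
{
	struct cfg cfg = { 0 };

	printf("set 0x40 -> %d, set 0x100 -> %d\n",
	       cfg_oas_priority_set(&cfg, 0x40),
	       cfg_oas_priority_set(&cfg, 0x100));
	printf("priority now 0x%x\n", cfg.oas_priority);
	return 0;
}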
*/ -int lpfc_delay_discovery; -module_param(lpfc_delay_discovery, int, S_IRUGO); -MODULE_PARM_DESC(lpfc_delay_discovery, - "Delay NPort discovery when Clean Address bit is cleared. " - "Allowed values: 0,1."); +LPFC_ATTR(delay_discovery, 0, 0, 1, + "Delay NPort discovery when Clean Address bit is cleared."); /* * lpfc_sg_seg_cnt - Initial Maximum DMA Segment Count @@ -4780,6 +4747,14 @@ LPFC_ATTR_R(prot_sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_DEFAULT_SG_SEG_CNT, LPFC_MAX_SG_SEG_CNT, "Max Protection Scatter Gather Segment Count"); +/* + * lpfc_enable_mds_diags: Enable MDS Diagnostics + * 0 = MDS Diagnostics disabled (default) + * 1 = MDS Diagnostics enabled + * Value range is [0,1]. Default value is 0. + */ +LPFC_ATTR_R(enable_mds_diags, 0, 0, 1, "Enable MDS Diagnostics"); + struct device_attribute *lpfc_hba_attrs[] = { &dev_attr_bg_info, &dev_attr_bg_guard_err, @@ -4857,6 +4832,7 @@ struct device_attribute *lpfc_hba_attrs[] = { &dev_attr_lpfc_xlane_vpt, &dev_attr_lpfc_xlane_lun_state, &dev_attr_lpfc_xlane_lun_status, + &dev_attr_lpfc_xlane_priority, &dev_attr_lpfc_sg_seg_cnt, &dev_attr_lpfc_max_scsicmpl_time, &dev_attr_lpfc_stat_data_ctrl, @@ -4876,6 +4852,7 @@ struct device_attribute *lpfc_hba_attrs[] = { &dev_attr_lpfc_sriov_hw_max_virtfn, &dev_attr_protocol, &dev_attr_lpfc_xlane_supported, + &dev_attr_lpfc_enable_mds_diags, NULL, }; @@ -5849,6 +5826,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) phba->cfg_oas_lun_state = 0; phba->cfg_oas_lun_status = 0; phba->cfg_oas_flags = 0; + phba->cfg_oas_priority = 0; lpfc_enable_bg_init(phba, lpfc_enable_bg); if (phba->sli_rev == LPFC_SLI_REV4) phba->cfg_poll = 0; @@ -5866,7 +5844,10 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) lpfc_request_firmware_upgrade_init(phba, lpfc_req_fw_upgrade); lpfc_suppress_link_up_init(phba, lpfc_suppress_link_up); lpfc_iocb_cnt_init(phba, lpfc_iocb_cnt); + lpfc_delay_discovery_init(phba, lpfc_delay_discovery); + lpfc_sli_mode_init(phba, lpfc_sli_mode); phba->cfg_enable_dss = 1; + lpfc_enable_mds_diags_init(phba, lpfc_enable_mds_diags); return; } diff --git a/drivers/scsi/lpfc/lpfc_attr.h b/drivers/scsi/lpfc/lpfc_attr.h new file mode 100644 index 000000000000..b2bd28e965fa --- /dev/null +++ b/drivers/scsi/lpfc/lpfc_attr.h @@ -0,0 +1,116 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2004-2016 Emulex. All rights reserved. * + * EMULEX and SLI are trademarks of Emulex. * + * www.emulex.com * + * Portions Copyright (C) 2004-2005 Christoph Hellwig * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of version 2 of the GNU General * + * Public License as published by the Free Software Foundation. * + * This program is distributed in the hope that it will be useful. * + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * + * TO BE LEGALLY INVALID. See the GNU General Public License for * + * more details, a copy of which can be found in the file COPYING * + * included with this package. 
* + *******************************************************************/ + +#define LPFC_ATTR(name, defval, minval, maxval, desc) \ +static uint lpfc_##name = defval;\ +module_param(lpfc_##name, uint, S_IRUGO);\ +MODULE_PARM_DESC(lpfc_##name, desc);\ +lpfc_param_init(name, defval, minval, maxval) + +#define LPFC_ATTR_R(name, defval, minval, maxval, desc) \ +static uint lpfc_##name = defval;\ +module_param(lpfc_##name, uint, S_IRUGO);\ +MODULE_PARM_DESC(lpfc_##name, desc);\ +lpfc_param_show(name)\ +lpfc_param_init(name, defval, minval, maxval)\ +static DEVICE_ATTR(lpfc_##name, S_IRUGO, lpfc_##name##_show, NULL) + +#define LPFC_ATTR_RW(name, defval, minval, maxval, desc) \ +static uint lpfc_##name = defval;\ +module_param(lpfc_##name, uint, S_IRUGO);\ +MODULE_PARM_DESC(lpfc_##name, desc);\ +lpfc_param_show(name)\ +lpfc_param_init(name, defval, minval, maxval)\ +lpfc_param_set(name, defval, minval, maxval)\ +lpfc_param_store(name)\ +static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\ + lpfc_##name##_show, lpfc_##name##_store) + +#define LPFC_ATTR_HEX_R(name, defval, minval, maxval, desc) \ +static uint lpfc_##name = defval;\ +module_param(lpfc_##name, uint, S_IRUGO);\ +MODULE_PARM_DESC(lpfc_##name, desc);\ +lpfc_param_hex_show(name)\ +lpfc_param_init(name, defval, minval, maxval)\ +static DEVICE_ATTR(lpfc_##name, S_IRUGO, lpfc_##name##_show, NULL) + +#define LPFC_ATTR_HEX_RW(name, defval, minval, maxval, desc) \ +static uint lpfc_##name = defval;\ +module_param(lpfc_##name, uint, S_IRUGO);\ +MODULE_PARM_DESC(lpfc_##name, desc);\ +lpfc_param_hex_show(name)\ +lpfc_param_init(name, defval, minval, maxval)\ +lpfc_param_set(name, defval, minval, maxval)\ +lpfc_param_store(name)\ +static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\ + lpfc_##name##_show, lpfc_##name##_store) + +#define LPFC_VPORT_ATTR(name, defval, minval, maxval, desc) \ +static uint lpfc_##name = defval;\ +module_param(lpfc_##name, uint, S_IRUGO);\ +MODULE_PARM_DESC(lpfc_##name, desc);\ +lpfc_vport_param_init(name, defval, minval, maxval) + +#define LPFC_VPORT_ATTR_R(name, defval, minval, maxval, desc) \ +static uint lpfc_##name = defval;\ +module_param(lpfc_##name, uint, S_IRUGO);\ +MODULE_PARM_DESC(lpfc_##name, desc);\ +lpfc_vport_param_show(name)\ +lpfc_vport_param_init(name, defval, minval, maxval)\ +static DEVICE_ATTR(lpfc_##name, S_IRUGO, lpfc_##name##_show, NULL) + +#define LPFC_VPORT_ULL_ATTR_R(name, defval, minval, maxval, desc) \ +static uint64_t lpfc_##name = defval;\ +module_param(lpfc_##name, ullong, S_IRUGO);\ +MODULE_PARM_DESC(lpfc_##name, desc);\ +lpfc_vport_param_show(name)\ +lpfc_vport_param_init(name, defval, minval, maxval)\ +static DEVICE_ATTR(lpfc_##name, S_IRUGO, lpfc_##name##_show, NULL) + +#define LPFC_VPORT_ATTR_RW(name, defval, minval, maxval, desc) \ +static uint lpfc_##name = defval;\ +module_param(lpfc_##name, uint, S_IRUGO);\ +MODULE_PARM_DESC(lpfc_##name, desc);\ +lpfc_vport_param_show(name)\ +lpfc_vport_param_init(name, defval, minval, maxval)\ +lpfc_vport_param_set(name, defval, minval, maxval)\ +lpfc_vport_param_store(name)\ +static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\ + lpfc_##name##_show, lpfc_##name##_store) + +#define LPFC_VPORT_ATTR_HEX_R(name, defval, minval, maxval, desc) \ +static uint lpfc_##name = defval;\ +module_param(lpfc_##name, uint, S_IRUGO);\ +MODULE_PARM_DESC(lpfc_##name, desc);\ +lpfc_vport_param_hex_show(name)\ +lpfc_vport_param_init(name, defval, minval, maxval)\ +static DEVICE_ATTR(lpfc_##name, S_IRUGO, lpfc_##name##_show, NULL) + +#define 
LPFC_VPORT_ATTR_HEX_RW(name, defval, minval, maxval, desc) \ +static uint lpfc_##name = defval;\ +module_param(lpfc_##name, uint, S_IRUGO);\ +MODULE_PARM_DESC(lpfc_##name, desc);\ +lpfc_vport_param_hex_show(name)\ +lpfc_vport_param_init(name, defval, minval, maxval)\ +lpfc_vport_param_set(name, defval, minval, maxval)\ +lpfc_vport_param_store(name)\ +static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\ + lpfc_##name##_show, lpfc_##name##_store) diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index 4e55b35180a4..bd7576d452f2 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2015 Emulex. All rights reserved. * + * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * * @@ -359,9 +359,6 @@ extern struct scsi_host_template lpfc_template_s3; extern struct scsi_host_template lpfc_vport_template; extern struct fc_function_template lpfc_transport_functions; extern struct fc_function_template lpfc_vport_transport_functions; -extern int lpfc_sli_mode; -extern int lpfc_enable_npiv; -extern int lpfc_delay_discovery; int lpfc_vport_symbolic_node_name(struct lpfc_vport *, char *, size_t); int lpfc_vport_symbolic_port_name(struct lpfc_vport *, char *, size_t); @@ -492,7 +489,7 @@ struct lpfc_device_data *__lpfc_get_device_data(struct lpfc_hba *, struct lpfc_name *, struct lpfc_name *, uint64_t); bool lpfc_enable_oas_lun(struct lpfc_hba *, struct lpfc_name *, - struct lpfc_name *, uint64_t); + struct lpfc_name *, uint64_t, uint8_t); bool lpfc_disable_oas_lun(struct lpfc_hba *, struct lpfc_name *, struct lpfc_name *, uint64_t); bool lpfc_find_next_oas_lun(struct lpfc_hba *, struct lpfc_name *, diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index a38816e96654..63e48d4277b0 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -1510,6 +1510,10 @@ lpfc_fdmi_num_disc_check(struct lpfc_vport *vport) if (!lpfc_is_link_up(phba)) return; + /* Must be connected to a Fabric */ + if (!(vport->fc_flag & FC_FABRIC)) + return; + if (!(vport->fdmi_port_mask & LPFC_FDMI_PORT_ATTR_num_disc)) return; diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 0498f5760d2b..c0af32f24954 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -594,6 +594,7 @@ static uint8_t lpfc_check_clean_addr_bit(struct lpfc_vport *vport, struct serv_parm *sp) { + struct lpfc_hba *phba = vport->phba; uint8_t fabric_param_changed = 0; struct Scsi_Host *shost = lpfc_shost_from_vport(vport); @@ -615,7 +616,7 @@ lpfc_check_clean_addr_bit(struct lpfc_vport *vport, * - lpfc_delay_discovery module parameter is set. 
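The attribute macros relocated into lpfc_attr.h above are token-pasting generators. As a rough hand expansion (the generated _show and _init bodies are elided here), the new LPFC_ATTR_R(enable_mds_diags, 0, 0, 1, "Enable MDS Diagnostics") in lpfc_attr.c produces approximately:

static uint lpfc_enable_mds_diags = 0;
module_param(lpfc_enable_mds_diags, uint, S_IRUGO);
MODULE_PARM_DESC(lpfc_enable_mds_diags, "Enable MDS Diagnostics");
/* lpfc_param_show(enable_mds_diags) emits lpfc_enable_mds_diags_show() */
/* lpfc_param_init(enable_mds_diags, 0, 0, 1) emits
 * lpfc_enable_mds_diags_init(), which validates via lpfc_rangecheck(val, 0, 1) */
static DEVICE_ATTR(lpfc_enable_mds_diags, S_IRUGO,
		   lpfc_enable_mds_diags_show, NULL);

This is why lpfc_get_cfgparam() can simply call lpfc_enable_mds_diags_init(phba, lpfc_enable_mds_diags) later in the patch.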
*/ if (fabric_param_changed && !sp->cmn.clean_address_bit && - (vport->fc_prevDID || lpfc_delay_discovery)) { + (vport->fc_prevDID || phba->cfg_delay_discovery)) { spin_lock_irq(shost->host_lock); vport->fc_flag |= FC_DISC_DELAYED; spin_unlock_irq(shost->host_lock); @@ -3299,6 +3300,12 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, FC_VPORT_FABRIC_REJ_WWN); } break; + case LSRJT_VENDOR_UNIQUE: + if ((stat.un.b.vendorUnique == 0x45) && + (cmd == ELS_CMD_FLOGI)) { + goto out_retry; + } + break; } break; @@ -3344,6 +3351,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, if ((vport->load_flag & FC_UNLOADING) != 0) retry = 0; +out_retry: if (retry) { if ((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_FDISC)) { /* Stop retrying PLOGI and FDISC if in FCF discovery */ @@ -4609,7 +4617,7 @@ lpfc_els_disc_plogi(struct lpfc_vport *vport) return sentplogi; } -void +uint32_t lpfc_rdp_res_link_service(struct fc_rdp_link_service_desc *desc, uint32_t word0) { @@ -4617,9 +4625,11 @@ lpfc_rdp_res_link_service(struct fc_rdp_link_service_desc *desc, desc->tag = cpu_to_be32(RDP_LINK_SERVICE_DESC_TAG); desc->payload.els_req = word0; desc->length = cpu_to_be32(sizeof(desc->payload)); + + return sizeof(struct fc_rdp_link_service_desc); } -void +uint32_t lpfc_rdp_res_sfp_desc(struct fc_rdp_sfp_desc *desc, uint8_t *page_a0, uint8_t *page_a2) { @@ -4680,9 +4690,11 @@ lpfc_rdp_res_sfp_desc(struct fc_rdp_sfp_desc *desc, desc->sfp_info.flags = cpu_to_be16(flag); desc->length = cpu_to_be32(sizeof(desc->sfp_info)); + + return sizeof(struct fc_rdp_sfp_desc); } -void +uint32_t lpfc_rdp_res_link_error(struct fc_rdp_link_error_status_desc *desc, READ_LNK_VAR *stat) { @@ -4707,134 +4719,181 @@ lpfc_rdp_res_link_error(struct fc_rdp_link_error_status_desc *desc, desc->info.link_status.invalid_crc_cnt = cpu_to_be32(stat->crcCnt); desc->length = cpu_to_be32(sizeof(desc->info)); + + return sizeof(struct fc_rdp_link_error_status_desc); } -void +uint32_t lpfc_rdp_res_bbc_desc(struct fc_rdp_bbc_desc *desc, READ_LNK_VAR *stat, struct lpfc_vport *vport) { + uint32_t bbCredit; + desc->tag = cpu_to_be32(RDP_BBC_DESC_TAG); - desc->bbc_info.port_bbc = cpu_to_be32( - vport->fc_sparam.cmn.bbCreditMsb | - vport->fc_sparam.cmn.bbCreditlsb << 8); - if (vport->phba->fc_topology != LPFC_TOPOLOGY_LOOP) - desc->bbc_info.attached_port_bbc = cpu_to_be32( - vport->phba->fc_fabparam.cmn.bbCreditMsb | - vport->phba->fc_fabparam.cmn.bbCreditlsb << 8); - else + bbCredit = vport->fc_sparam.cmn.bbCreditLsb | + (vport->fc_sparam.cmn.bbCreditMsb << 8); + desc->bbc_info.port_bbc = cpu_to_be32(bbCredit); + if (vport->phba->fc_topology != LPFC_TOPOLOGY_LOOP) { + bbCredit = vport->phba->fc_fabparam.cmn.bbCreditLsb | + (vport->phba->fc_fabparam.cmn.bbCreditMsb << 8); + desc->bbc_info.attached_port_bbc = cpu_to_be32(bbCredit); + } else { desc->bbc_info.attached_port_bbc = 0; + } desc->bbc_info.rtt = 0; desc->length = cpu_to_be32(sizeof(desc->bbc_info)); + + return sizeof(struct fc_rdp_bbc_desc); } -void -lpfc_rdp_res_oed_temp_desc(struct fc_rdp_oed_sfp_desc *desc, uint8_t *page_a2) +uint32_t +lpfc_rdp_res_oed_temp_desc(struct lpfc_hba *phba, + struct fc_rdp_oed_sfp_desc *desc, uint8_t *page_a2) { - uint32_t flags; + uint32_t flags = 0; desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); - desc->oed_info.hi_alarm = - cpu_to_be16(page_a2[SSF_TEMP_HIGH_ALARM]); - desc->oed_info.lo_alarm = cpu_to_be16(page_a2[SSF_TEMP_LOW_ALARM]); - desc->oed_info.hi_warning = - cpu_to_be16(page_a2[SSF_TEMP_HIGH_WARNING]); - desc->oed_info.lo_warning = - 
cpu_to_be16(page_a2[SSF_TEMP_LOW_WARNING]); - flags = 0xf; /* All four are valid */ + desc->oed_info.hi_alarm = page_a2[SSF_TEMP_HIGH_ALARM]; + desc->oed_info.lo_alarm = page_a2[SSF_TEMP_LOW_ALARM]; + desc->oed_info.hi_warning = page_a2[SSF_TEMP_HIGH_WARNING]; + desc->oed_info.lo_warning = page_a2[SSF_TEMP_LOW_WARNING]; + + if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TEMPERATURE) + flags |= RDP_OET_HIGH_ALARM; + if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TEMPERATURE) + flags |= RDP_OET_LOW_ALARM; + if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TEMPERATURE) + flags |= RDP_OET_HIGH_WARNING; + if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TEMPERATURE) + flags |= RDP_OET_LOW_WARNING; + flags |= ((0xf & RDP_OED_TEMPERATURE) << RDP_OED_TYPE_SHIFT); desc->oed_info.function_flags = cpu_to_be32(flags); desc->length = cpu_to_be32(sizeof(desc->oed_info)); + return sizeof(struct fc_rdp_oed_sfp_desc); } -void -lpfc_rdp_res_oed_voltage_desc(struct fc_rdp_oed_sfp_desc *desc, +uint32_t +lpfc_rdp_res_oed_voltage_desc(struct lpfc_hba *phba, + struct fc_rdp_oed_sfp_desc *desc, uint8_t *page_a2) { - uint32_t flags; + uint32_t flags = 0; desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); - desc->oed_info.hi_alarm = - cpu_to_be16(page_a2[SSF_VOLTAGE_HIGH_ALARM]); - desc->oed_info.lo_alarm = cpu_to_be16(page_a2[SSF_VOLTAGE_LOW_ALARM]); - desc->oed_info.hi_warning = - cpu_to_be16(page_a2[SSF_VOLTAGE_HIGH_WARNING]); - desc->oed_info.lo_warning = - cpu_to_be16(page_a2[SSF_VOLTAGE_LOW_WARNING]); - flags = 0xf; /* All four are valid */ + desc->oed_info.hi_alarm = page_a2[SSF_VOLTAGE_HIGH_ALARM]; + desc->oed_info.lo_alarm = page_a2[SSF_VOLTAGE_LOW_ALARM]; + desc->oed_info.hi_warning = page_a2[SSF_VOLTAGE_HIGH_WARNING]; + desc->oed_info.lo_warning = page_a2[SSF_VOLTAGE_LOW_WARNING]; + + if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_VOLTAGE) + flags |= RDP_OET_HIGH_ALARM; + if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_VOLTAGE) + flags |= RDP_OET_LOW_ALARM; + if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_VOLTAGE) + flags |= RDP_OET_HIGH_WARNING; + if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_VOLTAGE) + flags |= RDP_OET_LOW_WARNING; + flags |= ((0xf & RDP_OED_VOLTAGE) << RDP_OED_TYPE_SHIFT); desc->oed_info.function_flags = cpu_to_be32(flags); desc->length = cpu_to_be32(sizeof(desc->oed_info)); + return sizeof(struct fc_rdp_oed_sfp_desc); } -void -lpfc_rdp_res_oed_txbias_desc(struct fc_rdp_oed_sfp_desc *desc, +uint32_t +lpfc_rdp_res_oed_txbias_desc(struct lpfc_hba *phba, + struct fc_rdp_oed_sfp_desc *desc, uint8_t *page_a2) { - uint32_t flags; + uint32_t flags = 0; desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); - desc->oed_info.hi_alarm = - cpu_to_be16(page_a2[SSF_BIAS_HIGH_ALARM]); - desc->oed_info.lo_alarm = cpu_to_be16(page_a2[SSF_BIAS_LOW_ALARM]); - desc->oed_info.hi_warning = - cpu_to_be16(page_a2[SSF_BIAS_HIGH_WARNING]); - desc->oed_info.lo_warning = - cpu_to_be16(page_a2[SSF_BIAS_LOW_WARNING]); - flags = 0xf; /* All four are valid */ + desc->oed_info.hi_alarm = page_a2[SSF_BIAS_HIGH_ALARM]; + desc->oed_info.lo_alarm = page_a2[SSF_BIAS_LOW_ALARM]; + desc->oed_info.hi_warning = page_a2[SSF_BIAS_HIGH_WARNING]; + desc->oed_info.lo_warning = page_a2[SSF_BIAS_LOW_WARNING]; + + if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TXBIAS) + flags |= RDP_OET_HIGH_ALARM; + if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TXBIAS) + flags |= RDP_OET_LOW_ALARM; + if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TXBIAS) + flags |= RDP_OET_HIGH_WARNING; + if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TXBIAS) + flags |= RDP_OET_LOW_WARNING; + 
flags |= ((0xf & RDP_OED_TXBIAS) << RDP_OED_TYPE_SHIFT); desc->oed_info.function_flags = cpu_to_be32(flags); desc->length = cpu_to_be32(sizeof(desc->oed_info)); + return sizeof(struct fc_rdp_oed_sfp_desc); } -void -lpfc_rdp_res_oed_txpower_desc(struct fc_rdp_oed_sfp_desc *desc, +uint32_t +lpfc_rdp_res_oed_txpower_desc(struct lpfc_hba *phba, + struct fc_rdp_oed_sfp_desc *desc, uint8_t *page_a2) { - uint32_t flags; + uint32_t flags = 0; desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); - desc->oed_info.hi_alarm = - cpu_to_be16(page_a2[SSF_TXPOWER_HIGH_ALARM]); - desc->oed_info.lo_alarm = cpu_to_be16(page_a2[SSF_TXPOWER_LOW_ALARM]); - desc->oed_info.hi_warning = - cpu_to_be16(page_a2[SSF_TXPOWER_HIGH_WARNING]); - desc->oed_info.lo_warning = - cpu_to_be16(page_a2[SSF_TXPOWER_LOW_WARNING]); - flags = 0xf; /* All four are valid */ + desc->oed_info.hi_alarm = page_a2[SSF_TXPOWER_HIGH_ALARM]; + desc->oed_info.lo_alarm = page_a2[SSF_TXPOWER_LOW_ALARM]; + desc->oed_info.hi_warning = page_a2[SSF_TXPOWER_HIGH_WARNING]; + desc->oed_info.lo_warning = page_a2[SSF_TXPOWER_LOW_WARNING]; + + if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TXPOWER) + flags |= RDP_OET_HIGH_ALARM; + if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TXPOWER) + flags |= RDP_OET_LOW_ALARM; + if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TXPOWER) + flags |= RDP_OET_HIGH_WARNING; + if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TXPOWER) + flags |= RDP_OET_LOW_WARNING; + flags |= ((0xf & RDP_OED_TXPOWER) << RDP_OED_TYPE_SHIFT); desc->oed_info.function_flags = cpu_to_be32(flags); desc->length = cpu_to_be32(sizeof(desc->oed_info)); + return sizeof(struct fc_rdp_oed_sfp_desc); } -void -lpfc_rdp_res_oed_rxpower_desc(struct fc_rdp_oed_sfp_desc *desc, +uint32_t +lpfc_rdp_res_oed_rxpower_desc(struct lpfc_hba *phba, + struct fc_rdp_oed_sfp_desc *desc, uint8_t *page_a2) { - uint32_t flags; + uint32_t flags = 0; desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); - desc->oed_info.hi_alarm = - cpu_to_be16(page_a2[SSF_RXPOWER_HIGH_ALARM]); - desc->oed_info.lo_alarm = cpu_to_be16(page_a2[SSF_RXPOWER_LOW_ALARM]); - desc->oed_info.hi_warning = - cpu_to_be16(page_a2[SSF_RXPOWER_HIGH_WARNING]); - desc->oed_info.lo_warning = - cpu_to_be16(page_a2[SSF_RXPOWER_LOW_WARNING]); - flags = 0xf; /* All four are valid */ + desc->oed_info.hi_alarm = page_a2[SSF_RXPOWER_HIGH_ALARM]; + desc->oed_info.lo_alarm = page_a2[SSF_RXPOWER_LOW_ALARM]; + desc->oed_info.hi_warning = page_a2[SSF_RXPOWER_HIGH_WARNING]; + desc->oed_info.lo_warning = page_a2[SSF_RXPOWER_LOW_WARNING]; + + if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_RXPOWER) + flags |= RDP_OET_HIGH_ALARM; + if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_RXPOWER) + flags |= RDP_OET_LOW_ALARM; + if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_RXPOWER) + flags |= RDP_OET_HIGH_WARNING; + if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_RXPOWER) + flags |= RDP_OET_LOW_WARNING; + flags |= ((0xf & RDP_OED_RXPOWER) << RDP_OED_TYPE_SHIFT); desc->oed_info.function_flags = cpu_to_be32(flags); desc->length = cpu_to_be32(sizeof(desc->oed_info)); + return sizeof(struct fc_rdp_oed_sfp_desc); } -void +uint32_t lpfc_rdp_res_opd_desc(struct fc_rdp_opd_sfp_desc *desc, uint8_t *page_a0, struct lpfc_vport *vport) { @@ -4845,9 +4904,10 @@ lpfc_rdp_res_opd_desc(struct fc_rdp_opd_sfp_desc *desc, memcpy(desc->opd_info.revision, &page_a0[SSF_VENDOR_REV], 2); memcpy(desc->opd_info.date, &page_a0[SSF_DATE_CODE], 8); desc->length = cpu_to_be32(sizeof(desc->opd_info)); + return sizeof(struct fc_rdp_opd_sfp_desc); } -int +uint32_t lpfc_rdp_res_fec_desc(struct 
fc_fec_rdp_desc *desc, READ_LNK_VAR *stat) { if (bf_get(lpfc_read_link_stat_gec2, stat) == 0) @@ -4864,7 +4924,7 @@ lpfc_rdp_res_fec_desc(struct fc_fec_rdp_desc *desc, READ_LNK_VAR *stat) return sizeof(struct fc_fec_rdp_desc); } -void +uint32_t lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba) { uint16_t rdp_cap = 0; @@ -4923,9 +4983,10 @@ lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba) desc->info.port_speed.capabilities = cpu_to_be16(rdp_cap); desc->length = cpu_to_be32(sizeof(desc->info)); + return sizeof(struct fc_rdp_port_speed_desc); } -void +uint32_t lpfc_rdp_res_diag_port_names(struct fc_rdp_port_name_desc *desc, struct lpfc_hba *phba) { @@ -4939,9 +5000,10 @@ lpfc_rdp_res_diag_port_names(struct fc_rdp_port_name_desc *desc, sizeof(desc->port_names.wwpn)); desc->length = cpu_to_be32(sizeof(desc->port_names)); + return sizeof(struct fc_rdp_port_name_desc); } -void +uint32_t lpfc_rdp_res_attach_port_names(struct fc_rdp_port_name_desc *desc, struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) { @@ -4962,6 +5024,7 @@ lpfc_rdp_res_attach_port_names(struct fc_rdp_port_name_desc *desc, } desc->length = cpu_to_be32(sizeof(desc->port_names)); + return sizeof(struct fc_rdp_port_name_desc); } void @@ -4976,8 +5039,9 @@ lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context, uint8_t *pcmd; struct ls_rjt *stat; struct fc_rdp_res_frame *rdp_res; - uint32_t cmdsize; - int rc, fec_size; + uint32_t cmdsize, len; + uint16_t *flag_ptr; + int rc; if (status != SUCCESS) goto error; @@ -5008,39 +5072,61 @@ lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context, memset(pcmd, 0, sizeof(struct fc_rdp_res_frame)); *((uint32_t *) (pcmd)) = ELS_CMD_ACC; + /* Update Alarm and Warning */ + flag_ptr = (uint16_t *)(rdp_context->page_a2 + SSF_ALARM_FLAGS); + phba->sfp_alarm |= *flag_ptr; + flag_ptr = (uint16_t *)(rdp_context->page_a2 + SSF_WARNING_FLAGS); + phba->sfp_warning |= *flag_ptr; + /* For RDP payload */ - lpfc_rdp_res_link_service(&rdp_res->link_service_desc, ELS_CMD_RDP); + len = 8; + len += lpfc_rdp_res_link_service((struct fc_rdp_link_service_desc *) + (len + pcmd), ELS_CMD_RDP); - lpfc_rdp_res_sfp_desc(&rdp_res->sfp_desc, + len += lpfc_rdp_res_sfp_desc((struct fc_rdp_sfp_desc *)(len + pcmd), rdp_context->page_a0, rdp_context->page_a2); - lpfc_rdp_res_speed(&rdp_res->portspeed_desc, phba); - lpfc_rdp_res_link_error(&rdp_res->link_error_desc, - &rdp_context->link_stat); - lpfc_rdp_res_diag_port_names(&rdp_res->diag_port_names_desc, phba); - lpfc_rdp_res_attach_port_names(&rdp_res->attached_port_names_desc, - vport, ndlp); - lpfc_rdp_res_bbc_desc(&rdp_res->bbc_desc, &rdp_context->link_stat, - vport); - lpfc_rdp_res_oed_temp_desc(&rdp_res->oed_temp_desc, - rdp_context->page_a2); - lpfc_rdp_res_oed_voltage_desc(&rdp_res->oed_voltage_desc, - rdp_context->page_a2); - lpfc_rdp_res_oed_txbias_desc(&rdp_res->oed_txbias_desc, - rdp_context->page_a2); - lpfc_rdp_res_oed_txpower_desc(&rdp_res->oed_txpower_desc, - rdp_context->page_a2); - lpfc_rdp_res_oed_rxpower_desc(&rdp_res->oed_rxpower_desc, - rdp_context->page_a2); - lpfc_rdp_res_opd_desc(&rdp_res->opd_desc, rdp_context->page_a0, vport); - fec_size = lpfc_rdp_res_fec_desc(&rdp_res->fec_desc, + len += lpfc_rdp_res_speed((struct fc_rdp_port_speed_desc *)(len + pcmd), + phba); + len += lpfc_rdp_res_link_error((struct fc_rdp_link_error_status_desc *) + (len + pcmd), &rdp_context->link_stat); + len += lpfc_rdp_res_diag_port_names((struct fc_rdp_port_name_desc 
*) + (len + pcmd), phba); + len += lpfc_rdp_res_attach_port_names((struct fc_rdp_port_name_desc *) + (len + pcmd), vport, ndlp); + len += lpfc_rdp_res_fec_desc((struct fc_fec_rdp_desc *)(len + pcmd), &rdp_context->link_stat); - rdp_res->length = cpu_to_be32(fec_size + RDP_DESC_PAYLOAD_SIZE); + /* Check if nport is logged, BZ190632 */ + if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED)) + goto lpfc_skip_descriptor; + + len += lpfc_rdp_res_bbc_desc((struct fc_rdp_bbc_desc *)(len + pcmd), + &rdp_context->link_stat, vport); + len += lpfc_rdp_res_oed_temp_desc(phba, + (struct fc_rdp_oed_sfp_desc *)(len + pcmd), + rdp_context->page_a2); + len += lpfc_rdp_res_oed_voltage_desc(phba, + (struct fc_rdp_oed_sfp_desc *)(len + pcmd), + rdp_context->page_a2); + len += lpfc_rdp_res_oed_txbias_desc(phba, + (struct fc_rdp_oed_sfp_desc *)(len + pcmd), + rdp_context->page_a2); + len += lpfc_rdp_res_oed_txpower_desc(phba, + (struct fc_rdp_oed_sfp_desc *)(len + pcmd), + rdp_context->page_a2); + len += lpfc_rdp_res_oed_rxpower_desc(phba, + (struct fc_rdp_oed_sfp_desc *)(len + pcmd), + rdp_context->page_a2); + len += lpfc_rdp_res_opd_desc((struct fc_rdp_opd_sfp_desc *)(len + pcmd), + rdp_context->page_a0, vport); + +lpfc_skip_descriptor: + rdp_res->length = cpu_to_be32(len - 8); elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; /* Now that we know the true size of the payload, update the BPL */ bpl = (struct ulp_bde64 *) (((struct lpfc_dmabuf *)(elsiocb->context3))->virt); - bpl->tus.f.bdeSize = (fec_size + RDP_DESC_PAYLOAD_SIZE + 8); + bpl->tus.f.bdeSize = len; bpl->tus.f.bdeFlags = 0; bpl->tus.w = le32_to_cpu(bpl->tus.w); @@ -5165,6 +5251,12 @@ lpfc_els_rcv_rdp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, be32_to_cpu(rdp_req->nport_id_desc.nport_id), be32_to_cpu(rdp_req->nport_id_desc.length)); + if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED) && + !phba->cfg_enable_SmartSAN) { + rjt_err = LSRJT_UNABLE_TPC; + rjt_expl = LSEXP_PORT_LOGIN_REQ; + goto error; + } if (sizeof(struct fc_rdp_nport_desc) != be32_to_cpu(rdp_req->rdp_des_length)) goto rjt_logerr; diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h index 39f0fd000d2c..822654322e67 100644 --- a/drivers/scsi/lpfc/lpfc_hw.h +++ b/drivers/scsi/lpfc/lpfc_hw.h @@ -346,7 +346,7 @@ struct csp { uint8_t fcphHigh; /* FC Word 0, byte 0 */ uint8_t fcphLow; uint8_t bbCreditMsb; - uint8_t bbCreditlsb; /* FC Word 0, byte 3 */ + uint8_t bbCreditLsb; /* FC Word 0, byte 3 */ /* * Word 1 Bit 31 in common service parameter is overloaded. 
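In lpfc_els_rdp_cmpl() above, each descriptor helper now returns the number of bytes it wrote and the caller accumulates that into len, which then sets both rdp_res->length (payload minus the 8-byte header) and the BPL bdeSize. The shape of that accumulate-as-you-append construction, reduced to a runnable sketch with hypothetical descriptor types:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct desc_a { uint32_t tag; uint32_t len; uint32_t payload; };
struct desc_b { uint32_t tag; uint32_t len; uint64_t payload; };

static uint32_t emit_desc_a(void *buf, uint32_t payload)
{
	struct desc_a d = { .tag = 1, .len = sizeof(d.payload), .payload = payload };

	memcpy(buf, &d, sizeof(d));
	return sizeof(d);	/* each emitter reports its own size */
}

static uint32_t emit_desc_b(void *buf, uint64_t payload)
{
	struct desc_b d = { .tag = 2, .len = sizeof(d.payload), .payload = payload };

	memcpy(buf, &d, sizeof(d));
	return sizeof(d);
}

int main(void)
{
	uint8_t pcmd[64];
	uint32_t len = 8;			/* response header */

	len += emit_desc_a(pcmd + len, 0x11);
	len += emit_desc_b(pcmd + len, 0x22);
	printf("payload length (excluding header) = %u\n",
	       (unsigned int)(len - 8));
	return 0;
}

Letting every emitter report its own size is what allows the patch to skip the optional descriptors for unregistered nports without relying on a fixed RDP_DESC_PAYLOAD_SIZE constant.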
@@ -1206,6 +1206,12 @@ struct fc_rdp_bbc_desc { struct fc_rdp_bbc_info bbc_info; }; +/* Optical Element Type Transgression Flags */ +#define RDP_OET_LOW_WARNING 0x1 +#define RDP_OET_HIGH_WARNING 0x2 +#define RDP_OET_LOW_ALARM 0x4 +#define RDP_OET_HIGH_ALARM 0x8 + #define RDP_OED_TEMPERATURE 0x1 #define RDP_OED_VOLTAGE 0x2 #define RDP_OED_TXBIAS 0x3 @@ -1233,8 +1239,8 @@ struct fc_rdp_opd_sfp_info { uint8_t vendor_name[16]; uint8_t model_number[16]; uint8_t serial_number[16]; - uint8_t reserved[2]; uint8_t revision[2]; + uint8_t reserved[2]; uint8_t date[8]; }; @@ -1261,27 +1267,17 @@ struct fc_rdp_res_frame { struct fc_rdp_link_error_status_desc link_error_desc; /* Word 13-21 */ struct fc_rdp_port_name_desc diag_port_names_desc; /* Word 22-27 */ struct fc_rdp_port_name_desc attached_port_names_desc;/* Word 28-33 */ - struct fc_rdp_bbc_desc bbc_desc; /* FC Word 34-38*/ - struct fc_rdp_oed_sfp_desc oed_temp_desc; /* FC Word 39-43*/ - struct fc_rdp_oed_sfp_desc oed_voltage_desc; /* FC word 44-48*/ - struct fc_rdp_oed_sfp_desc oed_txbias_desc; /* FC word 49-53*/ - struct fc_rdp_oed_sfp_desc oed_txpower_desc; /* FC word 54-58*/ - struct fc_rdp_oed_sfp_desc oed_rxpower_desc; /* FC word 59-63*/ - struct fc_rdp_opd_sfp_desc opd_desc; /* FC word 64-80*/ - struct fc_fec_rdp_desc fec_desc; /* FC word 81-84*/ + struct fc_fec_rdp_desc fec_desc; /* FC word 34-37*/ + struct fc_rdp_bbc_desc bbc_desc; /* FC Word 38-42*/ + struct fc_rdp_oed_sfp_desc oed_temp_desc; /* FC Word 43-47*/ + struct fc_rdp_oed_sfp_desc oed_voltage_desc; /* FC word 48-52*/ + struct fc_rdp_oed_sfp_desc oed_txbias_desc; /* FC word 53-57*/ + struct fc_rdp_oed_sfp_desc oed_txpower_desc; /* FC word 58-62*/ + struct fc_rdp_oed_sfp_desc oed_rxpower_desc; /* FC word 63-67*/ + struct fc_rdp_opd_sfp_desc opd_desc; /* FC word 68-84*/ }; -#define RDP_DESC_PAYLOAD_SIZE (sizeof(struct fc_rdp_link_service_desc) \ - + sizeof(struct fc_rdp_sfp_desc) \ - + sizeof(struct fc_rdp_port_speed_desc) \ - + sizeof(struct fc_rdp_link_error_status_desc) \ - + (sizeof(struct fc_rdp_port_name_desc) * 2) \ - + sizeof(struct fc_rdp_bbc_desc) \ - + (sizeof(struct fc_rdp_oed_sfp_desc) * 5) \ - + sizeof(struct fc_rdp_opd_sfp_desc)) - - /******** FDMI ********/ /* lpfc_sli_ct_request defines the CT_IU preamble for FDMI commands */ diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index 0c7070bf2813..ee8022737591 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h @@ -544,6 +544,8 @@ struct lpfc_register { uint32_t word0; }; +#define LPFC_PORT_SEM_UE_RECOVERABLE 0xE000 +#define LPFC_PORT_SEM_MASK 0xF000 /* The following BAR0 Registers apply to SLI4 if_type 0 UCNAs. 
*/ #define LPFC_UERR_STATUS_HI 0x00A4 #define LPFC_UERR_STATUS_LO 0x00A0 @@ -937,6 +939,7 @@ struct mbox_header { #define LPFC_MBOX_OPCODE_READ_OBJECT_LIST 0xAD #define LPFC_MBOX_OPCODE_DELETE_OBJECT 0xAE #define LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS 0xB5 +#define LPFC_MBOX_OPCODE_SET_FEATURES 0xBF /* FCoE Opcodes */ #define LPFC_MBOX_OPCODE_FCOE_WQ_CREATE 0x01 @@ -2590,10 +2593,8 @@ struct lpfc_mbx_memory_dump_type3 { #define SFF_RXPOWER_B1 104 #define SFF_RXPOWER_B0 105 #define SSF_STATUS_CONTROL 110 -#define SSF_ALARM_FLAGS_B1 112 -#define SSF_ALARM_FLAGS_B0 113 -#define SSF_WARNING_FLAGS_B1 116 -#define SSF_WARNING_FLAGS_B0 117 +#define SSF_ALARM_FLAGS 112 +#define SSF_WARNING_FLAGS 116 #define SSF_EXT_TATUS_CONTROL_B1 118 #define SSF_EXT_TATUS_CONTROL_B0 119 #define SSF_A2_VENDOR_SPECIFIC 120 @@ -2887,8 +2888,37 @@ struct lpfc_sli4_parameters { #define cfg_ext_embed_cb_SHIFT 0 #define cfg_ext_embed_cb_MASK 0x00000001 #define cfg_ext_embed_cb_WORD word19 +#define cfg_mds_diags_SHIFT 1 +#define cfg_mds_diags_MASK 0x00000001 +#define cfg_mds_diags_WORD word19 }; +#define LPFC_SET_UE_RECOVERY 0x10 +#define LPFC_SET_MDS_DIAGS 0x11 +struct lpfc_mbx_set_feature { + struct mbox_header header; + uint32_t feature; + uint32_t param_len; + uint32_t word6; +#define lpfc_mbx_set_feature_UER_SHIFT 0 +#define lpfc_mbx_set_feature_UER_MASK 0x00000001 +#define lpfc_mbx_set_feature_UER_WORD word6 +#define lpfc_mbx_set_feature_mds_SHIFT 0 +#define lpfc_mbx_set_feature_mds_MASK 0x00000001 +#define lpfc_mbx_set_feature_mds_WORD word6 +#define lpfc_mbx_set_feature_mds_deep_loopbk_SHIFT 1 +#define lpfc_mbx_set_feature_mds_deep_loopbk_MASK 0x00000001 +#define lpfc_mbx_set_feature_mds_deep_loopbk_WORD word6 + uint32_t word7; +#define lpfc_mbx_set_feature_UERP_SHIFT 0 +#define lpfc_mbx_set_feature_UERP_MASK 0x0000ffff +#define lpfc_mbx_set_feature_UERP_WORD word7 +#define lpfc_mbx_set_feature_UESR_SHIFT 16 +#define lpfc_mbx_set_feature_UESR_MASK 0x0000ffff +#define lpfc_mbx_set_feature_UESR_WORD word7 +}; + + struct lpfc_mbx_get_sli4_parameters { struct mbox_header header; struct lpfc_sli4_parameters sli4_parameters; @@ -3281,6 +3311,7 @@ struct lpfc_mqe { struct lpfc_mbx_get_prof_cfg get_prof_cfg; struct lpfc_mbx_wr_object wr_object; struct lpfc_mbx_get_port_name get_port_name; + struct lpfc_mbx_set_feature set_feature; struct lpfc_mbx_memory_dump_type3 mem_dump_type3; struct lpfc_mbx_nop nop; } un; @@ -3443,6 +3474,8 @@ struct lpfc_acqe_fc_la { #define LPFC_FC_LA_TYPE_LINK_UP 0x1 #define LPFC_FC_LA_TYPE_LINK_DOWN 0x2 #define LPFC_FC_LA_TYPE_NO_HARD_ALPA 0x3 +#define LPFC_FC_LA_TYPE_MDS_LINK_DOWN 0x4 +#define LPFC_FC_LA_TYPE_MDS_LOOPBACK 0x5 #define lpfc_acqe_fc_la_port_type_SHIFT 6 #define lpfc_acqe_fc_la_port_type_MASK 0x00000003 #define lpfc_acqe_fc_la_port_type_WORD word0 diff --git a/drivers/scsi/lpfc/lpfc_ids.h b/drivers/scsi/lpfc/lpfc_ids.h new file mode 100644 index 000000000000..5733feafe25f --- /dev/null +++ b/drivers/scsi/lpfc/lpfc_ids.h @@ -0,0 +1,122 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2004-2016 Emulex. All rights reserved. * + * EMULEX and SLI are trademarks of Emulex. 
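The new lpfc_mbx_set_feature layout above follows the driver's usual _SHIFT/_MASK/_WORD convention, so its fields are intended for the existing bf_set()/bf_get() accessors. A hedged fragment showing how a SET_FEATURES request for recoverable UE might be filled in; it assumes the lpfc headers, omits the mailbox allocation, lpfc_sli4_config() sizing and completion handling, and the param_len value and the ue_to_sr/ue_to_rp argument pairing are assumptions rather than something taken from the patch:

/* Sketch only: param_len and the timeout arguments are assumptions. */
static void sketch_set_ue_recovery(struct lpfc_mbx_set_feature *feat,
				   uint16_t ue_to_sr, uint16_t ue_to_rp)
{
	feat->feature = LPFC_SET_UE_RECOVERY;
	feat->param_len = 8;
	bf_set(lpfc_mbx_set_feature_UER, feat, 1);
	bf_set(lpfc_mbx_set_feature_UESR, feat, ue_to_sr);
	bf_set(lpfc_mbx_set_feature_UERP, feat, ue_to_rp);
}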
* + * www.emulex.com * + * Portions Copyright (C) 2004-2005 Christoph Hellwig * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of version 2 of the GNU General * + * Public License as published by the Free Software Foundation. * + * This program is distributed in the hope that it will be useful. * + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * + * TO BE LEGALLY INVALID. See the GNU General Public License for * + * more details, a copy of which can be found in the file COPYING * + * included with this package. * + *******************************************************************/ + +#include <linux/pci.h> + +const struct pci_device_id lpfc_id_table[] = { + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SMB, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S, + PCI_ANY_ID, PCI_ANY_ID, }, 
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_VF, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_PF, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TOMCAT, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FALCON, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BALIUS, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC_VF, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE_VF, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_G6_FC, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK_VF, + PCI_ANY_ID, PCI_ANY_ID, }, + { 0 } +}; diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 3a0f3a4ee944..734a0428ef0e 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -52,6 +52,7 @@ #include "lpfc_crtn.h" #include "lpfc_vport.h" #include "lpfc_version.h" +#include "lpfc_ids.h" char *_dump_buf_data; unsigned long _dump_buf_data_order; @@ -568,7 +569,7 @@ lpfc_config_port_post(struct lpfc_hba *phba) phba->last_completion_time = jiffies; /* Set up error attention (ERATT) polling timer */ mod_timer(&phba->eratt_poll, - jiffies + msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL)); + jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval)); if (phba->hba_flag & LINK_DISABLED) { lpfc_printf_log(phba, @@ -1587,35 +1588,39 @@ lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action, int rc; uint32_t intr_mode; - /* - * On error status condition, driver need to wait for port - * ready before performing reset. - */ - rc = lpfc_sli4_pdev_status_reg_wait(phba); - if (!rc) { - /* need reset: attempt for port recovery */ - if (en_rn_msg) - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2887 Reset Needed: Attempting Port " - "Recovery...\n"); - lpfc_offline_prep(phba, mbx_action); - lpfc_offline(phba); - /* release interrupt for possible resource change */ - lpfc_sli4_disable_intr(phba); - lpfc_sli_brdrestart(phba); - /* request and enable interrupt */ - intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); - if (intr_mode == LPFC_INTR_ERROR) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "3175 Failed to enable interrupt\n"); - return -EIO; - } else { - phba->intr_mode = intr_mode; - } - rc = lpfc_online(phba); - if (rc == 0) - lpfc_unblock_mgmt_io(phba); + if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == + LPFC_SLI_INTF_IF_TYPE_2) { + /* + * On error status condition, driver need to wait for port + * ready before performing reset. 
+ */ + rc = lpfc_sli4_pdev_status_reg_wait(phba); + if (rc) + return rc; } + + /* need reset: attempt for port recovery */ + if (en_rn_msg) + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2887 Reset Needed: Attempting Port " + "Recovery...\n"); + lpfc_offline_prep(phba, mbx_action); + lpfc_offline(phba); + /* release interrupt for possible resource change */ + lpfc_sli4_disable_intr(phba); + lpfc_sli_brdrestart(phba); + /* request and enable interrupt */ + intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); + if (intr_mode == LPFC_INTR_ERROR) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3175 Failed to enable interrupt\n"); + return -EIO; + } + phba->intr_mode = intr_mode; + rc = lpfc_online(phba); + if (rc == 0) + lpfc_unblock_mgmt_io(phba); + return rc; } @@ -1636,10 +1641,11 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba) struct lpfc_register portstat_reg = {0}; uint32_t reg_err1, reg_err2; uint32_t uerrlo_reg, uemasklo_reg; - uint32_t pci_rd_rc1, pci_rd_rc2; + uint32_t smphr_port_status = 0, pci_rd_rc1, pci_rd_rc2; bool en_rn_msg = true; struct temp_event temp_event_data; - int rc; + struct lpfc_register portsmphr_reg; + int rc, i; /* If the pci channel is offline, ignore possible errors, since * we cannot communicate with the pci card anyway. @@ -1647,6 +1653,7 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba) if (pci_channel_offline(phba->pcidev)) return; + memset(&portsmphr_reg, 0, sizeof(portsmphr_reg)); if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); switch (if_type) { case LPFC_SLI_INTF_IF_TYPE_0: @@ -1659,6 +1666,55 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba) /* consider PCI bus read error as pci_channel_offline */ if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO) return; + if (!(phba->hba_flag & HBA_RECOVERABLE_UE)) { + lpfc_sli4_offline_eratt(phba); + return; + } + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "7623 Checking UE recoverable"); + + for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) { + if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr, + &portsmphr_reg.word0)) + continue; + + smphr_port_status = bf_get(lpfc_port_smphr_port_status, + &portsmphr_reg); + if ((smphr_port_status & LPFC_PORT_SEM_MASK) == + LPFC_PORT_SEM_UE_RECOVERABLE) + break; + /*Sleep for 1Sec, before checking SEMAPHORE */ + msleep(1000); + } + + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "4827 smphr_port_status x%x : Waited %dSec", + smphr_port_status, i); + + /* Recoverable UE, reset the HBA device */ + if ((smphr_port_status & LPFC_PORT_SEM_MASK) == + LPFC_PORT_SEM_UE_RECOVERABLE) { + for (i = 0; i < 20; i++) { + msleep(1000); + if (!lpfc_readl(phba->sli4_hba.PSMPHRregaddr, + &portsmphr_reg.word0) && + (LPFC_POST_STAGE_PORT_READY == + bf_get(lpfc_port_smphr_port_status, + &portsmphr_reg))) { + rc = lpfc_sli4_port_sta_fn_reset(phba, + LPFC_MBX_NO_WAIT, en_rn_msg); + if (rc == 0) + return; + lpfc_printf_log(phba, + KERN_ERR, LOG_INIT, + "4215 Failed to recover UE"); + break; + } + } + } + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "7624 Firmware not ready: Failing UE recovery," + " waited %dSec", i); lpfc_sli4_offline_eratt(phba); break; @@ -1681,6 +1737,7 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba) "taking port offline Data: x%x x%x\n", reg_err1, reg_err2); + phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE; temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; temp_event_data.event_code = LPFC_CRIT_TEMP; temp_event_data.data = 0xFFFFFFFF; @@ -3985,6 +4042,8 @@ lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc) { struct lpfc_dmabuf *mp; 
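The recoverable-UE path added above is, at its core, a bounded poll: read the port semaphore register once a second for up to ue_to_sr milliseconds, and only proceed to the port reset when the status field matches LPFC_PORT_SEM_UE_RECOVERABLE. That wait reduces to a few lines; the sketch below uses a hypothetical read_status() callback in place of lpfc_readl() on PSMPHRregaddr:

#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>

#define SEM_MASK		0xF000
#define SEM_UE_RECOVERABLE	0xE000

bool wait_for_recoverable_ue(uint32_t (*read_status)(void),
			     unsigned int timeout_secs)
{
	unsigned int i;

	for (i = 0; i < timeout_secs; i++) {
		if ((read_status() & SEM_MASK) == SEM_UE_RECOVERABLE)
			return true;
		sleep(1);	/* mirrors the msleep(1000) between reads */
	}
	return false;
}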
LPFC_MBOXQ_t *pmb; + MAILBOX_t *mb; + struct lpfc_mbx_read_top *la; int rc; if (bf_get(lpfc_trailer_type, acqe_fc) != @@ -4055,6 +4114,24 @@ lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc) pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; pmb->vport = phba->pport; + if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) { + /* Parse and translate status field */ + mb = &pmb->u.mb; + mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, + (void *)acqe_fc); + + /* Parse and translate link attention fields */ + la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop; + la->eventTag = acqe_fc->event_tag; + bf_set(lpfc_mbx_read_top_att_type, la, + LPFC_FC_LA_TYPE_LINK_DOWN); + + /* Invoke the mailbox command callback function */ + lpfc_mbx_cmpl_read_topology(phba, pmb); + + return; + } + rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) goto out_free_dmabuf; @@ -4107,6 +4184,7 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli) "3190 Over Temperature:%d Celsius- Port Name %c\n", acqe_sli->event_data1, port_name); + phba->sfp_warning |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE; shost = lpfc_shost_from_vport(phba->pport); fc_host_post_vendor_event(shost, fc_get_event_number(), sizeof(temp_event_data), @@ -4408,7 +4486,8 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba, * the corresponding FCF bit in the roundrobin bitmap. */ spin_lock_irq(&phba->hbalock); - if (phba->fcf.fcf_flag & FCF_DISCOVERY) { + if ((phba->fcf.fcf_flag & FCF_DISCOVERY) && + (phba->fcf.current_rec.fcf_indx != acqe_fip->index)) { spin_unlock_irq(&phba->hbalock); /* Update FLOGI FCF failover eligible FCF bmask */ lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index); @@ -5357,6 +5436,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) goto out_free_bsmbx; } } + /* * Get sli4 parameters that override parameters from Port capabilities. 
* If this call fails, it isn't critical unless the SLI4 parameters come @@ -6085,6 +6165,7 @@ lpfc_hba_alloc(struct pci_dev *pdev) kfree(phba); return NULL; } + phba->eratt_poll_interval = LPFC_ERATT_POLL_INTERVAL; spin_lock_init(&phba->ct_ev_lock); INIT_LIST_HEAD(&phba->ct_ev_waiters); @@ -9521,6 +9602,14 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) phba->fcp_embed_io = 1; else phba->fcp_embed_io = 0; + + /* + * Check if the SLI port supports MDS Diagnostics + */ + if (bf_get(cfg_mds_diags, mbx_sli4_parameters)) + phba->mds_diags_support = 1; + else + phba->mds_diags_support = 0; return 0; } @@ -11291,106 +11380,6 @@ lpfc_fof_queue_destroy(struct lpfc_hba *phba) return 0; } -static struct pci_device_id lpfc_id_table[] = { - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SMB, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_VF, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_PF, - PCI_ANY_ID, PCI_ANY_ID, }, - 
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TOMCAT, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FALCON, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BALIUS, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC_VF, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE_VF, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_G6_FC, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK_VF, - PCI_ANY_ID, PCI_ANY_ID, }, - { 0 } -}; - MODULE_DEVICE_TABLE(pci, lpfc_id_table); static const struct pci_error_handlers lpfc_err_handler = { @@ -11445,21 +11434,17 @@ lpfc_init(void) printk(KERN_ERR "Could not register lpfcmgmt device, " "misc_register returned with status %d", error); - if (lpfc_enable_npiv) { - lpfc_transport_functions.vport_create = lpfc_vport_create; - lpfc_transport_functions.vport_delete = lpfc_vport_delete; - } + lpfc_transport_functions.vport_create = lpfc_vport_create; + lpfc_transport_functions.vport_delete = lpfc_vport_delete; lpfc_transport_template = fc_attach_transport(&lpfc_transport_functions); if (lpfc_transport_template == NULL) return -ENOMEM; - if (lpfc_enable_npiv) { - lpfc_vport_transport_template = - fc_attach_transport(&lpfc_vport_transport_functions); - if (lpfc_vport_transport_template == NULL) { - fc_release_transport(lpfc_transport_template); - return -ENOMEM; - } + lpfc_vport_transport_template = + fc_attach_transport(&lpfc_vport_transport_functions); + if (lpfc_vport_transport_template == NULL) { + fc_release_transport(lpfc_transport_template); + return -ENOMEM; } /* Initialize in case vector mapping is needed */ @@ -11471,8 +11456,7 @@ lpfc_init(void) error = pci_register_driver(&lpfc_driver); if (error) { fc_release_transport(lpfc_transport_template); - if (lpfc_enable_npiv) - fc_release_transport(lpfc_vport_transport_template); + fc_release_transport(lpfc_vport_transport_template); } return error; @@ -11491,8 +11475,7 @@ lpfc_exit(void) misc_deregister(&lpfc_mgmt_dev); pci_unregister_driver(&lpfc_driver); fc_release_transport(lpfc_transport_template); - if (lpfc_enable_npiv) - fc_release_transport(lpfc_vport_transport_template); + fc_release_transport(lpfc_vport_transport_template); if (_dump_buf_data) { printk(KERN_ERR "9062 BLKGRD: freeing %lu pages for " "_dump_buf_data at 0x%p\n", diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index 3bd0be6277b3..a5655d571906 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2015 Emulex. All rights reserved. * + * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -3335,8 +3335,11 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) * OAS, set the oas iocb related flags. 
*/ if ((phba->cfg_fof) && ((struct lpfc_device_data *) - scsi_cmnd->device->hostdata)->oas_enabled) + scsi_cmnd->device->hostdata)->oas_enabled) { lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF); + lpfc_cmd->cur_iocbq.priority = ((struct lpfc_device_data *) + scsi_cmnd->device->hostdata)->priority; + } return 0; } @@ -5607,6 +5610,7 @@ lpfc_create_device_data(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn, sizeof(struct lpfc_name)); lun_info->device_id.lun = lun; lun_info->oas_enabled = false; + lun_info->priority = phba->cfg_XLanePriority; lun_info->available = false; return lun_info; } @@ -5798,7 +5802,7 @@ lpfc_find_next_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn, **/ bool lpfc_enable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn, - struct lpfc_name *target_wwpn, uint64_t lun) + struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri) { struct lpfc_device_data *lun_info; @@ -5825,6 +5829,7 @@ lpfc_enable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn, false); if (lun_info) { lun_info->oas_enabled = true; + lun_info->priority = pri; lun_info->available = false; list_add_tail(&lun_info->listentry, &phba->luns); spin_unlock_irqrestore(&phba->devicelock, flags); @@ -5886,6 +5891,7 @@ lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn, struct scsi_host_template lpfc_template_s3 = { .module = THIS_MODULE, .name = LPFC_DRIVER_NAME, + .proc_name = LPFC_DRIVER_NAME, .info = lpfc_info, .queuecommand = lpfc_queuecommand, .eh_abort_handler = lpfc_abort_handler, @@ -5910,6 +5916,7 @@ struct scsi_host_template lpfc_template_s3 = { struct scsi_host_template lpfc_template = { .module = THIS_MODULE, .name = LPFC_DRIVER_NAME, + .proc_name = LPFC_DRIVER_NAME, .info = lpfc_info, .queuecommand = lpfc_queuecommand, .eh_abort_handler = lpfc_abort_handler, @@ -5935,6 +5942,7 @@ struct scsi_host_template lpfc_template = { struct scsi_host_template lpfc_vport_template = { .module = THIS_MODULE, .name = LPFC_DRIVER_NAME, + .proc_name = LPFC_DRIVER_NAME, .info = lpfc_info, .queuecommand = lpfc_queuecommand, .eh_abort_handler = lpfc_abort_handler, diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h index 18b9260ccfac..8cb80dabada8 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.h +++ b/drivers/scsi/lpfc/lpfc_scsi.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2015 Emulex. All rights reserved. * + * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* * www.emulex.com * * * @@ -51,6 +51,7 @@ struct lpfc_device_data { struct list_head listentry; struct lpfc_rport_data *rport_data; struct lpfc_device_id device_id; + uint8_t priority; bool oas_enabled; bool available; }; diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 70edf21ae1b9..351d08ace24a 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -2947,8 +2947,8 @@ void lpfc_poll_eratt(unsigned long ptr) else cnt = (sli_intr - phba->sli.slistat.sli_prev_intr); - /* 64-bit integer division not supporte on 32-bit x86 - use do_div */ - do_div(cnt, LPFC_ERATT_POLL_INTERVAL); + /* 64-bit integer division not supported on 32-bit x86 - use do_div */ + do_div(cnt, phba->eratt_poll_interval); phba->sli.slistat.sli_ips = cnt; phba->sli.slistat.sli_prev_intr = sli_intr; @@ -2963,7 +2963,7 @@ void lpfc_poll_eratt(unsigned long ptr) /* Restart the timer for next eratt poll */ mod_timer(&phba->eratt_poll, jiffies + - msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL)); + msecs_to_jiffies(1000 * phba->eratt_poll_interval)); return; } @@ -4665,13 +4665,13 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba) int mode = 3, i; int longs; - switch (lpfc_sli_mode) { + switch (phba->cfg_sli_mode) { case 2: if (phba->cfg_enable_npiv) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, - "1824 NPIV enabled: Override lpfc_sli_mode " + "1824 NPIV enabled: Override sli_mode " "parameter (%d) to auto (0).\n", - lpfc_sli_mode); + phba->cfg_sli_mode); break; } mode = 2; @@ -4681,8 +4681,8 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba) break; default: lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, - "1819 Unrecognized lpfc_sli_mode " - "parameter: %d.\n", lpfc_sli_mode); + "1819 Unrecognized sli_mode parameter: %d.\n", + phba->cfg_sli_mode); break; } @@ -4690,12 +4690,14 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba) rc = lpfc_sli_config_port(phba, mode); - if (rc && lpfc_sli_mode == 3) + if (rc && phba->cfg_sli_mode == 3) lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, "1820 Unable to select SLI-3. " "Not supported by adapter.\n"); if (rc && mode != 2) rc = lpfc_sli_config_port(phba, 2); + else if (rc && mode == 2) + rc = lpfc_sli_config_port(phba, 3); if (rc) goto lpfc_sli_hba_setup_error; @@ -5690,6 +5692,38 @@ lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type) return rc; } +void +lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox, + uint32_t feature) +{ + uint32_t len; + + len = sizeof(struct lpfc_mbx_set_feature) - + sizeof(struct lpfc_sli4_cfg_mhdr); + lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, + LPFC_MBOX_OPCODE_SET_FEATURES, len, + LPFC_SLI4_MBX_EMBED); + + switch (feature) { + case LPFC_SET_UE_RECOVERY: + bf_set(lpfc_mbx_set_feature_UER, + &mbox->u.mqe.un.set_feature, 1); + mbox->u.mqe.un.set_feature.feature = LPFC_SET_UE_RECOVERY; + mbox->u.mqe.un.set_feature.param_len = 8; + break; + case LPFC_SET_MDS_DIAGS: + bf_set(lpfc_mbx_set_feature_mds, + &mbox->u.mqe.un.set_feature, 1); + bf_set(lpfc_mbx_set_feature_mds_deep_loopbk, + &mbox->u.mqe.un.set_feature, 0); + mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS; + mbox->u.mqe.un.set_feature.param_len = 8; + break; + } + + return; +} + /** * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents. * @phba: Pointer to HBA context object. 
@@ -6414,6 +6448,30 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) phba->pport->cfg_lun_queue_depth = rc; } + if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == + LPFC_SLI_INTF_IF_TYPE_0) { + lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY); + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + if (rc == MBX_SUCCESS) { + phba->hba_flag |= HBA_RECOVERABLE_UE; + /* Set 1Sec interval to detect UE */ + phba->eratt_poll_interval = 1; + phba->sli4_hba.ue_to_sr = bf_get( + lpfc_mbx_set_feature_UESR, + &mboxq->u.mqe.un.set_feature); + phba->sli4_hba.ue_to_rp = bf_get( + lpfc_mbx_set_feature_UERP, + &mboxq->u.mqe.un.set_feature); + } + } + + if (phba->cfg_enable_mds_diags && phba->mds_diags_support) { + /* Enable MDS Diagnostics only if the SLI Port supports it */ + lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS); + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + if (rc != MBX_SUCCESS) + phba->mds_diags_support = 0; + } /* * Discover the port's supported feature set and match it against the @@ -6612,7 +6670,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) /* Start error attention (ERATT) polling timer */ mod_timer(&phba->eratt_poll, - jiffies + msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL)); + jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval)); /* Enable PCIe device Advanced Error Reporting (AER) if configured */ if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) { @@ -8383,8 +8441,11 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1); if (iocbq->iocb_flag & LPFC_IO_OAS) { bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1); - if (phba->cfg_XLanePriority) { - bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1); + bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1); + if (iocbq->priority) { + bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com, + (iocbq->priority << 1)); + } else { bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com, (phba->cfg_XLanePriority << 1)); } @@ -8439,8 +8500,11 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1); if (iocbq->iocb_flag & LPFC_IO_OAS) { bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1); - if (phba->cfg_XLanePriority) { - bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1); + bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1); + if (iocbq->priority) { + bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com, + (iocbq->priority << 1)); + } else { bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com, (phba->cfg_XLanePriority << 1)); } @@ -8494,8 +8558,11 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, iocbq->iocb.ulpFCP2Rcvy); if (iocbq->iocb_flag & LPFC_IO_OAS) { bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1); - if (phba->cfg_XLanePriority) { - bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1); + bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1); + if (iocbq->priority) { + bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com, + (iocbq->priority << 1)); + } else { bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com, (phba->cfg_XLanePriority << 1)); } @@ -10136,6 +10203,7 @@ lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id, struct lpfc_iocbq *iocbq; int sum, i; + spin_lock_irq(&phba->hbalock); for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) { iocbq = phba->sli.iocbq_lookup[i]; @@ -10143,6 +10211,7 @@ lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id, ctx_cmd) == 0) sum++; } + spin_unlock_irq(&phba->hbalock); return sum; } diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h index 7fe99ff80846..74227a28bd56 
100644 --- a/drivers/scsi/lpfc/lpfc_sli.h +++ b/drivers/scsi/lpfc/lpfc_sli.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2015 Emulex. All rights reserved. * + * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * * @@ -57,6 +57,7 @@ struct lpfc_iocbq { struct lpfc_cq_event cq_event; IOCB_t iocb; /* IOCB cmd */ + uint8_t priority; /* OAS priority */ uint8_t retry; /* retry counter for IOCB cmd - if needed */ uint32_t iocb_flag; #define LPFC_IO_LIBDFC 1 /* libdfc iocb */ diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index cd780c29495a..0b88b5703e0f 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2009-2015 Emulex. All rights reserved. * + * Copyright (C) 2009-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * * @@ -511,6 +511,8 @@ struct lpfc_sli4_hba { uint32_t ue_mask_lo; uint32_t ue_mask_hi; + uint32_t ue_to_sr; + uint32_t ue_to_rp; struct lpfc_register sli_intf; struct lpfc_pc_sli4_params pc_sli4_params; struct msix_entry *msix_entries; diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index fa0d531bf351..c9bf20eb7223 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -18,7 +18,7 @@ * included with this package. * *******************************************************************/ -#define LPFC_DRIVER_VERSION "11.1.0.0." +#define LPFC_DRIVER_VERSION "11.2.0.0." 
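As the comment in lpfc_poll_eratt() above notes, 64-bit division is not supported directly on 32-bit x86, so the interrupt-rate calculation keeps using do_div(): the macro divides a 64-bit lvalue in place by a 32-bit divisor and returns the remainder. A minimal sketch with a hypothetical helper name, assuming a non-zero polling interval as the driver guarantees:

#include <linux/types.h>
#include <asm/div64.h>

static u32 interrupts_per_second(u64 delta_intr, u32 poll_interval_secs)
{
	/* delta_intr holds the quotient after do_div(); remainder is unused */
	do_div(delta_intr, poll_interval_secs);
	return (u32)delta_intr;
}

lpfc_poll_eratt() stores the resulting rate in phba->sli.slistat.sli_ips and then re-arms the timer using the new per-HBA phba->eratt_poll_interval instead of the old compile-time constant.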
#define LPFC_DRIVER_NAME "lpfc" /* Used for SLI 2/3 */ diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index f4b0690450d2..2dab3dc2aa69 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -4079,6 +4079,12 @@ megasas_get_pd_list(struct megasas_instance *instance) struct MR_PD_ADDRESS *pd_addr; dma_addr_t ci_h = 0; + if (instance->pd_list_not_supported) { + dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY " + "not supported by firmware\n"); + return ret; + } + cmd = megasas_get_cmd(instance); if (!cmd) { diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index 6bff13e7afc7..cd91a684c945 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -4903,13 +4903,22 @@ _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc) u16 ioc_status; u16 sz; u8 device_missing_delay; + u8 num_phys; - mpt3sas_config_get_number_hba_phys(ioc, &ioc->sas_hba.num_phys); - if (!ioc->sas_hba.num_phys) { + mpt3sas_config_get_number_hba_phys(ioc, &num_phys); + if (!num_phys) { pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); return; } + ioc->sas_hba.phy = kcalloc(num_phys, + sizeof(struct _sas_phy), GFP_KERNEL); + if (!ioc->sas_hba.phy) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + ioc->sas_hba.num_phys = num_phys; /* sas_iounit page 0 */ sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys * @@ -4969,13 +4978,6 @@ _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc) MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK; ioc->sas_hba.parent_dev = &ioc->shost->shost_gendev; - ioc->sas_hba.phy = kcalloc(ioc->sas_hba.num_phys, - sizeof(struct _sas_phy), GFP_KERNEL); - if (!ioc->sas_hba.phy) { - pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", - ioc->name, __FILE__, __LINE__, __func__); - goto out; - } for (i = 0; i < ioc->sas_hba.num_phys ; i++) { if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0, i))) { @@ -9033,8 +9035,11 @@ scsih_pci_mmio_enabled(struct pci_dev *pdev) /* TODO - dump whatever for debugging purposes */ - /* Request a slot reset. */ - return PCI_ERS_RESULT_NEED_RESET; + /* This called only if scsih_pci_error_detected returns + * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still + * works, no need to reset slot. 
+ */ + return PCI_ERS_RESULT_RECOVERED; } /* diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c index 6a84b82d71bb..ff93286bc32f 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_transport.c +++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c @@ -705,6 +705,11 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle, goto out_fail; } + if (!sas_node->parent_dev) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out_fail; + } port = sas_port_alloc_num(sas_node->parent_dev); if ((sas_port_add(port))) { pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c index 3b11aad03752..2f2a9910e30e 100644 --- a/drivers/scsi/osd/osd_initiator.c +++ b/drivers/scsi/osd/osd_initiator.c @@ -726,7 +726,7 @@ static int _osd_req_list_objects(struct osd_request *or, return PTR_ERR(bio); } - bio->bi_rw &= ~REQ_WRITE; + bio_set_op_attrs(bio, REQ_OP_READ, 0); or->in.bio = bio; or->in.total_bytes = bio->bi_iter.bi_size; return 0; @@ -824,7 +824,7 @@ void osd_req_write(struct osd_request *or, { _osd_req_encode_common(or, OSD_ACT_WRITE, obj, offset, len); WARN_ON(or->out.bio || or->out.total_bytes); - WARN_ON(0 == (bio->bi_rw & REQ_WRITE)); + WARN_ON(!op_is_write(bio_op(bio))); or->out.bio = bio; or->out.total_bytes = len; } @@ -839,7 +839,7 @@ int osd_req_write_kern(struct osd_request *or, if (IS_ERR(bio)) return PTR_ERR(bio); - bio->bi_rw |= REQ_WRITE; /* FIXME: bio_set_dir() */ + bio_set_op_attrs(bio, REQ_OP_WRITE, 0); osd_req_write(or, obj, offset, bio, len); return 0; } @@ -875,7 +875,7 @@ void osd_req_read(struct osd_request *or, { _osd_req_encode_common(or, OSD_ACT_READ, obj, offset, len); WARN_ON(or->in.bio || or->in.total_bytes); - WARN_ON(bio->bi_rw & REQ_WRITE); + WARN_ON(op_is_write(bio_op(bio))); or->in.bio = bio; or->in.total_bytes = len; } @@ -956,7 +956,7 @@ static int _osd_req_finalize_cdb_cont(struct osd_request *or, const u8 *cap_key) if (IS_ERR(bio)) return PTR_ERR(bio); - bio->bi_rw |= REQ_WRITE; + bio_set_op_attrs(bio, REQ_OP_WRITE, 0); /* integrity check the continuation before the bio is linked * with the other data segments since the continuation @@ -1077,7 +1077,7 @@ int osd_req_write_sg_kern(struct osd_request *or, if (IS_ERR(bio)) return PTR_ERR(bio); - bio->bi_rw |= REQ_WRITE; + bio_set_op_attrs(bio, REQ_OP_WRITE, 0); osd_req_write_sg(or, obj, bio, sglist, numentries); return 0; @@ -1558,18 +1558,25 @@ static int _osd_req_finalize_data_integrity(struct osd_request *or, static struct request *_make_request(struct request_queue *q, bool has_write, struct _osd_io_info *oii, gfp_t flags) { - if (oii->bio) - return blk_make_request(q, oii->bio, flags); - else { - struct request *req; - - req = blk_get_request(q, has_write ? WRITE : READ, flags); - if (IS_ERR(req)) - return req; + struct request *req; + struct bio *bio = oii->bio; + int ret; - blk_rq_set_block_pc(req); + req = blk_get_request(q, has_write ? 
WRITE : READ, flags); + if (IS_ERR(req)) return req; + blk_rq_set_block_pc(req); + + for_each_bio(bio) { + struct bio *bounce_bio = bio; + + blk_queue_bounce(req->q, &bounce_bio); + ret = blk_rq_append_bio(req, bounce_bio); + if (ret) + return ERR_PTR(ret); } + + return req; } static int _init_blk_request(struct osd_request *or, diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c index 6bd7bf4f4a81..9fc675f57e33 100644 --- a/drivers/scsi/pm8001/pm8001_init.c +++ b/drivers/scsi/pm8001/pm8001_init.c @@ -1249,7 +1249,7 @@ static int pm8001_pci_resume(struct pci_dev *pdev) /* Chip documentation for the 8070 and 8072 SPCv */ /* states that a 500ms minimum delay is required */ - /* before issuing commands. Otherwise, the firmare */ + /* before issuing commands. Otherwise, the firmware */ /* will enter an unrecoverable state. */ if (pm8001_ha->chip_id == chip_8070 || diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 4dc06a13cab8..fe7469c901f7 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -147,92 +147,6 @@ static struct bin_attribute sysfs_fw_dump_attr = { }; static ssize_t -qla2x00_sysfs_read_fw_dump_template(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin_attr, - char *buf, loff_t off, size_t count) -{ - struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, - struct device, kobj))); - struct qla_hw_data *ha = vha->hw; - - if (!ha->fw_dump_template || !ha->fw_dump_template_len) - return 0; - - ql_dbg(ql_dbg_user, vha, 0x70e2, - "chunk <- off=%llx count=%zx\n", off, count); - return memory_read_from_buffer(buf, count, &off, - ha->fw_dump_template, ha->fw_dump_template_len); -} - -static ssize_t -qla2x00_sysfs_write_fw_dump_template(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin_attr, - char *buf, loff_t off, size_t count) -{ - struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, - struct device, kobj))); - struct qla_hw_data *ha = vha->hw; - uint32_t size; - - if (off == 0) { - if (ha->fw_dump) - vfree(ha->fw_dump); - if (ha->fw_dump_template) - vfree(ha->fw_dump_template); - - ha->fw_dump = NULL; - ha->fw_dump_len = 0; - ha->fw_dump_template = NULL; - ha->fw_dump_template_len = 0; - - size = qla27xx_fwdt_template_size(buf); - ql_dbg(ql_dbg_user, vha, 0x70d1, - "-> allocating fwdt (%x bytes)...\n", size); - ha->fw_dump_template = vmalloc(size); - if (!ha->fw_dump_template) { - ql_log(ql_log_warn, vha, 0x70d2, - "Failed allocate fwdt (%x bytes).\n", size); - return -ENOMEM; - } - ha->fw_dump_template_len = size; - } - - if (off + count > ha->fw_dump_template_len) { - count = ha->fw_dump_template_len - off; - ql_dbg(ql_dbg_user, vha, 0x70d3, - "chunk -> truncating to %zx bytes.\n", count); - } - - ql_dbg(ql_dbg_user, vha, 0x70d4, - "chunk -> off=%llx count=%zx\n", off, count); - memcpy(ha->fw_dump_template + off, buf, count); - - if (off + count == ha->fw_dump_template_len) { - size = qla27xx_fwdt_calculate_dump_size(vha); - ql_dbg(ql_dbg_user, vha, 0x70d5, - "-> allocating fwdump (%x bytes)...\n", size); - ha->fw_dump = vmalloc(size); - if (!ha->fw_dump) { - ql_log(ql_log_warn, vha, 0x70d6, - "Failed allocate fwdump (%x bytes).\n", size); - return -ENOMEM; - } - ha->fw_dump_len = size; - } - - return count; -} -static struct bin_attribute sysfs_fw_dump_template_attr = { - .attr = { - .name = "fw_dump_template", - .mode = S_IRUSR | S_IWUSR, - }, - .size = 0, - .read = qla2x00_sysfs_read_fw_dump_template, - .write = 
qla2x00_sysfs_write_fw_dump_template, -}; - -static ssize_t qla2x00_sysfs_read_nvram(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) @@ -973,7 +887,6 @@ static struct sysfs_entry { int is4GBp_only; } bin_file_entries[] = { { "fw_dump", &sysfs_fw_dump_attr, }, - { "fw_dump_template", &sysfs_fw_dump_template_attr, 0x27 }, { "nvram", &sysfs_nvram_attr, }, { "optrom", &sysfs_optrom_attr, }, { "optrom_ctl", &sysfs_optrom_ctl_attr, }, @@ -1000,8 +913,6 @@ qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha) continue; if (iter->is4GBp_only == 3 && !(IS_CNA_CAPABLE(vha->hw))) continue; - if (iter->is4GBp_only == 0x27 && !IS_QLA27XX(vha->hw)) - continue; ret = sysfs_create_bin_file(&host->shost_gendev.kobj, iter->attr); @@ -1858,6 +1769,9 @@ qla2x00_terminate_rport_io(struct fc_rport *rport) if (!fcport) return; + if (test_bit(UNLOADING, &fcport->vha->dpc_flags)) + return; + if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags)) return; @@ -1900,10 +1814,9 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost) int rval; struct link_statistics *stats; dma_addr_t stats_dma; - struct fc_host_statistics *pfc_host_stat; + struct fc_host_statistics *p = &vha->fc_host_stat; - pfc_host_stat = &vha->fc_host_stat; - memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics)); + memset(p, -1, sizeof(*p)); if (IS_QLAFX00(vha->hw)) goto done; @@ -1918,17 +1831,17 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost) goto done; stats = dma_alloc_coherent(&ha->pdev->dev, - sizeof(struct link_statistics), &stats_dma, GFP_KERNEL); - if (stats == NULL) { + sizeof(*stats), &stats_dma, GFP_KERNEL); + if (!stats) { ql_log(ql_log_warn, vha, 0x707d, "Failed to allocate memory for stats.\n"); goto done; } - memset(stats, 0, DMA_POOL_SIZE); + memset(stats, 0, sizeof(*stats)); rval = QLA_FUNCTION_FAILED; if (IS_FWI2_CAPABLE(ha)) { - rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma); + rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, 0); } else if (atomic_read(&base_vha->loop_state) == LOOP_READY && !ha->dpc_active) { /* Must be in a 'READY' state for statistics retrieval. 
*/ @@ -1939,47 +1852,68 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost) if (rval != QLA_SUCCESS) goto done_free; - pfc_host_stat->link_failure_count = stats->link_fail_cnt; - pfc_host_stat->loss_of_sync_count = stats->loss_sync_cnt; - pfc_host_stat->loss_of_signal_count = stats->loss_sig_cnt; - pfc_host_stat->prim_seq_protocol_err_count = stats->prim_seq_err_cnt; - pfc_host_stat->invalid_tx_word_count = stats->inval_xmit_word_cnt; - pfc_host_stat->invalid_crc_count = stats->inval_crc_cnt; + p->link_failure_count = stats->link_fail_cnt; + p->loss_of_sync_count = stats->loss_sync_cnt; + p->loss_of_signal_count = stats->loss_sig_cnt; + p->prim_seq_protocol_err_count = stats->prim_seq_err_cnt; + p->invalid_tx_word_count = stats->inval_xmit_word_cnt; + p->invalid_crc_count = stats->inval_crc_cnt; if (IS_FWI2_CAPABLE(ha)) { - pfc_host_stat->lip_count = stats->lip_cnt; - pfc_host_stat->tx_frames = stats->tx_frames; - pfc_host_stat->rx_frames = stats->rx_frames; - pfc_host_stat->dumped_frames = stats->discarded_frames; - pfc_host_stat->nos_count = stats->nos_rcvd; - pfc_host_stat->error_frames = + p->lip_count = stats->lip_cnt; + p->tx_frames = stats->tx_frames; + p->rx_frames = stats->rx_frames; + p->dumped_frames = stats->discarded_frames; + p->nos_count = stats->nos_rcvd; + p->error_frames = stats->dropped_frames + stats->discarded_frames; - pfc_host_stat->rx_words = vha->qla_stats.input_bytes; - pfc_host_stat->tx_words = vha->qla_stats.output_bytes; + p->rx_words = vha->qla_stats.input_bytes; + p->tx_words = vha->qla_stats.output_bytes; } - pfc_host_stat->fcp_control_requests = vha->qla_stats.control_requests; - pfc_host_stat->fcp_input_requests = vha->qla_stats.input_requests; - pfc_host_stat->fcp_output_requests = vha->qla_stats.output_requests; - pfc_host_stat->fcp_input_megabytes = vha->qla_stats.input_bytes >> 20; - pfc_host_stat->fcp_output_megabytes = vha->qla_stats.output_bytes >> 20; - pfc_host_stat->seconds_since_last_reset = + p->fcp_control_requests = vha->qla_stats.control_requests; + p->fcp_input_requests = vha->qla_stats.input_requests; + p->fcp_output_requests = vha->qla_stats.output_requests; + p->fcp_input_megabytes = vha->qla_stats.input_bytes >> 20; + p->fcp_output_megabytes = vha->qla_stats.output_bytes >> 20; + p->seconds_since_last_reset = get_jiffies_64() - vha->qla_stats.jiffies_at_last_reset; - do_div(pfc_host_stat->seconds_since_last_reset, HZ); + do_div(p->seconds_since_last_reset, HZ); done_free: dma_free_coherent(&ha->pdev->dev, sizeof(struct link_statistics), stats, stats_dma); done: - return pfc_host_stat; + return p; } static void qla2x00_reset_host_stats(struct Scsi_Host *shost) { scsi_qla_host_t *vha = shost_priv(shost); + struct qla_hw_data *ha = vha->hw; + struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); + struct link_statistics *stats; + dma_addr_t stats_dma; + memset(&vha->qla_stats, 0, sizeof(vha->qla_stats)); memset(&vha->fc_host_stat, 0, sizeof(vha->fc_host_stat)); vha->qla_stats.jiffies_at_last_reset = get_jiffies_64(); + + if (IS_FWI2_CAPABLE(ha)) { + stats = dma_alloc_coherent(&ha->pdev->dev, + sizeof(*stats), &stats_dma, GFP_KERNEL); + if (!stats) { + ql_log(ql_log_warn, vha, 0x70d7, + "Failed to allocate memory for stats.\n"); + return; + } + + /* reset firmware statistics */ + qla24xx_get_isp_stats(base_vha, stats, stats_dma, BIT_0); + + dma_free_coherent(&ha->pdev->dev, sizeof(*stats), + stats, stats_dma); + } } static void diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c index 
392c147d5793..643014f82f7d 100644 --- a/drivers/scsi/qla2xxx/qla_bsg.c +++ b/drivers/scsi/qla2xxx/qla_bsg.c @@ -2246,53 +2246,94 @@ qla2x00_get_priv_stats(struct fc_bsg_job *bsg_job) struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); struct link_statistics *stats = NULL; dma_addr_t stats_dma; - int rval = QLA_FUNCTION_FAILED; + int rval; + uint32_t *cmd = bsg_job->request->rqst_data.h_vendor.vendor_cmd; + uint options = cmd[0] == QL_VND_GET_PRIV_STATS_EX ? cmd[1] : 0; if (test_bit(UNLOADING, &vha->dpc_flags)) - goto done; + return -ENODEV; if (unlikely(pci_channel_offline(ha->pdev))) - goto done; + return -ENODEV; if (qla2x00_reset_active(vha)) - goto done; + return -EBUSY; if (!IS_FWI2_CAPABLE(ha)) - goto done; + return -EPERM; stats = dma_alloc_coherent(&ha->pdev->dev, - sizeof(struct link_statistics), &stats_dma, GFP_KERNEL); + sizeof(*stats), &stats_dma, GFP_KERNEL); if (!stats) { ql_log(ql_log_warn, vha, 0x70e2, - "Failed to allocate memory for stats.\n"); - goto done; + "Failed to allocate memory for stats.\n"); + return -ENOMEM; } - memset(stats, 0, sizeof(struct link_statistics)); + memset(stats, 0, sizeof(*stats)); - rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma); + rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, options); - if (rval != QLA_SUCCESS) - goto done_free; + if (rval == QLA_SUCCESS) { + ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70e3, + (uint8_t *)stats, sizeof(*stats)); + sg_copy_from_buffer(bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, stats, sizeof(*stats)); + } - ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70e3, - (uint8_t *)stats, sizeof(struct link_statistics)); + bsg_job->reply->reply_payload_rcv_len = sizeof(*stats); + bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = + rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK; - sg_copy_from_buffer(bsg_job->reply_payload.sg_list, - bsg_job->reply_payload.sg_cnt, stats, sizeof(struct link_statistics)); - bsg_job->reply->reply_payload_rcv_len = sizeof(struct link_statistics); + bsg_job->reply_len = sizeof(*bsg_job->reply); + bsg_job->reply->result = DID_OK << 16; + bsg_job->job_done(bsg_job); - bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK; + dma_free_coherent(&ha->pdev->dev, sizeof(*stats), + stats, stats_dma); - bsg_job->reply_len = sizeof(struct fc_bsg_reply); + return 0; +} + +static int +qla2x00_do_dport_diagnostics(struct fc_bsg_job *bsg_job) +{ + struct Scsi_Host *host = bsg_job->shost; + scsi_qla_host_t *vha = shost_priv(host); + int rval; + struct qla_dport_diag *dd; + + if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw)) + return -EPERM; + + dd = kmalloc(sizeof(*dd), GFP_KERNEL); + if (!dd) { + ql_log(ql_log_warn, vha, 0x70db, + "Failed to allocate memory for dport.\n"); + return -ENOMEM; + } + + sg_copy_to_buffer(bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, dd, sizeof(*dd)); + + rval = qla26xx_dport_diagnostics( + vha, dd->buf, sizeof(dd->buf), dd->options); + if (rval == QLA_SUCCESS) { + sg_copy_from_buffer(bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, dd, sizeof(*dd)); + } + + bsg_job->reply->reply_payload_rcv_len = sizeof(*dd); + bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = + rval ? 
EXT_STATUS_MAILBOX : EXT_STATUS_OK; + + bsg_job->reply_len = sizeof(*bsg_job->reply); bsg_job->reply->result = DID_OK << 16; bsg_job->job_done(bsg_job); -done_free: - dma_free_coherent(&ha->pdev->dev, sizeof(struct link_statistics), - stats, stats_dma); -done: - return rval; + kfree(dd); + + return 0; } static int @@ -2360,8 +2401,12 @@ qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job) return qla27xx_get_bbcr_data(bsg_job); case QL_VND_GET_PRIV_STATS: + case QL_VND_GET_PRIV_STATS_EX: return qla2x00_get_priv_stats(bsg_job); + case QL_VND_DPORT_DIAGNOSTICS: + return qla2x00_do_dport_diagnostics(bsg_job); + default: return -ENOSYS; } diff --git a/drivers/scsi/qla2xxx/qla_bsg.h b/drivers/scsi/qla2xxx/qla_bsg.h index c80192d45536..d97dfd521356 100644 --- a/drivers/scsi/qla2xxx/qla_bsg.h +++ b/drivers/scsi/qla2xxx/qla_bsg.h @@ -29,6 +29,8 @@ #define QL_VND_SET_FLASH_UPDATE_CAPS 0x16 #define QL_VND_GET_BBCR_DATA 0x17 #define QL_VND_GET_PRIV_STATS 0x18 +#define QL_VND_DPORT_DIAGNOSTICS 0x19 +#define QL_VND_GET_PRIV_STATS_EX 0x1A /* BSG Vendor specific subcode returns */ #define EXT_STATUS_OK 0 @@ -266,4 +268,15 @@ struct qla_bbcr_data { uint16_t mbx1; /* Port state */ uint8_t reserved[9]; } __packed; + +struct qla_dport_diag { + uint16_t options; + uint32_t buf[16]; + uint8_t unused[62]; +} __packed; + +/* D_Port options */ +#define QLA_DPORT_RESULT 0x0 +#define QLA_DPORT_START 0x2 + #endif diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c index b64c504ff12f..45af34ddc432 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.c +++ b/drivers/scsi/qla2xxx/qla_dbg.c @@ -11,12 +11,11 @@ * ---------------------------------------------------------------------- * | Level | Last Value Used | Holes | * ---------------------------------------------------------------------- - * | Module Init and Probe | 0x018f | 0x0146 | + * | Module Init and Probe | 0x0191 | 0x0146 | * | | | 0x015b-0x0160 | - * | | | 0x016e-0x0170 | - * | Mailbox commands | 0x1192 | | - * | | | | - * | Device Discovery | 0x2016 | 0x2020-0x2022, | + * | | | 0x016e | + * | Mailbox commands | 0x1199 | 0x1193 | + * | Device Discovery | 0x2004 | 0x2016 | * | | | 0x2011-0x2012, | * | | | 0x2099-0x20a4 | * | Queue Command and IO tracing | 0x3074 | 0x300b | @@ -26,11 +25,11 @@ * | | | 0x3036,0x3038 | * | | | 0x303a | * | DPC Thread | 0x4023 | 0x4002,0x4013 | - * | Async Events | 0x5089 | 0x502b-0x502f | - * | | | 0x505e | + * | Async Events | 0x5090 | 0x502b-0x502f | + * | | | 0x5047 | * | | | 0x5084,0x5075 | * | | | 0x503d,0x5044 | - * | | | 0x507b,0x505f | + * | | | 0x505f | * | Timer Routines | 0x6012 | | * | User Space Interactions | 0x70e3 | 0x7018,0x702e | * | | | 0x7020,0x7024 | @@ -39,9 +38,9 @@ * | | | 0x70a5-0x70a6 | * | | | 0x70a8,0x70ab | * | | | 0x70ad-0x70ae | + * | | | 0x70d0-0x70d6 | * | | | 0x70d7-0x70db | - * | | | 0x70de-0x70df | - * | Task Management | 0x803d | 0x8000,0x800b | + * | Task Management | 0x8042 | 0x8000,0x800b | * | | | 0x8019 | * | | | 0x8025,0x8026 | * | | | 0x8031,0x8032 | @@ -2697,29 +2696,24 @@ ql_dump_regs(uint32_t level, scsi_qla_host_t *vha, int32_t id) void ql_dump_buffer(uint32_t level, scsi_qla_host_t *vha, int32_t id, - uint8_t *b, uint32_t size) + uint8_t *buf, uint size) { - uint32_t cnt; - uint8_t c; + uint cnt; if (!ql_mask_match(level)) return; - ql_dbg(level, vha, id, " 0 1 2 3 4 5 6 7 8 " - "9 Ah Bh Ch Dh Eh Fh\n"); - ql_dbg(level, vha, id, "----------------------------------" - "----------------------------\n"); - - ql_dbg(level, vha, id, " "); - for (cnt = 0; cnt < 
size;) { - c = *b++; - printk("%02x", (uint32_t) c); - cnt++; - if (!(cnt % 16)) + ql_dbg(level, vha, id, + "%-+5d 0 1 2 3 4 5 6 7 8 9 A B C D E F\n", size); + ql_dbg(level, vha, id, + "----- -----------------------------------------------\n"); + for (cnt = 0; cnt < size; cnt++, buf++) { + if (cnt % 16 == 0) + ql_dbg(level, vha, id, "%04x:", cnt & ~0xFU); + printk(" %02x", *buf); + if (cnt % 16 == 15) printk("\n"); - else - printk(" "); } - if (cnt % 16) - ql_dbg(level, vha, id, "\n"); + if (cnt % 16 != 0) + printk("\n"); } diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 47f8b9b49bac..ae4a74756128 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -829,6 +829,7 @@ struct mbx_cmd_32 { #define MBA_FW_RESTART_CMPLT 0x8060 /* Firmware restart complete */ #define MBA_INIT_REQUIRED 0x8061 /* Initialization required */ #define MBA_SHUTDOWN_REQUESTED 0x8062 /* Shutdown Requested */ +#define MBA_TEMPERATURE_ALERT 0x8070 /* Temperature Alert */ #define MBA_DPORT_DIAGNOSTICS 0x8080 /* D-port Diagnostics */ #define MBA_FW_INIT_FAILURE 0x8401 /* Firmware initialization failure */ #define MBA_MIRROR_LUN_CHANGE 0x8402 /* Mirror LUN State Change @@ -3028,6 +3029,7 @@ struct qla_hw_data { uint32_t mr_reset_hdlr_active:1; uint32_t mr_intr_valid:1; + uint32_t dport_enabled:1; uint32_t fawwpn_enabled:1; uint32_t exlogins_enabled:1; uint32_t exchoffld_enabled:1; @@ -3128,7 +3130,7 @@ struct qla_hw_data { #define PCI_DEVICE_ID_QLOGIC_ISP2271 0x2271 #define PCI_DEVICE_ID_QLOGIC_ISP2261 0x2261 - uint32_t device_type; + uint32_t isp_type; #define DT_ISP2100 BIT_0 #define DT_ISP2200 BIT_1 #define DT_ISP2300 BIT_2 @@ -3153,6 +3155,7 @@ struct qla_hw_data { #define DT_ISP2261 BIT_21 #define DT_ISP_LAST (DT_ISP2261 << 1) + uint32_t device_type; #define DT_T10_PI BIT_25 #define DT_IIDMA BIT_26 #define DT_FWI2 BIT_27 @@ -3160,7 +3163,8 @@ struct qla_hw_data { #define DT_OEM_001 BIT_29 #define DT_ISP2200A BIT_30 #define DT_EXTENDED_IDS BIT_31 -#define DT_MASK(ha) ((ha)->device_type & (DT_ISP_LAST - 1)) + +#define DT_MASK(ha) ((ha)->isp_type & (DT_ISP_LAST - 1)) #define IS_QLA2100(ha) (DT_MASK(ha) & DT_ISP2100) #define IS_QLA2200(ha) (DT_MASK(ha) & DT_ISP2200) #define IS_QLA2300(ha) (DT_MASK(ha) & DT_ISP2300) @@ -3370,6 +3374,8 @@ struct qla_hw_data { uint32_t fw_shared_ram_start; uint32_t fw_shared_ram_end; + uint32_t fw_ddr_ram_start; + uint32_t fw_ddr_ram_end; uint16_t fw_options[16]; /* slots: 1,2,3,10,11 */ uint8_t fw_seriallink_options[4]; @@ -3505,7 +3511,6 @@ struct qla_hw_data { int cur_vport_count; struct qla_chip_state_84xx *cs84xx; - struct qla_statistics qla_stats; struct isp_operations *isp_ops; struct workqueue_struct *wq; struct qlfc_fw fw_buf; @@ -3656,6 +3661,7 @@ typedef struct scsi_qla_host { #define PFLG_DISCONNECTED 0 /* PCI device removed */ #define PFLG_DRIVER_REMOVING 1 /* PCI driver .remove */ #define PFLG_DRIVER_PROBING 2 /* PCI driver .probe */ +#define PCI_ERR 30 uint32_t device_flags; #define SWITCH_FOUND BIT_0 diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h index 4c0f3a774799..8a2368b32dec 100644 --- a/drivers/scsi/qla2xxx/qla_fw.h +++ b/drivers/scsi/qla2xxx/qla_fw.h @@ -1288,7 +1288,7 @@ struct vp_rpt_id_entry_24xx { uint8_t vp_idx_map[16]; - uint8_t reserved_4[28]; + uint8_t reserved_4[24]; uint16_t bbcr; uint8_t reserved_5[6]; }; diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index fe943772fe7b..6ca00813c71f 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ 
b/drivers/scsi/qla2xxx/qla_gbl.h @@ -344,7 +344,7 @@ qla2x00_get_link_status(scsi_qla_host_t *, uint16_t, struct link_statistics *, extern int qla24xx_get_isp_stats(scsi_qla_host_t *, struct link_statistics *, - dma_addr_t); + dma_addr_t, uint); extern int qla24xx_abort_command(srb_t *); extern int qla24xx_async_abort_command(srb_t *); @@ -445,6 +445,9 @@ qla2x00_port_logout(scsi_qla_host_t *, struct fc_port *); extern int qla2x00_dump_mctp_data(scsi_qla_host_t *, dma_addr_t, uint32_t, uint32_t); +extern int +qla26xx_dport_diagnostics(scsi_qla_host_t *, void *, uint, uint); + /* * Global Function Prototypes in qla_isr.c source file. */ diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index c56cdb35f3ed..5b09296b46a3 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -624,6 +624,9 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha) struct qla_hw_data *ha = vha->hw; struct req_que *req = ha->req_q_map[0]; + memset(&vha->qla_stats, 0, sizeof(vha->qla_stats)); + memset(&vha->fc_host_stat, 0, sizeof(vha->fc_host_stat)); + /* Clear adapter flags. */ vha->flags.online = 0; ha->flags.chip_reset_done = 0; @@ -2053,6 +2056,14 @@ qla2x00_update_fw_options(scsi_qla_host_t *vha) if (IS_QLA6312(ha)) ha->fw_options[2] |= BIT_13; + /* Set Retry FLOGI in case of P2P connection */ + if (ha->operating_mode == P2P) { + ha->fw_options[2] |= BIT_3; + ql_dbg(ql_dbg_disc, vha, 0x2100, + "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n", + __func__, ha->fw_options[2]); + } + /* Update firmware options. */ qla2x00_set_fw_options(vha, ha->fw_options); } @@ -2070,6 +2081,14 @@ qla24xx_update_fw_options(scsi_qla_host_t *vha) if (ql2xfwholdabts) ha->fw_options[3] |= BIT_12; + /* Set Retry FLOGI in case of P2P connection */ + if (ha->operating_mode == P2P) { + ha->fw_options[2] |= BIT_3; + ql_dbg(ql_dbg_disc, vha, 0x2101, + "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n", + __func__, ha->fw_options[2]); + } + /* Update Serial Link options. */ if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0) return; @@ -2269,13 +2288,13 @@ qla2x00_init_rings(scsi_qla_host_t *vha) mid_init_cb->options = cpu_to_le16(BIT_1); mid_init_cb->init_cb.execution_throttle = cpu_to_le16(ha->cur_fw_xcb_count); - /* D-Port Status */ - if (IS_DPORT_CAPABLE(ha)) - mid_init_cb->init_cb.firmware_options_1 |= - cpu_to_le16(BIT_7); - /* Enable FA-WWPN */ + ha->flags.dport_enabled = + (mid_init_cb->init_cb.firmware_options_1 & BIT_7) != 0; + ql_dbg(ql_dbg_init, vha, 0x0191, "DPORT Support: %s.\n", + (ha->flags.dport_enabled) ? "enabled" : "disabled"); + /* FA-WWPN Status */ ha->flags.fawwpn_enabled = - (mid_init_cb->init_cb.firmware_options_1 & BIT_6) ? 1 : 0; + (mid_init_cb->init_cb.firmware_options_1 & BIT_6) != 0; ql_dbg(ql_dbg_init, vha, 0x0141, "FA-WWPN Support: %s.\n", (ha->flags.fawwpn_enabled) ? 
"enabled" : "disabled"); } @@ -6513,6 +6532,14 @@ qla81xx_update_fw_options(scsi_qla_host_t *vha) if (ql2xfwholdabts) ha->fw_options[3] |= BIT_12; + /* Set Retry FLOGI in case of P2P connection */ + if (ha->operating_mode == P2P) { + ha->fw_options[2] |= BIT_3; + ql_dbg(ql_dbg_disc, vha, 0x2103, + "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n", + __func__, ha->fw_options[2]); + } + if (!ql2xetsenable) goto out; diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 5649c200d37c..987f1c729e9c 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -710,16 +710,23 @@ skip_rio: case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */ ql_log(ql_log_warn, vha, 0x5007, - "ISP Response Transfer Error.\n"); + "ISP Response Transfer Error (%x).\n", mb[1]); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); break; case MBA_WAKEUP_THRES: /* Request Queue Wake-up */ ql_dbg(ql_dbg_async, vha, 0x5008, - "Asynchronous WAKEUP_THRES.\n"); + "Asynchronous WAKEUP_THRES (%x).\n", mb[1]); + break; + case MBA_LOOP_INIT_ERR: + ql_log(ql_log_warn, vha, 0x5090, + "LOOP INIT ERROR (%x).\n", mb[1]); + ha->isp_ops->fw_dump(vha, 1); + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); break; + case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */ ql_dbg(ql_dbg_async, vha, 0x5009, "LIP occurred (%x).\n", mb[1]); @@ -1152,12 +1159,20 @@ global_port_update: case MBA_DPORT_DIAGNOSTICS: ql_dbg(ql_dbg_async, vha, 0x5052, - "D-Port Diagnostics: %04x %04x=%s\n", mb[0], mb[1], + "D-Port Diagnostics: %04x result=%s\n", + mb[0], mb[1] == 0 ? "start" : - mb[1] == 1 ? "done (ok)" : + mb[1] == 1 ? "done (pass)" : mb[1] == 2 ? "done (error)" : "other"); break; + case MBA_TEMPERATURE_ALERT: + ql_dbg(ql_dbg_async, vha, 0x505e, + "TEMPERATURE ALERT: %04x %04x %04x\n", mb[1], mb[2], mb[3]); + if (mb[1] == 0x12) + schedule_work(&ha->board_disable); + break; + default: ql_dbg(ql_dbg_async, vha, 0x5057, "Unknown AEN:%04x %04x %04x %04x\n", @@ -2548,7 +2563,7 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha, if (!vha->flags.online) return; - if (rsp->msix->cpuid != smp_processor_id()) { + if (rsp->msix && rsp->msix->cpuid != smp_processor_id()) { /* if kernel does not notify qla of IRQ's CPU change, * then set it here. */ @@ -3086,6 +3101,8 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) /* Enable MSI-X vectors for the base queue */ for (i = 0; i < 2; i++) { qentry = &ha->msix_entries[i]; + qentry->rsp = rsp; + rsp->msix = qentry; if (IS_P3P_TYPE(ha)) ret = request_irq(qentry->vector, qla82xx_msix_entries[i].handler, @@ -3097,8 +3114,6 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) if (ret) goto msix_register_fail; qentry->have_irq = 1; - qentry->rsp = rsp; - rsp->msix = qentry; /* Register for CPU affinity notification. 
*/ irq_set_affinity_notifier(qentry->vector, &qentry->irq_notify); @@ -3119,12 +3134,12 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) */ if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) { qentry = &ha->msix_entries[ATIO_VECTOR]; + qentry->rsp = rsp; + rsp->msix = qentry; ret = request_irq(qentry->vector, qla83xx_msix_entries[ATIO_VECTOR].handler, 0, qla83xx_msix_entries[ATIO_VECTOR].name, rsp); qentry->have_irq = 1; - qentry->rsp = rsp; - rsp->msix = qentry; } msix_register_fail: diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 968b84613096..23698c998699 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -64,6 +64,13 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) return QLA_FUNCTION_TIMEOUT; } + /* if PCI error, then avoid mbx processing.*/ + if (test_bit(PCI_ERR, &base_vha->dpc_flags)) { + ql_log(ql_log_warn, vha, 0x1191, + "PCI error, exiting.\n"); + return QLA_FUNCTION_TIMEOUT; + } + reg = ha->iobase; io_lock_on = base_vha->flags.init_done; @@ -266,6 +273,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) uint16_t mb0; uint32_t ictrl; + uint16_t w; if (IS_FWI2_CAPABLE(ha)) { mb0 = RD_REG_WORD(®->isp24.mailbox0); @@ -279,15 +287,32 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) "mb[0]=0x%x\n", command, ictrl, jiffies, mb0); ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019); - /* - * Attempt to capture a firmware dump for further analysis - * of the current firmware state. We do not need to do this - * if we are intentionally generating a dump. - */ - if (mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) - ha->isp_ops->fw_dump(vha, 0); + /* Capture FW dump only, if PCI device active */ + if (!pci_channel_offline(vha->hw->pdev)) { + pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w); + if (w == 0xffff || ictrl == 0xffffffff) { + /* This is special case if there is unload + * of driver happening and if PCI device go + * into bad state due to PCI error condition + * then only PCI ERR flag would be set. + * we will do premature exit for above case. + */ + if (test_bit(UNLOADING, &base_vha->dpc_flags)) + set_bit(PCI_ERR, &base_vha->dpc_flags); + ha->flags.mbox_busy = 0; + rval = QLA_FUNCTION_TIMEOUT; + goto premature_exit; + } - rval = QLA_FUNCTION_TIMEOUT; + /* Attempt to capture firmware dump for further + * anallysis of the current formware state. 
we do not + * need to do this if we are intentionally generating + * a dump + */ + if (mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) + ha->isp_ops->fw_dump(vha, 0); + rval = QLA_FUNCTION_TIMEOUT; + } } ha->flags.mbox_busy = 0; @@ -379,7 +404,7 @@ mbx_done: "**** Failed mbx[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x, cmd=%x ****.\n", mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], command); - ql_dbg(ql_dbg_disc, vha, 0x1115, + ql_dbg(ql_dbg_mbx, vha, 0x1198, "host status: 0x%x, flags:0x%lx, intr ctrl reg:0x%x, intr status:0x%x\n", RD_REG_DWORD(®->isp24.host_status), ha->fw_dump_cap_flags, @@ -388,7 +413,7 @@ mbx_done: mbx_reg = ®->isp24.mailbox0; for (i = 0; i < 6; i++) - ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x1116, + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1199, "mbox[%d] 0x%04x\n", i, RD_REG_WORD(mbx_reg++)); } else { ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__); @@ -782,8 +807,9 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha) if (IS_FWI2_CAPABLE(ha)) mcp->in_mb |= MBX_17|MBX_16|MBX_15; if (IS_QLA27XX(ha)) - mcp->in_mb |= MBX_23 | MBX_22 | MBX_21 | MBX_20 | MBX_19 | - MBX_18 | MBX_14 | MBX_13 | MBX_11 | MBX_10 | MBX_9 | MBX_8; + mcp->in_mb |= + MBX_25|MBX_24|MBX_23|MBX_22|MBX_21|MBX_20|MBX_19|MBX_18| + MBX_14|MBX_13|MBX_11|MBX_10|MBX_9|MBX_8; mcp->flags = 0; mcp->tov = MBX_TOV_SECONDS; @@ -842,6 +868,8 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha) ha->pep_version[2] = mcp->mb[14] & 0xff; ha->fw_shared_ram_start = (mcp->mb[19] << 16) | mcp->mb[18]; ha->fw_shared_ram_end = (mcp->mb[21] << 16) | mcp->mb[20]; + ha->fw_ddr_ram_start = (mcp->mb[23] << 16) | mcp->mb[22]; + ha->fw_ddr_ram_end = (mcp->mb[25] << 16) | mcp->mb[24]; } failed: @@ -1844,7 +1872,7 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states) states[0] = mcp->mb[1]; if (IS_FWI2_CAPABLE(vha->hw)) { states[1] = mcp->mb[2]; - states[2] = mcp->mb[3]; + states[2] = mcp->mb[3]; /* SFP info */ states[3] = mcp->mb[4]; states[4] = mcp->mb[5]; states[5] = mcp->mb[6]; /* DPORT status */ @@ -2759,15 +2787,16 @@ qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id, int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - uint32_t *iter, dwords; + uint32_t *iter = (void *)stats; + ushort dwords = offsetof(typeof(*stats), link_up_cnt)/sizeof(*iter); struct qla_hw_data *ha = vha->hw; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1084, "Entered %s.\n", __func__); mcp->mb[0] = MBC_GET_LINK_STATUS; - mcp->mb[2] = MSW(stats_dma); - mcp->mb[3] = LSW(stats_dma); + mcp->mb[2] = MSW(LSD(stats_dma)); + mcp->mb[3] = LSW(LSD(stats_dma)); mcp->mb[6] = MSW(MSD(stats_dma)); mcp->mb[7] = LSW(MSD(stats_dma)); mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; @@ -2796,12 +2825,9 @@ qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); rval = QLA_FUNCTION_FAILED; } else { - /* Copy over data -- firmware data is LE. */ + /* Re-endianize - firmware data is le32. 
*/ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1086, "Done %s.\n", __func__); - dwords = offsetof(struct link_statistics, - link_up_cnt) / 4; - iter = &stats->link_fail_cnt; for ( ; dwords--; iter++) le32_to_cpus(iter); } @@ -2815,7 +2841,7 @@ qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id, int qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats, - dma_addr_t stats_dma) + dma_addr_t stats_dma, uint options) { int rval; mbx_cmd_t mc; @@ -2832,7 +2858,7 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats, mcp->mb[7] = LSW(MSD(stats_dma)); mcp->mb[8] = sizeof(struct link_statistics) / 4; mcp->mb[9] = vha->vp_idx; - mcp->mb[10] = 0; + mcp->mb[10] = options; mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; mcp->in_mb = MBX_2|MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; @@ -2847,7 +2873,7 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats, } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108a, "Done %s.\n", __func__); - /* Copy over data -- firmware data is LE. */ + /* Re-endianize - firmware data is le32. */ dwords = sizeof(struct link_statistics) / 4; iter = &stats->link_fail_cnt; for ( ; dwords--; iter++) @@ -5722,3 +5748,54 @@ qla2x00_dump_mctp_data(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr, return rval; } + +int +qla26xx_dport_diagnostics(scsi_qla_host_t *vha, + void *dd_buf, uint size, uint options) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + dma_addr_t dd_dma; + + if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw)) + return QLA_FUNCTION_FAILED; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1192, + "Entered %s.\n", __func__); + + dd_dma = dma_map_single(&vha->hw->pdev->dev, + dd_buf, size, DMA_FROM_DEVICE); + if (!dd_dma) { + ql_log(ql_log_warn, vha, 0x1194, "Failed to map dma buffer.\n"); + return QLA_MEMORY_ALLOC_FAILED; + } + + memset(dd_buf, 0, size); + + mcp->mb[0] = MBC_DPORT_DIAGNOSTICS; + mcp->mb[1] = options; + mcp->mb[2] = MSW(LSD(dd_dma)); + mcp->mb[3] = LSW(LSD(dd_dma)); + mcp->mb[6] = MSW(MSD(dd_dma)); + mcp->mb[7] = LSW(MSD(dd_dma)); + mcp->mb[8] = size; + mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; + mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; + mcp->buf_size = size; + mcp->flags = MBX_DMA_IN; + mcp->tov = MBX_TOV_SECONDS * 4; + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x1195, "Failed=%x.\n", rval); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1196, + "Done %s.\n", __func__); + } + + dma_unmap_single(&vha->hw->pdev->dev, dd_dma, + size, DMA_FROM_DEVICE); + + return rval; +} diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h index 59c477883a73..6201dce3553b 100644 --- a/drivers/scsi/qla2xxx/qla_nx.h +++ b/drivers/scsi/qla2xxx/qla_nx.h @@ -1183,7 +1183,6 @@ static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8, 0x410000AC, #define CRB_NIU_XG_PAUSE_CTL_P1 0x8 #define qla82xx_get_temp_val(x) ((x) >> 16) -#define qla82xx_get_temp_val1(x) ((x) && 0x0000FFFF) #define qla82xx_get_temp_state(x) ((x) & 0xffff) #define qla82xx_encode_temp(val, state) (((val) << 16) | (state)) diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 7c0b60ca158f..2674f4c16bc3 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -80,6 +80,7 @@ MODULE_PARM_DESC(ql2xallocfwdump, int ql2xextended_error_logging; module_param(ql2xextended_error_logging, int, S_IRUGO|S_IWUSR); +module_param_named(logging, 
ql2xextended_error_logging, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(ql2xextended_error_logging, "Option to enable extended error logging,\n" "\t\tDefault is 0 - no logging. 0x40000000 - Module Init & Probe.\n" @@ -106,6 +107,7 @@ MODULE_PARM_DESC(ql2xshiftctondsd, int ql2xfdmienable=1; module_param(ql2xfdmienable, int, S_IRUGO|S_IWUSR); +module_param_named(fdmi, ql2xfdmienable, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(ql2xfdmienable, "Enables FDMI registrations. " "0 - no FDMI. Default is 1 - perform FDMI."); @@ -157,6 +159,7 @@ MODULE_PARM_DESC(ql2xmultique_tag, int ql2xfwloadbin; module_param(ql2xfwloadbin, int, S_IRUGO|S_IWUSR); +module_param_named(fwload, ql2xfwloadbin, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(ql2xfwloadbin, "Option to specify location from which to load ISP firmware:.\n" " 2 -- load firmware via the request_firmware() (hotplug).\n" @@ -894,12 +897,16 @@ static void qla2x00_wait_for_hba_ready(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; + scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); while (((qla2x00_reset_active(vha)) || ha->dpc_active || ha->flags.mbox_busy) || test_bit(FX00_RESET_RECOVERY, &vha->dpc_flags) || - test_bit(FX00_TARGET_SCAN, &vha->dpc_flags)) + test_bit(FX00_TARGET_SCAN, &vha->dpc_flags)) { + if (test_bit(UNLOADING, &base_vha->dpc_flags)) + break; msleep(1000); + } } int @@ -936,6 +943,30 @@ sp_get(struct srb *sp) atomic_inc(&sp->ref_count); } +#define ISP_REG_DISCONNECT 0xffffffffU +/************************************************************************** +* qla2x00_isp_reg_stat +* +* Description: +* Read the host status register of ISP before aborting the command. +* +* Input: +* ha = pointer to host adapter structure. +* +* +* Returns: +* Either true or false. +* +* Note: Return true if there is register disconnect. 
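* A function that has dropped off the PCI bus (surprise removal, a failed
* link, etc.) reads back as all ones, so comparing host_status against
* ISP_REG_DISCONNECT (0xffffffff) catches a dead register window before
* the error handlers try to issue mailbox commands or resets.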
+**************************************************************************/ +static inline +uint32_t qla2x00_isp_reg_stat(struct qla_hw_data *ha) +{ + struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; + + return ((RD_REG_DWORD(®->host_status)) == ISP_REG_DISCONNECT); +} + /************************************************************************** * qla2xxx_eh_abort * @@ -963,6 +994,11 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd) int rval, wait = 0; struct qla_hw_data *ha = vha->hw; + if (qla2x00_isp_reg_stat(ha)) { + ql_log(ql_log_info, vha, 0x8042, + "PCI/Register disconnect, exiting.\n"); + return FAILED; + } if (!CMD_SP(cmd)) return SUCCESS; @@ -1146,6 +1182,12 @@ qla2xxx_eh_device_reset(struct scsi_cmnd *cmd) scsi_qla_host_t *vha = shost_priv(cmd->device->host); struct qla_hw_data *ha = vha->hw; + if (qla2x00_isp_reg_stat(ha)) { + ql_log(ql_log_info, vha, 0x803e, + "PCI/Register disconnect, exiting.\n"); + return FAILED; + } + return __qla2xxx_eh_generic_reset("DEVICE", WAIT_LUN, cmd, ha->isp_ops->lun_reset); } @@ -1156,6 +1198,12 @@ qla2xxx_eh_target_reset(struct scsi_cmnd *cmd) scsi_qla_host_t *vha = shost_priv(cmd->device->host); struct qla_hw_data *ha = vha->hw; + if (qla2x00_isp_reg_stat(ha)) { + ql_log(ql_log_info, vha, 0x803f, + "PCI/Register disconnect, exiting.\n"); + return FAILED; + } + return __qla2xxx_eh_generic_reset("TARGET", WAIT_TARGET, cmd, ha->isp_ops->target_reset); } @@ -1183,6 +1231,13 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd) int ret = FAILED; unsigned int id; uint64_t lun; + struct qla_hw_data *ha = vha->hw; + + if (qla2x00_isp_reg_stat(ha)) { + ql_log(ql_log_info, vha, 0x8040, + "PCI/Register disconnect, exiting.\n"); + return FAILED; + } id = cmd->device->id; lun = cmd->device->lun; @@ -1252,6 +1307,13 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd) uint64_t lun; scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); + if (qla2x00_isp_reg_stat(ha)) { + ql_log(ql_log_info, vha, 0x8041, + "PCI/Register disconnect, exiting.\n"); + schedule_work(&ha->board_disable); + return SUCCESS; + } + id = cmd->device->id; lun = cmd->device->lun; @@ -2103,27 +2165,27 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha) ha->device_type = DT_EXTENDED_IDS; switch (ha->pdev->device) { case PCI_DEVICE_ID_QLOGIC_ISP2100: - ha->device_type |= DT_ISP2100; + ha->isp_type |= DT_ISP2100; ha->device_type &= ~DT_EXTENDED_IDS; ha->fw_srisc_address = RISC_START_ADDRESS_2100; break; case PCI_DEVICE_ID_QLOGIC_ISP2200: - ha->device_type |= DT_ISP2200; + ha->isp_type |= DT_ISP2200; ha->device_type &= ~DT_EXTENDED_IDS; ha->fw_srisc_address = RISC_START_ADDRESS_2100; break; case PCI_DEVICE_ID_QLOGIC_ISP2300: - ha->device_type |= DT_ISP2300; + ha->isp_type |= DT_ISP2300; ha->device_type |= DT_ZIO_SUPPORTED; ha->fw_srisc_address = RISC_START_ADDRESS_2300; break; case PCI_DEVICE_ID_QLOGIC_ISP2312: - ha->device_type |= DT_ISP2312; + ha->isp_type |= DT_ISP2312; ha->device_type |= DT_ZIO_SUPPORTED; ha->fw_srisc_address = RISC_START_ADDRESS_2300; break; case PCI_DEVICE_ID_QLOGIC_ISP2322: - ha->device_type |= DT_ISP2322; + ha->isp_type |= DT_ISP2322; ha->device_type |= DT_ZIO_SUPPORTED; if (ha->pdev->subsystem_vendor == 0x1028 && ha->pdev->subsystem_device == 0x0170) @@ -2131,60 +2193,60 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha) ha->fw_srisc_address = RISC_START_ADDRESS_2300; break; case PCI_DEVICE_ID_QLOGIC_ISP6312: - ha->device_type |= DT_ISP6312; + ha->isp_type |= DT_ISP6312; ha->fw_srisc_address = RISC_START_ADDRESS_2300; break; case PCI_DEVICE_ID_QLOGIC_ISP6322: - ha->device_type |= 
DT_ISP6322; + ha->isp_type |= DT_ISP6322; ha->fw_srisc_address = RISC_START_ADDRESS_2300; break; case PCI_DEVICE_ID_QLOGIC_ISP2422: - ha->device_type |= DT_ISP2422; + ha->isp_type |= DT_ISP2422; ha->device_type |= DT_ZIO_SUPPORTED; ha->device_type |= DT_FWI2; ha->device_type |= DT_IIDMA; ha->fw_srisc_address = RISC_START_ADDRESS_2400; break; case PCI_DEVICE_ID_QLOGIC_ISP2432: - ha->device_type |= DT_ISP2432; + ha->isp_type |= DT_ISP2432; ha->device_type |= DT_ZIO_SUPPORTED; ha->device_type |= DT_FWI2; ha->device_type |= DT_IIDMA; ha->fw_srisc_address = RISC_START_ADDRESS_2400; break; case PCI_DEVICE_ID_QLOGIC_ISP8432: - ha->device_type |= DT_ISP8432; + ha->isp_type |= DT_ISP8432; ha->device_type |= DT_ZIO_SUPPORTED; ha->device_type |= DT_FWI2; ha->device_type |= DT_IIDMA; ha->fw_srisc_address = RISC_START_ADDRESS_2400; break; case PCI_DEVICE_ID_QLOGIC_ISP5422: - ha->device_type |= DT_ISP5422; + ha->isp_type |= DT_ISP5422; ha->device_type |= DT_FWI2; ha->fw_srisc_address = RISC_START_ADDRESS_2400; break; case PCI_DEVICE_ID_QLOGIC_ISP5432: - ha->device_type |= DT_ISP5432; + ha->isp_type |= DT_ISP5432; ha->device_type |= DT_FWI2; ha->fw_srisc_address = RISC_START_ADDRESS_2400; break; case PCI_DEVICE_ID_QLOGIC_ISP2532: - ha->device_type |= DT_ISP2532; + ha->isp_type |= DT_ISP2532; ha->device_type |= DT_ZIO_SUPPORTED; ha->device_type |= DT_FWI2; ha->device_type |= DT_IIDMA; ha->fw_srisc_address = RISC_START_ADDRESS_2400; break; case PCI_DEVICE_ID_QLOGIC_ISP8001: - ha->device_type |= DT_ISP8001; + ha->isp_type |= DT_ISP8001; ha->device_type |= DT_ZIO_SUPPORTED; ha->device_type |= DT_FWI2; ha->device_type |= DT_IIDMA; ha->fw_srisc_address = RISC_START_ADDRESS_2400; break; case PCI_DEVICE_ID_QLOGIC_ISP8021: - ha->device_type |= DT_ISP8021; + ha->isp_type |= DT_ISP8021; ha->device_type |= DT_ZIO_SUPPORTED; ha->device_type |= DT_FWI2; ha->fw_srisc_address = RISC_START_ADDRESS_2400; @@ -2192,7 +2254,7 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha) qla82xx_init_flags(ha); break; case PCI_DEVICE_ID_QLOGIC_ISP8044: - ha->device_type |= DT_ISP8044; + ha->isp_type |= DT_ISP8044; ha->device_type |= DT_ZIO_SUPPORTED; ha->device_type |= DT_FWI2; ha->fw_srisc_address = RISC_START_ADDRESS_2400; @@ -2200,7 +2262,7 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha) qla82xx_init_flags(ha); break; case PCI_DEVICE_ID_QLOGIC_ISP2031: - ha->device_type |= DT_ISP2031; + ha->isp_type |= DT_ISP2031; ha->device_type |= DT_ZIO_SUPPORTED; ha->device_type |= DT_FWI2; ha->device_type |= DT_IIDMA; @@ -2208,7 +2270,7 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha) ha->fw_srisc_address = RISC_START_ADDRESS_2400; break; case PCI_DEVICE_ID_QLOGIC_ISP8031: - ha->device_type |= DT_ISP8031; + ha->isp_type |= DT_ISP8031; ha->device_type |= DT_ZIO_SUPPORTED; ha->device_type |= DT_FWI2; ha->device_type |= DT_IIDMA; @@ -2216,10 +2278,10 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha) ha->fw_srisc_address = RISC_START_ADDRESS_2400; break; case PCI_DEVICE_ID_QLOGIC_ISPF001: - ha->device_type |= DT_ISPFX00; + ha->isp_type |= DT_ISPFX00; break; case PCI_DEVICE_ID_QLOGIC_ISP2071: - ha->device_type |= DT_ISP2071; + ha->isp_type |= DT_ISP2071; ha->device_type |= DT_ZIO_SUPPORTED; ha->device_type |= DT_FWI2; ha->device_type |= DT_IIDMA; @@ -2227,7 +2289,7 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha) ha->fw_srisc_address = RISC_START_ADDRESS_2400; break; case PCI_DEVICE_ID_QLOGIC_ISP2271: - ha->device_type |= DT_ISP2271; + ha->isp_type |= DT_ISP2271; ha->device_type |= DT_ZIO_SUPPORTED; ha->device_type |= DT_FWI2; ha->device_type |= 
DT_IIDMA; @@ -2235,7 +2297,7 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha) ha->fw_srisc_address = RISC_START_ADDRESS_2400; break; case PCI_DEVICE_ID_QLOGIC_ISP2261: - ha->device_type |= DT_ISP2261; + ha->isp_type |= DT_ISP2261; ha->device_type |= DT_ZIO_SUPPORTED; ha->device_type |= DT_FWI2; ha->device_type |= DT_IIDMA; @@ -2901,6 +2963,10 @@ skip_dpc: qlt_add_target(ha, base_vha); clear_bit(PFLG_DRIVER_PROBING, &base_vha->pci_flags); + + if (test_bit(UNLOADING, &base_vha->dpc_flags)) + return -ENODEV; + return 0; probe_init_failed: @@ -3128,6 +3194,12 @@ qla2x00_remove_one(struct pci_dev *pdev) qla2x00_wait_for_hba_ready(base_vha); + /* if UNLOAD flag is already set, then continue unload, + * where it was set first. + */ + if (test_bit(UNLOADING, &base_vha->dpc_flags)) + return; + set_bit(UNLOADING, &base_vha->dpc_flags); if (IS_QLAFX00(ha)) @@ -4907,6 +4979,12 @@ qla2x00_disable_board_on_pci_error(struct work_struct *work) struct pci_dev *pdev = ha->pdev; scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); + /* if UNLOAD flag is already set, then continue unload, + * where it was set first. + */ + if (test_bit(UNLOADING, &base_vha->dpc_flags)) + return; + ql_log(ql_log_warn, base_vha, 0x015b, "Disabling adapter.\n"); @@ -5002,6 +5080,9 @@ qla2x00_do_dpc(void *data) "DPC handler waking up, dpc_flags=0x%lx.\n", base_vha->dpc_flags); + if (test_bit(UNLOADING, &base_vha->dpc_flags)) + break; + qla2x00_do_work(base_vha); if (IS_P3P_TYPE(ha)) { diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index ca39deb4ff5b..bff9689f5ca9 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -215,8 +215,8 @@ static inline void qlt_incr_num_pend_cmds(struct scsi_qla_host *vha) spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags); vha->hw->tgt.num_pend_cmds++; - if (vha->hw->tgt.num_pend_cmds > vha->hw->qla_stats.stat_max_pend_cmds) - vha->hw->qla_stats.stat_max_pend_cmds = + if (vha->hw->tgt.num_pend_cmds > vha->qla_stats.stat_max_pend_cmds) + vha->qla_stats.stat_max_pend_cmds = vha->hw->tgt.num_pend_cmds; spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags); } @@ -5231,8 +5231,8 @@ qlt_alloc_qfull_cmd(struct scsi_qla_host *vha, if ((vha->hw->tgt.num_qfull_cmds_alloc + 1) > MAX_QFULL_CMDS_ALLOC) { vha->hw->tgt.num_qfull_cmds_dropped++; if (vha->hw->tgt.num_qfull_cmds_dropped > - vha->hw->qla_stats.stat_max_qfull_cmds_dropped) - vha->hw->qla_stats.stat_max_qfull_cmds_dropped = + vha->qla_stats.stat_max_qfull_cmds_dropped) + vha->qla_stats.stat_max_qfull_cmds_dropped = vha->hw->tgt.num_qfull_cmds_dropped; ql_dbg(ql_dbg_io, vha, 0x3068, @@ -5263,8 +5263,8 @@ qlt_alloc_qfull_cmd(struct scsi_qla_host *vha, vha->hw->tgt.num_qfull_cmds_dropped++; if (vha->hw->tgt.num_qfull_cmds_dropped > - vha->hw->qla_stats.stat_max_qfull_cmds_dropped) - vha->hw->qla_stats.stat_max_qfull_cmds_dropped = + vha->qla_stats.stat_max_qfull_cmds_dropped) + vha->qla_stats.stat_max_qfull_cmds_dropped = vha->hw->tgt.num_qfull_cmds_dropped; qlt_chk_exch_leak_thresh_hold(vha); @@ -5293,8 +5293,8 @@ qlt_alloc_qfull_cmd(struct scsi_qla_host *vha, vha->hw->tgt.num_qfull_cmds_alloc++; if (vha->hw->tgt.num_qfull_cmds_alloc > - vha->hw->qla_stats.stat_max_qfull_cmds_alloc) - vha->hw->qla_stats.stat_max_qfull_cmds_alloc = + vha->qla_stats.stat_max_qfull_cmds_alloc) + vha->qla_stats.stat_max_qfull_cmds_alloc = vha->hw->tgt.num_qfull_cmds_alloc; } diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c index c3e622524604..36935c9ed669 100644 --- 
a/drivers/scsi/qla2xxx/qla_tmpl.c +++ b/drivers/scsi/qla2xxx/qla_tmpl.c @@ -357,6 +357,13 @@ qla27xx_fwdt_entry_t262(struct scsi_qla_host *vha, ent->t262.start_addr = start; ent->t262.end_addr = end; } + } else if (ent->t262.ram_area == T262_RAM_AREA_DDR_RAM) { + start = vha->hw->fw_ddr_ram_start; + end = vha->hw->fw_ddr_ram_end; + if (buf) { + ent->t262.start_addr = start; + ent->t262.end_addr = end; + } } else { ql_dbg(ql_dbg_misc, vha, 0xd022, "%s: unknown area %x\n", __func__, ent->t262.ram_area); @@ -364,7 +371,7 @@ qla27xx_fwdt_entry_t262(struct scsi_qla_host *vha, goto done; } - if (end < start || end == 0) { + if (end <= start || start == 0 || end == 0) { ql_dbg(ql_dbg_misc, vha, 0xd023, "%s: unusable range (start=%x end=%x)\n", __func__, ent->t262.end_addr, ent->t262.start_addr); diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h index 0bc93fa46dae..3cb1964b7786 100644 --- a/drivers/scsi/qla2xxx/qla_version.h +++ b/drivers/scsi/qla2xxx/qla_version.h @@ -7,7 +7,7 @@ /* * Driver version */ -#define QLA2XXX_VERSION "8.07.00.33-k" +#define QLA2XXX_VERSION "8.07.00.38-k" #define QLA_DRIVER_MAJOR_VER 8 #define QLA_DRIVER_MINOR_VER 7 diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index 1deb6adc411f..1f36aca44394 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c @@ -1160,6 +1160,7 @@ bool scsi_use_blk_mq = true; bool scsi_use_blk_mq = false; #endif module_param_named(use_blk_mq, scsi_use_blk_mq, bool, S_IWUSR | S_IRUGO); +EXPORT_SYMBOL_GPL(scsi_use_blk_mq); static int __init init_scsi(void) { diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 0f9ba41e27d8..6a219a0844d3 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -890,7 +890,7 @@ static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) return 0; } -/* Returns 0 if ok else (DID_ERROR << 16). Sets scp->resid . */ +/* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */ static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr, int arr_len) { @@ -909,7 +909,35 @@ static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr, return 0; } -/* Returns number of bytes fetched into 'arr' or -1 if error. */ +/* Partial build of SCSI "data-in" buffer. Returns 0 if ok else + * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple + * calls, not required to write in ascending offset order. Assumes resid + * set to scsi_bufflen() prior to any calls. + */ +static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr, + int arr_len, unsigned int off_dst) +{ + int act_len, n; + struct scsi_data_buffer *sdb = scsi_in(scp); + off_t skip = off_dst; + + if (sdb->length <= off_dst) + return 0; + if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE)) + return DID_ERROR << 16; + + act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents, + arr, arr_len, skip); + pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n", + __func__, off_dst, scsi_bufflen(scp), act_len, sdb->resid); + n = (int)scsi_bufflen(scp) - ((int)off_dst + act_len); + sdb->resid = min(sdb->resid, n); + return 0; +} + +/* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into + * 'arr' or -1 if error. 
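 * (Its data-in counterpart just above, p_fill_from_dev_buffer(), copies at
 * a caller-supplied offset with sg_pcopy_from_buffer() and only ever
 * shrinks resid, which is what lets resp_report_luns() further down stream
 * the LUN list out in RL_BUCKET_ELEMS-sized chunks from a stack buffer
 * instead of vmalloc'ing the whole response.)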
+ */ static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr, int arr_len) { @@ -3269,6 +3297,8 @@ static int resp_get_lba_status(struct scsi_cmnd *scp, return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN); } +#define RL_BUCKET_ELEMS 8 + /* Even though each pseudo target has a REPORT LUNS "well known logical unit" * (W-LUN), the normal Linux scanning logic does not associate it with a * device (e.g. /dev/sg7). The following magic will make that association: @@ -3285,12 +3315,14 @@ static int resp_report_luns(struct scsi_cmnd *scp, unsigned char select_report; u64 lun; struct scsi_lun *lun_p; - u8 *arr; + u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)]; unsigned int lun_cnt; /* normal LUN count (max: 256) */ unsigned int wlun_cnt; /* report luns W-LUN count */ unsigned int tlun_cnt; /* total LUN count */ unsigned int rlen; /* response length (in bytes) */ - int i, res; + int k, j, n, res; + unsigned int off_rsp = 0; + const int sz_lun = sizeof(struct scsi_lun); clear_luns_changed_on_target(devip); @@ -3329,33 +3361,40 @@ static int resp_report_luns(struct scsi_cmnd *scp, --lun_cnt; tlun_cnt = lun_cnt + wlun_cnt; - - rlen = (tlun_cnt * sizeof(struct scsi_lun)) + 8; - arr = vmalloc(rlen); - if (!arr) { - mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC, - INSUFF_RES_ASCQ); - return check_condition_result; - } - memset(arr, 0, rlen); + rlen = tlun_cnt * sz_lun; /* excluding 8 byte header */ + scsi_set_resid(scp, scsi_bufflen(scp)); pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n", select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0); - /* luns start at byte 8 in response following the header */ - lun_p = (struct scsi_lun *)&arr[8]; - - /* LUNs use single level peripheral device addressing method */ + /* loops rely on sizeof response header same as sizeof lun (both 8) */ lun = sdebug_no_lun_0 ? 1 : 0; - for (i = 0; i < lun_cnt; i++) - int_to_scsilun(lun++, lun_p++); - - if (wlun_cnt) - int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p++); - - put_unaligned_be32(rlen - 8, &arr[0]); - - res = fill_from_dev_buffer(scp, arr, rlen); - vfree(arr); + for (k = 0, j = 0, res = 0; true; ++k, j = 0) { + memset(arr, 0, sizeof(arr)); + lun_p = (struct scsi_lun *)&arr[0]; + if (k == 0) { + put_unaligned_be32(rlen, &arr[0]); + ++lun_p; + j = 1; + } + for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) { + if ((k * RL_BUCKET_ELEMS) + j > lun_cnt) + break; + int_to_scsilun(lun++, lun_p); + } + if (j < RL_BUCKET_ELEMS) + break; + n = j * sz_lun; + res = p_fill_from_dev_buffer(scp, arr, n, off_rsp); + if (res) + return res; + off_rsp += n; + } + if (wlun_cnt) { + int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p); + ++j; + } + if (j > 0) + res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp); return res; } diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c index ff41c310c900..eaccd651ccda 100644 --- a/drivers/scsi/scsi_devinfo.c +++ b/drivers/scsi/scsi_devinfo.c @@ -429,7 +429,7 @@ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor, * here, and we don't know what device it is * trying to work with, leave it as-is. 
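 * (The change below sizes vmax and mmax from the devinfo fields
 * themselves and only dereferences devinfo->vendor[vmax] /
 * devinfo->model[mmax] when the index is still inside the array, so a
 * maximum-length vendor or model string no longer reads one byte past
 * the field.)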
*/ - vmax = 8; /* max length of vendor */ + vmax = sizeof(devinfo->vendor); vskip = vendor; while (vmax > 0 && *vskip == ' ') { vmax--; @@ -439,7 +439,7 @@ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor, while (vmax > 0 && vskip[vmax - 1] == ' ') --vmax; - mmax = 16; /* max length of model */ + mmax = sizeof(devinfo->model); mskip = model; while (mmax > 0 && *mskip == ' ') { mmax--; @@ -455,10 +455,12 @@ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor, * Behave like the older version of get_device_flags. */ if (memcmp(devinfo->vendor, vskip, vmax) || - devinfo->vendor[vmax]) + (vmax < sizeof(devinfo->vendor) && + devinfo->vendor[vmax])) continue; if (memcmp(devinfo->model, mskip, mmax) || - devinfo->model[mmax]) + (mmax < sizeof(devinfo->model) && + devinfo->model[mmax])) continue; return devinfo; } else { diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 60bff78e9ead..d3e852ad5aa3 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -1012,7 +1012,8 @@ static int sd_setup_read_write_cmnd(struct scsi_cmnd *SCpnt) } else if (rq_data_dir(rq) == READ) { SCpnt->cmnd[0] = READ_6; } else { - scmd_printk(KERN_ERR, SCpnt, "Unknown command %llx\n", (unsigned long long) rq->cmd_flags); + scmd_printk(KERN_ERR, SCpnt, "Unknown command %llu,%llx\n", + req_op(rq), (unsigned long long) rq->cmd_flags); goto out; } @@ -1137,21 +1138,26 @@ static int sd_init_command(struct scsi_cmnd *cmd) { struct request *rq = cmd->request; - if (rq->cmd_flags & REQ_DISCARD) + switch (req_op(rq)) { + case REQ_OP_DISCARD: return sd_setup_discard_cmnd(cmd); - else if (rq->cmd_flags & REQ_WRITE_SAME) + case REQ_OP_WRITE_SAME: return sd_setup_write_same_cmnd(cmd); - else if (rq->cmd_flags & REQ_FLUSH) + case REQ_OP_FLUSH: return sd_setup_flush_cmnd(cmd); - else + case REQ_OP_READ: + case REQ_OP_WRITE: return sd_setup_read_write_cmnd(cmd); + default: + BUG(); + } } static void sd_uninit_command(struct scsi_cmnd *SCpnt) { struct request *rq = SCpnt->request; - if (rq->cmd_flags & REQ_DISCARD) + if (req_op(rq) == REQ_OP_DISCARD) __free_page(rq->completion_data); if (SCpnt->cmnd != rq->cmd) { @@ -1613,8 +1619,7 @@ static int sd_pr_register(struct block_device *bdev, u64 old_key, u64 new_key, return -EOPNOTSUPP; return sd_pr_command(bdev, (flags & PR_FL_IGNORE_KEY) ? 
0x06 : 0x00, old_key, new_key, 0, - (1 << 0) /* APTPL */ | - (1 << 2) /* ALL_TG_PT */); + (1 << 0) /* APTPL */); } static int sd_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type, @@ -1774,7 +1779,7 @@ static int sd_done(struct scsi_cmnd *SCpnt) unsigned char op = SCpnt->cmnd[0]; unsigned char unmap = SCpnt->cmnd[1] & 8; - if (req->cmd_flags & REQ_DISCARD || req->cmd_flags & REQ_WRITE_SAME) { + if (req_op(req) == REQ_OP_DISCARD || req_op(req) == REQ_OP_WRITE_SAME) { if (!result) { good_bytes = blk_rq_bytes(req); scsi_set_resid(SCpnt, 0); @@ -2988,7 +2993,6 @@ static void sd_probe_async(void *data, async_cookie_t cookie) sd_revalidate_disk(gd); - gd->driverfs_dev = &sdp->sdev_gendev; gd->flags = GENHD_FL_EXT_DEVT; if (sdp->removable) { gd->flags |= GENHD_FL_REMOVABLE; @@ -2996,7 +3000,7 @@ static void sd_probe_async(void *data, async_cookie_t cookie) } blk_pm_runtime_init(sdp->request_queue, dev); - add_disk(gd); + device_add_disk(dev, gd); if (sdkp->capacity) sd_dif_config_host(sdkp); diff --git a/drivers/scsi/snic/snic_disc.c b/drivers/scsi/snic/snic_disc.c index b0fefd67cac3..b106596cc0cf 100644 --- a/drivers/scsi/snic/snic_disc.c +++ b/drivers/scsi/snic/snic_disc.c @@ -113,11 +113,11 @@ snic_queue_report_tgt_req(struct snic *snic) pa = pci_map_single(snic->pdev, buf, buf_len, PCI_DMA_FROMDEVICE); if (pci_dma_mapping_error(snic->pdev, pa)) { - kfree(buf); - snic_req_free(snic, rqi); SNIC_HOST_ERR(snic->shost, "Rpt-tgt rspbuf %p: PCI DMA Mapping Failed\n", buf); + kfree(buf); + snic_req_free(snic, rqi); ret = -EINVAL; goto error; diff --git a/drivers/scsi/snic/snic_fwint.h b/drivers/scsi/snic/snic_fwint.h index c5f9e1917a8e..2a045a57e365 100644 --- a/drivers/scsi/snic/snic_fwint.h +++ b/drivers/scsi/snic/snic_fwint.h @@ -92,7 +92,7 @@ enum snic_io_status { }; /* end of enum snic_io_status */ /* - * snic_io_hdr : host <--> firmare + * snic_io_hdr : host <--> firmware * * for any other message that will be queued to firmware should * have the following request header diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c index 64c867405ad4..ed179348de80 100644 --- a/drivers/scsi/sr.c +++ b/drivers/scsi/sr.c @@ -713,7 +713,6 @@ static int sr_probe(struct device *dev) get_capabilities(cd); sr_vendor_init(cd); - disk->driverfs_dev = &sdev->sdev_gendev; set_capacity(disk, cd->capacity); disk->private_data = &cd->driver; disk->queue = sdev->request_queue; @@ -730,7 +729,7 @@ static int sr_probe(struct device *dev) dev_set_drvdata(dev, cd); disk->flags |= GENHD_FL_REMOVABLE; - add_disk(disk); + device_add_disk(&sdev->sdev_gendev, disk); sdev_printk(KERN_DEBUG, sdev, "Attached scsi CD-ROM %s\n", cd->cdi.name); diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index 3ddcabb790a8..8ccfc9ea874b 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -966,6 +966,8 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request, if (scmnd->result) { if (scsi_normalize_sense(scmnd->sense_buffer, SCSI_SENSE_BUFFERSIZE, &sense_hdr) && + !(sense_hdr.sense_key == NOT_READY && + sense_hdr.asc == 0x03A) && do_logging(STORVSC_LOGGING_ERROR)) scsi_print_sense_hdr(scmnd->device, "storvsc", &sense_hdr); diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig index 097894a1fab5..47966909286d 100644 --- a/drivers/scsi/ufs/Kconfig +++ b/drivers/scsi/ufs/Kconfig @@ -61,6 +61,14 @@ config SCSI_UFSHCD_PCI If unsure, say N. 
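The hunks that follow add Kconfig entries, Makefile rules and a set of new files for the Synopsys DesignWare G210 UFS Test Chip: PCI and platform glue drivers plus shared DWC link-startup code. The PCI glue picks the PHY initialization callback from its tc_type module parameter before handing a single ufs_hba_variant_ops table to ufshcd_init(). A minimal sketch of that selection, assuming the declarations from ufshcd.h, ufshcd-dwc.h and tc-dwc-g210.h added below; tc_dwc_g210_select_phy_init() is a hypothetical helper, the real driver open-codes this in tc_dwc_g210_pci_probe():

#define TC_G210_20BIT	20
#define TC_G210_40BIT	40
#define TC_G210_INV	0

static int tc_type = TC_G210_INV;	/* set via the "tc_type=" module parameter */

static struct ufs_hba_variant_ops tc_dwc_g210_pci_hba_vops = {
	.name			= "tc-dwc-g210-pci",
	.link_startup_notify	= ufshcd_dwc_link_startup_notify,
	/* .phy_initialization is filled in at probe time */
};

/* hypothetical helper; the real driver does this inline in its probe */
static int tc_dwc_g210_select_phy_init(struct ufs_hba_variant_ops *vops)
{
	if (tc_type == TC_G210_20BIT)
		vops->phy_initialization = tc_dwc_g210_config_20_bit;
	else if (tc_type == TC_G210_40BIT)
		vops->phy_initialization = tc_dwc_g210_config_40_bit;
	else
		return -EPERM;	/* test chip width not specified */

	return 0;
}

ufshcd_dwc_link_startup_notify() then invokes phy_initialization during the PRE_CHANGE phase of link startup, as the ufshcd-dwc.c hunk further down shows.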
+config SCSI_UFS_DWC_TC_PCI + tristate "DesignWare pci support using a G210 Test Chip" + depends on SCSI_UFSHCD && PCI + ---help--- + Synopsys Test Chip is a PHY for prototyping purposes. + + If unsure, say N. + config SCSI_UFSHCD_PLATFORM tristate "Platform bus based UFS Controller support" depends on SCSI_UFSHCD @@ -72,6 +80,14 @@ config SCSI_UFSHCD_PLATFORM If unsure, say N. +config SCSI_UFS_DWC_TC_PLATFORM + tristate "DesignWare platform support using a G210 Test Chip" + depends on SCSI_UFSHCD_PLATFORM + ---help--- + Synopsys Test Chip is a PHY for prototyping purposes. + + If unsure, say N. + config SCSI_UFS_QCOM tristate "QCOM specific hooks to UFS controller platform driver" depends on SCSI_UFSHCD_PLATFORM && ARCH_QCOM diff --git a/drivers/scsi/ufs/Makefile b/drivers/scsi/ufs/Makefile index 8303bcce7a23..6e77cb0bfee9 100644 --- a/drivers/scsi/ufs/Makefile +++ b/drivers/scsi/ufs/Makefile @@ -1,4 +1,6 @@ # UFSHCD makefile +obj-$(CONFIG_SCSI_UFS_DWC_TC_PCI) += tc-dwc-g210-pci.o ufshcd-dwc.o tc-dwc-g210.o +obj-$(CONFIG_SCSI_UFS_DWC_TC_PLATFORM) += tc-dwc-g210-pltfrm.o ufshcd-dwc.o tc-dwc-g210.o obj-$(CONFIG_SCSI_UFS_QCOM) += ufs-qcom.o obj-$(CONFIG_SCSI_UFSHCD) += ufshcd.o obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o diff --git a/drivers/scsi/ufs/tc-dwc-g210-pci.c b/drivers/scsi/ufs/tc-dwc-g210-pci.c new file mode 100644 index 000000000000..c09a0fef0fe6 --- /dev/null +++ b/drivers/scsi/ufs/tc-dwc-g210-pci.c @@ -0,0 +1,181 @@ +/* + * Synopsys G210 Test Chip driver + * + * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com) + * + * Authors: Joao Pinto <jpinto@synopsys.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include "ufshcd.h" +#include "ufshcd-dwc.h" +#include "tc-dwc-g210.h" + +#include <linux/pci.h> +#include <linux/pm_runtime.h> + +/* Test Chip type expected values */ +#define TC_G210_20BIT 20 +#define TC_G210_40BIT 40 +#define TC_G210_INV 0 + +static int tc_type = TC_G210_INV; +module_param(tc_type, int, 0); +MODULE_PARM_DESC(tc_type, "Test Chip Type (20 = 20-bit, 40 = 40-bit)"); + +static int tc_dwc_g210_pci_suspend(struct device *dev) +{ + return ufshcd_system_suspend(dev_get_drvdata(dev)); +} + +static int tc_dwc_g210_pci_resume(struct device *dev) +{ + return ufshcd_system_resume(dev_get_drvdata(dev)); +} + +static int tc_dwc_g210_pci_runtime_suspend(struct device *dev) +{ + return ufshcd_runtime_suspend(dev_get_drvdata(dev)); +} + +static int tc_dwc_g210_pci_runtime_resume(struct device *dev) +{ + return ufshcd_runtime_resume(dev_get_drvdata(dev)); +} + +static int tc_dwc_g210_pci_runtime_idle(struct device *dev) +{ + return ufshcd_runtime_idle(dev_get_drvdata(dev)); +} + +/** + * struct ufs_hba_dwc_vops - UFS DWC specific variant operations + */ +static struct ufs_hba_variant_ops tc_dwc_g210_pci_hba_vops = { + .name = "tc-dwc-g210-pci", + .link_startup_notify = ufshcd_dwc_link_startup_notify, +}; + +/** + * tc_dwc_g210_pci_shutdown - main function to put the controller in reset state + * @pdev: pointer to PCI device handle + */ +static void tc_dwc_g210_pci_shutdown(struct pci_dev *pdev) +{ + ufshcd_shutdown((struct ufs_hba *)pci_get_drvdata(pdev)); +} + +/** + * tc_dwc_g210_pci_remove - de-allocate PCI/SCSI host and host memory space + * data structure memory + * @pdev - pointer to PCI handle + */ +static void tc_dwc_g210_pci_remove(struct pci_dev *pdev) +{ + struct ufs_hba *hba = pci_get_drvdata(pdev); + + pm_runtime_forbid(&pdev->dev); + pm_runtime_get_noresume(&pdev->dev); + ufshcd_remove(hba); +} + +/** + * tc_dwc_g210_pci_probe - probe routine of the driver + * @pdev: pointer to PCI device handle + * @id: PCI device id + * + * Returns 0 on success, non-zero value on failure + */ +static int +tc_dwc_g210_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct ufs_hba *hba; + void __iomem *mmio_base; + int err; + + /* Check Test Chip type and set the specific setup routine */ + if (tc_type == TC_G210_20BIT) { + tc_dwc_g210_pci_hba_vops.phy_initialization = + tc_dwc_g210_config_20_bit; + } else if (tc_type == TC_G210_40BIT) { + tc_dwc_g210_pci_hba_vops.phy_initialization = + tc_dwc_g210_config_40_bit; + } else { + dev_err(&pdev->dev, "test chip version not specified\n"); + return -EPERM; + } + + err = pcim_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "pcim_enable_device failed\n"); + return err; + } + + pci_set_master(pdev); + + err = pcim_iomap_regions(pdev, 1 << 0, UFSHCD); + if (err < 0) { + dev_err(&pdev->dev, "request and iomap failed\n"); + return err; + } + + mmio_base = pcim_iomap_table(pdev)[0]; + + err = ufshcd_alloc_host(&pdev->dev, &hba); + if (err) { + dev_err(&pdev->dev, "Allocation failed\n"); + return err; + } + + INIT_LIST_HEAD(&hba->clk_list_head); + + hba->vops = &tc_dwc_g210_pci_hba_vops; + + err = ufshcd_init(hba, mmio_base, pdev->irq); + if (err) { + dev_err(&pdev->dev, "Initialization failed\n"); + return err; + } + + pci_set_drvdata(pdev, hba); + pm_runtime_put_noidle(&pdev->dev); + pm_runtime_allow(&pdev->dev); + + return 0; +} + +static const struct dev_pm_ops tc_dwc_g210_pci_pm_ops = { + .suspend = tc_dwc_g210_pci_suspend, + .resume = tc_dwc_g210_pci_resume, + .runtime_suspend = tc_dwc_g210_pci_runtime_suspend, + 
.runtime_resume = tc_dwc_g210_pci_runtime_resume, + .runtime_idle = tc_dwc_g210_pci_runtime_idle, +}; + +static const struct pci_device_id tc_dwc_g210_pci_tbl[] = { + { PCI_VENDOR_ID_SYNOPSYS, 0xB101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_SYNOPSYS, 0xB102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { } /* terminate list */ +}; + +MODULE_DEVICE_TABLE(pci, tc_dwc_g210_pci_tbl); + +static struct pci_driver tc_dwc_g210_pci_driver = { + .name = "tc-dwc-g210-pci", + .id_table = tc_dwc_g210_pci_tbl, + .probe = tc_dwc_g210_pci_probe, + .remove = tc_dwc_g210_pci_remove, + .shutdown = tc_dwc_g210_pci_shutdown, + .driver = { + .pm = &tc_dwc_g210_pci_pm_ops + }, +}; + +module_pci_driver(tc_dwc_g210_pci_driver); + +MODULE_AUTHOR("Joao Pinto <Joao.Pinto@synopsys.com>"); +MODULE_DESCRIPTION("Synopsys Test Chip G210 PCI glue driver"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/scsi/ufs/tc-dwc-g210-pltfrm.c b/drivers/scsi/ufs/tc-dwc-g210-pltfrm.c new file mode 100644 index 000000000000..2d3f5270f875 --- /dev/null +++ b/drivers/scsi/ufs/tc-dwc-g210-pltfrm.c @@ -0,0 +1,113 @@ +/* + * Synopsys G210 Test Chip driver + * + * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com) + * + * Authors: Joao Pinto <jpinto@synopsys.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/of.h> +#include <linux/delay.h> + +#include "ufshcd-pltfrm.h" +#include "ufshcd-dwc.h" +#include "tc-dwc-g210.h" + +/** + * UFS DWC specific variant operations + */ +static struct ufs_hba_variant_ops tc_dwc_g210_20bit_pltfm_hba_vops = { + .name = "tc-dwc-g210-pltfm", + .link_startup_notify = ufshcd_dwc_link_startup_notify, + .phy_initialization = tc_dwc_g210_config_20_bit, +}; + +static struct ufs_hba_variant_ops tc_dwc_g210_40bit_pltfm_hba_vops = { + .name = "tc-dwc-g210-pltfm", + .link_startup_notify = ufshcd_dwc_link_startup_notify, + .phy_initialization = tc_dwc_g210_config_40_bit, +}; + +static const struct of_device_id tc_dwc_g210_pltfm_match[] = { + { + .compatible = "snps,g210-tc-6.00-20bit", + .data = &tc_dwc_g210_20bit_pltfm_hba_vops, + }, + { + .compatible = "snps,g210-tc-6.00-40bit", + .data = &tc_dwc_g210_40bit_pltfm_hba_vops, + }, + { }, +}; +MODULE_DEVICE_TABLE(of, tc_dwc_g210_pltfm_match); + +/** + * tc_dwc_g210_pltfm_probe() + * @pdev: pointer to platform device structure + * + */ +static int tc_dwc_g210_pltfm_probe(struct platform_device *pdev) +{ + int err; + const struct of_device_id *of_id; + struct ufs_hba_variant_ops *vops; + struct device *dev = &pdev->dev; + + of_id = of_match_node(tc_dwc_g210_pltfm_match, dev->of_node); + vops = (struct ufs_hba_variant_ops *)of_id->data; + + /* Perform generic probe */ + err = ufshcd_pltfrm_init(pdev, vops); + if (err) + dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err); + + return err; +} + +/** + * tc_dwc_g210_pltfm_remove() + * @pdev: pointer to platform device structure + * + */ +static int tc_dwc_g210_pltfm_remove(struct platform_device *pdev) +{ + struct ufs_hba *hba = platform_get_drvdata(pdev); + + pm_runtime_get_sync(&(pdev)->dev); + ufshcd_remove(hba); + + return 0; +} + +static const struct dev_pm_ops tc_dwc_g210_pltfm_pm_ops = { + .suspend = ufshcd_pltfrm_suspend, + .resume = ufshcd_pltfrm_resume, + .runtime_suspend = ufshcd_pltfrm_runtime_suspend, + .runtime_resume = 
ufshcd_pltfrm_runtime_resume, + .runtime_idle = ufshcd_pltfrm_runtime_idle, +}; + +static struct platform_driver tc_dwc_g210_pltfm_driver = { + .probe = tc_dwc_g210_pltfm_probe, + .remove = tc_dwc_g210_pltfm_remove, + .shutdown = ufshcd_pltfrm_shutdown, + .driver = { + .name = "tc-dwc-g210-pltfm", + .pm = &tc_dwc_g210_pltfm_pm_ops, + .of_match_table = of_match_ptr(tc_dwc_g210_pltfm_match), + }, +}; + +module_platform_driver(tc_dwc_g210_pltfm_driver); + +MODULE_ALIAS("platform:tc-dwc-g210-pltfm"); +MODULE_DESCRIPTION("Synopsys Test Chip G210 platform glue driver"); +MODULE_AUTHOR("Joao Pinto <Joao.Pinto@synopsys.com>"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/scsi/ufs/tc-dwc-g210.c b/drivers/scsi/ufs/tc-dwc-g210.c new file mode 100644 index 000000000000..70db6d999ca3 --- /dev/null +++ b/drivers/scsi/ufs/tc-dwc-g210.c @@ -0,0 +1,319 @@ +/* + * Synopsys G210 Test Chip driver + * + * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com) + * + * Authors: Joao Pinto <jpinto@synopsys.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include "ufshcd.h" +#include "unipro.h" + +#include "ufshcd-dwc.h" +#include "ufshci-dwc.h" + +/** + * tc_dwc_g210_setup_40bit_rmmi() + * This function configures Synopsys TC specific atributes (40-bit RMMI) + * @hba: Pointer to drivers structure + * + * Returns 0 on success or non-zero value on failure + */ +static int tc_dwc_g210_setup_40bit_rmmi(struct ufs_hba *hba) +{ + const struct ufshcd_dme_attr_val setup_attrs[] = { + { UIC_ARG_MIB(TX_GLOBALHIBERNATE), 0x00, DME_LOCAL }, + { UIC_ARG_MIB(REFCLKMODE), 0x01, DME_LOCAL }, + { UIC_ARG_MIB(CDIRECTCTRL6), 0x80, DME_LOCAL }, + { UIC_ARG_MIB(CBDIVFACTOR), 0x08, DME_LOCAL }, + { UIC_ARG_MIB(CBDCOCTRL5), 0x64, DME_LOCAL }, + { UIC_ARG_MIB(CBPRGTUNING), 0x09, DME_LOCAL }, + { UIC_ARG_MIB(RTOBSERVESELECT), 0x00, DME_LOCAL }, + { UIC_ARG_MIB_SEL(TX_REFCLKFREQ, SELIND_LN0_TX), 0x01, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(TX_CFGCLKFREQVAL, SELIND_LN0_TX), 0x19, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(CFGEXTRATTR, SELIND_LN0_TX), 0x14, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(DITHERCTRL2, SELIND_LN0_TX), 0xd6, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(RX_REFCLKFREQ, SELIND_LN0_RX), 0x01, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(RX_CFGCLKFREQVAL, SELIND_LN0_RX), 0x19, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(CFGWIDEINLN, SELIND_LN0_RX), 4, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(CFGRXCDR8, SELIND_LN0_RX), 0x80, + DME_LOCAL }, + { UIC_ARG_MIB(DIRECTCTRL10), 0x04, DME_LOCAL }, + { UIC_ARG_MIB(DIRECTCTRL19), 0x02, DME_LOCAL }, + { UIC_ARG_MIB_SEL(CFGRXCDR8, SELIND_LN0_RX), 0x80, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(ENARXDIRECTCFG4, SELIND_LN0_RX), 0x03, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(CFGRXOVR8, SELIND_LN0_RX), 0x16, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(RXDIRECTCTRL2, SELIND_LN0_RX), 0x42, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(ENARXDIRECTCFG3, SELIND_LN0_RX), 0xa4, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(RXCALCTRL, SELIND_LN0_RX), 0x01, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(ENARXDIRECTCFG2, SELIND_LN0_RX), 0x01, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(CFGRXOVR4, SELIND_LN0_RX), 0x28, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(RXSQCTRL, SELIND_LN0_RX), 0x1E, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(CFGRXOVR6, SELIND_LN0_RX), 0x2f, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(CFGRXOVR6, SELIND_LN0_RX), 0x2f, + DME_LOCAL }, + { UIC_ARG_MIB(CBPRGPLL2), 0x00, DME_LOCAL }, + }; + + return ufshcd_dwc_dme_set_attrs(hba, setup_attrs, + 
ARRAY_SIZE(setup_attrs)); +} + +/** + * tc_dwc_g210_setup_20bit_rmmi_lane0() + * This function configures Synopsys TC 20-bit RMMI Lane 0 + * @hba: Pointer to drivers structure + * + * Returns 0 on success or non-zero value on failure + */ +static int tc_dwc_g210_setup_20bit_rmmi_lane0(struct ufs_hba *hba) +{ + const struct ufshcd_dme_attr_val setup_attrs[] = { + { UIC_ARG_MIB_SEL(TX_REFCLKFREQ, SELIND_LN0_TX), 0x01, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(TX_CFGCLKFREQVAL, SELIND_LN0_TX), 0x19, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(RX_CFGCLKFREQVAL, SELIND_LN0_RX), 0x19, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(CFGEXTRATTR, SELIND_LN0_TX), 0x12, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(DITHERCTRL2, SELIND_LN0_TX), 0xd6, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(RX_REFCLKFREQ, SELIND_LN0_RX), 0x01, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(CFGWIDEINLN, SELIND_LN0_RX), 2, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(CFGRXCDR8, SELIND_LN0_RX), 0x80, + DME_LOCAL }, + { UIC_ARG_MIB(DIRECTCTRL10), 0x04, DME_LOCAL }, + { UIC_ARG_MIB(DIRECTCTRL19), 0x02, DME_LOCAL }, + { UIC_ARG_MIB_SEL(ENARXDIRECTCFG4, SELIND_LN0_RX), 0x03, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(CFGRXOVR8, SELIND_LN0_RX), 0x16, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(RXDIRECTCTRL2, SELIND_LN0_RX), 0x42, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(ENARXDIRECTCFG3, SELIND_LN0_RX), 0xa4, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(RXCALCTRL, SELIND_LN0_RX), 0x01, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(ENARXDIRECTCFG2, SELIND_LN0_RX), 0x01, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(CFGRXOVR4, SELIND_LN0_RX), 0x28, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(RXSQCTRL, SELIND_LN0_RX), 0x1E, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(CFGRXOVR6, SELIND_LN0_RX), 0x2f, + DME_LOCAL }, + { UIC_ARG_MIB(CBPRGPLL2), 0x00, DME_LOCAL }, + }; + + return ufshcd_dwc_dme_set_attrs(hba, setup_attrs, + ARRAY_SIZE(setup_attrs)); +} + +/** + * tc_dwc_g210_setup_20bit_rmmi_lane1() + * This function configures Synopsys TC 20-bit RMMI Lane 1 + * @hba: Pointer to drivers structure + * + * Returns 0 on success or non-zero value on failure + */ +static int tc_dwc_g210_setup_20bit_rmmi_lane1(struct ufs_hba *hba) +{ + int connected_rx_lanes = 0; + int connected_tx_lanes = 0; + int ret = 0; + + const struct ufshcd_dme_attr_val setup_tx_attrs[] = { + { UIC_ARG_MIB_SEL(TX_REFCLKFREQ, SELIND_LN1_TX), 0x0d, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(TX_CFGCLKFREQVAL, SELIND_LN1_TX), 0x19, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(CFGEXTRATTR, SELIND_LN1_TX), 0x12, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(DITHERCTRL2, SELIND_LN0_TX), 0xd6, + DME_LOCAL }, + }; + + const struct ufshcd_dme_attr_val setup_rx_attrs[] = { + { UIC_ARG_MIB_SEL(RX_REFCLKFREQ, SELIND_LN1_RX), 0x01, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(RX_CFGCLKFREQVAL, SELIND_LN1_RX), 0x19, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(CFGWIDEINLN, SELIND_LN1_RX), 2, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(CFGRXCDR8, SELIND_LN1_RX), 0x80, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(ENARXDIRECTCFG4, SELIND_LN1_RX), 0x03, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(CFGRXOVR8, SELIND_LN1_RX), 0x16, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(RXDIRECTCTRL2, SELIND_LN1_RX), 0x42, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(ENARXDIRECTCFG3, SELIND_LN1_RX), 0xa4, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(RXCALCTRL, SELIND_LN1_RX), 0x01, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(ENARXDIRECTCFG2, SELIND_LN1_RX), 0x01, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(CFGRXOVR4, SELIND_LN1_RX), 0x28, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(RXSQCTRL, SELIND_LN1_RX), 0x1E, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(CFGRXOVR6, SELIND_LN1_RX), 0x2f, + DME_LOCAL }, + }; + + /* Get the available lane count */ + ufshcd_dme_get(hba, 
UIC_ARG_MIB(PA_AVAILRXDATALANES), + &connected_rx_lanes); + ufshcd_dme_get(hba, UIC_ARG_MIB(PA_AVAILTXDATALANES), + &connected_tx_lanes); + + if (connected_tx_lanes == 2) { + + ret = ufshcd_dwc_dme_set_attrs(hba, setup_tx_attrs, + ARRAY_SIZE(setup_tx_attrs)); + + if (ret) + goto out; + } + + if (connected_rx_lanes == 2) { + ret = ufshcd_dwc_dme_set_attrs(hba, setup_rx_attrs, + ARRAY_SIZE(setup_rx_attrs)); + } + +out: + return ret; +} + +/** + * tc_dwc_g210_setup_20bit_rmmi() + * This function configures Synopsys TC specific atributes (20-bit RMMI) + * @hba: Pointer to drivers structure + * + * Returns 0 on success or non-zero value on failure + */ +static int tc_dwc_g210_setup_20bit_rmmi(struct ufs_hba *hba) +{ + int ret = 0; + + const struct ufshcd_dme_attr_val setup_attrs[] = { + { UIC_ARG_MIB(TX_GLOBALHIBERNATE), 0x00, DME_LOCAL }, + { UIC_ARG_MIB(REFCLKMODE), 0x01, DME_LOCAL }, + { UIC_ARG_MIB(CDIRECTCTRL6), 0xc0, DME_LOCAL }, + { UIC_ARG_MIB(CBDIVFACTOR), 0x44, DME_LOCAL }, + { UIC_ARG_MIB(CBDCOCTRL5), 0x64, DME_LOCAL }, + { UIC_ARG_MIB(CBPRGTUNING), 0x09, DME_LOCAL }, + { UIC_ARG_MIB(RTOBSERVESELECT), 0x00, DME_LOCAL }, + }; + + ret = ufshcd_dwc_dme_set_attrs(hba, setup_attrs, + ARRAY_SIZE(setup_attrs)); + if (ret) + goto out; + + /* Lane 0 configuration*/ + ret = tc_dwc_g210_setup_20bit_rmmi_lane0(hba); + if (ret) + goto out; + + /* Lane 1 configuration*/ + ret = tc_dwc_g210_setup_20bit_rmmi_lane1(hba); + if (ret) + goto out; + +out: + return ret; +} + +/** + * tc_dwc_g210_config_40_bit() + * This function configures Local (host) Synopsys 40-bit TC specific attributes + * + * @hba: Pointer to drivers structure + * + * Returns 0 on success non-zero value on failure + */ +int tc_dwc_g210_config_40_bit(struct ufs_hba *hba) +{ + int ret = 0; + + dev_info(hba->dev, "Configuring Test Chip 40-bit RMMI\n"); + ret = tc_dwc_g210_setup_40bit_rmmi(hba); + if (ret) { + dev_err(hba->dev, "Configuration failed\n"); + goto out; + } + + /* To write Shadow register bank to effective configuration block */ + ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_MPHYCFGUPDT), 0x01); + if (ret) + goto out; + + /* To configure Debug OMC */ + ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_DEBUGOMC), 0x01); + +out: + return ret; +} +EXPORT_SYMBOL(tc_dwc_g210_config_40_bit); + +/** + * tc_dwc_g210_config_20_bit() + * This function configures Local (host) Synopsys 20-bit TC specific attributes + * + * @hba: Pointer to drivers structure + * + * Returns 0 on success non-zero value on failure + */ +int tc_dwc_g210_config_20_bit(struct ufs_hba *hba) +{ + int ret = 0; + + dev_info(hba->dev, "Configuring Test Chip 20-bit RMMI\n"); + ret = tc_dwc_g210_setup_20bit_rmmi(hba); + if (ret) { + dev_err(hba->dev, "Configuration failed\n"); + goto out; + } + + /* To write Shadow register bank to effective configuration block */ + ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_MPHYCFGUPDT), 0x01); + if (ret) + goto out; + + /* To configure Debug OMC */ + ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_DEBUGOMC), 0x01); + +out: + return ret; +} +EXPORT_SYMBOL(tc_dwc_g210_config_20_bit); + +MODULE_AUTHOR("Joao Pinto <Joao.Pinto@synopsys.com>"); +MODULE_DESCRIPTION("Synopsys G210 Test Chip driver"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/scsi/ufs/tc-dwc-g210.h b/drivers/scsi/ufs/tc-dwc-g210.h new file mode 100644 index 000000000000..fb177db1227d --- /dev/null +++ b/drivers/scsi/ufs/tc-dwc-g210.h @@ -0,0 +1,19 @@ +/* + * Synopsys G210 Test Chip driver + * + * Copyright (C) 2015-2016 Synopsys, Inc. 
(www.synopsys.com) + * + * Authors: Joao Pinto <jpinto@synopsys.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef _TC_DWC_G210_H +#define _TC_DWC_G210_H + +int tc_dwc_g210_config_40_bit(struct ufs_hba *hba); +int tc_dwc_g210_config_20_bit(struct ufs_hba *hba); + +#endif /* End of Header */ diff --git a/drivers/scsi/ufs/ufshcd-dwc.c b/drivers/scsi/ufs/ufshcd-dwc.c new file mode 100644 index 000000000000..5fd16c72207f --- /dev/null +++ b/drivers/scsi/ufs/ufshcd-dwc.c @@ -0,0 +1,154 @@ +/* + * UFS Host driver for Synopsys Designware Core + * + * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com) + * + * Authors: Joao Pinto <jpinto@synopsys.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include "ufshcd.h" +#include "unipro.h" + +#include "ufshcd-dwc.h" +#include "ufshci-dwc.h" + +int ufshcd_dwc_dme_set_attrs(struct ufs_hba *hba, + const struct ufshcd_dme_attr_val *v, int n) +{ + int ret = 0; + int attr_node = 0; + + for (attr_node = 0; attr_node < n; attr_node++) { + ret = ufshcd_dme_set_attr(hba, v[attr_node].attr_sel, + ATTR_SET_NOR, v[attr_node].mib_val, v[attr_node].peer); + + if (ret) + return ret; + } + + return 0; +} +EXPORT_SYMBOL(ufshcd_dwc_dme_set_attrs); + +/** + * ufshcd_dwc_program_clk_div() + * This function programs the clk divider value. This value is needed to + * provide 1 microsecond tick to unipro layer. + * @hba: Private Structure pointer + * @divider_val: clock divider value to be programmed + * + */ +static void ufshcd_dwc_program_clk_div(struct ufs_hba *hba, u32 divider_val) +{ + ufshcd_writel(hba, divider_val, DWC_UFS_REG_HCLKDIV); +} + +/** + * ufshcd_dwc_link_is_up() + * Check if link is up + * @hba: private structure poitner + * + * Returns 0 on success, non-zero value on failure + */ +static int ufshcd_dwc_link_is_up(struct ufs_hba *hba) +{ + int dme_result = 0; + + ufshcd_dme_get(hba, UIC_ARG_MIB(VS_POWERSTATE), &dme_result); + + if (dme_result == UFSHCD_LINK_IS_UP) { + ufshcd_set_link_active(hba); + return 0; + } + + return 1; +} + +/** + * ufshcd_dwc_connection_setup() + * This function configures both the local side (host) and the peer side + * (device) unipro attributes to establish the connection to application/ + * cport. + * This function is not required if the hardware is properly configured to + * have this connection setup on reset. But invoking this function does no + * harm and should be fine even working with any ufs device. 
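 * For each side (local first, then peer) the attribute table below clears
 * T_CONNECTIONSTATE, programs the CPort attributes (device IDs, traffic
 * class, CPort flags and mode) and then sets T_CONNECTIONSTATE back to 1,
 * all through the same ufshcd_dwc_dme_set_attrs() helper.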
+ * + * @hba: pointer to drivers private data + * + * Returns 0 on success non-zero value on failure + */ +static int ufshcd_dwc_connection_setup(struct ufs_hba *hba) +{ + const struct ufshcd_dme_attr_val setup_attrs[] = { + { UIC_ARG_MIB(T_CONNECTIONSTATE), 0, DME_LOCAL }, + { UIC_ARG_MIB(N_DEVICEID), 0, DME_LOCAL }, + { UIC_ARG_MIB(N_DEVICEID_VALID), 0, DME_LOCAL }, + { UIC_ARG_MIB(T_PEERDEVICEID), 1, DME_LOCAL }, + { UIC_ARG_MIB(T_PEERCPORTID), 0, DME_LOCAL }, + { UIC_ARG_MIB(T_TRAFFICCLASS), 0, DME_LOCAL }, + { UIC_ARG_MIB(T_CPORTFLAGS), 0x6, DME_LOCAL }, + { UIC_ARG_MIB(T_CPORTMODE), 1, DME_LOCAL }, + { UIC_ARG_MIB(T_CONNECTIONSTATE), 1, DME_LOCAL }, + { UIC_ARG_MIB(T_CONNECTIONSTATE), 0, DME_PEER }, + { UIC_ARG_MIB(N_DEVICEID), 1, DME_PEER }, + { UIC_ARG_MIB(N_DEVICEID_VALID), 1, DME_PEER }, + { UIC_ARG_MIB(T_PEERDEVICEID), 1, DME_PEER }, + { UIC_ARG_MIB(T_PEERCPORTID), 0, DME_PEER }, + { UIC_ARG_MIB(T_TRAFFICCLASS), 0, DME_PEER }, + { UIC_ARG_MIB(T_CPORTFLAGS), 0x6, DME_PEER }, + { UIC_ARG_MIB(T_CPORTMODE), 1, DME_PEER }, + { UIC_ARG_MIB(T_CONNECTIONSTATE), 1, DME_PEER } + }; + + return ufshcd_dwc_dme_set_attrs(hba, setup_attrs, ARRAY_SIZE(setup_attrs)); +} + +/** + * ufshcd_dwc_link_startup_notify() + * UFS Host DWC specific link startup sequence + * @hba: private structure poitner + * @status: Callback notify status + * + * Returns 0 on success, non-zero value on failure + */ +int ufshcd_dwc_link_startup_notify(struct ufs_hba *hba, + enum ufs_notify_change_status status) +{ + int err = 0; + + if (status == PRE_CHANGE) { + ufshcd_dwc_program_clk_div(hba, DWC_UFS_REG_HCLKDIV_DIV_125); + + if (hba->vops->phy_initialization) { + err = hba->vops->phy_initialization(hba); + if (err) { + dev_err(hba->dev, "Phy setup failed (%d)\n", + err); + goto out; + } + } + } else { /* POST_CHANGE */ + err = ufshcd_dwc_link_is_up(hba); + if (err) { + dev_err(hba->dev, "Link is not up\n"); + goto out; + } + + err = ufshcd_dwc_connection_setup(hba); + if (err) + dev_err(hba->dev, "Connection setup failed (%d)\n", + err); + } + +out: + return err; +} +EXPORT_SYMBOL(ufshcd_dwc_link_startup_notify); + +MODULE_AUTHOR("Joao Pinto <Joao.Pinto@synopsys.com>"); +MODULE_DESCRIPTION("UFS Host driver for Synopsys Designware Core"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/scsi/ufs/ufshcd-dwc.h b/drivers/scsi/ufs/ufshcd-dwc.h new file mode 100644 index 000000000000..c8be295e0ebe --- /dev/null +++ b/drivers/scsi/ufs/ufshcd-dwc.h @@ -0,0 +1,26 @@ +/* + * UFS Host driver for Synopsys Designware Core + * + * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com) + * + * Authors: Joao Pinto <jpinto@synopsys.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef _UFSHCD_DWC_H +#define _UFSHCD_DWC_H + +struct ufshcd_dme_attr_val { + u32 attr_sel; + u32 mib_val; + u8 peer; +}; + +int ufshcd_dwc_link_startup_notify(struct ufs_hba *hba, + enum ufs_notify_change_status status); +int ufshcd_dwc_dme_set_attrs(struct ufs_hba *hba, + const struct ufshcd_dme_attr_val *v, int n); +#endif /* End of Header */ diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c index 718f12e09885..db53f38da864 100644 --- a/drivers/scsi/ufs/ufshcd-pltfrm.c +++ b/drivers/scsi/ufs/ufshcd-pltfrm.c @@ -372,6 +372,6 @@ EXPORT_SYMBOL_GPL(ufshcd_pltfrm_init); MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>"); MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>"); -MODULE_DESCRIPTION("UFS host controller Pltform bus based glue driver"); +MODULE_DESCRIPTION("UFS host controller Platform bus based glue driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(UFSHCD_DRIVER_VERSION); diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index f8fa72c31a9d..f08d41a2d70b 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -1173,7 +1173,7 @@ static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs) * @cmd_dir: requests data direction */ static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp, - u32 *upiu_flags, enum dma_data_direction cmd_dir) + u32 *upiu_flags, enum dma_data_direction cmd_dir) { struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr; u32 data_direction; @@ -1299,47 +1299,55 @@ static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp) } /** - * ufshcd_compose_upiu - form UFS Protocol Information Unit(UPIU) + * ufshcd_comp_devman_upiu - UFS Protocol Information Unit(UPIU) + * for Device Management Purposes * @hba - per adapter instance * @lrb - pointer to local reference block */ -static int ufshcd_compose_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) +static int ufshcd_comp_devman_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) { u32 upiu_flags; int ret = 0; - switch (lrbp->command_type) { - case UTP_CMD_TYPE_SCSI: - if (likely(lrbp->cmd)) { - ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, - lrbp->cmd->sc_data_direction); - ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags); - } else { - ret = -EINVAL; - } - break; - case UTP_CMD_TYPE_DEV_MANAGE: - ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE); - if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY) - ufshcd_prepare_utp_query_req_upiu( - hba, lrbp, upiu_flags); - else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP) - ufshcd_prepare_utp_nop_upiu(lrbp); - else - ret = -EINVAL; - break; - case UTP_CMD_TYPE_UFS: - /* For UFS native command implementation */ - ret = -ENOTSUPP; - dev_err(hba->dev, "%s: UFS native command are not supported\n", - __func__); - break; - default: - ret = -ENOTSUPP; - dev_err(hba->dev, "%s: unknown command type: 0x%x\n", - __func__, lrbp->command_type); - break; - } /* end of switch */ + if (hba->ufs_version == UFSHCI_VERSION_20) + lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE; + else + lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE; + + ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE); + if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY) + ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags); + else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP) + ufshcd_prepare_utp_nop_upiu(lrbp); + else + ret = -EINVAL; + + return ret; +} + +/** + * ufshcd_comp_scsi_upiu - UFS Protocol Information Unit(UPIU) + * for SCSI Purposes + * @hba - per adapter instance + * @lrb - pointer to local 
reference block + */ +static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) +{ + u32 upiu_flags; + int ret = 0; + + if (hba->ufs_version == UFSHCI_VERSION_20) + lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE; + else + lrbp->command_type = UTP_CMD_TYPE_SCSI; + + if (likely(lrbp->cmd)) { + ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, + lrbp->cmd->sc_data_direction); + ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags); + } else { + ret = -EINVAL; + } return ret; } @@ -1451,10 +1459,9 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) lrbp->task_tag = tag; lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun); lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba) ? true : false; - lrbp->command_type = UTP_CMD_TYPE_SCSI; - /* form UPIU before issuing the command */ - ufshcd_compose_upiu(hba, lrbp); + ufshcd_comp_scsi_upiu(hba, lrbp); + err = ufshcd_map_sg(lrbp); if (err) { lrbp->cmd = NULL; @@ -1479,11 +1486,10 @@ static int ufshcd_compose_dev_cmd(struct ufs_hba *hba, lrbp->sense_buffer = NULL; lrbp->task_tag = tag; lrbp->lun = 0; /* device management cmd is not specific to any LUN */ - lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE; lrbp->intr_cmd = true; /* No interrupt aggregation */ hba->dev_cmd.type = cmd_type; - return ufshcd_compose_upiu(hba, lrbp); + return ufshcd_comp_devman_upiu(hba, lrbp); } static int @@ -2131,7 +2137,7 @@ int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, u8 *buf, buff_ascii = kmalloc(ascii_len, GFP_KERNEL); if (!buff_ascii) { err = -ENOMEM; - goto out_free_buff; + goto out; } /* @@ -2150,7 +2156,6 @@ int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, u8 *buf, size - QUERY_DESC_HDR_SIZE); memcpy(buf + QUERY_DESC_HDR_SIZE, buff_ascii, ascii_len); buf[QUERY_DESC_LENGTH_OFFSET] = ascii_len + QUERY_DESC_HDR_SIZE; -out_free_buff: kfree(buff_ascii); } out: @@ -3539,7 +3544,8 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba, /* Do not touch lrbp after scsi done */ cmd->scsi_done(cmd); __ufshcd_release(hba); - } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE) { + } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE || + lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) { if (hba->dev_cmd.complete) complete(hba->dev_cmd.complete); } diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index 4bb65669f052..430bef111293 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -264,6 +264,7 @@ struct ufs_pwr_mode_info { * @suspend: called during host controller PM callback * @resume: called during host controller PM callback * @dbg_register_dump: used to dump controller debug information + * @phy_initialization: used to initialize phys */ struct ufs_hba_variant_ops { const char *name; @@ -285,6 +286,7 @@ struct ufs_hba_variant_ops { int (*suspend)(struct ufs_hba *, enum ufs_pm_op); int (*resume)(struct ufs_hba *, enum ufs_pm_op); void (*dbg_register_dump)(struct ufs_hba *hba); + int (*phy_initialization)(struct ufs_hba *); }; /* clock gating state */ @@ -567,11 +569,16 @@ static inline bool ufshcd_can_autobkops_during_suspend(struct ufs_hba *hba) static inline bool ufshcd_is_intr_aggr_allowed(struct ufs_hba *hba) { +/* DWC UFS Core has the Interrupt aggregation feature but is not detectable*/ +#ifndef CONFIG_SCSI_UFS_DWC if ((hba->caps & UFSHCD_CAP_INTR_AGGR) && !(hba->quirks & UFSHCD_QUIRK_BROKEN_INTR_AGGR)) return true; else return false; +#else +return true; +#endif } #define ufshcd_writel(hba, val, reg) \ diff --git 
a/drivers/scsi/ufs/ufshci-dwc.h b/drivers/scsi/ufs/ufshci-dwc.h new file mode 100644 index 000000000000..ca341fece310 --- /dev/null +++ b/drivers/scsi/ufs/ufshci-dwc.h @@ -0,0 +1,36 @@ +/* + * UFS Host driver for Synopsys Designware Core + * + * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com) + * + * Authors: Joao Pinto <jpinto@synopsys.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef _UFSHCI_DWC_H +#define _UFSHCI_DWC_H + +/* DWC HC UFSHCI specific Registers */ +enum dwc_specific_registers { + DWC_UFS_REG_HCLKDIV = 0xFC, +}; + +/* Clock Divider Values: Hex equivalent of frequency in MHz */ +enum clk_div_values { + DWC_UFS_REG_HCLKDIV_DIV_62_5 = 0x3e, + DWC_UFS_REG_HCLKDIV_DIV_125 = 0x7d, + DWC_UFS_REG_HCLKDIV_DIV_200 = 0xc8, +}; + +/* Selector Index */ +enum selector_index { + SELIND_LN0_TX = 0x00, + SELIND_LN1_TX = 0x01, + SELIND_LN0_RX = 0x04, + SELIND_LN1_RX = 0x05, +}; + +#endif /* End of Header */ diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h index 4cb1cc63f1a1..9599741ff606 100644 --- a/drivers/scsi/ufs/ufshci.h +++ b/drivers/scsi/ufs/ufshci.h @@ -220,6 +220,12 @@ enum { #define UIC_ARG_ATTR_TYPE(t) (((t) & 0xFF) << 16) #define UIC_GET_ATTR_ID(v) (((v) >> 16) & 0xFFFF) +/* Link Status*/ +enum link_status { + UFSHCD_LINK_IS_DOWN = 1, + UFSHCD_LINK_IS_UP = 2, +}; + /* UIC Commands */ enum uic_cmd_dme { UIC_CMD_DME_GET = 0x01, @@ -279,6 +285,11 @@ enum { UTP_CMD_TYPE_DEV_MANAGE = 0x2, }; +/* To accommodate UFS2.0 required Command type */ +enum { + UTP_CMD_TYPE_UFS_STORAGE = 0x1, +}; + enum { UTP_SCSI_COMMAND = 0x00000000, UTP_NATIVE_UFS_COMMAND = 0x10000000, diff --git a/drivers/scsi/ufs/unipro.h b/drivers/scsi/ufs/unipro.h index e2854e45f8d3..eff8b5675575 100644 --- a/drivers/scsi/ufs/unipro.h +++ b/drivers/scsi/ufs/unipro.h @@ -36,6 +36,10 @@ #define TX_LCC_SEQUENCER 0x0032 #define TX_MIN_ACTIVATETIME 0x0033 #define TX_PWM_G6_G7_SYNC_LENGTH 0x0034 +#define TX_REFCLKFREQ 0x00EB +#define TX_CFGCLKFREQVAL 0x00EC +#define CFGEXTRATTR 0x00F0 +#define DITHERCTRL2 0x00F1 /* * M-RX Configuration Attributes @@ -51,10 +55,40 @@ #define RX_TERMINATION_FORCE_ENABLE 0x0089 #define RX_MIN_ACTIVATETIME_CAPABILITY 0x008F #define RX_HIBERN8TIME_CAPABILITY 0x0092 +#define RX_REFCLKFREQ 0x00EB +#define RX_CFGCLKFREQVAL 0x00EC +#define CFGWIDEINLN 0x00F0 +#define CFGRXCDR8 0x00BA +#define ENARXDIRECTCFG4 0x00F2 +#define CFGRXOVR8 0x00BD +#define RXDIRECTCTRL2 0x00C7 +#define ENARXDIRECTCFG3 0x00F3 +#define RXCALCTRL 0x00B4 +#define ENARXDIRECTCFG2 0x00F4 +#define CFGRXOVR4 0x00E9 +#define RXSQCTRL 0x00B5 +#define CFGRXOVR6 0x00BF #define is_mphy_tx_attr(attr) (attr < RX_MODE) #define RX_MIN_ACTIVATETIME_UNIT_US 100 #define HIBERN8TIME_UNIT_US 100 + +/* + * Common Block Attributes + */ +#define TX_GLOBALHIBERNATE UNIPRO_CB_OFFSET(0x002B) +#define REFCLKMODE UNIPRO_CB_OFFSET(0x00BF) +#define DIRECTCTRL19 UNIPRO_CB_OFFSET(0x00CD) +#define DIRECTCTRL10 UNIPRO_CB_OFFSET(0x00E6) +#define CDIRECTCTRL6 UNIPRO_CB_OFFSET(0x00EA) +#define RTOBSERVESELECT UNIPRO_CB_OFFSET(0x00F0) +#define CBDIVFACTOR UNIPRO_CB_OFFSET(0x00F1) +#define CBDCOCTRL5 UNIPRO_CB_OFFSET(0x00F3) +#define CBPRGPLL2 UNIPRO_CB_OFFSET(0x00F8) +#define CBPRGTUNING UNIPRO_CB_OFFSET(0x00FB) + +#define UNIPRO_CB_OFFSET(x) (0x8000 | x) + /* * PHY Adpater attributes */ @@ -119,6 +153,11 @@ #define PA_TACTIVATE_TIME_UNIT_US 10 #define PA_HIBERN8_TIME_UNIT_US 
100 +/*Other attributes*/ +#define VS_MPHYCFGUPDT 0xD085 +#define VS_DEBUGOMC 0xD09E +#define VS_POWERSTATE 0xD083 + /* PHY Adapter Protocol Constants */ #define PA_MAXDATALANES 4 diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c index 6164634aff18..4a0d3cdc607c 100644 --- a/drivers/scsi/vmw_pvscsi.c +++ b/drivers/scsi/vmw_pvscsi.c @@ -17,7 +17,7 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * - * Maintained by: Arvind Kumar <arvindkumar@vmware.com> + * Maintained by: Jim Gill <jgill@vmware.com> * */ diff --git a/drivers/scsi/vmw_pvscsi.h b/drivers/scsi/vmw_pvscsi.h index 12712c92f37a..c097d2ccbde3 100644 --- a/drivers/scsi/vmw_pvscsi.h +++ b/drivers/scsi/vmw_pvscsi.h @@ -17,7 +17,7 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * - * Maintained by: Arvind Kumar <arvindkumar@vmware.com> + * Maintained by: Jim Gill <jgill@vmware.com> * */ diff --git a/drivers/scsi/wd7000.c b/drivers/scsi/wd7000.c index 0c0f17b9a3eb..409f959845c4 100644 --- a/drivers/scsi/wd7000.c +++ b/drivers/scsi/wd7000.c @@ -192,7 +192,7 @@ #ifdef WD7000_DEBUG #define dprintk printk #else -#define dprintk(format,args...) +#define dprintk no_printk #endif /* @@ -1591,8 +1591,8 @@ static int wd7000_biosparam(struct scsi_device *sdev, { char b[BDEVNAME_SIZE]; - dprintk("wd7000_biosparam: dev=%s, size=%d, ", - bdevname(bdev, b), capacity); + dprintk("wd7000_biosparam: dev=%s, size=%llu, ", + bdevname(bdev, b), (u64)capacity); (void)b; /* unused var warning? */ /* |
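One note on the wd7000 hunk at the end of this section: switching the non-debug dprintk from an empty macro to no_printk keeps the format string and its arguments visible to the compiler, so %-specifier mismatches are caught and the arguments count as used even when WD7000_DEBUG is off. The pattern, as the hunk spells it:

#include <linux/printk.h>

#ifdef WD7000_DEBUG
#define dprintk printk
#else
#define dprintk no_printk	/* no output, but arguments are still format-checked */
#endif

With that in place, the (u64) cast added to the capacity argument in wd7000_biosparam() is checked against the %llu specifier in every build, and the (void)b workaround for the bdevname() buffer is arguably no longer needed, since b is consumed by the no_printk() call.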