author | Linus Torvalds | 2013-09-03 15:48:06 -0700
---|---|---
committer | Linus Torvalds | 2013-09-03 15:48:06 -0700
commit | f66c83d059d1ed90968caa81d401f160912b063a (patch)
tree | 8558803eadc5c29038de16d88b02b4f6176850ac /drivers/scsi
parent | d472d9d98b463dd7a04f2bcdeafe4261686ce6ab (diff)
parent | 1f962ebcdfa15cede59e9edb299d1330949eec92 (diff)
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull first round of SCSI updates from James Bottomley:
"This patch set is a set of driver updates (ufs, zfcp, lpfc, mpt2/3sas,
qla4xxx, qla2xxx [adding support for ISP8044 + other things]).
We also have a new driver: esas2r, which has a number of static checker
problems, but which I expect to resolve over the -rc course of 3.12
under the new driver exception.
We also have the error return updates that were discussed at LSF"
* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (118 commits)
[SCSI] sg: push file descriptor list locking down to per-device locking
[SCSI] sg: checking sdp->detached isn't protected when open
[SCSI] sg: no need sg_open_exclusive_lock
[SCSI] sg: use rwsem to solve race during exclusive open
[SCSI] scsi_debug: fix logical block provisioning support when unmap_alignment != 0
[SCSI] scsi_debug: fix endianness bug in sdebug_build_parts()
[SCSI] qla2xxx: Update the driver version to 8.06.00.08-k.
[SCSI] qla2xxx: print MAC via %pMR.
[SCSI] qla2xxx: Correction to message ids.
[SCSI] qla2xxx: Correctly print out/in mailbox registers.
[SCSI] qla2xxx: Add a new interface to update versions.
[SCSI] qla2xxx: Move queue depth ramp down message to i/o debug level.
[SCSI] qla2xxx: Select link initialization option bits from current operating mode.
[SCSI] qla2xxx: Add loopback IDC-TIME-EXTEND aen handling support.
[SCSI] qla2xxx: Set default critical temperature value in cases when ISPFX00 firmware doesn't provide it
[SCSI] qla2xxx: QLAFX00 make over temperature AEN handling informational, add log for normal temperature AEN
[SCSI] qla2xxx: Correct Interrupt Register offset for ISPFX00
[SCSI] qla2xxx: Remove handling of Shutdown Requested AEN from qlafx00_process_aen().
[SCSI] qla2xxx: Send all AENs for ISPFx00 to above layers.
[SCSI] qla2xxx: Add changes in initialization for ISPFX00 cards with BIOS
...
Diffstat (limited to 'drivers/scsi')
127 files changed, 24360 insertions, 1319 deletions
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 92ff027746f2..fe25677a5511 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -601,6 +601,7 @@ config SCSI_ARCMSR
 	  To compile this driver as a module, choose M here: the
 	  module will be called arcmsr (modprobe arcmsr).
 
+source "drivers/scsi/esas2r/Kconfig"
 source "drivers/scsi/megaraid/Kconfig.megaraid"
 source "drivers/scsi/mpt2sas/Kconfig"
 source "drivers/scsi/mpt3sas/Kconfig"
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index b607ba4f5630..149bb6bf1849 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -141,6 +141,7 @@ obj-$(CONFIG_SCSI_CXGB3_ISCSI)	+= libiscsi.o libiscsi_tcp.o cxgbi/
 obj-$(CONFIG_SCSI_CXGB4_ISCSI)	+= libiscsi.o libiscsi_tcp.o cxgbi/
 obj-$(CONFIG_SCSI_BNX2_ISCSI)	+= libiscsi.o bnx2i/
 obj-$(CONFIG_BE2ISCSI)		+= libiscsi.o be2iscsi/
+obj-$(CONFIG_SCSI_ESAS2R)	+= esas2r/
 obj-$(CONFIG_SCSI_PMCRAID)	+= pmcraid.o
 obj-$(CONFIG_SCSI_VIRTIO)	+= virtio_scsi.o
 obj-$(CONFIG_VMWARE_PVSCSI)	+= vmw_pvscsi.o
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index 9611195d6703..f8ca7becacca 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -63,9 +63,9 @@ int max_rport_logins = BFA_FCS_MAX_RPORT_LOGINS;
 u32	bfi_image_cb_size, bfi_image_ct_size, bfi_image_ct2_size;
 u32	*bfi_image_cb, *bfi_image_ct, *bfi_image_ct2;
 
-#define BFAD_FW_FILE_CB		"cbfw-3.2.1.0.bin"
-#define BFAD_FW_FILE_CT		"ctfw-3.2.1.0.bin"
-#define BFAD_FW_FILE_CT2	"ct2fw-3.2.1.0.bin"
+#define BFAD_FW_FILE_CB		"cbfw-3.2.1.1.bin"
+#define BFAD_FW_FILE_CT		"ctfw-3.2.1.1.bin"
+#define BFAD_FW_FILE_CT2	"ct2fw-3.2.1.1.bin"
 
 static u32 *bfad_load_fwimg(struct pci_dev *pdev);
 static void bfad_free_fwimg(void);
diff --git a/drivers/scsi/bnx2i/57xx_iscsi_constants.h b/drivers/scsi/bnx2i/57xx_iscsi_constants.h
index 25093a04123b..3d33767f2f2c 100644
--- a/drivers/scsi/bnx2i/57xx_iscsi_constants.h
+++ b/drivers/scsi/bnx2i/57xx_iscsi_constants.h
@@ -1,6 +1,6 @@
 /* 57xx_iscsi_constants.h: Broadcom NetXtreme II iSCSI HSI
  *
- * Copyright (c) 2006 - 2012 Broadcom Corporation
+ * Copyright (c) 2006 - 2013 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
diff --git a/drivers/scsi/bnx2i/57xx_iscsi_hsi.h b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
index f2db5fe7bdc2..37049e433c9e 100644
--- a/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
+++ b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
@@ -1,6 +1,6 @@
 /* 57xx_iscsi_hsi.h: Broadcom NetXtreme II iSCSI HSI.
  *
- * Copyright (c) 2006 - 2012 Broadcom Corporation
+ * Copyright (c) 2006 - 2013 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h
index f109e3b073c3..6940f0930a84 100644
--- a/drivers/scsi/bnx2i/bnx2i.h
+++ b/drivers/scsi/bnx2i/bnx2i.h
@@ -1,6 +1,6 @@
 /* bnx2i.h: Broadcom NetXtreme II iSCSI driver.
  *
- * Copyright (c) 2006 - 2012 Broadcom Corporation
+ * Copyright (c) 2006 - 2013 Broadcom Corporation
  * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved.
  * Copyright (c) 2007, 2008 Mike Christie
  *
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index a28b03e5a5f6..af3e675d4d48 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -1,6 +1,6 @@
 /* bnx2i_hwi.c: Broadcom NetXtreme II iSCSI driver.
  *
- * Copyright (c) 2006 - 2012 Broadcom Corporation
+ * Copyright (c) 2006 - 2013 Broadcom Corporation
  * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved.
  * Copyright (c) 2007, 2008 Mike Christie
  *
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
index 50fef6963a81..b6f6f436777b 100644
--- a/drivers/scsi/bnx2i/bnx2i_init.c
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -1,6 +1,6 @@
 /* bnx2i.c: Broadcom NetXtreme II iSCSI driver.
  *
- * Copyright (c) 2006 - 2012 Broadcom Corporation
+ * Copyright (c) 2006 - 2013 Broadcom Corporation
  * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved.
  * Copyright (c) 2007, 2008 Mike Christie
  *
@@ -18,8 +18,8 @@ static struct list_head adapter_list = LIST_HEAD_INIT(adapter_list);
 static u32 adapter_count;
 
 #define DRV_MODULE_NAME		"bnx2i"
-#define DRV_MODULE_VERSION	"2.7.2.2"
-#define DRV_MODULE_RELDATE	"Apr 25, 2012"
+#define DRV_MODULE_VERSION	"2.7.6.2"
+#define DRV_MODULE_RELDATE	"Jun 06, 2013"
 
 static char version[] =
 		"Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index 0056e47bd56e..fabeb88602ac 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -1,7 +1,7 @@
 /*
  * bnx2i_iscsi.c: Broadcom NetXtreme II iSCSI driver.
  *
- * Copyright (c) 2006 - 2012 Broadcom Corporation
+ * Copyright (c) 2006 - 2013 Broadcom Corporation
  * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved.
  * Copyright (c) 2007, 2008 Mike Christie
  *
diff --git a/drivers/scsi/bnx2i/bnx2i_sysfs.c b/drivers/scsi/bnx2i/bnx2i_sysfs.c
index c61cf7a43658..a0a3d9fe61fe 100644
--- a/drivers/scsi/bnx2i/bnx2i_sysfs.c
+++ b/drivers/scsi/bnx2i/bnx2i_sysfs.c
@@ -1,6 +1,6 @@
 /* bnx2i_sysfs.c: Broadcom NetXtreme II iSCSI driver.
  *
- * Copyright (c) 2004 - 2012 Broadcom Corporation
+ * Copyright (c) 2004 - 2013 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
diff --git a/drivers/scsi/eata_pio.c b/drivers/scsi/eata_pio.c
index 356def44ce58..1663173cdb91 100644
--- a/drivers/scsi/eata_pio.c
+++ b/drivers/scsi/eata_pio.c
@@ -919,7 +919,7 @@ static int eata_pio_detect(struct scsi_host_template *tpnt)
 	find_pio_EISA(&gc);
 	find_pio_ISA(&gc);
 
-	for (i = 0; i <= MAXIRQ; i++)
+	for (i = 0; i < MAXIRQ; i++)
 		if (reg_IRQ[i])
 			request_irq(i, do_eata_pio_int_handler, IRQF_DISABLED, "EATA-PIO", NULL);
 
diff --git a/drivers/scsi/esas2r/Kconfig b/drivers/scsi/esas2r/Kconfig
new file mode 100644
index 000000000000..78fdbfd9b4b7
--- /dev/null
+++ b/drivers/scsi/esas2r/Kconfig
@@ -0,0 +1,5 @@
+config SCSI_ESAS2R
+	tristate "ATTO Technology's ExpressSAS RAID adapter driver"
+	depends on PCI && SCSI
+	---help---
+	  This driver supports the ATTO ExpressSAS R6xx SAS/SATA RAID controllers.
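A note on the eata_pio hunk above: the fix implies that reg_IRQ has MAXIRQ elements, so its valid indices run 0 through MAXIRQ - 1, and the old "i <= MAXIRQ" loop read one element past the end of the array. A minimal user-space C sketch of the same off-by-one pattern (the array name matches the driver's, but the size and surrounding code here are illustrative, not the driver's actual declarations):

    #include <stdio.h>

    #define MAXIRQ 16            /* illustrative size; the driver defines its own */

    static int reg_IRQ[MAXIRQ];  /* valid indices: 0 .. MAXIRQ - 1 */

    int main(void)
    {
            int i, claimed = 0;

            /* Looping with "i <= MAXIRQ" would also read reg_IRQ[MAXIRQ],
             * one element past the end of the array. */
            for (i = 0; i < MAXIRQ; i++)
                    if (reg_IRQ[i])
                            claimed++;

            printf("%d IRQs claimed\n", claimed);
            return 0;
    }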
diff --git a/drivers/scsi/esas2r/Makefile b/drivers/scsi/esas2r/Makefile
new file mode 100644
index 000000000000..c77160b8c8bd
--- /dev/null
+++ b/drivers/scsi/esas2r/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_SCSI_ESAS2R)	+= esas2r.o
+
+esas2r-objs := esas2r_log.o esas2r_disc.o esas2r_flash.o esas2r_init.o \
+	esas2r_int.o esas2r_io.o esas2r_ioctl.o esas2r_targdb.o \
+	esas2r_vda.o esas2r_main.o
diff --git a/drivers/scsi/esas2r/atioctl.h b/drivers/scsi/esas2r/atioctl.h
new file mode 100644
index 000000000000..4aca3d52c851
--- /dev/null
+++ b/drivers/scsi/esas2r/atioctl.h
@@ -0,0 +1,1254 @@
+/*  linux/drivers/scsi/esas2r/atioctl.h
+ *      ATTO IOCTL Handling
+ *
+ *  Copyright (c) 2001-2013 ATTO Technology, Inc.
+ *  (mailto:linuxdrivers@attotech.com)
+ */
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  NO WARRANTY
+ *  THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ *  CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ *  LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ *  MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ *  solely responsible for determining the appropriateness of using and
+ *  distributing the Program and assumes all risks associated with its
+ *  exercise of rights under this Agreement, including but not limited to
+ *  the risks and costs of program errors, damage to or loss of data,
+ *  programs or equipment, and unavailability or interruption of operations.
+ *
+ *  DISCLAIMER OF LIABILITY
+ *  NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ *  DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ *  DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ *  ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ *  TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ *  USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ *  HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+
+#include "atvda.h"
+
+#ifndef ATIOCTL_H
+#define ATIOCTL_H
+
+#define EXPRESS_IOCTL_SIGNATURE "Express"
+#define EXPRESS_IOCTL_SIGNATURE_SIZE 8
+
+/* structure definitions for IOCTls */
+
+struct __packed atto_express_ioctl_header {
+	u8 signature[EXPRESS_IOCTL_SIGNATURE_SIZE];
+	u8 return_code;
+
+#define IOCTL_SUCCESS             0
+#define IOCTL_ERR_INVCMD          101
+#define IOCTL_INIT_FAILED         102
+#define IOCTL_NOT_IMPLEMENTED     103
+#define IOCTL_BAD_CHANNEL         104
+#define IOCTL_TARGET_OVERRUN      105
+#define IOCTL_TARGET_NOT_ENABLED  106
+#define IOCTL_BAD_FLASH_IMGTYPE   107
+#define IOCTL_OUT_OF_RESOURCES    108
+#define IOCTL_GENERAL_ERROR       109
+#define IOCTL_INVALID_PARAM       110
+
+	u8 channel;
+	u8 retries;
+	u8 pad[5];
+};
+
+/*
+ * NOTE - if channel == 0xFF, the request is
+ *        handled on the adapter it came in on.
+ */
+#define MAX_NODE_NAMES 256
+
+struct __packed atto_firmware_rw_request {
+	u8 function;
+	#define FUNC_FW_DOWNLOAD 0x09
+	#define FUNC_FW_UPLOAD   0x12
+
+	u8 img_type;
+	#define FW_IMG_FW     0x01
+	#define FW_IMG_BIOS   0x02
+	#define FW_IMG_NVR    0x03
+	#define FW_IMG_RAW    0x04
+	#define FW_IMG_FM_API 0x05
+	#define FW_IMG_FS_API 0x06
+
+	u8 pad[2];
+	u32 img_offset;
+	u32 img_size;
+	u8 image[0x80000];
+};
+
+struct __packed atto_param_rw_request {
+	u16 code;
+	char data_buffer[512];
+};
+
+#define MAX_CHANNEL 256
+
+struct __packed atto_channel_list {
+	u32 num_channels;
+	u8 channel[MAX_CHANNEL];
+};
+
+struct __packed atto_channel_info {
+	u8 major_rev;
+	u8 minor_rev;
+	u8 IRQ;
+	u8 revision_id;
+	u8 pci_bus;
+	u8 pci_dev_func;
+	u8 core_rev;
+	u8 host_no;
+	u16 device_id;
+	u16 vendor_id;
+	u16 ven_dev_id;
+	u8 pad[3];
+	u32 hbaapi_rev;
+};
+
+/*
+ * CSMI control codes
+ * class independent
+ */
+#define CSMI_CC_GET_DRVR_INFO   1
+#define CSMI_CC_GET_CNTLR_CFG   2
+#define CSMI_CC_GET_CNTLR_STS   3
+#define CSMI_CC_FW_DOWNLOAD     4
+
+/* RAID class */
+#define CSMI_CC_GET_RAID_INFO   10
+#define CSMI_CC_GET_RAID_CFG    11
+
+/* HBA class */
+#define CSMI_CC_GET_PHY_INFO    20
+#define CSMI_CC_SET_PHY_INFO    21
+#define CSMI_CC_GET_LINK_ERRORS 22
+#define CSMI_CC_SMP_PASSTHRU    23
+#define CSMI_CC_SSP_PASSTHRU    24
+#define CSMI_CC_STP_PASSTHRU    25
+#define CSMI_CC_GET_SATA_SIG    26
+#define CSMI_CC_GET_SCSI_ADDR   27
+#define CSMI_CC_GET_DEV_ADDR    28
+#define CSMI_CC_TASK_MGT        29
+#define CSMI_CC_GET_CONN_INFO   30
+
+/* PHY class */
+#define CSMI_CC_PHY_CTRL        60
+
+/*
+ * CSMI status codes
+ * class independent
+ */
+#define CSMI_STS_SUCCESS         0
+#define CSMI_STS_FAILED          1
+#define CSMI_STS_BAD_CTRL_CODE   2
+#define CSMI_STS_INV_PARAM       3
+#define CSMI_STS_WRITE_ATTEMPTED 4
+
+/* RAID class */
+#define CSMI_STS_INV_RAID_SET    1000
+
+/* HBA class */
+#define CSMI_STS_PHY_CHANGED     CSMI_STS_SUCCESS
+#define CSMI_STS_PHY_UNCHANGEABLE 2000 +#define CSMI_STS_INV_LINK_RATE 2001 +#define CSMI_STS_INV_PHY 2002 +#define CSMI_STS_INV_PHY_FOR_PORT 2003 +#define CSMI_STS_PHY_UNSELECTABLE 2004 +#define CSMI_STS_SELECT_PHY_OR_PORT 2005 +#define CSMI_STS_INV_PORT 2006 +#define CSMI_STS_PORT_UNSELECTABLE 2007 +#define CSMI_STS_CONNECTION_FAILED 2008 +#define CSMI_STS_NO_SATA_DEV 2009 +#define CSMI_STS_NO_SATA_SIGNATURE 2010 +#define CSMI_STS_SCSI_EMULATION 2011 +#define CSMI_STS_NOT_AN_END_DEV 2012 +#define CSMI_STS_NO_SCSI_ADDR 2013 +#define CSMI_STS_NO_DEV_ADDR 2014 + +/* CSMI class independent structures */ +struct atto_csmi_get_driver_info { + char name[81]; + char description[81]; + u16 major_rev; + u16 minor_rev; + u16 build_rev; + u16 release_rev; + u16 csmi_major_rev; + u16 csmi_minor_rev; + #define CSMI_MAJOR_REV_0_81 0 + #define CSMI_MINOR_REV_0_81 81 + + #define CSMI_MAJOR_REV CSMI_MAJOR_REV_0_81 + #define CSMI_MINOR_REV CSMI_MINOR_REV_0_81 +}; + +struct atto_csmi_get_pci_bus_addr { + u8 bus_num; + u8 device_num; + u8 function_num; + u8 reserved; +}; + +struct atto_csmi_get_cntlr_cfg { + u32 base_io_addr; + + struct { + u32 base_memaddr_lo; + u32 base_memaddr_hi; + }; + + u32 board_id; + u16 slot_num; + #define CSMI_SLOT_NUM_UNKNOWN 0xFFFF + + u8 cntlr_class; + #define CSMI_CNTLR_CLASS_HBA 5 + + u8 io_bus_type; + #define CSMI_BUS_TYPE_PCI 3 + #define CSMI_BUS_TYPE_PCMCIA 4 + + union { + struct atto_csmi_get_pci_bus_addr pci_addr; + u8 reserved[32]; + }; + + char serial_num[81]; + u16 major_rev; + u16 minor_rev; + u16 build_rev; + u16 release_rev; + u16 bios_major_rev; + u16 bios_minor_rev; + u16 bios_build_rev; + u16 bios_release_rev; + u32 cntlr_flags; + #define CSMI_CNTLRF_SAS_HBA 0x00000001 + #define CSMI_CNTLRF_SAS_RAID 0x00000002 + #define CSMI_CNTLRF_SATA_HBA 0x00000004 + #define CSMI_CNTLRF_SATA_RAID 0x00000008 + #define CSMI_CNTLRF_FWD_SUPPORT 0x00010000 + #define CSMI_CNTLRF_FWD_ONLINE 0x00020000 + #define CSMI_CNTLRF_FWD_SRESET 0x00040000 + #define CSMI_CNTLRF_FWD_HRESET 0x00080000 + #define CSMI_CNTLRF_FWD_RROM 0x00100000 + + u16 rrom_major_rev; + u16 rrom_minor_rev; + u16 rrom_build_rev; + u16 rrom_release_rev; + u16 rrom_biosmajor_rev; + u16 rrom_biosminor_rev; + u16 rrom_biosbuild_rev; + u16 rrom_biosrelease_rev; + u8 reserved2[7]; +}; + +struct atto_csmi_get_cntlr_sts { + u32 status; + #define CSMI_CNTLR_STS_GOOD 1 + #define CSMI_CNTLR_STS_FAILED 2 + #define CSMI_CNTLR_STS_OFFLINE 3 + #define CSMI_CNTLR_STS_POWEROFF 4 + + u32 offline_reason; + #define CSMI_OFFLINE_NO_REASON 0 + #define CSMI_OFFLINE_INITIALIZING 1 + #define CSMI_OFFLINE_BUS_DEGRADED 2 + #define CSMI_OFFLINE_BUS_FAILURE 3 + + u8 reserved[28]; +}; + +struct atto_csmi_fw_download { + u32 buffer_len; + u32 download_flags; + #define CSMI_FWDF_VALIDATE 0x00000001 + #define CSMI_FWDF_SOFT_RESET 0x00000002 + #define CSMI_FWDF_HARD_RESET 0x00000004 + + u8 reserved[32]; + u16 status; + #define CSMI_FWD_STS_SUCCESS 0 + #define CSMI_FWD_STS_FAILED 1 + #define CSMI_FWD_STS_USING_RROM 2 + #define CSMI_FWD_STS_REJECT 3 + #define CSMI_FWD_STS_DOWNREV 4 + + u16 severity; + #define CSMI_FWD_SEV_INFO 0 + #define CSMI_FWD_SEV_WARNING 1 + #define CSMI_FWD_SEV_ERROR 2 + #define CSMI_FWD_SEV_FATAL 3 + +}; + +/* CSMI RAID class structures */ +struct atto_csmi_get_raid_info { + u32 num_raid_sets; + u32 max_drivesper_set; + u8 reserved[92]; +}; + +struct atto_csmi_raid_drives { + char model[40]; + char firmware[8]; + char serial_num[40]; + u8 sas_addr[8]; + u8 lun[8]; + u8 drive_sts; + #define CSMI_DRV_STS_OK 0 + #define 
CSMI_DRV_STS_REBUILDING 1 + #define CSMI_DRV_STS_FAILED 2 + #define CSMI_DRV_STS_DEGRADED 3 + + u8 drive_usage; + #define CSMI_DRV_USE_NOT_USED 0 + #define CSMI_DRV_USE_MEMBER 1 + #define CSMI_DRV_USE_SPARE 2 + + u8 reserved[30]; /* spec says 22 */ +}; + +struct atto_csmi_get_raid_cfg { + u32 raid_set_index; + u32 capacity; + u32 stripe_size; + u8 raid_type; + u8 status; + u8 information; + u8 drive_cnt; + u8 reserved[20]; + + struct atto_csmi_raid_drives drives[1]; +}; + +/* CSMI HBA class structures */ +struct atto_csmi_phy_entity { + u8 ident_frame[0x1C]; + u8 port_id; + u8 neg_link_rate; + u8 min_link_rate; + u8 max_link_rate; + u8 phy_change_cnt; + u8 auto_discover; + #define CSMI_DISC_NOT_SUPPORTED 0x00 + #define CSMI_DISC_NOT_STARTED 0x01 + #define CSMI_DISC_IN_PROGRESS 0x02 + #define CSMI_DISC_COMPLETE 0x03 + #define CSMI_DISC_ERROR 0x04 + + u8 reserved[2]; + u8 attach_ident_frame[0x1C]; +}; + +struct atto_csmi_get_phy_info { + u8 number_of_phys; + u8 reserved[3]; + struct atto_csmi_phy_entity + phy[32]; +}; + +struct atto_csmi_set_phy_info { + u8 phy_id; + u8 neg_link_rate; + #define CSMI_NEG_RATE_NEGOTIATE 0x00 + #define CSMI_NEG_RATE_PHY_DIS 0x01 + + u8 prog_minlink_rate; + u8 prog_maxlink_rate; + u8 signal_class; + #define CSMI_SIG_CLASS_UNKNOWN 0x00 + #define CSMI_SIG_CLASS_DIRECT 0x01 + #define CSMI_SIG_CLASS_SERVER 0x02 + #define CSMI_SIG_CLASS_ENCLOSURE 0x03 + + u8 reserved[3]; +}; + +struct atto_csmi_get_link_errors { + u8 phy_id; + u8 reset_cnts; + #define CSMI_RESET_CNTS_NO 0x00 + #define CSMI_RESET_CNTS_YES 0x01 + + u8 reserved[2]; + u32 inv_dw_cnt; + u32 disp_err_cnt; + u32 loss_ofdw_sync_cnt; + u32 phy_reseterr_cnt; + + /* + * The following field has been added by ATTO for ease of + * implementation of additional statistics. Drivers must validate + * the length of the IOCTL payload prior to filling them in so CSMI + * complaint applications function correctly. 
+ */ + + u32 crc_err_cnt; +}; + +struct atto_csmi_smp_passthru { + u8 phy_id; + u8 port_id; + u8 conn_rate; + u8 reserved; + u8 dest_sas_addr[8]; + u32 req_len; + u8 smp_req[1020]; + u8 conn_sts; + u8 reserved2[3]; + u32 rsp_len; + u8 smp_rsp[1020]; +}; + +struct atto_csmi_ssp_passthru_sts { + u8 conn_sts; + u8 reserved[3]; + u8 data_present; + u8 status; + u16 rsp_length; + u8 rsp[256]; + u32 data_bytes; +}; + +struct atto_csmi_ssp_passthru { + u8 phy_id; + u8 port_id; + u8 conn_rate; + u8 reserved; + u8 dest_sas_addr[8]; + u8 lun[8]; + u8 cdb_len; + u8 add_cdb_len; + u8 reserved2[2]; + u8 cdb[16]; + u32 flags; + #define CSMI_SSPF_DD_READ 0x00000001 + #define CSMI_SSPF_DD_WRITE 0x00000002 + #define CSMI_SSPF_DD_UNSPECIFIED 0x00000004 + #define CSMI_SSPF_TA_SIMPLE 0x00000000 + #define CSMI_SSPF_TA_HEAD_OF_Q 0x00000010 + #define CSMI_SSPF_TA_ORDERED 0x00000020 + #define CSMI_SSPF_TA_ACA 0x00000040 + + u8 add_cdb[24]; + u32 data_len; + + struct atto_csmi_ssp_passthru_sts sts; +}; + +struct atto_csmi_stp_passthru_sts { + u8 conn_sts; + u8 reserved[3]; + u8 sts_fis[20]; + u32 scr[16]; + u32 data_bytes; +}; + +struct atto_csmi_stp_passthru { + u8 phy_id; + u8 port_id; + u8 conn_rate; + u8 reserved; + u8 dest_sas_addr[8]; + u8 reserved2[4]; + u8 command_fis[20]; + u32 flags; + #define CSMI_STPF_DD_READ 0x00000001 + #define CSMI_STPF_DD_WRITE 0x00000002 + #define CSMI_STPF_DD_UNSPECIFIED 0x00000004 + #define CSMI_STPF_PIO 0x00000010 + #define CSMI_STPF_DMA 0x00000020 + #define CSMI_STPF_PACKET 0x00000040 + #define CSMI_STPF_DMA_QUEUED 0x00000080 + #define CSMI_STPF_EXECUTE_DIAG 0x00000100 + #define CSMI_STPF_RESET_DEVICE 0x00000200 + + u32 data_len; + + struct atto_csmi_stp_passthru_sts sts; +}; + +struct atto_csmi_get_sata_sig { + u8 phy_id; + u8 reserved[3]; + u8 reg_dth_fis[20]; +}; + +struct atto_csmi_get_scsi_addr { + u8 sas_addr[8]; + u8 sas_lun[8]; + u8 host_index; + u8 path_id; + u8 target_id; + u8 lun; +}; + +struct atto_csmi_get_dev_addr { + u8 host_index; + u8 path_id; + u8 target_id; + u8 lun; + u8 sas_addr[8]; + u8 sas_lun[8]; +}; + +struct atto_csmi_task_mgmt { + u8 host_index; + u8 path_id; + u8 target_id; + u8 lun; + u32 flags; + #define CSMI_TMF_TASK_IU 0x00000001 + #define CSMI_TMF_HARD_RST 0x00000002 + #define CSMI_TMF_SUPPRESS_RSLT 0x00000004 + + u32 queue_tag; + u32 reserved; + u8 task_mgt_func; + u8 reserved2[7]; + u32 information; + #define CSMI_TM_INFO_TEST 1 + #define CSMI_TM_INFO_EXCEEDED 2 + #define CSMI_TM_INFO_DEMAND 3 + #define CSMI_TM_INFO_TRIGGER 4 + + struct atto_csmi_ssp_passthru_sts sts; + +}; + +struct atto_csmi_get_conn_info { + u32 pinout; + #define CSMI_CON_UNKNOWN 0x00000001 + #define CSMI_CON_SFF_8482 0x00000002 + #define CSMI_CON_SFF_8470_LANE_1 0x00000100 + #define CSMI_CON_SFF_8470_LANE_2 0x00000200 + #define CSMI_CON_SFF_8470_LANE_3 0x00000400 + #define CSMI_CON_SFF_8470_LANE_4 0x00000800 + #define CSMI_CON_SFF_8484_LANE_1 0x00010000 + #define CSMI_CON_SFF_8484_LANE_2 0x00020000 + #define CSMI_CON_SFF_8484_LANE_3 0x00040000 + #define CSMI_CON_SFF_8484_LANE_4 0x00080000 + + u8 connector[16]; + u8 location; + #define CSMI_CON_INTERNAL 0x02 + #define CSMI_CON_EXTERNAL 0x04 + #define CSMI_CON_SWITCHABLE 0x08 + #define CSMI_CON_AUTO 0x10 + + u8 reserved[15]; +}; + +/* CSMI PHY class structures */ +struct atto_csmi_character { + u8 type_flags; + #define CSMI_CTF_POS_DISP 0x01 + #define CSMI_CTF_NEG_DISP 0x02 + #define CSMI_CTF_CTRL_CHAR 0x04 + + u8 value; +}; + +struct atto_csmi_pc_ctrl { + u8 type; + #define CSMI_PC_TYPE_UNDEFINED 0x00 + #define 
CSMI_PC_TYPE_SATA 0x01 + #define CSMI_PC_TYPE_SAS 0x02 + u8 rate; + u8 reserved[6]; + u32 vendor_unique[8]; + u32 tx_flags; + #define CSMI_PC_TXF_PREEMP_DIS 0x00000001 + + signed char tx_amplitude; + signed char tx_preemphasis; + signed char tx_slew_rate; + signed char tx_reserved[13]; + u8 tx_vendor_unique[64]; + u32 rx_flags; + #define CSMI_PC_RXF_EQ_DIS 0x00000001 + + signed char rx_threshold; + signed char rx_equalization_gain; + signed char rx_reserved[14]; + u8 rx_vendor_unique[64]; + u32 pattern_flags; + #define CSMI_PC_PATF_FIXED 0x00000001 + #define CSMI_PC_PATF_DIS_SCR 0x00000002 + #define CSMI_PC_PATF_DIS_ALIGN 0x00000004 + #define CSMI_PC_PATF_DIS_SSC 0x00000008 + + u8 fixed_pattern; + #define CSMI_PC_FP_CJPAT 0x00000001 + #define CSMI_PC_FP_ALIGN 0x00000002 + + u8 user_pattern_len; + u8 pattern_reserved[6]; + + struct atto_csmi_character user_pattern_buffer[16]; +}; + +struct atto_csmi_phy_ctrl { + u32 function; + #define CSMI_PC_FUNC_GET_SETUP 0x00000100 + + u8 phy_id; + u16 len_of_cntl; + u8 num_of_cntls; + u8 reserved[4]; + u32 link_flags; + #define CSMI_PHY_ACTIVATE_CTRL 0x00000001 + #define CSMI_PHY_UPD_SPINUP_RATE 0x00000002 + #define CSMI_PHY_AUTO_COMWAKE 0x00000004 + + u8 spinup_rate; + u8 link_reserved[7]; + u32 vendor_unique[8]; + + struct atto_csmi_pc_ctrl control[1]; +}; + +union atto_ioctl_csmi { + struct atto_csmi_get_driver_info drvr_info; + struct atto_csmi_get_cntlr_cfg cntlr_cfg; + struct atto_csmi_get_cntlr_sts cntlr_sts; + struct atto_csmi_fw_download fw_dwnld; + struct atto_csmi_get_raid_info raid_info; + struct atto_csmi_get_raid_cfg raid_cfg; + struct atto_csmi_get_phy_info get_phy_info; + struct atto_csmi_set_phy_info set_phy_info; + struct atto_csmi_get_link_errors link_errs; + struct atto_csmi_smp_passthru smp_pass_thru; + struct atto_csmi_ssp_passthru ssp_pass_thru; + struct atto_csmi_stp_passthru stp_pass_thru; + struct atto_csmi_task_mgmt tsk_mgt; + struct atto_csmi_get_sata_sig sata_sig; + struct atto_csmi_get_scsi_addr scsi_addr; + struct atto_csmi_get_dev_addr dev_addr; + struct atto_csmi_get_conn_info conn_info[32]; + struct atto_csmi_phy_ctrl phy_ctrl; +}; + +struct atto_csmi { + u32 control_code; + u32 status; + union atto_ioctl_csmi data; +}; + +struct atto_module_info { + void *adapter; + void *pci_dev; + void *scsi_host; + unsigned short host_no; + union { + struct { + u64 node_name; + u64 port_name; + }; + u64 sas_addr; + }; +}; + +#define ATTO_FUNC_GET_ADAP_INFO 0x00 +#define ATTO_VER_GET_ADAP_INFO0 0 +#define ATTO_VER_GET_ADAP_INFO ATTO_VER_GET_ADAP_INFO0 + +struct __packed atto_hba_get_adapter_info { + + struct { + u16 vendor_id; + u16 device_id; + u16 ss_vendor_id; + u16 ss_device_id; + u8 class_code[3]; + u8 rev_id; + u8 bus_num; + u8 dev_num; + u8 func_num; + u8 link_width_max; + u8 link_width_curr; + #define ATTO_GAI_PCILW_UNKNOWN 0x00 + + u8 link_speed_max; + u8 link_speed_curr; + #define ATTO_GAI_PCILS_UNKNOWN 0x00 + #define ATTO_GAI_PCILS_GEN1 0x01 + #define ATTO_GAI_PCILS_GEN2 0x02 + #define ATTO_GAI_PCILS_GEN3 0x03 + + u8 interrupt_mode; + #define ATTO_GAI_PCIIM_UNKNOWN 0x00 + #define ATTO_GAI_PCIIM_LEGACY 0x01 + #define ATTO_GAI_PCIIM_MSI 0x02 + #define ATTO_GAI_PCIIM_MSIX 0x03 + + u8 msi_vector_cnt; + u8 reserved[19]; + } pci; + + u8 adap_type; + #define ATTO_GAI_AT_EPCIU320 0x00 + #define ATTO_GAI_AT_ESASRAID 0x01 + #define ATTO_GAI_AT_ESASRAID2 0x02 + #define ATTO_GAI_AT_ESASHBA 0x03 + #define ATTO_GAI_AT_ESASHBA2 0x04 + #define ATTO_GAI_AT_CELERITY 0x05 + #define ATTO_GAI_AT_CELERITY8 0x06 + #define ATTO_GAI_AT_FASTFRAME 
0x07 + #define ATTO_GAI_AT_ESASHBA3 0x08 + #define ATTO_GAI_AT_CELERITY16 0x09 + #define ATTO_GAI_AT_TLSASHBA 0x0A + #define ATTO_GAI_AT_ESASHBA4 0x0B + + u8 adap_flags; + #define ATTO_GAI_AF_DEGRADED 0x01 + #define ATTO_GAI_AF_SPT_SUPP 0x02 + #define ATTO_GAI_AF_DEVADDR_SUPP 0x04 + #define ATTO_GAI_AF_PHYCTRL_SUPP 0x08 + #define ATTO_GAI_AF_TEST_SUPP 0x10 + #define ATTO_GAI_AF_DIAG_SUPP 0x20 + #define ATTO_GAI_AF_VIRT_SES 0x40 + #define ATTO_GAI_AF_CONN_CTRL 0x80 + + u8 num_ports; + u8 num_phys; + u8 drvr_rev_major; + u8 drvr_rev_minor; + u8 drvr_revsub_minor; + u8 drvr_rev_build; + char drvr_rev_ascii[16]; + char drvr_name[32]; + char firmware_rev[16]; + char flash_rev[16]; + char model_name_short[16]; + char model_name[32]; + u32 num_targets; + u32 num_targsper_bus; + u32 num_lunsper_targ; + u8 num_busses; + u8 num_connectors; + u8 adap_flags2; + #define ATTO_GAI_AF2_FCOE_SUPP 0x01 + #define ATTO_GAI_AF2_NIC_SUPP 0x02 + #define ATTO_GAI_AF2_LOCATE_SUPP 0x04 + #define ATTO_GAI_AF2_ADAP_CTRL_SUPP 0x08 + #define ATTO_GAI_AF2_DEV_INFO_SUPP 0x10 + #define ATTO_GAI_AF2_NPIV_SUPP 0x20 + #define ATTO_GAI_AF2_MP_SUPP 0x40 + + u8 num_temp_sensors; + u32 num_targets_backend; + u32 tunnel_flags; + #define ATTO_GAI_TF_MEM_RW 0x00000001 + #define ATTO_GAI_TF_TRACE 0x00000002 + #define ATTO_GAI_TF_SCSI_PASS_THRU 0x00000004 + #define ATTO_GAI_TF_GET_DEV_ADDR 0x00000008 + #define ATTO_GAI_TF_PHY_CTRL 0x00000010 + #define ATTO_GAI_TF_CONN_CTRL 0x00000020 + #define ATTO_GAI_TF_GET_DEV_INFO 0x00000040 + + u8 reserved3[0x138]; +}; + +#define ATTO_FUNC_GET_ADAP_ADDR 0x01 +#define ATTO_VER_GET_ADAP_ADDR0 0 +#define ATTO_VER_GET_ADAP_ADDR ATTO_VER_GET_ADAP_ADDR0 + +struct __packed atto_hba_get_adapter_address { + + u8 addr_type; + #define ATTO_GAA_AT_PORT 0x00 + #define ATTO_GAA_AT_NODE 0x01 + #define ATTO_GAA_AT_CURR_MAC 0x02 + #define ATTO_GAA_AT_PERM_MAC 0x03 + #define ATTO_GAA_AT_VNIC 0x04 + + u8 port_id; + u16 addr_len; + u8 address[256]; +}; + +#define ATTO_FUNC_MEM_RW 0x02 +#define ATTO_VER_MEM_RW0 0 +#define ATTO_VER_MEM_RW ATTO_VER_MEM_RW0 + +struct __packed atto_hba_memory_read_write { + u8 mem_func; + u8 mem_type; + union { + u8 pci_index; + u8 i2c_dev; + }; + u8 i2c_status; + u32 length; + u64 address; + u8 reserved[48]; + +}; + +#define ATTO_FUNC_TRACE 0x03 +#define ATTO_VER_TRACE0 0 +#define ATTO_VER_TRACE1 1 +#define ATTO_VER_TRACE ATTO_VER_TRACE1 + +struct __packed atto_hba_trace { + u8 trace_func; + #define ATTO_TRC_TF_GET_INFO 0x00 + #define ATTO_TRC_TF_ENABLE 0x01 + #define ATTO_TRC_TF_DISABLE 0x02 + #define ATTO_TRC_TF_SET_MASK 0x03 + #define ATTO_TRC_TF_UPLOAD 0x04 + #define ATTO_TRC_TF_RESET 0x05 + + u8 trace_type; + #define ATTO_TRC_TT_DRIVER 0x00 + #define ATTO_TRC_TT_FWCOREDUMP 0x01 + + u8 reserved[2]; + u32 current_offset; + u32 total_length; + u32 trace_mask; + u8 reserved2[48]; +}; + +#define ATTO_FUNC_SCSI_PASS_THRU 0x04 +#define ATTO_VER_SCSI_PASS_THRU0 0 +#define ATTO_VER_SCSI_PASS_THRU ATTO_VER_SCSI_PASS_THRU0 + +struct __packed atto_hba_scsi_pass_thru { + u8 cdb[32]; + u8 cdb_length; + u8 req_status; + #define ATTO_SPT_RS_SUCCESS 0x00 + #define ATTO_SPT_RS_FAILED 0x01 + #define ATTO_SPT_RS_OVERRUN 0x02 + #define ATTO_SPT_RS_UNDERRUN 0x03 + #define ATTO_SPT_RS_NO_DEVICE 0x04 + #define ATTO_SPT_RS_NO_LUN 0x05 + #define ATTO_SPT_RS_TIMEOUT 0x06 + #define ATTO_SPT_RS_BUS_RESET 0x07 + #define ATTO_SPT_RS_ABORTED 0x08 + #define ATTO_SPT_RS_BUSY 0x09 + #define ATTO_SPT_RS_DEGRADED 0x0A + + u8 scsi_status; + u8 sense_length; + u32 flags; + #define ATTO_SPTF_DATA_IN 0x00000001 + #define 
ATTO_SPTF_DATA_OUT 0x00000002 + #define ATTO_SPTF_SIMPLE_Q 0x00000004 + #define ATTO_SPTF_HEAD_OF_Q 0x00000008 + #define ATTO_SPTF_ORDERED_Q 0x00000010 + + u32 timeout; + u32 target_id; + u8 lun[8]; + u32 residual_length; + u8 sense_data[0xFC]; + u8 reserved[0x28]; +}; + +#define ATTO_FUNC_GET_DEV_ADDR 0x05 +#define ATTO_VER_GET_DEV_ADDR0 0 +#define ATTO_VER_GET_DEV_ADDR ATTO_VER_GET_DEV_ADDR0 + +struct __packed atto_hba_get_device_address { + u8 addr_type; + #define ATTO_GDA_AT_PORT 0x00 + #define ATTO_GDA_AT_NODE 0x01 + #define ATTO_GDA_AT_MAC 0x02 + #define ATTO_GDA_AT_PORTID 0x03 + #define ATTO_GDA_AT_UNIQUE 0x04 + + u8 reserved; + u16 addr_len; + u32 target_id; + u8 address[256]; +}; + +/* The following functions are supported by firmware but do not have any + * associated driver structures + */ +#define ATTO_FUNC_PHY_CTRL 0x06 +#define ATTO_FUNC_CONN_CTRL 0x0C +#define ATTO_FUNC_ADAP_CTRL 0x0E +#define ATTO_VER_ADAP_CTRL0 0 +#define ATTO_VER_ADAP_CTRL ATTO_VER_ADAP_CTRL0 + +struct __packed atto_hba_adap_ctrl { + u8 adap_func; + #define ATTO_AC_AF_HARD_RST 0x00 + #define ATTO_AC_AF_GET_STATE 0x01 + #define ATTO_AC_AF_GET_TEMP 0x02 + + u8 adap_state; + #define ATTO_AC_AS_UNKNOWN 0x00 + #define ATTO_AC_AS_OK 0x01 + #define ATTO_AC_AS_RST_SCHED 0x02 + #define ATTO_AC_AS_RST_IN_PROG 0x03 + #define ATTO_AC_AS_RST_DISC 0x04 + #define ATTO_AC_AS_DEGRADED 0x05 + #define ATTO_AC_AS_DISABLED 0x06 + #define ATTO_AC_AS_TEMP 0x07 + + u8 reserved[2]; + + union { + struct { + u8 temp_sensor; + u8 temp_state; + + #define ATTO_AC_TS_UNSUPP 0x00 + #define ATTO_AC_TS_UNKNOWN 0x01 + #define ATTO_AC_TS_INIT_FAILED 0x02 + #define ATTO_AC_TS_NORMAL 0x03 + #define ATTO_AC_TS_OUT_OF_RANGE 0x04 + #define ATTO_AC_TS_FAULT 0x05 + + signed short temp_value; + signed short temp_lower_lim; + signed short temp_upper_lim; + char temp_desc[32]; + u8 reserved2[20]; + }; + }; +}; + +#define ATTO_FUNC_GET_DEV_INFO 0x0F +#define ATTO_VER_GET_DEV_INFO0 0 +#define ATTO_VER_GET_DEV_INFO ATTO_VER_GET_DEV_INFO0 + +struct __packed atto_hba_sas_device_info { + + #define ATTO_SDI_MAX_PHYS_WIDE_PORT 16 + + u8 phy_id[ATTO_SDI_MAX_PHYS_WIDE_PORT]; /* IDs of parent exp/adapt */ + #define ATTO_SDI_PHY_ID_INV ATTO_SAS_PHY_ID_INV + u32 exp_target_id; + u32 sas_port_mask; + u8 sas_level; + #define ATTO_SDI_SAS_LVL_INV 0xFF + + u8 slot_num; + #define ATTO_SDI_SLOT_NUM_INV ATTO_SLOT_NUM_INV + + u8 dev_type; + #define ATTO_SDI_DT_END_DEVICE 0 + #define ATTO_SDI_DT_EXPANDER 1 + #define ATTO_SDI_DT_PORT_MULT 2 + + u8 ini_flags; + u8 tgt_flags; + u8 link_rate; /* SMP_RATE_XXX */ + u8 loc_flags; + #define ATTO_SDI_LF_DIRECT 0x01 + #define ATTO_SDI_LF_EXPANDER 0x02 + #define ATTO_SDI_LF_PORT_MULT 0x04 + u8 pm_port; + u8 reserved[0x60]; +}; + +union atto_hba_device_info { + struct atto_hba_sas_device_info sas_dev_info; +}; + +struct __packed atto_hba_get_device_info { + u32 target_id; + u8 info_type; + #define ATTO_GDI_IT_UNKNOWN 0x00 + #define ATTO_GDI_IT_SAS 0x01 + #define ATTO_GDI_IT_FC 0x02 + #define ATTO_GDI_IT_FCOE 0x03 + + u8 reserved[11]; + union atto_hba_device_info dev_info; +}; + +struct atto_ioctl { + u8 version; + u8 function; /* ATTO_FUNC_XXX */ + u8 status; +#define ATTO_STS_SUCCESS 0x00 +#define ATTO_STS_FAILED 0x01 +#define ATTO_STS_INV_VERSION 0x02 +#define ATTO_STS_OUT_OF_RSRC 0x03 +#define ATTO_STS_INV_FUNC 0x04 +#define ATTO_STS_UNSUPPORTED 0x05 +#define ATTO_STS_INV_ADAPTER 0x06 +#define ATTO_STS_INV_DRVR_VER 0x07 +#define ATTO_STS_INV_PARAM 0x08 +#define ATTO_STS_TIMEOUT 0x09 +#define ATTO_STS_NOT_APPL 0x0A +#define 
ATTO_STS_DEGRADED 0x0B + + u8 flags; + #define HBAF_TUNNEL 0x01 + + u32 data_length; + u8 reserved2[56]; + + union { + u8 byte[1]; + struct atto_hba_get_adapter_info get_adap_info; + struct atto_hba_get_adapter_address get_adap_addr; + struct atto_hba_scsi_pass_thru scsi_pass_thru; + struct atto_hba_get_device_address get_dev_addr; + struct atto_hba_adap_ctrl adap_ctrl; + struct atto_hba_get_device_info get_dev_info; + struct atto_hba_trace trace; + } data; + +}; + +struct __packed atto_ioctl_vda_scsi_cmd { + + #define ATTO_VDA_SCSI_VER0 0 + #define ATTO_VDA_SCSI_VER ATTO_VDA_SCSI_VER0 + + u8 cdb[16]; + u32 flags; + u32 data_length; + u32 residual_length; + u16 target_id; + u8 sense_len; + u8 scsi_stat; + u8 reserved[8]; + u8 sense_data[80]; +}; + +struct __packed atto_ioctl_vda_flash_cmd { + + #define ATTO_VDA_FLASH_VER0 0 + #define ATTO_VDA_FLASH_VER ATTO_VDA_FLASH_VER0 + + u32 flash_addr; + u32 data_length; + u8 sub_func; + u8 reserved[15]; + + union { + struct { + u32 flash_size; + u32 page_size; + u8 prod_info[32]; + } info; + + struct { + char file_name[16]; /* 8.3 fname, NULL term, wc=* */ + u32 file_size; + } file; + } data; + +}; + +struct __packed atto_ioctl_vda_diag_cmd { + + #define ATTO_VDA_DIAG_VER0 0 + #define ATTO_VDA_DIAG_VER ATTO_VDA_DIAG_VER0 + + u64 local_addr; + u32 data_length; + u8 sub_func; + u8 flags; + u8 reserved[3]; +}; + +struct __packed atto_ioctl_vda_cli_cmd { + + #define ATTO_VDA_CLI_VER0 0 + #define ATTO_VDA_CLI_VER ATTO_VDA_CLI_VER0 + + u32 cmd_rsp_len; +}; + +struct __packed atto_ioctl_vda_smp_cmd { + + #define ATTO_VDA_SMP_VER0 0 + #define ATTO_VDA_SMP_VER ATTO_VDA_SMP_VER0 + + u64 dest; + u32 cmd_rsp_len; +}; + +struct __packed atto_ioctl_vda_cfg_cmd { + + #define ATTO_VDA_CFG_VER0 0 + #define ATTO_VDA_CFG_VER ATTO_VDA_CFG_VER0 + + u32 data_length; + u8 cfg_func; + u8 reserved[11]; + + union { + u8 bytes[112]; + struct atto_vda_cfg_init init; + } data; + +}; + +struct __packed atto_ioctl_vda_mgt_cmd { + + #define ATTO_VDA_MGT_VER0 0 + #define ATTO_VDA_MGT_VER ATTO_VDA_MGT_VER0 + + u8 mgt_func; + u8 scan_generation; + u16 dev_index; + u32 data_length; + u8 reserved[8]; + union { + u8 bytes[112]; + struct atto_vda_devinfo dev_info; + struct atto_vda_grp_info grp_info; + struct atto_vdapart_info part_info; + struct atto_vda_dh_info dh_info; + struct atto_vda_metrics_info metrics_info; + struct atto_vda_schedule_info sched_info; + struct atto_vda_n_vcache_info nvcache_info; + struct atto_vda_buzzer_info buzzer_info; + struct atto_vda_adapter_info adapter_info; + struct atto_vda_temp_info temp_info; + struct atto_vda_fan_info fan_info; + } data; +}; + +struct __packed atto_ioctl_vda_gsv_cmd { + + #define ATTO_VDA_GSV_VER0 0 + #define ATTO_VDA_GSV_VER ATTO_VDA_GSV_VER0 + + u8 rsp_len; + u8 reserved[7]; + u8 version_info[1]; + #define ATTO_VDA_VER_UNSUPPORTED 0xFF + +}; + +struct __packed atto_ioctl_vda { + u8 version; + u8 function; /* VDA_FUNC_XXXX */ + u8 status; /* ATTO_STS_XXX */ + u8 vda_status; /* RS_XXX (if status == ATTO_STS_SUCCESS) */ + u32 data_length; + u8 reserved[8]; + + union { + struct atto_ioctl_vda_scsi_cmd scsi; + struct atto_ioctl_vda_flash_cmd flash; + struct atto_ioctl_vda_diag_cmd diag; + struct atto_ioctl_vda_cli_cmd cli; + struct atto_ioctl_vda_smp_cmd smp; + struct atto_ioctl_vda_cfg_cmd cfg; + struct atto_ioctl_vda_mgt_cmd mgt; + struct atto_ioctl_vda_gsv_cmd gsv; + u8 cmd_info[256]; + } cmd; + + union { + u8 data[1]; + struct atto_vda_devinfo2 dev_info2; + } data; + +}; + +struct __packed atto_ioctl_smp { + u8 version; + #define 
ATTO_SMP_VERSION0 0 + #define ATTO_SMP_VERSION1 1 + #define ATTO_SMP_VERSION2 2 + #define ATTO_SMP_VERSION ATTO_SMP_VERSION2 + + u8 function; +#define ATTO_SMP_FUNC_DISC_SMP 0x00 +#define ATTO_SMP_FUNC_DISC_TARG 0x01 +#define ATTO_SMP_FUNC_SEND_CMD 0x02 +#define ATTO_SMP_FUNC_DISC_TARG_DIRECT 0x03 +#define ATTO_SMP_FUNC_SEND_CMD_DIRECT 0x04 +#define ATTO_SMP_FUNC_DISC_SMP_DIRECT 0x05 + + u8 status; /* ATTO_STS_XXX */ + u8 smp_status; /* if status == ATTO_STS_SUCCESS */ + #define ATTO_SMP_STS_SUCCESS 0x00 + #define ATTO_SMP_STS_FAILURE 0x01 + #define ATTO_SMP_STS_RESCAN 0x02 + #define ATTO_SMP_STS_NOT_FOUND 0x03 + + u16 target_id; + u8 phy_id; + u8 dev_index; + u64 smp_sas_addr; + u64 targ_sas_addr; + u32 req_length; + u32 rsp_length; + u8 flags; + #define ATTO_SMPF_ROOT_EXP 0x01 /* expander direct attached */ + + u8 reserved[31]; + + union { + u8 byte[1]; + u32 dword[1]; + } data; + +}; + +struct __packed atto_express_ioctl { + struct atto_express_ioctl_header header; + + union { + struct atto_firmware_rw_request fwrw; + struct atto_param_rw_request prw; + struct atto_channel_list chanlist; + struct atto_channel_info chaninfo; + struct atto_ioctl ioctl_hba; + struct atto_module_info modinfo; + struct atto_ioctl_vda ioctl_vda; + struct atto_ioctl_smp ioctl_smp; + struct atto_csmi csmi; + + } data; +}; + +/* The struct associated with the code is listed after the definition */ +#define EXPRESS_IOCTL_MIN 0x4500 +#define EXPRESS_IOCTL_RW_FIRMWARE 0x4500 /* FIRMWARERW */ +#define EXPRESS_IOCTL_READ_PARAMS 0x4501 /* PARAMRW */ +#define EXPRESS_IOCTL_WRITE_PARAMS 0x4502 /* PARAMRW */ +#define EXPRESS_IOCTL_FC_API 0x4503 /* internal */ +#define EXPRESS_IOCTL_GET_CHANNELS 0x4504 /* CHANNELLIST */ +#define EXPRESS_IOCTL_CHAN_INFO 0x4505 /* CHANNELINFO */ +#define EXPRESS_IOCTL_DEFAULT_PARAMS 0x4506 /* PARAMRW */ +#define EXPRESS_ADDR_MEMORY 0x4507 /* MEMADDR */ +#define EXPRESS_RW_MEMORY 0x4508 /* MEMRW */ +#define EXPRESS_TSDK_DUMP 0x4509 /* TSDKDUMP */ +#define EXPRESS_IOCTL_SMP 0x450A /* IOCTL_SMP */ +#define EXPRESS_CSMI 0x450B /* CSMI */ +#define EXPRESS_IOCTL_HBA 0x450C /* IOCTL_HBA */ +#define EXPRESS_IOCTL_VDA 0x450D /* IOCTL_VDA */ +#define EXPRESS_IOCTL_GET_ID 0x450E /* GET_ID */ +#define EXPRESS_IOCTL_GET_MOD_INFO 0x450F /* MODULE_INFO */ +#define EXPRESS_IOCTL_MAX 0x450F + +#endif diff --git a/drivers/scsi/esas2r/atvda.h b/drivers/scsi/esas2r/atvda.h new file mode 100644 index 000000000000..5fc1f991d24e --- /dev/null +++ b/drivers/scsi/esas2r/atvda.h @@ -0,0 +1,1319 @@ +/* linux/drivers/scsi/esas2r/atvda.h + * ATTO VDA interface definitions + * + * Copyright (c) 2001-2013 ATTO Technology, Inc. + * (mailto:linuxdrivers@attotech.com) + */ +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. 
Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. + * + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ + + +#ifndef ATVDA_H +#define ATVDA_H + +struct __packed atto_dev_addr { + u64 dev_port; + u64 hba_port; + u8 lun; + u8 flags; + #define VDA_DEVADDRF_SATA 0x01 + #define VDA_DEVADDRF_SSD 0x02 + u8 link_speed; /* VDALINKSPEED_xxx */ + u8 pad[1]; +}; + +/* dev_addr2 was added for 64-bit alignment */ + +struct __packed atto_dev_addr2 { + u64 dev_port; + u64 hba_port; + u8 lun; + u8 flags; + u8 link_speed; + u8 pad[5]; +}; + +struct __packed atto_vda_sge { + u32 length; + u64 address; +}; + + +/* VDA request function codes */ + +#define VDA_FUNC_SCSI 0x00 +#define VDA_FUNC_FLASH 0x01 +#define VDA_FUNC_DIAG 0x02 +#define VDA_FUNC_AE 0x03 +#define VDA_FUNC_CLI 0x04 +#define VDA_FUNC_IOCTL 0x05 +#define VDA_FUNC_CFG 0x06 +#define VDA_FUNC_MGT 0x07 +#define VDA_FUNC_GSV 0x08 + + +/* VDA request status values. for host driver considerations, values for + * SCSI requests start at zero. other requests may use these values as well. */ + +#define RS_SUCCESS 0x00 /*! successful completion */ +#define RS_INV_FUNC 0x01 /*! invalid command function */ +#define RS_BUSY 0x02 /*! insufficient resources */ +#define RS_SEL 0x03 /*! no target at target_id */ +#define RS_NO_LUN 0x04 /*! invalid LUN */ +#define RS_TIMEOUT 0x05 /*! request timeout */ +#define RS_OVERRUN 0x06 /*! data overrun */ +#define RS_UNDERRUN 0x07 /*! data underrun */ +#define RS_SCSI_ERROR 0x08 /*! SCSI error occurred */ +#define RS_ABORTED 0x0A /*! command aborted */ +#define RS_RESID_MISM 0x0B /*! residual length incorrect */ +#define RS_TM_FAILED 0x0C /*! task management failed */ +#define RS_RESET 0x0D /*! aborted due to bus reset */ +#define RS_ERR_DMA_SG 0x0E /*! error reading SG list */ +#define RS_ERR_DMA_DATA 0x0F /*! error transferring data */ +#define RS_UNSUPPORTED 0x10 /*! unsupported request */ +#define RS_SEL2 0x70 /*! internal generated RS_SEL */ +#define RS_VDA_BASE 0x80 /*! base of VDA-specific errors */ +#define RS_MGT_BASE 0x80 /*! 
base of VDA management errors */ +#define RS_SCAN_FAIL (RS_MGT_BASE + 0x00) +#define RS_DEV_INVALID (RS_MGT_BASE + 0x01) +#define RS_DEV_ASSIGNED (RS_MGT_BASE + 0x02) +#define RS_DEV_REMOVE (RS_MGT_BASE + 0x03) +#define RS_DEV_LOST (RS_MGT_BASE + 0x04) +#define RS_SCAN_GEN (RS_MGT_BASE + 0x05) +#define RS_GRP_INVALID (RS_MGT_BASE + 0x08) +#define RS_GRP_EXISTS (RS_MGT_BASE + 0x09) +#define RS_GRP_LIMIT (RS_MGT_BASE + 0x0A) +#define RS_GRP_INTLV (RS_MGT_BASE + 0x0B) +#define RS_GRP_SPAN (RS_MGT_BASE + 0x0C) +#define RS_GRP_TYPE (RS_MGT_BASE + 0x0D) +#define RS_GRP_MEMBERS (RS_MGT_BASE + 0x0E) +#define RS_GRP_COMMIT (RS_MGT_BASE + 0x0F) +#define RS_GRP_REBUILD (RS_MGT_BASE + 0x10) +#define RS_GRP_REBUILD_TYPE (RS_MGT_BASE + 0x11) +#define RS_GRP_BLOCK_SIZE (RS_MGT_BASE + 0x12) +#define RS_CFG_SAVE (RS_MGT_BASE + 0x14) +#define RS_PART_LAST (RS_MGT_BASE + 0x18) +#define RS_ELEM_INVALID (RS_MGT_BASE + 0x19) +#define RS_PART_MAPPED (RS_MGT_BASE + 0x1A) +#define RS_PART_TARGET (RS_MGT_BASE + 0x1B) +#define RS_PART_LUN (RS_MGT_BASE + 0x1C) +#define RS_PART_DUP (RS_MGT_BASE + 0x1D) +#define RS_PART_NOMAP (RS_MGT_BASE + 0x1E) +#define RS_PART_MAX (RS_MGT_BASE + 0x1F) +#define RS_PART_CAP (RS_MGT_BASE + 0x20) +#define RS_PART_STATE (RS_MGT_BASE + 0x21) +#define RS_TEST_IN_PROG (RS_MGT_BASE + 0x22) +#define RS_METRICS_ERROR (RS_MGT_BASE + 0x23) +#define RS_HS_ERROR (RS_MGT_BASE + 0x24) +#define RS_NO_METRICS_TEST (RS_MGT_BASE + 0x25) +#define RS_BAD_PARAM (RS_MGT_BASE + 0x26) +#define RS_GRP_MEMBER_SIZE (RS_MGT_BASE + 0x27) +#define RS_FLS_BASE 0xB0 /*! base of VDA errors */ +#define RS_FLS_ERR_AREA (RS_FLS_BASE + 0x00) +#define RS_FLS_ERR_BUSY (RS_FLS_BASE + 0x01) +#define RS_FLS_ERR_RANGE (RS_FLS_BASE + 0x02) +#define RS_FLS_ERR_BEGIN (RS_FLS_BASE + 0x03) +#define RS_FLS_ERR_CHECK (RS_FLS_BASE + 0x04) +#define RS_FLS_ERR_FAIL (RS_FLS_BASE + 0x05) +#define RS_FLS_ERR_RSRC (RS_FLS_BASE + 0x06) +#define RS_FLS_ERR_NOFILE (RS_FLS_BASE + 0x07) +#define RS_FLS_ERR_FSIZE (RS_FLS_BASE + 0x08) +#define RS_CFG_BASE 0xC0 /*! base of VDA configuration errors */ +#define RS_CFG_ERR_BUSY (RS_CFG_BASE + 0) +#define RS_CFG_ERR_SGE (RS_CFG_BASE + 1) +#define RS_CFG_ERR_DATE (RS_CFG_BASE + 2) +#define RS_CFG_ERR_TIME (RS_CFG_BASE + 3) +#define RS_DEGRADED 0xFB /*! degraded mode */ +#define RS_CLI_INTERNAL 0xFC /*! VDA CLI internal error */ +#define RS_VDA_INTERNAL 0xFD /*! catch-all */ +#define RS_PENDING 0xFE /*! pending, not started */ +#define RS_STARTED 0xFF /*! started */ + + +/* flash request subfunctions. these are used in both the IOCTL and the + * driver-firmware interface (VDA_FUNC_FLASH). */ + +#define VDA_FLASH_BEGINW 0x00 +#define VDA_FLASH_READ 0x01 +#define VDA_FLASH_WRITE 0x02 +#define VDA_FLASH_COMMIT 0x03 +#define VDA_FLASH_CANCEL 0x04 +#define VDA_FLASH_INFO 0x05 +#define VDA_FLASH_FREAD 0x06 +#define VDA_FLASH_FWRITE 0x07 +#define VDA_FLASH_FINFO 0x08 + + +/* IOCTL request subfunctions. these identify the payload type for + * VDA_FUNC_IOCTL. 
+ */ + +#define VDA_IOCTL_HBA 0x00 +#define VDA_IOCTL_CSMI 0x01 +#define VDA_IOCTL_SMP 0x02 + +struct __packed atto_vda_devinfo { + struct atto_dev_addr dev_addr; + u8 vendor_id[8]; + u8 product_id[16]; + u8 revision[4]; + u64 capacity; + u32 block_size; + u8 dev_type; + + union { + u8 dev_status; + #define VDADEVSTAT_INVALID 0x00 + #define VDADEVSTAT_CORRUPT VDADEVSTAT_INVALID + #define VDADEVSTAT_ASSIGNED 0x01 + #define VDADEVSTAT_SPARE 0x02 + #define VDADEVSTAT_UNAVAIL 0x03 + #define VDADEVSTAT_PT_MAINT 0x04 + #define VDADEVSTAT_LCLSPARE 0x05 + #define VDADEVSTAT_UNUSEABLE 0x06 + #define VDADEVSTAT_AVAIL 0xFF + + u8 op_ctrl; + #define VDA_DEV_OP_CTRL_START 0x01 + #define VDA_DEV_OP_CTRL_HALT 0x02 + #define VDA_DEV_OP_CTRL_RESUME 0x03 + #define VDA_DEV_OP_CTRL_CANCEL 0x04 + }; + + u8 member_state; + #define VDAMBRSTATE_ONLINE 0x00 + #define VDAMBRSTATE_DEGRADED 0x01 + #define VDAMBRSTATE_UNAVAIL 0x02 + #define VDAMBRSTATE_FAULTED 0x03 + #define VDAMBRSTATE_MISREAD 0x04 + #define VDAMBRSTATE_INCOMPAT 0x05 + + u8 operation; + #define VDAOP_NONE 0x00 + #define VDAOP_REBUILD 0x01 + #define VDAOP_ERASE 0x02 + #define VDAOP_PATTERN 0x03 + #define VDAOP_CONVERSION 0x04 + #define VDAOP_FULL_INIT 0x05 + #define VDAOP_QUICK_INIT 0x06 + #define VDAOP_SECT_SCAN 0x07 + #define VDAOP_SECT_SCAN_PARITY 0x08 + #define VDAOP_SECT_SCAN_PARITY_FIX 0x09 + #define VDAOP_RECOV_REBUILD 0x0A + + u8 op_status; + #define VDAOPSTAT_OK 0x00 + #define VDAOPSTAT_FAULTED 0x01 + #define VDAOPSTAT_HALTED 0x02 + #define VDAOPSTAT_INT 0x03 + + u8 progress; /* 0 - 100% */ + u16 ses_dev_index; + #define VDASESDI_INVALID 0xFFFF + + u8 serial_no[32]; + + union { + u16 target_id; + #define VDATGTID_INVALID 0xFFFF + + u16 features_mask; + }; + + u16 lun; + u16 features; + #define VDADEVFEAT_ENC_SERV 0x0001 + #define VDADEVFEAT_IDENT 0x0002 + #define VDADEVFEAT_DH_SUPP 0x0004 + #define VDADEVFEAT_PHYS_ID 0x0008 + + u8 ses_element_id; + u8 link_speed; + #define VDALINKSPEED_UNKNOWN 0x00 + #define VDALINKSPEED_1GB 0x01 + #define VDALINKSPEED_1_5GB 0x02 + #define VDALINKSPEED_2GB 0x03 + #define VDALINKSPEED_3GB 0x04 + #define VDALINKSPEED_4GB 0x05 + #define VDALINKSPEED_6GB 0x06 + #define VDALINKSPEED_8GB 0x07 + + u16 phys_target_id; + u8 reserved[2]; +}; + + +/*! struct atto_vda_devinfo2 is a replacement for atto_vda_devinfo. it + * extends beyond the 0x70 bytes allowed in atto_vda_mgmt_req; therefore, + * the entire structure is DMaed between the firmware and host buffer and + * the data will always be in little endian format. + */ + +struct __packed atto_vda_devinfo2 { + struct atto_dev_addr dev_addr; + u8 vendor_id[8]; + u8 product_id[16]; + u8 revision[4]; + u64 capacity; + u32 block_size; + u8 dev_type; + u8 dev_status; + u8 member_state; + u8 operation; + u8 op_status; + u8 progress; + u16 ses_dev_index; + u8 serial_no[32]; + union { + u16 target_id; + u16 features_mask; + }; + + u16 lun; + u16 features; + u8 ses_element_id; + u8 link_speed; + u16 phys_target_id; + u8 reserved[2]; + +/* This is where fields specific to struct atto_vda_devinfo2 begin. Note + * that the structure version started at one so applications that unionize this + * structure with atto_vda_dev_info can differentiate them if desired. 
+ */ + + u8 version; + #define VDADEVINFO_VERSION0 0x00 + #define VDADEVINFO_VERSION1 0x01 + #define VDADEVINFO_VERSION2 0x02 + #define VDADEVINFO_VERSION3 0x03 + #define VDADEVINFO_VERSION VDADEVINFO_VERSION3 + + u8 reserved2[3]; + + /* sector scanning fields */ + + u32 ss_curr_errors; + u64 ss_curr_scanned; + u32 ss_curr_recvrd; + u32 ss_scan_length; + u32 ss_total_errors; + u32 ss_total_recvrd; + u32 ss_num_scans; + + /* grp_name was added in version 2 of this structure. */ + + char grp_name[15]; + u8 reserved3[4]; + + /* dev_addr_list was added in version 3 of this structure. */ + + u8 num_dev_addr; + struct atto_dev_addr2 dev_addr_list[8]; +}; + + +struct __packed atto_vda_grp_info { + u8 grp_index; + #define VDA_MAX_RAID_GROUPS 32 + + char grp_name[15]; + u64 capacity; + u32 block_size; + u32 interleave; + u8 type; + #define VDA_GRP_TYPE_RAID0 0 + #define VDA_GRP_TYPE_RAID1 1 + #define VDA_GRP_TYPE_RAID4 4 + #define VDA_GRP_TYPE_RAID5 5 + #define VDA_GRP_TYPE_RAID6 6 + #define VDA_GRP_TYPE_RAID10 10 + #define VDA_GRP_TYPE_RAID40 40 + #define VDA_GRP_TYPE_RAID50 50 + #define VDA_GRP_TYPE_RAID60 60 + #define VDA_GRP_TYPE_DVRAID_HS 252 + #define VDA_GRP_TYPE_DVRAID_NOHS 253 + #define VDA_GRP_TYPE_JBOD 254 + #define VDA_GRP_TYPE_SPARE 255 + + union { + u8 status; + #define VDA_GRP_STAT_INVALID 0x00 + #define VDA_GRP_STAT_NEW 0x01 + #define VDA_GRP_STAT_WAITING 0x02 + #define VDA_GRP_STAT_ONLINE 0x03 + #define VDA_GRP_STAT_DEGRADED 0x04 + #define VDA_GRP_STAT_OFFLINE 0x05 + #define VDA_GRP_STAT_DELETED 0x06 + #define VDA_GRP_STAT_RECOV_BASIC 0x07 + #define VDA_GRP_STAT_RECOV_EXTREME 0x08 + + u8 op_ctrl; + #define VDA_GRP_OP_CTRL_START 0x01 + #define VDA_GRP_OP_CTRL_HALT 0x02 + #define VDA_GRP_OP_CTRL_RESUME 0x03 + #define VDA_GRP_OP_CTRL_CANCEL 0x04 + }; + + u8 rebuild_state; + #define VDA_RBLD_NONE 0x00 + #define VDA_RBLD_REBUILD 0x01 + #define VDA_RBLD_ERASE 0x02 + #define VDA_RBLD_PATTERN 0x03 + #define VDA_RBLD_CONV 0x04 + #define VDA_RBLD_FULL_INIT 0x05 + #define VDA_RBLD_QUICK_INIT 0x06 + #define VDA_RBLD_SECT_SCAN 0x07 + #define VDA_RBLD_SECT_SCAN_PARITY 0x08 + #define VDA_RBLD_SECT_SCAN_PARITY_FIX 0x09 + #define VDA_RBLD_RECOV_REBUILD 0x0A + #define VDA_RBLD_RECOV_BASIC 0x0B + #define VDA_RBLD_RECOV_EXTREME 0x0C + + u8 span_depth; + u8 progress; + u8 mirror_width; + u8 stripe_width; + u8 member_cnt; + + union { + u16 members[32]; + #define VDA_MEMBER_MISSING 0xFFFF + #define VDA_MEMBER_NEW 0xFFFE + u16 features_mask; + }; + + u16 features; + #define VDA_GRP_FEAT_HOTSWAP 0x0001 + #define VDA_GRP_FEAT_SPDRD_MASK 0x0006 + #define VDA_GRP_FEAT_SPDRD_DIS 0x0000 + #define VDA_GRP_FEAT_SPDRD_ENB 0x0002 + #define VDA_GRP_FEAT_SPDRD_AUTO 0x0004 + #define VDA_GRP_FEAT_IDENT 0x0008 + #define VDA_GRP_FEAT_RBLDPRI_MASK 0x0030 + #define VDA_GRP_FEAT_RBLDPRI_LOW 0x0010 + #define VDA_GRP_FEAT_RBLDPRI_SAME 0x0020 + #define VDA_GRP_FEAT_RBLDPRI_HIGH 0x0030 + #define VDA_GRP_FEAT_WRITE_CACHE 0x0040 + #define VDA_GRP_FEAT_RBLD_RESUME 0x0080 + #define VDA_GRP_FEAT_SECT_RESUME 0x0100 + #define VDA_GRP_FEAT_INIT_RESUME 0x0200 + #define VDA_GRP_FEAT_SSD 0x0400 + #define VDA_GRP_FEAT_BOOT_DEV 0x0800 + + /* + * for backward compatibility, a prefetch value of zero means the + * setting is ignored/unsupported. therefore, the firmware supported + * 0-6 values are incremented to 1-7. 
+ */ + + u8 prefetch; + u8 op_status; + #define VDAGRPOPSTAT_MASK 0x0F + #define VDAGRPOPSTAT_INVALID 0x00 + #define VDAGRPOPSTAT_OK 0x01 + #define VDAGRPOPSTAT_FAULTED 0x02 + #define VDAGRPOPSTAT_HALTED 0x03 + #define VDAGRPOPSTAT_INT 0x04 + #define VDAGRPOPPROC_MASK 0xF0 + #define VDAGRPOPPROC_STARTABLE 0x10 + #define VDAGRPOPPROC_CANCELABLE 0x20 + #define VDAGRPOPPROC_RESUMABLE 0x40 + #define VDAGRPOPPROC_HALTABLE 0x80 + u8 over_provision; + u8 reserved[3]; + +}; + + +struct __packed atto_vdapart_info { + u8 part_no; + #define VDA_MAX_PARTITIONS 128 + + char grp_name[15]; + u64 part_size; + u64 start_lba; + u32 block_size; + u16 target_id; + u8 LUN; + char serial_no[41]; + u8 features; + #define VDAPI_FEAT_WRITE_CACHE 0x01 + + u8 reserved[7]; +}; + + +struct __packed atto_vda_dh_info { + u8 req_type; + #define VDADH_RQTYPE_CACHE 0x01 + #define VDADH_RQTYPE_FETCH 0x02 + #define VDADH_RQTYPE_SET_STAT 0x03 + #define VDADH_RQTYPE_GET_STAT 0x04 + + u8 req_qual; + #define VDADH_RQQUAL_SMART 0x01 + #define VDADH_RQQUAL_MEDDEF 0x02 + #define VDADH_RQQUAL_INFOEXC 0x04 + + u8 num_smart_attribs; + u8 status; + #define VDADH_STAT_DISABLE 0x00 + #define VDADH_STAT_ENABLE 0x01 + + u32 med_defect_cnt; + u32 info_exc_cnt; + u8 smart_status; + #define VDADH_SMARTSTAT_OK 0x00 + #define VDADH_SMARTSTAT_ERR 0x01 + + u8 reserved[35]; + struct atto_vda_sge sge[1]; +}; + + +struct __packed atto_vda_dh_smart { + u8 attrib_id; + u8 current_val; + u8 worst; + u8 threshold; + u8 raw_data[6]; + u8 raw_attrib_status; + #define VDADHSM_RAWSTAT_PREFAIL_WARRANTY 0x01 + #define VDADHSM_RAWSTAT_ONLINE_COLLECTION 0x02 + #define VDADHSM_RAWSTAT_PERFORMANCE_ATTR 0x04 + #define VDADHSM_RAWSTAT_ERROR_RATE_ATTR 0x08 + #define VDADHSM_RAWSTAT_EVENT_COUNT_ATTR 0x10 + #define VDADHSM_RAWSTAT_SELF_PRESERVING_ATTR 0x20 + + u8 calc_attrib_status; + #define VDADHSM_CALCSTAT_UNKNOWN 0x00 + #define VDADHSM_CALCSTAT_GOOD 0x01 + #define VDADHSM_CALCSTAT_PREFAIL 0x02 + #define VDADHSM_CALCSTAT_OLDAGE 0x03 + + u8 reserved[4]; +}; + + +struct __packed atto_vda_metrics_info { + u8 data_version; + #define VDAMET_VERSION0 0x00 + #define VDAMET_VERSION VDAMET_VERSION0 + + u8 metrics_action; + #define VDAMET_METACT_NONE 0x00 + #define VDAMET_METACT_START 0x01 + #define VDAMET_METACT_STOP 0x02 + #define VDAMET_METACT_RETRIEVE 0x03 + #define VDAMET_METACT_CLEAR 0x04 + + u8 test_action; + #define VDAMET_TSTACT_NONE 0x00 + #define VDAMET_TSTACT_STRT_INIT 0x01 + #define VDAMET_TSTACT_STRT_READ 0x02 + #define VDAMET_TSTACT_STRT_VERIFY 0x03 + #define VDAMET_TSTACT_STRT_INIT_VERIFY 0x04 + #define VDAMET_TSTACT_STOP 0x05 + + u8 num_dev_indexes; + #define VDAMET_ALL_DEVICES 0xFF + + u16 dev_indexes[32]; + u8 reserved[12]; + struct atto_vda_sge sge[1]; +}; + + +struct __packed atto_vda_metrics_data { + u16 dev_index; + u16 length; + #define VDAMD_LEN_LAST 0x8000 + #define VDAMD_LEN_MASK 0x0FFF + + u32 flags; + #define VDAMDF_RUN 0x00000007 + #define VDAMDF_RUN_READ 0x00000001 + #define VDAMDF_RUN_WRITE 0x00000002 + #define VDAMDF_RUN_ALL 0x00000004 + #define VDAMDF_READ 0x00000010 + #define VDAMDF_WRITE 0x00000020 + #define VDAMDF_ALL 0x00000040 + #define VDAMDF_DRIVETEST 0x40000000 + #define VDAMDF_NEW 0x80000000 + + u64 total_read_data; + u64 total_write_data; + u64 total_read_io; + u64 total_write_io; + u64 read_start_time; + u64 read_stop_time; + u64 write_start_time; + u64 write_stop_time; + u64 read_maxio_time; + u64 wpvdadmetricsdatarite_maxio_time; + u64 read_totalio_time; + u64 write_totalio_time; + u64 read_total_errs; + u64 write_total_errs; + 
u64 read_recvrd_errs; + u64 write_recvrd_errs; + u64 miscompares; +}; + + +struct __packed atto_vda_schedule_info { + u8 schedule_type; + #define VDASI_SCHTYPE_ONETIME 0x01 + #define VDASI_SCHTYPE_DAILY 0x02 + #define VDASI_SCHTYPE_WEEKLY 0x03 + + u8 operation; + #define VDASI_OP_NONE 0x00 + #define VDASI_OP_CREATE 0x01 + #define VDASI_OP_CANCEL 0x02 + + u8 hour; + u8 minute; + u8 day; + #define VDASI_DAY_NONE 0x00 + + u8 progress; + #define VDASI_PROG_NONE 0xFF + + u8 event_type; + #define VDASI_EVTTYPE_SECT_SCAN 0x01 + #define VDASI_EVTTYPE_SECT_SCAN_PARITY 0x02 + #define VDASI_EVTTYPE_SECT_SCAN_PARITY_FIX 0x03 + + u8 recurrences; + #define VDASI_RECUR_FOREVER 0x00 + + u32 id; + #define VDASI_ID_NONE 0x00 + + char grp_name[15]; + u8 reserved[85]; +}; + + +struct __packed atto_vda_n_vcache_info { + u8 super_cap_status; + #define VDANVCI_SUPERCAP_NOT_PRESENT 0x00 + #define VDANVCI_SUPERCAP_FULLY_CHARGED 0x01 + #define VDANVCI_SUPERCAP_NOT_CHARGED 0x02 + + u8 nvcache_module_status; + #define VDANVCI_NVCACHEMODULE_NOT_PRESENT 0x00 + #define VDANVCI_NVCACHEMODULE_PRESENT 0x01 + + u8 protection_mode; + #define VDANVCI_PROTMODE_HI_PROTECT 0x00 + #define VDANVCI_PROTMODE_HI_PERFORM 0x01 + + u8 reserved[109]; +}; + + +struct __packed atto_vda_buzzer_info { + u8 status; + #define VDABUZZI_BUZZER_OFF 0x00 + #define VDABUZZI_BUZZER_ON 0x01 + #define VDABUZZI_BUZZER_LAST 0x02 + + u8 reserved[3]; + u32 duration; + #define VDABUZZI_DURATION_INDEFINITE 0xffffffff + + u8 reserved2[104]; +}; + + +struct __packed atto_vda_adapter_info { + u8 version; + #define VDAADAPINFO_VERSION0 0x00 + #define VDAADAPINFO_VERSION VDAADAPINFO_VERSION0 + + u8 reserved; + signed short utc_offset; + u32 utc_time; + u32 features; + #define VDA_ADAP_FEAT_IDENT 0x0001 + #define VDA_ADAP_FEAT_BUZZ_ERR 0x0002 + #define VDA_ADAP_FEAT_UTC_TIME 0x0004 + + u32 valid_features; + char active_config[33]; + u8 temp_count; + u8 fan_count; + u8 reserved3[61]; +}; + + +struct __packed atto_vda_temp_info { + u8 temp_index; + u8 max_op_temp; + u8 min_op_temp; + u8 op_temp_warn; + u8 temperature; + u8 type; + #define VDA_TEMP_TYPE_CPU 1 + + u8 reserved[106]; +}; + + +struct __packed atto_vda_fan_info { + u8 fan_index; + u8 status; + #define VDA_FAN_STAT_UNKNOWN 0 + #define VDA_FAN_STAT_NORMAL 1 + #define VDA_FAN_STAT_FAIL 2 + + u16 crit_threshold; + u16 warn_threshold; + u16 speed; + u8 reserved[104]; +}; + + +/* VDA management commands */ + +#define VDAMGT_DEV_SCAN 0x00 +#define VDAMGT_DEV_INFO 0x01 +#define VDAMGT_DEV_CLEAN 0x02 +#define VDAMGT_DEV_IDENTIFY 0x03 +#define VDAMGT_DEV_IDENTSTOP 0x04 +#define VDAMGT_DEV_PT_INFO 0x05 +#define VDAMGT_DEV_FEATURES 0x06 +#define VDAMGT_DEV_PT_FEATURES 0x07 +#define VDAMGT_DEV_HEALTH_REQ 0x08 +#define VDAMGT_DEV_METRICS 0x09 +#define VDAMGT_DEV_INFO2 0x0A +#define VDAMGT_DEV_OPERATION 0x0B +#define VDAMGT_DEV_INFO2_BYADDR 0x0C +#define VDAMGT_GRP_INFO 0x10 +#define VDAMGT_GRP_CREATE 0x11 +#define VDAMGT_GRP_DELETE 0x12 +#define VDAMGT_ADD_STORAGE 0x13 +#define VDAMGT_MEMBER_ADD 0x14 +#define VDAMGT_GRP_COMMIT 0x15 +#define VDAMGT_GRP_REBUILD 0x16 +#define VDAMGT_GRP_COMMIT_INIT 0x17 +#define VDAMGT_QUICK_RAID 0x18 +#define VDAMGT_GRP_FEATURES 0x19 +#define VDAMGT_GRP_COMMIT_INIT_AUTOMAP 0x1A +#define VDAMGT_QUICK_RAID_INIT_AUTOMAP 0x1B +#define VDAMGT_GRP_OPERATION 0x1C +#define VDAMGT_CFG_SAVE 0x20 +#define VDAMGT_LAST_ERROR 0x21 +#define VDAMGT_ADAP_INFO 0x22 +#define VDAMGT_ADAP_FEATURES 0x23 +#define VDAMGT_TEMP_INFO 0x24 +#define VDAMGT_FAN_INFO 0x25 +#define VDAMGT_PART_INFO 0x30 
+#define VDAMGT_PART_MAP 0x31 +#define VDAMGT_PART_UNMAP 0x32 +#define VDAMGT_PART_AUTOMAP 0x33 +#define VDAMGT_PART_SPLIT 0x34 +#define VDAMGT_PART_MERGE 0x35 +#define VDAMGT_SPARE_LIST 0x40 +#define VDAMGT_SPARE_ADD 0x41 +#define VDAMGT_SPARE_REMOVE 0x42 +#define VDAMGT_LOCAL_SPARE_ADD 0x43 +#define VDAMGT_SCHEDULE_EVENT 0x50 +#define VDAMGT_SCHEDULE_INFO 0x51 +#define VDAMGT_NVCACHE_INFO 0x60 +#define VDAMGT_NVCACHE_SET 0x61 +#define VDAMGT_BUZZER_INFO 0x70 +#define VDAMGT_BUZZER_SET 0x71 + + +struct __packed atto_vda_ae_hdr { + u8 bylength; + u8 byflags; + #define VDAAE_HDRF_EVENT_ACK 0x01 + + u8 byversion; + #define VDAAE_HDR_VER_0 0 + + u8 bytype; + #define VDAAE_HDR_TYPE_RAID 1 + #define VDAAE_HDR_TYPE_LU 2 + #define VDAAE_HDR_TYPE_DISK 3 + #define VDAAE_HDR_TYPE_RESET 4 + #define VDAAE_HDR_TYPE_LOG_INFO 5 + #define VDAAE_HDR_TYPE_LOG_WARN 6 + #define VDAAE_HDR_TYPE_LOG_CRIT 7 + #define VDAAE_HDR_TYPE_LOG_FAIL 8 + #define VDAAE_HDR_TYPE_NVC 9 + #define VDAAE_HDR_TYPE_TLG_INFO 10 + #define VDAAE_HDR_TYPE_TLG_WARN 11 + #define VDAAE_HDR_TYPE_TLG_CRIT 12 + #define VDAAE_HDR_TYPE_PWRMGT 13 + #define VDAAE_HDR_TYPE_MUTE 14 + #define VDAAE_HDR_TYPE_DEV 15 +}; + + +struct __packed atto_vda_ae_raid { + struct atto_vda_ae_hdr hdr; + u32 dwflags; + #define VDAAE_GROUP_STATE 0x00000001 + #define VDAAE_RBLD_STATE 0x00000002 + #define VDAAE_RBLD_PROG 0x00000004 + #define VDAAE_MEMBER_CHG 0x00000008 + #define VDAAE_PART_CHG 0x00000010 + #define VDAAE_MEM_STATE_CHG 0x00000020 + + u8 bygroup_state; + #define VDAAE_RAID_INVALID 0 + #define VDAAE_RAID_NEW 1 + #define VDAAE_RAID_WAITING 2 + #define VDAAE_RAID_ONLINE 3 + #define VDAAE_RAID_DEGRADED 4 + #define VDAAE_RAID_OFFLINE 5 + #define VDAAE_RAID_DELETED 6 + #define VDAAE_RAID_BASIC 7 + #define VDAAE_RAID_EXTREME 8 + #define VDAAE_RAID_UNKNOWN 9 + + u8 byrebuild_state; + #define VDAAE_RBLD_NONE 0 + #define VDAAE_RBLD_REBUILD 1 + #define VDAAE_RBLD_ERASE 2 + #define VDAAE_RBLD_PATTERN 3 + #define VDAAE_RBLD_CONV 4 + #define VDAAE_RBLD_FULL_INIT 5 + #define VDAAE_RBLD_QUICK_INIT 6 + #define VDAAE_RBLD_SECT_SCAN 7 + #define VDAAE_RBLD_SECT_SCAN_PARITY 8 + #define VDAAE_RBLD_SECT_SCAN_PARITY_FIX 9 + #define VDAAE_RBLD_RECOV_REBUILD 10 + #define VDAAE_RBLD_UNKNOWN 11 + + u8 byrebuild_progress; + u8 op_status; + #define VDAAE_GRPOPSTAT_MASK 0x0F + #define VDAAE_GRPOPSTAT_INVALID 0x00 + #define VDAAE_GRPOPSTAT_OK 0x01 + #define VDAAE_GRPOPSTAT_FAULTED 0x02 + #define VDAAE_GRPOPSTAT_HALTED 0x03 + #define VDAAE_GRPOPSTAT_INT 0x04 + #define VDAAE_GRPOPPROC_MASK 0xF0 + #define VDAAE_GRPOPPROC_STARTABLE 0x10 + #define VDAAE_GRPOPPROC_CANCELABLE 0x20 + #define VDAAE_GRPOPPROC_RESUMABLE 0x40 + #define VDAAE_GRPOPPROC_HALTABLE 0x80 + char acname[15]; + u8 byreserved; + u8 byreserved2[0x80 - 0x1C]; +}; + + +struct __packed atto_vda_ae_lu_tgt_lun { + u16 wtarget_id; + u8 bylun; + u8 byreserved; +}; + + +struct __packed atto_vda_ae_lu_tgt_lun_raid { + u16 wtarget_id; + u8 bylun; + u8 byreserved; + u32 dwinterleave; + u32 dwblock_size; +}; + + +struct __packed atto_vda_ae_lu { + struct atto_vda_ae_hdr hdr; + u32 dwevent; + #define VDAAE_LU_DISC 0x00000001 + #define VDAAE_LU_LOST 0x00000002 + #define VDAAE_LU_STATE 0x00000004 + #define VDAAE_LU_PASSTHROUGH 0x10000000 + #define VDAAE_LU_PHYS_ID 0x20000000 + + u8 bystate; + #define VDAAE_LU_UNDEFINED 0 + #define VDAAE_LU_NOT_PRESENT 1 + #define VDAAE_LU_OFFLINE 2 + #define VDAAE_LU_ONLINE 3 + #define VDAAE_LU_DEGRADED 4 + #define VDAAE_LU_FACTORY_DISABLED 5 + #define VDAAE_LU_DELETED 6 + #define VDAAE_LU_BUSSCAN 7 + 
#define VDAAE_LU_UNKNOWN 8 + + u8 byreserved; + u16 wphys_target_id; + + union { + struct atto_vda_ae_lu_tgt_lun tgtlun; + struct atto_vda_ae_lu_tgt_lun_raid tgtlun_raid; + } id; +}; + + +struct __packed atto_vda_ae_disk { + struct atto_vda_ae_hdr hdr; +}; + + +#define VDAAE_LOG_STRSZ 64 + +struct __packed atto_vda_ae_log { + struct atto_vda_ae_hdr hdr; + char aclog_ascii[VDAAE_LOG_STRSZ]; +}; + + +#define VDAAE_TLG_STRSZ 56 + +struct __packed atto_vda_ae_timestamp_log { + struct atto_vda_ae_hdr hdr; + u32 dwtimestamp; + char aclog_ascii[VDAAE_TLG_STRSZ]; +}; + + +struct __packed atto_vda_ae_nvc { + struct atto_vda_ae_hdr hdr; +}; + + +struct __packed atto_vda_ae_dev { + struct atto_vda_ae_hdr hdr; + struct atto_dev_addr devaddr; +}; + + +union atto_vda_ae { + struct atto_vda_ae_hdr hdr; + struct atto_vda_ae_disk disk; + struct atto_vda_ae_lu lu; + struct atto_vda_ae_raid raid; + struct atto_vda_ae_log log; + struct atto_vda_ae_timestamp_log tslog; + struct atto_vda_ae_nvc nvcache; + struct atto_vda_ae_dev dev; +}; + + +struct __packed atto_vda_date_and_time { + u8 flags; + #define VDA_DT_DAY_MASK 0x07 + #define VDA_DT_DAY_NONE 0x00 + #define VDA_DT_DAY_SUN 0x01 + #define VDA_DT_DAY_MON 0x02 + #define VDA_DT_DAY_TUE 0x03 + #define VDA_DT_DAY_WED 0x04 + #define VDA_DT_DAY_THU 0x05 + #define VDA_DT_DAY_FRI 0x06 + #define VDA_DT_DAY_SAT 0x07 + #define VDA_DT_PM 0x40 + #define VDA_DT_MILITARY 0x80 + + u8 seconds; + u8 minutes; + u8 hours; + u8 day; + u8 month; + u16 year; +}; + +#define SGE_LEN_LIMIT 0x003FFFFF /*! mask of segment length */ +#define SGE_LEN_MAX 0x003FF000 /*! maximum segment length */ +#define SGE_LAST 0x01000000 /*! last entry */ +#define SGE_ADDR_64 0x04000000 /*! 64-bit addressing flag */ +#define SGE_CHAIN 0x80000000 /*! chain descriptor flag */ +#define SGE_CHAIN_LEN 0x0000FFFF /*! mask of length in chain entries */ +#define SGE_CHAIN_SZ 0x00FF0000 /*! mask of size of chained buffer */ + + +struct __packed atto_vda_cfg_init { + struct atto_vda_date_and_time date_time; + u32 sgl_page_size; + u32 vda_version; + u32 fw_version; + u32 fw_build; + u32 fw_release; + u32 epoch_time; + u32 ioctl_tunnel; + #define VDA_ITF_MEM_RW 0x00000001 + #define VDA_ITF_TRACE 0x00000002 + #define VDA_ITF_SCSI_PASS_THRU 0x00000004 + #define VDA_ITF_GET_DEV_ADDR 0x00000008 + #define VDA_ITF_PHY_CTRL 0x00000010 + #define VDA_ITF_CONN_CTRL 0x00000020 + #define VDA_ITF_GET_DEV_INFO 0x00000040 + + u32 num_targets_backend; + u8 reserved[0x48]; +}; + + +/* configuration commands */ + +#define VDA_CFG_INIT 0x00 +#define VDA_CFG_GET_INIT 0x01 +#define VDA_CFG_GET_INIT2 0x02 + + +/*! physical region descriptor (PRD) aka scatter/gather entry */ + +struct __packed atto_physical_region_description { + u64 address; + u32 ctl_len; + #define PRD_LEN_LIMIT 0x003FFFFF + #define PRD_LEN_MAX 0x003FF000 + #define PRD_NXT_PRD_CNT 0x0000007F + #define PRD_CHAIN 0x01000000 + #define PRD_DATA 0x00000000 + #define PRD_INT_SEL 0xF0000000 + #define PRD_INT_SEL_F0 0x00000000 + #define PRD_INT_SEL_F1 0x40000000 + #define PRD_INT_SEL_F2 0x80000000 + #define PRD_INT_SEL_F3 0xc0000000 + #define PRD_INT_SEL_SRAM 0x10000000 + #define PRD_INT_SEL_PBSR 0x20000000 + +}; + +/* Request types. NOTE that ALL requests have the same layout for the first + * few bytes. 
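+ * (Editor's note, not part of the original patch: that shared prefix is
+ * what struct atto_vda_req_header below captures -- a length, the function
+ * code, one function-specific byte, the chain and SG-list offsets, and a
+ * 32-bit request handle.)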
+ */ +struct __packed atto_vda_req_header { + u32 length; + u8 function; + u8 variable1; + u8 chain_offset; + u8 sg_list_offset; + u32 handle; +}; + + +#define FCP_CDB_SIZE 16 + +struct __packed atto_vda_scsi_req { + u32 length; + u8 function; /* VDA_FUNC_SCSI */ + u8 sense_len; + u8 chain_offset; + u8 sg_list_offset; + u32 handle; + u32 flags; + #define FCP_CMND_LUN_MASK 0x000000FF + #define FCP_CMND_TA_MASK 0x00000700 + #define FCP_CMND_TA_SIMPL_Q 0x00000000 + #define FCP_CMND_TA_HEAD_Q 0x00000100 + #define FCP_CMND_TA_ORDRD_Q 0x00000200 + #define FCP_CMND_TA_ACA 0x00000400 + #define FCP_CMND_PRI_MASK 0x00007800 + #define FCP_CMND_TM_MASK 0x00FF0000 + #define FCP_CMND_ATS 0x00020000 + #define FCP_CMND_CTS 0x00040000 + #define FCP_CMND_LRS 0x00100000 + #define FCP_CMND_TRS 0x00200000 + #define FCP_CMND_CLA 0x00400000 + #define FCP_CMND_TRM 0x00800000 + #define FCP_CMND_DATA_DIR 0x03000000 + #define FCP_CMND_WRD 0x01000000 + #define FCP_CMND_RDD 0x02000000 + + u8 cdb[FCP_CDB_SIZE]; + union { + struct __packed { + u64 ppsense_buf; + u16 target_id; + u8 iblk_cnt_prd; + u8 reserved; + }; + + struct atto_physical_region_description sense_buff_prd; + }; + + union { + struct atto_vda_sge sge[1]; + + u32 abort_handle; + u32 dwords[245]; + struct atto_physical_region_description prd[1]; + } u; +}; + + +struct __packed atto_vda_flash_req { + u32 length; + u8 function; /* VDA_FUNC_FLASH */ + u8 sub_func; + u8 chain_offset; + u8 sg_list_offset; + u32 handle; + u32 flash_addr; + u8 checksum; + u8 rsvd[3]; + + union { + struct { + char file_name[16]; /* 8.3 fname, NULL term, wc=* */ + struct atto_vda_sge sge[1]; + } file; + + struct atto_vda_sge sge[1]; + struct atto_physical_region_description prde[2]; + } data; +}; + + +struct __packed atto_vda_diag_req { + u32 length; + u8 function; /* VDA_FUNC_DIAG */ + u8 sub_func; + #define VDA_DIAG_STATUS 0x00 + #define VDA_DIAG_RESET 0x01 + #define VDA_DIAG_PAUSE 0x02 + #define VDA_DIAG_RESUME 0x03 + #define VDA_DIAG_READ 0x04 + #define VDA_DIAG_WRITE 0x05 + + u8 chain_offset; + u8 sg_list_offset; + u32 handle; + u32 rsvd; + u64 local_addr; + struct atto_vda_sge sge[1]; +}; + + +struct __packed atto_vda_ae_req { + u32 length; + u8 function; /* VDA_FUNC_AE */ + u8 reserved1; + u8 chain_offset; + u8 sg_list_offset; + u32 handle; + + union { + struct atto_vda_sge sge[1]; + struct atto_physical_region_description prde[1]; + }; +}; + + +struct __packed atto_vda_cli_req { + u32 length; + u8 function; /* VDA_FUNC_CLI */ + u8 reserved1; + u8 chain_offset; + u8 sg_list_offset; + u32 handle; + u32 cmd_rsp_len; + struct atto_vda_sge sge[1]; +}; + + +struct __packed atto_vda_ioctl_req { + u32 length; + u8 function; /* VDA_FUNC_IOCTL */ + u8 sub_func; + u8 chain_offset; + u8 sg_list_offset; + u32 handle; + + union { + struct atto_vda_sge reserved_sge; + struct atto_physical_region_description reserved_prde; + }; + + union { + struct { + u32 ctrl_code; + u16 target_id; + u8 lun; + u8 reserved; + } csmi; + }; + + union { + struct atto_vda_sge sge[1]; + struct atto_physical_region_description prde[1]; + }; +}; + + +struct __packed atto_vda_cfg_req { + u32 length; + u8 function; /* VDA_FUNC_CFG */ + u8 sub_func; + u8 rsvd1; + u8 sg_list_offset; + u32 handle; + + union { + u8 bytes[116]; + struct atto_vda_cfg_init init; + struct atto_vda_sge sge; + struct atto_physical_region_description prde; + } data; +}; + + +struct __packed atto_vda_mgmt_req { + u32 length; + u8 function; /* VDA_FUNC_MGT */ + u8 mgt_func; + u8 chain_offset; + u8 sg_list_offset; + u32 handle; + u8 
scan_generation; + u8 payld_sglst_offset; + u16 dev_index; + u32 payld_length; + u32 pad; + union { + struct atto_vda_sge sge[2]; + struct atto_physical_region_description prde[2]; + }; + struct atto_vda_sge payld_sge[1]; +}; + + +union atto_vda_req { + struct atto_vda_scsi_req scsi; + struct atto_vda_flash_req flash; + struct atto_vda_diag_req diag; + struct atto_vda_ae_req ae; + struct atto_vda_cli_req cli; + struct atto_vda_ioctl_req ioctl; + struct atto_vda_cfg_req cfg; + struct atto_vda_mgmt_req mgt; + u8 bytes[1024]; +}; + +/* Outbound response structures */ + +struct __packed atto_vda_scsi_rsp { + u8 scsi_stat; + u8 sense_len; + u8 rsvd[2]; + u32 residual_length; +}; + +struct __packed atto_vda_flash_rsp { + u32 file_size; +}; + +struct __packed atto_vda_ae_rsp { + u32 length; +}; + +struct __packed atto_vda_cli_rsp { + u32 cmd_rsp_len; +}; + +struct __packed atto_vda_ioctl_rsp { + union { + struct { + u32 csmi_status; + u16 target_id; + u8 lun; + u8 reserved; + } csmi; + }; +}; + +struct __packed atto_vda_cfg_rsp { + u16 vda_version; + u16 fw_release; + u32 fw_build; +}; + +struct __packed atto_vda_mgmt_rsp { + u32 length; + u16 dev_index; + u8 scan_generation; +}; + +union atto_vda_func_rsp { + struct atto_vda_scsi_rsp scsi_rsp; + struct atto_vda_flash_rsp flash_rsp; + struct atto_vda_ae_rsp ae_rsp; + struct atto_vda_cli_rsp cli_rsp; + struct atto_vda_ioctl_rsp ioctl_rsp; + struct atto_vda_cfg_rsp cfg_rsp; + struct atto_vda_mgmt_rsp mgt_rsp; + u32 dwords[2]; +}; + +struct __packed atto_vda_ob_rsp { + u32 handle; + u8 req_stat; + u8 rsvd[3]; + + union atto_vda_func_rsp + func_rsp; +}; + +struct __packed atto_vda_ae_data { + u8 event_data[256]; +}; + +struct __packed atto_vda_mgmt_data { + union { + u8 bytes[112]; + struct atto_vda_devinfo dev_info; + struct atto_vda_grp_info grp_info; + struct atto_vdapart_info part_info; + struct atto_vda_dh_info dev_health_info; + struct atto_vda_metrics_info metrics_info; + struct atto_vda_schedule_info sched_info; + struct atto_vda_n_vcache_info nvcache_info; + struct atto_vda_buzzer_info buzzer_info; + } data; +}; + +union atto_vda_rsp_data { + struct atto_vda_ae_data ae_data; + struct atto_vda_mgmt_data mgt_data; + u8 sense_data[252]; + #define SENSE_DATA_SZ 252 + u8 bytes[256]; +}; + +#endif diff --git a/drivers/scsi/esas2r/esas2r.h b/drivers/scsi/esas2r/esas2r.h new file mode 100644 index 000000000000..0838e265e0b9 --- /dev/null +++ b/drivers/scsi/esas2r/esas2r.h @@ -0,0 +1,1441 @@ +/* + * linux/drivers/scsi/esas2r/esas2r.h + * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers + * + * Copyright (c) 2001-2013 ATTO Technology, Inc. + * (mailto:linuxdrivers@attotech.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. 
Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. + * + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, + * USA. + */ + +#include <linux/kernel.h> +#include <linux/delay.h> +#include <linux/pci.h> +#include <linux/proc_fs.h> +#include <linux/workqueue.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/vmalloc.h> +#include <scsi/scsi.h> +#include <scsi/scsi_host.h> +#include <scsi/scsi_cmnd.h> +#include <scsi/scsi_device.h> +#include <scsi/scsi_eh.h> +#include <scsi/scsi_tcq.h> + +#include "esas2r_log.h" +#include "atioctl.h" +#include "atvda.h" + +#ifndef ESAS2R_H +#define ESAS2R_H + +/* Global Variables */ +extern struct esas2r_adapter *esas2r_adapters[]; +extern u8 *esas2r_buffered_ioctl; +extern dma_addr_t esas2r_buffered_ioctl_addr; +extern u32 esas2r_buffered_ioctl_size; +extern struct pci_dev *esas2r_buffered_ioctl_pcid; +#define SGL_PG_SZ_MIN 64 +#define SGL_PG_SZ_MAX 1024 +extern int sgl_page_size; +#define NUM_SGL_MIN 8 +#define NUM_SGL_MAX 2048 +extern int num_sg_lists; +#define NUM_REQ_MIN 4 +#define NUM_REQ_MAX 256 +extern int num_requests; +#define NUM_AE_MIN 2 +#define NUM_AE_MAX 8 +extern int num_ae_requests; +extern int cmd_per_lun; +extern int can_queue; +extern int esas2r_max_sectors; +extern int sg_tablesize; +extern int interrupt_mode; +extern int num_io_requests; + +/* Macro definitions */ +#define ESAS2R_MAX_ID 255 +#define MAX_ADAPTERS 32 +#define ESAS2R_DRVR_NAME "esas2r" +#define ESAS2R_LONGNAME "ATTO ExpressSAS 6GB RAID Adapter" +#define ESAS2R_MAX_DEVICES 32 +#define ATTONODE_NAME "ATTONode" +#define ESAS2R_MAJOR_REV 1 +#define ESAS2R_MINOR_REV 00 +#define ESAS2R_VERSION_STR DEFINED_NUM_TO_STR(ESAS2R_MAJOR_REV) "." 
\ + DEFINED_NUM_TO_STR(ESAS2R_MINOR_REV) +#define ESAS2R_COPYRIGHT_YEARS "2001-2013" +#define ESAS2R_DEFAULT_SGL_PAGE_SIZE 384 +#define ESAS2R_DEFAULT_CMD_PER_LUN 64 +#define ESAS2R_DEFAULT_NUM_SG_LISTS 1024 +#define DEFINED_NUM_TO_STR(num) NUM_TO_STR(num) +#define NUM_TO_STR(num) #num + +#define ESAS2R_SGL_ALIGN 16 +#define ESAS2R_LIST_ALIGN 16 +#define ESAS2R_LIST_EXTRA ESAS2R_NUM_EXTRA +#define ESAS2R_DATA_BUF_LEN 256 +#define ESAS2R_DEFAULT_TMO 5000 +#define ESAS2R_DISC_BUF_LEN 512 +#define ESAS2R_FWCOREDUMP_SZ 0x80000 +#define ESAS2R_NUM_PHYS 8 +#define ESAS2R_TARG_ID_INV 0xFFFF +#define ESAS2R_INT_STS_MASK MU_INTSTAT_MASK +#define ESAS2R_INT_ENB_MASK MU_INTSTAT_MASK +#define ESAS2R_INT_DIS_MASK 0 +#define ESAS2R_MAX_TARGETS 256 +#define ESAS2R_KOBJ_NAME_LEN 20 + +/* u16 (WORD) component macros */ +#define LOBYTE(w) ((u8)(u16)(w)) +#define HIBYTE(w) ((u8)(((u16)(w)) >> 8)) +#define MAKEWORD(lo, hi) ((u16)((u8)(lo) | ((u16)(u8)(hi) << 8))) + +/* u32 (DWORD) component macros */ +#define LOWORD(d) ((u16)(u32)(d)) +#define HIWORD(d) ((u16)(((u32)(d)) >> 16)) +#define MAKEDWORD(lo, hi) ((u32)((u16)(lo) | ((u32)(u16)(hi) << 16))) + +/* macro to get the lowest nonzero bit of a value */ +#define LOBIT(x) ((x) & (0 - (x))) + +/* These functions are provided to access the chip's control registers. + * The register is specified by its byte offset from the register base + * for the adapter. + */ +#define esas2r_read_register_dword(a, reg) \ + readl((void __iomem *)a->regs + (reg) + MW_REG_OFFSET_HWREG) + +#define esas2r_write_register_dword(a, reg, data) \ + writel(data, (void __iomem *)(a->regs + (reg) + MW_REG_OFFSET_HWREG)) + +#define esas2r_flush_register_dword(a, r) esas2r_read_register_dword(a, r) + +/* This function is provided to access the chip's data window. The + * register is specified by its byte offset from the window base + * for the adapter. + */ +#define esas2r_read_data_byte(a, reg) \ + readb((void __iomem *)a->data_window + (reg)) + +/* ATTO vendor and device Ids */ +#define ATTO_VENDOR_ID 0x117C +#define ATTO_DID_INTEL_IOP348 0x002C +#define ATTO_DID_MV_88RC9580 0x0049 +#define ATTO_DID_MV_88RC9580TS 0x0066 +#define ATTO_DID_MV_88RC9580TSE 0x0067 +#define ATTO_DID_MV_88RC9580TL 0x0068 + +/* ATTO subsystem device Ids */ +#define ATTO_SSDID_TBT 0x4000 +#define ATTO_TSSC_3808 0x4066 +#define ATTO_TSSC_3808E 0x4067 +#define ATTO_TLSH_1068 0x4068 +#define ATTO_ESAS_R680 0x0049 +#define ATTO_ESAS_R608 0x004A +#define ATTO_ESAS_R60F 0x004B +#define ATTO_ESAS_R6F0 0x004C +#define ATTO_ESAS_R644 0x004D +#define ATTO_ESAS_R648 0x004E + +/* + * flash definitions & structures + * define the code types + */ +#define FBT_CPYR 0xAA00 +#define FBT_SETUP 0xAA02 +#define FBT_FLASH_VER 0xAA04 + +/* offsets to various locations in flash */ +#define FLS_OFFSET_BOOT (u32)(0x00700000) +#define FLS_OFFSET_NVR (u32)(0x007C0000) +#define FLS_OFFSET_CPYR FLS_OFFSET_NVR +#define FLS_LENGTH_BOOT (FLS_OFFSET_CPYR - FLS_OFFSET_BOOT) +#define FLS_BLOCK_SIZE (u32)(0x00020000) +#define FI_NVR_2KB 0x0800 +#define FI_NVR_8KB 0x2000 +#define FM_BUF_SZ 0x800 + +/* + * marvell frey (88R9580) register definitions + * chip revision identifiers + */ +#define MVR_FREY_B2 0xB2 + +/* + * memory window definitions. window 0 is the data window with definitions + * of MW_DATA_XXX. window 1 is the register window with definitions of + * MW_REG_XXX. 
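+ * (Editor's note, not part of the original patch: the register access
+ * macros defined above reach window 1 by adding MW_REG_OFFSET_HWREG to the
+ * caller's offset, while esas2r_read_data_byte() goes through window 0 via
+ * the adapter's data_window mapping.)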
+ */ +#define MW_REG_WINDOW_SIZE (u32)(0x00040000) +#define MW_REG_OFFSET_HWREG (u32)(0x00000000) +#define MW_REG_OFFSET_PCI (u32)(0x00008000) +#define MW_REG_PCI_HWREG_DELTA (MW_REG_OFFSET_PCI - MW_REG_OFFSET_HWREG) +#define MW_DATA_WINDOW_SIZE (u32)(0x00020000) +#define MW_DATA_ADDR_SER_FLASH (u32)(0xEC000000) +#define MW_DATA_ADDR_SRAM (u32)(0xF4000000) +#define MW_DATA_ADDR_PAR_FLASH (u32)(0xFC000000) + +/* + * the following registers are for the communication + * list interface (AKA message unit (MU)) + */ +#define MU_IN_LIST_ADDR_LO (u32)(0x00004000) +#define MU_IN_LIST_ADDR_HI (u32)(0x00004004) + +#define MU_IN_LIST_WRITE (u32)(0x00004018) + #define MU_ILW_TOGGLE (u32)(0x00004000) + +#define MU_IN_LIST_READ (u32)(0x0000401C) + #define MU_ILR_TOGGLE (u32)(0x00004000) + #define MU_ILIC_LIST (u32)(0x0000000F) + #define MU_ILIC_LIST_F0 (u32)(0x00000000) + #define MU_ILIC_DEST (u32)(0x00000F00) + #define MU_ILIC_DEST_DDR (u32)(0x00000200) +#define MU_IN_LIST_IFC_CONFIG (u32)(0x00004028) + +#define MU_IN_LIST_CONFIG (u32)(0x0000402C) + #define MU_ILC_ENABLE (u32)(0x00000001) + #define MU_ILC_ENTRY_MASK (u32)(0x000000F0) + #define MU_ILC_ENTRY_4_DW (u32)(0x00000020) + #define MU_ILC_DYNAMIC_SRC (u32)(0x00008000) + #define MU_ILC_NUMBER_MASK (u32)(0x7FFF0000) + #define MU_ILC_NUMBER_SHIFT 16 + +#define MU_OUT_LIST_ADDR_LO (u32)(0x00004050) +#define MU_OUT_LIST_ADDR_HI (u32)(0x00004054) + +#define MU_OUT_LIST_COPY_PTR_LO (u32)(0x00004058) +#define MU_OUT_LIST_COPY_PTR_HI (u32)(0x0000405C) + +#define MU_OUT_LIST_WRITE (u32)(0x00004068) + #define MU_OLW_TOGGLE (u32)(0x00004000) + +#define MU_OUT_LIST_COPY (u32)(0x0000406C) + #define MU_OLC_TOGGLE (u32)(0x00004000) + #define MU_OLC_WRT_PTR (u32)(0x00003FFF) + +#define MU_OUT_LIST_IFC_CONFIG (u32)(0x00004078) + #define MU_OLIC_LIST (u32)(0x0000000F) + #define MU_OLIC_LIST_F0 (u32)(0x00000000) + #define MU_OLIC_SOURCE (u32)(0x00000F00) + #define MU_OLIC_SOURCE_DDR (u32)(0x00000200) + +#define MU_OUT_LIST_CONFIG (u32)(0x0000407C) + #define MU_OLC_ENABLE (u32)(0x00000001) + #define MU_OLC_ENTRY_MASK (u32)(0x000000F0) + #define MU_OLC_ENTRY_4_DW (u32)(0x00000020) + #define MU_OLC_NUMBER_MASK (u32)(0x7FFF0000) + #define MU_OLC_NUMBER_SHIFT 16 + +#define MU_OUT_LIST_INT_STAT (u32)(0x00004088) + #define MU_OLIS_INT (u32)(0x00000001) + +#define MU_OUT_LIST_INT_MASK (u32)(0x0000408C) + #define MU_OLIS_MASK (u32)(0x00000001) + +/* + * the maximum size of the communication lists is two greater than the + * maximum amount of VDA requests. the extra are to prevent queue overflow. 
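+ * (Editor's note, not part of the original patch: with the values defined
+ * below this works out to ESAS2R_MAX_COMM_LIST_SIZE = ESAS2R_MAX_NUM_REQS +
+ * ESAS2R_NUM_EXTRA = 256 + 2 = 258 list entries.)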
+ */ +#define ESAS2R_MAX_NUM_REQS 256 +#define ESAS2R_NUM_EXTRA 2 +#define ESAS2R_MAX_COMM_LIST_SIZE (ESAS2R_MAX_NUM_REQS + ESAS2R_NUM_EXTRA) + +/* + * the following registers are for the CPU interface + */ +#define MU_CTL_STATUS_IN (u32)(0x00010108) + #define MU_CTL_IN_FULL_RST (u32)(0x00000020) +#define MU_CTL_STATUS_IN_B2 (u32)(0x00010130) + #define MU_CTL_IN_FULL_RST2 (u32)(0x80000000) +#define MU_DOORBELL_IN (u32)(0x00010460) + #define DRBL_RESET_BUS (u32)(0x00000002) + #define DRBL_PAUSE_AE (u32)(0x00000004) + #define DRBL_RESUME_AE (u32)(0x00000008) + #define DRBL_MSG_IFC_DOWN (u32)(0x00000010) + #define DRBL_FLASH_REQ (u32)(0x00000020) + #define DRBL_FLASH_DONE (u32)(0x00000040) + #define DRBL_FORCE_INT (u32)(0x00000080) + #define DRBL_MSG_IFC_INIT (u32)(0x00000100) + #define DRBL_POWER_DOWN (u32)(0x00000200) + #define DRBL_DRV_VER_1 (u32)(0x00010000) + #define DRBL_DRV_VER DRBL_DRV_VER_1 +#define MU_DOORBELL_IN_ENB (u32)(0x00010464) +#define MU_DOORBELL_OUT (u32)(0x00010480) + #define DRBL_PANIC_REASON_MASK (u32)(0x00F00000) + #define DRBL_UNUSED_HANDLER (u32)(0x00100000) + #define DRBL_UNDEF_INSTR (u32)(0x00200000) + #define DRBL_PREFETCH_ABORT (u32)(0x00300000) + #define DRBL_DATA_ABORT (u32)(0x00400000) + #define DRBL_JUMP_TO_ZERO (u32)(0x00500000) + #define DRBL_FW_RESET (u32)(0x00080000) + #define DRBL_FW_VER_MSK (u32)(0x00070000) + #define DRBL_FW_VER_0 (u32)(0x00000000) + #define DRBL_FW_VER_1 (u32)(0x00010000) + #define DRBL_FW_VER DRBL_FW_VER_1 +#define MU_DOORBELL_OUT_ENB (u32)(0x00010484) + #define DRBL_ENB_MASK (u32)(0x00F803FF) +#define MU_INT_STATUS_OUT (u32)(0x00010200) + #define MU_INTSTAT_POST_OUT (u32)(0x00000010) + #define MU_INTSTAT_DRBL_IN (u32)(0x00000100) + #define MU_INTSTAT_DRBL (u32)(0x00001000) + #define MU_INTSTAT_MASK (u32)(0x00001010) +#define MU_INT_MASK_OUT (u32)(0x0001020C) + +/* PCI express registers accessed via window 1 */ +#define MVR_PCI_WIN1_REMAP (u32)(0x00008438) + #define MVRPW1R_ENABLE (u32)(0x00000001) + + +/* structures */ + +/* inbound list dynamic source entry */ +struct esas2r_inbound_list_source_entry { + u64 address; + u32 length; + #define HWILSE_INTERFACE_F0 0x00000000 + u32 reserved; +}; + +/* PCI data structure in expansion ROM images */ +struct __packed esas2r_boot_header { + char signature[4]; + u16 vendor_id; + u16 device_id; + u16 VPD; + u16 struct_length; + u8 struct_revision; + u8 class_code[3]; + u16 image_length; + u16 code_revision; + u8 code_type; + #define CODE_TYPE_PC 0 + #define CODE_TYPE_OPEN 1 + #define CODE_TYPE_EFI 3 + u8 indicator; + #define INDICATOR_LAST 0x80 + u8 reserved[2]; +}; + +struct __packed esas2r_boot_image { + u16 signature; + u8 reserved[22]; + u16 header_offset; + u16 pnp_offset; +}; + +struct __packed esas2r_pc_image { + u16 signature; + u8 length; + u8 entry_point[3]; + u8 checksum; + u16 image_end; + u16 min_size; + u8 rom_flags; + u8 reserved[12]; + u16 header_offset; + u16 pnp_offset; + struct esas2r_boot_header boot_image; +}; + +struct __packed esas2r_efi_image { + u16 signature; + u16 length; + u32 efi_signature; + #define EFI_ROM_SIG 0x00000EF1 + u16 image_type; + #define EFI_IMAGE_APP 10 + #define EFI_IMAGE_BSD 11 + #define EFI_IMAGE_RTD 12 + u16 machine_type; + #define EFI_MACHINE_IA32 0x014c + #define EFI_MACHINE_IA64 0x0200 + #define EFI_MACHINE_X64 0x8664 + #define EFI_MACHINE_EBC 0x0EBC + u16 compression; + #define EFI_UNCOMPRESSED 0x0000 + #define EFI_COMPRESSED 0x0001 + u8 reserved[8]; + u16 efi_offset; + u16 header_offset; + u16 reserved2; + struct esas2r_boot_header 
boot_image; +}; + +struct esas2r_adapter; +struct esas2r_sg_context; +struct esas2r_request; + +typedef void (*RQCALLBK) (struct esas2r_adapter *a, + struct esas2r_request *rq); +typedef bool (*RQBUILDSGL) (struct esas2r_adapter *a, + struct esas2r_sg_context *sgc); + +struct esas2r_component_header { + u8 img_type; + #define CH_IT_FW 0x00 + #define CH_IT_NVR 0x01 + #define CH_IT_BIOS 0x02 + #define CH_IT_MAC 0x03 + #define CH_IT_CFG 0x04 + #define CH_IT_EFI 0x05 + u8 status; + #define CH_STAT_PENDING 0xff + #define CH_STAT_FAILED 0x00 + #define CH_STAT_SUCCESS 0x01 + #define CH_STAT_RETRY 0x02 + #define CH_STAT_INVALID 0x03 + u8 pad[2]; + u32 version; + u32 length; + u32 image_offset; +}; + +#define FI_REL_VER_SZ 16 + +struct esas2r_flash_img_v0 { + u8 fi_version; + #define FI_VERSION_0 00 + u8 status; + u8 adap_typ; + u8 action; + u32 length; + u16 checksum; + u16 driver_error; + u16 flags; + u16 num_comps; + #define FI_NUM_COMPS_V0 5 + u8 rel_version[FI_REL_VER_SZ]; + struct esas2r_component_header cmp_hdr[FI_NUM_COMPS_V0]; + u8 scratch_buf[FM_BUF_SZ]; +}; + +struct esas2r_flash_img { + u8 fi_version; + #define FI_VERSION_1 01 + u8 status; + #define FI_STAT_SUCCESS 0x00 + #define FI_STAT_FAILED 0x01 + #define FI_STAT_REBOOT 0x02 + #define FI_STAT_ADAPTYP 0x03 + #define FI_STAT_INVALID 0x04 + #define FI_STAT_CHKSUM 0x05 + #define FI_STAT_LENGTH 0x06 + #define FI_STAT_UNKNOWN 0x07 + #define FI_STAT_IMG_VER 0x08 + #define FI_STAT_BUSY 0x09 + #define FI_STAT_DUAL 0x0A + #define FI_STAT_MISSING 0x0B + #define FI_STAT_UNSUPP 0x0C + #define FI_STAT_ERASE 0x0D + #define FI_STAT_FLASH 0x0E + #define FI_STAT_DEGRADED 0x0F + u8 adap_typ; + #define FI_AT_UNKNWN 0xFF + #define FI_AT_SUN_LAKE 0x0B + #define FI_AT_MV_9580 0x0F + u8 action; + #define FI_ACT_DOWN 0x00 + #define FI_ACT_UP 0x01 + #define FI_ACT_UPSZ 0x02 + #define FI_ACT_MAX 0x02 + #define FI_ACT_DOWN1 0x80 + u32 length; + u16 checksum; + u16 driver_error; + u16 flags; + #define FI_FLG_NVR_DEF 0x0001 + u16 num_comps; + #define FI_NUM_COMPS_V1 6 + u8 rel_version[FI_REL_VER_SZ]; + struct esas2r_component_header cmp_hdr[FI_NUM_COMPS_V1]; + u8 scratch_buf[FM_BUF_SZ]; +}; + +/* definitions for flash script (FS) commands */ +struct esas2r_ioctlfs_command { + u8 command; + #define ESAS2R_FS_CMD_ERASE 0 + #define ESAS2R_FS_CMD_READ 1 + #define ESAS2R_FS_CMD_BEGINW 2 + #define ESAS2R_FS_CMD_WRITE 3 + #define ESAS2R_FS_CMD_COMMIT 4 + #define ESAS2R_FS_CMD_CANCEL 5 + u8 checksum; + u8 reserved[2]; + u32 flash_addr; + u32 length; + u32 image_offset; +}; + +struct esas2r_ioctl_fs { + u8 version; + #define ESAS2R_FS_VER 0 + u8 status; + u8 driver_error; + u8 adap_type; + #define ESAS2R_FS_AT_ESASRAID2 3 + #define ESAS2R_FS_AT_TSSASRAID2 4 + #define ESAS2R_FS_AT_TSSASRAID2E 5 + #define ESAS2R_FS_AT_TLSASHBA 6 + u8 driver_ver; + u8 reserved[11]; + struct esas2r_ioctlfs_command command; + u8 data[1]; +}; + +struct esas2r_sas_nvram { + u8 signature[4]; + u8 version; + #define SASNVR_VERSION_0 0x00 + #define SASNVR_VERSION SASNVR_VERSION_0 + u8 checksum; + #define SASNVR_CKSUM_SEED 0x5A + u8 max_lun_for_target; + u8 pci_latency; + #define SASNVR_PCILAT_DIS 0x00 + #define SASNVR_PCILAT_MIN 0x10 + #define SASNVR_PCILAT_MAX 0xF8 + u8 options1; + #define SASNVR1_BOOT_DRVR 0x01 + #define SASNVR1_BOOT_SCAN 0x02 + #define SASNVR1_DIS_PCI_MWI 0x04 + #define SASNVR1_FORCE_ORD_Q 0x08 + #define SASNVR1_CACHELINE_0 0x10 + #define SASNVR1_DIS_DEVSORT 0x20 + #define SASNVR1_PWR_MGT_EN 0x40 + #define SASNVR1_WIDEPORT 0x80 + u8 options2; + #define SASNVR2_SINGLE_BUS 0x01 + 
#define SASNVR2_SLOT_BIND 0x02 + #define SASNVR2_EXP_PROG 0x04 + #define SASNVR2_CMDTHR_LUN 0x08 + #define SASNVR2_HEARTBEAT 0x10 + #define SASNVR2_INT_CONNECT 0x20 + #define SASNVR2_SW_MUX_CTRL 0x40 + #define SASNVR2_DISABLE_NCQ 0x80 + u8 int_coalescing; + #define SASNVR_COAL_DIS 0x00 + #define SASNVR_COAL_LOW 0x01 + #define SASNVR_COAL_MED 0x02 + #define SASNVR_COAL_HI 0x03 + u8 cmd_throttle; + #define SASNVR_CMDTHR_NONE 0x00 + u8 dev_wait_time; + u8 dev_wait_count; + u8 spin_up_delay; + #define SASNVR_SPINUP_MAX 0x14 + u8 ssp_align_rate; + u8 sas_addr[8]; + u8 phy_speed[16]; + #define SASNVR_SPEED_AUTO 0x00 + #define SASNVR_SPEED_1_5GB 0x01 + #define SASNVR_SPEED_3GB 0x02 + #define SASNVR_SPEED_6GB 0x03 + #define SASNVR_SPEED_12GB 0x04 + u8 phy_mux[16]; + #define SASNVR_MUX_DISABLED 0x00 + #define SASNVR_MUX_1_5GB 0x01 + #define SASNVR_MUX_3GB 0x02 + #define SASNVR_MUX_6GB 0x03 + u8 phy_flags[16]; + #define SASNVR_PHF_DISABLED 0x01 + #define SASNVR_PHF_RD_ONLY 0x02 + u8 sort_type; + #define SASNVR_SORT_SAS_ADDR 0x00 + #define SASNVR_SORT_H308_CONN 0x01 + #define SASNVR_SORT_PHY_ID 0x02 + #define SASNVR_SORT_SLOT_ID 0x03 + u8 dpm_reqcmd_lmt; + u8 dpm_stndby_time; + u8 dpm_active_time; + u8 phy_target_id[16]; + #define SASNVR_PTI_DISABLED 0xFF + u8 virt_ses_mode; + #define SASNVR_VSMH_DISABLED 0x00 + u8 read_write_mode; + #define SASNVR_RWM_DEFAULT 0x00 + u8 link_down_to; + u8 reserved[0xA1]; +}; + +typedef u32 (*PGETPHYSADDR) (struct esas2r_sg_context *sgc, u64 *addr); + +struct esas2r_sg_context { + struct esas2r_adapter *adapter; + struct esas2r_request *first_req; + u32 length; + u8 *cur_offset; + PGETPHYSADDR get_phys_addr; + union { + struct { + struct atto_vda_sge *curr; + struct atto_vda_sge *last; + struct atto_vda_sge *limit; + struct atto_vda_sge *chain; + } a64; + struct { + struct atto_physical_region_description *curr; + struct atto_physical_region_description *chain; + u32 sgl_max_cnt; + u32 sge_cnt; + } prd; + } sge; + struct scatterlist *cur_sgel; + u8 *exp_offset; + int num_sgel; + int sgel_count; +}; + +struct esas2r_target { + u8 flags; + #define TF_PASS_THRU 0x01 + #define TF_USED 0x02 + u8 new_target_state; + u8 target_state; + u8 buffered_target_state; +#define TS_NOT_PRESENT 0x00 +#define TS_PRESENT 0x05 +#define TS_LUN_CHANGE 0x06 +#define TS_INVALID 0xFF + u32 block_size; + u32 inter_block; + u32 inter_byte; + u16 virt_targ_id; + u16 phys_targ_id; + u8 identifier_len; + u64 sas_addr; + u8 identifier[60]; + struct atto_vda_ae_lu lu_event; +}; + +struct esas2r_request { + struct list_head comp_list; + struct list_head req_list; + union atto_vda_req *vrq; + struct esas2r_mem_desc *vrq_md; + union { + void *data_buf; + union atto_vda_rsp_data *vda_rsp_data; + }; + u8 *sense_buf; + struct list_head sg_table_head; + struct esas2r_mem_desc *sg_table; + u32 timeout; + #define RQ_TIMEOUT_S1 0xFFFFFFFF + #define RQ_TIMEOUT_S2 0xFFFFFFFE + #define RQ_MAX_TIMEOUT 0xFFFFFFFD + u16 target_id; + u8 req_type; + #define RT_INI_REQ 0x01 + #define RT_DISC_REQ 0x02 + u8 sense_len; + union atto_vda_func_rsp func_rsp; + RQCALLBK comp_cb; + RQCALLBK interrupt_cb; + void *interrupt_cx; + u8 flags; + #define RF_1ST_IBLK_BASE 0x04 + #define RF_FAILURE_OK 0x08 + u8 req_stat; + u16 vda_req_sz; + #define RQ_SIZE_DEFAULT 0 + u64 lba; + RQCALLBK aux_req_cb; + void *aux_req_cx; + u32 blk_len; + u32 max_blk_len; + union { + struct scsi_cmnd *cmd; + u8 *task_management_status_ptr; + }; +}; + +struct esas2r_flash_context { + struct esas2r_flash_img *fi; + RQCALLBK interrupt_cb; + u8 *sgc_offset; + 
u8 *scratch; + u32 fi_hdr_len; + u8 task; + #define FMTSK_ERASE_BOOT 0 + #define FMTSK_WRTBIOS 1 + #define FMTSK_READBIOS 2 + #define FMTSK_WRTMAC 3 + #define FMTSK_READMAC 4 + #define FMTSK_WRTEFI 5 + #define FMTSK_READEFI 6 + #define FMTSK_WRTCFG 7 + #define FMTSK_READCFG 8 + u8 func; + u16 num_comps; + u32 cmp_len; + u32 flsh_addr; + u32 curr_len; + u8 comp_typ; + struct esas2r_sg_context sgc; +}; + +struct esas2r_disc_context { + u8 disc_evt; + #define DCDE_DEV_CHANGE 0x01 + #define DCDE_DEV_SCAN 0x02 + u8 state; + #define DCS_DEV_RMV 0x00 + #define DCS_DEV_ADD 0x01 + #define DCS_BLOCK_DEV_SCAN 0x02 + #define DCS_RAID_GRP_INFO 0x03 + #define DCS_PART_INFO 0x04 + #define DCS_PT_DEV_INFO 0x05 + #define DCS_PT_DEV_ADDR 0x06 + #define DCS_DISC_DONE 0xFF + u16 flags; + #define DCF_DEV_CHANGE 0x0001 + #define DCF_DEV_SCAN 0x0002 + #define DCF_POLLED 0x8000 + u32 interleave; + u32 block_size; + u16 dev_ix; + u8 part_num; + u8 raid_grp_ix; + char raid_grp_name[16]; + struct esas2r_target *curr_targ; + u16 curr_virt_id; + u16 curr_phys_id; + u8 scan_gen; + u8 dev_addr_type; + u64 sas_addr; +}; + +struct esas2r_mem_desc { + struct list_head next_desc; + void *virt_addr; + u64 phys_addr; + void *pad; + void *esas2r_data; + u32 esas2r_param; + u32 size; +}; + +enum fw_event_type { + fw_event_null, + fw_event_lun_change, + fw_event_present, + fw_event_not_present, + fw_event_vda_ae +}; + +struct esas2r_vda_ae { + u32 signature; +#define ESAS2R_VDA_EVENT_SIG 0x4154544F + u8 bus_number; + u8 devfn; + u8 pad[2]; + union atto_vda_ae vda_ae; +}; + +struct esas2r_fw_event_work { + struct list_head list; + struct delayed_work work; + struct esas2r_adapter *a; + enum fw_event_type type; + u8 data[sizeof(struct esas2r_vda_ae)]; +}; + +enum state { + FW_INVALID_ST, + FW_STATUS_ST, + FW_COMMAND_ST +}; + +struct esas2r_firmware { + enum state state; + struct esas2r_flash_img header; + u8 *data; + u64 phys; + int orig_len; + void *header_buff; + u64 header_buff_phys; +}; + +struct esas2r_adapter { + struct esas2r_target targetdb[ESAS2R_MAX_TARGETS]; + struct esas2r_target *targetdb_end; + unsigned char *regs; + unsigned char *data_window; + u32 volatile flags; + #define AF_PORT_CHANGE (u32)(0x00000001) + #define AF_CHPRST_NEEDED (u32)(0x00000004) + #define AF_CHPRST_PENDING (u32)(0x00000008) + #define AF_CHPRST_DETECTED (u32)(0x00000010) + #define AF_BUSRST_NEEDED (u32)(0x00000020) + #define AF_BUSRST_PENDING (u32)(0x00000040) + #define AF_BUSRST_DETECTED (u32)(0x00000080) + #define AF_DISABLED (u32)(0x00000100) + #define AF_FLASH_LOCK (u32)(0x00000200) + #define AF_OS_RESET (u32)(0x00002000) + #define AF_FLASHING (u32)(0x00004000) + #define AF_POWER_MGT (u32)(0x00008000) + #define AF_NVR_VALID (u32)(0x00010000) + #define AF_DEGRADED_MODE (u32)(0x00020000) + #define AF_DISC_PENDING (u32)(0x00040000) + #define AF_TASKLET_SCHEDULED (u32)(0x00080000) + #define AF_HEARTBEAT (u32)(0x00200000) + #define AF_HEARTBEAT_ENB (u32)(0x00400000) + #define AF_NOT_PRESENT (u32)(0x00800000) + #define AF_CHPRST_STARTED (u32)(0x01000000) + #define AF_FIRST_INIT (u32)(0x02000000) + #define AF_POWER_DOWN (u32)(0x04000000) + #define AF_DISC_IN_PROG (u32)(0x08000000) + #define AF_COMM_LIST_TOGGLE (u32)(0x10000000) + #define AF_LEGACY_SGE_MODE (u32)(0x20000000) + #define AF_DISC_POLLED (u32)(0x40000000) + u32 volatile flags2; + #define AF2_SERIAL_FLASH (u32)(0x00000001) + #define AF2_DEV_SCAN (u32)(0x00000002) + #define AF2_DEV_CNT_OK (u32)(0x00000004) + #define AF2_COREDUMP_AVAIL (u32)(0x00000008) + #define AF2_COREDUMP_SAVED 
(u32)(0x00000010) + #define AF2_VDA_POWER_DOWN (u32)(0x00000100) + #define AF2_THUNDERLINK (u32)(0x00000200) + #define AF2_THUNDERBOLT (u32)(0x00000400) + #define AF2_INIT_DONE (u32)(0x00000800) + #define AF2_INT_PENDING (u32)(0x00001000) + #define AF2_TIMER_TICK (u32)(0x00002000) + #define AF2_IRQ_CLAIMED (u32)(0x00004000) + #define AF2_MSI_ENABLED (u32)(0x00008000) + atomic_t disable_cnt; + atomic_t dis_ints_cnt; + u32 int_stat; + u32 int_mask; + u32 volatile *outbound_copy; + struct list_head avail_request; + spinlock_t request_lock; + spinlock_t sg_list_lock; + spinlock_t queue_lock; + spinlock_t mem_lock; + struct list_head free_sg_list_head; + struct esas2r_mem_desc *sg_list_mds; + struct list_head active_list; + struct list_head defer_list; + struct esas2r_request **req_table; + union { + u16 prev_dev_cnt; + u32 heartbeat_time; + #define ESAS2R_HEARTBEAT_TIME (3000) + }; + u32 chip_uptime; + #define ESAS2R_CHP_UPTIME_MAX (60000) + #define ESAS2R_CHP_UPTIME_CNT (20000) + u64 uncached_phys; + u8 *uncached; + struct esas2r_sas_nvram *nvram; + struct esas2r_request general_req; + u8 init_msg; + #define ESAS2R_INIT_MSG_START 1 + #define ESAS2R_INIT_MSG_INIT 2 + #define ESAS2R_INIT_MSG_GET_INIT 3 + #define ESAS2R_INIT_MSG_REINIT 4 + u16 cmd_ref_no; + u32 fw_version; + u32 fw_build; + u32 chip_init_time; + #define ESAS2R_CHPRST_TIME (180000) + #define ESAS2R_CHPRST_WAIT_TIME (2000) + u32 last_tick_time; + u32 window_base; + RQBUILDSGL build_sgl; + struct esas2r_request *first_ae_req; + u32 list_size; + u32 last_write; + u32 last_read; + u16 max_vdareq_size; + u16 disc_wait_cnt; + struct esas2r_mem_desc inbound_list_md; + struct esas2r_mem_desc outbound_list_md; + struct esas2r_disc_context disc_ctx; + u8 *disc_buffer; + u32 disc_start_time; + u32 disc_wait_time; + u32 flash_ver; + char flash_rev[16]; + char fw_rev[16]; + char image_type[16]; + struct esas2r_flash_context flash_context; + u32 num_targets_backend; + u32 ioctl_tunnel; + struct tasklet_struct tasklet; + struct pci_dev *pcid; + struct Scsi_Host *host; + unsigned int index; + char name[32]; + struct timer_list timer; + struct esas2r_firmware firmware; + wait_queue_head_t nvram_waiter; + int nvram_command_done; + wait_queue_head_t fm_api_waiter; + int fm_api_command_done; + wait_queue_head_t vda_waiter; + int vda_command_done; + u8 *vda_buffer; + u64 ppvda_buffer; +#define VDA_BUFFER_HEADER_SZ (offsetof(struct atto_ioctl_vda, data)) +#define VDA_MAX_BUFFER_SIZE (0x40000 + VDA_BUFFER_HEADER_SZ) + wait_queue_head_t fs_api_waiter; + int fs_api_command_done; + u64 ppfs_api_buffer; + u8 *fs_api_buffer; + u32 fs_api_buffer_size; + wait_queue_head_t buffered_ioctl_waiter; + int buffered_ioctl_done; + int uncached_size; + struct workqueue_struct *fw_event_q; + struct list_head fw_event_list; + spinlock_t fw_event_lock; + u8 fw_events_off; /* if '1', then ignore events */ + char fw_event_q_name[ESAS2R_KOBJ_NAME_LEN]; + /* + * intr_mode stores the interrupt mode currently being used by this + * adapter. it is based on the interrupt_mode module parameter, but + * can be changed based on the ability (or not) to utilize the + * mode requested by the parameter. 
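+ * (Editor's note, not part of the original patch: for example, a request
+ * for MSI-X or MSI may be downgraded to legacy INTx interrupts if the
+ * kernel cannot provide the requested vectors.)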
+ */ + int intr_mode; +#define INTR_MODE_LEGACY 0 +#define INTR_MODE_MSI 1 +#define INTR_MODE_MSIX 2 + struct esas2r_sg_context fm_api_sgc; + u8 *save_offset; + struct list_head vrq_mds_head; + struct esas2r_mem_desc *vrq_mds; + int num_vrqs; + struct semaphore fm_api_semaphore; + struct semaphore fs_api_semaphore; + struct semaphore nvram_semaphore; + struct atto_ioctl *local_atto_ioctl; + u8 fw_coredump_buff[ESAS2R_FWCOREDUMP_SZ]; + unsigned int sysfs_fw_created:1; + unsigned int sysfs_fs_created:1; + unsigned int sysfs_vda_created:1; + unsigned int sysfs_hw_created:1; + unsigned int sysfs_live_nvram_created:1; + unsigned int sysfs_default_nvram_created:1; +}; + +/* + * Function Declarations + * SCSI functions + */ +int esas2r_release(struct Scsi_Host *); +const char *esas2r_info(struct Scsi_Host *); +int esas2r_write_params(struct esas2r_adapter *a, struct esas2r_request *rq, + struct esas2r_sas_nvram *data); +int esas2r_ioctl_handler(void *hostdata, int cmd, void __user *arg); +int esas2r_ioctl(struct scsi_device *dev, int cmd, void __user *arg); +u8 handle_hba_ioctl(struct esas2r_adapter *a, + struct atto_ioctl *ioctl_hba); +int esas2r_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd); +int esas2r_show_info(struct seq_file *m, struct Scsi_Host *sh); +int esas2r_slave_alloc(struct scsi_device *dev); +int esas2r_slave_configure(struct scsi_device *dev); +void esas2r_slave_destroy(struct scsi_device *dev); +int esas2r_change_queue_depth(struct scsi_device *dev, int depth, int reason); +int esas2r_change_queue_type(struct scsi_device *dev, int type); +long esas2r_proc_ioctl(struct file *fp, unsigned int cmd, unsigned long arg); + +/* SCSI error handler (eh) functions */ +int esas2r_eh_abort(struct scsi_cmnd *cmd); +int esas2r_device_reset(struct scsi_cmnd *cmd); +int esas2r_host_reset(struct scsi_cmnd *cmd); +int esas2r_bus_reset(struct scsi_cmnd *cmd); +int esas2r_target_reset(struct scsi_cmnd *cmd); + +/* Internal functions */ +int esas2r_init_adapter(struct Scsi_Host *host, struct pci_dev *pcid, + int index); +int esas2r_cleanup(struct Scsi_Host *host); +int esas2r_read_fw(struct esas2r_adapter *a, char *buf, long off, int count); +int esas2r_write_fw(struct esas2r_adapter *a, const char *buf, long off, + int count); +int esas2r_read_vda(struct esas2r_adapter *a, char *buf, long off, int count); +int esas2r_write_vda(struct esas2r_adapter *a, const char *buf, long off, + int count); +int esas2r_read_fs(struct esas2r_adapter *a, char *buf, long off, int count); +int esas2r_write_fs(struct esas2r_adapter *a, const char *buf, long off, + int count); +void esas2r_adapter_tasklet(unsigned long context); +irqreturn_t esas2r_interrupt(int irq, void *dev_id); +irqreturn_t esas2r_msi_interrupt(int irq, void *dev_id); +void esas2r_kickoff_timer(struct esas2r_adapter *a); +int esas2r_suspend(struct pci_dev *pcid, pm_message_t state); +int esas2r_resume(struct pci_dev *pcid); +void esas2r_fw_event_off(struct esas2r_adapter *a); +void esas2r_fw_event_on(struct esas2r_adapter *a); +bool esas2r_nvram_write(struct esas2r_adapter *a, struct esas2r_request *rq, + struct esas2r_sas_nvram *nvram); +void esas2r_nvram_get_defaults(struct esas2r_adapter *a, + struct esas2r_sas_nvram *nvram); +void esas2r_complete_request_cb(struct esas2r_adapter *a, + struct esas2r_request *rq); +void esas2r_reset_detected(struct esas2r_adapter *a); +void esas2r_target_state_changed(struct esas2r_adapter *ha, u16 targ_id, + u8 state); +int esas2r_req_status_to_error(u8 req_stat); +void esas2r_kill_adapter(int i); 
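[Editor's note -- illustration only, not part of the patch: the request primitives declared around here combine roughly as follows for a driver-internal caller. The wrapper function name is hypothetical, and data-buffer/scatter-gather setup is omitted for brevity.]

/* Hypothetical sketch of a request round trip using the esas2r helpers. */
static void example_cli_round_trip(struct esas2r_adapter *a,
				   u32 len, u32 rsp_len)
{
	struct esas2r_request *rq = esas2r_alloc_request(a);

	if (rq == NULL)
		return;				/* no free request descriptors */

	esas2r_build_cli_req(a, rq, len, rsp_len);	/* format the VDA CLI request */
	esas2r_start_request(a, rq);			/* queue it to the adapter */
	esas2r_wait_request(a, rq);			/* wait for completion */
	esas2r_free_request(a, rq);			/* recycle the descriptor */
}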
+void esas2r_free_request(struct esas2r_adapter *a, struct esas2r_request *rq); +struct esas2r_request *esas2r_alloc_request(struct esas2r_adapter *a); +u32 esas2r_get_uncached_size(struct esas2r_adapter *a); +bool esas2r_init_adapter_struct(struct esas2r_adapter *a, + void **uncached_area); +bool esas2r_check_adapter(struct esas2r_adapter *a); +bool esas2r_init_adapter_hw(struct esas2r_adapter *a, bool init_poll); +void esas2r_start_request(struct esas2r_adapter *a, struct esas2r_request *rq); +bool esas2r_send_task_mgmt(struct esas2r_adapter *a, + struct esas2r_request *rqaux, u8 task_mgt_func); +void esas2r_do_tasklet_tasks(struct esas2r_adapter *a); +void esas2r_adapter_interrupt(struct esas2r_adapter *a); +void esas2r_do_deferred_processes(struct esas2r_adapter *a); +void esas2r_reset_bus(struct esas2r_adapter *a); +void esas2r_reset_adapter(struct esas2r_adapter *a); +void esas2r_timer_tick(struct esas2r_adapter *a); +const char *esas2r_get_model_name(struct esas2r_adapter *a); +const char *esas2r_get_model_name_short(struct esas2r_adapter *a); +u32 esas2r_stall_execution(struct esas2r_adapter *a, u32 start_time, + u32 *delay); +void esas2r_build_flash_req(struct esas2r_adapter *a, + struct esas2r_request *rq, + u8 sub_func, + u8 cksum, + u32 addr, + u32 length); +void esas2r_build_mgt_req(struct esas2r_adapter *a, + struct esas2r_request *rq, + u8 sub_func, + u8 scan_gen, + u16 dev_index, + u32 length, + void *data); +void esas2r_build_ae_req(struct esas2r_adapter *a, struct esas2r_request *rq); +void esas2r_build_cli_req(struct esas2r_adapter *a, + struct esas2r_request *rq, + u32 length, + u32 cmd_rsp_len); +void esas2r_build_ioctl_req(struct esas2r_adapter *a, + struct esas2r_request *rq, + u32 length, + u8 sub_func); +void esas2r_build_cfg_req(struct esas2r_adapter *a, + struct esas2r_request *rq, + u8 sub_func, + u32 length, + void *data); +void esas2r_power_down(struct esas2r_adapter *a); +bool esas2r_power_up(struct esas2r_adapter *a, bool init_poll); +void esas2r_wait_request(struct esas2r_adapter *a, struct esas2r_request *rq); +u32 esas2r_map_data_window(struct esas2r_adapter *a, u32 addr_lo); +bool esas2r_process_fs_ioctl(struct esas2r_adapter *a, + struct esas2r_ioctl_fs *fs, + struct esas2r_request *rq, + struct esas2r_sg_context *sgc); +bool esas2r_read_flash_block(struct esas2r_adapter *a, void *to, u32 from, + u32 size); +bool esas2r_read_mem_block(struct esas2r_adapter *a, void *to, u32 from, + u32 size); +bool esas2r_fm_api(struct esas2r_adapter *a, struct esas2r_flash_img *fi, + struct esas2r_request *rq, struct esas2r_sg_context *sgc); +void esas2r_force_interrupt(struct esas2r_adapter *a); +void esas2r_local_start_request(struct esas2r_adapter *a, + struct esas2r_request *rq); +void esas2r_process_adapter_reset(struct esas2r_adapter *a); +void esas2r_complete_request(struct esas2r_adapter *a, + struct esas2r_request *rq); +void esas2r_dummy_complete(struct esas2r_adapter *a, + struct esas2r_request *rq); +void esas2r_ae_complete(struct esas2r_adapter *a, struct esas2r_request *rq); +void esas2r_start_vda_request(struct esas2r_adapter *a, + struct esas2r_request *rq); +bool esas2r_read_flash_rev(struct esas2r_adapter *a); +bool esas2r_read_image_type(struct esas2r_adapter *a); +bool esas2r_nvram_read_direct(struct esas2r_adapter *a); +bool esas2r_nvram_validate(struct esas2r_adapter *a); +void esas2r_nvram_set_defaults(struct esas2r_adapter *a); +bool esas2r_print_flash_rev(struct esas2r_adapter *a); +void esas2r_send_reset_ae(struct esas2r_adapter *a, bool 
pwr_mgt); +bool esas2r_init_msgs(struct esas2r_adapter *a); +bool esas2r_is_adapter_present(struct esas2r_adapter *a); +void esas2r_nuxi_mgt_data(u8 function, void *data); +void esas2r_nuxi_cfg_data(u8 function, void *data); +void esas2r_nuxi_ae_data(union atto_vda_ae *ae); +void esas2r_reset_chip(struct esas2r_adapter *a); +void esas2r_log_request_failure(struct esas2r_adapter *a, + struct esas2r_request *rq); +void esas2r_polled_interrupt(struct esas2r_adapter *a); +bool esas2r_ioreq_aborted(struct esas2r_adapter *a, struct esas2r_request *rq, + u8 status); +bool esas2r_build_sg_list_sge(struct esas2r_adapter *a, + struct esas2r_sg_context *sgc); +bool esas2r_build_sg_list_prd(struct esas2r_adapter *a, + struct esas2r_sg_context *sgc); +void esas2r_targ_db_initialize(struct esas2r_adapter *a); +void esas2r_targ_db_remove_all(struct esas2r_adapter *a, bool notify); +void esas2r_targ_db_report_changes(struct esas2r_adapter *a); +struct esas2r_target *esas2r_targ_db_add_raid(struct esas2r_adapter *a, + struct esas2r_disc_context *dc); +struct esas2r_target *esas2r_targ_db_add_pthru(struct esas2r_adapter *a, + struct esas2r_disc_context *dc, + u8 *ident, + u8 ident_len); +void esas2r_targ_db_remove(struct esas2r_adapter *a, struct esas2r_target *t); +struct esas2r_target *esas2r_targ_db_find_by_sas_addr(struct esas2r_adapter *a, + u64 *sas_addr); +struct esas2r_target *esas2r_targ_db_find_by_ident(struct esas2r_adapter *a, + void *identifier, + u8 ident_len); +u16 esas2r_targ_db_find_next_present(struct esas2r_adapter *a, u16 target_id); +struct esas2r_target *esas2r_targ_db_find_by_virt_id(struct esas2r_adapter *a, + u16 virt_id); +u16 esas2r_targ_db_get_tgt_cnt(struct esas2r_adapter *a); +void esas2r_disc_initialize(struct esas2r_adapter *a); +void esas2r_disc_start_waiting(struct esas2r_adapter *a); +void esas2r_disc_check_for_work(struct esas2r_adapter *a); +void esas2r_disc_check_complete(struct esas2r_adapter *a); +void esas2r_disc_queue_event(struct esas2r_adapter *a, u8 disc_evt); +bool esas2r_disc_start_port(struct esas2r_adapter *a); +void esas2r_disc_local_start_request(struct esas2r_adapter *a, + struct esas2r_request *rq); +bool esas2r_set_degraded_mode(struct esas2r_adapter *a, char *error_str); +bool esas2r_process_vda_ioctl(struct esas2r_adapter *a, + struct atto_ioctl_vda *vi, + struct esas2r_request *rq, + struct esas2r_sg_context *sgc); +void esas2r_queue_fw_event(struct esas2r_adapter *a, + enum fw_event_type type, + void *data, + int data_sz); + +/* Inline functions */ +static inline u32 esas2r_lock_set_flags(volatile u32 *flags, u32 bits) +{ + return test_and_set_bit(ilog2(bits), (volatile unsigned long *)flags); +} + +static inline u32 esas2r_lock_clear_flags(volatile u32 *flags, u32 bits) +{ + return test_and_clear_bit(ilog2(bits), + (volatile unsigned long *)flags); +} + +/* Allocate a chip scatter/gather list entry */ +static inline struct esas2r_mem_desc *esas2r_alloc_sgl(struct esas2r_adapter *a) +{ + unsigned long flags; + struct list_head *sgl; + struct esas2r_mem_desc *result = NULL; + + spin_lock_irqsave(&a->sg_list_lock, flags); + if (likely(!list_empty(&a->free_sg_list_head))) { + sgl = a->free_sg_list_head.next; + result = list_entry(sgl, struct esas2r_mem_desc, next_desc); + list_del_init(sgl); + } + spin_unlock_irqrestore(&a->sg_list_lock, flags); + + return result; +} + +/* Initialize a scatter/gather context */ +static inline void esas2r_sgc_init(struct esas2r_sg_context *sgc, + struct esas2r_adapter *a, + struct esas2r_request *rq, + struct 
atto_vda_sge *first) +{ + sgc->adapter = a; + sgc->first_req = rq; + + /* + * set the limit pointer such that an SGE pointer above this value + * would be the first one to overflow the SGL. + */ + sgc->sge.a64.limit = (struct atto_vda_sge *)((u8 *)rq->vrq + + (sizeof(union + atto_vda_req) / + 8) + - sizeof(struct + atto_vda_sge)); + if (first) { + sgc->sge.a64.last = + sgc->sge.a64.curr = first; + rq->vrq->scsi.sg_list_offset = (u8) + ((u8 *)first - + (u8 *)rq->vrq); + } else { + sgc->sge.a64.last = + sgc->sge.a64.curr = &rq->vrq->scsi.u.sge[0]; + rq->vrq->scsi.sg_list_offset = + (u8)offsetof(struct atto_vda_scsi_req, u.sge); + } + sgc->sge.a64.chain = NULL; +} + +static inline void esas2r_rq_init_request(struct esas2r_request *rq, + struct esas2r_adapter *a) +{ + union atto_vda_req *vrq = rq->vrq; + u32 handle; + + INIT_LIST_HEAD(&rq->sg_table_head); + rq->data_buf = (void *)(vrq + 1); + rq->interrupt_cb = NULL; + rq->comp_cb = esas2r_complete_request_cb; + rq->flags = 0; + rq->timeout = 0; + rq->req_stat = RS_PENDING; + rq->req_type = RT_INI_REQ; + + /* clear the outbound response */ + rq->func_rsp.dwords[0] = 0; + rq->func_rsp.dwords[1] = 0; + + /* + * clear the size of the VDA request. esas2r_build_sg_list() will + * only allow the size of the request to grow. there are some + * management requests that go through there twice and the second + * time through sets a smaller request size. if this is not modified + * at all we'll set it to the size of the entire VDA request. + */ + rq->vda_req_sz = RQ_SIZE_DEFAULT; + + /* req_table entry should be NULL at this point - if not, halt */ + + if (a->req_table[LOWORD(vrq->scsi.handle)]) + esas2r_bugon(); + + /* fill in the table for this handle so we can get back to the + * request. + */ + a->req_table[LOWORD(vrq->scsi.handle)] = rq; + + /* + * add a reference number to the handle to make it unique (until it + * wraps of course) while preserving the upper word + */ + + handle = be32_to_cpu(vrq->scsi.handle) & 0xFFFF0000; + vrq->scsi.handle = cpu_to_be32(handle + a->cmd_ref_no++); + + /* + * the following formats a SCSI request. the caller can override as + * necessary. clear_vda_request can be called to clear the VDA + * request for another type of request. + */ + vrq->scsi.function = VDA_FUNC_SCSI; + vrq->scsi.sense_len = SENSE_DATA_SZ; + + /* clear out sg_list_offset and chain_offset */ + vrq->scsi.sg_list_offset = 0; + vrq->scsi.chain_offset = 0; + vrq->scsi.flags = 0; + vrq->scsi.reserved = 0; + + /* set the sense buffer to be the data payload buffer */ + vrq->scsi.ppsense_buf + = cpu_to_le64(rq->vrq_md->phys_addr + + sizeof(union atto_vda_req)); +} + +static inline void esas2r_rq_free_sg_lists(struct esas2r_request *rq, + struct esas2r_adapter *a) +{ + unsigned long flags; + + if (list_empty(&rq->sg_table_head)) + return; + + spin_lock_irqsave(&a->sg_list_lock, flags); + list_splice_tail_init(&rq->sg_table_head, &a->free_sg_list_head); + spin_unlock_irqrestore(&a->sg_list_lock, flags); +} + +static inline void esas2r_rq_destroy_request(struct esas2r_request *rq, + struct esas2r_adapter *a) + +{ + esas2r_rq_free_sg_lists(rq, a); + a->req_table[LOWORD(rq->vrq->scsi.handle)] = NULL; + rq->data_buf = NULL; +} + +static inline bool esas2r_is_tasklet_pending(struct esas2r_adapter *a) +{ + return (a->flags & (AF_BUSRST_NEEDED | AF_BUSRST_DETECTED + | AF_CHPRST_NEEDED | AF_CHPRST_DETECTED + | AF_PORT_CHANGE)) + ? 
true : false; +} + +/* + * Build the scatter/gather list for an I/O request according to the + * specifications placed in the esas2r_sg_context. The caller must initialize + * struct esas2r_sg_context prior to the initial call by calling + * esas2r_sgc_init() + */ +static inline bool esas2r_build_sg_list(struct esas2r_adapter *a, + struct esas2r_request *rq, + struct esas2r_sg_context *sgc) +{ + if (unlikely(le32_to_cpu(rq->vrq->scsi.length) == 0)) + return true; + + return (*a->build_sgl)(a, sgc); +} + +static inline void esas2r_disable_chip_interrupts(struct esas2r_adapter *a) +{ + if (atomic_inc_return(&a->dis_ints_cnt) == 1) + esas2r_write_register_dword(a, MU_INT_MASK_OUT, + ESAS2R_INT_DIS_MASK); +} + +static inline void esas2r_enable_chip_interrupts(struct esas2r_adapter *a) +{ + if (atomic_dec_return(&a->dis_ints_cnt) == 0) + esas2r_write_register_dword(a, MU_INT_MASK_OUT, + ESAS2R_INT_ENB_MASK); +} + +/* Schedule a TASKLET to perform non-interrupt tasks that may require delays + * or long completion times. + */ +static inline void esas2r_schedule_tasklet(struct esas2r_adapter *a) +{ + /* make sure we don't schedule twice */ + if (!(esas2r_lock_set_flags(&a->flags, AF_TASKLET_SCHEDULED) & + ilog2(AF_TASKLET_SCHEDULED))) + tasklet_hi_schedule(&a->tasklet); +} + +static inline void esas2r_enable_heartbeat(struct esas2r_adapter *a) +{ + if (!(a->flags & (AF_DEGRADED_MODE | AF_CHPRST_PENDING)) + && (a->nvram->options2 & SASNVR2_HEARTBEAT)) + esas2r_lock_set_flags(&a->flags, AF_HEARTBEAT_ENB); + else + esas2r_lock_clear_flags(&a->flags, AF_HEARTBEAT_ENB); +} + +static inline void esas2r_disable_heartbeat(struct esas2r_adapter *a) +{ + esas2r_lock_clear_flags(&a->flags, AF_HEARTBEAT_ENB); + esas2r_lock_clear_flags(&a->flags, AF_HEARTBEAT); +} + +/* Set the initial state for resetting the adapter on the next pass through + * esas2r_do_deferred. + */ +static inline void esas2r_local_reset_adapter(struct esas2r_adapter *a) +{ + esas2r_disable_heartbeat(a); + + esas2r_lock_set_flags(&a->flags, AF_CHPRST_NEEDED); + esas2r_lock_set_flags(&a->flags, AF_CHPRST_PENDING); + esas2r_lock_set_flags(&a->flags, AF_DISC_PENDING); +} + +/* See if an interrupt is pending on the adapter. 
*/ +static inline bool esas2r_adapter_interrupt_pending(struct esas2r_adapter *a) +{ + u32 intstat; + + if (a->int_mask == 0) + return false; + + intstat = esas2r_read_register_dword(a, MU_INT_STATUS_OUT); + + if ((intstat & a->int_mask) == 0) + return false; + + esas2r_disable_chip_interrupts(a); + + a->int_stat = intstat; + a->int_mask = 0; + + return true; +} + +static inline u16 esas2r_targ_get_id(struct esas2r_target *t, + struct esas2r_adapter *a) +{ + return (u16)(uintptr_t)(t - a->targetdb); +} + +/* Build and start an asynchronous event request */ +static inline void esas2r_start_ae_request(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + unsigned long flags; + + esas2r_build_ae_req(a, rq); + + spin_lock_irqsave(&a->queue_lock, flags); + esas2r_start_vda_request(a, rq); + spin_unlock_irqrestore(&a->queue_lock, flags); +} + +static inline void esas2r_comp_list_drain(struct esas2r_adapter *a, + struct list_head *comp_list) +{ + struct esas2r_request *rq; + struct list_head *element, *next; + + list_for_each_safe(element, next, comp_list) { + rq = list_entry(element, struct esas2r_request, comp_list); + list_del_init(element); + esas2r_complete_request(a, rq); + } +} + +/* sysfs handlers */ +extern struct bin_attribute bin_attr_fw; +extern struct bin_attribute bin_attr_fs; +extern struct bin_attribute bin_attr_vda; +extern struct bin_attribute bin_attr_hw; +extern struct bin_attribute bin_attr_live_nvram; +extern struct bin_attribute bin_attr_default_nvram; + +#endif /* ESAS2R_H */ diff --git a/drivers/scsi/esas2r/esas2r_disc.c b/drivers/scsi/esas2r/esas2r_disc.c new file mode 100644 index 000000000000..dec6c334ce3e --- /dev/null +++ b/drivers/scsi/esas2r/esas2r_disc.c @@ -0,0 +1,1189 @@ +/* + * linux/drivers/scsi/esas2r/esas2r_disc.c + * esas2r device discovery routines + * + * Copyright (c) 2001-2013 ATTO Technology, Inc. + * (mailto:linuxdrivers@attotech.com) + */ +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. 
+ * + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ + +#include "esas2r.h" + +/* Miscellaneous internal discovery routines */ +static void esas2r_disc_abort(struct esas2r_adapter *a, + struct esas2r_request *rq); +static bool esas2r_disc_continue(struct esas2r_adapter *a, + struct esas2r_request *rq); +static void esas2r_disc_fix_curr_requests(struct esas2r_adapter *a); +static u32 esas2r_disc_get_phys_addr(struct esas2r_sg_context *sgc, u64 *addr); +static bool esas2r_disc_start_request(struct esas2r_adapter *a, + struct esas2r_request *rq); + +/* Internal discovery routines that process the states */ +static bool esas2r_disc_block_dev_scan(struct esas2r_adapter *a, + struct esas2r_request *rq); +static void esas2r_disc_block_dev_scan_cb(struct esas2r_adapter *a, + struct esas2r_request *rq); +static bool esas2r_disc_dev_add(struct esas2r_adapter *a, + struct esas2r_request *rq); +static bool esas2r_disc_dev_remove(struct esas2r_adapter *a, + struct esas2r_request *rq); +static bool esas2r_disc_part_info(struct esas2r_adapter *a, + struct esas2r_request *rq); +static void esas2r_disc_part_info_cb(struct esas2r_adapter *a, + struct esas2r_request *rq); +static bool esas2r_disc_passthru_dev_info(struct esas2r_adapter *a, + struct esas2r_request *rq); +static void esas2r_disc_passthru_dev_info_cb(struct esas2r_adapter *a, + struct esas2r_request *rq); +static bool esas2r_disc_passthru_dev_addr(struct esas2r_adapter *a, + struct esas2r_request *rq); +static void esas2r_disc_passthru_dev_addr_cb(struct esas2r_adapter *a, + struct esas2r_request *rq); +static bool esas2r_disc_raid_grp_info(struct esas2r_adapter *a, + struct esas2r_request *rq); +static void esas2r_disc_raid_grp_info_cb(struct esas2r_adapter *a, + struct esas2r_request *rq); + +void esas2r_disc_initialize(struct esas2r_adapter *a) +{ + struct esas2r_sas_nvram *nvr = a->nvram; + + esas2r_trace_enter(); + + esas2r_lock_clear_flags(&a->flags, AF_DISC_IN_PROG); + esas2r_lock_clear_flags(&a->flags2, AF2_DEV_SCAN); + esas2r_lock_clear_flags(&a->flags2, AF2_DEV_CNT_OK); + + a->disc_start_time = jiffies_to_msecs(jiffies); + a->disc_wait_time = nvr->dev_wait_time * 1000; + a->disc_wait_cnt = nvr->dev_wait_count; + + if (a->disc_wait_cnt > ESAS2R_MAX_TARGETS) + a->disc_wait_cnt = ESAS2R_MAX_TARGETS; + + /* + * If we are doing chip reset or power management processing, always + * wait for devices. use the NVRAM device count if it is greater than + * previously discovered devices. + */ + + esas2r_hdebug("starting discovery..."); + + a->general_req.interrupt_cx = NULL; + + if (a->flags & (AF_CHPRST_DETECTED | AF_POWER_MGT)) { + if (a->prev_dev_cnt == 0) { + /* Don't bother waiting if there is nothing to wait + * for. 
+ */ + a->disc_wait_time = 0; + } else { + /* + * Set the device wait count to what was previously + * found. We don't care if the user only configured + * a time because we know the exact count to wait for. + * There is no need to honor the user's wishes to + * always wait the full time. + */ + a->disc_wait_cnt = a->prev_dev_cnt; + + /* + * bump the minimum wait time to 15 seconds since the + * default is 3 (system boot or the boot driver usually + * buys us more time). + */ + if (a->disc_wait_time < 15000) + a->disc_wait_time = 15000; + } + } + + esas2r_trace("disc wait count: %d", a->disc_wait_cnt); + esas2r_trace("disc wait time: %d", a->disc_wait_time); + + if (a->disc_wait_time == 0) + esas2r_disc_check_complete(a); + + esas2r_trace_exit(); +} + +void esas2r_disc_start_waiting(struct esas2r_adapter *a) +{ + unsigned long flags; + + spin_lock_irqsave(&a->mem_lock, flags); + + if (a->disc_ctx.disc_evt) + esas2r_disc_start_port(a); + + spin_unlock_irqrestore(&a->mem_lock, flags); +} + +void esas2r_disc_check_for_work(struct esas2r_adapter *a) +{ + struct esas2r_request *rq = &a->general_req; + + /* service any pending interrupts first */ + + esas2r_polled_interrupt(a); + + /* + * now, interrupt processing may have queued up a discovery event. go + * see if we have one to start. we couldn't start it in the ISR since + * polled discovery would cause a deadlock. + */ + + esas2r_disc_start_waiting(a); + + if (rq->interrupt_cx == NULL) + return; + + if (rq->req_stat == RS_STARTED + && rq->timeout <= RQ_MAX_TIMEOUT) { + /* wait for the current discovery request to complete. */ + esas2r_wait_request(a, rq); + + if (rq->req_stat == RS_TIMEOUT) { + esas2r_disc_abort(a, rq); + esas2r_local_reset_adapter(a); + return; + } + } + + if (rq->req_stat == RS_PENDING + || rq->req_stat == RS_STARTED) + return; + + esas2r_disc_continue(a, rq); +} + +void esas2r_disc_check_complete(struct esas2r_adapter *a) +{ + unsigned long flags; + + esas2r_trace_enter(); + + /* check to see if we should be waiting for devices */ + if (a->disc_wait_time) { + u32 currtime = jiffies_to_msecs(jiffies); + u32 time = currtime - a->disc_start_time; + + /* + * Wait until the device wait time is exhausted or the device + * wait count is satisfied. + */ + if (time < a->disc_wait_time + && (esas2r_targ_db_get_tgt_cnt(a) < a->disc_wait_cnt + || a->disc_wait_cnt == 0)) { + /* After three seconds of waiting, schedule a scan. */ + if (time >= 3000 + && !(esas2r_lock_set_flags(&a->flags2, + AF2_DEV_SCAN) & + ilog2(AF2_DEV_SCAN))) { + spin_lock_irqsave(&a->mem_lock, flags); + esas2r_disc_queue_event(a, DCDE_DEV_SCAN); + spin_unlock_irqrestore(&a->mem_lock, flags); + } + + esas2r_trace_exit(); + return; + } + + /* + * We are done waiting...we think. Adjust the wait time to + * consume events after the count is met. + */ + if (!(esas2r_lock_set_flags(&a->flags2, AF2_DEV_CNT_OK) + & ilog2(AF2_DEV_CNT_OK))) + a->disc_wait_time = time + 3000; + + /* If we haven't done a full scan yet, do it now. */ + if (!(esas2r_lock_set_flags(&a->flags2, + AF2_DEV_SCAN) & + ilog2(AF2_DEV_SCAN))) { + spin_lock_irqsave(&a->mem_lock, flags); + esas2r_disc_queue_event(a, DCDE_DEV_SCAN); + spin_unlock_irqrestore(&a->mem_lock, flags); + + esas2r_trace_exit(); + return; + } + + /* + * Now, if there is still time left to consume events, continue + * waiting. 
+ */ + if (time < a->disc_wait_time) { + esas2r_trace_exit(); + return; + } + } else { + if (!(esas2r_lock_set_flags(&a->flags2, + AF2_DEV_SCAN) & + ilog2(AF2_DEV_SCAN))) { + spin_lock_irqsave(&a->mem_lock, flags); + esas2r_disc_queue_event(a, DCDE_DEV_SCAN); + spin_unlock_irqrestore(&a->mem_lock, flags); + } + } + + /* We want to stop waiting for devices. */ + a->disc_wait_time = 0; + + if ((a->flags & AF_DISC_POLLED) + && (a->flags & AF_DISC_IN_PROG)) { + /* + * Polled discovery is still pending so continue the active + * discovery until it is done. At that point, we will stop + * polled discovery and transition to interrupt driven + * discovery. + */ + } else { + /* + * Done waiting for devices. Note that we get here immediately + * after deferred waiting completes because that is interrupt + * driven; i.e. There is no transition. + */ + esas2r_disc_fix_curr_requests(a); + esas2r_lock_clear_flags(&a->flags, AF_DISC_PENDING); + + /* + * We have deferred target state changes until now because we + * don't want to report any removals (due to the first arrival) + * until the device wait time expires. + */ + esas2r_lock_set_flags(&a->flags, AF_PORT_CHANGE); + } + + esas2r_trace_exit(); +} + +void esas2r_disc_queue_event(struct esas2r_adapter *a, u8 disc_evt) +{ + struct esas2r_disc_context *dc = &a->disc_ctx; + + esas2r_trace_enter(); + + esas2r_trace("disc_event: %d", disc_evt); + + /* Initialize the discovery context */ + dc->disc_evt |= disc_evt; + + /* + * Don't start discovery before or during polled discovery. if we did, + * we would have a deadlock if we are in the ISR already. + */ + if (!(a->flags & (AF_CHPRST_PENDING | AF_DISC_POLLED))) + esas2r_disc_start_port(a); + + esas2r_trace_exit(); +} + +bool esas2r_disc_start_port(struct esas2r_adapter *a) +{ + struct esas2r_request *rq = &a->general_req; + struct esas2r_disc_context *dc = &a->disc_ctx; + bool ret; + + esas2r_trace_enter(); + + if (a->flags & AF_DISC_IN_PROG) { + esas2r_trace_exit(); + + return false; + } + + /* If there is a discovery waiting, process it. */ + if (dc->disc_evt) { + if ((a->flags & AF_DISC_POLLED) + && a->disc_wait_time == 0) { + /* + * We are doing polled discovery, but we no longer want + * to wait for devices. Stop polled discovery and + * transition to interrupt driven discovery. + */ + + esas2r_trace_exit(); + + return false; + } + } else { + /* Discovery is complete. 
*/ + + esas2r_hdebug("disc done"); + + esas2r_lock_set_flags(&a->flags, AF_PORT_CHANGE); + + esas2r_trace_exit(); + + return false; + } + + /* Handle the discovery context */ + esas2r_trace("disc_evt: %d", dc->disc_evt); + esas2r_lock_set_flags(&a->flags, AF_DISC_IN_PROG); + dc->flags = 0; + + if (a->flags & AF_DISC_POLLED) + dc->flags |= DCF_POLLED; + + rq->interrupt_cx = dc; + rq->req_stat = RS_SUCCESS; + + /* Decode the event code */ + if (dc->disc_evt & DCDE_DEV_SCAN) { + dc->disc_evt &= ~DCDE_DEV_SCAN; + + dc->flags |= DCF_DEV_SCAN; + dc->state = DCS_BLOCK_DEV_SCAN; + } else if (dc->disc_evt & DCDE_DEV_CHANGE) { + dc->disc_evt &= ~DCDE_DEV_CHANGE; + + dc->flags |= DCF_DEV_CHANGE; + dc->state = DCS_DEV_RMV; + } + + /* Continue interrupt driven discovery */ + if (!(a->flags & AF_DISC_POLLED)) + ret = esas2r_disc_continue(a, rq); + else + ret = true; + + esas2r_trace_exit(); + + return ret; +} + +static bool esas2r_disc_continue(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + struct esas2r_disc_context *dc = + (struct esas2r_disc_context *)rq->interrupt_cx; + bool rslt; + + /* Device discovery/removal */ + while (dc->flags & (DCF_DEV_CHANGE | DCF_DEV_SCAN)) { + rslt = false; + + switch (dc->state) { + case DCS_DEV_RMV: + + rslt = esas2r_disc_dev_remove(a, rq); + break; + + case DCS_DEV_ADD: + + rslt = esas2r_disc_dev_add(a, rq); + break; + + case DCS_BLOCK_DEV_SCAN: + + rslt = esas2r_disc_block_dev_scan(a, rq); + break; + + case DCS_RAID_GRP_INFO: + + rslt = esas2r_disc_raid_grp_info(a, rq); + break; + + case DCS_PART_INFO: + + rslt = esas2r_disc_part_info(a, rq); + break; + + case DCS_PT_DEV_INFO: + + rslt = esas2r_disc_passthru_dev_info(a, rq); + break; + case DCS_PT_DEV_ADDR: + + rslt = esas2r_disc_passthru_dev_addr(a, rq); + break; + case DCS_DISC_DONE: + + dc->flags &= ~(DCF_DEV_CHANGE | DCF_DEV_SCAN); + break; + + default: + + esas2r_bugon(); + dc->state = DCS_DISC_DONE; + break; + } + + if (rslt) + return true; + } + + /* Discovery is done...for now. */ + rq->interrupt_cx = NULL; + + if (!(a->flags & AF_DISC_PENDING)) + esas2r_disc_fix_curr_requests(a); + + esas2r_lock_clear_flags(&a->flags, AF_DISC_IN_PROG); + + /* Start the next discovery. */ + return esas2r_disc_start_port(a); +} + +static bool esas2r_disc_start_request(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + unsigned long flags; + + /* Set the timeout to a minimum value. */ + if (rq->timeout < ESAS2R_DEFAULT_TMO) + rq->timeout = ESAS2R_DEFAULT_TMO; + + /* + * Override the request type to distinguish discovery requests. If we + * end up deferring the request, esas2r_disc_local_start_request() + * will be called to restart it. 
+ */ + rq->req_type = RT_DISC_REQ; + + spin_lock_irqsave(&a->queue_lock, flags); + + if (!(a->flags & (AF_CHPRST_PENDING | AF_FLASHING))) + esas2r_disc_local_start_request(a, rq); + else + list_add_tail(&rq->req_list, &a->defer_list); + + spin_unlock_irqrestore(&a->queue_lock, flags); + + return true; +} + +void esas2r_disc_local_start_request(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + esas2r_trace_enter(); + + list_add_tail(&rq->req_list, &a->active_list); + + esas2r_start_vda_request(a, rq); + + esas2r_trace_exit(); + + return; +} + +static void esas2r_disc_abort(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + struct esas2r_disc_context *dc = + (struct esas2r_disc_context *)rq->interrupt_cx; + + esas2r_trace_enter(); + + /* abort the current discovery */ + + dc->state = DCS_DISC_DONE; + + esas2r_trace_exit(); +} + +static bool esas2r_disc_block_dev_scan(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + struct esas2r_disc_context *dc = + (struct esas2r_disc_context *)rq->interrupt_cx; + bool rslt; + + esas2r_trace_enter(); + + esas2r_rq_init_request(rq, a); + + esas2r_build_mgt_req(a, + rq, + VDAMGT_DEV_SCAN, + 0, + 0, + 0, + NULL); + + rq->comp_cb = esas2r_disc_block_dev_scan_cb; + + rq->timeout = 30000; + rq->interrupt_cx = dc; + + rslt = esas2r_disc_start_request(a, rq); + + esas2r_trace_exit(); + + return rslt; +} + +static void esas2r_disc_block_dev_scan_cb(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + struct esas2r_disc_context *dc = + (struct esas2r_disc_context *)rq->interrupt_cx; + unsigned long flags; + + esas2r_trace_enter(); + + spin_lock_irqsave(&a->mem_lock, flags); + + if (rq->req_stat == RS_SUCCESS) + dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation; + + dc->state = DCS_RAID_GRP_INFO; + dc->raid_grp_ix = 0; + + esas2r_rq_destroy_request(rq, a); + + /* continue discovery if it's interrupt driven */ + + if (!(dc->flags & DCF_POLLED)) + esas2r_disc_continue(a, rq); + + spin_unlock_irqrestore(&a->mem_lock, flags); + + esas2r_trace_exit(); +} + +static bool esas2r_disc_raid_grp_info(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + struct esas2r_disc_context *dc = + (struct esas2r_disc_context *)rq->interrupt_cx; + bool rslt; + struct atto_vda_grp_info *grpinfo; + + esas2r_trace_enter(); + + esas2r_trace("raid_group_idx: %d", dc->raid_grp_ix); + + if (dc->raid_grp_ix >= VDA_MAX_RAID_GROUPS) { + dc->state = DCS_DISC_DONE; + + esas2r_trace_exit(); + + return false; + } + + esas2r_rq_init_request(rq, a); + + grpinfo = &rq->vda_rsp_data->mgt_data.data.grp_info; + + memset(grpinfo, 0, sizeof(struct atto_vda_grp_info)); + + esas2r_build_mgt_req(a, + rq, + VDAMGT_GRP_INFO, + dc->scan_gen, + 0, + sizeof(struct atto_vda_grp_info), + NULL); + + grpinfo->grp_index = dc->raid_grp_ix; + + rq->comp_cb = esas2r_disc_raid_grp_info_cb; + + rq->interrupt_cx = dc; + + rslt = esas2r_disc_start_request(a, rq); + + esas2r_trace_exit(); + + return rslt; +} + +static void esas2r_disc_raid_grp_info_cb(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + struct esas2r_disc_context *dc = + (struct esas2r_disc_context *)rq->interrupt_cx; + unsigned long flags; + struct atto_vda_grp_info *grpinfo; + + esas2r_trace_enter(); + + spin_lock_irqsave(&a->mem_lock, flags); + + if (rq->req_stat == RS_SCAN_GEN) { + dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation; + dc->raid_grp_ix = 0; + goto done; + } + + if (rq->req_stat == RS_SUCCESS) { + grpinfo = &rq->vda_rsp_data->mgt_data.data.grp_info; + + if (grpinfo->status != VDA_GRP_STAT_ONLINE 
+ && grpinfo->status != VDA_GRP_STAT_DEGRADED) { + /* go to the next group. */ + + dc->raid_grp_ix++; + } else { + memcpy(&dc->raid_grp_name[0], + &grpinfo->grp_name[0], + sizeof(grpinfo->grp_name)); + + dc->interleave = le32_to_cpu(grpinfo->interleave); + dc->block_size = le32_to_cpu(grpinfo->block_size); + + dc->state = DCS_PART_INFO; + dc->part_num = 0; + } + } else { + if (!(rq->req_stat == RS_GRP_INVALID)) { + esas2r_log(ESAS2R_LOG_WARN, + "A request for RAID group info failed - " + "returned with %x", + rq->req_stat); + } + + dc->dev_ix = 0; + dc->state = DCS_PT_DEV_INFO; + } + +done: + + esas2r_rq_destroy_request(rq, a); + + /* continue discovery if it's interrupt driven */ + + if (!(dc->flags & DCF_POLLED)) + esas2r_disc_continue(a, rq); + + spin_unlock_irqrestore(&a->mem_lock, flags); + + esas2r_trace_exit(); +} + +static bool esas2r_disc_part_info(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + struct esas2r_disc_context *dc = + (struct esas2r_disc_context *)rq->interrupt_cx; + bool rslt; + struct atto_vdapart_info *partinfo; + + esas2r_trace_enter(); + + esas2r_trace("part_num: %d", dc->part_num); + + if (dc->part_num >= VDA_MAX_PARTITIONS) { + dc->state = DCS_RAID_GRP_INFO; + dc->raid_grp_ix++; + + esas2r_trace_exit(); + + return false; + } + + esas2r_rq_init_request(rq, a); + + partinfo = &rq->vda_rsp_data->mgt_data.data.part_info; + + memset(partinfo, 0, sizeof(struct atto_vdapart_info)); + + esas2r_build_mgt_req(a, + rq, + VDAMGT_PART_INFO, + dc->scan_gen, + 0, + sizeof(struct atto_vdapart_info), + NULL); + + partinfo->part_no = dc->part_num; + + memcpy(&partinfo->grp_name[0], + &dc->raid_grp_name[0], + sizeof(partinfo->grp_name)); + + rq->comp_cb = esas2r_disc_part_info_cb; + + rq->interrupt_cx = dc; + + rslt = esas2r_disc_start_request(a, rq); + + esas2r_trace_exit(); + + return rslt; +} + +static void esas2r_disc_part_info_cb(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + struct esas2r_disc_context *dc = + (struct esas2r_disc_context *)rq->interrupt_cx; + unsigned long flags; + struct atto_vdapart_info *partinfo; + + esas2r_trace_enter(); + + spin_lock_irqsave(&a->mem_lock, flags); + + if (rq->req_stat == RS_SCAN_GEN) { + dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation; + dc->raid_grp_ix = 0; + dc->state = DCS_RAID_GRP_INFO; + } else if (rq->req_stat == RS_SUCCESS) { + partinfo = &rq->vda_rsp_data->mgt_data.data.part_info; + + dc->part_num = partinfo->part_no; + + dc->curr_virt_id = le16_to_cpu(partinfo->target_id); + + esas2r_targ_db_add_raid(a, dc); + + dc->part_num++; + } else { + if (!(rq->req_stat == RS_PART_LAST)) { + esas2r_log(ESAS2R_LOG_WARN, + "A request for RAID group partition info " + "failed - status:%d", rq->req_stat); + } + + dc->state = DCS_RAID_GRP_INFO; + dc->raid_grp_ix++; + } + + esas2r_rq_destroy_request(rq, a); + + /* continue discovery if it's interrupt driven */ + + if (!(dc->flags & DCF_POLLED)) + esas2r_disc_continue(a, rq); + + spin_unlock_irqrestore(&a->mem_lock, flags); + + esas2r_trace_exit(); +} + +static bool esas2r_disc_passthru_dev_info(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + struct esas2r_disc_context *dc = + (struct esas2r_disc_context *)rq->interrupt_cx; + bool rslt; + struct atto_vda_devinfo *devinfo; + + esas2r_trace_enter(); + + esas2r_trace("dev_ix: %d", dc->dev_ix); + + esas2r_rq_init_request(rq, a); + + devinfo = &rq->vda_rsp_data->mgt_data.data.dev_info; + + memset(devinfo, 0, sizeof(struct atto_vda_devinfo)); + + esas2r_build_mgt_req(a, + rq, + VDAMGT_DEV_PT_INFO, + 
dc->scan_gen, + dc->dev_ix, + sizeof(struct atto_vda_devinfo), + NULL); + + rq->comp_cb = esas2r_disc_passthru_dev_info_cb; + + rq->interrupt_cx = dc; + + rslt = esas2r_disc_start_request(a, rq); + + esas2r_trace_exit(); + + return rslt; +} + +static void esas2r_disc_passthru_dev_info_cb(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + struct esas2r_disc_context *dc = + (struct esas2r_disc_context *)rq->interrupt_cx; + unsigned long flags; + struct atto_vda_devinfo *devinfo; + + esas2r_trace_enter(); + + spin_lock_irqsave(&a->mem_lock, flags); + + if (rq->req_stat == RS_SCAN_GEN) { + dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation; + dc->dev_ix = 0; + dc->state = DCS_PT_DEV_INFO; + } else if (rq->req_stat == RS_SUCCESS) { + devinfo = &rq->vda_rsp_data->mgt_data.data.dev_info; + + dc->dev_ix = le16_to_cpu(rq->func_rsp.mgt_rsp.dev_index); + + dc->curr_virt_id = le16_to_cpu(devinfo->target_id); + + if (le16_to_cpu(devinfo->features) & VDADEVFEAT_PHYS_ID) { + dc->curr_phys_id = + le16_to_cpu(devinfo->phys_target_id); + dc->dev_addr_type = ATTO_GDA_AT_PORT; + dc->state = DCS_PT_DEV_ADDR; + + esas2r_trace("curr_virt_id: %d", dc->curr_virt_id); + esas2r_trace("curr_phys_id: %d", dc->curr_phys_id); + } else { + dc->dev_ix++; + } + } else { + if (!(rq->req_stat == RS_DEV_INVALID)) { + esas2r_log(ESAS2R_LOG_WARN, + "A request for device information failed - " + "status:%d", rq->req_stat); + } + + dc->state = DCS_DISC_DONE; + } + + esas2r_rq_destroy_request(rq, a); + + /* continue discovery if it's interrupt driven */ + + if (!(dc->flags & DCF_POLLED)) + esas2r_disc_continue(a, rq); + + spin_unlock_irqrestore(&a->mem_lock, flags); + + esas2r_trace_exit(); +} + +static bool esas2r_disc_passthru_dev_addr(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + struct esas2r_disc_context *dc = + (struct esas2r_disc_context *)rq->interrupt_cx; + bool rslt; + struct atto_ioctl *hi; + struct esas2r_sg_context sgc; + + esas2r_trace_enter(); + + esas2r_rq_init_request(rq, a); + + /* format the request. */ + + sgc.cur_offset = NULL; + sgc.get_phys_addr = (PGETPHYSADDR)esas2r_disc_get_phys_addr; + sgc.length = offsetof(struct atto_ioctl, data) + + sizeof(struct atto_hba_get_device_address); + + esas2r_sgc_init(&sgc, a, rq, rq->vrq->ioctl.sge); + + esas2r_build_ioctl_req(a, rq, sgc.length, VDA_IOCTL_HBA); + + if (!esas2r_build_sg_list(a, rq, &sgc)) { + esas2r_rq_destroy_request(rq, a); + + esas2r_trace_exit(); + + return false; + } + + rq->comp_cb = esas2r_disc_passthru_dev_addr_cb; + + rq->interrupt_cx = dc; + + /* format the IOCTL data. */ + + hi = (struct atto_ioctl *)a->disc_buffer; + + memset(a->disc_buffer, 0, ESAS2R_DISC_BUF_LEN); + + hi->version = ATTO_VER_GET_DEV_ADDR0; + hi->function = ATTO_FUNC_GET_DEV_ADDR; + hi->flags = HBAF_TUNNEL; + + hi->data.get_dev_addr.target_id = le32_to_cpu(dc->curr_phys_id); + hi->data.get_dev_addr.addr_type = dc->dev_addr_type; + + /* start it up. 
*/ + + rslt = esas2r_disc_start_request(a, rq); + + esas2r_trace_exit(); + + return rslt; +} + +static void esas2r_disc_passthru_dev_addr_cb(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + struct esas2r_disc_context *dc = + (struct esas2r_disc_context *)rq->interrupt_cx; + struct esas2r_target *t = NULL; + unsigned long flags; + struct atto_ioctl *hi; + u16 addrlen; + + esas2r_trace_enter(); + + spin_lock_irqsave(&a->mem_lock, flags); + + hi = (struct atto_ioctl *)a->disc_buffer; + + if (rq->req_stat == RS_SUCCESS + && hi->status == ATTO_STS_SUCCESS) { + addrlen = le16_to_cpu(hi->data.get_dev_addr.addr_len); + + if (dc->dev_addr_type == ATTO_GDA_AT_PORT) { + if (addrlen == sizeof(u64)) + memcpy(&dc->sas_addr, + &hi->data.get_dev_addr.address[0], + addrlen); + else + memset(&dc->sas_addr, 0, sizeof(dc->sas_addr)); + + /* Get the unique identifier. */ + dc->dev_addr_type = ATTO_GDA_AT_UNIQUE; + + goto next_dev_addr; + } else { + /* Add the pass through target. */ + if (HIBYTE(addrlen) == 0) { + t = esas2r_targ_db_add_pthru(a, + dc, + &hi->data. + get_dev_addr. + address[0], + (u8)hi->data. + get_dev_addr. + addr_len); + + if (t) + memcpy(&t->sas_addr, &dc->sas_addr, + sizeof(t->sas_addr)); + } else { + /* getting the back end data failed */ + + esas2r_log(ESAS2R_LOG_WARN, + "an error occurred retrieving the " + "back end data (%s:%d)", + __func__, + __LINE__); + } + } + } else { + /* getting the back end data failed */ + + esas2r_log(ESAS2R_LOG_WARN, + "an error occurred retrieving the back end data - " + "rq->req_stat:%d hi->status:%d", + rq->req_stat, hi->status); + } + + /* proceed to the next device. */ + + if (dc->flags & DCF_DEV_SCAN) { + dc->dev_ix++; + dc->state = DCS_PT_DEV_INFO; + } else if (dc->flags & DCF_DEV_CHANGE) { + dc->curr_targ++; + dc->state = DCS_DEV_ADD; + } else { + esas2r_bugon(); + } + +next_dev_addr: + esas2r_rq_destroy_request(rq, a); + + /* continue discovery if it's interrupt driven */ + + if (!(dc->flags & DCF_POLLED)) + esas2r_disc_continue(a, rq); + + spin_unlock_irqrestore(&a->mem_lock, flags); + + esas2r_trace_exit(); +} + +static u32 esas2r_disc_get_phys_addr(struct esas2r_sg_context *sgc, u64 *addr) +{ + struct esas2r_adapter *a = sgc->adapter; + + if (sgc->length > ESAS2R_DISC_BUF_LEN) + esas2r_bugon(); + + *addr = a->uncached_phys + + (u64)((u8 *)a->disc_buffer - a->uncached); + + return sgc->length; +} + +static bool esas2r_disc_dev_remove(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + struct esas2r_disc_context *dc = + (struct esas2r_disc_context *)rq->interrupt_cx; + struct esas2r_target *t; + struct esas2r_target *t2; + + esas2r_trace_enter(); + + /* process removals. */ + + for (t = a->targetdb; t < a->targetdb_end; t++) { + if (t->new_target_state != TS_NOT_PRESENT) + continue; + + t->new_target_state = TS_INVALID; + + /* remove the right target! */ + + t2 = + esas2r_targ_db_find_by_virt_id(a, + esas2r_targ_get_id(t, + a)); + + if (t2) + esas2r_targ_db_remove(a, t2); + } + + /* removals complete. process arrivals. */ + + dc->state = DCS_DEV_ADD; + dc->curr_targ = a->targetdb; + + esas2r_trace_exit(); + + return false; +} + +static bool esas2r_disc_dev_add(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + struct esas2r_disc_context *dc = + (struct esas2r_disc_context *)rq->interrupt_cx; + struct esas2r_target *t = dc->curr_targ; + + if (t >= a->targetdb_end) { + /* done processing state changes. 
*/ + + dc->state = DCS_DISC_DONE; + } else if (t->new_target_state == TS_PRESENT) { + struct atto_vda_ae_lu *luevt = &t->lu_event; + + esas2r_trace_enter(); + + /* clear this now in case more events come in. */ + + t->new_target_state = TS_INVALID; + + /* setup the discovery context for adding this device. */ + + dc->curr_virt_id = esas2r_targ_get_id(t, a); + + if ((luevt->hdr.bylength >= offsetof(struct atto_vda_ae_lu, id) + + sizeof(struct atto_vda_ae_lu_tgt_lun_raid)) + && !(luevt->dwevent & VDAAE_LU_PASSTHROUGH)) { + dc->block_size = luevt->id.tgtlun_raid.dwblock_size; + dc->interleave = luevt->id.tgtlun_raid.dwinterleave; + } else { + dc->block_size = 0; + dc->interleave = 0; + } + + /* determine the device type being added. */ + + if (luevt->dwevent & VDAAE_LU_PASSTHROUGH) { + if (luevt->dwevent & VDAAE_LU_PHYS_ID) { + dc->state = DCS_PT_DEV_ADDR; + dc->dev_addr_type = ATTO_GDA_AT_PORT; + dc->curr_phys_id = luevt->wphys_target_id; + } else { + esas2r_log(ESAS2R_LOG_WARN, + "luevt->dwevent does not have the " + "VDAAE_LU_PHYS_ID bit set (%s:%d)", + __func__, __LINE__); + } + } else { + dc->raid_grp_name[0] = 0; + + esas2r_targ_db_add_raid(a, dc); + } + + esas2r_trace("curr_virt_id: %d", dc->curr_virt_id); + esas2r_trace("curr_phys_id: %d", dc->curr_phys_id); + esas2r_trace("dwevent: %d", luevt->dwevent); + + esas2r_trace_exit(); + } + + if (dc->state == DCS_DEV_ADD) { + /* go to the next device. */ + + dc->curr_targ++; + } + + return false; +} + +/* + * When discovery is done, find all requests on defer queue and + * test if they need to be modified. If a target is no longer present + * then complete the request with RS_SEL. Otherwise, update the + * target_id since after a hibernate it can be a different value. + * VDA does not make passthrough target IDs persistent. + */ +static void esas2r_disc_fix_curr_requests(struct esas2r_adapter *a) +{ + unsigned long flags; + struct esas2r_target *t; + struct esas2r_request *rq; + struct list_head *element; + + /* update virt_targ_id in any outstanding esas2r_requests */ + + spin_lock_irqsave(&a->queue_lock, flags); + + list_for_each(element, &a->defer_list) { + rq = list_entry(element, struct esas2r_request, req_list); + if (rq->vrq->scsi.function == VDA_FUNC_SCSI) { + t = a->targetdb + rq->target_id; + + if (t->target_state == TS_PRESENT) + rq->vrq->scsi.target_id = le16_to_cpu( + t->virt_targ_id); + else + rq->req_stat = RS_SEL; + } + + } + + spin_unlock_irqrestore(&a->queue_lock, flags); +} diff --git a/drivers/scsi/esas2r/esas2r_flash.c b/drivers/scsi/esas2r/esas2r_flash.c new file mode 100644 index 000000000000..8582929b1fef --- /dev/null +++ b/drivers/scsi/esas2r/esas2r_flash.c @@ -0,0 +1,1512 @@ + +/* + * linux/drivers/scsi/esas2r/esas2r_flash.c + * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers + * + * Copyright (c) 2001-2013 ATTO Technology, Inc. + * (mailto:linuxdrivers@attotech.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. + * + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, + * USA. + */ + +#include "esas2r.h" + +/* local macro defs */ +#define esas2r_nvramcalc_cksum(n) \ + (esas2r_calc_byte_cksum((u8 *)(n), sizeof(struct esas2r_sas_nvram), \ + SASNVR_CKSUM_SEED)) +#define esas2r_nvramcalc_xor_cksum(n) \ + (esas2r_calc_byte_xor_cksum((u8 *)(n), \ + sizeof(struct esas2r_sas_nvram), 0)) + +#define ESAS2R_FS_DRVR_VER 2 + +static struct esas2r_sas_nvram default_sas_nvram = { + { 'E', 'S', 'A', 'S' }, /* signature */ + SASNVR_VERSION, /* version */ + 0, /* checksum */ + 31, /* max_lun_for_target */ + SASNVR_PCILAT_MAX, /* pci_latency */ + SASNVR1_BOOT_DRVR, /* options1 */ + SASNVR2_HEARTBEAT | SASNVR2_SINGLE_BUS /* options2 */ + | SASNVR2_SW_MUX_CTRL, + SASNVR_COAL_DIS, /* int_coalescing */ + SASNVR_CMDTHR_NONE, /* cmd_throttle */ + 3, /* dev_wait_time */ + 1, /* dev_wait_count */ + 0, /* spin_up_delay */ + 0, /* ssp_align_rate */ + { 0x50, 0x01, 0x08, 0x60, /* sas_addr */ + 0x00, 0x00, 0x00, 0x00 }, + { SASNVR_SPEED_AUTO }, /* phy_speed */ + { SASNVR_MUX_DISABLED }, /* SAS multiplexing */ + { 0 }, /* phy_flags */ + SASNVR_SORT_SAS_ADDR, /* sort_type */ + 3, /* dpm_reqcmd_lmt */ + 3, /* dpm_stndby_time */ + 0, /* dpm_active_time */ + { 0 }, /* phy_target_id */ + SASNVR_VSMH_DISABLED, /* virt_ses_mode */ + SASNVR_RWM_DEFAULT, /* read_write_mode */ + 0, /* link down timeout */ + { 0 } /* reserved */ +}; + +static u8 cmd_to_fls_func[] = { + 0xFF, + VDA_FLASH_READ, + VDA_FLASH_BEGINW, + VDA_FLASH_WRITE, + VDA_FLASH_COMMIT, + VDA_FLASH_CANCEL +}; + +static u8 esas2r_calc_byte_xor_cksum(u8 *addr, u32 len, u8 seed) +{ + u32 cksum = seed; + u8 *p = (u8 *)&cksum; + + while (len) { + if (((uintptr_t)addr & 3) == 0) + break; + + cksum = cksum ^ *addr; + addr++; + len--; + } + while (len >= sizeof(u32)) { + cksum = cksum ^ *(u32 *)addr; + addr += 4; + len -= 4; + } + while (len--) { + cksum = cksum ^ *addr; + addr++; + } + return p[0] ^ p[1] ^ p[2] ^ p[3]; +} + +static u8 esas2r_calc_byte_cksum(void *addr, u32 len, u8 seed) +{ + u8 *p = (u8 *)addr; + u8 cksum = seed; + + while (len--) + cksum = cksum + p[len]; + return cksum; +} + +/* Interrupt callback to process 
FM API write requests. */
+static void esas2r_fmapi_callback(struct esas2r_adapter *a,
+				  struct esas2r_request *rq)
+{
+	struct atto_vda_flash_req *vrq = &rq->vrq->flash;
+	struct esas2r_flash_context *fc =
+		(struct esas2r_flash_context *)rq->interrupt_cx;
+
+	if (rq->req_stat == RS_SUCCESS) {
+		/* Last request was successful.  See what to do now. */
+		switch (vrq->sub_func) {
+		case VDA_FLASH_BEGINW:
+			if (fc->sgc.cur_offset == NULL)
+				goto commit;
+
+			vrq->sub_func = VDA_FLASH_WRITE;
+			rq->req_stat = RS_PENDING;
+			break;
+
+		case VDA_FLASH_WRITE:
+commit:
+			vrq->sub_func = VDA_FLASH_COMMIT;
+			rq->req_stat = RS_PENDING;
+			rq->interrupt_cb = fc->interrupt_cb;
+			break;
+
+		default:
+			break;
+		}
+	}
+
+	if (rq->req_stat != RS_PENDING)
+		/*
+		 * All done.  Call the real callback to complete the FM API
+		 * request.  We should only get here if a BEGINW or WRITE
+		 * operation failed.
+		 */
+		(*fc->interrupt_cb)(a, rq);
+}
+
+/*
+ * Build a flash request based on the flash context.  The request status
+ * is filled in on an error.
+ */
+static void build_flash_msg(struct esas2r_adapter *a,
+			    struct esas2r_request *rq)
+{
+	struct esas2r_flash_context *fc =
+		(struct esas2r_flash_context *)rq->interrupt_cx;
+	struct esas2r_sg_context *sgc = &fc->sgc;
+	u8 cksum = 0;
+
+	/* calculate the checksum */
+	if (fc->func == VDA_FLASH_BEGINW) {
+		if (sgc->cur_offset)
+			cksum = esas2r_calc_byte_xor_cksum(sgc->cur_offset,
+							   sgc->length,
+							   0);
+		rq->interrupt_cb = esas2r_fmapi_callback;
+	} else {
+		rq->interrupt_cb = fc->interrupt_cb;
+	}
+	esas2r_build_flash_req(a,
+			       rq,
+			       fc->func,
+			       cksum,
+			       fc->flsh_addr,
+			       sgc->length);
+
+	esas2r_rq_free_sg_lists(rq, a);
+
+	/*
+	 * Remember the length we asked for.  We have to keep track of
+	 * the current amount done so we know how much to compare when
+	 * doing the verification phase.
+	 */
+	fc->curr_len = fc->sgc.length;
+
+	if (sgc->cur_offset) {
+		/* setup the S/G context to build the S/G table */
+		esas2r_sgc_init(sgc, a, rq, &rq->vrq->flash.data.sge[0]);
+
+		if (!esas2r_build_sg_list(a, rq, sgc)) {
+			rq->req_stat = RS_BUSY;
+			return;
+		}
+	} else {
+		fc->sgc.length = 0;
+	}
+
+	/* update the flsh_addr to the next one to write to */
+	fc->flsh_addr += fc->curr_len;
+}
+
+/* determine the method to process the flash request */
+static bool load_image(struct esas2r_adapter *a, struct esas2r_request *rq)
+{
+	/*
+	 * Assume we have more to do.  If we return with the status set to
+	 * RS_PENDING, FM API tasks will continue.
+	 */
+	rq->req_stat = RS_PENDING;
+	if (a->flags & AF_DEGRADED_MODE)
+		/* not supported for now */;
+	else
+		build_flash_msg(a, rq);
+
+	return rq->req_stat == RS_PENDING;
+}
+
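+/*
+ * A minimal sketch (not used by the driver): the sub-function progression
+ * that esas2r_fmapi_callback() above drives for a successful write chain,
+ * expressed as a pure lookup.
+ */
+static u8 esas2r_next_write_subfunc_sketch(u8 sub_func)
+{
+	switch (sub_func) {
+	case VDA_FLASH_BEGINW:
+		return VDA_FLASH_WRITE;		/* begin accepted; send data */
+	case VDA_FLASH_WRITE:
+		return VDA_FLASH_COMMIT;	/* data written; commit it */
+	default:
+		return sub_func;		/* COMMIT/READ/CANCEL end the chain */
+	}
+}
+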
+/* boot image fixer-uppers called before downloading the image. */
+static void fix_bios(struct esas2r_adapter *a, struct esas2r_flash_img *fi)
+{
+	struct esas2r_component_header *ch = &fi->cmp_hdr[CH_IT_BIOS];
+	struct esas2r_pc_image *pi;
+	struct esas2r_boot_header *bh;
+
+	pi = (struct esas2r_pc_image *)((u8 *)fi + ch->image_offset);
+	bh =
+		(struct esas2r_boot_header *)((u8 *)pi +
+					      le16_to_cpu(pi->header_offset));
+	bh->device_id = cpu_to_le16(a->pcid->device);
+
+	/* Recalculate the checksum in the PNP header if it is present */
+	if (pi->pnp_offset) {
+		u8 *pnp_header_bytes =
+			((u8 *)pi + le16_to_cpu(pi->pnp_offset));
+
+		/* Identifier - dword that starts at byte 10 */
+		*((u32 *)&pnp_header_bytes[10]) =
+			cpu_to_le32(MAKEDWORD(a->pcid->subsystem_vendor,
+					      a->pcid->subsystem_device));
+
+		/* Checksum - byte 9 */
+		pnp_header_bytes[9] -= esas2r_calc_byte_cksum(pnp_header_bytes,
+							      32, 0);
+	}
+
+	/* Recalculate the checksum needed by the PC */
+	pi->checksum = pi->checksum -
+		       esas2r_calc_byte_cksum((u8 *)pi, ch->length, 0);
+}
+
+static void fix_efi(struct esas2r_adapter *a, struct esas2r_flash_img *fi)
+{
+	struct esas2r_component_header *ch = &fi->cmp_hdr[CH_IT_EFI];
+	u32 len = ch->length;
+	u32 offset = ch->image_offset;
+	struct esas2r_efi_image *ei;
+	struct esas2r_boot_header *bh;
+
+	while (len) {
+		u32 thislen;
+
+		ei = (struct esas2r_efi_image *)((u8 *)fi + offset);
+		bh = (struct esas2r_boot_header *)((u8 *)ei
+						   + le16_to_cpu(
+							   ei->header_offset));
+		bh->device_id = cpu_to_le16(a->pcid->device);
+		thislen = (u32)le16_to_cpu(bh->image_length) * 512;
+
+		if (thislen > len)
+			break;
+
+		len -= thislen;
+		offset += thislen;
+	}
+}
+
+/* Complete a FM API request with the specified status. */
+static bool complete_fmapi_req(struct esas2r_adapter *a,
+			       struct esas2r_request *rq, u8 fi_stat)
+{
+	struct esas2r_flash_context *fc =
+		(struct esas2r_flash_context *)rq->interrupt_cx;
+	struct esas2r_flash_img *fi = fc->fi;
+
+	fi->status = fi_stat;
+	fi->driver_error = rq->req_stat;
+	rq->interrupt_cb = NULL;
+	rq->req_stat = RS_SUCCESS;
+
+	if (fi_stat != FI_STAT_IMG_VER)
+		memset(fc->scratch, 0, FM_BUF_SZ);
+
+	esas2r_enable_heartbeat(a);
+	esas2r_lock_clear_flags(&a->flags, AF_FLASH_LOCK);
+	return false;
+}
+
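+/*
+ * A minimal sketch (not used by the driver) of the fixed task order that
+ * fw_download_proc() below steps through for a full image: each component
+ * is written and then read back for verification.
+ */
+static const int esas2r_fw_task_order_sketch[] = {
+	FMTSK_ERASE_BOOT,
+	FMTSK_WRTBIOS, FMTSK_READBIOS,
+	FMTSK_WRTMAC,  FMTSK_READMAC,
+	FMTSK_WRTEFI,  FMTSK_READEFI,
+	FMTSK_WRTCFG,  FMTSK_READCFG,
+};
+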
+/* Process each phase of the flash download process. */
+static void fw_download_proc(struct esas2r_adapter *a,
+			     struct esas2r_request *rq)
+{
+	struct esas2r_flash_context *fc =
+		(struct esas2r_flash_context *)rq->interrupt_cx;
+	struct esas2r_flash_img *fi = fc->fi;
+	struct esas2r_component_header *ch;
+	u32 len;
+	u8 *p, *q;
+
+	/* If the previous operation failed, just return. */
+	if (rq->req_stat != RS_SUCCESS)
+		goto error;
+
+	/*
+	 * If an upload just completed and the compare length is non-zero,
+	 * then we just read back part of the image we just wrote.  Verify
+	 * the section and continue reading until the entire image is
+	 * verified.
+	 */
+	if (fc->func == VDA_FLASH_READ
+	    && fc->cmp_len) {
+		ch = &fi->cmp_hdr[fc->comp_typ];
+
+		p = fc->scratch;
+		q = (u8 *)fi			/* start of the whole gob */
+		    + ch->image_offset		/* start of the current image */
+		    + ch->length		/* end of the current image */
+		    - fc->cmp_len;		/* where we are now */
+
+		/*
+		 * NOTE - curr_len is the exact count of bytes for the read
+		 * even when the end is read and it's not a full buffer
+		 */
+		for (len = fc->curr_len; len; len--)
+			if (*p++ != *q++)
+				goto error;
+
+		fc->cmp_len -= fc->curr_len;	/* # left to compare */
+
+		/* Update fc and determine the length for the next upload */
+		if (fc->cmp_len > FM_BUF_SZ)
+			fc->sgc.length = FM_BUF_SZ;
+		else
+			fc->sgc.length = fc->cmp_len;
+
+		fc->sgc.cur_offset = fc->sgc_offset +
+				     ((u8 *)fc->scratch - (u8 *)fi);
+	}
+
+	/*
+	 * This code uses a 'while' statement since the next component may
+	 * have a length = zero.  This can happen since some components are
+	 * not required.  At the end of this 'while' we set up the length
+	 * for the next request and therefore sgc.length can be = 0.
+	 */
+	while (fc->sgc.length == 0) {
+		ch = &fi->cmp_hdr[fc->comp_typ];
+
+		switch (fc->task) {
+		case FMTSK_ERASE_BOOT:
+			/* the BIOS image is written next */
+			ch = &fi->cmp_hdr[CH_IT_BIOS];
+			if (ch->length == 0)
+				goto no_bios;
+
+			fc->task = FMTSK_WRTBIOS;
+			fc->func = VDA_FLASH_BEGINW;
+			fc->comp_typ = CH_IT_BIOS;
+			fc->flsh_addr = FLS_OFFSET_BOOT;
+			fc->sgc.length = ch->length;
+			fc->sgc.cur_offset = fc->sgc_offset +
+					     ch->image_offset;
+			break;
+
+		case FMTSK_WRTBIOS:
+			/*
+			 * The BIOS image has been written - read it and
+			 * verify it
+			 */
+			fc->task = FMTSK_READBIOS;
+			fc->func = VDA_FLASH_READ;
+			fc->flsh_addr = FLS_OFFSET_BOOT;
+			fc->cmp_len = ch->length;
+			fc->sgc.length = FM_BUF_SZ;
+			fc->sgc.cur_offset = fc->sgc_offset
+					     + ((u8 *)fc->scratch -
+						(u8 *)fi);
+			break;
+
+		case FMTSK_READBIOS:
+no_bios:
+			/*
+			 * Mark the component header status for the image
+			 * completed
+			 */
+			ch->status = CH_STAT_SUCCESS;
+
+			/* The MAC image is written next */
+			ch = &fi->cmp_hdr[CH_IT_MAC];
+			if (ch->length == 0)
+				goto no_mac;
+
+			fc->task = FMTSK_WRTMAC;
+			fc->func = VDA_FLASH_BEGINW;
+			fc->comp_typ = CH_IT_MAC;
+			fc->flsh_addr = FLS_OFFSET_BOOT
+					+ fi->cmp_hdr[CH_IT_BIOS].length;
+			fc->sgc.length = ch->length;
+			fc->sgc.cur_offset = fc->sgc_offset +
+					     ch->image_offset;
+			break;
+
+		case FMTSK_WRTMAC:
+			/* The MAC image has been written - read and verify */
+			fc->task = FMTSK_READMAC;
+			fc->func = VDA_FLASH_READ;
+			fc->flsh_addr -= ch->length;
+			fc->cmp_len = ch->length;
+			fc->sgc.length = FM_BUF_SZ;
+			fc->sgc.cur_offset = fc->sgc_offset
+					     + ((u8 *)fc->scratch -
+						(u8 *)fi);
+			break;
+
+		case FMTSK_READMAC:
+no_mac:
+			/*
+			 * Mark the component header status for the image
+			 * completed
+			 */
+			ch->status = CH_STAT_SUCCESS;
+
+			/* The EFI image is written next */
+			ch = &fi->cmp_hdr[CH_IT_EFI];
+			if (ch->length == 0)
+				goto no_efi;
+
+			fc->task = FMTSK_WRTEFI;
+			fc->func = VDA_FLASH_BEGINW;
+			fc->comp_typ = CH_IT_EFI;
+			fc->flsh_addr = FLS_OFFSET_BOOT
+					+ fi->cmp_hdr[CH_IT_BIOS].length
+					+ fi->cmp_hdr[CH_IT_MAC].length;
+			fc->sgc.length = ch->length;
+			fc->sgc.cur_offset = fc->sgc_offset +
+					     ch->image_offset;
+			break;
+
+		case FMTSK_WRTEFI:
+			/* The EFI image has been written - read and verify */
+			fc->task = FMTSK_READEFI;
+			fc->func = VDA_FLASH_READ;
+			fc->flsh_addr -= ch->length;
+			fc->cmp_len = ch->length;
+			fc->sgc.length = FM_BUF_SZ;
+			fc->sgc.cur_offset = fc->sgc_offset
+					     + ((u8 *)fc->scratch -
+						(u8 *)fi);
+			break;
+
+		case FMTSK_READEFI:
+no_efi:
+			/*
+			 * Mark the component header status for the image
+			 * completed
+			 */
+			ch->status = CH_STAT_SUCCESS;
+
+			/* The CFG image is written next */
+			ch = &fi->cmp_hdr[CH_IT_CFG];
+
+			if (ch->length == 0)
+				goto no_cfg;
+			fc->task = FMTSK_WRTCFG;
+			fc->func = VDA_FLASH_BEGINW;
+			fc->comp_typ = CH_IT_CFG;
+			fc->flsh_addr = FLS_OFFSET_CPYR - ch->length;
+			fc->sgc.length = ch->length;
+			fc->sgc.cur_offset = fc->sgc_offset +
+					     ch->image_offset;
+			break;
+
+		case FMTSK_WRTCFG:
+			/* The CFG image has been written - read and verify */
+			fc->task = FMTSK_READCFG;
+			fc->func = VDA_FLASH_READ;
+			fc->flsh_addr = FLS_OFFSET_CPYR - ch->length;
+			fc->cmp_len = ch->length;
+			fc->sgc.length = FM_BUF_SZ;
+			fc->sgc.cur_offset = fc->sgc_offset
+					     + ((u8 *)fc->scratch -
+						(u8 *)fi);
+			break;
+
+		case FMTSK_READCFG:
+no_cfg:
+			/*
+			 * Mark the component header status for the image
+			 * completed
+			 */
+			ch->status = CH_STAT_SUCCESS;
+
+			/*
+			 * The download is complete.  If in degraded mode,
+			 * attempt a chip reset.
+			 */
+			if (a->flags & AF_DEGRADED_MODE)
+				esas2r_local_reset_adapter(a);
+
+			a->flash_ver = fi->cmp_hdr[CH_IT_BIOS].version;
+			esas2r_print_flash_rev(a);
+
+			/* Update the type of boot image on the card */
+			memcpy(a->image_type, fi->rel_version,
+			       sizeof(fi->rel_version));
+			complete_fmapi_req(a, rq, FI_STAT_SUCCESS);
+			return;
+		}
+
+		/* If verifying, don't try reading more than what's there */
+		if (fc->func == VDA_FLASH_READ
+		    && fc->sgc.length > fc->cmp_len)
+			fc->sgc.length = fc->cmp_len;
+	}
+
+	/* Build the request to perform the next action */
+	if (!load_image(a, rq)) {
+error:
+		if (fc->comp_typ < fi->num_comps) {
+			ch = &fi->cmp_hdr[fc->comp_typ];
+			ch->status = CH_STAT_FAILED;
+		}
+
+		complete_fmapi_req(a, rq, FI_STAT_FAILED);
+	}
+}
+
+/* Determine the flash image adap_typ for this adapter */
+static u8 get_fi_adap_type(struct esas2r_adapter *a)
+{
+	u8 type;
+
+	/* use the device ID to get the correct adap_typ for this HBA */
+	switch (a->pcid->device) {
+	case ATTO_DID_INTEL_IOP348:
+		type = FI_AT_SUN_LAKE;
+		break;
+
+	case ATTO_DID_MV_88RC9580:
+	case ATTO_DID_MV_88RC9580TS:
+	case ATTO_DID_MV_88RC9580TSE:
+	case ATTO_DID_MV_88RC9580TL:
+		type = FI_AT_MV_9580;
+		break;
+
+	default:
+		type = FI_AT_UNKNWN;
+		break;
+	}
+
+	return type;
+}
+
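+/*
+ * A minimal sketch (not used by the driver) of the record trailer that
+ * chk_cfg() below walks back-to-front at the top of the config region:
+ * each record ends with a 16-bit size followed by a 16-bit type word.
+ */
+struct esas2r_cfg_trailer_sketch {
+	u16 size;	/* payload length in bytes */
+	u16 type;	/* FBT_CPYR, FBT_SETUP or FBT_FLASH_VER */
+};
+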
+/* Size of config + copyright + flash_ver images, 0 for failure. */
+static u32 chk_cfg(u8 *cfg, u32 length, u32 *flash_ver)
+{
+	u16 *pw = (u16 *)cfg - 1;
+	u32 sz = 0;
+	u32 len = length;
+
+	if (len == 0)
+		len = FM_BUF_SZ;
+
+	if (flash_ver)
+		*flash_ver = 0;
+
+	while (true) {
+		u16 type;
+		u16 size;
+
+		type = le16_to_cpu(*pw--);
+		size = le16_to_cpu(*pw--);
+
+		if (type != FBT_CPYR
+		    && type != FBT_SETUP
+		    && type != FBT_FLASH_VER)
+			break;
+
+		if (type == FBT_FLASH_VER
+		    && flash_ver)
+			*flash_ver = le32_to_cpu(*(u32 *)(pw - 1));
+
+		sz += size + (2 * sizeof(u16));
+		pw -= size / sizeof(u16);
+
+		if (sz > len - (2 * sizeof(u16)))
+			break;
+	}
+
+	/* See if we are comparing the size to the specified length */
+	if (length && sz != length)
+		return 0;
+
+	return sz;
+}
+
+/* Verify that the boot image is valid */
+static u8 chk_boot(u8 *boot_img, u32 length)
+{
+	struct esas2r_boot_image *bi = (struct esas2r_boot_image *)boot_img;
+	u16 hdroffset = le16_to_cpu(bi->header_offset);
+	struct esas2r_boot_header *bh;
+
+	if (bi->signature != le16_to_cpu(0xaa55)
+	    || (long)hdroffset >
+	    (long)(65536L - sizeof(struct esas2r_boot_header))
+	    || (hdroffset & 3)
+	    || (hdroffset < sizeof(struct esas2r_boot_image))
+	    || ((u32)hdroffset + sizeof(struct esas2r_boot_header) > length))
+		return 0xff;
+
+	bh = (struct esas2r_boot_header *)((char *)bi + hdroffset);
+
+	if (bh->signature[0] != 'P'
+	    || bh->signature[1] != 'C'
+	    || bh->signature[2] != 'I'
+	    || bh->signature[3] != 'R'
+	    || le16_to_cpu(bh->struct_length) <
+	    (u16)sizeof(struct esas2r_boot_header)
+	    || bh->class_code[2] != 0x01
+	    || bh->class_code[1] != 0x04
+	    || bh->class_code[0] != 0x00
+	    || (bh->code_type != CODE_TYPE_PC
+		&& bh->code_type != CODE_TYPE_OPEN
+		&& bh->code_type != CODE_TYPE_EFI))
+		return 0xff;
+
+	return bh->code_type;
+}
+
+/* The sum of all the WORDS of the image */
+static u16 calc_fi_checksum(struct esas2r_flash_context *fc)
+{
+	struct esas2r_flash_img *fi = fc->fi;
+	u16 cksum;
+	u32 len;
+	u16 *pw;
+
+	for (len = (fi->length - fc->fi_hdr_len) / 2,
+	     pw = (u16 *)((u8 *)fi + fc->fi_hdr_len),
+	     cksum = 0;
+	     len;
+	     len--, pw++)
+		cksum = cksum + le16_to_cpu(*pw);
+
+	return cksum;
+}
+
+/*
+ * Verify the flash image structure.  The following verifications will
+ * be performed:
+ *   1)  verify the fi_version is correct
+ *   2)  verify the checksum of the entire image
+ *   3)  validate the adap_typ, action and length fields
+ *   4)  validate each component header; check the img_type and
+ *       length fields
+ *   5)  validate each component image; validate signatures and
+ *       local checksums
+ */
+static bool verify_fi(struct esas2r_adapter *a,
+		      struct esas2r_flash_context *fc)
+{
+	struct esas2r_flash_img *fi = fc->fi;
+	u8 type;
+	bool imgerr;
+	u16 i;
+	u32 len;
+	struct esas2r_component_header *ch;
+
+	/* Verify the length - it must be even since we do a word checksum */
+	len = fi->length;
+
+	if ((len & 1)
+	    || len < fc->fi_hdr_len) {
+		fi->status = FI_STAT_LENGTH;
+		return false;
+	}
+
+	/* Get adapter type and verify type in flash image */
+	type = get_fi_adap_type(a);
+	if ((type == FI_AT_UNKNWN) || (fi->adap_typ != type)) {
+		fi->status = FI_STAT_ADAPTYP;
+		return false;
+	}
+
+	/*
+	 * Loop through each component and verify the img_type and length
+	 * fields.  Keep a running count of the sizes so we can verify the
+	 * total size against the additive size.
+	 */
+	imgerr = false;
+
+	for (i = 0, len = 0, ch = fi->cmp_hdr;
+	     i < fi->num_comps;
+	     i++, ch++) {
+		bool cmperr = false;
+
+		/*
+		 * Verify that the component header has the same index as the
+		 * image type. 
The headers must be ordered correctly + */ + if (i != ch->img_type) { + imgerr = true; + ch->status = CH_STAT_INVALID; + continue; + } + + switch (ch->img_type) { + case CH_IT_BIOS: + type = CODE_TYPE_PC; + break; + + case CH_IT_MAC: + type = CODE_TYPE_OPEN; + break; + + case CH_IT_EFI: + type = CODE_TYPE_EFI; + break; + } + + switch (ch->img_type) { + case CH_IT_FW: + case CH_IT_NVR: + break; + + case CH_IT_BIOS: + case CH_IT_MAC: + case CH_IT_EFI: + if (ch->length & 0x1ff) + cmperr = true; + + /* Test if component image is present */ + if (ch->length == 0) + break; + + /* Image is present - verify the image */ + if (chk_boot((u8 *)fi + ch->image_offset, ch->length) + != type) + cmperr = true; + + break; + + case CH_IT_CFG: + + /* Test if component image is present */ + if (ch->length == 0) { + cmperr = true; + break; + } + + /* Image is present - verify the image */ + if (!chk_cfg((u8 *)fi + ch->image_offset + ch->length, + ch->length, NULL)) + cmperr = true; + + break; + + default: + + fi->status = FI_STAT_UNKNOWN; + return false; + } + + if (cmperr) { + imgerr = true; + ch->status = CH_STAT_INVALID; + } else { + ch->status = CH_STAT_PENDING; + len += ch->length; + } + } + + if (imgerr) { + fi->status = FI_STAT_MISSING; + return false; + } + + /* Compare fi->length to the sum of ch->length fields */ + if (len != fi->length - fc->fi_hdr_len) { + fi->status = FI_STAT_LENGTH; + return false; + } + + /* Compute the checksum - it should come out zero */ + if (fi->checksum != calc_fi_checksum(fc)) { + fi->status = FI_STAT_CHKSUM; + return false; + } + + return true; +} + +/* Fill in the FS IOCTL response data from a completed request. */ +static void esas2r_complete_fs_ioctl(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + struct esas2r_ioctl_fs *fs = + (struct esas2r_ioctl_fs *)rq->interrupt_cx; + + if (rq->vrq->flash.sub_func == VDA_FLASH_COMMIT) + esas2r_enable_heartbeat(a); + + fs->driver_error = rq->req_stat; + + if (fs->driver_error == RS_SUCCESS) + fs->status = ATTO_STS_SUCCESS; + else + fs->status = ATTO_STS_FAILED; +} + +/* Prepare an FS IOCTL request to be sent to the firmware. 
 */
+bool esas2r_process_fs_ioctl(struct esas2r_adapter *a,
+			     struct esas2r_ioctl_fs *fs,
+			     struct esas2r_request *rq,
+			     struct esas2r_sg_context *sgc)
+{
+	u8 cmdcnt = (u8)ARRAY_SIZE(cmd_to_fls_func);
+	struct esas2r_ioctlfs_command *fsc = &fs->command;
+	u8 func = 0;
+	u32 datalen;
+
+	fs->status = ATTO_STS_FAILED;
+	fs->driver_error = RS_PENDING;
+
+	if (fs->version > ESAS2R_FS_VER) {
+		fs->status = ATTO_STS_INV_VERSION;
+		return false;
+	}
+
+	/* validate the command before using it to index cmd_to_fls_func[] */
+	if (fsc->command >= cmdcnt) {
+		fs->status = ATTO_STS_INV_FUNC;
+		return false;
+	}
+
+	func = cmd_to_fls_func[fsc->command];
+	if (func == 0xFF) {
+		fs->status = ATTO_STS_INV_FUNC;
+		return false;
+	}
+
+	if (fsc->command != ESAS2R_FS_CMD_CANCEL) {
+		if ((a->pcid->device != ATTO_DID_MV_88RC9580
+		     || fs->adap_type != ESAS2R_FS_AT_ESASRAID2)
+		    && (a->pcid->device != ATTO_DID_MV_88RC9580TS
+			|| fs->adap_type != ESAS2R_FS_AT_TSSASRAID2)
+		    && (a->pcid->device != ATTO_DID_MV_88RC9580TSE
+			|| fs->adap_type != ESAS2R_FS_AT_TSSASRAID2E)
+		    && (a->pcid->device != ATTO_DID_MV_88RC9580TL
+			|| fs->adap_type != ESAS2R_FS_AT_TLSASHBA)) {
+			fs->status = ATTO_STS_INV_ADAPTER;
+			return false;
+		}
+
+		if (fs->driver_ver > ESAS2R_FS_DRVR_VER) {
+			fs->status = ATTO_STS_INV_DRVR_VER;
+			return false;
+		}
+	}
+
+	if (a->flags & AF_DEGRADED_MODE) {
+		fs->status = ATTO_STS_DEGRADED;
+		return false;
+	}
+
+	rq->interrupt_cb = esas2r_complete_fs_ioctl;
+	rq->interrupt_cx = fs;
+	datalen = le32_to_cpu(fsc->length);
+	esas2r_build_flash_req(a,
+			       rq,
+			       func,
+			       fsc->checksum,
+			       le32_to_cpu(fsc->flash_addr),
+			       datalen);
+
+	if (func == VDA_FLASH_WRITE
+	    || func == VDA_FLASH_READ) {
+		if (datalen == 0) {
+			fs->status = ATTO_STS_INV_FUNC;
+			return false;
+		}
+
+		esas2r_sgc_init(sgc, a, rq, rq->vrq->flash.data.sge);
+		sgc->length = datalen;
+
+		if (!esas2r_build_sg_list(a, rq, sgc)) {
+			fs->status = ATTO_STS_OUT_OF_RSRC;
+			return false;
+		}
+	}
+
+	if (func == VDA_FLASH_COMMIT)
+		esas2r_disable_heartbeat(a);
+
+	esas2r_start_request(a, rq);
+
+	return true;
+}
+
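+/*
+ * A minimal sketch (not used by the driver) of the FS-command lookup that
+ * esas2r_process_fs_ioctl() above performs: the bounds check must precede
+ * the cmd_to_fls_func[] index, and 0xFF marks an unmapped slot.
+ */
+static bool esas2r_fs_cmd_to_func_sketch(u8 command, u8 *func)
+{
+	if (command >= ARRAY_SIZE(cmd_to_fls_func)
+	    || cmd_to_fls_func[command] == 0xFF)
+		return false;		/* unknown or unsupported command */
+
+	*func = cmd_to_fls_func[command];
+	return true;
+}
+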
+			 */
+			if (function == DRBL_FLASH_REQ) {
+				esas2r_hdebug("flash access timeout");
+				esas2r_write_register_dword(a, MU_DOORBELL_IN,
+							    DRBL_FLASH_DONE);
+				esas2r_enable_chip_interrupts(a);
+			} else {
+				esas2r_hdebug("flash release timeout");
+			}
+
+			return false;
+		}
+	}
+
+	/* if we're done, re-enable chip interrupts */
+	if (function == DRBL_FLASH_DONE)
+		esas2r_enable_chip_interrupts(a);
+
+	return true;
+}
+
+#define WINDOW_SIZE ((signed int)MW_DATA_WINDOW_SIZE)
+
+bool esas2r_read_flash_block(struct esas2r_adapter *a,
+			     void *to,
+			     u32 from,
+			     u32 size)
+{
+	u8 *end = (u8 *)to;
+
+	/* Try to acquire access to the flash */
+	if (!esas2r_flash_access(a, DRBL_FLASH_REQ))
+		return false;
+
+	while (size) {
+		u32 len;
+		u32 offset;
+		u32 iatvr;
+
+		if (a->flags2 & AF2_SERIAL_FLASH)
+			iatvr = MW_DATA_ADDR_SER_FLASH + (from & -WINDOW_SIZE);
+		else
+			iatvr = MW_DATA_ADDR_PAR_FLASH + (from & -WINDOW_SIZE);
+
+		esas2r_map_data_window(a, iatvr);
+		offset = from & (WINDOW_SIZE - 1);
+		len = size;
+
+		if (len > WINDOW_SIZE - offset)
+			len = WINDOW_SIZE - offset;
+
+		from += len;
+		size -= len;
+
+		while (len--) {
+			*end++ = esas2r_read_data_byte(a, offset);
+			offset++;
+		}
+	}
+
+	/* Release flash access */
+	esas2r_flash_access(a, DRBL_FLASH_DONE);
+	return true;
+}
+
+bool esas2r_read_flash_rev(struct esas2r_adapter *a)
+{
+	u8 bytes[256];
+	u16 *pw;
+	u16 *pwstart;
+	u16 type;
+	u16 size;
+	u32 sz;
+
+	sz = sizeof(bytes);
+	pw = (u16 *)(bytes + sz);
+	pwstart = (u16 *)bytes + 2;
+
+	if (!esas2r_read_flash_block(a, bytes, FLS_OFFSET_CPYR - sz, sz))
+		goto invalid_rev;
+
+	while (pw >= pwstart) {
+		pw--;
+		type = le16_to_cpu(*pw);
+		pw--;
+		size = le16_to_cpu(*pw);
+		pw -= size / 2;
+
+		if (type == FBT_CPYR
+		    || type == FBT_SETUP
+		    || pw < pwstart)
+			continue;
+
+		if (type == FBT_FLASH_VER)
+			a->flash_ver = le32_to_cpu(*(u32 *)pw);
+
+		break;
+	}
+
+invalid_rev:
+	return esas2r_print_flash_rev(a);
+}
+
+bool esas2r_print_flash_rev(struct esas2r_adapter *a)
+{
+	u16 year = LOWORD(a->flash_ver);
+	u8 day = LOBYTE(HIWORD(a->flash_ver));
+	u8 month = HIBYTE(HIWORD(a->flash_ver));
+
+	if (day == 0
+	    || month == 0
+	    || day > 31
+	    || month > 12
+	    || year < 2006
+	    || year > 9999) {
+		strcpy(a->flash_rev, "not found");
+		a->flash_ver = 0;
+		return false;
+	}
+
+	sprintf(a->flash_rev, "%02d/%02d/%04d", month, day, year);
+	esas2r_hdebug("flash version: %s", a->flash_rev);
+	return true;
+}
+
+/*
+ * Find the type of boot image that is currently in the flash.
+ * The chip only has a 64 KB PCI-e expansion ROM
+ * size so only one image can be flashed at a time.
+ */
+bool esas2r_read_image_type(struct esas2r_adapter *a)
+{
+	u8 bytes[256];
+	struct esas2r_boot_image *bi;
+	struct esas2r_boot_header *bh;
+	u32 sz;
+	u32 len;
+	u32 offset;
+
+	/* Start at the base of the boot images and look for a valid image */
+	sz = sizeof(bytes);
+	len = FLS_LENGTH_BOOT;
+	offset = 0;
+
+	while (true) {
+		if (!esas2r_read_flash_block(a, bytes, FLS_OFFSET_BOOT +
+					     offset,
+					     sz))
+			goto invalid_rev;
+
+		bi = (struct esas2r_boot_image *)bytes;
+		bh = (struct esas2r_boot_header *)((u8 *)bi +
+						   le16_to_cpu(
+							   bi->header_offset));
+		if (bi->signature != cpu_to_le16(0xAA55))
+			goto invalid_rev;
+
+		if (bh->code_type == CODE_TYPE_PC) {
+			strcpy(a->image_type, "BIOS");
+
+			return true;
+		} else if (bh->code_type == CODE_TYPE_EFI) {
+			struct esas2r_efi_image *ei;
+
+			/*
+			 * So we have an EFI image. There are several types
+			 * so see which architecture we have.
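+			 *
+			 * (Aside, for orientation only: the machine_type
+			 * values tested below follow the PE/COFF machine
+			 * encoding, e.g. 0x014c for IA-32, 0x0200 for
+			 * Itanium, 0x8664 for x86-64 and 0x0ebc for EFI
+			 * byte code; the EFI_MACHINE_* constants themselves
+			 * live in the driver headers.)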
+			 */
+			ei = (struct esas2r_efi_image *)bytes;
+
+			switch (le16_to_cpu(ei->machine_type)) {
+			case EFI_MACHINE_IA32:
+				strcpy(a->image_type, "EFI 32-bit");
+				return true;
+
+			case EFI_MACHINE_IA64:
+				strcpy(a->image_type, "EFI Itanium");
+				return true;
+
+			case EFI_MACHINE_X64:
+				strcpy(a->image_type, "EFI 64-bit");
+				return true;
+
+			case EFI_MACHINE_EBC:
+				strcpy(a->image_type, "EFI EBC");
+				return true;
+
+			default:
+				goto invalid_rev;
+			}
+		} else {
+			u32 thislen;
+
+			/* jump to the next image */
+			thislen = (u32)le16_to_cpu(bh->image_length) * 512;
+			if (thislen == 0
+			    || thislen + offset > len
+			    || bh->indicator == INDICATOR_LAST)
+				break;
+
+			offset += thislen;
+		}
+	}
+
+invalid_rev:
+	strcpy(a->image_type, "no boot images");
+	return false;
+}
+
+/*
+ * Read and validate current NVRAM parameters by accessing
+ * physical NVRAM directly. If currently stored parameters are
+ * invalid, use the defaults.
+ */
+bool esas2r_nvram_read_direct(struct esas2r_adapter *a)
+{
+	bool result;
+
+	if (down_interruptible(&a->nvram_semaphore))
+		return false;
+
+	if (!esas2r_read_flash_block(a, a->nvram, FLS_OFFSET_NVR,
+				     sizeof(struct esas2r_sas_nvram))) {
+		esas2r_hdebug("NVRAM read failed, using defaults");
+		up(&a->nvram_semaphore);
+		return false;
+	}
+
+	result = esas2r_nvram_validate(a);
+
+	up(&a->nvram_semaphore);
+
+	return result;
+}
+
+/* Interrupt callback to process NVRAM completions. */
+static void esas2r_nvram_callback(struct esas2r_adapter *a,
+				  struct esas2r_request *rq)
+{
+	struct atto_vda_flash_req *vrq = &rq->vrq->flash;
+
+	if (rq->req_stat == RS_SUCCESS) {
+		/* last request was successful. see what to do now. */
+
+		switch (vrq->sub_func) {
+		case VDA_FLASH_BEGINW:
+			vrq->sub_func = VDA_FLASH_WRITE;
+			rq->req_stat = RS_PENDING;
+			break;
+
+		case VDA_FLASH_WRITE:
+			vrq->sub_func = VDA_FLASH_COMMIT;
+			rq->req_stat = RS_PENDING;
+			break;
+
+		case VDA_FLASH_READ:
+			esas2r_nvram_validate(a);
+			break;
+
+		case VDA_FLASH_COMMIT:
+		default:
+			break;
+		}
+	}
+
+	if (rq->req_stat != RS_PENDING) {
+		/* update the NVRAM state */
+		if (rq->req_stat == RS_SUCCESS)
+			esas2r_lock_set_flags(&a->flags, AF_NVR_VALID);
+		else
+			esas2r_lock_clear_flags(&a->flags, AF_NVR_VALID);
+
+		esas2r_enable_heartbeat(a);
+
+		up(&a->nvram_semaphore);
+	}
+}
+
+/*
+ * Write the contents of nvram to the adapter's physical NVRAM.
+ * The cached copy of the NVRAM is also updated.
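+ *
+ * (Aside, summarizing the visible flow rather than adding to it: the
+ * write is a chain of flash requests advanced by esas2r_nvram_callback
+ * above,
+ *
+ *	VDA_FLASH_BEGINW -> VDA_FLASH_WRITE -> VDA_FLASH_COMMIT
+ *
+ * with nvram_semaphore held and the heartbeat disabled until the final
+ * step completes and the callback releases both.)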
+ */ +bool esas2r_nvram_write(struct esas2r_adapter *a, struct esas2r_request *rq, + struct esas2r_sas_nvram *nvram) +{ + struct esas2r_sas_nvram *n = nvram; + u8 sas_address_bytes[8]; + u32 *sas_address_dwords = (u32 *)&sas_address_bytes[0]; + struct atto_vda_flash_req *vrq = &rq->vrq->flash; + + if (a->flags & AF_DEGRADED_MODE) + return false; + + if (down_interruptible(&a->nvram_semaphore)) + return false; + + if (n == NULL) + n = a->nvram; + + /* check the validity of the settings */ + if (n->version > SASNVR_VERSION) { + up(&a->nvram_semaphore); + return false; + } + + memcpy(&sas_address_bytes[0], n->sas_addr, 8); + + if (sas_address_bytes[0] != 0x50 + || sas_address_bytes[1] != 0x01 + || sas_address_bytes[2] != 0x08 + || (sas_address_bytes[3] & 0xF0) != 0x60 + || ((sas_address_bytes[3] & 0x0F) | sas_address_dwords[1]) == 0) { + up(&a->nvram_semaphore); + return false; + } + + if (n->spin_up_delay > SASNVR_SPINUP_MAX) + n->spin_up_delay = SASNVR_SPINUP_MAX; + + n->version = SASNVR_VERSION; + n->checksum = n->checksum - esas2r_nvramcalc_cksum(n); + memcpy(a->nvram, n, sizeof(struct esas2r_sas_nvram)); + + /* write the NVRAM */ + n = a->nvram; + esas2r_disable_heartbeat(a); + + esas2r_build_flash_req(a, + rq, + VDA_FLASH_BEGINW, + esas2r_nvramcalc_xor_cksum(n), + FLS_OFFSET_NVR, + sizeof(struct esas2r_sas_nvram)); + + if (a->flags & AF_LEGACY_SGE_MODE) { + + vrq->data.sge[0].length = + cpu_to_le32(SGE_LAST | + sizeof(struct esas2r_sas_nvram)); + vrq->data.sge[0].address = cpu_to_le64( + a->uncached_phys + (u64)((u8 *)n - a->uncached)); + } else { + vrq->data.prde[0].ctl_len = + cpu_to_le32(sizeof(struct esas2r_sas_nvram)); + vrq->data.prde[0].address = cpu_to_le64( + a->uncached_phys + + (u64)((u8 *)n - a->uncached)); + } + rq->interrupt_cb = esas2r_nvram_callback; + esas2r_start_request(a, rq); + return true; +} + +/* Validate the cached NVRAM. if the NVRAM is invalid, load the defaults. */ +bool esas2r_nvram_validate(struct esas2r_adapter *a) +{ + struct esas2r_sas_nvram *n = a->nvram; + bool rslt = false; + + if (n->signature[0] != 'E' + || n->signature[1] != 'S' + || n->signature[2] != 'A' + || n->signature[3] != 'S') { + esas2r_hdebug("invalid NVRAM signature"); + } else if (esas2r_nvramcalc_cksum(n)) { + esas2r_hdebug("invalid NVRAM checksum"); + } else if (n->version > SASNVR_VERSION) { + esas2r_hdebug("invalid NVRAM version"); + } else { + esas2r_lock_set_flags(&a->flags, AF_NVR_VALID); + rslt = true; + } + + if (rslt == false) { + esas2r_hdebug("using defaults"); + esas2r_nvram_set_defaults(a); + } + + return rslt; +} + +/* + * Set the cached NVRAM to defaults. note that this function sets the default + * NVRAM when it has been determined that the physical NVRAM is invalid. + * In this case, the SAS address is fabricated. + */ +void esas2r_nvram_set_defaults(struct esas2r_adapter *a) +{ + struct esas2r_sas_nvram *n = a->nvram; + u32 time = jiffies_to_msecs(jiffies); + + esas2r_lock_clear_flags(&a->flags, AF_NVR_VALID); + memcpy(n, &default_sas_nvram, sizeof(struct esas2r_sas_nvram)); + n->sas_addr[3] |= 0x0F; + n->sas_addr[4] = HIBYTE(LOWORD(time)); + n->sas_addr[5] = LOBYTE(LOWORD(time)); + n->sas_addr[6] = a->pcid->bus->number; + n->sas_addr[7] = a->pcid->devfn; +} + +void esas2r_nvram_get_defaults(struct esas2r_adapter *a, + struct esas2r_sas_nvram *nvram) +{ + u8 sas_addr[8]; + + /* + * in case we are copying the defaults into the adapter, copy the SAS + * address out first. 
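+	 *
+	 * (Aside: this matters when the caller passes nvram == a->nvram;
+	 * a plain
+	 *
+	 *	memcpy(nvram, &default_sas_nvram, sizeof(*nvram));
+	 *
+	 * would then clobber the live SAS address, so the address is
+	 * staged through a local copy below instead.)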
+ */ + memcpy(&sas_addr[0], a->nvram->sas_addr, 8); + memcpy(nvram, &default_sas_nvram, sizeof(struct esas2r_sas_nvram)); + memcpy(&nvram->sas_addr[0], &sas_addr[0], 8); +} + +bool esas2r_fm_api(struct esas2r_adapter *a, struct esas2r_flash_img *fi, + struct esas2r_request *rq, struct esas2r_sg_context *sgc) +{ + struct esas2r_flash_context *fc = &a->flash_context; + u8 j; + struct esas2r_component_header *ch; + + if (esas2r_lock_set_flags(&a->flags, AF_FLASH_LOCK) & AF_FLASH_LOCK) { + /* flag was already set */ + fi->status = FI_STAT_BUSY; + return false; + } + + memcpy(&fc->sgc, sgc, sizeof(struct esas2r_sg_context)); + sgc = &fc->sgc; + fc->fi = fi; + fc->sgc_offset = sgc->cur_offset; + rq->req_stat = RS_SUCCESS; + rq->interrupt_cx = fc; + + switch (fi->fi_version) { + case FI_VERSION_1: + fc->scratch = ((struct esas2r_flash_img *)fi)->scratch_buf; + fc->num_comps = FI_NUM_COMPS_V1; + fc->fi_hdr_len = sizeof(struct esas2r_flash_img); + break; + + default: + return complete_fmapi_req(a, rq, FI_STAT_IMG_VER); + } + + if (a->flags & AF_DEGRADED_MODE) + return complete_fmapi_req(a, rq, FI_STAT_DEGRADED); + + switch (fi->action) { + case FI_ACT_DOWN: /* Download the components */ + /* Verify the format of the flash image */ + if (!verify_fi(a, fc)) + return complete_fmapi_req(a, rq, fi->status); + + /* Adjust the BIOS fields that are dependent on the HBA */ + ch = &fi->cmp_hdr[CH_IT_BIOS]; + + if (ch->length) + fix_bios(a, fi); + + /* Adjust the EFI fields that are dependent on the HBA */ + ch = &fi->cmp_hdr[CH_IT_EFI]; + + if (ch->length) + fix_efi(a, fi); + + /* + * Since the image was just modified, compute the checksum on + * the modified image. First update the CRC for the composite + * expansion ROM image. + */ + fi->checksum = calc_fi_checksum(fc); + + /* Disable the heartbeat */ + esas2r_disable_heartbeat(a); + + /* Now start up the download sequence */ + fc->task = FMTSK_ERASE_BOOT; + fc->func = VDA_FLASH_BEGINW; + fc->comp_typ = CH_IT_CFG; + fc->flsh_addr = FLS_OFFSET_BOOT; + fc->sgc.length = FLS_LENGTH_BOOT; + fc->sgc.cur_offset = NULL; + + /* Setup the callback address */ + fc->interrupt_cb = fw_download_proc; + break; + + case FI_ACT_UPSZ: /* Get upload sizes */ + fi->adap_typ = get_fi_adap_type(a); + fi->flags = 0; + fi->num_comps = fc->num_comps; + fi->length = fc->fi_hdr_len; + + /* Report the type of boot image in the rel_version string */ + memcpy(fi->rel_version, a->image_type, + sizeof(fi->rel_version)); + + /* Build the component headers */ + for (j = 0, ch = fi->cmp_hdr; + j < fi->num_comps; + j++, ch++) { + ch->img_type = j; + ch->status = CH_STAT_PENDING; + ch->length = 0; + ch->version = 0xffffffff; + ch->image_offset = 0; + ch->pad[0] = 0; + ch->pad[1] = 0; + } + + if (a->flash_ver != 0) { + fi->cmp_hdr[CH_IT_BIOS].version = + fi->cmp_hdr[CH_IT_MAC].version = + fi->cmp_hdr[CH_IT_EFI].version = + fi->cmp_hdr[CH_IT_CFG].version + = a->flash_ver; + + fi->cmp_hdr[CH_IT_BIOS].status = + fi->cmp_hdr[CH_IT_MAC].status = + fi->cmp_hdr[CH_IT_EFI].status = + fi->cmp_hdr[CH_IT_CFG].status = + CH_STAT_SUCCESS; + + return complete_fmapi_req(a, rq, FI_STAT_SUCCESS); + } + + /* fall through */ + + case FI_ACT_UP: /* Upload the components */ + default: + return complete_fmapi_req(a, rq, FI_STAT_INVALID); + } + + /* + * If we make it here, fc has been setup to do the first task. Call + * load_image to format the request, start it, and get out. The + * interrupt code will call the callback when the first message is + * complete. 
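+	 *
+	 * (Aside, sketched from the fields set up above rather than from
+	 * the full driver: each completion advances fc->task, so the
+	 * download runs roughly
+	 *
+	 *	FMTSK_ERASE_BOOT -> write each pending component -> commit
+	 *
+	 * task names beyond FMTSK_ERASE_BOOT are not visible in this hunk.)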
+	 */
+	if (!load_image(a, rq))
+		return complete_fmapi_req(a, rq, FI_STAT_FAILED);
+
+	esas2r_start_request(a, rq);
+
+	return true;
+}
diff --git a/drivers/scsi/esas2r/esas2r_init.c b/drivers/scsi/esas2r/esas2r_init.c
new file mode 100644
index 000000000000..3a798e7d5c56
--- /dev/null
+++ b/drivers/scsi/esas2r/esas2r_init.c
@@ -0,0 +1,1773 @@
+/*
+ *  linux/drivers/scsi/esas2r/esas2r_init.c
+ *      For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
+ *
+ *  Copyright (c) 2001-2013 ATTO Technology, Inc.
+ *  (mailto:linuxdrivers@attotech.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+ *
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#include "esas2r.h"
+
+static bool esas2r_initmem_alloc(struct esas2r_adapter *a,
+				 struct esas2r_mem_desc *mem_desc,
+				 u32 align)
+{
+	mem_desc->esas2r_param = mem_desc->size + align;
+	mem_desc->virt_addr = NULL;
+	mem_desc->phys_addr = 0;
+	mem_desc->esas2r_data = dma_alloc_coherent(&a->pcid->dev,
+						   (size_t)mem_desc->
+						   esas2r_param,
+						   (dma_addr_t *)&mem_desc->
+						   phys_addr,
+						   GFP_KERNEL);
+
+	if (mem_desc->esas2r_data == NULL) {
+		esas2r_log(ESAS2R_LOG_CRIT,
+			   "failed to allocate %lu bytes of consistent memory!",
+			   (long unsigned int)mem_desc->esas2r_param);
+		return false;
+	}
+
+	mem_desc->virt_addr = PTR_ALIGN(mem_desc->esas2r_data, align);
+	mem_desc->phys_addr = ALIGN(mem_desc->phys_addr, align);
+	memset(mem_desc->virt_addr, 0, mem_desc->size);
+	return true;
+}
+
+static void esas2r_initmem_free(struct esas2r_adapter *a,
+				struct esas2r_mem_desc *mem_desc)
+{
+	if (mem_desc->virt_addr == NULL)
+		return;
+
+	/*
+	 * Careful! phys_addr and virt_addr may have been adjusted from the
+	 * original allocation in order to return the desired alignment. That
+	 * means we have to use the original address (in esas2r_data) and size
+	 * (esas2r_param) and calculate the original physical address based on
+	 * the difference between the requested and actual allocation size.
+	 */
+	if (mem_desc->phys_addr) {
+		int unalign = ((u8 *)mem_desc->virt_addr) -
+			      ((u8 *)mem_desc->esas2r_data);
+
+		dma_free_coherent(&a->pcid->dev,
+				  (size_t)mem_desc->esas2r_param,
+				  mem_desc->esas2r_data,
+				  (dma_addr_t)(mem_desc->phys_addr - unalign));
+	} else {
+		kfree(mem_desc->esas2r_data);
+	}
+
+	mem_desc->virt_addr = NULL;
+}
+
+static bool alloc_vda_req(struct esas2r_adapter *a,
+			  struct esas2r_request *rq)
+{
+	struct esas2r_mem_desc *memdesc = kzalloc(
+		sizeof(struct esas2r_mem_desc), GFP_KERNEL);
+
+	if (memdesc == NULL) {
+		esas2r_hdebug("could not alloc mem for vda request memdesc\n");
+		return false;
+	}
+
+	memdesc->size = sizeof(union atto_vda_req) +
+			ESAS2R_DATA_BUF_LEN;
+
+	if (!esas2r_initmem_alloc(a, memdesc, 256)) {
+		esas2r_hdebug("could not alloc mem for vda request\n");
+		kfree(memdesc);
+		return false;
+	}
+
+	a->num_vrqs++;
+	list_add(&memdesc->next_desc, &a->vrq_mds_head);
+
+	rq->vrq_md = memdesc;
+	rq->vrq = (union atto_vda_req *)memdesc->virt_addr;
+	rq->vrq->scsi.handle = a->num_vrqs;
+
+	return true;
+}
+
+static void esas2r_unmap_regions(struct esas2r_adapter *a)
+{
+	if (a->regs)
+		iounmap((void __iomem *)a->regs);
+
+	a->regs = NULL;
+
+	pci_release_region(a->pcid, 2);
+
+	if (a->data_window)
+		iounmap((void __iomem *)a->data_window);
+
+	a->data_window = NULL;
+
+	pci_release_region(a->pcid, 0);
+}
+
+static int esas2r_map_regions(struct esas2r_adapter *a)
+{
+	int error;
+
+	a->regs = NULL;
+	a->data_window = NULL;
+
+	error = pci_request_region(a->pcid, 2, a->name);
+	if (error != 0) {
+		esas2r_log(ESAS2R_LOG_CRIT,
+			   "pci_request_region(2) failed, error %d",
+			   error);
+
+		return error;
+	}
+
+	a->regs = (void __force *)ioremap(pci_resource_start(a->pcid, 2),
+					  pci_resource_len(a->pcid, 2));
+	if (a->regs == NULL) {
+		esas2r_log(ESAS2R_LOG_CRIT,
+			   "ioremap failed for regs mem region\n");
+		pci_release_region(a->pcid, 2);
+		return -EFAULT;
+	}
+
+	error = pci_request_region(a->pcid, 0, a->name);
+	if (error != 0) {
+		esas2r_log(ESAS2R_LOG_CRIT,
+			   "pci_request_region(0) failed, error %d",
+			   error);
+		esas2r_unmap_regions(a);
+		return error;
+	}
+
+	a->data_window = (void __force *)ioremap(pci_resource_start(a->pcid,
+								    0),
+						 pci_resource_len(a->pcid, 0));
+	if (a->data_window == NULL) {
+		esas2r_log(ESAS2R_LOG_CRIT,
+			   "ioremap failed for data_window mem region\n");
+		esas2r_unmap_regions(a);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+static void esas2r_setup_interrupts(struct esas2r_adapter *a, int intr_mode)
+{
+	int i;
+
+	/* Set up interrupt mode based on the requested value */
+	switch (intr_mode) {
+	case INTR_MODE_LEGACY:
+use_legacy_interrupts:
+		a->intr_mode = INTR_MODE_LEGACY;
+		break;
+
+	case INTR_MODE_MSI:
+		i = pci_enable_msi(a->pcid);
+		if (i != 0) {
+			esas2r_log(ESAS2R_LOG_WARN,
+				   "failed to enable MSI for adapter %d, "
+				   "falling back to legacy interrupts "
+				   "(err=%d)", a->index,
+				   i);
+			goto use_legacy_interrupts;
+		}
+		a->intr_mode = INTR_MODE_MSI;
+		esas2r_lock_set_flags(&a->flags2, AF2_MSI_ENABLED);
+		break;
+
+	default:
+		esas2r_log(ESAS2R_LOG_WARN,
+			   "unknown interrupt_mode %d requested, "
+			   "falling back to legacy interrupt",
+			   intr_mode);
+		goto use_legacy_interrupts;
+	}
+}
+
+static
void esas2r_claim_interrupts(struct esas2r_adapter *a) +{ + unsigned long flags = IRQF_DISABLED; + + if (a->intr_mode == INTR_MODE_LEGACY) + flags |= IRQF_SHARED; + + esas2r_log(ESAS2R_LOG_INFO, + "esas2r_claim_interrupts irq=%d (%p, %s, %x)", + a->pcid->irq, a, a->name, flags); + + if (request_irq(a->pcid->irq, + (a->intr_mode == + INTR_MODE_LEGACY) ? esas2r_interrupt : + esas2r_msi_interrupt, + flags, + a->name, + a)) { + esas2r_log(ESAS2R_LOG_CRIT, "unable to request IRQ %02X", + a->pcid->irq); + return; + } + + esas2r_lock_set_flags(&a->flags2, AF2_IRQ_CLAIMED); + esas2r_log(ESAS2R_LOG_INFO, + "claimed IRQ %d flags: 0x%lx", + a->pcid->irq, flags); +} + +int esas2r_init_adapter(struct Scsi_Host *host, struct pci_dev *pcid, + int index) +{ + struct esas2r_adapter *a; + u64 bus_addr = 0; + int i; + void *next_uncached; + struct esas2r_request *first_request, *last_request; + + if (index >= MAX_ADAPTERS) { + esas2r_log(ESAS2R_LOG_CRIT, + "tried to init invalid adapter index %u!", + index); + return 0; + } + + if (esas2r_adapters[index]) { + esas2r_log(ESAS2R_LOG_CRIT, + "tried to init existing adapter index %u!", + index); + return 0; + } + + a = (struct esas2r_adapter *)host->hostdata; + memset(a, 0, sizeof(struct esas2r_adapter)); + a->pcid = pcid; + a->host = host; + + if (sizeof(dma_addr_t) > 4) { + const uint64_t required_mask = dma_get_required_mask + (&pcid->dev); + if (required_mask > DMA_BIT_MASK(32) + && !pci_set_dma_mask(pcid, DMA_BIT_MASK(64)) + && !pci_set_consistent_dma_mask(pcid, + DMA_BIT_MASK(64))) { + esas2r_log_dev(ESAS2R_LOG_INFO, + &(a->pcid->dev), + "64-bit PCI addressing enabled\n"); + } else if (!pci_set_dma_mask(pcid, DMA_BIT_MASK(32)) + && !pci_set_consistent_dma_mask(pcid, + DMA_BIT_MASK(32))) { + esas2r_log_dev(ESAS2R_LOG_INFO, + &(a->pcid->dev), + "32-bit PCI addressing enabled\n"); + } else { + esas2r_log(ESAS2R_LOG_CRIT, + "failed to set DMA mask"); + esas2r_kill_adapter(index); + return 0; + } + } else { + if (!pci_set_dma_mask(pcid, DMA_BIT_MASK(32)) + && !pci_set_consistent_dma_mask(pcid, + DMA_BIT_MASK(32))) { + esas2r_log_dev(ESAS2R_LOG_INFO, + &(a->pcid->dev), + "32-bit PCI addressing enabled\n"); + } else { + esas2r_log(ESAS2R_LOG_CRIT, + "failed to set DMA mask"); + esas2r_kill_adapter(index); + return 0; + } + } + esas2r_adapters[index] = a; + sprintf(a->name, ESAS2R_DRVR_NAME "_%02d", index); + esas2r_debug("new adapter %p, name %s", a, a->name); + spin_lock_init(&a->request_lock); + spin_lock_init(&a->fw_event_lock); + sema_init(&a->fm_api_semaphore, 1); + sema_init(&a->fs_api_semaphore, 1); + sema_init(&a->nvram_semaphore, 1); + + esas2r_fw_event_off(a); + snprintf(a->fw_event_q_name, ESAS2R_KOBJ_NAME_LEN, "esas2r/%d", + a->index); + a->fw_event_q = create_singlethread_workqueue(a->fw_event_q_name); + + init_waitqueue_head(&a->buffered_ioctl_waiter); + init_waitqueue_head(&a->nvram_waiter); + init_waitqueue_head(&a->fm_api_waiter); + init_waitqueue_head(&a->fs_api_waiter); + init_waitqueue_head(&a->vda_waiter); + + INIT_LIST_HEAD(&a->general_req.req_list); + INIT_LIST_HEAD(&a->active_list); + INIT_LIST_HEAD(&a->defer_list); + INIT_LIST_HEAD(&a->free_sg_list_head); + INIT_LIST_HEAD(&a->avail_request); + INIT_LIST_HEAD(&a->vrq_mds_head); + INIT_LIST_HEAD(&a->fw_event_list); + + first_request = (struct esas2r_request *)((u8 *)(a + 1)); + + for (last_request = first_request, i = 1; i < num_requests; + last_request++, i++) { + INIT_LIST_HEAD(&last_request->req_list); + list_add_tail(&last_request->comp_list, &a->avail_request); + if (!alloc_vda_req(a, 
last_request)) { + esas2r_log(ESAS2R_LOG_CRIT, + "failed to allocate a VDA request!"); + esas2r_kill_adapter(index); + return 0; + } + } + + esas2r_debug("requests: %p to %p (%d, %d)", first_request, + last_request, + sizeof(*first_request), + num_requests); + + if (esas2r_map_regions(a) != 0) { + esas2r_log(ESAS2R_LOG_CRIT, "could not map PCI regions!"); + esas2r_kill_adapter(index); + return 0; + } + + a->index = index; + + /* interrupts will be disabled until we are done with init */ + atomic_inc(&a->dis_ints_cnt); + atomic_inc(&a->disable_cnt); + a->flags |= AF_CHPRST_PENDING + | AF_DISC_PENDING + | AF_FIRST_INIT + | AF_LEGACY_SGE_MODE; + + a->init_msg = ESAS2R_INIT_MSG_START; + a->max_vdareq_size = 128; + a->build_sgl = esas2r_build_sg_list_sge; + + esas2r_setup_interrupts(a, interrupt_mode); + + a->uncached_size = esas2r_get_uncached_size(a); + a->uncached = dma_alloc_coherent(&pcid->dev, + (size_t)a->uncached_size, + (dma_addr_t *)&bus_addr, + GFP_KERNEL); + if (a->uncached == NULL) { + esas2r_log(ESAS2R_LOG_CRIT, + "failed to allocate %d bytes of consistent memory!", + a->uncached_size); + esas2r_kill_adapter(index); + return 0; + } + + a->uncached_phys = bus_addr; + + esas2r_debug("%d bytes uncached memory allocated @ %p (%x:%x)", + a->uncached_size, + a->uncached, + upper_32_bits(bus_addr), + lower_32_bits(bus_addr)); + memset(a->uncached, 0, a->uncached_size); + next_uncached = a->uncached; + + if (!esas2r_init_adapter_struct(a, + &next_uncached)) { + esas2r_log(ESAS2R_LOG_CRIT, + "failed to initialize adapter structure (2)!"); + esas2r_kill_adapter(index); + return 0; + } + + tasklet_init(&a->tasklet, + esas2r_adapter_tasklet, + (unsigned long)a); + + /* + * Disable chip interrupts to prevent spurious interrupts + * until we claim the IRQ. + */ + esas2r_disable_chip_interrupts(a); + esas2r_check_adapter(a); + + if (!esas2r_init_adapter_hw(a, true)) + esas2r_log(ESAS2R_LOG_CRIT, "failed to initialize hardware!"); + else + esas2r_debug("esas2r_init_adapter ok"); + + esas2r_claim_interrupts(a); + + if (a->flags2 & AF2_IRQ_CLAIMED) + esas2r_enable_chip_interrupts(a); + + esas2r_lock_set_flags(&a->flags2, AF2_INIT_DONE); + if (!(a->flags & AF_DEGRADED_MODE)) + esas2r_kickoff_timer(a); + esas2r_debug("esas2r_init_adapter done for %p (%d)", + a, a->disable_cnt); + + return 1; +} + +static void esas2r_adapter_power_down(struct esas2r_adapter *a, + int power_management) +{ + struct esas2r_mem_desc *memdesc, *next; + + if ((a->flags2 & AF2_INIT_DONE) + && (!(a->flags & AF_DEGRADED_MODE))) { + if (!power_management) { + del_timer_sync(&a->timer); + tasklet_kill(&a->tasklet); + } + esas2r_power_down(a); + + /* + * There are versions of firmware that do not handle the sync + * cache command correctly. Stall here to ensure that the + * cache is lazily flushed. 
+ */ + mdelay(500); + esas2r_debug("chip halted"); + } + + /* Remove sysfs binary files */ + if (a->sysfs_fw_created) { + sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_fw); + a->sysfs_fw_created = 0; + } + + if (a->sysfs_fs_created) { + sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_fs); + a->sysfs_fs_created = 0; + } + + if (a->sysfs_vda_created) { + sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_vda); + a->sysfs_vda_created = 0; + } + + if (a->sysfs_hw_created) { + sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_hw); + a->sysfs_hw_created = 0; + } + + if (a->sysfs_live_nvram_created) { + sysfs_remove_bin_file(&a->host->shost_dev.kobj, + &bin_attr_live_nvram); + a->sysfs_live_nvram_created = 0; + } + + if (a->sysfs_default_nvram_created) { + sysfs_remove_bin_file(&a->host->shost_dev.kobj, + &bin_attr_default_nvram); + a->sysfs_default_nvram_created = 0; + } + + /* Clean up interrupts */ + if (a->flags2 & AF2_IRQ_CLAIMED) { + esas2r_log_dev(ESAS2R_LOG_INFO, + &(a->pcid->dev), + "free_irq(%d) called", a->pcid->irq); + + free_irq(a->pcid->irq, a); + esas2r_debug("IRQ released"); + esas2r_lock_clear_flags(&a->flags2, AF2_IRQ_CLAIMED); + } + + if (a->flags2 & AF2_MSI_ENABLED) { + pci_disable_msi(a->pcid); + esas2r_lock_clear_flags(&a->flags2, AF2_MSI_ENABLED); + esas2r_debug("MSI disabled"); + } + + if (a->inbound_list_md.virt_addr) + esas2r_initmem_free(a, &a->inbound_list_md); + + if (a->outbound_list_md.virt_addr) + esas2r_initmem_free(a, &a->outbound_list_md); + + list_for_each_entry_safe(memdesc, next, &a->free_sg_list_head, + next_desc) { + esas2r_initmem_free(a, memdesc); + } + + /* Following frees everything allocated via alloc_vda_req */ + list_for_each_entry_safe(memdesc, next, &a->vrq_mds_head, next_desc) { + esas2r_initmem_free(a, memdesc); + list_del(&memdesc->next_desc); + kfree(memdesc); + } + + kfree(a->first_ae_req); + a->first_ae_req = NULL; + + kfree(a->sg_list_mds); + a->sg_list_mds = NULL; + + kfree(a->req_table); + a->req_table = NULL; + + if (a->regs) { + esas2r_unmap_regions(a); + a->regs = NULL; + a->data_window = NULL; + esas2r_debug("regions unmapped"); + } +} + +/* Release/free allocated resources for specified adapters. 
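 *
 * A typical full teardown (this mirrors esas2r_cleanup below) simply
 * walks every adapter slot:
 *
 *	for (i = 0; i < MAX_ADAPTERS; i++)
 *		esas2r_kill_adapter(i);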
*/
+void esas2r_kill_adapter(int i)
+{
+	struct esas2r_adapter *a = esas2r_adapters[i];
+
+	if (a) {
+		unsigned long flags;
+		struct workqueue_struct *wq;
+		esas2r_debug("killing adapter %p [%d] ", a, i);
+		esas2r_fw_event_off(a);
+		esas2r_adapter_power_down(a, 0);
+		if (esas2r_buffered_ioctl &&
+		    (a->pcid == esas2r_buffered_ioctl_pcid)) {
+			dma_free_coherent(&a->pcid->dev,
+					  (size_t)esas2r_buffered_ioctl_size,
+					  esas2r_buffered_ioctl,
+					  esas2r_buffered_ioctl_addr);
+			esas2r_buffered_ioctl = NULL;
+		}
+
+		if (a->vda_buffer) {
+			dma_free_coherent(&a->pcid->dev,
+					  (size_t)VDA_MAX_BUFFER_SIZE,
+					  a->vda_buffer,
+					  (dma_addr_t)a->ppvda_buffer);
+			a->vda_buffer = NULL;
+		}
+		if (a->fs_api_buffer) {
+			dma_free_coherent(&a->pcid->dev,
+					  (size_t)a->fs_api_buffer_size,
+					  a->fs_api_buffer,
+					  (dma_addr_t)a->ppfs_api_buffer);
+			a->fs_api_buffer = NULL;
+		}
+
+		kfree(a->local_atto_ioctl);
+		a->local_atto_ioctl = NULL;
+
+		spin_lock_irqsave(&a->fw_event_lock, flags);
+		wq = a->fw_event_q;
+		a->fw_event_q = NULL;
+		spin_unlock_irqrestore(&a->fw_event_lock, flags);
+		if (wq)
+			destroy_workqueue(wq);
+
+		if (a->uncached) {
+			dma_free_coherent(&a->pcid->dev,
+					  (size_t)a->uncached_size,
+					  a->uncached,
+					  (dma_addr_t)a->uncached_phys);
+			a->uncached = NULL;
+			esas2r_debug("uncached area freed");
+		}
+
+		esas2r_log_dev(ESAS2R_LOG_INFO,
+			       &(a->pcid->dev),
+			       "pci_disable_device() called. msix_enabled: %d "
+			       "msi_enabled: %d irq: %d pin: %d",
+			       a->pcid->msix_enabled,
+			       a->pcid->msi_enabled,
+			       a->pcid->irq,
+			       a->pcid->pin);
+
+		esas2r_log_dev(ESAS2R_LOG_INFO,
+			       &(a->pcid->dev),
+			       "before pci_disable_device() enable_cnt: %d",
+			       a->pcid->enable_cnt.counter);
+
+		pci_disable_device(a->pcid);
+		esas2r_log_dev(ESAS2R_LOG_INFO,
+			       &(a->pcid->dev),
+			       "after pci_disable_device() enable_cnt: %d",
+			       a->pcid->enable_cnt.counter);
+
+		esas2r_log_dev(ESAS2R_LOG_INFO,
+			       &(a->pcid->dev),
+			       "pci_set_drv_data(%p, NULL) called",
+			       a->pcid);
+
+		pci_set_drvdata(a->pcid, NULL);
+		esas2r_adapters[i] = NULL;
+
+		if (a->flags2 & AF2_INIT_DONE) {
+			esas2r_lock_clear_flags(&a->flags2,
+						AF2_INIT_DONE);
+
+			esas2r_lock_set_flags(&a->flags,
+					      AF_DEGRADED_MODE);
+
+			esas2r_log_dev(ESAS2R_LOG_INFO,
+				       &(a->host->shost_gendev),
+				       "scsi_remove_host() called");
+
+			scsi_remove_host(a->host);
+
+			esas2r_log_dev(ESAS2R_LOG_INFO,
+				       &(a->host->shost_gendev),
+				       "scsi_host_put() called");
+
+			scsi_host_put(a->host);
+		}
+	}
+}
+
+int esas2r_cleanup(struct Scsi_Host *host)
+{
+	struct esas2r_adapter *a;
+	int index;
+
+	if (host == NULL) {
+		int i;
+
+		esas2r_debug("esas2r_cleanup everything");
+		for (i = 0; i < MAX_ADAPTERS; i++)
+			esas2r_kill_adapter(i);
+		return -1;
+	}
+
+	esas2r_debug("esas2r_cleanup called for host %p", host);
+
+	/* Only touch hostdata once the NULL check above has passed. */
+	a = (struct esas2r_adapter *)host->hostdata;
+	index = a->index;
+	esas2r_kill_adapter(index);
+	return index;
+}
+
+int esas2r_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	struct Scsi_Host *host = pci_get_drvdata(pdev);
+	u32 device_state;
+	struct esas2r_adapter *a = (struct esas2r_adapter *)host->hostdata;
+
+	esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), "suspending adapter()");
+	if (!a)
+		return -ENODEV;
+
+	esas2r_adapter_power_down(a, 1);
+	device_state = pci_choose_state(pdev, state);
+	esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
+		       "pci_save_state() called");
+	pci_save_state(pdev);
+	esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
+		       "pci_disable_device() called");
+	pci_disable_device(pdev);
+	esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
+		       "pci_set_power_state() called");
+	pci_set_power_state(pdev, device_state);
+	esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), "esas2r_suspend(): 0");
+	return 0;
+}
+
+int esas2r_resume(struct pci_dev *pdev)
+{
+	struct Scsi_Host *host = pci_get_drvdata(pdev);
+	struct esas2r_adapter *a = (struct esas2r_adapter *)host->hostdata;
+	int rez;
+
+	esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), "resuming adapter()");
+	esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
+		       "pci_set_power_state(PCI_D0) "
+		       "called");
+	pci_set_power_state(pdev, PCI_D0);
+	esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
+		       "pci_enable_wake(PCI_D0, 0) "
+		       "called");
+	pci_enable_wake(pdev, PCI_D0, 0);
+	esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
+		       "pci_restore_state() called");
+	pci_restore_state(pdev);
+	esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
+		       "pci_enable_device() called");
+	rez = pci_enable_device(pdev);
+	pci_set_master(pdev);
+
+	if (!a) {
+		rez = -ENODEV;
+		goto error_exit;
+	}
+
+	if (esas2r_map_regions(a) != 0) {
+		esas2r_log(ESAS2R_LOG_CRIT, "could not re-map PCI regions!");
+		rez = -ENOMEM;
+		goto error_exit;
+	}
+
+	/* Set up interrupt mode */
+	esas2r_setup_interrupts(a, a->intr_mode);
+
+	/*
+	 * Disable chip interrupts to prevent spurious interrupts until we
+	 * claim the IRQ.
+	 */
+	esas2r_disable_chip_interrupts(a);
+	if (!esas2r_power_up(a, true)) {
+		esas2r_debug("yikes, esas2r_power_up failed");
+		rez = -ENOMEM;
+		goto error_exit;
+	}
+
+	esas2r_claim_interrupts(a);
+
+	if (a->flags2 & AF2_IRQ_CLAIMED) {
+		/*
+		 * Now that system interrupt(s) are claimed, we can enable
+		 * chip interrupts.
+		 */
+		esas2r_enable_chip_interrupts(a);
+		esas2r_kickoff_timer(a);
+	} else {
+		esas2r_debug("yikes, unable to claim IRQ");
+		esas2r_log(ESAS2R_LOG_CRIT, "could not re-claim IRQ!");
+		rez = -ENOMEM;
+		goto error_exit;
+	}
+
+error_exit:
+	esas2r_log_dev(ESAS2R_LOG_CRIT, &(pdev->dev), "esas2r_resume(): %d",
+		       rez);
+	return rez;
+}
+
+bool esas2r_set_degraded_mode(struct esas2r_adapter *a, char *error_str)
+{
+	esas2r_lock_set_flags(&a->flags, AF_DEGRADED_MODE);
+	esas2r_log(ESAS2R_LOG_CRIT,
+		   "setting adapter to degraded mode: %s\n", error_str);
+	return false;
+}
+
+u32 esas2r_get_uncached_size(struct esas2r_adapter *a)
+{
+	return sizeof(struct esas2r_sas_nvram)
+	       + ALIGN(ESAS2R_DISC_BUF_LEN, 8)
+	       + ALIGN(sizeof(u32), 8) /* outbound list copy pointer */
+	       + 8
+	       + (num_sg_lists * (u16)sgl_page_size)
+	       + ALIGN((num_requests + num_ae_requests + 1 +
+			ESAS2R_LIST_EXTRA) *
+		       sizeof(struct esas2r_inbound_list_source_entry),
+		       8)
+	       + ALIGN((num_requests + num_ae_requests + 1 +
+			ESAS2R_LIST_EXTRA) *
+		       sizeof(struct atto_vda_ob_rsp), 8)
+	       + 256; /* VDA request and buffer align */
+}
+
+static void esas2r_init_pci_cfg_space(struct esas2r_adapter *a)
+{
+	int pcie_cap_reg;
+
+	pcie_cap_reg = pci_find_capability(a->pcid, PCI_CAP_ID_EXP);
+	if (pcie_cap_reg) {
+		u16 devcontrol;
+
+		pci_read_config_word(a->pcid, pcie_cap_reg + PCI_EXP_DEVCTL,
+				     &devcontrol);
+
+		if ((devcontrol & PCI_EXP_DEVCTL_READRQ) > 0x2000) {
+			esas2r_log(ESAS2R_LOG_INFO,
+				   "max read request size > 512B");
+
+			devcontrol &= ~PCI_EXP_DEVCTL_READRQ;
+			devcontrol |= 0x2000;
+			pci_write_config_word(a->pcid,
+					      pcie_cap_reg + PCI_EXP_DEVCTL,
+					      devcontrol);
+		}
+	}
+}
+
+/*
+ * Determine the organization of the uncached data area and
+ * finish initializing the adapter structure
+ */
+bool esas2r_init_adapter_struct(struct esas2r_adapter *a,
+				void **uncached_area)
+{
+	u32 i;
+	u8 *high;
+	struct esas2r_inbound_list_source_entry *element;
+	struct esas2r_request *rq;
+	struct esas2r_mem_desc *sgl;
+
+	spin_lock_init(&a->sg_list_lock);
+	spin_lock_init(&a->mem_lock);
+	spin_lock_init(&a->queue_lock);
+
+	a->targetdb_end = &a->targetdb[ESAS2R_MAX_TARGETS];
+
+	if (!alloc_vda_req(a, &a->general_req)) {
+		esas2r_hdebug(
+			"failed to allocate a VDA request for the general req!");
+		return false;
+	}
+
+	/* allocate requests for asynchronous events */
+	a->first_ae_req =
+		kzalloc(num_ae_requests * sizeof(struct esas2r_request),
+			GFP_KERNEL);
+
+	if (a->first_ae_req == NULL) {
+		esas2r_log(ESAS2R_LOG_CRIT,
+			   "failed to allocate memory for asynchronous events");
+		return false;
+	}
+
+	/* allocate the S/G list memory descriptors */
+	a->sg_list_mds = kzalloc(
+		num_sg_lists * sizeof(struct esas2r_mem_desc), GFP_KERNEL);
+
+	if (a->sg_list_mds == NULL) {
+		esas2r_log(ESAS2R_LOG_CRIT,
+			   "failed to allocate memory for s/g list descriptors");
+		return false;
+	}
+
+	/* allocate the request table */
+	a->req_table =
+		kzalloc((num_requests + num_ae_requests +
+			 1) * sizeof(struct esas2r_request *), GFP_KERNEL);
+
+	if (a->req_table == NULL) {
+		esas2r_log(ESAS2R_LOG_CRIT,
+			   "failed to allocate memory for the request table");
+		return false;
+	}
+
+	/* initialize PCI configuration space */
+	esas2r_init_pci_cfg_space(a);
+
+	/*
+	 * the thunder_stream boards all have a serial flash part that has a
+	 * different base address on the AHB bus.
+	 */
+	if ((a->pcid->subsystem_vendor == ATTO_VENDOR_ID)
+	    && (a->pcid->subsystem_device & ATTO_SSDID_TBT))
+		a->flags2 |= AF2_THUNDERBOLT;
+
+	if (a->flags2 & AF2_THUNDERBOLT)
+		a->flags2 |= AF2_SERIAL_FLASH;
+
+	if (a->pcid->subsystem_device == ATTO_TLSH_1068)
+		a->flags2 |= AF2_THUNDERLINK;
+
+	/* Uncached Area */
+	high = (u8 *)*uncached_area;
+
+	/* initialize the scatter/gather table pages */
+
+	for (i = 0, sgl = a->sg_list_mds; i < num_sg_lists; i++, sgl++) {
+		sgl->size = sgl_page_size;
+
+		list_add_tail(&sgl->next_desc, &a->free_sg_list_head);
+
+		if (!esas2r_initmem_alloc(a, sgl, ESAS2R_SGL_ALIGN)) {
+			/* Allow the driver to load if the minimum count is
+			 * met. */
+			if (i < NUM_SGL_MIN)
+				return false;
+			break;
+		}
+	}
+
+	/* compute the size of the lists */
+	a->list_size = num_requests + ESAS2R_LIST_EXTRA;
+
+	/* allocate the inbound list */
+	a->inbound_list_md.size = a->list_size *
+				  sizeof(struct
+					 esas2r_inbound_list_source_entry);
+
+	if (!esas2r_initmem_alloc(a, &a->inbound_list_md, ESAS2R_LIST_ALIGN)) {
+		esas2r_hdebug("failed to allocate IB list");
+		return false;
+	}
+
+	/* allocate the outbound list */
+	a->outbound_list_md.size = a->list_size *
+				   sizeof(struct atto_vda_ob_rsp);
+
+	if (!esas2r_initmem_alloc(a, &a->outbound_list_md,
+				  ESAS2R_LIST_ALIGN)) {
+		esas2r_hdebug("failed to allocate OB list");
+		return false;
+	}
+
+	/* allocate the NVRAM structure */
+	a->nvram = (struct esas2r_sas_nvram *)high;
+	high += sizeof(struct esas2r_sas_nvram);
+
+	/* allocate the discovery buffer */
+	a->disc_buffer = high;
+	high += ESAS2R_DISC_BUF_LEN;
+	high = PTR_ALIGN(high, 8);
+
+	/* allocate the outbound list copy pointer */
+	a->outbound_copy = (u32 volatile *)high;
+	high += sizeof(u32);
+
+	if (!(a->flags & AF_NVR_VALID))
+		esas2r_nvram_set_defaults(a);
+
+	/* update the caller's uncached memory area pointer */
+	*uncached_area = (void *)high;
+
+	/* initialize the allocated memory */
+	if (a->flags & AF_FIRST_INIT) {
+		memset(a->req_table, 0,
+		       (num_requests + num_ae_requests +
+			1) * sizeof(struct esas2r_request *));
+
+		esas2r_targ_db_initialize(a);
+
+		/* prime parts of the inbound list */
+		element =
+			(struct esas2r_inbound_list_source_entry *)
+			a->inbound_list_md.virt_addr;
+
+		for (i = 0; i < a->list_size; i++) {
+			element->address = 0;
+			element->reserved = 0;
+			element->length = cpu_to_le32(HWILSE_INTERFACE_F0
+						      | (sizeof(union
+							        atto_vda_req)
+							 / sizeof(u32)));
+			element++;
+		}
+
+		/* init the AE requests */
+		for (rq = a->first_ae_req, i = 0; i < num_ae_requests; rq++,
+		     i++) {
+			INIT_LIST_HEAD(&rq->req_list);
+			if (!alloc_vda_req(a, rq)) {
+				esas2r_hdebug(
+					"failed to allocate a VDA request!");
+				return false;
+			}
+
+			esas2r_rq_init_request(rq, a);
+
+			/* override the completion function */
+			rq->comp_cb = esas2r_ae_complete;
+		}
+	}
+
+	return true;
+}
+
+/* This code will verify that the chip is operational. */
+bool esas2r_check_adapter(struct esas2r_adapter *a)
+{
+	u32 starttime;
+	u32 doorbell;
+	u64 ppaddr;
+	u32 dw;
+
+	/*
+	 * if the chip reset detected flag is set, we can bypass a bunch of
+	 * stuff.
+	 */
+	if (a->flags & AF_CHPRST_DETECTED)
+		goto skip_chip_reset;
+
+	/*
+	 * BEFORE WE DO ANYTHING, disable the chip interrupts! the boot driver
+	 * may have left them enabled or we may be recovering from a fault.
+	 */
+	esas2r_write_register_dword(a, MU_INT_MASK_OUT, ESAS2R_INT_DIS_MASK);
+	esas2r_flush_register_dword(a, MU_INT_MASK_OUT);
+
+	/*
+	 * wait for the firmware to become ready by forcing an interrupt and
+	 * waiting for a response.
+	 */
+	starttime = jiffies_to_msecs(jiffies);
+
+	while (true) {
+		esas2r_force_interrupt(a);
+		doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
+		if (doorbell == 0xFFFFFFFF) {
+			/*
+			 * Give the firmware up to two seconds to enable
+			 * register access after a reset.
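+			 *
+			 * (Aside: an all-ones readback like this is the
+			 * usual signature of a PCI master abort, i.e. the
+			 * device is not decoding memory cycles yet, so it
+			 * is treated as "not ready" rather than as a real
+			 * doorbell value.)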
+ */ + if ((jiffies_to_msecs(jiffies) - starttime) > 2000) + return esas2r_set_degraded_mode(a, + "unable to access registers"); + } else if (doorbell & DRBL_FORCE_INT) { + u32 ver = (doorbell & DRBL_FW_VER_MSK); + + /* + * This driver supports version 0 and version 1 of + * the API + */ + esas2r_write_register_dword(a, MU_DOORBELL_OUT, + doorbell); + + if (ver == DRBL_FW_VER_0) { + esas2r_lock_set_flags(&a->flags, + AF_LEGACY_SGE_MODE); + + a->max_vdareq_size = 128; + a->build_sgl = esas2r_build_sg_list_sge; + } else if (ver == DRBL_FW_VER_1) { + esas2r_lock_clear_flags(&a->flags, + AF_LEGACY_SGE_MODE); + + a->max_vdareq_size = 1024; + a->build_sgl = esas2r_build_sg_list_prd; + } else { + return esas2r_set_degraded_mode(a, + "unknown firmware version"); + } + break; + } + + schedule_timeout_interruptible(msecs_to_jiffies(100)); + + if ((jiffies_to_msecs(jiffies) - starttime) > 180000) { + esas2r_hdebug("FW ready TMO"); + esas2r_bugon(); + + return esas2r_set_degraded_mode(a, + "firmware start has timed out"); + } + } + + /* purge any asynchronous events since we will repost them later */ + esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_MSG_IFC_DOWN); + starttime = jiffies_to_msecs(jiffies); + + while (true) { + doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT); + if (doorbell & DRBL_MSG_IFC_DOWN) { + esas2r_write_register_dword(a, MU_DOORBELL_OUT, + doorbell); + break; + } + + schedule_timeout_interruptible(msecs_to_jiffies(50)); + + if ((jiffies_to_msecs(jiffies) - starttime) > 3000) { + esas2r_hdebug("timeout waiting for interface down"); + break; + } + } +skip_chip_reset: + /* + * first things first, before we go changing any of these registers + * disable the communication lists. + */ + dw = esas2r_read_register_dword(a, MU_IN_LIST_CONFIG); + dw &= ~MU_ILC_ENABLE; + esas2r_write_register_dword(a, MU_IN_LIST_CONFIG, dw); + dw = esas2r_read_register_dword(a, MU_OUT_LIST_CONFIG); + dw &= ~MU_OLC_ENABLE; + esas2r_write_register_dword(a, MU_OUT_LIST_CONFIG, dw); + + /* configure the communication list addresses */ + ppaddr = a->inbound_list_md.phys_addr; + esas2r_write_register_dword(a, MU_IN_LIST_ADDR_LO, + lower_32_bits(ppaddr)); + esas2r_write_register_dword(a, MU_IN_LIST_ADDR_HI, + upper_32_bits(ppaddr)); + ppaddr = a->outbound_list_md.phys_addr; + esas2r_write_register_dword(a, MU_OUT_LIST_ADDR_LO, + lower_32_bits(ppaddr)); + esas2r_write_register_dword(a, MU_OUT_LIST_ADDR_HI, + upper_32_bits(ppaddr)); + ppaddr = a->uncached_phys + + ((u8 *)a->outbound_copy - a->uncached); + esas2r_write_register_dword(a, MU_OUT_LIST_COPY_PTR_LO, + lower_32_bits(ppaddr)); + esas2r_write_register_dword(a, MU_OUT_LIST_COPY_PTR_HI, + upper_32_bits(ppaddr)); + + /* reset the read and write pointers */ + *a->outbound_copy = + a->last_write = + a->last_read = a->list_size - 1; + esas2r_lock_set_flags(&a->flags, AF_COMM_LIST_TOGGLE); + esas2r_write_register_dword(a, MU_IN_LIST_WRITE, MU_ILW_TOGGLE | + a->last_write); + esas2r_write_register_dword(a, MU_OUT_LIST_COPY, MU_OLC_TOGGLE | + a->last_write); + esas2r_write_register_dword(a, MU_IN_LIST_READ, MU_ILR_TOGGLE | + a->last_write); + esas2r_write_register_dword(a, MU_OUT_LIST_WRITE, + MU_OLW_TOGGLE | a->last_write); + + /* configure the interface select fields */ + dw = esas2r_read_register_dword(a, MU_IN_LIST_IFC_CONFIG); + dw &= ~(MU_ILIC_LIST | MU_ILIC_DEST); + esas2r_write_register_dword(a, MU_IN_LIST_IFC_CONFIG, + (dw | MU_ILIC_LIST_F0 | MU_ILIC_DEST_DDR)); + dw = esas2r_read_register_dword(a, MU_OUT_LIST_IFC_CONFIG); + dw &= 
~(MU_OLIC_LIST | MU_OLIC_SOURCE);
+	esas2r_write_register_dword(a, MU_OUT_LIST_IFC_CONFIG,
+				    (dw | MU_OLIC_LIST_F0 |
+				     MU_OLIC_SOURCE_DDR));
+
+	/* finish configuring the communication lists */
+	dw = esas2r_read_register_dword(a, MU_IN_LIST_CONFIG);
+	dw &= ~(MU_ILC_ENTRY_MASK | MU_ILC_NUMBER_MASK);
+	dw |= MU_ILC_ENTRY_4_DW | MU_ILC_DYNAMIC_SRC
+	      | (a->list_size << MU_ILC_NUMBER_SHIFT);
+	esas2r_write_register_dword(a, MU_IN_LIST_CONFIG, dw);
+	dw = esas2r_read_register_dword(a, MU_OUT_LIST_CONFIG);
+	dw &= ~(MU_OLC_ENTRY_MASK | MU_OLC_NUMBER_MASK);
+	dw |= MU_OLC_ENTRY_4_DW | (a->list_size << MU_OLC_NUMBER_SHIFT);
+	esas2r_write_register_dword(a, MU_OUT_LIST_CONFIG, dw);
+
+	/*
+	 * notify the firmware that we're done setting up the communication
+	 * list registers.  wait here until the firmware is done configuring
+	 * its lists.  it will signal that it is done by enabling the lists.
+	 */
+	esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_MSG_IFC_INIT);
+	starttime = jiffies_to_msecs(jiffies);
+
+	while (true) {
+		doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
+		if (doorbell & DRBL_MSG_IFC_INIT) {
+			esas2r_write_register_dword(a, MU_DOORBELL_OUT,
+						    doorbell);
+			break;
+		}
+
+		schedule_timeout_interruptible(msecs_to_jiffies(100));
+
+		if ((jiffies_to_msecs(jiffies) - starttime) > 3000) {
+			esas2r_hdebug(
+				"timeout waiting for communication list init");
+			esas2r_bugon();
+			return esas2r_set_degraded_mode(a,
+							"timeout waiting for communication list init");
+		}
+	}
+
+	/*
+	 * flag whether the firmware supports the power down doorbell.  we
+	 * determine this by reading the inbound doorbell enable mask.
+	 */
+	doorbell = esas2r_read_register_dword(a, MU_DOORBELL_IN_ENB);
+	if (doorbell & DRBL_POWER_DOWN)
+		esas2r_lock_set_flags(&a->flags2, AF2_VDA_POWER_DOWN);
+	else
+		esas2r_lock_clear_flags(&a->flags2, AF2_VDA_POWER_DOWN);
+
+	/*
+	 * enable assertion of outbound queue and doorbell interrupts in the
+	 * main interrupt cause register.
+	 */
+	esas2r_write_register_dword(a, MU_OUT_LIST_INT_MASK, MU_OLIS_MASK);
+	esas2r_write_register_dword(a, MU_DOORBELL_OUT_ENB, DRBL_ENB_MASK);
+	return true;
+}
+
+/* Process the initialization message just completed and format the next one. */
+static bool esas2r_format_init_msg(struct esas2r_adapter *a,
+				   struct esas2r_request *rq)
+{
+	u32 msg = a->init_msg;
+	struct atto_vda_cfg_init *ci;
+
+	a->init_msg = 0;
+
+	switch (msg) {
+	case ESAS2R_INIT_MSG_START:
+	case ESAS2R_INIT_MSG_REINIT:
+	{
+		struct timeval now;
+		do_gettimeofday(&now);
+		esas2r_hdebug("CFG init");
+		esas2r_build_cfg_req(a,
+				     rq,
+				     VDA_CFG_INIT,
+				     0,
+				     NULL);
+		ci = (struct atto_vda_cfg_init *)&rq->vrq->cfg.data.init;
+		ci->sgl_page_size = sgl_page_size;
+		ci->epoch_time = now.tv_sec;
+		rq->flags |= RF_FAILURE_OK;
+		a->init_msg = ESAS2R_INIT_MSG_INIT;
+		break;
+	}
+
+	case ESAS2R_INIT_MSG_INIT:
+		if (rq->req_stat == RS_SUCCESS) {
+			u32 major;
+			u32 minor;
+
+			a->fw_version = le16_to_cpu(
+				rq->func_rsp.cfg_rsp.vda_version);
+			a->fw_build = rq->func_rsp.cfg_rsp.fw_build;
+			major = LOBYTE(rq->func_rsp.cfg_rsp.fw_release);
+			minor = HIBYTE(rq->func_rsp.cfg_rsp.fw_release);
+			a->fw_version += (major << 16) + (minor << 24);
+		} else {
+			esas2r_hdebug("FAILED");
+		}
+
+		/*
+		 * the 2.71 and earlier releases of R6xx firmware did not
+		 * correctly report errors for unsupported config requests.
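+		 *
+		 * (Aside, inferred from the code rather than firmware
+		 * docs: 0x47020052 packs release 2.71 - minor 0x47 = 71,
+		 * major 0x02 = 2 - plus its VDA version in the same byte
+		 * layout a->fw_version gets above, and be32_to_cpu() is
+		 * applied to both sides so each value is reordered
+		 * identically before the comparison.)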
+ */ + + if ((a->flags2 & AF2_THUNDERBOLT) + || (be32_to_cpu(a->fw_version) > + be32_to_cpu(0x47020052))) { + esas2r_hdebug("CFG get init"); + esas2r_build_cfg_req(a, + rq, + VDA_CFG_GET_INIT2, + sizeof(struct atto_vda_cfg_init), + NULL); + + rq->vrq->cfg.sg_list_offset = offsetof( + struct atto_vda_cfg_req, + data.sge); + rq->vrq->cfg.data.prde.ctl_len = + cpu_to_le32(sizeof(struct atto_vda_cfg_init)); + rq->vrq->cfg.data.prde.address = cpu_to_le64( + rq->vrq_md->phys_addr + + sizeof(union atto_vda_req)); + rq->flags |= RF_FAILURE_OK; + a->init_msg = ESAS2R_INIT_MSG_GET_INIT; + break; + } + + case ESAS2R_INIT_MSG_GET_INIT: + if (msg == ESAS2R_INIT_MSG_GET_INIT) { + ci = (struct atto_vda_cfg_init *)rq->data_buf; + if (rq->req_stat == RS_SUCCESS) { + a->num_targets_backend = + le32_to_cpu(ci->num_targets_backend); + a->ioctl_tunnel = + le32_to_cpu(ci->ioctl_tunnel); + } else { + esas2r_hdebug("FAILED"); + } + } + /* fall through */ + + default: + rq->req_stat = RS_SUCCESS; + return false; + } + return true; +} + +/* + * Perform initialization messages via the request queue. Messages are + * performed with interrupts disabled. + */ +bool esas2r_init_msgs(struct esas2r_adapter *a) +{ + bool success = true; + struct esas2r_request *rq = &a->general_req; + + esas2r_rq_init_request(rq, a); + rq->comp_cb = esas2r_dummy_complete; + + if (a->init_msg == 0) + a->init_msg = ESAS2R_INIT_MSG_REINIT; + + while (a->init_msg) { + if (esas2r_format_init_msg(a, rq)) { + unsigned long flags; + while (true) { + spin_lock_irqsave(&a->queue_lock, flags); + esas2r_start_vda_request(a, rq); + spin_unlock_irqrestore(&a->queue_lock, flags); + esas2r_wait_request(a, rq); + if (rq->req_stat != RS_PENDING) + break; + } + } + + if (rq->req_stat == RS_SUCCESS + || ((rq->flags & RF_FAILURE_OK) + && rq->req_stat != RS_TIMEOUT)) + continue; + + esas2r_log(ESAS2R_LOG_CRIT, "init message %x failed (%x, %x)", + a->init_msg, rq->req_stat, rq->flags); + a->init_msg = ESAS2R_INIT_MSG_START; + success = false; + break; + } + + esas2r_rq_destroy_request(rq, a); + return success; +} + +/* Initialize the adapter chip */ +bool esas2r_init_adapter_hw(struct esas2r_adapter *a, bool init_poll) +{ + bool rslt = false; + struct esas2r_request *rq; + u32 i; + + if (a->flags & AF_DEGRADED_MODE) + goto exit; + + if (!(a->flags & AF_NVR_VALID)) { + if (!esas2r_nvram_read_direct(a)) + esas2r_log(ESAS2R_LOG_WARN, + "invalid/missing NVRAM parameters"); + } + + if (!esas2r_init_msgs(a)) { + esas2r_set_degraded_mode(a, "init messages failed"); + goto exit; + } + + /* The firmware is ready. */ + esas2r_lock_clear_flags(&a->flags, AF_DEGRADED_MODE); + esas2r_lock_clear_flags(&a->flags, AF_CHPRST_PENDING); + + /* Post all the async event requests */ + for (i = 0, rq = a->first_ae_req; i < num_ae_requests; i++, rq++) + esas2r_start_ae_request(a, rq); + + if (!a->flash_rev[0]) + esas2r_read_flash_rev(a); + + if (!a->image_type[0]) + esas2r_read_image_type(a); + + if (a->fw_version == 0) + a->fw_rev[0] = 0; + else + sprintf(a->fw_rev, "%1d.%02d", + (int)LOBYTE(HIWORD(a->fw_version)), + (int)HIBYTE(HIWORD(a->fw_version))); + + esas2r_hdebug("firmware revision: %s", a->fw_rev); + + if ((a->flags & AF_CHPRST_DETECTED) + && (a->flags & AF_FIRST_INIT)) { + esas2r_enable_chip_interrupts(a); + return true; + } + + /* initialize discovery */ + esas2r_disc_initialize(a); + + /* + * wait for the device wait time to expire here if requested. this is + * usually requested during initial driver load and possibly when + * resuming from a low power state. 
deferred device waiting will use + * interrupts. chip reset recovery always defers device waiting to + * avoid being in a TASKLET too long. + */ + if (init_poll) { + u32 currtime = a->disc_start_time; + u32 nexttick = 100; + u32 deltatime; + + /* + * Block Tasklets from getting scheduled and indicate this is + * polled discovery. + */ + esas2r_lock_set_flags(&a->flags, AF_TASKLET_SCHEDULED); + esas2r_lock_set_flags(&a->flags, AF_DISC_POLLED); + + /* + * Temporarily bring the disable count to zero to enable + * deferred processing. Note that the count is already zero + * after the first initialization. + */ + if (a->flags & AF_FIRST_INIT) + atomic_dec(&a->disable_cnt); + + while (a->flags & AF_DISC_PENDING) { + schedule_timeout_interruptible(msecs_to_jiffies(100)); + + /* + * Determine the need for a timer tick based on the + * delta time between this and the last iteration of + * this loop. We don't use the absolute time because + * then we would have to worry about when nexttick + * wraps and currtime hasn't yet. + */ + deltatime = jiffies_to_msecs(jiffies) - currtime; + currtime += deltatime; + + /* + * Process any waiting discovery as long as the chip is + * up. If a chip reset happens during initial polling, + * we have to make sure the timer tick processes the + * doorbell indicating the firmware is ready. + */ + if (!(a->flags & AF_CHPRST_PENDING)) + esas2r_disc_check_for_work(a); + + /* Simulate a timer tick. */ + if (nexttick <= deltatime) { + + /* Time for a timer tick */ + nexttick += 100; + esas2r_timer_tick(a); + } + + if (nexttick > deltatime) + nexttick -= deltatime; + + /* Do any deferred processing */ + if (esas2r_is_tasklet_pending(a)) + esas2r_do_tasklet_tasks(a); + + } + + if (a->flags & AF_FIRST_INIT) + atomic_inc(&a->disable_cnt); + + esas2r_lock_clear_flags(&a->flags, AF_DISC_POLLED); + esas2r_lock_clear_flags(&a->flags, AF_TASKLET_SCHEDULED); + } + + + esas2r_targ_db_report_changes(a); + + /* + * For cases where (a) the initialization messages processing may + * handle an interrupt for a port event and a discovery is waiting, but + * we are not waiting for devices, or (b) the device wait time has been + * exhausted but there is still discovery pending, start any leftover + * discovery in interrupt driven mode. + */ + esas2r_disc_start_waiting(a); + + /* Enable chip interrupts */ + a->int_mask = ESAS2R_INT_STS_MASK; + esas2r_enable_chip_interrupts(a); + esas2r_enable_heartbeat(a); + rslt = true; + +exit: + /* + * Regardless of whether initialization was successful, certain things + * need to get done before we exit. + */ + + if ((a->flags & AF_CHPRST_DETECTED) + && (a->flags & AF_FIRST_INIT)) { + /* + * Reinitialization was performed during the first + * initialization. Only clear the chip reset flag so the + * original device polling is not cancelled. + */ + if (!rslt) + esas2r_lock_clear_flags(&a->flags, AF_CHPRST_PENDING); + } else { + /* First initialization or a subsequent re-init is complete. */ + if (!rslt) { + esas2r_lock_clear_flags(&a->flags, AF_CHPRST_PENDING); + esas2r_lock_clear_flags(&a->flags, AF_DISC_PENDING); + } + + + /* Enable deferred processing after the first initialization. 
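+		 *
+		 * (Aside: "enable" here means dropping the count taken in
+		 * esas2r_init_adapter, conceptually
+		 *
+		 *	if (atomic_dec_return(&a->disable_cnt) == 0)
+		 *		esas2r_do_deferred_processes(a);
+		 *
+		 * which is exactly what the code below does once
+		 * AF_FIRST_INIT is cleared.)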
*/ + if (a->flags & AF_FIRST_INIT) { + esas2r_lock_clear_flags(&a->flags, AF_FIRST_INIT); + + if (atomic_dec_return(&a->disable_cnt) == 0) + esas2r_do_deferred_processes(a); + } + } + + return rslt; +} + +void esas2r_reset_adapter(struct esas2r_adapter *a) +{ + esas2r_lock_set_flags(&a->flags, AF_OS_RESET); + esas2r_local_reset_adapter(a); + esas2r_schedule_tasklet(a); +} + +void esas2r_reset_chip(struct esas2r_adapter *a) +{ + if (!esas2r_is_adapter_present(a)) + return; + + /* + * Before we reset the chip, save off the VDA core dump. The VDA core + * dump is located in the upper 512KB of the onchip SRAM. Make sure + * to not overwrite a previous crash that was saved. + */ + if ((a->flags2 & AF2_COREDUMP_AVAIL) + && !(a->flags2 & AF2_COREDUMP_SAVED) + && a->fw_coredump_buff) { + esas2r_read_mem_block(a, + a->fw_coredump_buff, + MW_DATA_ADDR_SRAM + 0x80000, + ESAS2R_FWCOREDUMP_SZ); + + esas2r_lock_set_flags(&a->flags2, AF2_COREDUMP_SAVED); + } + + esas2r_lock_clear_flags(&a->flags2, AF2_COREDUMP_AVAIL); + + /* Reset the chip */ + if (a->pcid->revision == MVR_FREY_B2) + esas2r_write_register_dword(a, MU_CTL_STATUS_IN_B2, + MU_CTL_IN_FULL_RST2); + else + esas2r_write_register_dword(a, MU_CTL_STATUS_IN, + MU_CTL_IN_FULL_RST); + + + /* Stall a little while to let the reset condition clear */ + mdelay(10); +} + +static void esas2r_power_down_notify_firmware(struct esas2r_adapter *a) +{ + u32 starttime; + u32 doorbell; + + esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_POWER_DOWN); + starttime = jiffies_to_msecs(jiffies); + + while (true) { + doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT); + if (doorbell & DRBL_POWER_DOWN) { + esas2r_write_register_dword(a, MU_DOORBELL_OUT, + doorbell); + break; + } + + schedule_timeout_interruptible(msecs_to_jiffies(100)); + + if ((jiffies_to_msecs(jiffies) - starttime) > 30000) { + esas2r_hdebug("Timeout waiting for power down"); + break; + } + } +} + +/* + * Perform power management processing including managing device states, adapter + * states, interrupts, and I/O. + */ +void esas2r_power_down(struct esas2r_adapter *a) +{ + esas2r_lock_set_flags(&a->flags, AF_POWER_MGT); + esas2r_lock_set_flags(&a->flags, AF_POWER_DOWN); + + if (!(a->flags & AF_DEGRADED_MODE)) { + u32 starttime; + u32 doorbell; + + /* + * We are currently running OK and will be reinitializing later. + * increment the disable count to coordinate with + * esas2r_init_adapter. We don't have to do this in degraded + * mode since we never enabled interrupts in the first place. + */ + esas2r_disable_chip_interrupts(a); + esas2r_disable_heartbeat(a); + + /* wait for any VDA activity to clear before continuing */ + esas2r_write_register_dword(a, MU_DOORBELL_IN, + DRBL_MSG_IFC_DOWN); + starttime = jiffies_to_msecs(jiffies); + + while (true) { + doorbell = + esas2r_read_register_dword(a, MU_DOORBELL_OUT); + if (doorbell & DRBL_MSG_IFC_DOWN) { + esas2r_write_register_dword(a, MU_DOORBELL_OUT, + doorbell); + break; + } + + schedule_timeout_interruptible(msecs_to_jiffies(100)); + + if ((jiffies_to_msecs(jiffies) - starttime) > 3000) { + esas2r_hdebug( + "timeout waiting for interface down"); + break; + } + } + + /* + * For versions of firmware that support it tell them the driver + * is powering down. + */ + if (a->flags2 & AF2_VDA_POWER_DOWN) + esas2r_power_down_notify_firmware(a); + } + + /* Suspend I/O processing. 
*/ + esas2r_lock_set_flags(&a->flags, AF_OS_RESET); + esas2r_lock_set_flags(&a->flags, AF_DISC_PENDING); + esas2r_lock_set_flags(&a->flags, AF_CHPRST_PENDING); + + esas2r_process_adapter_reset(a); + + /* Remove devices now that I/O is cleaned up. */ + a->prev_dev_cnt = esas2r_targ_db_get_tgt_cnt(a); + esas2r_targ_db_remove_all(a, false); +} + +/* + * Perform power management processing including managing device states, adapter + * states, interrupts, and I/O. + */ +bool esas2r_power_up(struct esas2r_adapter *a, bool init_poll) +{ + bool ret; + + esas2r_lock_clear_flags(&a->flags, AF_POWER_DOWN); + esas2r_init_pci_cfg_space(a); + esas2r_lock_set_flags(&a->flags, AF_FIRST_INIT); + atomic_inc(&a->disable_cnt); + + /* reinitialize the adapter */ + ret = esas2r_check_adapter(a); + if (!esas2r_init_adapter_hw(a, init_poll)) + ret = false; + + /* send the reset asynchronous event */ + esas2r_send_reset_ae(a, true); + + /* clear this flag after initialization. */ + esas2r_lock_clear_flags(&a->flags, AF_POWER_MGT); + return ret; +} + +bool esas2r_is_adapter_present(struct esas2r_adapter *a) +{ + if (a->flags & AF_NOT_PRESENT) + return false; + + if (esas2r_read_register_dword(a, MU_DOORBELL_OUT) == 0xFFFFFFFF) { + esas2r_lock_set_flags(&a->flags, AF_NOT_PRESENT); + + return false; + } + return true; +} + +const char *esas2r_get_model_name(struct esas2r_adapter *a) +{ + switch (a->pcid->subsystem_device) { + case ATTO_ESAS_R680: + return "ATTO ExpressSAS R680"; + + case ATTO_ESAS_R608: + return "ATTO ExpressSAS R608"; + + case ATTO_ESAS_R60F: + return "ATTO ExpressSAS R60F"; + + case ATTO_ESAS_R6F0: + return "ATTO ExpressSAS R6F0"; + + case ATTO_ESAS_R644: + return "ATTO ExpressSAS R644"; + + case ATTO_ESAS_R648: + return "ATTO ExpressSAS R648"; + + case ATTO_TSSC_3808: + return "ATTO ThunderStream SC 3808D"; + + case ATTO_TSSC_3808E: + return "ATTO ThunderStream SC 3808E"; + + case ATTO_TLSH_1068: + return "ATTO ThunderLink SH 1068"; + } + + return "ATTO SAS Controller"; +} + +const char *esas2r_get_model_name_short(struct esas2r_adapter *a) +{ + switch (a->pcid->subsystem_device) { + case ATTO_ESAS_R680: + return "R680"; + + case ATTO_ESAS_R608: + return "R608"; + + case ATTO_ESAS_R60F: + return "R60F"; + + case ATTO_ESAS_R6F0: + return "R6F0"; + + case ATTO_ESAS_R644: + return "R644"; + + case ATTO_ESAS_R648: + return "R648"; + + case ATTO_TSSC_3808: + return "SC 3808D"; + + case ATTO_TSSC_3808E: + return "SC 3808E"; + + case ATTO_TLSH_1068: + return "SH 1068"; + } + + return "unknown"; +} diff --git a/drivers/scsi/esas2r/esas2r_int.c b/drivers/scsi/esas2r/esas2r_int.c new file mode 100644 index 000000000000..c2d4ff57c5c3 --- /dev/null +++ b/drivers/scsi/esas2r/esas2r_int.c @@ -0,0 +1,941 @@ +/* + * linux/drivers/scsi/esas2r/esas2r_int.c + * esas2r interrupt handling + * + * Copyright (c) 2001-2013 ATTO Technology, Inc. + * (mailto:linuxdrivers@attotech.com) + */ +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. + * + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ + +#include "esas2r.h" + +/* Local function prototypes */ +static void esas2r_doorbell_interrupt(struct esas2r_adapter *a, u32 doorbell); +static void esas2r_get_outbound_responses(struct esas2r_adapter *a); +static void esas2r_process_bus_reset(struct esas2r_adapter *a); + +/* + * Poll the adapter for interrupts and service them. + * This function handles both legacy interrupts and MSI. + */ +void esas2r_polled_interrupt(struct esas2r_adapter *a) +{ + u32 intstat; + u32 doorbell; + + esas2r_disable_chip_interrupts(a); + + intstat = esas2r_read_register_dword(a, MU_INT_STATUS_OUT); + + if (intstat & MU_INTSTAT_POST_OUT) { + /* clear the interrupt */ + + esas2r_write_register_dword(a, MU_OUT_LIST_INT_STAT, + MU_OLIS_INT); + esas2r_flush_register_dword(a, MU_OUT_LIST_INT_STAT); + + esas2r_get_outbound_responses(a); + } + + if (intstat & MU_INTSTAT_DRBL) { + doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT); + if (doorbell != 0) + esas2r_doorbell_interrupt(a, doorbell); + } + + esas2r_enable_chip_interrupts(a); + + if (atomic_read(&a->disable_cnt) == 0) + esas2r_do_deferred_processes(a); +} + +/* + * Legacy and MSI interrupt handlers. Note that the legacy interrupt handler + * schedules a TASKLET to process events, whereas the MSI handler just + * processes interrupt events directly. 
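+ * + * Summarized (illustrative): + * esas2r_interrupt() - sets AF2_INT_PENDING and schedules the tasklet + * esas2r_msi_interrupt() - reads MU_INT_STATUS_OUT, services responses and + * doorbells, then runs deferred and tasklet work inline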
+ */ +irqreturn_t esas2r_interrupt(int irq, void *dev_id) +{ + struct esas2r_adapter *a = (struct esas2r_adapter *)dev_id; + + if (!esas2r_adapter_interrupt_pending(a)) + return IRQ_NONE; + + esas2r_lock_set_flags(&a->flags2, AF2_INT_PENDING); + esas2r_schedule_tasklet(a); + + return IRQ_HANDLED; +} + +void esas2r_adapter_interrupt(struct esas2r_adapter *a) +{ + u32 doorbell; + + if (likely(a->int_stat & MU_INTSTAT_POST_OUT)) { + /* clear the interrupt */ + esas2r_write_register_dword(a, MU_OUT_LIST_INT_STAT, + MU_OLIS_INT); + esas2r_flush_register_dword(a, MU_OUT_LIST_INT_STAT); + esas2r_get_outbound_responses(a); + } + + if (unlikely(a->int_stat & MU_INTSTAT_DRBL)) { + doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT); + if (doorbell != 0) + esas2r_doorbell_interrupt(a, doorbell); + } + + a->int_mask = ESAS2R_INT_STS_MASK; + + esas2r_enable_chip_interrupts(a); + + if (likely(atomic_read(&a->disable_cnt) == 0)) + esas2r_do_deferred_processes(a); +} + +irqreturn_t esas2r_msi_interrupt(int irq, void *dev_id) +{ + struct esas2r_adapter *a = (struct esas2r_adapter *)dev_id; + u32 intstat; + u32 doorbell; + + intstat = esas2r_read_register_dword(a, MU_INT_STATUS_OUT); + + if (likely(intstat & MU_INTSTAT_POST_OUT)) { + /* clear the interrupt */ + + esas2r_write_register_dword(a, MU_OUT_LIST_INT_STAT, + MU_OLIS_INT); + esas2r_flush_register_dword(a, MU_OUT_LIST_INT_STAT); + + esas2r_get_outbound_responses(a); + } + + if (unlikely(intstat & MU_INTSTAT_DRBL)) { + doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT); + if (doorbell != 0) + esas2r_doorbell_interrupt(a, doorbell); + } + + /* + * Work around a chip bug and force a new MSI to be sent if one is + * still pending. + */ + esas2r_disable_chip_interrupts(a); + esas2r_enable_chip_interrupts(a); + + if (likely(atomic_read(&a->disable_cnt) == 0)) + esas2r_do_deferred_processes(a); + + esas2r_do_tasklet_tasks(a); + + return 1; +} + + + +static void esas2r_handle_outbound_rsp_err(struct esas2r_adapter *a, + struct esas2r_request *rq, + struct atto_vda_ob_rsp *rsp) +{ + + /* + * For I/O requests, only copy the response if an error + * occurred and setup a callback to do error processing. + */ + if (unlikely(rq->req_stat != RS_SUCCESS)) { + memcpy(&rq->func_rsp, &rsp->func_rsp, sizeof(rsp->func_rsp)); + + if (rq->req_stat == RS_ABORTED) { + if (rq->timeout > RQ_MAX_TIMEOUT) + rq->req_stat = RS_TIMEOUT; + } else if (rq->req_stat == RS_SCSI_ERROR) { + u8 scsistatus = rq->func_rsp.scsi_rsp.scsi_stat; + + esas2r_trace("scsistatus: %x", scsistatus); + + /* Any of these are a good result. 
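+ * + * (SAM_STAT_GOOD, SAM_STAT_CONDITION_MET, SAM_STAT_INTERMEDIATE and + * SAM_STAT_INTERMEDIATE_CONDITION_MET are all mapped back to a successful + * completion by the test below.)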
*/ + if (scsistatus == SAM_STAT_GOOD || scsistatus == + SAM_STAT_CONDITION_MET || scsistatus == + SAM_STAT_INTERMEDIATE || scsistatus == + SAM_STAT_INTERMEDIATE_CONDITION_MET) { + rq->req_stat = RS_SUCCESS; + rq->func_rsp.scsi_rsp.scsi_stat = + SAM_STAT_GOOD; + } + } + } +} + +static void esas2r_get_outbound_responses(struct esas2r_adapter *a) +{ + struct atto_vda_ob_rsp *rsp; + u32 rspput_ptr; + u32 rspget_ptr; + struct esas2r_request *rq; + u32 handle; + unsigned long flags; + + LIST_HEAD(comp_list); + + esas2r_trace_enter(); + + spin_lock_irqsave(&a->queue_lock, flags); + + /* Get the outbound limit and pointers */ + rspput_ptr = le32_to_cpu(*a->outbound_copy) & MU_OLC_WRT_PTR; + rspget_ptr = a->last_read; + + esas2r_trace("rspput_ptr: %x, rspget_ptr: %x", rspput_ptr, rspget_ptr); + + /* If we don't have anything to process, get out */ + if (unlikely(rspget_ptr == rspput_ptr)) { + spin_unlock_irqrestore(&a->queue_lock, flags); + esas2r_trace_exit(); + return; + } + + /* Make sure the firmware is healthy */ + if (unlikely(rspput_ptr >= a->list_size)) { + spin_unlock_irqrestore(&a->queue_lock, flags); + esas2r_bugon(); + esas2r_local_reset_adapter(a); + esas2r_trace_exit(); + return; + } + + do { + rspget_ptr++; + + if (rspget_ptr >= a->list_size) + rspget_ptr = 0; + + rsp = (struct atto_vda_ob_rsp *)a->outbound_list_md.virt_addr + + rspget_ptr; + + handle = rsp->handle; + + /* Verify the handle range */ + if (unlikely(LOWORD(handle) == 0 + || LOWORD(handle) > num_requests + + num_ae_requests + 1)) { + esas2r_bugon(); + continue; + } + + /* Get the request for this handle */ + rq = a->req_table[LOWORD(handle)]; + + if (unlikely(rq == NULL || rq->vrq->scsi.handle != handle)) { + esas2r_bugon(); + continue; + } + + list_del(&rq->req_list); + + /* Get the completion status */ + rq->req_stat = rsp->req_stat; + + esas2r_trace("handle: %x", handle); + esas2r_trace("rq: %p", rq); + esas2r_trace("req_status: %x", rq->req_stat); + + if (likely(rq->vrq->scsi.function == VDA_FUNC_SCSI)) { + esas2r_handle_outbound_rsp_err(a, rq, rsp); + } else { + /* + * Copy the outbound completion struct for non-I/O + * requests. + */ + memcpy(&rq->func_rsp, &rsp->func_rsp, + sizeof(rsp->func_rsp)); + } + + /* Queue the request for completion. */ + list_add_tail(&rq->comp_list, &comp_list); + + } while (rspget_ptr != rspput_ptr); + + a->last_read = rspget_ptr; + spin_unlock_irqrestore(&a->queue_lock, flags); + + esas2r_comp_list_drain(a, &comp_list); + esas2r_trace_exit(); +} + +/* + * Perform all deferred processes for the adapter. Deferred + * processes can only be done while the current interrupt + * disable_cnt for the adapter is zero. + */ +void esas2r_do_deferred_processes(struct esas2r_adapter *a) +{ + int startreqs = 2; + struct esas2r_request *rq; + unsigned long flags; + + /* + * startreqs is used to control starting requests + * that are on the deferred queue + * = 0 - do not start any requests + * = 1 - can start discovery requests + * = 2 - can start any request + */ + + if (a->flags & (AF_CHPRST_PENDING | AF_FLASHING)) + startreqs = 0; + else if (a->flags & AF_DISC_PENDING) + startreqs = 1; + + atomic_inc(&a->disable_cnt); + + /* Clear off the completed list to be processed later. 
*/ + + if (esas2r_is_tasklet_pending(a)) { + esas2r_schedule_tasklet(a); + + startreqs = 0; + } + + /* + * If we can start requests then traverse the defer queue + * looking for requests to start or complete + */ + if (startreqs && !list_empty(&a->defer_list)) { + LIST_HEAD(comp_list); + struct list_head *element, *next; + + spin_lock_irqsave(&a->queue_lock, flags); + + list_for_each_safe(element, next, &a->defer_list) { + rq = list_entry(element, struct esas2r_request, + req_list); + + if (rq->req_stat != RS_PENDING) { + list_del(element); + list_add_tail(&rq->comp_list, &comp_list); + } + /* + * Process discovery and OS requests separately. We + * can't hold up discovery requests when discovery is + * pending. In general, there may be different sets of + * conditions for starting different types of requests. + */ + else if (rq->req_type == RT_DISC_REQ) { + list_del(element); + esas2r_disc_local_start_request(a, rq); + } else if (startreqs == 2) { + list_del(element); + esas2r_local_start_request(a, rq); + + /* + * Flashing could have been set by last local + * start + */ + if (a->flags & AF_FLASHING) + break; + } + } + + spin_unlock_irqrestore(&a->queue_lock, flags); + esas2r_comp_list_drain(a, &comp_list); + } + + atomic_dec(&a->disable_cnt); +} + +/* + * Process an adapter reset (or one that is about to happen) + * by making sure all outstanding requests are completed that + * haven't been already. + */ +void esas2r_process_adapter_reset(struct esas2r_adapter *a) +{ + struct esas2r_request *rq = &a->general_req; + unsigned long flags; + struct esas2r_disc_context *dc; + + LIST_HEAD(comp_list); + struct list_head *element; + + esas2r_trace_enter(); + + spin_lock_irqsave(&a->queue_lock, flags); + + /* abort the active discovery, if any. */ + + if (rq->interrupt_cx) { + dc = (struct esas2r_disc_context *)rq->interrupt_cx; + + dc->disc_evt = 0; + + esas2r_lock_clear_flags(&a->flags, AF_DISC_IN_PROG); + } + + /* + * just clear the interrupt callback for now. it will be dequeued if + * and when we find it on the active queue and we don't want the + * callback called. also set the dummy completion callback in case we + * were doing an I/O request. 
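+ * + * (esas2r_dummy_complete() is an empty stub, so a completion that races + * with this reset is simply discarded instead of calling back into a + * context that has just been torn down.)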
+ */ + + rq->interrupt_cx = NULL; + rq->interrupt_cb = NULL; + + rq->comp_cb = esas2r_dummy_complete; + + /* Reset the read and write pointers */ + + *a->outbound_copy = + a->last_write = + a->last_read = a->list_size - 1; + + esas2r_lock_set_flags(&a->flags, AF_COMM_LIST_TOGGLE); + + /* Kill all the requests on the active list */ + list_for_each(element, &a->defer_list) { + rq = list_entry(element, struct esas2r_request, req_list); + + if (rq->req_stat == RS_STARTED) + if (esas2r_ioreq_aborted(a, rq, RS_ABORTED)) + list_add_tail(&rq->comp_list, &comp_list); + } + + spin_unlock_irqrestore(&a->queue_lock, flags); + esas2r_comp_list_drain(a, &comp_list); + esas2r_process_bus_reset(a); + esas2r_trace_exit(); +} + +static void esas2r_process_bus_reset(struct esas2r_adapter *a) +{ + struct esas2r_request *rq; + struct list_head *element; + unsigned long flags; + + LIST_HEAD(comp_list); + + esas2r_trace_enter(); + + esas2r_hdebug("reset detected"); + + spin_lock_irqsave(&a->queue_lock, flags); + + /* kill all the requests on the deferred queue */ + list_for_each(element, &a->defer_list) { + rq = list_entry(element, struct esas2r_request, req_list); + if (esas2r_ioreq_aborted(a, rq, RS_ABORTED)) + list_add_tail(&rq->comp_list, &comp_list); + } + + spin_unlock_irqrestore(&a->queue_lock, flags); + + esas2r_comp_list_drain(a, &comp_list); + + if (atomic_read(&a->disable_cnt) == 0) + esas2r_do_deferred_processes(a); + + esas2r_lock_clear_flags(&a->flags, AF_OS_RESET); + + esas2r_trace_exit(); +} + +static void esas2r_chip_rst_needed_during_tasklet(struct esas2r_adapter *a) +{ + + esas2r_lock_clear_flags(&a->flags, AF_CHPRST_NEEDED); + esas2r_lock_clear_flags(&a->flags, AF_BUSRST_NEEDED); + esas2r_lock_clear_flags(&a->flags, AF_BUSRST_DETECTED); + esas2r_lock_clear_flags(&a->flags, AF_BUSRST_PENDING); + /* + * Make sure we don't attempt more than 3 resets + * when the uptime between resets does not exceed one + * minute. This will stop any situation where there is + * really something wrong with the hardware. The way + * this works is that we start with uptime ticks at 0. + * Each time we do a reset, we add 20 seconds worth to + * the count. Each time a timer tick occurs, as long + * as a chip reset is not pending, we decrement the + * tick count. If the uptime ticks ever get to 60 + * seconds worth, we disable the adapter from that + * point forward. Three strikes, you're out. + */ + if (!esas2r_is_adapter_present(a) || (a->chip_uptime >= + ESAS2R_CHP_UPTIME_MAX)) { + esas2r_hdebug("*** adapter disabled ***"); + + /* + * Ok, some kind of hard failure. Make sure we + * exit this loop with chip interrupts + * permanently disabled so we don't lock up the + * entire system. Also flag degraded mode to + * prevent the heartbeat from trying to recover. + */ + + esas2r_lock_set_flags(&a->flags, AF_DEGRADED_MODE); + esas2r_lock_set_flags(&a->flags, AF_DISABLED); + esas2r_lock_clear_flags(&a->flags, AF_CHPRST_PENDING); + esas2r_lock_clear_flags(&a->flags, AF_DISC_PENDING); + + esas2r_disable_chip_interrupts(a); + a->int_mask = 0; + esas2r_process_adapter_reset(a); + + esas2r_log(ESAS2R_LOG_CRIT, + "Adapter disabled because of hardware failure"); + } else { + u32 flags = + esas2r_lock_set_flags(&a->flags, AF_CHPRST_STARTED); + + if (!(flags & AF_CHPRST_STARTED)) + /* + * Only disable interrupts if this is + * the first reset attempt. 
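+ * + * (This relies on esas2r_lock_set_flags() returning the flag word as it + * was before the set, so only the first caller sees AF_CHPRST_STARTED + * clear and performs the disable.)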
+ */ + esas2r_disable_chip_interrupts(a); + + if ((a->flags & AF_POWER_MGT) && !(a->flags & AF_FIRST_INIT) && + !(flags & AF_CHPRST_STARTED)) { + /* + * Don't reset the chip on the first + * deferred power up attempt. + */ + } else { + esas2r_hdebug("*** resetting chip ***"); + esas2r_reset_chip(a); + } + + /* Kick off the reinitialization */ + a->chip_uptime += ESAS2R_CHP_UPTIME_CNT; + a->chip_init_time = jiffies_to_msecs(jiffies); + if (!(a->flags & AF_POWER_MGT)) { + esas2r_process_adapter_reset(a); + + if (!(flags & AF_CHPRST_STARTED)) { + /* Remove devices now that I/O is cleaned up. */ + a->prev_dev_cnt = + esas2r_targ_db_get_tgt_cnt(a); + esas2r_targ_db_remove_all(a, false); + } + } + + a->int_mask = 0; + } +} + +static void esas2r_handle_chip_rst_during_tasklet(struct esas2r_adapter *a) +{ + while (a->flags & AF_CHPRST_DETECTED) { + /* + * Balance the enable in esas2r_init_adapter_hw(). + * esas2r_power_down() already took care of it for power + * management. + */ + if (!(a->flags & AF_DEGRADED_MODE) && !(a->flags & + AF_POWER_MGT)) + esas2r_disable_chip_interrupts(a); + + /* Reinitialize the chip. */ + esas2r_check_adapter(a); + esas2r_init_adapter_hw(a, 0); + + if (a->flags & AF_CHPRST_NEEDED) + break; + + if (a->flags & AF_POWER_MGT) { + /* Recovery from power management. */ + if (a->flags & AF_FIRST_INIT) { + /* Chip reset during normal power up */ + esas2r_log(ESAS2R_LOG_CRIT, + "The firmware was reset during a normal power-up sequence"); + } else { + /* Deferred power up complete. */ + esas2r_lock_clear_flags(&a->flags, + AF_POWER_MGT); + esas2r_send_reset_ae(a, true); + } + } else { + /* Recovery from online chip reset. */ + if (a->flags & AF_FIRST_INIT) { + /* Chip reset during driver load */ + } else { + /* Chip reset after driver load */ + esas2r_send_reset_ae(a, false); + } + + esas2r_log(ESAS2R_LOG_CRIT, + "Recovering from a chip reset while the chip was online"); + } + + esas2r_lock_clear_flags(&a->flags, AF_CHPRST_STARTED); + esas2r_enable_chip_interrupts(a); + + /* + * Clear this flag last! This indicates that the chip has been + * reset already during initialization. 
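+ * + * (The enclosing while loop keeps reinitializing for as long as + * AF_CHPRST_DETECTED stays set; clearing it here is what finally lets + * the loop exit.)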
+ */ + esas2r_lock_clear_flags(&a->flags, AF_CHPRST_DETECTED); + } +} + + +/* Perform deferred tasks when chip interrupts are disabled */ +void esas2r_do_tasklet_tasks(struct esas2r_adapter *a) +{ + if (a->flags & (AF_CHPRST_NEEDED | AF_CHPRST_DETECTED)) { + if (a->flags & AF_CHPRST_NEEDED) + esas2r_chip_rst_needed_during_tasklet(a); + + esas2r_handle_chip_rst_during_tasklet(a); + } + + if (a->flags & AF_BUSRST_NEEDED) { + esas2r_hdebug("hard resetting bus"); + + esas2r_lock_clear_flags(&a->flags, AF_BUSRST_NEEDED); + + if (a->flags & AF_FLASHING) + esas2r_lock_set_flags(&a->flags, AF_BUSRST_DETECTED); + else + esas2r_write_register_dword(a, MU_DOORBELL_IN, + DRBL_RESET_BUS); + } + + if (a->flags & AF_BUSRST_DETECTED) { + esas2r_process_bus_reset(a); + + esas2r_log_dev(ESAS2R_LOG_WARN, + &(a->host->shost_gendev), + "scsi_report_bus_reset() called"); + + scsi_report_bus_reset(a->host, 0); + + esas2r_lock_clear_flags(&a->flags, AF_BUSRST_DETECTED); + esas2r_lock_clear_flags(&a->flags, AF_BUSRST_PENDING); + + esas2r_log(ESAS2R_LOG_WARN, "Bus reset complete"); + } + + if (a->flags & AF_PORT_CHANGE) { + esas2r_lock_clear_flags(&a->flags, AF_PORT_CHANGE); + + esas2r_targ_db_report_changes(a); + } + + if (atomic_read(&a->disable_cnt) == 0) + esas2r_do_deferred_processes(a); +} + +static void esas2r_doorbell_interrupt(struct esas2r_adapter *a, u32 doorbell) +{ + if (!(doorbell & DRBL_FORCE_INT)) { + esas2r_trace_enter(); + esas2r_trace("doorbell: %x", doorbell); + } + + /* First clear the doorbell bits */ + esas2r_write_register_dword(a, MU_DOORBELL_OUT, doorbell); + + if (doorbell & DRBL_RESET_BUS) + esas2r_lock_set_flags(&a->flags, AF_BUSRST_DETECTED); + + if (doorbell & DRBL_FORCE_INT) + esas2r_lock_clear_flags(&a->flags, AF_HEARTBEAT); + + if (doorbell & DRBL_PANIC_REASON_MASK) { + esas2r_hdebug("*** Firmware Panic ***"); + esas2r_log(ESAS2R_LOG_CRIT, "The firmware has panicked"); + } + + if (doorbell & DRBL_FW_RESET) { + esas2r_lock_set_flags(&a->flags2, AF2_COREDUMP_AVAIL); + esas2r_local_reset_adapter(a); + } + + if (!(doorbell & DRBL_FORCE_INT)) + esas2r_trace_exit(); +} + +void esas2r_force_interrupt(struct esas2r_adapter *a) +{ + esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_FORCE_INT | + DRBL_DRV_VER); +} + + +static void esas2r_lun_event(struct esas2r_adapter *a, union atto_vda_ae *ae, + u16 target, u32 length) +{ + struct esas2r_target *t = a->targetdb + target; + u32 cplen = length; + unsigned long flags; + + if (cplen > sizeof(t->lu_event)) + cplen = sizeof(t->lu_event); + + esas2r_trace("ae->lu.dwevent: %x", ae->lu.dwevent); + esas2r_trace("ae->lu.bystate: %x", ae->lu.bystate); + + spin_lock_irqsave(&a->mem_lock, flags); + + t->new_target_state = TS_INVALID; + + if (ae->lu.dwevent & VDAAE_LU_LOST) { + t->new_target_state = TS_NOT_PRESENT; + } else { + switch (ae->lu.bystate) { + case VDAAE_LU_NOT_PRESENT: + case VDAAE_LU_OFFLINE: + case VDAAE_LU_DELETED: + case VDAAE_LU_FACTORY_DISABLED: + t->new_target_state = TS_NOT_PRESENT; + break; + + case VDAAE_LU_ONLINE: + case VDAAE_LU_DEGRADED: + t->new_target_state = TS_PRESENT; + break; + } + } + + if (t->new_target_state != TS_INVALID) { + memcpy(&t->lu_event, &ae->lu, cplen); + + esas2r_disc_queue_event(a, DCDE_DEV_CHANGE); + } + + spin_unlock_irqrestore(&a->mem_lock, flags); +} + + + +void esas2r_ae_complete(struct esas2r_adapter *a, struct esas2r_request *rq) +{ + union atto_vda_ae *ae = + (union atto_vda_ae *)rq->vda_rsp_data->ae_data.event_data; + u32 length = le32_to_cpu(rq->func_rsp.ae_rsp.length); + union atto_vda_ae *last 
= + (union atto_vda_ae *)(rq->vda_rsp_data->ae_data.event_data + + length); + + esas2r_trace_enter(); + esas2r_trace("length: %d", length); + + if (length > sizeof(struct atto_vda_ae_data) + || (length & 3) != 0 + || length == 0) { + esas2r_log(ESAS2R_LOG_WARN, + "The AE request response length (%p) is too long: %d", + rq, length); + + esas2r_hdebug("aereq->length (0x%x) too long", length); + esas2r_bugon(); + + last = ae; + } + + while (ae < last) { + u16 target; + + esas2r_trace("ae: %p", ae); + esas2r_trace("ae->hdr: %p", &(ae->hdr)); + + length = ae->hdr.bylength; + + if (length > (u32)((u8 *)last - (u8 *)ae) + || (length & 3) != 0 + || length == 0) { + esas2r_log(ESAS2R_LOG_CRIT, + "the async event length is invalid (%p): %d", + ae, length); + + esas2r_hdebug("ae->hdr.length (0x%x) invalid", length); + esas2r_bugon(); + + break; + } + + esas2r_nuxi_ae_data(ae); + + esas2r_queue_fw_event(a, fw_event_vda_ae, ae, + sizeof(union atto_vda_ae)); + + switch (ae->hdr.bytype) { + case VDAAE_HDR_TYPE_RAID: + + if (ae->raid.dwflags & (VDAAE_GROUP_STATE + | VDAAE_RBLD_STATE + | VDAAE_MEMBER_CHG + | VDAAE_PART_CHG)) { + esas2r_log(ESAS2R_LOG_INFO, + "RAID event received - name:%s rebuild_state:%d group_state:%d", + ae->raid.acname, + ae->raid.byrebuild_state, + ae->raid.bygroup_state); + } + + break; + + case VDAAE_HDR_TYPE_LU: + esas2r_log(ESAS2R_LOG_INFO, + "LUN event received: event:%d target_id:%d LUN:%d state:%d", + ae->lu.dwevent, + ae->lu.id.tgtlun.wtarget_id, + ae->lu.id.tgtlun.bylun, + ae->lu.bystate); + + target = ae->lu.id.tgtlun.wtarget_id; + + if (target < ESAS2R_MAX_TARGETS) + esas2r_lun_event(a, ae, target, length); + + break; + + case VDAAE_HDR_TYPE_DISK: + esas2r_log(ESAS2R_LOG_INFO, "Disk event received"); + break; + + default: + + /* Silently ignore the rest and let the apps deal with + * them. + */ + + break; + } + + ae = (union atto_vda_ae *)((u8 *)ae + length); + } + + /* Now requeue it. */ + esas2r_start_ae_request(a, rq); + esas2r_trace_exit(); +} + +/* Send an asynchronous event for a chip reset or power management. 
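+ * + * (The event is a bare header, VDAAE_HDR_TYPE_PWRMGT or + * VDAAE_HDR_TYPE_RESET, queued to the firmware event handler rather than + * sent to the chip itself.)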
*/ +void esas2r_send_reset_ae(struct esas2r_adapter *a, bool pwr_mgt) +{ + struct atto_vda_ae_hdr ae; + + if (pwr_mgt) + ae.bytype = VDAAE_HDR_TYPE_PWRMGT; + else + ae.bytype = VDAAE_HDR_TYPE_RESET; + + ae.byversion = VDAAE_HDR_VER_0; + ae.byflags = 0; + ae.bylength = (u8)sizeof(struct atto_vda_ae_hdr); + + if (pwr_mgt) + esas2r_hdebug("*** sending power management AE ***"); + else + esas2r_hdebug("*** sending reset AE ***"); + + esas2r_queue_fw_event(a, fw_event_vda_ae, &ae, + sizeof(union atto_vda_ae)); +} + +void esas2r_dummy_complete(struct esas2r_adapter *a, struct esas2r_request *rq) +{} + +static void esas2r_check_req_rsp_sense(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + u8 snslen, snslen2; + + snslen = snslen2 = rq->func_rsp.scsi_rsp.sense_len; + + if (snslen > rq->sense_len) + snslen = rq->sense_len; + + if (snslen) { + if (rq->sense_buf) + memcpy(rq->sense_buf, rq->data_buf, snslen); + else + rq->sense_buf = (u8 *)rq->data_buf; + + /* See about possible sense data */ + if (snslen2 > 0x0c) { + u8 *s = (u8 *)rq->data_buf; + + esas2r_trace_enter(); + + /* Report LUNS data has changed */ + if (s[0x0c] == 0x3f && s[0x0d] == 0x0E) { + esas2r_trace("rq->target_id: %d", + rq->target_id); + esas2r_target_state_changed(a, rq->target_id, + TS_LUN_CHANGE); + } + + esas2r_trace("add_sense_key=%x", s[0x0c]); + esas2r_trace("add_sense_qual=%x", s[0x0d]); + esas2r_trace_exit(); + } + } + + rq->sense_len = snslen; +} + + +void esas2r_complete_request(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + if (rq->vrq->scsi.function == VDA_FUNC_FLASH + && rq->vrq->flash.sub_func == VDA_FLASH_COMMIT) + esas2r_lock_clear_flags(&a->flags, AF_FLASHING); + + /* See if we setup a callback to do special processing */ + + if (rq->interrupt_cb) { + (*rq->interrupt_cb)(a, rq); + + if (rq->req_stat == RS_PENDING) { + esas2r_start_request(a, rq); + return; + } + } + + if (likely(rq->vrq->scsi.function == VDA_FUNC_SCSI) + && unlikely(rq->req_stat != RS_SUCCESS)) { + esas2r_check_req_rsp_sense(a, rq); + esas2r_log_request_failure(a, rq); + } + + (*rq->comp_cb)(a, rq); +} diff --git a/drivers/scsi/esas2r/esas2r_io.c b/drivers/scsi/esas2r/esas2r_io.c new file mode 100644 index 000000000000..324e2626a08b --- /dev/null +++ b/drivers/scsi/esas2r/esas2r_io.c @@ -0,0 +1,880 @@ +/* + * linux/drivers/scsi/esas2r/esas2r_io.c + * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers + * + * Copyright (c) 2001-2013 ATTO Technology, Inc. + * (mailto:linuxdrivers@attotech.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. 
Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. + * + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, + * USA. + */ + +#include "esas2r.h" + +void esas2r_start_request(struct esas2r_adapter *a, struct esas2r_request *rq) +{ + struct esas2r_target *t = NULL; + struct esas2r_request *startrq = rq; + unsigned long flags; + + if (unlikely(a->flags & (AF_DEGRADED_MODE | AF_POWER_DOWN))) { + if (rq->vrq->scsi.function == VDA_FUNC_SCSI) + rq->req_stat = RS_SEL2; + else + rq->req_stat = RS_DEGRADED; + } else if (likely(rq->vrq->scsi.function == VDA_FUNC_SCSI)) { + t = a->targetdb + rq->target_id; + + if (unlikely(t >= a->targetdb_end + || !(t->flags & TF_USED))) { + rq->req_stat = RS_SEL; + } else { + /* Copy in the target ID. */ + rq->vrq->scsi.target_id = cpu_to_le16(t->virt_targ_id); + + /* + * Test if we want to report RS_SEL for missing target. + * Note that if AF_DISC_PENDING is set, then this will + * go on the defer queue. + */ + if (unlikely(t->target_state != TS_PRESENT + && !(a->flags & AF_DISC_PENDING))) + rq->req_stat = RS_SEL; + } + } + + if (unlikely(rq->req_stat != RS_PENDING)) { + esas2r_complete_request(a, rq); + return; + } + + esas2r_trace("rq=%p", rq); + esas2r_trace("rq->vrq->scsi.handle=%x", rq->vrq->scsi.handle); + + if (rq->vrq->scsi.function == VDA_FUNC_SCSI) { + esas2r_trace("rq->target_id=%d", rq->target_id); + esas2r_trace("rq->vrq->scsi.flags=%x", rq->vrq->scsi.flags); + } + + spin_lock_irqsave(&a->queue_lock, flags); + + if (likely(list_empty(&a->defer_list) && + !(a->flags & + (AF_CHPRST_PENDING | AF_FLASHING | AF_DISC_PENDING)))) + esas2r_local_start_request(a, startrq); + else + list_add_tail(&startrq->req_list, &a->defer_list); + + spin_unlock_irqrestore(&a->queue_lock, flags); +} + +/* + * Starts the specified request. All requests have RS_PENDING set when this + * routine is called. The caller is usually esas2r_start_request, but + * esas2r_do_deferred_processes will start requests that are deferred. + * + * The caller must ensure that requests can be started. + * + * esas2r_start_request will defer a request if there are already requests + * waiting or there is a chip reset pending. Once the reset condition clears, + * esas2r_do_deferred_processes will call this function to start the request. + * + * When a request is started, it is placed on the active list and queued to + * the controller. 
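+ * + * Illustrative call flow: + * esas2r_start_request() + * -> defer_list when a chip reset, flash or discovery is pending, + * or when other requests are already deferred + * -> esas2r_local_start_request() otherwise, which places the request + * on the active list and hands it to esas2r_start_vda_request()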
+ */ +void esas2r_local_start_request(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + esas2r_trace_enter(); + esas2r_trace("rq=%p", rq); + esas2r_trace("rq->vrq:%p", rq->vrq); + esas2r_trace("rq->vrq_md->phys_addr:%x", rq->vrq_md->phys_addr); + + if (unlikely(rq->vrq->scsi.function == VDA_FUNC_FLASH + && rq->vrq->flash.sub_func == VDA_FLASH_COMMIT)) + esas2r_lock_set_flags(&a->flags, AF_FLASHING); + + list_add_tail(&rq->req_list, &a->active_list); + esas2r_start_vda_request(a, rq); + esas2r_trace_exit(); + return; +} + +void esas2r_start_vda_request(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + struct esas2r_inbound_list_source_entry *element; + u32 dw; + + rq->req_stat = RS_STARTED; + /* + * Calculate the inbound list entry location and the current state of + * toggle bit. + */ + a->last_write++; + if (a->last_write >= a->list_size) { + a->last_write = 0; + /* update the toggle bit */ + if (a->flags & AF_COMM_LIST_TOGGLE) + esas2r_lock_clear_flags(&a->flags, + AF_COMM_LIST_TOGGLE); + else + esas2r_lock_set_flags(&a->flags, AF_COMM_LIST_TOGGLE); + } + + element = + (struct esas2r_inbound_list_source_entry *)a->inbound_list_md. + virt_addr + + a->last_write; + + /* Set the VDA request size if it was never modified */ + if (rq->vda_req_sz == RQ_SIZE_DEFAULT) + rq->vda_req_sz = (u16)(a->max_vdareq_size / sizeof(u32)); + + element->address = cpu_to_le64(rq->vrq_md->phys_addr); + element->length = cpu_to_le32(rq->vda_req_sz); + + /* Update the write pointer */ + dw = a->last_write; + + if (a->flags & AF_COMM_LIST_TOGGLE) + dw |= MU_ILW_TOGGLE; + + esas2r_trace("rq->vrq->scsi.handle:%x", rq->vrq->scsi.handle); + esas2r_trace("dw:%x", dw); + esas2r_trace("rq->vda_req_sz:%x", rq->vda_req_sz); + esas2r_write_register_dword(a, MU_IN_LIST_WRITE, dw); +} + +/* + * Build the scatter/gather list for an I/O request according to the + * specifications placed in the s/g context. The caller must initialize + * context prior to the initial call by calling esas2r_sgc_init(). + */ +bool esas2r_build_sg_list_sge(struct esas2r_adapter *a, + struct esas2r_sg_context *sgc) +{ + struct esas2r_request *rq = sgc->first_req; + union atto_vda_req *vrq = rq->vrq; + + while (sgc->length) { + u32 rem = 0; + u64 addr; + u32 len; + + len = (*sgc->get_phys_addr)(sgc, &addr); + + if (unlikely(len == 0)) + return false; + + /* if current length is more than what's left, stop there */ + if (unlikely(len > sgc->length)) + len = sgc->length; + +another_entry: + /* limit to a round number less than the maximum length */ + if (len > SGE_LEN_MAX) { + /* + * Save the remainder of the split. Whenever we limit + * an entry we come back around to build entries out + * of the leftover. We do this to prevent multiple + * calls to the get_phys_addr() function for an SGE + * that is too large. + */ + rem = len - SGE_LEN_MAX; + len = SGE_LEN_MAX; + } + + /* See if we need to allocate a new SGL */ + if (unlikely(sgc->sge.a64.curr > sgc->sge.a64.limit)) { + u8 sgelen; + struct esas2r_mem_desc *sgl; + + /* + * If no SGls are available, return failure. The + * caller can call us later with the current context + * to pick up here. + */ + sgl = esas2r_alloc_sgl(a); + + if (unlikely(sgl == NULL)) + return false; + + /* Calculate the length of the last SGE filled in */ + sgelen = (u8)((u8 *)sgc->sge.a64.curr + - (u8 *)sgc->sge.a64.last); + + /* + * Copy the last SGE filled in to the first entry of + * the new SGL to make room for the chain entry. 
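+ * + * Conceptually (illustrative diagram): + * before: old SGL [ ..., last SGE ] <- list full + * after: old SGL [ ..., chain entry ] --> new SGL [ last SGE, ... ]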
+ */ + memcpy(sgl->virt_addr, sgc->sge.a64.last, sgelen); + + /* Figure out the new curr pointer in the new segment */ + sgc->sge.a64.curr = + (struct atto_vda_sge *)((u8 *)sgl->virt_addr + + sgelen); + + /* Set the limit pointer and build the chain entry */ + sgc->sge.a64.limit = + (struct atto_vda_sge *)((u8 *)sgl->virt_addr + + sgl_page_size + - sizeof(struct + atto_vda_sge)); + sgc->sge.a64.last->length = cpu_to_le32( + SGE_CHAIN | SGE_ADDR_64); + sgc->sge.a64.last->address = + cpu_to_le64(sgl->phys_addr); + + /* + * Now, if there was a previous chain entry, then + * update it to contain the length of this segment + * and size of this chain. Otherwise this is the + * first SGL, so set the chain_offset in the request. + */ + if (sgc->sge.a64.chain) { + sgc->sge.a64.chain->length |= + cpu_to_le32( + ((u8 *)(sgc->sge.a64. + last + 1) + - (u8 *)rq->sg_table-> + virt_addr) + + sizeof(struct atto_vda_sge) * + LOBIT(SGE_CHAIN_SZ)); + } else { + vrq->scsi.chain_offset = (u8) + ((u8 *)sgc-> + sge.a64.last - + (u8 *)vrq); + + /* + * This is the first SGL, so set the + * chain_offset and the VDA request size in + * the request. + */ + rq->vda_req_sz = + (vrq->scsi.chain_offset + + sizeof(struct atto_vda_sge) + + 3) + / sizeof(u32); + } + + /* + * Remember this so when we get a new SGL filled in we + * can update the length of this chain entry. + */ + sgc->sge.a64.chain = sgc->sge.a64.last; + + /* Now link the new SGL onto the primary request. */ + list_add(&sgl->next_desc, &rq->sg_table_head); + } + + /* Update last one filled in */ + sgc->sge.a64.last = sgc->sge.a64.curr; + + /* Build the new SGE and update the S/G context */ + sgc->sge.a64.curr->length = cpu_to_le32(SGE_ADDR_64 | len); + sgc->sge.a64.curr->address = cpu_to_le32(addr); + sgc->sge.a64.curr++; + sgc->cur_offset += len; + sgc->length -= len; + + /* + * Check if we previously split an entry. If so we have to + * pick up where we left off. + */ + if (rem) { + addr += len; + len = rem; + rem = 0; + goto another_entry; + } + } + + /* Mark the end of the SGL */ + sgc->sge.a64.last->length |= cpu_to_le32(SGE_LAST); + + /* + * If there was a previous chain entry, update the length to indicate + * the length of this last segment. + */ + if (sgc->sge.a64.chain) { + sgc->sge.a64.chain->length |= cpu_to_le32( + ((u8 *)(sgc->sge.a64.curr) - + (u8 *)rq->sg_table->virt_addr)); + } else { + u16 reqsize; + + /* + * The entire VDA request was not used, so let's + * set the size of the VDA request to be DMA'd + */ + reqsize = + ((u16)((u8 *)sgc->sge.a64.last - (u8 *)vrq) + + sizeof(struct atto_vda_sge) + 3) / sizeof(u32); + + /* + * Only update the request size if it is bigger than what is + * already there. We can come in here twice for some management + * commands. + */ + if (reqsize > rq->vda_req_sz) + rq->vda_req_sz = reqsize; + } + return true; +} + + +/* + * Create PRD list for each I-block consumed by the command. This routine + * determines how much data is required from each I-block being consumed + * by the command. The first and last I-blocks can be partials and all of + * the I-blocks in between are for a full I-block of data. + * + * The interleave size is used to determine the number of bytes in the 1st + * I-block and the remaining I-blocks are what remains. 
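+ * + * Worked example (hypothetical values): with a 64-block interleave, a + * request starting at LBA 70 puts 64 - (70 & 63) = 58 blocks in the + * first PRD list and a full I-block in each one after that, mirroring + * the startlba computation in esas2r_build_sg_list_prd().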
+ */ +static bool esas2r_build_prd_iblk(struct esas2r_adapter *a, + struct esas2r_sg_context *sgc) +{ + struct esas2r_request *rq = sgc->first_req; + u64 addr; + u32 len; + struct esas2r_mem_desc *sgl; + u32 numchain = 1; + u32 rem = 0; + + while (sgc->length) { + /* Get the next address/length pair */ + + len = (*sgc->get_phys_addr)(sgc, &addr); + + if (unlikely(len == 0)) + return false; + + /* If current length is more than what's left, stop there */ + + if (unlikely(len > sgc->length)) + len = sgc->length; + +another_entry: + /* Limit to a round number less than the maximum length */ + + if (len > PRD_LEN_MAX) { + /* + * Save the remainder of the split. Whenever we limit + * an entry we come back around to build entries out + * of the leftover. We do this to prevent multiple + * calls to the get_phys_addr() function for an SGE + * that is too large. + */ + rem = len - PRD_LEN_MAX; + len = PRD_LEN_MAX; + } + + /* See if we need to allocate a new SGL */ + if (sgc->sge.prd.sge_cnt == 0) { + if (len == sgc->length) { + /* + * We only have 1 PRD entry left. + * It can be placed where the chain + * entry would have gone + */ + + /* Build the simple SGE */ + sgc->sge.prd.curr->ctl_len = cpu_to_le32( + PRD_DATA | len); + sgc->sge.prd.curr->address = cpu_to_le64(addr); + + /* Adjust length related fields */ + sgc->cur_offset += len; + sgc->length -= len; + + /* We use the reserved chain entry for data */ + numchain = 0; + + break; + } + + if (sgc->sge.prd.chain) { + /* + * Fill in the entry count of the current SGL + * in the previous chain entry; the length of + * this current SGL may not be full. + */ + + sgc->sge.prd.chain->ctl_len |= cpu_to_le32( + sgc->sge.prd.sgl_max_cnt); + } + + /* + * If no SGLs are available, return failure. The + * caller can call us later with the current context + * to pick up here. + */ + + sgl = esas2r_alloc_sgl(a); + + if (unlikely(sgl == NULL)) + return false; + + /* + * Link the new SGL onto the chain. + * They are in reverse order. + */ + list_add(&sgl->next_desc, &rq->sg_table_head); + + /* + * An SGL was just filled in and we are starting + * a new SGL. Prime the chain of the ending SGL with + * info that points to the new SGL. The length gets + * filled in when the new SGL is filled or ended. + */ + + sgc->sge.prd.chain = sgc->sge.prd.curr; + + sgc->sge.prd.chain->ctl_len = cpu_to_le32(PRD_CHAIN); + sgc->sge.prd.chain->address = + cpu_to_le64(sgl->phys_addr); + + /* + * Start a new segment. + * Take one away and save for chain SGE + */ + + sgc->sge.prd.curr = + (struct atto_physical_region_description *)sgl + -> + virt_addr; + sgc->sge.prd.sge_cnt = sgc->sge.prd.sgl_max_cnt - 1; + } + + sgc->sge.prd.sge_cnt--; + /* Build the simple SGE */ + sgc->sge.prd.curr->ctl_len = cpu_to_le32(PRD_DATA | len); + sgc->sge.prd.curr->address = cpu_to_le64(addr); + + /* Used another element. Point to the next one */ + + sgc->sge.prd.curr++; + + /* Adjust length related fields */ + + sgc->cur_offset += len; + sgc->length -= len; + + /* + * Check if we previously split an entry. If so we have to + * pick up where we left off. 
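+ * + * For example (hypothetical sizes), a region of 3 * PRD_LEN_MAX bytes + * returned by a single get_phys_addr() call is emitted as three PRD + * entries, looping back through another_entry twice with the saved + * remainder.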
+ */ + + if (rem) { + addr += len; + len = rem; + rem = 0; + goto another_entry; + } + } + + if (!list_empty(&rq->sg_table_head)) { + if (sgc->sge.prd.chain) { + sgc->sge.prd.chain->ctl_len |= + cpu_to_le32(sgc->sge.prd.sgl_max_cnt + - sgc->sge.prd.sge_cnt + - numchain); + } + } + + return true; +} + +bool esas2r_build_sg_list_prd(struct esas2r_adapter *a, + struct esas2r_sg_context *sgc) +{ + struct esas2r_request *rq = sgc->first_req; + u32 len = sgc->length; + struct esas2r_target *t = a->targetdb + rq->target_id; + u8 is_i_o = 0; + u16 reqsize; + struct atto_physical_region_description *curr_iblk_chn; + u8 *cdb = (u8 *)&rq->vrq->scsi.cdb[0]; + + /* + * extract LBA from command so we can determine + * the I-Block boundary + */ + + if (rq->vrq->scsi.function == VDA_FUNC_SCSI + && t->target_state == TS_PRESENT + && !(t->flags & TF_PASS_THRU)) { + u32 lbalo = 0; + + switch (rq->vrq->scsi.cdb[0]) { + case READ_16: + case WRITE_16: + { + lbalo = + MAKEDWORD(MAKEWORD(cdb[9], + cdb[8]), + MAKEWORD(cdb[7], + cdb[6])); + is_i_o = 1; + break; + } + + case READ_12: + case WRITE_12: + case READ_10: + case WRITE_10: + { + lbalo = + MAKEDWORD(MAKEWORD(cdb[5], + cdb[4]), + MAKEWORD(cdb[3], + cdb[2])); + is_i_o = 1; + break; + } + + case READ_6: + case WRITE_6: + { + lbalo = + MAKEDWORD(MAKEWORD(cdb[3], + cdb[2]), + MAKEWORD(cdb[1] & 0x1F, + 0)); + is_i_o = 1; + break; + } + + default: + break; + } + + if (is_i_o) { + u32 startlba; + + rq->vrq->scsi.iblk_cnt_prd = 0; + + /* Determine size of 1st I-block PRD list */ + startlba = t->inter_block - (lbalo & (t->inter_block - + 1)); + sgc->length = startlba * t->block_size; + + /* Chk if the 1st iblk chain starts at base of Iblock */ + if ((lbalo & (t->inter_block - 1)) == 0) + rq->flags |= RF_1ST_IBLK_BASE; + + if (sgc->length > len) + sgc->length = len; + } else { + sgc->length = len; + } + } else { + sgc->length = len; + } + + /* get our starting chain address */ + + curr_iblk_chn = + (struct atto_physical_region_description *)sgc->sge.a64.curr; + + sgc->sge.prd.sgl_max_cnt = sgl_page_size / + sizeof(struct + atto_physical_region_description); + + /* create all of the I-block PRD lists */ + + while (len) { + sgc->sge.prd.sge_cnt = 0; + sgc->sge.prd.chain = NULL; + sgc->sge.prd.curr = curr_iblk_chn; + + /* increment to next I-Block */ + + len -= sgc->length; + + /* go build the next I-Block PRD list */ + + if (unlikely(!esas2r_build_prd_iblk(a, sgc))) + return false; + + curr_iblk_chn++; + + if (is_i_o) { + rq->vrq->scsi.iblk_cnt_prd++; + + if (len > t->inter_byte) + sgc->length = t->inter_byte; + else + sgc->length = len; + } + } + + /* figure out the size used of the VDA request */ + + reqsize = ((u16)((u8 *)curr_iblk_chn - (u8 *)rq->vrq)) + / sizeof(u32); + + /* + * only update the request size if it is bigger than what is + * already there. we can come in here twice for some management + * commands. + */ + + if (reqsize > rq->vda_req_sz) + rq->vda_req_sz = reqsize; + + return true; +} + +static void esas2r_handle_pending_reset(struct esas2r_adapter *a, u32 currtime) +{ + u32 delta = currtime - a->chip_init_time; + + if (delta <= ESAS2R_CHPRST_WAIT_TIME) { + /* Wait before accessing registers */ + } else if (delta >= ESAS2R_CHPRST_TIME) { + /* + * The last reset failed so try again. Reset + * processing will give up after three tries. 
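+ * + * Together with the accounting in esas2r_chip_rst_needed_during_tasklet(), + * this is the three-strikes policy described there: each reset adds + * ESAS2R_CHP_UPTIME_CNT to chip_uptime, the timer tick decays it, and the + * adapter is disabled once it reaches ESAS2R_CHP_UPTIME_MAX.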
+ */ + esas2r_local_reset_adapter(a); + } else { + /* We can now see if the firmware is ready */ + u32 doorbell; + + doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT); + if (doorbell == 0xFFFFFFFF || !(doorbell & DRBL_FORCE_INT)) { + esas2r_force_interrupt(a); + } else { + u32 ver = (doorbell & DRBL_FW_VER_MSK); + + /* Driver supports API version 0 and 1 */ + esas2r_write_register_dword(a, MU_DOORBELL_OUT, + doorbell); + if (ver == DRBL_FW_VER_0) { + esas2r_lock_set_flags(&a->flags, + AF_CHPRST_DETECTED); + esas2r_lock_set_flags(&a->flags, + AF_LEGACY_SGE_MODE); + + a->max_vdareq_size = 128; + a->build_sgl = esas2r_build_sg_list_sge; + } else if (ver == DRBL_FW_VER_1) { + esas2r_lock_set_flags(&a->flags, + AF_CHPRST_DETECTED); + esas2r_lock_clear_flags(&a->flags, + AF_LEGACY_SGE_MODE); + + a->max_vdareq_size = 1024; + a->build_sgl = esas2r_build_sg_list_prd; + } else { + esas2r_local_reset_adapter(a); + } + } + } +} + + +/* This function must be called once per timer tick */ +void esas2r_timer_tick(struct esas2r_adapter *a) +{ + u32 currtime = jiffies_to_msecs(jiffies); + u32 deltatime = currtime - a->last_tick_time; + + a->last_tick_time = currtime; + + /* count down the uptime */ + if (a->chip_uptime + && !(a->flags & (AF_CHPRST_PENDING | AF_DISC_PENDING))) { + if (deltatime >= a->chip_uptime) + a->chip_uptime = 0; + else + a->chip_uptime -= deltatime; + } + + if (a->flags & AF_CHPRST_PENDING) { + if (!(a->flags & AF_CHPRST_NEEDED) + && !(a->flags & AF_CHPRST_DETECTED)) + esas2r_handle_pending_reset(a, currtime); + } else { + if (a->flags & AF_DISC_PENDING) + esas2r_disc_check_complete(a); + + if (a->flags & AF_HEARTBEAT_ENB) { + if (a->flags & AF_HEARTBEAT) { + if ((currtime - a->heartbeat_time) >= + ESAS2R_HEARTBEAT_TIME) { + esas2r_lock_clear_flags(&a->flags, + AF_HEARTBEAT); + esas2r_hdebug("heartbeat failed"); + esas2r_log(ESAS2R_LOG_CRIT, + "heartbeat failed"); + esas2r_bugon(); + esas2r_local_reset_adapter(a); + } + } else { + esas2r_lock_set_flags(&a->flags, AF_HEARTBEAT); + a->heartbeat_time = currtime; + esas2r_force_interrupt(a); + } + } + } + + if (atomic_read(&a->disable_cnt) == 0) + esas2r_do_deferred_processes(a); +} + +/* + * Send the specified task management function to the target and LUN + * specified in rqaux. In addition, immediately abort any commands that + * are queued but not sent to the device according to the rules specified + * by the task management function. + */ +bool esas2r_send_task_mgmt(struct esas2r_adapter *a, + struct esas2r_request *rqaux, u8 task_mgt_func) +{ + u16 targetid = rqaux->target_id; + u8 lun = (u8)le32_to_cpu(rqaux->vrq->scsi.flags); + bool ret = false; + struct esas2r_request *rq; + struct list_head *next, *element; + unsigned long flags; + + LIST_HEAD(comp_list); + + esas2r_trace_enter(); + esas2r_trace("rqaux:%p", rqaux); + esas2r_trace("task_mgt_func:%x", task_mgt_func); + spin_lock_irqsave(&a->queue_lock, flags); + + /* search the defer queue looking for requests for the device */ + list_for_each_safe(element, next, &a->defer_list) { + rq = list_entry(element, struct esas2r_request, req_list); + + if (rq->vrq->scsi.function == VDA_FUNC_SCSI + && rq->target_id == targetid + && (((u8)le32_to_cpu(rq->vrq->scsi.flags)) == lun + || task_mgt_func == 0x20)) { /* target reset */ + /* Found a request affected by the task management */ + if (rq->req_stat == RS_PENDING) { + /* + * The request is pending or waiting. We can + * safely complete the request now. 
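+ * + * (A deferred request has never been handed to the firmware, so it can + * be failed locally without any hardware interaction.)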
+ */ + if (esas2r_ioreq_aborted(a, rq, RS_ABORTED)) + list_add_tail(&rq->comp_list, + &comp_list); + } + } + } + + /* Send the task management request to the firmware */ + rqaux->sense_len = 0; + rqaux->vrq->scsi.length = 0; + rqaux->target_id = targetid; + rqaux->vrq->scsi.flags |= cpu_to_le32(lun); + memset(rqaux->vrq->scsi.cdb, 0, sizeof(rqaux->vrq->scsi.cdb)); + rqaux->vrq->scsi.flags |= + cpu_to_le16(task_mgt_func * LOBIT(FCP_CMND_TM_MASK)); + + if (a->flags & AF_FLASHING) { + /* Assume success. if there are active requests, return busy */ + rqaux->req_stat = RS_SUCCESS; + + list_for_each_safe(element, next, &a->active_list) { + rq = list_entry(element, struct esas2r_request, + req_list); + if (rq->vrq->scsi.function == VDA_FUNC_SCSI + && rq->target_id == targetid + && (((u8)le32_to_cpu(rq->vrq->scsi.flags)) == lun + || task_mgt_func == 0x20)) /* target reset */ + rqaux->req_stat = RS_BUSY; + } + + ret = true; + } + + spin_unlock_irqrestore(&a->queue_lock, flags); + + if (!(a->flags & AF_FLASHING)) + esas2r_start_request(a, rqaux); + + esas2r_comp_list_drain(a, &comp_list); + + if (atomic_read(&a->disable_cnt) == 0) + esas2r_do_deferred_processes(a); + + esas2r_trace_exit(); + + return ret; +} + +void esas2r_reset_bus(struct esas2r_adapter *a) +{ + esas2r_log(ESAS2R_LOG_INFO, "performing a bus reset"); + + if (!(a->flags & AF_DEGRADED_MODE) + && !(a->flags & (AF_CHPRST_PENDING | AF_DISC_PENDING))) { + esas2r_lock_set_flags(&a->flags, AF_BUSRST_NEEDED); + esas2r_lock_set_flags(&a->flags, AF_BUSRST_PENDING); + esas2r_lock_set_flags(&a->flags, AF_OS_RESET); + + esas2r_schedule_tasklet(a); + } +} + +bool esas2r_ioreq_aborted(struct esas2r_adapter *a, struct esas2r_request *rq, + u8 status) +{ + esas2r_trace_enter(); + esas2r_trace("rq:%p", rq); + list_del_init(&rq->req_list); + if (rq->timeout > RQ_MAX_TIMEOUT) { + /* + * The request timed out, but we could not abort it because a + * chip reset occurred. Return busy status. + */ + rq->req_stat = RS_BUSY; + esas2r_trace_exit(); + return true; + } + + rq->req_stat = status; + esas2r_trace_exit(); + return true; +} diff --git a/drivers/scsi/esas2r/esas2r_ioctl.c b/drivers/scsi/esas2r/esas2r_ioctl.c new file mode 100644 index 000000000000..f3d0cb885972 --- /dev/null +++ b/drivers/scsi/esas2r/esas2r_ioctl.c @@ -0,0 +1,2110 @@ +/* + * linux/drivers/scsi/esas2r/esas2r_ioctl.c + * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers + * + * Copyright (c) 2001-2013 ATTO Technology, Inc. + * (mailto:linuxdrivers@attotech.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. 
Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. + * + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, + * USA. + */ + +#include "esas2r.h" + +/* + * Buffered ioctl handlers. A buffered ioctl is one which requires that we + * allocate a DMA-able memory area to communicate with the firmware. In + * order to prevent continually allocating and freeing consistent memory, + * we will allocate a global buffer the first time we need it and re-use + * it for subsequent ioctl calls that require it. + */ + +u8 *esas2r_buffered_ioctl; +dma_addr_t esas2r_buffered_ioctl_addr; +u32 esas2r_buffered_ioctl_size; +struct pci_dev *esas2r_buffered_ioctl_pcid; + +static DEFINE_SEMAPHORE(buffered_ioctl_semaphore); +typedef int (*BUFFERED_IOCTL_CALLBACK)(struct esas2r_adapter *, + struct esas2r_request *, + struct esas2r_sg_context *, + void *); +typedef void (*BUFFERED_IOCTL_DONE_CALLBACK)(struct esas2r_adapter *, + struct esas2r_request *, void *); + +struct esas2r_buffered_ioctl { + struct esas2r_adapter *a; + void *ioctl; + u32 length; + u32 control_code; + u32 offset; + BUFFERED_IOCTL_CALLBACK + callback; + void *context; + BUFFERED_IOCTL_DONE_CALLBACK + done_callback; + void *done_context; + +}; + +static void complete_fm_api_req(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + a->fm_api_command_done = 1; + wake_up_interruptible(&a->fm_api_waiter); +} + +/* Callbacks for building scatter/gather lists for FM API requests */ +static u32 get_physaddr_fm_api(struct esas2r_sg_context *sgc, u64 *addr) +{ + struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter; + int offset = sgc->cur_offset - a->save_offset; + + (*addr) = a->firmware.phys + offset; + return a->firmware.orig_len - offset; +} + +static u32 get_physaddr_fm_api_header(struct esas2r_sg_context *sgc, u64 *addr) +{ + struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter; + int offset = sgc->cur_offset - a->save_offset; + + (*addr) = a->firmware.header_buff_phys + offset; + return sizeof(struct esas2r_flash_img) - offset; +} + +/* Handle EXPRESS_IOCTL_RW_FIRMWARE ioctl with img_type = FW_IMG_FM_API. 
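+ * + * Sketch of the flow implemented below: take fm_api_semaphore, allocate a + * request, point the S/G context at the caller's image (or the DMA header + * buffer), call esas2r_fm_api(), wait for complete_fm_api_req(), then copy + * results back and release everything.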
*/ +static void do_fm_api(struct esas2r_adapter *a, struct esas2r_flash_img *fi) +{ + struct esas2r_request *rq; + + if (down_interruptible(&a->fm_api_semaphore)) { + fi->status = FI_STAT_BUSY; + return; + } + + rq = esas2r_alloc_request(a); + if (rq == NULL) { + up(&a->fm_api_semaphore); + fi->status = FI_STAT_BUSY; + return; + } + + if (fi == &a->firmware.header) { + a->firmware.header_buff = dma_alloc_coherent(&a->pcid->dev, + (size_t)sizeof( + struct + esas2r_flash_img), + (dma_addr_t *)&a-> + firmware. + header_buff_phys, + GFP_KERNEL); + + if (a->firmware.header_buff == NULL) { + esas2r_debug("failed to allocate header buffer!"); + fi->status = FI_STAT_BUSY; + return; + } + + memcpy(a->firmware.header_buff, fi, + sizeof(struct esas2r_flash_img)); + a->save_offset = a->firmware.header_buff; + a->fm_api_sgc.get_phys_addr = + (PGETPHYSADDR)get_physaddr_fm_api_header; + } else { + a->save_offset = (u8 *)fi; + a->fm_api_sgc.get_phys_addr = + (PGETPHYSADDR)get_physaddr_fm_api; + } + + rq->comp_cb = complete_fm_api_req; + a->fm_api_command_done = 0; + a->fm_api_sgc.cur_offset = a->save_offset; + + if (!esas2r_fm_api(a, (struct esas2r_flash_img *)a->save_offset, rq, + &a->fm_api_sgc)) + goto all_done; + + /* Now wait around for it to complete. */ + while (!a->fm_api_command_done) + wait_event_interruptible(a->fm_api_waiter, + a->fm_api_command_done); +all_done: + if (fi == &a->firmware.header) { + memcpy(fi, a->firmware.header_buff, + sizeof(struct esas2r_flash_img)); + + dma_free_coherent(&a->pcid->dev, + (size_t)sizeof(struct esas2r_flash_img), + a->firmware.header_buff, + (dma_addr_t)a->firmware.header_buff_phys); + } + + up(&a->fm_api_semaphore); + esas2r_free_request(a, (struct esas2r_request *)rq); + return; + +} + +static void complete_nvr_req(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + a->nvram_command_done = 1; + wake_up_interruptible(&a->nvram_waiter); +} + +/* Callback for building scatter/gather lists for buffered ioctls */ +static u32 get_physaddr_buffered_ioctl(struct esas2r_sg_context *sgc, + u64 *addr) +{ + int offset = (u8 *)sgc->cur_offset - esas2r_buffered_ioctl; + + (*addr) = esas2r_buffered_ioctl_addr + offset; + return esas2r_buffered_ioctl_size - offset; +} + +static void complete_buffered_ioctl_req(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + a->buffered_ioctl_done = 1; + wake_up_interruptible(&a->buffered_ioctl_waiter); +} + +static u8 handle_buffered_ioctl(struct esas2r_buffered_ioctl *bi) +{ + struct esas2r_adapter *a = bi->a; + struct esas2r_request *rq; + struct esas2r_sg_context sgc; + u8 result = IOCTL_SUCCESS; + + if (down_interruptible(&buffered_ioctl_semaphore)) + return IOCTL_OUT_OF_RESOURCES; + + /* allocate a buffer or use the existing buffer. 
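+ * + * The global buffer is effectively grow-only: a buffer smaller than the + * current request is freed and reallocated at the larger size, so repeated + * ioctls settle on the largest length seen so far.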
*/ + if (esas2r_buffered_ioctl) { + if (esas2r_buffered_ioctl_size < bi->length) { + /* free the too-small buffer and get a new one */ + dma_free_coherent(&a->pcid->dev, + (size_t)esas2r_buffered_ioctl_size, + esas2r_buffered_ioctl, + esas2r_buffered_ioctl_addr); + + goto allocate_buffer; + } + } else { +allocate_buffer: + esas2r_buffered_ioctl_size = bi->length; + esas2r_buffered_ioctl_pcid = a->pcid; + esas2r_buffered_ioctl = dma_alloc_coherent(&a->pcid->dev, + (size_t) + esas2r_buffered_ioctl_size, + & + esas2r_buffered_ioctl_addr, + GFP_KERNEL); + } + + if (!esas2r_buffered_ioctl) { + esas2r_log(ESAS2R_LOG_CRIT, + "could not allocate %d bytes of consistent memory " + "for a buffered ioctl!", + bi->length); + + esas2r_debug("buffered ioctl alloc failure"); + result = IOCTL_OUT_OF_RESOURCES; + goto exit_cleanly; + } + + memcpy(esas2r_buffered_ioctl, bi->ioctl, bi->length); + + rq = esas2r_alloc_request(a); + if (rq == NULL) { + esas2r_log(ESAS2R_LOG_CRIT, + "could not allocate an internal request"); + + result = IOCTL_OUT_OF_RESOURCES; + esas2r_debug("buffered ioctl - no requests"); + goto exit_cleanly; + } + + a->buffered_ioctl_done = 0; + rq->comp_cb = complete_buffered_ioctl_req; + sgc.cur_offset = esas2r_buffered_ioctl + bi->offset; + sgc.get_phys_addr = (PGETPHYSADDR)get_physaddr_buffered_ioctl; + sgc.length = esas2r_buffered_ioctl_size; + + if (!(*bi->callback)(a, rq, &sgc, bi->context)) { + /* completed immediately, no need to wait */ + a->buffered_ioctl_done = 0; + goto free_andexit_cleanly; + } + + /* now wait around for it to complete. */ + while (!a->buffered_ioctl_done) + wait_event_interruptible(a->buffered_ioctl_waiter, + a->buffered_ioctl_done); + +free_andexit_cleanly: + if (result == IOCTL_SUCCESS && bi->done_callback) + (*bi->done_callback)(a, rq, bi->done_context); + + esas2r_free_request(a, rq); + +exit_cleanly: + if (result == IOCTL_SUCCESS) + memcpy(bi->ioctl, esas2r_buffered_ioctl, bi->length); + + up(&buffered_ioctl_semaphore); + return result; +} + +/* SMP ioctl support */ +static int smp_ioctl_callback(struct esas2r_adapter *a, + struct esas2r_request *rq, + struct esas2r_sg_context *sgc, void *context) +{ + struct atto_ioctl_smp *si = + (struct atto_ioctl_smp *)esas2r_buffered_ioctl; + + esas2r_sgc_init(sgc, a, rq, rq->vrq->ioctl.sge); + esas2r_build_ioctl_req(a, rq, sgc->length, VDA_IOCTL_SMP); + + if (!esas2r_build_sg_list(a, rq, sgc)) { + si->status = ATTO_STS_OUT_OF_RSRC; + return false; + } + + esas2r_start_request(a, rq); + return true; +} + +static u8 handle_smp_ioctl(struct esas2r_adapter *a, struct atto_ioctl_smp *si) +{ + struct esas2r_buffered_ioctl bi; + + memset(&bi, 0, sizeof(bi)); + + bi.a = a; + bi.ioctl = si; + bi.length = sizeof(struct atto_ioctl_smp) + + le32_to_cpu(si->req_length) + + le32_to_cpu(si->rsp_length); + bi.offset = 0; + bi.callback = smp_ioctl_callback; + return handle_buffered_ioctl(&bi); +} + + +/* CSMI ioctl support */ +static void esas2r_csmi_ioctl_tunnel_comp_cb(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + rq->target_id = le16_to_cpu(rq->func_rsp.ioctl_rsp.csmi.target_id); + rq->vrq->scsi.flags |= cpu_to_le32(rq->func_rsp.ioctl_rsp.csmi.lun); + + /* Now call the original completion callback. */ + (*rq->aux_req_cb)(a, rq); +} + +/* Tunnel a CSMI IOCTL to the back end driver for processing. 
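   The tunnel refuses in degraded mode; otherwise it usurps the request's
   completion callback so esas2r_csmi_ioctl_tunnel_comp_cb() above can copy
   the returned target_id and LUN out of the firmware response before the
   original callback runs.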
*/ +static bool csmi_ioctl_tunnel(struct esas2r_adapter *a, + union atto_ioctl_csmi *ci, + struct esas2r_request *rq, + struct esas2r_sg_context *sgc, + u32 ctrl_code, + u16 target_id) +{ + struct atto_vda_ioctl_req *ioctl = &rq->vrq->ioctl; + + if (a->flags & AF_DEGRADED_MODE) + return false; + + esas2r_sgc_init(sgc, a, rq, rq->vrq->ioctl.sge); + esas2r_build_ioctl_req(a, rq, sgc->length, VDA_IOCTL_CSMI); + ioctl->csmi.ctrl_code = cpu_to_le32(ctrl_code); + ioctl->csmi.target_id = cpu_to_le16(target_id); + ioctl->csmi.lun = (u8)le32_to_cpu(rq->vrq->scsi.flags); + + /* + * Always usurp the completion callback since the interrupt callback + * mechanism may be used. + */ + rq->aux_req_cx = ci; + rq->aux_req_cb = rq->comp_cb; + rq->comp_cb = esas2r_csmi_ioctl_tunnel_comp_cb; + + if (!esas2r_build_sg_list(a, rq, sgc)) + return false; + + esas2r_start_request(a, rq); + return true; +} + +static bool check_lun(struct scsi_lun lun) +{ + bool result; + + result = ((lun.scsi_lun[7] == 0) && + (lun.scsi_lun[6] == 0) && + (lun.scsi_lun[5] == 0) && + (lun.scsi_lun[4] == 0) && + (lun.scsi_lun[3] == 0) && + (lun.scsi_lun[2] == 0) && +/* Byte 1 is intentionally skipped */ + (lun.scsi_lun[0] == 0)); + + return result; +} + +static int csmi_ioctl_callback(struct esas2r_adapter *a, + struct esas2r_request *rq, + struct esas2r_sg_context *sgc, void *context) +{ + struct atto_csmi *ci = (struct atto_csmi *)context; + union atto_ioctl_csmi *ioctl_csmi = + (union atto_ioctl_csmi *)esas2r_buffered_ioctl; + u8 path = 0; + u8 tid = 0; + u8 lun = 0; + u32 sts = CSMI_STS_SUCCESS; + struct esas2r_target *t; + unsigned long flags; + + if (ci->control_code == CSMI_CC_GET_DEV_ADDR) { + struct atto_csmi_get_dev_addr *gda = &ci->data.dev_addr; + + path = gda->path_id; + tid = gda->target_id; + lun = gda->lun; + } else if (ci->control_code == CSMI_CC_TASK_MGT) { + struct atto_csmi_task_mgmt *tm = &ci->data.tsk_mgt; + + path = tm->path_id; + tid = tm->target_id; + lun = tm->lun; + } + + if (path > 0 || tid > ESAS2R_MAX_ID) { + rq->func_rsp.ioctl_rsp.csmi.csmi_status = cpu_to_le32( + CSMI_STS_INV_PARAM); + return false; + } + + rq->target_id = tid; + rq->vrq->scsi.flags |= cpu_to_le32(lun); + + switch (ci->control_code) { + case CSMI_CC_GET_DRVR_INFO: + { + struct atto_csmi_get_driver_info *gdi = &ioctl_csmi->drvr_info; + + strcpy(gdi->description, esas2r_get_model_name(a)); + gdi->csmi_major_rev = CSMI_MAJOR_REV; + gdi->csmi_minor_rev = CSMI_MINOR_REV; + break; + } + + case CSMI_CC_GET_CNTLR_CFG: + { + struct atto_csmi_get_cntlr_cfg *gcc = &ioctl_csmi->cntlr_cfg; + + gcc->base_io_addr = 0; + pci_read_config_dword(a->pcid, PCI_BASE_ADDRESS_2, + &gcc->base_memaddr_lo); + pci_read_config_dword(a->pcid, PCI_BASE_ADDRESS_3, + &gcc->base_memaddr_hi); + gcc->board_id = MAKEDWORD(a->pcid->subsystem_device, + a->pcid->subsystem_vendor); + gcc->slot_num = CSMI_SLOT_NUM_UNKNOWN; + gcc->cntlr_class = CSMI_CNTLR_CLASS_HBA; + gcc->io_bus_type = CSMI_BUS_TYPE_PCI; + gcc->pci_addr.bus_num = a->pcid->bus->number; + gcc->pci_addr.device_num = PCI_SLOT(a->pcid->devfn); + gcc->pci_addr.function_num = PCI_FUNC(a->pcid->devfn); + + memset(gcc->serial_num, 0, sizeof(gcc->serial_num)); + + gcc->major_rev = LOBYTE(LOWORD(a->fw_version)); + gcc->minor_rev = HIBYTE(LOWORD(a->fw_version)); + gcc->build_rev = LOBYTE(HIWORD(a->fw_version)); + gcc->release_rev = HIBYTE(HIWORD(a->fw_version)); + gcc->bios_major_rev = HIBYTE(HIWORD(a->flash_ver)); + gcc->bios_minor_rev = LOBYTE(HIWORD(a->flash_ver)); + gcc->bios_build_rev = LOWORD(a->flash_ver); + + if 
(a->flags2 & AF2_THUNDERLINK) + gcc->cntlr_flags = CSMI_CNTLRF_SAS_HBA + | CSMI_CNTLRF_SATA_HBA; + else + gcc->cntlr_flags = CSMI_CNTLRF_SAS_RAID + | CSMI_CNTLRF_SATA_RAID; + + gcc->rrom_major_rev = 0; + gcc->rrom_minor_rev = 0; + gcc->rrom_build_rev = 0; + gcc->rrom_release_rev = 0; + gcc->rrom_biosmajor_rev = 0; + gcc->rrom_biosminor_rev = 0; + gcc->rrom_biosbuild_rev = 0; + gcc->rrom_biosrelease_rev = 0; + break; + } + + case CSMI_CC_GET_CNTLR_STS: + { + struct atto_csmi_get_cntlr_sts *gcs = &ioctl_csmi->cntlr_sts; + + if (a->flags & AF_DEGRADED_MODE) + gcs->status = CSMI_CNTLR_STS_FAILED; + else + gcs->status = CSMI_CNTLR_STS_GOOD; + + gcs->offline_reason = CSMI_OFFLINE_NO_REASON; + break; + } + + case CSMI_CC_FW_DOWNLOAD: + case CSMI_CC_GET_RAID_INFO: + case CSMI_CC_GET_RAID_CFG: + + sts = CSMI_STS_BAD_CTRL_CODE; + break; + + case CSMI_CC_SMP_PASSTHRU: + case CSMI_CC_SSP_PASSTHRU: + case CSMI_CC_STP_PASSTHRU: + case CSMI_CC_GET_PHY_INFO: + case CSMI_CC_SET_PHY_INFO: + case CSMI_CC_GET_LINK_ERRORS: + case CSMI_CC_GET_SATA_SIG: + case CSMI_CC_GET_CONN_INFO: + case CSMI_CC_PHY_CTRL: + + if (!csmi_ioctl_tunnel(a, ioctl_csmi, rq, sgc, + ci->control_code, + ESAS2R_TARG_ID_INV)) { + sts = CSMI_STS_FAILED; + break; + } + + return true; + + case CSMI_CC_GET_SCSI_ADDR: + { + struct atto_csmi_get_scsi_addr *gsa = &ioctl_csmi->scsi_addr; + + struct scsi_lun lun; + + memcpy(&lun, gsa->sas_lun, sizeof(struct scsi_lun)); + + if (!check_lun(lun)) { + sts = CSMI_STS_NO_SCSI_ADDR; + break; + } + + /* make sure the device is present */ + spin_lock_irqsave(&a->mem_lock, flags); + t = esas2r_targ_db_find_by_sas_addr(a, (u64 *)gsa->sas_addr); + spin_unlock_irqrestore(&a->mem_lock, flags); + + if (t == NULL) { + sts = CSMI_STS_NO_SCSI_ADDR; + break; + } + + gsa->host_index = 0xFF; + gsa->lun = gsa->sas_lun[1]; + rq->target_id = esas2r_targ_get_id(t, a); + break; + } + + case CSMI_CC_GET_DEV_ADDR: + { + struct atto_csmi_get_dev_addr *gda = &ioctl_csmi->dev_addr; + + /* make sure the target is present */ + t = a->targetdb + rq->target_id; + + if (t >= a->targetdb_end + || t->target_state != TS_PRESENT + || t->sas_addr == 0) { + sts = CSMI_STS_NO_DEV_ADDR; + break; + } + + /* fill in the result */ + *(u64 *)gda->sas_addr = t->sas_addr; + memset(gda->sas_lun, 0, sizeof(gda->sas_lun)); + gda->sas_lun[1] = (u8)le32_to_cpu(rq->vrq->scsi.flags); + break; + } + + case CSMI_CC_TASK_MGT: + + /* make sure the target is present */ + t = a->targetdb + rq->target_id; + + if (t >= a->targetdb_end + || t->target_state != TS_PRESENT + || !(t->flags & TF_PASS_THRU)) { + sts = CSMI_STS_NO_DEV_ADDR; + break; + } + + if (!csmi_ioctl_tunnel(a, ioctl_csmi, rq, sgc, + ci->control_code, + t->phys_targ_id)) { + sts = CSMI_STS_FAILED; + break; + } + + return true; + + default: + + sts = CSMI_STS_BAD_CTRL_CODE; + break; + } + + rq->func_rsp.ioctl_rsp.csmi.csmi_status = cpu_to_le32(sts); + + return false; +} + + +static void csmi_ioctl_done_callback(struct esas2r_adapter *a, + struct esas2r_request *rq, void *context) +{ + struct atto_csmi *ci = (struct atto_csmi *)context; + union atto_ioctl_csmi *ioctl_csmi = + (union atto_ioctl_csmi *)esas2r_buffered_ioctl; + + switch (ci->control_code) { + case CSMI_CC_GET_DRVR_INFO: + { + struct atto_csmi_get_driver_info *gdi = + &ioctl_csmi->drvr_info; + + strcpy(gdi->name, ESAS2R_VERSION_STR); + + gdi->major_rev = ESAS2R_MAJOR_REV; + gdi->minor_rev = ESAS2R_MINOR_REV; + gdi->build_rev = 0; + gdi->release_rev = 0; + break; + } + + case CSMI_CC_GET_SCSI_ADDR: + { + struct atto_csmi_get_scsi_addr 
*gsa = &ioctl_csmi->scsi_addr; + + if (le32_to_cpu(rq->func_rsp.ioctl_rsp.csmi.csmi_status) == + CSMI_STS_SUCCESS) { + gsa->target_id = rq->target_id; + gsa->path_id = 0; + } + + break; + } + } + + ci->status = le32_to_cpu(rq->func_rsp.ioctl_rsp.csmi.csmi_status); +} + + +static u8 handle_csmi_ioctl(struct esas2r_adapter *a, struct atto_csmi *ci) +{ + struct esas2r_buffered_ioctl bi; + + memset(&bi, 0, sizeof(bi)); + + bi.a = a; + bi.ioctl = &ci->data; + bi.length = sizeof(union atto_ioctl_csmi); + bi.offset = 0; + bi.callback = csmi_ioctl_callback; + bi.context = ci; + bi.done_callback = csmi_ioctl_done_callback; + bi.done_context = ci; + + return handle_buffered_ioctl(&bi); +} + +/* ATTO HBA ioctl support */ + +/* Tunnel an ATTO HBA IOCTL to the back end driver for processing. */ +static bool hba_ioctl_tunnel(struct esas2r_adapter *a, + struct atto_ioctl *hi, + struct esas2r_request *rq, + struct esas2r_sg_context *sgc) +{ + esas2r_sgc_init(sgc, a, rq, rq->vrq->ioctl.sge); + + esas2r_build_ioctl_req(a, rq, sgc->length, VDA_IOCTL_HBA); + + if (!esas2r_build_sg_list(a, rq, sgc)) { + hi->status = ATTO_STS_OUT_OF_RSRC; + + return false; + } + + esas2r_start_request(a, rq); + + return true; +} + +static void scsi_passthru_comp_cb(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + struct atto_ioctl *hi = (struct atto_ioctl *)rq->aux_req_cx; + struct atto_hba_scsi_pass_thru *spt = &hi->data.scsi_pass_thru; + u8 sts = ATTO_SPT_RS_FAILED; + + spt->scsi_status = rq->func_rsp.scsi_rsp.scsi_stat; + spt->sense_length = rq->sense_len; + spt->residual_length = + le32_to_cpu(rq->func_rsp.scsi_rsp.residual_length); + + switch (rq->req_stat) { + case RS_SUCCESS: + case RS_SCSI_ERROR: + sts = ATTO_SPT_RS_SUCCESS; + break; + case RS_UNDERRUN: + sts = ATTO_SPT_RS_UNDERRUN; + break; + case RS_OVERRUN: + sts = ATTO_SPT_RS_OVERRUN; + break; + case RS_SEL: + case RS_SEL2: + sts = ATTO_SPT_RS_NO_DEVICE; + break; + case RS_NO_LUN: + sts = ATTO_SPT_RS_NO_LUN; + break; + case RS_TIMEOUT: + sts = ATTO_SPT_RS_TIMEOUT; + break; + case RS_DEGRADED: + sts = ATTO_SPT_RS_DEGRADED; + break; + case RS_BUSY: + sts = ATTO_SPT_RS_BUSY; + break; + case RS_ABORTED: + sts = ATTO_SPT_RS_ABORTED; + break; + case RS_RESET: + sts = ATTO_SPT_RS_BUS_RESET; + break; + } + + spt->req_status = sts; + + /* Update the target ID to the next one present. */ + spt->target_id = + esas2r_targ_db_find_next_present(a, (u16)spt->target_id); + + /* Done, call the completion callback. 
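   (aux_req_cb holds the original comp_cb that scsi_passthru_comp_cb
   interposed on when the pass-through request was built).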
*/ + (*rq->aux_req_cb)(a, rq); +} + +static int hba_ioctl_callback(struct esas2r_adapter *a, + struct esas2r_request *rq, + struct esas2r_sg_context *sgc, + void *context) +{ + struct atto_ioctl *hi = (struct atto_ioctl *)esas2r_buffered_ioctl; + + hi->status = ATTO_STS_SUCCESS; + + switch (hi->function) { + case ATTO_FUNC_GET_ADAP_INFO: + { + u8 *class_code = (u8 *)&a->pcid->class; + + struct atto_hba_get_adapter_info *gai = + &hi->data.get_adap_info; + int pcie_cap_reg; + + if (hi->flags & HBAF_TUNNEL) { + hi->status = ATTO_STS_UNSUPPORTED; + break; + } + + if (hi->version > ATTO_VER_GET_ADAP_INFO0) { + hi->status = ATTO_STS_INV_VERSION; + hi->version = ATTO_VER_GET_ADAP_INFO0; + break; + } + + memset(gai, 0, sizeof(*gai)); + + gai->pci.vendor_id = a->pcid->vendor; + gai->pci.device_id = a->pcid->device; + gai->pci.ss_vendor_id = a->pcid->subsystem_vendor; + gai->pci.ss_device_id = a->pcid->subsystem_device; + gai->pci.class_code[0] = class_code[0]; + gai->pci.class_code[1] = class_code[1]; + gai->pci.class_code[2] = class_code[2]; + gai->pci.rev_id = a->pcid->revision; + gai->pci.bus_num = a->pcid->bus->number; + gai->pci.dev_num = PCI_SLOT(a->pcid->devfn); + gai->pci.func_num = PCI_FUNC(a->pcid->devfn); + + pcie_cap_reg = pci_find_capability(a->pcid, PCI_CAP_ID_EXP); + if (pcie_cap_reg) { + u16 stat; + u32 caps; + + pci_read_config_word(a->pcid, + pcie_cap_reg + PCI_EXP_LNKSTA, + &stat); + pci_read_config_dword(a->pcid, + pcie_cap_reg + PCI_EXP_LNKCAP, + &caps); + + gai->pci.link_speed_curr = + (u8)(stat & PCI_EXP_LNKSTA_CLS); + gai->pci.link_speed_max = + (u8)(caps & PCI_EXP_LNKCAP_SLS); + gai->pci.link_width_curr = + (u8)((stat & PCI_EXP_LNKSTA_NLW) + >> PCI_EXP_LNKSTA_NLW_SHIFT); + gai->pci.link_width_max = + (u8)((caps & PCI_EXP_LNKCAP_MLW) + >> 4); + } + + gai->pci.msi_vector_cnt = 1; + + if (a->pcid->msix_enabled) + gai->pci.interrupt_mode = ATTO_GAI_PCIIM_MSIX; + else if (a->pcid->msi_enabled) + gai->pci.interrupt_mode = ATTO_GAI_PCIIM_MSI; + else + gai->pci.interrupt_mode = ATTO_GAI_PCIIM_LEGACY; + + gai->adap_type = ATTO_GAI_AT_ESASRAID2; + + if (a->flags2 & AF2_THUNDERLINK) + gai->adap_type = ATTO_GAI_AT_TLSASHBA; + + if (a->flags & AF_DEGRADED_MODE) + gai->adap_flags |= ATTO_GAI_AF_DEGRADED; + + gai->adap_flags |= ATTO_GAI_AF_SPT_SUPP | + ATTO_GAI_AF_DEVADDR_SUPP; + + if (a->pcid->subsystem_device == ATTO_ESAS_R60F + || a->pcid->subsystem_device == ATTO_ESAS_R608 + || a->pcid->subsystem_device == ATTO_ESAS_R644 + || a->pcid->subsystem_device == ATTO_TSSC_3808E) + gai->adap_flags |= ATTO_GAI_AF_VIRT_SES; + + gai->num_ports = ESAS2R_NUM_PHYS; + gai->num_phys = ESAS2R_NUM_PHYS; + + strcpy(gai->firmware_rev, a->fw_rev); + strcpy(gai->flash_rev, a->flash_rev); + strcpy(gai->model_name_short, esas2r_get_model_name_short(a)); + strcpy(gai->model_name, esas2r_get_model_name(a)); + + gai->num_targets = ESAS2R_MAX_TARGETS; + + gai->num_busses = 1; + gai->num_targsper_bus = gai->num_targets; + gai->num_lunsper_targ = 256; + + if (a->pcid->subsystem_device == ATTO_ESAS_R6F0 + || a->pcid->subsystem_device == ATTO_ESAS_R60F) + gai->num_connectors = 4; + else + gai->num_connectors = 2; + + gai->adap_flags2 |= ATTO_GAI_AF2_ADAP_CTRL_SUPP; + + gai->num_targets_backend = a->num_targets_backend; + + gai->tunnel_flags = a->ioctl_tunnel + & (ATTO_GAI_TF_MEM_RW + | ATTO_GAI_TF_TRACE + | ATTO_GAI_TF_SCSI_PASS_THRU + | ATTO_GAI_TF_GET_DEV_ADDR + | ATTO_GAI_TF_PHY_CTRL + | ATTO_GAI_TF_CONN_CTRL + | ATTO_GAI_TF_GET_DEV_INFO); + break; + } + + case ATTO_FUNC_GET_ADAP_ADDR: + { + struct 
atto_hba_get_adapter_address *gaa = + &hi->data.get_adap_addr; + + if (hi->flags & HBAF_TUNNEL) { + hi->status = ATTO_STS_UNSUPPORTED; + break; + } + + if (hi->version > ATTO_VER_GET_ADAP_ADDR0) { + hi->status = ATTO_STS_INV_VERSION; + hi->version = ATTO_VER_GET_ADAP_ADDR0; + } else if (gaa->addr_type == ATTO_GAA_AT_PORT + || gaa->addr_type == ATTO_GAA_AT_NODE) { + if (gaa->addr_type == ATTO_GAA_AT_PORT + && gaa->port_id >= ESAS2R_NUM_PHYS) { + hi->status = ATTO_STS_NOT_APPL; + } else { + memcpy((u64 *)gaa->address, + &a->nvram->sas_addr[0], sizeof(u64)); + gaa->addr_len = sizeof(u64); + } + } else { + hi->status = ATTO_STS_INV_PARAM; + } + + break; + } + + case ATTO_FUNC_MEM_RW: + { + if (hi->flags & HBAF_TUNNEL) { + if (hba_ioctl_tunnel(a, hi, rq, sgc)) + return true; + + break; + } + + hi->status = ATTO_STS_UNSUPPORTED; + + break; + } + + case ATTO_FUNC_TRACE: + { + struct atto_hba_trace *trc = &hi->data.trace; + + if (hi->flags & HBAF_TUNNEL) { + if (hba_ioctl_tunnel(a, hi, rq, sgc)) + return true; + + break; + } + + if (hi->version > ATTO_VER_TRACE1) { + hi->status = ATTO_STS_INV_VERSION; + hi->version = ATTO_VER_TRACE1; + break; + } + + if (trc->trace_type == ATTO_TRC_TT_FWCOREDUMP + && hi->version >= ATTO_VER_TRACE1) { + if (trc->trace_func == ATTO_TRC_TF_UPLOAD) { + u32 len = hi->data_length; + u32 offset = trc->current_offset; + u32 total_len = ESAS2R_FWCOREDUMP_SZ; + + /* Size is zero if a core dump isn't present */ + if (!(a->flags2 & AF2_COREDUMP_SAVED)) + total_len = 0; + + if (len > total_len) + len = total_len; + + if (offset >= total_len + || offset + len > total_len + || len == 0) { + hi->status = ATTO_STS_INV_PARAM; + break; + } + + memcpy(trc + 1, + a->fw_coredump_buff + offset, + len); + + hi->data_length = len; + } else if (trc->trace_func == ATTO_TRC_TF_RESET) { + memset(a->fw_coredump_buff, 0, + ESAS2R_FWCOREDUMP_SZ); + + esas2r_lock_clear_flags(&a->flags2, + AF2_COREDUMP_SAVED); + } else if (trc->trace_func != ATTO_TRC_TF_GET_INFO) { + hi->status = ATTO_STS_UNSUPPORTED; + break; + } + + /* Always return all the info we can. */ + trc->trace_mask = 0; + trc->current_offset = 0; + trc->total_length = ESAS2R_FWCOREDUMP_SZ; + + /* Return zero length buffer if core dump not present */ + if (!(a->flags2 & AF2_COREDUMP_SAVED)) + trc->total_length = 0; + } else { + hi->status = ATTO_STS_UNSUPPORTED; + } + + break; + } + + case ATTO_FUNC_SCSI_PASS_THRU: + { + struct atto_hba_scsi_pass_thru *spt = &hi->data.scsi_pass_thru; + struct scsi_lun lun; + + memcpy(&lun, spt->lun, sizeof(struct scsi_lun)); + + if (hi->flags & HBAF_TUNNEL) { + if (hba_ioctl_tunnel(a, hi, rq, sgc)) + return true; + + break; + } + + if (hi->version > ATTO_VER_SCSI_PASS_THRU0) { + hi->status = ATTO_STS_INV_VERSION; + hi->version = ATTO_VER_SCSI_PASS_THRU0; + break; + } + + if (spt->target_id >= ESAS2R_MAX_TARGETS || !check_lun(lun)) { + hi->status = ATTO_STS_INV_PARAM; + break; + } + + esas2r_sgc_init(sgc, a, rq, NULL); + + sgc->length = hi->data_length; + sgc->cur_offset += offsetof(struct atto_ioctl, data.byte) + + sizeof(struct atto_hba_scsi_pass_thru); + + /* Finish request initialization */ + rq->target_id = (u16)spt->target_id; + rq->vrq->scsi.flags |= cpu_to_le32(spt->lun[1]); + memcpy(rq->vrq->scsi.cdb, spt->cdb, 16); + rq->vrq->scsi.length = cpu_to_le32(hi->data_length); + rq->sense_len = spt->sense_length; + rq->sense_buf = (u8 *)spt->sense_data; + /* NOTE: we ignore spt->timeout */ + + /* + * always usurp the completion callback since the interrupt + * callback mechanism may be used. 
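	 * The interposing idiom matches the CSMI tunnel earlier in this
	 * file: save the current comp_cb in aux_req_cb, let
	 * scsi_passthru_comp_cb() translate rq->req_stat into an
	 * ATTO_SPT_RS_* code on completion, then chain to the saved
	 * callback.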
+ */ + + rq->aux_req_cx = hi; + rq->aux_req_cb = rq->comp_cb; + rq->comp_cb = scsi_passthru_comp_cb; + + if (spt->flags & ATTO_SPTF_DATA_IN) { + rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_RDD); + } else if (spt->flags & ATTO_SPTF_DATA_OUT) { + rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_WRD); + } else { + if (sgc->length) { + hi->status = ATTO_STS_INV_PARAM; + break; + } + } + + if (spt->flags & ATTO_SPTF_ORDERED_Q) + rq->vrq->scsi.flags |= + cpu_to_le32(FCP_CMND_TA_ORDRD_Q); + else if (spt->flags & ATTO_SPTF_HEAD_OF_Q) + rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_TA_HEAD_Q); + + if (!esas2r_build_sg_list(a, rq, sgc)) { + hi->status = ATTO_STS_OUT_OF_RSRC; + break; + } + + esas2r_start_request(a, rq); + + return true; + } + + case ATTO_FUNC_GET_DEV_ADDR: + { + struct atto_hba_get_device_address *gda = + &hi->data.get_dev_addr; + struct esas2r_target *t; + + if (hi->flags & HBAF_TUNNEL) { + if (hba_ioctl_tunnel(a, hi, rq, sgc)) + return true; + + break; + } + + if (hi->version > ATTO_VER_GET_DEV_ADDR0) { + hi->status = ATTO_STS_INV_VERSION; + hi->version = ATTO_VER_GET_DEV_ADDR0; + break; + } + + if (gda->target_id >= ESAS2R_MAX_TARGETS) { + hi->status = ATTO_STS_INV_PARAM; + break; + } + + t = a->targetdb + (u16)gda->target_id; + + if (t->target_state != TS_PRESENT) { + hi->status = ATTO_STS_FAILED; + } else if (gda->addr_type == ATTO_GDA_AT_PORT) { + if (t->sas_addr == 0) { + hi->status = ATTO_STS_UNSUPPORTED; + } else { + *(u64 *)gda->address = t->sas_addr; + + gda->addr_len = sizeof(u64); + } + } else if (gda->addr_type == ATTO_GDA_AT_NODE) { + hi->status = ATTO_STS_NOT_APPL; + } else { + hi->status = ATTO_STS_INV_PARAM; + } + + /* update the target ID to the next one present. */ + + gda->target_id = + esas2r_targ_db_find_next_present(a, + (u16)gda->target_id); + break; + } + + case ATTO_FUNC_PHY_CTRL: + case ATTO_FUNC_CONN_CTRL: + { + if (hba_ioctl_tunnel(a, hi, rq, sgc)) + return true; + + break; + } + + case ATTO_FUNC_ADAP_CTRL: + { + struct atto_hba_adap_ctrl *ac = &hi->data.adap_ctrl; + + if (hi->flags & HBAF_TUNNEL) { + hi->status = ATTO_STS_UNSUPPORTED; + break; + } + + if (hi->version > ATTO_VER_ADAP_CTRL0) { + hi->status = ATTO_STS_INV_VERSION; + hi->version = ATTO_VER_ADAP_CTRL0; + break; + } + + if (ac->adap_func == ATTO_AC_AF_HARD_RST) { + esas2r_reset_adapter(a); + } else if (ac->adap_func != ATTO_AC_AF_GET_STATE) { + hi->status = ATTO_STS_UNSUPPORTED; + break; + } + + if (a->flags & AF_CHPRST_NEEDED) + ac->adap_state = ATTO_AC_AS_RST_SCHED; + else if (a->flags & AF_CHPRST_PENDING) + ac->adap_state = ATTO_AC_AS_RST_IN_PROG; + else if (a->flags & AF_DISC_PENDING) + ac->adap_state = ATTO_AC_AS_RST_DISC; + else if (a->flags & AF_DISABLED) + ac->adap_state = ATTO_AC_AS_DISABLED; + else if (a->flags & AF_DEGRADED_MODE) + ac->adap_state = ATTO_AC_AS_DEGRADED; + else + ac->adap_state = ATTO_AC_AS_OK; + + break; + } + + case ATTO_FUNC_GET_DEV_INFO: + { + struct atto_hba_get_device_info *gdi = &hi->data.get_dev_info; + struct esas2r_target *t; + + if (hi->flags & HBAF_TUNNEL) { + if (hba_ioctl_tunnel(a, hi, rq, sgc)) + return true; + + break; + } + + if (hi->version > ATTO_VER_GET_DEV_INFO0) { + hi->status = ATTO_STS_INV_VERSION; + hi->version = ATTO_VER_GET_DEV_INFO0; + break; + } + + if (gdi->target_id >= ESAS2R_MAX_TARGETS) { + hi->status = ATTO_STS_INV_PARAM; + break; + } + + t = a->targetdb + (u16)gdi->target_id; + + /* update the target ID to the next one present. 
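   Returning the next present target lets a caller enumerate devices by
   feeding each returned target_id back into its next request; GET_DEV_INFO
   below and the SCSI pass-through completion above follow the same
   convention (editor's reading of the interface).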
*/ + + gdi->target_id = + esas2r_targ_db_find_next_present(a, + (u16)gdi->target_id); + + if (t->target_state != TS_PRESENT) { + hi->status = ATTO_STS_FAILED; + break; + } + + hi->status = ATTO_STS_UNSUPPORTED; + break; + } + + default: + + hi->status = ATTO_STS_INV_FUNC; + break; + } + + return false; +} + +static void hba_ioctl_done_callback(struct esas2r_adapter *a, + struct esas2r_request *rq, void *context) +{ + struct atto_ioctl *ioctl_hba = + (struct atto_ioctl *)esas2r_buffered_ioctl; + + esas2r_debug("hba_ioctl_done_callback %d", a->index); + + if (ioctl_hba->function == ATTO_FUNC_GET_ADAP_INFO) { + struct atto_hba_get_adapter_info *gai = + &ioctl_hba->data.get_adap_info; + + esas2r_debug("ATTO_FUNC_GET_ADAP_INFO"); + + gai->drvr_rev_major = ESAS2R_MAJOR_REV; + gai->drvr_rev_minor = ESAS2R_MINOR_REV; + + strcpy(gai->drvr_rev_ascii, ESAS2R_VERSION_STR); + strcpy(gai->drvr_name, ESAS2R_DRVR_NAME); + + gai->num_busses = 1; + gai->num_targsper_bus = ESAS2R_MAX_ID + 1; + gai->num_lunsper_targ = 1; + } +} + +u8 handle_hba_ioctl(struct esas2r_adapter *a, + struct atto_ioctl *ioctl_hba) +{ + struct esas2r_buffered_ioctl bi; + + memset(&bi, 0, sizeof(bi)); + + bi.a = a; + bi.ioctl = ioctl_hba; + bi.length = sizeof(struct atto_ioctl) + ioctl_hba->data_length; + bi.callback = hba_ioctl_callback; + bi.context = NULL; + bi.done_callback = hba_ioctl_done_callback; + bi.done_context = NULL; + bi.offset = 0; + + return handle_buffered_ioctl(&bi); +} + + +int esas2r_write_params(struct esas2r_adapter *a, struct esas2r_request *rq, + struct esas2r_sas_nvram *data) +{ + int result = 0; + + a->nvram_command_done = 0; + rq->comp_cb = complete_nvr_req; + + if (esas2r_nvram_write(a, rq, data)) { + /* now wait around for it to complete. */ + while (!a->nvram_command_done) + wait_event_interruptible(a->nvram_waiter, + a->nvram_command_done); + ; + + /* done, check the status. 
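   rq->req_stat is RS_SUCCESS only if the firmware accepted the NVRAM
   image; any other status leaves result at 0, and the caller reports the
   failure through ioctl->data.prw.code.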
*/ + if (rq->req_stat == RS_SUCCESS) + result = 1; + } + return result; +} + + +/* This function only cares about ATTO-specific ioctls (atto_express_ioctl) */ +int esas2r_ioctl_handler(void *hostdata, int cmd, void __user *arg) +{ + struct atto_express_ioctl *ioctl = NULL; + struct esas2r_adapter *a; + struct esas2r_request *rq; + u16 code; + int err; + + esas2r_log(ESAS2R_LOG_DEBG, "ioctl (%p, %x, %p)", hostdata, cmd, arg); + + if ((arg == NULL) + || (cmd < EXPRESS_IOCTL_MIN) + || (cmd > EXPRESS_IOCTL_MAX)) + return -ENOTSUPP; + + if (!access_ok(VERIFY_WRITE, arg, sizeof(struct atto_express_ioctl))) { + esas2r_log(ESAS2R_LOG_WARN, + "ioctl_handler access_ok failed for cmd %d, " + "address %p", cmd, + arg); + return -EFAULT; + } + + /* allocate a kernel memory buffer for the IOCTL data */ + ioctl = kzalloc(sizeof(struct atto_express_ioctl), GFP_KERNEL); + if (ioctl == NULL) { + esas2r_log(ESAS2R_LOG_WARN, + "ioctl_handler kzalloc failed for %d bytes", + sizeof(struct atto_express_ioctl)); + return -ENOMEM; + } + + err = __copy_from_user(ioctl, arg, sizeof(struct atto_express_ioctl)); + if (err != 0) { + esas2r_log(ESAS2R_LOG_WARN, + "copy_from_user didn't copy everything (err %d, cmd %d)", + err, + cmd); + kfree(ioctl); + + return -EFAULT; + } + + /* verify the signature */ + + if (memcmp(ioctl->header.signature, + EXPRESS_IOCTL_SIGNATURE, + EXPRESS_IOCTL_SIGNATURE_SIZE) != 0) { + esas2r_log(ESAS2R_LOG_WARN, "invalid signature"); + kfree(ioctl); + + return -ENOTSUPP; + } + + /* assume success */ + + ioctl->header.return_code = IOCTL_SUCCESS; + err = 0; + + /* + * handle EXPRESS_IOCTL_GET_CHANNELS + * without paying attention to channel + */ + + if (cmd == EXPRESS_IOCTL_GET_CHANNELS) { + int i = 0, k = 0; + + ioctl->data.chanlist.num_channels = 0; + + while (i < MAX_ADAPTERS) { + if (esas2r_adapters[i]) { + ioctl->data.chanlist.num_channels++; + ioctl->data.chanlist.channel[k] = i; + k++; + } + i++; + } + + goto ioctl_done; + } + + /* get the channel */ + + if (ioctl->header.channel == 0xFF) { + a = (struct esas2r_adapter *)hostdata; + } else { + a = esas2r_adapters[ioctl->header.channel]; + if (ioctl->header.channel >= MAX_ADAPTERS || (a == NULL)) { + ioctl->header.return_code = IOCTL_BAD_CHANNEL; + esas2r_log(ESAS2R_LOG_WARN, "bad channel value"); + kfree(ioctl); + + return -ENOTSUPP; + } + } + + switch (cmd) { + case EXPRESS_IOCTL_RW_FIRMWARE: + + if (ioctl->data.fwrw.img_type == FW_IMG_FM_API) { + err = esas2r_write_fw(a, + (char *)ioctl->data.fwrw.image, + 0, + sizeof(struct + atto_express_ioctl)); + + if (err >= 0) { + err = esas2r_read_fw(a, + (char *)ioctl->data.fwrw. + image, + 0, + sizeof(struct + atto_express_ioctl)); + } + } else if (ioctl->data.fwrw.img_type == FW_IMG_FS_API) { + err = esas2r_write_fs(a, + (char *)ioctl->data.fwrw.image, + 0, + sizeof(struct + atto_express_ioctl)); + + if (err >= 0) { + err = esas2r_read_fs(a, + (char *)ioctl->data.fwrw. 
+							     image,
+							     0,
+							     sizeof(struct atto_express_ioctl));
+			}
+		} else {
+			ioctl->header.return_code = IOCTL_BAD_FLASH_IMGTYPE;
+		}
+
+		break;
+
+	case EXPRESS_IOCTL_READ_PARAMS:
+
+		memcpy(ioctl->data.prw.data_buffer, a->nvram,
+		       sizeof(struct esas2r_sas_nvram));
+		ioctl->data.prw.code = 1;
+		break;
+
+	case EXPRESS_IOCTL_WRITE_PARAMS:
+
+		rq = esas2r_alloc_request(a);
+		if (rq == NULL) {
+			/* note: no down() has been done on a->nvram_semaphore
+			 * at this point, so there is nothing to up() */
+			ioctl->data.prw.code = 0;
+			break;
+		}
+
+		code = esas2r_write_params(a, rq,
+			(struct esas2r_sas_nvram *)ioctl->data.prw.data_buffer);
+		ioctl->data.prw.code = code;
+
+		esas2r_free_request(a, rq);
+
+		break;
+
+	case EXPRESS_IOCTL_DEFAULT_PARAMS:
+
+		esas2r_nvram_get_defaults(a,
+			(struct esas2r_sas_nvram *)ioctl->data.prw.data_buffer);
+		ioctl->data.prw.code = 1;
+		break;
+
+	case EXPRESS_IOCTL_CHAN_INFO:
+
+		ioctl->data.chaninfo.major_rev = ESAS2R_MAJOR_REV;
+		ioctl->data.chaninfo.minor_rev = ESAS2R_MINOR_REV;
+		ioctl->data.chaninfo.IRQ = a->pcid->irq;
+		ioctl->data.chaninfo.device_id = a->pcid->device;
+		ioctl->data.chaninfo.vendor_id = a->pcid->vendor;
+		ioctl->data.chaninfo.ven_dev_id = a->pcid->subsystem_device;
+		ioctl->data.chaninfo.revision_id = a->pcid->revision;
+		ioctl->data.chaninfo.pci_bus = a->pcid->bus->number;
+		ioctl->data.chaninfo.pci_dev_func = a->pcid->devfn;
+		ioctl->data.chaninfo.core_rev = 0;
+		ioctl->data.chaninfo.host_no = a->host->host_no;
+		ioctl->data.chaninfo.hbaapi_rev = 0;
+		break;
+
+	case EXPRESS_IOCTL_SMP:
+		ioctl->header.return_code =
+			handle_smp_ioctl(a, &ioctl->data.ioctl_smp);
+		break;
+
+	case EXPRESS_CSMI:
+		ioctl->header.return_code =
+			handle_csmi_ioctl(a, &ioctl->data.csmi);
+		break;
+
+	case EXPRESS_IOCTL_HBA:
+		ioctl->header.return_code =
+			handle_hba_ioctl(a, &ioctl->data.ioctl_hba);
+		break;
+
+	case EXPRESS_IOCTL_VDA:
+		err = esas2r_write_vda(a,
+				       (char *)&ioctl->data.ioctl_vda,
+				       0,
+				       sizeof(struct atto_ioctl_vda) +
+				       ioctl->data.ioctl_vda.data_length);
+
+		if (err >= 0) {
+			err = esas2r_read_vda(a,
+					      (char *)&ioctl->data.ioctl_vda,
+					      0,
+					      sizeof(struct atto_ioctl_vda) +
+					      ioctl->data.ioctl_vda.data_length);
+		}
+
+		break;
+
+	case EXPRESS_IOCTL_GET_MOD_INFO:
+
+		ioctl->data.modinfo.adapter = a;
+		ioctl->data.modinfo.pci_dev = a->pcid;
+		ioctl->data.modinfo.scsi_host = a->host;
+		ioctl->data.modinfo.host_no = a->host->host_no;
+
+		break;
+
+	default:
+		esas2r_debug("esas2r_ioctl invalid cmd %d!", cmd);
+		ioctl->header.return_code = IOCTL_ERR_INVCMD;
+	}
+
+ioctl_done:
+
+	if (err < 0) {
+		esas2r_log(ESAS2R_LOG_WARN, "err %d on ioctl cmd %d", err,
+			   cmd);
+
+		switch (err) {
+		case -ENOMEM:
+		case -EBUSY:
+			ioctl->header.return_code = IOCTL_OUT_OF_RESOURCES;
+			break;
+
+		case -ENOSYS:
+		case -EINVAL:
+			ioctl->header.return_code = IOCTL_INVALID_PARAM;
+			break;
+
+		default:
+			ioctl->header.return_code = IOCTL_GENERAL_ERROR;
+			break;
+		}
+	}
+
+	/* Always copy the buffer back, if only to pick up the status */
+	err = __copy_to_user(arg, ioctl, sizeof(struct atto_express_ioctl));
+	if (err != 0) {
+		esas2r_log(ESAS2R_LOG_WARN,
+			   "ioctl_handler copy_to_user didn't copy "
+			   "everything (err %d, cmd %d)", err,
+			   cmd);
+		kfree(ioctl);
+
+		return -EFAULT;
+	}
+
+	kfree(ioctl);
+
+	return 0;
+}
+
+int esas2r_ioctl(struct scsi_device *sd, int cmd, void __user *arg)
+{
+	return esas2r_ioctl_handler(sd->host->hostdata, cmd, arg);
+}
+
+static void free_fw_buffers(struct esas2r_adapter *a)
+{
+	if (a->firmware.data) {
+		dma_free_coherent(&a->pcid->dev,
+				  (size_t)a->firmware.orig_len,
+				  a->firmware.data,
+				  (dma_addr_t)a->firmware.phys);
+
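		/* clear the stale pointer so allocate_fw_buffers() and the
		 * firmware read/write paths can tell the buffer is gone */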
a->firmware.data = NULL; + } +} + +static int allocate_fw_buffers(struct esas2r_adapter *a, u32 length) +{ + free_fw_buffers(a); + + a->firmware.orig_len = length; + + a->firmware.data = (u8 *)dma_alloc_coherent(&a->pcid->dev, + (size_t)length, + (dma_addr_t *)&a->firmware. + phys, + GFP_KERNEL); + + if (!a->firmware.data) { + esas2r_debug("buffer alloc failed!"); + return 0; + } + + return 1; +} + +/* Handle a call to read firmware. */ +int esas2r_read_fw(struct esas2r_adapter *a, char *buf, long off, int count) +{ + esas2r_trace_enter(); + /* if the cached header is a status, simply copy it over and return. */ + if (a->firmware.state == FW_STATUS_ST) { + int size = min_t(int, count, sizeof(a->firmware.header)); + esas2r_trace_exit(); + memcpy(buf, &a->firmware.header, size); + esas2r_debug("esas2r_read_fw: STATUS size %d", size); + return size; + } + + /* + * if the cached header is a command, do it if at + * offset 0, otherwise copy the pieces. + */ + + if (a->firmware.state == FW_COMMAND_ST) { + u32 length = a->firmware.header.length; + esas2r_trace_exit(); + + esas2r_debug("esas2r_read_fw: COMMAND length %d off %d", + length, + off); + + if (off == 0) { + if (a->firmware.header.action == FI_ACT_UP) { + if (!allocate_fw_buffers(a, length)) + return -ENOMEM; + + + /* copy header over */ + + memcpy(a->firmware.data, + &a->firmware.header, + sizeof(a->firmware.header)); + + do_fm_api(a, + (struct esas2r_flash_img *)a->firmware.data); + } else if (a->firmware.header.action == FI_ACT_UPSZ) { + int size = + min((int)count, + (int)sizeof(a->firmware.header)); + do_fm_api(a, &a->firmware.header); + memcpy(buf, &a->firmware.header, size); + esas2r_debug("FI_ACT_UPSZ size %d", size); + return size; + } else { + esas2r_debug("invalid action %d", + a->firmware.header.action); + return -ENOSYS; + } + } + + if (count + off > length) + count = length - off; + + if (count < 0) + return 0; + + if (!a->firmware.data) { + esas2r_debug( + "read: nonzero offset but no buffer available!"); + return -ENOMEM; + } + + esas2r_debug("esas2r_read_fw: off %d count %d length %d ", off, + count, + length); + + memcpy(buf, &a->firmware.data[off], count); + + /* when done, release the buffer */ + + if (length <= off + count) { + esas2r_debug("esas2r_read_fw: freeing buffer!"); + + free_fw_buffers(a); + } + + return count; + } + + esas2r_trace_exit(); + esas2r_debug("esas2r_read_fw: invalid firmware state %d", + a->firmware.state); + + return -EINVAL; +} + +/* Handle a call to write firmware. */ +int esas2r_write_fw(struct esas2r_adapter *a, const char *buf, long off, + int count) +{ + u32 length; + + if (off == 0) { + struct esas2r_flash_img *header = + (struct esas2r_flash_img *)buf; + + /* assume version 0 flash image */ + + int min_size = sizeof(struct esas2r_flash_img_v0); + + a->firmware.state = FW_INVALID_ST; + + /* validate the version field first */ + + if (count < 4 + || header->fi_version > FI_VERSION_1) { + esas2r_debug( + "esas2r_write_fw: short header or invalid version"); + return -EINVAL; + } + + /* See if its a version 1 flash image */ + + if (header->fi_version == FI_VERSION_1) + min_size = sizeof(struct esas2r_flash_img); + + /* If this is the start, the header must be full and valid. */ + if (count < min_size) { + esas2r_debug("esas2r_write_fw: short header, aborting"); + return -EINVAL; + } + + /* Make sure the size is reasonable. 
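   no valid flash image for these controllers comes anywhere near 1 MB, so
   a larger length field is treated as a corrupt header and rejected.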
*/ + length = header->length; + + if (length > 1024 * 1024) { + esas2r_debug( + "esas2r_write_fw: hosed, length %d fi_version %d", + length, header->fi_version); + return -EINVAL; + } + + /* + * If this is a write command, allocate memory because + * we have to cache everything. otherwise, just cache + * the header, because the read op will do the command. + */ + + if (header->action == FI_ACT_DOWN) { + if (!allocate_fw_buffers(a, length)) + return -ENOMEM; + + /* + * Store the command, so there is context on subsequent + * calls. + */ + memcpy(&a->firmware.header, + buf, + sizeof(*header)); + } else if (header->action == FI_ACT_UP + || header->action == FI_ACT_UPSZ) { + /* Save the command, result will be picked up on read */ + memcpy(&a->firmware.header, + buf, + sizeof(*header)); + + a->firmware.state = FW_COMMAND_ST; + + esas2r_debug( + "esas2r_write_fw: COMMAND, count %d, action %d ", + count, header->action); + + /* + * Pretend we took the whole buffer, + * so we don't get bothered again. + */ + + return count; + } else { + esas2r_debug("esas2r_write_fw: invalid action %d ", + a->firmware.header.action); + return -ENOSYS; + } + } else { + length = a->firmware.header.length; + } + + /* + * We only get here on a download command, regardless of offset. + * the chunks written by the system need to be cached, and when + * the final one arrives, issue the fmapi command. + */ + + if (off + count > length) + count = length - off; + + if (count > 0) { + esas2r_debug("esas2r_write_fw: off %d count %d length %d", off, + count, + length); + + /* + * On a full upload, the system tries sending the whole buffer. + * there's nothing to do with it, so just drop it here, before + * trying to copy over into unallocated memory! + */ + if (a->firmware.header.action == FI_ACT_UP) + return count; + + if (!a->firmware.data) { + esas2r_debug( + "write: nonzero offset but no buffer available!"); + return -ENOMEM; + } + + memcpy(&a->firmware.data[off], buf, count); + + if (length == off + count) { + do_fm_api(a, + (struct esas2r_flash_img *)a->firmware.data); + + /* + * Now copy the header result to be picked up by the + * next read + */ + memcpy(&a->firmware.header, + a->firmware.data, + sizeof(a->firmware.header)); + + a->firmware.state = FW_STATUS_ST; + + esas2r_debug("write completed"); + + /* + * Since the system has the data buffered, the only way + * this can leak is if a root user writes a program + * that writes a shorter buffer than it claims, and the + * copyin fails. + */ + free_fw_buffers(a); + } + } + + return count; +} + +/* Callback for the completion of a VDA request. */ +static void vda_complete_req(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + a->vda_command_done = 1; + wake_up_interruptible(&a->vda_waiter); +} + +/* Scatter/gather callback for VDA requests */ +static u32 get_physaddr_vda(struct esas2r_sg_context *sgc, u64 *addr) +{ + struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter; + int offset = (u8 *)sgc->cur_offset - (u8 *)a->vda_buffer; + + (*addr) = a->ppvda_buffer + offset; + return VDA_MAX_BUFFER_SIZE - offset; +} + +/* Handle a call to read a VDA command. 
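   The sysfs protocol is write-then-read: user space writes a struct
   atto_ioctl_vda into the "vda" attribute, and the first read at offset 0
   issues the command and blocks until it completes; later reads page
   through the result buffer.  Editor's sketch of a user-space caller; the
   sysfs path is a guess from the attribute registration in esas2r_main.c
   below, and the snippet is illustrative only:

	int fd = open("/sys/class/scsi_host/host0/vda", O_RDWR);
	struct atto_ioctl_vda vi = { 0 };	// fill in the VDA command here

	if (write(fd, &vi, sizeof(vi)) == sizeof(vi))
		read(fd, &vi, sizeof(vi));	// executes, returns the result

	close(fd);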
 */
+int esas2r_read_vda(struct esas2r_adapter *a, char *buf, long off, int count)
+{
+	if (!a->vda_buffer)
+		return -ENOMEM;
+
+	if (off == 0) {
+		struct esas2r_request *rq;
+		struct atto_ioctl_vda *vi =
+			(struct atto_ioctl_vda *)a->vda_buffer;
+		struct esas2r_sg_context sgc;
+		bool wait_for_completion;
+
+		/*
+		 * Presumably, someone has already written to the vda_buffer,
+		 * and now they are reading back the response, so now we will
+		 * actually issue the request to the chip and reply.
+		 */
+
+		/* allocate a request */
+		rq = esas2r_alloc_request(a);
+		if (rq == NULL) {
+			esas2r_debug("esas2r_read_vda: out of requests");
+			return -EBUSY;
+		}
+
+		rq->comp_cb = vda_complete_req;
+
+		sgc.first_req = rq;
+		sgc.adapter = a;
+		sgc.cur_offset = a->vda_buffer + VDA_BUFFER_HEADER_SZ;
+		sgc.get_phys_addr = (PGETPHYSADDR)get_physaddr_vda;
+
+		a->vda_command_done = 0;
+
+		wait_for_completion =
+			esas2r_process_vda_ioctl(a, vi, rq, &sgc);
+
+		if (wait_for_completion) {
+			/* now wait around for it to complete. */
+			while (!a->vda_command_done)
+				wait_event_interruptible(a->vda_waiter,
+							 a->vda_command_done);
+		}
+
+		esas2r_free_request(a, rq);
+	}
+
+	if (off > VDA_MAX_BUFFER_SIZE)
+		return 0;
+
+	if (count + off > VDA_MAX_BUFFER_SIZE)
+		count = VDA_MAX_BUFFER_SIZE - off;
+
+	if (count < 0)
+		return 0;
+
+	memcpy(buf, a->vda_buffer + off, count);
+
+	return count;
+}
+
+/* Handle a call to write a VDA command. */
+int esas2r_write_vda(struct esas2r_adapter *a, const char *buf, long off,
+		     int count)
+{
+	/*
+	 * allocate memory for it, if not already done.  once allocated,
+	 * we will keep it around until the driver is unloaded.
+	 */
+	if (!a->vda_buffer) {
+		dma_addr_t dma_addr;
+
+		a->vda_buffer = (u8 *)dma_alloc_coherent(&a->pcid->dev,
+							 (size_t)VDA_MAX_BUFFER_SIZE,
+							 &dma_addr,
+							 GFP_KERNEL);
+
+		a->ppvda_buffer = dma_addr;
+	}
+
+	if (!a->vda_buffer)
+		return -ENOMEM;
+
+	if (off > VDA_MAX_BUFFER_SIZE)
+		return 0;
+
+	if (count + off > VDA_MAX_BUFFER_SIZE)
+		count = VDA_MAX_BUFFER_SIZE - off;
+
+	if (count < 1)
+		return 0;
+
+	memcpy(a->vda_buffer + off, buf, count);
+
+	return count;
+}
+
+/* Callback for the completion of an FS_API request. */
+static void fs_api_complete_req(struct esas2r_adapter *a,
+				struct esas2r_request *rq)
+{
+	a->fs_api_command_done = 1;
+
+	wake_up_interruptible(&a->fs_api_waiter);
+}
+
+/* Scatter/gather callback for FS_API requests */
+static u32 get_physaddr_fs_api(struct esas2r_sg_context *sgc, u64 *addr)
+{
+	struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter;
+	struct esas2r_ioctl_fs *fs =
+		(struct esas2r_ioctl_fs *)a->fs_api_buffer;
+	u32 offset = (u8 *)sgc->cur_offset - (u8 *)fs;
+
+	(*addr) = a->ppfs_api_buffer + offset;
+
+	return a->fs_api_buffer_size - offset;
+}
+
+/* Handle a call to read firmware via FS_API. */
+int esas2r_read_fs(struct esas2r_adapter *a, char *buf, long off, int count)
+{
+	if (!a->fs_api_buffer)
+		return -ENOMEM;
+
+	if (off == 0) {
+		struct esas2r_request *rq;
+		struct esas2r_sg_context sgc;
+		struct esas2r_ioctl_fs *fs =
+			(struct esas2r_ioctl_fs *)a->fs_api_buffer;
+
+		/* If another flash request is already in progress, return. */
+		if (down_interruptible(&a->fs_api_semaphore)) {
+busy:
+			fs->status = ATTO_STS_OUT_OF_RSRC;
+			return -EBUSY;
+		}
+
+		/*
+		 * Presumably, someone has already written to the
+		 * fs_api_buffer, and now they are reading back the
+		 * response, so now we will actually issue the request to the
+		 * chip and reply.
Allocate a request + */ + + rq = esas2r_alloc_request(a); + if (rq == NULL) { + esas2r_debug("esas2r_read_fs: out of requests"); + up(&a->fs_api_semaphore); + goto busy; + } + + rq->comp_cb = fs_api_complete_req; + + /* Set up the SGCONTEXT for to build the s/g table */ + + sgc.cur_offset = fs->data; + sgc.get_phys_addr = (PGETPHYSADDR)get_physaddr_fs_api; + + a->fs_api_command_done = 0; + + if (!esas2r_process_fs_ioctl(a, fs, rq, &sgc)) { + if (fs->status == ATTO_STS_OUT_OF_RSRC) + count = -EBUSY; + + goto dont_wait; + } + + /* Now wait around for it to complete. */ + + while (!a->fs_api_command_done) + wait_event_interruptible(a->fs_api_waiter, + a->fs_api_command_done); + ; +dont_wait: + /* Free the request and keep going */ + up(&a->fs_api_semaphore); + esas2r_free_request(a, (struct esas2r_request *)rq); + + /* Pick up possible error code from above */ + if (count < 0) + return count; + } + + if (off > a->fs_api_buffer_size) + return 0; + + if (count + off > a->fs_api_buffer_size) + count = a->fs_api_buffer_size - off; + + if (count < 0) + return 0; + + memcpy(buf, a->fs_api_buffer + off, count); + + return count; +} + +/* Handle a call to write firmware via FS_API. */ +int esas2r_write_fs(struct esas2r_adapter *a, const char *buf, long off, + int count) +{ + if (off == 0) { + struct esas2r_ioctl_fs *fs = (struct esas2r_ioctl_fs *)buf; + u32 length = fs->command.length + offsetof( + struct esas2r_ioctl_fs, + data); + + /* + * Special case, for BEGIN commands, the length field + * is lying to us, so just get enough for the header. + */ + + if (fs->command.command == ESAS2R_FS_CMD_BEGINW) + length = offsetof(struct esas2r_ioctl_fs, data); + + /* + * Beginning a command. We assume we'll get at least + * enough in the first write so we can look at the + * header and see how much we need to alloc. + */ + + if (count < offsetof(struct esas2r_ioctl_fs, data)) + return -EINVAL; + + /* Allocate a buffer or use the existing buffer. */ + if (a->fs_api_buffer) { + if (a->fs_api_buffer_size < length) { + /* Free too-small buffer and get a new one */ + dma_free_coherent(&a->pcid->dev, + (size_t)a->fs_api_buffer_size, + a->fs_api_buffer, + (dma_addr_t)a->ppfs_api_buffer); + + goto re_allocate_buffer; + } + } else { +re_allocate_buffer: + a->fs_api_buffer_size = length; + + a->fs_api_buffer = (u8 *)dma_alloc_coherent( + &a->pcid->dev, + (size_t)a->fs_api_buffer_size, + (dma_addr_t *)&a->ppfs_api_buffer, + GFP_KERNEL); + } + } + + if (!a->fs_api_buffer) + return -ENOMEM; + + if (off > a->fs_api_buffer_size) + return 0; + + if (count + off > a->fs_api_buffer_size) + count = a->fs_api_buffer_size - off; + + if (count < 1) + return 0; + + memcpy(a->fs_api_buffer + off, buf, count); + + return count; +} diff --git a/drivers/scsi/esas2r/esas2r_log.c b/drivers/scsi/esas2r/esas2r_log.c new file mode 100644 index 000000000000..9bf285df58dd --- /dev/null +++ b/drivers/scsi/esas2r/esas2r_log.c @@ -0,0 +1,254 @@ +/* + * linux/drivers/scsi/esas2r/esas2r_log.c + * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers + * + * Copyright (c) 2001-2013 ATTO Technology, Inc. + * (mailto:linuxdrivers@attotech.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. + * + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, + * USA. + */ + +#include "esas2r.h" + +/* + * this module within the driver is tasked with providing logging functionality. + * the event_log_level module parameter controls the level of messages that are + * written to the system log. the default level of messages that are written + * are critical and warning messages. if other types of messages are desired, + * one simply needs to load the module with the correct value for the + * event_log_level module parameter. for example: + * + * insmod <module> event_log_level=1 + * + * will load the module and only critical events will be written by this module + * to the system log. if critical, warning, and information-level messages are + * desired, the correct value for the event_log_level module parameter + * would be as follows: + * + * insmod <module> event_log_level=3 + */ + +#define EVENT_LOG_BUFF_SIZE 1024 + +static long event_log_level = ESAS2R_LOG_DFLT; + +module_param(event_log_level, long, S_IRUGO | S_IRUSR); +MODULE_PARM_DESC(event_log_level, + "Specifies the level of events to report to the system log. Critical and warning level events are logged by default."); + +/* A shared buffer to use for formatting messages. */ +static char event_buffer[EVENT_LOG_BUFF_SIZE]; + +/* A lock to protect the shared buffer used for formatting messages. */ +static DEFINE_SPINLOCK(event_buffer_lock); + +/** + * translates an esas2r-defined logging event level to a kernel logging level. + * + * @param [in] level the esas2r-defined logging event level to translate + * + * @return the corresponding kernel logging level. 
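 * The mapping implemented below: ESAS2R_LOG_CRIT maps to KERN_CRIT,
 * ESAS2R_LOG_WARN to KERN_WARNING, ESAS2R_LOG_INFO to KERN_INFO, and
 * everything else (DEBG, TRCE, unknown) to KERN_DEBUG.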
+ */
+static const char *translate_esas2r_event_level_to_kernel(const long level)
+{
+	switch (level) {
+	case ESAS2R_LOG_CRIT:
+		return KERN_CRIT;
+
+	case ESAS2R_LOG_WARN:
+		return KERN_WARNING;
+
+	case ESAS2R_LOG_INFO:
+		return KERN_INFO;
+
+	case ESAS2R_LOG_DEBG:
+	case ESAS2R_LOG_TRCE:
+	default:
+		return KERN_DEBUG;
+	}
+}
+
+/**
+ * the master logging function.  this function will format the message as
+ * outlined by the formatting string, the input device information and the
+ * substitution arguments and output the resulting string to the system log.
+ *
+ * @param [in] level  the event log level of the message
+ * @param [in] dev    the device information
+ * @param [in] format the formatting string for the message
+ * @param [in] args   the substitution arguments to the formatting string
+ *
+ * @return 0 on success, or -1 if an error occurred.
+ */
+static int esas2r_log_master(const long level,
+			     const struct device *dev,
+			     const char *format,
+			     va_list args)
+{
+	if (level <= event_log_level) {
+		unsigned long flags = 0;
+		int retval = 0;
+		char *buffer = event_buffer;
+		size_t buflen = EVENT_LOG_BUFF_SIZE;
+		const char *fmt_nodev = "%s%s: ";
+		const char *fmt_dev = "%s%s [%s, %s, %s]";
+		const char *slevel =
+			translate_esas2r_event_level_to_kernel(level);
+
+		spin_lock_irqsave(&event_buffer_lock, flags);
+
+		if (buffer == NULL) {
+			spin_unlock_irqrestore(&event_buffer_lock, flags);
+			return -1;
+		}
+
+		memset(buffer, 0, buflen);
+
+		/*
+		 * format the level onto the beginning of the string and do
+		 * some pointer arithmetic to move the pointer to the point
+		 * where the actual message can be inserted.
+		 */
+		if (dev == NULL) {
+			snprintf(buffer, buflen, fmt_nodev, slevel,
+				 ESAS2R_DRVR_NAME);
+		} else {
+			snprintf(buffer, buflen, fmt_dev, slevel,
+				 ESAS2R_DRVR_NAME,
+				 (dev->driver ? dev->driver->name : "unknown"),
+				 (dev->bus ? dev->bus->name : "unknown"),
+				 dev_name(dev));
+		}
+
+		buffer += strlen(event_buffer);
+		buflen -= strlen(event_buffer);
+
+		retval = vsnprintf(buffer, buflen, format, args);
+		if (retval < 0) {
+			spin_unlock_irqrestore(&event_buffer_lock, flags);
+			return -1;
+		}
+
+		/*
+		 * Put a line break at the end of the formatted string so that
+		 * we don't wind up with run-on messages.  only append if there
+		 * is enough space in the buffer.
+		 */
+		if (strlen(event_buffer) < buflen)
+			strcat(buffer, "\n");
+
+		/* print through an explicit "%s" so the formatted message
+		 * can never be misinterpreted as a format string itself */
+		printk("%s", event_buffer);
+
+		spin_unlock_irqrestore(&event_buffer_lock, flags);
+	}
+
+	return 0;
+}
+
+/**
+ * formats and logs a message to the system log.
+ *
+ * @param [in] level  the event level of the message
+ * @param [in] format the formatting string for the message
+ * @param [in] ...    the substitution arguments to the formatting string
+ *
+ * @return 0 on success, or -1 if an error occurred.
+ */
+int esas2r_log(const long level, const char *format, ...)
+{
+	int retval = 0;
+	va_list args;
+
+	va_start(args, format);
+
+	retval = esas2r_log_master(level, NULL, format, args);
+
+	va_end(args);
+
+	return retval;
+}
+
+/**
+ * formats and logs a message to the system log.  this message will include
+ * device information.
+ *
+ * @param [in] level  the event level of the message
+ * @param [in] dev    the device information
+ * @param [in] format the formatting string for the message
+ * @param [in] ...    the substitution arguments to the formatting string
+ *
+ * @return 0 on success, or -1 if an error occurred.
+ */
+int esas2r_log_dev(const long level,
+		   const struct device *dev,
+		   const char *format,
+		   ...)
+{ + int retval = 0; + va_list args; + + va_start(args, format); + + retval = esas2r_log_master(level, dev, format, args); + + va_end(args); + + return retval; +} + +/** + * formats and logs a message to the system log. this message will include + * device information. + * + * @param [in] level the event level of the message + * @param [in] buf + * @param [in] len + * + * @return 0 on success, or -1 if an error occurred. + */ +int esas2r_log_hexdump(const long level, + const void *buf, + size_t len) +{ + if (level <= event_log_level) { + print_hex_dump(translate_esas2r_event_level_to_kernel(level), + "", DUMP_PREFIX_OFFSET, 16, 1, buf, + len, true); + } + + return 1; +} diff --git a/drivers/scsi/esas2r/esas2r_log.h b/drivers/scsi/esas2r/esas2r_log.h new file mode 100644 index 000000000000..7b6397bb5b94 --- /dev/null +++ b/drivers/scsi/esas2r/esas2r_log.h @@ -0,0 +1,118 @@ +/* + * linux/drivers/scsi/esas2r/esas2r_log.h + * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers + * + * Copyright (c) 2001-2013 ATTO Technology, Inc. + * (mailto:linuxdrivers@attotech.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. + * + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, + * USA. 
+ */ + +#ifndef __esas2r_log_h__ +#define __esas2r_log_h__ + +struct device; + +enum { + ESAS2R_LOG_NONE = 0, /* no events logged */ + ESAS2R_LOG_CRIT = 1, /* critical events */ + ESAS2R_LOG_WARN = 2, /* warning events */ + ESAS2R_LOG_INFO = 3, /* info events */ + ESAS2R_LOG_DEBG = 4, /* debugging events */ + ESAS2R_LOG_TRCE = 5, /* tracing events */ + +#ifdef ESAS2R_TRACE + ESAS2R_LOG_DFLT = ESAS2R_LOG_TRCE +#else + ESAS2R_LOG_DFLT = ESAS2R_LOG_WARN +#endif +}; + +int esas2r_log(const long level, const char *format, ...); +int esas2r_log_dev(const long level, + const struct device *dev, + const char *format, + ...); +int esas2r_log_hexdump(const long level, + const void *buf, + size_t len); + +/* + * the following macros are provided specifically for debugging and tracing + * messages. esas2r_debug() is provided for generic non-hardware layer + * debugging and tracing events. esas2r_hdebug is provided specifically for + * hardware layer debugging and tracing events. + */ + +#ifdef ESAS2R_DEBUG +#define esas2r_debug(f, args ...) esas2r_log(ESAS2R_LOG_DEBG, f, ## args) +#define esas2r_hdebug(f, args ...) esas2r_log(ESAS2R_LOG_DEBG, f, ## args) +#else +#define esas2r_debug(f, args ...) +#define esas2r_hdebug(f, args ...) +#endif /* ESAS2R_DEBUG */ + +/* + * the following macros are provided in order to trace the driver and catch + * some more serious bugs. be warned, enabling these macros may *severely* + * impact performance. + */ + +#ifdef ESAS2R_TRACE +#define esas2r_bugon() \ + do { \ + esas2r_log(ESAS2R_LOG_TRCE, "esas2r_bugon() called in %s:%d" \ + " - dumping stack and stopping kernel", __func__, \ + __LINE__); \ + dump_stack(); \ + BUG(); \ + } while (0) + +#define esas2r_trace_enter() esas2r_log(ESAS2R_LOG_TRCE, "entered %s (%s:%d)", \ + __func__, __FILE__, __LINE__) +#define esas2r_trace_exit() esas2r_log(ESAS2R_LOG_TRCE, "exited %s (%s:%d)", \ + __func__, __FILE__, __LINE__) +#define esas2r_trace(f, args ...) esas2r_log(ESAS2R_LOG_TRCE, "(%s:%s:%d): " \ + f, __func__, __FILE__, __LINE__, \ + ## args) +#else +#define esas2r_bugon() +#define esas2r_trace_enter() +#define esas2r_trace_exit() +#define esas2r_trace(f, args ...) +#endif /* ESAS2R_TRACE */ + +#endif /* __esas2r_log_h__ */ diff --git a/drivers/scsi/esas2r/esas2r_main.c b/drivers/scsi/esas2r/esas2r_main.c new file mode 100644 index 000000000000..4abf1272e1eb --- /dev/null +++ b/drivers/scsi/esas2r/esas2r_main.c @@ -0,0 +1,2032 @@ +/* + * linux/drivers/scsi/esas2r/esas2r_main.c + * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers + * + * Copyright (c) 2001-2013 ATTO Technology, Inc. + * (mailto:linuxdrivers@attotech.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. 
Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. + * + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, + * USA. + */ + +#include "esas2r.h" + +MODULE_DESCRIPTION(ESAS2R_DRVR_NAME ": " ESAS2R_LONGNAME " driver"); +MODULE_AUTHOR("ATTO Technology, Inc."); +MODULE_LICENSE("GPL"); +MODULE_VERSION(ESAS2R_VERSION_STR); + +/* global definitions */ + +static int found_adapters; +struct esas2r_adapter *esas2r_adapters[MAX_ADAPTERS]; + +#define ESAS2R_VDA_EVENT_PORT1 54414 +#define ESAS2R_VDA_EVENT_PORT2 54415 +#define ESAS2R_VDA_EVENT_SOCK_COUNT 2 + +static struct esas2r_adapter *esas2r_adapter_from_kobj(struct kobject *kobj) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct Scsi_Host *host = class_to_shost(dev); + + return (struct esas2r_adapter *)host->hostdata; +} + +static ssize_t read_fw(struct file *file, struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t off, size_t count) +{ + struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj); + + return esas2r_read_fw(a, buf, off, count); +} + +static ssize_t write_fw(struct file *file, struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t off, size_t count) +{ + struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj); + + return esas2r_write_fw(a, buf, off, count); +} + +static ssize_t read_fs(struct file *file, struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t off, size_t count) +{ + struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj); + + return esas2r_read_fs(a, buf, off, count); +} + +static ssize_t write_fs(struct file *file, struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t off, size_t count) +{ + struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj); + int length = min(sizeof(struct esas2r_ioctl_fs), count); + int result = 0; + + result = esas2r_write_fs(a, buf, off, count); + + if (result < 0) + result = 0; + + return length; +} + +static ssize_t read_vda(struct file *file, struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t off, size_t count) +{ + struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj); + + return esas2r_read_vda(a, buf, off, count); +} + +static ssize_t write_vda(struct file *file, struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t off, size_t count) +{ + struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj); + + return esas2r_write_vda(a, buf, off, count); +} + +static ssize_t read_live_nvram(struct file *file, struct kobject *kobj, + 
struct bin_attribute *attr,
+			       char *buf, loff_t off, size_t count)
+{
+	struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
+	int length = min_t(size_t, sizeof(struct esas2r_sas_nvram), PAGE_SIZE);
+
+	memcpy(buf, a->nvram, length);
+	return length;
+}
+
+static ssize_t write_live_nvram(struct file *file, struct kobject *kobj,
+				struct bin_attribute *attr,
+				char *buf, loff_t off, size_t count)
+{
+	struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
+	struct esas2r_request *rq;
+	int result = -EFAULT;
+
+	rq = esas2r_alloc_request(a);
+	if (rq == NULL)
+		return -ENOMEM;
+
+	if (esas2r_write_params(a, rq, (struct esas2r_sas_nvram *)buf))
+		result = count;
+
+	esas2r_free_request(a, rq);
+
+	return result;
+}
+
+static ssize_t read_default_nvram(struct file *file, struct kobject *kobj,
+				  struct bin_attribute *attr,
+				  char *buf, loff_t off, size_t count)
+{
+	struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
+
+	esas2r_nvram_get_defaults(a, (struct esas2r_sas_nvram *)buf);
+
+	return sizeof(struct esas2r_sas_nvram);
+}
+
+static ssize_t read_hw(struct file *file, struct kobject *kobj,
+		       struct bin_attribute *attr,
+		       char *buf, loff_t off, size_t count)
+{
+	struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
+	int length = min_t(size_t, sizeof(struct atto_ioctl), PAGE_SIZE);
+
+	if (!a->local_atto_ioctl)
+		return -ENOMEM;
+
+	if (handle_hba_ioctl(a, a->local_atto_ioctl) != IOCTL_SUCCESS)
+		return -ENOMEM;
+
+	memcpy(buf, a->local_atto_ioctl, length);
+
+	return length;
+}
+
+static ssize_t write_hw(struct file *file, struct kobject *kobj,
+			struct bin_attribute *attr,
+			char *buf, loff_t off, size_t count)
+{
+	struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
+	int length = min(sizeof(struct atto_ioctl), count);
+
+	if (!a->local_atto_ioctl) {
+		a->local_atto_ioctl = kzalloc(sizeof(struct atto_ioctl),
+					      GFP_KERNEL);
+		if (a->local_atto_ioctl == NULL) {
+			esas2r_log(ESAS2R_LOG_WARN,
+				   "write_hw kzalloc failed for %zu bytes",
+				   sizeof(struct atto_ioctl));
+			return -ENOMEM;
+		}
+	}
+
+	memset(a->local_atto_ioctl, 0, sizeof(struct atto_ioctl));
+	memcpy(a->local_atto_ioctl, buf, length);
+
+	return length;
+}
+
+#define ESAS2R_RW_BIN_ATTR(_name) \
+	struct bin_attribute bin_attr_ ## _name = { \
+		.attr	= \
+		{ .name = __stringify(_name), .mode  = S_IRUSR | S_IWUSR }, \
+		.size	= 0, \
+		.read	= read_ ## _name, \
+		.write	= write_ ## _name }
+
+ESAS2R_RW_BIN_ATTR(fw);
+ESAS2R_RW_BIN_ATTR(fs);
+ESAS2R_RW_BIN_ATTR(vda);
+ESAS2R_RW_BIN_ATTR(hw);
+ESAS2R_RW_BIN_ATTR(live_nvram);
+
+struct bin_attribute bin_attr_default_nvram = {
+	.attr	= { .name = "default_nvram", .mode = S_IRUGO },
+	.size	= 0,
+	.read	= read_default_nvram,
+	.write	= NULL
+};
+
+static struct scsi_host_template driver_template = {
+	.module				= THIS_MODULE,
+	.show_info			= esas2r_show_info,
+	.name				= ESAS2R_LONGNAME,
+	.release			= esas2r_release,
+	.info				= esas2r_info,
+	.ioctl				= esas2r_ioctl,
+	.queuecommand			= esas2r_queuecommand,
+	.eh_abort_handler		= esas2r_eh_abort,
+	.eh_device_reset_handler	= esas2r_device_reset,
+	.eh_bus_reset_handler		= esas2r_bus_reset,
+	.eh_host_reset_handler		= esas2r_host_reset,
+	.eh_target_reset_handler	= esas2r_target_reset,
+	.can_queue			= 128,
+	.this_id			= -1,
+	.sg_tablesize			= SCSI_MAX_SG_SEGMENTS,
+	.cmd_per_lun			= ESAS2R_DEFAULT_CMD_PER_LUN,
+	.present			= 0,
+	.unchecked_isa_dma		= 0,
+	.use_clustering			= ENABLE_CLUSTERING,
+	.emulated			= 0,
+	.proc_name			= ESAS2R_DRVR_NAME,
+	.slave_configure		= esas2r_slave_configure,
+	.slave_alloc			= esas2r_slave_alloc,
+	.slave_destroy			= esas2r_slave_destroy,
+	.change_queue_depth		= esas2r_change_queue_depth,
+	.change_queue_type		= esas2r_change_queue_type,
+	.max_sectors			= 0xFFFF,
+};
+
+int sgl_page_size = 512;
+module_param(sgl_page_size, int, 0);
+MODULE_PARM_DESC(sgl_page_size,
+		 "Scatter/gather list (SGL) page size in number of S/G "
+		 "entries.  If your application is doing a lot of very large "
+		 "transfers, you may want to increase the SGL page size.  "
+		 "Default 512.");
+
+int num_sg_lists = 1024;
+module_param(num_sg_lists, int, 0);
+MODULE_PARM_DESC(num_sg_lists,
+		 "Number of scatter/gather lists.  Default 1024.");
+
+int sg_tablesize = SCSI_MAX_SG_SEGMENTS;
+module_param(sg_tablesize, int, 0);
+MODULE_PARM_DESC(sg_tablesize,
+		 "Maximum number of entries in a scatter/gather table.");
+
+int num_requests = 256;
+module_param(num_requests, int, 0);
+MODULE_PARM_DESC(num_requests,
+		 "Number of requests.  Default 256.");
+
+int num_ae_requests = 4;
+module_param(num_ae_requests, int, 0);
+MODULE_PARM_DESC(num_ae_requests,
+		 "Number of VDA asynchronous event requests.  Default 4.");
+
+int cmd_per_lun = ESAS2R_DEFAULT_CMD_PER_LUN;
+module_param(cmd_per_lun, int, 0);
+MODULE_PARM_DESC(cmd_per_lun,
+		 "Maximum number of commands per LUN.  Default "
+		 DEFINED_NUM_TO_STR(ESAS2R_DEFAULT_CMD_PER_LUN) ".");
+
+int can_queue = 128;
+module_param(can_queue, int, 0);
+MODULE_PARM_DESC(can_queue,
+		 "Maximum number of commands per adapter.  Default 128.");
+
+int esas2r_max_sectors = 0xFFFF;
+module_param(esas2r_max_sectors, int, 0);
+MODULE_PARM_DESC(esas2r_max_sectors,
+		 "Maximum number of disk sectors in a single data transfer.  "
+		 "Default 65535 (largest possible setting).");
+
+int interrupt_mode = 1;
+module_param(interrupt_mode, int, 0);
+MODULE_PARM_DESC(interrupt_mode,
+		 "Defines the interrupt mode to use.  0 for legacy"
+		 ", 1 for MSI.  Default is MSI (1).");
+
+static struct pci_device_id esas2r_pci_table[] = {
+	{ ATTO_VENDOR_ID, 0x0049, ATTO_VENDOR_ID, 0x0049, 0, 0, 0 },
+	{ ATTO_VENDOR_ID, 0x0049, ATTO_VENDOR_ID, 0x004A, 0, 0, 0 },
+	{ ATTO_VENDOR_ID, 0x0049, ATTO_VENDOR_ID, 0x004B, 0, 0, 0 },
+	{ ATTO_VENDOR_ID, 0x0049, ATTO_VENDOR_ID, 0x004C, 0, 0, 0 },
+	{ ATTO_VENDOR_ID, 0x0049, ATTO_VENDOR_ID, 0x004D, 0, 0, 0 },
+	{ ATTO_VENDOR_ID, 0x0049, ATTO_VENDOR_ID, 0x004E, 0, 0, 0 },
+	{ 0, 0, 0, 0, 0, 0, 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, esas2r_pci_table);
+
+static int esas2r_probe(struct pci_dev *pcid, const struct pci_device_id *id);
+static void esas2r_remove(struct pci_dev *pcid);
+
+static struct pci_driver esas2r_pci_driver = {
+	.name		= ESAS2R_DRVR_NAME,
+	.id_table	= esas2r_pci_table,
+	.probe		= esas2r_probe,
+	.remove		= esas2r_remove,
+	.suspend	= esas2r_suspend,
+	.resume		= esas2r_resume,
+};
+
+static int esas2r_probe(struct pci_dev *pcid,
+			const struct pci_device_id *id)
+{
+	struct Scsi_Host *host = NULL;
+	struct esas2r_adapter *a;
+	int err;
+
+	size_t host_alloc_size = sizeof(struct esas2r_adapter) +
+				 ((num_requests) + 1) *
+				 sizeof(struct esas2r_request);
+
+	esas2r_log_dev(ESAS2R_LOG_DEBG, &(pcid->dev),
+		       "esas2r_probe() 0x%02x 0x%02x 0x%02x 0x%02x",
+		       pcid->vendor,
+		       pcid->device,
+		       pcid->subsystem_vendor,
+		       pcid->subsystem_device);
+
+	esas2r_log_dev(ESAS2R_LOG_INFO, &(pcid->dev),
+		       "before pci_enable_device() "
+		       "enable_cnt: %d",
+		       pcid->enable_cnt.counter);
+
+	err = pci_enable_device(pcid);
+	if (err != 0) {
+		esas2r_log_dev(ESAS2R_LOG_CRIT, &(pcid->dev),
+			       "pci_enable_device() FAIL (%d)",
+			       err);
+		return -ENODEV;
+	}
+
+	esas2r_log_dev(ESAS2R_LOG_INFO, &(pcid->dev),
+		       "pci_enable_device() OK");
+	esas2r_log_dev(ESAS2R_LOG_INFO, &(pcid->dev),
+		       "after pci_enable_device() enable_cnt: %d",
+		       pcid->enable_cnt.counter);
+
+	host = scsi_host_alloc(&driver_template, host_alloc_size);
+	if (host == NULL) {
+		esas2r_log(ESAS2R_LOG_CRIT, "scsi_host_alloc() FAIL");
+		return -ENODEV;
+	}
+
+	memset(host->hostdata, 0, host_alloc_size);
+
+	a = (struct esas2r_adapter *)host->hostdata;
+
+	esas2r_log(ESAS2R_LOG_INFO, "scsi_host_alloc() OK host: %p", host);
+
+	/* override max LUN and max target id */
+
+	host->max_id = ESAS2R_MAX_ID + 1;
+	host->max_lun = 255;
+
+	/* we can handle 16-byte CDBs */
+
+	host->max_cmd_len = 16;
+
+	host->can_queue = can_queue;
+	host->cmd_per_lun = cmd_per_lun;
+	host->this_id = host->max_id + 1;
+	host->max_channel = 0;
+	host->unique_id = found_adapters;
+	host->sg_tablesize = sg_tablesize;
+	host->max_sectors = esas2r_max_sectors;
+
+	/* set to bus master for BIOSes that don't do it for us */
+
+	esas2r_log(ESAS2R_LOG_INFO, "pci_set_master() called");
+
+	pci_set_master(pcid);
+
+	if (!esas2r_init_adapter(host, pcid, found_adapters)) {
+		esas2r_log(ESAS2R_LOG_CRIT,
+			   "unable to initialize device at PCI bus %x:%x",
+			   pcid->bus->number,
+			   pcid->devfn);
+
+		esas2r_log_dev(ESAS2R_LOG_INFO, &(host->shost_gendev),
+			       "scsi_host_put() called");
+
+		scsi_host_put(host);
+
+		return -ENODEV;
+	}
+
+	esas2r_log(ESAS2R_LOG_INFO, "pci_set_drvdata(%p, %p) called", pcid,
+		   host->hostdata);
+
+	pci_set_drvdata(pcid, host);
+
+	esas2r_log(ESAS2R_LOG_INFO, "scsi_add_host() called");
+
+	err = scsi_add_host(host, &pcid->dev);
+
+	if (err) {
+		esas2r_log(ESAS2R_LOG_CRIT, "scsi_add_host returned %d", err);
+		esas2r_log_dev(ESAS2R_LOG_CRIT, &(host->shost_gendev),
+			       "scsi_add_host() FAIL");
+
+		esas2r_log_dev(ESAS2R_LOG_INFO, &(host->shost_gendev),
"scsi_host_put() called"); + + scsi_host_put(host); + + esas2r_log_dev(ESAS2R_LOG_INFO, &(host->shost_gendev), + "pci_set_drvdata(%p, NULL) called", + pcid); + + pci_set_drvdata(pcid, NULL); + + return -ENODEV; + } + + + esas2r_fw_event_on(a); + + esas2r_log_dev(ESAS2R_LOG_INFO, &(host->shost_gendev), + "scsi_scan_host() called"); + + scsi_scan_host(host); + + /* Add sysfs binary files */ + if (sysfs_create_bin_file(&host->shost_dev.kobj, &bin_attr_fw)) + esas2r_log_dev(ESAS2R_LOG_WARN, &(host->shost_gendev), + "Failed to create sysfs binary file: fw"); + else + a->sysfs_fw_created = 1; + + if (sysfs_create_bin_file(&host->shost_dev.kobj, &bin_attr_fs)) + esas2r_log_dev(ESAS2R_LOG_WARN, &(host->shost_gendev), + "Failed to create sysfs binary file: fs"); + else + a->sysfs_fs_created = 1; + + if (sysfs_create_bin_file(&host->shost_dev.kobj, &bin_attr_vda)) + esas2r_log_dev(ESAS2R_LOG_WARN, &(host->shost_gendev), + "Failed to create sysfs binary file: vda"); + else + a->sysfs_vda_created = 1; + + if (sysfs_create_bin_file(&host->shost_dev.kobj, &bin_attr_hw)) + esas2r_log_dev(ESAS2R_LOG_WARN, &(host->shost_gendev), + "Failed to create sysfs binary file: hw"); + else + a->sysfs_hw_created = 1; + + if (sysfs_create_bin_file(&host->shost_dev.kobj, &bin_attr_live_nvram)) + esas2r_log_dev(ESAS2R_LOG_WARN, &(host->shost_gendev), + "Failed to create sysfs binary file: live_nvram"); + else + a->sysfs_live_nvram_created = 1; + + if (sysfs_create_bin_file(&host->shost_dev.kobj, + &bin_attr_default_nvram)) + esas2r_log_dev(ESAS2R_LOG_WARN, &(host->shost_gendev), + "Failed to create sysfs binary file: default_nvram"); + else + a->sysfs_default_nvram_created = 1; + + found_adapters++; + + return 0; +} + +static void esas2r_remove(struct pci_dev *pdev) +{ + struct Scsi_Host *host; + int index; + + if (pdev == NULL) { + esas2r_log(ESAS2R_LOG_WARN, "esas2r_remove pdev==NULL"); + return; + } + + host = pci_get_drvdata(pdev); + + if (host == NULL) { + /* + * this can happen if pci_set_drvdata was already called + * to clear the host pointer. if this is the case, we + * are okay; this channel has already been cleaned up. 
+ */ + + return; + } + + esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), + "esas2r_remove(%p) called; " + "host:%p", pdev, + host); + + index = esas2r_cleanup(host); + + if (index < 0) + esas2r_log_dev(ESAS2R_LOG_WARN, &(pdev->dev), + "unknown host in %s", + __func__); + + found_adapters--; + + /* if this was the last adapter, clean up the rest of the driver */ + + if (found_adapters == 0) + esas2r_cleanup(NULL); +} + +static int __init esas2r_init(void) +{ + int i; + + esas2r_log(ESAS2R_LOG_INFO, "%s called", __func__); + + /* verify valid parameters */ + + if (can_queue < 1) { + esas2r_log(ESAS2R_LOG_WARN, + "warning: can_queue must be at least 1, value " + "forced."); + can_queue = 1; + } else if (can_queue > 2048) { + esas2r_log(ESAS2R_LOG_WARN, + "warning: can_queue must be no larger than 2048, " + "value forced."); + can_queue = 2048; + } + + if (cmd_per_lun < 1) { + esas2r_log(ESAS2R_LOG_WARN, + "warning: cmd_per_lun must be at least 1, value " + "forced."); + cmd_per_lun = 1; + } else if (cmd_per_lun > 2048) { + esas2r_log(ESAS2R_LOG_WARN, + "warning: cmd_per_lun must be no larger than " + "2048, value forced."); + cmd_per_lun = 2048; + } + + if (sg_tablesize < 32) { + esas2r_log(ESAS2R_LOG_WARN, + "warning: sg_tablesize must be at least 32, " + "value forced."); + sg_tablesize = 32; + } + + if (esas2r_max_sectors < 1) { + esas2r_log(ESAS2R_LOG_WARN, + "warning: esas2r_max_sectors must be at least " + "1, value forced."); + esas2r_max_sectors = 1; + } else if (esas2r_max_sectors > 0xffff) { + esas2r_log(ESAS2R_LOG_WARN, + "warning: esas2r_max_sectors must be no larger " + "than 0xffff, value forced."); + esas2r_max_sectors = 0xffff; + } + + sgl_page_size &= ~(ESAS2R_SGL_ALIGN - 1); + + if (sgl_page_size < SGL_PG_SZ_MIN) + sgl_page_size = SGL_PG_SZ_MIN; + else if (sgl_page_size > SGL_PG_SZ_MAX) + sgl_page_size = SGL_PG_SZ_MAX; + + if (num_sg_lists < NUM_SGL_MIN) + num_sg_lists = NUM_SGL_MIN; + else if (num_sg_lists > NUM_SGL_MAX) + num_sg_lists = NUM_SGL_MAX; + + if (num_requests < NUM_REQ_MIN) + num_requests = NUM_REQ_MIN; + else if (num_requests > NUM_REQ_MAX) + num_requests = NUM_REQ_MAX; + + if (num_ae_requests < NUM_AE_MIN) + num_ae_requests = NUM_AE_MIN; + else if (num_ae_requests > NUM_AE_MAX) + num_ae_requests = NUM_AE_MAX; + + /* set up other globals */ + + for (i = 0; i < MAX_ADAPTERS; i++) + esas2r_adapters[i] = NULL; + + /* initialize */ + + driver_template.module = THIS_MODULE; + + if (pci_register_driver(&esas2r_pci_driver) != 0) + esas2r_log(ESAS2R_LOG_CRIT, "pci_register_driver FAILED"); + else + esas2r_log(ESAS2R_LOG_INFO, "pci_register_driver() OK"); + + if (!found_adapters) { + pci_unregister_driver(&esas2r_pci_driver); + esas2r_cleanup(NULL); + + esas2r_log(ESAS2R_LOG_CRIT, + "driver will not be loaded because no ATTO " + "%s devices were found", + ESAS2R_DRVR_NAME); + return -1; + } else { + esas2r_log(ESAS2R_LOG_INFO, "found %d adapters", + found_adapters); + } + + return 0; +} + +/* Handle ioctl calls to "/proc/scsi/esas2r/ATTOnode" */ +static const struct file_operations esas2r_proc_fops = { + .compat_ioctl = esas2r_proc_ioctl, + .unlocked_ioctl = esas2r_proc_ioctl, +}; + +static struct Scsi_Host *esas2r_proc_host; +static int esas2r_proc_major; + +long esas2r_proc_ioctl(struct file *fp, unsigned int cmd, unsigned long arg) +{ + return esas2r_ioctl_handler(esas2r_proc_host->hostdata, + (int)cmd, (void __user *)arg); +} + +static void __exit esas2r_exit(void) +{ + esas2r_log(ESAS2R_LOG_INFO, "%s called", __func__); + + if (esas2r_proc_major > 0) { + 
esas2r_log(ESAS2R_LOG_INFO, "unregister proc"); + + remove_proc_entry(ATTONODE_NAME, + esas2r_proc_host->hostt->proc_dir); + unregister_chrdev(esas2r_proc_major, ESAS2R_DRVR_NAME); + + esas2r_proc_major = 0; + } + + esas2r_log(ESAS2R_LOG_INFO, "pci_unregister_driver() called"); + + pci_unregister_driver(&esas2r_pci_driver); +} + +int esas2r_show_info(struct seq_file *m, struct Scsi_Host *sh) +{ + struct esas2r_adapter *a = (struct esas2r_adapter *)sh->hostdata; + + struct esas2r_target *t; + int dev_count = 0; + + esas2r_log(ESAS2R_LOG_DEBG, "esas2r_show_info (%p,%d)", m, sh->host_no); + + seq_printf(m, ESAS2R_LONGNAME "\n" + "Driver version: "ESAS2R_VERSION_STR "\n" + "Flash version: %s\n" + "Firmware version: %s\n" + "Copyright "ESAS2R_COPYRIGHT_YEARS "\n" + "http://www.attotech.com\n" + "\n", + a->flash_rev, + a->fw_rev[0] ? a->fw_rev : "(none)"); + + + seq_printf(m, "Adapter information:\n" + "--------------------\n" + "Model: %s\n" + "SAS address: %02X%02X%02X%02X:%02X%02X%02X%02X\n", + esas2r_get_model_name(a), + a->nvram->sas_addr[0], + a->nvram->sas_addr[1], + a->nvram->sas_addr[2], + a->nvram->sas_addr[3], + a->nvram->sas_addr[4], + a->nvram->sas_addr[5], + a->nvram->sas_addr[6], + a->nvram->sas_addr[7]); + + seq_puts(m, "\n" + "Discovered devices:\n" + "\n" + " # Target ID\n" + "---------------\n"); + + for (t = a->targetdb; t < a->targetdb_end; t++) + if (t->buffered_target_state == TS_PRESENT) { + seq_printf(m, " %3d %3d\n", + ++dev_count, + (u16)(uintptr_t)(t - a->targetdb)); + } + + if (dev_count == 0) + seq_puts(m, "none\n"); + + seq_puts(m, "\n"); + return 0; + +} + +int esas2r_release(struct Scsi_Host *sh) +{ + esas2r_log_dev(ESAS2R_LOG_INFO, &(sh->shost_gendev), + "esas2r_release() called"); + + esas2r_cleanup(sh); + if (sh->irq) + free_irq(sh->irq, NULL); + scsi_unregister(sh); + return 0; +} + +const char *esas2r_info(struct Scsi_Host *sh) +{ + struct esas2r_adapter *a = (struct esas2r_adapter *)sh->hostdata; + static char esas2r_info_str[512]; + + esas2r_log_dev(ESAS2R_LOG_INFO, &(sh->shost_gendev), + "esas2r_info() called"); + + /* + * if we haven't done so already, register as a char driver + * and stick a node under "/proc/scsi/esas2r/ATTOnode" + */ + + if (esas2r_proc_major <= 0) { + esas2r_proc_host = sh; + + esas2r_proc_major = register_chrdev(0, ESAS2R_DRVR_NAME, + &esas2r_proc_fops); + + esas2r_log_dev(ESAS2R_LOG_DEBG, &(sh->shost_gendev), + "register_chrdev (major %d)", + esas2r_proc_major); + + if (esas2r_proc_major > 0) { + struct proc_dir_entry *pde; + + pde = proc_create(ATTONODE_NAME, 0, + sh->hostt->proc_dir, + &esas2r_proc_fops); + + if (!pde) { + esas2r_log_dev(ESAS2R_LOG_WARN, + &(sh->shost_gendev), + "failed to create_proc_entry"); + esas2r_proc_major = -1; + } + } + } + + sprintf(esas2r_info_str, + ESAS2R_LONGNAME " (bus 0x%02X, device 0x%02X, IRQ 0x%02X)" + " driver version: "ESAS2R_VERSION_STR " firmware version: " + "%s\n", + a->pcid->bus->number, a->pcid->devfn, a->pcid->irq, + a->fw_rev[0] ? a->fw_rev : "(none)"); + + return esas2r_info_str; +} + +/* Callback for building a request scatter/gather list */ +static u32 get_physaddr_from_sgc(struct esas2r_sg_context *sgc, u64 *addr) +{ + u32 len; + + if (likely(sgc->cur_offset == sgc->exp_offset)) { + /* + * the normal case: caller used all bytes from previous call, so + * expected offset is the same as the current offset. 
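+		 * in that case we simply advance to the next scatterlist
+		 * entry and hand back its full DMA address and length.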
+		 */
+
+		if (sgc->sgel_count < sgc->num_sgel) {
+			/* retrieve next segment, except for first time */
+			if (sgc->exp_offset > (u8 *)0) {
+				/* advance current segment */
+				sgc->cur_sgel = sg_next(sgc->cur_sgel);
+				++(sgc->sgel_count);
+			}
+
+			len = sg_dma_len(sgc->cur_sgel);
+			(*addr) = sg_dma_address(sgc->cur_sgel);
+
+			/* save the total # bytes returned to caller so far */
+			sgc->exp_offset += len;
+
+		} else {
+			len = 0;
+		}
+	} else if (sgc->cur_offset < sgc->exp_offset) {
+		/*
+		 * caller did not use all bytes from previous call. need to
+		 * compute the address based on current segment.
+		 */
+
+		len = sg_dma_len(sgc->cur_sgel);
+		(*addr) = sg_dma_address(sgc->cur_sgel);
+
+		sgc->exp_offset -= len;
+
+		/* calculate PA based on prev segment address and offsets */
+		*addr = *addr +
+			(sgc->cur_offset - sgc->exp_offset);
+
+		sgc->exp_offset += len;
+
+		/* re-calculate length based on offset */
+		len = lower_32_bits(
+			sgc->exp_offset - sgc->cur_offset);
+	} else {   /* if ( sgc->cur_offset > sgc->exp_offset ) */
+		/*
+		 * we don't expect the caller to skip ahead.
+		 * cur_offset will never exceed the len we return
+		 */
+		len = 0;
+	}
+
+	return len;
+}
+
+int esas2r_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
+{
+	struct esas2r_adapter *a =
+		(struct esas2r_adapter *)cmd->device->host->hostdata;
+	struct esas2r_request *rq;
+	struct esas2r_sg_context sgc;
+	unsigned bufflen;
+
+	/* Assume success, if it fails we will fix the result later. */
+	cmd->result = DID_OK << 16;
+
+	if (unlikely(a->flags & AF_DEGRADED_MODE)) {
+		cmd->result = DID_NO_CONNECT << 16;
+		cmd->scsi_done(cmd);
+		return 0;
+	}
+
+	rq = esas2r_alloc_request(a);
+	if (unlikely(rq == NULL)) {
+		esas2r_debug("esas2r_alloc_request failed");
+		return SCSI_MLQUEUE_HOST_BUSY;
+	}
+
+	rq->cmd = cmd;
+	bufflen = scsi_bufflen(cmd);
+
+	if (likely(bufflen != 0)) {
+		if (cmd->sc_data_direction == DMA_TO_DEVICE)
+			rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_WRD);
+		else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
+			rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_RDD);
+	}
+
+	memcpy(rq->vrq->scsi.cdb, cmd->cmnd, cmd->cmd_len);
+	rq->vrq->scsi.length = cpu_to_le32(bufflen);
+	rq->target_id = cmd->device->id;
+	rq->vrq->scsi.flags |= cpu_to_le32(cmd->device->lun);
+	rq->sense_buf = cmd->sense_buffer;
+	rq->sense_len = SCSI_SENSE_BUFFERSIZE;
+
+	esas2r_sgc_init(&sgc, a, rq, NULL);
+
+	sgc.length = bufflen;
+	sgc.cur_offset = NULL;
+
+	sgc.cur_sgel = scsi_sglist(cmd);
+	sgc.exp_offset = NULL;
+	sgc.num_sgel = scsi_dma_map(cmd);
+	sgc.sgel_count = 0;
+
+	if (unlikely(sgc.num_sgel < 0)) {
+		esas2r_free_request(a, rq);
+		return SCSI_MLQUEUE_HOST_BUSY;
+	}
+
+	sgc.get_phys_addr = (PGETPHYSADDR)get_physaddr_from_sgc;
+
+	if (unlikely(!esas2r_build_sg_list(a, rq, &sgc))) {
+		scsi_dma_unmap(cmd);
+		esas2r_free_request(a, rq);
+		return SCSI_MLQUEUE_HOST_BUSY;
+	}
+
+	esas2r_debug("start request %p to %d:%d\n", rq, (int)cmd->device->id,
+		     (int)cmd->device->lun);
+
+	esas2r_start_request(a, rq);
+
+	return 0;
+}
+
+static void complete_task_management_request(struct esas2r_adapter *a,
+					     struct esas2r_request *rq)
+{
+	(*rq->task_management_status_ptr) = rq->req_stat;
+	esas2r_free_request(a, rq);
+}
+
+/**
+ * Searches the specified queue for the command to abort.
+ *
+ * @param [in] a the adapter containing the queues
+ * @param [out] abort_request receives the abort request to send to the
+ *              firmware, if one had to be allocated
+ * @param [in] cmd the SCSI command to abort
+ * @param [in] queue the queue to search
+ *
+ * @return 0 on failure, 1 if command was not found, 2 if command was found
+ */
+static int esas2r_check_active_queue(struct esas2r_adapter *a,
+				     struct esas2r_request **abort_request,
+				     struct scsi_cmnd *cmd,
+				     struct list_head *queue)
+{
+	bool found = false;
+	struct esas2r_request *ar = *abort_request;
+	struct esas2r_request *rq;
+	struct list_head *element, *next;
+
+	list_for_each_safe(element, next, queue) {
+
+		rq = list_entry(element, struct esas2r_request, req_list);
+
+		if (rq->cmd == cmd) {
+
+			/* Found the request.  See what to do with it. */
+			if (queue == &a->active_list) {
+				/*
+				 * We are searching the active queue, which
+				 * means that we need to send an abort request
+				 * to the firmware.
+				 */
+				ar = esas2r_alloc_request(a);
+				if (ar == NULL) {
+					esas2r_log_dev(ESAS2R_LOG_WARN,
+						       &(a->host->shost_gendev),
+						       "unable to allocate an abort request for cmd %p",
+						       cmd);
+					return 0; /* Failure */
+				}
+
+				/*
+				 * Task management request must be formatted
+				 * with a lock held.
+				 */
+				ar->sense_len = 0;
+				ar->vrq->scsi.length = 0;
+				ar->target_id = rq->target_id;
+				ar->vrq->scsi.flags |= cpu_to_le32(
+					(u8)le32_to_cpu(rq->vrq->scsi.flags));
+
+				memset(ar->vrq->scsi.cdb, 0,
+				       sizeof(ar->vrq->scsi.cdb));
+
+				ar->vrq->scsi.flags |= cpu_to_le32(
+					FCP_CMND_TRM);
+				ar->vrq->scsi.u.abort_handle =
+					rq->vrq->scsi.handle;
+
+				/* hand the abort request back to the caller */
+				*abort_request = ar;
+			} else {
+				/*
+				 * The request is pending but not active on
+				 * the firmware.  Just free it now and we'll
+				 * report the successful abort below.
+				 */
+				list_del_init(&rq->req_list);
+				esas2r_free_request(a, rq);
+			}
+
+			found = true;
+			break;
+		}
+
+	}
+
+	if (!found)
+		return 1;       /* Not found */
+
+	return 2;               /* found */
+}
+
+int esas2r_eh_abort(struct scsi_cmnd *cmd)
+{
+	struct esas2r_adapter *a =
+		(struct esas2r_adapter *)cmd->device->host->hostdata;
+	struct esas2r_request *abort_request = NULL;
+	unsigned long flags;
+	struct list_head *queue;
+	int result;
+
+	esas2r_log(ESAS2R_LOG_INFO, "eh_abort (%p)", cmd);
+
+	if (a->flags & AF_DEGRADED_MODE) {
+		cmd->result = DID_ABORT << 16;
+
+		scsi_set_resid(cmd, 0);
+
+		cmd->scsi_done(cmd);
+
+		return 0;
+	}
+
+	spin_lock_irqsave(&a->queue_lock, flags);
+
+	/*
+	 * Run through the defer and active queues looking for the request
+	 * to abort.
+	 */
+
+	queue = &a->defer_list;
+
+check_active_queue:
+
+	result = esas2r_check_active_queue(a, &abort_request, cmd, queue);
+
+	if (!result) {
+		spin_unlock_irqrestore(&a->queue_lock, flags);
+		return FAILED;
+	} else if (result == 2 && (queue == &a->defer_list)) {
+		queue = &a->active_list;
+		goto check_active_queue;
+	}
+
+	spin_unlock_irqrestore(&a->queue_lock, flags);
+
+	if (abort_request) {
+		u8 task_management_status = RS_PENDING;
+
+		/*
+		 * the request is already active, so we need to tell
+		 * the firmware to abort it and wait for the response.
+		 */
+
+		abort_request->comp_cb = complete_task_management_request;
+		abort_request->task_management_status_ptr =
+			&task_management_status;
+
+		esas2r_start_request(a, abort_request);
+
+		if (atomic_read(&a->disable_cnt) == 0)
+			esas2r_do_deferred_processes(a);
+
+		while (task_management_status == RS_PENDING)
+			msleep(10);
+
+		/*
+		 * Once we get here, the original request will have been
+		 * completed by the firmware and the abort request will have
+		 * been cleaned up.  we're done!
+		 */
+
+		return SUCCESS;
+	}
+
+	/*
+	 * If we get here, either we found the inactive request and
+	 * freed it, or we didn't find it at all.  Either way, success!
+ */ + + cmd->result = DID_ABORT << 16; + + scsi_set_resid(cmd, 0); + + cmd->scsi_done(cmd); + + return SUCCESS; +} + +static int esas2r_host_bus_reset(struct scsi_cmnd *cmd, bool host_reset) +{ + struct esas2r_adapter *a = + (struct esas2r_adapter *)cmd->device->host->hostdata; + + if (a->flags & AF_DEGRADED_MODE) + return FAILED; + + if (host_reset) + esas2r_reset_adapter(a); + else + esas2r_reset_bus(a); + + /* above call sets the AF_OS_RESET flag. wait for it to clear. */ + + while (a->flags & AF_OS_RESET) { + msleep(10); + + if (a->flags & AF_DEGRADED_MODE) + return FAILED; + } + + if (a->flags & AF_DEGRADED_MODE) + return FAILED; + + return SUCCESS; +} + +int esas2r_host_reset(struct scsi_cmnd *cmd) +{ + esas2r_log(ESAS2R_LOG_INFO, "host_reset (%p)", cmd); + + return esas2r_host_bus_reset(cmd, true); +} + +int esas2r_bus_reset(struct scsi_cmnd *cmd) +{ + esas2r_log(ESAS2R_LOG_INFO, "bus_reset (%p)", cmd); + + return esas2r_host_bus_reset(cmd, false); +} + +static int esas2r_dev_targ_reset(struct scsi_cmnd *cmd, bool target_reset) +{ + struct esas2r_adapter *a = + (struct esas2r_adapter *)cmd->device->host->hostdata; + struct esas2r_request *rq; + u8 task_management_status = RS_PENDING; + bool completed; + + if (a->flags & AF_DEGRADED_MODE) + return FAILED; + +retry: + rq = esas2r_alloc_request(a); + if (rq == NULL) { + if (target_reset) { + esas2r_log(ESAS2R_LOG_CRIT, + "unable to allocate a request for a " + "target reset (%d)!", + cmd->device->id); + } else { + esas2r_log(ESAS2R_LOG_CRIT, + "unable to allocate a request for a " + "device reset (%d:%d)!", + cmd->device->id, + cmd->device->lun); + } + + + return FAILED; + } + + rq->target_id = cmd->device->id; + rq->vrq->scsi.flags |= cpu_to_le32(cmd->device->lun); + rq->req_stat = RS_PENDING; + + rq->comp_cb = complete_task_management_request; + rq->task_management_status_ptr = &task_management_status; + + if (target_reset) { + esas2r_debug("issuing target reset (%p) to id %d", rq, + cmd->device->id); + completed = esas2r_send_task_mgmt(a, rq, 0x20); + } else { + esas2r_debug("issuing device reset (%p) to id %d lun %d", rq, + cmd->device->id, cmd->device->lun); + completed = esas2r_send_task_mgmt(a, rq, 0x10); + } + + if (completed) { + /* Task management cmd completed right away, need to free it. */ + + esas2r_free_request(a, rq); + } else { + /* + * Wait for firmware to complete the request. Completion + * callback will free it. + */ + while (task_management_status == RS_PENDING) + msleep(10); + } + + if (a->flags & AF_DEGRADED_MODE) + return FAILED; + + if (task_management_status == RS_BUSY) { + /* + * Busy, probably because we are flashing. Wait a bit and + * try again. 
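+		 * (this path retries indefinitely for as long as the
+		 * firmware keeps returning RS_BUSY.)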
+ */ + msleep(100); + goto retry; + } + + return SUCCESS; +} + +int esas2r_device_reset(struct scsi_cmnd *cmd) +{ + esas2r_log(ESAS2R_LOG_INFO, "device_reset (%p)", cmd); + + return esas2r_dev_targ_reset(cmd, false); + +} + +int esas2r_target_reset(struct scsi_cmnd *cmd) +{ + esas2r_log(ESAS2R_LOG_INFO, "target_reset (%p)", cmd); + + return esas2r_dev_targ_reset(cmd, true); +} + +int esas2r_change_queue_depth(struct scsi_device *dev, int depth, int reason) +{ + esas2r_log(ESAS2R_LOG_INFO, "change_queue_depth %p, %d", dev, depth); + + scsi_adjust_queue_depth(dev, scsi_get_tag_type(dev), depth); + + return dev->queue_depth; +} + +int esas2r_change_queue_type(struct scsi_device *dev, int type) +{ + esas2r_log(ESAS2R_LOG_INFO, "change_queue_type %p, %d", dev, type); + + if (dev->tagged_supported) { + scsi_set_tag_type(dev, type); + + if (type) + scsi_activate_tcq(dev, dev->queue_depth); + else + scsi_deactivate_tcq(dev, dev->queue_depth); + } else { + type = 0; + } + + return type; +} + +int esas2r_slave_alloc(struct scsi_device *dev) +{ + return 0; +} + +int esas2r_slave_configure(struct scsi_device *dev) +{ + esas2r_log_dev(ESAS2R_LOG_INFO, &(dev->sdev_gendev), + "esas2r_slave_configure()"); + + if (dev->tagged_supported) { + scsi_set_tag_type(dev, MSG_SIMPLE_TAG); + scsi_activate_tcq(dev, cmd_per_lun); + } else { + scsi_set_tag_type(dev, 0); + scsi_deactivate_tcq(dev, cmd_per_lun); + } + + return 0; +} + +void esas2r_slave_destroy(struct scsi_device *dev) +{ + esas2r_log_dev(ESAS2R_LOG_INFO, &(dev->sdev_gendev), + "esas2r_slave_destroy()"); +} + +void esas2r_log_request_failure(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + u8 reqstatus = rq->req_stat; + + if (reqstatus == RS_SUCCESS) + return; + + if (rq->vrq->scsi.function == VDA_FUNC_SCSI) { + if (reqstatus == RS_SCSI_ERROR) { + if (rq->func_rsp.scsi_rsp.sense_len >= 13) { + esas2r_log(ESAS2R_LOG_WARN, + "request failure - SCSI error %x ASC:%x ASCQ:%x CDB:%x", + rq->sense_buf[2], rq->sense_buf[12], + rq->sense_buf[13], + rq->vrq->scsi.cdb[0]); + } else { + esas2r_log(ESAS2R_LOG_WARN, + "request failure - SCSI error CDB:%x\n", + rq->vrq->scsi.cdb[0]); + } + } else if ((rq->vrq->scsi.cdb[0] != INQUIRY + && rq->vrq->scsi.cdb[0] != REPORT_LUNS) + || (reqstatus != RS_SEL + && reqstatus != RS_SEL2)) { + if ((reqstatus == RS_UNDERRUN) && + (rq->vrq->scsi.cdb[0] == INQUIRY)) { + /* Don't log inquiry underruns */ + } else { + esas2r_log(ESAS2R_LOG_WARN, + "request failure - cdb:%x reqstatus:%d target:%d", + rq->vrq->scsi.cdb[0], reqstatus, + rq->target_id); + } + } + } +} + +void esas2r_wait_request(struct esas2r_adapter *a, struct esas2r_request *rq) +{ + u32 starttime; + u32 timeout; + + starttime = jiffies_to_msecs(jiffies); + timeout = rq->timeout ? 
rq->timeout : 5000; + + while (true) { + esas2r_polled_interrupt(a); + + if (rq->req_stat != RS_STARTED) + break; + + schedule_timeout_interruptible(msecs_to_jiffies(100)); + + if ((jiffies_to_msecs(jiffies) - starttime) > timeout) { + esas2r_hdebug("request TMO"); + esas2r_bugon(); + + rq->req_stat = RS_TIMEOUT; + + esas2r_local_reset_adapter(a); + return; + } + } +} + +u32 esas2r_map_data_window(struct esas2r_adapter *a, u32 addr_lo) +{ + u32 offset = addr_lo & (MW_DATA_WINDOW_SIZE - 1); + u32 base = addr_lo & -(signed int)MW_DATA_WINDOW_SIZE; + + if (a->window_base != base) { + esas2r_write_register_dword(a, MVR_PCI_WIN1_REMAP, + base | MVRPW1R_ENABLE); + esas2r_flush_register_dword(a, MVR_PCI_WIN1_REMAP); + a->window_base = base; + } + + return offset; +} + +/* Read a block of data from chip memory */ +bool esas2r_read_mem_block(struct esas2r_adapter *a, + void *to, + u32 from, + u32 size) +{ + u8 *end = (u8 *)to; + + while (size) { + u32 len; + u32 offset; + u32 iatvr; + + iatvr = (from & -(signed int)MW_DATA_WINDOW_SIZE); + + esas2r_map_data_window(a, iatvr); + + offset = from & (MW_DATA_WINDOW_SIZE - 1); + len = size; + + if (len > MW_DATA_WINDOW_SIZE - offset) + len = MW_DATA_WINDOW_SIZE - offset; + + from += len; + size -= len; + + while (len--) { + *end++ = esas2r_read_data_byte(a, offset); + offset++; + } + } + + return true; +} + +void esas2r_nuxi_mgt_data(u8 function, void *data) +{ + struct atto_vda_grp_info *g; + struct atto_vda_devinfo *d; + struct atto_vdapart_info *p; + struct atto_vda_dh_info *h; + struct atto_vda_metrics_info *m; + struct atto_vda_schedule_info *s; + struct atto_vda_buzzer_info *b; + u8 i; + + switch (function) { + case VDAMGT_BUZZER_INFO: + case VDAMGT_BUZZER_SET: + + b = (struct atto_vda_buzzer_info *)data; + + b->duration = le32_to_cpu(b->duration); + break; + + case VDAMGT_SCHEDULE_INFO: + case VDAMGT_SCHEDULE_EVENT: + + s = (struct atto_vda_schedule_info *)data; + + s->id = le32_to_cpu(s->id); + + break; + + case VDAMGT_DEV_INFO: + case VDAMGT_DEV_CLEAN: + case VDAMGT_DEV_PT_INFO: + case VDAMGT_DEV_FEATURES: + case VDAMGT_DEV_PT_FEATURES: + case VDAMGT_DEV_OPERATION: + + d = (struct atto_vda_devinfo *)data; + + d->capacity = le64_to_cpu(d->capacity); + d->block_size = le32_to_cpu(d->block_size); + d->ses_dev_index = le16_to_cpu(d->ses_dev_index); + d->target_id = le16_to_cpu(d->target_id); + d->lun = le16_to_cpu(d->lun); + d->features = le16_to_cpu(d->features); + break; + + case VDAMGT_GRP_INFO: + case VDAMGT_GRP_CREATE: + case VDAMGT_GRP_DELETE: + case VDAMGT_ADD_STORAGE: + case VDAMGT_MEMBER_ADD: + case VDAMGT_GRP_COMMIT: + case VDAMGT_GRP_REBUILD: + case VDAMGT_GRP_COMMIT_INIT: + case VDAMGT_QUICK_RAID: + case VDAMGT_GRP_FEATURES: + case VDAMGT_GRP_COMMIT_INIT_AUTOMAP: + case VDAMGT_QUICK_RAID_INIT_AUTOMAP: + case VDAMGT_SPARE_LIST: + case VDAMGT_SPARE_ADD: + case VDAMGT_SPARE_REMOVE: + case VDAMGT_LOCAL_SPARE_ADD: + case VDAMGT_GRP_OPERATION: + + g = (struct atto_vda_grp_info *)data; + + g->capacity = le64_to_cpu(g->capacity); + g->block_size = le32_to_cpu(g->block_size); + g->interleave = le32_to_cpu(g->interleave); + g->features = le16_to_cpu(g->features); + + for (i = 0; i < 32; i++) + g->members[i] = le16_to_cpu(g->members[i]); + + break; + + case VDAMGT_PART_INFO: + case VDAMGT_PART_MAP: + case VDAMGT_PART_UNMAP: + case VDAMGT_PART_AUTOMAP: + case VDAMGT_PART_SPLIT: + case VDAMGT_PART_MERGE: + + p = (struct atto_vdapart_info *)data; + + p->part_size = le64_to_cpu(p->part_size); + p->start_lba = le32_to_cpu(p->start_lba); + p->block_size = 
le32_to_cpu(p->block_size); + p->target_id = le16_to_cpu(p->target_id); + break; + + case VDAMGT_DEV_HEALTH_REQ: + + h = (struct atto_vda_dh_info *)data; + + h->med_defect_cnt = le32_to_cpu(h->med_defect_cnt); + h->info_exc_cnt = le32_to_cpu(h->info_exc_cnt); + break; + + case VDAMGT_DEV_METRICS: + + m = (struct atto_vda_metrics_info *)data; + + for (i = 0; i < 32; i++) + m->dev_indexes[i] = le16_to_cpu(m->dev_indexes[i]); + + break; + + default: + break; + } +} + +void esas2r_nuxi_cfg_data(u8 function, void *data) +{ + struct atto_vda_cfg_init *ci; + + switch (function) { + case VDA_CFG_INIT: + case VDA_CFG_GET_INIT: + case VDA_CFG_GET_INIT2: + + ci = (struct atto_vda_cfg_init *)data; + + ci->date_time.year = le16_to_cpu(ci->date_time.year); + ci->sgl_page_size = le32_to_cpu(ci->sgl_page_size); + ci->vda_version = le32_to_cpu(ci->vda_version); + ci->epoch_time = le32_to_cpu(ci->epoch_time); + ci->ioctl_tunnel = le32_to_cpu(ci->ioctl_tunnel); + ci->num_targets_backend = le32_to_cpu(ci->num_targets_backend); + break; + + default: + break; + } +} + +void esas2r_nuxi_ae_data(union atto_vda_ae *ae) +{ + struct atto_vda_ae_raid *r = &ae->raid; + struct atto_vda_ae_lu *l = &ae->lu; + + switch (ae->hdr.bytype) { + case VDAAE_HDR_TYPE_RAID: + + r->dwflags = le32_to_cpu(r->dwflags); + break; + + case VDAAE_HDR_TYPE_LU: + + l->dwevent = le32_to_cpu(l->dwevent); + l->wphys_target_id = le16_to_cpu(l->wphys_target_id); + l->id.tgtlun.wtarget_id = le16_to_cpu(l->id.tgtlun.wtarget_id); + + if (l->hdr.bylength >= offsetof(struct atto_vda_ae_lu, id) + + sizeof(struct atto_vda_ae_lu_tgt_lun_raid)) { + l->id.tgtlun_raid.dwinterleave + = le32_to_cpu(l->id.tgtlun_raid.dwinterleave); + l->id.tgtlun_raid.dwblock_size + = le32_to_cpu(l->id.tgtlun_raid.dwblock_size); + } + + break; + + case VDAAE_HDR_TYPE_DISK: + default: + break; + } +} + +void esas2r_free_request(struct esas2r_adapter *a, struct esas2r_request *rq) +{ + unsigned long flags; + + esas2r_rq_destroy_request(rq, a); + spin_lock_irqsave(&a->request_lock, flags); + list_add(&rq->comp_list, &a->avail_request); + spin_unlock_irqrestore(&a->request_lock, flags); +} + +struct esas2r_request *esas2r_alloc_request(struct esas2r_adapter *a) +{ + struct esas2r_request *rq; + unsigned long flags; + + spin_lock_irqsave(&a->request_lock, flags); + + if (unlikely(list_empty(&a->avail_request))) { + spin_unlock_irqrestore(&a->request_lock, flags); + return NULL; + } + + rq = list_first_entry(&a->avail_request, struct esas2r_request, + comp_list); + list_del(&rq->comp_list); + spin_unlock_irqrestore(&a->request_lock, flags); + esas2r_rq_init_request(rq, a); + + return rq; + +} + +void esas2r_complete_request_cb(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + esas2r_debug("completing request %p\n", rq); + + scsi_dma_unmap(rq->cmd); + + if (unlikely(rq->req_stat != RS_SUCCESS)) { + esas2r_debug("[%x STATUS %x:%x (%x)]", rq->target_id, + rq->req_stat, + rq->func_rsp.scsi_rsp.scsi_stat, + rq->cmd); + + rq->cmd->result = + ((esas2r_req_status_to_error(rq->req_stat) << 16) + | (rq->func_rsp.scsi_rsp.scsi_stat & STATUS_MASK)); + + if (rq->req_stat == RS_UNDERRUN) + scsi_set_resid(rq->cmd, + le32_to_cpu(rq->func_rsp.scsi_rsp. + residual_length)); + else + scsi_set_resid(rq->cmd, 0); + } + + rq->cmd->scsi_done(rq->cmd); + + esas2r_free_request(a, rq); +} + +/* Run tasklet to handle stuff outside of interrupt context. 
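 * this covers deferred timer ticks and pending interrupt events; the
 * tasklet reschedules itself if more work arrives while it runs.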
*/ +void esas2r_adapter_tasklet(unsigned long context) +{ + struct esas2r_adapter *a = (struct esas2r_adapter *)context; + + if (unlikely(a->flags2 & AF2_TIMER_TICK)) { + esas2r_lock_clear_flags(&a->flags2, AF2_TIMER_TICK); + esas2r_timer_tick(a); + } + + if (likely(a->flags2 & AF2_INT_PENDING)) { + esas2r_lock_clear_flags(&a->flags2, AF2_INT_PENDING); + esas2r_adapter_interrupt(a); + } + + if (esas2r_is_tasklet_pending(a)) + esas2r_do_tasklet_tasks(a); + + if (esas2r_is_tasklet_pending(a) + || (a->flags2 & AF2_INT_PENDING) + || (a->flags2 & AF2_TIMER_TICK)) { + esas2r_lock_clear_flags(&a->flags, AF_TASKLET_SCHEDULED); + esas2r_schedule_tasklet(a); + } else { + esas2r_lock_clear_flags(&a->flags, AF_TASKLET_SCHEDULED); + } +} + +static void esas2r_timer_callback(unsigned long context); + +void esas2r_kickoff_timer(struct esas2r_adapter *a) +{ + init_timer(&a->timer); + + a->timer.function = esas2r_timer_callback; + a->timer.data = (unsigned long)a; + a->timer.expires = jiffies + + msecs_to_jiffies(100); + + add_timer(&a->timer); +} + +static void esas2r_timer_callback(unsigned long context) +{ + struct esas2r_adapter *a = (struct esas2r_adapter *)context; + + esas2r_lock_set_flags(&a->flags2, AF2_TIMER_TICK); + + esas2r_schedule_tasklet(a); + + esas2r_kickoff_timer(a); +} + +/* + * Firmware events need to be handled outside of interrupt context + * so we schedule a delayed_work to handle them. + */ + +static void +esas2r_free_fw_event(struct esas2r_fw_event_work *fw_event) +{ + unsigned long flags; + struct esas2r_adapter *a = fw_event->a; + + spin_lock_irqsave(&a->fw_event_lock, flags); + list_del(&fw_event->list); + kfree(fw_event); + spin_unlock_irqrestore(&a->fw_event_lock, flags); +} + +void +esas2r_fw_event_off(struct esas2r_adapter *a) +{ + unsigned long flags; + + spin_lock_irqsave(&a->fw_event_lock, flags); + a->fw_events_off = 1; + spin_unlock_irqrestore(&a->fw_event_lock, flags); +} + +void +esas2r_fw_event_on(struct esas2r_adapter *a) +{ + unsigned long flags; + + spin_lock_irqsave(&a->fw_event_lock, flags); + a->fw_events_off = 0; + spin_unlock_irqrestore(&a->fw_event_lock, flags); +} + +static void esas2r_add_device(struct esas2r_adapter *a, u16 target_id) +{ + int ret; + struct scsi_device *scsi_dev; + + scsi_dev = scsi_device_lookup(a->host, 0, target_id, 0); + + if (scsi_dev) { + esas2r_log_dev( + ESAS2R_LOG_WARN, + &(scsi_dev-> + sdev_gendev), + "scsi device already exists at id %d", target_id); + + scsi_device_put(scsi_dev); + } else { + esas2r_log_dev( + ESAS2R_LOG_INFO, + &(a->host-> + shost_gendev), + "scsi_add_device() called for 0:%d:0", + target_id); + + ret = scsi_add_device(a->host, 0, target_id, 0); + if (ret) { + esas2r_log_dev( + ESAS2R_LOG_CRIT, + &(a->host-> + shost_gendev), + "scsi_add_device failed with %d for id %d", + ret, target_id); + } + } +} + +static void esas2r_remove_device(struct esas2r_adapter *a, u16 target_id) +{ + struct scsi_device *scsi_dev; + + scsi_dev = scsi_device_lookup(a->host, 0, target_id, 0); + + if (scsi_dev) { + scsi_device_set_state(scsi_dev, SDEV_OFFLINE); + + esas2r_log_dev( + ESAS2R_LOG_INFO, + &(scsi_dev-> + sdev_gendev), + "scsi_remove_device() called for 0:%d:0", + target_id); + + scsi_remove_device(scsi_dev); + + esas2r_log_dev( + ESAS2R_LOG_INFO, + &(scsi_dev-> + sdev_gendev), + "scsi_device_put() called"); + + scsi_device_put(scsi_dev); + } else { + esas2r_log_dev( + ESAS2R_LOG_WARN, + &(a->host->shost_gendev), + "no target found at id %d", + target_id); + } +} + +/* + * Sends a firmware asynchronous event to anyone who 
happens to be + * listening on the defined ATTO VDA event ports. + */ +static void esas2r_send_ae_event(struct esas2r_fw_event_work *fw_event) +{ + struct esas2r_vda_ae *ae = (struct esas2r_vda_ae *)fw_event->data; + char *type; + + switch (ae->vda_ae.hdr.bytype) { + case VDAAE_HDR_TYPE_RAID: + type = "RAID group state change"; + break; + + case VDAAE_HDR_TYPE_LU: + type = "Mapped destination LU change"; + break; + + case VDAAE_HDR_TYPE_DISK: + type = "Physical disk inventory change"; + break; + + case VDAAE_HDR_TYPE_RESET: + type = "Firmware reset"; + break; + + case VDAAE_HDR_TYPE_LOG_INFO: + type = "Event Log message (INFO level)"; + break; + + case VDAAE_HDR_TYPE_LOG_WARN: + type = "Event Log message (WARN level)"; + break; + + case VDAAE_HDR_TYPE_LOG_CRIT: + type = "Event Log message (CRIT level)"; + break; + + case VDAAE_HDR_TYPE_LOG_FAIL: + type = "Event Log message (FAIL level)"; + break; + + case VDAAE_HDR_TYPE_NVC: + type = "NVCache change"; + break; + + case VDAAE_HDR_TYPE_TLG_INFO: + type = "Time stamped log message (INFO level)"; + break; + + case VDAAE_HDR_TYPE_TLG_WARN: + type = "Time stamped log message (WARN level)"; + break; + + case VDAAE_HDR_TYPE_TLG_CRIT: + type = "Time stamped log message (CRIT level)"; + break; + + case VDAAE_HDR_TYPE_PWRMGT: + type = "Power management"; + break; + + case VDAAE_HDR_TYPE_MUTE: + type = "Mute button pressed"; + break; + + case VDAAE_HDR_TYPE_DEV: + type = "Device attribute change"; + break; + + default: + type = "Unknown"; + break; + } + + esas2r_log(ESAS2R_LOG_WARN, + "An async event of type \"%s\" was received from the firmware. The event contents are:", + type); + esas2r_log_hexdump(ESAS2R_LOG_WARN, &ae->vda_ae, + ae->vda_ae.hdr.bylength); + +} + +static void +esas2r_firmware_event_work(struct work_struct *work) +{ + struct esas2r_fw_event_work *fw_event = + container_of(work, struct esas2r_fw_event_work, work.work); + + struct esas2r_adapter *a = fw_event->a; + + u16 target_id = *(u16 *)&fw_event->data[0]; + + if (a->fw_events_off) + goto done; + + switch (fw_event->type) { + case fw_event_null: + break; /* do nothing */ + + case fw_event_lun_change: + esas2r_remove_device(a, target_id); + esas2r_add_device(a, target_id); + break; + + case fw_event_present: + esas2r_add_device(a, target_id); + break; + + case fw_event_not_present: + esas2r_remove_device(a, target_id); + break; + + case fw_event_vda_ae: + esas2r_send_ae_event(fw_event); + break; + } + +done: + esas2r_free_fw_event(fw_event); +} + +void esas2r_queue_fw_event(struct esas2r_adapter *a, + enum fw_event_type type, + void *data, + int data_sz) +{ + struct esas2r_fw_event_work *fw_event; + unsigned long flags; + + fw_event = kzalloc(sizeof(struct esas2r_fw_event_work), GFP_ATOMIC); + if (!fw_event) { + esas2r_log(ESAS2R_LOG_WARN, + "esas2r_queue_fw_event failed to alloc"); + return; + } + + if (type == fw_event_vda_ae) { + struct esas2r_vda_ae *ae = + (struct esas2r_vda_ae *)fw_event->data; + + ae->signature = ESAS2R_VDA_EVENT_SIG; + ae->bus_number = a->pcid->bus->number; + ae->devfn = a->pcid->devfn; + memcpy(&ae->vda_ae, data, sizeof(ae->vda_ae)); + } else { + memcpy(fw_event->data, data, data_sz); + } + + fw_event->type = type; + fw_event->a = a; + + spin_lock_irqsave(&a->fw_event_lock, flags); + list_add_tail(&fw_event->list, &a->fw_event_list); + INIT_DELAYED_WORK(&fw_event->work, esas2r_firmware_event_work); + queue_delayed_work_on( + smp_processor_id(), a->fw_event_q, &fw_event->work, + msecs_to_jiffies(1)); + spin_unlock_irqrestore(&a->fw_event_lock, flags); +} + 
+void esas2r_target_state_changed(struct esas2r_adapter *a, u16 targ_id, + u8 state) +{ + if (state == TS_LUN_CHANGE) + esas2r_queue_fw_event(a, fw_event_lun_change, &targ_id, + sizeof(targ_id)); + else if (state == TS_PRESENT) + esas2r_queue_fw_event(a, fw_event_present, &targ_id, + sizeof(targ_id)); + else if (state == TS_NOT_PRESENT) + esas2r_queue_fw_event(a, fw_event_not_present, &targ_id, + sizeof(targ_id)); +} + +/* Translate status to a Linux SCSI mid-layer error code */ +int esas2r_req_status_to_error(u8 req_stat) +{ + switch (req_stat) { + case RS_OVERRUN: + case RS_UNDERRUN: + case RS_SUCCESS: + /* + * NOTE: SCSI mid-layer wants a good status for a SCSI error, because + * it will check the scsi_stat value in the completion anyway. + */ + case RS_SCSI_ERROR: + return DID_OK; + + case RS_SEL: + case RS_SEL2: + return DID_NO_CONNECT; + + case RS_RESET: + return DID_RESET; + + case RS_ABORTED: + return DID_ABORT; + + case RS_BUSY: + return DID_BUS_BUSY; + } + + /* everything else is just an error. */ + + return DID_ERROR; +} + +module_init(esas2r_init); +module_exit(esas2r_exit); diff --git a/drivers/scsi/esas2r/esas2r_targdb.c b/drivers/scsi/esas2r/esas2r_targdb.c new file mode 100644 index 000000000000..e540a2fa3d15 --- /dev/null +++ b/drivers/scsi/esas2r/esas2r_targdb.c @@ -0,0 +1,306 @@ +/* + * linux/drivers/scsi/esas2r/esas2r_targdb.c + * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers + * + * Copyright (c) 2001-2013 ATTO Technology, Inc. + * (mailto:linuxdrivers@attotech.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. + * + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, + * USA. 
+ */ + +#include "esas2r.h" + +void esas2r_targ_db_initialize(struct esas2r_adapter *a) +{ + struct esas2r_target *t; + + for (t = a->targetdb; t < a->targetdb_end; t++) { + memset(t, 0, sizeof(struct esas2r_target)); + + t->target_state = TS_NOT_PRESENT; + t->buffered_target_state = TS_NOT_PRESENT; + t->new_target_state = TS_INVALID; + } +} + +void esas2r_targ_db_remove_all(struct esas2r_adapter *a, bool notify) +{ + struct esas2r_target *t; + unsigned long flags; + + for (t = a->targetdb; t < a->targetdb_end; t++) { + if (t->target_state != TS_PRESENT) + continue; + + spin_lock_irqsave(&a->mem_lock, flags); + esas2r_targ_db_remove(a, t); + spin_unlock_irqrestore(&a->mem_lock, flags); + + if (notify) { + esas2r_trace("remove id:%d", esas2r_targ_get_id(t, + a)); + esas2r_target_state_changed(a, esas2r_targ_get_id(t, + a), + TS_NOT_PRESENT); + } + } +} + +void esas2r_targ_db_report_changes(struct esas2r_adapter *a) +{ + struct esas2r_target *t; + unsigned long flags; + + esas2r_trace_enter(); + + if (a->flags & AF_DISC_PENDING) { + esas2r_trace_exit(); + return; + } + + for (t = a->targetdb; t < a->targetdb_end; t++) { + u8 state = TS_INVALID; + + spin_lock_irqsave(&a->mem_lock, flags); + if (t->buffered_target_state != t->target_state) + state = t->buffered_target_state = t->target_state; + + spin_unlock_irqrestore(&a->mem_lock, flags); + if (state != TS_INVALID) { + esas2r_trace("targ_db_report_changes:%d", + esas2r_targ_get_id( + t, + a)); + esas2r_trace("state:%d", state); + + esas2r_target_state_changed(a, + esas2r_targ_get_id(t, + a), + state); + } + } + + esas2r_trace_exit(); +} + +struct esas2r_target *esas2r_targ_db_add_raid(struct esas2r_adapter *a, + struct esas2r_disc_context * + dc) +{ + struct esas2r_target *t; + + esas2r_trace_enter(); + + if (dc->curr_virt_id >= ESAS2R_MAX_TARGETS) { + esas2r_bugon(); + esas2r_trace_exit(); + return NULL; + } + + t = a->targetdb + dc->curr_virt_id; + + if (t->target_state == TS_PRESENT) { + esas2r_trace_exit(); + return NULL; + } + + esas2r_hdebug("add RAID %s, T:%d", dc->raid_grp_name, + esas2r_targ_get_id( + t, + a)); + + if (dc->interleave == 0 + || dc->block_size == 0) { + /* these are invalid values, don't create the target entry. */ + + esas2r_hdebug("invalid RAID group dimensions"); + + esas2r_trace_exit(); + + return NULL; + } + + t->block_size = dc->block_size; + t->inter_byte = dc->interleave; + t->inter_block = dc->interleave / dc->block_size; + t->virt_targ_id = dc->curr_virt_id; + t->phys_targ_id = ESAS2R_TARG_ID_INV; + + t->flags &= ~TF_PASS_THRU; + t->flags |= TF_USED; + + t->identifier_len = 0; + + t->target_state = TS_PRESENT; + + return t; +} + +struct esas2r_target *esas2r_targ_db_add_pthru(struct esas2r_adapter *a, + struct esas2r_disc_context *dc, + u8 *ident, + u8 ident_len) +{ + struct esas2r_target *t; + + esas2r_trace_enter(); + + if (dc->curr_virt_id >= ESAS2R_MAX_TARGETS) { + esas2r_bugon(); + esas2r_trace_exit(); + return NULL; + } + + /* see if we found this device before. 
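+	 * match on the device identifier bytes; if there is no match,
+	 * fall back to a fresh entry at the current virtual target id.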
*/ + + t = esas2r_targ_db_find_by_ident(a, ident, ident_len); + + if (t == NULL) { + t = a->targetdb + dc->curr_virt_id; + + if (ident_len > sizeof(t->identifier) + || t->target_state == TS_PRESENT) { + esas2r_trace_exit(); + return NULL; + } + } + + esas2r_hdebug("add PT; T:%d, V:%d, P:%d", esas2r_targ_get_id(t, a), + dc->curr_virt_id, + dc->curr_phys_id); + + t->block_size = 0; + t->inter_byte = 0; + t->inter_block = 0; + t->virt_targ_id = dc->curr_virt_id; + t->phys_targ_id = dc->curr_phys_id; + t->identifier_len = ident_len; + + memcpy(t->identifier, ident, ident_len); + + t->flags |= TF_PASS_THRU | TF_USED; + + t->target_state = TS_PRESENT; + + return t; +} + +void esas2r_targ_db_remove(struct esas2r_adapter *a, struct esas2r_target *t) +{ + esas2r_trace_enter(); + + t->target_state = TS_NOT_PRESENT; + + esas2r_trace("remove id:%d", esas2r_targ_get_id(t, a)); + + esas2r_trace_exit(); +} + +struct esas2r_target *esas2r_targ_db_find_by_sas_addr(struct esas2r_adapter *a, + u64 *sas_addr) +{ + struct esas2r_target *t; + + for (t = a->targetdb; t < a->targetdb_end; t++) + if (t->sas_addr == *sas_addr) + return t; + + return NULL; +} + +struct esas2r_target *esas2r_targ_db_find_by_ident(struct esas2r_adapter *a, + void *identifier, + u8 ident_len) +{ + struct esas2r_target *t; + + for (t = a->targetdb; t < a->targetdb_end; t++) { + if (ident_len == t->identifier_len + && memcmp(&t->identifier[0], identifier, + ident_len) == 0) + return t; + } + + return NULL; +} + +u16 esas2r_targ_db_find_next_present(struct esas2r_adapter *a, u16 target_id) +{ + u16 id = target_id + 1; + + while (id < ESAS2R_MAX_TARGETS) { + struct esas2r_target *t = a->targetdb + id; + + if (t->target_state == TS_PRESENT) + break; + + id++; + } + + return id; +} + +struct esas2r_target *esas2r_targ_db_find_by_virt_id(struct esas2r_adapter *a, + u16 virt_id) +{ + struct esas2r_target *t; + + for (t = a->targetdb; t < a->targetdb_end; t++) { + if (t->target_state != TS_PRESENT) + continue; + + if (t->virt_targ_id == virt_id) + return t; + } + + return NULL; +} + +u16 esas2r_targ_db_get_tgt_cnt(struct esas2r_adapter *a) +{ + u16 devcnt = 0; + struct esas2r_target *t; + unsigned long flags; + + spin_lock_irqsave(&a->mem_lock, flags); + for (t = a->targetdb; t < a->targetdb_end; t++) + if (t->target_state == TS_PRESENT) + devcnt++; + + spin_unlock_irqrestore(&a->mem_lock, flags); + + return devcnt; +} diff --git a/drivers/scsi/esas2r/esas2r_vda.c b/drivers/scsi/esas2r/esas2r_vda.c new file mode 100644 index 000000000000..f8ec6d636846 --- /dev/null +++ b/drivers/scsi/esas2r/esas2r_vda.c @@ -0,0 +1,521 @@ +/* + * linux/drivers/scsi/esas2r/esas2r_vda.c + * esas2r driver VDA firmware interface functions + * + * Copyright (c) 2001-2013 ATTO Technology, Inc. + * (mailto:linuxdrivers@attotech.com) + */ +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+ *
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+
+#include "esas2r.h"
+
+static u8 esas2r_vdaioctl_versions[] = {
+	ATTO_VDA_VER_UNSUPPORTED,
+	ATTO_VDA_FLASH_VER,
+	ATTO_VDA_VER_UNSUPPORTED,
+	ATTO_VDA_VER_UNSUPPORTED,
+	ATTO_VDA_CLI_VER,
+	ATTO_VDA_VER_UNSUPPORTED,
+	ATTO_VDA_CFG_VER,
+	ATTO_VDA_MGT_VER,
+	ATTO_VDA_GSV_VER
+};
+
+static void clear_vda_request(struct esas2r_request *rq);
+
+static void esas2r_complete_vda_ioctl(struct esas2r_adapter *a,
+				      struct esas2r_request *rq);
+
+/* Prepare a VDA IOCTL request to be sent to the firmware.
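esas2r_process_vda_ioctl() gates every request twice before building it: the function code must index into esas2r_vdaioctl_versions[], and the caller's version must not exceed the table entry, with ATTO_VDA_VER_UNSUPPORTED filling the slots that have no versioned IOCTL of their own. A self-contained sketch of that table-driven gate; the zero value of VER_UNSUPPORTED and the per-function maxima are assumptions, not the ATTO constants:

#include <stdbool.h>
#include <stddef.h>

#define VER_UNSUPPORTED 0	/* assumed: rejects any nonzero version */

static const unsigned char max_version[] = {
	VER_UNSUPPORTED, 2, VER_UNSUPPORTED, 1, 3,
};

static bool version_ok(size_t func, unsigned char ver)
{
	if (func >= sizeof(max_version) / sizeof(max_version[0]))
		return false;		/* unknown function code */
	return ver <= max_version[func];
}

The bounds check doubles as the guard for the dispatch switch that follows it, so adding a function is one new table entry plus one new case.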
*/ +bool esas2r_process_vda_ioctl(struct esas2r_adapter *a, + struct atto_ioctl_vda *vi, + struct esas2r_request *rq, + struct esas2r_sg_context *sgc) +{ + u32 datalen = 0; + struct atto_vda_sge *firstsg = NULL; + u8 vercnt = (u8)ARRAY_SIZE(esas2r_vdaioctl_versions); + + vi->status = ATTO_STS_SUCCESS; + vi->vda_status = RS_PENDING; + + if (vi->function >= vercnt) { + vi->status = ATTO_STS_INV_FUNC; + return false; + } + + if (vi->version > esas2r_vdaioctl_versions[vi->function]) { + vi->status = ATTO_STS_INV_VERSION; + return false; + } + + if (a->flags & AF_DEGRADED_MODE) { + vi->status = ATTO_STS_DEGRADED; + return false; + } + + if (vi->function != VDA_FUNC_SCSI) + clear_vda_request(rq); + + rq->vrq->scsi.function = vi->function; + rq->interrupt_cb = esas2r_complete_vda_ioctl; + rq->interrupt_cx = vi; + + switch (vi->function) { + case VDA_FUNC_FLASH: + + if (vi->cmd.flash.sub_func != VDA_FLASH_FREAD + && vi->cmd.flash.sub_func != VDA_FLASH_FWRITE + && vi->cmd.flash.sub_func != VDA_FLASH_FINFO) { + vi->status = ATTO_STS_INV_FUNC; + return false; + } + + if (vi->cmd.flash.sub_func != VDA_FLASH_FINFO) + datalen = vi->data_length; + + rq->vrq->flash.length = cpu_to_le32(datalen); + rq->vrq->flash.sub_func = vi->cmd.flash.sub_func; + + memcpy(rq->vrq->flash.data.file.file_name, + vi->cmd.flash.data.file.file_name, + sizeof(vi->cmd.flash.data.file.file_name)); + + firstsg = rq->vrq->flash.data.file.sge; + break; + + case VDA_FUNC_CLI: + + datalen = vi->data_length; + + rq->vrq->cli.cmd_rsp_len = + cpu_to_le32(vi->cmd.cli.cmd_rsp_len); + rq->vrq->cli.length = cpu_to_le32(datalen); + + firstsg = rq->vrq->cli.sge; + break; + + case VDA_FUNC_MGT: + { + u8 *cmdcurr_offset = sgc->cur_offset + - offsetof(struct atto_ioctl_vda, data) + + offsetof(struct atto_ioctl_vda, cmd) + + offsetof(struct atto_ioctl_vda_mgt_cmd, + data); + /* + * build the data payload SGL here first since + * esas2r_sgc_init() will modify the S/G list offset for the + * management SGL (which is built below where the data SGL is + * usually built). + */ + + if (vi->data_length) { + u32 payldlen = 0; + + if (vi->cmd.mgt.mgt_func == VDAMGT_DEV_HEALTH_REQ + || vi->cmd.mgt.mgt_func == VDAMGT_DEV_METRICS) { + rq->vrq->mgt.payld_sglst_offset = + (u8)offsetof(struct atto_vda_mgmt_req, + payld_sge); + + payldlen = vi->data_length; + datalen = vi->cmd.mgt.data_length; + } else if (vi->cmd.mgt.mgt_func == VDAMGT_DEV_INFO2 + || vi->cmd.mgt.mgt_func == + VDAMGT_DEV_INFO2_BYADDR) { + datalen = vi->data_length; + cmdcurr_offset = sgc->cur_offset; + } else { + vi->status = ATTO_STS_INV_PARAM; + return false; + } + + /* Setup the length so building the payload SGL works */ + rq->vrq->mgt.length = cpu_to_le32(datalen); + + if (payldlen) { + rq->vrq->mgt.payld_length = + cpu_to_le32(payldlen); + + esas2r_sgc_init(sgc, a, rq, + rq->vrq->mgt.payld_sge); + sgc->length = payldlen; + + if (!esas2r_build_sg_list(a, rq, sgc)) { + vi->status = ATTO_STS_OUT_OF_RSRC; + return false; + } + } + } else { + datalen = vi->cmd.mgt.data_length; + + rq->vrq->mgt.length = cpu_to_le32(datalen); + } + + /* + * Now that the payload SGL is built, if any, setup to build + * the management SGL. + */ + firstsg = rq->vrq->mgt.sge; + sgc->cur_offset = cmdcurr_offset; + + /* Finish initializing the management request. 
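The VDA_FUNC_MGT branch has to aim its data SGL at the payload embedded in the ioctl's command union rather than at the trailing data area, so it rebases sgc->cur_offset with three offsetof() terms before esas2r_sgc_init() can disturb it. The same arithmetic on a hypothetical layout:

#include <stddef.h>

struct mgt_cmd {
	int  header;
	char data[64];		/* payload embedded in the command */
};

struct ioctl_blob {
	int  status;
	struct mgt_cmd cmd;
	char data[256];		/* trailing bulk-data area */
};

/*
 * Given a pointer into blob->data, derive the address of blob->cmd.data
 * without a pointer to the enclosing struct: step back to the struct
 * base with one offsetof(), then forward through the member chain.
 */
static char *rebase_to_cmd_data(char *cur_offset)
{
	return cur_offset
	       - offsetof(struct ioctl_blob, data)
	       + offsetof(struct ioctl_blob, cmd)
	       + offsetof(struct mgt_cmd, data);
}

As the driver's own comment notes, the payload SGL must be built first because esas2r_sgc_init() mutates the shared sg_context; the saved cmdcurr_offset is what lets the management SGL be built afterwards from the right place.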
*/ + rq->vrq->mgt.mgt_func = vi->cmd.mgt.mgt_func; + rq->vrq->mgt.scan_generation = vi->cmd.mgt.scan_generation; + rq->vrq->mgt.dev_index = + cpu_to_le32(vi->cmd.mgt.dev_index); + + esas2r_nuxi_mgt_data(rq->vrq->mgt.mgt_func, &vi->cmd.mgt.data); + break; + } + + case VDA_FUNC_CFG: + + if (vi->data_length + || vi->cmd.cfg.data_length == 0) { + vi->status = ATTO_STS_INV_PARAM; + return false; + } + + if (vi->cmd.cfg.cfg_func == VDA_CFG_INIT) { + vi->status = ATTO_STS_INV_FUNC; + return false; + } + + rq->vrq->cfg.sub_func = vi->cmd.cfg.cfg_func; + rq->vrq->cfg.length = cpu_to_le32(vi->cmd.cfg.data_length); + + if (vi->cmd.cfg.cfg_func == VDA_CFG_GET_INIT) { + memcpy(&rq->vrq->cfg.data, + &vi->cmd.cfg.data, + vi->cmd.cfg.data_length); + + esas2r_nuxi_cfg_data(rq->vrq->cfg.sub_func, + &rq->vrq->cfg.data); + } else { + vi->status = ATTO_STS_INV_FUNC; + + return false; + } + + break; + + case VDA_FUNC_GSV: + + vi->cmd.gsv.rsp_len = vercnt; + + memcpy(vi->cmd.gsv.version_info, esas2r_vdaioctl_versions, + vercnt); + + vi->vda_status = RS_SUCCESS; + break; + + default: + + vi->status = ATTO_STS_INV_FUNC; + return false; + } + + if (datalen) { + esas2r_sgc_init(sgc, a, rq, firstsg); + sgc->length = datalen; + + if (!esas2r_build_sg_list(a, rq, sgc)) { + vi->status = ATTO_STS_OUT_OF_RSRC; + return false; + } + } + + esas2r_start_request(a, rq); + + return true; +} + +static void esas2r_complete_vda_ioctl(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + struct atto_ioctl_vda *vi = (struct atto_ioctl_vda *)rq->interrupt_cx; + + vi->vda_status = rq->req_stat; + + switch (vi->function) { + case VDA_FUNC_FLASH: + + if (vi->cmd.flash.sub_func == VDA_FLASH_FINFO + || vi->cmd.flash.sub_func == VDA_FLASH_FREAD) + vi->cmd.flash.data.file.file_size = + le32_to_cpu(rq->func_rsp.flash_rsp.file_size); + + break; + + case VDA_FUNC_MGT: + + vi->cmd.mgt.scan_generation = + rq->func_rsp.mgt_rsp.scan_generation; + vi->cmd.mgt.dev_index = le16_to_cpu( + rq->func_rsp.mgt_rsp.dev_index); + + if (vi->data_length == 0) + vi->cmd.mgt.data_length = + le32_to_cpu(rq->func_rsp.mgt_rsp.length); + + esas2r_nuxi_mgt_data(rq->vrq->mgt.mgt_func, &vi->cmd.mgt.data); + break; + + case VDA_FUNC_CFG: + + if (vi->cmd.cfg.cfg_func == VDA_CFG_GET_INIT) { + struct atto_ioctl_vda_cfg_cmd *cfg = &vi->cmd.cfg; + struct atto_vda_cfg_rsp *rsp = &rq->func_rsp.cfg_rsp; + + cfg->data_length = + cpu_to_le32(sizeof(struct atto_vda_cfg_init)); + cfg->data.init.vda_version = + le32_to_cpu(rsp->vda_version); + cfg->data.init.fw_build = rsp->fw_build; + + sprintf((char *)&cfg->data.init.fw_release, + "%1d.%02d", + (int)LOBYTE(le16_to_cpu(rsp->fw_release)), + (int)HIBYTE(le16_to_cpu(rsp->fw_release))); + + if (LOWORD(LOBYTE(cfg->data.init.fw_build)) == 'A') + cfg->data.init.fw_version = + cfg->data.init.fw_build; + else + cfg->data.init.fw_version = + cfg->data.init.fw_release; + } else { + esas2r_nuxi_cfg_data(rq->vrq->cfg.sub_func, + &vi->cmd.cfg.data); + } + + break; + + case VDA_FUNC_CLI: + + vi->cmd.cli.cmd_rsp_len = + le32_to_cpu(rq->func_rsp.cli_rsp.cmd_rsp_len); + break; + + default: + + break; + } +} + +/* Build a flash VDA request. 
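Both halves of the IOCTL path meet through an opaque-context callback: the submit side stores the user buffer in rq->interrupt_cx and the handler in rq->interrupt_cb, and esas2r_complete_vda_ioctl() casts the context back to struct atto_ioctl_vda to copy status and response fields out. A stripped-down sketch of the idiom with invented names:

#include <stdint.h>

struct request {
	void (*done)(struct request *rq);	/* cf. rq->interrupt_cb */
	void *done_ctx;				/* cf. rq->interrupt_cx */
	uint8_t status;
};

struct my_ioctl {
	uint8_t vda_status;
};

static void my_ioctl_done(struct request *rq)
{
	struct my_ioctl *vi = rq->done_ctx;	/* recover the buffer */

	vi->vda_status = rq->status;
}

static void submit(struct request *rq, struct my_ioctl *vi)
{
	rq->done = my_ioctl_done;
	rq->done_ctx = vi;
	/* ... queue rq to the hardware; done() fires on completion ... */
}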
*/ +void esas2r_build_flash_req(struct esas2r_adapter *a, + struct esas2r_request *rq, + u8 sub_func, + u8 cksum, + u32 addr, + u32 length) +{ + struct atto_vda_flash_req *vrq = &rq->vrq->flash; + + clear_vda_request(rq); + + rq->vrq->scsi.function = VDA_FUNC_FLASH; + + if (sub_func == VDA_FLASH_BEGINW + || sub_func == VDA_FLASH_WRITE + || sub_func == VDA_FLASH_READ) + vrq->sg_list_offset = (u8)offsetof(struct atto_vda_flash_req, + data.sge); + + vrq->length = cpu_to_le32(length); + vrq->flash_addr = cpu_to_le32(addr); + vrq->checksum = cksum; + vrq->sub_func = sub_func; +} + +/* Build a VDA management request. */ +void esas2r_build_mgt_req(struct esas2r_adapter *a, + struct esas2r_request *rq, + u8 sub_func, + u8 scan_gen, + u16 dev_index, + u32 length, + void *data) +{ + struct atto_vda_mgmt_req *vrq = &rq->vrq->mgt; + + clear_vda_request(rq); + + rq->vrq->scsi.function = VDA_FUNC_MGT; + + vrq->mgt_func = sub_func; + vrq->scan_generation = scan_gen; + vrq->dev_index = cpu_to_le16(dev_index); + vrq->length = cpu_to_le32(length); + + if (vrq->length) { + if (a->flags & AF_LEGACY_SGE_MODE) { + vrq->sg_list_offset = (u8)offsetof( + struct atto_vda_mgmt_req, sge); + + vrq->sge[0].length = cpu_to_le32(SGE_LAST | length); + vrq->sge[0].address = cpu_to_le64( + rq->vrq_md->phys_addr + + sizeof(union atto_vda_req)); + } else { + vrq->sg_list_offset = (u8)offsetof( + struct atto_vda_mgmt_req, prde); + + vrq->prde[0].ctl_len = cpu_to_le32(length); + vrq->prde[0].address = cpu_to_le64( + rq->vrq_md->phys_addr + + sizeof(union atto_vda_req)); + } + } + + if (data) { + esas2r_nuxi_mgt_data(sub_func, data); + + memcpy(&rq->vda_rsp_data->mgt_data.data.bytes[0], data, + length); + } +} + +/* Build a VDA asyncronous event (AE) request. */ +void esas2r_build_ae_req(struct esas2r_adapter *a, struct esas2r_request *rq) +{ + struct atto_vda_ae_req *vrq = &rq->vrq->ae; + + clear_vda_request(rq); + + rq->vrq->scsi.function = VDA_FUNC_AE; + + vrq->length = cpu_to_le32(sizeof(struct atto_vda_ae_data)); + + if (a->flags & AF_LEGACY_SGE_MODE) { + vrq->sg_list_offset = + (u8)offsetof(struct atto_vda_ae_req, sge); + vrq->sge[0].length = cpu_to_le32(SGE_LAST | vrq->length); + vrq->sge[0].address = cpu_to_le64( + rq->vrq_md->phys_addr + + sizeof(union atto_vda_req)); + } else { + vrq->sg_list_offset = (u8)offsetof(struct atto_vda_ae_req, + prde); + vrq->prde[0].ctl_len = cpu_to_le32(vrq->length); + vrq->prde[0].address = cpu_to_le64( + rq->vrq_md->phys_addr + + sizeof(union atto_vda_req)); + } +} + +/* Build a VDA CLI request. */ +void esas2r_build_cli_req(struct esas2r_adapter *a, + struct esas2r_request *rq, + u32 length, + u32 cmd_rsp_len) +{ + struct atto_vda_cli_req *vrq = &rq->vrq->cli; + + clear_vda_request(rq); + + rq->vrq->scsi.function = VDA_FUNC_CLI; + + vrq->length = cpu_to_le32(length); + vrq->cmd_rsp_len = cpu_to_le32(cmd_rsp_len); + vrq->sg_list_offset = (u8)offsetof(struct atto_vda_cli_req, sge); +} + +/* Build a VDA IOCTL request. */ +void esas2r_build_ioctl_req(struct esas2r_adapter *a, + struct esas2r_request *rq, + u32 length, + u8 sub_func) +{ + struct atto_vda_ioctl_req *vrq = &rq->vrq->ioctl; + + clear_vda_request(rq); + + rq->vrq->scsi.function = VDA_FUNC_IOCTL; + + vrq->length = cpu_to_le32(length); + vrq->sub_func = sub_func; + vrq->sg_list_offset = (u8)offsetof(struct atto_vda_ioctl_req, sge); +} + +/* Build a VDA configuration request. 
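esas2r_build_mgt_req() and esas2r_build_ae_req() above emit the same one-element scatter list in two firmware dialects, selecting the legacy SGE or the newer PRDE layout by the AF_LEGACY_SGE_MODE flag and recording which member was filled via sg_list_offset. A sketch of the dual-format fill; the SGE_LAST bit value and field widths are assumptions, and the driver's cpu_to_le32()/cpu_to_le64() conversions are elided:

#include <stddef.h>
#include <stdint.h>

#define SGE_LAST 0x80000000u	/* assumed last-entry marker */

struct sge  { uint32_t length;  uint64_t address; };
struct prde { uint32_t ctl_len; uint64_t address; };

struct mgmt_req {
	uint8_t sg_list_offset;
	struct sge  sge[1];
	struct prde prde[1];
};

static void set_single_sg(struct mgmt_req *vrq, int legacy,
			  uint64_t addr, uint32_t len)
{
	if (legacy) {
		vrq->sg_list_offset = (uint8_t)offsetof(struct mgmt_req, sge);
		vrq->sge[0].length  = SGE_LAST | len;
		vrq->sge[0].address = addr;
	} else {
		vrq->sg_list_offset = (uint8_t)offsetof(struct mgmt_req, prde);
		vrq->prde[0].ctl_len = len;
		vrq->prde[0].address = addr;
	}
}

In both modes the driver points the element at rq->vrq_md->phys_addr + sizeof(union atto_vda_req), i.e. at a data area laid out directly behind the request in the same DMA allocation.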
*/ +void esas2r_build_cfg_req(struct esas2r_adapter *a, + struct esas2r_request *rq, + u8 sub_func, + u32 length, + void *data) +{ + struct atto_vda_cfg_req *vrq = &rq->vrq->cfg; + + clear_vda_request(rq); + + rq->vrq->scsi.function = VDA_FUNC_CFG; + + vrq->sub_func = sub_func; + vrq->length = cpu_to_le32(length); + + if (data) { + esas2r_nuxi_cfg_data(sub_func, data); + + memcpy(&vrq->data, data, length); + } +} + +static void clear_vda_request(struct esas2r_request *rq) +{ + u32 handle = rq->vrq->scsi.handle; + + memset(rq->vrq, 0, sizeof(*rq->vrq)); + + rq->vrq->scsi.handle = handle; + + rq->req_stat = RS_PENDING; + + /* since the data buffer is separate clear that too */ + + memset(rq->data_buf, 0, ESAS2R_DATA_BUF_LEN); + + /* + * Setup next and prev pointer in case the request is not going through + * esas2r_start_request(). + */ + + INIT_LIST_HEAD(&rq->req_list); +} diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 7f4f790a3d71..b766f5aea584 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -583,7 +583,7 @@ static void set_performant_mode(struct ctlr_info *h, struct CommandList *c) c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1); if (likely(h->msix_vector)) c->Header.ReplyQueue = - smp_processor_id() % h->nreply_queues; + raw_smp_processor_id() % h->nreply_queues; } } @@ -1205,8 +1205,8 @@ static void complete_scsi_command(struct CommandList *cp) scsi_set_resid(cmd, ei->ResidualCnt); if (ei->CommandStatus == 0) { - cmd->scsi_done(cmd); cmd_free(h, cp); + cmd->scsi_done(cmd); return; } @@ -1379,8 +1379,8 @@ static void complete_scsi_command(struct CommandList *cp) dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n", cp, ei->CommandStatus); } - cmd->scsi_done(cmd); cmd_free(h, cp); + cmd->scsi_done(cmd); } static void hpsa_pci_unmap(struct pci_dev *pdev, @@ -2721,7 +2721,6 @@ static struct CommandList *cmd_alloc(struct ctlr_info *h) } while (test_and_set_bit (i & (BITS_PER_LONG - 1), h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0); - h->nr_allocs++; spin_unlock_irqrestore(&h->lock, flags); c = h->cmd_pool + i; @@ -2793,7 +2792,6 @@ static void cmd_free(struct ctlr_info *h, struct CommandList *c) spin_lock_irqsave(&h->lock, flags); clear_bit(i & (BITS_PER_LONG - 1), h->cmd_pool_bits + (i / BITS_PER_LONG)); - h->nr_frees++; spin_unlock_irqrestore(&h->lock, flags); } diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h index 981647989bfd..bc85e7244f40 100644 --- a/drivers/scsi/hpsa.h +++ b/drivers/scsi/hpsa.h @@ -98,8 +98,6 @@ struct ctlr_info { struct ErrorInfo *errinfo_pool; dma_addr_t errinfo_pool_dhandle; unsigned long *cmd_pool_bits; - int nr_allocs; - int nr_frees; int scan_finished; spinlock_t scan_lock; wait_queue_head_t scan_wait_queue; diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 6601e03520cc..36ac1c34ce97 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -9990,6 +9990,20 @@ static struct pci_device_id ipr_pci_table[] = { PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 }, { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 }, + { 
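The hpsa hunks above make three small but deliberate changes: cmd_free() now runs before cmd->scsi_done(cmd), so the command slot is back in the pool before the midlayer can reissue I/O against it; the reply-queue hint switches to raw_smp_processor_id(), where a stale CPU number merely skews the balancing; and the nr_allocs/nr_frees debug counters go away. The pool behind cmd_alloc()/cmd_free() is a plain bitmap allocator; a single-threaded sketch (the driver wraps this in a spinlock and test_and_set_bit()):

#include <limits.h>

#define POOL_SIZE 64
#define BPL (CHAR_BIT * sizeof(unsigned long))

static unsigned long pool_bits[(POOL_SIZE + BPL - 1) / BPL];

static int claim_slot(void)			/* -1 when exhausted */
{
	for (int i = 0; i < POOL_SIZE; i++) {
		unsigned long mask = 1UL << (i % BPL);

		if (!(pool_bits[i / BPL] & mask)) {
			pool_bits[i / BPL] |= mask;
			return i;
		}
	}
	return -1;
}

static void release_slot(int i)
{
	pool_bits[i / BPL] &= ~(1UL << (i % BPL));
}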
PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 }, { } }; MODULE_DEVICE_TABLE(pci, ipr_pci_table); diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h index 07a85ce41782..cad1483f05da 100644 --- a/drivers/scsi/ipr.h +++ b/drivers/scsi/ipr.h @@ -100,6 +100,13 @@ #define IPR_SUBS_DEV_ID_57D6 0x03FC #define IPR_SUBS_DEV_ID_57D7 0x03FF #define IPR_SUBS_DEV_ID_57D8 0x03FE +#define IPR_SUBS_DEV_ID_57D9 0x046D +#define IPR_SUBS_DEV_ID_57EB 0x0474 +#define IPR_SUBS_DEV_ID_57EC 0x0475 +#define IPR_SUBS_DEV_ID_57ED 0x0499 +#define IPR_SUBS_DEV_ID_57EE 0x049A +#define IPR_SUBS_DEV_ID_57EF 0x049B +#define IPR_SUBS_DEV_ID_57F0 0x049C #define IPR_NAME "ipr" /* diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c index cd962da4a57a..85c77f6b802b 100644 --- a/drivers/scsi/isci/port_config.c +++ b/drivers/scsi/isci/port_config.c @@ -311,9 +311,9 @@ sci_mpc_agent_validate_phy_configuration(struct isci_host *ihost, &ihost->phys[phy_index]); assigned_phy_mask |= (1 << phy_index); + phy_index++; } - phy_index++; } return sci_port_configuration_agent_validate_ports(ihost, port_agent); diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index ae69dfcc7834..e3995612ea76 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -2812,6 +2812,8 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session) kfree(session->boot_nic); kfree(session->boot_target); kfree(session->ifacename); + kfree(session->portal_type); + kfree(session->discovery_parent_type); iscsi_destroy_session(cls_session); iscsi_host_dec_session_cnt(shost); @@ -3168,6 +3170,7 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn, { struct iscsi_conn *conn = cls_conn->dd_data; struct iscsi_session *session = conn->session; + int val; switch(param) { case ISCSI_PARAM_FAST_ABORT: @@ -3257,6 +3260,15 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn, return iscsi_switch_str_param(&session->boot_nic, buf); case ISCSI_PARAM_BOOT_TARGET: return iscsi_switch_str_param(&session->boot_target, buf); + case ISCSI_PARAM_PORTAL_TYPE: + return iscsi_switch_str_param(&session->portal_type, buf); + case ISCSI_PARAM_DISCOVERY_PARENT_TYPE: + return iscsi_switch_str_param(&session->discovery_parent_type, + buf); + case ISCSI_PARAM_DISCOVERY_SESS: + sscanf(buf, "%d", &val); + session->discovery_sess = !!val; + break; default: return -ENOSYS; } @@ -3305,6 +3317,9 @@ int iscsi_session_get_param(struct iscsi_cls_session *cls_session, case ISCSI_PARAM_DATASEQ_INORDER_EN: len = sprintf(buf, "%d\n", session->dataseq_inorder_en); break; + case ISCSI_PARAM_DEF_TASKMGMT_TMO: + len = sprintf(buf, "%d\n", session->def_taskmgmt_tmo); + break; case ISCSI_PARAM_ERL: len = sprintf(buf, "%d\n", session->erl); break; @@ -3344,6 +3359,52 @@ int iscsi_session_get_param(struct iscsi_cls_session *cls_session, case ISCSI_PARAM_BOOT_TARGET: len = sprintf(buf, "%s\n", session->boot_target); break; + case ISCSI_PARAM_AUTO_SND_TGT_DISABLE: + len = sprintf(buf, "%u\n", session->auto_snd_tgt_disable); + break; + case ISCSI_PARAM_DISCOVERY_SESS: + len = sprintf(buf, "%u\n", session->discovery_sess); + break; + case ISCSI_PARAM_PORTAL_TYPE: + len = sprintf(buf, "%s\n", session->portal_type); + break; + case ISCSI_PARAM_CHAP_AUTH_EN: + len = sprintf(buf, "%u\n", 
session->chap_auth_en); + break; + case ISCSI_PARAM_DISCOVERY_LOGOUT_EN: + len = sprintf(buf, "%u\n", session->discovery_logout_en); + break; + case ISCSI_PARAM_BIDI_CHAP_EN: + len = sprintf(buf, "%u\n", session->bidi_chap_en); + break; + case ISCSI_PARAM_DISCOVERY_AUTH_OPTIONAL: + len = sprintf(buf, "%u\n", session->discovery_auth_optional); + break; + case ISCSI_PARAM_DEF_TIME2WAIT: + len = sprintf(buf, "%d\n", session->time2wait); + break; + case ISCSI_PARAM_DEF_TIME2RETAIN: + len = sprintf(buf, "%d\n", session->time2retain); + break; + case ISCSI_PARAM_TSID: + len = sprintf(buf, "%u\n", session->tsid); + break; + case ISCSI_PARAM_ISID: + len = sprintf(buf, "%02x%02x%02x%02x%02x%02x\n", + session->isid[0], session->isid[1], + session->isid[2], session->isid[3], + session->isid[4], session->isid[5]); + break; + case ISCSI_PARAM_DISCOVERY_PARENT_IDX: + len = sprintf(buf, "%u\n", session->discovery_parent_idx); + break; + case ISCSI_PARAM_DISCOVERY_PARENT_TYPE: + if (session->discovery_parent_type) + len = sprintf(buf, "%s\n", + session->discovery_parent_type); + else + len = sprintf(buf, "\n"); + break; default: return -ENOSYS; } @@ -3433,6 +3494,54 @@ int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn, case ISCSI_PARAM_PERSISTENT_ADDRESS: len = sprintf(buf, "%s\n", conn->persistent_address); break; + case ISCSI_PARAM_STATSN: + len = sprintf(buf, "%u\n", conn->statsn); + break; + case ISCSI_PARAM_MAX_SEGMENT_SIZE: + len = sprintf(buf, "%u\n", conn->max_segment_size); + break; + case ISCSI_PARAM_KEEPALIVE_TMO: + len = sprintf(buf, "%u\n", conn->keepalive_tmo); + break; + case ISCSI_PARAM_LOCAL_PORT: + len = sprintf(buf, "%u\n", conn->local_port); + break; + case ISCSI_PARAM_TCP_TIMESTAMP_STAT: + len = sprintf(buf, "%u\n", conn->tcp_timestamp_stat); + break; + case ISCSI_PARAM_TCP_NAGLE_DISABLE: + len = sprintf(buf, "%u\n", conn->tcp_nagle_disable); + break; + case ISCSI_PARAM_TCP_WSF_DISABLE: + len = sprintf(buf, "%u\n", conn->tcp_wsf_disable); + break; + case ISCSI_PARAM_TCP_TIMER_SCALE: + len = sprintf(buf, "%u\n", conn->tcp_timer_scale); + break; + case ISCSI_PARAM_TCP_TIMESTAMP_EN: + len = sprintf(buf, "%u\n", conn->tcp_timestamp_en); + break; + case ISCSI_PARAM_IP_FRAGMENT_DISABLE: + len = sprintf(buf, "%u\n", conn->fragment_disable); + break; + case ISCSI_PARAM_IPV4_TOS: + len = sprintf(buf, "%u\n", conn->ipv4_tos); + break; + case ISCSI_PARAM_IPV6_TC: + len = sprintf(buf, "%u\n", conn->ipv6_traffic_class); + break; + case ISCSI_PARAM_IPV6_FLOW_LABEL: + len = sprintf(buf, "%u\n", conn->ipv6_flow_label); + break; + case ISCSI_PARAM_IS_FW_ASSIGNED_IPV6: + len = sprintf(buf, "%u\n", conn->is_fw_assigned_ipv6); + break; + case ISCSI_PARAM_TCP_XMIT_WSF: + len = sprintf(buf, "%u\n", conn->tcp_xmit_wsf); + break; + case ISCSI_PARAM_TCP_RECV_WSF: + len = sprintf(buf, "%u\n", conn->tcp_recv_wsf); + break; default: return -ENOSYS; } diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index 93f222d66716..df43bfe6d573 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -421,6 +421,7 @@ struct lpfc_vport { uint32_t cfg_enable_da_id; uint32_t cfg_max_scsicmpl_time; uint32_t cfg_tgt_queue_depth; + uint32_t cfg_first_burst_size; uint32_t dev_loss_tmo_changed; @@ -710,8 +711,6 @@ struct lpfc_hba { uint32_t cfg_use_msi; uint32_t cfg_fcp_imax; uint32_t cfg_fcp_cpu_map; - uint32_t cfg_fcp_wq_count; - uint32_t cfg_fcp_eq_count; uint32_t cfg_fcp_io_channel; uint32_t cfg_total_seg_cnt; uint32_t cfg_sg_seg_cnt; diff --git a/drivers/scsi/lpfc/lpfc_attr.c 
b/drivers/scsi/lpfc/lpfc_attr.c index 5cb08ae3e8c2..22f42f866f75 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -674,9 +674,6 @@ lpfc_do_offline(struct lpfc_hba *phba, uint32_t type) int i; int rc; - if (phba->pport->fc_flag & FC_OFFLINE_MODE) - return 0; - init_completion(&online_compl); rc = lpfc_workq_post_event(phba, &status, &online_compl, LPFC_EVT_OFFLINE_PREP); @@ -744,14 +741,15 @@ lpfc_selective_reset(struct lpfc_hba *phba) int status = 0; int rc; - if ((!phba->cfg_enable_hba_reset) || - (phba->pport->fc_flag & FC_OFFLINE_MODE)) + if (!phba->cfg_enable_hba_reset) return -EACCES; - status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE); + if (!(phba->pport->fc_flag & FC_OFFLINE_MODE)) { + status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE); - if (status != 0) - return status; + if (status != 0) + return status; + } init_completion(&online_compl); rc = lpfc_workq_post_event(phba, &status, &online_compl, @@ -2591,9 +2589,12 @@ LPFC_VPORT_ATTR_R(enable_da_id, 1, 0, 1, /* # lun_queue_depth: This parameter is used to limit the number of outstanding -# commands per FCP LUN. Value range is [1,128]. Default value is 30. +# commands per FCP LUN. Value range is [1,512]. Default value is 30. +# If this parameter value is greater than 1/8th the maximum number of exchanges +# supported by the HBA port, then the lun queue depth will be reduced to +# 1/8th the maximum number of exchanges. */ -LPFC_VPORT_ATTR_R(lun_queue_depth, 30, 1, 128, +LPFC_VPORT_ATTR_R(lun_queue_depth, 30, 1, 512, "Max number of FCP commands we can queue to a specific LUN"); /* @@ -2601,7 +2602,7 @@ LPFC_VPORT_ATTR_R(lun_queue_depth, 30, 1, 128, # commands per target port. Value range is [10,65535]. Default value is 65535. */ LPFC_VPORT_ATTR_R(tgt_queue_depth, 65535, 10, 65535, - "Max number of FCP commands we can queue to a specific target port"); + "Max number of FCP commands we can queue to a specific target port"); /* # hba_queue_depth: This parameter is used to limit the number of outstanding @@ -3949,6 +3950,14 @@ LPFC_VPORT_ATTR_RW(use_adisc, 0, 0, 1, "Use ADISC on rediscovery to authenticate FCP devices"); /* +# lpfc_first_burst_size: First burst size to use on the NPorts +# that support first burst. +# Value range is [0,65536]. Default value is 0. +*/ +LPFC_VPORT_ATTR_RW(first_burst_size, 0, 0, 65536, + "First burst size for Targets that support first burst"); + +/* # lpfc_max_scsicmpl_time: Use scsi command completion time to control I/O queue # depth. Default value is 0. When the value of this parameter is zero the # SCSI command completion time is not used for controlling I/O queue depth. When @@ -4112,25 +4121,6 @@ LPFC_ATTR_R(use_msi, 2, 0, 2, "Use Message Signaled Interrupts (1) or " "MSI-X (2), if possible"); /* -# lpfc_fcp_wq_count: Set the number of fast-path FCP work queues -# This parameter is ignored and will eventually be depricated -# -# Value range is [1,7]. Default value is 4. -*/ -LPFC_ATTR_R(fcp_wq_count, LPFC_FCP_IO_CHAN_DEF, LPFC_FCP_IO_CHAN_MIN, - LPFC_FCP_IO_CHAN_MAX, - "Set the number of fast-path FCP work queues, if possible"); - -/* -# lpfc_fcp_eq_count: Set the number of FCP EQ/CQ/WQ IO channels -# -# Value range is [1,7]. Default value is 4. -*/ -LPFC_ATTR_R(fcp_eq_count, LPFC_FCP_IO_CHAN_DEF, LPFC_FCP_IO_CHAN_MIN, - LPFC_FCP_IO_CHAN_MAX, - "Set the number of fast-path FCP event queues, if possible"); - -/* # lpfc_fcp_io_channel: Set the number of FCP EQ/CQ/WQ IO channels # # Value range is [1,7]. Default value is 4. 
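The new lun_queue_depth policy above widens the accepted range to [1,512] but caps the effective value at one eighth of the port's exchange (XRI) resources; the clamp itself lands in lpfc_config_port_post() and lpfc_sli4_read_config() further down, each logging when it overrides the configured value. A sketch of the policy with illustrative types and log text:

#include <stdio.h>

static unsigned int clamp_lun_qdepth(unsigned int requested,
				     unsigned int max_xri)
{
	unsigned int limit = max_xri >> 3;	/* 1/8th of the exchanges */

	if (requested > limit) {
		fprintf(stderr, "LUN queue depth changed from %u to %u\n",
			requested, limit);
		return limit;
	}
	return requested;
}

Deriving the limit from max_xri rather than a fixed constant keeps a small HBA from being oversubscribed by a tuning value meant for a larger one.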
@@ -4276,6 +4266,7 @@ struct device_attribute *lpfc_hba_attrs[] = { &dev_attr_lpfc_devloss_tmo, &dev_attr_lpfc_fcp_class, &dev_attr_lpfc_use_adisc, + &dev_attr_lpfc_first_burst_size, &dev_attr_lpfc_ack0, &dev_attr_lpfc_topology, &dev_attr_lpfc_scan_down, @@ -4307,8 +4298,6 @@ struct device_attribute *lpfc_hba_attrs[] = { &dev_attr_lpfc_use_msi, &dev_attr_lpfc_fcp_imax, &dev_attr_lpfc_fcp_cpu_map, - &dev_attr_lpfc_fcp_wq_count, - &dev_attr_lpfc_fcp_eq_count, &dev_attr_lpfc_fcp_io_channel, &dev_attr_lpfc_enable_bg, &dev_attr_lpfc_soft_wwnn, @@ -4352,6 +4341,7 @@ struct device_attribute *lpfc_vport_attrs[] = { &dev_attr_lpfc_restrict_login, &dev_attr_lpfc_fcp_class, &dev_attr_lpfc_use_adisc, + &dev_attr_lpfc_first_burst_size, &dev_attr_lpfc_fdmi_on, &dev_attr_lpfc_max_luns, &dev_attr_nport_evt_cnt, @@ -5290,8 +5280,6 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) lpfc_use_msi_init(phba, lpfc_use_msi); lpfc_fcp_imax_init(phba, lpfc_fcp_imax); lpfc_fcp_cpu_map_init(phba, lpfc_fcp_cpu_map); - lpfc_fcp_wq_count_init(phba, lpfc_fcp_wq_count); - lpfc_fcp_eq_count_init(phba, lpfc_fcp_eq_count); lpfc_fcp_io_channel_init(phba, lpfc_fcp_io_channel); lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset); lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat); @@ -5331,6 +5319,7 @@ lpfc_get_vport_cfgparam(struct lpfc_vport *vport) lpfc_restrict_login_init(vport, lpfc_restrict_login); lpfc_fcp_class_init(vport, lpfc_fcp_class); lpfc_use_adisc_init(vport, lpfc_use_adisc); + lpfc_first_burst_size_init(vport, lpfc_first_burst_size); lpfc_max_scsicmpl_time_init(vport, lpfc_max_scsicmpl_time); lpfc_fdmi_on_init(vport, lpfc_fdmi_on); lpfc_discovery_threads_init(vport, lpfc_discovery_threads); diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c index 6630520d295c..bc270639c1c3 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.c +++ b/drivers/scsi/lpfc/lpfc_bsg.c @@ -2498,7 +2498,7 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi, struct lpfc_sli_ct_request *ctreq = NULL; int ret_val = 0; int time_left; - int iocb_stat = 0; + int iocb_stat = IOCB_SUCCESS; unsigned long flags; *txxri = 0; @@ -2574,6 +2574,7 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi, cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC; cmdiocbq->vport = phba->pport; + cmdiocbq->iocb_cmpl = NULL; iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq, rspiocbq, @@ -2963,7 +2964,7 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job) uint8_t *ptr = NULL, *rx_databuf = NULL; int rc = 0; int time_left; - int iocb_stat; + int iocb_stat = IOCB_SUCCESS; unsigned long flags; void *dataout = NULL; uint32_t total_mem; @@ -3149,6 +3150,7 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job) } cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC; cmdiocbq->vport = phba->pport; + cmdiocbq->iocb_cmpl = NULL; iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq, rspiocbq, (phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT); @@ -3209,7 +3211,7 @@ err_loopback_test_exit: lpfc_bsg_event_unref(evt); /* delete */ spin_unlock_irqrestore(&phba->ct_ev_lock, flags); - if (cmdiocbq != NULL) + if ((cmdiocbq != NULL) && (iocb_stat != IOCB_TIMEDOUT)) lpfc_sli_release_iocbq(phba, cmdiocbq); if (rspiocbq != NULL) diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index 68391177432b..02e8cd923d0a 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -895,7 +895,7 @@ lpfc_cmpl_ct(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, if (irsp->ulpStatus) { lpfc_printf_vlog(vport, 
KERN_ERR, LOG_DISCOVERY, - "0268 NS cmd %x Error (%d %d)\n", + "0268 NS cmd x%x Error (x%x x%x)\n", cmdcode, irsp->ulpStatus, irsp->un.ulpWord[4]); if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h index af49fb03dbb8..e409ba5f728c 100644 --- a/drivers/scsi/lpfc/lpfc_disc.h +++ b/drivers/scsi/lpfc/lpfc_disc.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2008 Emulex. All rights reserved. * + * Copyright (C) 2004-2013 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * * @@ -154,6 +154,7 @@ struct lpfc_node_rrq { #define NLP_NODEV_REMOVE 0x08000000 /* Defer removal till discovery ends */ #define NLP_TARGET_REMOVE 0x10000000 /* Target remove in process */ #define NLP_SC_REQ 0x20000000 /* Target requires authentication */ +#define NLP_FIRSTBURST 0x40000000 /* Target supports FirstBurst */ #define NLP_RPI_REGISTERED 0x80000000 /* nlp_rpi is valid */ /* ndlp usage management macros */ diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 6b8ee7449f16..110445f0c58d 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -2122,6 +2122,8 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, } npr->estabImagePair = 1; npr->readXferRdyDis = 1; + if (vport->cfg_first_burst_size) + npr->writeXferRdyDis = 1; /* For FCP support */ npr->prliType = PRLI_FCP_TYPE; diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index 4ec3d7c044c2..086c3f28caa6 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h @@ -234,6 +234,9 @@ struct ulp_bde64 { uint32_t addrHigh; }; +/* Maximun size of immediate data that can fit into a 128 byte WQE */ +#define LPFC_MAX_BDE_IMM_SIZE 64 + struct lpfc_sli4_flags { uint32_t word0; #define lpfc_idx_rsrc_rdy_SHIFT 0 @@ -2585,6 +2588,9 @@ struct lpfc_sli4_parameters { #define cfg_mqv_WORD word6 uint32_t word7; uint32_t word8; +#define cfg_wqsize_SHIFT 8 +#define cfg_wqsize_MASK 0x0000000f +#define cfg_wqsize_WORD word8 #define cfg_wqv_SHIFT 14 #define cfg_wqv_MASK 0x00000003 #define cfg_wqv_WORD word8 @@ -3622,6 +3628,13 @@ union lpfc_wqe { struct gen_req64_wqe gen_req; }; +union lpfc_wqe128 { + uint32_t words[32]; + struct lpfc_wqe_generic generic; + struct xmit_seq64_wqe xmit_sequence; + struct gen_req64_wqe gen_req; +}; + #define LPFC_GROUP_OJECT_MAGIC_NUM 0xfeaa0001 #define LPFC_FILE_TYPE_GROUP 0xf7 #define LPFC_FILE_ID_GROUP 0xa2 diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index e0b20fad8502..501147c4a147 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -472,10 +472,22 @@ lpfc_config_port_post(struct lpfc_hba *phba) lpfc_sli_read_link_ste(phba); /* Reset the DFT_HBA_Q_DEPTH to the max xri */ - if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1)) - phba->cfg_hba_queue_depth = - (mb->un.varRdConfig.max_xri + 1) - - lpfc_sli4_get_els_iocb_cnt(phba); + i = (mb->un.varRdConfig.max_xri + 1); + if (phba->cfg_hba_queue_depth > i) { + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, + "3359 HBA queue depth changed from %d to %d\n", + phba->cfg_hba_queue_depth, i); + phba->cfg_hba_queue_depth = i; + } + + /* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */ + i = (mb->un.varRdConfig.max_xri >> 3); + if (phba->pport->cfg_lun_queue_depth > 
i) { + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, + "3360 LUN queue depth changed from %d to %d\n", + phba->pport->cfg_lun_queue_depth, i); + phba->pport->cfg_lun_queue_depth = i; + } phba->lmt = mb->un.varRdConfig.lmt; @@ -4901,9 +4913,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) lpfc_get_cfgparam(phba); phba->max_vpi = LPFC_MAX_VPI; - /* Eventually cfg_fcp_eq_count / cfg_fcp_wq_count will be depricated */ - phba->cfg_fcp_io_channel = phba->cfg_fcp_eq_count; - /* This will be set to correct value after the read_config mbox */ phba->max_vports = 0; @@ -6664,12 +6673,14 @@ lpfc_sli4_read_config(struct lpfc_hba *phba) goto read_cfg_out; /* Reset the DFT_HBA_Q_DEPTH to the max xri */ - if (phba->cfg_hba_queue_depth > - (phba->sli4_hba.max_cfg_param.max_xri - - lpfc_sli4_get_els_iocb_cnt(phba))) - phba->cfg_hba_queue_depth = - phba->sli4_hba.max_cfg_param.max_xri - - lpfc_sli4_get_els_iocb_cnt(phba); + length = phba->sli4_hba.max_cfg_param.max_xri - + lpfc_sli4_get_els_iocb_cnt(phba); + if (phba->cfg_hba_queue_depth > length) { + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, + "3361 HBA queue depth changed from %d to %d\n", + phba->cfg_hba_queue_depth, length); + phba->cfg_hba_queue_depth = length; + } if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != LPFC_SLI_INTF_IF_TYPE_2) @@ -6859,11 +6870,7 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba) cfg_fcp_io_channel = phba->sli4_hba.max_cfg_param.max_eq; } - /* Eventually cfg_fcp_eq_count / cfg_fcp_wq_count will be depricated */ - /* The actual number of FCP event queues adopted */ - phba->cfg_fcp_eq_count = cfg_fcp_io_channel; - phba->cfg_fcp_wq_count = cfg_fcp_io_channel; phba->cfg_fcp_io_channel = cfg_fcp_io_channel; /* Get EQ depth from module parameter, fake the default for now */ @@ -9154,6 +9161,7 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters); sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters); sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters); + sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters); sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt, mbx_sli4_parameters); sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align, diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c index b1c510f6b8f0..1f292e29d566 100644 --- a/drivers/scsi/lpfc/lpfc_mbox.c +++ b/drivers/scsi/lpfc/lpfc_mbox.c @@ -178,7 +178,8 @@ lpfc_dump_wakeup_param(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) mb->mbxOwner = OWN_HOST; mb->un.varDmp.cv = 1; mb->un.varDmp.type = DMP_NV_PARAMS; - mb->un.varDmp.entry_index = 0; + if (phba->sli_rev < LPFC_SLI_REV4) + mb->un.varDmp.entry_index = 0; mb->un.varDmp.region_id = WAKE_UP_PARMS_REGION_ID; mb->un.varDmp.word_cnt = WAKE_UP_PARMS_WORD_SIZE; mb->un.varDmp.co = 0; @@ -361,7 +362,7 @@ lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) /* NEW_FEATURE * SLI-2, Coalescing Response Feature. 
*/ - if (phba->cfg_cr_delay) { + if (phba->cfg_cr_delay && (phba->sli_rev < LPFC_SLI_REV4)) { mb->un.varCfgLnk.cr = 1; mb->un.varCfgLnk.ci = 1; mb->un.varCfgLnk.cr_delay = phba->cfg_cr_delay; @@ -377,7 +378,7 @@ lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) mb->un.varCfgLnk.crtov = phba->fc_crtov; mb->un.varCfgLnk.citov = phba->fc_citov; - if (phba->cfg_ack0) + if (phba->cfg_ack0 && (phba->sli_rev < LPFC_SLI_REV4)) mb->un.varCfgLnk.ack0_enable = 1; mb->mbxCommand = MBX_CONFIG_LINK; diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index 6aaf39a1f1c5..abc361259d6d 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -690,11 +690,15 @@ lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR); ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; + ndlp->nlp_flag &= ~NLP_FIRSTBURST; if (npr->prliType == PRLI_FCP_TYPE) { if (npr->initiatorFunc) ndlp->nlp_type |= NLP_FCP_INITIATOR; - if (npr->targetFunc) + if (npr->targetFunc) { ndlp->nlp_type |= NLP_FCP_TARGET; + if (npr->writeXferRdyDis) + ndlp->nlp_flag |= NLP_FIRSTBURST; + } if (npr->Retry) ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE; } @@ -1676,12 +1680,16 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, /* Check out PRLI rsp */ ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR); ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; + ndlp->nlp_flag &= ~NLP_FIRSTBURST; if ((npr->acceptRspCode == PRLI_REQ_EXECUTED) && (npr->prliType == PRLI_FCP_TYPE)) { if (npr->initiatorFunc) ndlp->nlp_type |= NLP_FCP_INITIATOR; - if (npr->targetFunc) + if (npr->targetFunc) { ndlp->nlp_type |= NLP_FCP_TARGET; + if (npr->writeXferRdyDis) + ndlp->nlp_flag |= NLP_FIRSTBURST; + } if (npr->Retry) ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE; } diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index 243de1d324b7..1242b6c4308b 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -4386,11 +4386,11 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, if (scsi_sg_count(scsi_cmnd)) { if (datadir == DMA_TO_DEVICE) { iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR; - if (sli4) - iocb_cmd->ulpPU = PARM_READ_CHECK; - else { - iocb_cmd->un.fcpi.fcpi_parm = 0; - iocb_cmd->ulpPU = 0; + iocb_cmd->ulpPU = PARM_READ_CHECK; + if (vport->cfg_first_burst_size && + (pnode->nlp_flag & NLP_FIRSTBURST)) { + piocbq->iocb.un.fcpi.fcpi_XRdy = + vport->cfg_first_burst_size; } fcp_cmnd->fcpCntl3 = WRITE_DATA; phba->fc4OutputRequests++; @@ -5022,6 +5022,7 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata, lpfc_release_scsi_buf(phba, lpfc_cmd); return FAILED; } + iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl; lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, "0702 Issue %s to TGT %d LUN %d " @@ -5034,7 +5035,6 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata, iocbq, iocbqrsp, lpfc_cmd->timeout); if (status != IOCB_SUCCESS) { if (status == IOCB_TIMEDOUT) { - iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl; ret = TIMEOUT_ERROR; } else ret = FAILED; diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 43440ca16f46..0392e114531c 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -6163,6 +6163,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) kfree(vpd); goto out_free_mbox; } + mqe = &mboxq->u.mqe; phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev); if 
(bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) @@ -6249,6 +6250,16 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow, phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow); + /* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */ + rc = (phba->sli4_hba.max_cfg_param.max_xri >> 3); + if (phba->pport->cfg_lun_queue_depth > rc) { + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, + "3362 LUN queue depth changed from %d to %d\n", + phba->pport->cfg_lun_queue_depth, rc); + phba->pport->cfg_lun_queue_depth = rc; + } + + /* * Discover the port's supported feature set and match it against the * hosts requests. @@ -9889,6 +9900,24 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd; spin_lock_irqsave(&phba->hbalock, iflags); + if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) { + + /* + * A time out has occurred for the iocb. If a time out + * completion handler has been supplied, call it. Otherwise, + * just free the iocbq. + */ + + spin_unlock_irqrestore(&phba->hbalock, iflags); + cmdiocbq->iocb_cmpl = cmdiocbq->wait_iocb_cmpl; + cmdiocbq->wait_iocb_cmpl = NULL; + if (cmdiocbq->iocb_cmpl) + (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, NULL); + else + lpfc_sli_release_iocbq(phba, cmdiocbq); + return; + } + cmdiocbq->iocb_flag |= LPFC_IO_WAKE; if (cmdiocbq->context2 && rspiocbq) memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb, @@ -9944,10 +9973,16 @@ lpfc_chk_iocb_flg(struct lpfc_hba *phba, * @timeout: Timeout in number of seconds. * * This function issues the iocb to firmware and waits for the - * iocb to complete. If the iocb command is not - * completed within timeout seconds, it returns IOCB_TIMEDOUT. - * Caller should not free the iocb resources if this function - * returns IOCB_TIMEDOUT. + * iocb to complete. The iocb_cmpl field of the shall be used + * to handle iocbs which time out. If the field is NULL, the + * function shall free the iocbq structure. If more clean up is + * needed, the caller is expected to provide a completion function + * that will provide the needed clean up. If the iocb command is + * not completed within timeout seconds, the function will either + * free the iocbq structure (if iocb_cmpl == NULL) or execute the + * completion function set in the iocb_cmpl field and then return + * a status of IOCB_TIMEDOUT. The caller should not free the iocb + * resources if this function returns IOCB_TIMEDOUT. * The function waits for the iocb completion using an * non-interruptible wait. * This function will sleep while waiting for iocb completion. @@ -9980,6 +10015,9 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, int txq_cnt = 0; int txcmplq_cnt = 0; struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; + unsigned long iflags; + bool iocb_completed = true; + /* * If the caller has provided a response iocbq buffer, then context2 * is NULL or its an error. 
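The lpfc_sli_wake_iocb_wait()/lpfc_sli_issue_iocb_wait() changes around this point close a classic timeout-versus-completion race: when the wait expires, the waiter sets LPFC_IO_WAKE_TMO under phba->hbalock only if LPFC_IO_WAKE is still clear, and the wake handler checks that flag first, running the saved wait_iocb_cmpl or freeing the iocbq itself. Note the driver overloads one bit (LPFC_IO_WAKE_TMO is defined as LPFC_IO_WAKE); the sketch below uses two booleans for readability and a pthread mutex in place of the hbalock:

#include <pthread.h>
#include <stdbool.h>

struct sync_io {
	pthread_mutex_t lock;
	bool completed;		/* cf. LPFC_IO_WAKE */
	bool timed_out;		/* cf. LPFC_IO_WAKE_TMO */
};

/* Completion side: if the waiter already gave up, clean up here. */
static void io_complete(struct sync_io *io)
{
	pthread_mutex_lock(&io->lock);
	if (io->timed_out) {
		pthread_mutex_unlock(&io->lock);
		/* free the request; nobody is waiting any more */
		return;
	}
	io->completed = true;
	pthread_mutex_unlock(&io->lock);
	/* wake the waiter */
}

/* Waiter side, after the wait expires: claim the timeout atomically. */
static bool claim_timeout(struct sync_io *io)
{
	bool timed_out = false;

	pthread_mutex_lock(&io->lock);
	if (!io->completed) {
		io->timed_out = true;	/* completion will clean up */
		timed_out = true;
	}
	pthread_mutex_unlock(&io->lock);
	return timed_out;
}

Whichever side takes the lock first wins; the loser sees the other's flag and either skips the wake-up or inherits the cleanup, so the iocbq is freed exactly once.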
@@ -9990,9 +10028,10 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, piocb->context2 = prspiocbq; } + piocb->wait_iocb_cmpl = piocb->iocb_cmpl; piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait; piocb->context_un.wait_queue = &done_q; - piocb->iocb_flag &= ~LPFC_IO_WAKE; + piocb->iocb_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO); if (phba->cfg_poll & DISABLE_FCP_RING_INT) { if (lpfc_readl(phba->HCregaddr, &creg_val)) @@ -10009,8 +10048,19 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, timeleft = wait_event_timeout(done_q, lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE), timeout_req); + spin_lock_irqsave(&phba->hbalock, iflags); + if (!(piocb->iocb_flag & LPFC_IO_WAKE)) { - if (piocb->iocb_flag & LPFC_IO_WAKE) { + /* + * IOCB timed out. Inform the wake iocb wait + * completion function and set local status + */ + + iocb_completed = false; + piocb->iocb_flag |= LPFC_IO_WAKE_TMO; + } + spin_unlock_irqrestore(&phba->hbalock, iflags); + if (iocb_completed) { lpfc_printf_log(phba, KERN_INFO, LOG_SLI, "0331 IOCB wake signaled\n"); } else if (timeleft == 0) { @@ -10122,7 +10172,6 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq, */ if (pmboxq->mbox_flag & LPFC_MBX_WAKE) { retval = MBX_SUCCESS; - lpfc_sli4_swap_str(phba, pmboxq); } else { retval = MBX_TIMEOUT; pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; @@ -12820,10 +12869,44 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, wq->page_count); bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request, cq->queue_id); + + /* wqv is the earliest version supported, NOT the latest */ bf_set(lpfc_mbox_hdr_version, &shdr->request, phba->sli4_hba.pc_sli4_params.wqv); - if (phba->sli4_hba.pc_sli4_params.wqv == LPFC_Q_CREATE_VERSION_1) { + switch (phba->sli4_hba.pc_sli4_params.wqv) { + case LPFC_Q_CREATE_VERSION_0: + switch (wq->entry_size) { + default: + case 64: + /* Nothing to do, version 0 ONLY supports 64 byte */ + page = wq_create->u.request.page; + break; + case 128: + if (!(phba->sli4_hba.pc_sli4_params.wqsize & + LPFC_WQ_SZ128_SUPPORT)) { + status = -ERANGE; + goto out; + } + /* If we get here the HBA MUST also support V1 and + * we MUST use it + */ + bf_set(lpfc_mbox_hdr_version, &shdr->request, + LPFC_Q_CREATE_VERSION_1); + + bf_set(lpfc_mbx_wq_create_wqe_count, + &wq_create->u.request_1, wq->entry_count); + bf_set(lpfc_mbx_wq_create_wqe_size, + &wq_create->u.request_1, + LPFC_WQ_WQE_SIZE_128); + bf_set(lpfc_mbx_wq_create_page_size, + &wq_create->u.request_1, + (PAGE_SIZE/SLI4_PAGE_SIZE)); + page = wq_create->u.request_1.page; + break; + } + break; + case LPFC_Q_CREATE_VERSION_1: bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1, wq->entry_count); switch (wq->entry_size) { @@ -12834,6 +12917,11 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, LPFC_WQ_WQE_SIZE_64); break; case 128: + if (!(phba->sli4_hba.pc_sli4_params.wqsize & + LPFC_WQ_SZ128_SUPPORT)) { + status = -ERANGE; + goto out; + } bf_set(lpfc_mbx_wq_create_wqe_size, &wq_create->u.request_1, LPFC_WQ_WQE_SIZE_128); @@ -12842,9 +12930,12 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, bf_set(lpfc_mbx_wq_create_page_size, &wq_create->u.request_1, (PAGE_SIZE/SLI4_PAGE_SIZE)); page = wq_create->u.request_1.page; - } else { - page = wq_create->u.request.page; + break; + default: + status = -ERANGE; + goto out; } + list_for_each_entry(dmabuf, &wq->page_list, list) { memset(dmabuf->virt, 0, hw_page_size); page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys); @@ -14665,14 +14756,20 @@ lpfc_prep_seq(struct lpfc_vport 
*vport, struct hbq_dmabuf *seq_dmabuf) first_iocbq->iocb.unsli3.rcvsli3.vpi = vport->phba->vpi_ids[vport->vpi]; /* put the first buffer into the first IOCBq */ + tot_len = bf_get(lpfc_rcqe_length, + &seq_dmabuf->cq_event.cqe.rcqe_cmpl); + first_iocbq->context2 = &seq_dmabuf->dbuf; first_iocbq->context3 = NULL; first_iocbq->iocb.ulpBdeCount = 1; - first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = + if (tot_len > LPFC_DATA_BUF_SIZE) + first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = LPFC_DATA_BUF_SIZE; + else + first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = tot_len; + first_iocbq->iocb.un.rcvels.remoteID = sid; - tot_len = bf_get(lpfc_rcqe_length, - &seq_dmabuf->cq_event.cqe.rcqe_cmpl); + first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len; } iocbq = first_iocbq; @@ -14688,14 +14785,17 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) if (!iocbq->context3) { iocbq->context3 = d_buf; iocbq->iocb.ulpBdeCount++; - pbde = (struct ulp_bde64 *) - &iocbq->iocb.unsli3.sli3Words[4]; - pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE; - /* We need to get the size out of the right CQE */ hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); len = bf_get(lpfc_rcqe_length, &hbq_buf->cq_event.cqe.rcqe_cmpl); + pbde = (struct ulp_bde64 *) + &iocbq->iocb.unsli3.sli3Words[4]; + if (len > LPFC_DATA_BUF_SIZE) + pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE; + else + pbde->tus.f.bdeSize = len; + iocbq->iocb.unsli3.rcvsli3.acc_len += len; tot_len += len; } else { @@ -14710,16 +14810,19 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) lpfc_in_buf_free(vport->phba, d_buf); continue; } + /* We need to get the size out of the right CQE */ + hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); + len = bf_get(lpfc_rcqe_length, + &hbq_buf->cq_event.cqe.rcqe_cmpl); iocbq->context2 = d_buf; iocbq->context3 = NULL; iocbq->iocb.ulpBdeCount = 1; - iocbq->iocb.un.cont64[0].tus.f.bdeSize = + if (len > LPFC_DATA_BUF_SIZE) + iocbq->iocb.un.cont64[0].tus.f.bdeSize = LPFC_DATA_BUF_SIZE; + else + iocbq->iocb.un.cont64[0].tus.f.bdeSize = len; - /* We need to get the size out of the right CQE */ - hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); - len = bf_get(lpfc_rcqe_length, - &hbq_buf->cq_event.cqe.rcqe_cmpl); tot_len += len; iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len; diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h index 9d2e0c6fe334..97617996206d 100644 --- a/drivers/scsi/lpfc/lpfc_sli.h +++ b/drivers/scsi/lpfc/lpfc_sli.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2007 Emulex. All rights reserved. * + * Copyright (C) 2004-2013 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
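lpfc_prep_seq() above now takes every buffer's length from the receive CQE and caps each BDE at LPFC_DATA_BUF_SIZE while accumulating the real total in acc_len, instead of advertising a full buffer per descriptor. A small sketch of the per-buffer clamp; the buffer size and names are illustrative:

#include <stdint.h>

#define DATA_BUF_SIZE 2048u	/* stand-in for LPFC_DATA_BUF_SIZE */

/* Fill per-buffer descriptor sizes and return the accumulated length. */
static uint32_t fill_bdes(const uint32_t *cqe_len, int nbufs,
			  uint32_t *bde_size)
{
	uint32_t acc = 0;

	for (int i = 0; i < nbufs; i++) {
		bde_size[i] = cqe_len[i] > DATA_BUF_SIZE
			      ? DATA_BUF_SIZE : cqe_len[i];
		acc += cqe_len[i];
	}
	return acc;
}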
* * www.emulex.com * * * @@ -60,7 +60,8 @@ struct lpfc_iocbq { uint8_t retry; /* retry counter for IOCB cmd - if needed */ uint16_t iocb_flag; #define LPFC_IO_LIBDFC 1 /* libdfc iocb */ -#define LPFC_IO_WAKE 2 /* High Priority Queue signal flag */ +#define LPFC_IO_WAKE 2 /* Synchronous I/O completed */ +#define LPFC_IO_WAKE_TMO LPFC_IO_WAKE /* Synchronous I/O timed out */ #define LPFC_IO_FCP 4 /* FCP command -- iocbq in scsi_buf */ #define LPFC_DRIVER_ABORTED 8 /* driver aborted this request */ #define LPFC_IO_FABRIC 0x10 /* Iocb send using fabric scheduler */ @@ -93,6 +94,8 @@ struct lpfc_iocbq { void (*fabric_iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *, struct lpfc_iocbq *); + void (*wait_iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *, + struct lpfc_iocbq *); void (*iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *, struct lpfc_iocbq *); }; diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index d710b87a4417..5bcc38223ac9 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h @@ -117,6 +117,7 @@ union sli4_qe { struct lpfc_rcqe_complete *rcqe_complete; struct lpfc_mqe *mqe; union lpfc_wqe *wqe; + union lpfc_wqe128 *wqe128; struct lpfc_rqe *rqe; }; @@ -325,12 +326,14 @@ struct lpfc_bmbx { #define LPFC_EQE_SIZE_16B 16 #define LPFC_CQE_SIZE 16 #define LPFC_WQE_SIZE 64 +#define LPFC_WQE128_SIZE 128 #define LPFC_MQE_SIZE 256 #define LPFC_RQE_SIZE 8 #define LPFC_EQE_DEF_COUNT 1024 #define LPFC_CQE_DEF_COUNT 1024 #define LPFC_WQE_DEF_COUNT 256 +#define LPFC_WQE128_DEF_COUNT 128 #define LPFC_MQE_DEF_COUNT 16 #define LPFC_RQE_DEF_COUNT 512 @@ -416,6 +419,9 @@ struct lpfc_pc_sli4_params { uint8_t mqv; uint8_t wqv; uint8_t rqv; + uint8_t wqsize; +#define LPFC_WQ_SZ64_SUPPORT 1 +#define LPFC_WQ_SZ128_SUPPORT 2 }; struct lpfc_iov { diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index c6c32eebf3dd..21859d2006ce 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -18,7 +18,7 @@ * included with this package. * *******************************************************************/ -#define LPFC_DRIVER_VERSION "8.3.40" +#define LPFC_DRIVER_VERSION "8.3.41" #define LPFC_DRIVER_NAME "lpfc" /* Used for SLI 2/3 */ diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c index e28e431564b0..a87ee33f4f2a 100644 --- a/drivers/scsi/lpfc/lpfc_vport.c +++ b/drivers/scsi/lpfc/lpfc_vport.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2008 Emulex. All rights reserved. * + * Copyright (C) 2004-2013 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -387,6 +387,9 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable) /* Create binary sysfs attribute for vport */ lpfc_alloc_sysfs_attr(vport); + /* Set the DFT_LUN_Q_DEPTH accordingly */ + vport->cfg_lun_queue_depth = phba->pport->cfg_lun_queue_depth; + *(struct lpfc_vport **)fc_vport->dd_data = vport; vport->fc_vport = fc_vport; diff --git a/drivers/scsi/mpt2sas/mpi/mpi2.h b/drivers/scsi/mpt2sas/mpi/mpi2.h index 31b5b15a4726..7b14a015c903 100644 --- a/drivers/scsi/mpt2sas/mpi/mpi2.h +++ b/drivers/scsi/mpt2sas/mpi/mpi2.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2012 LSI Corporation. + * Copyright (c) 2000-2013 LSI Corporation. 
* * * Name: mpi2.h @@ -8,7 +8,7 @@ * scatter/gather formats. * Creation Date: June 21, 2006 * - * mpi2.h Version: 02.00.27 + * mpi2.h Version: 02.00.28 * * Version History * --------------- @@ -77,6 +77,7 @@ * Added Hard Reset delay timings. * 07-10-12 02.00.26 Bumped MPI2_HEADER_VERSION_UNIT. * 07-26-12 02.00.27 Bumped MPI2_HEADER_VERSION_UNIT. + * 11-27-12 02.00.28 Bumped MPI2_HEADER_VERSION_UNIT. * -------------------------------------------------------------------------- */ @@ -102,7 +103,7 @@ #define MPI2_VERSION_02_00 (0x0200) /* versioning for this MPI header set */ -#define MPI2_HEADER_VERSION_UNIT (0x1B) +#define MPI2_HEADER_VERSION_UNIT (0x1C) #define MPI2_HEADER_VERSION_DEV (0x00) #define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00) #define MPI2_HEADER_VERSION_UNIT_SHIFT (8) diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h index 737fa8cfb54a..88cb7f828bbd 100644 --- a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h +++ b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h @@ -1,12 +1,12 @@ /* - * Copyright (c) 2000-2011 LSI Corporation. + * Copyright (c) 2000-2013 LSI Corporation. * * * Name: mpi2_cnfg.h * Title: MPI Configuration messages and pages * Creation Date: November 10, 2006 * - * mpi2_cnfg.h Version: 02.00.22 + * mpi2_cnfg.h Version: 02.00.23 * * Version History * --------------- @@ -149,6 +149,8 @@ * 11-18-11 02.00.22 Added define MPI2_IOCPAGE6_CAP_FLAGS_4K_SECTORS_SUPPORT. * Added UEFIVersion field to BIOS Page 1 and defined new * BiosOptions bits. + * 11-27-12 02.00.23 Added MPI2_MANPAGE7_FLAG_EVENTREPLAY_SLOT_ORDER. + * Added MPI2_BIOSPAGE1_OPTIONS_MASK_OEM_ID. * -------------------------------------------------------------------------- */ @@ -698,6 +700,7 @@ typedef struct _MPI2_CONFIG_PAGE_MAN_7 #define MPI2_MANUFACTURING7_PAGEVERSION (0x01) /* defines for the Flags field */ +#define MPI2_MANPAGE7_FLAG_EVENTREPLAY_SLOT_ORDER (0x00000002) #define MPI2_MANPAGE7_FLAG_USE_SLOT_INFO (0x00000001) @@ -1224,6 +1227,9 @@ typedef struct _MPI2_CONFIG_PAGE_BIOS_1 #define MPI2_BIOSPAGE1_PAGEVERSION (0x05) /* values for BIOS Page 1 BiosOptions field */ +#define MPI2_BIOSPAGE1_OPTIONS_MASK_OEM_ID (0x000000F0) +#define MPI2_BIOSPAGE1_OPTIONS_LSI_OEM_ID (0x00000000) + #define MPI2_BIOSPAGE1_OPTIONS_MASK_UEFI_HII_REGISTRATION (0x00000006) #define MPI2_BIOSPAGE1_OPTIONS_ENABLE_UEFI_HII (0x00000000) #define MPI2_BIOSPAGE1_OPTIONS_DISABLE_UEFI_HII (0x00000002) diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_init.h b/drivers/scsi/mpt2sas/mpi/mpi2_init.h index 963761fb8462..9d284dae6553 100644 --- a/drivers/scsi/mpt2sas/mpi/mpi2_init.h +++ b/drivers/scsi/mpt2sas/mpi/mpi2_init.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2012 LSI Corporation. + * Copyright (c) 2000-2013 LSI Corporation. * * * Name: mpi2_init.h diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h index e93f8f53adf9..d159c5f24aab 100644 --- a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h +++ b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2012 LSI Corporation. + * Copyright (c) 2000-2013 LSI Corporation. * * * Name: mpi2_ioc.h diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_raid.h b/drivers/scsi/mpt2sas/mpi/mpi2_raid.h index 255b0ca219a4..0d202a2c6db7 100644 --- a/drivers/scsi/mpt2sas/mpi/mpi2_raid.h +++ b/drivers/scsi/mpt2sas/mpi/mpi2_raid.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2012 LSI Corporation. + * Copyright (c) 2000-2013 LSI Corporation. 
* * * Name: mpi2_raid.h diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_sas.h b/drivers/scsi/mpt2sas/mpi/mpi2_sas.h index fdffde1ebc0f..50b39ccd526a 100644 --- a/drivers/scsi/mpt2sas/mpi/mpi2_sas.h +++ b/drivers/scsi/mpt2sas/mpi/mpi2_sas.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2010 LSI Corporation. + * Copyright (c) 2000-2013 LSI Corporation. * * * Name: mpi2_sas.h diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_tool.h b/drivers/scsi/mpt2sas/mpi/mpi2_tool.h index 67c387f10e59..11b2ac4e7c6e 100644 --- a/drivers/scsi/mpt2sas/mpi/mpi2_tool.h +++ b/drivers/scsi/mpt2sas/mpi/mpi2_tool.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2012 LSI Corporation. + * Copyright (c) 2000-2013 LSI Corporation. * * * Name: mpi2_tool.h diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_type.h b/drivers/scsi/mpt2sas/mpi/mpi2_type.h index cfde017bf16e..0b128b68a5ea 100644 --- a/drivers/scsi/mpt2sas/mpi/mpi2_type.h +++ b/drivers/scsi/mpt2sas/mpi/mpi2_type.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2007 LSI Corporation. + * Copyright (c) 2000-2013 LSI Corporation. * * * Name: mpi2_type.h diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c index ccd6d5a97ec3..3901edc35812 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.c +++ b/drivers/scsi/mpt2sas/mpt2sas_base.c @@ -3,7 +3,7 @@ * for access to MPT (Message Passing Technology) firmware. * * This code is based on drivers/scsi/mpt2sas/mpt2_base.c - * Copyright (C) 2007-2012 LSI Corporation + * Copyright (C) 2007-2013 LSI Corporation * (mailto:DL-MPTFusionLinux@lsi.com) * * This program is free software; you can redistribute it and/or @@ -768,10 +768,9 @@ mpt2sas_base_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, * @msix_index: MSIX table index supplied by the OS * @reply: reply message frame(lower 32bit addr) * - * Return 1 meaning mf should be freed from _base_interrupt - * 0 means the mf is freed from this function. + * Returns void. 
*/ -static u8 +static void _base_async_event(struct MPT2SAS_ADAPTER *ioc, u8 msix_index, u32 reply) { Mpi2EventNotificationReply_t *mpi_reply; @@ -780,9 +779,9 @@ _base_async_event(struct MPT2SAS_ADAPTER *ioc, u8 msix_index, u32 reply) mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply); if (!mpi_reply) - return 1; + return; if (mpi_reply->Function != MPI2_FUNCTION_EVENT_NOTIFICATION) - return 1; + return; #ifdef CONFIG_SCSI_MPT2SAS_LOGGING _base_display_event_data(ioc, mpi_reply); #endif @@ -812,7 +811,7 @@ _base_async_event(struct MPT2SAS_ADAPTER *ioc, u8 msix_index, u32 reply) /* ctl callback handler */ mpt2sas_ctl_event_callback(ioc, msix_index, reply); - return 1; + return; } /** @@ -1409,8 +1408,6 @@ _base_enable_msix(struct MPT2SAS_ADAPTER *ioc) int i; u8 try_msix = 0; - INIT_LIST_HEAD(&ioc->reply_queue_list); - if (msix_disable == -1 || msix_disable == 0) try_msix = 1; @@ -1489,6 +1486,7 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc) if (pci_enable_device_mem(pdev)) { printk(MPT2SAS_WARN_FMT "pci_enable_device_mem: " "failed\n", ioc->name); + ioc->bars = 0; return -ENODEV; } @@ -1497,6 +1495,7 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc) MPT2SAS_DRIVER_NAME)) { printk(MPT2SAS_WARN_FMT "pci_request_selected_regions: " "failed\n", ioc->name); + ioc->bars = 0; r = -ENODEV; goto out_fail; } @@ -4229,18 +4228,25 @@ mpt2sas_base_free_resources(struct MPT2SAS_ADAPTER *ioc) dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, __func__)); - _base_mask_interrupts(ioc); - ioc->shost_recovery = 1; - _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET); - ioc->shost_recovery = 0; + if (ioc->chip_phys && ioc->chip) { + _base_mask_interrupts(ioc); + ioc->shost_recovery = 1; + _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET); + ioc->shost_recovery = 0; + } + _base_free_irq(ioc); _base_disable_msix(ioc); - if (ioc->chip_phys) + + if (ioc->chip_phys && ioc->chip) iounmap(ioc->chip); ioc->chip_phys = 0; - pci_release_selected_regions(ioc->pdev, ioc->bars); - pci_disable_pcie_error_reporting(pdev); - pci_disable_device(pdev); + + if (pci_is_enabled(pdev)) { + pci_release_selected_regions(ioc->pdev, ioc->bars); + pci_disable_pcie_error_reporting(pdev); + pci_disable_device(pdev); + } return; } diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h index 6fbd08417773..1f2ac3a28621 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.h +++ b/drivers/scsi/mpt2sas/mpt2sas_base.h @@ -3,7 +3,7 @@ * for access to MPT (Message Passing Technology) firmware. 
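 *
 * (On the mpt2sas_base_free_resources() hunk above: the new tests make
 * teardown safe after a partial attach, touching MMIO only if it was
 * ever mapped and disabling the PCI device only if enable succeeded.
 * The shape of the pattern, using only calls that appear in the hunk:
 *
 *	if (ioc->chip_phys && ioc->chip)
 *		iounmap(ioc->chip);
 *	if (pci_is_enabled(pdev)) {
 *		pci_release_selected_regions(ioc->pdev, ioc->bars);
 *		pci_disable_pcie_error_reporting(pdev);
 *		pci_disable_device(pdev);
 *	}
 *
 * Zeroing ioc->bars on the map_resources() failure paths keeps the
 * region release a no-op for BARs that were never claimed.)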
* * This code is based on drivers/scsi/mpt2sas/mpt2_base.h - * Copyright (C) 2007-2012 LSI Corporation + * Copyright (C) 2007-2013 LSI Corporation * (mailto:DL-MPTFusionLinux@lsi.com) * * This program is free software; you can redistribute it and/or @@ -69,8 +69,8 @@ #define MPT2SAS_DRIVER_NAME "mpt2sas" #define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>" #define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver" -#define MPT2SAS_DRIVER_VERSION "15.100.00.00" -#define MPT2SAS_MAJOR_VERSION 15 +#define MPT2SAS_DRIVER_VERSION "16.100.00.00" +#define MPT2SAS_MAJOR_VERSION 16 #define MPT2SAS_MINOR_VERSION 100 #define MPT2SAS_BUILD_VERSION 00 #define MPT2SAS_RELEASE_VERSION 00 @@ -1061,7 +1061,7 @@ void mpt2sas_base_update_missing_delay(struct MPT2SAS_ADAPTER *ioc, int mpt2sas_port_enable(struct MPT2SAS_ADAPTER *ioc); /* scsih shared API */ -u8 mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index, +void mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index, u32 reply); int mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint channel, uint id, uint lun, u8 type, u16 smid_task, @@ -1144,7 +1144,7 @@ void mpt2sas_ctl_exit(void); u8 mpt2sas_ctl_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply); void mpt2sas_ctl_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase); -u8 mpt2sas_ctl_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index, +void mpt2sas_ctl_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index, u32 reply); void mpt2sas_ctl_add_to_event_log(struct MPT2SAS_ADAPTER *ioc, Mpi2EventNotificationReply_t *mpi_reply); diff --git a/drivers/scsi/mpt2sas/mpt2sas_config.c b/drivers/scsi/mpt2sas/mpt2sas_config.c index 863778071a9d..0c47425c73f2 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_config.c +++ b/drivers/scsi/mpt2sas/mpt2sas_config.c @@ -2,7 +2,7 @@ * This module provides common API for accessing firmware configuration pages * * This code is based on drivers/scsi/mpt2sas/mpt2_base.c - * Copyright (C) 2007-2012 LSI Corporation + * Copyright (C) 2007-2013 LSI Corporation * (mailto:DL-MPTFusionLinux@lsi.com) * * This program is free software; you can redistribute it and/or diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c index eec052c2670a..b7f887c9b0bf 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c +++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c @@ -3,7 +3,7 @@ * controllers * * This code is based on drivers/scsi/mpt2sas/mpt2_ctl.c - * Copyright (C) 2007-2012 LSI Corporation + * Copyright (C) 2007-2013 LSI Corporation * (mailto:DL-MPTFusionLinux@lsi.com) * * This program is free software; you can redistribute it and/or @@ -397,18 +397,22 @@ mpt2sas_ctl_add_to_event_log(struct MPT2SAS_ADAPTER *ioc, * This function merely adds a new work task into ioc->firmware_event_thread. * The tasks are worked from _firmware_event_work in user context. * - * Return 1 meaning mf should be freed from _base_interrupt - * 0 means the mf is freed from this function. + * Returns void. 
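 *
 * (The "new work task" wording is the standard workqueue deferral: the
 * callback runs in interrupt context, so it only captures the event and
 * queues it, and _firmware_event_work later drains it in process
 * context where sleeping is allowed. A generic sketch of the idiom --
 * the driver's own queueing uses the delayed-work flavor, so read this
 * as illustrative rather than the literal code:
 *
 *	INIT_WORK(&fw_event->work, _firmware_event_work);
 *	queue_work(ioc->firmware_event_thread, &fw_event->work);
 *
 * keeping the hard-IRQ path free of anything that can block.)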
*/ -u8 +void mpt2sas_ctl_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index, u32 reply) { Mpi2EventNotificationReply_t *mpi_reply; mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply); + if (unlikely(!mpi_reply)) { + printk(MPT2SAS_ERR_FMT "mpi_reply not valid at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return; + } mpt2sas_ctl_add_to_event_log(ioc, mpi_reply); - return 1; + return; } /** diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.h b/drivers/scsi/mpt2sas/mpt2sas_ctl.h index b5eb0d1b8ea6..8b2ac1869dcc 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_ctl.h +++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.h @@ -3,7 +3,7 @@ * controllers * * This code is based on drivers/scsi/mpt2sas/mpt2_ctl.h - * Copyright (C) 2007-2012 LSI Corporation + * Copyright (C) 2007-2013 LSI Corporation * (mailto:DL-MPTFusionLinux@lsi.com) * * This program is free software; you can redistribute it and/or diff --git a/drivers/scsi/mpt2sas/mpt2sas_debug.h b/drivers/scsi/mpt2sas/mpt2sas_debug.h index 69cc7d0c112c..a9021cbd6628 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_debug.h +++ b/drivers/scsi/mpt2sas/mpt2sas_debug.h @@ -2,7 +2,7 @@ * Logging Support for MPT (Message Passing Technology) based controllers * * This code is based on drivers/scsi/mpt2sas/mpt2_debug.c - * Copyright (C) 2007-2012 LSI Corporation + * Copyright (C) 2007-2013 LSI Corporation * (mailto:DL-MPTFusionLinux@lsi.com) * * This program is free software; you can redistribute it and/or diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c index 51004768d0f5..7f0af4fcc001 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c @@ -2,7 +2,7 @@ * Scsi Host Layer for MPT (Message Passing Technology) based controllers * * This code is based on drivers/scsi/mpt2sas/mpt2_scsih.c - * Copyright (C) 2007-2012 LSI Corporation + * Copyright (C) 2007-2013 LSI Corporation * (mailto:DL-MPTFusionLinux@lsi.com) * * This program is free software; you can redistribute it and/or @@ -628,11 +628,12 @@ _scsih_sas_device_add(struct MPT2SAS_ADAPTER *ioc, * devices while scanning is turned on due to an oops in * scsi_sysfs_add_sdev()->add_device()->sysfs_addrm_start() */ - if (!ioc->is_driver_loading) + if (!ioc->is_driver_loading) { mpt2sas_transport_port_remove(ioc, sas_device->sas_address, sas_device->sas_address_parent); - _scsih_sas_device_remove(ioc, sas_device); + _scsih_sas_device_remove(ioc, sas_device); + } } } @@ -1402,6 +1403,7 @@ _scsih_slave_alloc(struct scsi_device *sdev) struct MPT2SAS_DEVICE *sas_device_priv_data; struct scsi_target *starget; struct _raid_device *raid_device; + struct _sas_device *sas_device; unsigned long flags; sas_device_priv_data = kzalloc(sizeof(struct scsi_device), GFP_KERNEL); @@ -1430,6 +1432,19 @@ _scsih_slave_alloc(struct scsi_device *sdev) spin_unlock_irqrestore(&ioc->raid_device_lock, flags); } + if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) { + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc, + sas_target_priv_data->sas_address); + if (sas_device && (sas_device->starget == NULL)) { + sdev_printk(KERN_INFO, sdev, + "%s : sas_device->starget set to starget @ %d\n", + __func__, __LINE__); + sas_device->starget = starget; + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + } + return 0; } @@ -6753,7 +6768,7 @@ _scsih_search_responding_sas_devices(struct MPT2SAS_ADAPTER *ioc) handle))) { ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; - 
if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) break; handle = le16_to_cpu(sas_device_pg0.DevHandle); device_info = le32_to_cpu(sas_device_pg0.DeviceInfo); @@ -6862,7 +6877,7 @@ _scsih_search_responding_raid_devices(struct MPT2SAS_ADAPTER *ioc) &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) { ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; - if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) break; handle = le16_to_cpu(volume_pg1.DevHandle); @@ -6887,7 +6902,7 @@ _scsih_search_responding_raid_devices(struct MPT2SAS_ADAPTER *ioc) phys_disk_num))) { ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; - if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) break; phys_disk_num = pd_pg0.PhysDiskNum; handle = le16_to_cpu(pd_pg0.DevHandle); @@ -6967,7 +6982,7 @@ _scsih_search_responding_expanders(struct MPT2SAS_ADAPTER *ioc) ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; - if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) break; handle = le16_to_cpu(expander_pg0.DevHandle); @@ -7109,8 +7124,6 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc) MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) { ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; - if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) - break; if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { printk(MPT2SAS_INFO_FMT "\tbreak from expander scan: " "ioc_status(0x%04x), loginfo(0x%08x)\n", @@ -7153,8 +7166,6 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc) phys_disk_num))) { ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; - if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) - break; if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { printk(MPT2SAS_INFO_FMT "\tbreak from phys disk scan:" "ioc_status(0x%04x), loginfo(0x%08x)\n", @@ -7219,8 +7230,6 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc) &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) { ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; - if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) - break; if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { printk(MPT2SAS_INFO_FMT "\tbreak from volume scan: " "ioc_status(0x%04x), loginfo(0x%08x)\n", @@ -7278,8 +7287,6 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc) handle))) { ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; - if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) - break; if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { printk(MPT2SAS_INFO_FMT "\tbreak from end device scan:" " ioc_status(0x%04x), loginfo(0x%08x)\n", @@ -7471,10 +7478,9 @@ _firmware_event_work(struct work_struct *work) * This function merely adds a new work task into ioc->firmware_event_thread. * The tasks are worked from _firmware_event_work in user context. * - * Return 1 meaning mf should be freed from _base_interrupt - * 0 means the mf is freed from this function. + * Returns void. 
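 *
 * (On the _scsih_search_responding_* hunks above: the loop guard is
 * widened from "stop only on CONFIG_INVALID_PAGE" to "stop on anything
 * that is not SUCCESS", so an IOC returning some other error status can
 * no longer spin the rescan loop. The resulting shape, with names taken
 * from the driver's config API:
 *
 *	while (!mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply,
 *	    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
 *	    handle)) {
 *		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
 *		    MPI2_IOCSTATUS_MASK;
 *		if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
 *			break;
 *		handle = le16_to_cpu(sas_device_pg0.DevHandle);
 *		...
 *	}
 *
 * The normal end of a scan still terminates: the final GET_NEXT_HANDLE
 * iteration reports CONFIG_INVALID_PAGE, which is != SUCCESS.)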
*/ -u8 +void mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index, u32 reply) { @@ -7485,14 +7491,14 @@ mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index, /* events turned off due to host reset or driver unloading */ if (ioc->remove_host || ioc->pci_error_recovery) - return 1; + return; mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply); if (unlikely(!mpi_reply)) { printk(MPT2SAS_ERR_FMT "mpi_reply not valid at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); - return 1; + return; } event = le16_to_cpu(mpi_reply->Event); @@ -7507,11 +7513,11 @@ mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index, if (baen_data->Primitive != MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT) - return 1; + return; if (ioc->broadcast_aen_busy) { ioc->broadcast_aen_pending++; - return 1; + return; } else ioc->broadcast_aen_busy = 1; break; @@ -7587,14 +7593,14 @@ mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index, break; default: /* ignore the rest */ - return 1; + return; } fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC); if (!fw_event) { printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); - return 1; + return; } sz = le16_to_cpu(mpi_reply->EventDataLength) * 4; fw_event->event_data = kzalloc(sz, GFP_ATOMIC); @@ -7602,7 +7608,7 @@ mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index, printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); kfree(fw_event); - return 1; + return; } memcpy(fw_event->event_data, mpi_reply->EventData, @@ -7612,7 +7618,7 @@ mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index, fw_event->VP_ID = mpi_reply->VP_ID; fw_event->event = event; _scsih_fw_event_add(ioc, fw_event); - return 1; + return; } /* shost template */ @@ -7711,10 +7717,6 @@ _scsih_ir_shutdown(struct MPT2SAS_ADAPTER *ioc) if (!ioc->ir_firmware) return; - /* are there any volumes ? 
*/ - if (list_empty(&ioc->raid_device_list)) - return; - mutex_lock(&ioc->scsih_cmds.mutex); if (ioc->scsih_cmds.status != MPT2_CMD_NOT_USED) { @@ -7929,10 +7931,12 @@ _scsih_probe_boot_devices(struct MPT2SAS_ADAPTER *ioc) sas_device->sas_address_parent)) { _scsih_sas_device_remove(ioc, sas_device); } else if (!sas_device->starget) { - if (!ioc->is_driver_loading) - mpt2sas_transport_port_remove(ioc, sas_address, + if (!ioc->is_driver_loading) { + mpt2sas_transport_port_remove(ioc, + sas_address, sas_address_parent); - _scsih_sas_device_remove(ioc, sas_device); + _scsih_sas_device_remove(ioc, sas_device); + } } } } @@ -7985,14 +7989,14 @@ _scsih_probe_sas(struct MPT2SAS_ADAPTER *ioc) kfree(sas_device); continue; } else if (!sas_device->starget) { - if (!ioc->is_driver_loading) + if (!ioc->is_driver_loading) { mpt2sas_transport_port_remove(ioc, sas_device->sas_address, sas_device->sas_address_parent); - list_del(&sas_device->list); - kfree(sas_device); - continue; - + list_del(&sas_device->list); + kfree(sas_device); + continue; + } } spin_lock_irqsave(&ioc->sas_device_lock, flags); list_move_tail(&sas_device->list, &ioc->sas_device_list); @@ -8175,6 +8179,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list); INIT_LIST_HEAD(&ioc->delayed_tr_list); INIT_LIST_HEAD(&ioc->delayed_tr_volume_list); + INIT_LIST_HEAD(&ioc->reply_queue_list); /* init shost parameters */ shost->max_cmd_len = 32; @@ -8280,6 +8285,7 @@ _scsih_suspend(struct pci_dev *pdev, pm_message_t state) mpt2sas_base_stop_watchdog(ioc); scsi_block_requests(shost); + _scsih_ir_shutdown(ioc); device_state = pci_choose_state(pdev, state); printk(MPT2SAS_INFO_FMT "pdev=0x%p, slot=%s, entering " "operating state [D%d]\n", ioc->name, pdev, diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c index 193e7ae90c3b..9d26637308be 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_transport.c +++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c @@ -2,7 +2,7 @@ * SAS Transport Layer for MPT (Message Passing Technology) based controllers * * This code is based on drivers/scsi/mpt2sas/mpt2_transport.c - * Copyright (C) 2007-2012 LSI Corporation + * Copyright (C) 2007-2013 LSI Corporation * (mailto:DL-MPTFusionLinux@lsi.com) * * This program is free software; you can redistribute it and/or @@ -1006,9 +1006,12 @@ mpt2sas_transport_update_links(struct MPT2SAS_ADAPTER *ioc, &mpt2sas_phy->remote_identify); _transport_add_phy_to_an_existing_port(ioc, sas_node, mpt2sas_phy, mpt2sas_phy->remote_identify.sas_address); - } else + } else { memset(&mpt2sas_phy->remote_identify, 0 , sizeof(struct sas_identify)); + _transport_del_phy_from_an_existing_port(ioc, sas_node, + mpt2sas_phy); + } if (mpt2sas_phy->phy) mpt2sas_phy->phy->negotiated_linkrate = diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index 5dc280c75325..fa785062e97b 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -82,6 +82,10 @@ static int msix_disable = -1; module_param(msix_disable, int, 0); MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)"); +static int max_msix_vectors = 8; +module_param(max_msix_vectors, int, 0); +MODULE_PARM_DESC(max_msix_vectors, + " max msix vectors - (default=8)"); static int mpt3sas_fwfault_debug; MODULE_PARM_DESC(mpt3sas_fwfault_debug, @@ -1709,8 +1713,6 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc) int i; u8 try_msix = 0; - INIT_LIST_HEAD(&ioc->reply_queue_list); - 
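	/*
	 * Two related changes meet here, mirrored between mpt2sas and
	 * mpt3sas: reply_queue_list is now initialized once in
	 * _scsih_probe() instead of in this function, so that
	 * base_free_resources() can walk the list safely even when
	 * attach fails before MSI-X setup runs; and the max_msix_vectors
	 * module parameter declared in the hunk above (default 8) caps
	 * the reply-queue count just below. A sketch of the capping
	 * arithmetic for a 16-core host whose IOC supports 16 vectors:
	 *
	 *	reply_queue_count = min(16, 16);        16 queues wanted
	 *	if (max_msix_vectors > 0)
	 *		reply_queue_count = min(8, 16);  8 queues used
	 */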
if (msix_disable == -1 || msix_disable == 0) try_msix = 1; @@ -1723,6 +1725,16 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc) ioc->reply_queue_count = min_t(int, ioc->cpu_count, ioc->msix_vector_count); + printk(MPT3SAS_FMT "MSI-X vectors supported: %d, no of cores" + ": %d, max_msix_vectors: %d\n", ioc->name, ioc->msix_vector_count, + ioc->cpu_count, max_msix_vectors); + + if (max_msix_vectors > 0) { + ioc->reply_queue_count = min_t(int, max_msix_vectors, + ioc->reply_queue_count); + ioc->msix_vector_count = ioc->reply_queue_count; + } + entries = kcalloc(ioc->reply_queue_count, sizeof(struct msix_entry), GFP_KERNEL); if (!entries) { @@ -1790,6 +1802,7 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc) if (pci_enable_device_mem(pdev)) { pr_warn(MPT3SAS_FMT "pci_enable_device_mem: failed\n", ioc->name); + ioc->bars = 0; return -ENODEV; } @@ -1798,6 +1811,7 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc) MPT3SAS_DRIVER_NAME)) { pr_warn(MPT3SAS_FMT "pci_request_selected_regions: failed\n", ioc->name); + ioc->bars = 0; r = -ENODEV; goto out_fail; } @@ -4393,18 +4407,25 @@ mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc) dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, __func__)); - _base_mask_interrupts(ioc); - ioc->shost_recovery = 1; - _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET); - ioc->shost_recovery = 0; + if (ioc->chip_phys && ioc->chip) { + _base_mask_interrupts(ioc); + ioc->shost_recovery = 1; + _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET); + ioc->shost_recovery = 0; + } + _base_free_irq(ioc); _base_disable_msix(ioc); - if (ioc->chip_phys) + + if (ioc->chip_phys && ioc->chip) iounmap(ioc->chip); ioc->chip_phys = 0; - pci_release_selected_regions(ioc->pdev, ioc->bars); - pci_disable_pcie_error_reporting(pdev); - pci_disable_device(pdev); + + if (pci_is_enabled(pdev)) { + pci_release_selected_regions(ioc->pdev, ioc->bars); + pci_disable_pcie_error_reporting(pdev); + pci_disable_device(pdev); + } return; } diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index 8cbe8fd21fc4..a961fe11b527 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -7779,6 +7779,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list); INIT_LIST_HEAD(&ioc->delayed_tr_list); INIT_LIST_HEAD(&ioc->delayed_tr_volume_list); + INIT_LIST_HEAD(&ioc->reply_queue_list); /* init shost parameters */ shost->max_cmd_len = 32; diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c index dcadd56860ff..e771a88c6a74 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_transport.c +++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c @@ -1003,9 +1003,12 @@ mpt3sas_transport_update_links(struct MPT3SAS_ADAPTER *ioc, &mpt3sas_phy->remote_identify); _transport_add_phy_to_an_existing_port(ioc, sas_node, mpt3sas_phy, mpt3sas_phy->remote_identify.sas_address); - } else + } else { memset(&mpt3sas_phy->remote_identify, 0 , sizeof(struct sas_identify)); + _transport_del_phy_from_an_existing_port(ioc, sas_node, + mpt3sas_phy); + } if (mpt3sas_phy->phy) mpt3sas_phy->phy->negotiated_linkrate = diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c index 3861aa1f4520..f7c189606b84 100644 --- a/drivers/scsi/pm8001/pm8001_init.c +++ b/drivers/scsi/pm8001/pm8001_init.c @@ -424,7 +424,8 @@ static int pm8001_ioremap(struct pm8001_hba_info *pm8001_ha) PM8001_INIT_DBG(pm8001_ha, pm8001_printk( "base addr %llx 
virt_addr=%llx len=%d\n", (u64)pm8001_ha->io_mem[logicalBar].membase, - (u64)pm8001_ha->io_mem[logicalBar].memvirtaddr, + (u64)(unsigned long) + pm8001_ha->io_mem[logicalBar].memvirtaddr, pm8001_ha->io_mem[logicalBar].memsize)); } else { pm8001_ha->io_mem[logicalBar].membase = 0; @@ -734,7 +735,7 @@ static u32 pm8001_request_irq(struct pm8001_hba_info *pm8001_ha) pdev = pm8001_ha->pdev; #ifdef PM8001_USE_MSIX - if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) + if (pdev->msix_cap) return pm8001_setup_msix(pm8001_ha); else { PM8001_INIT_DBG(pm8001_ha, diff --git a/drivers/scsi/qla2xxx/Makefile b/drivers/scsi/qla2xxx/Makefile index c37b244cf8ae..ff0fc7c7812f 100644 --- a/drivers/scsi/qla2xxx/Makefile +++ b/drivers/scsi/qla2xxx/Makefile @@ -1,6 +1,6 @@ qla2xxx-y := qla_os.o qla_init.o qla_mbx.o qla_iocb.o qla_isr.o qla_gs.o \ qla_dbg.o qla_sup.o qla_attr.o qla_mid.o qla_dfs.o qla_bsg.o \ - qla_nx.o qla_mr.o qla_target.o + qla_nx.o qla_mr.o qla_nx2.o qla_target.o obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx.o obj-$(CONFIG_TCM_QLA2XXX) += tcm_qla2xxx.o diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index d7a99ae7f39d..5f174b83f56f 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -29,7 +29,7 @@ qla2x00_sysfs_read_fw_dump(struct file *filp, struct kobject *kobj, if (!(ha->fw_dump_reading || ha->mctp_dump_reading)) return 0; - if (IS_QLA82XX(ha)) { + if (IS_P3P_TYPE(ha)) { if (off < ha->md_template_size) { rval = memory_read_from_buffer(buf, count, &off, ha->md_tmplt_hdr, ha->md_template_size); @@ -71,7 +71,7 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj, ql_log(ql_log_info, vha, 0x705d, "Firmware dump cleared on (%ld).\n", vha->host_no); - if (IS_QLA82XX(vha->hw)) { + if (IS_P3P_TYPE(ha)) { qla82xx_md_free(vha); qla82xx_md_prep(vha); } @@ -95,11 +95,15 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj, qla82xx_idc_lock(ha); qla82xx_set_reset_owner(vha); qla82xx_idc_unlock(ha); + } else if (IS_QLA8044(ha)) { + qla8044_idc_lock(ha); + qla82xx_set_reset_owner(vha); + qla8044_idc_unlock(ha); } else qla2x00_system_error(vha); break; case 4: - if (IS_QLA82XX(ha)) { + if (IS_P3P_TYPE(ha)) { if (ha->md_tmplt_hdr) ql_dbg(ql_dbg_user, vha, 0x705b, "MiniDump supported with this firmware.\n"); @@ -109,7 +113,7 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj, } break; case 5: - if (IS_QLA82XX(ha)) + if (IS_P3P_TYPE(ha)) set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); break; case 6: @@ -586,7 +590,7 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj, struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); int type; uint32_t idc_control; - + uint8_t *tmp_data = NULL; if (off != 0) return -EINVAL; @@ -597,14 +601,23 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj, "Issuing ISP reset.\n"); scsi_block_requests(vha->host); - set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); if (IS_QLA82XX(ha)) { ha->flags.isp82xx_no_md_cap = 1; qla82xx_idc_lock(ha); qla82xx_set_reset_owner(vha); qla82xx_idc_unlock(ha); + } else if (IS_QLA8044(ha)) { + qla8044_idc_lock(ha); + idc_control = qla8044_rd_reg(ha, + QLA8044_IDC_DRV_CTRL); + qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL, + (idc_control | GRACEFUL_RESET_BIT1)); + qla82xx_set_reset_owner(vha); + qla8044_idc_unlock(ha); + } else { + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + qla2xxx_wake_dpc(vha); } - qla2xxx_wake_dpc(vha); qla2x00_wait_for_chip_reset(vha); scsi_unblock_requests(vha->host); break; @@ -640,7 +653,7 @@ 
qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj, break; } case 0x2025e: - if (!IS_QLA82XX(ha) || vha != base_vha) { + if (!IS_P3P_TYPE(ha) || vha != base_vha) { ql_log(ql_log_info, vha, 0x7071, "FCoE ctx reset no supported.\n"); return -EPERM; @@ -674,7 +687,19 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj, __qla83xx_set_idc_control(vha, idc_control); qla83xx_idc_unlock(vha, 0); break; - + case 0x20261: + ql_dbg(ql_dbg_user, vha, 0x70e0, + "Updating cache versions without reset "); + + tmp_data = vmalloc(256); + if (!tmp_data) { + ql_log(ql_log_warn, vha, 0x70e1, + "Unable to allocate memory for VPD information update.\n"); + return -ENOMEM; + } + ha->isp_ops->get_flash_version(vha, tmp_data); + vfree(tmp_data); + break; } return count; } @@ -1212,7 +1237,7 @@ qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr, scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); struct qla_hw_data *ha = vha->hw; - if (!IS_QLA81XX(ha) && !IS_QLA8031(ha)) + if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha)) return snprintf(buf, PAGE_SIZE, "\n"); return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n", @@ -1265,10 +1290,7 @@ qla2x00_vn_port_mac_address_show(struct device *dev, if (!IS_CNA_CAPABLE(vha->hw)) return snprintf(buf, PAGE_SIZE, "\n"); - return snprintf(buf, PAGE_SIZE, "%02x:%02x:%02x:%02x:%02x:%02x\n", - vha->fcoe_vn_port_mac[5], vha->fcoe_vn_port_mac[4], - vha->fcoe_vn_port_mac[3], vha->fcoe_vn_port_mac[2], - vha->fcoe_vn_port_mac[1], vha->fcoe_vn_port_mac[0]); + return snprintf(buf, PAGE_SIZE, "%pMR\n", vha->fcoe_vn_port_mac); } static ssize_t @@ -1287,12 +1309,6 @@ qla2x00_thermal_temp_show(struct device *dev, scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); uint16_t temp = 0; - if (!vha->hw->thermal_support) { - ql_log(ql_log_warn, vha, 0x70db, - "Thermal not supported by this card.\n"); - goto done; - } - if (qla2x00_reset_active(vha)) { ql_log(ql_log_warn, vha, 0x70dc, "ISP reset active.\n"); goto done; @@ -1725,11 +1741,21 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost) pfc_host_stat->lip_count = stats->lip_cnt; pfc_host_stat->tx_frames = stats->tx_frames; pfc_host_stat->rx_frames = stats->rx_frames; - pfc_host_stat->dumped_frames = stats->dumped_frames; + pfc_host_stat->dumped_frames = stats->discarded_frames; pfc_host_stat->nos_count = stats->nos_rcvd; + pfc_host_stat->error_frames = + stats->dropped_frames + stats->discarded_frames; + pfc_host_stat->rx_words = vha->qla_stats.input_bytes; + pfc_host_stat->tx_words = vha->qla_stats.output_bytes; } + pfc_host_stat->fcp_control_requests = vha->qla_stats.control_requests; + pfc_host_stat->fcp_input_requests = vha->qla_stats.input_requests; + pfc_host_stat->fcp_output_requests = vha->qla_stats.output_requests; pfc_host_stat->fcp_input_megabytes = vha->qla_stats.input_bytes >> 20; pfc_host_stat->fcp_output_megabytes = vha->qla_stats.output_bytes >> 20; + pfc_host_stat->seconds_since_last_reset = + get_jiffies_64() - vha->qla_stats.jiffies_at_last_reset; + do_div(pfc_host_stat->seconds_since_last_reset, HZ); done_free: dma_pool_free(ha->s_dma_pool, stats, stats_dma); @@ -1738,6 +1764,16 @@ done: } static void +qla2x00_reset_host_stats(struct Scsi_Host *shost) +{ + scsi_qla_host_t *vha = shost_priv(shost); + + memset(&vha->fc_host_stat, 0, sizeof(vha->fc_host_stat)); + + vha->qla_stats.jiffies_at_last_reset = get_jiffies_64(); +} + +static void qla2x00_get_host_symbolic_name(struct Scsi_Host *shost) { scsi_qla_host_t *vha = shost_priv(shost); @@ -2043,6 +2079,7 
@@ struct fc_function_template qla2xxx_transport_functions = { .dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk, .terminate_rport_io = qla2x00_terminate_rport_io, .get_fc_host_stats = qla2x00_get_fc_host_stats, + .reset_fc_host_stats = qla2x00_reset_host_stats, .vport_create = qla24xx_vport_create, .vport_disable = qla24xx_vport_disable, @@ -2089,6 +2126,8 @@ struct fc_function_template qla2xxx_transport_vport_functions = { .dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk, .terminate_rport_io = qla2x00_terminate_rport_io, .get_fc_host_stats = qla2x00_get_fc_host_stats, + .reset_fc_host_stats = qla2x00_reset_host_stats, + .bsg_request = qla24xx_bsg_request, .bsg_timeout = qla24xx_bsg_timeout, }; diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c index 417eaad50ae2..b989add77ec3 100644 --- a/drivers/scsi/qla2xxx/qla_bsg.c +++ b/drivers/scsi/qla2xxx/qla_bsg.c @@ -125,7 +125,7 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job) uint32_t len; uint32_t oper; - if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_QLA82XX(ha))) { + if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_P3P_TYPE(ha))) { ret = -EINVAL; goto exit_fcp_prio_cfg; } @@ -559,7 +559,7 @@ qla81xx_reset_loopback_mode(scsi_qla_host_t *vha, uint16_t *config, uint16_t new_config[4]; struct qla_hw_data *ha = vha->hw; - if (!IS_QLA81XX(ha) && !IS_QLA8031(ha)) + if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha)) goto done_reset_internal; memset(new_config, 0 , sizeof(new_config)); @@ -627,9 +627,10 @@ qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config, { int ret = 0; int rval = 0; + unsigned long rem_tmo = 0, current_tmo = 0; struct qla_hw_data *ha = vha->hw; - if (!IS_QLA81XX(ha) && !IS_QLA8031(ha)) + if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha)) goto done_set_internal; if (mode == INTERNAL_LOOPBACK) @@ -652,8 +653,19 @@ qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config, } /* Wait for DCBX complete event */ - if (!wait_for_completion_timeout(&ha->dcbx_comp, - (DCBX_COMP_TIMEOUT * HZ))) { + current_tmo = DCBX_COMP_TIMEOUT * HZ; + while (1) { + rem_tmo = wait_for_completion_timeout(&ha->dcbx_comp, + current_tmo); + if (!ha->idc_extend_tmo || rem_tmo) { + ha->idc_extend_tmo = 0; + break; + } + current_tmo = ha->idc_extend_tmo * HZ; + ha->idc_extend_tmo = 0; + } + + if (!rem_tmo) { ql_dbg(ql_dbg_user, vha, 0x7022, "DCBX completion not received.\n"); ret = qla81xx_reset_loopback_mode(vha, new_config, 0, 0); @@ -678,6 +690,7 @@ qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config, } ha->notify_dcbx_comp = 0; + ha->idc_extend_tmo = 0; done_set_internal: return rval; @@ -773,7 +786,7 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job) if (atomic_read(&vha->loop_state) == LOOP_READY && (ha->current_topology == ISP_CFG_F || - ((IS_QLA81XX(ha) || IS_QLA8031(ha)) && + ((IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) && le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE && req_data_len == MAX_ELS_FRAME_PAYLOAD)) && elreq.options == EXTERNAL_LOOPBACK) { @@ -783,7 +796,7 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job) command_sent = INT_DEF_LB_ECHO_CMD; rval = qla2x00_echo_test(vha, &elreq, response); } else { - if (IS_QLA81XX(ha) || IS_QLA8031(ha)) { + if (IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) { memset(config, 0, sizeof(config)); memset(new_config, 0, sizeof(new_config)); @@ -806,7 +819,7 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job) "elreq.options=%04x\n", elreq.options); if (elreq.options == 
EXTERNAL_LOOPBACK) - if (IS_QLA8031(ha)) + if (IS_QLA8031(ha) || IS_QLA8044(ha)) rval = qla81xx_set_loopback_mode(vha, config, new_config, elreq.options); else @@ -1266,6 +1279,7 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job) int rval = 0; struct qla_port_param *port_param = NULL; fc_port_t *fcport = NULL; + int found = 0; uint16_t mb[MAILBOX_REGISTER_COUNT]; uint8_t *rsp_ptr = NULL; @@ -1288,10 +1302,12 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job) if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn, fcport->port_name, sizeof(fcport->port_name))) continue; + + found = 1; break; } - if (!fcport) { + if (!found) { ql_log(ql_log_warn, vha, 0x7049, "Failed to find port.\n"); return -EINVAL; @@ -1318,12 +1334,9 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job) if (rval) { ql_log(ql_log_warn, vha, 0x704c, - "iIDMA cmd failed for %02x%02x%02x%02x%02x%02x%02x%02x -- " - "%04x %x %04x %04x.\n", fcport->port_name[0], - fcport->port_name[1], fcport->port_name[2], - fcport->port_name[3], fcport->port_name[4], - fcport->port_name[5], fcport->port_name[6], - fcport->port_name[7], rval, fcport->fp_speed, mb[0], mb[1]); + "iIDMA cmd failed for %8phN -- " + "%04x %x %04x %04x.\n", fcport->port_name, + rval, fcport->fp_speed, mb[0], mb[1]); rval = (DID_ERROR << 16); } else { if (!port_param->mode) { diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c index df132fec6d86..2ef497ebadc0 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.c +++ b/drivers/scsi/qla2xxx/qla_dbg.c @@ -11,9 +11,12 @@ * ---------------------------------------------------------------------- * | Level | Last Value Used | Holes | * ---------------------------------------------------------------------- - * | Module Init and Probe | 0x014f | 0x4b,0xba,0xfa | - * | Mailbox commands | 0x117a | 0x111a-0x111b | + * | Module Init and Probe | 0x0159 | 0x4b,0xba,0xfa | + * | Mailbox commands | 0x1181 | 0x111a-0x111b | * | | | 0x1155-0x1158 | + * | | | 0x1018-0x1019 | + * | | | 0x1115-0x1116 | + * | | | 0x10ca | * | Device Discovery | 0x2095 | 0x2020-0x2022, | * | | | 0x2011-0x2012, | * | | | 0x2016 | @@ -24,11 +27,12 @@ * | | | 0x3036,0x3038 | * | | | 0x303a | * | DPC Thread | 0x4022 | 0x4002,0x4013 | - * | Async Events | 0x5081 | 0x502b-0x502f | + * | Async Events | 0x5087 | 0x502b-0x502f | * | | | 0x5047,0x5052 | - * | | | 0x5040,0x5075 | - * | Timer Routines | 0x6011 | | - * | User Space Interactions | 0x70dd | 0x7018,0x702e, | + * | | | 0x5084,0x5075 | + * | | | 0x503d,0x5044 | + * | Timer Routines | 0x6012 | | + * | User Space Interactions | 0x70e1 | 0x7018,0x702e, | * | | | 0x7020,0x7024, | * | | | 0x7039,0x7045, | * | | | 0x7073-0x7075, | @@ -36,17 +40,28 @@ * | | | 0x70a5,0x70a6, | * | | | 0x70a8,0x70ab, | * | | | 0x70ad-0x70ae, | - * | | | 0x70d1-0x70da, | + * | | | 0x70d1-0x70db, | * | | | 0x7047,0x703b | - * | Task Management | 0x803c | 0x8025-0x8026 | + * | | | 0x70de-0x70df, | + * | Task Management | 0x803d | 0x8025-0x8026 | * | | | 0x800b,0x8039 | * | AER/EEH | 0x9011 | | * | Virtual Port | 0xa007 | | - * | ISP82XX Specific | 0xb086 | 0xb002,0xb024 | + * | ISP82XX Specific | 0xb14c | 0xb002,0xb024 | + * | | | 0xb09e,0xb0ae | + * | | | 0xb0e0-0xb0ef | + * | | | 0xb085,0xb0dc | + * | | | 0xb107,0xb108 | + * | | | 0xb111,0xb11e | + * | | | 0xb12c,0xb12d | + * | | | 0xb13a,0xb142 | + * | | | 0xb13c-0xb140 | + * | | | 0xb149 | * | MultiQ | 0xc00c | | * | Misc | 0xd010 | | - * | Target Mode | 0xe070 | | - * | Target Mode Management | 0xf072 | | + * | Target Mode | 0xe070 | 0xe021 | + * | Target Mode Management | 0xf072 | 
0xf002-0xf003 | + * | | | 0xf046-0xf049 | * | Target Mode Task Management | 0x1000b | | * ---------------------------------------------------------------------- */ @@ -519,7 +534,7 @@ qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain) uint32_t cnt, que_idx; uint8_t que_cnt; struct qla2xxx_mq_chain *mq = ptr; - struct device_reg_25xxmq __iomem *reg; + device_reg_t __iomem *reg; if (!ha->mqenable || IS_QLA83XX(ha)) return ptr; @@ -533,13 +548,16 @@ qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain) ha->max_req_queues : ha->max_rsp_queues; mq->count = htonl(que_cnt); for (cnt = 0; cnt < que_cnt; cnt++) { - reg = (struct device_reg_25xxmq __iomem *) - (ha->mqiobase + cnt * QLA_QUE_PAGE); + reg = ISP_QUE_REG(ha, cnt); que_idx = cnt * 4; - mq->qregs[que_idx] = htonl(RD_REG_DWORD(®->req_q_in)); - mq->qregs[que_idx+1] = htonl(RD_REG_DWORD(®->req_q_out)); - mq->qregs[que_idx+2] = htonl(RD_REG_DWORD(®->rsp_q_in)); - mq->qregs[que_idx+3] = htonl(RD_REG_DWORD(®->rsp_q_out)); + mq->qregs[que_idx] = + htonl(RD_REG_DWORD(®->isp25mq.req_q_in)); + mq->qregs[que_idx+1] = + htonl(RD_REG_DWORD(®->isp25mq.req_q_out)); + mq->qregs[que_idx+2] = + htonl(RD_REG_DWORD(®->isp25mq.rsp_q_in)); + mq->qregs[que_idx+3] = + htonl(RD_REG_DWORD(®->isp25mq.rsp_q_out)); } return ptr + sizeof(struct qla2xxx_mq_chain); @@ -941,7 +959,7 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) uint32_t *last_chain = NULL; struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); - if (IS_QLA82XX(ha)) + if (IS_P3P_TYPE(ha)) return; risc_address = ext_mem_cnt = 0; @@ -2530,7 +2548,7 @@ ql_dump_regs(uint32_t level, scsi_qla_host_t *vha, int32_t id) if (!ql_mask_match(level)) return; - if (IS_QLA82XX(ha)) + if (IS_P3P_TYPE(ha)) mbx_reg = ®82->mailbox_in[0]; else if (IS_FWI2_CAPABLE(ha)) mbx_reg = ®24->mailbox0; diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 95ca32a71e75..93db74ef3461 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -35,6 +35,7 @@ #include "qla_bsg.h" #include "qla_nx.h" +#include "qla_nx2.h" #define QLA2XXX_DRIVER_NAME "qla2xxx" #define QLA2XXX_APIDEV "ql2xapidev" #define QLA2XXX_MANUFACTURER "QLogic Corporation" @@ -642,6 +643,7 @@ struct device_reg_fx00 { uint32_t initval6; /* C8 */ uint32_t initval7; /* CC */ uint32_t fwheartbeat; /* D0 */ + uint32_t pseudoaen; /* D4 */ }; @@ -805,6 +807,7 @@ struct mbx_cmd_32 { #define MBA_MIRROR_LUN_CHANGE 0x8402 /* Mirror LUN State Change Notification */ #define MBA_FW_POLL_STATE 0x8600 /* Firmware in poll diagnostic state */ +#define MBA_FW_RESET_FCT 0x8502 /* Firmware reset factory defaults */ /* 83XX FCoE specific */ #define MBA_IDC_AEN 0x8200 /* FCoE: NIC Core state change AEN */ @@ -997,6 +1000,7 @@ struct mbx_cmd_32 { #define MBX_1 BIT_1 #define MBX_0 BIT_0 +#define RNID_TYPE_SET_VERSION 0x9 #define RNID_TYPE_ASIC_TEMP 0xC /* @@ -1233,8 +1237,9 @@ struct link_statistics { uint32_t unused1[0x1a]; uint32_t tx_frames; uint32_t rx_frames; - uint32_t dumped_frames; - uint32_t unused2[2]; + uint32_t discarded_frames; + uint32_t dropped_frames; + uint32_t unused2[1]; uint32_t nos_rcvd; }; @@ -2656,6 +2661,11 @@ struct qla_statistics { uint32_t total_isp_aborts; uint64_t input_bytes; uint64_t output_bytes; + uint64_t input_requests; + uint64_t output_requests; + uint32_t control_requests; + + uint64_t jiffies_at_last_reset; }; struct bidi_statistics { @@ -2670,9 +2680,8 @@ struct bidi_statistics { #define QLA_MAX_QUEUES 256 #define ISP_QUE_REG(ha, id) \ 
((ha->mqenable || IS_QLA83XX(ha)) ? \ - ((device_reg_t __iomem *)(ha->mqiobase) +\ - (QLA_QUE_PAGE * id)) :\ - ((device_reg_t __iomem *)(ha->iobase))) + ((void __iomem *)ha->mqiobase + (QLA_QUE_PAGE * id)) :\ + ((void __iomem *)ha->iobase)) #define QLA_REQ_QUE_ID(tag) \ ((tag < QLA_MAX_QUEUES && tag > 0) ? tag : 0) #define QLA_DEFAULT_QUE_QOS 5 @@ -2935,7 +2944,8 @@ struct qla_hw_data { #define DT_ISP2031 BIT_15 #define DT_ISP8031 BIT_16 #define DT_ISPFX00 BIT_17 -#define DT_ISP_LAST (DT_ISPFX00 << 1) +#define DT_ISP8044 BIT_18 +#define DT_ISP_LAST (DT_ISP8044 << 1) #define DT_T10_PI BIT_25 #define DT_IIDMA BIT_26 @@ -2961,6 +2971,7 @@ struct qla_hw_data { #define IS_QLA8001(ha) (DT_MASK(ha) & DT_ISP8001) #define IS_QLA81XX(ha) (IS_QLA8001(ha)) #define IS_QLA82XX(ha) (DT_MASK(ha) & DT_ISP8021) +#define IS_QLA8044(ha) (DT_MASK(ha) & DT_ISP8044) #define IS_QLA2031(ha) (DT_MASK(ha) & DT_ISP2031) #define IS_QLA8031(ha) (DT_MASK(ha) & DT_ISP8031) #define IS_QLAFX00(ha) (DT_MASK(ha) & DT_ISPFX00) @@ -2975,10 +2986,12 @@ struct qla_hw_data { #define IS_QLA24XX_TYPE(ha) (IS_QLA24XX(ha) || IS_QLA54XX(ha) || \ IS_QLA84XX(ha)) #define IS_CNA_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA82XX(ha) || \ - IS_QLA8031(ha)) + IS_QLA8031(ha) || IS_QLA8044(ha)) +#define IS_P3P_TYPE(ha) (IS_QLA82XX(ha) || IS_QLA8044(ha)) #define IS_QLA2XXX_MIDTYPE(ha) (IS_QLA24XX(ha) || IS_QLA84XX(ha) || \ IS_QLA25XX(ha) || IS_QLA81XX(ha) || \ - IS_QLA82XX(ha) || IS_QLA83XX(ha)) + IS_QLA82XX(ha) || IS_QLA83XX(ha) || \ + IS_QLA8044(ha)) #define IS_MSIX_NACK_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha)) #define IS_NOPOLLING_TYPE(ha) ((IS_QLA25XX(ha) || IS_QLA81XX(ha) || \ IS_QLA83XX(ha)) && (ha)->flags.msix_enabled) @@ -3187,10 +3200,12 @@ struct qla_hw_data { uint32_t nvram_data_off; uint32_t fdt_wrt_disable; + uint32_t fdt_wrt_enable; uint32_t fdt_erase_cmd; uint32_t fdt_block_size; uint32_t fdt_unprotect_sec_cmd; uint32_t fdt_protect_sec_cmd; + uint32_t fdt_wrt_sts_reg_cmd; uint32_t flt_region_flt; uint32_t flt_region_fdt; @@ -3277,6 +3292,7 @@ struct qla_hw_data { /* QLA83XX IDC specific fields */ uint32_t idc_audit_ts; + uint32_t idc_extend_tmo; /* DPC low-priority workqueue */ struct workqueue_struct *dpc_lp_wq; @@ -3296,9 +3312,6 @@ struct qla_hw_data { struct mr_data_fx00 mr; struct qlt_hw_data tgt; - uint16_t thermal_support; -#define THERMAL_SUPPORT_I2C BIT_0 -#define THERMAL_SUPPORT_ISP BIT_1 }; /* @@ -3364,6 +3377,7 @@ typedef struct scsi_qla_host { #define PORT_UPDATE_NEEDED 24 #define FX00_RESET_RECOVERY 25 #define FX00_TARGET_SCAN 26 +#define FX00_CRITEMP_RECOVERY 27 uint32_t device_flags; #define SWITCH_FOUND BIT_0 @@ -3402,7 +3416,7 @@ typedef struct scsi_qla_host { uint16_t fcoe_fcf_idx; uint8_t fcoe_vn_port_mac[6]; - uint32_t vp_abort_cnt; + uint32_t vp_abort_cnt; struct fc_vport *fc_vport; /* holds fc_vport * for each vport */ uint16_t vp_idx; /* vport ID */ @@ -3435,6 +3449,7 @@ typedef struct scsi_qla_host { struct bidi_statistics bidi_stats; atomic_t vref_count; + struct qla8044_reset_template reset_tmplt; } scsi_qla_host_t; #define SET_VP_IDX 1 diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h index 1ac2b0e3a0e1..610d3aa905a0 100644 --- a/drivers/scsi/qla2xxx/qla_fw.h +++ b/drivers/scsi/qla2xxx/qla_fw.h @@ -1387,6 +1387,8 @@ struct qla_flt_header { #define FLT_REG_GOLD_FW 0x2f #define FLT_REG_FCP_PRIO_0 0x87 #define FLT_REG_FCP_PRIO_1 0x88 +#define FLT_REG_CNA_FW 0x97 +#define FLT_REG_BOOT_CODE_8044 0xA2 #define FLT_REG_FCOE_FW 0xA4 #define FLT_REG_FCOE_NVRAM_0 0xAA #define 
FLT_REG_FCOE_NVRAM_1 0xAC diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index 2d98232a08eb..4446bf5fe292 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -357,6 +357,12 @@ extern int qla2x00_disable_fce_trace(scsi_qla_host_t *, uint64_t *, uint64_t *); extern int +qla82xx_set_driver_version(scsi_qla_host_t *, char *); + +extern int +qla25xx_set_driver_version(scsi_qla_host_t *, char *); + +extern int qla2x00_read_sfp(scsi_qla_host_t *, dma_addr_t, uint8_t *, uint16_t, uint16_t, uint16_t, uint16_t); @@ -435,19 +441,19 @@ qla2x00_process_completed_request(struct scsi_qla_host *, struct req_que *, */ extern void qla2x00_release_nvram_protection(scsi_qla_host_t *); extern uint32_t *qla24xx_read_flash_data(scsi_qla_host_t *, uint32_t *, - uint32_t, uint32_t); + uint32_t, uint32_t); extern uint8_t *qla2x00_read_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t, - uint32_t); + uint32_t); extern uint8_t *qla24xx_read_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t, - uint32_t); + uint32_t); extern int qla2x00_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t, - uint32_t); + uint32_t); extern int qla24xx_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t, - uint32_t); + uint32_t); extern uint8_t *qla25xx_read_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t, - uint32_t); + uint32_t); extern int qla25xx_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t, - uint32_t); + uint32_t); extern int qla2x00_is_a_vp_did(scsi_qla_host_t *, uint32_t); extern int qla2x00_beacon_on(struct scsi_qla_host *); @@ -463,21 +469,25 @@ extern int qla83xx_wr_reg(scsi_qla_host_t *, uint32_t, uint32_t); extern int qla83xx_rd_reg(scsi_qla_host_t *, uint32_t, uint32_t *); extern int qla83xx_restart_nic_firmware(scsi_qla_host_t *); extern int qla83xx_access_control(scsi_qla_host_t *, uint16_t, uint32_t, - uint32_t, uint16_t *); + uint32_t, uint16_t *); extern uint8_t *qla2x00_read_optrom_data(struct scsi_qla_host *, uint8_t *, - uint32_t, uint32_t); + uint32_t, uint32_t); extern int qla2x00_write_optrom_data(struct scsi_qla_host *, uint8_t *, - uint32_t, uint32_t); + uint32_t, uint32_t); extern uint8_t *qla24xx_read_optrom_data(struct scsi_qla_host *, uint8_t *, - uint32_t, uint32_t); + uint32_t, uint32_t); extern int qla24xx_write_optrom_data(struct scsi_qla_host *, uint8_t *, - uint32_t, uint32_t); + uint32_t, uint32_t); extern uint8_t *qla25xx_read_optrom_data(struct scsi_qla_host *, uint8_t *, - uint32_t, uint32_t); + uint32_t, uint32_t); +extern uint8_t *qla8044_read_optrom_data(struct scsi_qla_host *, + uint8_t *, uint32_t, uint32_t); +extern void qla8044_watchdog(struct scsi_qla_host *vha); extern int qla2x00_get_flash_version(scsi_qla_host_t *, void *); extern int qla24xx_get_flash_version(scsi_qla_host_t *, void *); +extern int qla82xx_get_flash_version(scsi_qla_host_t *, void *); extern int qla2xxx_get_flash_info(scsi_qla_host_t *); extern int qla2xxx_get_vpd_field(scsi_qla_host_t *, char *, char *, size_t); @@ -498,7 +508,7 @@ extern void qla2x00_dump_buffer(uint8_t *, uint32_t); extern void qla2x00_dump_buffer_zipped(uint8_t *, uint32_t); extern void ql_dump_regs(uint32_t, scsi_qla_host_t *, int32_t); extern void ql_dump_buffer(uint32_t, scsi_qla_host_t *, int32_t, - uint8_t *, uint32_t); + uint8_t *, uint32_t); extern void qla2xxx_dump_post_process(scsi_qla_host_t *, int); /* @@ -584,6 +594,7 @@ extern int qlafx00_start_scsi(srb_t *); extern int qlafx00_abort_isp(scsi_qla_host_t *); extern int qlafx00_iospace_config(struct 
qla_hw_data *); extern int qlafx00_init_firmware(scsi_qla_host_t *, uint16_t); +extern int qlafx00_driver_shutdown(scsi_qla_host_t *, int); extern int qlafx00_fw_ready(scsi_qla_host_t *); extern int qlafx00_configure_devices(scsi_qla_host_t *); extern int qlafx00_reset_initialize(scsi_qla_host_t *); @@ -601,6 +612,7 @@ extern void qlafx00_abort_iocb(srb_t *, struct abort_iocb_entry_fx00 *); extern void qlafx00_fxdisc_iocb(srb_t *, struct fxdisc_entry_fx00 *); extern void qlafx00_timer_routine(scsi_qla_host_t *); extern int qlafx00_rescan_isp(scsi_qla_host_t *); +extern int qlafx00_loop_reset(scsi_qla_host_t *vha); /* qla82xx related functions */ @@ -619,9 +631,9 @@ extern int qla82xx_start_firmware(scsi_qla_host_t *); /* Firmware and flash related functions */ extern int qla82xx_load_risc(scsi_qla_host_t *, uint32_t *); extern uint8_t *qla82xx_read_optrom_data(struct scsi_qla_host *, uint8_t *, - uint32_t, uint32_t); + uint32_t, uint32_t); extern int qla82xx_write_optrom_data(struct scsi_qla_host *, uint8_t *, - uint32_t, uint32_t); + uint32_t, uint32_t); /* Mailbox related functions */ extern int qla82xx_abort_isp(scsi_qla_host_t *); @@ -662,7 +674,7 @@ extern void qla8xxx_dev_failed_handler(scsi_qla_host_t *); extern void qla82xx_clear_qsnt_ready(scsi_qla_host_t *); extern void qla2x00_set_model_info(scsi_qla_host_t *, uint8_t *, - size_t, char *); + size_t, char *); extern int qla82xx_mbx_intr_enable(scsi_qla_host_t *); extern int qla82xx_mbx_intr_disable(scsi_qla_host_t *); extern void qla82xx_start_iocbs(scsi_qla_host_t *); @@ -674,6 +686,8 @@ extern int qla81xx_get_led_config(scsi_qla_host_t *, uint16_t *); extern int qla82xx_mbx_beacon_ctl(scsi_qla_host_t *, int); extern char *qdev_state(uint32_t); extern void qla82xx_clear_pending_mbx(scsi_qla_host_t *); +extern int qla82xx_read_temperature(scsi_qla_host_t *); +extern int qla8044_read_temperature(scsi_qla_host_t *); /* BSG related functions */ extern int qla24xx_bsg_request(struct fc_bsg_job *); @@ -695,5 +709,31 @@ extern void qla82xx_md_free(scsi_qla_host_t *); extern int qla82xx_md_collect(scsi_qla_host_t *); extern void qla82xx_md_prep(scsi_qla_host_t *); extern void qla82xx_set_reset_owner(scsi_qla_host_t *); +extern int qla82xx_validate_template_chksum(scsi_qla_host_t *vha); + +/* Function declarations for ISP8044 */ +extern int qla8044_idc_lock(struct qla_hw_data *ha); +extern void qla8044_idc_unlock(struct qla_hw_data *ha); +extern uint32_t qla8044_rd_reg(struct qla_hw_data *ha, ulong addr); +extern void qla8044_wr_reg(struct qla_hw_data *ha, ulong addr, uint32_t val); +extern void qla8044_read_reset_template(struct scsi_qla_host *ha); +extern void qla8044_set_idc_dontreset(struct scsi_qla_host *ha); +extern int qla8044_rd_direct(struct scsi_qla_host *vha, const uint32_t crb_reg); +extern void qla8044_wr_direct(struct scsi_qla_host *vha, + const uint32_t crb_reg, const uint32_t value); +extern inline void qla8044_set_qsnt_ready(struct scsi_qla_host *vha); +extern inline void qla8044_need_reset_handler(struct scsi_qla_host *vha); +extern int qla8044_device_state_handler(struct scsi_qla_host *vha); +extern void qla8044_clear_qsnt_ready(struct scsi_qla_host *vha); +extern void qla8044_clear_drv_active(struct scsi_qla_host *vha); +void qla8044_get_minidump(struct scsi_qla_host *vha); +int qla8044_collect_md_data(struct scsi_qla_host *vha); +extern int qla8044_md_get_template(scsi_qla_host_t *); +extern int qla8044_write_optrom_data(struct scsi_qla_host *, uint8_t *, + uint32_t, uint32_t); +extern irqreturn_t 
qla8044_intr_handler(int, void *); +extern void qla82xx_mbx_completion(scsi_qla_host_t *, uint16_t); +extern int qla8044_abort_isp(scsi_qla_host_t *); +extern int qla8044_check_fw_alive(struct scsi_qla_host *); #endif /* _QLA_GBL_H */ diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c index 0926451980ed..cd47f1b32d9a 100644 --- a/drivers/scsi/qla2xxx/qla_gs.c +++ b/drivers/scsi/qla2xxx/qla_gs.c @@ -49,6 +49,8 @@ qla2x00_prep_ms_iocb(scsi_qla_host_t *vha, uint32_t req_size, uint32_t rsp_size) ms_pkt->dseg_rsp_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma)); ms_pkt->dseg_rsp_length = ms_pkt->rsp_bytecount; + vha->qla_stats.control_requests++; + return (ms_pkt); } @@ -87,6 +89,8 @@ qla24xx_prep_ms_iocb(scsi_qla_host_t *vha, uint32_t req_size, uint32_t rsp_size) ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count; ct_pkt->vp_index = vha->vp_idx; + vha->qla_stats.control_requests++; + return (ct_pkt); } @@ -226,17 +230,9 @@ qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport) fcport->d_id.b.domain = 0xf0; ql_dbg(ql_dbg_disc, vha, 0x2063, - "GA_NXT entry - nn %02x%02x%02x%02x%02x%02x%02x%02x " - "pn %02x%02x%02x%02x%02x%02x%02x%02x " + "GA_NXT entry - nn %8phN pn %8phN " "port_id=%02x%02x%02x.\n", - fcport->node_name[0], fcport->node_name[1], - fcport->node_name[2], fcport->node_name[3], - fcport->node_name[4], fcport->node_name[5], - fcport->node_name[6], fcport->node_name[7], - fcport->port_name[0], fcport->port_name[1], - fcport->port_name[2], fcport->port_name[3], - fcport->port_name[4], fcport->port_name[5], - fcport->port_name[6], fcport->port_name[7], + fcport->node_name, fcport->port_name, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa); } @@ -447,17 +443,9 @@ qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list) ct_rsp->rsp.gnn_id.node_name, WWN_SIZE); ql_dbg(ql_dbg_disc, vha, 0x2058, - "GID_PT entry - nn %02x%02x%02x%02x%02x%02x%02X%02x " - "pn %02x%02x%02x%02x%02x%02x%02X%02x " + "GID_PT entry - nn %8phN pn %8phN " "portid=%02x%02x%02x.\n", - list[i].node_name[0], list[i].node_name[1], - list[i].node_name[2], list[i].node_name[3], - list[i].node_name[4], list[i].node_name[5], - list[i].node_name[6], list[i].node_name[7], - list[i].port_name[0], list[i].port_name[1], - list[i].port_name[2], list[i].port_name[3], - list[i].port_name[4], list[i].port_name[5], - list[i].port_name[6], list[i].port_name[7], + list[i].node_name, list[i].port_name, list[i].d_id.b.domain, list[i].d_id.b.area, list[i].d_id.b.al_pa); } @@ -739,6 +727,8 @@ qla2x00_prep_sns_cmd(scsi_qla_host_t *vha, uint16_t cmd, uint16_t scmd_len, wc = (data_size - 16) / 4; /* Size in 32bit words. 
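 *
 * (The subtraction drops the 16-byte SNS command header before the
 * divide: for example a 528-byte data_size gives (528 - 16) / 4 = 128
 * 32-bit words on the wire, with 528 chosen only as an illustrative
 * value. The vha->qla_stats.control_requests++ increments added to
 * these prep helpers feed the new fcp_control_requests statistic
 * exported through qla2x00_get_fc_host_stats() earlier in this diff.)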
*/ sns_cmd->p.cmd.size = cpu_to_le16(wc); + vha->qla_stats.control_requests++; + return (sns_cmd); } @@ -796,17 +786,9 @@ qla2x00_sns_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport) fcport->d_id.b.domain = 0xf0; ql_dbg(ql_dbg_disc, vha, 0x2061, - "GA_NXT entry - nn %02x%02x%02x%02x%02x%02x%02x%02x " - "pn %02x%02x%02x%02x%02x%02x%02x%02x " + "GA_NXT entry - nn %8phN pn %8phN " "port_id=%02x%02x%02x.\n", - fcport->node_name[0], fcport->node_name[1], - fcport->node_name[2], fcport->node_name[3], - fcport->node_name[4], fcport->node_name[5], - fcport->node_name[6], fcport->node_name[7], - fcport->port_name[0], fcport->port_name[1], - fcport->port_name[2], fcport->port_name[3], - fcport->port_name[4], fcport->port_name[5], - fcport->port_name[6], fcport->port_name[7], + fcport->node_name, fcport->port_name, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa); } @@ -991,17 +973,9 @@ qla2x00_sns_gnn_id(scsi_qla_host_t *vha, sw_info_t *list) WWN_SIZE); ql_dbg(ql_dbg_disc, vha, 0x206e, - "GID_PT entry - nn %02x%02x%02x%02x%02x%02x%02x%02x " - "pn %02x%02x%02x%02x%02x%02x%02x%02x " + "GID_PT entry - nn %8phN pn %8phN " "port_id=%02x%02x%02x.\n", - list[i].node_name[0], list[i].node_name[1], - list[i].node_name[2], list[i].node_name[3], - list[i].node_name[4], list[i].node_name[5], - list[i].node_name[6], list[i].node_name[7], - list[i].port_name[0], list[i].port_name[1], - list[i].port_name[2], list[i].port_name[3], - list[i].port_name[4], list[i].port_name[5], - list[i].port_name[6], list[i].port_name[7], + list[i].node_name, list[i].port_name, list[i].d_id.b.domain, list[i].d_id.b.area, list[i].d_id.b.al_pa); } @@ -1321,11 +1295,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha) size += 4 + WWN_SIZE; ql_dbg(ql_dbg_disc, vha, 0x2025, - "NodeName = %02x%02x%02x%02x%02x%02x%02x%02x.\n", - eiter->a.node_name[0], eiter->a.node_name[1], - eiter->a.node_name[2], eiter->a.node_name[3], - eiter->a.node_name[4], eiter->a.node_name[5], - eiter->a.node_name[6], eiter->a.node_name[7]); + "NodeName = %8phN.\n", eiter->a.node_name); /* Manufacturer. 
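 *
 * (On the format-string conversions throughout this file: "%8phN" is
 * the kernel's hex-dump printk extension -- the field width gives the
 * byte count and the trailing N means no separator -- so one specifier
 * replaces eight "%02x" arguments. A minimal sketch:
 *
 *	u8 wwpn[8] = { 0x21, 0x00, 0x00, 0x24,
 *		       0xff, 0x30, 0x1e, 0xa4 };
 *	pr_info("pn %8phN\n", wwpn);
 *
 * prints "pn 21000024ff301ea4". "%pMR", used earlier for
 * fcoe_vn_port_mac, prints a 6-byte MAC address in reversed byte
 * order, the Fibre Channel convention.)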
*/ eiter = (struct ct_fdmi_hba_attr *) (entries + size); @@ -1428,16 +1398,8 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha) qla2x00_update_ms_fdmi_iocb(vha, size + 16); ql_dbg(ql_dbg_disc, vha, 0x202e, - "RHBA identifier = " - "%02x%02x%02x%02x%02x%02x%02x%02x size=%d.\n", - ct_req->req.rhba.hba_identifier[0], - ct_req->req.rhba.hba_identifier[1], - ct_req->req.rhba.hba_identifier[2], - ct_req->req.rhba.hba_identifier[3], - ct_req->req.rhba.hba_identifier[4], - ct_req->req.rhba.hba_identifier[5], - ct_req->req.rhba.hba_identifier[6], - ct_req->req.rhba.hba_identifier[7], size); + "RHBA identifier = %8phN size=%d.\n", + ct_req->req.rhba.hba_identifier, size); ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2076, entries, size); @@ -1494,11 +1456,7 @@ qla2x00_fdmi_dhba(scsi_qla_host_t *vha) memcpy(ct_req->req.dhba.port_name, vha->port_name, WWN_SIZE); ql_dbg(ql_dbg_disc, vha, 0x2036, - "DHBA portname = %02x%02x%02x%02x%02x%02x%02x%02x.\n", - ct_req->req.dhba.port_name[0], ct_req->req.dhba.port_name[1], - ct_req->req.dhba.port_name[2], ct_req->req.dhba.port_name[3], - ct_req->req.dhba.port_name[4], ct_req->req.dhba.port_name[5], - ct_req->req.dhba.port_name[6], ct_req->req.dhba.port_name[7]); + "DHBA portname = %8phN.\n", ct_req->req.dhba.port_name); /* Execute MS IOCB */ rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, @@ -1678,12 +1636,7 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha) qla2x00_update_ms_fdmi_iocb(vha, size + 16); ql_dbg(ql_dbg_disc, vha, 0x203e, - "RPA portname= %02x%02x%02x%02x%02X%02x%02x%02x size=%d.\n", - ct_req->req.rpa.port_name[0], ct_req->req.rpa.port_name[1], - ct_req->req.rpa.port_name[2], ct_req->req.rpa.port_name[3], - ct_req->req.rpa.port_name[4], ct_req->req.rpa.port_name[5], - ct_req->req.rpa.port_name[6], ct_req->req.rpa.port_name[7], - size); + "RPA portname= %8phN size=%d.\n", ct_req->req.rpa.port_name, size); ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2079, entries, size); @@ -1940,16 +1893,8 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list) ql_dbg(ql_dbg_disc, vha, 0x205b, "GPSC ext entry - fpn " - "%02x%02x%02x%02x%02x%02x%02x%02x speeds=%04x " - "speed=%04x.\n", - list[i].fabric_port_name[0], - list[i].fabric_port_name[1], - list[i].fabric_port_name[2], - list[i].fabric_port_name[3], - list[i].fabric_port_name[4], - list[i].fabric_port_name[5], - list[i].fabric_port_name[6], - list[i].fabric_port_name[7], + "%8phN speeds=%04x speed=%04x.\n", + list[i].fabric_port_name, be16_to_cpu(ct_rsp->rsp.gpsc.speeds), be16_to_cpu(ct_rsp->rsp.gpsc.speed)); } diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index f2216ed2ad8c..03f715e7591e 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -524,7 +524,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha) vha->flags.reset_active = 0; ha->flags.pci_channel_io_perm_failure = 0; ha->flags.eeh_busy = 0; - ha->thermal_support = THERMAL_SUPPORT_I2C|THERMAL_SUPPORT_ISP; + vha->qla_stats.jiffies_at_last_reset = get_jiffies_64(); atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); atomic_set(&vha->loop_state, LOOP_DOWN); vha->device_flags = DFLG_NO_CABLE; @@ -552,7 +552,18 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha) if (rval) { ql_log(ql_log_fatal, vha, 0x004f, "Unable to validate FLASH data.\n"); - return (rval); + return rval; + } + + if (IS_QLA8044(ha)) { + qla8044_read_reset_template(vha); + + /* NOTE: If ql2xdontresethba==1, set IDC_CTRL DONTRESET_BIT0. + * If DONRESET_BIT0 is set, drivers should not set dev_state + * to NEED_RESET. 
But if NEED_RESET is set, drivers should + honor the reset. */ + if (ql2xdontresethba == 1) + qla8044_set_idc_dontreset(vha); } ha->isp_ops->get_flash_version(vha, req->ring); @@ -564,12 +575,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha) if (ha->flags.disable_serdes) { /* Mask HBA via NVRAM settings? */ ql_log(ql_log_info, vha, 0x0077, - "Masking HBA WWPN " - "%02x%02x%02x%02x%02x%02x%02x%02x (via NVRAM).\n", - vha->port_name[0], vha->port_name[1], - vha->port_name[2], vha->port_name[3], - vha->port_name[4], vha->port_name[5], - vha->port_name[6], vha->port_name[7]); + "Masking HBA WWPN %8phN (via NVRAM).\n", vha->port_name); return QLA_FUNCTION_FAILED; } @@ -620,6 +626,11 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha) if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha)) qla24xx_read_fcp_prio_cfg(vha); + if (IS_P3P_TYPE(ha)) + qla82xx_set_driver_version(vha, QLA2XXX_VERSION); + else + qla25xx_set_driver_version(vha, QLA2XXX_VERSION); + return (rval); } @@ -1332,7 +1343,7 @@ qla24xx_chip_diag(scsi_qla_host_t *vha) struct qla_hw_data *ha = vha->hw; struct req_que *req = ha->req_q_map[0]; - if (IS_QLA82XX(ha)) + if (IS_P3P_TYPE(ha)) return QLA_SUCCESS; ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length; @@ -1615,7 +1626,7 @@ qla2x00_setup_chip(scsi_qla_host_t *vha) unsigned long flags; uint16_t fw_major_version; - if (IS_QLA82XX(ha)) { + if (IS_P3P_TYPE(ha)) { rval = ha->isp_ops->load_risc(vha, &srisc_address); if (rval == QLA_SUCCESS) { qla2x00_stop_firmware(vha); @@ -1651,7 +1662,7 @@ qla2x00_setup_chip(scsi_qla_host_t *vha) if (rval == QLA_SUCCESS) { enable_82xx_npiv: fw_major_version = ha->fw_major_version; - if (IS_QLA82XX(ha)) + if (IS_P3P_TYPE(ha)) qla82xx_check_md_needed(vha); else rval = qla2x00_get_fw_version(vha); @@ -1681,7 +1692,7 @@ enable_82xx_npiv: goto failed; if (!fw_major_version && ql2xallocfwdump - && !IS_QLA82XX(ha)) + && !(IS_P3P_TYPE(ha))) qla2x00_alloc_fw_dump(vha); } } else { @@ -1849,7 +1860,7 @@ qla24xx_update_fw_options(scsi_qla_host_t *vha) int rval; struct qla_hw_data *ha = vha->hw; - if (IS_QLA82XX(ha)) + if (IS_P3P_TYPE(ha)) return; /* Update Serial Link options.
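
The byte-by-byte %02x conversions being deleted throughout these hunks are replaced by the kernel's %phN printk extension, where the field width is the byte count (%8phN for an 8-byte WWN here, %10phN for a 10-byte CDB later in the series). A minimal sketch of the idiom; the function and buffer names are illustrative, not from the patch:

#include <linux/printk.h>

/* Prints an 8-byte world-wide name as contiguous hex, e.g.
 * "21000024ff31e0b8". One pointer argument replaces eight %02x
 * arguments, which is what shrinks every hunk above. */
static void example_log_wwpn(const unsigned char *wwpn)
{
	pr_info("WWPN %8phN\n", wwpn);
}
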
*/ @@ -3061,22 +3072,13 @@ qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) mb); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_disc, vha, 0x2004, - "Unable to adjust iIDMA " - "%02x%02x%02x%02x%02x%02x%02x%02x -- %04x %x %04x " - "%04x.\n", fcport->port_name[0], fcport->port_name[1], - fcport->port_name[2], fcport->port_name[3], - fcport->port_name[4], fcport->port_name[5], - fcport->port_name[6], fcport->port_name[7], rval, - fcport->fp_speed, mb[0], mb[1]); + "Unable to adjust iIDMA %8phN -- %04x %x %04x %04x.\n", + fcport->port_name, rval, fcport->fp_speed, mb[0], mb[1]); } else { ql_dbg(ql_dbg_disc, vha, 0x2005, - "iIDMA adjusted to %s GB/s " - "on %02x%02x%02x%02x%02x%02x%02x%02x.\n", + "iIDMA adjusted to %s GB/s on %8phN.\n", qla2x00_get_link_speed_str(ha, fcport->fp_speed), - fcport->port_name[0], fcport->port_name[1], - fcport->port_name[2], fcport->port_name[3], - fcport->port_name[4], fcport->port_name[5], - fcport->port_name[6], fcport->port_name[7]); + fcport->port_name); } } @@ -4007,10 +4009,18 @@ qla83xx_reset_ownership(scsi_qla_host_t *vha) uint32_t class_type_mask = 0x3; uint16_t fcoe_other_function = 0xffff, i; - qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence); - - qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO1, &dev_part_info1); - qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO2, &dev_part_info2); + if (IS_QLA8044(ha)) { + drv_presence = qla8044_rd_direct(vha, + QLA8044_CRB_DRV_ACTIVE_INDEX); + dev_part_info1 = qla8044_rd_direct(vha, + QLA8044_CRB_DEV_PART_INFO_INDEX); + dev_part_info2 = qla8044_rd_direct(vha, + QLA8044_CRB_DEV_PART_INFO2); + } else { + qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence); + qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO1, &dev_part_info1); + qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO2, &dev_part_info2); + } for (i = 0; i < 8; i++) { class_type = ((dev_part_info1 >> (i * 4)) & class_type_mask); if ((class_type == QLA83XX_CLASS_TYPE_FCOE) && @@ -4347,7 +4357,7 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha) /* For ISP82XX, driver waits for completion of the commands. * online flag should be set. */ - if (!IS_QLA82XX(ha)) + if (!(IS_P3P_TYPE(ha))) vha->flags.online = 0; ha->flags.chip_reset_done = 0; clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); @@ -4360,7 +4370,7 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha) * Driver waits for the completion of the commands. * the interrupts need to be enabled. */ - if (!IS_QLA82XX(ha)) + if (!(IS_P3P_TYPE(ha))) ha->isp_ops->reset_chip(vha); atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); @@ -4403,7 +4413,7 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha) if (!ha->flags.eeh_busy) { /* Make sure for ISP 82XX IO DMA is complete */ - if (IS_QLA82XX(ha)) { + if (IS_P3P_TYPE(ha)) { qla82xx_chip_reset_cleanup(vha); ql_log(ql_log_info, vha, 0x00b4, "Done chip reset cleanup.\n"); @@ -4723,7 +4733,7 @@ qla24xx_reset_adapter(scsi_qla_host_t *vha) struct qla_hw_data *ha = vha->hw; struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; - if (IS_QLA82XX(ha)) + if (IS_P3P_TYPE(ha)) return; vha->flags.online = 0; @@ -4789,8 +4799,6 @@ qla24xx_nvram_config(scsi_qla_host_t *vha) } ha->nvram_size = sizeof(struct nvram_24xx); ha->vpd_size = FA_NVRAM_VPD_SIZE; - if (IS_QLA82XX(ha)) - ha->vpd_size = FA_VPD_SIZE_82XX; /* Get VPD data into cache */ ha->vpd = ha->nvram + VPD_OFFSET; @@ -5552,6 +5560,8 @@ qla81xx_nvram_config(scsi_qla_host_t *vha) /* Determine NVRAM starting address. 
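
The mechanical IS_QLA82XX() to IS_P3P_TYPE() substitutions running through this file fold the new ISP8044 into every path the ISP8021 already took. Judging from the pattern here, the qla_def.h predicate is essentially the disjunction below (a sketch; DT_ISP8044 is the device-type bit this series introduces):

/* P3P family test: true for either NetXtreme-II based part, so a
 * single branch now covers both the 8021 and the new 8044. */
#define IS_QLA82XX(ha)	(DT_MASK(ha) & DT_ISP8021)
#define IS_QLA8044(ha)	(DT_MASK(ha) & DT_ISP8044)
#define IS_P3P_TYPE(ha)	(IS_QLA82XX(ha) || IS_QLA8044(ha))
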
*/ ha->nvram_size = sizeof(struct nvram_81xx); ha->vpd_size = FA_NVRAM_VPD_SIZE; + if (IS_P3P_TYPE(ha) || IS_QLA8031(ha)) + ha->vpd_size = FA_VPD_SIZE_82XX; /* Get VPD data into cache */ ha->vpd = ha->nvram + VPD_OFFSET; @@ -5734,7 +5744,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha) /* Link Down Timeout = 0: * - * When Port Down timer expires we will start returning + * When Port Down timer expires we will start returning * I/O's to OS with "DID_NO_CONNECT". * * Link Down Timeout != 0: @@ -6061,7 +6071,7 @@ qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport) if (priority < 0) return QLA_FUNCTION_FAILED; - if (IS_QLA82XX(vha->hw)) { + if (IS_P3P_TYPE(vha->hw)) { fcport->fcp_prio = priority & 0xf; return QLA_SUCCESS; } diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h index 28c38b4929ce..957088b04611 100644 --- a/drivers/scsi/qla2xxx/qla_inline.h +++ b/drivers/scsi/qla2xxx/qla_inline.h @@ -59,7 +59,7 @@ qla2x00_poll(struct rsp_que *rsp) unsigned long flags; struct qla_hw_data *ha = rsp->hw; local_irq_save(flags); - if (IS_QLA82XX(ha)) + if (IS_P3P_TYPE(ha)) qla82xx_poll(0, rsp); else ha->isp_ops->intr_handler(0, rsp); diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index ef0a5481b9dd..46b9307e8be4 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -32,9 +32,11 @@ qla2x00_get_cmd_direction(srb_t *sp) if (cmd->sc_data_direction == DMA_TO_DEVICE) { cflags = CF_WRITE; vha->qla_stats.output_bytes += scsi_bufflen(cmd); + vha->qla_stats.output_requests++; } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) { cflags = CF_READ; vha->qla_stats.input_bytes += scsi_bufflen(cmd); + vha->qla_stats.input_requests++; } return (cflags); } @@ -474,7 +476,7 @@ qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req) struct qla_hw_data *ha = vha->hw; device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id); - if (IS_QLA82XX(ha)) { + if (IS_P3P_TYPE(ha)) { qla82xx_start_iocbs(vha); } else { /* Adjust ring index. 
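
The input_requests/output_requests counters added in qla2x00_get_cmd_direction() above, and in the IOCB builders that follow, pair each existing byte counter with a request count, so IOPS can be derived from the same stats block as throughput. The accounting pattern, reduced to a self-contained sketch (struct and function names are illustrative):

#include <linux/dma-direction.h>
#include <linux/types.h>

struct example_io_stats {
	u64 input_bytes, input_requests;
	u64 output_bytes, output_requests;
};

/* Bump the per-direction byte and request counters for one command. */
static void example_account_io(struct example_io_stats *st,
    enum dma_data_direction dir, size_t len)
{
	if (dir == DMA_TO_DEVICE) {
		st->output_bytes += len;
		st->output_requests++;
	} else if (dir == DMA_FROM_DEVICE) {
		st->input_bytes += len;
		st->input_requests++;
	}
}
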
*/ @@ -642,10 +644,12 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt, cmd_pkt->control_flags = __constant_cpu_to_le16(CF_WRITE_DATA); vha->qla_stats.output_bytes += scsi_bufflen(cmd); + vha->qla_stats.output_requests++; } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) { cmd_pkt->control_flags = __constant_cpu_to_le16(CF_READ_DATA); vha->qla_stats.input_bytes += scsi_bufflen(cmd); + vha->qla_stats.input_requests++; } cur_seg = scsi_sglist(cmd); @@ -758,10 +762,12 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt, cmd_pkt->task_mgmt_flags = __constant_cpu_to_le16(TMF_WRITE_DATA); vha->qla_stats.output_bytes += scsi_bufflen(cmd); + vha->qla_stats.output_requests++; } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) { cmd_pkt->task_mgmt_flags = __constant_cpu_to_le16(TMF_READ_DATA); vha->qla_stats.input_bytes += scsi_bufflen(cmd); + vha->qla_stats.input_requests++; } /* One DSD is available in the Command Type 3 IOCB */ @@ -1844,7 +1850,7 @@ skip_cmd_array: if (req->cnt < req_cnt) { if (ha->mqenable || IS_QLA83XX(ha)) cnt = RD_REG_DWORD(®->isp25mq.req_q_out); - else if (IS_QLA82XX(ha)) + else if (IS_P3P_TYPE(ha)) cnt = RD_REG_DWORD(®->isp82.req_q_out); else if (IS_FWI2_CAPABLE(ha)) cnt = RD_REG_DWORD(®->isp24.req_q_out); @@ -2056,6 +2062,8 @@ qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb) (bsg_job->reply_payload.sg_list))); els_iocb->rx_len = cpu_to_le32(sg_dma_len (bsg_job->reply_payload.sg_list)); + + sp->fcport->vha->qla_stats.control_requests++; } static void @@ -2133,6 +2141,8 @@ qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb) avail_dsds--; } ct_iocb->entry_count = entry_count; + + sp->fcport->vha->qla_stats.control_requests++; } static void @@ -2685,6 +2695,9 @@ qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha, vha->bidi_stats.transfer_bytes += req_data_len; vha->bidi_stats.io_count++; + vha->qla_stats.output_bytes += req_data_len; + vha->qla_stats.output_requests++; + /* Only one dsd is available for bidirectional IOCB, remaining dsds * are bundled in continuation iocb */ diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 2d8e7b812352..df1b30ba938c 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -282,25 +282,38 @@ qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr) "%04x %04x %04x %04x %04x %04x %04x.\n", event[aen & 0xff], mb[0], mb[1], mb[2], mb[3], mb[4], mb[5], mb[6]); - if ((aen == MBA_IDC_COMPLETE && mb[1] >> 15)) { - vha->hw->flags.idc_compl_status = 1; - if (vha->hw->notify_dcbx_comp) - complete(&vha->hw->dcbx_comp); - } - - /* Acknowledgement needed? [Notify && non-zero timeout]. */ - timeout = (descr >> 8) & 0xf; - if (aen != MBA_IDC_NOTIFY || !timeout) - return; + switch (aen) { + /* Handle IDC Error completion case. */ + case MBA_IDC_COMPLETE: + if (mb[1] >> 15) { + vha->hw->flags.idc_compl_status = 1; + if (vha->hw->notify_dcbx_comp) + complete(&vha->hw->dcbx_comp); + } + break; - ql_dbg(ql_dbg_async, vha, 0x5022, - "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n", - vha->host_no, event[aen & 0xff], timeout); + case MBA_IDC_NOTIFY: + /* Acknowledgement needed? [Notify && non-zero timeout]. 
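
The qla_isr.c hunk in progress here converts qla81xx_idc_event() from flat tests into a switch over the AEN code, adding MBA_IDC_TIME_EXT handling. Once the hunk completes below, the dispatch has roughly this shape (condensed; the two example_ helpers are illustrative stand-ins for the completion and ack-posting calls):

switch (aen) {
case MBA_IDC_COMPLETE:		/* error completion: wake DCBX waiter */
	if (mb[1] >> 15)
		example_complete_dcbx(vha);
	break;
case MBA_IDC_NOTIFY:		/* ACK only if a timeout was requested */
	timeout = (descr >> 8) & 0xf;
	if (timeout)
		example_post_idc_ack(vha, mb);
	break;
case MBA_IDC_TIME_EXT:		/* peer asks for more time */
	vha->hw->idc_extend_tmo = descr;
	break;
}
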
*/ + timeout = (descr >> 8) & 0xf; + ql_dbg(ql_dbg_async, vha, 0x5022, + "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n", + vha->host_no, event[aen & 0xff], timeout); - rval = qla2x00_post_idc_ack_work(vha, mb); - if (rval != QLA_SUCCESS) - ql_log(ql_log_warn, vha, 0x5023, - "IDC failed to post ACK.\n"); + if (!timeout) + return; + rval = qla2x00_post_idc_ack_work(vha, mb); + if (rval != QLA_SUCCESS) + ql_log(ql_log_warn, vha, 0x5023, + "IDC failed to post ACK.\n"); + break; + case MBA_IDC_TIME_EXT: + vha->hw->idc_extend_tmo = descr; + ql_dbg(ql_dbg_async, vha, 0x5087, + "%lu Inter-Driver Communication %s -- " + "Extend timeout by=%d.\n", + vha->host_no, event[aen & 0xff], vha->hw->idc_extend_tmo); + break; + } } #define LS_UNKNOWN 2 @@ -691,7 +704,8 @@ skip_rio: case MBA_LOOP_DOWN: /* Loop Down Event */ mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha)) ? RD_REG_WORD(®24->mailbox4) : 0; - mbx = IS_QLA82XX(ha) ? RD_REG_WORD(®82->mailbox_out[4]) : mbx; + mbx = (IS_P3P_TYPE(ha)) ? RD_REG_WORD(®82->mailbox_out[4]) + : mbx; ql_dbg(ql_dbg_async, vha, 0x500b, "LOOP DOWN detected (%x %x %x %x).\n", mb[1], mb[2], mb[3], mbx); @@ -740,7 +754,7 @@ skip_rio: if (IS_QLA2100(ha)) break; - if (IS_QLA81XX(ha) || IS_QLA82XX(ha) || IS_QLA8031(ha)) { + if (IS_CNA_CAPABLE(ha)) { ql_dbg(ql_dbg_async, vha, 0x500d, "DCBX Completed -- %04x %04x %04x.\n", mb[1], mb[2], mb[3]); @@ -1002,7 +1016,7 @@ skip_rio: mb[1], mb[2], mb[3]); break; case MBA_IDC_NOTIFY: - if (IS_QLA8031(vha->hw)) { + if (IS_QLA8031(vha->hw) || IS_QLA8044(ha)) { mb[4] = RD_REG_WORD(®24->mailbox4); if (((mb[2] & 0x7fff) == MBC_PORT_RESET || (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) && @@ -1022,7 +1036,8 @@ skip_rio: complete(&ha->lb_portup_comp); /* Fallthru */ case MBA_IDC_TIME_EXT: - if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw)) + if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) || + IS_QLA8044(ha)) qla81xx_idc_event(vha, mb[0], mb[1]); break; @@ -1063,7 +1078,7 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha, ql_log(ql_log_warn, vha, 0x3014, "Invalid SCSI command index (%x).\n", index); - if (IS_QLA82XX(ha)) + if (IS_P3P_TYPE(ha)) set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); else set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); @@ -1080,7 +1095,7 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha, } else { ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n"); - if (IS_QLA82XX(ha)) + if (IS_P3P_TYPE(ha)) set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); else set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); @@ -1100,7 +1115,7 @@ qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func, if (index >= req->num_outstanding_cmds) { ql_log(ql_log_warn, vha, 0x5031, "Invalid command index (%x).\n", index); - if (IS_QLA82XX(ha)) + if (IS_P3P_TYPE(ha)) set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); else set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); @@ -1805,6 +1820,9 @@ qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt, if (scsi_status == 0) { bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len; + vha->qla_stats.input_bytes += + bsg_job->reply->reply_payload_rcv_len; + vha->qla_stats.input_requests++; rval = EXT_STATUS_OK; } goto done; @@ -1949,7 +1967,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) ql_dbg(ql_dbg_io, vha, 0x3017, "Invalid status handle (0x%x).\n", sts->handle); - if (IS_QLA82XX(ha)) + if (IS_P3P_TYPE(ha)) set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); else set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); @@ -2176,8 +2194,10 @@ 
check_scsi_status: } ql_dbg(ql_dbg_io, fcport->vha, 0x3021, - "Port down status: port-state=0x%x.\n", - atomic_read(&fcport->state)); + "Port to be marked lost on fcport=%02x%02x%02x, current " + "port state= %s.\n", fcport->d_id.b.domain, + fcport->d_id.b.area, fcport->d_id.b.al_pa, + port_state_str[atomic_read(&fcport->state)]); if (atomic_read(&fcport->state) == FCS_ONLINE) qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1); @@ -2212,16 +2232,13 @@ check_scsi_status: out: if (logit) ql_dbg(ql_dbg_io, fcport->vha, 0x3022, - "FCP command status: 0x%x-0x%x (0x%x) " - "nexus=%ld:%d:%d portid=%02x%02x%02x oxid=0x%x " - "cdb=%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x len=0x%x " + "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%d " + "portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x " "rsp_info=0x%x resid=0x%x fw_resid=0x%x.\n", comp_status, scsi_status, res, vha->host_no, cp->device->id, cp->device->lun, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id, - cp->cmnd[0], cp->cmnd[1], cp->cmnd[2], cp->cmnd[3], - cp->cmnd[4], cp->cmnd[5], cp->cmnd[6], cp->cmnd[7], - cp->cmnd[8], cp->cmnd[9], scsi_bufflen(cp), rsp_info_len, + cp->cmnd, scsi_bufflen(cp), rsp_info_len, resid_len, fw_resid_len); if (!res) @@ -2324,7 +2341,7 @@ fatal: ql_log(ql_log_warn, vha, 0x5030, "Error entry - invalid handle/queue.\n"); - if (IS_QLA82XX(ha)) + if (IS_P3P_TYPE(ha)) set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); else set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); @@ -2452,7 +2469,7 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha, } /* Adjust ring index */ - if (IS_QLA82XX(ha)) { + if (IS_P3P_TYPE(ha)) { struct device_reg_82xx __iomem *reg = &ha->iobase->isp82; WRT_REG_DWORD(®->rsp_q_out[0], rsp->ring_index); } else @@ -2865,7 +2882,7 @@ msix_failed: ret = request_irq(qentry->vector, qla83xx_msix_entries[i].handler, 0, qla83xx_msix_entries[i].name, rsp); - } else if (IS_QLA82XX(ha)) { + } else if (IS_P3P_TYPE(ha)) { ret = request_irq(qentry->vector, qla82xx_msix_entries[i].handler, 0, qla82xx_msix_entries[i].name, rsp); @@ -2950,7 +2967,7 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp) skip_msix: if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) && - !IS_QLA8001(ha) && !IS_QLA82XX(ha) && !IS_QLAFX00(ha)) + !IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha)) goto skip_msi; ret = pci_enable_msi(ha->pdev); diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 7257c3c4f2d0..a9aae500e791 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -75,7 +75,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) return QLA_FUNCTION_TIMEOUT; } - if (IS_QLA82XX(ha) && ha->flags.isp82xx_fw_hung) { + if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) { /* Setting Link-Down error */ mcp->mb[0] = MBS_LINK_DOWN_ERROR; ql_log(ql_log_warn, vha, 0x1004, @@ -106,9 +106,9 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) spin_lock_irqsave(&ha->hardware_lock, flags); /* Load mailbox registers. 
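
The qla_mbx.c changes beginning here replace the old bulk hex dumps with one trace line per mailbox register actually written or read, gated by the same out_mb/in_mb bitmasks that gate the transfers (MBX_n is BIT_n, covering mb[0..31]). A sketch of the gating, with pr_debug standing in for ql_dbg():

#include <linux/printk.h>
#include <linux/types.h>

/* Log only the registers a command declares in its out_mb mask; the
 * mask is consumed one bit per register, low bit first, exactly as in
 * the load loop in the hunk that follows. */
static void example_trace_out_mb(u32 mboxes, const u16 *mb, int count)
{
	int cnt;

	for (cnt = 0; cnt < count; cnt++) {
		if (mboxes & 1)
			pr_debug("mbox[%d]<-0x%04x\n", cnt, mb[cnt]);
		mboxes >>= 1;
	}
}
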
*/ - if (IS_QLA82XX(ha)) + if (IS_P3P_TYPE(ha)) optr = (uint16_t __iomem *)®->isp82.mailbox_in[0]; - else if (IS_FWI2_CAPABLE(ha) && !IS_QLA82XX(ha)) + else if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha))) optr = (uint16_t __iomem *)®->isp24.mailbox0; else optr = (uint16_t __iomem *)MAILBOX_REG(ha, ®->isp, 0); @@ -117,33 +117,25 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) command = mcp->mb[0]; mboxes = mcp->out_mb; + ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1111, + "Mailbox registers (OUT):\n"); for (cnt = 0; cnt < ha->mbx_count; cnt++) { if (IS_QLA2200(ha) && cnt == 8) optr = (uint16_t __iomem *)MAILBOX_REG(ha, ®->isp, 8); - if (mboxes & BIT_0) + if (mboxes & BIT_0) { + ql_dbg(ql_dbg_mbx, vha, 0x1112, + "mbox[%d]<-0x%04x\n", cnt, *iptr); WRT_REG_WORD(optr, *iptr); + } mboxes >>= 1; optr++; iptr++; } - ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1111, - "Loaded MBX registers (displayed in bytes) =.\n"); - ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1112, - (uint8_t *)mcp->mb, 16); - ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1113, - ".\n"); - ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1114, - ((uint8_t *)mcp->mb + 0x10), 16); - ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1115, - ".\n"); - ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1116, - ((uint8_t *)mcp->mb + 0x20), 8); ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1117, "I/O Address = %p.\n", optr); - ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x100e); /* Issue set host interrupt command to send cmd out. */ ha->flags.mbox_int = 0; @@ -159,7 +151,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) { set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags); - if (IS_QLA82XX(ha)) { + if (IS_P3P_TYPE(ha)) { if (RD_REG_DWORD(®->isp82.hint) & HINT_MBX_INT_PENDING) { spin_unlock_irqrestore(&ha->hardware_lock, @@ -189,7 +181,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) ql_dbg(ql_dbg_mbx, vha, 0x1011, "Cmd=%x Polling Mode.\n", command); - if (IS_QLA82XX(ha)) { + if (IS_P3P_TYPE(ha)) { if (RD_REG_DWORD(®->isp82.hint) & HINT_MBX_INT_PENDING) { spin_unlock_irqrestore(&ha->hardware_lock, @@ -236,7 +228,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) ha->flags.mbox_int = 0; clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); - if ((IS_QLA82XX(ha) && ha->flags.isp82xx_fw_hung)) { + if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) { ha->flags.mbox_busy = 0; /* Setting Link-Down error */ mcp->mb[0] = MBS_LINK_DOWN_ERROR; @@ -254,9 +246,15 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) iptr2 = mcp->mb; iptr = (uint16_t *)&ha->mailbox_out[0]; mboxes = mcp->in_mb; + + ql_dbg(ql_dbg_mbx, vha, 0x1113, + "Mailbox registers (IN):\n"); for (cnt = 0; cnt < ha->mbx_count; cnt++) { - if (mboxes & BIT_0) + if (mboxes & BIT_0) { *iptr2 = *iptr; + ql_dbg(ql_dbg_mbx, vha, 0x1114, + "mbox[%d]->0x%04x\n", cnt, *iptr2); + } mboxes >>= 1; iptr2++; @@ -537,7 +535,7 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha) mcp->mb[0] = MBC_GET_FIRMWARE_VERSION; mcp->out_mb = MBX_0; mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; - if (IS_QLA81XX(vha->hw) || IS_QLA8031(ha)) + if (IS_QLA81XX(vha->hw) || IS_QLA8031(ha) || IS_QLA8044(ha)) mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8; if (IS_FWI2_CAPABLE(ha)) mcp->in_mb |= MBX_17|MBX_16|MBX_15; @@ -556,7 +554,7 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha) ha->fw_memory_size = 0x1FFFF; /* Defaults to 128KB. 
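
Mailbox registers are 16 bits wide, so anything larger crosses the interface split across several of them: the readback just below reassembles fw_memory_size from mb[4]/mb[5], and qla25xx_set_driver_version() later in this hunk scatters a 64-bit DMA address over mb[2]/mb[3]/mb[6]/mb[7]. The word-picking macros behave as sketched here (consistent with their use in these hunks; treat the exact definitions as an assumption about qla_def.h):

#define LSW(x)	((uint16_t)(x))
#define MSW(x)	((uint16_t)((uint32_t)(x) >> 16))
#define LSD(x)	((uint32_t)((uint64_t)(x)))
#define MSD(x)	((uint32_t)(((uint64_t)(x)) >> 32))

/* Spread a 64-bit DMA address across four 16-bit mailboxes. */
static void example_pack_dma_addr(uint16_t mb[], dma_addr_t addr)
{
	mb[2] = MSW(LSD(addr));	/* bits 31:16 */
	mb[3] = LSW(LSD(addr));	/* bits 15:0  */
	mb[6] = MSW(MSD(addr));	/* bits 63:48 */
	mb[7] = LSW(MSD(addr));	/* bits 47:32 */
}
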
*/ else ha->fw_memory_size = (mcp->mb[5] << 16) | mcp->mb[4]; - if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw)) { + if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) || IS_QLA8044(ha)) { ha->mpi_version[0] = mcp->mb[10] & 0xff; ha->mpi_version[1] = mcp->mb[11] >> 8; ha->mpi_version[2] = mcp->mb[11] & 0xff; @@ -1201,7 +1199,7 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size) ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104c, "Entered %s.\n", __func__); - if (IS_QLA82XX(ha) && ql2xdbwr) + if (IS_P3P_TYPE(ha) && ql2xdbwr) qla82xx_wr_32(ha, ha->nxdb_wr_ptr, (0x04 | (ha->portnum << 5) | (0 << 8) | (0 << 16))); @@ -1667,7 +1665,11 @@ qla24xx_link_initialize(scsi_qla_host_t *vha) return QLA_FUNCTION_FAILED; mcp->mb[0] = MBC_LINK_INITIALIZATION; - mcp->mb[1] = BIT_6|BIT_4; + mcp->mb[1] = BIT_4; + if (vha->hw->operating_mode == LOOP) + mcp->mb[1] |= BIT_6; + else + mcp->mb[1] |= BIT_5; mcp->mb[2] = 0; mcp->mb[3] = 0; mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; @@ -3574,7 +3576,6 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req) unsigned long flags; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - struct device_reg_25xxmq __iomem *reg; struct qla_hw_data *ha = vha->hw; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3, @@ -3595,9 +3596,6 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req) if (IS_QLA83XX(ha)) mcp->mb[15] = 0; - reg = (struct device_reg_25xxmq __iomem *)((ha->mqiobase) + - QLA_QUE_PAGE * req->id); - mcp->mb[4] = req->id; /* que in ptr index */ mcp->mb[8] = 0; @@ -3619,12 +3617,10 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req) spin_lock_irqsave(&ha->hardware_lock, flags); if (!(req->options & BIT_0)) { - WRT_REG_DWORD(®->req_q_in, 0); + WRT_REG_DWORD(req->req_q_in, 0); if (!IS_QLA83XX(ha)) - WRT_REG_DWORD(®->req_q_out, 0); + WRT_REG_DWORD(req->req_q_out, 0); } - req->req_q_in = ®->req_q_in; - req->req_q_out = ®->req_q_out; spin_unlock_irqrestore(&ha->hardware_lock, flags); rval = qla2x00_mailbox_command(vha, mcp); @@ -3646,7 +3642,6 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp) unsigned long flags; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - struct device_reg_25xxmq __iomem *reg; struct qla_hw_data *ha = vha->hw; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6, @@ -3664,9 +3659,6 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp) if (IS_QLA83XX(ha)) mcp->mb[15] = 0; - reg = (struct device_reg_25xxmq __iomem *)((ha->mqiobase) + - QLA_QUE_PAGE * rsp->id); - mcp->mb[4] = rsp->id; /* que in ptr index */ mcp->mb[8] = 0; @@ -3690,9 +3682,9 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp) spin_lock_irqsave(&ha->hardware_lock, flags); if (!(rsp->options & BIT_0)) { - WRT_REG_DWORD(®->rsp_q_out, 0); + WRT_REG_DWORD(rsp->rsp_q_out, 0); if (!IS_QLA83XX(ha)) - WRT_REG_DWORD(®->rsp_q_in, 0); + WRT_REG_DWORD(rsp->rsp_q_in, 0); } spin_unlock_irqrestore(&ha->hardware_lock, flags); @@ -3872,6 +3864,112 @@ qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha) return rval; } +int +qla82xx_set_driver_version(scsi_qla_host_t *vha, char *version) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + int i; + int len; + uint16_t *str; + struct qla_hw_data *ha = vha->hw; + + if (!IS_P3P_TYPE(ha)) + return QLA_FUNCTION_FAILED; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117b, + "Entered %s.\n", __func__); + + str = (void *)version; + len = strlen(version); + + mcp->mb[0] = MBC_SET_RNID_PARAMS; + mcp->mb[1] = RNID_TYPE_SET_VERSION << 8; + mcp->out_mb = MBX_1|MBX_0; + for (i = 4; i < 16 && 
len; i++, str++, len -= 2) { + mcp->mb[i] = cpu_to_le16p(str); + mcp->out_mb |= 1<<i; + } + for (; i < 16; i++) { + mcp->mb[i] = 0; + mcp->out_mb |= 1<<i; + } + mcp->in_mb = MBX_1|MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x117c, + "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117d, + "Done %s.\n", __func__); + } + + return rval; +} + +int +qla25xx_set_driver_version(scsi_qla_host_t *vha, char *version) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + int len; + uint16_t dwlen; + uint8_t *str; + dma_addr_t str_dma; + struct qla_hw_data *ha = vha->hw; + + if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha) || + IS_P3P_TYPE(ha)) + return QLA_FUNCTION_FAILED; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117e, + "Entered %s.\n", __func__); + + str = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &str_dma); + if (!str) { + ql_log(ql_log_warn, vha, 0x117f, + "Failed to allocate driver version param.\n"); + return QLA_MEMORY_ALLOC_FAILED; + } + + memcpy(str, "\x7\x3\x11\x0", 4); + dwlen = str[0]; + len = dwlen * 4 - 4; + memset(str + 4, 0, len); + if (len > strlen(version)) + len = strlen(version); + memcpy(str + 4, version, len); + + mcp->mb[0] = MBC_SET_RNID_PARAMS; + mcp->mb[1] = RNID_TYPE_SET_VERSION << 8 | dwlen; + mcp->mb[2] = MSW(LSD(str_dma)); + mcp->mb[3] = LSW(LSD(str_dma)); + mcp->mb[6] = MSW(MSD(str_dma)); + mcp->mb[7] = LSW(MSD(str_dma)); + mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; + mcp->in_mb = MBX_1|MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x1180, + "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1181, + "Done %s.\n", __func__); + } + + dma_pool_free(ha->s_dma_pool, str, str_dma); + + return rval; +} + static int qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp) { @@ -4407,7 +4505,7 @@ qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb) ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1109, "Entered %s.\n", __func__); - if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha)) + if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA8044(ha)) return QLA_FUNCTION_FAILED; mcp->mb[0] = MBC_GET_PORT_CONFIG; mcp->out_mb = MBX_0; @@ -4512,40 +4610,43 @@ qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp) struct qla_hw_data *ha = vha->hw; uint8_t byte; - ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ca, - "Entered %s.\n", __func__); - - if (ha->thermal_support & THERMAL_SUPPORT_I2C) { - rval = qla2x00_read_sfp(vha, 0, &byte, - 0x98, 0x1, 1, BIT_13|BIT_12|BIT_0); - *temp = byte; - if (rval == QLA_SUCCESS) - goto done; - - ql_log(ql_log_warn, vha, 0x10c9, - "Thermal not supported through I2C bus, trying alternate " - "method (ISP access).\n"); - ha->thermal_support &= ~THERMAL_SUPPORT_I2C; + if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha)) { + ql_dbg(ql_dbg_mbx, vha, 0x1150, + "Thermal not supported by this card.\n"); + return rval; } - if (ha->thermal_support & THERMAL_SUPPORT_ISP) { - rval = qla2x00_read_asic_temperature(vha, temp); - if (rval == QLA_SUCCESS) - goto done; - - ql_log(ql_log_warn, vha, 0x1019, - "Thermal not supported through ISP.\n"); - ha->thermal_support &= ~THERMAL_SUPPORT_ISP; + if (IS_QLA25XX(ha)) { + if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC 
&& + ha->pdev->subsystem_device == 0x0175) { + rval = qla2x00_read_sfp(vha, 0, &byte, + 0x98, 0x1, 1, BIT_13|BIT_0); + *temp = byte; + return rval; + } + if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP && + ha->pdev->subsystem_device == 0x338e) { + rval = qla2x00_read_sfp(vha, 0, &byte, + 0x98, 0x1, 1, BIT_15|BIT_14|BIT_0); + *temp = byte; + return rval; + } + ql_dbg(ql_dbg_mbx, vha, 0x10c9, + "Thermal not supported by this card.\n"); + return rval; } - ql_log(ql_log_warn, vha, 0x1150, - "Thermal not supported by this card " - "(ignoring further requests).\n"); - return rval; + if (IS_QLA82XX(ha)) { + *temp = qla82xx_read_temperature(vha); + rval = QLA_SUCCESS; + return rval; + } else if (IS_QLA8044(ha)) { + *temp = qla8044_read_temperature(vha); + rval = QLA_SUCCESS; + return rval; + } -done: - ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1018, - "Done %s.\n", __func__); + rval = qla2x00_read_asic_temperature(vha, temp); return rval; } @@ -4595,7 +4696,7 @@ qla82xx_mbx_intr_disable(scsi_qla_host_t *vha) ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100d, "Entered %s.\n", __func__); - if (!IS_QLA82XX(ha)) + if (!IS_P3P_TYPE(ha)) return QLA_FUNCTION_FAILED; memset(mcp, 0, sizeof(mbx_cmd_t)); @@ -4713,6 +4814,60 @@ qla82xx_md_get_template(scsi_qla_host_t *vha) } int +qla8044_md_get_template(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + int rval = QLA_FUNCTION_FAILED; + int offset = 0, size = MINIDUMP_SIZE_36K; + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11f, + "Entered %s.\n", __func__); + + ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev, + ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL); + if (!ha->md_tmplt_hdr) { + ql_log(ql_log_warn, vha, 0xb11b, + "Unable to allocate memory for Minidump template.\n"); + return rval; + } + + memset(mcp->mb, 0 , sizeof(mcp->mb)); + while (offset < ha->md_template_size) { + mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); + mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); + mcp->mb[2] = LSW(RQST_TMPLT); + mcp->mb[3] = MSW(RQST_TMPLT); + mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma + offset)); + mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma + offset)); + mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma + offset)); + mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma + offset)); + mcp->mb[8] = LSW(size); + mcp->mb[9] = MSW(size); + mcp->mb[10] = offset & 0x0000FFFF; + mcp->mb[11] = offset & 0xFFFF0000; + mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; + mcp->tov = MBX_TOV_SECONDS; + mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8| + MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; + mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0xb11c, + "mailbox command FAILED=0x%x, subcode=%x.\n", + ((mcp->mb[1] << 16) | mcp->mb[0]), + ((mcp->mb[3] << 16) | mcp->mb[2])); + return rval; + } else + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11d, + "Done %s.\n", __func__); + offset = offset + size; + } + return rval; +} + +int qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg) { int rval; @@ -4808,7 +4963,7 @@ qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable) mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - if (!IS_QLA82XX(ha)) + if (!IS_P3P_TYPE(ha)) return QLA_FUNCTION_FAILED; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1127, diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c index f868a9f98afe..a72df701fb38 100644 --- a/drivers/scsi/qla2xxx/qla_mid.c +++ b/drivers/scsi/qla2xxx/qla_mid.c @@ -699,6 +699,8 
@@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options, req->cnt = req->length; req->id = que_id; reg = ISP_QUE_REG(ha, que_id); + req->req_q_in = ®->isp25mq.req_q_in; + req->req_q_out = ®->isp25mq.req_q_out; req->max_q_depth = ha->req_q_map[0]->max_q_depth; mutex_unlock(&ha->vport_lock); ql_dbg(ql_dbg_multiq, base_vha, 0xc004, diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c index d7993797f46e..2482975d72b2 100644 --- a/drivers/scsi/qla2xxx/qla_mr.c +++ b/drivers/scsi/qla2xxx/qla_mr.c @@ -294,7 +294,7 @@ premature_exit: * Context: * Kernel context. */ -static int +int qlafx00_driver_shutdown(scsi_qla_host_t *vha, int tmo) { int rval; @@ -776,6 +776,29 @@ qlafx00_lun_reset(fc_port_t *fcport, unsigned int l, int tag) } int +qlafx00_loop_reset(scsi_qla_host_t *vha) +{ + int ret; + struct fc_port *fcport; + struct qla_hw_data *ha = vha->hw; + + if (ql2xtargetreset) { + list_for_each_entry(fcport, &vha->vp_fcports, list) { + if (fcport->port_type != FCT_TARGET) + continue; + + ret = ha->isp_ops->target_reset(fcport, 0, 0); + if (ret != QLA_SUCCESS) { + ql_dbg(ql_dbg_taskm, vha, 0x803d, + "Bus Reset failed: Reset=%d " + "d_id=%x.\n", ret, fcport->d_id.b24); + } + } + } + return QLA_SUCCESS; +} + +int qlafx00_iospace_config(struct qla_hw_data *ha) { if (pci_request_selected_regions(ha->pdev, ha->bars, @@ -918,12 +941,23 @@ qlafx00_init_fw_ready(scsi_qla_host_t *vha) struct qla_hw_data *ha = vha->hw; struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00; uint32_t aenmbx, aenmbx7 = 0; + uint32_t pseudo_aen; uint32_t state[5]; bool done = false; /* 30 seconds wait - Adjust if required */ wait_time = 30; + pseudo_aen = RD_REG_DWORD(®->pseudoaen); + if (pseudo_aen == 1) { + aenmbx7 = RD_REG_DWORD(®->initval7); + ha->mbx_intr_code = MSW(aenmbx7); + ha->rqstq_intr_code = LSW(aenmbx7); + rval = qlafx00_driver_shutdown(vha, 10); + if (rval != QLA_SUCCESS) + qlafx00_soft_reset(vha); + } + /* wait time before firmware ready */ wtime = jiffies + (wait_time * HZ); do { @@ -1349,21 +1383,22 @@ qlafx00_configure_devices(scsi_qla_host_t *vha) } static void -qlafx00_abort_isp_cleanup(scsi_qla_host_t *vha) +qlafx00_abort_isp_cleanup(scsi_qla_host_t *vha, bool critemp) { struct qla_hw_data *ha = vha->hw; fc_port_t *fcport; vha->flags.online = 0; - ha->flags.chip_reset_done = 0; ha->mr.fw_hbt_en = 0; - clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); - vha->qla_stats.total_isp_aborts++; - - ql_log(ql_log_info, vha, 0x013f, - "Performing ISP error recovery - ha = %p.\n", ha); - ha->isp_ops->reset_chip(vha); + if (!critemp) { + ha->flags.chip_reset_done = 0; + clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + vha->qla_stats.total_isp_aborts++; + ql_log(ql_log_info, vha, 0x013f, + "Performing ISP error recovery - ha = %p.\n", ha); + ha->isp_ops->reset_chip(vha); + } if (atomic_read(&vha->loop_state) != LOOP_DOWN) { atomic_set(&vha->loop_state, LOOP_DOWN); @@ -1383,12 +1418,19 @@ qlafx00_abort_isp_cleanup(scsi_qla_host_t *vha) } if (!ha->flags.eeh_busy) { - /* Requeue all commands in outstanding command list. */ - qla2x00_abort_all_cmds(vha, DID_RESET << 16); + if (critemp) { + qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16); + } else { + /* Requeue all commands in outstanding command list. 
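
The hunk in progress gives qlafx00_abort_isp_cleanup() a critemp flag: a critical-temperature teardown skips the chip-reset bookkeeping, fails outstanding commands with DID_NO_CONNECT instead of DID_RESET, and arms FX00_CRITEMP_RECOVERY rather than FX00_RESET_RECOVERY. Condensed to its two flavours (a sketch of the completed function, not a verbatim excerpt):

static void example_fx00_cleanup(scsi_qla_host_t *vha, bool critemp)
{
	if (!critemp) {				/* normal ISP abort */
		vha->hw->flags.chip_reset_done = 0;
		vha->hw->isp_ops->reset_chip(vha);
	}
	qla2x00_abort_all_cmds(vha,
	    (critemp ? DID_NO_CONNECT : DID_RESET) << 16);
	set_bit(critemp ? FX00_CRITEMP_RECOVERY : FX00_RESET_RECOVERY,
	    &vha->dpc_flags);
}
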
*/ + qla2x00_abort_all_cmds(vha, DID_RESET << 16); + } } qla2x00_free_irqs(vha); - set_bit(FX00_RESET_RECOVERY, &vha->dpc_flags); + if (critemp) + set_bit(FX00_CRITEMP_RECOVERY, &vha->dpc_flags); + else + set_bit(FX00_RESET_RECOVERY, &vha->dpc_flags); /* Clear the Interrupts */ QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS); @@ -1475,6 +1517,7 @@ qlafx00_timer_routine(scsi_qla_host_t *vha) uint32_t fw_heart_beat; uint32_t aenmbx0; struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00; + uint32_t tempc; /* Check firmware health */ if (ha->mr.fw_hbt_cnt) @@ -1539,10 +1582,36 @@ qlafx00_timer_routine(scsi_qla_host_t *vha) } else if ((aenmbx0 & 0xFF00) == MBA_FW_POLL_STATE) { ha->mr.fw_reset_timer_tick = QLAFX00_MAX_RESET_INTERVAL; + } else if (aenmbx0 == MBA_FW_RESET_FCT) { + ha->mr.fw_reset_timer_tick = + QLAFX00_MAX_RESET_INTERVAL; } ha->mr.old_aenmbx0_state = aenmbx0; ha->mr.fw_reset_timer_tick--; } + if (test_bit(FX00_CRITEMP_RECOVERY, &vha->dpc_flags)) { + /* + * Critical temperature recovery to be + * performed in timer routine + */ + if (ha->mr.fw_critemp_timer_tick == 0) { + tempc = QLAFX00_GET_TEMPERATURE(ha); + ql_dbg(ql_dbg_timer, vha, 0x6012, + "ISPFx00(%s): Critical temp timer, " + "current SOC temperature: %d\n", + __func__, tempc); + if (tempc < ha->mr.critical_temperature) { + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + clear_bit(FX00_CRITEMP_RECOVERY, + &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + } + ha->mr.fw_critemp_timer_tick = + QLAFX00_CRITEMP_INTERVAL; + } else { + ha->mr.fw_critemp_timer_tick--; + } + } } /* @@ -1570,7 +1639,7 @@ qlafx00_reset_initialize(scsi_qla_host_t *vha) if (vha->flags.online) { scsi_block_requests(vha->host); - qlafx00_abort_isp_cleanup(vha); + qlafx00_abort_isp_cleanup(vha, false); } ql_log(ql_log_info, vha, 0x0143, @@ -1602,7 +1671,15 @@ qlafx00_abort_isp(scsi_qla_host_t *vha) } scsi_block_requests(vha->host); - qlafx00_abort_isp_cleanup(vha); + qlafx00_abort_isp_cleanup(vha, false); + } else { + scsi_block_requests(vha->host); + clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + vha->qla_stats.total_isp_aborts++; + ha->isp_ops->reset_chip(vha); + set_bit(FX00_RESET_RECOVERY, &vha->dpc_flags); + /* Clear the Interrupts */ + QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS); } ql_log(ql_log_info, vha, 0x0145, @@ -1688,6 +1765,15 @@ qlafx00_process_aen(struct scsi_qla_host *vha, struct qla_work_evt *evt) aen_code = FCH_EVT_LINKDOWN; aen_data = 0; break; + case QLAFX00_MBA_TEMP_CRIT: /* Critical temperature event */ + ql_log(ql_log_info, vha, 0x5082, + "Process critical temperature event " + "aenmb[0]: %x\n", + evt->u.aenfx.evtcode); + scsi_block_requests(vha->host); + qlafx00_abort_isp_cleanup(vha, true); + scsi_unblock_requests(vha->host); + break; } fc_host_post_event(vha->host, fc_get_event_number(), @@ -1879,6 +1965,11 @@ qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type) sizeof(vha->hw->mr.uboot_version)); memcpy(&vha->hw->mr.fru_serial_num, pinfo->fru_serial_num, sizeof(vha->hw->mr.fru_serial_num)); + vha->hw->mr.critical_temperature = + (pinfo->nominal_temp_value) ? + pinfo->nominal_temp_value : QLAFX00_CRITEMP_THRSHLD; + ha->mr.extended_io_enabled = (pinfo->enabled_capabilities & + QLAFX00_EXTENDED_IO_EN_MASK) != 0; } else if (fx_type == FXDISC_GET_PORT_INFO) { struct port_info_data *pinfo = (struct port_info_data *) fdisc->u.fxiocb.rsp_addr; @@ -2021,6 +2112,7 @@ qlafx00_initialize_adapter(scsi_qla_host_t *vha) { int rval; struct qla_hw_data *ha = vha->hw; + uint32_t tempc; /* Clear adapter flags. 
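
Both the critical-temperature timer above and the initialization hunk below sample the SoC temperature through QLAFX00_GET_TEMPERATURE(), whose definition appears further down in the qla_mr.h diff. Unpacked into plain C, the conversion from the raw 9-bit register field to degrees Celsius is:

/* The raw reading sits in bits 9:1 of the SoC Junction Temperature
 * register; e.g. a raw field of 150 yields
 * (3153000 - 1500000) / 13825 = 119 C (integer division). */
static int example_soc_temp_celsius(uint32_t reg)
{
	uint32_t raw = (reg & 0x3FE) >> 1;

	return (3153000 - 10000 * raw) / 13825;
}
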
*/ vha->flags.online = 0; @@ -2028,7 +2120,6 @@ qlafx00_initialize_adapter(scsi_qla_host_t *vha) vha->flags.reset_active = 0; ha->flags.pci_channel_io_perm_failure = 0; ha->flags.eeh_busy = 0; - ha->thermal_support = 0; atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); atomic_set(&vha->loop_state, LOOP_DOWN); vha->device_flags = DFLG_NO_CABLE; @@ -2072,6 +2163,11 @@ qlafx00_initialize_adapter(scsi_qla_host_t *vha) rval = qla2x00_init_rings(vha); ha->flags.chip_reset_done = 1; + tempc = QLAFX00_GET_TEMPERATURE(ha); + ql_dbg(ql_dbg_init, vha, 0x0152, + "ISPFx00(%s): Critical temp timer, current SOC temperature: 0x%x\n", + __func__, tempc); + return rval; } @@ -2526,16 +2622,13 @@ check_scsi_status: if (logit) ql_dbg(ql_dbg_io, fcport->vha, 0x3058, - "FCP command status: 0x%x-0x%x (0x%x) " - "nexus=%ld:%d:%d tgt_id: 0x%x lscsi_status: 0x%x" - "cdb=%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x len=0x%x " - "rsp_info=0x%x resid=0x%x fw_resid=0x%x " - "sense_len=0x%x, par_sense_len=0x%x, rsp_info_len=0x%x\n", + "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%d " + "tgt_id: 0x%x lscsi_status: 0x%x cdb=%10phN len=0x%x " + "rsp_info=0x%x resid=0x%x fw_resid=0x%x sense_len=0x%x, " + "par_sense_len=0x%x, rsp_info_len=0x%x\n", comp_status, scsi_status, res, vha->host_no, cp->device->id, cp->device->lun, fcport->tgt_id, - lscsi_status, cp->cmnd[0], cp->cmnd[1], cp->cmnd[2], - cp->cmnd[3], cp->cmnd[4], cp->cmnd[5], cp->cmnd[6], - cp->cmnd[7], cp->cmnd[8], cp->cmnd[9], scsi_bufflen(cp), + lscsi_status, cp->cmnd, scsi_bufflen(cp), rsp_info_len, resid_len, fw_resid_len, sense_len, par_sense_len, rsp_info_len); @@ -2720,9 +2813,6 @@ qlafx00_process_response_queue(struct scsi_qla_host *vha, struct sts_entry_fx00 *pkt; response_t *lptr; - if (!vha->flags.online) - return; - while (RD_REG_DWORD((void __iomem *)&(rsp->ring_ptr->signature)) != RESPONSE_PROCESSED) { lptr = rsp->ring_ptr; @@ -2824,6 +2914,28 @@ qlafx00_async_event(scsi_qla_host_t *vha) ha->aenmb[0], ha->aenmb[1], ha->aenmb[2], ha->aenmb[3]); data_size = 4; break; + + case QLAFX00_MBA_TEMP_OVER: /* Over temperature event */ + ql_log(ql_log_info, vha, 0x5085, + "Asynchronous over temperature event received " + "aenmb[0]: %x\n", + ha->aenmb[0]); + break; + + case QLAFX00_MBA_TEMP_NORM: /* Normal temperature event */ + ql_log(ql_log_info, vha, 0x5086, + "Asynchronous normal temperature event received " + "aenmb[0]: %x\n", + ha->aenmb[0]); + break; + + case QLAFX00_MBA_TEMP_CRIT: /* Critical temperature event */ + ql_log(ql_log_info, vha, 0x5083, + "Asynchronous critical temperature event received " + "aenmb[0]: %x\n", + ha->aenmb[0]); + break; + default: ha->aenmb[1] = RD_REG_WORD(®->aenmailbox1); ha->aenmb[2] = RD_REG_WORD(®->aenmailbox2); diff --git a/drivers/scsi/qla2xxx/qla_mr.h b/drivers/scsi/qla2xxx/qla_mr.h index 1a092af0e2c3..79a93c52baec 100644 --- a/drivers/scsi/qla2xxx/qla_mr.h +++ b/drivers/scsi/qla2xxx/qla_mr.h @@ -329,11 +329,13 @@ struct config_info_data { uint64_t adapter_id; uint32_t cluster_key_len; - uint8_t cluster_key[10]; + uint8_t cluster_key[16]; uint64_t cluster_master_id; uint64_t cluster_slave_id; uint8_t cluster_flags; + uint32_t enabled_capabilities; + uint32_t nominal_temp_value; } __packed; #define FXDISC_GET_CONFIG_INFO 0x01 @@ -342,10 +344,11 @@ struct config_info_data { #define FXDISC_GET_TGT_NODE_LIST 0x81 #define FXDISC_REG_HOST_INFO 0x99 -#define QLAFX00_HBA_ICNTRL_REG 0x21B08 +#define QLAFX00_HBA_ICNTRL_REG 0x20B08 #define QLAFX00_ICR_ENB_MASK 0x80000000 #define QLAFX00_ICR_DIS_MASK 0x7fffffff #define 
QLAFX00_HST_RST_REG 0x18264 +#define QLAFX00_SOC_TEMP_REG 0x184C4 #define QLAFX00_HST_TO_HBA_REG 0x20A04 #define QLAFX00_HBA_TO_HOST_REG 0x21B70 #define QLAFX00_HST_INT_STS_BITS 0x7 @@ -361,6 +364,9 @@ struct config_info_data { #define QLAFX00_INTR_ALL_CMPLT 0x7 #define QLAFX00_MBA_SYSTEM_ERR 0x8002 +#define QLAFX00_MBA_TEMP_OVER 0x8005 +#define QLAFX00_MBA_TEMP_NORM 0x8006 +#define QLAFX00_MBA_TEMP_CRIT 0x8007 #define QLAFX00_MBA_LINK_UP 0x8011 #define QLAFX00_MBA_LINK_DOWN 0x8012 #define QLAFX00_MBA_PORT_UPDATE 0x8014 @@ -434,9 +440,11 @@ struct qla_mt_iocb_rqst_fx00 { __le32 dataword_extra; - __le32 req_len; + __le16 req_len; + __le16 reserved_2; - __le32 rsp_len; + __le16 rsp_len; + __le16 reserved_3; }; struct qla_mt_iocb_rsp_fx00 { @@ -499,12 +507,37 @@ struct mr_data_fx00 { uint32_t old_fw_hbt_cnt; uint16_t fw_reset_timer_tick; uint8_t fw_reset_timer_exp; + uint16_t fw_critemp_timer_tick; uint32_t old_aenmbx0_state; + uint32_t critical_temperature; + bool extended_io_enabled; }; +#define QLAFX00_EXTENDED_IO_EN_MASK 0x20 + +/* + * SoC Junction Temperature is stored in + * bits 9:1 of SoC Junction Temperature Register + * in a firmware-specific format. + * To get the temperature in Celsius degrees + * the value from this bit field should be converted + * using this formula: + * Temperature (degrees C) = ((3,153,000 - (10,000 * X)) / 13,825) + * where X is the bit field value + * this macro reads the register, extracts the bitfield value, + * performs the calculations and returns temperature in Celsius + */ +#define QLAFX00_GET_TEMPERATURE(ha) ((3153000 - (10000 * \ + ((QLAFX00_RD_REG(ha, QLAFX00_SOC_TEMP_REG) & 0x3FE) >> 1))) / 13825) + + #define QLAFX00_LOOP_DOWN_TIME 615 /* 600 */ #define QLAFX00_HEARTBEAT_INTERVAL 6 /* number of seconds */ #define QLAFX00_HEARTBEAT_MISS_CNT 3 /* number of miss */ #define QLAFX00_RESET_INTERVAL 120 /* number of seconds */ #define QLAFX00_MAX_RESET_INTERVAL 600 /* number of seconds */ +#define QLAFX00_CRITEMP_INTERVAL 60 /* number of seconds */ + +#define QLAFX00_CRITEMP_THRSHLD 80 /* Celsius degrees */ + #endif diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c index cce0cd0d7ec4..11ce53dcbe7e 100644 --- a/drivers/scsi/qla2xxx/qla_nx.c +++ b/drivers/scsi/qla2xxx/qla_nx.c @@ -848,7 +848,6 @@ qla82xx_rom_lock(struct qla_hw_data *ha) { int done = 0, timeout = 0; uint32_t lock_owner = 0; - scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); while (!done) { /* acquire semaphore2 from PCI HW block */ @@ -857,9 +856,6 @@ qla82xx_rom_lock(struct qla_hw_data *ha) break; if (timeout >= qla82xx_rom_lock_timeout) { lock_owner = qla82xx_rd_32(ha, QLA82XX_ROM_LOCK_ID); - ql_dbg(ql_dbg_p3p, vha, 0xb085, - "Failed to acquire rom lock, acquired by %d.\n", - lock_owner); return -1; } timeout++; @@ -1666,8 +1662,14 @@ qla82xx_iospace_config(struct qla_hw_data *ha) } /* Mapping of IO base pointer */ - ha->iobase = (device_reg_t __iomem *)((uint8_t *)ha->nx_pcibase + - 0xbc000 + (ha->pdev->devfn << 11)); + if (IS_QLA8044(ha)) { + ha->iobase = + (device_reg_t __iomem *)((uint8_t *)ha->nx_pcibase); + } else if (IS_QLA82XX(ha)) { + ha->iobase = + (device_reg_t __iomem *)((uint8_t *)ha->nx_pcibase + + 0xbc000 + (ha->pdev->devfn << 11)); + } if (!ql2xdbwr) { ha->nxdb_wr_ptr = @@ -1967,7 +1969,7 @@ static struct qla82xx_legacy_intr_set legacy_intr[] = \ * @ha: SCSI driver HA context * @mb0: Mailbox0 register */ -static void +void qla82xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) { uint16_t cnt; @@ -2075,13 +2077,6 @@
qla82xx_intr_handler(int irq, void *dev_id) WRT_REG_DWORD(®->host_int, 0); } -#ifdef QL_DEBUG_LEVEL_17 - if (!irq && ha->flags.eeh_busy) - ql_log(ql_log_warn, vha, 0x503d, - "isr:status %x, cmd_flags %lx, mbox_int %x, stat %x.\n", - status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat); -#endif - qla2x00_handle_mbx_completion(ha, status); spin_unlock_irqrestore(&ha->hardware_lock, flags); @@ -2147,13 +2142,6 @@ qla82xx_msix_default(int irq, void *dev_id) WRT_REG_DWORD(®->host_int, 0); } while (0); -#ifdef QL_DEBUG_LEVEL_17 - if (!irq && ha->flags.eeh_busy) - ql_log(ql_log_warn, vha, 0x5044, - "isr:status %x, cmd_flags %lx, mbox_int %x, stat %x.\n", - status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat); -#endif - qla2x00_handle_mbx_completion(ha, status); spin_unlock_irqrestore(&ha->hardware_lock, flags); @@ -2247,7 +2235,10 @@ qla82xx_enable_intrs(struct qla_hw_data *ha) scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); qla82xx_mbx_intr_enable(vha); spin_lock_irq(&ha->hardware_lock); - qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff); + if (IS_QLA8044(ha)) + qla8044_wr_reg(ha, LEG_INTR_MASK_OFFSET, 0); + else + qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff); spin_unlock_irq(&ha->hardware_lock); ha->interrupts_on = 1; } @@ -2258,7 +2249,10 @@ qla82xx_disable_intrs(struct qla_hw_data *ha) scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); qla82xx_mbx_intr_disable(vha); spin_lock_irq(&ha->hardware_lock); - qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0x0400); + if (IS_QLA8044(ha)) + qla8044_wr_reg(ha, LEG_INTR_MASK_OFFSET, 1); + else + qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0x0400); spin_unlock_irq(&ha->hardware_lock); ha->interrupts_on = 0; } @@ -3008,6 +3002,9 @@ qla8xxx_dev_failed_handler(scsi_qla_host_t *vha) if (IS_QLA82XX(ha)) { qla82xx_clear_drv_active(ha); qla82xx_idc_unlock(ha); + } else if (IS_QLA8044(ha)) { + qla8044_clear_drv_active(vha); + qla8044_idc_unlock(ha); } /* Set DEV_FAILED flag to disable timer */ @@ -3134,7 +3131,7 @@ qla82xx_check_md_needed(scsi_qla_host_t *vha) if (fw_major_version != ha->fw_major_version || fw_minor_version != ha->fw_minor_version || fw_subminor_version != ha->fw_subminor_version) { - ql_log(ql_log_info, vha, 0xb02d, + ql_dbg(ql_dbg_p3p, vha, 0xb02d, "Firmware version differs " "Previous version: %d:%d:%d - " "New version: %d:%d:%d\n", @@ -3330,6 +3327,14 @@ static int qla82xx_check_temp(scsi_qla_host_t *vha) return 0; } +int qla82xx_read_temperature(scsi_qla_host_t *vha) +{ + uint32_t temp; + + temp = qla82xx_rd_32(vha->hw, CRB_TEMP_STATE); + return qla82xx_get_temp_val(temp); +} + void qla82xx_clear_pending_mbx(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; @@ -3423,8 +3428,18 @@ void qla82xx_watchdog(scsi_qla_host_t *vha) int qla82xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr) { - int rval; - rval = qla82xx_device_state_handler(vha); + int rval = -1; + struct qla_hw_data *ha = vha->hw; + + if (IS_QLA82XX(ha)) + rval = qla82xx_device_state_handler(vha); + else if (IS_QLA8044(ha)) { + qla8044_idc_lock(ha); + /* Decide the reset ownership */ + qla83xx_reset_ownership(vha); + qla8044_idc_unlock(ha); + rval = qla8044_device_state_handler(vha); + } return rval; } @@ -3432,17 +3447,25 @@ void qla82xx_set_reset_owner(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; - uint32_t dev_state; + uint32_t dev_state = 0; + + if (IS_QLA82XX(ha)) + dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); + else if (IS_QLA8044(ha)) + dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX); - 
dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); if (dev_state == QLA8XXX_DEV_READY) { ql_log(ql_log_info, vha, 0xb02f, "HW State: NEED RESET\n"); - qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, - QLA8XXX_DEV_NEED_RESET); - ha->flags.nic_core_reset_owner = 1; - ql_dbg(ql_dbg_p3p, vha, 0xb030, - "reset_owner is 0x%x\n", ha->portnum); + if (IS_QLA82XX(ha)) { + qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, + QLA8XXX_DEV_NEED_RESET); + ha->flags.nic_core_reset_owner = 1; + ql_dbg(ql_dbg_p3p, vha, 0xb030, + "reset_owner is 0x%x\n", ha->portnum); + } else if (IS_QLA8044(ha)) + qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX, + QLA8XXX_DEV_NEED_RESET); } else ql_log(ql_log_info, vha, 0xb031, "Device state is 0x%x = %s.\n", @@ -3463,7 +3486,7 @@ qla82xx_set_reset_owner(scsi_qla_host_t *vha) int qla82xx_abort_isp(scsi_qla_host_t *vha) { - int rval; + int rval = -1; struct qla_hw_data *ha = vha->hw; if (vha->device_flags & DFLG_DEV_FAILED) { @@ -3477,7 +3500,15 @@ qla82xx_abort_isp(scsi_qla_host_t *vha) qla82xx_set_reset_owner(vha); qla82xx_idc_unlock(ha); - rval = qla82xx_device_state_handler(vha); + if (IS_QLA82XX(ha)) + rval = qla82xx_device_state_handler(vha); + else if (IS_QLA8044(ha)) { + qla8044_idc_lock(ha); + /* Decide the reset ownership */ + qla83xx_reset_ownership(vha); + qla8044_idc_unlock(ha); + rval = qla8044_device_state_handler(vha); + } qla82xx_idc_lock(ha); qla82xx_clear_rst_ready(ha); @@ -3597,7 +3628,7 @@ int qla2x00_wait_for_fcoe_ctx_reset(scsi_qla_host_t *vha) void qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha) { - int i; + int i, fw_state = 0; unsigned long flags; struct qla_hw_data *ha = vha->hw; @@ -3608,7 +3639,11 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha) if (!ha->flags.isp82xx_fw_hung) { for (i = 0; i < 2; i++) { msleep(1000); - if (qla82xx_check_fw_alive(vha)) { + if (IS_QLA82XX(ha)) + fw_state = qla82xx_check_fw_alive(vha); + else if (IS_QLA8044(ha)) + fw_state = qla8044_check_fw_alive(vha); + if (fw_state) { ha->flags.isp82xx_fw_hung = 1; qla82xx_clear_pending_mbx(vha); break; @@ -4072,7 +4107,7 @@ qla82xx_minidump_process_rdmem(scsi_qla_host_t *vha, return QLA_SUCCESS; } -static int +int qla82xx_validate_template_chksum(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; @@ -4384,7 +4419,11 @@ qla82xx_md_prep(scsi_qla_host_t *vha) ha->md_template_size / 1024); /* Get Minidump template */ - rval = qla82xx_md_get_template(vha); + if (IS_QLA8044(ha)) + rval = qla8044_md_get_template(vha); + else + rval = qla82xx_md_get_template(vha); + if (rval == QLA_SUCCESS) { ql_dbg(ql_dbg_p3p, vha, 0xb04b, "MiniDump Template obtained\n"); diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h index d268e8406fdb..1bb93dbbccbb 100644 --- a/drivers/scsi/qla2xxx/qla_nx.h +++ b/drivers/scsi/qla2xxx/qla_nx.h @@ -589,6 +589,7 @@ * The PCI VendorID and DeviceID for our board. */ #define PCI_DEVICE_ID_QLOGIC_ISP8021 0x8021 +#define PCI_DEVICE_ID_QLOGIC_ISP8044 0x8044 #define QLA82XX_MSIX_TBL_SPACE 8192 #define QLA82XX_PCI_REG_MSIX_TBL 0x44 @@ -954,6 +955,11 @@ struct ct6_dsd { #define QLA82XX_CNTRL 98 #define QLA82XX_TLHDR 99 #define QLA82XX_RDEND 255 +#define QLA8044_POLLRD 35 +#define QLA8044_RDMUX2 36 +#define QLA8044_L1DTG 8 +#define QLA8044_L1ITG 9 +#define QLA8044_POLLRDMWR 37 /* * Opcodes for Control Entries. @@ -1191,4 +1197,8 @@ enum { QLA82XX_TEMP_WARN, /* Sound alert, temperature getting high */ QLA82XX_TEMP_PANIC /* Fatal error, hardware has shut down. 
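
The qla_nx.c hunks above repeat one dispatch shape: reads that used to go straight to qla82xx_rd_32() now branch by family, because the ISP8044 exposes the same IDC state through its own CRB table via qla8044_rd_direct(). The recurring pattern, condensed into a sketch:

/* Family-dispatched read of the shared IDC device state. */
static uint32_t example_read_dev_state(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;

	if (IS_QLA82XX(ha))
		return qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
	else if (IS_QLA8044(ha))
		return qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);

	return 0;	/* not a P3P part */
}
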
*/ }; + +#define LEG_INTR_PTR_OFFSET 0x38C0 +#define LEG_INTR_TRIG_OFFSET 0x38C4 +#define LEG_INTR_MASK_OFFSET 0x38C8 #endif diff --git a/drivers/scsi/qla2xxx/qla_nx2.c b/drivers/scsi/qla2xxx/qla_nx2.c new file mode 100644 index 000000000000..8164cc9e7286 --- /dev/null +++ b/drivers/scsi/qla2xxx/qla_nx2.c @@ -0,0 +1,3716 @@ +/* + * QLogic Fibre Channel HBA Driver + * Copyright (c) 2003-2013 QLogic Corporation + * + * See LICENSE.qla2xxx for copyright and licensing details. + */ + +#include <linux/vmalloc.h> + +#include "qla_def.h" +#include "qla_gbl.h" + +#include <linux/delay.h> + +/* 8044 Flash Read/Write functions */ +uint32_t +qla8044_rd_reg(struct qla_hw_data *ha, ulong addr) +{ + return readl((void __iomem *) (ha->nx_pcibase + addr)); +} + +void +qla8044_wr_reg(struct qla_hw_data *ha, ulong addr, uint32_t val) +{ + writel(val, (void __iomem *)((ha)->nx_pcibase + addr)); +} + +int +qla8044_rd_direct(struct scsi_qla_host *vha, + const uint32_t crb_reg) +{ + struct qla_hw_data *ha = vha->hw; + + if (crb_reg < CRB_REG_INDEX_MAX) + return qla8044_rd_reg(ha, qla8044_reg_tbl[crb_reg]); + else + return QLA_FUNCTION_FAILED; +} + +void +qla8044_wr_direct(struct scsi_qla_host *vha, + const uint32_t crb_reg, + const uint32_t value) +{ + struct qla_hw_data *ha = vha->hw; + + if (crb_reg < CRB_REG_INDEX_MAX) + qla8044_wr_reg(ha, qla8044_reg_tbl[crb_reg], value); +} + +static int +qla8044_set_win_base(scsi_qla_host_t *vha, uint32_t addr) +{ + uint32_t val; + int ret_val = QLA_SUCCESS; + struct qla_hw_data *ha = vha->hw; + + qla8044_wr_reg(ha, QLA8044_CRB_WIN_FUNC(ha->portnum), addr); + val = qla8044_rd_reg(ha, QLA8044_CRB_WIN_FUNC(ha->portnum)); + + if (val != addr) { + ql_log(ql_log_warn, vha, 0xb087, + "%s: Failed to set register window : " + "addr written 0x%x, read 0x%x!\n", + __func__, addr, val); + ret_val = QLA_FUNCTION_FAILED; + } + return ret_val; +} + +static int +qla8044_rd_reg_indirect(scsi_qla_host_t *vha, uint32_t addr, uint32_t *data) +{ + int ret_val = QLA_SUCCESS; + struct qla_hw_data *ha = vha->hw; + + ret_val = qla8044_set_win_base(vha, addr); + if (!ret_val) + *data = qla8044_rd_reg(ha, QLA8044_WILDCARD); + else + ql_log(ql_log_warn, vha, 0xb088, + "%s: failed read of addr 0x%x!\n", __func__, addr); + return ret_val; +} + +static int +qla8044_wr_reg_indirect(scsi_qla_host_t *vha, uint32_t addr, uint32_t data) +{ + int ret_val = QLA_SUCCESS; + struct qla_hw_data *ha = vha->hw; + + ret_val = qla8044_set_win_base(vha, addr); + if (!ret_val) + qla8044_wr_reg(ha, QLA8044_WILDCARD, data); + else + ql_log(ql_log_warn, vha, 0xb089, + "%s: failed wrt to addr 0x%x, data 0x%x\n", + __func__, addr, data); + return ret_val; +} + +/* + * qla8044_read_write_crb_reg - Read from raddr and write value to waddr. + * + * @ha : Pointer to adapter structure + * @raddr : CRB address to read from + * @waddr : CRB address to write to + * + */ +static void +qla8044_read_write_crb_reg(struct scsi_qla_host *vha, + uint32_t raddr, uint32_t waddr) +{ + uint32_t value; + + qla8044_rd_reg_indirect(vha, raddr, &value); + qla8044_wr_reg_indirect(vha, waddr, value); +} + +/* + * qla8044_rmw_crb_reg - Read value from raddr, AND with test_mask, + * Shift Left,Right/OR/XOR with values RMW header and write value to waddr. + * + * @vha : Pointer to adapter structure + * @raddr : CRB address to read from + * @waddr : CRB address to write to + * @p_rmw_hdr : header with shift/or/xor values. 
+ * + */ +static void +qla8044_rmw_crb_reg(struct scsi_qla_host *vha, + uint32_t raddr, uint32_t waddr, struct qla8044_rmw *p_rmw_hdr) +{ + uint32_t value; + + if (p_rmw_hdr->index_a) + value = vha->reset_tmplt.array[p_rmw_hdr->index_a]; + else + qla8044_rd_reg_indirect(vha, raddr, &value); + value &= p_rmw_hdr->test_mask; + value <<= p_rmw_hdr->shl; + value >>= p_rmw_hdr->shr; + value |= p_rmw_hdr->or_value; + value ^= p_rmw_hdr->xor_value; + qla8044_wr_reg_indirect(vha, waddr, value); + return; +} + +inline void +qla8044_set_qsnt_ready(struct scsi_qla_host *vha) +{ + uint32_t qsnt_state; + struct qla_hw_data *ha = vha->hw; + + qsnt_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX); + qsnt_state |= (1 << ha->portnum); + qla8044_wr_direct(vha, QLA8044_CRB_DRV_STATE_INDEX, qsnt_state); + ql_log(ql_log_info, vha, 0xb08e, "%s(%ld): qsnt_state: 0x%08x\n", + __func__, vha->host_no, qsnt_state); +} + +void +qla8044_clear_qsnt_ready(struct scsi_qla_host *vha) +{ + uint32_t qsnt_state; + struct qla_hw_data *ha = vha->hw; + + qsnt_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX); + qsnt_state &= ~(1 << ha->portnum); + qla8044_wr_direct(vha, QLA8044_CRB_DRV_STATE_INDEX, qsnt_state); + ql_log(ql_log_info, vha, 0xb08f, "%s(%ld): qsnt_state: 0x%08x\n", + __func__, vha->host_no, qsnt_state); +} + +/** + * + * qla8044_lock_recovery - Recovers the idc_lock. + * @ha : Pointer to adapter structure + * + * Lock Recovery Register + * 5-2 Lock recovery owner: Function ID of driver doing lock recovery, + * valid if bits 1..0 are set by driver doing lock recovery. + * 1-0 1 - Driver intends to force unlock the IDC lock. + * 2 - Driver is moving forward to unlock the IDC lock. Driver clears + * this field after force unlocking the IDC lock. + * + * Lock Recovery process + * a. Read the IDC_LOCK_RECOVERY register. If the value in bits 1..0 is + * greater than 0, then wait for the other driver to unlock otherwise + * move to the next step. + * b. Indicate intent to force-unlock by writing 1h to the IDC_LOCK_RECOVERY + * register bits 1..0 and also set the function# in bits 5..2. + * c. Read the IDC_LOCK_RECOVERY register again after a delay of 200ms. + * Wait for the other driver to perform lock recovery if the function + * number in bits 5..2 has changed, otherwise move to the next step. + * d. Write a value of 2h to the IDC_LOCK_RECOVERY register bits 1..0 + * leaving your function# in bits 5..2. + * e. Force unlock using the DRIVER_UNLOCK register and immediately clear + * the IDC_LOCK_RECOVERY bits 5..0 by writing 0. 
+ **/ +static int +qla8044_lock_recovery(struct scsi_qla_host *vha) +{ + uint32_t lock = 0, lockid; + struct qla_hw_data *ha = vha->hw; + + lockid = qla8044_rd_reg(ha, QLA8044_DRV_LOCKRECOVERY); + + /* Check for other Recovery in progress, go wait */ + if ((lockid & IDC_LOCK_RECOVERY_STATE_MASK) != 0) + return QLA_FUNCTION_FAILED; + + /* Intent to Recover */ + qla8044_wr_reg(ha, QLA8044_DRV_LOCKRECOVERY, + (ha->portnum << + IDC_LOCK_RECOVERY_STATE_SHIFT_BITS) | INTENT_TO_RECOVER); + msleep(200); + + /* Check Intent to Recover is advertised */ + lockid = qla8044_rd_reg(ha, QLA8044_DRV_LOCKRECOVERY); + if ((lockid & IDC_LOCK_RECOVERY_OWNER_MASK) != (ha->portnum << + IDC_LOCK_RECOVERY_STATE_SHIFT_BITS)) + return QLA_FUNCTION_FAILED; + + ql_dbg(ql_dbg_p3p, vha, 0xb08B, "%s:%d: IDC Lock recovery initiated\n" + , __func__, ha->portnum); + + /* Proceed to Recover */ + qla8044_wr_reg(ha, QLA8044_DRV_LOCKRECOVERY, + (ha->portnum << IDC_LOCK_RECOVERY_STATE_SHIFT_BITS) | + PROCEED_TO_RECOVER); + + /* Force Unlock() */ + qla8044_wr_reg(ha, QLA8044_DRV_LOCK_ID, 0xFF); + qla8044_rd_reg(ha, QLA8044_DRV_UNLOCK); + + /* Clear bits 0-5 in IDC_RECOVERY register*/ + qla8044_wr_reg(ha, QLA8044_DRV_LOCKRECOVERY, 0); + + /* Get lock() */ + lock = qla8044_rd_reg(ha, QLA8044_DRV_LOCK); + if (lock) { + lockid = qla8044_rd_reg(ha, QLA8044_DRV_LOCK_ID); + lockid = ((lockid + (1 << 8)) & ~0xFF) | ha->portnum; + qla8044_wr_reg(ha, QLA8044_DRV_LOCK_ID, lockid); + return QLA_SUCCESS; + } else + return QLA_FUNCTION_FAILED; +} + +int +qla8044_idc_lock(struct qla_hw_data *ha) +{ + uint32_t ret_val = QLA_SUCCESS, timeout = 0, status = 0; + uint32_t lock_id, lock_cnt, func_num, tmo_owner = 0, first_owner = 0; + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); + + while (status == 0) { + /* acquire semaphore5 from PCI HW block */ + status = qla8044_rd_reg(ha, QLA8044_DRV_LOCK); + + if (status) { + /* Increment Counter (8-31) and update func_num (0-7) on + * getting a successful lock */ + lock_id = qla8044_rd_reg(ha, QLA8044_DRV_LOCK_ID); + lock_id = ((lock_id + (1 << 8)) & ~0xFF) | ha->portnum; + qla8044_wr_reg(ha, QLA8044_DRV_LOCK_ID, lock_id); + break; + } + + if (timeout == 0) + first_owner = qla8044_rd_reg(ha, QLA8044_DRV_LOCK_ID); + + if (++timeout >= + (QLA8044_DRV_LOCK_TIMEOUT / QLA8044_DRV_LOCK_MSLEEP)) { + tmo_owner = qla8044_rd_reg(ha, QLA8044_DRV_LOCK_ID); + func_num = tmo_owner & 0xFF; + lock_cnt = tmo_owner >> 8; + ql_log(ql_log_warn, vha, 0xb114, + "%s: Lock by func %d failed after 2s, lock held " + "by func %d, lock count %d, first_owner %d\n", + __func__, ha->portnum, func_num, lock_cnt, + (first_owner & 0xFF)); + if (first_owner != tmo_owner) { + /* Some other driver got lock, + * OR same driver got lock again (counter + * value changed), when we were waiting for + * lock. Retry for another 2 sec */ + ql_dbg(ql_dbg_p3p, vha, 0xb115, + "%s: %d: IDC lock failed\n", + __func__, ha->portnum); + timeout = 0; + } else { + /* Same driver holding lock > 2sec. 
+ * Force Recovery */
+ if (qla8044_lock_recovery(vha) == QLA_SUCCESS) {
+ /* Recovered and got lock */
+ ret_val = QLA_SUCCESS;
+ ql_dbg(ql_dbg_p3p, vha, 0xb116,
+ "%s: IDC lock Recovery by %d "
+ "successful...\n", __func__,
+ ha->portnum);
+ break;
+ }
+ /* Recovery Failed, some other function
+ * has the lock, wait for 2secs
+ * and retry
+ */
+ ql_dbg(ql_dbg_p3p, vha, 0xb08a,
+ "%s: IDC lock Recovery by %d "
+ "failed, Retrying timeout\n", __func__,
+ ha->portnum);
+ timeout = 0;
+ }
+ }
+ msleep(QLA8044_DRV_LOCK_MSLEEP);
+ }
+ return ret_val;
+}
+
+void
+qla8044_idc_unlock(struct qla_hw_data *ha)
+{
+ int id;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
+ id = qla8044_rd_reg(ha, QLA8044_DRV_LOCK_ID);
+
+ if ((id & 0xFF) != ha->portnum) {
+ ql_log(ql_log_warn, vha, 0xb118,
+ "%s: IDC Unlock by %d failed, lock owner is %d!\n",
+ __func__, ha->portnum, (id & 0xFF));
+ return;
+ }
+
+ /* Keep lock counter value, update the ha->func_num to 0xFF */
+ qla8044_wr_reg(ha, QLA8044_DRV_LOCK_ID, (id | 0xFF));
+ qla8044_rd_reg(ha, QLA8044_DRV_UNLOCK);
+}
+
+/* 8044 Flash Lock/Unlock functions */
+static int
+qla8044_flash_lock(scsi_qla_host_t *vha)
+{
+ int lock_owner;
+ int timeout = 0;
+ uint32_t lock_status = 0;
+ int ret_val = QLA_SUCCESS;
+ struct qla_hw_data *ha = vha->hw;
+
+ while (lock_status == 0) {
+ lock_status = qla8044_rd_reg(ha, QLA8044_FLASH_LOCK);
+ if (lock_status)
+ break;
+
+ if (++timeout >= QLA8044_FLASH_LOCK_TIMEOUT / 20) {
+ lock_owner = qla8044_rd_reg(ha,
+ QLA8044_FLASH_LOCK_ID);
+ ql_log(ql_log_warn, vha, 0xb113,
+ "%s: flash lock by %d failed, held by %d\n",
+ __func__, ha->portnum, lock_owner);
+ ret_val = QLA_FUNCTION_FAILED;
+ break;
+ }
+ msleep(20);
+ }
+ qla8044_wr_reg(ha, QLA8044_FLASH_LOCK_ID, ha->portnum);
+ return ret_val;
+}
+
+static void
+qla8044_flash_unlock(scsi_qla_host_t *vha)
+{
+ int ret_val;
+ struct qla_hw_data *ha = vha->hw;
+
+ /* Reading FLASH_UNLOCK register unlocks the Flash */
+ qla8044_wr_reg(ha, QLA8044_FLASH_LOCK_ID, 0xFF);
+ ret_val = qla8044_rd_reg(ha, QLA8044_FLASH_UNLOCK);
+}
+
+
+static
+void qla8044_flash_lock_recovery(struct scsi_qla_host *vha)
+{
+
+ if (qla8044_flash_lock(vha)) {
+ /* Someone else is holding the lock. */
+ ql_log(ql_log_warn, vha, 0xb120, "Resetting flash_lock\n");
+ }
+
+ /*
+ * Either we got the lock, or someone
+ * else died while holding it.
+ * In either case, unlock.
+ */
+ qla8044_flash_unlock(vha);
+}
+
+/*
+ * Address and length are byte address
+ */
+static int
+qla8044_read_flash_data(scsi_qla_host_t *vha, uint8_t *p_data,
+ uint32_t flash_addr, int u32_word_count)
+{
+ int i, ret_val = QLA_SUCCESS;
+ uint32_t u32_word;
+
+ if (qla8044_flash_lock(vha) != QLA_SUCCESS) {
+ ret_val = QLA_FUNCTION_FAILED;
+ goto exit_lock_error;
+ }
+
+ if (flash_addr & 0x03) {
+ ql_log(ql_log_warn, vha, 0xb117,
+ "%s: Illegal addr = 0x%x\n", __func__, flash_addr);
+ ret_val = QLA_FUNCTION_FAILED;
+ goto exit_flash_read;
+ }
+
+ for (i = 0; i < u32_word_count; i++) {
+ if (qla8044_wr_reg_indirect(vha, QLA8044_FLASH_DIRECT_WINDOW,
+ (flash_addr & 0xFFFF0000))) {
+ ql_log(ql_log_warn, vha, 0xb119,
+ "%s: failed to write addr 0x%x to "
+ "FLASH_DIRECT_WINDOW!\n
", + __func__, flash_addr); + ret_val = QLA_FUNCTION_FAILED; + goto exit_flash_read; + } + + ret_val = qla8044_rd_reg_indirect(vha, + QLA8044_FLASH_DIRECT_DATA(flash_addr), + &u32_word); + if (ret_val != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0xb08c, + "%s: failed to read addr 0x%x!\n", + __func__, flash_addr); + goto exit_flash_read; + } + + *(uint32_t *)p_data = u32_word; + p_data = p_data + 4; + flash_addr = flash_addr + 4; + } + +exit_flash_read: + qla8044_flash_unlock(vha); + +exit_lock_error: + return ret_val; +} + +/* + * Address and length are byte address + */ +uint8_t * +qla8044_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf, + uint32_t offset, uint32_t length) +{ + scsi_block_requests(vha->host); + if (qla8044_read_flash_data(vha, (uint8_t *)buf, offset, length / 4) + != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0xb08d, + "%s: Failed to read from flash\n", + __func__); + } + scsi_unblock_requests(vha->host); + return buf; +} + +inline int +qla8044_need_reset(struct scsi_qla_host *vha) +{ + uint32_t drv_state, drv_active; + int rval; + struct qla_hw_data *ha = vha->hw; + + drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX); + drv_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX); + + rval = drv_state & (1 << ha->portnum); + + if (ha->flags.eeh_busy && drv_active) + rval = 1; + return rval; +} + +/* + * qla8044_write_list - Write the value (p_entry->arg2) to address specified + * by p_entry->arg1 for all entries in header with delay of p_hdr->delay between + * entries. + * + * @vha : Pointer to adapter structure + * @p_hdr : reset_entry header for WRITE_LIST opcode. + * + */ +static void +qla8044_write_list(struct scsi_qla_host *vha, + struct qla8044_reset_entry_hdr *p_hdr) +{ + struct qla8044_entry *p_entry; + uint32_t i; + + p_entry = (struct qla8044_entry *)((char *)p_hdr + + sizeof(struct qla8044_reset_entry_hdr)); + + for (i = 0; i < p_hdr->count; i++, p_entry++) { + qla8044_wr_reg_indirect(vha, p_entry->arg1, p_entry->arg2); + if (p_hdr->delay) + udelay((uint32_t)(p_hdr->delay)); + } +} + +/* + * qla8044_read_write_list - Read from address specified by p_entry->arg1, + * write value read to address specified by p_entry->arg2, for all entries in + * header with delay of p_hdr->delay between entries. + * + * @vha : Pointer to adapter structure + * @p_hdr : reset_entry header for READ_WRITE_LIST opcode. + * + */ +static void +qla8044_read_write_list(struct scsi_qla_host *vha, + struct qla8044_reset_entry_hdr *p_hdr) +{ + struct qla8044_entry *p_entry; + uint32_t i; + + p_entry = (struct qla8044_entry *)((char *)p_hdr + + sizeof(struct qla8044_reset_entry_hdr)); + + for (i = 0; i < p_hdr->count; i++, p_entry++) { + qla8044_read_write_crb_reg(vha, p_entry->arg1, + p_entry->arg2); + if (p_hdr->delay) + udelay((uint32_t)(p_hdr->delay)); + } +} + +/* + * qla8044_poll_reg - Poll the given CRB addr for duration msecs till + * value read ANDed with test_mask is equal to test_result. + * + * @ha : Pointer to adapter structure + * @addr : CRB register address + * @duration : Poll for total of "duration" msecs + * @test_mask : Mask value read with "test_mask" + * @test_result : Compare (value&test_mask) with test_result. 
+ * + * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED + */ +static int +qla8044_poll_reg(struct scsi_qla_host *vha, uint32_t addr, + int duration, uint32_t test_mask, uint32_t test_result) +{ + uint32_t value; + int timeout_error; + uint8_t retries; + int ret_val = QLA_SUCCESS; + + ret_val = qla8044_rd_reg_indirect(vha, addr, &value); + if (ret_val == QLA_FUNCTION_FAILED) { + timeout_error = 1; + goto exit_poll_reg; + } + + /* poll every 1/10 of the total duration */ + retries = duration/10; + + do { + if ((value & test_mask) != test_result) { + timeout_error = 1; + msleep(duration/10); + ret_val = qla8044_rd_reg_indirect(vha, addr, &value); + if (ret_val == QLA_FUNCTION_FAILED) { + timeout_error = 1; + goto exit_poll_reg; + } + } else { + timeout_error = 0; + break; + } + } while (retries--); + +exit_poll_reg: + if (timeout_error) { + vha->reset_tmplt.seq_error++; + ql_log(ql_log_fatal, vha, 0xb090, + "%s: Poll Failed: 0x%08x 0x%08x 0x%08x\n", + __func__, value, test_mask, test_result); + } + + return timeout_error; +} + +/* + * qla8044_poll_list - For all entries in the POLL_LIST header, poll read CRB + * register specified by p_entry->arg1 and compare (value AND test_mask) with + * test_result to validate it. Wait for p_hdr->delay between processing entries. + * + * @ha : Pointer to adapter structure + * @p_hdr : reset_entry header for POLL_LIST opcode. + * + */ +static void +qla8044_poll_list(struct scsi_qla_host *vha, + struct qla8044_reset_entry_hdr *p_hdr) +{ + long delay; + struct qla8044_entry *p_entry; + struct qla8044_poll *p_poll; + uint32_t i; + uint32_t value; + + p_poll = (struct qla8044_poll *) + ((char *)p_hdr + sizeof(struct qla8044_reset_entry_hdr)); + + /* Entries start after 8 byte qla8044_poll, poll header contains + * the test_mask, test_value. + */ + p_entry = (struct qla8044_entry *)((char *)p_poll + + sizeof(struct qla8044_poll)); + + delay = (long)p_hdr->delay; + + if (!delay) { + for (i = 0; i < p_hdr->count; i++, p_entry++) + qla8044_poll_reg(vha, p_entry->arg1, + delay, p_poll->test_mask, p_poll->test_value); + } else { + for (i = 0; i < p_hdr->count; i++, p_entry++) { + if (delay) { + if (qla8044_poll_reg(vha, + p_entry->arg1, delay, + p_poll->test_mask, + p_poll->test_value)) { + /*If + * (data_read&test_mask != test_value) + * read TIMEOUT_ADDR (arg1) and + * ADDR (arg2) registers + */ + qla8044_rd_reg_indirect(vha, + p_entry->arg1, &value); + qla8044_rd_reg_indirect(vha, + p_entry->arg2, &value); + } + } + } + } +} + +/* + * qla8044_poll_write_list - Write dr_value, ar_value to dr_addr/ar_addr, + * read ar_addr, if (value& test_mask != test_mask) re-read till timeout + * expires. + * + * @vha : Pointer to adapter structure + * @p_hdr : reset entry header for POLL_WRITE_LIST opcode. 
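+ *
+ * Entry layout sketch (editor's note, inferred from the field accesses
+ * below and not from the actual header, so the field order is only
+ * illustrative):
+ *
+ *	struct qla8044_quad_entry {
+ *		uint32_t dr_addr;	// data register, written first
+ *		uint32_t dr_value;
+ *		uint32_t ar_addr;	// address register, written then polled
+ *		uint32_t ar_value;
+ *	};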
+ * + */ +static void +qla8044_poll_write_list(struct scsi_qla_host *vha, + struct qla8044_reset_entry_hdr *p_hdr) +{ + long delay; + struct qla8044_quad_entry *p_entry; + struct qla8044_poll *p_poll; + uint32_t i; + + p_poll = (struct qla8044_poll *)((char *)p_hdr + + sizeof(struct qla8044_reset_entry_hdr)); + + p_entry = (struct qla8044_quad_entry *)((char *)p_poll + + sizeof(struct qla8044_poll)); + + delay = (long)p_hdr->delay; + + for (i = 0; i < p_hdr->count; i++, p_entry++) { + qla8044_wr_reg_indirect(vha, + p_entry->dr_addr, p_entry->dr_value); + qla8044_wr_reg_indirect(vha, + p_entry->ar_addr, p_entry->ar_value); + if (delay) { + if (qla8044_poll_reg(vha, + p_entry->ar_addr, delay, + p_poll->test_mask, + p_poll->test_value)) { + ql_dbg(ql_dbg_p3p, vha, 0xb091, + "%s: Timeout Error: poll list, ", + __func__); + ql_dbg(ql_dbg_p3p, vha, 0xb092, + "item_num %d, entry_num %d\n", i, + vha->reset_tmplt.seq_index); + } + } + } +} + +/* + * qla8044_read_modify_write - Read value from p_entry->arg1, modify the + * value, write value to p_entry->arg2. Process entries with p_hdr->delay + * between entries. + * + * @vha : Pointer to adapter structure + * @p_hdr : header with shift/or/xor values. + * + */ +static void +qla8044_read_modify_write(struct scsi_qla_host *vha, + struct qla8044_reset_entry_hdr *p_hdr) +{ + struct qla8044_entry *p_entry; + struct qla8044_rmw *p_rmw_hdr; + uint32_t i; + + p_rmw_hdr = (struct qla8044_rmw *)((char *)p_hdr + + sizeof(struct qla8044_reset_entry_hdr)); + + p_entry = (struct qla8044_entry *)((char *)p_rmw_hdr + + sizeof(struct qla8044_rmw)); + + for (i = 0; i < p_hdr->count; i++, p_entry++) { + qla8044_rmw_crb_reg(vha, p_entry->arg1, + p_entry->arg2, p_rmw_hdr); + if (p_hdr->delay) + udelay((uint32_t)(p_hdr->delay)); + } +} + +/* + * qla8044_pause - Wait for p_hdr->delay msecs, called between processing + * two entries of a sequence. + * + * @vha : Pointer to adapter structure + * @p_hdr : Common reset entry header. + * + */ +static +void qla8044_pause(struct scsi_qla_host *vha, + struct qla8044_reset_entry_hdr *p_hdr) +{ + if (p_hdr->delay) + mdelay((uint32_t)((long)p_hdr->delay)); +} + +/* + * qla8044_template_end - Indicates end of reset sequence processing. + * + * @vha : Pointer to adapter structure + * @p_hdr : Common reset entry header. + * + */ +static void +qla8044_template_end(struct scsi_qla_host *vha, + struct qla8044_reset_entry_hdr *p_hdr) +{ + vha->reset_tmplt.template_end = 1; + + if (vha->reset_tmplt.seq_error == 0) { + ql_dbg(ql_dbg_p3p, vha, 0xb093, + "%s: Reset sequence completed SUCCESSFULLY.\n", __func__); + } else { + ql_log(ql_log_fatal, vha, 0xb094, + "%s: Reset sequence completed with some timeout " + "errors.\n", __func__); + } +} + +/* + * qla8044_poll_read_list - Write ar_value to ar_addr register, read ar_addr, + * if (value & test_mask != test_value) re-read till timeout value expires, + * read dr_addr register and assign to reset_tmplt.array. + * + * @vha : Pointer to adapter structure + * @p_hdr : Common reset entry header. 
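+ *
+ * Bookkeeping sketch (editor's addition): each successful poll saves one
+ * read for later use,
+ *
+ *	index = vha->reset_tmplt.array_index;
+ *	qla8044_rd_reg_indirect(vha, p_entry->dr_addr, &value);
+ *	vha->reset_tmplt.array[index++] = value;
+ *
+ * and READ_MODIFY_WRITE entries can pull these values back out through
+ * their index_a field (see qla8044_rmw_crb_reg above).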
+ *
+ */
+static void
+qla8044_poll_read_list(struct scsi_qla_host *vha,
+ struct qla8044_reset_entry_hdr *p_hdr)
+{
+ long delay;
+ int index;
+ struct qla8044_quad_entry *p_entry;
+ struct qla8044_poll *p_poll;
+ uint32_t i;
+ uint32_t value;
+
+ p_poll = (struct qla8044_poll *)
+ ((char *)p_hdr + sizeof(struct qla8044_reset_entry_hdr));
+
+ p_entry = (struct qla8044_quad_entry *)
+ ((char *)p_poll + sizeof(struct qla8044_poll));
+
+ delay = (long)p_hdr->delay;
+
+ for (i = 0; i < p_hdr->count; i++, p_entry++) {
+ qla8044_wr_reg_indirect(vha, p_entry->ar_addr,
+ p_entry->ar_value);
+ if (delay) {
+ if (qla8044_poll_reg(vha, p_entry->ar_addr, delay,
+ p_poll->test_mask, p_poll->test_value)) {
+ ql_dbg(ql_dbg_p3p, vha, 0xb095,
+ "%s: Timeout Error: poll "
+ "list, ", __func__);
+ ql_dbg(ql_dbg_p3p, vha, 0xb096,
+ "Item_num %d, "
+ "entry_num %d\n", i,
+ vha->reset_tmplt.seq_index);
+ } else {
+ index = vha->reset_tmplt.array_index;
+ qla8044_rd_reg_indirect(vha,
+ p_entry->dr_addr, &value);
+ vha->reset_tmplt.array[index++] = value;
+ if (index == QLA8044_MAX_RESET_SEQ_ENTRIES)
+ vha->reset_tmplt.array_index = 1;
+ }
+ }
+ }
+}
+
+/*
+ * qla8044_process_reset_template - Process all entries in the reset template
+ * until the entry with the SEQ_END opcode, which indicates the end of reset
+ * template processing. Each entry has a Reset Entry header with the entry
+ * opcode/command, the size of the entry, the number of entries in the
+ * sub-sequence, and a delay in microsecs or a timeout in millisecs.
+ *
+ * @ha : Pointer to adapter structure
+ * @p_buff : Buffer holding the entries of the reset sequence.
+ *
+ */
+static void
+qla8044_process_reset_template(struct scsi_qla_host *vha,
+ char *p_buff)
+{
+ int index, entries;
+ struct qla8044_reset_entry_hdr *p_hdr;
+ char *p_entry = p_buff;
+
+ vha->reset_tmplt.seq_end = 0;
+ vha->reset_tmplt.template_end = 0;
+ entries = vha->reset_tmplt.hdr->entries;
+ index = vha->reset_tmplt.seq_index;
+
+ for (; (!vha->reset_tmplt.seq_end) && (index < entries); index++) {
+ p_hdr = (struct qla8044_reset_entry_hdr *)p_entry;
+ switch (p_hdr->cmd) {
+ case OPCODE_NOP:
+ break;
+ case OPCODE_WRITE_LIST:
+ qla8044_write_list(vha, p_hdr);
+ break;
+ case OPCODE_READ_WRITE_LIST:
+ qla8044_read_write_list(vha, p_hdr);
+ break;
+ case OPCODE_POLL_LIST:
+ qla8044_poll_list(vha, p_hdr);
+ break;
+ case OPCODE_POLL_WRITE_LIST:
+ qla8044_poll_write_list(vha, p_hdr);
+ break;
+ case OPCODE_READ_MODIFY_WRITE:
+ qla8044_read_modify_write(vha, p_hdr);
+ break;
+ case OPCODE_SEQ_PAUSE:
+ qla8044_pause(vha, p_hdr);
+ break;
+ case OPCODE_SEQ_END:
+ vha->reset_tmplt.seq_end = 1;
+ break;
+ case OPCODE_TMPL_END:
+ qla8044_template_end(vha, p_hdr);
+ break;
+ case OPCODE_POLL_READ_LIST:
+ qla8044_poll_read_list(vha, p_hdr);
+ break;
+ default:
+ ql_log(ql_log_fatal, vha, 0xb097,
+ "%s: Unknown command ==> 0x%04x on "
+ "entry = %d\n", __func__, p_hdr->cmd, index);
+ break;
+ }
+ /*
+ * Set pointer to next entry in the sequence.
+ */ + p_entry += p_hdr->size; + } + vha->reset_tmplt.seq_index = index; +} + +static void +qla8044_process_init_seq(struct scsi_qla_host *vha) +{ + qla8044_process_reset_template(vha, + vha->reset_tmplt.init_offset); + if (vha->reset_tmplt.seq_end != 1) + ql_log(ql_log_fatal, vha, 0xb098, + "%s: Abrupt INIT Sub-Sequence end.\n", + __func__); +} + +static void +qla8044_process_stop_seq(struct scsi_qla_host *vha) +{ + vha->reset_tmplt.seq_index = 0; + qla8044_process_reset_template(vha, vha->reset_tmplt.stop_offset); + if (vha->reset_tmplt.seq_end != 1) + ql_log(ql_log_fatal, vha, 0xb099, + "%s: Abrupt STOP Sub-Sequence end.\n", __func__); +} + +static void +qla8044_process_start_seq(struct scsi_qla_host *vha) +{ + qla8044_process_reset_template(vha, vha->reset_tmplt.start_offset); + if (vha->reset_tmplt.template_end != 1) + ql_log(ql_log_fatal, vha, 0xb09a, + "%s: Abrupt START Sub-Sequence end.\n", + __func__); +} + +static int +qla8044_lockless_flash_read_u32(struct scsi_qla_host *vha, + uint32_t flash_addr, uint8_t *p_data, int u32_word_count) +{ + uint32_t i; + uint32_t u32_word; + uint32_t flash_offset; + uint32_t addr = flash_addr; + int ret_val = QLA_SUCCESS; + + flash_offset = addr & (QLA8044_FLASH_SECTOR_SIZE - 1); + + if (addr & 0x3) { + ql_log(ql_log_fatal, vha, 0xb09b, "%s: Illegal addr = 0x%x\n", + __func__, addr); + ret_val = QLA_FUNCTION_FAILED; + goto exit_lockless_read; + } + + ret_val = qla8044_wr_reg_indirect(vha, + QLA8044_FLASH_DIRECT_WINDOW, (addr)); + + if (ret_val != QLA_SUCCESS) { + ql_log(ql_log_fatal, vha, 0xb09c, + "%s: failed to write addr 0x%x to FLASH_DIRECT_WINDOW!\n", + __func__, addr); + goto exit_lockless_read; + } + + /* Check if data is spread across multiple sectors */ + if ((flash_offset + (u32_word_count * sizeof(uint32_t))) > + (QLA8044_FLASH_SECTOR_SIZE - 1)) { + /* Multi sector read */ + for (i = 0; i < u32_word_count; i++) { + ret_val = qla8044_rd_reg_indirect(vha, + QLA8044_FLASH_DIRECT_DATA(addr), &u32_word); + if (ret_val != QLA_SUCCESS) { + ql_log(ql_log_fatal, vha, 0xb09d, + "%s: failed to read addr 0x%x!\n", + __func__, addr); + goto exit_lockless_read; + } + *(uint32_t *)p_data = u32_word; + p_data = p_data + 4; + addr = addr + 4; + flash_offset = flash_offset + 4; + if (flash_offset > (QLA8044_FLASH_SECTOR_SIZE - 1)) { + /* This write is needed once for each sector */ + ret_val = qla8044_wr_reg_indirect(vha, + QLA8044_FLASH_DIRECT_WINDOW, (addr)); + if (ret_val != QLA_SUCCESS) { + ql_log(ql_log_fatal, vha, 0xb09f, + "%s: failed to write addr " + "0x%x to FLASH_DIRECT_WINDOW!\n", + __func__, addr); + goto exit_lockless_read; + } + flash_offset = 0; + } + } + } else { + /* Single sector read */ + for (i = 0; i < u32_word_count; i++) { + ret_val = qla8044_rd_reg_indirect(vha, + QLA8044_FLASH_DIRECT_DATA(addr), &u32_word); + if (ret_val != QLA_SUCCESS) { + ql_log(ql_log_fatal, vha, 0xb0a0, + "%s: failed to read addr 0x%x!\n", + __func__, addr); + goto exit_lockless_read; + } + *(uint32_t *)p_data = u32_word; + p_data = p_data + 4; + addr = addr + 4; + } + } + +exit_lockless_read: + return ret_val; +} + +/* + * qla8044_ms_mem_write_128b - Writes data to MS/off-chip memory + * + * @vha : Pointer to adapter structure + * addr : Flash address to write to + * data : Data to be written + * count : word_count to be written + * + * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED + */ +static int +qla8044_ms_mem_write_128b(struct scsi_qla_host *vha, + uint64_t addr, uint32_t *data, uint32_t count) +{ + int i, j, ret_val = QLA_SUCCESS; + uint32_t 
agt_ctrl; + unsigned long flags; + struct qla_hw_data *ha = vha->hw; + + /* Only 128-bit aligned access */ + if (addr & 0xF) { + ret_val = QLA_FUNCTION_FAILED; + goto exit_ms_mem_write; + } + write_lock_irqsave(&ha->hw_lock, flags); + + /* Write address */ + ret_val = qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_ADDR_HI, 0); + if (ret_val == QLA_FUNCTION_FAILED) { + ql_log(ql_log_fatal, vha, 0xb0a1, + "%s: write to AGT_ADDR_HI failed!\n", __func__); + goto exit_ms_mem_write_unlock; + } + + for (i = 0; i < count; i++, addr += 16) { + if (!((QLA8044_ADDR_IN_RANGE(addr, QLA8044_ADDR_QDR_NET, + QLA8044_ADDR_QDR_NET_MAX)) || + (QLA8044_ADDR_IN_RANGE(addr, QLA8044_ADDR_DDR_NET, + QLA8044_ADDR_DDR_NET_MAX)))) { + ret_val = QLA_FUNCTION_FAILED; + goto exit_ms_mem_write_unlock; + } + + ret_val = qla8044_wr_reg_indirect(vha, + MD_MIU_TEST_AGT_ADDR_LO, addr); + + /* Write data */ + ret_val += qla8044_wr_reg_indirect(vha, + MD_MIU_TEST_AGT_WRDATA_LO, *data++); + ret_val += qla8044_wr_reg_indirect(vha, + MD_MIU_TEST_AGT_WRDATA_HI, *data++); + ret_val += qla8044_wr_reg_indirect(vha, + MD_MIU_TEST_AGT_WRDATA_ULO, *data++); + ret_val += qla8044_wr_reg_indirect(vha, + MD_MIU_TEST_AGT_WRDATA_UHI, *data++); + if (ret_val == QLA_FUNCTION_FAILED) { + ql_log(ql_log_fatal, vha, 0xb0a2, + "%s: write to AGT_WRDATA failed!\n", + __func__); + goto exit_ms_mem_write_unlock; + } + + /* Check write status */ + ret_val = qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_CTRL, + MIU_TA_CTL_WRITE_ENABLE); + ret_val += qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_CTRL, + MIU_TA_CTL_WRITE_START); + if (ret_val == QLA_FUNCTION_FAILED) { + ql_log(ql_log_fatal, vha, 0xb0a3, + "%s: write to AGT_CTRL failed!\n", __func__); + goto exit_ms_mem_write_unlock; + } + + for (j = 0; j < MAX_CTL_CHECK; j++) { + ret_val = qla8044_rd_reg_indirect(vha, + MD_MIU_TEST_AGT_CTRL, &agt_ctrl); + if (ret_val == QLA_FUNCTION_FAILED) { + ql_log(ql_log_fatal, vha, 0xb0a4, + "%s: failed to read " + "MD_MIU_TEST_AGT_CTRL!\n", __func__); + goto exit_ms_mem_write_unlock; + } + if ((agt_ctrl & MIU_TA_CTL_BUSY) == 0) + break; + } + + /* Status check failed */ + if (j >= MAX_CTL_CHECK) { + ql_log(ql_log_fatal, vha, 0xb0a5, + "%s: MS memory write failed!\n", + __func__); + ret_val = QLA_FUNCTION_FAILED; + goto exit_ms_mem_write_unlock; + } + } + +exit_ms_mem_write_unlock: + write_unlock_irqrestore(&ha->hw_lock, flags); + +exit_ms_mem_write: + return ret_val; +} + +static int +qla8044_copy_bootloader(struct scsi_qla_host *vha) +{ + uint8_t *p_cache; + uint32_t src, count, size; + uint64_t dest; + int ret_val = QLA_SUCCESS; + struct qla_hw_data *ha = vha->hw; + + src = QLA8044_BOOTLOADER_FLASH_ADDR; + dest = qla8044_rd_reg(ha, QLA8044_BOOTLOADER_ADDR); + size = qla8044_rd_reg(ha, QLA8044_BOOTLOADER_SIZE); + + /* 128 bit alignment check */ + if (size & 0xF) + size = (size + 16) & ~0xF; + + /* 16 byte count */ + count = size/16; + + p_cache = vmalloc(size); + if (p_cache == NULL) { + ql_log(ql_log_fatal, vha, 0xb0a6, + "%s: Failed to allocate memory for " + "boot loader cache\n", __func__); + ret_val = QLA_FUNCTION_FAILED; + goto exit_copy_bootloader; + } + + ret_val = qla8044_lockless_flash_read_u32(vha, src, + p_cache, size/sizeof(uint32_t)); + if (ret_val == QLA_FUNCTION_FAILED) { + ql_log(ql_log_fatal, vha, 0xb0a7, + "%s: Error reading F/W from flash!!!\n", __func__); + goto exit_copy_error; + } + ql_dbg(ql_dbg_p3p, vha, 0xb0a8, "%s: Read F/W from flash!\n", + __func__); + + /* 128 bit/16 byte write to MS memory */ + ret_val = qla8044_ms_mem_write_128b(vha, dest, 
+ (uint32_t *)p_cache, count); + if (ret_val == QLA_FUNCTION_FAILED) { + ql_log(ql_log_fatal, vha, 0xb0a9, + "%s: Error writing F/W to MS !!!\n", __func__); + goto exit_copy_error; + } + ql_dbg(ql_dbg_p3p, vha, 0xb0aa, + "%s: Wrote F/W (size %d) to MS !!!\n", + __func__, size); + +exit_copy_error: + vfree(p_cache); + +exit_copy_bootloader: + return ret_val; +} + +static int +qla8044_restart(struct scsi_qla_host *vha) +{ + int ret_val = QLA_SUCCESS; + struct qla_hw_data *ha = vha->hw; + + qla8044_process_stop_seq(vha); + + /* Collect minidump */ + if (ql2xmdenable) + qla8044_get_minidump(vha); + else + ql_log(ql_log_fatal, vha, 0xb14c, + "Minidump disabled.\n"); + + qla8044_process_init_seq(vha); + + if (qla8044_copy_bootloader(vha)) { + ql_log(ql_log_fatal, vha, 0xb0ab, + "%s: Copy bootloader, firmware restart failed!\n", + __func__); + ret_val = QLA_FUNCTION_FAILED; + goto exit_restart; + } + + /* + * Loads F/W from flash + */ + qla8044_wr_reg(ha, QLA8044_FW_IMAGE_VALID, QLA8044_BOOT_FROM_FLASH); + + qla8044_process_start_seq(vha); + +exit_restart: + return ret_val; +} + +/* + * qla8044_check_cmd_peg_status - Check peg status to see if Peg is + * initialized. + * + * @ha : Pointer to adapter structure + * + * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED + */ +static int +qla8044_check_cmd_peg_status(struct scsi_qla_host *vha) +{ + uint32_t val, ret_val = QLA_FUNCTION_FAILED; + int retries = CRB_CMDPEG_CHECK_RETRY_COUNT; + struct qla_hw_data *ha = vha->hw; + + do { + val = qla8044_rd_reg(ha, QLA8044_CMDPEG_STATE); + if (val == PHAN_INITIALIZE_COMPLETE) { + ql_dbg(ql_dbg_p3p, vha, 0xb0ac, + "%s: Command Peg initialization " + "complete! state=0x%x\n", __func__, val); + ret_val = QLA_SUCCESS; + break; + } + msleep(CRB_CMDPEG_CHECK_DELAY); + } while (--retries); + + return ret_val; +} + +static int +qla8044_start_firmware(struct scsi_qla_host *vha) +{ + int ret_val = QLA_SUCCESS; + + if (qla8044_restart(vha)) { + ql_log(ql_log_fatal, vha, 0xb0ad, + "%s: Restart Error!!!, Need Reset!!!\n", + __func__); + ret_val = QLA_FUNCTION_FAILED; + goto exit_start_fw; + } else + ql_dbg(ql_dbg_p3p, vha, 0xb0af, + "%s: Restart done!\n", __func__); + + ret_val = qla8044_check_cmd_peg_status(vha); + if (ret_val) { + ql_log(ql_log_fatal, vha, 0xb0b0, + "%s: Peg not initialized!\n", __func__); + ret_val = QLA_FUNCTION_FAILED; + } + +exit_start_fw: + return ret_val; +} + +void +qla8044_clear_drv_active(struct scsi_qla_host *vha) +{ + uint32_t drv_active; + struct qla_hw_data *ha = vha->hw; + + drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX); + drv_active &= ~(1 << (ha->portnum)); + + ql_log(ql_log_info, vha, 0xb0b1, + "%s(%ld): drv_active: 0x%08x\n", + __func__, vha->host_no, drv_active); + + qla8044_wr_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX, drv_active); +} + +/* + * qla8044_device_bootstrap - Initialize device, set DEV_READY, start fw + * @ha: pointer to adapter structure + * + * Note: IDC lock must be held upon entry + **/ +static int +qla8044_device_bootstrap(struct scsi_qla_host *vha) +{ + int rval = QLA_FUNCTION_FAILED; + int i; + uint32_t old_count = 0, count = 0; + int need_reset = 0; + uint32_t idc_ctrl; + struct qla_hw_data *ha = vha->hw; + + need_reset = qla8044_need_reset(vha); + + if (!need_reset) { + old_count = qla8044_rd_direct(vha, + QLA8044_PEG_ALIVE_COUNTER_INDEX); + + for (i = 0; i < 10; i++) { + msleep(200); + + count = qla8044_rd_direct(vha, + QLA8044_PEG_ALIVE_COUNTER_INDEX); + if (count != old_count) { + rval = QLA_SUCCESS; + goto dev_ready; + } + } + 
qla8044_flash_lock_recovery(vha); + } else { + /* We are trying to perform a recovery here. */ + if (ha->flags.isp82xx_fw_hung) + qla8044_flash_lock_recovery(vha); + } + + /* set to DEV_INITIALIZING */ + ql_log(ql_log_info, vha, 0xb0b2, + "%s: HW State: INITIALIZING\n", __func__); + qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX, + QLA8XXX_DEV_INITIALIZING); + + qla8044_idc_unlock(ha); + rval = qla8044_start_firmware(vha); + qla8044_idc_lock(ha); + + if (rval != QLA_SUCCESS) { + ql_log(ql_log_info, vha, 0xb0b3, + "%s: HW State: FAILED\n", __func__); + qla8044_clear_drv_active(vha); + qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX, + QLA8XXX_DEV_FAILED); + return rval; + } + + /* For ISP8044, If IDC_CTRL GRACEFUL_RESET_BIT1 is set , reset it after + * device goes to INIT state. */ + idc_ctrl = qla8044_rd_reg(ha, QLA8044_IDC_DRV_CTRL); + if (idc_ctrl & GRACEFUL_RESET_BIT1) { + qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL, + (idc_ctrl & ~GRACEFUL_RESET_BIT1)); + ha->fw_dumped = 0; + } + +dev_ready: + ql_log(ql_log_info, vha, 0xb0b4, + "%s: HW State: READY\n", __func__); + qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX, QLA8XXX_DEV_READY); + + return rval; +} + +/*-------------------------Reset Sequence Functions-----------------------*/ +static void +qla8044_dump_reset_seq_hdr(struct scsi_qla_host *vha) +{ + u8 *phdr; + + if (!vha->reset_tmplt.buff) { + ql_log(ql_log_fatal, vha, 0xb0b5, + "%s: Error Invalid reset_seq_template\n", __func__); + return; + } + + phdr = vha->reset_tmplt.buff; + ql_dbg(ql_dbg_p3p, vha, 0xb0b6, + "Reset Template :\n\t0x%X 0x%X 0x%X 0x%X" + "0x%X 0x%X 0x%X 0x%X 0x%X 0x%X\n" + "\t0x%X 0x%X 0x%X 0x%X 0x%X 0x%X\n\n", + *phdr, *(phdr+1), *(phdr+2), *(phdr+3), *(phdr+4), + *(phdr+5), *(phdr+6), *(phdr+7), *(phdr + 8), + *(phdr+9), *(phdr+10), *(phdr+11), *(phdr+12), + *(phdr+13), *(phdr+14), *(phdr+15)); +} + +/* + * qla8044_reset_seq_checksum_test - Validate Reset Sequence template. + * + * @ha : Pointer to adapter structure + * + * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED + */ +static int +qla8044_reset_seq_checksum_test(struct scsi_qla_host *vha) +{ + uint32_t sum = 0; + uint16_t *buff = (uint16_t *)vha->reset_tmplt.buff; + int u16_count = vha->reset_tmplt.hdr->size / sizeof(uint16_t); + + while (u16_count-- > 0) + sum += *buff++; + + while (sum >> 16) + sum = (sum & 0xFFFF) + (sum >> 16); + + /* checksum of 0 indicates a valid template */ + if (~sum) { + return QLA_SUCCESS; + } else { + ql_log(ql_log_fatal, vha, 0xb0b7, + "%s: Reset seq checksum failed\n", __func__); + return QLA_FUNCTION_FAILED; + } +} + +/* + * qla8044_read_reset_template - Read Reset Template from Flash, validate + * the template and store offsets of stop/start/init offsets in ha->reset_tmplt. 
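+ *
+ * Flash layout sketch (editor's illustration, pieced together from the
+ * parsing below; names follow the code, not a datasheet):
+ *
+ *	QLA8044_RESET_TEMPLATE_ADDR
+ *	+------------------------------+
+ *	| qla8044_reset_template_hdr   | signature, size, hdr_size,
+ *	|                              | entries, *_seq_offset fields
+ *	+------------------------------+
+ *	| stop, init and start         | stop at hdr_size; init and start
+ *	| sub-sequences                | via init/start_seq_offset
+ *	+------------------------------+
+ *
+ * The whole image must pass the 16-bit checksum test above.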
+ *
+ * @ha : Pointer to adapter structure
+ */
+void
+qla8044_read_reset_template(struct scsi_qla_host *vha)
+{
+ uint8_t *p_buff;
+ uint32_t addr, tmplt_hdr_def_size, tmplt_hdr_size;
+
+ vha->reset_tmplt.seq_error = 0;
+ vha->reset_tmplt.buff = vmalloc(QLA8044_RESTART_TEMPLATE_SIZE);
+ if (vha->reset_tmplt.buff == NULL) {
+ ql_log(ql_log_fatal, vha, 0xb0b8,
+ "%s: Failed to allocate reset template resources\n",
+ __func__);
+ goto exit_read_reset_template;
+ }
+
+ p_buff = vha->reset_tmplt.buff;
+ addr = QLA8044_RESET_TEMPLATE_ADDR;
+
+ tmplt_hdr_def_size =
+ sizeof(struct qla8044_reset_template_hdr) / sizeof(uint32_t);
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb0b9,
+ "%s: Read template hdr size %d from Flash\n",
+ __func__, tmplt_hdr_def_size);
+
+ /* Copy template header from flash */
+ if (qla8044_read_flash_data(vha, p_buff, addr, tmplt_hdr_def_size)) {
+ ql_log(ql_log_fatal, vha, 0xb0ba,
+ "%s: Failed to read reset template\n", __func__);
+ goto exit_read_template_error;
+ }
+
+ vha->reset_tmplt.hdr =
+ (struct qla8044_reset_template_hdr *) vha->reset_tmplt.buff;
+
+ /* Validate the template header size and signature */
+ tmplt_hdr_size = vha->reset_tmplt.hdr->hdr_size/sizeof(uint32_t);
+ if ((tmplt_hdr_size != tmplt_hdr_def_size) ||
+ (vha->reset_tmplt.hdr->signature != RESET_TMPLT_HDR_SIGNATURE)) {
+ ql_log(ql_log_fatal, vha, 0xb0bb,
+ "%s: Template Header size invalid %d "
+ "tmplt_hdr_def_size %d!!!\n", __func__,
+ tmplt_hdr_size, tmplt_hdr_def_size);
+ goto exit_read_template_error;
+ }
+
+ addr = QLA8044_RESET_TEMPLATE_ADDR + vha->reset_tmplt.hdr->hdr_size;
+ p_buff = vha->reset_tmplt.buff + vha->reset_tmplt.hdr->hdr_size;
+ tmplt_hdr_def_size = (vha->reset_tmplt.hdr->size -
+ vha->reset_tmplt.hdr->hdr_size)/sizeof(uint32_t);
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb0bc,
+ "%s: Read rest of the template size %d\n",
+ __func__, vha->reset_tmplt.hdr->size);
+
+ /* Copy rest of the template */
+ if (qla8044_read_flash_data(vha, p_buff, addr, tmplt_hdr_def_size)) {
+ ql_log(ql_log_fatal, vha, 0xb0bd,
+ "%s: Failed to read reset template\n", __func__);
+ goto exit_read_template_error;
+ }
+
+ /* Integrity check */
+ if (qla8044_reset_seq_checksum_test(vha)) {
+ ql_log(ql_log_fatal, vha, 0xb0be,
+ "%s: Reset Seq checksum failed!\n", __func__);
+ goto exit_read_template_error;
+ }
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb0bf,
+ "%s: Reset Seq checksum passed!
Get stop, " + "start and init seq offsets\n", __func__); + + /* Get STOP, START, INIT sequence offsets */ + vha->reset_tmplt.init_offset = vha->reset_tmplt.buff + + vha->reset_tmplt.hdr->init_seq_offset; + + vha->reset_tmplt.start_offset = vha->reset_tmplt.buff + + vha->reset_tmplt.hdr->start_seq_offset; + + vha->reset_tmplt.stop_offset = vha->reset_tmplt.buff + + vha->reset_tmplt.hdr->hdr_size; + + qla8044_dump_reset_seq_hdr(vha); + + goto exit_read_reset_template; + +exit_read_template_error: + vfree(vha->reset_tmplt.buff); + +exit_read_reset_template: + return; +} + +void +qla8044_set_idc_dontreset(struct scsi_qla_host *vha) +{ + uint32_t idc_ctrl; + struct qla_hw_data *ha = vha->hw; + + idc_ctrl = qla8044_rd_reg(ha, QLA8044_IDC_DRV_CTRL); + idc_ctrl |= DONTRESET_BIT0; + ql_dbg(ql_dbg_p3p, vha, 0xb0c0, + "%s: idc_ctrl = %d\n", __func__, idc_ctrl); + qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL, idc_ctrl); +} + +inline void +qla8044_set_rst_ready(struct scsi_qla_host *vha) +{ + uint32_t drv_state; + struct qla_hw_data *ha = vha->hw; + + drv_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX); + + /* For ISP8044, drv_active register has 1 bit per function, + * shift 1 by func_num to set a bit for the function.*/ + drv_state |= (1 << ha->portnum); + + ql_log(ql_log_info, vha, 0xb0c1, + "%s(%ld): drv_state: 0x%08x\n", + __func__, vha->host_no, drv_state); + qla8044_wr_direct(vha, QLA8044_CRB_DRV_STATE_INDEX, drv_state); +} + +/** + * qla8044_need_reset_handler - Code to start reset sequence + * @ha: pointer to adapter structure + * + * Note: IDC lock must be held upon entry + **/ +static void +qla8044_need_reset_handler(struct scsi_qla_host *vha) +{ + uint32_t dev_state = 0, drv_state, drv_active; + unsigned long reset_timeout, dev_init_timeout; + struct qla_hw_data *ha = vha->hw; + + ql_log(ql_log_fatal, vha, 0xb0c2, + "%s: Performing ISP error recovery\n", __func__); + + if (vha->flags.online) { + qla8044_idc_unlock(ha); + qla2x00_abort_isp_cleanup(vha); + ha->isp_ops->get_flash_version(vha, vha->req->ring); + ha->isp_ops->nvram_config(vha); + qla8044_idc_lock(ha); + } + + if (!ha->flags.nic_core_reset_owner) { + ql_dbg(ql_dbg_p3p, vha, 0xb0c3, + "%s(%ld): reset acknowledged\n", + __func__, vha->host_no); + qla8044_set_rst_ready(vha); + + /* Non-reset owners ACK Reset and wait for device INIT state + * as part of Reset Recovery by Reset Owner + */ + dev_init_timeout = jiffies + (ha->fcoe_reset_timeout * HZ); + + do { + if (time_after_eq(jiffies, dev_init_timeout)) { + ql_log(ql_log_info, vha, 0xb0c4, + "%s: Non Reset owner DEV INIT " + "TIMEOUT!\n", __func__); + break; + } + + qla8044_idc_unlock(ha); + msleep(1000); + qla8044_idc_lock(ha); + + dev_state = qla8044_rd_direct(vha, + QLA8044_CRB_DEV_STATE_INDEX); + } while (dev_state == QLA8XXX_DEV_NEED_RESET); + } else { + qla8044_set_rst_ready(vha); + + /* wait for 10 seconds for reset ack from all functions */ + reset_timeout = jiffies + (ha->fcoe_reset_timeout * HZ); + + drv_state = qla8044_rd_direct(vha, + QLA8044_CRB_DRV_STATE_INDEX); + drv_active = qla8044_rd_direct(vha, + QLA8044_CRB_DRV_ACTIVE_INDEX); + + ql_log(ql_log_info, vha, 0xb0c5, + "%s(%ld): drv_state = 0x%x, drv_active = 0x%x\n", + __func__, vha->host_no, drv_state, drv_active); + + while (drv_state != drv_active) { + if (time_after_eq(jiffies, reset_timeout)) { + ql_log(ql_log_info, vha, 0xb0c6, + "%s: RESET TIMEOUT!" 
+ "drv_state: 0x%08x, drv_active: 0x%08x\n", + QLA2XXX_DRIVER_NAME, drv_state, drv_active); + break; + } + + qla8044_idc_unlock(ha); + msleep(1000); + qla8044_idc_lock(ha); + + drv_state = qla8044_rd_direct(vha, + QLA8044_CRB_DRV_STATE_INDEX); + drv_active = qla8044_rd_direct(vha, + QLA8044_CRB_DRV_ACTIVE_INDEX); + } + + if (drv_state != drv_active) { + ql_log(ql_log_info, vha, 0xb0c7, + "%s(%ld): Reset_owner turning off drv_active " + "of non-acking function 0x%x\n", __func__, + vha->host_no, (drv_active ^ drv_state)); + drv_active = drv_active & drv_state; + qla8044_wr_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX, + drv_active); + } + + /* + * Clear RESET OWNER, will be set at next reset + * by next RST_OWNER + */ + ha->flags.nic_core_reset_owner = 0; + + /* Start Reset Recovery */ + qla8044_device_bootstrap(vha); + } +} + +static void +qla8044_set_drv_active(struct scsi_qla_host *vha) +{ + uint32_t drv_active; + struct qla_hw_data *ha = vha->hw; + + drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX); + + /* For ISP8044, drv_active register has 1 bit per function, + * shift 1 by func_num to set a bit for the function.*/ + drv_active |= (1 << ha->portnum); + + ql_log(ql_log_info, vha, 0xb0c8, + "%s(%ld): drv_active: 0x%08x\n", + __func__, vha->host_no, drv_active); + qla8044_wr_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX, drv_active); +} + +static void +qla8044_clear_idc_dontreset(struct scsi_qla_host *vha) +{ + uint32_t idc_ctrl; + struct qla_hw_data *ha = vha->hw; + + idc_ctrl = qla8044_rd_reg(ha, QLA8044_IDC_DRV_CTRL); + idc_ctrl &= ~DONTRESET_BIT0; + ql_log(ql_log_info, vha, 0xb0c9, + "%s: idc_ctrl = %d\n", __func__, + idc_ctrl); + qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL, idc_ctrl); +} + +static int +qla8044_set_idc_ver(struct scsi_qla_host *vha) +{ + int idc_ver; + uint32_t drv_active; + int rval = QLA_SUCCESS; + struct qla_hw_data *ha = vha->hw; + + drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX); + if (drv_active == (1 << ha->portnum)) { + idc_ver = qla8044_rd_direct(vha, + QLA8044_CRB_DRV_IDC_VERSION_INDEX); + idc_ver &= (~0xFF); + idc_ver |= QLA8044_IDC_VER_MAJ_VALUE; + qla8044_wr_direct(vha, QLA8044_CRB_DRV_IDC_VERSION_INDEX, + idc_ver); + ql_log(ql_log_info, vha, 0xb0ca, + "%s: IDC version updated to %d\n", + __func__, idc_ver); + } else { + idc_ver = qla8044_rd_direct(vha, + QLA8044_CRB_DRV_IDC_VERSION_INDEX); + idc_ver &= 0xFF; + if (QLA8044_IDC_VER_MAJ_VALUE != idc_ver) { + ql_log(ql_log_info, vha, 0xb0cb, + "%s: qla4xxx driver IDC version %d " + "is not compatible with IDC version %d " + "of other drivers!\n", + __func__, QLA8044_IDC_VER_MAJ_VALUE, + idc_ver); + rval = QLA_FUNCTION_FAILED; + goto exit_set_idc_ver; + } + } + + /* Update IDC_MINOR_VERSION */ + idc_ver = qla8044_rd_reg(ha, QLA8044_CRB_IDC_VER_MINOR); + idc_ver &= ~(0x03 << (ha->portnum * 2)); + idc_ver |= (QLA8044_IDC_VER_MIN_VALUE << (ha->portnum * 2)); + qla8044_wr_reg(ha, QLA8044_CRB_IDC_VER_MINOR, idc_ver); + +exit_set_idc_ver: + return rval; +} + +static int +qla8044_update_idc_reg(struct scsi_qla_host *vha) +{ + uint32_t drv_active; + int rval = QLA_SUCCESS; + struct qla_hw_data *ha = vha->hw; + + if (vha->flags.init_done) + goto exit_update_idc_reg; + + qla8044_idc_lock(ha); + qla8044_set_drv_active(vha); + + drv_active = qla8044_rd_direct(vha, + QLA8044_CRB_DRV_ACTIVE_INDEX); + + /* If we are the first driver to load and + * ql2xdontresethba is not set, clear IDC_CTRL BIT0. 
*/ + if ((drv_active == (1 << ha->portnum)) && !ql2xdontresethba) + qla8044_clear_idc_dontreset(vha); + + rval = qla8044_set_idc_ver(vha); + if (rval == QLA_FUNCTION_FAILED) + qla8044_clear_drv_active(vha); + qla8044_idc_unlock(ha); + +exit_update_idc_reg: + return rval; +} + +/** + * qla8044_need_qsnt_handler - Code to start qsnt + * @ha: pointer to adapter structure + **/ +static void +qla8044_need_qsnt_handler(struct scsi_qla_host *vha) +{ + unsigned long qsnt_timeout; + uint32_t drv_state, drv_active, dev_state; + struct qla_hw_data *ha = vha->hw; + + if (vha->flags.online) + qla2x00_quiesce_io(vha); + else + return; + + qla8044_set_qsnt_ready(vha); + + /* Wait for 30 secs for all functions to ack qsnt mode */ + qsnt_timeout = jiffies + (QSNT_ACK_TOV * HZ); + drv_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX); + drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX); + + /* Shift drv_active by 1 to match drv_state. As quiescent ready bit + position is at bit 1 and drv active is at bit 0 */ + drv_active = drv_active << 1; + + while (drv_state != drv_active) { + if (time_after_eq(jiffies, qsnt_timeout)) { + /* Other functions did not ack, changing state to + * DEV_READY + */ + clear_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags); + qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX, + QLA8XXX_DEV_READY); + qla8044_clear_qsnt_ready(vha); + ql_log(ql_log_info, vha, 0xb0cc, + "Timeout waiting for quiescent ack!!!\n"); + return; + } + qla8044_idc_unlock(ha); + msleep(1000); + qla8044_idc_lock(ha); + + drv_state = qla8044_rd_direct(vha, + QLA8044_CRB_DRV_STATE_INDEX); + drv_active = qla8044_rd_direct(vha, + QLA8044_CRB_DRV_ACTIVE_INDEX); + drv_active = drv_active << 1; + } + + /* All functions have Acked. Set quiescent state */ + dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX); + + if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT) { + qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX, + QLA8XXX_DEV_QUIESCENT); + ql_log(ql_log_info, vha, 0xb0cd, + "%s: HW State: QUIESCENT\n", __func__); + } +} + +/* + * qla8044_device_state_handler - Adapter state machine + * @ha: pointer to host adapter structure. + * + * Note: IDC lock must be UNLOCKED upon entry + **/ +int +qla8044_device_state_handler(struct scsi_qla_host *vha) +{ + uint32_t dev_state; + int rval = QLA_SUCCESS; + unsigned long dev_init_timeout; + struct qla_hw_data *ha = vha->hw; + + rval = qla8044_update_idc_reg(vha); + if (rval == QLA_FUNCTION_FAILED) + goto exit_error; + + dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX); + ql_dbg(ql_dbg_p3p, vha, 0xb0ce, + "Device state is 0x%x = %s\n", + dev_state, dev_state < MAX_STATES ? + qdev_state(dev_state) : "Unknown"); + + /* wait for 30 seconds for device to go ready */ + dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout * HZ); + + qla8044_idc_lock(ha); + + while (1) { + if (time_after_eq(jiffies, dev_init_timeout)) { + ql_log(ql_log_warn, vha, 0xb0cf, + "%s: Device Init Failed 0x%x = %s\n", + QLA2XXX_DRIVER_NAME, dev_state, + dev_state < MAX_STATES ? + qdev_state(dev_state) : "Unknown"); + + qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX, + QLA8XXX_DEV_FAILED); + } + + dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX); + ql_log(ql_log_info, vha, 0xb0d0, + "Device state is 0x%x = %s\n", + dev_state, dev_state < MAX_STATES ? 
+ qdev_state(dev_state) : "Unknown");
+
+ /* NOTE: Make sure idc unlocked upon exit of switch statement */
+ switch (dev_state) {
+ case QLA8XXX_DEV_READY:
+ ha->flags.nic_core_reset_owner = 0;
+ goto exit;
+ case QLA8XXX_DEV_COLD:
+ rval = qla8044_device_bootstrap(vha);
+ goto exit;
+ case QLA8XXX_DEV_INITIALIZING:
+ qla8044_idc_unlock(ha);
+ msleep(1000);
+ qla8044_idc_lock(ha);
+ break;
+ case QLA8XXX_DEV_NEED_RESET:
+ /* For ISP8044, if NEED_RESET is set by any driver,
+ * it should be honored, irrespective of IDC_CTRL
+ * DONTRESET_BIT0 */
+ qla8044_need_reset_handler(vha);
+ break;
+ case QLA8XXX_DEV_NEED_QUIESCENT:
+ /* idc locked/unlocked in handler */
+ qla8044_need_qsnt_handler(vha);
+
+ /* Reset the init timeout after qsnt handler */
+ dev_init_timeout = jiffies +
+ (ha->fcoe_reset_timeout * HZ);
+ break;
+ case QLA8XXX_DEV_QUIESCENT:
+ ql_log(ql_log_info, vha, 0xb0d1,
+ "HW State: QUIESCENT\n");
+
+ qla8044_idc_unlock(ha);
+ msleep(1000);
+ qla8044_idc_lock(ha);
+
+ /* Reset the init timeout after qsnt handler */
+ dev_init_timeout = jiffies +
+ (ha->fcoe_reset_timeout * HZ);
+ break;
+ case QLA8XXX_DEV_FAILED:
+ ha->flags.nic_core_reset_owner = 0;
+ qla8044_idc_unlock(ha);
+ qla8xxx_dev_failed_handler(vha);
+ rval = QLA_FUNCTION_FAILED;
+ qla8044_idc_lock(ha);
+ goto exit;
+ default:
+ qla8044_idc_unlock(ha);
+ qla8xxx_dev_failed_handler(vha);
+ rval = QLA_FUNCTION_FAILED;
+ qla8044_idc_lock(ha);
+ goto exit;
+ }
+ }
+exit:
+ qla8044_idc_unlock(ha);
+
+exit_error:
+ return rval;
+}
+
+/**
+ * qla8044_check_temp - Check the ISP8044 temperature.
+ * @ha: adapter block pointer.
+ *
+ * Note: The caller should not hold the idc lock.
+ **/
+static int
+qla8044_check_temp(struct scsi_qla_host *vha)
+{
+ uint32_t temp, temp_state, temp_val;
+ int status = QLA_SUCCESS;
+
+ temp = qla8044_rd_direct(vha, QLA8044_CRB_TEMP_STATE_INDEX);
+ temp_state = qla82xx_get_temp_state(temp);
+ temp_val = qla82xx_get_temp_val(temp);
+
+ if (temp_state == QLA82XX_TEMP_PANIC) {
+ ql_log(ql_log_warn, vha, 0xb0d2,
+ "Device temperature %d degrees C"
+ " exceeds maximum allowed. Hardware has been shut"
+ " down\n", temp_val);
+ status = QLA_FUNCTION_FAILED;
+ return status;
+ } else if (temp_state == QLA82XX_TEMP_WARN) {
+ ql_log(ql_log_warn, vha, 0xb0d3,
+ "Device temperature %d"
+ " degrees C exceeds operating range."
+ " Immediate action needed.\n", temp_val);
+ }
+ return 0;
+}
+
+int qla8044_read_temperature(scsi_qla_host_t *vha)
+{
+ uint32_t temp;
+
+ temp = qla8044_rd_direct(vha, QLA8044_CRB_TEMP_STATE_INDEX);
+ return qla82xx_get_temp_val(temp);
+}
+
+/**
+ * qla8044_check_fw_alive - Check firmware health
+ * @ha: Pointer to host adapter structure.
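+ *
+ * Heartbeat sketch (editor's addition, simplified from the body below):
+ * PEG_ALIVE_COUNTER must change between successive 1-second samples,
+ *
+ *	if (new_counter == vha->fw_heartbeat_counter &&
+ *	    ++vha->seconds_since_last_heartbeat == 2)
+ *		status = QLA_FUNCTION_FAILED;	// log HALT_STATUS1/2
+ *	else if (new_counter != vha->fw_heartbeat_counter)
+ *		vha->seconds_since_last_heartbeat = 0;
+ *
+ * and a counter stuck at 0xffffffff means AER/EEH is in progress, which
+ * is ignored rather than treated as a hang.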
+ * + * Context: Interrupt + **/ +int +qla8044_check_fw_alive(struct scsi_qla_host *vha) +{ + uint32_t fw_heartbeat_counter; + uint32_t halt_status1, halt_status2; + int status = QLA_SUCCESS; + + fw_heartbeat_counter = qla8044_rd_direct(vha, + QLA8044_PEG_ALIVE_COUNTER_INDEX); + + /* If PEG_ALIVE_COUNTER is 0xffffffff, AER/EEH is in progress, ignore */ + if (fw_heartbeat_counter == 0xffffffff) { + ql_dbg(ql_dbg_p3p, vha, 0xb0d4, + "scsi%ld: %s: Device in frozen " + "state, QLA82XX_PEG_ALIVE_COUNTER is 0xffffffff\n", + vha->host_no, __func__); + return status; + } + + if (vha->fw_heartbeat_counter == fw_heartbeat_counter) { + vha->seconds_since_last_heartbeat++; + /* FW not alive after 2 seconds */ + if (vha->seconds_since_last_heartbeat == 2) { + vha->seconds_since_last_heartbeat = 0; + halt_status1 = qla8044_rd_direct(vha, + QLA8044_PEG_HALT_STATUS1_INDEX); + halt_status2 = qla8044_rd_direct(vha, + QLA8044_PEG_HALT_STATUS2_INDEX); + + ql_log(ql_log_info, vha, 0xb0d5, + "scsi(%ld): %s, ISP8044 " + "Dumping hw/fw registers:\n" + " PEG_HALT_STATUS1: 0x%x, " + "PEG_HALT_STATUS2: 0x%x,\n", + vha->host_no, __func__, halt_status1, + halt_status2); + status = QLA_FUNCTION_FAILED; + } + } else + vha->seconds_since_last_heartbeat = 0; + + vha->fw_heartbeat_counter = fw_heartbeat_counter; + return status; +} + +void +qla8044_watchdog(struct scsi_qla_host *vha) +{ + uint32_t dev_state, halt_status; + int halt_status_unrecoverable = 0; + struct qla_hw_data *ha = vha->hw; + + /* don't poll if reset is going on or FW hang in quiescent state */ + if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) || + test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) || + test_bit(ISP_ABORT_RETRY, &vha->dpc_flags) || + test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags))) { + dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX); + + if (qla8044_check_temp(vha)) { + set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags); + ha->flags.isp82xx_fw_hung = 1; + qla2xxx_wake_dpc(vha); + } else if (dev_state == QLA8XXX_DEV_NEED_RESET && + !test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) { + ql_log(ql_log_info, vha, 0xb0d6, + "%s: HW State: NEED RESET!\n", + __func__); + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + } else if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT && + !test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) { + ql_log(ql_log_info, vha, 0xb0d7, + "%s: HW State: NEED QUIES detected!\n", + __func__); + set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + } else { + /* Check firmware health */ + if (qla8044_check_fw_alive(vha)) { + halt_status = qla8044_rd_direct(vha, + QLA8044_PEG_HALT_STATUS1_INDEX); + if (halt_status & + QLA8044_HALT_STATUS_FW_RESET) { + ql_log(ql_log_fatal, vha, + 0xb0d8, "%s: Firmware " + "error detected device " + "is being reset\n", + __func__); + } else if (halt_status & + QLA8044_HALT_STATUS_UNRECOVERABLE) { + halt_status_unrecoverable = 1; + } + + /* Since we cannot change dev_state in interrupt + * context, set appropriate DPC flag then wakeup + * DPC */ + if (halt_status_unrecoverable) { + set_bit(ISP_UNRECOVERABLE, + &vha->dpc_flags); + } else { + if (dev_state == + QLA8XXX_DEV_QUIESCENT) { + set_bit(FCOE_CTX_RESET_NEEDED, + &vha->dpc_flags); + ql_log(ql_log_info, vha, 0xb0d9, + "%s: FW CONTEXT Reset " + "needed!\n", __func__); + } else { + ql_log(ql_log_info, vha, + 0xb0da, "%s: " + "detect abort needed\n", + __func__); + set_bit(ISP_ABORT_NEEDED, + &vha->dpc_flags); + qla82xx_clear_pending_mbx(vha); + } + } + ha->flags.isp82xx_fw_hung = 1; + ql_log(ql_log_warn, 
vha, 0xb10a, + "Firmware hung.\n"); + qla2xxx_wake_dpc(vha); + } + } + + } +} + +static int +qla8044_minidump_process_control(struct scsi_qla_host *vha, + struct qla8044_minidump_entry_hdr *entry_hdr) +{ + struct qla8044_minidump_entry_crb *crb_entry; + uint32_t read_value, opcode, poll_time, addr, index; + uint32_t crb_addr, rval = QLA_SUCCESS; + unsigned long wtime; + struct qla8044_minidump_template_hdr *tmplt_hdr; + int i; + struct qla_hw_data *ha = vha->hw; + + ql_dbg(ql_dbg_p3p, vha, 0xb0dd, "Entering fn: %s\n", __func__); + tmplt_hdr = (struct qla8044_minidump_template_hdr *) + ha->md_tmplt_hdr; + crb_entry = (struct qla8044_minidump_entry_crb *)entry_hdr; + + crb_addr = crb_entry->addr; + for (i = 0; i < crb_entry->op_count; i++) { + opcode = crb_entry->crb_ctrl.opcode; + + if (opcode & QLA82XX_DBG_OPCODE_WR) { + qla8044_wr_reg_indirect(vha, crb_addr, + crb_entry->value_1); + opcode &= ~QLA82XX_DBG_OPCODE_WR; + } + + if (opcode & QLA82XX_DBG_OPCODE_RW) { + qla8044_rd_reg_indirect(vha, crb_addr, &read_value); + qla8044_wr_reg_indirect(vha, crb_addr, read_value); + opcode &= ~QLA82XX_DBG_OPCODE_RW; + } + + if (opcode & QLA82XX_DBG_OPCODE_AND) { + qla8044_rd_reg_indirect(vha, crb_addr, &read_value); + read_value &= crb_entry->value_2; + opcode &= ~QLA82XX_DBG_OPCODE_AND; + if (opcode & QLA82XX_DBG_OPCODE_OR) { + read_value |= crb_entry->value_3; + opcode &= ~QLA82XX_DBG_OPCODE_OR; + } + qla8044_wr_reg_indirect(vha, crb_addr, read_value); + } + if (opcode & QLA82XX_DBG_OPCODE_OR) { + qla8044_rd_reg_indirect(vha, crb_addr, &read_value); + read_value |= crb_entry->value_3; + qla8044_wr_reg_indirect(vha, crb_addr, read_value); + opcode &= ~QLA82XX_DBG_OPCODE_OR; + } + if (opcode & QLA82XX_DBG_OPCODE_POLL) { + poll_time = crb_entry->crb_strd.poll_timeout; + wtime = jiffies + poll_time; + qla8044_rd_reg_indirect(vha, crb_addr, &read_value); + + do { + if ((read_value & crb_entry->value_2) == + crb_entry->value_1) { + break; + } else if (time_after_eq(jiffies, wtime)) { + /* capturing dump failed */ + rval = QLA_FUNCTION_FAILED; + break; + } else { + qla8044_rd_reg_indirect(vha, + crb_addr, &read_value); + } + } while (1); + opcode &= ~QLA82XX_DBG_OPCODE_POLL; + } + + if (opcode & QLA82XX_DBG_OPCODE_RDSTATE) { + if (crb_entry->crb_strd.state_index_a) { + index = crb_entry->crb_strd.state_index_a; + addr = tmplt_hdr->saved_state_array[index]; + } else { + addr = crb_addr; + } + + qla8044_rd_reg_indirect(vha, addr, &read_value); + index = crb_entry->crb_ctrl.state_index_v; + tmplt_hdr->saved_state_array[index] = read_value; + opcode &= ~QLA82XX_DBG_OPCODE_RDSTATE; + } + + if (opcode & QLA82XX_DBG_OPCODE_WRSTATE) { + if (crb_entry->crb_strd.state_index_a) { + index = crb_entry->crb_strd.state_index_a; + addr = tmplt_hdr->saved_state_array[index]; + } else { + addr = crb_addr; + } + + if (crb_entry->crb_ctrl.state_index_v) { + index = crb_entry->crb_ctrl.state_index_v; + read_value = + tmplt_hdr->saved_state_array[index]; + } else { + read_value = crb_entry->value_1; + } + + qla8044_wr_reg_indirect(vha, addr, read_value); + opcode &= ~QLA82XX_DBG_OPCODE_WRSTATE; + } + + if (opcode & QLA82XX_DBG_OPCODE_MDSTATE) { + index = crb_entry->crb_ctrl.state_index_v; + read_value = tmplt_hdr->saved_state_array[index]; + read_value <<= crb_entry->crb_ctrl.shl; + read_value >>= crb_entry->crb_ctrl.shr; + if (crb_entry->value_2) + read_value &= crb_entry->value_2; + read_value |= crb_entry->value_3; + read_value += crb_entry->value_1; + tmplt_hdr->saved_state_array[index] = read_value; + opcode &= 
~QLA82XX_DBG_OPCODE_MDSTATE;
+ }
+ crb_addr += crb_entry->crb_strd.addr_stride;
+ }
+ return rval;
+}
+
+static void
+qla8044_minidump_process_rdcrb(struct scsi_qla_host *vha,
+ struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
+{
+ uint32_t r_addr, r_stride, loop_cnt, i, r_value;
+ struct qla8044_minidump_entry_crb *crb_hdr;
+ uint32_t *data_ptr = *d_ptr;
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb0de, "Entering fn: %s\n", __func__);
+ crb_hdr = (struct qla8044_minidump_entry_crb *)entry_hdr;
+ r_addr = crb_hdr->addr;
+ r_stride = crb_hdr->crb_strd.addr_stride;
+ loop_cnt = crb_hdr->op_count;
+
+ for (i = 0; i < loop_cnt; i++) {
+ qla8044_rd_reg_indirect(vha, r_addr, &r_value);
+ *data_ptr++ = r_addr;
+ *data_ptr++ = r_value;
+ r_addr += r_stride;
+ }
+ *d_ptr = data_ptr;
+}
+
+static int
+qla8044_minidump_process_rdmem(struct scsi_qla_host *vha,
+ struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
+{
+ uint32_t r_addr, r_value, r_data;
+ uint32_t i, j, loop_cnt;
+ struct qla8044_minidump_entry_rdmem *m_hdr;
+ unsigned long flags;
+ uint32_t *data_ptr = *d_ptr;
+ struct qla_hw_data *ha = vha->hw;
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb0df, "Entering fn: %s\n", __func__);
+ m_hdr = (struct qla8044_minidump_entry_rdmem *)entry_hdr;
+ r_addr = m_hdr->read_addr;
+ loop_cnt = m_hdr->read_data_size/16;
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb0f0,
+ "[%s]: Read addr: 0x%x, read_data_size: 0x%x\n",
+ __func__, r_addr, m_hdr->read_data_size);
+
+ if (r_addr & 0xf) {
+ ql_dbg(ql_dbg_p3p, vha, 0xb0f1,
+ "[%s]: Read addr 0x%x not 16 bytes aligned\n",
+ __func__, r_addr);
+ return QLA_FUNCTION_FAILED;
+ }
+
+ if (m_hdr->read_data_size % 16) {
+ ql_dbg(ql_dbg_p3p, vha, 0xb0f2,
+ "[%s]: Read data[0x%x] not multiple of 16 bytes\n",
+ __func__, m_hdr->read_data_size);
+ return QLA_FUNCTION_FAILED;
+ }
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb0f3,
+ "[%s]: rdmem_addr: 0x%x, read_data_size: 0x%x, loop_cnt: 0x%x\n",
+ __func__, r_addr, m_hdr->read_data_size, loop_cnt);
+
+ write_lock_irqsave(&ha->hw_lock, flags);
+ for (i = 0; i < loop_cnt; i++) {
+ qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_ADDR_LO, r_addr);
+ r_value = 0;
+ qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_ADDR_HI, r_value);
+ r_value = MIU_TA_CTL_ENABLE;
+ qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_CTRL, r_value);
+ r_value = MIU_TA_CTL_START_ENABLE;
+ qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_CTRL, r_value);
+
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ qla8044_rd_reg_indirect(vha, MD_MIU_TEST_AGT_CTRL,
+ &r_value);
+ if ((r_value & MIU_TA_CTL_BUSY) == 0)
+ break;
+ }
+
+ if (j >= MAX_CTL_CHECK) {
+ printk_ratelimited(KERN_ERR
+ "%s: failed to read through agent\n", __func__);
+ write_unlock_irqrestore(&ha->hw_lock, flags);
+ return QLA_FUNCTION_FAILED;
+ }
+
+ for (j = 0; j < 4; j++) {
+ qla8044_rd_reg_indirect(vha, MD_MIU_TEST_AGT_RDDATA[j],
+ &r_data);
+ *data_ptr++ = r_data;
+ }
+
+ r_addr += 16;
+ }
+ write_unlock_irqrestore(&ha->hw_lock, flags);
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb0f4,
+ "Leaving fn: %s datacount: 0x%x\n",
+ __func__, (loop_cnt * 16));
+
+ *d_ptr = data_ptr;
+ return QLA_SUCCESS;
+}
+
+/* ISP8044 flash read for _RDROM _BOARD */
+static uint32_t
+qla8044_minidump_process_rdrom(struct scsi_qla_host *vha,
+ struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
+{
+ uint32_t fl_addr, u32_count, rval;
+ struct qla8044_minidump_entry_rdrom *rom_hdr;
+ uint32_t *data_ptr = *d_ptr;
+
+ rom_hdr = (struct qla8044_minidump_entry_rdrom *)entry_hdr;
+ fl_addr = rom_hdr->read_addr;
+ u32_count = (rom_hdr->read_data_size)/sizeof(uint32_t);
+
+
ql_dbg(ql_dbg_p3p, vha, 0xb0f5, "[%s]: fl_addr: 0x%x, count: 0x%x\n", + __func__, fl_addr, u32_count); + + rval = qla8044_lockless_flash_read_u32(vha, fl_addr, + (u8 *)(data_ptr), u32_count); + + if (rval != QLA_SUCCESS) { + ql_log(ql_log_fatal, vha, 0xb0f6, + "%s: Flash Read Error,Count=%d\n", __func__, u32_count); + return QLA_FUNCTION_FAILED; + } else { + data_ptr += u32_count; + *d_ptr = data_ptr; + return QLA_SUCCESS; + } +} + +static void +qla8044_mark_entry_skipped(struct scsi_qla_host *vha, + struct qla8044_minidump_entry_hdr *entry_hdr, int index) +{ + entry_hdr->d_ctrl.driver_flags |= QLA82XX_DBG_SKIPPED_FLAG; + + ql_log(ql_log_info, vha, 0xb0f7, + "scsi(%ld): Skipping entry[%d]: ETYPE[0x%x]-ELEVEL[0x%x]\n", + vha->host_no, index, entry_hdr->entry_type, + entry_hdr->d_ctrl.entry_capture_mask); +} + +static int +qla8044_minidump_process_l2tag(struct scsi_qla_host *vha, + struct qla8044_minidump_entry_hdr *entry_hdr, + uint32_t **d_ptr) +{ + uint32_t addr, r_addr, c_addr, t_r_addr; + uint32_t i, k, loop_count, t_value, r_cnt, r_value; + unsigned long p_wait, w_time, p_mask; + uint32_t c_value_w, c_value_r; + struct qla8044_minidump_entry_cache *cache_hdr; + int rval = QLA_FUNCTION_FAILED; + uint32_t *data_ptr = *d_ptr; + + ql_dbg(ql_dbg_p3p, vha, 0xb0f8, "Entering fn: %s\n", __func__); + cache_hdr = (struct qla8044_minidump_entry_cache *)entry_hdr; + + loop_count = cache_hdr->op_count; + r_addr = cache_hdr->read_addr; + c_addr = cache_hdr->control_addr; + c_value_w = cache_hdr->cache_ctrl.write_value; + + t_r_addr = cache_hdr->tag_reg_addr; + t_value = cache_hdr->addr_ctrl.init_tag_value; + r_cnt = cache_hdr->read_ctrl.read_addr_cnt; + p_wait = cache_hdr->cache_ctrl.poll_wait; + p_mask = cache_hdr->cache_ctrl.poll_mask; + + for (i = 0; i < loop_count; i++) { + qla8044_wr_reg_indirect(vha, t_r_addr, t_value); + if (c_value_w) + qla8044_wr_reg_indirect(vha, c_addr, c_value_w); + + if (p_mask) { + w_time = jiffies + p_wait; + do { + qla8044_rd_reg_indirect(vha, c_addr, + &c_value_r); + if ((c_value_r & p_mask) == 0) { + break; + } else if (time_after_eq(jiffies, w_time)) { + /* capturing dump failed */ + return rval; + } + } while (1); + } + + addr = r_addr; + for (k = 0; k < r_cnt; k++) { + qla8044_rd_reg_indirect(vha, addr, &r_value); + *data_ptr++ = r_value; + addr += cache_hdr->read_ctrl.read_addr_stride; + } + t_value += cache_hdr->addr_ctrl.tag_value_stride; + } + *d_ptr = data_ptr; + return QLA_SUCCESS; +} + +static void +qla8044_minidump_process_l1cache(struct scsi_qla_host *vha, + struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) +{ + uint32_t addr, r_addr, c_addr, t_r_addr; + uint32_t i, k, loop_count, t_value, r_cnt, r_value; + uint32_t c_value_w; + struct qla8044_minidump_entry_cache *cache_hdr; + uint32_t *data_ptr = *d_ptr; + + cache_hdr = (struct qla8044_minidump_entry_cache *)entry_hdr; + loop_count = cache_hdr->op_count; + r_addr = cache_hdr->read_addr; + c_addr = cache_hdr->control_addr; + c_value_w = cache_hdr->cache_ctrl.write_value; + + t_r_addr = cache_hdr->tag_reg_addr; + t_value = cache_hdr->addr_ctrl.init_tag_value; + r_cnt = cache_hdr->read_ctrl.read_addr_cnt; + + for (i = 0; i < loop_count; i++) { + qla8044_wr_reg_indirect(vha, t_r_addr, t_value); + qla8044_wr_reg_indirect(vha, c_addr, c_value_w); + addr = r_addr; + for (k = 0; k < r_cnt; k++) { + qla8044_rd_reg_indirect(vha, addr, &r_value); + *data_ptr++ = r_value; + addr += cache_hdr->read_ctrl.read_addr_stride; + } + t_value += cache_hdr->addr_ctrl.tag_value_stride; + } + *d_ptr = data_ptr; 
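	/*
	 * Hedged sketch (names reused from this function, not new code):
	 * both cache walks drive the same per-tag pattern --
	 *
	 *	qla8044_wr_reg_indirect(vha, t_r_addr, t_value);   select tag
	 *	qla8044_wr_reg_indirect(vha, c_addr, c_value_w);   kick control reg
	 *	for (k = 0; k < r_cnt; k++)                        harvest r_cnt words
	 *		qla8044_rd_reg_indirect(vha, addr, &r_value);
	 *
	 * -- with the l2tag variant above additionally polling c_addr
	 * against poll_mask (bounded by poll_wait) before harvesting.
	 */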
+} + +static void +qla8044_minidump_process_rdocm(struct scsi_qla_host *vha, + struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) +{ + uint32_t r_addr, r_stride, loop_cnt, i, r_value; + struct qla8044_minidump_entry_rdocm *ocm_hdr; + uint32_t *data_ptr = *d_ptr; + struct qla_hw_data *ha = vha->hw; + + ql_dbg(ql_dbg_p3p, vha, 0xb0f9, "Entering fn: %s\n", __func__); + + ocm_hdr = (struct qla8044_minidump_entry_rdocm *)entry_hdr; + r_addr = ocm_hdr->read_addr; + r_stride = ocm_hdr->read_addr_stride; + loop_cnt = ocm_hdr->op_count; + + ql_dbg(ql_dbg_p3p, vha, 0xb0fa, + "[%s]: r_addr: 0x%x, r_stride: 0x%x, loop_cnt: 0x%x\n", + __func__, r_addr, r_stride, loop_cnt); + + for (i = 0; i < loop_cnt; i++) { + r_value = readl((void __iomem *)(r_addr + ha->nx_pcibase)); + *data_ptr++ = r_value; + r_addr += r_stride; + } + ql_dbg(ql_dbg_p3p, vha, 0xb0fb, "Leaving fn: %s datacount: 0x%lx\n", + __func__, (long unsigned int) (loop_cnt * sizeof(uint32_t))); + + *d_ptr = data_ptr; +} + +static void +qla8044_minidump_process_rdmux(struct scsi_qla_host *vha, + struct qla8044_minidump_entry_hdr *entry_hdr, + uint32_t **d_ptr) +{ + uint32_t r_addr, s_stride, s_addr, s_value, loop_cnt, i, r_value; + struct qla8044_minidump_entry_mux *mux_hdr; + uint32_t *data_ptr = *d_ptr; + + ql_dbg(ql_dbg_p3p, vha, 0xb0fc, "Entering fn: %s\n", __func__); + + mux_hdr = (struct qla8044_minidump_entry_mux *)entry_hdr; + r_addr = mux_hdr->read_addr; + s_addr = mux_hdr->select_addr; + s_stride = mux_hdr->select_value_stride; + s_value = mux_hdr->select_value; + loop_cnt = mux_hdr->op_count; + + for (i = 0; i < loop_cnt; i++) { + qla8044_wr_reg_indirect(vha, s_addr, s_value); + qla8044_rd_reg_indirect(vha, r_addr, &r_value); + *data_ptr++ = s_value; + *data_ptr++ = r_value; + s_value += s_stride; + } + *d_ptr = data_ptr; +} + +static void +qla8044_minidump_process_queue(struct scsi_qla_host *vha, + struct qla8044_minidump_entry_hdr *entry_hdr, + uint32_t **d_ptr) +{ + uint32_t s_addr, r_addr; + uint32_t r_stride, r_value, r_cnt, qid = 0; + uint32_t i, k, loop_cnt; + struct qla8044_minidump_entry_queue *q_hdr; + uint32_t *data_ptr = *d_ptr; + + ql_dbg(ql_dbg_p3p, vha, 0xb0fd, "Entering fn: %s\n", __func__); + q_hdr = (struct qla8044_minidump_entry_queue *)entry_hdr; + s_addr = q_hdr->select_addr; + r_cnt = q_hdr->rd_strd.read_addr_cnt; + r_stride = q_hdr->rd_strd.read_addr_stride; + loop_cnt = q_hdr->op_count; + + for (i = 0; i < loop_cnt; i++) { + qla8044_wr_reg_indirect(vha, s_addr, qid); + r_addr = q_hdr->read_addr; + for (k = 0; k < r_cnt; k++) { + qla8044_rd_reg_indirect(vha, r_addr, &r_value); + *data_ptr++ = r_value; + r_addr += r_stride; + } + qid += q_hdr->q_strd.queue_id_stride; + } + *d_ptr = data_ptr; +} + +/* ISP83xx functions to process new minidump entries... 
*/ +static uint32_t +qla8044_minidump_process_pollrd(struct scsi_qla_host *vha, + struct qla8044_minidump_entry_hdr *entry_hdr, + uint32_t **d_ptr) +{ + uint32_t r_addr, s_addr, s_value, r_value, poll_wait, poll_mask; + uint16_t s_stride, i; + struct qla8044_minidump_entry_pollrd *pollrd_hdr; + uint32_t *data_ptr = *d_ptr; + + pollrd_hdr = (struct qla8044_minidump_entry_pollrd *) entry_hdr; + s_addr = pollrd_hdr->select_addr; + r_addr = pollrd_hdr->read_addr; + s_value = pollrd_hdr->select_value; + s_stride = pollrd_hdr->select_value_stride; + + poll_wait = pollrd_hdr->poll_wait; + poll_mask = pollrd_hdr->poll_mask; + + for (i = 0; i < pollrd_hdr->op_count; i++) { + qla8044_wr_reg_indirect(vha, s_addr, s_value); + poll_wait = pollrd_hdr->poll_wait; + while (1) { + qla8044_rd_reg_indirect(vha, s_addr, &r_value); + if ((r_value & poll_mask) != 0) { + break; + } else { + usleep_range(1000, 1100); + if (--poll_wait == 0) { + ql_log(ql_log_fatal, vha, 0xb0fe, + "%s: TIMEOUT\n", __func__); + goto error; + } + } + } + qla8044_rd_reg_indirect(vha, r_addr, &r_value); + *data_ptr++ = s_value; + *data_ptr++ = r_value; + + s_value += s_stride; + } + *d_ptr = data_ptr; + return QLA_SUCCESS; + +error: + return QLA_FUNCTION_FAILED; +} + +static void +qla8044_minidump_process_rdmux2(struct scsi_qla_host *vha, + struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) +{ + uint32_t sel_val1, sel_val2, t_sel_val, data, i; + uint32_t sel_addr1, sel_addr2, sel_val_mask, read_addr; + struct qla8044_minidump_entry_rdmux2 *rdmux2_hdr; + uint32_t *data_ptr = *d_ptr; + + rdmux2_hdr = (struct qla8044_minidump_entry_rdmux2 *) entry_hdr; + sel_val1 = rdmux2_hdr->select_value_1; + sel_val2 = rdmux2_hdr->select_value_2; + sel_addr1 = rdmux2_hdr->select_addr_1; + sel_addr2 = rdmux2_hdr->select_addr_2; + sel_val_mask = rdmux2_hdr->select_value_mask; + read_addr = rdmux2_hdr->read_addr; + + for (i = 0; i < rdmux2_hdr->op_count; i++) { + qla8044_wr_reg_indirect(vha, sel_addr1, sel_val1); + t_sel_val = sel_val1 & sel_val_mask; + *data_ptr++ = t_sel_val; + + qla8044_wr_reg_indirect(vha, sel_addr2, t_sel_val); + qla8044_rd_reg_indirect(vha, read_addr, &data); + + *data_ptr++ = data; + + qla8044_wr_reg_indirect(vha, sel_addr1, sel_val2); + t_sel_val = sel_val2 & sel_val_mask; + *data_ptr++ = t_sel_val; + + qla8044_wr_reg_indirect(vha, sel_addr2, t_sel_val); + qla8044_rd_reg_indirect(vha, read_addr, &data); + + *data_ptr++ = data; + + sel_val1 += rdmux2_hdr->select_value_stride; + sel_val2 += rdmux2_hdr->select_value_stride; + } + + *d_ptr = data_ptr; +} + +static uint32_t +qla8044_minidump_process_pollrdmwr(struct scsi_qla_host *vha, + struct qla8044_minidump_entry_hdr *entry_hdr, + uint32_t **d_ptr) +{ + uint32_t poll_wait, poll_mask, r_value, data; + uint32_t addr_1, addr_2, value_1, value_2; + struct qla8044_minidump_entry_pollrdmwr *poll_hdr; + uint32_t *data_ptr = *d_ptr; + + poll_hdr = (struct qla8044_minidump_entry_pollrdmwr *) entry_hdr; + addr_1 = poll_hdr->addr_1; + addr_2 = poll_hdr->addr_2; + value_1 = poll_hdr->value_1; + value_2 = poll_hdr->value_2; + poll_mask = poll_hdr->poll_mask; + + qla8044_wr_reg_indirect(vha, addr_1, value_1); + + poll_wait = poll_hdr->poll_wait; + while (1) { + qla8044_rd_reg_indirect(vha, addr_1, &r_value); + + if ((r_value & poll_mask) != 0) { + break; + } else { + usleep_range(1000, 1100); + if (--poll_wait == 0) { + ql_log(ql_log_fatal, vha, 0xb0ff, + "%s: TIMEOUT\n", __func__); + goto error; + } + } + } + + qla8044_rd_reg_indirect(vha, addr_2, &data); + data &= 
poll_hdr->modify_mask; + qla8044_wr_reg_indirect(vha, addr_2, data); + qla8044_wr_reg_indirect(vha, addr_1, value_2); + + poll_wait = poll_hdr->poll_wait; + while (1) { + qla8044_rd_reg_indirect(vha, addr_1, &r_value); + + if ((r_value & poll_mask) != 0) { + break; + } else { + usleep_range(1000, 1100); + if (--poll_wait == 0) { + ql_log(ql_log_fatal, vha, 0xb100, + "%s: TIMEOUT2\n", __func__); + goto error; + } + } + } + + *data_ptr++ = addr_2; + *data_ptr++ = data; + + *d_ptr = data_ptr; + + return QLA_SUCCESS; + +error: + return QLA_FUNCTION_FAILED; +} + +#define ISP8044_PEX_DMA_ENGINE_INDEX 8 +#define ISP8044_PEX_DMA_BASE_ADDRESS 0x77320000 +#define ISP8044_PEX_DMA_NUM_OFFSET 0x10000 +#define ISP8044_PEX_DMA_CMD_ADDR_LOW 0x0 +#define ISP8044_PEX_DMA_CMD_ADDR_HIGH 0x04 +#define ISP8044_PEX_DMA_CMD_STS_AND_CNTRL 0x08 + +#define ISP8044_PEX_DMA_READ_SIZE (16 * 1024) +#define ISP8044_PEX_DMA_MAX_WAIT (100 * 100) /* Max wait of 100 msecs */ + +static int +qla8044_check_dma_engine_state(struct scsi_qla_host *vha) +{ + struct qla_hw_data *ha = vha->hw; + int rval = QLA_SUCCESS; + uint32_t dma_eng_num = 0, cmd_sts_and_cntrl = 0; + uint64_t dma_base_addr = 0; + struct qla8044_minidump_template_hdr *tmplt_hdr = NULL; + + tmplt_hdr = ha->md_tmplt_hdr; + dma_eng_num = + tmplt_hdr->saved_state_array[ISP8044_PEX_DMA_ENGINE_INDEX]; + dma_base_addr = ISP8044_PEX_DMA_BASE_ADDRESS + + (dma_eng_num * ISP8044_PEX_DMA_NUM_OFFSET); + + /* Read the pex-dma's command-status-and-control register. */ + rval = qla8044_rd_reg_indirect(vha, + (dma_base_addr + ISP8044_PEX_DMA_CMD_STS_AND_CNTRL), + &cmd_sts_and_cntrl); + if (rval) + return QLA_FUNCTION_FAILED; + + /* Check if requested pex-dma engine is available. */ + if (cmd_sts_and_cntrl & BIT_31) + return QLA_SUCCESS; + + return QLA_FUNCTION_FAILED; +} + +static int +qla8044_start_pex_dma(struct scsi_qla_host *vha, + struct qla8044_minidump_entry_rdmem_pex_dma *m_hdr) +{ + struct qla_hw_data *ha = vha->hw; + int rval = QLA_SUCCESS, wait = 0; + uint32_t dma_eng_num = 0, cmd_sts_and_cntrl = 0; + uint64_t dma_base_addr = 0; + struct qla8044_minidump_template_hdr *tmplt_hdr = NULL; + + tmplt_hdr = ha->md_tmplt_hdr; + dma_eng_num = + tmplt_hdr->saved_state_array[ISP8044_PEX_DMA_ENGINE_INDEX]; + dma_base_addr = ISP8044_PEX_DMA_BASE_ADDRESS + + (dma_eng_num * ISP8044_PEX_DMA_NUM_OFFSET); + + rval = qla8044_wr_reg_indirect(vha, + dma_base_addr + ISP8044_PEX_DMA_CMD_ADDR_LOW, + m_hdr->desc_card_addr); + if (rval) + goto error_exit; + + rval = qla8044_wr_reg_indirect(vha, + dma_base_addr + ISP8044_PEX_DMA_CMD_ADDR_HIGH, 0); + if (rval) + goto error_exit; + + rval = qla8044_wr_reg_indirect(vha, + dma_base_addr + ISP8044_PEX_DMA_CMD_STS_AND_CNTRL, + m_hdr->start_dma_cmd); + if (rval) + goto error_exit; + + /* Wait for dma operation to complete. 
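	 *
	 * Condensed sketch of the wait that follows: BIT_1 of the
	 * command-status-and-control register is treated as the busy
	 * flag and is polled with a 10 us delay for up to
	 * ISP8044_PEX_DMA_MAX_WAIT iterations (about 100 ms); on timeout
	 * this routine fails and qla8044_collect_md_data() falls back to
	 * the register-based rdmem read.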
*/ + for (wait = 0; wait < ISP8044_PEX_DMA_MAX_WAIT; wait++) { + rval = qla8044_rd_reg_indirect(vha, + (dma_base_addr + ISP8044_PEX_DMA_CMD_STS_AND_CNTRL), + &cmd_sts_and_cntrl); + if (rval) + goto error_exit; + + if ((cmd_sts_and_cntrl & BIT_1) == 0) + break; + + udelay(10); + } + + /* Wait a max of 100 ms, otherwise fallback to rdmem entry read */ + if (wait >= ISP8044_PEX_DMA_MAX_WAIT) { + rval = QLA_FUNCTION_FAILED; + goto error_exit; + } + +error_exit: + return rval; +} + +static int +qla8044_minidump_pex_dma_read(struct scsi_qla_host *vha, + struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) +{ + struct qla_hw_data *ha = vha->hw; + int rval = QLA_SUCCESS; + struct qla8044_minidump_entry_rdmem_pex_dma *m_hdr = NULL; + uint32_t chunk_size, read_size; + uint8_t *data_ptr = (uint8_t *)*d_ptr; + void *rdmem_buffer = NULL; + dma_addr_t rdmem_dma; + struct qla8044_pex_dma_descriptor dma_desc; + + rval = qla8044_check_dma_engine_state(vha); + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_p3p, vha, 0xb147, + "DMA engine not available. Fallback to rdmem-read.\n"); + return QLA_FUNCTION_FAILED; + } + + m_hdr = (void *)entry_hdr; + + rdmem_buffer = dma_alloc_coherent(&ha->pdev->dev, + ISP8044_PEX_DMA_READ_SIZE, &rdmem_dma, GFP_KERNEL); + if (!rdmem_buffer) { + ql_dbg(ql_dbg_p3p, vha, 0xb148, + "Unable to allocate rdmem dma buffer\n"); + return QLA_FUNCTION_FAILED; + } + + /* Prepare pex-dma descriptor to be written to MS memory. */ + /* dma-desc-cmd layout: + * 0-3: dma-desc-cmd 0-3 + * 4-7: pcid function number + * 8-15: dma-desc-cmd 8-15 + * dma_bus_addr: dma buffer address + * cmd.read_data_size: amount of data-chunk to be read. + */ + dma_desc.cmd.dma_desc_cmd = (m_hdr->dma_desc_cmd & 0xff0f); + dma_desc.cmd.dma_desc_cmd |= + ((PCI_FUNC(ha->pdev->devfn) & 0xf) << 0x4); + + dma_desc.dma_bus_addr = rdmem_dma; + dma_desc.cmd.read_data_size = chunk_size = ISP8044_PEX_DMA_READ_SIZE; + read_size = 0; + + /* + * Perform rdmem operation using pex-dma. + * Prepare dma in chunks of ISP8044_PEX_DMA_READ_SIZE. + */ + while (read_size < m_hdr->read_data_size) { + if (m_hdr->read_data_size - read_size < + ISP8044_PEX_DMA_READ_SIZE) { + chunk_size = (m_hdr->read_data_size - read_size); + dma_desc.cmd.read_data_size = chunk_size; + } + + dma_desc.src_addr = m_hdr->read_addr + read_size; + + /* Prepare: Write pex-dma descriptor to MS memory. */ + rval = qla8044_ms_mem_write_128b(vha, + m_hdr->desc_card_addr, (void *)&dma_desc, + (sizeof(struct qla8044_pex_dma_descriptor)/16)); + if (rval) { + ql_log(ql_log_warn, vha, 0xb14a, + "%s: Error writing rdmem-dma-init to MS !!!\n", + __func__); + goto error_exit; + } + ql_dbg(ql_dbg_p3p, vha, 0xb14b, + "%s: Dma-descriptor: Instruct for rdmem dma " + "(chunk_size 0x%x).\n", __func__, chunk_size); + + /* Execute: Start pex-dma operation. */ + rval = qla8044_start_pex_dma(vha, m_hdr); + if (rval) + goto error_exit; + + memcpy(data_ptr, rdmem_buffer, chunk_size); + data_ptr += chunk_size; + read_size += chunk_size; + } + + *d_ptr = (void *)data_ptr; + +error_exit: + if (rdmem_buffer) + dma_free_coherent(&ha->pdev->dev, ISP8044_PEX_DMA_READ_SIZE, + rdmem_buffer, rdmem_dma); + + return rval; +} + +/* + * + * qla8044_collect_md_data - Retrieve firmware minidump data. 
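 *
 * Flow, condensed from the body below: validate the template
 * checksum, stamp driver_timestamp, then walk num_of_entries entry
 * headers, dispatching each entry_type to a capture helper above;
 * entries whose capture mask is not enabled are flagged
 * QLA82XX_DBG_SKIPPED_FLAG, and on completion data_collected must
 * equal md_dump_size or the dump is rejected.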
+ * @ha: pointer to adapter structure + **/ +int +qla8044_collect_md_data(struct scsi_qla_host *vha) +{ + int num_entry_hdr = 0; + struct qla8044_minidump_entry_hdr *entry_hdr; + struct qla8044_minidump_template_hdr *tmplt_hdr; + uint32_t *data_ptr; + uint32_t data_collected = 0, f_capture_mask; + int i, rval = QLA_FUNCTION_FAILED; + uint64_t now; + uint32_t timestamp, idc_control; + struct qla_hw_data *ha = vha->hw; + + if (!ha->md_dump) { + ql_log(ql_log_info, vha, 0xb101, + "%s(%ld) No buffer to dump\n", + __func__, vha->host_no); + return rval; + } + + if (ha->fw_dumped) { + ql_log(ql_log_warn, vha, 0xb10d, + "Firmware has been previously dumped (%p) " + "-- ignoring request.\n", ha->fw_dump); + goto md_failed; + } + + ha->fw_dumped = 0; + + if (!ha->md_tmplt_hdr || !ha->md_dump) { + ql_log(ql_log_warn, vha, 0xb10e, + "Memory not allocated for minidump capture\n"); + goto md_failed; + } + + qla8044_idc_lock(ha); + idc_control = qla8044_rd_reg(ha, QLA8044_IDC_DRV_CTRL); + if (idc_control & GRACEFUL_RESET_BIT1) { + ql_log(ql_log_warn, vha, 0xb112, + "Forced reset from application, " + "ignore minidump capture\n"); + qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL, + (idc_control & ~GRACEFUL_RESET_BIT1)); + qla8044_idc_unlock(ha); + + goto md_failed; + } + qla8044_idc_unlock(ha); + + if (qla82xx_validate_template_chksum(vha)) { + ql_log(ql_log_info, vha, 0xb109, + "Template checksum validation error\n"); + goto md_failed; + } + + tmplt_hdr = (struct qla8044_minidump_template_hdr *) + ha->md_tmplt_hdr; + data_ptr = (uint32_t *)((uint8_t *)ha->md_dump); + num_entry_hdr = tmplt_hdr->num_of_entries; + + ql_dbg(ql_dbg_p3p, vha, 0xb11a, + "Capture Mask obtained: 0x%x\n", tmplt_hdr->capture_debug_level); + + f_capture_mask = tmplt_hdr->capture_debug_level & 0xFF; + + /* Validate whether required debug level is set */ + if ((f_capture_mask & 0x3) != 0x3) { + ql_log(ql_log_warn, vha, 0xb10f, + "Minimum required capture mask[0x%x] level not set\n", + f_capture_mask); + + } + tmplt_hdr->driver_capture_mask = ql2xmdcapmask; + ql_log(ql_log_info, vha, 0xb102, + "[%s]: starting data ptr: %p\n", + __func__, data_ptr); + ql_log(ql_log_info, vha, 0xb10b, + "[%s]: no of entry headers in Template: 0x%x\n", + __func__, num_entry_hdr); + ql_log(ql_log_info, vha, 0xb10c, + "[%s]: Total_data_size 0x%x, %d obtained\n", + __func__, ha->md_dump_size, ha->md_dump_size); + + /* Update current timestamp before taking dump */ + now = get_jiffies_64(); + timestamp = (u32)(jiffies_to_msecs(now) / 1000); + tmplt_hdr->driver_timestamp = timestamp; + + entry_hdr = (struct qla8044_minidump_entry_hdr *) + (((uint8_t *)ha->md_tmplt_hdr) + tmplt_hdr->first_entry_offset); + tmplt_hdr->saved_state_array[QLA8044_SS_OCM_WNDREG_INDEX] = + tmplt_hdr->ocm_window_reg[ha->portnum]; + + /* Walk through the entry headers - validate/perform required action */ + for (i = 0; i < num_entry_hdr; i++) { + if (data_collected > ha->md_dump_size) { + ql_log(ql_log_info, vha, 0xb103, + "Data collected: [0x%x], " + "Total Dump size: [0x%x]\n", + data_collected, ha->md_dump_size); + return rval; + } + + if (!(entry_hdr->d_ctrl.entry_capture_mask & + ql2xmdcapmask)) { + entry_hdr->d_ctrl.driver_flags |= + QLA82XX_DBG_SKIPPED_FLAG; + goto skip_nxt_entry; + } + + ql_dbg(ql_dbg_p3p, vha, 0xb104, + "Data collected: [0x%x], Dump size left:[0x%x]\n", + data_collected, + (ha->md_dump_size - data_collected)); + + /* Decode the entry type and take required action to capture + * debug data + */ + switch (entry_hdr->entry_type) { + case QLA82XX_RDEND: + 
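			/*
			 * Failure policy for this switch: CNTRL, the L2-tag
			 * group and the rdmem fallback abort the capture via
			 * md_failed, while RDROM, POLLRD and POLLRDMWR
			 * failures only mark the entry skipped and move on.
			 */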
qla8044_mark_entry_skipped(vha, entry_hdr, i); + break; + case QLA82XX_CNTRL: + rval = qla8044_minidump_process_control(vha, + entry_hdr); + if (rval != QLA_SUCCESS) { + qla8044_mark_entry_skipped(vha, entry_hdr, i); + goto md_failed; + } + break; + case QLA82XX_RDCRB: + qla8044_minidump_process_rdcrb(vha, + entry_hdr, &data_ptr); + break; + case QLA82XX_RDMEM: + rval = qla8044_minidump_pex_dma_read(vha, + entry_hdr, &data_ptr); + if (rval != QLA_SUCCESS) { + rval = qla8044_minidump_process_rdmem(vha, + entry_hdr, &data_ptr); + if (rval != QLA_SUCCESS) { + qla8044_mark_entry_skipped(vha, + entry_hdr, i); + goto md_failed; + } + } + break; + case QLA82XX_BOARD: + case QLA82XX_RDROM: + rval = qla8044_minidump_process_rdrom(vha, + entry_hdr, &data_ptr); + if (rval != QLA_SUCCESS) { + qla8044_mark_entry_skipped(vha, + entry_hdr, i); + } + break; + case QLA82XX_L2DTG: + case QLA82XX_L2ITG: + case QLA82XX_L2DAT: + case QLA82XX_L2INS: + rval = qla8044_minidump_process_l2tag(vha, + entry_hdr, &data_ptr); + if (rval != QLA_SUCCESS) { + qla8044_mark_entry_skipped(vha, entry_hdr, i); + goto md_failed; + } + break; + case QLA8044_L1DTG: + case QLA8044_L1ITG: + case QLA82XX_L1DAT: + case QLA82XX_L1INS: + qla8044_minidump_process_l1cache(vha, + entry_hdr, &data_ptr); + break; + case QLA82XX_RDOCM: + qla8044_minidump_process_rdocm(vha, + entry_hdr, &data_ptr); + break; + case QLA82XX_RDMUX: + qla8044_minidump_process_rdmux(vha, + entry_hdr, &data_ptr); + break; + case QLA82XX_QUEUE: + qla8044_minidump_process_queue(vha, + entry_hdr, &data_ptr); + break; + case QLA8044_POLLRD: + rval = qla8044_minidump_process_pollrd(vha, + entry_hdr, &data_ptr); + if (rval != QLA_SUCCESS) + qla8044_mark_entry_skipped(vha, entry_hdr, i); + break; + case QLA8044_RDMUX2: + qla8044_minidump_process_rdmux2(vha, + entry_hdr, &data_ptr); + break; + case QLA8044_POLLRDMWR: + rval = qla8044_minidump_process_pollrdmwr(vha, + entry_hdr, &data_ptr); + if (rval != QLA_SUCCESS) + qla8044_mark_entry_skipped(vha, entry_hdr, i); + break; + case QLA82XX_RDNOP: + default: + qla8044_mark_entry_skipped(vha, entry_hdr, i); + break; + } + + data_collected = (uint8_t *)data_ptr - + (uint8_t *)((uint8_t *)ha->md_dump); +skip_nxt_entry: + /* + * next entry in the template + */ + entry_hdr = (struct qla8044_minidump_entry_hdr *) + (((uint8_t *)entry_hdr) + entry_hdr->entry_size); + } + + if (data_collected != ha->md_dump_size) { + ql_log(ql_log_info, vha, 0xb105, + "Dump data mismatch: Data collected: " + "[0x%x], total_data_size:[0x%x]\n", + data_collected, ha->md_dump_size); + goto md_failed; + } + + ql_log(ql_log_info, vha, 0xb110, + "Firmware dump saved to temp buffer (%ld/%p %ld/%p).\n", + vha->host_no, ha->md_tmplt_hdr, vha->host_no, ha->md_dump); + ha->fw_dumped = 1; + qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP); + + + ql_log(ql_log_info, vha, 0xb106, + "Leaving fn: %s Last entry: 0x%x\n", + __func__, i); +md_failed: + return rval; +} + +void +qla8044_get_minidump(struct scsi_qla_host *vha) +{ + struct qla_hw_data *ha = vha->hw; + + if (!qla8044_collect_md_data(vha)) { + ha->fw_dumped = 1; + } else { + ql_log(ql_log_fatal, vha, 0xb0db, + "%s: Unable to collect minidump\n", + __func__); + } +} + +static int +qla8044_poll_flash_status_reg(struct scsi_qla_host *vha) +{ + uint32_t flash_status; + int retries = QLA8044_FLASH_READ_RETRY_COUNT; + int ret_val = QLA_SUCCESS; + + while (retries--) { + ret_val = qla8044_rd_reg_indirect(vha, QLA8044_FLASH_STATUS, + &flash_status); + if (ret_val) { + ql_log(ql_log_warn, vha, 0xb13c, + "%s: 
Failed to read FLASH_STATUS reg.\n", + __func__); + break; + } + if ((flash_status & QLA8044_FLASH_STATUS_READY) == + QLA8044_FLASH_STATUS_READY) + break; + msleep(QLA8044_FLASH_STATUS_REG_POLL_DELAY); + } + + if (!retries) + ret_val = QLA_FUNCTION_FAILED; + + return ret_val; +} + +static int +qla8044_write_flash_status_reg(struct scsi_qla_host *vha, + uint32_t data) +{ + int ret_val = QLA_SUCCESS; + uint32_t cmd; + + cmd = vha->hw->fdt_wrt_sts_reg_cmd; + + ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR, + QLA8044_FLASH_STATUS_WRITE_DEF_SIG | cmd); + if (ret_val) { + ql_log(ql_log_warn, vha, 0xb125, + "%s: Failed to write to FLASH_ADDR.\n", __func__); + goto exit_func; + } + + ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_WRDATA, data); + if (ret_val) { + ql_log(ql_log_warn, vha, 0xb126, + "%s: Failed to write to FLASH_WRDATA.\n", __func__); + goto exit_func; + } + + ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL, + QLA8044_FLASH_SECOND_ERASE_MS_VAL); + if (ret_val) { + ql_log(ql_log_warn, vha, 0xb127, + "%s: Failed to write to FLASH_CONTROL.\n", __func__); + goto exit_func; + } + + ret_val = qla8044_poll_flash_status_reg(vha); + if (ret_val) + ql_log(ql_log_warn, vha, 0xb128, + "%s: Error polling flash status reg.\n", __func__); + +exit_func: + return ret_val; +} + +/* + * This function assumes that the flash lock is held. + */ +static int +qla8044_unprotect_flash(scsi_qla_host_t *vha) +{ + int ret_val; + struct qla_hw_data *ha = vha->hw; + + ret_val = qla8044_write_flash_status_reg(vha, ha->fdt_wrt_enable); + if (ret_val) + ql_log(ql_log_warn, vha, 0xb139, + "%s: Write flash status failed.\n", __func__); + + return ret_val; +} + +/* + * This function assumes that the flash lock is held. + */ +static int +qla8044_protect_flash(scsi_qla_host_t *vha) +{ + int ret_val; + struct qla_hw_data *ha = vha->hw; + + ret_val = qla8044_write_flash_status_reg(vha, ha->fdt_wrt_disable); + if (ret_val) + ql_log(ql_log_warn, vha, 0xb13b, + "%s: Write flash status failed.\n", __func__); + + return ret_val; +} + + +static int +qla8044_erase_flash_sector(struct scsi_qla_host *vha, + uint32_t sector_start_addr) +{ + uint32_t reversed_addr; + int ret_val = QLA_SUCCESS; + + ret_val = qla8044_poll_flash_status_reg(vha); + if (ret_val) { + ql_log(ql_log_warn, vha, 0xb12e, + "%s: Poll flash status after erase failed..\n", __func__); + } + + reversed_addr = (((sector_start_addr & 0xFF) << 16) | + (sector_start_addr & 0xFF00) | + ((sector_start_addr & 0xFF0000) >> 16)); + + ret_val = qla8044_wr_reg_indirect(vha, + QLA8044_FLASH_WRDATA, reversed_addr); + if (ret_val) { + ql_log(ql_log_warn, vha, 0xb12f, + "%s: Failed to write to FLASH_WRDATA.\n", __func__); + } + ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR, + QLA8044_FLASH_ERASE_SIG | vha->hw->fdt_erase_cmd); + if (ret_val) { + ql_log(ql_log_warn, vha, 0xb130, + "%s: Failed to write to FLASH_ADDR.\n", __func__); + } + ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL, + QLA8044_FLASH_LAST_ERASE_MS_VAL); + if (ret_val) { + ql_log(ql_log_warn, vha, 0xb131, + "%s: Failed write to FLASH_CONTROL.\n", __func__); + } + ret_val = qla8044_poll_flash_status_reg(vha); + if (ret_val) { + ql_log(ql_log_warn, vha, 0xb132, + "%s: Poll flash status failed.\n", __func__); + } + + + return ret_val; +} + +/* + * qla8044_flash_write_u32 - Write data to flash + * + * @ha : Pointer to adapter structure + * addr : Flash address to write to + * p_data : Data to be written + * + * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED + * + * 
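 * The write is a fixed four-step register sequence (a condensed
 * sketch of the body, not additional behavior):
 *
 *	FLASH_ADDR    <- 0x00800000 | (addr >> 2)
 *	FLASH_WRDATA  <- *p_data
 *	FLASH_CONTROL <- 0x3D  (start the program cycle)
 *	poll FLASH_STATUS until ready
 *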
NOTE: Lock should be held on entry + */ +static int +qla8044_flash_write_u32(struct scsi_qla_host *vha, uint32_t addr, + uint32_t *p_data) +{ + int ret_val = QLA_SUCCESS; + + ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR, + 0x00800000 | (addr >> 2)); + if (ret_val) { + ql_log(ql_log_warn, vha, 0xb134, + "%s: Failed write to FLASH_ADDR.\n", __func__); + goto exit_func; + } + ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_WRDATA, *p_data); + if (ret_val) { + ql_log(ql_log_warn, vha, 0xb135, + "%s: Failed write to FLASH_WRDATA.\n", __func__); + goto exit_func; + } + ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL, 0x3D); + if (ret_val) { + ql_log(ql_log_warn, vha, 0xb136, + "%s: Failed write to FLASH_CONTROL.\n", __func__); + goto exit_func; + } + ret_val = qla8044_poll_flash_status_reg(vha); + if (ret_val) { + ql_log(ql_log_warn, vha, 0xb137, + "%s: Poll flash status failed.\n", __func__); + } + +exit_func: + return ret_val; +} + +static int +qla8044_write_flash_buffer_mode(scsi_qla_host_t *vha, uint32_t *dwptr, + uint32_t faddr, uint32_t dwords) +{ + int ret = QLA_FUNCTION_FAILED; + uint32_t spi_val; + + if (dwords < QLA8044_MIN_OPTROM_BURST_DWORDS || + dwords > QLA8044_MAX_OPTROM_BURST_DWORDS) { + ql_dbg(ql_dbg_user, vha, 0xb123, + "Got unsupported dwords = 0x%x.\n", + dwords); + return QLA_FUNCTION_FAILED; + } + + qla8044_rd_reg_indirect(vha, QLA8044_FLASH_SPI_CONTROL, &spi_val); + qla8044_wr_reg_indirect(vha, QLA8044_FLASH_SPI_CONTROL, + spi_val | QLA8044_FLASH_SPI_CTL); + qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR, + QLA8044_FLASH_FIRST_TEMP_VAL); + + /* First DWORD write to FLASH_WRDATA */ + ret = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_WRDATA, + *dwptr++); + qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL, + QLA8044_FLASH_FIRST_MS_PATTERN); + + ret = qla8044_poll_flash_status_reg(vha); + if (ret) { + ql_log(ql_log_warn, vha, 0xb124, + "%s: Failed.\n", __func__); + goto exit_func; + } + + dwords--; + + qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR, + QLA8044_FLASH_SECOND_TEMP_VAL); + + + /* Second to N-1 DWORDS writes */ + while (dwords != 1) { + qla8044_wr_reg_indirect(vha, QLA8044_FLASH_WRDATA, *dwptr++); + qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL, + QLA8044_FLASH_SECOND_MS_PATTERN); + ret = qla8044_poll_flash_status_reg(vha); + if (ret) { + ql_log(ql_log_warn, vha, 0xb129, + "%s: Failed.\n", __func__); + goto exit_func; + } + dwords--; + } + + qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR, + QLA8044_FLASH_FIRST_TEMP_VAL | (faddr >> 2)); + + /* Last DWORD write */ + qla8044_wr_reg_indirect(vha, QLA8044_FLASH_WRDATA, *dwptr++); + qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL, + QLA8044_FLASH_LAST_MS_PATTERN); + ret = qla8044_poll_flash_status_reg(vha); + if (ret) { + ql_log(ql_log_warn, vha, 0xb12a, + "%s: Failed.\n", __func__); + goto exit_func; + } + qla8044_rd_reg_indirect(vha, QLA8044_FLASH_SPI_STATUS, &spi_val); + + if ((spi_val & QLA8044_FLASH_SPI_CTL) == QLA8044_FLASH_SPI_CTL) { + ql_log(ql_log_warn, vha, 0xb12b, + "%s: Failed.\n", __func__); + spi_val = 0; + /* Operation failed, clear error bit. 
*/ + qla8044_rd_reg_indirect(vha, QLA8044_FLASH_SPI_CONTROL, + &spi_val); + qla8044_wr_reg_indirect(vha, QLA8044_FLASH_SPI_CONTROL, + spi_val | QLA8044_FLASH_SPI_CTL); + } +exit_func: + return ret; +} + +static int +qla8044_write_flash_dword_mode(scsi_qla_host_t *vha, uint32_t *dwptr, + uint32_t faddr, uint32_t dwords) +{ + int ret = QLA_FUNCTION_FAILED; + uint32_t liter; + + for (liter = 0; liter < dwords; liter++, faddr += 4, dwptr++) { + ret = qla8044_flash_write_u32(vha, faddr, dwptr); + if (ret) { + ql_dbg(ql_dbg_p3p, vha, 0xb141, + "%s: flash address=%x data=%x.\n", __func__, + faddr, *dwptr); + break; + } + } + + return ret; +} + +int +qla8044_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf, + uint32_t offset, uint32_t length) +{ + int rval = QLA_FUNCTION_FAILED, i, burst_iter_count; + int dword_count, erase_sec_count; + uint32_t erase_offset; + uint8_t *p_cache, *p_src; + + erase_offset = offset; + + p_cache = kcalloc(length, sizeof(uint8_t), GFP_KERNEL); + if (!p_cache) + return QLA_FUNCTION_FAILED; + + memcpy(p_cache, buf, length); + p_src = p_cache; + dword_count = length / sizeof(uint32_t); + /* Since the offset and length are sector aligned, it will always be a + * multiple of burst_iter_count (64) + */ + burst_iter_count = dword_count / QLA8044_MAX_OPTROM_BURST_DWORDS; + erase_sec_count = length / QLA8044_SECTOR_SIZE; + + /* Suspend HBA. */ + scsi_block_requests(vha->host); + /* Lock and enable write for whole operation. */ + qla8044_flash_lock(vha); + qla8044_unprotect_flash(vha); + + /* Erasing the sectors */ + for (i = 0; i < erase_sec_count; i++) { + rval = qla8044_erase_flash_sector(vha, erase_offset); + ql_dbg(ql_dbg_user, vha, 0xb138, + "Done erase of sector=0x%x.\n", + erase_offset); + if (rval) { + ql_log(ql_log_warn, vha, 0xb121, + "Failed to erase the sector having address: " + "0x%x.\n", erase_offset); + goto out; + } + erase_offset += QLA8044_SECTOR_SIZE; + } + ql_dbg(ql_dbg_user, vha, 0xb13f, + "Got write for addr = 0x%x length=0x%x.\n", + offset, length); + + for (i = 0; i < burst_iter_count; i++) { + + /* Go with write. */ + rval = qla8044_write_flash_buffer_mode(vha, (uint32_t *)p_src, + offset, QLA8044_MAX_OPTROM_BURST_DWORDS); + if (rval) { + /* Buffer mode failed, skip to dword mode */ + ql_log(ql_log_warn, vha, 0xb122, + "Failed to write flash in buffer mode, " + "Reverting to slow-write.\n"); + rval = qla8044_write_flash_dword_mode(vha, + (uint32_t *)p_src, offset, + QLA8044_MAX_OPTROM_BURST_DWORDS); + } + p_src += sizeof(uint32_t) * QLA8044_MAX_OPTROM_BURST_DWORDS; + offset += sizeof(uint32_t) * QLA8044_MAX_OPTROM_BURST_DWORDS; + } + ql_dbg(ql_dbg_user, vha, 0xb133, + "Done writing.\n"); + +out: + qla8044_protect_flash(vha); + qla8044_flash_unlock(vha); + scsi_unblock_requests(vha->host); + kfree(p_cache); + + return rval; +} + +#define LEG_INT_PTR_B31 (1 << 31) +#define LEG_INT_PTR_B30 (1 << 30) +#define PF_BITS_MASK (0xF << 16) +/** + * qla8044_intr_handler() - Process interrupts for the ISP8044 + * @irq: interrupt number + * @dev_id: SCSI driver HA context + * + * Called by system whenever the host adapter generates an interrupt. + * + * Returns handled flag. 
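 *
 * Ownership check, condensed (ptr and own are illustrative names,
 * not symbols from this file): the interrupt belongs to this
 * function only if bit 31 of LEG_INTR_PTR is set and bits [19:16]
 * match the PCI function number:
 *
 *	own = (ptr & LEG_INT_PTR_B31) &&
 *	      ((ptr & PF_BITS_MASK) == (ha->portnum << 16));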
+ */ +irqreturn_t +qla8044_intr_handler(int irq, void *dev_id) +{ + scsi_qla_host_t *vha; + struct qla_hw_data *ha; + struct rsp_que *rsp; + struct device_reg_82xx __iomem *reg; + int status = 0; + unsigned long flags; + unsigned long iter; + uint32_t stat; + uint16_t mb[4]; + uint32_t leg_int_ptr = 0, pf_bit; + + rsp = (struct rsp_que *) dev_id; + if (!rsp) { + ql_log(ql_log_info, NULL, 0xb143, + "%s(): NULL response queue pointer\n", __func__); + return IRQ_NONE; + } + ha = rsp->hw; + vha = pci_get_drvdata(ha->pdev); + + if (unlikely(pci_channel_offline(ha->pdev))) + return IRQ_HANDLED; + + leg_int_ptr = qla8044_rd_reg(ha, LEG_INTR_PTR_OFFSET); + + /* Legacy interrupt is valid if bit31 of leg_int_ptr is set */ + if (!(leg_int_ptr & (LEG_INT_PTR_B31))) { + ql_dbg(ql_dbg_p3p, vha, 0xb144, + "%s: Legacy Interrupt Bit 31 not set, " + "spurious interrupt!\n", __func__); + return IRQ_NONE; + } + + pf_bit = ha->portnum << 16; + /* Validate the PCIE function ID set in leg_int_ptr bits [19..16] */ + if ((leg_int_ptr & (PF_BITS_MASK)) != pf_bit) { + ql_dbg(ql_dbg_p3p, vha, 0xb145, + "%s: Incorrect function ID 0x%x in " + "legacy interrupt register, " + "ha->pf_bit = 0x%x\n", __func__, + (leg_int_ptr & (PF_BITS_MASK)), pf_bit); + return IRQ_NONE; + } + + /* To de-assert legacy interrupt, write 0 to Legacy Interrupt Trigger + * Control register and poll till Legacy Interrupt Pointer register + * bit32 is 0. + */ + qla8044_wr_reg(ha, LEG_INTR_TRIG_OFFSET, 0); + do { + leg_int_ptr = qla8044_rd_reg(ha, LEG_INTR_PTR_OFFSET); + if ((leg_int_ptr & (PF_BITS_MASK)) != pf_bit) + break; + } while (leg_int_ptr & (LEG_INT_PTR_B30)); + + reg = &ha->iobase->isp82; + spin_lock_irqsave(&ha->hardware_lock, flags); + for (iter = 1; iter--; ) { + + if (RD_REG_DWORD(®->host_int)) { + stat = RD_REG_DWORD(®->host_status); + if ((stat & HSRX_RISC_INT) == 0) + break; + + switch (stat & 0xff) { + case 0x1: + case 0x2: + case 0x10: + case 0x11: + qla82xx_mbx_completion(vha, MSW(stat)); + status |= MBX_INTERRUPT; + break; + case 0x12: + mb[0] = MSW(stat); + mb[1] = RD_REG_WORD(®->mailbox_out[1]); + mb[2] = RD_REG_WORD(®->mailbox_out[2]); + mb[3] = RD_REG_WORD(®->mailbox_out[3]); + qla2x00_async_event(vha, rsp, mb); + break; + case 0x13: + qla24xx_process_response_queue(vha, rsp); + break; + default: + ql_dbg(ql_dbg_p3p, vha, 0xb146, + "Unrecognized interrupt type " + "(%d).\n", stat & 0xff); + break; + } + } + WRT_REG_DWORD(®->host_int, 0); + } + + qla2x00_handle_mbx_completion(ha, status); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + return IRQ_HANDLED; +} + +static int +qla8044_idc_dontreset(struct qla_hw_data *ha) +{ + uint32_t idc_ctrl; + + idc_ctrl = qla8044_rd_reg(ha, QLA8044_IDC_DRV_CTRL); + return idc_ctrl & DONTRESET_BIT0; +} + +static void +qla8044_clear_rst_ready(scsi_qla_host_t *vha) +{ + uint32_t drv_state; + + drv_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX); + + /* + * For ISP8044, drv_active register has 1 bit per function, + * shift 1 by func_num to set a bit for the function. 
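 * (e.g. function 3 clears its bit with ~(1 << 3), i.e. mask ~0x8).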
+ * For ISP82xx, drv_active has 4 bits per function + */ + drv_state &= ~(1 << vha->hw->portnum); + + ql_dbg(ql_dbg_p3p, vha, 0xb13d, + "drv_state: 0x%08x\n", drv_state); + qla8044_wr_direct(vha, QLA8044_CRB_DRV_STATE_INDEX, drv_state); +} + +int +qla8044_abort_isp(scsi_qla_host_t *vha) +{ + int rval; + uint32_t dev_state; + struct qla_hw_data *ha = vha->hw; + + qla8044_idc_lock(ha); + dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX); + + if (ql2xdontresethba) + qla8044_set_idc_dontreset(vha); + + /* If device_state is NEED_RESET, go ahead with + * Reset,irrespective of ql2xdontresethba. This is to allow a + * non-reset-owner to force a reset. Non-reset-owner sets + * the IDC_CTRL BIT0 to prevent Reset-owner from doing a Reset + * and then forces a Reset by setting device_state to + * NEED_RESET. */ + if (dev_state == QLA8XXX_DEV_READY) { + /* If IDC_CTRL DONTRESETHBA_BIT0 is set don't do reset + * recovery */ + if (qla8044_idc_dontreset(ha) == DONTRESET_BIT0) { + ql_dbg(ql_dbg_p3p, vha, 0xb13e, + "Reset recovery disabled\n"); + rval = QLA_FUNCTION_FAILED; + goto exit_isp_reset; + } + + ql_dbg(ql_dbg_p3p, vha, 0xb140, + "HW State: NEED RESET\n"); + qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX, + QLA8XXX_DEV_NEED_RESET); + } + + /* For ISP8044, Reset owner is NIC, iSCSI or FCOE based on priority + * and which drivers are present. Unlike ISP82XX, the function setting + * NEED_RESET, may not be the Reset owner. */ + qla83xx_reset_ownership(vha); + + qla8044_idc_unlock(ha); + rval = qla8044_device_state_handler(vha); + qla8044_idc_lock(ha); + qla8044_clear_rst_ready(vha); + +exit_isp_reset: + qla8044_idc_unlock(ha); + if (rval == QLA_SUCCESS) { + ha->flags.isp82xx_fw_hung = 0; + ha->flags.nic_core_reset_hdlr_active = 0; + rval = qla82xx_restart_isp(vha); + } + + return rval; +} + diff --git a/drivers/scsi/qla2xxx/qla_nx2.h b/drivers/scsi/qla2xxx/qla_nx2.h new file mode 100644 index 000000000000..2ab2eabab908 --- /dev/null +++ b/drivers/scsi/qla2xxx/qla_nx2.h @@ -0,0 +1,551 @@ +/* + * QLogic Fibre Channel HBA Driver + * Copyright (c) 2003-2013 QLogic Corporation + * + * See LICENSE.qla2xxx for copyright and licensing details. + */ + +#ifndef __QLA_NX2_H +#define __QLA_NX2_H + +#define QSNT_ACK_TOV 30 +#define INTENT_TO_RECOVER 0x01 +#define PROCEED_TO_RECOVER 0x02 +#define IDC_LOCK_RECOVERY_OWNER_MASK 0x3C +#define IDC_LOCK_RECOVERY_STATE_MASK 0x3 +#define IDC_LOCK_RECOVERY_STATE_SHIFT_BITS 2 + +#define QLA8044_DRV_LOCK_MSLEEP 200 +#define QLA8044_ADDR_DDR_NET (0x0000000000000000ULL) +#define QLA8044_ADDR_DDR_NET_MAX (0x000000000fffffffULL) + +#define MD_MIU_TEST_AGT_WRDATA_LO 0x410000A0 +#define MD_MIU_TEST_AGT_WRDATA_HI 0x410000A4 +#define MD_MIU_TEST_AGT_WRDATA_ULO 0x410000B0 +#define MD_MIU_TEST_AGT_WRDATA_UHI 0x410000B4 +#define MD_MIU_TEST_AGT_RDDATA_LO 0x410000A8 +#define MD_MIU_TEST_AGT_RDDATA_HI 0x410000AC +#define MD_MIU_TEST_AGT_RDDATA_ULO 0x410000B8 +#define MD_MIU_TEST_AGT_RDDATA_UHI 0x410000BC + +/* MIU_TEST_AGT_CTRL flags. work for SIU as well */ +#define MIU_TA_CTL_WRITE_ENABLE (MIU_TA_CTL_WRITE | MIU_TA_CTL_ENABLE) +#define MIU_TA_CTL_WRITE_START (MIU_TA_CTL_WRITE | MIU_TA_CTL_ENABLE | \ + MIU_TA_CTL_START) +#define MIU_TA_CTL_START_ENABLE (MIU_TA_CTL_START | MIU_TA_CTL_ENABLE) + +/* Imbus address bit used to indicate a host address. This bit is + * eliminated by the pcie bar and bar select before presentation + * over pcie. 
*/ +/* host memory via IMBUS */ +#define QLA8044_P2_ADDR_PCIE (0x0000000800000000ULL) +#define QLA8044_P3_ADDR_PCIE (0x0000008000000000ULL) +#define QLA8044_ADDR_PCIE_MAX (0x0000000FFFFFFFFFULL) +#define QLA8044_ADDR_OCM0 (0x0000000200000000ULL) +#define QLA8044_ADDR_OCM0_MAX (0x00000002000fffffULL) +#define QLA8044_ADDR_OCM1 (0x0000000200400000ULL) +#define QLA8044_ADDR_OCM1_MAX (0x00000002004fffffULL) +#define QLA8044_ADDR_QDR_NET (0x0000000300000000ULL) +#define QLA8044_P2_ADDR_QDR_NET_MAX (0x00000003001fffffULL) +#define QLA8044_P3_ADDR_QDR_NET_MAX (0x0000000303ffffffULL) +#define QLA8044_ADDR_QDR_NET_MAX (0x0000000307ffffffULL) +#define QLA8044_PCI_CRBSPACE ((unsigned long)0x06000000) +#define QLA8044_PCI_DIRECT_CRB ((unsigned long)0x04400000) +#define QLA8044_PCI_CAMQM ((unsigned long)0x04800000) +#define QLA8044_PCI_CAMQM_MAX ((unsigned long)0x04ffffff) +#define QLA8044_PCI_DDR_NET ((unsigned long)0x00000000) +#define QLA8044_PCI_QDR_NET ((unsigned long)0x04000000) +#define QLA8044_PCI_QDR_NET_MAX ((unsigned long)0x043fffff) + +/* PCI Windowing for DDR regions. */ +#define QLA8044_ADDR_IN_RANGE(addr, low, high) \ + (((addr) <= (high)) && ((addr) >= (low))) + +/* Indirectly Mapped Registers */ +#define QLA8044_FLASH_SPI_STATUS 0x2808E010 +#define QLA8044_FLASH_SPI_CONTROL 0x2808E014 +#define QLA8044_FLASH_STATUS 0x42100004 +#define QLA8044_FLASH_CONTROL 0x42110004 +#define QLA8044_FLASH_ADDR 0x42110008 +#define QLA8044_FLASH_WRDATA 0x4211000C +#define QLA8044_FLASH_RDDATA 0x42110018 +#define QLA8044_FLASH_DIRECT_WINDOW 0x42110030 +#define QLA8044_FLASH_DIRECT_DATA(DATA) (0x42150000 | (0x0000FFFF&DATA)) + +/* Flash access regs */ +#define QLA8044_FLASH_LOCK 0x3850 +#define QLA8044_FLASH_UNLOCK 0x3854 +#define QLA8044_FLASH_LOCK_ID 0x3500 + +/* Driver Lock regs */ +#define QLA8044_DRV_LOCK 0x3868 +#define QLA8044_DRV_UNLOCK 0x386C +#define QLA8044_DRV_LOCK_ID 0x3504 +#define QLA8044_DRV_LOCKRECOVERY 0x379C + +/* IDC version */ +#define QLA8044_IDC_VER_MAJ_VALUE 0x1 +#define QLA8044_IDC_VER_MIN_VALUE 0x0 + +/* IDC Registers : Driver Coexistence Defines */ +#define QLA8044_CRB_IDC_VER_MAJOR 0x3780 +#define QLA8044_CRB_IDC_VER_MINOR 0x3798 +#define QLA8044_IDC_DRV_AUDIT 0x3794 +#define QLA8044_SRE_SHIM_CONTROL 0x0D200284 +#define QLA8044_PORT0_RXB_PAUSE_THRS 0x0B2003A4 +#define QLA8044_PORT1_RXB_PAUSE_THRS 0x0B2013A4 +#define QLA8044_PORT0_RXB_TC_MAX_CELL 0x0B200388 +#define QLA8044_PORT1_RXB_TC_MAX_CELL 0x0B201388 +#define QLA8044_PORT0_RXB_TC_STATS 0x0B20039C +#define QLA8044_PORT1_RXB_TC_STATS 0x0B20139C +#define QLA8044_PORT2_IFB_PAUSE_THRS 0x0B200704 +#define QLA8044_PORT3_IFB_PAUSE_THRS 0x0B201704 + +/* set value to pause threshold value */ +#define QLA8044_SET_PAUSE_VAL 0x0 +#define QLA8044_SET_TC_MAX_CELL_VAL 0x03FF03FF +#define QLA8044_PEG_HALT_STATUS1 0x34A8 +#define QLA8044_PEG_HALT_STATUS2 0x34AC +#define QLA8044_PEG_ALIVE_COUNTER 0x34B0 /* FW_HEARTBEAT */ +#define QLA8044_FW_CAPABILITIES 0x3528 +#define QLA8044_CRB_DRV_ACTIVE 0x3788 /* IDC_DRV_PRESENCE */ +#define QLA8044_CRB_DEV_STATE 0x3784 /* IDC_DEV_STATE */ +#define QLA8044_CRB_DRV_STATE 0x378C /* IDC_DRV_ACK */ +#define QLA8044_CRB_DRV_SCRATCH 0x3548 +#define QLA8044_CRB_DEV_PART_INFO1 0x37E0 +#define QLA8044_CRB_DEV_PART_INFO2 0x37E4 +#define QLA8044_FW_VER_MAJOR 0x3550 +#define QLA8044_FW_VER_MINOR 0x3554 +#define QLA8044_FW_VER_SUB 0x3558 +#define QLA8044_NPAR_STATE 0x359C +#define QLA8044_FW_IMAGE_VALID 0x35FC +#define QLA8044_CMDPEG_STATE 0x3650 +#define QLA8044_ASIC_TEMP 0x37B4 +#define QLA8044_FW_API 0x356C 
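/*
 * The per-function window/semaphore macros defined below are plain
 * linear maps; a worked illustration for PCI function 2 (values
 * computed by hand from the macro definitions):
 *
 *	QLA8044_CRB_WIN_FUNC(2)  == 0x3800 + (2 * 4) == 0x3808
 *	QLA8044_SEM_LOCK_FUNC(2) == 0x3840 + (2 * 8) == 0x3850
 */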
+#define QLA8044_DRV_OP_MODE 0x3570 +#define QLA8044_CRB_WIN_BASE 0x3800 +#define QLA8044_CRB_WIN_FUNC(f) (QLA8044_CRB_WIN_BASE+((f)*4)) +#define QLA8044_SEM_LOCK_BASE 0x3840 +#define QLA8044_SEM_UNLOCK_BASE 0x3844 +#define QLA8044_SEM_LOCK_FUNC(f) (QLA8044_SEM_LOCK_BASE+((f)*8)) +#define QLA8044_SEM_UNLOCK_FUNC(f) (QLA8044_SEM_UNLOCK_BASE+((f)*8)) +#define QLA8044_LINK_STATE(f) (0x3698+((f) > 7 ? 4 : 0)) +#define QLA8044_LINK_SPEED(f) (0x36E0+(((f) >> 2) * 4)) +#define QLA8044_MAX_LINK_SPEED(f) (0x36F0+(((f) / 4) * 4)) +#define QLA8044_LINK_SPEED_FACTOR 10 + +/* FLASH API Defines */ +#define QLA8044_FLASH_MAX_WAIT_USEC 100 +#define QLA8044_FLASH_LOCK_TIMEOUT 10000 +#define QLA8044_FLASH_SECTOR_SIZE 65536 +#define QLA8044_DRV_LOCK_TIMEOUT 2000 +#define QLA8044_FLASH_SECTOR_ERASE_CMD 0xdeadbeef +#define QLA8044_FLASH_WRITE_CMD 0xdacdacda +#define QLA8044_FLASH_BUFFER_WRITE_CMD 0xcadcadca +#define QLA8044_FLASH_READ_RETRY_COUNT 2000 +#define QLA8044_FLASH_STATUS_READY 0x6 +#define QLA8044_FLASH_BUFFER_WRITE_MIN 2 +#define QLA8044_FLASH_BUFFER_WRITE_MAX 64 +#define QLA8044_FLASH_STATUS_REG_POLL_DELAY 1 +#define QLA8044_ERASE_MODE 1 +#define QLA8044_WRITE_MODE 2 +#define QLA8044_DWORD_WRITE_MODE 3 +#define QLA8044_GLOBAL_RESET 0x38CC +#define QLA8044_WILDCARD 0x38F0 +#define QLA8044_INFORMANT 0x38FC +#define QLA8044_HOST_MBX_CTRL 0x3038 +#define QLA8044_FW_MBX_CTRL 0x303C +#define QLA8044_BOOTLOADER_ADDR 0x355C +#define QLA8044_BOOTLOADER_SIZE 0x3560 +#define QLA8044_FW_IMAGE_ADDR 0x3564 +#define QLA8044_MBX_INTR_ENABLE 0x1000 +#define QLA8044_MBX_INTR_MASK 0x1200 + +/* IDC Control Register bit defines */ +#define DONTRESET_BIT0 0x1 +#define GRACEFUL_RESET_BIT1 0x2 + +/* ISP8044 PEG_HALT_STATUS1 bits */ +#define QLA8044_HALT_STATUS_INFORMATIONAL (0x1 << 29) +#define QLA8044_HALT_STATUS_FW_RESET (0x2 << 29) +#define QLA8044_HALT_STATUS_UNRECOVERABLE (0x4 << 29) + +/* Firmware image definitions */ +#define QLA8044_BOOTLOADER_FLASH_ADDR 0x10000 +#define QLA8044_BOOT_FROM_FLASH 0 +#define QLA8044_IDC_PARAM_ADDR 0x3e8020 + +/* FLASH related definitions */ +#define QLA8044_OPTROM_BURST_SIZE 0x100 +#define QLA8044_MAX_OPTROM_BURST_DWORDS (QLA8044_OPTROM_BURST_SIZE / 4) +#define QLA8044_MIN_OPTROM_BURST_DWORDS 2 +#define QLA8044_SECTOR_SIZE (64 * 1024) + +#define QLA8044_FLASH_SPI_CTL 0x4 +#define QLA8044_FLASH_FIRST_TEMP_VAL 0x00800000 +#define QLA8044_FLASH_SECOND_TEMP_VAL 0x00800001 +#define QLA8044_FLASH_FIRST_MS_PATTERN 0x43 +#define QLA8044_FLASH_SECOND_MS_PATTERN 0x7F +#define QLA8044_FLASH_LAST_MS_PATTERN 0x7D +#define QLA8044_FLASH_STATUS_WRITE_DEF_SIG 0xFD0100 +#define QLA8044_FLASH_SECOND_ERASE_MS_VAL 0x5 +#define QLA8044_FLASH_ERASE_SIG 0xFD0300 +#define QLA8044_FLASH_LAST_ERASE_MS_VAL 0x3D + +/* Reset template definitions */ +#define QLA8044_MAX_RESET_SEQ_ENTRIES 16 +#define QLA8044_RESTART_TEMPLATE_SIZE 0x2000 +#define QLA8044_RESET_TEMPLATE_ADDR 0x4F0000 +#define QLA8044_RESET_SEQ_VERSION 0x0101 + +/* Reset template entry opcodes */ +#define OPCODE_NOP 0x0000 +#define OPCODE_WRITE_LIST 0x0001 +#define OPCODE_READ_WRITE_LIST 0x0002 +#define OPCODE_POLL_LIST 0x0004 +#define OPCODE_POLL_WRITE_LIST 0x0008 +#define OPCODE_READ_MODIFY_WRITE 0x0010 +#define OPCODE_SEQ_PAUSE 0x0020 +#define OPCODE_SEQ_END 0x0040 +#define OPCODE_TMPL_END 0x0080 +#define OPCODE_POLL_READ_LIST 0x0100 + +/* Template Header */ +#define RESET_TMPLT_HDR_SIGNATURE 0xCAFE +#define QLA8044_IDC_DRV_CTRL 0x3790 +#define AF_8044_NO_FW_DUMP 27 /* 0x08000000 */ + +#define MINIDUMP_SIZE_36K 36864 + +struct 
qla8044_reset_template_hdr { + uint16_t version; + uint16_t signature; + uint16_t size; + uint16_t entries; + uint16_t hdr_size; + uint16_t checksum; + uint16_t init_seq_offset; + uint16_t start_seq_offset; +} __packed; + +/* Common Entry Header. */ +struct qla8044_reset_entry_hdr { + uint16_t cmd; + uint16_t size; + uint16_t count; + uint16_t delay; +} __packed; + +/* Generic poll entry type. */ +struct qla8044_poll { + uint32_t test_mask; + uint32_t test_value; +} __packed; + +/* Read modify write entry type. */ +struct qla8044_rmw { + uint32_t test_mask; + uint32_t xor_value; + uint32_t or_value; + uint8_t shl; + uint8_t shr; + uint8_t index_a; + uint8_t rsvd; +} __packed; + +/* Generic Entry Item with 2 DWords. */ +struct qla8044_entry { + uint32_t arg1; + uint32_t arg2; +} __packed; + +/* Generic Entry Item with 4 DWords.*/ +struct qla8044_quad_entry { + uint32_t dr_addr; + uint32_t dr_value; + uint32_t ar_addr; + uint32_t ar_value; +} __packed; + +struct qla8044_reset_template { + int seq_index; + int seq_error; + int array_index; + uint32_t array[QLA8044_MAX_RESET_SEQ_ENTRIES]; + uint8_t *buff; + uint8_t *stop_offset; + uint8_t *start_offset; + uint8_t *init_offset; + struct qla8044_reset_template_hdr *hdr; + uint8_t seq_end; + uint8_t template_end; +}; + +/* Driver_code is for driver to write some info about the entry + * currently not used. + */ +struct qla8044_minidump_entry_hdr { + uint32_t entry_type; + uint32_t entry_size; + uint32_t entry_capture_size; + struct { + uint8_t entry_capture_mask; + uint8_t entry_code; + uint8_t driver_code; + uint8_t driver_flags; + } d_ctrl; +} __packed; + +/* Read CRB entry header */ +struct qla8044_minidump_entry_crb { + struct qla8044_minidump_entry_hdr h; + uint32_t addr; + struct { + uint8_t addr_stride; + uint8_t state_index_a; + uint16_t poll_timeout; + } crb_strd; + uint32_t data_size; + uint32_t op_count; + + struct { + uint8_t opcode; + uint8_t state_index_v; + uint8_t shl; + uint8_t shr; + } crb_ctrl; + + uint32_t value_1; + uint32_t value_2; + uint32_t value_3; +} __packed; + +struct qla8044_minidump_entry_cache { + struct qla8044_minidump_entry_hdr h; + uint32_t tag_reg_addr; + struct { + uint16_t tag_value_stride; + uint16_t init_tag_value; + } addr_ctrl; + uint32_t data_size; + uint32_t op_count; + uint32_t control_addr; + struct { + uint16_t write_value; + uint8_t poll_mask; + uint8_t poll_wait; + } cache_ctrl; + uint32_t read_addr; + struct { + uint8_t read_addr_stride; + uint8_t read_addr_cnt; + uint16_t rsvd_1; + } read_ctrl; +} __packed; + +/* Read OCM */ +struct qla8044_minidump_entry_rdocm { + struct qla8044_minidump_entry_hdr h; + uint32_t rsvd_0; + uint32_t rsvd_1; + uint32_t data_size; + uint32_t op_count; + uint32_t rsvd_2; + uint32_t rsvd_3; + uint32_t read_addr; + uint32_t read_addr_stride; +} __packed; + +/* Read Memory */ +struct qla8044_minidump_entry_rdmem { + struct qla8044_minidump_entry_hdr h; + uint32_t rsvd[6]; + uint32_t read_addr; + uint32_t read_data_size; +}; + +/* Read Memory: For Pex-DMA */ +struct qla8044_minidump_entry_rdmem_pex_dma { + struct qla8044_minidump_entry_hdr h; + uint32_t desc_card_addr; + uint16_t dma_desc_cmd; + uint8_t rsvd[2]; + uint32_t start_dma_cmd; + uint8_t rsvd2[12]; + uint32_t read_addr; + uint32_t read_data_size; +} __packed; + +/* Read ROM */ +struct qla8044_minidump_entry_rdrom { + struct qla8044_minidump_entry_hdr h; + uint32_t rsvd[6]; + uint32_t read_addr; + uint32_t read_data_size; +} __packed; + +/* Mux entry */ +struct qla8044_minidump_entry_mux { + struct 
qla8044_minidump_entry_hdr h; + uint32_t select_addr; + uint32_t rsvd_0; + uint32_t data_size; + uint32_t op_count; + uint32_t select_value; + uint32_t select_value_stride; + uint32_t read_addr; + uint32_t rsvd_1; +} __packed; + +/* Queue entry */ +struct qla8044_minidump_entry_queue { + struct qla8044_minidump_entry_hdr h; + uint32_t select_addr; + struct { + uint16_t queue_id_stride; + uint16_t rsvd_0; + } q_strd; + uint32_t data_size; + uint32_t op_count; + uint32_t rsvd_1; + uint32_t rsvd_2; + uint32_t read_addr; + struct { + uint8_t read_addr_stride; + uint8_t read_addr_cnt; + uint16_t rsvd_3; + } rd_strd; +} __packed; + +/* POLLRD Entry */ +struct qla8044_minidump_entry_pollrd { + struct qla8044_minidump_entry_hdr h; + uint32_t select_addr; + uint32_t read_addr; + uint32_t select_value; + uint16_t select_value_stride; + uint16_t op_count; + uint32_t poll_wait; + uint32_t poll_mask; + uint32_t data_size; + uint32_t rsvd_1; +} __packed; + +/* RDMUX2 Entry */ +struct qla8044_minidump_entry_rdmux2 { + struct qla8044_minidump_entry_hdr h; + uint32_t select_addr_1; + uint32_t select_addr_2; + uint32_t select_value_1; + uint32_t select_value_2; + uint32_t op_count; + uint32_t select_value_mask; + uint32_t read_addr; + uint8_t select_value_stride; + uint8_t data_size; + uint8_t rsvd[2]; +} __packed; + +/* POLLRDMWR Entry */ +struct qla8044_minidump_entry_pollrdmwr { + struct qla8044_minidump_entry_hdr h; + uint32_t addr_1; + uint32_t addr_2; + uint32_t value_1; + uint32_t value_2; + uint32_t poll_wait; + uint32_t poll_mask; + uint32_t modify_mask; + uint32_t data_size; +} __packed; + +/* IDC additional information */ +struct qla8044_idc_information { + uint32_t request_desc; /* IDC request descriptor */ + uint32_t info1; /* IDC additional info */ + uint32_t info2; /* IDC additional info */ + uint32_t info3; /* IDC additional info */ +} __packed; + +enum qla_regs { + QLA8044_PEG_HALT_STATUS1_INDEX = 0, + QLA8044_PEG_HALT_STATUS2_INDEX, + QLA8044_PEG_ALIVE_COUNTER_INDEX, + QLA8044_CRB_DRV_ACTIVE_INDEX, + QLA8044_CRB_DEV_STATE_INDEX, + QLA8044_CRB_DRV_STATE_INDEX, + QLA8044_CRB_DRV_SCRATCH_INDEX, + QLA8044_CRB_DEV_PART_INFO_INDEX, + QLA8044_CRB_DRV_IDC_VERSION_INDEX, + QLA8044_FW_VERSION_MAJOR_INDEX, + QLA8044_FW_VERSION_MINOR_INDEX, + QLA8044_FW_VERSION_SUB_INDEX, + QLA8044_CRB_CMDPEG_STATE_INDEX, + QLA8044_CRB_TEMP_STATE_INDEX, +} __packed; + +#define CRB_REG_INDEX_MAX 14 +#define CRB_CMDPEG_CHECK_RETRY_COUNT 60 +#define CRB_CMDPEG_CHECK_DELAY 500 + +static const uint32_t qla8044_reg_tbl[] = { + QLA8044_PEG_HALT_STATUS1, + QLA8044_PEG_HALT_STATUS2, + QLA8044_PEG_ALIVE_COUNTER, + QLA8044_CRB_DRV_ACTIVE, + QLA8044_CRB_DEV_STATE, + QLA8044_CRB_DRV_STATE, + QLA8044_CRB_DRV_SCRATCH, + QLA8044_CRB_DEV_PART_INFO1, + QLA8044_CRB_IDC_VER_MAJOR, + QLA8044_FW_VER_MAJOR, + QLA8044_FW_VER_MINOR, + QLA8044_FW_VER_SUB, + QLA8044_CMDPEG_STATE, + QLA8044_ASIC_TEMP, +}; + +/* MiniDump Structures */ + +/* Driver_code is for driver to write some info about the entry + * currently not used. 
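 *
 * The template header's saved_state_array also serves as a parameter
 * block: slot 3 (QLA8044_SS_OCM_WNDREG_INDEX) is loaded by
 * qla8044_collect_md_data() with the port's ocm_window_reg value,
 * and slot 8 (ISP8044_PEX_DMA_ENGINE_INDEX) names the pex-dma engine
 * used for the fast rdmem capture path.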
+ */ +#define QLA8044_SS_OCM_WNDREG_INDEX 3 +#define QLA8044_DBG_STATE_ARRAY_LEN 16 +#define QLA8044_DBG_CAP_SIZE_ARRAY_LEN 8 +#define QLA8044_DBG_RSVD_ARRAY_LEN 8 +#define QLA8044_DBG_OCM_WNDREG_ARRAY_LEN 16 +#define QLA8044_SS_PCI_INDEX 0 + +struct qla8044_minidump_template_hdr { + uint32_t entry_type; + uint32_t first_entry_offset; + uint32_t size_of_template; + uint32_t capture_debug_level; + uint32_t num_of_entries; + uint32_t version; + uint32_t driver_timestamp; + uint32_t checksum; + + uint32_t driver_capture_mask; + uint32_t driver_info_word2; + uint32_t driver_info_word3; + uint32_t driver_info_word4; + + uint32_t saved_state_array[QLA8044_DBG_STATE_ARRAY_LEN]; + uint32_t capture_size_array[QLA8044_DBG_CAP_SIZE_ARRAY_LEN]; + uint32_t ocm_window_reg[QLA8044_DBG_OCM_WNDREG_ARRAY_LEN]; +}; + +struct qla8044_pex_dma_descriptor { + struct { + uint32_t read_data_size; /* 0-23: size, 24-31: rsvd */ + uint8_t rsvd[2]; + uint16_t dma_desc_cmd; + } cmd; + uint64_t src_addr; + uint64_t dma_bus_addr; /*0-3: desc-cmd, 4-7: pci-func, 8-15: desc-cmd*/ + uint8_t rsvd[24]; +} __packed; + +#endif diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 3e21e9fc9d91..9f01bbbf3a26 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -1247,7 +1247,7 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd) if (qla2x00_vp_abort_isp(vha)) goto eh_host_reset_lock; } else { - if (IS_QLA82XX(vha->hw)) { + if (IS_P3P_TYPE(vha->hw)) { if (!qla82xx_fcoe_ctx_reset(vha)) { /* Ctx reset success */ ret = SUCCESS; @@ -1303,6 +1303,10 @@ qla2x00_loop_reset(scsi_qla_host_t *vha) struct fc_port *fcport; struct qla_hw_data *ha = vha->hw; + if (IS_QLAFX00(ha)) { + return qlafx00_loop_reset(vha); + } + if (ql2xtargetreset == 1 && ha->flags.enable_target_reset) { list_for_each_entry(fcport, &vha->vp_fcports, list) { if (fcport->port_type != FCT_TARGET) @@ -1311,14 +1315,12 @@ qla2x00_loop_reset(scsi_qla_host_t *vha) ret = ha->isp_ops->target_reset(fcport, 0, 0); if (ret != QLA_SUCCESS) { ql_dbg(ql_dbg_taskm, vha, 0x802c, - "Bus Reset failed: Target Reset=%d " + "Bus Reset failed: Reset=%d " "d_id=%x.\n", ret, fcport->d_id.b24); } } } - if (IS_QLAFX00(ha)) - return QLA_SUCCESS; if (ha->flags.enable_lip_full_login && !IS_CNA_CAPABLE(ha)) { atomic_set(&vha->loop_state, LOOP_DOWN); @@ -1506,7 +1508,7 @@ qla2x00_host_ramp_down_queuedepth(scsi_qla_host_t *vha) if (sdev->queue_depth > shost->cmd_per_lun) { if (sdev->queue_depth < ha->cfg_lun_q_depth) continue; - ql_log(ql_log_warn, vp, 0x3031, + ql_dbg(ql_dbg_io, vp, 0x3031, "%ld:%d:%d: Ramping down queue depth to %d", vp->host_no, sdev->id, sdev->lun, ha->cfg_lun_q_depth); @@ -1911,7 +1913,7 @@ static struct isp_operations qla2300_isp_ops = { .get_flash_version = qla2x00_get_flash_version, .start_scsi = qla2x00_start_scsi, .abort_isp = qla2x00_abort_isp, - .iospace_config = qla2x00_iospace_config, + .iospace_config = qla2x00_iospace_config, .initialize_adapter = qla2x00_initialize_adapter, }; @@ -1949,7 +1951,7 @@ static struct isp_operations qla24xx_isp_ops = { .get_flash_version = qla24xx_get_flash_version, .start_scsi = qla24xx_start_scsi, .abort_isp = qla2x00_abort_isp, - .iospace_config = qla2x00_iospace_config, + .iospace_config = qla2x00_iospace_config, .initialize_adapter = qla2x00_initialize_adapter, }; @@ -1987,7 +1989,7 @@ static struct isp_operations qla25xx_isp_ops = { .get_flash_version = qla24xx_get_flash_version, .start_scsi = qla24xx_dif_start_scsi, .abort_isp = qla2x00_abort_isp, - .iospace_config = 
qla2x00_iospace_config, + .iospace_config = qla2x00_iospace_config, .initialize_adapter = qla2x00_initialize_adapter, }; @@ -2025,7 +2027,7 @@ static struct isp_operations qla81xx_isp_ops = { .get_flash_version = qla24xx_get_flash_version, .start_scsi = qla24xx_dif_start_scsi, .abort_isp = qla2x00_abort_isp, - .iospace_config = qla2x00_iospace_config, + .iospace_config = qla2x00_iospace_config, .initialize_adapter = qla2x00_initialize_adapter, }; @@ -2060,13 +2062,51 @@ static struct isp_operations qla82xx_isp_ops = { .beacon_blink = NULL, .read_optrom = qla82xx_read_optrom_data, .write_optrom = qla82xx_write_optrom_data, - .get_flash_version = qla24xx_get_flash_version, + .get_flash_version = qla82xx_get_flash_version, .start_scsi = qla82xx_start_scsi, .abort_isp = qla82xx_abort_isp, .iospace_config = qla82xx_iospace_config, .initialize_adapter = qla2x00_initialize_adapter, }; +static struct isp_operations qla8044_isp_ops = { + .pci_config = qla82xx_pci_config, + .reset_chip = qla82xx_reset_chip, + .chip_diag = qla24xx_chip_diag, + .config_rings = qla82xx_config_rings, + .reset_adapter = qla24xx_reset_adapter, + .nvram_config = qla81xx_nvram_config, + .update_fw_options = qla24xx_update_fw_options, + .load_risc = qla82xx_load_risc, + .pci_info_str = qla24xx_pci_info_str, + .fw_version_str = qla24xx_fw_version_str, + .intr_handler = qla8044_intr_handler, + .enable_intrs = qla82xx_enable_intrs, + .disable_intrs = qla82xx_disable_intrs, + .abort_command = qla24xx_abort_command, + .target_reset = qla24xx_abort_target, + .lun_reset = qla24xx_lun_reset, + .fabric_login = qla24xx_login_fabric, + .fabric_logout = qla24xx_fabric_logout, + .calc_req_entries = NULL, + .build_iocbs = NULL, + .prep_ms_iocb = qla24xx_prep_ms_iocb, + .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb, + .read_nvram = NULL, + .write_nvram = NULL, + .fw_dump = qla24xx_fw_dump, + .beacon_on = qla82xx_beacon_on, + .beacon_off = qla82xx_beacon_off, + .beacon_blink = NULL, + .read_optrom = qla82xx_read_optrom_data, + .write_optrom = qla8044_write_optrom_data, + .get_flash_version = qla82xx_get_flash_version, + .start_scsi = qla82xx_start_scsi, + .abort_isp = qla8044_abort_isp, + .iospace_config = qla82xx_iospace_config, + .initialize_adapter = qla2x00_initialize_adapter, +}; + static struct isp_operations qla83xx_isp_ops = { .pci_config = qla25xx_pci_config, .reset_chip = qla24xx_reset_chip, @@ -2237,6 +2277,14 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha) /* Initialize 82XX ISP flags */ qla82xx_init_flags(ha); break; + case PCI_DEVICE_ID_QLOGIC_ISP8044: + ha->device_type |= DT_ISP8044; + ha->device_type |= DT_ZIO_SUPPORTED; + ha->device_type |= DT_FWI2; + ha->fw_srisc_address = RISC_START_ADDRESS_2400; + /* Initialize 82XX ISP flags */ + qla82xx_init_flags(ha); + break; case PCI_DEVICE_ID_QLOGIC_ISP2031: ha->device_type |= DT_ISP2031; ha->device_type |= DT_ZIO_SUPPORTED; @@ -2317,7 +2365,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) uint16_t req_length = 0, rsp_length = 0; struct req_que *req = NULL; struct rsp_que *rsp = NULL; - bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO); sht = &qla2xxx_driver_template; if (pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2422 || @@ -2330,7 +2377,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8021 || pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2031 || pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8031 || - pdev->device == PCI_DEVICE_ID_QLOGIC_ISPF001) { + pdev->device == 
PCI_DEVICE_ID_QLOGIC_ISPF001 || + pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8044) { bars = pci_select_bars(pdev, IORESOURCE_MEM); mem_only = 1; ql_dbg_pci(ql_dbg_init, pdev, 0x0007, @@ -2484,6 +2532,21 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) ha->flash_data_off = FARX_ACCESS_FLASH_DATA; ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF; ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA; + } else if (IS_QLA8044(ha)) { + ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; + ha->mbx_count = MAILBOX_REGISTER_COUNT; + req_length = REQUEST_ENTRY_CNT_82XX; + rsp_length = RESPONSE_ENTRY_CNT_82XX; + ha->max_loop_id = SNS_LAST_LOOP_ID_2300; + ha->init_cb_size = sizeof(struct mid_init_cb_81xx); + ha->gid_list_info_size = 8; + ha->optrom_size = OPTROM_SIZE_83XX; + ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; + ha->isp_ops = &qla8044_isp_ops; + ha->flash_conf_off = FARX_ACCESS_FLASH_CONF; + ha->flash_data_off = FARX_ACCESS_FLASH_DATA; + ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF; + ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA; } else if (IS_QLA83XX(ha)) { ha->portnum = PCI_FUNC(ha->pdev->devfn); ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; @@ -2512,6 +2575,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) ha->port_down_retry_count = 30; /* default value */ ha->mr.fw_hbt_cnt = QLAFX00_HEARTBEAT_INTERVAL; ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL; + ha->mr.fw_critemp_timer_tick = QLAFX00_CRITEMP_INTERVAL; ha->mr.fw_hbt_en = 1; } @@ -2676,7 +2740,7 @@ que_init: rsp->rsp_q_out = &ha->iobase->ispfx00.rsp_q_out; } - if (IS_QLA82XX(ha)) { + if (IS_P3P_TYPE(ha)) { req->req_q_out = &ha->iobase->isp82.req_q_out[0]; rsp->rsp_q_in = &ha->iobase->isp82.rsp_q_in[0]; rsp->rsp_q_out = &ha->iobase->isp82.rsp_q_out[0]; @@ -2709,6 +2773,14 @@ que_init: qla82xx_idc_unlock(ha); ql_log(ql_log_fatal, base_vha, 0x00d7, "HW State: FAILED.\n"); + } else if (IS_QLA8044(ha)) { + qla8044_idc_lock(ha); + qla8044_wr_direct(base_vha, + QLA8044_CRB_DEV_STATE_INDEX, + QLA8XXX_DEV_FAILED); + qla8044_idc_unlock(ha); + ql_log(ql_log_fatal, base_vha, 0x0150, + "HW State: FAILED.\n"); } ret = -ENODEV; @@ -2804,6 +2876,13 @@ skip_dpc: ha->isp_ops->enable_intrs(ha); + if (IS_QLAFX00(ha)) { + ret = qlafx00_fx_disc(base_vha, + &base_vha->hw->mr.fcport, FXDISC_GET_CONFIG_INFO); + host->sg_tablesize = (ha->mr.extended_io_enabled) ? 
+ QLA_SG_ALL : 128; + } + ret = scsi_add_host(host, &pdev->dev); if (ret) goto probe_failed; @@ -2824,9 +2903,6 @@ skip_dpc: if (IS_QLAFX00(ha)) { ret = qlafx00_fx_disc(base_vha, - &base_vha->hw->mr.fcport, FXDISC_GET_CONFIG_INFO); - - ret = qlafx00_fx_disc(base_vha, &base_vha->hw->mr.fcport, FXDISC_GET_PORT_INFO); /* Register system information */ @@ -2881,8 +2957,13 @@ probe_hw_failed: qla82xx_clear_drv_active(ha); qla82xx_idc_unlock(ha); } + if (IS_QLA8044(ha)) { + qla8044_idc_lock(ha); + qla8044_clear_drv_active(base_vha); + qla8044_idc_unlock(ha); + } iospace_config_failed: - if (IS_QLA82XX(ha)) { + if (IS_P3P_TYPE(ha)) { if (!ha->nx_pcibase) iounmap((device_reg_t __iomem *)ha->nx_pcibase); if (!ql2xdbwr) @@ -2930,6 +3011,10 @@ qla2x00_shutdown(struct pci_dev *pdev) vha = pci_get_drvdata(pdev); ha = vha->hw; + /* Notify ISPFX00 firmware */ + if (IS_QLAFX00(ha)) + qlafx00_driver_shutdown(vha, 20); + /* Turn-off FCE trace */ if (ha->flags.fce_enabled) { qla2x00_disable_fce_trace(vha, NULL, NULL); @@ -2977,6 +3062,9 @@ qla2x00_remove_one(struct pci_dev *pdev) ha->flags.host_shutting_down = 1; set_bit(UNLOADING, &base_vha->dpc_flags); + if (IS_QLAFX00(ha)) + qlafx00_driver_shutdown(base_vha, 20); + mutex_lock(&ha->vport_lock); while (ha->cur_vport_count) { spin_lock_irqsave(&ha->vport_slock, flags); @@ -3061,6 +3149,11 @@ qla2x00_remove_one(struct pci_dev *pdev) scsi_host_put(base_vha->host); + if (IS_QLA8044(ha)) { + qla8044_idc_lock(ha); + qla8044_clear_drv_active(base_vha); + qla8044_idc_unlock(ha); + } if (IS_QLA82XX(ha)) { qla82xx_idc_lock(ha); qla82xx_clear_drv_active(ha); @@ -3210,14 +3303,8 @@ void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport, set_bit(RELOGIN_NEEDED, &vha->dpc_flags); ql_dbg(ql_dbg_disc, vha, 0x2067, - "Port login retry " - "%02x%02x%02x%02x%02x%02x%02x%02x, " - "id = 0x%04x retry cnt=%d.\n", - fcport->port_name[0], fcport->port_name[1], - fcport->port_name[2], fcport->port_name[3], - fcport->port_name[4], fcport->port_name[5], - fcport->port_name[6], fcport->port_name[7], - fcport->loop_id, fcport->login_retry); + "Port login retry %8phN, id = 0x%04x retry cnt=%d.\n", + fcport->port_name, fcport->loop_id, fcport->login_retry); } } @@ -3290,7 +3377,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, if (!ha->srb_mempool) goto fail_free_gid_list; - if (IS_QLA82XX(ha)) { + if (IS_P3P_TYPE(ha)) { /* Allocate cache for CT6 Ctx. */ if (!ctx_cachep) { ctx_cachep = kmem_cache_create("qla2xxx_ctx", @@ -3324,7 +3411,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, "init_cb=%p gid_list=%p, srb_mempool=%p s_dma_pool=%p.\n", ha->init_cb, ha->gid_list, ha->srb_mempool, ha->s_dma_pool); - if (IS_QLA82XX(ha) || ql2xenabledif) { + if (IS_P3P_TYPE(ha) || ql2xenabledif) { ha->dl_dma_pool = dma_pool_create(name, &ha->pdev->dev, DSD_LIST_DMA_POOL_SIZE, 8, 0); if (!ha->dl_dma_pool) { @@ -3532,7 +3619,7 @@ fail: * Frees fw dump stuff. * * Input: -* ha = adapter block pointer. 
+* ha = adapter block pointer */ static void qla2x00_free_fw_dump(struct qla_hw_data *ha) @@ -4699,17 +4786,33 @@ qla2x00_do_dpc(void *data) qla2x00_do_work(base_vha); - if (IS_QLA82XX(ha)) { - if (test_and_clear_bit(ISP_UNRECOVERABLE, - &base_vha->dpc_flags)) { - qla82xx_idc_lock(ha); - qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, - QLA8XXX_DEV_FAILED); - qla82xx_idc_unlock(ha); - ql_log(ql_log_info, base_vha, 0x4004, - "HW State: FAILED.\n"); - qla82xx_device_state_handler(base_vha); - continue; + if (IS_P3P_TYPE(ha)) { + if (IS_QLA8044(ha)) { + if (test_and_clear_bit(ISP_UNRECOVERABLE, + &base_vha->dpc_flags)) { + qla8044_idc_lock(ha); + qla8044_wr_direct(base_vha, + QLA8044_CRB_DEV_STATE_INDEX, + QLA8XXX_DEV_FAILED); + qla8044_idc_unlock(ha); + ql_log(ql_log_info, base_vha, 0x4004, + "HW State: FAILED.\n"); + qla8044_device_state_handler(base_vha); + continue; + } + + } else { + if (test_and_clear_bit(ISP_UNRECOVERABLE, + &base_vha->dpc_flags)) { + qla82xx_idc_lock(ha); + qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, + QLA8XXX_DEV_FAILED); + qla82xx_idc_unlock(ha); + ql_log(ql_log_info, base_vha, 0x0151, + "HW State: FAILED.\n"); + qla82xx_device_state_handler(base_vha); + continue; + } } if (test_and_clear_bit(FCOE_CTX_RESET_NEEDED, @@ -4809,16 +4912,26 @@ qla2x00_do_dpc(void *data) if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) { ql_dbg(ql_dbg_dpc, base_vha, 0x4009, "Quiescence mode scheduled.\n"); - if (IS_QLA82XX(ha)) { - qla82xx_device_state_handler(base_vha); + if (IS_P3P_TYPE(ha)) { + if (IS_QLA82XX(ha)) + qla82xx_device_state_handler(base_vha); + if (IS_QLA8044(ha)) + qla8044_device_state_handler(base_vha); clear_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags); if (!ha->flags.quiesce_owner) { qla2x00_perform_loop_resync(base_vha); - - qla82xx_idc_lock(ha); - qla82xx_clear_qsnt_ready(base_vha); - qla82xx_idc_unlock(ha); + if (IS_QLA82XX(ha)) { + qla82xx_idc_lock(ha); + qla82xx_clear_qsnt_ready( + base_vha); + qla82xx_idc_unlock(ha); + } else if (IS_QLA8044(ha)) { + qla8044_idc_lock(ha); + qla8044_clear_qsnt_ready( + base_vha); + qla8044_idc_unlock(ha); + } } } else { clear_bit(ISP_QUIESCE_NEEDED, @@ -4992,10 +5105,13 @@ qla2x00_timer(scsi_qla_host_t *vha) pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w); /* Make sure qla82xx_watchdog is run only for physical port */ - if (!vha->vp_idx && IS_QLA82XX(ha)) { + if (!vha->vp_idx && IS_P3P_TYPE(ha)) { if (test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) start_dpc++; - qla82xx_watchdog(vha); + if (IS_QLA82XX(ha)) + qla82xx_watchdog(vha); + else if (IS_QLA8044(ha)) + qla8044_watchdog(vha); } if (!vha->vp_idx && IS_QLAFX00(ha)) @@ -5075,7 +5191,7 @@ qla2x00_timer(scsi_qla_host_t *vha) /* Check if beacon LED needs to be blinked for physical host only */ if (!vha->vp_idx && (ha->beacon_blink_led == 1)) { /* There is no beacon_blink function for ISP82xx */ - if (!IS_QLA82XX(ha)) { + if (!IS_P3P_TYPE(ha)) { set_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags); start_dpc++; } @@ -5519,6 +5635,7 @@ static struct pci_device_id qla2xxx_pci_tbl[] = { { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8021) }, { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8031) }, { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISPF001) }, + { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8044) }, { 0 }, }; MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl); diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c index 3bef6736d885..bd56cde795fc 100644 --- a/drivers/scsi/qla2xxx/qla_sup.c +++ 
b/drivers/scsi/qla2xxx/qla_sup.c @@ -565,7 +565,7 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start) *start = FA_FLASH_LAYOUT_ADDR; else if (IS_QLA81XX(ha)) *start = FA_FLASH_LAYOUT_ADDR_81; - else if (IS_QLA82XX(ha)) { + else if (IS_P3P_TYPE(ha)) { *start = FA_FLASH_LAYOUT_ADDR_82; goto end; } else if (IS_QLA83XX(ha)) { @@ -719,7 +719,7 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr) start = le32_to_cpu(region->start) >> 2; ql_dbg(ql_dbg_init, vha, 0x0049, "FLT[%02x]: start=0x%x " - "end=0x%x size=0x%x.\n", le32_to_cpu(region->code), + "end=0x%x size=0x%x.\n", le32_to_cpu(region->code) & 0xff, start, le32_to_cpu(region->end) >> 2, le32_to_cpu(region->size)); @@ -741,13 +741,13 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr) if (IS_QLA8031(ha)) break; ha->flt_region_vpd_nvram = start; - if (IS_QLA82XX(ha)) + if (IS_P3P_TYPE(ha)) break; if (ha->flags.port0) ha->flt_region_vpd = start; break; case FLT_REG_VPD_1: - if (IS_QLA82XX(ha) || IS_QLA8031(ha)) + if (IS_P3P_TYPE(ha) || IS_QLA8031(ha)) break; if (!ha->flags.port0) ha->flt_region_vpd = start; @@ -789,9 +789,17 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr) case FLT_REG_BOOT_CODE_82XX: ha->flt_region_boot = start; break; + case FLT_REG_BOOT_CODE_8044: + if (IS_QLA8044(ha)) + ha->flt_region_boot = start; + break; case FLT_REG_FW_82XX: ha->flt_region_fw = start; break; + case FLT_REG_CNA_FW: + if (IS_CNA_CAPABLE(ha)) + ha->flt_region_fw = start; + break; case FLT_REG_GOLD_FW_82XX: ha->flt_region_gold_fw = start; break; @@ -803,13 +811,13 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr) ha->flt_region_vpd = start; break; case FLT_REG_FCOE_NVRAM_0: - if (!IS_QLA8031(ha)) + if (!(IS_QLA8031(ha) || IS_QLA8044(ha))) break; if (ha->flags.port0) ha->flt_region_nvram = start; break; case FLT_REG_FCOE_NVRAM_1: - if (!IS_QLA8031(ha)) + if (!(IS_QLA8031(ha) || IS_QLA8044(ha))) break; if (!ha->flags.port0) ha->flt_region_nvram = start; @@ -883,7 +891,13 @@ qla2xxx_get_fdt_info(scsi_qla_host_t *vha) mid = le16_to_cpu(fdt->man_id); fid = le16_to_cpu(fdt->id); ha->fdt_wrt_disable = fdt->wrt_disable_bits; - ha->fdt_erase_cmd = flash_conf_addr(ha, 0x0300 | fdt->erase_cmd); + ha->fdt_wrt_enable = fdt->wrt_enable_bits; + ha->fdt_wrt_sts_reg_cmd = fdt->wrt_sts_reg_cmd; + if (IS_QLA8044(ha)) + ha->fdt_erase_cmd = fdt->erase_cmd; + else + ha->fdt_erase_cmd = + flash_conf_addr(ha, 0x0300 | fdt->erase_cmd); ha->fdt_block_size = le32_to_cpu(fdt->block_size); if (fdt->unprotect_sec_cmd) { ha->fdt_unprotect_sec_cmd = flash_conf_addr(ha, 0x0300 | @@ -895,7 +909,7 @@ qla2xxx_get_fdt_info(scsi_qla_host_t *vha) goto done; no_flash_data: loc = locations[0]; - if (IS_QLA82XX(ha)) { + if (IS_P3P_TYPE(ha)) { ha->fdt_block_size = FLASH_BLK_SIZE_64K; goto done; } @@ -946,7 +960,7 @@ qla2xxx_get_idc_param(scsi_qla_host_t *vha) struct qla_hw_data *ha = vha->hw; struct req_que *req = ha->req_q_map[0]; - if (!IS_QLA82XX(ha)) + if (!(IS_P3P_TYPE(ha))) return; wptr = (uint32_t *)req->ring; @@ -1008,6 +1022,9 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha) if (ha->flags.nic_core_reset_hdlr_active) return; + if (IS_QLA8044(ha)) + return; + ha->isp_ops->read_optrom(vha, (uint8_t *)&hdr, ha->flt_region_npiv_conf << 2, sizeof(struct qla_npiv_header)); if (hdr.version == __constant_cpu_to_le16(0xffff)) @@ -1302,7 +1319,7 @@ qla24xx_read_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr, uint32_t *dwptr; struct qla_hw_data *ha = vha->hw; - if (IS_QLA82XX(ha)) + if (IS_P3P_TYPE(ha)) return buf; 
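/* The IS_QLA82XX() -> IS_P3P_TYPE() conversions running through these
 * hunks funnel the new ISP8044 into every existing ISP82xx code path.
 * A minimal sketch of the predicate, assuming the driver's usual
 * device_type bit-flag scheme (DT_ISP8044 is set in
 * qla2x00_set_isp_flags() above; the exact DT_MASK() plumbing is
 * elided here):
 *
 *	#define IS_QLA82XX(ha)	(DT_MASK(ha) & DT_ISP8021)
 *	#define IS_QLA8044(ha)	(DT_MASK(ha) & DT_ISP8044)
 *	#define IS_P3P_TYPE(ha)	(IS_QLA82XX(ha) || IS_QLA8044(ha))
 *
 * so sites like this NVRAM-read early return stay a single test
 * instead of growing a per-chip branch for each new P3P part.
 */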
/* Dword reads to flash. */ @@ -1360,7 +1377,7 @@ qla24xx_write_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr, ret = QLA_SUCCESS; - if (IS_QLA82XX(ha)) + if (IS_P3P_TYPE(ha)) return ret; /* Enable flash write. */ @@ -1474,7 +1491,7 @@ qla2x00_beacon_blink(struct scsi_qla_host *vha) struct qla_hw_data *ha = vha->hw; struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; - if (IS_QLA82XX(ha)) + if (IS_P3P_TYPE(ha)) return; spin_lock_irqsave(&ha->hardware_lock, flags); @@ -1752,7 +1769,7 @@ qla24xx_beacon_on(struct scsi_qla_host *vha) struct qla_hw_data *ha = vha->hw; struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; - if (IS_QLA82XX(ha)) + if (IS_P3P_TYPE(ha)) return QLA_SUCCESS; if (IS_QLA8031(ha) || IS_QLA81XX(ha)) @@ -1804,7 +1821,7 @@ qla24xx_beacon_off(struct scsi_qla_host *vha) struct qla_hw_data *ha = vha->hw; struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; - if (IS_QLA82XX(ha)) + if (IS_P3P_TYPE(ha)) return QLA_SUCCESS; ha->beacon_blink_led = 0; @@ -2822,6 +2839,121 @@ qla2x00_get_flash_version(scsi_qla_host_t *vha, void *mbuf) } int +qla82xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf) +{ + int ret = QLA_SUCCESS; + uint32_t pcihdr, pcids; + uint32_t *dcode; + uint8_t *bcode; + uint8_t code_type, last_image; + struct qla_hw_data *ha = vha->hw; + + if (!mbuf) + return QLA_FUNCTION_FAILED; + + memset(ha->bios_revision, 0, sizeof(ha->bios_revision)); + memset(ha->efi_revision, 0, sizeof(ha->efi_revision)); + memset(ha->fcode_revision, 0, sizeof(ha->fcode_revision)); + memset(ha->fw_revision, 0, sizeof(ha->fw_revision)); + + dcode = mbuf; + + /* Begin with first PCI expansion ROM header. */ + pcihdr = ha->flt_region_boot << 2; + last_image = 1; + do { + /* Verify PCI expansion ROM header. */ + ha->isp_ops->read_optrom(vha, (uint8_t *)dcode, pcihdr, + 0x20 * 4); + bcode = mbuf + (pcihdr % 4); + if (bcode[0x0] != 0x55 || bcode[0x1] != 0xaa) { + /* No signature */ + ql_log(ql_log_fatal, vha, 0x0154, + "No matching ROM signature.\n"); + ret = QLA_FUNCTION_FAILED; + break; + } + + /* Locate PCI data structure. */ + pcids = pcihdr + ((bcode[0x19] << 8) | bcode[0x18]); + + ha->isp_ops->read_optrom(vha, (uint8_t *)dcode, pcids, + 0x20 * 4); + bcode = mbuf + (pcihdr % 4); + + /* Validate signature of PCI data structure. */ + if (bcode[0x0] != 'P' || bcode[0x1] != 'C' || + bcode[0x2] != 'I' || bcode[0x3] != 'R') { + /* Incorrect header. */ + ql_log(ql_log_fatal, vha, 0x0155, + "PCI data struct not found pcir_adr=%x.\n", pcids); + ret = QLA_FUNCTION_FAILED; + break; + } + + /* Read version */ + code_type = bcode[0x14]; + switch (code_type) { + case ROM_CODE_TYPE_BIOS: + /* Intel x86, PC-AT compatible. */ + ha->bios_revision[0] = bcode[0x12]; + ha->bios_revision[1] = bcode[0x13]; + ql_dbg(ql_dbg_init, vha, 0x0156, + "Read BIOS %d.%d.\n", + ha->bios_revision[1], ha->bios_revision[0]); + break; + case ROM_CODE_TYPE_FCODE: + /* Open Firmware standard for PCI (FCode). */ + ha->fcode_revision[0] = bcode[0x12]; + ha->fcode_revision[1] = bcode[0x13]; + ql_dbg(ql_dbg_init, vha, 0x0157, + "Read FCODE %d.%d.\n", + ha->fcode_revision[1], ha->fcode_revision[0]); + break; + case ROM_CODE_TYPE_EFI: + /* Extensible Firmware Interface (EFI). 
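+	 * Offsets here follow the PCI data structure ("PCIR") layout
+	 * from the PCI firmware spec: bcode[0x12..0x13] is the image
+	 * revision, bcode[0x14] (read above) selects the code type,
+	 * and bit 7 of bcode[0x15] flags the last image in the chain.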
*/ + ha->efi_revision[0] = bcode[0x12]; + ha->efi_revision[1] = bcode[0x13]; + ql_dbg(ql_dbg_init, vha, 0x0158, + "Read EFI %d.%d.\n", + ha->efi_revision[1], ha->efi_revision[0]); + break; + default: + ql_log(ql_log_warn, vha, 0x0159, + "Unrecognized code type %x at pcids %x.\n", + code_type, pcids); + break; + } + + last_image = bcode[0x15] & BIT_7; + + /* Locate next PCI expansion ROM. */ + pcihdr += ((bcode[0x11] << 8) | bcode[0x10]) * 512; + } while (!last_image); + + /* Read firmware image information. */ + memset(ha->fw_revision, 0, sizeof(ha->fw_revision)); + dcode = mbuf; + ha->isp_ops->read_optrom(vha, (uint8_t *)dcode, ha->flt_region_fw << 2, + 0x20); + bcode = mbuf + (pcihdr % 4); + + /* Validate signature of PCI data structure. */ + if (bcode[0x0] == 0x3 && bcode[0x1] == 0x0 && + bcode[0x2] == 0x40 && bcode[0x3] == 0x40) { + ha->fw_revision[0] = bcode[0x4]; + ha->fw_revision[1] = bcode[0x5]; + ha->fw_revision[2] = bcode[0x6]; + ql_dbg(ql_dbg_init, vha, 0x0153, + "Firmware revision %d.%d.%d\n", + ha->fw_revision[0], ha->fw_revision[1], + ha->fw_revision[2]); + } + + return ret; +} + +int qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf) { int ret = QLA_SUCCESS; @@ -2832,7 +2964,7 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf) int i; struct qla_hw_data *ha = vha->hw; - if (IS_QLA82XX(ha)) + if (IS_P3P_TYPE(ha)) return ret; if (!mbuf) diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 83a8f7a9ec76..ff12d4677cc4 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -430,13 +430,8 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd) } ql_dbg(ql_dbg_tgt, vha, 0xe047, - "scsi(%ld): resetting (session %p from port " - "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x, " - "mcmd %x, loop_id %d)\n", vha->host_no, sess, - sess->port_name[0], sess->port_name[1], - sess->port_name[2], sess->port_name[3], - sess->port_name[4], sess->port_name[5], - sess->port_name[6], sess->port_name[7], + "scsi(%ld): resetting (session %p from port %8phC mcmd %x, " + "loop_id %d)\n", vha->host_no, sess, sess->port_name, mcmd, loop_id); lun = a->u.isp24.fcp_cmnd.lun; @@ -467,15 +462,10 @@ static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess, sess->expires = jiffies + dev_loss_tmo * HZ; ql_dbg(ql_dbg_tgt, sess->vha, 0xe048, - "qla_target(%d): session for port %02x:%02x:%02x:" - "%02x:%02x:%02x:%02x:%02x (loop ID %d) scheduled for " + "qla_target(%d): session for port %8phC (loop ID %d) scheduled for " "deletion in %u secs (expires: %lu) immed: %d\n", - sess->vha->vp_idx, - sess->port_name[0], sess->port_name[1], - sess->port_name[2], sess->port_name[3], - sess->port_name[4], sess->port_name[5], - sess->port_name[6], sess->port_name[7], - sess->loop_id, dev_loss_tmo, sess->expires, immediate); + sess->vha->vp_idx, sess->port_name, sess->loop_id, dev_loss_tmo, + sess->expires, immediate); if (immediate) schedule_delayed_work(&tgt->sess_del_work, 0); @@ -630,13 +620,9 @@ static struct qla_tgt_sess *qlt_create_sess( sess = kzalloc(sizeof(*sess), GFP_KERNEL); if (!sess) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04a, - "qla_target(%u): session allocation failed, " - "all commands from port %02x:%02x:%02x:%02x:" - "%02x:%02x:%02x:%02x will be refused", vha->vp_idx, - fcport->port_name[0], fcport->port_name[1], - fcport->port_name[2], fcport->port_name[3], - fcport->port_name[4], fcport->port_name[5], - fcport->port_name[6], fcport->port_name[7]); + "qla_target(%u): session allocation failed, all 
commands " + "from port %8phC will be refused", vha->vp_idx, + fcport->port_name); return NULL; } @@ -680,15 +666,11 @@ static struct qla_tgt_sess *qlt_create_sess( spin_unlock_irqrestore(&ha->hardware_lock, flags); ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b, - "qla_target(%d): %ssession for wwn %02x:%02x:%02x:%02x:" - "%02x:%02x:%02x:%02x (loop_id %d, s_id %x:%x:%x, confirmed" - " completion %ssupported) added\n", - vha->vp_idx, local ? "local " : "", fcport->port_name[0], - fcport->port_name[1], fcport->port_name[2], fcport->port_name[3], - fcport->port_name[4], fcport->port_name[5], fcport->port_name[6], - fcport->port_name[7], fcport->loop_id, sess->s_id.b.domain, - sess->s_id.b.area, sess->s_id.b.al_pa, sess->conf_compl_supported ? - "" : "not "); + "qla_target(%d): %ssession for wwn %8phC (loop_id %d, " + "s_id %x:%x:%x, confirmed completion %ssupported) added\n", + vha->vp_idx, local ? "local " : "", fcport->port_name, + fcport->loop_id, sess->s_id.b.domain, sess->s_id.b.area, + sess->s_id.b.al_pa, sess->conf_compl_supported ? "" : "not "); return sess; } @@ -730,13 +712,9 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport) qlt_undelete_sess(sess); ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c, - "qla_target(%u): %ssession for port %02x:" - "%02x:%02x:%02x:%02x:%02x:%02x:%02x (loop ID %d) " - "reappeared\n", vha->vp_idx, sess->local ? "local " - : "", sess->port_name[0], sess->port_name[1], - sess->port_name[2], sess->port_name[3], - sess->port_name[4], sess->port_name[5], - sess->port_name[6], sess->port_name[7], + "qla_target(%u): %ssession for port %8phC " + "(loop ID %d) reappeared\n", vha->vp_idx, + sess->local ? "local " : "", sess->port_name, sess->loop_id); ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007, @@ -749,13 +727,8 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport) if (sess && sess->local) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d, "qla_target(%u): local session for " - "port %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x " - "(loop ID %d) became global\n", vha->vp_idx, - fcport->port_name[0], fcport->port_name[1], - fcport->port_name[2], fcport->port_name[3], - fcport->port_name[4], fcport->port_name[5], - fcport->port_name[6], fcport->port_name[7], - sess->loop_id); + "port %8phC (loop ID %d) became global\n", vha->vp_idx, + fcport->port_name, sess->loop_id); sess->local = 0; } ha->tgt.tgt_ops->put_sess(sess); @@ -2840,10 +2813,8 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha, int res = 0; ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026, - "qla_target(%d): Port ID: 0x%02x:%02x:%02x" - " ELS opcode: 0x%02x\n", vha->vp_idx, iocb->u.isp24.port_id[0], - iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[2], - iocb->u.isp24.status_subcode); + "qla_target(%d): Port ID: 0x%3phC ELS opcode: 0x%02x\n", + vha->vp_idx, iocb->u.isp24.port_id, iocb->u.isp24.status_subcode); switch (iocb->u.isp24.status_subcode) { case ELS_PLOGI: diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h index 6c66d22eb1b1..a808e293dae0 100644 --- a/drivers/scsi/qla2xxx/qla_version.h +++ b/drivers/scsi/qla2xxx/qla_version.h @@ -7,9 +7,9 @@ /* * Driver version */ -#define QLA2XXX_VERSION "8.05.00.03-k" +#define QLA2XXX_VERSION "8.06.00.08-k" #define QLA_DRIVER_MAJOR_VER 8 -#define QLA_DRIVER_MINOR_VER 5 +#define QLA_DRIVER_MINOR_VER 6 #define QLA_DRIVER_PATCH_VER 0 #define QLA_DRIVER_BETA_VER 0 diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index a318092e033f..a6da313e253b 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ 
b/drivers/scsi/qla2xxx/tcm_qla2xxx.c @@ -1474,15 +1474,11 @@ static void tcm_qla2xxx_update_sess(struct qla_tgt_sess *sess, port_id_t s_id, if (sess->loop_id != loop_id || sess->s_id.b24 != s_id.b24) - pr_info("Updating session %p from port %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x loop_id %d -> %d s_id %x:%x:%x -> %x:%x:%x\n", - sess, - sess->port_name[0], sess->port_name[1], - sess->port_name[2], sess->port_name[3], - sess->port_name[4], sess->port_name[5], - sess->port_name[6], sess->port_name[7], - sess->loop_id, loop_id, - sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa, - s_id.b.domain, s_id.b.area, s_id.b.al_pa); + pr_info("Updating session %p from port %8phC loop_id %d -> %d s_id %x:%x:%x -> %x:%x:%x\n", + sess, sess->port_name, + sess->loop_id, loop_id, sess->s_id.b.domain, + sess->s_id.b.area, sess->s_id.b.al_pa, s_id.b.domain, + s_id.b.area, s_id.b.al_pa); if (sess->loop_id != loop_id) { /* diff --git a/drivers/scsi/qla4xxx/ql4_83xx.c b/drivers/scsi/qla4xxx/ql4_83xx.c index d607eb8e24cb..8196c2f7915c 100644 --- a/drivers/scsi/qla4xxx/ql4_83xx.c +++ b/drivers/scsi/qla4xxx/ql4_83xx.c @@ -1,6 +1,6 @@ /* * QLogic iSCSI HBA Driver - * Copyright (c) 2003-2012 QLogic Corporation + * Copyright (c) 2003-2013 QLogic Corporation * * See LICENSE.qla4xxx for copyright and licensing details. */ @@ -259,8 +259,8 @@ void qla4_83xx_rom_lock_recovery(struct scsi_qla_host *ha) * Return: On success return QLA_SUCCESS * On error return QLA_ERROR **/ -static int qla4_83xx_ms_mem_write_128b(struct scsi_qla_host *ha, uint64_t addr, - uint32_t *data, uint32_t count) +int qla4_83xx_ms_mem_write_128b(struct scsi_qla_host *ha, uint64_t addr, + uint32_t *data, uint32_t count) { int i, j; uint32_t agt_ctrl; @@ -1473,9 +1473,9 @@ int qla4_83xx_isp_reset(struct scsi_qla_host *ha) __func__)); } - /* For ISP8324, Reset owner is NIC, iSCSI or FCOE based on priority - * and which drivers are present. Unlike ISP8022, the function setting - * NEED_RESET, may not be the Reset owner. */ + /* For ISP8324 and ISP8042, Reset owner is NIC, iSCSI or FCOE based on + * priority and which drivers are present. Unlike ISP8022, the function + * setting NEED_RESET, may not be the Reset owner. */ if (qla4_83xx_can_perform_reset(ha)) set_bit(AF_8XXX_RST_OWNER, &ha->flags); diff --git a/drivers/scsi/qla4xxx/ql4_83xx.h b/drivers/scsi/qla4xxx/ql4_83xx.h index fab237fa32cc..a0de6e25ea5a 100644 --- a/drivers/scsi/qla4xxx/ql4_83xx.h +++ b/drivers/scsi/qla4xxx/ql4_83xx.h @@ -1,6 +1,6 @@ /* * QLogic iSCSI HBA Driver - * Copyright (c) 2003-2012 QLogic Corporation + * Copyright (c) 2003-2013 QLogic Corporation * * See LICENSE.qla4xxx for copyright and licensing details. 
*/ @@ -290,4 +290,38 @@ struct qla4_83xx_idc_information { uint32_t info3; /* IDC additional info */ }; +#define QLA83XX_PEX_DMA_ENGINE_INDEX 8 +#define QLA83XX_PEX_DMA_BASE_ADDRESS 0x77320000 +#define QLA83XX_PEX_DMA_NUM_OFFSET 0x10000 +#define QLA83XX_PEX_DMA_CMD_ADDR_LOW 0x0 +#define QLA83XX_PEX_DMA_CMD_ADDR_HIGH 0x04 +#define QLA83XX_PEX_DMA_CMD_STS_AND_CNTRL 0x08 + +#define QLA83XX_PEX_DMA_READ_SIZE (16 * 1024) +#define QLA83XX_PEX_DMA_MAX_WAIT (100 * 100) /* Max wait of 100 msecs */ + +/* Read Memory: For Pex-DMA */ +struct qla4_83xx_minidump_entry_rdmem_pex_dma { + struct qla8xxx_minidump_entry_hdr h; + uint32_t desc_card_addr; + uint16_t dma_desc_cmd; + uint8_t rsvd[2]; + uint32_t start_dma_cmd; + uint8_t rsvd2[12]; + uint32_t read_addr; + uint32_t read_data_size; +}; + +struct qla4_83xx_pex_dma_descriptor { + struct { + uint32_t read_data_size; /* 0-23: size, 24-31: rsvd */ + uint8_t rsvd[2]; + uint16_t dma_desc_cmd; + } cmd; + uint64_t src_addr; + uint64_t dma_bus_addr; /* 0-3: desc-cmd, 4-7: pci-func, + * 8-15: desc-cmd */ + uint8_t rsvd[24]; +} __packed; + #endif diff --git a/drivers/scsi/qla4xxx/ql4_attr.c b/drivers/scsi/qla4xxx/ql4_attr.c index 19ee55a6226c..463239c972b0 100644 --- a/drivers/scsi/qla4xxx/ql4_attr.c +++ b/drivers/scsi/qla4xxx/ql4_attr.c @@ -1,6 +1,6 @@ /* * QLogic iSCSI HBA Driver - * Copyright (c) 2003-2011 QLogic Corporation + * Copyright (c) 2003-2013 QLogic Corporation * * See LICENSE.qla4xxx for copyright and licensing details. */ @@ -83,7 +83,7 @@ qla4_8xxx_sysfs_write_fw_dump(struct file *filep, struct kobject *kobj, qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, QLA8XXX_DEV_NEED_RESET); if (is_qla8022(ha) || - (is_qla8032(ha) && + ((is_qla8032(ha) || is_qla8042(ha)) && qla4_83xx_can_perform_reset(ha))) { set_bit(AF_8XXX_RST_OWNER, &ha->flags); set_bit(AF_FW_RECOVERY, &ha->flags); @@ -158,14 +158,12 @@ qla4xxx_fw_version_show(struct device *dev, if (is_qla80XX(ha)) return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n", - ha->firmware_version[0], - ha->firmware_version[1], - ha->patch_number, ha->build_number); + ha->fw_info.fw_major, ha->fw_info.fw_minor, + ha->fw_info.fw_patch, ha->fw_info.fw_build); else return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d.%02d\n", - ha->firmware_version[0], - ha->firmware_version[1], - ha->patch_number, ha->build_number); + ha->fw_info.fw_major, ha->fw_info.fw_minor, + ha->fw_info.fw_patch, ha->fw_info.fw_build); } static ssize_t @@ -181,8 +179,8 @@ qla4xxx_iscsi_version_show(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev)); - return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->iscsi_major, - ha->iscsi_minor); + return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->fw_info.iscsi_major, + ha->fw_info.iscsi_minor); } static ssize_t @@ -191,8 +189,8 @@ qla4xxx_optrom_version_show(struct device *dev, struct device_attribute *attr, { struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev)); return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d.%02d\n", - ha->bootload_major, ha->bootload_minor, - ha->bootload_patch, ha->bootload_build); + ha->fw_info.bootload_major, ha->fw_info.bootload_minor, + ha->fw_info.bootload_patch, ha->fw_info.bootload_build); } static ssize_t @@ -259,6 +257,63 @@ qla4xxx_hba_model_show(struct device *dev, struct device_attribute *attr, return snprintf(buf, PAGE_SIZE, "%s\n", ha->model_name); } +static ssize_t +qla4xxx_fw_timestamp_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct scsi_qla_host *ha = 
to_qla_host(class_to_shost(dev)); + return snprintf(buf, PAGE_SIZE, "%s %s\n", ha->fw_info.fw_build_date, + ha->fw_info.fw_build_time); +} + +static ssize_t +qla4xxx_fw_build_user_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev)); + return snprintf(buf, PAGE_SIZE, "%s\n", ha->fw_info.fw_build_user); +} + +static ssize_t +qla4xxx_fw_ext_timestamp_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev)); + return snprintf(buf, PAGE_SIZE, "%s\n", ha->fw_info.extended_timestamp); +} + +static ssize_t +qla4xxx_fw_load_src_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev)); + char *load_src = NULL; + + switch (ha->fw_info.fw_load_source) { + case 1: + load_src = "Flash Primary"; + break; + case 2: + load_src = "Flash Secondary"; + break; + case 3: + load_src = "Host Download"; + break; + } + + return snprintf(buf, PAGE_SIZE, "%s\n", load_src); +} + +static ssize_t +qla4xxx_fw_uptime_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev)); + qla4xxx_about_firmware(ha); + return snprintf(buf, PAGE_SIZE, "%u.%u secs\n", ha->fw_uptime_secs, + ha->fw_uptime_msecs); +} + static DEVICE_ATTR(fw_version, S_IRUGO, qla4xxx_fw_version_show, NULL); static DEVICE_ATTR(serial_num, S_IRUGO, qla4xxx_serial_num_show, NULL); static DEVICE_ATTR(iscsi_version, S_IRUGO, qla4xxx_iscsi_version_show, NULL); @@ -269,6 +324,12 @@ static DEVICE_ATTR(phy_port_cnt, S_IRUGO, qla4xxx_phy_port_cnt_show, NULL); static DEVICE_ATTR(phy_port_num, S_IRUGO, qla4xxx_phy_port_num_show, NULL); static DEVICE_ATTR(iscsi_func_cnt, S_IRUGO, qla4xxx_iscsi_func_cnt_show, NULL); static DEVICE_ATTR(hba_model, S_IRUGO, qla4xxx_hba_model_show, NULL); +static DEVICE_ATTR(fw_timestamp, S_IRUGO, qla4xxx_fw_timestamp_show, NULL); +static DEVICE_ATTR(fw_build_user, S_IRUGO, qla4xxx_fw_build_user_show, NULL); +static DEVICE_ATTR(fw_ext_timestamp, S_IRUGO, qla4xxx_fw_ext_timestamp_show, + NULL); +static DEVICE_ATTR(fw_load_src, S_IRUGO, qla4xxx_fw_load_src_show, NULL); +static DEVICE_ATTR(fw_uptime, S_IRUGO, qla4xxx_fw_uptime_show, NULL); struct device_attribute *qla4xxx_host_attrs[] = { &dev_attr_fw_version, @@ -281,5 +342,10 @@ struct device_attribute *qla4xxx_host_attrs[] = { &dev_attr_phy_port_num, &dev_attr_iscsi_func_cnt, &dev_attr_hba_model, + &dev_attr_fw_timestamp, + &dev_attr_fw_build_user, + &dev_attr_fw_ext_timestamp, + &dev_attr_fw_load_src, + &dev_attr_fw_uptime, NULL, }; diff --git a/drivers/scsi/qla4xxx/ql4_bsg.c b/drivers/scsi/qla4xxx/ql4_bsg.c index 8acdc582ff6d..cf8fdf1d1257 100644 --- a/drivers/scsi/qla4xxx/ql4_bsg.c +++ b/drivers/scsi/qla4xxx/ql4_bsg.c @@ -1,6 +1,6 @@ /* * QLogic iSCSI HBA Driver - * Copyright (c) 2011 QLogic Corporation + * Copyright (c) 2011-2013 QLogic Corporation * * See LICENSE.qla4xxx for copyright and licensing details. 
*/ diff --git a/drivers/scsi/qla4xxx/ql4_dbg.c b/drivers/scsi/qla4xxx/ql4_dbg.c index 77b7c594010f..5649e9ef59a8 100644 --- a/drivers/scsi/qla4xxx/ql4_dbg.c +++ b/drivers/scsi/qla4xxx/ql4_dbg.c @@ -141,21 +141,22 @@ void qla4_8xxx_dump_peg_reg(struct scsi_qla_host *ha) if (is_qla8022(ha)) { ql4_printk(KERN_INFO, ha, - "scsi(%ld): %s, ISP8022 Dumping hw/fw registers:\n" + "scsi(%ld): %s, ISP%04x Dumping hw/fw registers:\n" " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n" " PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,\n" " PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n" - " PEG_NET_4_PC: 0x%x\n", ha->host_no, - __func__, halt_status1, halt_status2, + " PEG_NET_4_PC: 0x%x\n", ha->host_no, __func__, + ha->pdev->device, halt_status1, halt_status2, qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x3c), qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_1 + 0x3c), qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_2 + 0x3c), qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_3 + 0x3c), qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_4 + 0x3c)); - } else if (is_qla8032(ha)) { + } else if (is_qla8032(ha) || is_qla8042(ha)) { ql4_printk(KERN_INFO, ha, - "scsi(%ld): %s, ISP8324 Dumping hw/fw registers:\n" + "scsi(%ld): %s, ISP%04x Dumping hw/fw registers:\n" " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n", - ha->host_no, __func__, halt_status1, halt_status2); + ha->host_no, __func__, ha->pdev->device, + halt_status1, halt_status2); } } diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h index ddf16a86bbf5..41327d46ecf5 100644 --- a/drivers/scsi/qla4xxx/ql4_def.h +++ b/drivers/scsi/qla4xxx/ql4_def.h @@ -1,6 +1,6 @@ /* * QLogic iSCSI HBA Driver - * Copyright (c) 2003-2012 QLogic Corporation + * Copyright (c) 2003-2013 QLogic Corporation * * See LICENSE.qla4xxx for copyright and licensing details. 
*/ @@ -64,6 +64,10 @@ #define PCI_DEVICE_ID_QLOGIC_ISP8324 0x8032 #endif +#ifndef PCI_DEVICE_ID_QLOGIC_ISP8042 +#define PCI_DEVICE_ID_QLOGIC_ISP8042 0x8042 +#endif + #define ISP4XXX_PCI_FN_1 0x1 #define ISP4XXX_PCI_FN_2 0x3 @@ -201,6 +205,7 @@ #define MAX_RESET_HA_RETRIES 2 #define FW_ALIVE_WAIT_TOV 3 +#define IDC_EXTEND_TOV 8 #define CMD_SP(Cmnd) ((Cmnd)->SCp.ptr) @@ -335,6 +340,7 @@ struct ql4_tuple_ddb { #define DF_BOOT_TGT 1 /* Boot target entry */ #define DF_ISNS_DISCOVERED 2 /* Device was discovered via iSNS */ #define DF_FO_MASKED 3 +#define DF_DISABLE_RELOGIN 4 /* Disable relogin to device */ enum qla4_work_type { QLA4_EVENT_AEN, @@ -557,6 +563,7 @@ struct scsi_qla_host { #define DPC_HA_UNRECOVERABLE 21 /* 0x00080000 ISP-82xx only*/ #define DPC_HA_NEED_QUIESCENT 22 /* 0x00100000 ISP-82xx only*/ #define DPC_POST_IDC_ACK 23 /* 0x00200000 */ +#define DPC_RESTORE_ACB 24 /* 0x01000000 */ struct Scsi_Host *host; /* pointer to host data */ uint32_t tot_ddbs; @@ -734,12 +741,9 @@ struct scsi_qla_host { struct iscsi_iface *iface_ipv6_1; /* --- From About Firmware --- */ - uint16_t iscsi_major; - uint16_t iscsi_minor; - uint16_t bootload_major; - uint16_t bootload_minor; - uint16_t bootload_patch; - uint16_t bootload_build; + struct about_fw_info fw_info; + uint32_t fw_uptime_secs; /* seconds elapsed since fw bootup */ + uint32_t fw_uptime_msecs; /* milliseconds beyond elapsed seconds */ uint16_t def_timeout; /* Default login timeout */ uint32_t flash_state; @@ -780,9 +784,11 @@ struct scsi_qla_host { uint32_t *reg_tbl; struct qla4_83xx_reset_template reset_tmplt; struct device_reg_83xx __iomem *qla4_83xx_reg; /* Base I/O address - for ISP8324 */ + for ISP8324 and + and ISP8042 */ uint32_t pf_bit; struct qla4_83xx_idc_information idc_info; + struct addr_ctrl_blk *saved_acb; }; struct ql4_task_data { @@ -850,9 +856,14 @@ static inline int is_qla8032(struct scsi_qla_host *ha) return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8324; } +static inline int is_qla8042(struct scsi_qla_host *ha) +{ + return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8042; +} + static inline int is_qla80XX(struct scsi_qla_host *ha) { - return is_qla8022(ha) || is_qla8032(ha); + return is_qla8022(ha) || is_qla8032(ha) || is_qla8042(ha); } static inline int is_aer_supported(struct scsi_qla_host *ha) diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h index c7b8892b5a83..51d1a70f8b45 100644 --- a/drivers/scsi/qla4xxx/ql4_fw.h +++ b/drivers/scsi/qla4xxx/ql4_fw.h @@ -1,6 +1,6 @@ /* * QLogic iSCSI HBA Driver - * Copyright (c) 2003-2012 QLogic Corporation + * Copyright (c) 2003-2013 QLogic Corporation * * See LICENSE.qla4xxx for copyright and licensing details. 
*/ @@ -458,6 +458,7 @@ struct qla_flt_region { #define MBOX_CMD_GET_CONN_EVENT_LOG 0x0077 #define MBOX_CMD_IDC_ACK 0x0101 +#define MBOX_CMD_IDC_TIME_EXTEND 0x0102 #define MBOX_CMD_PORT_RESET 0x0120 #define MBOX_CMD_SET_PORT_CONFIG 0x0122 @@ -502,6 +503,7 @@ struct qla_flt_region { #define MBOX_ASTS_SYSTEM_WARNING_EVENT 0x8036 #define MBOX_ASTS_IDC_COMPLETE 0x8100 #define MBOX_ASTS_IDC_REQUEST_NOTIFICATION 0x8101 +#define MBOX_ASTS_IDC_TIME_EXTEND_NOTIFICATION 0x8102 #define MBOX_ASTS_DCBX_CONF_CHANGE 0x8110 #define MBOX_ASTS_TXSCVR_INSERTED 0x8130 #define MBOX_ASTS_TXSCVR_REMOVED 0x8131 @@ -512,6 +514,10 @@ struct qla_flt_region { #define MBOX_ASTS_IPSEC_SYSTEM_FATAL_ERROR 0x8022 #define MBOX_ASTS_SUBNET_STATE_CHANGE 0x8027 +/* ACB Configuration Defines */ +#define ACB_CONFIG_DISABLE 0x00 +#define ACB_CONFIG_SET 0x01 + /* ACB State Defines */ #define ACB_STATE_UNCONFIGURED 0x00 #define ACB_STATE_INVALID 0x01 @@ -955,7 +961,7 @@ struct about_fw_info { uint16_t bootload_minor; /* 46 - 47 */ uint16_t bootload_patch; /* 48 - 49 */ uint16_t bootload_build; /* 4A - 4B */ - uint8_t reserved2[180]; /* 4C - FF */ + uint8_t extended_timestamp[180];/* 4C - FF */ }; struct crash_record { diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h index 4a428009f699..e6f2a2669dbd 100644 --- a/drivers/scsi/qla4xxx/ql4_glbl.h +++ b/drivers/scsi/qla4xxx/ql4_glbl.h @@ -1,6 +1,6 @@ /* * QLogic iSCSI HBA Driver - * Copyright (c) 2003-2012 QLogic Corporation + * Copyright (c) 2003-2013 QLogic Corporation * * See LICENSE.qla4xxx for copyright and licensing details. */ @@ -266,6 +266,14 @@ int qla4xxx_get_default_ddb(struct scsi_qla_host *ha, uint32_t options, dma_addr_t dma_addr); int qla4xxx_get_uni_chap_at_index(struct scsi_qla_host *ha, char *username, char *password, uint16_t chap_index); +int qla4xxx_disable_acb(struct scsi_qla_host *ha); +int qla4xxx_set_acb(struct scsi_qla_host *ha, uint32_t *mbox_cmd, + uint32_t *mbox_sts, dma_addr_t acb_dma); +int qla4xxx_get_acb(struct scsi_qla_host *ha, dma_addr_t acb_dma, + uint32_t acb_type, uint32_t len); +int qla4_84xx_config_acb(struct scsi_qla_host *ha, int acb_config); +int qla4_83xx_ms_mem_write_128b(struct scsi_qla_host *ha, + uint64_t addr, uint32_t *data, uint32_t count); extern int ql4xextended_error_logging; extern int ql4xdontresethba; diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c index 8fc8548ba4ba..7456eeb2e58a 100644 --- a/drivers/scsi/qla4xxx/ql4_init.c +++ b/drivers/scsi/qla4xxx/ql4_init.c @@ -1,6 +1,6 @@ /* * QLogic iSCSI HBA Driver - * Copyright (c) 2003-2012 QLogic Corporation + * Copyright (c) 2003-2013 QLogic Corporation * * See LICENSE.qla4xxx for copyright and licensing details. */ @@ -107,7 +107,7 @@ int qla4xxx_init_rings(struct scsi_qla_host *ha) (unsigned long __iomem *)&ha->qla4_82xx_reg->rsp_q_in); writel(0, (unsigned long __iomem *)&ha->qla4_82xx_reg->rsp_q_out); - } else if (is_qla8032(ha)) { + } else if (is_qla8032(ha) || is_qla8042(ha)) { writel(0, (unsigned long __iomem *)&ha->qla4_83xx_reg->req_q_in); writel(0, @@ -940,7 +940,7 @@ int qla4xxx_initialize_adapter(struct scsi_qla_host *ha, int is_reset) * while switching from polling to interrupt mode. IOCB interrupts are * enabled via isp_ops->enable_intrs. 
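 * ISP8042 shares the ISP8324 register file (qla4_83xx_reg) and the
 * 83xx mailbox interrupt path, which is why is_qla8042() simply
 * rides along with is_qla8032() in this and the surrounding hunks.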
*/ - if (is_qla8032(ha)) + if (is_qla8032(ha) || is_qla8042(ha)) qla4_83xx_enable_mbox_intrs(ha); if (qla4xxx_about_firmware(ha) == QLA_ERROR) diff --git a/drivers/scsi/qla4xxx/ql4_inline.h b/drivers/scsi/qla4xxx/ql4_inline.h index 6f4decd44c6a..8503ad643bdd 100644 --- a/drivers/scsi/qla4xxx/ql4_inline.h +++ b/drivers/scsi/qla4xxx/ql4_inline.h @@ -1,6 +1,6 @@ /* * QLogic iSCSI HBA Driver - * Copyright (c) 2003-2012 QLogic Corporation + * Copyright (c) 2003-2013 QLogic Corporation * * See LICENSE.qla4xxx for copyright and licensing details. */ diff --git a/drivers/scsi/qla4xxx/ql4_iocb.c b/drivers/scsi/qla4xxx/ql4_iocb.c index fad71ed067ec..e5697ab144d2 100644 --- a/drivers/scsi/qla4xxx/ql4_iocb.c +++ b/drivers/scsi/qla4xxx/ql4_iocb.c @@ -1,6 +1,6 @@ /* * QLogic iSCSI HBA Driver - * Copyright (c) 2003-2012 QLogic Corporation + * Copyright (c) 2003-2013 QLogic Corporation * * See LICENSE.qla4xxx for copyright and licensing details. */ diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c index 482287f4005f..7dff09f09b71 100644 --- a/drivers/scsi/qla4xxx/ql4_isr.c +++ b/drivers/scsi/qla4xxx/ql4_isr.c @@ -1,6 +1,6 @@ /* * QLogic iSCSI HBA Driver - * Copyright (c) 2003-2012 QLogic Corporation + * Copyright (c) 2003-2013 QLogic Corporation * * See LICENSE.qla4xxx for copyright and licensing details. */ @@ -588,7 +588,7 @@ static int qla4_83xx_loopback_in_progress(struct scsi_qla_host *ha) { int rval = 1; - if (is_qla8032(ha)) { + if (is_qla8032(ha) || is_qla8042(ha)) { if ((ha->idc_info.info2 & ENABLE_INTERNAL_LOOPBACK) || (ha->idc_info.info2 & ENABLE_EXTERNAL_LOOPBACK)) { DEBUG2(ql4_printk(KERN_INFO, ha, @@ -621,7 +621,7 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha, uint32_t mbox_sts[MBOX_AEN_REG_COUNT]; __le32 __iomem *mailbox_out; - if (is_qla8032(ha)) + if (is_qla8032(ha) || is_qla8042(ha)) mailbox_out = &ha->qla4_83xx_reg->mailbox_out[0]; else if (is_qla8022(ha)) mailbox_out = &ha->qla4_82xx_reg->mailbox_out[0]; @@ -665,7 +665,8 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha, qla4xxx_dump_registers(ha); if ((is_qla8022(ha) && ql4xdontresethba) || - (is_qla8032(ha) && qla4_83xx_idc_dontreset(ha))) { + ((is_qla8032(ha) || is_qla8042(ha)) && + qla4_83xx_idc_dontreset(ha))) { DEBUG2(printk("scsi%ld: %s:Don't Reset HBA\n", ha->host_no, __func__)); } else { @@ -744,17 +745,23 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha, * mbox_sts[3] = new ACB state */ if ((mbox_sts[3] == ACB_STATE_VALID) && ((mbox_sts[2] == ACB_STATE_TENTATIVE) || - (mbox_sts[2] == ACB_STATE_ACQUIRING))) + (mbox_sts[2] == ACB_STATE_ACQUIRING))) { set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags); - else if ((mbox_sts[3] == ACB_STATE_ACQUIRING) && - (mbox_sts[2] == ACB_STATE_VALID)) { + } else if ((mbox_sts[3] == ACB_STATE_ACQUIRING) && + (mbox_sts[2] == ACB_STATE_VALID)) { if (is_qla80XX(ha)) set_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags); else set_bit(DPC_RESET_HA, &ha->dpc_flags); - } else if ((mbox_sts[3] == ACB_STATE_UNCONFIGURED)) + } else if (mbox_sts[3] == ACB_STATE_DISABLING) { + ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ACB in disabling state\n", + ha->host_no, __func__); + } else if ((mbox_sts[3] == ACB_STATE_UNCONFIGURED)) { complete(&ha->disable_acb_comp); + ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ACB state unconfigured\n", + ha->host_no, __func__); + } break; case MBOX_ASTS_MAC_ADDRESS_CHANGED: @@ -836,7 +843,7 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha, case MBOX_ASTS_IDC_REQUEST_NOTIFICATION: { uint32_t 
opcode; - if (is_qla8032(ha)) { + if (is_qla8032(ha) || is_qla8042(ha)) { DEBUG2(ql4_printk(KERN_INFO, ha, "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x\n", ha->host_no, mbox_sts[0], @@ -858,7 +865,7 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha, } case MBOX_ASTS_IDC_COMPLETE: - if (is_qla8032(ha)) { + if (is_qla8032(ha) || is_qla8042(ha)) { DEBUG2(ql4_printk(KERN_INFO, ha, "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x\n", ha->host_no, mbox_sts[0], @@ -868,10 +875,15 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha, "scsi:%ld: AEN %04x IDC Complete notification\n", ha->host_no, mbox_sts[0])); - if (qla4_83xx_loopback_in_progress(ha)) + if (qla4_83xx_loopback_in_progress(ha)) { set_bit(AF_LOOPBACK, &ha->flags); - else + } else { clear_bit(AF_LOOPBACK, &ha->flags); + if (ha->saved_acb) + set_bit(DPC_RESTORE_ACB, + &ha->dpc_flags); + } + qla4xxx_wake_dpc(ha); } break; @@ -886,6 +898,17 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha, ha->host_no, mbox_sts[0])); break; + case MBOX_ASTS_IDC_TIME_EXTEND_NOTIFICATION: + DEBUG2(ql4_printk(KERN_INFO, ha, + "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x mbox_sts[5]=%08x\n", + ha->host_no, mbox_sts[0], mbox_sts[1], + mbox_sts[2], mbox_sts[3], mbox_sts[4], + mbox_sts[5])); + DEBUG2(ql4_printk(KERN_INFO, ha, + "scsi%ld: AEN %04x Received IDC Extend Timeout notification\n", + ha->host_no, mbox_sts[0])); + break; + case MBOX_ASTS_INITIALIZATION_FAILED: DEBUG2(ql4_printk(KERN_INFO, ha, "scsi%ld: AEN %04x, mbox_sts[3]=%08x\n", @@ -1297,7 +1320,7 @@ qla4_8xxx_default_intr_handler(int irq, void *dev_id) uint32_t intr_status; uint8_t reqs_count = 0; - if (is_qla8032(ha)) { + if (is_qla8032(ha) || is_qla8042(ha)) { qla4_83xx_mailbox_intr_handler(irq, dev_id); } else { spin_lock_irqsave(&ha->hardware_lock, flags); @@ -1334,7 +1357,7 @@ qla4_8xxx_msix_rsp_q(int irq, void *dev_id) uint32_t ival = 0; spin_lock_irqsave(&ha->hardware_lock, flags); - if (is_qla8032(ha)) { + if (is_qla8032(ha) || is_qla8042(ha)) { ival = readl(&ha->qla4_83xx_reg->iocb_int_mask); if (ival == 0) { ql4_printk(KERN_INFO, ha, "%s: It is a spurious iocb interrupt!\n", @@ -1425,10 +1448,10 @@ int qla4xxx_request_irqs(struct scsi_qla_host *ha) goto try_intx; if (ql4xenablemsix == 2) { - /* Note: MSI Interrupts not supported for ISP8324 */ - if (is_qla8032(ha)) { - ql4_printk(KERN_INFO, ha, "%s: MSI Interrupts not supported for ISP8324, Falling back-to INTx mode\n", - __func__); + /* Note: MSI Interrupts not supported for ISP8324 and ISP8042 */ + if (is_qla8032(ha) || is_qla8042(ha)) { + ql4_printk(KERN_INFO, ha, "%s: MSI Interrupts not supported for ISP%04x, Falling back-to INTx mode\n", + __func__, ha->pdev->device); goto try_intx; } goto try_msi; @@ -1444,9 +1467,9 @@ int qla4xxx_request_irqs(struct scsi_qla_host *ha) "MSI-X: Enabled (0x%X).\n", ha->revision_id)); goto irq_attached; } else { - if (is_qla8032(ha)) { - ql4_printk(KERN_INFO, ha, "%s: ISP8324: MSI-X: Falling back-to INTx mode. ret = %d\n", - __func__, ret); + if (is_qla8032(ha) || is_qla8042(ha)) { + ql4_printk(KERN_INFO, ha, "%s: ISP%04x: MSI-X: Falling back-to INTx mode. 
ret = %d\n", + __func__, ha->pdev->device, ret); goto try_intx; } } diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c index a501beab3ffe..62d4208af21f 100644 --- a/drivers/scsi/qla4xxx/ql4_mbx.c +++ b/drivers/scsi/qla4xxx/ql4_mbx.c @@ -1,10 +1,11 @@ /* * QLogic iSCSI HBA Driver - * Copyright (c) 2003-2012 QLogic Corporation + * Copyright (c) 2003-2013 QLogic Corporation * * See LICENSE.qla4xxx for copyright and licensing details. */ +#include <linux/ctype.h> #include "ql4_def.h" #include "ql4_glbl.h" #include "ql4_dbg.h" @@ -52,7 +53,7 @@ static int qla4xxx_is_intr_poll_mode(struct scsi_qla_host *ha) { int rval = 1; - if (is_qla8032(ha)) { + if (is_qla8032(ha) || is_qla8042(ha)) { if (test_bit(AF_IRQ_ATTACHED, &ha->flags) && test_bit(AF_83XX_MBOX_INTR_ON, &ha->flags)) rval = 0; @@ -223,7 +224,7 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount, qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98, CRB_NIU_XG_PAUSE_CTL_P0 | CRB_NIU_XG_PAUSE_CTL_P1); - } else if (is_qla8032(ha)) { + } else if (is_qla8032(ha) || is_qla8042(ha)) { ql4_printk(KERN_INFO, ha, " %s: disabling pause transmit on port 0 & 1.\n", __func__); qla4_83xx_disable_pause(ha); @@ -1270,16 +1271,28 @@ int qla4xxx_about_firmware(struct scsi_qla_host *ha) } /* Save version information. */ - ha->firmware_version[0] = le16_to_cpu(about_fw->fw_major); - ha->firmware_version[1] = le16_to_cpu(about_fw->fw_minor); - ha->patch_number = le16_to_cpu(about_fw->fw_patch); - ha->build_number = le16_to_cpu(about_fw->fw_build); - ha->iscsi_major = le16_to_cpu(about_fw->iscsi_major); - ha->iscsi_minor = le16_to_cpu(about_fw->iscsi_minor); - ha->bootload_major = le16_to_cpu(about_fw->bootload_major); - ha->bootload_minor = le16_to_cpu(about_fw->bootload_minor); - ha->bootload_patch = le16_to_cpu(about_fw->bootload_patch); - ha->bootload_build = le16_to_cpu(about_fw->bootload_build); + ha->fw_info.fw_major = le16_to_cpu(about_fw->fw_major); + ha->fw_info.fw_minor = le16_to_cpu(about_fw->fw_minor); + ha->fw_info.fw_patch = le16_to_cpu(about_fw->fw_patch); + ha->fw_info.fw_build = le16_to_cpu(about_fw->fw_build); + memcpy(ha->fw_info.fw_build_date, about_fw->fw_build_date, + sizeof(about_fw->fw_build_date)); + memcpy(ha->fw_info.fw_build_time, about_fw->fw_build_time, + sizeof(about_fw->fw_build_time)); + strcpy((char *)ha->fw_info.fw_build_user, + skip_spaces((char *)about_fw->fw_build_user)); + ha->fw_info.fw_load_source = le16_to_cpu(about_fw->fw_load_source); + ha->fw_info.iscsi_major = le16_to_cpu(about_fw->iscsi_major); + ha->fw_info.iscsi_minor = le16_to_cpu(about_fw->iscsi_minor); + ha->fw_info.bootload_major = le16_to_cpu(about_fw->bootload_major); + ha->fw_info.bootload_minor = le16_to_cpu(about_fw->bootload_minor); + ha->fw_info.bootload_patch = le16_to_cpu(about_fw->bootload_patch); + ha->fw_info.bootload_build = le16_to_cpu(about_fw->bootload_build); + strcpy((char *)ha->fw_info.extended_timestamp, + skip_spaces((char *)about_fw->extended_timestamp)); + + ha->fw_uptime_secs = le32_to_cpu(mbox_sts[5]); + ha->fw_uptime_msecs = le32_to_cpu(mbox_sts[6]); status = QLA_SUCCESS; exit_about_fw: @@ -1723,6 +1736,45 @@ int qla4xxx_conn_close_sess_logout(struct scsi_qla_host *ha, return status; } +/** + * qla4_84xx_extend_idc_tmo - Extend IDC Timeout. + * @ha: Pointer to host adapter structure. + * @ext_tmo: idc timeout value + * + * Requests firmware to extend the idc timeout value. 
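+ *
+ * The extension is capped at 15 seconds: ext_tmo is masked to four
+ * bits and folded into bits 8-11 of the saved IDC request descriptor
+ * ((request_desc & 0xfffff0ff) | (ext_tmo << 8)) before being posted
+ * with MBOX_CMD_IDC_TIME_EXTEND.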
+ **/ +static int qla4_84xx_extend_idc_tmo(struct scsi_qla_host *ha, uint32_t ext_tmo) +{ + uint32_t mbox_cmd[MBOX_REG_COUNT]; + uint32_t mbox_sts[MBOX_REG_COUNT]; + int status; + + memset(&mbox_cmd, 0, sizeof(mbox_cmd)); + memset(&mbox_sts, 0, sizeof(mbox_sts)); + ext_tmo &= 0xf; + + mbox_cmd[0] = MBOX_CMD_IDC_TIME_EXTEND; + mbox_cmd[1] = ((ha->idc_info.request_desc & 0xfffff0ff) | + (ext_tmo << 8)); /* new timeout */ + mbox_cmd[2] = ha->idc_info.info1; + mbox_cmd[3] = ha->idc_info.info2; + mbox_cmd[4] = ha->idc_info.info3; + + status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, MBOX_REG_COUNT, + mbox_cmd, mbox_sts); + if (status != QLA_SUCCESS) { + DEBUG2(ql4_printk(KERN_INFO, ha, + "scsi%ld: %s: failed status %04X\n", + ha->host_no, __func__, mbox_sts[0])); + return QLA_ERROR; + } else { + ql4_printk(KERN_INFO, ha, "%s: IDC timeout extended by %d secs\n", + __func__, ext_tmo); + } + + return QLA_SUCCESS; +} + int qla4xxx_disable_acb(struct scsi_qla_host *ha) { uint32_t mbox_cmd[MBOX_REG_COUNT]; @@ -1739,6 +1791,23 @@ int qla4xxx_disable_acb(struct scsi_qla_host *ha) DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: MBOX_CMD_DISABLE_ACB " "failed w/ status %04X %04X %04X", __func__, mbox_sts[0], mbox_sts[1], mbox_sts[2])); + } else { + if (is_qla8042(ha) && + (mbox_sts[0] != MBOX_STS_COMMAND_COMPLETE)) { + /* + * Disable ACB mailbox command takes time to complete + * based on the total number of targets connected. + * For 512 targets, it took approximately 5 secs to + * complete. Setting the timeout value to 8, with the 3 + * secs buffer. + */ + qla4_84xx_extend_idc_tmo(ha, IDC_EXTEND_TOV); + if (!wait_for_completion_timeout(&ha->disable_acb_comp, + IDC_EXTEND_TOV * HZ)) { + ql4_printk(KERN_WARNING, ha, "%s: Disable ACB Completion not received\n", + __func__); + } + } } return status; } @@ -2145,8 +2214,80 @@ int qla4_83xx_post_idc_ack(struct scsi_qla_host *ha) ql4_printk(KERN_ERR, ha, "%s: failed status %04X\n", __func__, mbox_sts[0]); else - DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IDC ACK posted\n", - __func__)); + ql4_printk(KERN_INFO, ha, "%s: IDC ACK posted\n", __func__); return status; } + +int qla4_84xx_config_acb(struct scsi_qla_host *ha, int acb_config) +{ + uint32_t mbox_cmd[MBOX_REG_COUNT]; + uint32_t mbox_sts[MBOX_REG_COUNT]; + struct addr_ctrl_blk *acb = NULL; + uint32_t acb_len = sizeof(struct addr_ctrl_blk); + int rval = QLA_SUCCESS; + dma_addr_t acb_dma; + + acb = dma_alloc_coherent(&ha->pdev->dev, + sizeof(struct addr_ctrl_blk), + &acb_dma, GFP_KERNEL); + if (!acb) { + ql4_printk(KERN_ERR, ha, "%s: Unable to alloc acb\n", __func__); + rval = QLA_ERROR; + goto exit_config_acb; + } + memset(acb, 0, acb_len); + + switch (acb_config) { + case ACB_CONFIG_DISABLE: + rval = qla4xxx_get_acb(ha, acb_dma, 0, acb_len); + if (rval != QLA_SUCCESS) + goto exit_free_acb; + + rval = qla4xxx_disable_acb(ha); + if (rval != QLA_SUCCESS) + goto exit_free_acb; + + if (!ha->saved_acb) + ha->saved_acb = kzalloc(acb_len, GFP_KERNEL); + + if (!ha->saved_acb) { + ql4_printk(KERN_ERR, ha, "%s: Unable to alloc acb\n", + __func__); + rval = QLA_ERROR; + goto exit_config_acb; + } + memcpy(ha->saved_acb, acb, acb_len); + break; + case ACB_CONFIG_SET: + + if (!ha->saved_acb) { + ql4_printk(KERN_ERR, ha, "%s: Can't set ACB, Saved ACB not available\n", + __func__); + rval = QLA_ERROR; + goto exit_free_acb; + } + + memcpy(acb, ha->saved_acb, acb_len); + kfree(ha->saved_acb); + ha->saved_acb = NULL; + + rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], acb_dma); + if (rval != QLA_SUCCESS) + goto exit_free_acb; 
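+		/* The saved copy is consumed up front (freed before
+		 * qla4xxx_set_acb() is even issued), so another
+		 * ACB_CONFIG_DISABLE pass must repopulate
+		 * ha->saved_acb before a further restore. */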
+ + break; + default: + ql4_printk(KERN_ERR, ha, "%s: Invalid ACB Configuration\n", + __func__); + } + +exit_free_acb: + dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk), acb, + acb_dma); +exit_config_acb: + DEBUG2(ql4_printk(KERN_INFO, ha, + "%s %s\n", __func__, + rval == QLA_SUCCESS ? "SUCCEEDED" : "FAILED")); + return rval; +} diff --git a/drivers/scsi/qla4xxx/ql4_nvram.c b/drivers/scsi/qla4xxx/ql4_nvram.c index 325db1f2c091..3bf418fbd432 100644 --- a/drivers/scsi/qla4xxx/ql4_nvram.c +++ b/drivers/scsi/qla4xxx/ql4_nvram.c @@ -1,6 +1,6 @@ /* * QLogic iSCSI HBA Driver - * Copyright (c) 2003-2012 QLogic Corporation + * Copyright (c) 2003-2013 QLogic Corporation * * See LICENSE.qla4xxx for copyright and licensing details. */ diff --git a/drivers/scsi/qla4xxx/ql4_nvram.h b/drivers/scsi/qla4xxx/ql4_nvram.h index dba0514d1c70..e97d79ff16f7 100644 --- a/drivers/scsi/qla4xxx/ql4_nvram.h +++ b/drivers/scsi/qla4xxx/ql4_nvram.h @@ -1,6 +1,6 @@ /* * QLogic iSCSI HBA Driver - * Copyright (c) 2003-2012 QLogic Corporation + * Copyright (c) 2003-2013 QLogic Corporation * * See LICENSE.qla4xxx for copyright and licensing details. */ diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c index eaf00c162eb2..d001202d3565 100644 --- a/drivers/scsi/qla4xxx/ql4_nx.c +++ b/drivers/scsi/qla4xxx/ql4_nx.c @@ -1,6 +1,6 @@ /* * QLogic iSCSI HBA Driver - * Copyright (c) 2003-2012 QLogic Corporation + * Copyright (c) 2003-2013 QLogic Corporation * * See LICENSE.qla4xxx for copyright and licensing details. */ @@ -1514,11 +1514,11 @@ qla4_8xxx_set_drv_active(struct scsi_qla_host *ha) drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE); /* - * For ISP8324, drv_active register has 1 bit per function, + * For ISP8324 and ISP8042, drv_active register has 1 bit per function, * shift 1 by func_num to set a bit for the function. * For ISP8022, drv_active has 4 bits per function */ - if (is_qla8032(ha)) + if (is_qla8032(ha) || is_qla8042(ha)) drv_active |= (1 << ha->func_num); else drv_active |= (1 << (ha->func_num * 4)); @@ -1536,11 +1536,11 @@ qla4_8xxx_clear_drv_active(struct scsi_qla_host *ha) drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE); /* - * For ISP8324, drv_active register has 1 bit per function, + * For ISP8324 and ISP8042, drv_active register has 1 bit per function, * shift 1 by func_num to set a bit for the function. * For ISP8022, drv_active has 4 bits per function */ - if (is_qla8032(ha)) + if (is_qla8032(ha) || is_qla8042(ha)) drv_active &= ~(1 << (ha->func_num)); else drv_active &= ~(1 << (ha->func_num * 4)); @@ -1559,11 +1559,11 @@ inline int qla4_8xxx_need_reset(struct scsi_qla_host *ha) drv_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE); /* - * For ISP8324, drv_active register has 1 bit per function, + * For ISP8324 and ISP8042, drv_active register has 1 bit per function, * shift 1 by func_num to set a bit for the function. * For ISP8022, drv_active has 4 bits per function */ - if (is_qla8032(ha)) + if (is_qla8032(ha) || is_qla8042(ha)) rval = drv_state & (1 << ha->func_num); else rval = drv_state & (1 << (ha->func_num * 4)); @@ -1581,11 +1581,11 @@ void qla4_8xxx_set_rst_ready(struct scsi_qla_host *ha) drv_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE); /* - * For ISP8324, drv_active register has 1 bit per function, + * For ISP8324 and ISP8042, drv_active register has 1 bit per function, * shift 1 by func_num to set a bit for the function. 
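 * (The quiescent variant below is the one asymmetry: ISP8022 writes
 * 2, not 1, into its four-bit per-function slot.)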
* For ISP8022, drv_active has 4 bits per function */ - if (is_qla8032(ha)) + if (is_qla8032(ha) || is_qla8042(ha)) drv_state |= (1 << ha->func_num); else drv_state |= (1 << (ha->func_num * 4)); @@ -1602,11 +1602,11 @@ void qla4_8xxx_clear_rst_ready(struct scsi_qla_host *ha) drv_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE); /* - * For ISP8324, drv_active register has 1 bit per function, + * For ISP8324 and ISP8042, drv_active register has 1 bit per function, * shift 1 by func_num to set a bit for the function. * For ISP8022, drv_active has 4 bits per function */ - if (is_qla8032(ha)) + if (is_qla8032(ha) || is_qla8042(ha)) drv_state &= ~(1 << ha->func_num); else drv_state &= ~(1 << (ha->func_num * 4)); @@ -1624,11 +1624,11 @@ qla4_8xxx_set_qsnt_ready(struct scsi_qla_host *ha) qsnt_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE); /* - * For ISP8324, drv_active register has 1 bit per function, + * For ISP8324 and ISP8042, drv_active register has 1 bit per function, * shift 1 by func_num to set a bit for the function. * For ISP8022, drv_active has 4 bits per function. */ - if (is_qla8032(ha)) + if (is_qla8032(ha) || is_qla8042(ha)) qsnt_state |= (1 << ha->func_num); else qsnt_state |= (2 << (ha->func_num * 4)); @@ -1737,6 +1737,208 @@ static void qla4_8xxx_minidump_process_rdcrb(struct scsi_qla_host *ha, *d_ptr = data_ptr; } +static int qla4_83xx_check_dma_engine_state(struct scsi_qla_host *ha) +{ + int rval = QLA_SUCCESS; + uint32_t dma_eng_num = 0, cmd_sts_and_cntrl = 0; + uint64_t dma_base_addr = 0; + struct qla4_8xxx_minidump_template_hdr *tmplt_hdr = NULL; + + tmplt_hdr = (struct qla4_8xxx_minidump_template_hdr *) + ha->fw_dump_tmplt_hdr; + dma_eng_num = + tmplt_hdr->saved_state_array[QLA83XX_PEX_DMA_ENGINE_INDEX]; + dma_base_addr = QLA83XX_PEX_DMA_BASE_ADDRESS + + (dma_eng_num * QLA83XX_PEX_DMA_NUM_OFFSET); + + /* Read the pex-dma's command-status-and-control register. */ + rval = ha->isp_ops->rd_reg_indirect(ha, + (dma_base_addr + QLA83XX_PEX_DMA_CMD_STS_AND_CNTRL), + &cmd_sts_and_cntrl); + + if (rval) + return QLA_ERROR; + + /* Check if requested pex-dma engine is available. */ + if (cmd_sts_and_cntrl & BIT_31) + return QLA_SUCCESS; + else + return QLA_ERROR; +} + +static int qla4_83xx_start_pex_dma(struct scsi_qla_host *ha, + struct qla4_83xx_minidump_entry_rdmem_pex_dma *m_hdr) +{ + int rval = QLA_SUCCESS, wait = 0; + uint32_t dma_eng_num = 0, cmd_sts_and_cntrl = 0; + uint64_t dma_base_addr = 0; + struct qla4_8xxx_minidump_template_hdr *tmplt_hdr = NULL; + + tmplt_hdr = (struct qla4_8xxx_minidump_template_hdr *) + ha->fw_dump_tmplt_hdr; + dma_eng_num = + tmplt_hdr->saved_state_array[QLA83XX_PEX_DMA_ENGINE_INDEX]; + dma_base_addr = QLA83XX_PEX_DMA_BASE_ADDRESS + + (dma_eng_num * QLA83XX_PEX_DMA_NUM_OFFSET); + + rval = ha->isp_ops->wr_reg_indirect(ha, + dma_base_addr + QLA83XX_PEX_DMA_CMD_ADDR_LOW, + m_hdr->desc_card_addr); + if (rval) + goto error_exit; + + rval = ha->isp_ops->wr_reg_indirect(ha, + dma_base_addr + QLA83XX_PEX_DMA_CMD_ADDR_HIGH, 0); + if (rval) + goto error_exit; + + rval = ha->isp_ops->wr_reg_indirect(ha, + dma_base_addr + QLA83XX_PEX_DMA_CMD_STS_AND_CNTRL, + m_hdr->start_dma_cmd); + if (rval) + goto error_exit; + + /* Wait for dma operation to complete. 
*/ + for (wait = 0; wait < QLA83XX_PEX_DMA_MAX_WAIT; wait++) { + rval = ha->isp_ops->rd_reg_indirect(ha, + (dma_base_addr + QLA83XX_PEX_DMA_CMD_STS_AND_CNTRL), + &cmd_sts_and_cntrl); + if (rval) + goto error_exit; + + if ((cmd_sts_and_cntrl & BIT_1) == 0) + break; + else + udelay(10); + } + + /* Wait a max of 100 ms, otherwise fallback to rdmem entry read */ + if (wait >= QLA83XX_PEX_DMA_MAX_WAIT) { + rval = QLA_ERROR; + goto error_exit; + } + +error_exit: + return rval; +} + +static int qla4_83xx_minidump_pex_dma_read(struct scsi_qla_host *ha, + struct qla8xxx_minidump_entry_hdr *entry_hdr, + uint32_t **d_ptr) +{ + int rval = QLA_SUCCESS; + struct qla4_83xx_minidump_entry_rdmem_pex_dma *m_hdr = NULL; + uint32_t size, read_size; + uint8_t *data_ptr = (uint8_t *)*d_ptr; + void *rdmem_buffer = NULL; + dma_addr_t rdmem_dma; + struct qla4_83xx_pex_dma_descriptor dma_desc; + + DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__)); + + rval = qla4_83xx_check_dma_engine_state(ha); + if (rval != QLA_SUCCESS) { + DEBUG2(ql4_printk(KERN_INFO, ha, + "%s: DMA engine not available. Fallback to rdmem-read.\n", + __func__)); + return QLA_ERROR; + } + + m_hdr = (struct qla4_83xx_minidump_entry_rdmem_pex_dma *)entry_hdr; + rdmem_buffer = dma_alloc_coherent(&ha->pdev->dev, + QLA83XX_PEX_DMA_READ_SIZE, + &rdmem_dma, GFP_KERNEL); + if (!rdmem_buffer) { + DEBUG2(ql4_printk(KERN_INFO, ha, + "%s: Unable to allocate rdmem dma buffer\n", + __func__)); + return QLA_ERROR; + } + + /* Prepare pex-dma descriptor to be written to MS memory. */ + /* dma-desc-cmd layout: + * 0-3: dma-desc-cmd 0-3 + * 4-7: pcid function number + * 8-15: dma-desc-cmd 8-15 + */ + dma_desc.cmd.dma_desc_cmd = (m_hdr->dma_desc_cmd & 0xff0f); + dma_desc.cmd.dma_desc_cmd |= ((PCI_FUNC(ha->pdev->devfn) & 0xf) << 0x4); + dma_desc.dma_bus_addr = rdmem_dma; + + size = 0; + read_size = 0; + /* + * Perform rdmem operation using pex-dma. + * Prepare dma in chunks of QLA83XX_PEX_DMA_READ_SIZE. + */ + while (read_size < m_hdr->read_data_size) { + if (m_hdr->read_data_size - read_size >= + QLA83XX_PEX_DMA_READ_SIZE) + size = QLA83XX_PEX_DMA_READ_SIZE; + else { + size = (m_hdr->read_data_size - read_size); + + if (rdmem_buffer) + dma_free_coherent(&ha->pdev->dev, + QLA83XX_PEX_DMA_READ_SIZE, + rdmem_buffer, rdmem_dma); + + rdmem_buffer = dma_alloc_coherent(&ha->pdev->dev, size, + &rdmem_dma, + GFP_KERNEL); + if (!rdmem_buffer) { + DEBUG2(ql4_printk(KERN_INFO, ha, + "%s: Unable to allocate rdmem dma buffer\n", + __func__)); + return QLA_ERROR; + } + dma_desc.dma_bus_addr = rdmem_dma; + } + + dma_desc.src_addr = m_hdr->read_addr + read_size; + dma_desc.cmd.read_data_size = size; + + /* Prepare: Write pex-dma descriptor to MS memory. */ + rval = qla4_83xx_ms_mem_write_128b(ha, + (uint64_t)m_hdr->desc_card_addr, + (uint32_t *)&dma_desc, + (sizeof(struct qla4_83xx_pex_dma_descriptor)/16)); + if (rval == -1) { + ql4_printk(KERN_INFO, ha, + "%s: Error writing rdmem-dma-init to MS !!!\n", + __func__); + goto error_exit; + } + + DEBUG2(ql4_printk(KERN_INFO, ha, + "%s: Dma-desc: Instruct for rdmem dma (size 0x%x).\n", + __func__, size)); + /* Execute: Start pex-dma operation. 
*/ + rval = qla4_83xx_start_pex_dma(ha, m_hdr); + if (rval != QLA_SUCCESS) { + DEBUG2(ql4_printk(KERN_INFO, ha, + "scsi(%ld): start-pex-dma failed rval=0x%x\n", + ha->host_no, rval)); + goto error_exit; + } + + memcpy(data_ptr, rdmem_buffer, size); + data_ptr += size; + read_size += size; + } + + DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s\n", __func__)); + + *d_ptr = (uint32_t *)data_ptr; + +error_exit: + if (rdmem_buffer) + dma_free_coherent(&ha->pdev->dev, size, rdmem_buffer, + rdmem_dma); + + return rval; +} + static int qla4_8xxx_minidump_process_l2tag(struct scsi_qla_host *ha, struct qla8xxx_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) @@ -2068,7 +2270,7 @@ static void qla4_82xx_minidump_process_rdrom(struct scsi_qla_host *ha, #define MD_MIU_TEST_AGT_ADDR_LO 0x41000094 #define MD_MIU_TEST_AGT_ADDR_HI 0x41000098 -static int qla4_8xxx_minidump_process_rdmem(struct scsi_qla_host *ha, +static int __qla4_8xxx_minidump_process_rdmem(struct scsi_qla_host *ha, struct qla8xxx_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) { @@ -2150,6 +2352,28 @@ static int qla4_8xxx_minidump_process_rdmem(struct scsi_qla_host *ha, return QLA_SUCCESS; } +static int qla4_8xxx_minidump_process_rdmem(struct scsi_qla_host *ha, + struct qla8xxx_minidump_entry_hdr *entry_hdr, + uint32_t **d_ptr) +{ + uint32_t *data_ptr = *d_ptr; + int rval = QLA_SUCCESS; + + if (is_qla8032(ha) || is_qla8042(ha)) { + rval = qla4_83xx_minidump_pex_dma_read(ha, entry_hdr, + &data_ptr); + if (rval != QLA_SUCCESS) { + rval = __qla4_8xxx_minidump_process_rdmem(ha, entry_hdr, + &data_ptr); + } + } else { + rval = __qla4_8xxx_minidump_process_rdmem(ha, entry_hdr, + &data_ptr); + } + *d_ptr = data_ptr; + return rval; +} + static void qla4_8xxx_mark_entry_skipped(struct scsi_qla_host *ha, struct qla8xxx_minidump_entry_hdr *entry_hdr, int index) @@ -2398,13 +2622,13 @@ static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha) (((uint8_t *)ha->fw_dump_tmplt_hdr) + tmplt_hdr->first_entry_offset); - if (is_qla8032(ha)) + if (is_qla8032(ha) || is_qla8042(ha)) tmplt_hdr->saved_state_array[QLA83XX_SS_OCM_WNDREG_INDEX] = tmplt_hdr->ocm_window_reg[ha->func_num]; /* Walk through the entry headers - validate/perform required action */ for (i = 0; i < num_entry_hdr; i++) { - if (data_collected >= ha->fw_dump_size) { + if (data_collected > ha->fw_dump_size) { ql4_printk(KERN_INFO, ha, "Data collected: [0x%x], Total Dump size: [0x%x]\n", data_collected, ha->fw_dump_size); @@ -2455,7 +2679,7 @@ static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha) if (is_qla8022(ha)) { qla4_82xx_minidump_process_rdrom(ha, entry_hdr, &data_ptr); - } else if (is_qla8032(ha)) { + } else if (is_qla8032(ha) || is_qla8042(ha)) { rval = qla4_83xx_minidump_process_rdrom(ha, entry_hdr, &data_ptr); @@ -2496,7 +2720,7 @@ static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha) &data_ptr); break; case QLA83XX_POLLRD: - if (!is_qla8032(ha)) { + if (is_qla8022(ha)) { qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i); break; } @@ -2506,7 +2730,7 @@ static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha) qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i); break; case QLA83XX_RDMUX2: - if (!is_qla8032(ha)) { + if (is_qla8022(ha)) { qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i); break; } @@ -2514,7 +2738,7 @@ static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha) &data_ptr); break; case QLA83XX_POLLRDMWR: - if (!is_qla8032(ha)) { + if (is_qla8022(ha)) { qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i); break; } @@ -2529,9 +2753,7 @@ static int 
qla4_8xxx_collect_md_data(struct scsi_qla_host *ha) break; } - data_collected = (uint8_t *)data_ptr - - ((uint8_t *)((uint8_t *)ha->fw_dump + - ha->fw_dump_tmplt_size)); + data_collected = (uint8_t *)data_ptr - (uint8_t *)ha->fw_dump; skip_nxt_entry: /* next entry in the template */ entry_hdr = (struct qla8xxx_minidump_entry_hdr *) @@ -2539,10 +2761,11 @@ skip_nxt_entry: entry_hdr->entry_size); } - if ((data_collected + ha->fw_dump_tmplt_size) != ha->fw_dump_size) { + if (data_collected != ha->fw_dump_size) { ql4_printk(KERN_INFO, ha, "Dump data mismatch: Data collected: [0x%x], total_data_size:[0x%x]\n", data_collected, ha->fw_dump_size); + rval = QLA_ERROR; goto md_failed; } @@ -2642,10 +2865,10 @@ dev_initialize: QLA8XXX_DEV_INITIALIZING); /* - * For ISP8324, if IDC_CTRL GRACEFUL_RESET_BIT1 is set, reset it after - * device goes to INIT state. + * For ISP8324 and ISP8042, if IDC_CTRL GRACEFUL_RESET_BIT1 is set, + * reset it after device goes to INIT state. */ - if (is_qla8032(ha)) { + if (is_qla8032(ha) || is_qla8042(ha)) { idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL); if (idc_ctrl & GRACEFUL_RESET_BIT1) { qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL, @@ -2846,7 +3069,7 @@ int qla4_8xxx_update_idc_reg(struct scsi_qla_host *ha) * If we are the first driver to load and * ql4xdontresethba is not set, clear IDC_CTRL BIT0. */ - if (is_qla8032(ha)) { + if (is_qla8032(ha) || is_qla8042(ha)) { drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE); if ((drv_active == (1 << ha->func_num)) && !ql4xdontresethba) qla4_83xx_clear_idc_dontreset(ha); @@ -2854,7 +3077,7 @@ int qla4_8xxx_update_idc_reg(struct scsi_qla_host *ha) if (is_qla8022(ha)) { qla4_82xx_set_idc_ver(ha); - } else if (is_qla8032(ha)) { + } else if (is_qla8032(ha) || is_qla8042(ha)) { rval = qla4_83xx_set_idc_ver(ha); if (rval == QLA_ERROR) qla4_8xxx_clear_drv_active(ha); @@ -2922,11 +3145,11 @@ int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha) break; case QLA8XXX_DEV_NEED_RESET: /* - * For ISP8324, if NEED_RESET is set by any driver, - * it should be honored, irrespective of IDC_CTRL - * DONTRESET_BIT0 + * For ISP8324 and ISP8042, if NEED_RESET is set by any + * driver, it should be honored, irrespective of + * IDC_CTRL DONTRESET_BIT0 */ - if (is_qla8032(ha)) { + if (is_qla8032(ha) || is_qla8042(ha)) { qla4_83xx_need_reset_handler(ha); } else if (is_qla8022(ha)) { if (!ql4xdontresethba) { @@ -2976,7 +3199,7 @@ int qla4_8xxx_load_risc(struct scsi_qla_host *ha) int retval; /* clear the interrupt */ - if (is_qla8032(ha)) { + if (is_qla8032(ha) || is_qla8042(ha)) { writel(0, &ha->qla4_83xx_reg->risc_intr); readl(&ha->qla4_83xx_reg->risc_intr); } else if (is_qla8022(ha)) { @@ -3094,7 +3317,7 @@ qla4_8xxx_get_flt_info(struct scsi_qla_host *ha, uint32_t flt_addr) if (is_qla8022(ha)) { qla4_82xx_read_optrom_data(ha, (uint8_t *)ha->request_ring, flt_addr << 2, OPTROM_BURST_SIZE); - } else if (is_qla8032(ha)) { + } else if (is_qla8032(ha) || is_qla8042(ha)) { status = qla4_83xx_flash_read_u32(ha, flt_addr << 2, (uint8_t *)ha->request_ring, 0x400); @@ -3326,7 +3549,7 @@ qla4_8xxx_get_flash_info(struct scsi_qla_host *ha) if (is_qla8022(ha)) { qla4_82xx_get_fdt_info(ha); qla4_82xx_get_idc_param(ha); - } else if (is_qla8032(ha)) { + } else if (is_qla8032(ha) || is_qla8042(ha)) { qla4_83xx_get_idc_param(ha); } @@ -3436,7 +3659,7 @@ int qla4_8xxx_get_sys_info(struct scsi_qla_host *ha) } /* Make sure we receive the minimum required data to cache internally */ - if ((is_qla8032(ha) ? 
mbox_sts[3] : mbox_sts[4]) < + if (((is_qla8032(ha) || is_qla8042(ha)) ? mbox_sts[3] : mbox_sts[4]) < offsetof(struct mbx_sys_info, reserved)) { DEBUG2(printk("scsi%ld: %s: GET_SYS_INFO data receive" " error (%x)\n", ha->host_no, __func__, mbox_sts[4])); diff --git a/drivers/scsi/qla4xxx/ql4_nx.h b/drivers/scsi/qla4xxx/ql4_nx.h index 9dc0bbfe50d5..14500a0f62cc 100644 --- a/drivers/scsi/qla4xxx/ql4_nx.h +++ b/drivers/scsi/qla4xxx/ql4_nx.h @@ -1,6 +1,6 @@ /* * QLogic iSCSI HBA Driver - * Copyright (c) 2003-2012 QLogic Corporation + * Copyright (c) 2003-2013 QLogic Corporation * * See LICENSE.qla4xxx for copyright and licensing details. */ diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index b246b3c26912..f8a0a26a3cd4 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c @@ -1,6 +1,6 @@ /* * QLogic iSCSI HBA Driver - * Copyright (c) 2003-2012 QLogic Corporation + * Copyright (c) 2003-2013 QLogic Corporation * * See LICENSE.qla4xxx for copyright and licensing details. */ @@ -378,6 +378,44 @@ static umode_t qla4_attr_is_visible(int param_type, int param) case ISCSI_PARAM_PASSWORD: case ISCSI_PARAM_USERNAME_IN: case ISCSI_PARAM_PASSWORD_IN: + case ISCSI_PARAM_AUTO_SND_TGT_DISABLE: + case ISCSI_PARAM_DISCOVERY_SESS: + case ISCSI_PARAM_PORTAL_TYPE: + case ISCSI_PARAM_CHAP_AUTH_EN: + case ISCSI_PARAM_DISCOVERY_LOGOUT_EN: + case ISCSI_PARAM_BIDI_CHAP_EN: + case ISCSI_PARAM_DISCOVERY_AUTH_OPTIONAL: + case ISCSI_PARAM_DEF_TIME2WAIT: + case ISCSI_PARAM_DEF_TIME2RETAIN: + case ISCSI_PARAM_HDRDGST_EN: + case ISCSI_PARAM_DATADGST_EN: + case ISCSI_PARAM_INITIAL_R2T_EN: + case ISCSI_PARAM_IMM_DATA_EN: + case ISCSI_PARAM_PDU_INORDER_EN: + case ISCSI_PARAM_DATASEQ_INORDER_EN: + case ISCSI_PARAM_MAX_SEGMENT_SIZE: + case ISCSI_PARAM_TCP_TIMESTAMP_STAT: + case ISCSI_PARAM_TCP_WSF_DISABLE: + case ISCSI_PARAM_TCP_NAGLE_DISABLE: + case ISCSI_PARAM_TCP_TIMER_SCALE: + case ISCSI_PARAM_TCP_TIMESTAMP_EN: + case ISCSI_PARAM_TCP_XMIT_WSF: + case ISCSI_PARAM_TCP_RECV_WSF: + case ISCSI_PARAM_IP_FRAGMENT_DISABLE: + case ISCSI_PARAM_IPV4_TOS: + case ISCSI_PARAM_IPV6_TC: + case ISCSI_PARAM_IPV6_FLOW_LABEL: + case ISCSI_PARAM_IS_FW_ASSIGNED_IPV6: + case ISCSI_PARAM_KEEPALIVE_TMO: + case ISCSI_PARAM_LOCAL_PORT: + case ISCSI_PARAM_ISID: + case ISCSI_PARAM_TSID: + case ISCSI_PARAM_DEF_TASKMGMT_TMO: + case ISCSI_PARAM_ERL: + case ISCSI_PARAM_STATSN: + case ISCSI_PARAM_EXP_STATSN: + case ISCSI_PARAM_DISCOVERY_PARENT_IDX: + case ISCSI_PARAM_DISCOVERY_PARENT_TYPE: return S_IRUGO; default: return 0; @@ -2218,19 +2256,23 @@ static int qla4xxx_copy_to_fwddb_param(struct iscsi_bus_flash_session *sess, fw_ddb_entry->mss = cpu_to_le16(conn->max_segment_size); fw_ddb_entry->tcp_xmt_wsf = (uint8_t) cpu_to_le32(conn->tcp_xmit_wsf); fw_ddb_entry->tcp_rcv_wsf = (uint8_t) cpu_to_le32(conn->tcp_recv_wsf); - fw_ddb_entry->ipv4_tos = conn->ipv4_tos; fw_ddb_entry->ipv6_flow_lbl = cpu_to_le16(conn->ipv6_flow_label); fw_ddb_entry->ka_timeout = cpu_to_le16(conn->keepalive_timeout); fw_ddb_entry->lcl_port = cpu_to_le16(conn->local_port); fw_ddb_entry->stat_sn = cpu_to_le32(conn->statsn); fw_ddb_entry->exp_stat_sn = cpu_to_le32(conn->exp_statsn); - fw_ddb_entry->ddb_link = cpu_to_le16(sess->discovery_parent_type); + fw_ddb_entry->ddb_link = cpu_to_le16(sess->discovery_parent_idx); fw_ddb_entry->chap_tbl_idx = cpu_to_le16(sess->chap_out_idx); fw_ddb_entry->tsid = cpu_to_le16(sess->tsid); fw_ddb_entry->port = cpu_to_le16(conn->port); fw_ddb_entry->def_timeout = 
cpu_to_le16(sess->default_taskmgmt_timeout); + if (!strncmp(sess->portal_type, PORTAL_TYPE_IPV6, 4)) + fw_ddb_entry->ipv4_tos = conn->ipv6_traffic_class; + else + fw_ddb_entry->ipv4_tos = conn->ipv4_tos; + if (conn->ipaddress) memcpy(fw_ddb_entry->ip_addr, conn->ipaddress, sizeof(fw_ddb_entry->ip_addr)); @@ -2257,6 +2299,101 @@ static int qla4xxx_copy_to_fwddb_param(struct iscsi_bus_flash_session *sess, return rc; } +static void qla4xxx_copy_to_sess_conn_params(struct iscsi_conn *conn, + struct iscsi_session *sess, + struct dev_db_entry *fw_ddb_entry) +{ + unsigned long options = 0; + uint16_t ddb_link; + uint16_t disc_parent; + + options = le16_to_cpu(fw_ddb_entry->options); + conn->is_fw_assigned_ipv6 = test_bit(OPT_IS_FW_ASSIGNED_IPV6, &options); + sess->auto_snd_tgt_disable = test_bit(OPT_AUTO_SENDTGTS_DISABLE, + &options); + sess->discovery_sess = test_bit(OPT_DISC_SESSION, &options); + + options = le16_to_cpu(fw_ddb_entry->iscsi_options); + conn->hdrdgst_en = test_bit(ISCSIOPT_HEADER_DIGEST_EN, &options); + conn->datadgst_en = test_bit(ISCSIOPT_DATA_DIGEST_EN, &options); + sess->imm_data_en = test_bit(ISCSIOPT_IMMEDIATE_DATA_EN, &options); + sess->initial_r2t_en = test_bit(ISCSIOPT_INITIAL_R2T_EN, &options); + sess->dataseq_inorder_en = test_bit(ISCSIOPT_DATA_SEQ_IN_ORDER, + &options); + sess->pdu_inorder_en = test_bit(ISCSIOPT_DATA_PDU_IN_ORDER, &options); + sess->chap_auth_en = test_bit(ISCSIOPT_CHAP_AUTH_EN, &options); + sess->discovery_logout_en = test_bit(ISCSIOPT_DISCOVERY_LOGOUT_EN, + &options); + sess->bidi_chap_en = test_bit(ISCSIOPT_BIDI_CHAP_EN, &options); + sess->discovery_auth_optional = + test_bit(ISCSIOPT_DISCOVERY_AUTH_OPTIONAL, &options); + if (test_bit(ISCSIOPT_ERL1, &options)) + sess->erl |= BIT_1; + if (test_bit(ISCSIOPT_ERL0, &options)) + sess->erl |= BIT_0; + + options = le16_to_cpu(fw_ddb_entry->tcp_options); + conn->tcp_timestamp_stat = test_bit(TCPOPT_TIMESTAMP_STAT, &options); + conn->tcp_nagle_disable = test_bit(TCPOPT_NAGLE_DISABLE, &options); + conn->tcp_wsf_disable = test_bit(TCPOPT_WSF_DISABLE, &options); + if (test_bit(TCPOPT_TIMER_SCALE3, &options)) + conn->tcp_timer_scale |= BIT_3; + if (test_bit(TCPOPT_TIMER_SCALE2, &options)) + conn->tcp_timer_scale |= BIT_2; + if (test_bit(TCPOPT_TIMER_SCALE1, &options)) + conn->tcp_timer_scale |= BIT_1; + + conn->tcp_timer_scale >>= 1; + conn->tcp_timestamp_en = test_bit(TCPOPT_TIMESTAMP_EN, &options); + + options = le16_to_cpu(fw_ddb_entry->ip_options); + conn->fragment_disable = test_bit(IPOPT_FRAGMENT_DISABLE, &options); + + conn->max_recv_dlength = BYTE_UNITS * + le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len); + conn->max_xmit_dlength = BYTE_UNITS * + le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len); + sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t); + sess->first_burst = BYTE_UNITS * + le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len); + sess->max_burst = BYTE_UNITS * + le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len); + sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait); + sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain); + sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp); + conn->max_segment_size = le16_to_cpu(fw_ddb_entry->mss); + conn->tcp_xmit_wsf = fw_ddb_entry->tcp_xmt_wsf; + conn->tcp_recv_wsf = fw_ddb_entry->tcp_rcv_wsf; + conn->ipv4_tos = fw_ddb_entry->ipv4_tos; + conn->keepalive_tmo = le16_to_cpu(fw_ddb_entry->ka_timeout); + conn->local_port = le16_to_cpu(fw_ddb_entry->lcl_port); + conn->statsn = le32_to_cpu(fw_ddb_entry->stat_sn); + 
conn->exp_statsn = le32_to_cpu(fw_ddb_entry->exp_stat_sn); + sess->tsid = le16_to_cpu(fw_ddb_entry->tsid); + COPY_ISID(sess->isid, fw_ddb_entry->isid); + + ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link); + if (ddb_link < MAX_DDB_ENTRIES) + sess->discovery_parent_idx = ddb_link; + else + sess->discovery_parent_idx = DDB_NO_LINK; + + if (ddb_link == DDB_ISNS) + disc_parent = ISCSI_DISC_PARENT_ISNS; + else if (ddb_link == DDB_NO_LINK) + disc_parent = ISCSI_DISC_PARENT_UNKNOWN; + else if (ddb_link < MAX_DDB_ENTRIES) + disc_parent = ISCSI_DISC_PARENT_SENDTGT; + else + disc_parent = ISCSI_DISC_PARENT_UNKNOWN; + + iscsi_set_param(conn->cls_conn, ISCSI_PARAM_DISCOVERY_PARENT_TYPE, + iscsi_get_discovery_parent_name(disc_parent), 0); + + iscsi_set_param(conn->cls_conn, ISCSI_PARAM_TARGET_ALIAS, + (char *)fw_ddb_entry->iscsi_alias, 0); +} + static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha, struct dev_db_entry *fw_ddb_entry, struct iscsi_cls_session *cls_sess, @@ -2275,47 +2412,29 @@ static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha, ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx); - conn->max_recv_dlength = BYTE_UNITS * - le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len); - - conn->max_xmit_dlength = BYTE_UNITS * - le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len); - - sess->initial_r2t_en = - (BIT_10 & le16_to_cpu(fw_ddb_entry->iscsi_options)); - - sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t); - - sess->imm_data_en = (BIT_11 & le16_to_cpu(fw_ddb_entry->iscsi_options)); - - sess->first_burst = BYTE_UNITS * - le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len); - - sess->max_burst = BYTE_UNITS * - le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len); - - sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait); - - sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain); + qla4xxx_copy_to_sess_conn_params(conn, sess, fw_ddb_entry); + sess->def_taskmgmt_tmo = le16_to_cpu(fw_ddb_entry->def_timeout); conn->persistent_port = le16_to_cpu(fw_ddb_entry->port); - sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp); - + memset(ip_addr, 0, sizeof(ip_addr)); options = le16_to_cpu(fw_ddb_entry->options); - if (options & DDB_OPT_IPV6_DEVICE) + if (options & DDB_OPT_IPV6_DEVICE) { + iscsi_set_param(cls_conn, ISCSI_PARAM_PORTAL_TYPE, "ipv6", 4); + + memset(ip_addr, 0, sizeof(ip_addr)); sprintf(ip_addr, "%pI6", fw_ddb_entry->ip_addr); - else + } else { + iscsi_set_param(cls_conn, ISCSI_PARAM_PORTAL_TYPE, "ipv4", 4); sprintf(ip_addr, "%pI4", fw_ddb_entry->ip_addr); + } + iscsi_set_param(cls_conn, ISCSI_PARAM_PERSISTENT_ADDRESS, + (char *)ip_addr, buflen); iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_NAME, (char *)fw_ddb_entry->iscsi_name, buflen); iscsi_set_param(cls_conn, ISCSI_PARAM_INITIATOR_NAME, (char *)ha->name_string, buflen); - iscsi_set_param(cls_conn, ISCSI_PARAM_PERSISTENT_ADDRESS, - (char *)ip_addr, buflen); - iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_ALIAS, - (char *)fw_ddb_entry->iscsi_alias, buflen); } void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha, @@ -2403,37 +2522,11 @@ void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha, /* Update params */ ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx); - conn->max_recv_dlength = BYTE_UNITS * - le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len); - - conn->max_xmit_dlength = BYTE_UNITS * - le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len); - - sess->initial_r2t_en = - (BIT_10 & le16_to_cpu(fw_ddb_entry->iscsi_options)); 
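/*
 * Editorial note, not part of the commit: the open-coded BIT_10/BIT_11
 * masks being removed here are the InitialR2T and ImmediateData option
 * bits. The new qla4xxx_copy_to_sess_conn_params() helper derives them
 * with test_bit() on the le16-decoded word, roughly (assuming
 * ISCSIOPT_INITIAL_R2T_EN == 10 and ISCSIOPT_IMMEDIATE_DATA_EN == 11,
 * matching the old masks):
 *
 *   unsigned long opts = le16_to_cpu(fw_ddb_entry->iscsi_options);
 *
 *   sess->initial_r2t_en = test_bit(ISCSIOPT_INITIAL_R2T_EN, &opts);
 *   sess->imm_data_en    = test_bit(ISCSIOPT_IMMEDIATE_DATA_EN, &opts);
 */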
- - sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t); - - sess->imm_data_en = (BIT_11 & le16_to_cpu(fw_ddb_entry->iscsi_options)); - - sess->first_burst = BYTE_UNITS * - le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len); - - sess->max_burst = BYTE_UNITS * - le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len); - - sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait); - - sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain); - - sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp); + qla4xxx_copy_to_sess_conn_params(conn, sess, fw_ddb_entry); memcpy(sess->initiatorname, ha->name_string, min(sizeof(ha->name_string), sizeof(sess->initiatorname))); - iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_ALIAS, - (char *)fw_ddb_entry->iscsi_alias, 0); - exit_session_conn_param: if (fw_ddb_entry) dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), @@ -2578,6 +2671,8 @@ static int qla4xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) !test_bit(AF_ONLINE, &ha->flags) || !test_bit(AF_LINK_UP, &ha->flags) || test_bit(AF_LOOPBACK, &ha->flags) || + test_bit(DPC_POST_IDC_ACK, &ha->dpc_flags) || + test_bit(DPC_RESTORE_ACB, &ha->dpc_flags) || test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) goto qc_host_busy; @@ -2652,7 +2747,7 @@ static void qla4xxx_mem_free(struct scsi_qla_host *ha) if (ha->nx_pcibase) iounmap( (struct device_reg_82xx __iomem *)ha->nx_pcibase); - } else if (is_qla8032(ha)) { + } else if (is_qla8032(ha) || is_qla8042(ha)) { if (ha->nx_pcibase) iounmap( (struct device_reg_83xx __iomem *)ha->nx_pcibase); @@ -2846,7 +2941,7 @@ static void qla4_8xxx_process_fw_error(struct scsi_qla_host *ha) __func__); if (halt_status & HALT_STATUS_UNRECOVERABLE) halt_status_unrecoverable = 1; - } else if (is_qla8032(ha)) { + } else if (is_qla8032(ha) || is_qla8042(ha)) { if (halt_status & QLA83XX_HALT_STATUS_FW_RESET) ql4_printk(KERN_ERR, ha, "%s: Firmware error detected device is being reset\n", __func__); @@ -2901,7 +2996,7 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha) ql4_printk(KERN_INFO, ha, "%s: HW State: NEED RESET!\n", __func__); - if (is_qla8032(ha)) { + if (is_qla8032(ha) || is_qla8042(ha)) { idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL); if (!(idc_ctrl & GRACEFUL_RESET_BIT1)) { @@ -2912,7 +3007,7 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha) } } - if (is_qla8032(ha) || + if ((is_qla8032(ha) || is_qla8042(ha)) || (is_qla8022(ha) && !ql4xdontresethba)) { set_bit(DPC_RESET_HA, &ha->dpc_flags); qla4xxx_wake_dpc(ha); @@ -3296,7 +3391,7 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha) set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags); - if (is_qla8032(ha) && + if ((is_qla8032(ha) || is_qla8042(ha)) && !test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) { ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n", __func__); @@ -3494,7 +3589,9 @@ static void qla4xxx_relogin_devices(struct iscsi_cls_session *cls_session) } else { /* Trigger relogin */ if (ddb_entry->ddb_type == FLASH_DDB) { - if (!test_bit(DF_RELOGIN, &ddb_entry->flags)) + if (!(test_bit(DF_RELOGIN, &ddb_entry->flags) || + test_bit(DF_DISABLE_RELOGIN, + &ddb_entry->flags))) qla4xxx_arm_relogin_timer(ddb_entry); } else iscsi_session_failure(cls_session->dd_data, @@ -3597,6 +3694,9 @@ static void qla4xxx_dpc_relogin(struct iscsi_cls_session *cls_sess) if (!(ddb_entry->ddb_type == FLASH_DDB)) return; + if (test_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags)) + return; + if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags) && 
!iscsi_is_session_online(cls_sess)) { DEBUG2(ql4_printk(KERN_INFO, ha, @@ -3750,7 +3850,7 @@ static void qla4xxx_do_dpc(struct work_struct *work) if (is_qla80XX(ha)) { if (test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags)) { - if (is_qla8032(ha)) { + if (is_qla8032(ha) || is_qla8042(ha)) { ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n", __func__); /* disable pause frame for ISP83xx */ @@ -3765,8 +3865,35 @@ static void qla4xxx_do_dpc(struct work_struct *work) qla4_8xxx_device_state_handler(ha); } - if (test_and_clear_bit(DPC_POST_IDC_ACK, &ha->dpc_flags)) + if (test_bit(DPC_POST_IDC_ACK, &ha->dpc_flags)) { + if (is_qla8042(ha)) { + if (ha->idc_info.info2 & + ENABLE_INTERNAL_LOOPBACK) { + ql4_printk(KERN_INFO, ha, "%s: Disabling ACB\n", + __func__); + status = qla4_84xx_config_acb(ha, + ACB_CONFIG_DISABLE); + if (status != QLA_SUCCESS) { + ql4_printk(KERN_INFO, ha, "%s: ACB config failed\n", + __func__); + } + } + } qla4_83xx_post_idc_ack(ha); + clear_bit(DPC_POST_IDC_ACK, &ha->dpc_flags); + } + + if (is_qla8042(ha) && + test_bit(DPC_RESTORE_ACB, &ha->dpc_flags)) { + ql4_printk(KERN_INFO, ha, "%s: Restoring ACB\n", + __func__); + if (qla4_84xx_config_acb(ha, ACB_CONFIG_SET) != + QLA_SUCCESS) { + ql4_printk(KERN_INFO, ha, "%s: ACB config failed ", + __func__); + } + clear_bit(DPC_RESTORE_ACB, &ha->dpc_flags); + } if (test_and_clear_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) { qla4_8xxx_need_qsnt_handler(ha); @@ -3778,7 +3905,8 @@ static void qla4xxx_do_dpc(struct work_struct *work) test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) || test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))) { if ((is_qla8022(ha) && ql4xdontresethba) || - (is_qla8032(ha) && qla4_83xx_idc_dontreset(ha))) { + ((is_qla8032(ha) || is_qla8042(ha)) && + qla4_83xx_idc_dontreset(ha))) { DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n", ha->host_no, __func__)); clear_bit(DPC_RESET_HA, &ha->dpc_flags); @@ -3870,7 +3998,7 @@ static void qla4xxx_free_adapter(struct scsi_qla_host *ha) } else if (is_qla8022(ha)) { writel(0, &ha->qla4_82xx_reg->host_int); readl(&ha->qla4_82xx_reg->host_int); - } else if (is_qla8032(ha)) { + } else if (is_qla8032(ha) || is_qla8042(ha)) { writel(0, &ha->qla4_83xx_reg->risc_intr); readl(&ha->qla4_83xx_reg->risc_intr); } @@ -3945,7 +4073,7 @@ int qla4_8xxx_iospace_config(struct scsi_qla_host *ha) (ha->pdev->devfn << 11)); ha->nx_db_wr_ptr = (ha->pdev->devfn == 4 ? 
QLA82XX_CAM_RAM_DB1 : QLA82XX_CAM_RAM_DB2); - } else if (is_qla8032(ha)) { + } else if (is_qla8032(ha) || is_qla8042(ha)) { ha->qla4_83xx_reg = (struct device_reg_83xx __iomem *) ((uint8_t *)ha->nx_pcibase); } @@ -5609,7 +5737,8 @@ static int qla4xxx_sysfs_ddb_add(struct Scsi_Host *shost, const char *buf, goto exit_ddb_add; } - for (idx = 0; idx < max_ddbs; idx++) { + /* Index 0 and 1 are reserved for boot target entries */ + for (idx = 2; idx < max_ddbs; idx++) { if (qla4xxx_flashdb_by_index(ha, fw_ddb_entry, fw_ddb_entry_dma, idx)) break; @@ -5925,13 +6054,6 @@ static int qla4xxx_sysfs_ddb_logout_sid(struct iscsi_cls_session *cls_sess) goto exit_ddb_logout; } - options = LOGOUT_OPTION_CLOSE_SESSION; - if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) == QLA_ERROR) { - ql4_printk(KERN_ERR, ha, "%s: Logout failed\n", __func__); - ret = -EIO; - goto exit_ddb_logout; - } - fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), &fw_ddb_entry_dma, GFP_KERNEL); if (!fw_ddb_entry) { @@ -5941,6 +6063,38 @@ static int qla4xxx_sysfs_ddb_logout_sid(struct iscsi_cls_session *cls_sess) goto exit_ddb_logout; } + if (test_and_set_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags)) + goto ddb_logout_init; + + ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, + fw_ddb_entry, fw_ddb_entry_dma, + NULL, NULL, &ddb_state, NULL, + NULL, NULL); + if (ret == QLA_ERROR) + goto ddb_logout_init; + + if (ddb_state == DDB_DS_SESSION_ACTIVE) + goto ddb_logout_init; + + /* wait until next relogin is triggered using DF_RELOGIN and + * clear DF_RELOGIN to avoid invocation of further relogin + */ + wtime = jiffies + (HZ * RELOGIN_TOV); + do { + if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags)) + goto ddb_logout_init; + + schedule_timeout_uninterruptible(HZ); + } while ((time_after(wtime, jiffies))); + +ddb_logout_init: + atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY); + atomic_set(&ddb_entry->relogin_timer, 0); + + options = LOGOUT_OPTION_CLOSE_SESSION; + qla4xxx_session_logout_ddb(ha, ddb_entry, options); + + memset(fw_ddb_entry, 0, sizeof(*fw_ddb_entry)); wtime = jiffies + (HZ * LOGOUT_TOV); do { ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, @@ -5970,10 +6124,12 @@ ddb_logout_clr_sess: spin_lock_irqsave(&ha->hardware_lock, flags); qla4xxx_free_ddb(ha, ddb_entry); + clear_bit(ddb_entry->fw_ddb_index, ha->ddb_idx_map); spin_unlock_irqrestore(&ha->hardware_lock, flags); iscsi_session_teardown(ddb_entry->sess); + clear_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags); ret = QLA_SUCCESS; exit_ddb_logout: @@ -6110,7 +6266,7 @@ qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess, struct iscsi_bus_flash_conn *fnode_conn; struct ql4_chap_table chap_tbl; struct device *dev; - int parent_type, parent_index = 0xffff; + int parent_type; int rc = 0; dev = iscsi_find_flashnode_conn(fnode_sess); @@ -6276,10 +6432,7 @@ qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess, rc = sprintf(buf, "\n"); break; case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX: - if (fnode_sess->discovery_parent_idx < MAX_DDB_ENTRIES) - parent_index = fnode_sess->discovery_parent_idx; - - rc = sprintf(buf, "%u\n", parent_index); + rc = sprintf(buf, "%u\n", fnode_sess->discovery_parent_idx); break; case ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE: if (fnode_sess->discovery_parent_type == DDB_ISNS) @@ -6533,8 +6686,8 @@ qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess, memcpy(fnode_conn->link_local_ipv6_addr, fnode_param->value, IPv6_ADDR_LEN); break; - case 
ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE: - fnode_sess->discovery_parent_type = + case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX: + fnode_sess->discovery_parent_idx = *(uint16_t *)fnode_param->value; break; case ISCSI_FLASHNODE_TCP_XMIT_WSF: @@ -6910,7 +7063,7 @@ static int qla4xxx_probe_adapter(struct pci_dev *pdev, nx_legacy_intr->tgt_status_reg; ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg; ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg; - } else if (is_qla8032(ha)) { + } else if (is_qla8032(ha) || is_qla8042(ha)) { ha->isp_ops = &qla4_83xx_isp_ops; ha->reg_tbl = (uint32_t *)qla4_83xx_reg_tbl; } else { @@ -6981,7 +7134,7 @@ static int qla4xxx_probe_adapter(struct pci_dev *pdev, if (is_qla80XX(ha)) qla4_8xxx_get_flash_info(ha); - if (is_qla8032(ha)) { + if (is_qla8032(ha) || is_qla8042(ha)) { qla4_83xx_read_reset_template(ha); /* * NOTE: If ql4dontresethba==1, set IDC_CTRL DONTRESET_BIT0. @@ -7036,7 +7189,8 @@ skip_retry_init: ql4_printk(KERN_WARNING, ha, "Failed to initialize adapter\n"); if ((is_qla8022(ha) && ql4xdontresethba) || - (is_qla8032(ha) && qla4_83xx_idc_dontreset(ha))) { + ((is_qla8032(ha) || is_qla8042(ha)) && + qla4_83xx_idc_dontreset(ha))) { /* Put the device in failed state. */ DEBUG2(printk(KERN_ERR "HW STATE: FAILED\n")); ha->isp_ops->idc_lock(ha); @@ -7097,8 +7251,8 @@ skip_retry_init: " QLogic iSCSI HBA Driver version: %s\n" " QLogic ISP%04x @ %s, host#=%ld, fw=%02d.%02d.%02d.%02d\n", qla4xxx_version_str, ha->pdev->device, pci_name(ha->pdev), - ha->host_no, ha->firmware_version[0], ha->firmware_version[1], - ha->patch_number, ha->build_number); + ha->host_no, ha->fw_info.fw_major, ha->fw_info.fw_minor, + ha->fw_info.fw_patch, ha->fw_info.fw_build); /* Set the driver version */ if (is_qla80XX(ha)) @@ -7645,16 +7799,16 @@ static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd) ha = to_qla_host(cmd->device->host); - if (is_qla8032(ha) && ql4xdontresethba) + if ((is_qla8032(ha) || is_qla8042(ha)) && ql4xdontresethba) qla4_83xx_set_idc_dontreset(ha); /* - * For ISP8324, if IDC_CTRL DONTRESET_BIT0 is set by other - * protocol drivers, we should not set device_state to - * NEED_RESET + * For ISP8324 and ISP8042, if IDC_CTRL DONTRESET_BIT0 is set by other + * protocol drivers, we should not set device_state to NEED_RESET */ if (ql4xdontresethba || - (is_qla8032(ha) && qla4_83xx_idc_dontreset(ha))) { + ((is_qla8032(ha) || is_qla8042(ha)) && + qla4_83xx_idc_dontreset(ha))) { DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n", ha->host_no, __func__)); @@ -7779,9 +7933,10 @@ static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type) } recover_adapter: - /* For ISP83XX set graceful reset bit in IDC_DRV_CTRL if + /* For ISP8324 and ISP8042 set graceful reset bit in IDC_DRV_CTRL if * reset is issued by application */ - if (is_qla8032(ha) && test_bit(DPC_RESET_HA, &ha->dpc_flags)) { + if ((is_qla8032(ha) || is_qla8042(ha)) && + test_bit(DPC_RESET_HA, &ha->dpc_flags)) { idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL); qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL, (idc_ctrl | GRACEFUL_RESET_BIT1)); @@ -8078,6 +8233,12 @@ static struct pci_device_id qla4xxx_pci_tbl[] = { .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, + { + .vendor = PCI_VENDOR_ID_QLOGIC, + .device = PCI_DEVICE_ID_QLOGIC_ISP8042, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + }, {0, 0}, }; MODULE_DEVICE_TABLE(pci, qla4xxx_pci_tbl); diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h index fe873cf7570d..f4fef72c9bcd 100644 --- 
a/drivers/scsi/qla4xxx/ql4_version.h +++ b/drivers/scsi/qla4xxx/ql4_version.h @@ -1,8 +1,8 @@ /* * QLogic iSCSI HBA Driver - * Copyright (c) 2003-2012 QLogic Corporation + * Copyright (c) 2003-2013 QLogic Corporation * * See LICENSE.qla4xxx for copyright and licensing details. */ -#define QLA4XXX_DRIVER_VERSION "5.03.00-k9" +#define QLA4XXX_DRIVER_VERSION "5.04.00-k1" diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index cb4fefa1bfba..01c0ffa31276 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -1997,8 +1997,14 @@ static unsigned long lba_to_map_index(sector_t lba) static sector_t map_index_to_lba(unsigned long index) { - return index * scsi_debug_unmap_granularity - - scsi_debug_unmap_alignment; + sector_t lba = index * scsi_debug_unmap_granularity; + + if (scsi_debug_unmap_alignment) { + lba -= scsi_debug_unmap_granularity - + scsi_debug_unmap_alignment; + } + + return lba; } static unsigned int map_state(sector_t lba, unsigned int *num) @@ -2659,8 +2665,8 @@ static void __init sdebug_build_parts(unsigned char *ramp, / sdebug_sectors_per; pp->end_sector = (end_sec % sdebug_sectors_per) + 1; - pp->start_sect = start_sec; - pp->nr_sects = end_sec - start_sec + 1; + pp->start_sect = cpu_to_le32(start_sec); + pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1); pp->sys_ind = 0x83; /* plain Linux partition */ } } diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index 21505962f539..83e591b60193 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -223,12 +223,80 @@ static inline void scsi_eh_prt_fail_stats(struct Scsi_Host *shost, } #endif + /** + * scsi_report_lun_change - Set flag on all *other* devices on the same target + * to indicate that a UNIT ATTENTION is expected. + * @sdev: Device reporting the UNIT ATTENTION + */ +static void scsi_report_lun_change(struct scsi_device *sdev) +{ + sdev->sdev_target->expecting_lun_change = 1; +} + +/** + * scsi_report_sense - Examine scsi sense information and log messages for + * certain conditions, also issue uevents for some of them. + * @sdev: Device reporting the sense code + * @sshdr: sshdr to be examined + */ +static void scsi_report_sense(struct scsi_device *sdev, + struct scsi_sense_hdr *sshdr) +{ + enum scsi_device_event evt_type = SDEV_EVT_MAXBITS; /* i.e. none */ + + if (sshdr->sense_key == UNIT_ATTENTION) { + if (sshdr->asc == 0x3f && sshdr->ascq == 0x03) { + evt_type = SDEV_EVT_INQUIRY_CHANGE_REPORTED; + sdev_printk(KERN_WARNING, sdev, + "Inquiry data has changed"); + } else if (sshdr->asc == 0x3f && sshdr->ascq == 0x0e) { + evt_type = SDEV_EVT_LUN_CHANGE_REPORTED; + scsi_report_lun_change(sdev); + sdev_printk(KERN_WARNING, sdev, + "Warning! Received an indication that the " + "LUN assignments on this target have " + "changed. The Linux SCSI layer does not " + "automatically remap LUN assignments.\n"); + } else if (sshdr->asc == 0x3f) + sdev_printk(KERN_WARNING, sdev, + "Warning! Received an indication that the " + "operating parameters on this target have " + "changed. The Linux SCSI layer does not " + "automatically adjust these parameters.\n"); + + if (sshdr->asc == 0x38 && sshdr->ascq == 0x07) { + evt_type = SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED; + sdev_printk(KERN_WARNING, sdev, + "Warning! 
Received an indication that the " + "LUN reached a thin provisioning soft " + "threshold.\n"); + } + + if (sshdr->asc == 0x2a && sshdr->ascq == 0x01) { + evt_type = SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED; + sdev_printk(KERN_WARNING, sdev, + "Mode parameters changed"); + } else if (sshdr->asc == 0x2a && sshdr->ascq == 0x09) { + evt_type = SDEV_EVT_CAPACITY_CHANGE_REPORTED; + sdev_printk(KERN_WARNING, sdev, + "Capacity data has changed"); + } else if (sshdr->asc == 0x2a) + sdev_printk(KERN_WARNING, sdev, + "Parameters changed"); + } + + if (evt_type != SDEV_EVT_MAXBITS) { + set_bit(evt_type, sdev->pending_events); + schedule_work(&sdev->event_work); + } +} + /** * scsi_check_sense - Examine scsi cmd sense * @scmd: Cmd to have sense checked. * * Return value: - * SUCCESS or FAILED or NEEDS_RETRY or TARGET_ERROR + * SUCCESS or FAILED or NEEDS_RETRY or ADD_TO_MLQUEUE * * Notes: * When a deferred error is detected the current command has @@ -250,6 +318,8 @@ static int scsi_check_sense(struct scsi_cmnd *scmd) */ return SUCCESS; + scsi_report_sense(sdev, &sshdr); + if (scsi_sense_is_deferred(&sshdr)) return NEEDS_RETRY; @@ -315,6 +385,14 @@ static int scsi_check_sense(struct scsi_cmnd *scmd) } } /* + * we might also expect a cc/ua if another LUN on the target + * reported a UA with an ASC/ASCQ of 3F 0E - + * REPORTED LUNS DATA HAS CHANGED. + */ + if (scmd->device->sdev_target->expecting_lun_change && + sshdr.asc == 0x3f && sshdr.ascq == 0x0e) + return NEEDS_RETRY; + /* * if the device is in the process of becoming ready, we * should retry. */ @@ -327,26 +405,6 @@ static int scsi_check_sense(struct scsi_cmnd *scmd) if (scmd->device->allow_restart && (sshdr.asc == 0x04) && (sshdr.ascq == 0x02)) return FAILED; - - if (sshdr.asc == 0x3f && sshdr.ascq == 0x0e) - scmd_printk(KERN_WARNING, scmd, - "Warning! Received an indication that the " - "LUN assignments on this target have " - "changed. The Linux SCSI layer does not " - "automatically remap LUN assignments.\n"); - else if (sshdr.asc == 0x3f) - scmd_printk(KERN_WARNING, scmd, - "Warning! Received an indication that the " - "operating parameters on this target have " - "changed. The Linux SCSI layer does not " - "automatically adjust these parameters.\n"); - - if (sshdr.asc == 0x38 && sshdr.ascq == 0x07) - scmd_printk(KERN_WARNING, scmd, - "Warning! Received an indication that the " - "LUN reached a thin provisioning soft " - "threshold.\n"); - /* * Pass the UA upwards for a determination in the completion * functions. 
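Editorial note, not part of the commit: the scsi_report_sense() hunk above centralizes the UNIT ATTENTION warnings that the following hunk removes from scsi_check_sense(), and additionally raises a device event for the conditions it recognizes. A compact restatement of the asc/ascq-to-event mapping it implements (illustrative sketch only; the commit open-codes these tests, and the bare asc == 0x3f and asc == 0x2a catch-alls merely log without raising an event):

	struct ua_evt_map {
		unsigned char asc, ascq;
		enum scsi_device_event evt;
	};

	static const struct ua_evt_map ua_evt_map[] = {
		{ 0x3f, 0x03, SDEV_EVT_INQUIRY_CHANGE_REPORTED },
		{ 0x3f, 0x0e, SDEV_EVT_LUN_CHANGE_REPORTED },
		{ 0x38, 0x07, SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED },
		{ 0x2a, 0x01, SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED },
		{ 0x2a, 0x09, SDEV_EVT_CAPACITY_CHANGE_REPORTED },
	};

A matched condition sets the event's bit in sdev->pending_events and schedules sdev->event_work; the scsi_lib.c hunk later in this diff drains that bitmap in scsi_evt_thread() via test_and_clear_bit() and sdev_evt_send_simple(), which emits the corresponding SDEV_UA=... uevent.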
@@ -354,18 +412,25 @@ static int scsi_check_sense(struct scsi_cmnd *scmd) return SUCCESS; /* these are not supported */ + case DATA_PROTECT: + if (sshdr.asc == 0x27 && sshdr.ascq == 0x07) { + /* Thin provisioning hard threshold reached */ + set_host_byte(scmd, DID_ALLOC_FAILURE); + return SUCCESS; + } case COPY_ABORTED: case VOLUME_OVERFLOW: case MISCOMPARE: case BLANK_CHECK: - case DATA_PROTECT: - return TARGET_ERROR; + set_host_byte(scmd, DID_TARGET_FAILURE); + return SUCCESS; case MEDIUM_ERROR: if (sshdr.asc == 0x11 || /* UNRECOVERED READ ERR */ sshdr.asc == 0x13 || /* AMNF DATA FIELD */ sshdr.asc == 0x14) { /* RECORD NOT FOUND */ - return TARGET_ERROR; + set_host_byte(scmd, DID_MEDIUM_ERROR); + return SUCCESS; } return NEEDS_RETRY; @@ -373,14 +438,14 @@ static int scsi_check_sense(struct scsi_cmnd *scmd) if (scmd->device->retry_hwerror) return ADD_TO_MLQUEUE; else - return TARGET_ERROR; + set_host_byte(scmd, DID_TARGET_FAILURE); case ILLEGAL_REQUEST: if (sshdr.asc == 0x20 || /* Invalid command operation code */ sshdr.asc == 0x21 || /* Logical block address out of range */ sshdr.asc == 0x24 || /* Invalid field in cdb */ sshdr.asc == 0x26) { /* Parameter value invalid */ - return TARGET_ERROR; + set_host_byte(scmd, DID_TARGET_FAILURE); } return SUCCESS; @@ -843,7 +908,6 @@ retry: case SUCCESS: case NEEDS_RETRY: case FAILED: - case TARGET_ERROR: break; case ADD_TO_MLQUEUE: rtn = NEEDS_RETRY; @@ -1568,6 +1632,8 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd) */ return ADD_TO_MLQUEUE; case GOOD: + if (scmd->cmnd[0] == REPORT_LUNS) + scmd->device->sdev_target->expecting_lun_change = 0; scsi_handle_queue_ramp_up(scmd->device); case COMMAND_TERMINATED: return SUCCESS; @@ -1577,14 +1643,6 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd) rtn = scsi_check_sense(scmd); if (rtn == NEEDS_RETRY) goto maybe_retry; - else if (rtn == TARGET_ERROR) { - /* - * Need to modify host byte to signal a - * permanent target failure - */ - set_host_byte(scmd, DID_TARGET_FAILURE); - rtn = SUCCESS; - } /* if rtn == FAILED, we have no sense information; * returning FAILED will wake the error handler thread * to collect the sense and redo the decide diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 124392f3091e..d545931c85eb 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -716,6 +716,20 @@ void scsi_release_buffers(struct scsi_cmnd *cmd) } EXPORT_SYMBOL(scsi_release_buffers); +/** + * __scsi_error_from_host_byte - translate SCSI error code into errno + * @cmd: SCSI command (unused) + * @result: scsi error code + * + * Translate SCSI error code into standard UNIX errno. 
+ * Return values: + * -ENOLINK temporary transport failure + * -EREMOTEIO permanent target failure, do not retry + * -EBADE permanent nexus failure, retry on other path + * -ENOSPC No write space available + * -ENODATA Medium error + * -EIO unspecified I/O error + */ static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result) { int error = 0; @@ -732,6 +746,14 @@ static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result) set_host_byte(cmd, DID_OK); error = -EBADE; break; + case DID_ALLOC_FAILURE: + set_host_byte(cmd, DID_OK); + error = -ENOSPC; + break; + case DID_MEDIUM_ERROR: + set_host_byte(cmd, DID_OK); + error = -ENODATA; + break; default: error = -EIO; break; @@ -2231,7 +2253,21 @@ static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt) case SDEV_EVT_MEDIA_CHANGE: envp[idx++] = "SDEV_MEDIA_CHANGE=1"; break; - + case SDEV_EVT_INQUIRY_CHANGE_REPORTED: + envp[idx++] = "SDEV_UA=INQUIRY_DATA_HAS_CHANGED"; + break; + case SDEV_EVT_CAPACITY_CHANGE_REPORTED: + envp[idx++] = "SDEV_UA=CAPACITY_DATA_HAS_CHANGED"; + break; + case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED: + envp[idx++] = "SDEV_UA=THIN_PROVISIONING_SOFT_THRESHOLD_REACHED"; + break; + case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED: + envp[idx++] = "SDEV_UA=MODE_PARAMETERS_CHANGED"; + break; + case SDEV_EVT_LUN_CHANGE_REPORTED: + envp[idx++] = "SDEV_UA=REPORTED_LUNS_DATA_HAS_CHANGED"; + break; default: /* do nothing */ break; @@ -2252,10 +2288,15 @@ static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt) void scsi_evt_thread(struct work_struct *work) { struct scsi_device *sdev; + enum scsi_device_event evt_type; LIST_HEAD(event_list); sdev = container_of(work, struct scsi_device, event_work); + for (evt_type = SDEV_EVT_FIRST; evt_type <= SDEV_EVT_LAST; evt_type++) + if (test_and_clear_bit(evt_type, sdev->pending_events)) + sdev_evt_send_simple(sdev, evt_type, GFP_KERNEL); + while (1) { struct scsi_event *evt; struct list_head *this, *tmp; @@ -2325,6 +2366,11 @@ struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type, /* evt_type-specific initialization, if any */ switch (evt_type) { case SDEV_EVT_MEDIA_CHANGE: + case SDEV_EVT_INQUIRY_CHANGE_REPORTED: + case SDEV_EVT_CAPACITY_CHANGE_REPORTED: + case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED: + case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED: + case SDEV_EVT_LUN_CHANGE_REPORTED: default: /* do nothing */ break; diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index 7e50061e9ef6..40c639491b27 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c @@ -739,6 +739,11 @@ sdev_store_evt_##name(struct device *dev, struct device_attribute *attr,\ #define REF_EVT(name) &dev_attr_evt_##name.attr DECLARE_EVT(media_change, MEDIA_CHANGE) +DECLARE_EVT(inquiry_change_reported, INQUIRY_CHANGE_REPORTED) +DECLARE_EVT(capacity_change_reported, CAPACITY_CHANGE_REPORTED) +DECLARE_EVT(soft_threshold_reached, SOFT_THRESHOLD_REACHED_REPORTED) +DECLARE_EVT(mode_parameter_change_reported, MODE_PARAMETER_CHANGE_REPORTED) +DECLARE_EVT(lun_change_reported, LUN_CHANGE_REPORTED) /* Default template for device attributes. 
May NOT be modified */ static struct attribute *scsi_sdev_attrs[] = { @@ -759,6 +764,11 @@ static struct attribute *scsi_sdev_attrs[] = { &dev_attr_ioerr_cnt.attr, &dev_attr_modalias.attr, REF_EVT(media_change), + REF_EVT(inquiry_change_reported), + REF_EVT(capacity_change_reported), + REF_EVT(soft_threshold_reached), + REF_EVT(mode_parameter_change_reported), + REF_EVT(lun_change_reported), NULL }; diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index abf7c402e1a5..e4a989fa477d 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -25,7 +25,6 @@ #include <linux/slab.h> #include <linux/bsg-lib.h> #include <linux/idr.h> -#include <linux/list.h> #include <net/tcp.h> #include <scsi/scsi.h> #include <scsi/scsi_host.h> @@ -3327,6 +3326,23 @@ iscsi_conn_attr(exp_statsn, ISCSI_PARAM_EXP_STATSN); iscsi_conn_attr(persistent_address, ISCSI_PARAM_PERSISTENT_ADDRESS); iscsi_conn_attr(ping_tmo, ISCSI_PARAM_PING_TMO); iscsi_conn_attr(recv_tmo, ISCSI_PARAM_RECV_TMO); +iscsi_conn_attr(local_port, ISCSI_PARAM_LOCAL_PORT); +iscsi_conn_attr(statsn, ISCSI_PARAM_STATSN); +iscsi_conn_attr(keepalive_tmo, ISCSI_PARAM_KEEPALIVE_TMO); +iscsi_conn_attr(max_segment_size, ISCSI_PARAM_MAX_SEGMENT_SIZE); +iscsi_conn_attr(tcp_timestamp_stat, ISCSI_PARAM_TCP_TIMESTAMP_STAT); +iscsi_conn_attr(tcp_wsf_disable, ISCSI_PARAM_TCP_WSF_DISABLE); +iscsi_conn_attr(tcp_nagle_disable, ISCSI_PARAM_TCP_NAGLE_DISABLE); +iscsi_conn_attr(tcp_timer_scale, ISCSI_PARAM_TCP_TIMER_SCALE); +iscsi_conn_attr(tcp_timestamp_enable, ISCSI_PARAM_TCP_TIMESTAMP_EN); +iscsi_conn_attr(fragment_disable, ISCSI_PARAM_IP_FRAGMENT_DISABLE); +iscsi_conn_attr(ipv4_tos, ISCSI_PARAM_IPV4_TOS); +iscsi_conn_attr(ipv6_traffic_class, ISCSI_PARAM_IPV6_TC); +iscsi_conn_attr(ipv6_flow_label, ISCSI_PARAM_IPV6_FLOW_LABEL); +iscsi_conn_attr(is_fw_assigned_ipv6, ISCSI_PARAM_IS_FW_ASSIGNED_IPV6); +iscsi_conn_attr(tcp_xmit_wsf, ISCSI_PARAM_TCP_XMIT_WSF); +iscsi_conn_attr(tcp_recv_wsf, ISCSI_PARAM_TCP_RECV_WSF); + #define iscsi_conn_ep_attr_show(param) \ static ssize_t show_conn_ep_param_##param(struct device *dev, \ @@ -3379,6 +3395,22 @@ static struct attribute *iscsi_conn_attrs[] = { &dev_attr_conn_persistent_port.attr, &dev_attr_conn_ping_tmo.attr, &dev_attr_conn_recv_tmo.attr, + &dev_attr_conn_local_port.attr, + &dev_attr_conn_statsn.attr, + &dev_attr_conn_keepalive_tmo.attr, + &dev_attr_conn_max_segment_size.attr, + &dev_attr_conn_tcp_timestamp_stat.attr, + &dev_attr_conn_tcp_wsf_disable.attr, + &dev_attr_conn_tcp_nagle_disable.attr, + &dev_attr_conn_tcp_timer_scale.attr, + &dev_attr_conn_tcp_timestamp_enable.attr, + &dev_attr_conn_fragment_disable.attr, + &dev_attr_conn_ipv4_tos.attr, + &dev_attr_conn_ipv6_traffic_class.attr, + &dev_attr_conn_ipv6_flow_label.attr, + &dev_attr_conn_is_fw_assigned_ipv6.attr, + &dev_attr_conn_tcp_xmit_wsf.attr, + &dev_attr_conn_tcp_recv_wsf.attr, NULL, }; @@ -3416,6 +3448,38 @@ static umode_t iscsi_conn_attr_is_visible(struct kobject *kobj, param = ISCSI_PARAM_PING_TMO; else if (attr == &dev_attr_conn_recv_tmo.attr) param = ISCSI_PARAM_RECV_TMO; + else if (attr == &dev_attr_conn_local_port.attr) + param = ISCSI_PARAM_LOCAL_PORT; + else if (attr == &dev_attr_conn_statsn.attr) + param = ISCSI_PARAM_STATSN; + else if (attr == &dev_attr_conn_keepalive_tmo.attr) + param = ISCSI_PARAM_KEEPALIVE_TMO; + else if (attr == &dev_attr_conn_max_segment_size.attr) + param = ISCSI_PARAM_MAX_SEGMENT_SIZE; + else if (attr == &dev_attr_conn_tcp_timestamp_stat.attr) + param = 
ISCSI_PARAM_TCP_TIMESTAMP_STAT; + else if (attr == &dev_attr_conn_tcp_wsf_disable.attr) + param = ISCSI_PARAM_TCP_WSF_DISABLE; + else if (attr == &dev_attr_conn_tcp_nagle_disable.attr) + param = ISCSI_PARAM_TCP_NAGLE_DISABLE; + else if (attr == &dev_attr_conn_tcp_timer_scale.attr) + param = ISCSI_PARAM_TCP_TIMER_SCALE; + else if (attr == &dev_attr_conn_tcp_timestamp_enable.attr) + param = ISCSI_PARAM_TCP_TIMESTAMP_EN; + else if (attr == &dev_attr_conn_fragment_disable.attr) + param = ISCSI_PARAM_IP_FRAGMENT_DISABLE; + else if (attr == &dev_attr_conn_ipv4_tos.attr) + param = ISCSI_PARAM_IPV4_TOS; + else if (attr == &dev_attr_conn_ipv6_traffic_class.attr) + param = ISCSI_PARAM_IPV6_TC; + else if (attr == &dev_attr_conn_ipv6_flow_label.attr) + param = ISCSI_PARAM_IPV6_FLOW_LABEL; + else if (attr == &dev_attr_conn_is_fw_assigned_ipv6.attr) + param = ISCSI_PARAM_IS_FW_ASSIGNED_IPV6; + else if (attr == &dev_attr_conn_tcp_xmit_wsf.attr) + param = ISCSI_PARAM_TCP_XMIT_WSF; + else if (attr == &dev_attr_conn_tcp_recv_wsf.attr) + param = ISCSI_PARAM_TCP_RECV_WSF; else { WARN_ONCE(1, "Invalid conn attr"); return 0; @@ -3476,6 +3540,21 @@ iscsi_session_attr(targetalias, ISCSI_PARAM_TARGET_ALIAS, 0); iscsi_session_attr(boot_root, ISCSI_PARAM_BOOT_ROOT, 0); iscsi_session_attr(boot_nic, ISCSI_PARAM_BOOT_NIC, 0); iscsi_session_attr(boot_target, ISCSI_PARAM_BOOT_TARGET, 0); +iscsi_session_attr(auto_snd_tgt_disable, ISCSI_PARAM_AUTO_SND_TGT_DISABLE, 0); +iscsi_session_attr(discovery_session, ISCSI_PARAM_DISCOVERY_SESS, 0); +iscsi_session_attr(portal_type, ISCSI_PARAM_PORTAL_TYPE, 0); +iscsi_session_attr(chap_auth, ISCSI_PARAM_CHAP_AUTH_EN, 0); +iscsi_session_attr(discovery_logout, ISCSI_PARAM_DISCOVERY_LOGOUT_EN, 0); +iscsi_session_attr(bidi_chap, ISCSI_PARAM_BIDI_CHAP_EN, 0); +iscsi_session_attr(discovery_auth_optional, + ISCSI_PARAM_DISCOVERY_AUTH_OPTIONAL, 0); +iscsi_session_attr(def_time2wait, ISCSI_PARAM_DEF_TIME2WAIT, 0); +iscsi_session_attr(def_time2retain, ISCSI_PARAM_DEF_TIME2RETAIN, 0); +iscsi_session_attr(isid, ISCSI_PARAM_ISID, 0); +iscsi_session_attr(tsid, ISCSI_PARAM_TSID, 0); +iscsi_session_attr(def_taskmgmt_tmo, ISCSI_PARAM_DEF_TASKMGMT_TMO, 0); +iscsi_session_attr(discovery_parent_idx, ISCSI_PARAM_DISCOVERY_PARENT_IDX, 0); +iscsi_session_attr(discovery_parent_type, ISCSI_PARAM_DISCOVERY_PARENT_TYPE, 0); static ssize_t show_priv_session_state(struct device *dev, struct device_attribute *attr, @@ -3580,6 +3659,20 @@ static struct attribute *iscsi_session_attrs[] = { &dev_attr_sess_chap_out_idx.attr, &dev_attr_sess_chap_in_idx.attr, &dev_attr_priv_sess_target_id.attr, + &dev_attr_sess_auto_snd_tgt_disable.attr, + &dev_attr_sess_discovery_session.attr, + &dev_attr_sess_portal_type.attr, + &dev_attr_sess_chap_auth.attr, + &dev_attr_sess_discovery_logout.attr, + &dev_attr_sess_bidi_chap.attr, + &dev_attr_sess_discovery_auth_optional.attr, + &dev_attr_sess_def_time2wait.attr, + &dev_attr_sess_def_time2retain.attr, + &dev_attr_sess_isid.attr, + &dev_attr_sess_tsid.attr, + &dev_attr_sess_def_taskmgmt_tmo.attr, + &dev_attr_sess_discovery_parent_idx.attr, + &dev_attr_sess_discovery_parent_type.attr, NULL, }; @@ -3643,6 +3736,34 @@ static umode_t iscsi_session_attr_is_visible(struct kobject *kobj, param = ISCSI_PARAM_BOOT_NIC; else if (attr == &dev_attr_sess_boot_target.attr) param = ISCSI_PARAM_BOOT_TARGET; + else if (attr == &dev_attr_sess_auto_snd_tgt_disable.attr) + param = ISCSI_PARAM_AUTO_SND_TGT_DISABLE; + else if (attr == &dev_attr_sess_discovery_session.attr) + param = 
ISCSI_PARAM_DISCOVERY_SESS; + else if (attr == &dev_attr_sess_portal_type.attr) + param = ISCSI_PARAM_PORTAL_TYPE; + else if (attr == &dev_attr_sess_chap_auth.attr) + param = ISCSI_PARAM_CHAP_AUTH_EN; + else if (attr == &dev_attr_sess_discovery_logout.attr) + param = ISCSI_PARAM_DISCOVERY_LOGOUT_EN; + else if (attr == &dev_attr_sess_bidi_chap.attr) + param = ISCSI_PARAM_BIDI_CHAP_EN; + else if (attr == &dev_attr_sess_discovery_auth_optional.attr) + param = ISCSI_PARAM_DISCOVERY_AUTH_OPTIONAL; + else if (attr == &dev_attr_sess_def_time2wait.attr) + param = ISCSI_PARAM_DEF_TIME2WAIT; + else if (attr == &dev_attr_sess_def_time2retain.attr) + param = ISCSI_PARAM_DEF_TIME2RETAIN; + else if (attr == &dev_attr_sess_isid.attr) + param = ISCSI_PARAM_ISID; + else if (attr == &dev_attr_sess_tsid.attr) + param = ISCSI_PARAM_TSID; + else if (attr == &dev_attr_sess_def_taskmgmt_tmo.attr) + param = ISCSI_PARAM_DEF_TASKMGMT_TMO; + else if (attr == &dev_attr_sess_discovery_parent_idx.attr) + param = ISCSI_PARAM_DISCOVERY_PARENT_IDX; + else if (attr == &dev_attr_sess_discovery_parent_type.attr) + param = ISCSI_PARAM_DISCOVERY_PARENT_TYPE; else if (attr == &dev_attr_priv_sess_recovery_tmo.attr) return S_IRUGO | S_IWUSR; else if (attr == &dev_attr_priv_sess_state.attr) diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 86fcf2c313ad..b58e8f815a00 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -132,8 +132,8 @@ static const char *sd_cache_types[] = { }; static ssize_t -sd_store_cache_type(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) +cache_type_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) { int i, ct = -1, rcd, wce, sp; struct scsi_disk *sdkp = to_scsi_disk(dev); @@ -199,8 +199,18 @@ sd_store_cache_type(struct device *dev, struct device_attribute *attr, } static ssize_t -sd_store_manage_start_stop(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) +manage_start_stop_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct scsi_disk *sdkp = to_scsi_disk(dev); + struct scsi_device *sdp = sdkp->device; + + return snprintf(buf, 20, "%u\n", sdp->manage_start_stop); +} + +static ssize_t +manage_start_stop_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) { struct scsi_disk *sdkp = to_scsi_disk(dev); struct scsi_device *sdp = sdkp->device; @@ -212,10 +222,19 @@ sd_store_manage_start_stop(struct device *dev, struct device_attribute *attr, return count; } +static DEVICE_ATTR_RW(manage_start_stop); static ssize_t -sd_store_allow_restart(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) +allow_restart_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct scsi_disk *sdkp = to_scsi_disk(dev); + + return snprintf(buf, 40, "%d\n", sdkp->device->allow_restart); +} + +static ssize_t +allow_restart_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) { struct scsi_disk *sdkp = to_scsi_disk(dev); struct scsi_device *sdp = sdkp->device; @@ -230,47 +249,30 @@ sd_store_allow_restart(struct device *dev, struct device_attribute *attr, return count; } +static DEVICE_ATTR_RW(allow_restart); static ssize_t -sd_show_cache_type(struct device *dev, struct device_attribute *attr, - char *buf) +cache_type_show(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_disk *sdkp = to_scsi_disk(dev); int ct = sdkp->RCD + 2*sdkp->WCE; 
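/*
 * Editorial note, not part of the commit: the sd_show_* and sd_store_*
 * handlers in the surrounding hunks are renamed to the
 * <attr>_show()/<attr>_store() convention so that the DEVICE_ATTR_RW()
 * and DEVICE_ATTR_RO() helpers can generate the attribute definitions.
 * Roughly:
 *
 *   static DEVICE_ATTR_RW(cache_type);
 *     => static struct device_attribute dev_attr_cache_type =
 *            __ATTR(cache_type, S_IRUGO | S_IWUSR,
 *                   cache_type_show, cache_type_store);
 */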
return snprintf(buf, 40, "%s\n", sd_cache_types[ct]); } +static DEVICE_ATTR_RW(cache_type); static ssize_t -sd_show_fua(struct device *dev, struct device_attribute *attr, char *buf) +FUA_show(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_disk *sdkp = to_scsi_disk(dev); return snprintf(buf, 20, "%u\n", sdkp->DPOFUA); } +static DEVICE_ATTR_RO(FUA); static ssize_t -sd_show_manage_start_stop(struct device *dev, struct device_attribute *attr, - char *buf) -{ - struct scsi_disk *sdkp = to_scsi_disk(dev); - struct scsi_device *sdp = sdkp->device; - - return snprintf(buf, 20, "%u\n", sdp->manage_start_stop); -} - -static ssize_t -sd_show_allow_restart(struct device *dev, struct device_attribute *attr, - char *buf) -{ - struct scsi_disk *sdkp = to_scsi_disk(dev); - - return snprintf(buf, 40, "%d\n", sdkp->device->allow_restart); -} - -static ssize_t -sd_show_protection_type(struct device *dev, struct device_attribute *attr, - char *buf) +protection_type_show(struct device *dev, struct device_attribute *attr, + char *buf) { struct scsi_disk *sdkp = to_scsi_disk(dev); @@ -278,8 +280,8 @@ sd_show_protection_type(struct device *dev, struct device_attribute *attr, } static ssize_t -sd_store_protection_type(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) +protection_type_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) { struct scsi_disk *sdkp = to_scsi_disk(dev); unsigned int val; @@ -298,10 +300,11 @@ sd_store_protection_type(struct device *dev, struct device_attribute *attr, return count; } +static DEVICE_ATTR_RW(protection_type); static ssize_t -sd_show_protection_mode(struct device *dev, struct device_attribute *attr, - char *buf) +protection_mode_show(struct device *dev, struct device_attribute *attr, + char *buf) { struct scsi_disk *sdkp = to_scsi_disk(dev); struct scsi_device *sdp = sdkp->device; @@ -320,24 +323,26 @@ sd_show_protection_mode(struct device *dev, struct device_attribute *attr, return snprintf(buf, 20, "%s%u\n", dix ? 
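The sd.c renames in this region are mechanical but load-bearing: DEVICE_ATTR_RO(name) and DEVICE_ATTR_RW(name) generate the struct device_attribute dev_attr_<name> from functions that must be called <name>_show and <name>_store, so every sd_show_*/sd_store_* pair has to be renamed before the macros can replace the hand-rolled __ATTR() table (the new attribute array appears a little further down). The whole pattern in miniature, using a hypothetical "example" attribute:

static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", 42);	/* placeholder value */
}

static ssize_t example_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	return count;					/* accept and discard */
}
static DEVICE_ATTR_RW(example);		/* emits dev_attr_example, mode 0644 */

static struct attribute *example_dev_attrs[] = {
	&dev_attr_example.attr,
	NULL,
};
ATTRIBUTE_GROUPS(example_dev);		/* emits example_dev_groups for .dev_groups */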
"dix" : "dif", dif); } +static DEVICE_ATTR_RO(protection_mode); static ssize_t -sd_show_app_tag_own(struct device *dev, struct device_attribute *attr, - char *buf) +app_tag_own_show(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_disk *sdkp = to_scsi_disk(dev); return snprintf(buf, 20, "%u\n", sdkp->ATO); } +static DEVICE_ATTR_RO(app_tag_own); static ssize_t -sd_show_thin_provisioning(struct device *dev, struct device_attribute *attr, - char *buf) +thin_provisioning_show(struct device *dev, struct device_attribute *attr, + char *buf) { struct scsi_disk *sdkp = to_scsi_disk(dev); return snprintf(buf, 20, "%u\n", sdkp->lbpme); } +static DEVICE_ATTR_RO(thin_provisioning); static const char *lbp_mode[] = { [SD_LBP_FULL] = "full", @@ -349,8 +354,8 @@ static const char *lbp_mode[] = { }; static ssize_t -sd_show_provisioning_mode(struct device *dev, struct device_attribute *attr, - char *buf) +provisioning_mode_show(struct device *dev, struct device_attribute *attr, + char *buf) { struct scsi_disk *sdkp = to_scsi_disk(dev); @@ -358,8 +363,8 @@ sd_show_provisioning_mode(struct device *dev, struct device_attribute *attr, } static ssize_t -sd_store_provisioning_mode(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) +provisioning_mode_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) { struct scsi_disk *sdkp = to_scsi_disk(dev); struct scsi_device *sdp = sdkp->device; @@ -385,10 +390,11 @@ sd_store_provisioning_mode(struct device *dev, struct device_attribute *attr, return count; } +static DEVICE_ATTR_RW(provisioning_mode); static ssize_t -sd_show_max_medium_access_timeouts(struct device *dev, - struct device_attribute *attr, char *buf) +max_medium_access_timeouts_show(struct device *dev, + struct device_attribute *attr, char *buf) { struct scsi_disk *sdkp = to_scsi_disk(dev); @@ -396,9 +402,9 @@ sd_show_max_medium_access_timeouts(struct device *dev, } static ssize_t -sd_store_max_medium_access_timeouts(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) +max_medium_access_timeouts_store(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t count) { struct scsi_disk *sdkp = to_scsi_disk(dev); int err; @@ -410,10 +416,11 @@ sd_store_max_medium_access_timeouts(struct device *dev, return err ? 
err : count; } +static DEVICE_ATTR_RW(max_medium_access_timeouts); static ssize_t -sd_show_write_same_blocks(struct device *dev, struct device_attribute *attr, - char *buf) +max_write_same_blocks_show(struct device *dev, struct device_attribute *attr, + char *buf) { struct scsi_disk *sdkp = to_scsi_disk(dev); @@ -421,8 +428,8 @@ sd_show_write_same_blocks(struct device *dev, struct device_attribute *attr, } static ssize_t -sd_store_write_same_blocks(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) +max_write_same_blocks_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) { struct scsi_disk *sdkp = to_scsi_disk(dev); struct scsi_device *sdp = sdkp->device; @@ -451,35 +458,29 @@ sd_store_write_same_blocks(struct device *dev, struct device_attribute *attr, return count; } - -static struct device_attribute sd_disk_attrs[] = { - __ATTR(cache_type, S_IRUGO|S_IWUSR, sd_show_cache_type, - sd_store_cache_type), - __ATTR(FUA, S_IRUGO, sd_show_fua, NULL), - __ATTR(allow_restart, S_IRUGO|S_IWUSR, sd_show_allow_restart, - sd_store_allow_restart), - __ATTR(manage_start_stop, S_IRUGO|S_IWUSR, sd_show_manage_start_stop, - sd_store_manage_start_stop), - __ATTR(protection_type, S_IRUGO|S_IWUSR, sd_show_protection_type, - sd_store_protection_type), - __ATTR(protection_mode, S_IRUGO, sd_show_protection_mode, NULL), - __ATTR(app_tag_own, S_IRUGO, sd_show_app_tag_own, NULL), - __ATTR(thin_provisioning, S_IRUGO, sd_show_thin_provisioning, NULL), - __ATTR(provisioning_mode, S_IRUGO|S_IWUSR, sd_show_provisioning_mode, - sd_store_provisioning_mode), - __ATTR(max_write_same_blocks, S_IRUGO|S_IWUSR, - sd_show_write_same_blocks, sd_store_write_same_blocks), - __ATTR(max_medium_access_timeouts, S_IRUGO|S_IWUSR, - sd_show_max_medium_access_timeouts, - sd_store_max_medium_access_timeouts), - __ATTR_NULL, +static DEVICE_ATTR_RW(max_write_same_blocks); + +static struct attribute *sd_disk_attrs[] = { + &dev_attr_cache_type.attr, + &dev_attr_FUA.attr, + &dev_attr_allow_restart.attr, + &dev_attr_manage_start_stop.attr, + &dev_attr_protection_type.attr, + &dev_attr_protection_mode.attr, + &dev_attr_app_tag_own.attr, + &dev_attr_thin_provisioning.attr, + &dev_attr_provisioning_mode.attr, + &dev_attr_max_write_same_blocks.attr, + &dev_attr_max_medium_access_timeouts.attr, + NULL, }; +ATTRIBUTE_GROUPS(sd_disk); static struct class sd_disk_class = { .name = "scsi_disk", .owner = THIS_MODULE, .dev_release = scsi_disk_release, - .dev_attrs = sd_disk_attrs, + .dev_groups = sd_disk_groups, }; static const struct dev_pm_ops sd_pm_ops = { diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index df5e961484e1..5cbc4bb1b395 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -105,11 +105,8 @@ static int scatter_elem_sz_prev = SG_SCATTER_SZ; static int sg_add(struct device *, struct class_interface *); static void sg_remove(struct device *, struct class_interface *); -static DEFINE_SPINLOCK(sg_open_exclusive_lock); - static DEFINE_IDR(sg_index_idr); -static DEFINE_RWLOCK(sg_index_lock); /* Also used to lock - file descriptor list for device */ +static DEFINE_RWLOCK(sg_index_lock); static struct class_interface sg_interface = { .add_dev = sg_add, @@ -146,8 +143,7 @@ typedef struct sg_request { /* SG_MAX_QUEUE requests outstanding per file */ } Sg_request; typedef struct sg_fd { /* holds the state of a file descriptor */ - /* sfd_siblings is protected by sg_index_lock */ - struct list_head sfd_siblings; + struct list_head sfd_siblings; /* protected by sfd_lock of 
device */ struct sg_device *parentdp; /* owning device */ wait_queue_head_t read_wait; /* queue read until command done */ rwlock_t rq_list_lock; /* protect access to list in req_arr */ @@ -170,13 +166,12 @@ typedef struct sg_fd { /* holds the state of a file descriptor */ typedef struct sg_device { /* holds the state of each scsi generic device */ struct scsi_device *device; - wait_queue_head_t o_excl_wait; /* queue open() when O_EXCL in use */ int sg_tablesize; /* adapter's max scatter-gather table size */ u32 index; /* device index number */ - /* sfds is protected by sg_index_lock */ + spinlock_t sfd_lock; /* protect file descriptor list for device */ struct list_head sfds; + struct rw_semaphore o_sem; /* exclude open should hold this rwsem */ volatile char detached; /* 0->attached, 1->detached pending removal */ - /* exclude protected by sg_open_exclusive_lock */ char exclude; /* opened for exclusive access */ char sgdebug; /* 0->off, 1->sense, 9->dump dev, 10-> all devs */ struct gendisk *disk; @@ -225,35 +220,14 @@ static int sg_allow_access(struct file *filp, unsigned char *cmd) return blk_verify_command(cmd, filp->f_mode & FMODE_WRITE); } -static int get_exclude(Sg_device *sdp) -{ - unsigned long flags; - int ret; - - spin_lock_irqsave(&sg_open_exclusive_lock, flags); - ret = sdp->exclude; - spin_unlock_irqrestore(&sg_open_exclusive_lock, flags); - return ret; -} - -static int set_exclude(Sg_device *sdp, char val) -{ - unsigned long flags; - - spin_lock_irqsave(&sg_open_exclusive_lock, flags); - sdp->exclude = val; - spin_unlock_irqrestore(&sg_open_exclusive_lock, flags); - return val; -} - static int sfds_list_empty(Sg_device *sdp) { unsigned long flags; int ret; - read_lock_irqsave(&sg_index_lock, flags); + spin_lock_irqsave(&sdp->sfd_lock, flags); ret = list_empty(&sdp->sfds); - read_unlock_irqrestore(&sg_index_lock, flags); + spin_unlock_irqrestore(&sdp->sfd_lock, flags); return ret; } @@ -265,7 +239,6 @@ sg_open(struct inode *inode, struct file *filp) struct request_queue *q; Sg_device *sdp; Sg_fd *sfp; - int res; int retval; nonseekable_open(inode, filp); @@ -294,54 +267,52 @@ sg_open(struct inode *inode, struct file *filp) goto error_out; } - if (flags & O_EXCL) { - if (O_RDONLY == (flags & O_ACCMODE)) { - retval = -EPERM; /* Can't lock it with read only access */ - goto error_out; - } - if (!sfds_list_empty(sdp) && (flags & O_NONBLOCK)) { - retval = -EBUSY; - goto error_out; - } - res = wait_event_interruptible(sdp->o_excl_wait, - ((!sfds_list_empty(sdp) || get_exclude(sdp)) ? 
0 : set_exclude(sdp, 1))); - if (res) { - retval = res; /* -ERESTARTSYS because signal hit process */ - goto error_out; - } - } else if (get_exclude(sdp)) { /* some other fd has an exclusive lock on dev */ - if (flags & O_NONBLOCK) { - retval = -EBUSY; - goto error_out; - } - res = wait_event_interruptible(sdp->o_excl_wait, !get_exclude(sdp)); - if (res) { - retval = res; /* -ERESTARTSYS because signal hit process */ - goto error_out; - } - } - if (sdp->detached) { - retval = -ENODEV; + if ((flags & O_EXCL) && (O_RDONLY == (flags & O_ACCMODE))) { + retval = -EPERM; /* Can't lock it with read only access */ goto error_out; } + if (flags & O_NONBLOCK) { + if (flags & O_EXCL) { + if (!down_write_trylock(&sdp->o_sem)) { + retval = -EBUSY; + goto error_out; + } + } else { + if (!down_read_trylock(&sdp->o_sem)) { + retval = -EBUSY; + goto error_out; + } + } + } else { + if (flags & O_EXCL) + down_write(&sdp->o_sem); + else + down_read(&sdp->o_sem); + } + /* Since write lock is held, no need to check sfd_list */ + if (flags & O_EXCL) + sdp->exclude = 1; /* used by release lock */ + if (sfds_list_empty(sdp)) { /* no existing opens on this device */ sdp->sgdebug = 0; q = sdp->device->request_queue; sdp->sg_tablesize = queue_max_segments(q); } - if ((sfp = sg_add_sfp(sdp, dev))) + sfp = sg_add_sfp(sdp, dev); + if (!IS_ERR(sfp)) filp->private_data = sfp; + /* retval is already provably zero at this point because of the + * check after retval = scsi_autopm_get_device(sdp->device)) + */ else { + retval = PTR_ERR(sfp); + if (flags & O_EXCL) { - set_exclude(sdp, 0); /* undo if error */ - wake_up_interruptible(&sdp->o_excl_wait); - } - retval = -ENOMEM; - goto error_out; - } - retval = 0; + sdp->exclude = 0; /* undo if error */ + up_write(&sdp->o_sem); + } else + up_read(&sdp->o_sem); error_out: - if (retval) { scsi_autopm_put_device(sdp->device); sdp_put: scsi_device_put(sdp->device); @@ -358,13 +329,18 @@ sg_release(struct inode *inode, struct file *filp) { Sg_device *sdp; Sg_fd *sfp; + int excl; if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) return -ENXIO; SCSI_LOG_TIMEOUT(3, printk("sg_release: %s\n", sdp->disk->disk_name)); - set_exclude(sdp, 0); - wake_up_interruptible(&sdp->o_excl_wait); + excl = sdp->exclude; + sdp->exclude = 0; + if (excl) + up_write(&sdp->o_sem); + else + up_read(&sdp->o_sem); scsi_autopm_put_device(sdp->device); kref_put(&sfp->f_ref, sg_remove_sfp); @@ -1415,8 +1391,9 @@ static Sg_device *sg_alloc(struct gendisk *disk, struct scsi_device *scsidp) disk->first_minor = k; sdp->disk = disk; sdp->device = scsidp; + spin_lock_init(&sdp->sfd_lock); INIT_LIST_HEAD(&sdp->sfds); - init_waitqueue_head(&sdp->o_excl_wait); + init_rwsem(&sdp->o_sem); sdp->sg_tablesize = queue_max_segments(q); sdp->index = k; kref_init(&sdp->d_ref); @@ -1549,11 +1526,13 @@ static void sg_remove(struct device *cl_dev, struct class_interface *cl_intf) /* Need a write lock to set sdp->detached. 
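The old open()-side machinery (o_excl_wait plus a spinlock-guarded exclude flag) collapses here into one reader/writer semaphore: O_EXCL openers take o_sem for write, ordinary openers take it for read, and O_NONBLOCK maps naturally onto the _trylock variants; sg_release() only has to remember which side it holds. Reduced to its shape, with foo_* names as stand-ins:

struct foo_dev {
	struct rw_semaphore o_sem;	/* init_rwsem() at device creation */
	bool exclude;			/* currently held for write? */
};

static int foo_open(struct foo_dev *d, bool excl, bool nonblock)
{
	if (nonblock) {			/* O_NONBLOCK: never sleep */
		if (excl ? !down_write_trylock(&d->o_sem)
			 : !down_read_trylock(&d->o_sem))
			return -EBUSY;
	} else if (excl) {
		down_write(&d->o_sem);	/* at most one exclusive opener */
	} else {
		down_read(&d->o_sem);	/* any number of shared openers */
	}
	if (excl)
		d->exclude = true;	/* so release drops the correct side */
	return 0;
}

static void foo_release(struct foo_dev *d)
{
	bool excl = d->exclude;

	d->exclude = false;
	if (excl)
		up_write(&d->o_sem);
	else
		up_read(&d->o_sem);
}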
*/ write_lock_irqsave(&sg_index_lock, iflags); + spin_lock(&sdp->sfd_lock); sdp->detached = 1; list_for_each_entry(sfp, &sdp->sfds, sfd_siblings) { wake_up_interruptible(&sfp->read_wait); kill_fasync(&sfp->async_qp, SIGPOLL, POLL_HUP); } + spin_unlock(&sdp->sfd_lock); write_unlock_irqrestore(&sg_index_lock, iflags); sysfs_remove_link(&scsidp->sdev_gendev.kobj, "generic"); @@ -2064,7 +2043,7 @@ sg_add_sfp(Sg_device * sdp, int dev) sfp = kzalloc(sizeof(*sfp), GFP_ATOMIC | __GFP_NOWARN); if (!sfp) - return NULL; + return ERR_PTR(-ENOMEM); init_waitqueue_head(&sfp->read_wait); rwlock_init(&sfp->rq_list_lock); @@ -2078,9 +2057,13 @@ sg_add_sfp(Sg_device * sdp, int dev) sfp->cmd_q = SG_DEF_COMMAND_Q; sfp->keep_orphan = SG_DEF_KEEP_ORPHAN; sfp->parentdp = sdp; - write_lock_irqsave(&sg_index_lock, iflags); + spin_lock_irqsave(&sdp->sfd_lock, iflags); + if (sdp->detached) { + spin_unlock_irqrestore(&sdp->sfd_lock, iflags); + return ERR_PTR(-ENODEV); + } list_add_tail(&sfp->sfd_siblings, &sdp->sfds); - write_unlock_irqrestore(&sg_index_lock, iflags); + spin_unlock_irqrestore(&sdp->sfd_lock, iflags); SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: sfp=0x%p\n", sfp)); if (unlikely(sg_big_buff != def_reserved_size)) sg_big_buff = def_reserved_size; @@ -2130,10 +2113,9 @@ static void sg_remove_sfp(struct kref *kref) struct sg_device *sdp = sfp->parentdp; unsigned long iflags; - write_lock_irqsave(&sg_index_lock, iflags); + spin_lock_irqsave(&sdp->sfd_lock, iflags); list_del(&sfp->sfd_siblings); - write_unlock_irqrestore(&sg_index_lock, iflags); - wake_up_interruptible(&sdp->o_excl_wait); + spin_unlock_irqrestore(&sdp->sfd_lock, iflags); INIT_WORK(&sfp->ew.work, sg_remove_sfp_usercontext); schedule_work(&sfp->ew.work); @@ -2520,7 +2502,7 @@ static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v) return 0; } -/* must be called while holding sg_index_lock */ +/* must be called while holding sg_index_lock and sfd_lock */ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp) { int k, m, new_interface, blen, usg; @@ -2605,22 +2587,26 @@ static int sg_proc_seq_show_debug(struct seq_file *s, void *v) read_lock_irqsave(&sg_index_lock, iflags); sdp = it ? 
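Alongside the locking change, sg_add_sfp() stops collapsing every failure into a NULL return: -ENOMEM and the new -ENODEV case (device detached during open) travel back through the ERR_PTR encoding, and sg_open() decodes them with IS_ERR()/PTR_ERR(). The idiom in isolation, with a hypothetical foo allocator:

#include <linux/err.h>
#include <linux/slab.h>

struct foo { int x; };

static struct foo *foo_alloc(gfp_t gfp)
{
	struct foo *p = kzalloc(sizeof(*p), gfp);

	if (!p)
		return ERR_PTR(-ENOMEM);	/* errno folded into the pointer */
	return p;
}

	/* at a call site: */
	struct foo *p = foo_alloc(GFP_KERNEL);

	if (IS_ERR(p))
		return PTR_ERR(p);		/* recovers -ENOMEM, -ENODEV, ... */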
sg_lookup_dev(it->index) : NULL; - if (sdp && !list_empty(&sdp->sfds)) { - struct scsi_device *scsidp = sdp->device; + if (sdp) { + spin_lock(&sdp->sfd_lock); + if (!list_empty(&sdp->sfds)) { + struct scsi_device *scsidp = sdp->device; - seq_printf(s, " >>> device=%s ", sdp->disk->disk_name); - if (sdp->detached) - seq_printf(s, "detached pending close "); - else - seq_printf - (s, "scsi%d chan=%d id=%d lun=%d em=%d", - scsidp->host->host_no, - scsidp->channel, scsidp->id, - scsidp->lun, - scsidp->host->hostt->emulated); - seq_printf(s, " sg_tablesize=%d excl=%d\n", - sdp->sg_tablesize, get_exclude(sdp)); - sg_proc_debug_helper(s, sdp); + seq_printf(s, " >>> device=%s ", sdp->disk->disk_name); + if (sdp->detached) + seq_printf(s, "detached pending close "); + else + seq_printf + (s, "scsi%d chan=%d id=%d lun=%d em=%d", + scsidp->host->host_no, + scsidp->channel, scsidp->id, + scsidp->lun, + scsidp->host->hostt->emulated); + seq_printf(s, " sg_tablesize=%d excl=%d\n", + sdp->sg_tablesize, sdp->exclude); + sg_proc_debug_helper(s, sdp); + } + spin_unlock(&sdp->sfd_lock); } read_unlock_irqrestore(&sg_index_lock, iflags); return 0; diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index 2a32036a9404..ff44b3c2cff2 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c @@ -82,7 +82,7 @@ static int try_rdio = 1; static int try_wdio = 1; static struct class st_sysfs_class; -static struct device_attribute st_dev_attrs[]; +static const struct attribute_group *st_dev_groups[]; MODULE_AUTHOR("Kai Makisara"); MODULE_DESCRIPTION("SCSI tape (st) driver"); @@ -4274,7 +4274,7 @@ static void scsi_tape_release(struct kref *kref) static struct class st_sysfs_class = { .name = "scsi_tape", - .dev_attrs = st_dev_attrs, + .dev_groups = st_dev_groups, }; static int __init init_st(void) @@ -4408,6 +4408,7 @@ defined_show(struct device *dev, struct device_attribute *attr, char *buf) l = snprintf(buf, PAGE_SIZE, "%d\n", STm->defined); return l; } +static DEVICE_ATTR_RO(defined); static ssize_t default_blksize_show(struct device *dev, struct device_attribute *attr, @@ -4419,7 +4420,7 @@ default_blksize_show(struct device *dev, struct device_attribute *attr, l = snprintf(buf, PAGE_SIZE, "%d\n", STm->default_blksize); return l; } - +static DEVICE_ATTR_RO(default_blksize); static ssize_t default_density_show(struct device *dev, struct device_attribute *attr, @@ -4433,6 +4434,7 @@ default_density_show(struct device *dev, struct device_attribute *attr, l = snprintf(buf, PAGE_SIZE, fmt, STm->default_density); return l; } +static DEVICE_ATTR_RO(default_density); static ssize_t default_compression_show(struct device *dev, struct device_attribute *attr, @@ -4444,6 +4446,7 @@ default_compression_show(struct device *dev, struct device_attribute *attr, l = snprintf(buf, PAGE_SIZE, "%d\n", STm->default_compression - 1); return l; } +static DEVICE_ATTR_RO(default_compression); static ssize_t options_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -4472,15 +4475,17 @@ options_show(struct device *dev, struct device_attribute *attr, char *buf) l = snprintf(buf, PAGE_SIZE, "0x%08x\n", options); return l; } - -static struct device_attribute st_dev_attrs[] = { - __ATTR_RO(defined), - __ATTR_RO(default_blksize), - __ATTR_RO(default_density), - __ATTR_RO(default_compression), - __ATTR_RO(options), - __ATTR_NULL, +static DEVICE_ATTR_RO(options); + +static struct attribute *st_dev_attrs[] = { + &dev_attr_defined.attr, + &dev_attr_default_blksize.attr, + &dev_attr_default_density.attr, + 
&dev_attr_default_compression.attr, + &dev_attr_options.attr, + NULL, }; +ATTRIBUTE_GROUPS(st_dev); /* The following functions may be useful for a larger audience. */ static int sgl_map_user_pages(struct st_buffer *STbp, diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h index 139bc0647b41..bce09a6898c4 100644 --- a/drivers/scsi/ufs/ufs.h +++ b/drivers/scsi/ufs/ufs.h @@ -36,10 +36,17 @@ #ifndef _UFS_H #define _UFS_H +#include <linux/mutex.h> +#include <linux/types.h> + #define MAX_CDB_SIZE 16 +#define GENERAL_UPIU_REQUEST_SIZE 32 +#define QUERY_DESC_MAX_SIZE 256 +#define QUERY_OSF_SIZE (GENERAL_UPIU_REQUEST_SIZE - \ + (sizeof(struct utp_upiu_header))) #define UPIU_HEADER_DWORD(byte3, byte2, byte1, byte0)\ - ((byte3 << 24) | (byte2 << 16) |\ + cpu_to_be32((byte3 << 24) | (byte2 << 16) |\ (byte1 << 8) | (byte0)) /* @@ -62,7 +69,7 @@ enum { UPIU_TRANSACTION_COMMAND = 0x01, UPIU_TRANSACTION_DATA_OUT = 0x02, UPIU_TRANSACTION_TASK_REQ = 0x04, - UPIU_TRANSACTION_QUERY_REQ = 0x26, + UPIU_TRANSACTION_QUERY_REQ = 0x16, }; /* UTP UPIU Transaction Codes Target to Initiator */ @@ -73,6 +80,7 @@ enum { UPIU_TRANSACTION_TASK_RSP = 0x24, UPIU_TRANSACTION_READY_XFER = 0x31, UPIU_TRANSACTION_QUERY_RSP = 0x36, + UPIU_TRANSACTION_REJECT_UPIU = 0x3F, }; /* UPIU Read/Write flags */ @@ -90,8 +98,41 @@ enum { UPIU_TASK_ATTR_ACA = 0x03, }; -/* UTP QUERY Transaction Specific Fields OpCode */ +/* UPIU Query request function */ enum { + UPIU_QUERY_FUNC_STANDARD_READ_REQUEST = 0x01, + UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST = 0x81, +}; + +/* Flag idn for Query Requests*/ +enum flag_idn { + QUERY_FLAG_IDN_FDEVICEINIT = 0x01, + QUERY_FLAG_IDN_BKOPS_EN = 0x04, +}; + +/* Attribute idn for Query requests */ +enum attr_idn { + QUERY_ATTR_IDN_BKOPS_STATUS = 0x05, + QUERY_ATTR_IDN_EE_CONTROL = 0x0D, + QUERY_ATTR_IDN_EE_STATUS = 0x0E, +}; + +/* Exception event mask values */ +enum { + MASK_EE_STATUS = 0xFFFF, + MASK_EE_URGENT_BKOPS = (1 << 2), +}; + +/* Background operation status */ +enum { + BKOPS_STATUS_NO_OP = 0x0, + BKOPS_STATUS_NON_CRITICAL = 0x1, + BKOPS_STATUS_PERF_IMPACT = 0x2, + BKOPS_STATUS_CRITICAL = 0x3, +}; + +/* UTP QUERY Transaction Specific Fields OpCode */ +enum query_opcode { UPIU_QUERY_OPCODE_NOP = 0x0, UPIU_QUERY_OPCODE_READ_DESC = 0x1, UPIU_QUERY_OPCODE_WRITE_DESC = 0x2, @@ -103,6 +144,21 @@ enum { UPIU_QUERY_OPCODE_TOGGLE_FLAG = 0x8, }; +/* Query response result code */ +enum { + QUERY_RESULT_SUCCESS = 0x00, + QUERY_RESULT_NOT_READABLE = 0xF6, + QUERY_RESULT_NOT_WRITEABLE = 0xF7, + QUERY_RESULT_ALREADY_WRITTEN = 0xF8, + QUERY_RESULT_INVALID_LENGTH = 0xF9, + QUERY_RESULT_INVALID_VALUE = 0xFA, + QUERY_RESULT_INVALID_SELECTOR = 0xFB, + QUERY_RESULT_INVALID_INDEX = 0xFC, + QUERY_RESULT_INVALID_IDN = 0xFD, + QUERY_RESULT_INVALID_OPCODE = 0xFE, + QUERY_RESULT_GENERAL_FAILURE = 0xFF, +}; + /* UTP Transfer Request Command Type (CT) */ enum { UPIU_COMMAND_SET_TYPE_SCSI = 0x0, @@ -110,10 +166,18 @@ enum { UPIU_COMMAND_SET_TYPE_QUERY = 0x2, }; +/* UTP Transfer Request Command Offset */ +#define UPIU_COMMAND_TYPE_OFFSET 28 + +/* Offset of the response code in the UPIU header */ +#define UPIU_RSP_CODE_OFFSET 8 + enum { - MASK_SCSI_STATUS = 0xFF, - MASK_TASK_RESPONSE = 0xFF00, - MASK_RSP_UPIU_RESULT = 0xFFFF, + MASK_SCSI_STATUS = 0xFF, + MASK_TASK_RESPONSE = 0xFF00, + MASK_RSP_UPIU_RESULT = 0xFFFF, + MASK_QUERY_DATA_SEG_LEN = 0xFFFF, + MASK_RSP_EXCEPTION_EVENT = 0x10000, }; /* Task management service response */ @@ -138,26 +202,59 @@ struct utp_upiu_header { /** * struct utp_upiu_cmd - Command UPIU structure 
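Two quiet correctness fixes sit in this ufs.h hunk: UPIU_TRANSACTION_QUERY_REQ drops from 0x26 to the spec value 0x16, and UPIU_HEADER_DWORD() gains a cpu_to_be32() so the composed header words reach memory in the big-endian byte order the link expects, regardless of host endianness. Concretely, for the command UPIU built later in ufshcd_prepare_utp_scsi_cmd_upiu():

ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
		UPIU_TRANSACTION_COMMAND,	/* 0x01: first octet on the wire */
		upiu_flags, lrbp->lun, lrbp->task_tag);
/* with cpu_to_be32() baked into the macro, the bytes land as
 * 0x01, flags, lun, tag on little- and big-endian CPUs alike */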
- * @header: UPIU header structure DW-0 to DW-2 * @data_transfer_len: Data Transfer Length DW-3 * @cdb: Command Descriptor Block CDB DW-4 to DW-7 */ struct utp_upiu_cmd { - struct utp_upiu_header header; u32 exp_data_transfer_len; u8 cdb[MAX_CDB_SIZE]; }; /** - * struct utp_upiu_rsp - Response UPIU structure - * @header: UPIU header DW-0 to DW-2 + * struct utp_upiu_query - upiu request buffer structure for + * query request. + * @opcode: command to perform B-0 + * @idn: a value that indicates the particular type of data B-1 + * @index: Index to further identify data B-2 + * @selector: Index to further identify data B-3 + * @reserved_osf: spec reserved field B-4,5 + * @length: number of descriptor bytes to read/write B-6,7 + * @value: Attribute value to be written DW-5 + * @reserved: spec reserved DW-6,7 + */ +struct utp_upiu_query { + u8 opcode; + u8 idn; + u8 index; + u8 selector; + u16 reserved_osf; + u16 length; + u32 value; + u32 reserved[2]; +}; + +/** + * struct utp_upiu_req - general upiu request structure + * @header:UPIU header structure DW-0 to DW-2 + * @sc: fields structure for scsi command DW-3 to DW-7 + * @qr: fields structure for query request DW-3 to DW-7 + */ +struct utp_upiu_req { + struct utp_upiu_header header; + union { + struct utp_upiu_cmd sc; + struct utp_upiu_query qr; + }; +}; + +/** + * struct utp_cmd_rsp - Response UPIU structure * @residual_transfer_count: Residual transfer count DW-3 * @reserved: Reserved double words DW-4 to DW-7 * @sense_data_len: Sense data length DW-8 U16 * @sense_data: Sense data field DW-8 to DW-12 */ -struct utp_upiu_rsp { - struct utp_upiu_header header; +struct utp_cmd_rsp { u32 residual_transfer_count; u32 reserved[4]; u16 sense_data_len; @@ -165,6 +262,20 @@ struct utp_upiu_rsp { }; /** + * struct utp_upiu_rsp - general upiu response structure + * @header: UPIU header structure DW-0 to DW-2 + * @sr: fields structure for scsi command DW-3 to DW-12 + * @qr: fields structure for query request DW-3 to DW-7 + */ +struct utp_upiu_rsp { + struct utp_upiu_header header; + union { + struct utp_cmd_rsp sr; + struct utp_upiu_query qr; + }; +}; + +/** * struct utp_upiu_task_req - Task request UPIU structure * @header - UPIU header structure DW0 to DW-2 * @input_param1: Input parameter 1 DW-3 @@ -194,4 +305,24 @@ struct utp_upiu_task_rsp { u32 reserved[3]; }; +/** + * struct ufs_query_req - parameters for building a query request + * @query_func: UPIU header query function + * @upiu_req: the query request data + */ +struct ufs_query_req { + u8 query_func; + struct utp_upiu_query upiu_req; +}; + +/** + * struct ufs_query_resp - UPIU QUERY + * @response: device response code + * @upiu_res: query response data + */ +struct ufs_query_res { + u8 response; + struct utp_upiu_query upiu_res; +}; + #endif /* End of Header */ diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c index 48be39a6f6d7..a823cf44e949 100644 --- a/drivers/scsi/ufs/ufshcd-pci.c +++ b/drivers/scsi/ufs/ufshcd-pci.c @@ -35,6 +35,7 @@ #include "ufshcd.h" #include <linux/pci.h> +#include <linux/pm_runtime.h> #ifdef CONFIG_PM /** @@ -44,7 +45,7 @@ * * Returns -ENOSYS */ -static int ufshcd_pci_suspend(struct pci_dev *pdev, pm_message_t state) +static int ufshcd_pci_suspend(struct device *dev) { /* * TODO: @@ -61,7 +62,7 @@ static int ufshcd_pci_suspend(struct pci_dev *pdev, pm_message_t state) * * Returns -ENOSYS */ -static int ufshcd_pci_resume(struct pci_dev *pdev) +static int ufshcd_pci_resume(struct device *dev) { /* * TODO: @@ -71,8 +72,45 @@ static int 
ufshcd_pci_resume(struct pci_dev *pdev) return -ENOSYS; } +#else +#define ufshcd_pci_suspend NULL +#define ufshcd_pci_resume NULL #endif /* CONFIG_PM */ +#ifdef CONFIG_PM_RUNTIME +static int ufshcd_pci_runtime_suspend(struct device *dev) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + + if (!hba) + return 0; + + return ufshcd_runtime_suspend(hba); +} +static int ufshcd_pci_runtime_resume(struct device *dev) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + + if (!hba) + return 0; + + return ufshcd_runtime_resume(hba); +} +static int ufshcd_pci_runtime_idle(struct device *dev) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + + if (!hba) + return 0; + + return ufshcd_runtime_idle(hba); +} +#else /* !CONFIG_PM_RUNTIME */ +#define ufshcd_pci_runtime_suspend NULL +#define ufshcd_pci_runtime_resume NULL +#define ufshcd_pci_runtime_idle NULL +#endif /* CONFIG_PM_RUNTIME */ + /** * ufshcd_pci_shutdown - main function to put the controller in reset state * @pdev: pointer to PCI device handle @@ -91,12 +129,10 @@ static void ufshcd_pci_remove(struct pci_dev *pdev) { struct ufs_hba *hba = pci_get_drvdata(pdev); - disable_irq(pdev->irq); + pm_runtime_forbid(&pdev->dev); + pm_runtime_get_noresume(&pdev->dev); ufshcd_remove(hba); - pci_release_regions(pdev); pci_set_drvdata(pdev, NULL); - pci_clear_master(pdev); - pci_disable_device(pdev); } /** @@ -133,55 +169,49 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) void __iomem *mmio_base; int err; - err = pci_enable_device(pdev); + err = pcim_enable_device(pdev); if (err) { - dev_err(&pdev->dev, "pci_enable_device failed\n"); - goto out_error; + dev_err(&pdev->dev, "pcim_enable_device failed\n"); + return err; } pci_set_master(pdev); - - err = pci_request_regions(pdev, UFSHCD); + err = pcim_iomap_regions(pdev, 1 << 0, UFSHCD); if (err < 0) { - dev_err(&pdev->dev, "request regions failed\n"); - goto out_disable; + dev_err(&pdev->dev, "request and iomap failed\n"); + return err; } - mmio_base = pci_ioremap_bar(pdev, 0); - if (!mmio_base) { - dev_err(&pdev->dev, "memory map failed\n"); - err = -ENOMEM; - goto out_release_regions; - } + mmio_base = pcim_iomap_table(pdev)[0]; err = ufshcd_set_dma_mask(pdev); if (err) { dev_err(&pdev->dev, "set dma mask failed\n"); - goto out_iounmap; + return err; } err = ufshcd_init(&pdev->dev, &hba, mmio_base, pdev->irq); if (err) { dev_err(&pdev->dev, "Initialization failed\n"); - goto out_iounmap; + return err; } pci_set_drvdata(pdev, hba); + pm_runtime_put_noidle(&pdev->dev); + pm_runtime_allow(&pdev->dev); return 0; - -out_iounmap: - iounmap(mmio_base); -out_release_regions: - pci_release_regions(pdev); -out_disable: - pci_clear_master(pdev); - pci_disable_device(pdev); -out_error: - return err; } +static const struct dev_pm_ops ufshcd_pci_pm_ops = { + .suspend = ufshcd_pci_suspend, + .resume = ufshcd_pci_resume, + .runtime_suspend = ufshcd_pci_runtime_suspend, + .runtime_resume = ufshcd_pci_runtime_resume, + .runtime_idle = ufshcd_pci_runtime_idle, +}; + static DEFINE_PCI_DEVICE_TABLE(ufshcd_pci_tbl) = { { PCI_VENDOR_ID_SAMSUNG, 0xC00C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, { } /* terminate list */ @@ -195,10 +225,9 @@ static struct pci_driver ufshcd_pci_driver = { .probe = ufshcd_pci_probe, .remove = ufshcd_pci_remove, .shutdown = ufshcd_pci_shutdown, -#ifdef CONFIG_PM - .suspend = ufshcd_pci_suspend, - .resume = ufshcd_pci_resume, -#endif + .driver = { + .pm = &ufshcd_pci_pm_ops + }, }; module_pci_driver(ufshcd_pci_driver); diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c 
b/drivers/scsi/ufs/ufshcd-pltfrm.c index c42db40d4e51..5e4623225422 100644 --- a/drivers/scsi/ufs/ufshcd-pltfrm.c +++ b/drivers/scsi/ufs/ufshcd-pltfrm.c @@ -34,6 +34,7 @@ */ #include <linux/platform_device.h> +#include <linux/pm_runtime.h> #include "ufshcd.h" @@ -87,6 +88,40 @@ static int ufshcd_pltfrm_resume(struct device *dev) #define ufshcd_pltfrm_resume NULL #endif +#ifdef CONFIG_PM_RUNTIME +static int ufshcd_pltfrm_runtime_suspend(struct device *dev) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + + if (!hba) + return 0; + + return ufshcd_runtime_suspend(hba); +} +static int ufshcd_pltfrm_runtime_resume(struct device *dev) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + + if (!hba) + return 0; + + return ufshcd_runtime_resume(hba); +} +static int ufshcd_pltfrm_runtime_idle(struct device *dev) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + + if (!hba) + return 0; + + return ufshcd_runtime_idle(hba); +} +#else /* !CONFIG_PM_RUNTIME */ +#define ufshcd_pltfrm_runtime_suspend NULL +#define ufshcd_pltfrm_runtime_resume NULL +#define ufshcd_pltfrm_runtime_idle NULL +#endif /* CONFIG_PM_RUNTIME */ + /** * ufshcd_pltfrm_probe - probe routine of the driver * @pdev: pointer to Platform device handle @@ -102,15 +137,8 @@ static int ufshcd_pltfrm_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!mem_res) { - dev_err(dev, "Memory resource not available\n"); - err = -ENODEV; - goto out; - } - mmio_base = devm_ioremap_resource(dev, mem_res); if (IS_ERR(mmio_base)) { - dev_err(dev, "memory map failed\n"); err = PTR_ERR(mmio_base); goto out; } @@ -122,14 +150,22 @@ static int ufshcd_pltfrm_probe(struct platform_device *pdev) goto out; } + pm_runtime_set_active(&pdev->dev); + pm_runtime_enable(&pdev->dev); + err = ufshcd_init(dev, &hba, mmio_base, irq); if (err) { dev_err(dev, "Intialization failed\n"); - goto out; + goto out_disable_rpm; } platform_set_drvdata(pdev, hba); + return 0; + +out_disable_rpm: + pm_runtime_disable(&pdev->dev); + pm_runtime_set_suspended(&pdev->dev); out: return err; } @@ -144,7 +180,7 @@ static int ufshcd_pltfrm_remove(struct platform_device *pdev) { struct ufs_hba *hba = platform_get_drvdata(pdev); - disable_irq(hba->irq); + pm_runtime_get_sync(&(pdev)->dev); ufshcd_remove(hba); return 0; } @@ -157,6 +193,9 @@ static const struct of_device_id ufs_of_match[] = { static const struct dev_pm_ops ufshcd_dev_pm_ops = { .suspend = ufshcd_pltfrm_suspend, .resume = ufshcd_pltfrm_resume, + .runtime_suspend = ufshcd_pltfrm_runtime_suspend, + .runtime_resume = ufshcd_pltfrm_runtime_resume, + .runtime_idle = ufshcd_pltfrm_runtime_idle, }; static struct platform_driver ufshcd_pltfrm_driver = { diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index b743bd6fce6b..b36ca9a2dfbb 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -43,6 +43,19 @@ /* UIC command timeout, unit: ms */ #define UIC_CMD_TIMEOUT 500 +/* NOP OUT retries waiting for NOP IN response */ +#define NOP_OUT_RETRIES 10 +/* Timeout after 30 msecs if NOP OUT hangs without response */ +#define NOP_OUT_TIMEOUT 30 /* msecs */ + +/* Query request retries */ +#define QUERY_REQ_RETRIES 10 +/* Query request timeout */ +#define QUERY_REQ_TIMEOUT 30 /* msec */ + +/* Expose the flag value from utp_upiu_query.value */ +#define MASK_QUERY_UPIU_FLAG_LOC 0xFF + enum { UFSHCD_MAX_CHANNEL = 0, UFSHCD_MAX_ID = 1, @@ -71,6 +84,40 @@ enum { INT_AGGR_CONFIG, }; +/* + * ufshcd_wait_for_register - wait for register 
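Both UFS glue drivers gain runtime PM in the same shape: a dev_pm_ops whose runtime_suspend/resume/idle thunks forward to the new ufshcd_runtime_* helpers, plus probe/remove bookkeeping that keeps the usage count balanced. A sketch of how the PCI variant appears intended to pair up (foo_* names are stand-ins):

static int foo_probe(struct pci_dev *pdev)
{
	/* ... controller init ... */
	pm_runtime_put_noidle(&pdev->dev);	/* drop the probe-time ref */
	pm_runtime_allow(&pdev->dev);		/* permit autosuspend */
	return 0;
}

static void foo_remove(struct pci_dev *pdev)
{
	pm_runtime_forbid(&pdev->dev);		/* resume and pin the device */
	pm_runtime_get_noresume(&pdev->dev);	/* rebalance the earlier put */
	/* ... teardown while the device is guaranteed powered ... */
}

The platform variant reaches the same guarantee differently: probe calls pm_runtime_set_active()/pm_runtime_enable() (undone on its error path), and remove takes pm_runtime_get_sync() before ufshcd_remove().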
value to change + * @hba - per-adapter interface + * @reg - mmio register offset + * @mask - mask to apply to read register value + * @val - wait condition + * @interval_us - polling interval in microsecs + * @timeout_ms - timeout in millisecs + * + * Returns -ETIMEDOUT on error, zero on success + */ +static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask, + u32 val, unsigned long interval_us, unsigned long timeout_ms) +{ + int err = 0; + unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms); + + /* ignore bits that we don't intend to wait on */ + val = val & mask; + + while ((ufshcd_readl(hba, reg) & mask) != val) { + /* wakeup within 50us of expiry */ + usleep_range(interval_us, interval_us + 50); + + if (time_after(jiffies, timeout)) { + if ((ufshcd_readl(hba, reg) & mask) != val) + err = -ETIMEDOUT; + break; + } + } + + return err; +} + /** * ufshcd_get_intr_mask - Get the interrupt bit mask * @hba - Pointer to adapter instance @@ -191,18 +238,13 @@ static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba) } /** - * ufshcd_is_valid_req_rsp - checks if controller TR response is valid + * ufshcd_get_req_rsp - returns the TR response transaction type * @ucd_rsp_ptr: pointer to response UPIU - * - * This function checks the response UPIU for valid transaction type in - * response field - * Returns 0 on success, non-zero on failure */ static inline int -ufshcd_is_valid_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr) +ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr) { - return ((be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24) == - UPIU_TRANSACTION_RESPONSE) ? 0 : DID_ERROR << 16; + return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24; } /** @@ -219,6 +261,21 @@ ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr) } /** + * ufshcd_is_exception_event - Check if the device raised an exception event + * @ucd_rsp_ptr: pointer to response UPIU + * + * The function checks if the device raised an exception event indicated in + * the Device Information field of response UPIU. + * + * Returns true if exception is raised, false otherwise. + */ +static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr) +{ + return be32_to_cpu(ucd_rsp_ptr->header.dword_2) & + MASK_RSP_EXCEPTION_EVENT ? true : false; +} + +/** * ufshcd_config_int_aggr - Configure interrupt aggregation values. * Currently there is no use case where we want to configure * interrupt aggregation dynamically. 
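ufshcd_wait_for_register() above is a bounded poll: sleep about interval_us between reads (usleep_range, so the scheduler may coalesce wakeups), re-check once more at the deadline to avoid a false timeout, and return -ETIMEDOUT only if the masked value still mismatches. Typical use, as ufshcd_clear_cmd() does later in this patch to wait for a doorbell bit to drop:

u32 mask = 1 << tag;

/* poll roughly every 1 ms, give up after 1000 ms */
err = ufshcd_wait_for_register(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL,
			       mask, ~mask, 1000, 1000);
/* passing ~mask as val waits for (reg & mask) == 0, i.e. bit cleared */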
So to configure interrupt @@ -299,14 +356,68 @@ static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp) { int len; if (lrbp->sense_buffer) { - len = be16_to_cpu(lrbp->ucd_rsp_ptr->sense_data_len); + len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len); memcpy(lrbp->sense_buffer, - lrbp->ucd_rsp_ptr->sense_data, + lrbp->ucd_rsp_ptr->sr.sense_data, min_t(int, len, SCSI_SENSE_BUFFERSIZE)); } } /** + * ufshcd_query_to_cpu() - formats the buffer to native cpu endian + * @response: upiu query response to convert + */ +static inline void ufshcd_query_to_cpu(struct utp_upiu_query *response) +{ + response->length = be16_to_cpu(response->length); + response->value = be32_to_cpu(response->value); +} + +/** + * ufshcd_query_to_be() - formats the buffer to big endian + * @request: upiu query request to convert + */ +static inline void ufshcd_query_to_be(struct utp_upiu_query *request) +{ + request->length = cpu_to_be16(request->length); + request->value = cpu_to_be32(request->value); +} + +/** + * ufshcd_copy_query_response() - Copy the Query Response and the data + * descriptor + * @hba: per adapter instance + * @lrb - pointer to local reference block + */ +static +void ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) +{ + struct ufs_query_res *query_res = &hba->dev_cmd.query.response; + + /* Get the UPIU response */ + query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >> + UPIU_RSP_CODE_OFFSET; + + memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE); + ufshcd_query_to_cpu(&query_res->upiu_res); + + + /* Get the descriptor */ + if (lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) { + u8 *descp = (u8 *)&lrbp->ucd_rsp_ptr + + GENERAL_UPIU_REQUEST_SIZE; + u16 len; + + /* data segment length */ + len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) & + MASK_QUERY_DATA_SEG_LEN; + + memcpy(hba->dev_cmd.query.descriptor, descp, + min_t(u16, len, QUERY_DESC_MAX_SIZE)); + } +} + +/** * ufshcd_hba_capabilities - Read controller capabilities * @hba: per adapter instance */ @@ -519,76 +630,170 @@ static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs) } /** + * ufshcd_prepare_req_desc_hdr() - Fills the requests header + * descriptor according to request + * @lrbp: pointer to local reference block + * @upiu_flags: flags required in the header + * @cmd_dir: requests data direction + */ +static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp, + u32 *upiu_flags, enum dma_data_direction cmd_dir) +{ + struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr; + u32 data_direction; + u32 dword_0; + + if (cmd_dir == DMA_FROM_DEVICE) { + data_direction = UTP_DEVICE_TO_HOST; + *upiu_flags = UPIU_CMD_FLAGS_READ; + } else if (cmd_dir == DMA_TO_DEVICE) { + data_direction = UTP_HOST_TO_DEVICE; + *upiu_flags = UPIU_CMD_FLAGS_WRITE; + } else { + data_direction = UTP_NO_DATA_TRANSFER; + *upiu_flags = UPIU_CMD_FLAGS_NONE; + } + + dword_0 = data_direction | (lrbp->command_type + << UPIU_COMMAND_TYPE_OFFSET); + if (lrbp->intr_cmd) + dword_0 |= UTP_REQ_DESC_INT_CMD; + + /* Transfer request descriptor header fields */ + req_desc->header.dword_0 = cpu_to_le32(dword_0); + + /* + * assigning invalid value for command status. 
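ufshcd_query_to_cpu() and ufshcd_query_to_be() form a symmetric conversion pair so the rest of the driver can treat query lengths and values as native-endian; only the DMA-visible UPIU holds big-endian data. The request and response paths mirror each other:

/* outbound (ufshcd_prepare_utp_query_req_upiu): cpu -> wire order */
memcpy(&ucd_req_ptr->qr, &query->request.upiu_req, QUERY_OSF_SIZE);
ufshcd_query_to_be(&ucd_req_ptr->qr);

/* inbound (ufshcd_copy_query_response): wire -> cpu order */
memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);
ufshcd_query_to_cpu(&query_res->upiu_res);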
Controller + * updates OCS on command completion, with the command + * status + */ + req_desc->header.dword_2 = + cpu_to_le32(OCS_INVALID_COMMAND_STATUS); +} + +/** + * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc, + * for scsi commands + * @lrbp - local reference block pointer + * @upiu_flags - flags + */ +static +void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags) +{ + struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr; + + /* command descriptor fields */ + ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD( + UPIU_TRANSACTION_COMMAND, upiu_flags, + lrbp->lun, lrbp->task_tag); + ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD( + UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0); + + /* Total EHS length and Data segment length will be zero */ + ucd_req_ptr->header.dword_2 = 0; + + ucd_req_ptr->sc.exp_data_transfer_len = + cpu_to_be32(lrbp->cmd->sdb.length); + + memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd, + (min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE))); +} + +/** + * ufshcd_prepare_utp_query_req_upiu() - fills the utp_transfer_req_desc, + * for query requsts + * @hba: UFS hba + * @lrbp: local reference block pointer + * @upiu_flags: flags + */ +static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba, + struct ufshcd_lrb *lrbp, u32 upiu_flags) +{ + struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr; + struct ufs_query *query = &hba->dev_cmd.query; + u16 len = query->request.upiu_req.length; + u8 *descp = (u8 *)lrbp->ucd_req_ptr + GENERAL_UPIU_REQUEST_SIZE; + + /* Query request header */ + ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD( + UPIU_TRANSACTION_QUERY_REQ, upiu_flags, + lrbp->lun, lrbp->task_tag); + ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD( + 0, query->request.query_func, 0, 0); + + /* Data segment length */ + ucd_req_ptr->header.dword_2 = UPIU_HEADER_DWORD( + 0, 0, len >> 8, (u8)len); + + /* Copy the Query Request buffer as is */ + memcpy(&ucd_req_ptr->qr, &query->request.upiu_req, + QUERY_OSF_SIZE); + ufshcd_query_to_be(&ucd_req_ptr->qr); + + /* Copy the Descriptor */ + if ((len > 0) && (query->request.upiu_req.opcode == + UPIU_QUERY_OPCODE_WRITE_DESC)) { + memcpy(descp, query->descriptor, + min_t(u16, len, QUERY_DESC_MAX_SIZE)); + } +} + +static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp) +{ + struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr; + + memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req)); + + /* command descriptor fields */ + ucd_req_ptr->header.dword_0 = + UPIU_HEADER_DWORD( + UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag); +} + +/** * ufshcd_compose_upiu - form UFS Protocol Information Unit(UPIU) + * @hba - per adapter instance * @lrb - pointer to local reference block */ -static void ufshcd_compose_upiu(struct ufshcd_lrb *lrbp) +static int ufshcd_compose_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) { - struct utp_transfer_req_desc *req_desc; - struct utp_upiu_cmd *ucd_cmd_ptr; - u32 data_direction; u32 upiu_flags; - - ucd_cmd_ptr = lrbp->ucd_cmd_ptr; - req_desc = lrbp->utr_descriptor_ptr; + int ret = 0; switch (lrbp->command_type) { case UTP_CMD_TYPE_SCSI: - if (lrbp->cmd->sc_data_direction == DMA_FROM_DEVICE) { - data_direction = UTP_DEVICE_TO_HOST; - upiu_flags = UPIU_CMD_FLAGS_READ; - } else if (lrbp->cmd->sc_data_direction == DMA_TO_DEVICE) { - data_direction = UTP_HOST_TO_DEVICE; - upiu_flags = UPIU_CMD_FLAGS_WRITE; + if (likely(lrbp->cmd)) { + ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, + lrbp->cmd->sc_data_direction); + 
ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags); } else { - data_direction = UTP_NO_DATA_TRANSFER; - upiu_flags = UPIU_CMD_FLAGS_NONE; + ret = -EINVAL; } - - /* Transfer request descriptor header fields */ - req_desc->header.dword_0 = - cpu_to_le32(data_direction | UTP_SCSI_COMMAND); - - /* - * assigning invalid value for command status. Controller - * updates OCS on command completion, with the command - * status - */ - req_desc->header.dword_2 = - cpu_to_le32(OCS_INVALID_COMMAND_STATUS); - - /* command descriptor fields */ - ucd_cmd_ptr->header.dword_0 = - cpu_to_be32(UPIU_HEADER_DWORD(UPIU_TRANSACTION_COMMAND, - upiu_flags, - lrbp->lun, - lrbp->task_tag)); - ucd_cmd_ptr->header.dword_1 = - cpu_to_be32( - UPIU_HEADER_DWORD(UPIU_COMMAND_SET_TYPE_SCSI, - 0, - 0, - 0)); - - /* Total EHS length and Data segment length will be zero */ - ucd_cmd_ptr->header.dword_2 = 0; - - ucd_cmd_ptr->exp_data_transfer_len = - cpu_to_be32(lrbp->cmd->sdb.length); - - memcpy(ucd_cmd_ptr->cdb, - lrbp->cmd->cmnd, - (min_t(unsigned short, - lrbp->cmd->cmd_len, - MAX_CDB_SIZE))); break; case UTP_CMD_TYPE_DEV_MANAGE: - /* For query function implementation */ + ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE); + if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY) + ufshcd_prepare_utp_query_req_upiu( + hba, lrbp, upiu_flags); + else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP) + ufshcd_prepare_utp_nop_upiu(lrbp); + else + ret = -EINVAL; break; case UTP_CMD_TYPE_UFS: /* For UFS native command implementation */ + ret = -ENOTSUPP; + dev_err(hba->dev, "%s: UFS native command are not supported\n", + __func__); + break; + default: + ret = -ENOTSUPP; + dev_err(hba->dev, "%s: unknown command type: 0x%x\n", + __func__, lrbp->command_type); break; } /* end of switch */ + + return ret; } /** @@ -615,21 +820,37 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) goto out; } + /* acquire the tag to make sure device cmds don't use it */ + if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) { + /* + * Dev manage command in progress, requeue the command. + * Requeuing the command helps in cases where the request *may* + * find different tag instead of waiting for dev manage command + * completion. 
+ */ + err = SCSI_MLQUEUE_HOST_BUSY; + goto out; + } + lrbp = &hba->lrb[tag]; + WARN_ON(lrbp->cmd); lrbp->cmd = cmd; lrbp->sense_bufflen = SCSI_SENSE_BUFFERSIZE; lrbp->sense_buffer = cmd->sense_buffer; lrbp->task_tag = tag; lrbp->lun = cmd->device->lun; - + lrbp->intr_cmd = false; lrbp->command_type = UTP_CMD_TYPE_SCSI; /* form UPIU before issuing the command */ - ufshcd_compose_upiu(lrbp); + ufshcd_compose_upiu(hba, lrbp); err = ufshcd_map_sg(lrbp); - if (err) + if (err) { + lrbp->cmd = NULL; + clear_bit_unlock(tag, &hba->lrb_in_use); goto out; + } /* issue command to the controller */ spin_lock_irqsave(hba->host->host_lock, flags); @@ -639,6 +860,338 @@ out: return err; } +static int ufshcd_compose_dev_cmd(struct ufs_hba *hba, + struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag) +{ + lrbp->cmd = NULL; + lrbp->sense_bufflen = 0; + lrbp->sense_buffer = NULL; + lrbp->task_tag = tag; + lrbp->lun = 0; /* device management cmd is not specific to any LUN */ + lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE; + lrbp->intr_cmd = true; /* No interrupt aggregation */ + hba->dev_cmd.type = cmd_type; + + return ufshcd_compose_upiu(hba, lrbp); +} + +static int +ufshcd_clear_cmd(struct ufs_hba *hba, int tag) +{ + int err = 0; + unsigned long flags; + u32 mask = 1 << tag; + + /* clear outstanding transaction before retry */ + spin_lock_irqsave(hba->host->host_lock, flags); + ufshcd_utrl_clear(hba, tag); + spin_unlock_irqrestore(hba->host->host_lock, flags); + + /* + * wait for for h/w to clear corresponding bit in door-bell. + * max. wait is 1 sec. + */ + err = ufshcd_wait_for_register(hba, + REG_UTP_TRANSFER_REQ_DOOR_BELL, + mask, ~mask, 1000, 1000); + + return err; +} + +/** + * ufshcd_dev_cmd_completion() - handles device management command responses + * @hba: per adapter instance + * @lrbp: pointer to local reference block + */ +static int +ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) +{ + int resp; + int err = 0; + + resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr); + + switch (resp) { + case UPIU_TRANSACTION_NOP_IN: + if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) { + err = -EINVAL; + dev_err(hba->dev, "%s: unexpected response %x\n", + __func__, resp); + } + break; + case UPIU_TRANSACTION_QUERY_RSP: + ufshcd_copy_query_response(hba, lrbp); + break; + case UPIU_TRANSACTION_REJECT_UPIU: + /* TODO: handle Reject UPIU Response */ + err = -EPERM; + dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n", + __func__); + break; + default: + err = -EINVAL; + dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n", + __func__, resp); + break; + } + + return err; +} + +static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba, + struct ufshcd_lrb *lrbp, int max_timeout) +{ + int err = 0; + unsigned long time_left; + unsigned long flags; + + time_left = wait_for_completion_timeout(hba->dev_cmd.complete, + msecs_to_jiffies(max_timeout)); + + spin_lock_irqsave(hba->host->host_lock, flags); + hba->dev_cmd.complete = NULL; + if (likely(time_left)) { + err = ufshcd_get_tr_ocs(lrbp); + if (!err) + err = ufshcd_dev_cmd_completion(hba, lrbp); + } + spin_unlock_irqrestore(hba->host->host_lock, flags); + + if (!time_left) { + err = -ETIMEDOUT; + if (!ufshcd_clear_cmd(hba, lrbp->task_tag)) + /* sucessfully cleared the command, retry if needed */ + err = -EAGAIN; + } + + return err; +} + +/** + * ufshcd_get_dev_cmd_tag - Get device management command tag + * @hba: per-adapter instance + * @tag: pointer to variable with available slot value + * + * Get a free slot and lock it until 
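Note the asymmetry the new lrb_in_use bitmap introduces: ufshcd_queuecommand() runs in a context that must not sleep, so a collision with a device-management command bounces the SCSI command back with SCSI_MLQUEUE_HOST_BUSY, while ufshcd_exec_dev_cmd() may block in wait_event() until a slot frees. The claim/release pair at the heart of it, condensed from the surrounding functions:

/* claim: propose the highest free slot, then take it atomically;
 * losing the race to another claimant just means one more lap */
do {
	tmp = ~hba->lrb_in_use;			/* set bits == free slots */
	tag = find_last_bit(&tmp, hba->nutrs);
	if (tag >= hba->nutrs)
		return false;			/* every slot in flight */
} while (test_and_set_bit_lock(tag, &hba->lrb_in_use));

/* release: clear the bit, then wake any sleeper in exec_dev_cmd() */
clear_bit_unlock(tag, &hba->lrb_in_use);
wake_up(&hba->dev_cmd.tag_wq);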
device management command + * completes. + * + * Returns false if free slot is unavailable for locking, else + * return true with tag value in @tag. + */ +static bool ufshcd_get_dev_cmd_tag(struct ufs_hba *hba, int *tag_out) +{ + int tag; + bool ret = false; + unsigned long tmp; + + if (!tag_out) + goto out; + + do { + tmp = ~hba->lrb_in_use; + tag = find_last_bit(&tmp, hba->nutrs); + if (tag >= hba->nutrs) + goto out; + } while (test_and_set_bit_lock(tag, &hba->lrb_in_use)); + + *tag_out = tag; + ret = true; +out: + return ret; +} + +static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag) +{ + clear_bit_unlock(tag, &hba->lrb_in_use); +} + +/** + * ufshcd_exec_dev_cmd - API for sending device management requests + * @hba - UFS hba + * @cmd_type - specifies the type (NOP, Query...) + * @timeout - time in seconds + * + * NOTE: Since there is only one available tag for device management commands, + * it is expected you hold the hba->dev_cmd.lock mutex. + */ +static int ufshcd_exec_dev_cmd(struct ufs_hba *hba, + enum dev_cmd_type cmd_type, int timeout) +{ + struct ufshcd_lrb *lrbp; + int err; + int tag; + struct completion wait; + unsigned long flags; + + /* + * Get free slot, sleep if slots are unavailable. + * Even though we use wait_event() which sleeps indefinitely, + * the maximum wait time is bounded by SCSI request timeout. + */ + wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag)); + + init_completion(&wait); + lrbp = &hba->lrb[tag]; + WARN_ON(lrbp->cmd); + err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag); + if (unlikely(err)) + goto out_put_tag; + + hba->dev_cmd.complete = &wait; + + spin_lock_irqsave(hba->host->host_lock, flags); + ufshcd_send_command(hba, tag); + spin_unlock_irqrestore(hba->host->host_lock, flags); + + err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout); + +out_put_tag: + ufshcd_put_dev_cmd_tag(hba, tag); + wake_up(&hba->dev_cmd.tag_wq); + return err; +} + +/** + * ufshcd_query_flag() - API function for sending flag query requests + * hba: per-adapter instance + * query_opcode: flag query to perform + * idn: flag idn to access + * flag_res: the flag value after the query request completes + * + * Returns 0 for success, non-zero in case of failure + */ +static int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode, + enum flag_idn idn, bool *flag_res) +{ + struct ufs_query_req *request; + struct ufs_query_res *response; + int err; + + BUG_ON(!hba); + + mutex_lock(&hba->dev_cmd.lock); + request = &hba->dev_cmd.query.request; + response = &hba->dev_cmd.query.response; + memset(request, 0, sizeof(struct ufs_query_req)); + memset(response, 0, sizeof(struct ufs_query_res)); + + switch (opcode) { + case UPIU_QUERY_OPCODE_SET_FLAG: + case UPIU_QUERY_OPCODE_CLEAR_FLAG: + case UPIU_QUERY_OPCODE_TOGGLE_FLAG: + request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST; + break; + case UPIU_QUERY_OPCODE_READ_FLAG: + request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST; + if (!flag_res) { + /* No dummy reads */ + dev_err(hba->dev, "%s: Invalid argument for read request\n", + __func__); + err = -EINVAL; + goto out_unlock; + } + break; + default: + dev_err(hba->dev, + "%s: Expected query flag opcode but got = %d\n", + __func__, opcode); + err = -EINVAL; + goto out_unlock; + } + request->upiu_req.opcode = opcode; + request->upiu_req.idn = idn; + + /* Send query request */ + err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, + QUERY_REQ_TIMEOUT); + + if (err) { + dev_err(hba->dev, + "%s: Sending flag query for idn %d failed, 
err = %d\n", + __func__, idn, err); + goto out_unlock; + } + + if (flag_res) + *flag_res = (response->upiu_res.value & + MASK_QUERY_UPIU_FLAG_LOC) & 0x1; + +out_unlock: + mutex_unlock(&hba->dev_cmd.lock); + return err; +} + +/** + * ufshcd_query_attr - API function for sending attribute requests + * hba: per-adapter instance + * opcode: attribute opcode + * idn: attribute idn to access + * index: index field + * selector: selector field + * attr_val: the attribute value after the query request completes + * + * Returns 0 for success, non-zero in case of failure +*/ +int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode, + enum attr_idn idn, u8 index, u8 selector, u32 *attr_val) +{ + struct ufs_query_req *request; + struct ufs_query_res *response; + int err; + + BUG_ON(!hba); + + if (!attr_val) { + dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n", + __func__, opcode); + err = -EINVAL; + goto out; + } + + mutex_lock(&hba->dev_cmd.lock); + request = &hba->dev_cmd.query.request; + response = &hba->dev_cmd.query.response; + memset(request, 0, sizeof(struct ufs_query_req)); + memset(response, 0, sizeof(struct ufs_query_res)); + + switch (opcode) { + case UPIU_QUERY_OPCODE_WRITE_ATTR: + request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST; + request->upiu_req.value = *attr_val; + break; + case UPIU_QUERY_OPCODE_READ_ATTR: + request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST; + break; + default: + dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n", + __func__, opcode); + err = -EINVAL; + goto out_unlock; + } + + request->upiu_req.opcode = opcode; + request->upiu_req.idn = idn; + request->upiu_req.index = index; + request->upiu_req.selector = selector; + + /* Send query request */ + err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, + QUERY_REQ_TIMEOUT); + + if (err) { + dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n", + __func__, opcode, idn, err); + goto out_unlock; + } + + *attr_val = response->upiu_res.value; + +out_unlock: + mutex_unlock(&hba->dev_cmd.lock); +out: + return err; +} + /** * ufshcd_memory_alloc - allocate memory for host memory space data structures * @hba: per adapter instance @@ -774,8 +1327,8 @@ static void ufshcd_host_memory_configure(struct ufs_hba *hba) cpu_to_le16(ALIGNED_UPIU_SIZE >> 2); hba->lrb[i].utr_descriptor_ptr = (utrdlp + i); - hba->lrb[i].ucd_cmd_ptr = - (struct utp_upiu_cmd *)(cmd_descp + i); + hba->lrb[i].ucd_req_ptr = + (struct utp_upiu_req *)(cmd_descp + i); hba->lrb[i].ucd_rsp_ptr = (struct utp_upiu_rsp *)cmd_descp[i].response_upiu; hba->lrb[i].ucd_prdt_ptr = @@ -809,6 +1362,57 @@ static int ufshcd_dme_link_startup(struct ufs_hba *hba) } /** + * ufshcd_complete_dev_init() - checks device readiness + * hba: per-adapter instance + * + * Set fDeviceInit flag and poll until device toggles it. + */ +static int ufshcd_complete_dev_init(struct ufs_hba *hba) +{ + int i, retries, err = 0; + bool flag_res = 1; + + for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) { + /* Set the fDeviceInit flag */ + err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG, + QUERY_FLAG_IDN_FDEVICEINIT, NULL); + if (!err || err == -ETIMEDOUT) + break; + dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err); + } + if (err) { + dev_err(hba->dev, + "%s setting fDeviceInit flag failed with error %d\n", + __func__, err); + goto out; + } + + /* poll for max. 
100 iterations for fDeviceInit flag to clear */ + for (i = 0; i < 100 && !err && flag_res; i++) { + for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) { + err = ufshcd_query_flag(hba, + UPIU_QUERY_OPCODE_READ_FLAG, + QUERY_FLAG_IDN_FDEVICEINIT, &flag_res); + if (!err || err == -ETIMEDOUT) + break; + dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, + err); + } + } + if (err) + dev_err(hba->dev, + "%s reading fDeviceInit flag failed with error %d\n", + __func__, err); + else if (flag_res) + dev_err(hba->dev, + "%s fDeviceInit was not cleared by the device\n", + __func__); + +out: + return err; +} + +/** * ufshcd_make_hba_operational - Make UFS controller operational * @hba: per adapter instance * @@ -961,6 +1565,38 @@ out: } /** + * ufshcd_verify_dev_init() - Verify device initialization + * @hba: per-adapter instance + * + * Send NOP OUT UPIU and wait for NOP IN response to check whether the + * device Transport Protocol (UTP) layer is ready after a reset. + * If the UTP layer at the device side is not initialized, it may + * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT + * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations. + */ +static int ufshcd_verify_dev_init(struct ufs_hba *hba) +{ + int err = 0; + int retries; + + mutex_lock(&hba->dev_cmd.lock); + for (retries = NOP_OUT_RETRIES; retries > 0; retries--) { + err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP, + NOP_OUT_TIMEOUT); + + if (!err || err == -ETIMEDOUT) + break; + + dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err); + } + mutex_unlock(&hba->dev_cmd.lock); + + if (err) + dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err); + return err; +} + +/** * ufshcd_do_reset - reset the host controller * @hba: per adapter instance * @@ -986,13 +1622,20 @@ static int ufshcd_do_reset(struct ufs_hba *hba) for (tag = 0; tag < hba->nutrs; tag++) { if (test_bit(tag, &hba->outstanding_reqs)) { lrbp = &hba->lrb[tag]; - scsi_dma_unmap(lrbp->cmd); - lrbp->cmd->result = DID_RESET << 16; - lrbp->cmd->scsi_done(lrbp->cmd); - lrbp->cmd = NULL; + if (lrbp->cmd) { + scsi_dma_unmap(lrbp->cmd); + lrbp->cmd->result = DID_RESET << 16; + lrbp->cmd->scsi_done(lrbp->cmd); + lrbp->cmd = NULL; + clear_bit_unlock(tag, &hba->lrb_in_use); + } } } + /* complete device management command */ + if (hba->dev_cmd.complete) + complete(hba->dev_cmd.complete); + /* clear outstanding request/task bit maps */ hba->outstanding_reqs = 0; hba->outstanding_tasks = 0; @@ -1199,27 +1842,39 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) switch (ocs) { case OCS_SUCCESS: + result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr); - /* check if the returned transfer response is valid */ - result = ufshcd_is_valid_req_rsp(lrbp->ucd_rsp_ptr); - if (result) { + switch (result) { + case UPIU_TRANSACTION_RESPONSE: + /* + * get the response UPIU result to extract + * the SCSI command status + */ + result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr); + + /* + * get the result based on SCSI status response + * to notify the SCSI midlayer of the command status + */ + scsi_status = result & MASK_SCSI_STATUS; + result = ufshcd_scsi_cmd_status(lrbp, scsi_status); + + if (ufshcd_is_exception_event(lrbp->ucd_rsp_ptr)) + schedule_work(&hba->eeh_work); + break; + case UPIU_TRANSACTION_REJECT_UPIU: + /* TODO: handle Reject UPIU Response */ + result = DID_ERROR << 16; + dev_err(hba->dev, + "Reject UPIU not fully implemented\n"); + break; + default: + result = DID_ERROR << 16; dev_err(hba->dev, - "Invalid response = %x\n", 
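ufshcd_complete_dev_init() above is a set-then-poll handshake built entirely on ufshcd_query_flag(): the host raises fDeviceInit, and the device signals readiness by clearing it. Stripped of the retry and iteration caps:

bool res = true;

/* 1) host sets the flag ... */
err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
			QUERY_FLAG_IDN_FDEVICEINIT, NULL);

/* 2) ... then polls until the device clears it */
while (!err && res)
	err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
				QUERY_FLAG_IDN_FDEVICEINIT, &res);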
+
+/**
  * ufshcd_make_hba_operational - Make UFS controller operational
  * @hba: per adapter instance
  *
@@ -961,6 +1565,38 @@ out:
 }
 
 /**
+ * ufshcd_verify_dev_init() - Verify device initialization
+ * @hba: per-adapter instance
+ *
+ * Send NOP OUT UPIU and wait for NOP IN response to check whether the
+ * device's UFS Transport Protocol (UTP) layer is ready after a reset.
+ * If the UTP layer at the device side is not initialized, it may
+ * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT
+ * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
+ */
+static int ufshcd_verify_dev_init(struct ufs_hba *hba)
+{
+	int err = 0;
+	int retries;
+
+	mutex_lock(&hba->dev_cmd.lock);
+	for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
+		err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
+					NOP_OUT_TIMEOUT);
+
+		if (!err || err == -ETIMEDOUT)
+			break;
+
+		dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
+	}
+	mutex_unlock(&hba->dev_cmd.lock);
+
+	if (err)
+		dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
+	return err;
+}
+
+/**
  * ufshcd_do_reset - reset the host controller
  * @hba: per adapter instance
  *
@@ -986,13 +1622,20 @@ static int ufshcd_do_reset(struct ufs_hba *hba)
 	for (tag = 0; tag < hba->nutrs; tag++) {
 		if (test_bit(tag, &hba->outstanding_reqs)) {
 			lrbp = &hba->lrb[tag];
-			scsi_dma_unmap(lrbp->cmd);
-			lrbp->cmd->result = DID_RESET << 16;
-			lrbp->cmd->scsi_done(lrbp->cmd);
-			lrbp->cmd = NULL;
+			if (lrbp->cmd) {
+				scsi_dma_unmap(lrbp->cmd);
+				lrbp->cmd->result = DID_RESET << 16;
+				lrbp->cmd->scsi_done(lrbp->cmd);
+				lrbp->cmd = NULL;
+				clear_bit_unlock(tag, &hba->lrb_in_use);
+			}
 		}
 	}
 
+	/* complete device management command */
+	if (hba->dev_cmd.complete)
+		complete(hba->dev_cmd.complete);
+
 	/* clear outstanding request/task bit maps */
 	hba->outstanding_reqs = 0;
 	hba->outstanding_tasks = 0;
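The new complete() call matters because device management commands wait on hba->dev_cmd.complete rather than on a SCSI command: without it, a reset issued while a NOP OUT or query was in flight would leave the issuer blocked until its timeout expired. The waiting side, per ufshcd_exec_dev_cmd() earlier in this patch, has roughly this assumed shape (sketch only):

	static int example_dev_cmd_wait(struct ufs_hba *hba, int timeout_ms)
	{
		DECLARE_COMPLETION_ONSTACK(wait);	/* <linux/completion.h> */
		int err = 0;

		hba->dev_cmd.complete = &wait;
		/* ... build the UPIU and ring the doorbell here ... */
		if (!wait_for_completion_timeout(&wait,
				msecs_to_jiffies(timeout_ms)))
			err = -ETIMEDOUT; /* reset path completes &wait so we return early instead */
		return err;
	}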
@@ -1199,27 +1842,39 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
 
 	switch (ocs) {
 	case OCS_SUCCESS:
+		result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
 
-		/* check if the returned transfer response is valid */
-		result = ufshcd_is_valid_req_rsp(lrbp->ucd_rsp_ptr);
-		if (result) {
+		switch (result) {
+		case UPIU_TRANSACTION_RESPONSE:
+			/*
+			 * get the response UPIU result to extract
+			 * the SCSI command status
+			 */
+			result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
+
+			/*
+			 * get the result based on SCSI status response
+			 * to notify the SCSI midlayer of the command status
+			 */
+			scsi_status = result & MASK_SCSI_STATUS;
+			result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
+
+			if (ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
+				schedule_work(&hba->eeh_work);
+			break;
+		case UPIU_TRANSACTION_REJECT_UPIU:
+			/* TODO: handle Reject UPIU Response */
+			result = DID_ERROR << 16;
+			dev_err(hba->dev,
+				"Reject UPIU not fully implemented\n");
+			break;
+		default:
+			result = DID_ERROR << 16;
 			dev_err(hba->dev,
-				"Invalid response = %x\n", result);
+				"Unexpected request response code = %x\n",
+				result);
 			break;
 		}
-
-		/*
-		 * get the response UPIU result to extract
-		 * the SCSI command status
-		 */
-		result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
-
-		/*
-		 * get the result based on SCSI status response
-		 * to notify the SCSI midlayer of the command status
-		 */
-		scsi_status = result & MASK_SCSI_STATUS;
-		result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
 		break;
 	case OCS_ABORTED:
 		result |= DID_ABORT << 16;
@@ -1259,28 +1914,40 @@ static void ufshcd_uic_cmd_compl(struct ufs_hba *hba)
  */
 static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
 {
-	struct ufshcd_lrb *lrb;
+	struct ufshcd_lrb *lrbp;
+	struct scsi_cmnd *cmd;
 	unsigned long completed_reqs;
 	u32 tr_doorbell;
 	int result;
 	int index;
+	bool int_aggr_reset = false;
 
-	lrb = hba->lrb;
 	tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
 	completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
 
	for (index = 0; index < hba->nutrs; index++) {
 		if (test_bit(index, &completed_reqs)) {
+			lrbp = &hba->lrb[index];
+			cmd = lrbp->cmd;
+			/*
+			 * Don't skip resetting interrupt aggregation counters
+			 * if a regular command is present.
+			 */
+			int_aggr_reset |= !lrbp->intr_cmd;
 
-			result = ufshcd_transfer_rsp_status(hba, &lrb[index]);
-
-			if (lrb[index].cmd) {
-				scsi_dma_unmap(lrb[index].cmd);
-				lrb[index].cmd->result = result;
-				lrb[index].cmd->scsi_done(lrb[index].cmd);
-
+			if (cmd) {
+				result = ufshcd_transfer_rsp_status(hba, lrbp);
+				scsi_dma_unmap(cmd);
+				cmd->result = result;
 				/* Mark completed command as NULL in LRB */
-				lrb[index].cmd = NULL;
+				lrbp->cmd = NULL;
+				clear_bit_unlock(index, &hba->lrb_in_use);
+				/* Do not touch lrbp after scsi done */
+				cmd->scsi_done(cmd);
+			} else if (lrbp->command_type ==
+					UTP_CMD_TYPE_DEV_MANAGE) {
+				if (hba->dev_cmd.complete)
+					complete(hba->dev_cmd.complete);
 			}
 		} /* end of if */
 	} /* end of for */
@@ -1288,8 +1955,238 @@ static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
 	/* clear corresponding bits of completed commands */
 	hba->outstanding_reqs ^= completed_reqs;
 
+	/* we might have freed some tags above */
+	wake_up(&hba->dev_cmd.tag_wq);
+
 	/* Reset interrupt aggregation counters */
-	ufshcd_config_int_aggr(hba, INT_AGGR_RESET);
+	if (int_aggr_reset)
+		ufshcd_config_int_aggr(hba, INT_AGGR_RESET);
+}
+
+/**
+ * ufshcd_disable_ee - disable exception event
+ * @hba: per-adapter instance
+ * @mask: exception event to disable
+ *
+ * Disables exception event in the device so that the EVENT_ALERT
+ * bit is not set.
+ *
+ * Returns zero on success, non-zero error value on failure.
+ */
+static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
+{
+	int err = 0;
+	u32 val;
+
+	if (!(hba->ee_ctrl_mask & mask))
+		goto out;
+
+	val = hba->ee_ctrl_mask & ~mask;
+	val &= 0xFFFF; /* 2 bytes */
+	err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+			QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
+	if (!err)
+		hba->ee_ctrl_mask &= ~mask;
+out:
+	return err;
+}
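wExceptionEventControl is a 16-bit attribute, so enabling or disabling a single event is a read-modify-write on the cached hba->ee_ctrl_mask followed by a WRITE_ATTR query, as above. A worked example of the mask arithmetic (the bit position of MASK_EE_URGENT_BKOPS is an assumption here, taken to be bit 2 of the exception event layout):

	u16 cached = 0x0000;				/* no exception events enabled yet */
	u32 enable = (cached | (1 << 2)) & 0xFFFF;	/* 0x0004, written to the device */
	u32 disable = (enable & ~(1 << 2)) & 0xFFFF;	/* back to 0x0000 */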
+
+/**
+ * ufshcd_enable_ee - enable exception event
+ * @hba: per-adapter instance
+ * @mask: exception event to enable
+ *
+ * Enable corresponding exception event in the device to allow
+ * device to alert host in critical scenarios.
+ *
+ * Returns zero on success, non-zero error value on failure.
+ */
+static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
+{
+	int err = 0;
+	u32 val;
+
+	if (hba->ee_ctrl_mask & mask)
+		goto out;
+
+	val = hba->ee_ctrl_mask | mask;
+	val &= 0xFFFF; /* 2 bytes */
+	err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+			QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
+	if (!err)
+		hba->ee_ctrl_mask |= mask;
+out:
+	return err;
+}
+
+/**
+ * ufshcd_enable_auto_bkops - Allow device managed BKOPS
+ * @hba: per-adapter instance
+ *
+ * Allow the device to manage background operations on its own. Enabling
+ * this may lead to inconsistent latencies during normal data transfers,
+ * as the device schedules background operations in whatever way it sees
+ * fit.
+ *
+ * Returns zero on success, non-zero on failure.
+ */
+static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
+{
+	int err = 0;
+
+	if (hba->auto_bkops_enabled)
+		goto out;
+
+	err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
+			QUERY_FLAG_IDN_BKOPS_EN, NULL);
+	if (err) {
+		dev_err(hba->dev, "%s: failed to enable bkops %d\n",
+				__func__, err);
+		goto out;
+	}
+
+	hba->auto_bkops_enabled = true;
+
+	/* No need of URGENT_BKOPS exception from the device */
+	err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
+	if (err)
+		dev_err(hba->dev, "%s: failed to disable exception event %d\n",
+				__func__, err);
+out:
+	return err;
+}
+
+/**
+ * ufshcd_disable_auto_bkops - prevent device from doing background operations
+ * @hba: per-adapter instance
+ *
+ * Disabling background operations improves command response latency but
+ * has the drawback that the device may move into a critical state in
+ * which it is not operable. Make sure to call ufshcd_enable_auto_bkops()
+ * whenever the host is idle so that BKOPS is managed effectively without
+ * negative side effects.
+ *
+ * Returns zero on success, non-zero on failure.
+ */
+static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
+{
+	int err = 0;
+
+	if (!hba->auto_bkops_enabled)
+		goto out;
+
+	/*
+	 * If host assisted BKOPs is to be enabled, make sure
+	 * urgent bkops exception is allowed.
+	 */
+	err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
+	if (err) {
+		dev_err(hba->dev, "%s: failed to enable exception event %d\n",
+				__func__, err);
+		goto out;
+	}
+
+	err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
+			QUERY_FLAG_IDN_BKOPS_EN, NULL);
+	if (err) {
+		dev_err(hba->dev, "%s: failed to disable bkops %d\n",
+				__func__, err);
+		ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
+		goto out;
+	}
+
+	hba->auto_bkops_enabled = false;
+out:
+	return err;
+}
+
+/**
+ * ufshcd_force_reset_auto_bkops - force enable of auto bkops
+ * @hba: per adapter instance
+ *
+ * After a device reset the device may toggle the BKOPS_EN flag
+ * to its default value. The software tracking variables have to be
+ * updated as well; do this by forcing auto bkops to the enabled state.
+ */
+static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
+{
+	hba->auto_bkops_enabled = false;
+	hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
+	ufshcd_enable_auto_bkops(hba);
+}
+
+static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
+{
+	return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+			QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
+}
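bBackgroundOpStatus reports escalating urgency levels, and the handler that follows only intervenes at the top two. Assuming the BKOPS_STATUS_* enum values in this series track the UFS specification (0 through 3), the decision reduces to a threshold check:

	/* Illustrative helper, not part of the patch. */
	static bool example_bkops_urgent(u32 status)
	{
		/* 0: not required, 1: non-critical, 2: perf impact, 3: critical */
		return (status & 0xF) >= BKOPS_STATUS_PERF_IMPACT;
	}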
+
+/**
+ * ufshcd_urgent_bkops - handle urgent bkops exception event
+ * @hba: per-adapter instance
+ *
+ * Enable fBackgroundOpsEn flag in the device to permit background
+ * operations.
+ */
+static int ufshcd_urgent_bkops(struct ufs_hba *hba)
+{
+	int err;
+	u32 status = 0;
+
+	err = ufshcd_get_bkops_status(hba, &status);
+	if (err) {
+		dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
+				__func__, err);
+		goto out;
+	}
+
+	status = status & 0xF;
+
+	/* handle only if status indicates performance impact or critical */
+	if (status >= BKOPS_STATUS_PERF_IMPACT)
+		err = ufshcd_enable_auto_bkops(hba);
+out:
+	return err;
+}
+
+static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
+{
+	return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+			QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
+}
+
+/**
+ * ufshcd_exception_event_handler - handle exceptions raised by device
+ * @work: pointer to work data
+ *
+ * Read bExceptionEventStatus attribute from the device and handle the
+ * exception event accordingly.
+ */
+static void ufshcd_exception_event_handler(struct work_struct *work)
+{
+	struct ufs_hba *hba;
+	int err;
+	u32 status = 0;
+
+	hba = container_of(work, struct ufs_hba, eeh_work);
+
+	pm_runtime_get_sync(hba->dev);
+	err = ufshcd_get_ee_status(hba, &status);
+	if (err) {
+		dev_err(hba->dev, "%s: failed to get exception status %d\n",
+				__func__, err);
+		goto out;
+	}
+
+	status &= hba->ee_ctrl_mask;
+	if (status & MASK_EE_URGENT_BKOPS) {
+		err = ufshcd_urgent_bkops(hba);
+		if (err)
+			dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
+					__func__, err);
+	}
+out:
+	pm_runtime_put_sync(hba->dev);
+	return;
+}
 
 /**
@@ -1301,9 +2198,11 @@ static void ufshcd_fatal_err_handler(struct work_struct *work)
 	struct ufs_hba *hba;
 	hba = container_of(work, struct ufs_hba, feh_workq);
 
+	pm_runtime_get_sync(hba->dev);
 	/* check if reset is already in progress */
 	if (hba->ufshcd_state != UFSHCD_STATE_RESET)
 		ufshcd_do_reset(hba);
+	pm_runtime_put_sync(hba->dev);
 }
 
 /**
@@ -1432,10 +2331,10 @@ ufshcd_issue_tm_cmd(struct ufs_hba *hba,
 	task_req_upiup =
 		(struct utp_upiu_task_req *) task_req_descp->task_req_upiu;
 	task_req_upiup->header.dword_0 =
-		cpu_to_be32(UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0,
-					      lrbp->lun, lrbp->task_tag));
+		UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0,
+				  lrbp->lun, lrbp->task_tag);
 	task_req_upiup->header.dword_1 =
-		cpu_to_be32(UPIU_HEADER_DWORD(0, tm_function, 0, 0));
+		UPIU_HEADER_DWORD(0, tm_function, 0, 0);
 
 	task_req_upiup->input_param1 = lrbp->lun;
 	task_req_upiup->input_param1 =
@@ -1502,9 +2401,11 @@ static int ufshcd_device_reset(struct scsi_cmnd *cmd)
 			if (hba->lrb[pos].cmd) {
 				scsi_dma_unmap(hba->lrb[pos].cmd);
 				hba->lrb[pos].cmd->result =
-						DID_ABORT << 16;
+					DID_ABORT << 16;
 				hba->lrb[pos].cmd->scsi_done(cmd);
 				hba->lrb[pos].cmd = NULL;
+				clear_bit_unlock(pos, &hba->lrb_in_use);
+				wake_up(&hba->dev_cmd.tag_wq);
 			}
 		}
 	} /* end of for */
@@ -1572,6 +2473,9 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
 	__clear_bit(tag, &hba->outstanding_reqs);
 	hba->lrb[tag].cmd = NULL;
 	spin_unlock_irqrestore(host->host_lock, flags);
+
+	clear_bit_unlock(tag, &hba->lrb_in_use);
+	wake_up(&hba->dev_cmd.tag_wq);
 out:
 	return err;
 }
@@ -1587,8 +2491,22 @@ static void ufshcd_async_scan(void *data, async_cookie_t cookie)
 	int ret;
 
 	ret = ufshcd_link_startup(hba);
-	if (!ret)
-		scsi_scan_host(hba->host);
+	if (ret)
+		goto out;
+
+	ret = ufshcd_verify_dev_init(hba);
+	if (ret)
+		goto out;
+
+	ret = ufshcd_complete_dev_init(hba);
+	if (ret)
+		goto out;
+
+	ufshcd_force_reset_auto_bkops(hba);
+	scsi_scan_host(hba->host);
+	pm_runtime_put_sync(hba->dev);
+out:
+	return;
 }
 
 static struct scsi_host_template ufshcd_driver_template = {
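The pm_runtime_put_sync() in the scan path pairs with the pm_runtime_get_sync() added to ufshcd_init() below: probe takes a runtime-PM reference so the adapter cannot runtime-suspend while the asynchronous scan is still talking to the device, and the scan drops that reference when it finishes. In isolation the pattern looks like this (hypothetical driver, sketch only):

	#include <linux/async.h>
	#include <linux/pm_runtime.h>

	static void example_async_bringup(void *data, async_cookie_t cookie)
	{
		struct device *dev = data;

		/* ... long-running device bring-up and scan ... */
		pm_runtime_put_sync(dev);	/* balances the get in probe */
	}

	static int example_probe(struct device *dev)
	{
		pm_runtime_get_sync(dev);	/* block runtime suspend during the scan */
		async_schedule(example_async_bringup, dev);
		return 0;
	}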
@@ -1650,6 +2568,34 @@ int ufshcd_resume(struct ufs_hba *hba)
 }
 EXPORT_SYMBOL_GPL(ufshcd_resume);
 
+int ufshcd_runtime_suspend(struct ufs_hba *hba)
+{
+	if (!hba)
+		return 0;
+
+	/*
+	 * The device is idle with no requests in the queue,
+	 * allow background operations.
+	 */
+	return ufshcd_enable_auto_bkops(hba);
+}
+EXPORT_SYMBOL(ufshcd_runtime_suspend);
+
+int ufshcd_runtime_resume(struct ufs_hba *hba)
+{
+	if (!hba)
+		return 0;
+
+	return ufshcd_disable_auto_bkops(hba);
+}
+EXPORT_SYMBOL(ufshcd_runtime_resume);
+
+int ufshcd_runtime_idle(struct ufs_hba *hba)
+{
+	return 0;
+}
+EXPORT_SYMBOL(ufshcd_runtime_idle);
+
 /**
  * ufshcd_remove - de-allocate SCSI host and host memory space
  *		data structure memory
@@ -1657,11 +2603,11 @@ EXPORT_SYMBOL_GPL(ufshcd_resume);
  */
 void ufshcd_remove(struct ufs_hba *hba)
 {
+	scsi_remove_host(hba->host);
 	/* disable interrupts */
 	ufshcd_disable_intr(hba, hba->intr_mask);
 	ufshcd_hba_stop(hba);
 
-	scsi_remove_host(hba->host);
 	scsi_host_put(hba->host);
 }
 EXPORT_SYMBOL_GPL(ufshcd_remove);
@@ -1740,10 +2686,17 @@ int ufshcd_init(struct device *dev, struct ufs_hba **hba_handle,
 
 	/* Initialize work queues */
 	INIT_WORK(&hba->feh_workq, ufshcd_fatal_err_handler);
+	INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
 
 	/* Initialize UIC command mutex */
 	mutex_init(&hba->uic_cmd_mutex);
 
+	/* Initialize mutex for device management commands */
+	mutex_init(&hba->dev_cmd.lock);
+
+	/* Initialize device management tag acquire wait queue */
+	init_waitqueue_head(&hba->dev_cmd.tag_wq);
+
 	/* IRQ registration */
 	err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
 	if (err) {
@@ -1773,6 +2726,9 @@ int ufshcd_init(struct device *dev, struct ufs_hba **hba_handle,
 
 	*hba_handle = hba;
 
+	/* Hold auto suspend until async scan completes */
+	pm_runtime_get_sync(dev);
+
 	async_schedule(ufshcd_async_scan, hba);
 
 	return 0;
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 49590ee07acc..59c9c4848be1 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -68,6 +68,11 @@
 #define UFSHCD "ufshcd"
 #define UFSHCD_DRIVER_VERSION "0.2"
 
+enum dev_cmd_type {
+	DEV_CMD_TYPE_NOP	= 0x0,
+	DEV_CMD_TYPE_QUERY	= 0x1,
+};
+
 /**
  * struct uic_command - UIC command structure
  * @command: UIC command
@@ -91,7 +96,7 @@ struct uic_command {
 /**
  * struct ufshcd_lrb - local reference block
  * @utr_descriptor_ptr: UTRD address of the command
- * @ucd_cmd_ptr: UCD address of the command
+ * @ucd_req_ptr: UCD address of the command
  * @ucd_rsp_ptr: Response UPIU address for this command
  * @ucd_prdt_ptr: PRDT address of the command
  * @cmd: pointer to SCSI command
@@ -101,10 +106,11 @@ struct uic_command {
  * @command_type: SCSI, UFS, Query.
  * @task_tag: Task tag of the command
  * @lun: LUN of the command
+ * @intr_cmd: Interrupt command (doesn't participate in interrupt aggregation)
  */
 struct ufshcd_lrb {
 	struct utp_transfer_req_desc *utr_descriptor_ptr;
-	struct utp_upiu_cmd *ucd_cmd_ptr;
+	struct utp_upiu_req *ucd_req_ptr;
 	struct utp_upiu_rsp *ucd_rsp_ptr;
 	struct ufshcd_sg_entry *ucd_prdt_ptr;
@@ -116,8 +122,35 @@ struct ufshcd_lrb {
 	int command_type;
 	int task_tag;
 	unsigned int lun;
+	bool intr_cmd;
 };
 
+/**
+ * struct ufs_query - holds relevant data structures for query request
+ * @request: request upiu and function
+ * @descriptor: buffer for sending/receiving descriptor
+ * @response: response upiu and response
+ */
+struct ufs_query {
+	struct ufs_query_req request;
+	u8 *descriptor;
+	struct ufs_query_res response;
+};
+
+/**
+ * struct ufs_dev_cmd - fields associated with device management commands
+ * @type: device management command type - Query, NOP OUT
+ * @lock: lock to allow one command at a time
+ * @complete: internal commands completion
+ * @tag_wq: wait queue until free command slot is available
+ */
+struct ufs_dev_cmd {
+	enum dev_cmd_type type;
+	struct mutex lock;
+	struct completion *complete;
+	wait_queue_head_t tag_wq;
+	struct ufs_query query;
+};
 
 /**
  * struct ufs_hba - per adapter private structure
@@ -131,6 +164,7 @@ struct ufshcd_lrb {
  * @host: Scsi_Host instance of the driver
  * @dev: device handle
  * @lrb: local reference block
+ * @lrb_in_use: bitmap of LRBs currently in use
  * @outstanding_tasks: Bits representing outstanding task requests
  * @outstanding_reqs: Bits representing outstanding transfer requests
  * @capabilities: UFS Controller Capabilities
@@ -144,8 +178,12 @@ struct ufshcd_lrb {
  * @tm_condition: condition variable for task management
  * @ufshcd_state: UFSHCD states
  * @intr_mask: Interrupt Mask Bits
+ * @ee_ctrl_mask: Exception event control mask
  * @feh_workq: Work queue for fatal controller error handling
+ * @eeh_work: Worker to handle exception events
  * @errors: HBA errors
+ * @dev_cmd: ufs device management command information
+ * @auto_bkops_enabled: to track whether bkops is enabled in device
  */
 struct ufs_hba {
 	void __iomem *mmio_base;
@@ -164,6 +202,7 @@ struct ufs_hba {
 	struct device *dev;
 
 	struct ufshcd_lrb *lrb;
+	unsigned long lrb_in_use;
 
 	unsigned long outstanding_tasks;
 	unsigned long outstanding_reqs;
@@ -182,12 +221,19 @@ struct ufs_hba {
 	u32 ufshcd_state;
 	u32 intr_mask;
+	u16 ee_ctrl_mask;
 
 	/* Work Queues */
 	struct work_struct feh_workq;
+	struct work_struct eeh_work;
 
 	/* HBA Errors */
 	u32 errors;
+
+	/* Device management request data */
+	struct ufs_dev_cmd dev_cmd;
+
+	bool auto_bkops_enabled;
 };
 
 #define ufshcd_writel(hba, val, reg)	\
@@ -208,4 +254,13 @@ static inline void ufshcd_hba_stop(struct ufs_hba *hba)
 	ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
 }
 
+static inline void check_upiu_size(void)
+{
+	BUILD_BUG_ON(ALIGNED_UPIU_SIZE <
+		GENERAL_UPIU_REQUEST_SIZE + QUERY_DESC_MAX_SIZE);
+}
+
+extern int ufshcd_runtime_suspend(struct ufs_hba *hba);
+extern int ufshcd_runtime_resume(struct ufs_hba *hba);
+extern int ufshcd_runtime_idle(struct ufs_hba *hba);
 #endif /* End of Header */
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index d5c5f1482d7d..f1e1b7459107 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -39,7 +39,7 @@ enum {
 	TASK_REQ_UPIU_SIZE_DWORDS	= 8,
 	TASK_RSP_UPIU_SIZE_DWORDS	= 8,
-	ALIGNED_UPIU_SIZE		= 128,
+	ALIGNED_UPIU_SIZE		= 512,
 };
 
 /* UFSHCI Registers */
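The ufshcd_runtime_* exports above are intended to be called from a bus glue driver's dev_pm_ops: the core decides what runtime PM means for UFS (hand BKOPS to the device while idle, take it back on resume), while the glue layer owns the struct device. A sketch of that wiring (the pltfrm wrapper names are illustrative and not part of this diff):

	#include <linux/pm.h>
	#include <linux/pm_runtime.h>

	static int ufshcd_pltfrm_runtime_suspend(struct device *dev)
	{
		return ufshcd_runtime_suspend(dev_get_drvdata(dev));
	}

	static int ufshcd_pltfrm_runtime_resume(struct device *dev)
	{
		return ufshcd_runtime_resume(dev_get_drvdata(dev));
	}

	static int ufshcd_pltfrm_runtime_idle(struct device *dev)
	{
		return ufshcd_runtime_idle(dev_get_drvdata(dev));
	}

	static const struct dev_pm_ops ufshcd_pltfrm_pm_ops = {
		SET_RUNTIME_PM_OPS(ufshcd_pltfrm_runtime_suspend,
				   ufshcd_pltfrm_runtime_resume,
				   ufshcd_pltfrm_runtime_idle)
	};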