author     Faiz Abbas    2019-10-15 18:24:36 +0530
committer  Tom Rini      2019-10-23 20:47:12 -0400
commit     7feafb0ae4f703874119bd90d53258134f850d37 (patch)
tree       334f27963392e1b9dcc4db93017eb10725b2c39a
parent     8fbac8e23e55a63c218b8d737e629a8d07e9a840 (diff)
ufs: Add Initial Support for UFS subsystem
Add support for the UFS Host Controller Interface (UFSHCI) for
communicating with Universal Flash Storage (UFS) devices. The steps to
initialize the host controller interface are the following:

- Initiate the host controller initialization process by writing to the
  Host Controller Enable register.
- Configure the host controller base address registers by allocating
  host memory space and the related data structures.
- Perform the UniPro link startup procedure.
- Check for a connected device.
- Configure the UFS host controller to process requests.

Also register this host controller as a SCSI host controller.

Taken from Linux Kernel v5.2 (drivers/scsi/ufs/ufshcd.c) and ported to
U-Boot.

Signed-off-by: Faiz Abbas <faiz_abbas@ti.com>
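For context, and not part of the patch itself: a SoC-specific glue driver is
expected to sit on UCLASS_UFS, hand its controller hooks to the new core
through ufshcd_probe(), and bind the generic "ufs_scsi" driver with
ufs_scsi_bind() so the device's LUNs appear as SCSI devices. A minimal sketch
of such a glue driver follows; the driver name, compatible string and the
empty ufs_hba_ops are illustrative placeholders, not taken from this series,
and the sketch assumes it lives under drivers/ufs/ so the local ufs.h header
resolves.

/* Hypothetical SoC glue driver (sketch only, not part of this patch). */
#include <common.h>
#include <dm.h>
#include "ufs.h"

static struct ufs_hba_ops example_ufs_hba_ops = {
        /* Optional controller-specific hooks would be filled in here */
};

static int example_ufs_probe(struct udevice *dev)
{
        /* Run the generic UFSHCI init: HCE enable, link startup, device init */
        return ufshcd_probe(dev, &example_ufs_hba_ops);
}

static int example_ufs_bind(struct udevice *dev)
{
        struct udevice *scsi_dev;

        /* Expose the UFS LUNs through the generic "ufs_scsi" SCSI driver */
        return ufs_scsi_bind(dev, &scsi_dev);
}

static const struct udevice_id example_ufs_ids[] = {
        { .compatible = "example,ufshci" },     /* placeholder compatible */
        { }
};

U_BOOT_DRIVER(example_ufs) = {
        .name           = "example-ufs",
        .id             = UCLASS_UFS,
        .of_match       = example_ufs_ids,
        .probe          = example_ufs_probe,
        .bind           = example_ufs_bind,
};

With such a glue driver bound, ufs_probe()/ufs_probe_dev() from ufs.c probe
the controller, after which a "scsi scan" should enumerate the UFS logical
units.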
-rw-r--r--  MAINTAINERS                 5
-rw-r--r--  drivers/Kconfig             2
-rw-r--r--  drivers/Makefile            1
-rw-r--r--  drivers/ufs/Kconfig         9
-rw-r--r--  drivers/ufs/Makefile        6
-rw-r--r--  drivers/ufs/ufs-uclass.c    16
-rw-r--r--  drivers/ufs/ufs.c           1968
-rw-r--r--  drivers/ufs/ufs.h           918
-rw-r--r--  drivers/ufs/unipro.h        270
-rw-r--r--  include/dm/uclass-id.h      1
-rw-r--r--  include/ufs.h               29
11 files changed, 3225 insertions, 0 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index a7c355c76f6..8766a702d8f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -840,6 +840,11 @@ S: Maintained
T: git https://gitlab.denx.de/u-boot/custodians/u-boot-ubi.git
F: drivers/mtd/ubi/
+UFS
+M: Faiz Abbas <faiz_abbas@ti.com>
+S: Maintained
+F: drivers/ufs/
+
USB
M: Marek Vasut <marex@denx.de>
S: Maintained
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 350acf81f30..9d99ce02261 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -118,6 +118,8 @@ source "drivers/tpm/Kconfig"
source "drivers/usb/Kconfig"
+source "drivers/ufs/Kconfig"
+
source "drivers/video/Kconfig"
source "drivers/virtio/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index a4bb5e4975c..0befeddfcbf 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -111,6 +111,7 @@ obj-y += soc/
obj-y += thermal/
obj-$(CONFIG_TEE) += tee/
obj-y += axi/
+obj-y += ufs/
obj-$(CONFIG_W1) += w1/
obj-$(CONFIG_W1_EEPROM) += w1-eeprom/
diff --git a/drivers/ufs/Kconfig b/drivers/ufs/Kconfig
new file mode 100644
index 00000000000..b0981062d8a
--- /dev/null
+++ b/drivers/ufs/Kconfig
@@ -0,0 +1,9 @@
+menu "UFS Host Controller Support"
+
+config UFS
+ bool "Support UFS controllers"
+ depends on DM_SCSI
+ help
+ This selects support for Universal Flash Subsystem (UFS).
+ Say Y here if you want UFS Support.
+endmenu
diff --git a/drivers/ufs/Makefile b/drivers/ufs/Makefile
new file mode 100644
index 00000000000..b8df759f661
--- /dev/null
+++ b/drivers/ufs/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+#
+
+obj-$(CONFIG_UFS) += ufs.o ufs-uclass.o
diff --git a/drivers/ufs/ufs-uclass.c b/drivers/ufs/ufs-uclass.c
new file mode 100644
index 00000000000..920bfa64e19
--- /dev/null
+++ b/drivers/ufs/ufs-uclass.c
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * ufs-uclass.c - Universal Flash Subsystem (UFS) Uclass driver
+ *
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ */
+
+#include <common.h>
+#include "ufs.h"
+#include <dm.h>
+
+UCLASS_DRIVER(ufs) = {
+ .id = UCLASS_UFS,
+ .name = "ufs",
+ .per_device_auto_alloc_size = sizeof(struct ufs_hba),
+};
diff --git a/drivers/ufs/ufs.c b/drivers/ufs/ufs.c
new file mode 100644
index 00000000000..23306863d52
--- /dev/null
+++ b/drivers/ufs/ufs.c
@@ -0,0 +1,1968 @@
+// SPDX-License-Identifier: GPL-2.0+
+/**
+ * ufs.c - Universal Flash Subsystem (UFS) driver
+ *
+ * Taken from Linux Kernel v5.2 (drivers/scsi/ufs/ufshcd.c) and ported
+ * to u-boot.
+ *
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ */
+
+#include <charset.h>
+#include <common.h>
+#include <dm.h>
+#include <dm/lists.h>
+#include <dm/device-internal.h>
+#include <malloc.h>
+#include <hexdump.h>
+#include <scsi.h>
+
+#include <asm/dma-mapping.h>
+
+#include "ufs.h"
+
+#define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\
+ UTP_TASK_REQ_COMPL |\
+ UFSHCD_ERROR_MASK)
+/* maximum number of link-startup retries */
+#define DME_LINKSTARTUP_RETRIES 3
+
+/* maximum number of retries for a general UIC command */
+#define UFS_UIC_COMMAND_RETRIES 3
+
+/* Query request retries */
+#define QUERY_REQ_RETRIES 3
+/* Query request timeout */
+#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */
+
+/* maximum timeout in ms for a general UIC command */
+#define UFS_UIC_CMD_TIMEOUT 1000
+/* NOP OUT retries waiting for NOP IN response */
+#define NOP_OUT_RETRIES 10
+/* Timeout after 30 msecs if NOP OUT hangs without response */
+#define NOP_OUT_TIMEOUT 30 /* msecs */
+
+/* Only use one Task Tag for all requests */
+#define TASK_TAG 0
+
+/* Expose the flag value from utp_upiu_query.value */
+#define MASK_QUERY_UPIU_FLAG_LOC 0xFF
+
+#define MAX_PRDT_ENTRY 262144
+
+/* maximum bytes per request */
+#define UFS_MAX_BYTES (128 * 256 * 1024)
+
+static inline bool ufshcd_is_hba_active(struct ufs_hba *hba);
+static inline void ufshcd_hba_stop(struct ufs_hba *hba);
+static int ufshcd_hba_enable(struct ufs_hba *hba);
+
+/*
+ * ufshcd_wait_for_register - wait for register value to change
+ */
+static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
+ u32 val, unsigned long timeout_ms)
+{
+ int err = 0;
+ unsigned long start = get_timer(0);
+
+ /* ignore bits that we don't intend to wait on */
+ val = val & mask;
+
+ while ((ufshcd_readl(hba, reg) & mask) != val) {
+ if (get_timer(start) > timeout_ms) {
+ if ((ufshcd_readl(hba, reg) & mask) != val)
+ err = -ETIMEDOUT;
+ break;
+ }
+ }
+
+ return err;
+}
+
+/**
+ * ufshcd_init_pwr_info - setting the POR (power on reset)
+ * values in hba power info
+ */
+static void ufshcd_init_pwr_info(struct ufs_hba *hba)
+{
+ hba->pwr_info.gear_rx = UFS_PWM_G1;
+ hba->pwr_info.gear_tx = UFS_PWM_G1;
+ hba->pwr_info.lane_rx = 1;
+ hba->pwr_info.lane_tx = 1;
+ hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
+ hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
+ hba->pwr_info.hs_rate = 0;
+}
+
+/**
+ * ufshcd_print_pwr_info - print power params as saved in hba
+ * power info
+ */
+static void ufshcd_print_pwr_info(struct ufs_hba *hba)
+{
+ static const char * const names[] = {
+ "INVALID MODE",
+ "FAST MODE",
+ "SLOW_MODE",
+ "INVALID MODE",
+ "FASTAUTO_MODE",
+ "SLOWAUTO_MODE",
+ "INVALID MODE",
+ };
+
+ dev_err(hba->dev, "[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
+ hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
+ hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
+ names[hba->pwr_info.pwr_rx],
+ names[hba->pwr_info.pwr_tx],
+ hba->pwr_info.hs_rate);
+}
+
+/**
+ * ufshcd_ready_for_uic_cmd - Check if controller is ready
+ * to accept UIC commands
+ */
+static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
+{
+ if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY)
+ return true;
+ else
+ return false;
+}
+
+/**
+ * ufshcd_get_uic_cmd_result - Get the UIC command result
+ */
+static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
+{
+ return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
+ MASK_UIC_COMMAND_RESULT;
+}
+
+/**
+ * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
+ */
+static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
+{
+ return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
+}
+
+/**
+ * ufshcd_is_device_present - Check if any device is connected to
+ * the host controller
+ */
+static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
+{
+ return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
+ DEVICE_PRESENT) ? true : false;
+}
+
+/**
+ * ufshcd_send_uic_cmd - UFS Interconnect layer command API
+ *
+ */
+static int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
+{
+ unsigned long start = 0;
+ u32 intr_status;
+ u32 enabled_intr_status;
+
+ if (!ufshcd_ready_for_uic_cmd(hba)) {
+ dev_err(hba->dev,
+ "Controller not ready to accept UIC commands\n");
+ return -EIO;
+ }
+
+ debug("sending uic command:%d\n", uic_cmd->command);
+
+ /* Write Args */
+ ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
+ ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
+ ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);
+
+ /* Write UIC Cmd */
+ ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
+ REG_UIC_COMMAND);
+
+ start = get_timer(0);
+ do {
+ intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
+ enabled_intr_status = intr_status & hba->intr_mask;
+ ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
+
+ if (get_timer(start) > UFS_UIC_CMD_TIMEOUT) {
+ dev_err(hba->dev,
+ "Timedout waiting for UIC response\n");
+
+ return -ETIMEDOUT;
+ }
+
+ if (enabled_intr_status & UFSHCD_ERROR_MASK) {
+ dev_err(hba->dev, "Error in status:%08x\n",
+ enabled_intr_status);
+
+ return -1;
+ }
+ } while (!(enabled_intr_status & UFSHCD_UIC_MASK));
+
+ uic_cmd->argument2 = ufshcd_get_uic_cmd_result(hba);
+ uic_cmd->argument3 = ufshcd_get_dme_attr_val(hba);
+
+ debug("Sent successfully\n");
+
+ return 0;
+}
+
+/**
+ * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
+ *
+ */
+int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel, u8 attr_set,
+ u32 mib_val, u8 peer)
+{
+ struct uic_command uic_cmd = {0};
+ static const char *const action[] = {
+ "dme-set",
+ "dme-peer-set"
+ };
+ const char *set = action[!!peer];
+ int ret;
+ int retries = UFS_UIC_COMMAND_RETRIES;
+
+ uic_cmd.command = peer ?
+ UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
+ uic_cmd.argument1 = attr_sel;
+ uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
+ uic_cmd.argument3 = mib_val;
+
+ do {
+ /* for peer attributes we retry upon failure */
+ ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
+ if (ret)
+ dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
+ set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
+ } while (ret && peer && --retries);
+
+ if (ret)
+ dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
+ set, UIC_GET_ATTR_ID(attr_sel), mib_val,
+ UFS_UIC_COMMAND_RETRIES - retries);
+
+ return ret;
+}
+
+/**
+ * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
+ *
+ */
+int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
+ u32 *mib_val, u8 peer)
+{
+ struct uic_command uic_cmd = {0};
+ static const char *const action[] = {
+ "dme-get",
+ "dme-peer-get"
+ };
+ const char *get = action[!!peer];
+ int ret;
+ int retries = UFS_UIC_COMMAND_RETRIES;
+
+ uic_cmd.command = peer ?
+ UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
+ uic_cmd.argument1 = attr_sel;
+
+ do {
+ /* for peer attributes we retry upon failure */
+ ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
+ if (ret)
+ dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
+ get, UIC_GET_ATTR_ID(attr_sel), ret);
+ } while (ret && peer && --retries);
+
+ if (ret)
+ dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
+ get, UIC_GET_ATTR_ID(attr_sel),
+ UFS_UIC_COMMAND_RETRIES - retries);
+
+ if (mib_val && !ret)
+ *mib_val = uic_cmd.argument3;
+
+ return ret;
+}
+
+static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
+{
+ u32 tx_lanes, i, err = 0;
+
+ if (!peer)
+ ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
+ &tx_lanes);
+ else
+ ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
+ &tx_lanes);
+ for (i = 0; i < tx_lanes; i++) {
+ if (!peer)
+ err = ufshcd_dme_set(hba,
+ UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
+ UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
+ 0);
+ else
+ err = ufshcd_dme_peer_set(hba,
+ UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
+ UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
+ 0);
+ if (err) {
+ dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d",
+ __func__, peer, i, err);
+ break;
+ }
+ }
+
+ return err;
+}
+
+static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
+{
+ return ufshcd_disable_tx_lcc(hba, true);
+}
+
+/**
+ * ufshcd_dme_link_startup - Notify Unipro to perform link startup
+ *
+ */
+static int ufshcd_dme_link_startup(struct ufs_hba *hba)
+{
+ struct uic_command uic_cmd = {0};
+ int ret;
+
+ uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
+
+ ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
+ if (ret)
+ dev_dbg(hba->dev,
+ "dme-link-startup: error code %d\n", ret);
+ return ret;
+}
+
+/**
+ * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
+ *
+ */
+static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
+{
+ ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
+}
+
+/**
+ * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
+ */
+static inline int ufshcd_get_lists_status(u32 reg)
+{
+ return !((reg & UFSHCD_STATUS_READY) == UFSHCD_STATUS_READY);
+}
+
+/**
+ * ufshcd_enable_run_stop_reg - Enable run-stop registers.
+ * When the run-stop registers are set to 1, it indicates to the
+ * host controller that it can process requests.
+ */
+static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
+{
+ ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
+ REG_UTP_TASK_REQ_LIST_RUN_STOP);
+ ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
+ REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
+}
+
+/**
+ * ufshcd_enable_intr - enable interrupts
+ */
+static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
+{
+ u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
+ u32 rw;
+
+ if (hba->version == UFSHCI_VERSION_10) {
+ rw = set & INTERRUPT_MASK_RW_VER_10;
+ set = rw | ((set ^ intrs) & intrs);
+ } else {
+ set |= intrs;
+ }
+
+ ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
+
+ hba->intr_mask = set;
+}
+
+/**
+ * ufshcd_make_hba_operational - Make UFS controller operational
+ *
+ * To bring UFS host controller to operational state,
+ * 1. Enable required interrupts
+ * 2. Configure interrupt aggregation
+ * 3. Program UTRL and UTMRL base address
+ * 4. Configure run-stop-registers
+ *
+ */
+static int ufshcd_make_hba_operational(struct ufs_hba *hba)
+{
+ int err = 0;
+ u32 reg;
+
+ /* Enable required interrupts */
+ ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
+
+ /* Disable interrupt aggregation */
+ ufshcd_disable_intr_aggr(hba);
+
+ /* Configure UTRL and UTMRL base address registers */
+ ufshcd_writel(hba, lower_32_bits((dma_addr_t)hba->utrdl),
+ REG_UTP_TRANSFER_REQ_LIST_BASE_L);
+ ufshcd_writel(hba, upper_32_bits((dma_addr_t)hba->utrdl),
+ REG_UTP_TRANSFER_REQ_LIST_BASE_H);
+ ufshcd_writel(hba, lower_32_bits((dma_addr_t)hba->utmrdl),
+ REG_UTP_TASK_REQ_LIST_BASE_L);
+ ufshcd_writel(hba, upper_32_bits((dma_addr_t)hba->utmrdl),
+ REG_UTP_TASK_REQ_LIST_BASE_H);
+
+ /*
+ * UCRDY, UTRLRDY and UTMRLRDY bits must be 1
+ */
+ reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
+ if (!(ufshcd_get_lists_status(reg))) {
+ ufshcd_enable_run_stop_reg(hba);
+ } else {
+ dev_err(hba->dev,
+ "Host controller not ready to process requests");
+ err = -EIO;
+ goto out;
+ }
+
+out:
+ return err;
+}
+
+/**
+ * ufshcd_link_startup - Initialize unipro link startup
+ */
+static int ufshcd_link_startup(struct ufs_hba *hba)
+{
+ int ret;
+ int retries = DME_LINKSTARTUP_RETRIES;
+ bool link_startup_again = true;
+
+link_startup:
+ do {
+ ufshcd_ops_link_startup_notify(hba, PRE_CHANGE);
+
+ ret = ufshcd_dme_link_startup(hba);
+
+ /* check if device is detected by inter-connect layer */
+ if (!ret && !ufshcd_is_device_present(hba)) {
+ dev_err(hba->dev, "%s: Device not present\n", __func__);
+ ret = -ENXIO;
+ goto out;
+ }
+
+ /*
+ * DME link lost indication is only received when link is up,
+ * but we can't be sure if the link is up until link startup
+ * succeeds. So reset the local Uni-Pro and try again.
+ */
+ if (ret && ufshcd_hba_enable(hba))
+ goto out;
+ } while (ret && retries--);
+
+ if (ret)
+ /* failed to get the link up... retire */
+ goto out;
+
+ if (link_startup_again) {
+ link_startup_again = false;
+ retries = DME_LINKSTARTUP_RETRIES;
+ goto link_startup;
+ }
+
+ /* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */
+ ufshcd_init_pwr_info(hba);
+
+ if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
+ ret = ufshcd_disable_device_tx_lcc(hba);
+ if (ret)
+ goto out;
+ }
+
+ /* Include any host controller configuration via UIC commands */
+ ret = ufshcd_ops_link_startup_notify(hba, POST_CHANGE);
+ if (ret)
+ goto out;
+
+ ret = ufshcd_make_hba_operational(hba);
+out:
+ if (ret)
+ dev_err(hba->dev, "link startup failed %d\n", ret);
+
+ return ret;
+}
+
+/**
+ * ufshcd_hba_stop - Send controller to reset state
+ */
+static inline void ufshcd_hba_stop(struct ufs_hba *hba)
+{
+ int err;
+
+ ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
+ err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
+ CONTROLLER_ENABLE, CONTROLLER_DISABLE,
+ 10);
+ if (err)
+ dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
+}
+
+/**
+ * ufshcd_is_hba_active - Get controller state
+ */
+static inline bool ufshcd_is_hba_active(struct ufs_hba *hba)
+{
+ return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE)
+ ? false : true;
+}
+
+/**
+ * ufshcd_hba_start - Start controller initialization sequence
+ */
+static inline void ufshcd_hba_start(struct ufs_hba *hba)
+{
+ ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE);
+}
+
+/**
+ * ufshcd_hba_enable - initialize the controller
+ */
+static int ufshcd_hba_enable(struct ufs_hba *hba)
+{
+ int retry;
+
+ if (!ufshcd_is_hba_active(hba))
+ /* change controller state to "reset state" */
+ ufshcd_hba_stop(hba);
+
+ ufshcd_ops_hce_enable_notify(hba, PRE_CHANGE);
+
+ /* start controller initialization sequence */
+ ufshcd_hba_start(hba);
+
+ /*
+ * To initialize a UFS host controller HCE bit must be set to 1.
+ * During initialization the HCE bit value changes from 1->0->1.
+ * When the host controller completes initialization sequence
+ * it sets the value of HCE bit to 1. The same HCE bit is read back
+ * to check if the controller has completed initialization sequence.
+ * So without this delay the value HCE = 1, set in the previous
+ * instruction might be read back.
+ * This delay can be changed based on the controller.
+ */
+ mdelay(1);
+
+ /* wait for the host controller to complete initialization */
+ retry = 10;
+ while (ufshcd_is_hba_active(hba)) {
+ if (retry) {
+ retry--;
+ } else {
+ dev_err(hba->dev, "Controller enable failed\n");
+ return -EIO;
+ }
+ mdelay(5);
+ }
+
+ /* enable UIC related interrupts */
+ ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
+
+ ufshcd_ops_hce_enable_notify(hba, POST_CHANGE);
+
+ return 0;
+}
+
+/**
+ * ufshcd_host_memory_configure - configure local reference block with
+ * memory offsets
+ */
+static void ufshcd_host_memory_configure(struct ufs_hba *hba)
+{
+ struct utp_transfer_req_desc *utrdlp;
+ dma_addr_t cmd_desc_dma_addr;
+ u16 response_offset;
+ u16 prdt_offset;
+
+ utrdlp = hba->utrdl;
+ cmd_desc_dma_addr = (dma_addr_t)hba->ucdl;
+
+ utrdlp->command_desc_base_addr_lo =
+ cpu_to_le32(lower_32_bits(cmd_desc_dma_addr));
+ utrdlp->command_desc_base_addr_hi =
+ cpu_to_le32(upper_32_bits(cmd_desc_dma_addr));
+
+ response_offset = offsetof(struct utp_transfer_cmd_desc, response_upiu);
+ prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table);
+
+ utrdlp->response_upiu_offset = cpu_to_le16(response_offset >> 2);
+ utrdlp->prd_table_offset = cpu_to_le16(prdt_offset >> 2);
+ utrdlp->response_upiu_length = cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
+
+ hba->ucd_req_ptr = (struct utp_upiu_req *)hba->ucdl;
+ hba->ucd_rsp_ptr =
+ (struct utp_upiu_rsp *)&hba->ucdl->response_upiu;
+ hba->ucd_prdt_ptr =
+ (struct ufshcd_sg_entry *)&hba->ucdl->prd_table;
+}
+
+/**
+ * ufshcd_memory_alloc - allocate memory for host memory space data structures
+ */
+static int ufshcd_memory_alloc(struct ufs_hba *hba)
+{
+ /* Allocate one Transfer Request Descriptor
+ * Should be aligned to 1k boundary.
+ */
+ hba->utrdl = memalign(1024, sizeof(struct utp_transfer_req_desc));
+ if (!hba->utrdl) {
+ dev_err(hba->dev, "Transfer Descriptor memory allocation failed\n");
+ return -ENOMEM;
+ }
+
+ /* Allocate one Command Descriptor
+ * Should be aligned to 1k boundary.
+ */
+ hba->ucdl = memalign(1024, sizeof(struct utp_transfer_cmd_desc));
+ if (!hba->ucdl) {
+ dev_err(hba->dev, "Command descriptor memory allocation failed\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/**
+ * ufshcd_get_intr_mask - Get the interrupt bit mask
+ */
+static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
+{
+ u32 intr_mask = 0;
+
+ switch (hba->version) {
+ case UFSHCI_VERSION_10:
+ intr_mask = INTERRUPT_MASK_ALL_VER_10;
+ break;
+ case UFSHCI_VERSION_11:
+ case UFSHCI_VERSION_20:
+ intr_mask = INTERRUPT_MASK_ALL_VER_11;
+ break;
+ case UFSHCI_VERSION_21:
+ default:
+ intr_mask = INTERRUPT_MASK_ALL_VER_21;
+ break;
+ }
+
+ return intr_mask;
+}
+
+/**
+ * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
+ */
+static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
+{
+ return ufshcd_readl(hba, REG_UFS_VERSION);
+}
+
+/**
+ * ufshcd_get_upmcrs - Get the power mode change request status
+ */
+static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
+{
+ return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
+}
+
+/**
+ * ufshcd_prepare_req_desc_hdr() - Fill the request descriptor header
+ * according to the request
+ */
+static void ufshcd_prepare_req_desc_hdr(struct utp_transfer_req_desc *req_desc,
+ u32 *upiu_flags,
+ enum dma_data_direction cmd_dir)
+{
+ u32 data_direction;
+ u32 dword_0;
+
+ if (cmd_dir == DMA_FROM_DEVICE) {
+ data_direction = UTP_DEVICE_TO_HOST;
+ *upiu_flags = UPIU_CMD_FLAGS_READ;
+ } else if (cmd_dir == DMA_TO_DEVICE) {
+ data_direction = UTP_HOST_TO_DEVICE;
+ *upiu_flags = UPIU_CMD_FLAGS_WRITE;
+ } else {
+ data_direction = UTP_NO_DATA_TRANSFER;
+ *upiu_flags = UPIU_CMD_FLAGS_NONE;
+ }
+
+ dword_0 = data_direction | (0x1 << UPIU_COMMAND_TYPE_OFFSET);
+
+ /* Enable Interrupt for command */
+ dword_0 |= UTP_REQ_DESC_INT_CMD;
+
+ /* Transfer request descriptor header fields */
+ req_desc->header.dword_0 = cpu_to_le32(dword_0);
+ /* dword_1 is reserved, hence it is set to 0 */
+ req_desc->header.dword_1 = 0;
+ /*
+ * assigning invalid value for command status. Controller
+ * updates OCS on command completion, with the command
+ * status
+ */
+ req_desc->header.dword_2 =
+ cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
+ /* dword_3 is reserved, hence it is set to 0 */
+ req_desc->header.dword_3 = 0;
+
+ req_desc->prd_table_length = 0;
+}
+
+static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
+ u32 upiu_flags)
+{
+ struct utp_upiu_req *ucd_req_ptr = hba->ucd_req_ptr;
+ struct ufs_query *query = &hba->dev_cmd.query;
+ u16 len = be16_to_cpu(query->request.upiu_req.length);
+
+ /* Query request header */
+ ucd_req_ptr->header.dword_0 =
+ UPIU_HEADER_DWORD(UPIU_TRANSACTION_QUERY_REQ,
+ upiu_flags, 0, TASK_TAG);
+ ucd_req_ptr->header.dword_1 =
+ UPIU_HEADER_DWORD(0, query->request.query_func,
+ 0, 0);
+
+ /* Data segment length is only needed for WRITE_DESC */
+ if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
+ ucd_req_ptr->header.dword_2 =
+ UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len);
+ else
+ ucd_req_ptr->header.dword_2 = 0;
+
+ /* Copy the Query Request buffer as is */
+ memcpy(&ucd_req_ptr->qr, &query->request.upiu_req, QUERY_OSF_SIZE);
+
+ /* Copy the Descriptor */
+ if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
+ memcpy(ucd_req_ptr + 1, query->descriptor, len);
+
+ memset(hba->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
+}
+
+static inline void ufshcd_prepare_utp_nop_upiu(struct ufs_hba *hba)
+{
+ struct utp_upiu_req *ucd_req_ptr = hba->ucd_req_ptr;
+
+ memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));
+
+ /* command descriptor fields */
+ ucd_req_ptr->header.dword_0 =
+ UPIU_HEADER_DWORD(UPIU_TRANSACTION_NOP_OUT, 0, 0, 0x1f);
+ /* clear rest of the fields of basic header */
+ ucd_req_ptr->header.dword_1 = 0;
+ ucd_req_ptr->header.dword_2 = 0;
+
+ memset(hba->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
+}
+
+/**
+ * ufshcd_comp_devman_upiu - UFS Protocol Information Unit (UPIU)
+ * for Device Management Purposes
+ */
+static int ufshcd_comp_devman_upiu(struct ufs_hba *hba,
+ enum dev_cmd_type cmd_type)
+{
+ u32 upiu_flags;
+ int ret = 0;
+ struct utp_transfer_req_desc *req_desc = hba->utrdl;
+
+ hba->dev_cmd.type = cmd_type;
+
+ ufshcd_prepare_req_desc_hdr(req_desc, &upiu_flags, DMA_NONE);
+ switch (cmd_type) {
+ case DEV_CMD_TYPE_QUERY:
+ ufshcd_prepare_utp_query_req_upiu(hba, upiu_flags);
+ break;
+ case DEV_CMD_TYPE_NOP:
+ ufshcd_prepare_utp_nop_upiu(hba);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
+{
+ unsigned long start;
+ u32 intr_status;
+ u32 enabled_intr_status;
+
+ ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+
+ start = get_timer(0);
+ do {
+ intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
+ enabled_intr_status = intr_status & hba->intr_mask;
+ ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
+
+ if (get_timer(start) > QUERY_REQ_TIMEOUT) {
+ dev_err(hba->dev,
+ "Timedout waiting for UTP response\n");
+
+ return -ETIMEDOUT;
+ }
+
+ if (enabled_intr_status & UFSHCD_ERROR_MASK) {
+ dev_err(hba->dev, "Error in status:%08x\n",
+ enabled_intr_status);
+
+ return -1;
+ }
+ } while (!(enabled_intr_status & UTP_TRANSFER_REQ_COMPL));
+
+ return 0;
+}
+
+/**
+ * ufshcd_get_req_rsp - returns the TR response transaction type
+ */
+static inline int ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
+{
+ return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
+}
+
+/**
+ * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
+ *
+ */
+static inline int ufshcd_get_tr_ocs(struct ufs_hba *hba)
+{
+ return le32_to_cpu(hba->utrdl->header.dword_2) & MASK_OCS;
+}
+
+static inline int ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
+{
+ return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
+}
+
+static int ufshcd_check_query_response(struct ufs_hba *hba)
+{
+ struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
+
+ /* Get the UPIU response */
+ query_res->response = ufshcd_get_rsp_upiu_result(hba->ucd_rsp_ptr) >>
+ UPIU_RSP_CODE_OFFSET;
+ return query_res->response;
+}
+
+/**
+ * ufshcd_copy_query_response() - Copy the Query Response and the data
+ * descriptor
+ */
+static int ufshcd_copy_query_response(struct ufs_hba *hba)
+{
+ struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
+
+ memcpy(&query_res->upiu_res, &hba->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);
+
+ /* Get the descriptor */
+ if (hba->dev_cmd.query.descriptor &&
+ hba->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
+ u8 *descp = (u8 *)hba->ucd_rsp_ptr +
+ GENERAL_UPIU_REQUEST_SIZE;
+ u16 resp_len;
+ u16 buf_len;
+
+ /* data segment length */
+ resp_len = be32_to_cpu(hba->ucd_rsp_ptr->header.dword_2) &
+ MASK_QUERY_DATA_SEG_LEN;
+ buf_len =
+ be16_to_cpu(hba->dev_cmd.query.request.upiu_req.length);
+ if (likely(buf_len >= resp_len)) {
+ memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
+ } else {
+ dev_warn(hba->dev,
+ "%s: Response size is bigger than buffer",
+ __func__);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ufshcd_exec_dev_cmd - API for sending device management requests
+ */
+static int ufshcd_exec_dev_cmd(struct ufs_hba *hba, enum dev_cmd_type cmd_type,
+ int timeout)
+{
+ int err;
+ int resp;
+
+ err = ufshcd_comp_devman_upiu(hba, cmd_type);
+ if (err)
+ return err;
+
+ err = ufshcd_send_command(hba, TASK_TAG);
+ if (err)
+ return err;
+
+ err = ufshcd_get_tr_ocs(hba);
+ if (err) {
+ dev_err(hba->dev, "Error in OCS:%d\n", err);
+ return -EINVAL;
+ }
+
+ resp = ufshcd_get_req_rsp(hba->ucd_rsp_ptr);
+ switch (resp) {
+ case UPIU_TRANSACTION_NOP_IN:
+ break;
+ case UPIU_TRANSACTION_QUERY_RSP:
+ err = ufshcd_check_query_response(hba);
+ if (!err)
+ err = ufshcd_copy_query_response(hba);
+ break;
+ case UPIU_TRANSACTION_REJECT_UPIU:
+ /* TODO: handle Reject UPIU Response */
+ err = -EPERM;
+ dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
+ __func__);
+ break;
+ default:
+ err = -EINVAL;
+ dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
+ __func__, resp);
+ }
+
+ return err;
+}
+
+/**
+ * ufshcd_init_query() - init the query response and request parameters
+ */
+static inline void ufshcd_init_query(struct ufs_hba *hba,
+ struct ufs_query_req **request,
+ struct ufs_query_res **response,
+ enum query_opcode opcode,
+ u8 idn, u8 index, u8 selector)
+{
+ *request = &hba->dev_cmd.query.request;
+ *response = &hba->dev_cmd.query.response;
+ memset(*request, 0, sizeof(struct ufs_query_req));
+ memset(*response, 0, sizeof(struct ufs_query_res));
+ (*request)->upiu_req.opcode = opcode;
+ (*request)->upiu_req.idn = idn;
+ (*request)->upiu_req.index = index;
+ (*request)->upiu_req.selector = selector;
+}
+
+/**
+ * ufshcd_query_flag() - API function for sending flag query requests
+ */
+int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
+ enum flag_idn idn, bool *flag_res)
+{
+ struct ufs_query_req *request = NULL;
+ struct ufs_query_res *response = NULL;
+ int err, index = 0, selector = 0;
+ int timeout = QUERY_REQ_TIMEOUT;
+
+ ufshcd_init_query(hba, &request, &response, opcode, idn, index,
+ selector);
+
+ switch (opcode) {
+ case UPIU_QUERY_OPCODE_SET_FLAG:
+ case UPIU_QUERY_OPCODE_CLEAR_FLAG:
+ case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
+ request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
+ break;
+ case UPIU_QUERY_OPCODE_READ_FLAG:
+ request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
+ if (!flag_res) {
+ /* No dummy reads */
+ dev_err(hba->dev, "%s: Invalid argument for read request\n",
+ __func__);
+ err = -EINVAL;
+ goto out;
+ }
+ break;
+ default:
+ dev_err(hba->dev,
+ "%s: Expected query flag opcode but got = %d\n",
+ __func__, opcode);
+ err = -EINVAL;
+ goto out;
+ }
+
+ err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
+
+ if (err) {
+ dev_err(hba->dev,
+ "%s: Sending flag query for idn %d failed, err = %d\n",
+ __func__, idn, err);
+ goto out;
+ }
+
+ if (flag_res)
+ *flag_res = (be32_to_cpu(response->upiu_res.value) &
+ MASK_QUERY_UPIU_FLAG_LOC) & 0x1;
+
+out:
+ return err;
+}
+
+static int ufshcd_query_flag_retry(struct ufs_hba *hba,
+ enum query_opcode opcode,
+ enum flag_idn idn, bool *flag_res)
+{
+ int ret;
+ int retries;
+
+ for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
+ ret = ufshcd_query_flag(hba, opcode, idn, flag_res);
+ if (ret)
+ dev_dbg(hba->dev,
+ "%s: failed with error %d, retries %d\n",
+ __func__, ret, retries);
+ else
+ break;
+ }
+
+ if (ret)
+ dev_err(hba->dev,
+ "%s: query attribute, opcode %d, idn %d, failed with error %d after %d retires\n",
+ __func__, opcode, idn, ret, retries);
+ return ret;
+}
+
+static int __ufshcd_query_descriptor(struct ufs_hba *hba,
+ enum query_opcode opcode,
+ enum desc_idn idn, u8 index, u8 selector,
+ u8 *desc_buf, int *buf_len)
+{
+ struct ufs_query_req *request = NULL;
+ struct ufs_query_res *response = NULL;
+ int err;
+
+ if (!desc_buf) {
+ dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
+ __func__, opcode);
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
+ dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
+ __func__, *buf_len);
+ err = -EINVAL;
+ goto out;
+ }
+
+ ufshcd_init_query(hba, &request, &response, opcode, idn, index,
+ selector);
+ hba->dev_cmd.query.descriptor = desc_buf;
+ request->upiu_req.length = cpu_to_be16(*buf_len);
+
+ switch (opcode) {
+ case UPIU_QUERY_OPCODE_WRITE_DESC:
+ request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
+ break;
+ case UPIU_QUERY_OPCODE_READ_DESC:
+ request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
+ break;
+ default:
+ dev_err(hba->dev, "%s: Expected query descriptor opcode but got = 0x%.2x\n",
+ __func__, opcode);
+ err = -EINVAL;
+ goto out;
+ }
+
+ err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
+
+ if (err) {
+ dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
+ __func__, opcode, idn, index, err);
+ goto out;
+ }
+
+ hba->dev_cmd.query.descriptor = NULL;
+ *buf_len = be16_to_cpu(response->upiu_res.length);
+
+out:
+ return err;
+}
+
+/**
+ * ufshcd_query_descriptor_retry - API function for sending descriptor requests
+ */
+int ufshcd_query_descriptor_retry(struct ufs_hba *hba, enum query_opcode opcode,
+ enum desc_idn idn, u8 index, u8 selector,
+ u8 *desc_buf, int *buf_len)
+{
+ int err;
+ int retries;
+
+ for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
+ err = __ufshcd_query_descriptor(hba, opcode, idn, index,
+ selector, desc_buf, buf_len);
+ if (!err || err == -EINVAL)
+ break;
+ }
+
+ return err;
+}
+
+/**
+ * ufshcd_read_desc_length - read the specified descriptor length from header
+ */
+static int ufshcd_read_desc_length(struct ufs_hba *hba, enum desc_idn desc_id,
+ int desc_index, int *desc_length)
+{
+ int ret;
+ u8 header[QUERY_DESC_HDR_SIZE];
+ int header_len = QUERY_DESC_HDR_SIZE;
+
+ if (desc_id >= QUERY_DESC_IDN_MAX)
+ return -EINVAL;
+
+ ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
+ desc_id, desc_index, 0, header,
+ &header_len);
+
+ if (ret) {
+ dev_err(hba->dev, "%s: Failed to get descriptor header id %d",
+ __func__, desc_id);
+ return ret;
+ } else if (desc_id != header[QUERY_DESC_DESC_TYPE_OFFSET]) {
+ dev_warn(hba->dev, "%s: descriptor header id %d and desc_id %d mismatch",
+ __func__, header[QUERY_DESC_DESC_TYPE_OFFSET],
+ desc_id);
+ ret = -EINVAL;
+ }
+
+ *desc_length = header[QUERY_DESC_LENGTH_OFFSET];
+
+ return ret;
+}
+
+static void ufshcd_init_desc_sizes(struct ufs_hba *hba)
+{
+ int err;
+
+ err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_DEVICE, 0,
+ &hba->desc_size.dev_desc);
+ if (err)
+ hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
+
+ err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_POWER, 0,
+ &hba->desc_size.pwr_desc);
+ if (err)
+ hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
+
+ err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_INTERCONNECT, 0,
+ &hba->desc_size.interc_desc);
+ if (err)
+ hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
+
+ err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_CONFIGURATION, 0,
+ &hba->desc_size.conf_desc);
+ if (err)
+ hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
+
+ err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_UNIT, 0,
+ &hba->desc_size.unit_desc);
+ if (err)
+ hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
+
+ err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_GEOMETRY, 0,
+ &hba->desc_size.geom_desc);
+ if (err)
+ hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
+
+ err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_HEALTH, 0,
+ &hba->desc_size.hlth_desc);
+ if (err)
+ hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
+}
+
+/**
+ * ufshcd_map_desc_id_to_length - map descriptor IDN to its length
+ *
+ */
+int ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
+ int *desc_len)
+{
+ switch (desc_id) {
+ case QUERY_DESC_IDN_DEVICE:
+ *desc_len = hba->desc_size.dev_desc;
+ break;
+ case QUERY_DESC_IDN_POWER:
+ *desc_len = hba->desc_size.pwr_desc;
+ break;
+ case QUERY_DESC_IDN_GEOMETRY:
+ *desc_len = hba->desc_size.geom_desc;
+ break;
+ case QUERY_DESC_IDN_CONFIGURATION:
+ *desc_len = hba->desc_size.conf_desc;
+ break;
+ case QUERY_DESC_IDN_UNIT:
+ *desc_len = hba->desc_size.unit_desc;
+ break;
+ case QUERY_DESC_IDN_INTERCONNECT:
+ *desc_len = hba->desc_size.interc_desc;
+ break;
+ case QUERY_DESC_IDN_STRING:
+ *desc_len = QUERY_DESC_MAX_SIZE;
+ break;
+ case QUERY_DESC_IDN_HEALTH:
+ *desc_len = hba->desc_size.hlth_desc;
+ break;
+ case QUERY_DESC_IDN_RFU_0:
+ case QUERY_DESC_IDN_RFU_1:
+ *desc_len = 0;
+ break;
+ default:
+ *desc_len = 0;
+ return -EINVAL;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(ufshcd_map_desc_id_to_length);
+
+/**
+ * ufshcd_read_desc_param - read the specified descriptor parameter
+ *
+ */
+int ufshcd_read_desc_param(struct ufs_hba *hba, enum desc_idn desc_id,
+ int desc_index, u8 param_offset, u8 *param_read_buf,
+ u8 param_size)
+{
+ int ret;
+ u8 *desc_buf;
+ int buff_len;
+ bool is_kmalloc = true;
+
+ /* Safety check */
+ if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
+ return -EINVAL;
+
+ /* Get the max length of descriptor from structure filled up at probe
+ * time.
+ */
+ ret = ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len);
+
+ /* Sanity checks */
+ if (ret || !buff_len) {
+ dev_err(hba->dev, "%s: Failed to get full descriptor length",
+ __func__);
+ return ret;
+ }
+
+ /* Check whether we need temp memory */
+ if (param_offset != 0 || param_size < buff_len) {
+ desc_buf = kmalloc(buff_len, GFP_KERNEL);
+ if (!desc_buf)
+ return -ENOMEM;
+ } else {
+ desc_buf = param_read_buf;
+ is_kmalloc = false;
+ }
+
+ /* Request for full descriptor */
+ ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
+ desc_id, desc_index, 0, desc_buf,
+ &buff_len);
+
+ if (ret) {
+ dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
+ __func__, desc_id, desc_index, param_offset, ret);
+ goto out;
+ }
+
+ /* Sanity check */
+ if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
+ dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header",
+ __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Check whether we will not copy more data than is available */
+ if (is_kmalloc && param_size > buff_len)
+ param_size = buff_len;
+
+ if (is_kmalloc)
+ memcpy(param_read_buf, &desc_buf[param_offset], param_size);
+out:
+ if (is_kmalloc)
+ kfree(desc_buf);
+ return ret;
+}
+
+/* replace non-printable or non-ASCII characters with spaces */
+static inline void ufshcd_remove_non_printable(uint8_t *val)
+{
+ if (!val)
+ return;
+
+ if (*val < 0x20 || *val > 0x7e)
+ *val = ' ';
+}
+
+/**
+ * ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power
+ * state) and waits for it to take effect.
+ *
+ */
+static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
+{
+ unsigned long start = 0;
+ u8 status;
+ int ret;
+
+ ret = ufshcd_send_uic_cmd(hba, cmd);
+ if (ret) {
+ dev_err(hba->dev,
+ "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
+ cmd->command, cmd->argument3, ret);
+
+ return ret;
+ }
+
+ start = get_timer(0);
+ do {
+ status = ufshcd_get_upmcrs(hba);
+ if (get_timer(start) > UFS_UIC_CMD_TIMEOUT) {
+ dev_err(hba->dev,
+ "pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n",
+ cmd->command, status);
+ ret = (status != PWR_OK) ? status : -1;
+ break;
+ }
+ } while (status != PWR_LOCAL);
+
+ return ret;
+}
+
+/**
+ * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
+ * using DME_SET primitives.
+ */
+static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
+{
+ struct uic_command uic_cmd = {0};
+ int ret;
+
+ uic_cmd.command = UIC_CMD_DME_SET;
+ uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
+ uic_cmd.argument3 = mode;
+ ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
+
+ return ret;
+}
+
+static
+void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufs_hba *hba,
+ struct scsi_cmd *pccb, u32 upiu_flags)
+{
+ struct utp_upiu_req *ucd_req_ptr = hba->ucd_req_ptr;
+ unsigned int cdb_len;
+
+ /* command descriptor fields */
+ ucd_req_ptr->header.dword_0 =
+ UPIU_HEADER_DWORD(UPIU_TRANSACTION_COMMAND, upiu_flags,
+ pccb->lun, TASK_TAG);
+ ucd_req_ptr->header.dword_1 =
+ UPIU_HEADER_DWORD(UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);
+
+ /* Total EHS length and Data segment length will be zero */
+ ucd_req_ptr->header.dword_2 = 0;
+
+ ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(pccb->datalen);
+
+ cdb_len = min_t(unsigned short, pccb->cmdlen, UFS_CDB_SIZE);
+ memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE);
+ memcpy(ucd_req_ptr->sc.cdb, pccb->cmd, cdb_len);
+
+ memset(hba->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
+}
+
+static inline void prepare_prdt_desc(struct ufshcd_sg_entry *entry,
+ unsigned char *buf, ulong len)
+{
+ entry->size = cpu_to_le32(len) | GENMASK(1, 0);
+ entry->base_addr = cpu_to_le32(lower_32_bits((unsigned long)buf));
+ entry->upper_addr = cpu_to_le32(upper_32_bits((unsigned long)buf));
+}
+
+static void prepare_prdt_table(struct ufs_hba *hba, struct scsi_cmd *pccb)
+{
+ struct utp_transfer_req_desc *req_desc = hba->utrdl;
+ struct ufshcd_sg_entry *prd_table = hba->ucd_prdt_ptr;
+ ulong datalen = pccb->datalen;
+ int table_length;
+ u8 *buf;
+ int i;
+
+ if (!datalen) {
+ req_desc->prd_table_length = 0;
+ return;
+ }
+
+ table_length = DIV_ROUND_UP(pccb->datalen, MAX_PRDT_ENTRY);
+ buf = pccb->pdata;
+ i = table_length;
+ while (--i) {
+ prepare_prdt_desc(&prd_table[table_length - i - 1], buf,
+ MAX_PRDT_ENTRY - 1);
+ buf += MAX_PRDT_ENTRY;
+ datalen -= MAX_PRDT_ENTRY;
+ }
+
+ prepare_prdt_desc(&prd_table[table_length - i - 1], buf, datalen - 1);
+
+ req_desc->prd_table_length = table_length;
+}
+
+static int ufs_scsi_exec(struct udevice *scsi_dev, struct scsi_cmd *pccb)
+{
+ struct ufs_hba *hba = dev_get_uclass_priv(scsi_dev->parent);
+ struct utp_transfer_req_desc *req_desc = hba->utrdl;
+ u32 upiu_flags;
+ int ocs, result = 0;
+ u8 scsi_status;
+
+ ufshcd_prepare_req_desc_hdr(req_desc, &upiu_flags, pccb->dma_dir);
+ ufshcd_prepare_utp_scsi_cmd_upiu(hba, pccb, upiu_flags);
+ prepare_prdt_table(hba, pccb);
+
+ ufshcd_send_command(hba, TASK_TAG);
+
+ ocs = ufshcd_get_tr_ocs(hba);
+ switch (ocs) {
+ case OCS_SUCCESS:
+ result = ufshcd_get_req_rsp(hba->ucd_rsp_ptr);
+ switch (result) {
+ case UPIU_TRANSACTION_RESPONSE:
+ result = ufshcd_get_rsp_upiu_result(hba->ucd_rsp_ptr);
+
+ scsi_status = result & MASK_SCSI_STATUS;
+ if (scsi_status)
+ return -EINVAL;
+
+ break;
+ case UPIU_TRANSACTION_REJECT_UPIU:
+ /* TODO: handle Reject UPIU Response */
+ dev_err(hba->dev,
+ "Reject UPIU not fully implemented\n");
+ return -EINVAL;
+ default:
+ dev_err(hba->dev,
+ "Unexpected request response code = %x\n",
+ result);
+ return -EINVAL;
+ }
+ break;
+ default:
+ dev_err(hba->dev, "OCS error from controller = %x\n", ocs);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static inline int ufshcd_read_desc(struct ufs_hba *hba, enum desc_idn desc_id,
+ int desc_index, u8 *buf, u32 size)
+{
+ return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size);
+}
+
+static int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
+{
+ return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size);
+}
+
+/**
+ * ufshcd_read_string_desc - read string descriptor
+ *
+ */
+int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index,
+ u8 *buf, u32 size, bool ascii)
+{
+ int err = 0;
+
+ err = ufshcd_read_desc(hba, QUERY_DESC_IDN_STRING, desc_index, buf,
+ size);
+
+ if (err) {
+ dev_err(hba->dev, "%s: reading String Desc failed after %d retries. err = %d\n",
+ __func__, QUERY_REQ_RETRIES, err);
+ goto out;
+ }
+
+ if (ascii) {
+ int desc_len;
+ int ascii_len;
+ int i;
+ u8 *buff_ascii;
+
+ desc_len = buf[0];
+ /* remove header and divide by 2 to move from UTF16 to UTF8 */
+ ascii_len = (desc_len - QUERY_DESC_HDR_SIZE) / 2 + 1;
+ if (size < ascii_len + QUERY_DESC_HDR_SIZE) {
+ dev_err(hba->dev, "%s: buffer allocated size is too small\n",
+ __func__);
+ err = -ENOMEM;
+ goto out;
+ }
+
+ buff_ascii = kmalloc(ascii_len, GFP_KERNEL);
+ if (!buff_ascii) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ /*
+ * the descriptor contains string in UTF16 format
+ * we need to convert to utf-8 so it can be displayed
+ */
+ utf16_to_utf8(buff_ascii,
+ (uint16_t *)&buf[QUERY_DESC_HDR_SIZE], ascii_len);
+
+ /* replace non-printable or non-ASCII characters with spaces */
+ for (i = 0; i < ascii_len; i++)
+ ufshcd_remove_non_printable(&buff_ascii[i]);
+
+ memset(buf + QUERY_DESC_HDR_SIZE, 0,
+ size - QUERY_DESC_HDR_SIZE);
+ memcpy(buf + QUERY_DESC_HDR_SIZE, buff_ascii, ascii_len);
+ buf[QUERY_DESC_LENGTH_OFFSET] = ascii_len + QUERY_DESC_HDR_SIZE;
+ kfree(buff_ascii);
+ }
+out:
+ return err;
+}
+
+static int ufs_get_device_desc(struct ufs_hba *hba,
+ struct ufs_dev_desc *dev_desc)
+{
+ int err;
+ size_t buff_len;
+ u8 model_index;
+ u8 *desc_buf;
+
+ buff_len = max_t(size_t, hba->desc_size.dev_desc,
+ QUERY_DESC_MAX_SIZE + 1);
+ desc_buf = kmalloc(buff_len, GFP_KERNEL);
+ if (!desc_buf) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = ufshcd_read_device_desc(hba, desc_buf, hba->desc_size.dev_desc);
+ if (err) {
+ dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ /*
+ * getting vendor (manufacturerID) and Bank Index in big endian
+ * format
+ */
+ dev_desc->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
+ desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
+
+ model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
+
+ /* Zero-pad entire buffer for string termination. */
+ memset(desc_buf, 0, buff_len);
+
+ err = ufshcd_read_string_desc(hba, model_index, desc_buf,
+ QUERY_DESC_MAX_SIZE, true/*ASCII*/);
+ if (err) {
+ dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ desc_buf[QUERY_DESC_MAX_SIZE] = '\0';
+ strlcpy(dev_desc->model, (char *)(desc_buf + QUERY_DESC_HDR_SIZE),
+ min_t(u8, desc_buf[QUERY_DESC_LENGTH_OFFSET],
+ MAX_MODEL_LEN));
+
+ /* Null terminate the model string */
+ dev_desc->model[MAX_MODEL_LEN] = '\0';
+
+out:
+ kfree(desc_buf);
+ return err;
+}
+
+/**
+ * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
+ */
+static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
+{
+ struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
+
+ if (hba->max_pwr_info.is_valid)
+ return 0;
+
+ pwr_info->pwr_tx = FAST_MODE;
+ pwr_info->pwr_rx = FAST_MODE;
+ pwr_info->hs_rate = PA_HS_MODE_B;
+
+ /* Get the connected lane count */
+ ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
+ &pwr_info->lane_rx);
+ ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
+ &pwr_info->lane_tx);
+
+ if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
+ dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
+ __func__, pwr_info->lane_rx, pwr_info->lane_tx);
+ return -EINVAL;
+ }
+
+ /*
+ * First, get the maximum gears of HS speed.
+ * If a zero value, it means there is no HSGEAR capability.
+ * Then, get the maximum gears of PWM speed.
+ */
+ ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
+ if (!pwr_info->gear_rx) {
+ ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
+ &pwr_info->gear_rx);
+ if (!pwr_info->gear_rx) {
+ dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
+ __func__, pwr_info->gear_rx);
+ return -EINVAL;
+ }
+ pwr_info->pwr_rx = SLOW_MODE;
+ }
+
+ ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
+ &pwr_info->gear_tx);
+ if (!pwr_info->gear_tx) {
+ ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
+ &pwr_info->gear_tx);
+ if (!pwr_info->gear_tx) {
+ dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
+ __func__, pwr_info->gear_tx);
+ return -EINVAL;
+ }
+ pwr_info->pwr_tx = SLOW_MODE;
+ }
+
+ hba->max_pwr_info.is_valid = true;
+ return 0;
+}
+
+static int ufshcd_change_power_mode(struct ufs_hba *hba,
+ struct ufs_pa_layer_attr *pwr_mode)
+{
+ int ret;
+
+ /* if already configured to the requested pwr_mode */
+ if (pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
+ pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
+ pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
+ pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
+ pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
+ pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
+ pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
+ dev_dbg(hba->dev, "%s: power already configured\n", __func__);
+ return 0;
+ }
+
+ /*
+ * Configure attributes for power mode change with below.
+ * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
+ * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
+ * - PA_HSSERIES
+ */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
+ pwr_mode->lane_rx);
+ if (pwr_mode->pwr_rx == FASTAUTO_MODE || pwr_mode->pwr_rx == FAST_MODE)
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
+ else
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
+ pwr_mode->lane_tx);
+ if (pwr_mode->pwr_tx == FASTAUTO_MODE || pwr_mode->pwr_tx == FAST_MODE)
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
+ else
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);
+
+ if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
+ pwr_mode->pwr_tx == FASTAUTO_MODE ||
+ pwr_mode->pwr_rx == FAST_MODE ||
+ pwr_mode->pwr_tx == FAST_MODE)
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
+ pwr_mode->hs_rate);
+
+ ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4 |
+ pwr_mode->pwr_tx);
+
+ if (ret) {
+ dev_err(hba->dev,
+ "%s: power mode change failed %d\n", __func__, ret);
+
+ return ret;
+ }
+
+ /* Copy new Power Mode to power info */
+ memcpy(&hba->pwr_info, pwr_mode, sizeof(struct ufs_pa_layer_attr));
+
+ return ret;
+}
+
+/**
+ * ufshcd_verify_dev_init() - Verify device initialization
+ *
+ */
+static int ufshcd_verify_dev_init(struct ufs_hba *hba)
+{
+ int retries;
+ int err;
+
+ for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
+ err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
+ NOP_OUT_TIMEOUT);
+ if (!err || err == -ETIMEDOUT)
+ break;
+
+ dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
+ }
+
+ if (err)
+ dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
+
+ return err;
+}
+
+/**
+ * ufshcd_complete_dev_init() - checks device readiness
+ */
+static int ufshcd_complete_dev_init(struct ufs_hba *hba)
+{
+ int i;
+ int err;
+ bool flag_res = 1;
+
+ err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
+ QUERY_FLAG_IDN_FDEVICEINIT, NULL);
+ if (err) {
+ dev_err(hba->dev,
+ "%s setting fDeviceInit flag failed with error %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ /* poll for max. 1000 iterations for fDeviceInit flag to clear */
+ for (i = 0; i < 1000 && !err && flag_res; i++)
+ err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
+ QUERY_FLAG_IDN_FDEVICEINIT,
+ &flag_res);
+
+ if (err)
+ dev_err(hba->dev,
+ "%s reading fDeviceInit flag failed with error %d\n",
+ __func__, err);
+ else if (flag_res)
+ dev_err(hba->dev,
+ "%s fDeviceInit was not cleared by the device\n",
+ __func__);
+
+out:
+ return err;
+}
+
+static void ufshcd_def_desc_sizes(struct ufs_hba *hba)
+{
+ hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
+ hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
+ hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
+ hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
+ hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
+ hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
+ hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
+}
+
+int ufs_start(struct ufs_hba *hba)
+{
+ struct ufs_dev_desc card = {0};
+ int ret;
+
+ ret = ufshcd_link_startup(hba);
+ if (ret)
+ return ret;
+
+ ret = ufshcd_verify_dev_init(hba);
+ if (ret)
+ return ret;
+
+ ret = ufshcd_complete_dev_init(hba);
+ if (ret)
+ return ret;
+
+ /* Init check for device descriptor sizes */
+ ufshcd_init_desc_sizes(hba);
+
+ ret = ufs_get_device_desc(hba, &card);
+ if (ret) {
+ dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
+ __func__, ret);
+
+ return ret;
+ }
+
+ if (ufshcd_get_max_pwr_mode(hba)) {
+ dev_err(hba->dev,
+ "%s: Failed getting max supported power mode\n",
+ __func__);
+ } else {
+ ret = ufshcd_change_power_mode(hba, &hba->max_pwr_info.info);
+ if (ret) {
+ dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
+ __func__, ret);
+
+ return ret;
+ }
+
+ printf("Device at %s up at:", hba->dev->name);
+ ufshcd_print_pwr_info(hba);
+ }
+
+ return 0;
+}
+
+int ufshcd_probe(struct udevice *ufs_dev, struct ufs_hba_ops *hba_ops)
+{
+ struct ufs_hba *hba = dev_get_uclass_priv(ufs_dev);
+ struct scsi_platdata *scsi_plat;
+ struct udevice *scsi_dev;
+ int err;
+
+ device_find_first_child(ufs_dev, &scsi_dev);
+ if (!scsi_dev)
+ return -ENODEV;
+
+ scsi_plat = dev_get_uclass_platdata(scsi_dev);
+ scsi_plat->max_id = UFSHCD_MAX_ID;
+ scsi_plat->max_lun = UFS_MAX_LUNS;
+ scsi_plat->max_bytes_per_req = UFS_MAX_BYTES;
+
+ hba->dev = ufs_dev;
+ hba->ops = hba_ops;
+ hba->mmio_base = (void *)dev_read_addr(ufs_dev);
+
+ /* Set descriptor lengths to specification defaults */
+ ufshcd_def_desc_sizes(hba);
+
+ ufshcd_ops_init(hba);
+
+ /* Read capabilities registers */
+ hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
+
+ /* Get UFS version supported by the controller */
+ hba->version = ufshcd_get_ufs_version(hba);
+ if (hba->version != UFSHCI_VERSION_10 &&
+ hba->version != UFSHCI_VERSION_11 &&
+ hba->version != UFSHCI_VERSION_20 &&
+ hba->version != UFSHCI_VERSION_21)
+ dev_err(hba->dev, "invalid UFS version 0x%x\n",
+ hba->version);
+
+ /* Get Interrupt bit mask per version */
+ hba->intr_mask = ufshcd_get_intr_mask(hba);
+
+ /* Allocate memory for host memory space */
+ err = ufshcd_memory_alloc(hba);
+ if (err) {
+ dev_err(hba->dev, "Memory allocation failed\n");
+ return err;
+ }
+
+ /* Configure Local data structures */
+ ufshcd_host_memory_configure(hba);
+
+ /*
+ * In order to avoid any spurious interrupt immediately after
+ * registering UFS controller interrupt handler, clear any pending UFS
+ * interrupt status and disable all the UFS interrupts.
+ */
+ ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
+ REG_INTERRUPT_STATUS);
+ ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
+
+ err = ufshcd_hba_enable(hba);
+ if (err) {
+ dev_err(hba->dev, "Host controller enable failed\n");
+ return err;
+ }
+
+ err = ufs_start(hba);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+int ufs_scsi_bind(struct udevice *ufs_dev, struct udevice **scsi_devp)
+{
+ int ret = device_bind_driver(ufs_dev, "ufs_scsi", "ufs_scsi",
+ scsi_devp);
+
+ return ret;
+}
+
+static struct scsi_ops ufs_ops = {
+ .exec = ufs_scsi_exec,
+};
+
+int ufs_probe_dev(int index)
+{
+ struct udevice *dev;
+
+ return uclass_get_device(UCLASS_UFS, index, &dev);
+}
+
+int ufs_probe(void)
+{
+ struct udevice *dev;
+ int ret, i;
+
+ for (i = 0;; i++) {
+ ret = uclass_get_device(UCLASS_UFS, i, &dev);
+ if (ret == -ENODEV)
+ break;
+ }
+
+ return 0;
+}
+
+U_BOOT_DRIVER(ufs_scsi) = {
+ .id = UCLASS_SCSI,
+ .name = "ufs_scsi",
+ .ops = &ufs_ops,
+};
diff --git a/drivers/ufs/ufs.h b/drivers/ufs/ufs.h
new file mode 100644
index 00000000000..e0bde937768
--- /dev/null
+++ b/drivers/ufs/ufs.h
@@ -0,0 +1,918 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+#ifndef __UFS_H
+#define __UFS_H
+
+#include <asm/io.h>
+#include <dm.h>
+
+#include "unipro.h"
+
+#define UFS_CDB_SIZE 16
+#define UPIU_TRANSACTION_UIC_CMD 0x1F
+#define UIC_CMD_SIZE (sizeof(u32) * 4)
+#define RESPONSE_UPIU_SENSE_DATA_LENGTH 18
+#define UFS_MAX_LUNS 0x7F
+
+enum {
+ TASK_REQ_UPIU_SIZE_DWORDS = 8,
+ TASK_RSP_UPIU_SIZE_DWORDS = 8,
+ ALIGNED_UPIU_SIZE = 512,
+};
+
+/* UFS device power modes */
+enum ufs_dev_pwr_mode {
+ UFS_ACTIVE_PWR_MODE = 1,
+ UFS_SLEEP_PWR_MODE = 2,
+ UFS_POWERDOWN_PWR_MODE = 3,
+};
+
+enum ufs_notify_change_status {
+ PRE_CHANGE,
+ POST_CHANGE,
+};
+
+struct ufs_pa_layer_attr {
+ u32 gear_rx;
+ u32 gear_tx;
+ u32 lane_rx;
+ u32 lane_tx;
+ u32 pwr_rx;
+ u32 pwr_tx;
+ u32 hs_rate;
+};
+
+struct ufs_pwr_mode_info {
+ bool is_valid;
+ struct ufs_pa_layer_attr info;
+};
+
+enum ufs_desc_def_size {
+ QUERY_DESC_DEVICE_DEF_SIZE = 0x40,
+ QUERY_DESC_CONFIGURATION_DEF_SIZE = 0x90,
+ QUERY_DESC_UNIT_DEF_SIZE = 0x23,
+ QUERY_DESC_INTERCONNECT_DEF_SIZE = 0x06,
+ QUERY_DESC_GEOMETRY_DEF_SIZE = 0x48,
+ QUERY_DESC_POWER_DEF_SIZE = 0x62,
+ QUERY_DESC_HEALTH_DEF_SIZE = 0x25,
+};
+
+struct ufs_desc_size {
+ int dev_desc;
+ int pwr_desc;
+ int geom_desc;
+ int interc_desc;
+ int unit_desc;
+ int conf_desc;
+ int hlth_desc;
+};
+
+/*
+ * Request Descriptor Definitions
+ */
+
+/* Transfer request command type */
+enum {
+ UTP_CMD_TYPE_SCSI = 0x0,
+ UTP_CMD_TYPE_UFS = 0x1,
+ UTP_CMD_TYPE_DEV_MANAGE = 0x2,
+};
+
+/* UTP Transfer Request Command Offset */
+#define UPIU_COMMAND_TYPE_OFFSET 28
+
+/* Offset of the response code in the UPIU header */
+#define UPIU_RSP_CODE_OFFSET 8
+
+/* To accommodate UFS2.0 required Command type */
+enum {
+ UTP_CMD_TYPE_UFS_STORAGE = 0x1,
+};
+
+enum {
+ UTP_SCSI_COMMAND = 0x00000000,
+ UTP_NATIVE_UFS_COMMAND = 0x10000000,
+ UTP_DEVICE_MANAGEMENT_FUNCTION = 0x20000000,
+ UTP_REQ_DESC_INT_CMD = 0x01000000,
+};
+
+/* UTP Transfer Request Data Direction (DD) */
+enum {
+ UTP_NO_DATA_TRANSFER = 0x00000000,
+ UTP_HOST_TO_DEVICE = 0x02000000,
+ UTP_DEVICE_TO_HOST = 0x04000000,
+};
+
+/* Overall command status values */
+enum {
+ OCS_SUCCESS = 0x0,
+ OCS_INVALID_CMD_TABLE_ATTR = 0x1,
+ OCS_INVALID_PRDT_ATTR = 0x2,
+ OCS_MISMATCH_DATA_BUF_SIZE = 0x3,
+ OCS_MISMATCH_RESP_UPIU_SIZE = 0x4,
+ OCS_PEER_COMM_FAILURE = 0x5,
+ OCS_ABORTED = 0x6,
+ OCS_FATAL_ERROR = 0x7,
+ OCS_INVALID_COMMAND_STATUS = 0x0F,
+ MASK_OCS = 0x0F,
+};
+
+/* The maximum length of the data byte count field in the PRDT is 256KB */
+#define PRDT_DATA_BYTE_COUNT_MAX (256 * 1024)
+/* The granularity of the data byte count field in the PRDT is 32-bit */
+#define PRDT_DATA_BYTE_COUNT_PAD 4
+
+#define GENERAL_UPIU_REQUEST_SIZE (sizeof(struct utp_upiu_req))
+#define QUERY_DESC_MAX_SIZE 255
+#define QUERY_DESC_MIN_SIZE 2
+#define QUERY_DESC_HDR_SIZE 2
+#define QUERY_OSF_SIZE (GENERAL_UPIU_REQUEST_SIZE - \
+ (sizeof(struct utp_upiu_header)))
+#define UPIU_HEADER_DWORD(byte3, byte2, byte1, byte0)\
+			cpu_to_be32(((byte3) << 24) | ((byte2) << 16) |\
+			((byte1) << 8) | (byte0))
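+/*
+ * Example (illustrative): the driver builds the first header dword of a
+ * Command UPIU roughly as
+ *	UPIU_HEADER_DWORD(UPIU_TRANSACTION_COMMAND, upiu_flags, lun, task_tag)
+ */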
+/*
+ * UFS Protocol Information Unit related definitions
+ */
+
+/* Task management functions */
+enum {
+ UFS_ABORT_TASK = 0x01,
+ UFS_ABORT_TASK_SET = 0x02,
+ UFS_CLEAR_TASK_SET = 0x04,
+ UFS_LOGICAL_RESET = 0x08,
+ UFS_QUERY_TASK = 0x80,
+ UFS_QUERY_TASK_SET = 0x81,
+};
+
+/* UTP UPIU Transaction Codes Initiator to Target */
+enum {
+ UPIU_TRANSACTION_NOP_OUT = 0x00,
+ UPIU_TRANSACTION_COMMAND = 0x01,
+ UPIU_TRANSACTION_DATA_OUT = 0x02,
+ UPIU_TRANSACTION_TASK_REQ = 0x04,
+ UPIU_TRANSACTION_QUERY_REQ = 0x16,
+};
+
+/* UTP UPIU Transaction Codes Target to Initiator */
+enum {
+ UPIU_TRANSACTION_NOP_IN = 0x20,
+ UPIU_TRANSACTION_RESPONSE = 0x21,
+ UPIU_TRANSACTION_DATA_IN = 0x22,
+ UPIU_TRANSACTION_TASK_RSP = 0x24,
+ UPIU_TRANSACTION_READY_XFER = 0x31,
+ UPIU_TRANSACTION_QUERY_RSP = 0x36,
+ UPIU_TRANSACTION_REJECT_UPIU = 0x3F,
+};
+
+/* UPIU Read/Write flags */
+enum {
+ UPIU_CMD_FLAGS_NONE = 0x00,
+ UPIU_CMD_FLAGS_WRITE = 0x20,
+ UPIU_CMD_FLAGS_READ = 0x40,
+};
+
+/* UPIU Task Attributes */
+enum {
+ UPIU_TASK_ATTR_SIMPLE = 0x00,
+ UPIU_TASK_ATTR_ORDERED = 0x01,
+ UPIU_TASK_ATTR_HEADQ = 0x02,
+ UPIU_TASK_ATTR_ACA = 0x03,
+};
+
+/* UPIU Query request function */
+enum {
+ UPIU_QUERY_FUNC_STANDARD_READ_REQUEST = 0x01,
+ UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST = 0x81,
+};
+
+enum {
+ MASK_SCSI_STATUS = 0xFF,
+ MASK_TASK_RESPONSE = 0xFF00,
+ MASK_RSP_UPIU_RESULT = 0xFFFF,
+ MASK_QUERY_DATA_SEG_LEN = 0xFFFF,
+ MASK_RSP_UPIU_DATA_SEG_LEN = 0xFFFF,
+ MASK_RSP_EXCEPTION_EVENT = 0x10000,
+ MASK_TM_SERVICE_RESP = 0xFF,
+ MASK_TM_FUNC = 0xFF,
+};
+
+/* UTP QUERY Transaction Specific Fields OpCode */
+enum query_opcode {
+ UPIU_QUERY_OPCODE_NOP = 0x0,
+ UPIU_QUERY_OPCODE_READ_DESC = 0x1,
+ UPIU_QUERY_OPCODE_WRITE_DESC = 0x2,
+ UPIU_QUERY_OPCODE_READ_ATTR = 0x3,
+ UPIU_QUERY_OPCODE_WRITE_ATTR = 0x4,
+ UPIU_QUERY_OPCODE_READ_FLAG = 0x5,
+ UPIU_QUERY_OPCODE_SET_FLAG = 0x6,
+ UPIU_QUERY_OPCODE_CLEAR_FLAG = 0x7,
+ UPIU_QUERY_OPCODE_TOGGLE_FLAG = 0x8,
+};
+
+/* Query response result code */
+enum {
+ QUERY_RESULT_SUCCESS = 0x00,
+ QUERY_RESULT_NOT_READABLE = 0xF6,
+ QUERY_RESULT_NOT_WRITEABLE = 0xF7,
+ QUERY_RESULT_ALREADY_WRITTEN = 0xF8,
+ QUERY_RESULT_INVALID_LENGTH = 0xF9,
+ QUERY_RESULT_INVALID_VALUE = 0xFA,
+ QUERY_RESULT_INVALID_SELECTOR = 0xFB,
+ QUERY_RESULT_INVALID_INDEX = 0xFC,
+ QUERY_RESULT_INVALID_IDN = 0xFD,
+ QUERY_RESULT_INVALID_OPCODE = 0xFE,
+ QUERY_RESULT_GENERAL_FAILURE = 0xFF,
+};
+
+enum {
+ UPIU_COMMAND_SET_TYPE_SCSI = 0x0,
+ UPIU_COMMAND_SET_TYPE_UFS = 0x1,
+ UPIU_COMMAND_SET_TYPE_QUERY = 0x2,
+};
+
+/* Flag idn for Query Requests */
+enum flag_idn {
+ QUERY_FLAG_IDN_FDEVICEINIT = 0x01,
+ QUERY_FLAG_IDN_PERMANENT_WPE = 0x02,
+ QUERY_FLAG_IDN_PWR_ON_WPE = 0x03,
+ QUERY_FLAG_IDN_BKOPS_EN = 0x04,
+ QUERY_FLAG_IDN_LIFE_SPAN_MODE_ENABLE = 0x05,
+ QUERY_FLAG_IDN_PURGE_ENABLE = 0x06,
+ QUERY_FLAG_IDN_RESERVED2 = 0x07,
+ QUERY_FLAG_IDN_FPHYRESOURCEREMOVAL = 0x08,
+ QUERY_FLAG_IDN_BUSY_RTC = 0x09,
+ QUERY_FLAG_IDN_RESERVED3 = 0x0A,
+ QUERY_FLAG_IDN_PERMANENTLY_DISABLE_FW_UPDATE = 0x0B,
+};
+
+/* Attribute idn for Query requests */
+enum attr_idn {
+ QUERY_ATTR_IDN_BOOT_LU_EN = 0x00,
+ QUERY_ATTR_IDN_RESERVED = 0x01,
+ QUERY_ATTR_IDN_POWER_MODE = 0x02,
+ QUERY_ATTR_IDN_ACTIVE_ICC_LVL = 0x03,
+ QUERY_ATTR_IDN_OOO_DATA_EN = 0x04,
+ QUERY_ATTR_IDN_BKOPS_STATUS = 0x05,
+ QUERY_ATTR_IDN_PURGE_STATUS = 0x06,
+ QUERY_ATTR_IDN_MAX_DATA_IN = 0x07,
+ QUERY_ATTR_IDN_MAX_DATA_OUT = 0x08,
+ QUERY_ATTR_IDN_DYN_CAP_NEEDED = 0x09,
+ QUERY_ATTR_IDN_REF_CLK_FREQ = 0x0A,
+ QUERY_ATTR_IDN_CONF_DESC_LOCK = 0x0B,
+ QUERY_ATTR_IDN_MAX_NUM_OF_RTT = 0x0C,
+ QUERY_ATTR_IDN_EE_CONTROL = 0x0D,
+ QUERY_ATTR_IDN_EE_STATUS = 0x0E,
+ QUERY_ATTR_IDN_SECONDS_PASSED = 0x0F,
+ QUERY_ATTR_IDN_CNTX_CONF = 0x10,
+ QUERY_ATTR_IDN_CORR_PRG_BLK_NUM = 0x11,
+ QUERY_ATTR_IDN_RESERVED2 = 0x12,
+ QUERY_ATTR_IDN_RESERVED3 = 0x13,
+ QUERY_ATTR_IDN_FFU_STATUS = 0x14,
+ QUERY_ATTR_IDN_PSA_STATE = 0x15,
+ QUERY_ATTR_IDN_PSA_DATA_SIZE = 0x16,
+};
+
+/* Descriptor idn for Query requests */
+enum desc_idn {
+ QUERY_DESC_IDN_DEVICE = 0x0,
+ QUERY_DESC_IDN_CONFIGURATION = 0x1,
+ QUERY_DESC_IDN_UNIT = 0x2,
+ QUERY_DESC_IDN_RFU_0 = 0x3,
+ QUERY_DESC_IDN_INTERCONNECT = 0x4,
+ QUERY_DESC_IDN_STRING = 0x5,
+ QUERY_DESC_IDN_RFU_1 = 0x6,
+ QUERY_DESC_IDN_GEOMETRY = 0x7,
+ QUERY_DESC_IDN_POWER = 0x8,
+ QUERY_DESC_IDN_HEALTH = 0x9,
+ QUERY_DESC_IDN_MAX,
+};
+
+enum desc_header_offset {
+ QUERY_DESC_LENGTH_OFFSET = 0x00,
+ QUERY_DESC_DESC_TYPE_OFFSET = 0x01,
+};
+
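+/**
+ * struct ufshcd_sg_entry - UFSHCI physical region descriptor (PRDT entry)
+ * @base_addr: lower 32 bits of the data segment physical address
+ * @upper_addr: upper 32 bits of the data segment physical address
+ * @reserved: reserved, set to 0
+ * @size: data byte count of the segment, encoded as (length - 1)
+ */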
+struct ufshcd_sg_entry {
+ __le32 base_addr;
+ __le32 upper_addr;
+ __le32 reserved;
+ __le32 size;
+};
+
+#define MAX_BUFF 128
+/**
+ * struct utp_transfer_cmd_desc - UFS Command Descriptor structure
+ * @command_upiu: Command UPIU Frame
+ * @response_upiu: Response UPIU Frame
+ * @prd_table: Physical Region Descriptor Table
+ */
+struct utp_transfer_cmd_desc {
+ u8 command_upiu[ALIGNED_UPIU_SIZE];
+ u8 response_upiu[ALIGNED_UPIU_SIZE];
+ struct ufshcd_sg_entry prd_table[MAX_BUFF];
+};
+
+/**
+ * struct request_desc_header - Descriptor Header common to both UTRD and UTMRD
+ * @dword_0: Descriptor Header DW0
+ * @dword_1: Descriptor Header DW1
+ * @dword_2: Descriptor Header DW2
+ * @dword_3: Descriptor Header DW3
+ */
+struct request_desc_header {
+ __le32 dword_0;
+ __le32 dword_1;
+ __le32 dword_2;
+ __le32 dword_3;
+};
+
+/**
+ * struct utp_transfer_req_desc - UTRD structure
+ * @header: UTRD header DW-0 to DW-3
+ * @command_desc_base_addr_lo: UCD base address low DW-4
+ * @command_desc_base_addr_hi: UCD base address high DW-5
+ * @response_upiu_length: response UPIU length DW-6
+ * @response_upiu_offset: response UPIU offset DW-6
+ * @prd_table_length: Physical region descriptor length DW-7
+ * @prd_table_offset: Physical region descriptor offset DW-7
+ */
+struct utp_transfer_req_desc {
+ /* DW 0-3 */
+ struct request_desc_header header;
+
+	/* DW 4-5 */
+ __le32 command_desc_base_addr_lo;
+ __le32 command_desc_base_addr_hi;
+
+ /* DW 6 */
+ __le16 response_upiu_length;
+ __le16 response_upiu_offset;
+
+ /* DW 7 */
+ __le16 prd_table_length;
+ __le16 prd_table_offset;
+};
+
+/**
+ * struct utp_upiu_header - UPIU header structure
+ * @dword_0: UPIU header DW-0
+ * @dword_1: UPIU header DW-1
+ * @dword_2: UPIU header DW-2
+ */
+struct utp_upiu_header {
+ __be32 dword_0;
+ __be32 dword_1;
+ __be32 dword_2;
+};
+
+/**
+ * struct utp_upiu_query - upiu request buffer structure for
+ * query request.
+ * @opcode: command to perform B-0
+ * @idn: a value that indicates the particular type of data B-1
+ * @index: Index to further identify data B-2
+ * @selector: Index to further identify data B-3
+ * @reserved_osf: spec reserved field B-4,5
+ * @length: number of descriptor bytes to read/write B-6,7
+ * @value: Attribute value to be written DW-5
+ * @reserved: spec reserved DW-6,7
+ */
+struct utp_upiu_query {
+ __u8 opcode;
+ __u8 idn;
+ __u8 index;
+ __u8 selector;
+ __be16 reserved_osf;
+ __be16 length;
+ __be32 value;
+ __be32 reserved[2];
+};
+
+/**
+ * struct utp_upiu_cmd - Command UPIU structure
+ * @exp_data_transfer_len: Expected Data Transfer Length DW-3
+ * @cdb: Command Descriptor Block CDB DW-4 to DW-7
+ */
+struct utp_upiu_cmd {
+ __be32 exp_data_transfer_len;
+ u8 cdb[UFS_CDB_SIZE];
+};
+
+/*
+ * UTMRD structure.
+ */
+struct utp_task_req_desc {
+ /* DW 0-3 */
+ struct request_desc_header header;
+
+ /* DW 4-11 - Task request UPIU structure */
+ struct utp_upiu_header req_header;
+ __be32 input_param1;
+ __be32 input_param2;
+ __be32 input_param3;
+ __be32 __reserved1[2];
+
+ /* DW 12-19 - Task Management Response UPIU structure */
+ struct utp_upiu_header rsp_header;
+ __be32 output_param1;
+ __be32 output_param2;
+ __be32 __reserved2[3];
+};
+
+/**
+ * struct utp_upiu_req - general upiu request structure
+ * @header: UPIU header structure DW-0 to DW-2
+ * @sc: fields structure for scsi command DW-3 to DW-7
+ * @qr: fields structure for query request DW-3 to DW-7
+ * @tr: fields structure for task request DW-3 to DW-7
+ * @uc: four dwords of a UIC command, hosted in a utp_upiu_query
+ */
+struct utp_upiu_req {
+ struct utp_upiu_header header;
+ union {
+ struct utp_upiu_cmd sc;
+ struct utp_upiu_query qr;
+ struct utp_upiu_query tr;
+ /* use utp_upiu_query to host the 4 dwords of uic command */
+ struct utp_upiu_query uc;
+ };
+};
+
+/**
+ * struct utp_cmd_rsp - Response UPIU structure
+ * @residual_transfer_count: Residual transfer count DW-3
+ * @reserved: Reserved double words DW-4 to DW-7
+ * @sense_data_len: Sense data length DW-8 U16
+ * @sense_data: Sense data field DW-8 to DW-12
+ */
+struct utp_cmd_rsp {
+ __be32 residual_transfer_count;
+ __be32 reserved[4];
+ __be16 sense_data_len;
+ u8 sense_data[RESPONSE_UPIU_SENSE_DATA_LENGTH];
+};
+
+/**
+ * struct utp_upiu_rsp - general upiu response structure
+ * @header: UPIU header structure DW-0 to DW-2
+ * @sr: fields structure for scsi command DW-3 to DW-12
+ * @qr: fields structure for query request DW-3 to DW-7
+ */
+struct utp_upiu_rsp {
+ struct utp_upiu_header header;
+ union {
+ struct utp_cmd_rsp sr;
+ struct utp_upiu_query qr;
+ };
+};
+
+#define MAX_MODEL_LEN 16
+/**
+ * struct ufs_dev_desc - ufs device details from the device descriptor
+ *
+ * @wmanufacturerid: device manufacturer ID (wManufacturerID)
+ * @model: device product name string
+ */
+struct ufs_dev_desc {
+ u16 wmanufacturerid;
+ char model[MAX_MODEL_LEN + 1];
+};
+
+/* Device descriptor parameters offsets in bytes */
+enum device_desc_param {
+ DEVICE_DESC_PARAM_LEN = 0x0,
+ DEVICE_DESC_PARAM_TYPE = 0x1,
+ DEVICE_DESC_PARAM_DEVICE_TYPE = 0x2,
+ DEVICE_DESC_PARAM_DEVICE_CLASS = 0x3,
+ DEVICE_DESC_PARAM_DEVICE_SUB_CLASS = 0x4,
+ DEVICE_DESC_PARAM_PRTCL = 0x5,
+ DEVICE_DESC_PARAM_NUM_LU = 0x6,
+ DEVICE_DESC_PARAM_NUM_WLU = 0x7,
+ DEVICE_DESC_PARAM_BOOT_ENBL = 0x8,
+ DEVICE_DESC_PARAM_DESC_ACCSS_ENBL = 0x9,
+ DEVICE_DESC_PARAM_INIT_PWR_MODE = 0xA,
+ DEVICE_DESC_PARAM_HIGH_PR_LUN = 0xB,
+ DEVICE_DESC_PARAM_SEC_RMV_TYPE = 0xC,
+ DEVICE_DESC_PARAM_SEC_LU = 0xD,
+ DEVICE_DESC_PARAM_BKOP_TERM_LT = 0xE,
+ DEVICE_DESC_PARAM_ACTVE_ICC_LVL = 0xF,
+ DEVICE_DESC_PARAM_SPEC_VER = 0x10,
+ DEVICE_DESC_PARAM_MANF_DATE = 0x12,
+ DEVICE_DESC_PARAM_MANF_NAME = 0x14,
+ DEVICE_DESC_PARAM_PRDCT_NAME = 0x15,
+ DEVICE_DESC_PARAM_SN = 0x16,
+ DEVICE_DESC_PARAM_OEM_ID = 0x17,
+ DEVICE_DESC_PARAM_MANF_ID = 0x18,
+ DEVICE_DESC_PARAM_UD_OFFSET = 0x1A,
+ DEVICE_DESC_PARAM_UD_LEN = 0x1B,
+ DEVICE_DESC_PARAM_RTT_CAP = 0x1C,
+ DEVICE_DESC_PARAM_FRQ_RTC = 0x1D,
+ DEVICE_DESC_PARAM_UFS_FEAT = 0x1F,
+ DEVICE_DESC_PARAM_FFU_TMT = 0x20,
+ DEVICE_DESC_PARAM_Q_DPTH = 0x21,
+ DEVICE_DESC_PARAM_DEV_VER = 0x22,
+ DEVICE_DESC_PARAM_NUM_SEC_WPA = 0x24,
+ DEVICE_DESC_PARAM_PSA_MAX_DATA = 0x25,
+ DEVICE_DESC_PARAM_PSA_TMT = 0x29,
+ DEVICE_DESC_PARAM_PRDCT_REV = 0x2A,
+};
+
+struct ufs_hba;
+
+enum {
+ UFSHCD_MAX_CHANNEL = 0,
+ UFSHCD_MAX_ID = 1,
+};
+
+enum dev_cmd_type {
+ DEV_CMD_TYPE_NOP = 0x0,
+ DEV_CMD_TYPE_QUERY = 0x1,
+};
+
+/**
+ * struct uic_command - UIC command structure
+ * @command: UIC command
+ * @argument1: UIC command argument 1
+ * @argument2: UIC command argument 2
+ * @argument3: UIC command argument 3
+ * @cmd_active: Indicate if UIC command is outstanding
+ * @result: UIC command result
+ */
+struct uic_command {
+ u32 command;
+ u32 argument1;
+ u32 argument2;
+ u32 argument3;
+ int cmd_active;
+ int result;
+};
+
+/* GenSelectorIndex calculation macros for M-PHY attributes */
+#define UIC_ARG_MPHY_TX_GEN_SEL_INDEX(lane) (lane)
+#define UIC_ARG_MPHY_RX_GEN_SEL_INDEX(lane) (PA_MAXDATALANES + (lane))
+
+#define UIC_ARG_MIB_SEL(attr, sel) ((((attr) & 0xFFFF) << 16) |\
+ ((sel) & 0xFFFF))
+#define UIC_ARG_MIB(attr) UIC_ARG_MIB_SEL(attr, 0)
+#define UIC_ARG_ATTR_TYPE(t) (((t) & 0xFF) << 16)
+#define UIC_GET_ATTR_ID(v) (((v) >> 16) & 0xFFFF)
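+/*
+ * For example, UIC_ARG_MIB_SEL(RX_HSGEAR, UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0))
+ * selects the RX_HSGEAR M-PHY attribute on the first RX lane, while
+ * UIC_ARG_MIB(PA_PWRMODE) addresses a PHY adapter attribute with
+ * GenSelectorIndex 0.
+ */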
+
+/* Link Status*/
+enum link_status {
+ UFSHCD_LINK_IS_DOWN = 1,
+ UFSHCD_LINK_IS_UP = 2,
+};
+
+/* UIC Commands */
+enum uic_cmd_dme {
+ UIC_CMD_DME_GET = 0x01,
+ UIC_CMD_DME_SET = 0x02,
+ UIC_CMD_DME_PEER_GET = 0x03,
+ UIC_CMD_DME_PEER_SET = 0x04,
+ UIC_CMD_DME_POWERON = 0x10,
+ UIC_CMD_DME_POWEROFF = 0x11,
+ UIC_CMD_DME_ENABLE = 0x12,
+ UIC_CMD_DME_RESET = 0x14,
+ UIC_CMD_DME_END_PT_RST = 0x15,
+ UIC_CMD_DME_LINK_STARTUP = 0x16,
+ UIC_CMD_DME_HIBER_ENTER = 0x17,
+ UIC_CMD_DME_HIBER_EXIT = 0x18,
+ UIC_CMD_DME_TEST_MODE = 0x1A,
+};
+
+/* UIC Config result code / Generic error code */
+enum {
+ UIC_CMD_RESULT_SUCCESS = 0x00,
+ UIC_CMD_RESULT_INVALID_ATTR = 0x01,
+ UIC_CMD_RESULT_FAILURE = 0x01,
+ UIC_CMD_RESULT_INVALID_ATTR_VALUE = 0x02,
+ UIC_CMD_RESULT_READ_ONLY_ATTR = 0x03,
+ UIC_CMD_RESULT_WRITE_ONLY_ATTR = 0x04,
+ UIC_CMD_RESULT_BAD_INDEX = 0x05,
+ UIC_CMD_RESULT_LOCKED_ATTR = 0x06,
+ UIC_CMD_RESULT_BAD_TEST_FEATURE_INDEX = 0x07,
+ UIC_CMD_RESULT_PEER_COMM_FAILURE = 0x08,
+ UIC_CMD_RESULT_BUSY = 0x09,
+ UIC_CMD_RESULT_DME_FAILURE = 0x0A,
+};
+
+#define MASK_UIC_COMMAND_RESULT 0xFF
+
+/* Host <-> Device UniPro Link state */
+enum uic_link_state {
+ UIC_LINK_OFF_STATE = 0, /* Link powered down or disabled */
+ UIC_LINK_ACTIVE_STATE = 1, /* Link is in Fast/Slow/Sleep state */
+ UIC_LINK_HIBERN8_STATE = 2, /* Link is in Hibernate state */
+};
+
+/* UIC command interfaces for DME primitives */
+#define DME_LOCAL 0
+#define DME_PEER 1
+#define ATTR_SET_NOR 0 /* NORMAL */
+#define ATTR_SET_ST 1 /* STATIC */
+
+int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
+ u8 attr_set, u32 mib_val, u8 peer);
+int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
+ u32 *mib_val, u8 peer);
+
+static inline int ufshcd_dme_set(struct ufs_hba *hba, u32 attr_sel,
+ u32 mib_val)
+{
+ return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR,
+ mib_val, DME_LOCAL);
+}
+
+static inline int ufshcd_dme_get(struct ufs_hba *hba,
+ u32 attr_sel, u32 *mib_val)
+{
+ return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_LOCAL);
+}
+
+static inline int ufshcd_dme_peer_get(struct ufs_hba *hba,
+ u32 attr_sel, u32 *mib_val)
+{
+ return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_PEER);
+}
+
+static inline int ufshcd_dme_peer_set(struct ufs_hba *hba, u32 attr_sel,
+ u32 mib_val)
+{
+ return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR,
+ mib_val, DME_PEER);
+}
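+
+/*
+ * Example (illustrative only): a caller could read the locally connected
+ * RX data lanes and the peer's maximum HS gear like this:
+ *
+ *	u32 lanes, gear;
+ *
+ *	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES), &lanes);
+ *	ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &gear);
+ */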
+
+/**
+ * struct ufs_query_req - parameters for building a query request
+ * @query_func: UPIU header query function
+ * @upiu_req: the query request data
+ */
+struct ufs_query_req {
+ u8 query_func;
+ struct utp_upiu_query upiu_req;
+};
+
+/**
+ * struct ufs_query_res - UPIU QUERY response
+ * @response: device response code
+ * @upiu_res: query response data
+ */
+struct ufs_query_res {
+ u8 response;
+ struct utp_upiu_query upiu_res;
+};
+
+/**
+ * struct ufs_query - holds relevant data structures for query request
+ * @request: request upiu and function
+ * @descriptor: buffer for sending/receiving descriptor
+ * @response: response upiu and response
+ */
+struct ufs_query {
+ struct ufs_query_req request;
+ u8 *descriptor;
+ struct ufs_query_res response;
+};
+
+/**
+ * struct ufs_dev_cmd - all associated fields with device management commands
+ * @type: device management command type - Query, NOP OUT
+ * @query: query request/response data for the command
+ */
+struct ufs_dev_cmd {
+ enum dev_cmd_type type;
+ struct ufs_query query;
+};
+
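+/**
+ * struct ufs_hba_ops - optional platform (glue driver) hooks
+ * @init: one-time host specific initialisation during probe
+ * @hce_enable_notify: called with PRE_CHANGE/POST_CHANGE around host
+ *		       controller enable
+ * @link_startup_notify: called with PRE_CHANGE/POST_CHANGE around UniPro
+ *			 link startup
+ * @phy_initialization: host specific UFS PHY initialisation
+ */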
+struct ufs_hba_ops {
+ int (*init)(struct ufs_hba *hba);
+ int (*hce_enable_notify)(struct ufs_hba *hba,
+ enum ufs_notify_change_status);
+ int (*link_startup_notify)(struct ufs_hba *hba,
+ enum ufs_notify_change_status);
+ int (*phy_initialization)(struct ufs_hba *hba);
+};
+
+struct ufs_hba {
+ struct udevice *dev;
+ void __iomem *mmio_base;
+ struct ufs_hba_ops *ops;
+ struct ufs_desc_size desc_size;
+ u32 capabilities;
+ u32 version;
+ u32 intr_mask;
+ u32 quirks;
+/*
+ * If the UFS host controller has trouble processing LCC (Line Control
+ * Command) frames coming from the device, enable this quirk. When it is
+ * enabled, the host controller driver disables LCC transmission on the
+ * UFS device by clearing the device's TX_LCC_ENABLE attribute.
+ */
+#define UFSHCD_QUIRK_BROKEN_LCC 0x1
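+/*
+ * A glue driver would typically set this from its init() hook, e.g.
+ *	hba->quirks |= UFSHCD_QUIRK_BROKEN_LCC;
+ */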
+
+ /* Virtual memory reference */
+ struct utp_transfer_cmd_desc *ucdl;
+ struct utp_transfer_req_desc *utrdl;
+	/* TODO: Add Task Management Support */
+ struct utp_task_req_desc *utmrdl;
+
+ struct utp_upiu_req *ucd_req_ptr;
+ struct utp_upiu_rsp *ucd_rsp_ptr;
+ struct ufshcd_sg_entry *ucd_prdt_ptr;
+
+ /* Power Mode information */
+ enum ufs_dev_pwr_mode curr_dev_pwr_mode;
+ struct ufs_pa_layer_attr pwr_info;
+ struct ufs_pwr_mode_info max_pwr_info;
+
+ struct ufs_dev_cmd dev_cmd;
+};
+
+static inline int ufshcd_ops_init(struct ufs_hba *hba)
+{
+ if (hba->ops && hba->ops->init)
+ return hba->ops->init(hba);
+
+ return 0;
+}
+
+static inline int ufshcd_ops_hce_enable_notify(struct ufs_hba *hba,
+					       enum ufs_notify_change_status status)
+{
+ if (hba->ops && hba->ops->hce_enable_notify)
+ return hba->ops->hce_enable_notify(hba, status);
+
+ return 0;
+}
+
+static inline int ufshcd_ops_link_startup_notify(struct ufs_hba *hba,
+						 enum ufs_notify_change_status status)
+{
+ if (hba->ops && hba->ops->link_startup_notify)
+ return hba->ops->link_startup_notify(hba, status);
+
+ return 0;
+}
+
+/* Controller UFSHCI version */
+enum {
+ UFSHCI_VERSION_10 = 0x00010000, /* 1.0 */
+ UFSHCI_VERSION_11 = 0x00010100, /* 1.1 */
+ UFSHCI_VERSION_20 = 0x00000200, /* 2.0 */
+ UFSHCI_VERSION_21 = 0x00000210, /* 2.1 */
+};
+
+/* Interrupt disable masks */
+enum {
+ /* Interrupt disable mask for UFSHCI v1.0 */
+ INTERRUPT_MASK_ALL_VER_10 = 0x30FFF,
+ INTERRUPT_MASK_RW_VER_10 = 0x30000,
+
+ /* Interrupt disable mask for UFSHCI v1.1 */
+ INTERRUPT_MASK_ALL_VER_11 = 0x31FFF,
+
+ /* Interrupt disable mask for UFSHCI v2.1 */
+ INTERRUPT_MASK_ALL_VER_21 = 0x71FFF,
+};
+
+/* UFSHCI Registers */
+enum {
+ REG_CONTROLLER_CAPABILITIES = 0x00,
+ REG_UFS_VERSION = 0x08,
+ REG_CONTROLLER_DEV_ID = 0x10,
+ REG_CONTROLLER_PROD_ID = 0x14,
+ REG_AUTO_HIBERNATE_IDLE_TIMER = 0x18,
+ REG_INTERRUPT_STATUS = 0x20,
+ REG_INTERRUPT_ENABLE = 0x24,
+ REG_CONTROLLER_STATUS = 0x30,
+ REG_CONTROLLER_ENABLE = 0x34,
+ REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER = 0x38,
+ REG_UIC_ERROR_CODE_DATA_LINK_LAYER = 0x3C,
+ REG_UIC_ERROR_CODE_NETWORK_LAYER = 0x40,
+ REG_UIC_ERROR_CODE_TRANSPORT_LAYER = 0x44,
+ REG_UIC_ERROR_CODE_DME = 0x48,
+ REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL = 0x4C,
+ REG_UTP_TRANSFER_REQ_LIST_BASE_L = 0x50,
+ REG_UTP_TRANSFER_REQ_LIST_BASE_H = 0x54,
+ REG_UTP_TRANSFER_REQ_DOOR_BELL = 0x58,
+ REG_UTP_TRANSFER_REQ_LIST_CLEAR = 0x5C,
+ REG_UTP_TRANSFER_REQ_LIST_RUN_STOP = 0x60,
+ REG_UTP_TASK_REQ_LIST_BASE_L = 0x70,
+ REG_UTP_TASK_REQ_LIST_BASE_H = 0x74,
+ REG_UTP_TASK_REQ_DOOR_BELL = 0x78,
+ REG_UTP_TASK_REQ_LIST_CLEAR = 0x7C,
+ REG_UTP_TASK_REQ_LIST_RUN_STOP = 0x80,
+ REG_UIC_COMMAND = 0x90,
+ REG_UIC_COMMAND_ARG_1 = 0x94,
+ REG_UIC_COMMAND_ARG_2 = 0x98,
+ REG_UIC_COMMAND_ARG_3 = 0x9C,
+
+ UFSHCI_REG_SPACE_SIZE = 0xA0,
+
+ REG_UFS_CCAP = 0x100,
+ REG_UFS_CRYPTOCAP = 0x104,
+
+ UFSHCI_CRYPTO_REG_SPACE_SIZE = 0x400,
+};
+
+/* Controller capability masks */
+enum {
+ MASK_TRANSFER_REQUESTS_SLOTS = 0x0000001F,
+ MASK_TASK_MANAGEMENT_REQUEST_SLOTS = 0x00070000,
+ MASK_AUTO_HIBERN8_SUPPORT = 0x00800000,
+ MASK_64_ADDRESSING_SUPPORT = 0x01000000,
+ MASK_OUT_OF_ORDER_DATA_DELIVERY_SUPPORT = 0x02000000,
+ MASK_UIC_DME_TEST_MODE_SUPPORT = 0x04000000,
+};
+
+/* Interrupt Status 20h */
+#define UTP_TRANSFER_REQ_COMPL 0x1
+#define UIC_DME_END_PT_RESET 0x2
+#define UIC_ERROR 0x4
+#define UIC_TEST_MODE 0x8
+#define UIC_POWER_MODE 0x10
+#define UIC_HIBERNATE_EXIT 0x20
+#define UIC_HIBERNATE_ENTER 0x40
+#define UIC_LINK_LOST 0x80
+#define UIC_LINK_STARTUP 0x100
+#define UTP_TASK_REQ_COMPL 0x200
+#define UIC_COMMAND_COMPL 0x400
+#define DEVICE_FATAL_ERROR 0x800
+#define CONTROLLER_FATAL_ERROR 0x10000
+#define SYSTEM_BUS_FATAL_ERROR 0x20000
+
+#define UFSHCD_UIC_PWR_MASK (UIC_HIBERNATE_ENTER |\
+ UIC_HIBERNATE_EXIT |\
+ UIC_POWER_MODE)
+
+#define UFSHCD_UIC_MASK (UIC_COMMAND_COMPL | UIC_POWER_MODE)
+
+#define UFSHCD_ERROR_MASK (UIC_ERROR |\
+ DEVICE_FATAL_ERROR |\
+ CONTROLLER_FATAL_ERROR |\
+ SYSTEM_BUS_FATAL_ERROR)
+
+#define INT_FATAL_ERRORS (DEVICE_FATAL_ERROR |\
+ CONTROLLER_FATAL_ERROR |\
+ SYSTEM_BUS_FATAL_ERROR)
+
+/* HCE - Host Controller Enable 34h */
+#define CONTROLLER_ENABLE 0x1
+#define CONTROLLER_DISABLE 0x0
+/* HCS - Host Controller Status 30h */
+#define DEVICE_PRESENT 0x1
+#define UTP_TRANSFER_REQ_LIST_READY 0x2
+#define UTP_TASK_REQ_LIST_READY 0x4
+#define UIC_COMMAND_READY 0x8
+#define HOST_ERROR_INDICATOR 0x10
+#define DEVICE_ERROR_INDICATOR 0x20
+#define UIC_POWER_MODE_CHANGE_REQ_STATUS_MASK UFS_MASK(0x7, 8)
+
+#define UFSHCD_STATUS_READY (UTP_TRANSFER_REQ_LIST_READY |\
+ UTP_TASK_REQ_LIST_READY |\
+ UIC_COMMAND_READY)
+
+enum {
+ PWR_OK = 0x0,
+ PWR_LOCAL = 0x01,
+ PWR_REMOTE = 0x02,
+ PWR_BUSY = 0x03,
+ PWR_ERROR_CAP = 0x04,
+ PWR_FATAL_ERROR = 0x05,
+};
+
+/* UICCMD - UIC Command */
+#define COMMAND_OPCODE_MASK 0xFF
+#define GEN_SELECTOR_INDEX_MASK 0xFFFF
+
+#define MIB_ATTRIBUTE_MASK UFS_MASK(0xFFFF, 16)
+#define RESET_LEVEL 0xFF
+
+#define ATTR_SET_TYPE_MASK UFS_MASK(0xFF, 16)
+#define CONFIG_RESULT_CODE_MASK 0xFF
+#define GENERIC_ERROR_CODE_MASK 0xFF
+
+#define ufshcd_writel(hba, val, reg) \
+ writel((val), (hba)->mmio_base + (reg))
+#define ufshcd_readl(hba, reg) \
+ readl((hba)->mmio_base + (reg))
+
+/* UTRLRSR - UTP Transfer Request Run-Stop Register 60h */
+#define UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT 0x1
+
+/* UTMRLRSR - UTP Task Management Request Run-Stop Register 80h */
+#define UTP_TASK_REQ_LIST_RUN_STOP_BIT 0x1
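+
+/*
+ * For example, the driver starts UTP transfer request processing with
+ *	ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
+ *		      REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
+ */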
+
+int ufshcd_probe(struct udevice *dev, struct ufs_hba_ops *hba_ops);
+
+#endif
diff --git a/drivers/ufs/unipro.h b/drivers/ufs/unipro.h
new file mode 100644
index 00000000000..b30b17fa5ad
--- /dev/null
+++ b/drivers/ufs/unipro.h
@@ -0,0 +1,270 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+#ifndef _UNIPRO_H_
+#define _UNIPRO_H_
+
+/*
+ * M-TX Configuration Attributes
+ */
+#define TX_HIBERN8TIME_CAPABILITY 0x000F
+#define TX_MODE 0x0021
+#define TX_HSRATE_SERIES 0x0022
+#define TX_HSGEAR 0x0023
+#define TX_PWMGEAR 0x0024
+#define TX_AMPLITUDE 0x0025
+#define TX_HS_SLEWRATE 0x0026
+#define TX_SYNC_SOURCE 0x0027
+#define TX_HS_SYNC_LENGTH 0x0028
+#define TX_HS_PREPARE_LENGTH 0x0029
+#define TX_LS_PREPARE_LENGTH 0x002A
+#define TX_HIBERN8_CONTROL 0x002B
+#define TX_LCC_ENABLE 0x002C
+#define TX_PWM_BURST_CLOSURE_EXTENSION 0x002D
+#define TX_BYPASS_8B10B_ENABLE 0x002E
+#define TX_DRIVER_POLARITY 0x002F
+#define TX_HS_UNTERMINATED_LINE_DRIVE_ENABLE 0x0030
+#define TX_LS_TERMINATED_LINE_DRIVE_ENABLE 0x0031
+#define TX_LCC_SEQUENCER 0x0032
+#define TX_MIN_ACTIVATETIME 0x0033
+#define TX_PWM_G6_G7_SYNC_LENGTH 0x0034
+#define TX_REFCLKFREQ 0x00EB
+#define TX_CFGCLKFREQVAL 0x00EC
+#define CFGEXTRATTR 0x00F0
+#define DITHERCTRL2 0x00F1
+
+/*
+ * M-RX Configuration Attributes
+ */
+#define RX_MODE 0x00A1
+#define RX_HSRATE_SERIES 0x00A2
+#define RX_HSGEAR 0x00A3
+#define RX_PWMGEAR 0x00A4
+#define RX_LS_TERMINATED_ENABLE 0x00A5
+#define RX_HS_UNTERMINATED_ENABLE 0x00A6
+#define RX_ENTER_HIBERN8 0x00A7
+#define RX_BYPASS_8B10B_ENABLE 0x00A8
+#define RX_TERMINATION_FORCE_ENABLE 0x0089
+#define RX_MIN_ACTIVATETIME_CAPABILITY 0x008F
+#define RX_HIBERN8TIME_CAPABILITY 0x0092
+#define RX_REFCLKFREQ 0x00EB
+#define RX_CFGCLKFREQVAL 0x00EC
+#define CFGWIDEINLN 0x00F0
+#define CFGRXCDR8 0x00BA
+#define ENARXDIRECTCFG4 0x00F2
+#define CFGRXOVR8 0x00BD
+#define RXDIRECTCTRL2 0x00C7
+#define ENARXDIRECTCFG3 0x00F3
+#define RXCALCTRL 0x00B4
+#define ENARXDIRECTCFG2 0x00F4
+#define CFGRXOVR4 0x00E9
+#define RXSQCTRL 0x00B5
+#define CFGRXOVR6 0x00BF
+
+#define is_mphy_tx_attr(attr) ((attr) < RX_MODE)
+#define RX_MIN_ACTIVATETIME_UNIT_US 100
+#define HIBERN8TIME_UNIT_US 100
+
+/*
+ * Common Block Attributes
+ */
+#define UNIPRO_CB_OFFSET(x) (0x8000 | (x))
+
+#define TX_GLOBALHIBERNATE UNIPRO_CB_OFFSET(0x002B)
+#define REFCLKMODE UNIPRO_CB_OFFSET(0x00BF)
+#define DIRECTCTRL19 UNIPRO_CB_OFFSET(0x00CD)
+#define DIRECTCTRL10 UNIPRO_CB_OFFSET(0x00E6)
+#define CDIRECTCTRL6 UNIPRO_CB_OFFSET(0x00EA)
+#define RTOBSERVESELECT UNIPRO_CB_OFFSET(0x00F0)
+#define CBDIVFACTOR UNIPRO_CB_OFFSET(0x00F1)
+#define CBDCOCTRL5 UNIPRO_CB_OFFSET(0x00F3)
+#define CBPRGPLL2 UNIPRO_CB_OFFSET(0x00F8)
+#define CBPRGTUNING UNIPRO_CB_OFFSET(0x00FB)
+
+/*
+ * PHY Adapter attributes
+ */
+#define PA_ACTIVETXDATALANES 0x1560
+#define PA_ACTIVERXDATALANES 0x1580
+#define PA_TXTRAILINGCLOCKS 0x1564
+#define PA_PHY_TYPE 0x1500
+#define PA_AVAILTXDATALANES 0x1520
+#define PA_AVAILRXDATALANES 0x1540
+#define PA_MINRXTRAILINGCLOCKS 0x1543
+#define PA_TXPWRSTATUS 0x1567
+#define PA_RXPWRSTATUS 0x1582
+#define PA_TXFORCECLOCK 0x1562
+#define PA_TXPWRMODE 0x1563
+#define PA_LEGACYDPHYESCDL 0x1570
+#define PA_MAXTXSPEEDFAST 0x1521
+#define PA_MAXTXSPEEDSLOW 0x1522
+#define PA_MAXRXSPEEDFAST 0x1541
+#define PA_MAXRXSPEEDSLOW 0x1542
+#define PA_TXLINKSTARTUPHS 0x1544
+#define PA_LOCAL_TX_LCC_ENABLE 0x155E
+#define PA_TXSPEEDFAST 0x1565
+#define PA_TXSPEEDSLOW 0x1566
+#define PA_REMOTEVERINFO 0x15A0
+#define PA_TXGEAR 0x1568
+#define PA_TXTERMINATION 0x1569
+#define PA_HSSERIES 0x156A
+#define PA_PWRMODE 0x1571
+#define PA_RXGEAR 0x1583
+#define PA_RXTERMINATION 0x1584
+#define PA_MAXRXPWMGEAR 0x1586
+#define PA_MAXRXHSGEAR 0x1587
+#define PA_RXHSUNTERMCAP 0x15A5
+#define PA_RXLSTERMCAP 0x15A6
+#define PA_GRANULARITY 0x15AA
+#define PA_PACPREQTIMEOUT 0x1590
+#define PA_PACPREQEOBTIMEOUT 0x1591
+#define PA_HIBERN8TIME 0x15A7
+#define PA_LOCALVERINFO 0x15A9
+#define PA_TACTIVATE 0x15A8
+#define PA_PACPFRAMECOUNT 0x15C0
+#define PA_PACPERRORCOUNT 0x15C1
+#define PA_PHYTESTCONTROL 0x15C2
+#define PA_PWRMODEUSERDATA0 0x15B0
+#define PA_PWRMODEUSERDATA1 0x15B1
+#define PA_PWRMODEUSERDATA2 0x15B2
+#define PA_PWRMODEUSERDATA3 0x15B3
+#define PA_PWRMODEUSERDATA4 0x15B4
+#define PA_PWRMODEUSERDATA5 0x15B5
+#define PA_PWRMODEUSERDATA6 0x15B6
+#define PA_PWRMODEUSERDATA7 0x15B7
+#define PA_PWRMODEUSERDATA8 0x15B8
+#define PA_PWRMODEUSERDATA9 0x15B9
+#define PA_PWRMODEUSERDATA10 0x15BA
+#define PA_PWRMODEUSERDATA11 0x15BB
+#define PA_CONNECTEDTXDATALANES 0x1561
+#define PA_CONNECTEDRXDATALANES 0x1581
+#define PA_LOGICALLANEMAP 0x15A1
+#define PA_SLEEPNOCONFIGTIME 0x15A2
+#define PA_STALLNOCONFIGTIME 0x15A3
+#define PA_SAVECONFIGTIME 0x15A4
+
+#define PA_TACTIVATE_TIME_UNIT_US 10
+#define PA_HIBERN8_TIME_UNIT_US 100
+
+/* Other attributes */
+#define VS_MPHYCFGUPDT 0xD085
+#define VS_DEBUGOMC 0xD09E
+#define VS_POWERSTATE 0xD083
+
+#define PA_GRANULARITY_MIN_VAL 1
+#define PA_GRANULARITY_MAX_VAL 6
+
+/* PHY Adapter Protocol Constants */
+#define PA_MAXDATALANES 4
+
+/* PA power modes */
+enum {
+ FAST_MODE = 1,
+ SLOW_MODE = 2,
+ FASTAUTO_MODE = 4,
+ SLOWAUTO_MODE = 5,
+ UNCHANGED = 7,
+};
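+
+/*
+ * When changing the power mode, the PA_PWRMODE attribute carries the RX
+ * mode in bits 7:4 and the TX mode in bits 3:0, e.g.
+ * (FAST_MODE << 4) | FAST_MODE requests fast mode in both directions.
+ */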
+
+/* PA TX/RX Frequency Series */
+enum {
+ PA_HS_MODE_A = 1,
+ PA_HS_MODE_B = 2,
+};
+
+enum ufs_pwm_gear_tag {
+ UFS_PWM_DONT_CHANGE, /* Don't change Gear */
+ UFS_PWM_G1, /* PWM Gear 1 (default for reset) */
+ UFS_PWM_G2, /* PWM Gear 2 */
+ UFS_PWM_G3, /* PWM Gear 3 */
+ UFS_PWM_G4, /* PWM Gear 4 */
+ UFS_PWM_G5, /* PWM Gear 5 */
+ UFS_PWM_G6, /* PWM Gear 6 */
+ UFS_PWM_G7, /* PWM Gear 7 */
+};
+
+enum ufs_hs_gear_tag {
+ UFS_HS_DONT_CHANGE, /* Don't change Gear */
+ UFS_HS_G1, /* HS Gear 1 (default for reset) */
+ UFS_HS_G2, /* HS Gear 2 */
+ UFS_HS_G3, /* HS Gear 3 */
+};
+
+enum ufs_unipro_ver {
+ UFS_UNIPRO_VER_RESERVED = 0,
+ UFS_UNIPRO_VER_1_40 = 1, /* UniPro version 1.40 */
+ UFS_UNIPRO_VER_1_41 = 2, /* UniPro version 1.41 */
+ UFS_UNIPRO_VER_1_6 = 3, /* UniPro version 1.6 */
+ UFS_UNIPRO_VER_MAX = 4, /* UniPro unsupported version */
+ /* UniPro version field mask in PA_LOCALVERINFO */
+ UFS_UNIPRO_VER_MASK = 0xF,
+};
+
+/*
+ * Data Link Layer Attributes
+ */
+#define DL_TC0TXFCTHRESHOLD 0x2040
+#define DL_FC0PROTTIMEOUTVAL 0x2041
+#define DL_TC0REPLAYTIMEOUTVAL 0x2042
+#define DL_AFC0REQTIMEOUTVAL 0x2043
+#define DL_AFC0CREDITTHRESHOLD 0x2044
+#define DL_TC0OUTACKTHRESHOLD 0x2045
+#define DL_TC1TXFCTHRESHOLD 0x2060
+#define DL_FC1PROTTIMEOUTVAL 0x2061
+#define DL_TC1REPLAYTIMEOUTVAL 0x2062
+#define DL_AFC1REQTIMEOUTVAL 0x2063
+#define DL_AFC1CREDITTHRESHOLD 0x2064
+#define DL_TC1OUTACKTHRESHOLD 0x2065
+#define DL_TXPREEMPTIONCAP 0x2000
+#define DL_TC0TXMAXSDUSIZE 0x2001
+#define DL_TC0RXINITCREDITVAL 0x2002
+#define DL_TC0TXBUFFERSIZE 0x2005
+#define DL_PEERTC0PRESENT 0x2046
+#define DL_PEERTC0RXINITCREVAL 0x2047
+#define DL_TC1TXMAXSDUSIZE 0x2003
+#define DL_TC1RXINITCREDITVAL 0x2004
+#define DL_TC1TXBUFFERSIZE 0x2006
+#define DL_PEERTC1PRESENT 0x2066
+#define DL_PEERTC1RXINITCREVAL 0x2067
+
+/*
+ * Network Layer Attributes
+ */
+#define N_DEVICEID 0x3000
+#define N_DEVICEID_VALID 0x3001
+#define N_TC0TXMAXSDUSIZE 0x3020
+#define N_TC1TXMAXSDUSIZE 0x3021
+
+/*
+ * Transport Layer Attributes
+ */
+#define T_NUMCPORTS 0x4000
+#define T_NUMTESTFEATURES 0x4001
+#define T_CONNECTIONSTATE 0x4020
+#define T_PEERDEVICEID 0x4021
+#define T_PEERCPORTID 0x4022
+#define T_TRAFFICCLASS 0x4023
+#define T_PROTOCOLID 0x4024
+#define T_CPORTFLAGS 0x4025
+#define T_TXTOKENVALUE 0x4026
+#define T_RXTOKENVALUE 0x4027
+#define T_LOCALBUFFERSPACE 0x4028
+#define T_PEERBUFFERSPACE 0x4029
+#define T_CREDITSTOSEND 0x402A
+#define T_CPORTMODE 0x402B
+#define T_TC0TXMAXSDUSIZE 0x4060
+#define T_TC1TXMAXSDUSIZE 0x4061
+
+#ifdef FALSE
+#undef FALSE
+#endif
+
+#ifdef TRUE
+#undef TRUE
+#endif
+
+/* Boolean attribute values */
+enum {
+ FALSE = 0,
+ TRUE,
+};
+
+#endif /* _UNIPRO_H_ */
diff --git a/include/dm/uclass-id.h b/include/dm/uclass-id.h
index f7f323752c2..0c563d898be 100644
--- a/include/dm/uclass-id.h
+++ b/include/dm/uclass-id.h
@@ -102,6 +102,7 @@ enum uclass_id {
UCLASS_THERMAL, /* Thermal sensor */
UCLASS_TIMER, /* Timer device */
UCLASS_TPM, /* Trusted Platform Module TIS interface */
+ UCLASS_UFS, /* Universal Flash Storage */
UCLASS_USB, /* USB bus */
UCLASS_USB_DEV_GENERIC, /* USB generic device */
UCLASS_USB_HUB, /* USB hub */
diff --git a/include/ufs.h b/include/ufs.h
new file mode 100644
index 00000000000..0592a763dd9
--- /dev/null
+++ b/include/ufs.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+#ifndef _UFS_H
+#define _UFS_H
+/**
+ * ufs_probe() - initialize all devices in the UFS uclass
+ *
+ * @return 0 if Ok, -ve on error
+ */
+int ufs_probe(void);
+
+/**
+ * ufs_probe_dev() - initialize a particular device in the UFS uclass
+ *
+ * @index: index in the uclass sequence
+ *
+ * @return 0 if successfully probed, -ve on error
+ */
+int ufs_probe_dev(int index);
+
+/**
+ * ufs_scsi_bind() - Create a new scsi device as a child of the UFS device and
+ * bind it to the ufs_scsi driver
+ * @ufs_dev: UFS device
+ * @scsi_devp: Pointer to scsi device
+ *
+ * @return 0 if Ok, -ve on error
+ */
+int ufs_scsi_bind(struct udevice *ufs_dev, struct udevice **scsi_devp);
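+
+/*
+ * Typical usage (illustration only): call ufs_probe_dev(0) to bring up
+ * the first UFS controller; its logical units are then reachable through
+ * the ufs_scsi SCSI host created by ufs_scsi_bind(), e.g. via the
+ * generic "scsi scan" path.
+ */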
+#endif