Diffstat (limited to 'drivers/ufs')
-rw-r--r--  drivers/ufs/core/ufshcd-priv.h   |   6
-rw-r--r--  drivers/ufs/core/ufshcd.c        |  86
-rw-r--r--  drivers/ufs/host/Kconfig         |  12
-rw-r--r--  drivers/ufs/host/Makefile        |   1
-rw-r--r--  drivers/ufs/host/ufs-exynos.c    | 182
-rw-r--r--  drivers/ufs/host/ufs-exynos.h    |   1
-rw-r--r--  drivers/ufs/host/ufs-mediatek.c  | 324
-rw-r--r--  drivers/ufs/host/ufs-mediatek.h  |  74
-rw-r--r--  drivers/ufs/host/ufs-qcom.c      |  23
-rw-r--r--  drivers/ufs/host/ufs-renesas.c   | 412
-rw-r--r--  drivers/ufs/host/ufshcd-pci.c    |   1
-rw-r--r--  drivers/ufs/host/ufshcd-pltfrm.c |  15
-rw-r--r--  drivers/ufs/host/ufshcd-pltfrm.h |   6
13 files changed, 1007 insertions, 136 deletions
diff --git a/drivers/ufs/core/ufshcd-priv.h b/drivers/ufs/core/ufshcd-priv.h
index ffb01fc6de75..8f67db202d7b 100644
--- a/drivers/ufs/core/ufshcd-priv.h
+++ b/drivers/ufs/core/ufshcd-priv.h
@@ -215,7 +215,7 @@ static inline void ufshcd_vops_config_scaling_param(struct ufs_hba *hba,
 		hba->vops->config_scaling_param(hba, p, data);
 }
 
-extern struct ufs_pm_lvl_states ufs_pm_lvl_states[];
+extern const struct ufs_pm_lvl_states ufs_pm_lvl_states[];
 
 /**
  * ufshcd_scsi_to_upiu_lun - maps scsi LUN to UPIU LUN
@@ -234,8 +234,8 @@ static inline u8 ufshcd_scsi_to_upiu_lun(unsigned int scsi_lun)
 
 int __ufshcd_write_ee_control(struct ufs_hba *hba, u32 ee_ctrl_mask);
 int ufshcd_write_ee_control(struct ufs_hba *hba);
-int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask, u16 *other_mask,
-			     u16 set, u16 clr);
+int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask,
+			     const u16 *other_mask, u16 set, u16 clr);
 
 static inline int ufshcd_update_ee_drv_mask(struct ufs_hba *hba,
 					    u16 set, u16 clr)
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 2b40174e93ac..3bc0709a5dc2 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -64,9 +64,6 @@
 /* maximum number of link-startup retries */
 #define DME_LINKSTARTUP_RETRIES 3
 
-/* Maximum retries for Hibern8 enter */
-#define UIC_HIBERN8_ENTER_RETRIES 3
-
 /* maximum number of reset retries before giving up */
 #define MAX_HOST_RESET_RETRIES 5
 
@@ -175,7 +172,7 @@ enum {
 #define ufshcd_clear_eh_in_progress(h) \
 	((h)->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)
 
-struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
+const struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
 	[UFS_PM_LVL_0] = {UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
 	[UFS_PM_LVL_1] = {UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
 	[UFS_PM_LVL_2] = {UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
@@ -363,7 +360,7 @@ static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
 }
 
 static void ufshcd_add_uic_command_trace(struct ufs_hba *hba,
-					 struct uic_command *ucmd,
+					 const struct uic_command *ucmd,
 					 enum ufs_trace_str_t str_t)
 {
 	u32 cmd;
@@ -443,11 +440,11 @@ static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
 }
 
 static void ufshcd_print_evt(struct ufs_hba *hba, u32 id,
-			     char *err_name)
+			     const char *err_name)
 {
 	int i;
 	bool found = false;
-	struct ufs_event_hist *e;
+	const struct ufs_event_hist *e;
 
 	if (id >= UFS_EVT_CNT)
 		return;
@@ -497,7 +494,7 @@ static void ufshcd_print_evt_hist(struct ufs_hba *hba)
 static void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap,
 			     bool pr_prdt)
 {
-	struct ufshcd_lrb *lrbp;
+	const struct ufshcd_lrb *lrbp;
 	int prdt_length;
 	int tag;
 
@@ -553,7 +550,7 @@ static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
 
 static void ufshcd_print_host_state(struct ufs_hba *hba)
 {
-	struct scsi_device *sdev_ufs = hba->ufs_device_wlun;
+	const struct scsi_device *sdev_ufs = hba->ufs_device_wlun;
 
 	dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
 	dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n",
@@ -1109,7 +1106,7 @@ static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
  */
 static u32 ufshcd_pending_cmds(struct ufs_hba *hba)
 {
-	struct scsi_device *sdev;
+	const struct scsi_device *sdev;
 	u32 pending = 0;
 
 	lockdep_assert_held(hba->host->host_lock);
@@ -2080,14 +2077,15 @@ static inline int ufshcd_monitor_opcode2dir(u8 opcode)
 static inline bool ufshcd_should_inform_monitor(struct ufs_hba *hba,
 						struct ufshcd_lrb *lrbp)
 {
-	struct ufs_hba_monitor *m = &hba->monitor;
+	const struct ufs_hba_monitor *m = &hba->monitor;
 
 	return (m->enabled && lrbp && lrbp->cmd &&
 		(!m->chunk_size || m->chunk_size == lrbp->cmd->sdb.length) &&
 		ktime_before(hba->monitor.enabled_ts, lrbp->issue_time_stamp));
 }
 
-static void ufshcd_start_monitor(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+static void ufshcd_start_monitor(struct ufs_hba *hba,
+				 const struct ufshcd_lrb *lrbp)
 {
 	int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd);
 	unsigned long flags;
@@ -2098,14 +2096,14 @@ static void ufshcd_start_monitor(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 }
 
-static void ufshcd_update_monitor(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+static void ufshcd_update_monitor(struct ufs_hba *hba, const struct ufshcd_lrb *lrbp)
 {
 	int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd);
 	unsigned long flags;
 
 	spin_lock_irqsave(hba->host->host_lock, flags);
 	if (dir >= 0 && hba->monitor.nr_queued[dir] > 0) {
-		struct request *req = scsi_cmd_to_rq(lrbp->cmd);
+		const struct request *req = scsi_cmd_to_rq(lrbp->cmd);
 		struct ufs_hba_monitor *m = &hba->monitor;
 		ktime_t now, inc, lat;
 
@@ -2227,6 +2225,8 @@ static inline int ufshcd_hba_capabilities(struct ufs_hba *hba)
 	int err;
 
 	hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
+	if (hba->quirks & UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS)
+		hba->capabilities &= ~MASK_64_ADDRESSING_SUPPORT;
 
 	/* nutrs and nutmrs are 0 based values */
 	hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
@@ -3095,7 +3095,7 @@ static int ufshcd_query_flag_retry(struct ufs_hba *hba,
 
 	if (ret)
 		dev_err(hba->dev,
-			"%s: query attribute, opcode %d, idn %d, failed with error %d after %d retires\n",
+			"%s: query attribute, opcode %d, idn %d, failed with error %d after %d retries\n",
 			__func__, opcode, idn, ret, retries);
 	return ret;
 }
@@ -3263,7 +3263,7 @@ int ufshcd_query_attr_retry(struct ufs_hba *hba,
 
 	if (ret)
 		dev_err(hba->dev,
-			"%s: query attribute, idn %d, failed with error %d after %d retires\n",
+			"%s: query attribute, idn %d, failed with error %d after %d retries\n",
 			__func__, idn, ret, QUERY_REQ_RETRIES);
 	return ret;
 }
@@ -3834,7 +3834,7 @@ int ufshcd_dme_configure_adapt(struct ufs_hba *hba,
 {
 	int ret;
 
-	if (agreed_gear != UFS_HS_G4)
+	if (agreed_gear < UFS_HS_G4)
 		adapt_val = PA_NO_ADAPT;
 
 	ret = ufshcd_dme_set(hba,
@@ -4123,7 +4123,7 @@ out_unlock:
  *
  * Returns 0 on success, non-zero value on failure
  */
-static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
+int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
 {
 	struct uic_command uic_cmd = {0};
 	int ret;
@@ -4148,6 +4148,7 @@ static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
 out:
 	return ret;
 }
+EXPORT_SYMBOL_GPL(ufshcd_uic_change_pwr_mode);
 
 int ufshcd_link_recovery(struct ufs_hba *hba)
 {
@@ -4290,8 +4291,13 @@ static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
 	if (hba->max_pwr_info.is_valid)
 		return 0;
 
-	pwr_info->pwr_tx = FAST_MODE;
-	pwr_info->pwr_rx = FAST_MODE;
+	if (hba->quirks & UFSHCD_QUIRK_HIBERN_FASTAUTO) {
+		pwr_info->pwr_tx = FASTAUTO_MODE;
+		pwr_info->pwr_rx = FASTAUTO_MODE;
+	} else {
+		pwr_info->pwr_tx = FAST_MODE;
+		pwr_info->pwr_rx = FAST_MODE;
+	}
 	pwr_info->hs_rate = PA_HS_MODE_B;
 
 	/* Get the connected lane count */
@@ -4766,7 +4772,7 @@ link_startup:
 		 * but we can't be sure if the link is up until link startup
 		 * succeeds. So reset the local Uni-Pro and try again.
*/ - if (ret && ufshcd_hba_enable(hba)) { + if (ret && retries && ufshcd_hba_enable(hba)) { ufshcd_update_evt_hist(hba, UFS_EVT_LINK_STARTUP_FAIL, (u32)ret); @@ -4931,7 +4937,7 @@ static int ufshcd_get_lu_wp(struct ufs_hba *hba, * */ static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba, - struct scsi_device *sdev) + const struct scsi_device *sdev) { if (hba->dev_info.f_power_on_wp_en && !hba->dev_info.is_lu_power_on_wp) { @@ -5450,8 +5456,8 @@ int ufshcd_write_ee_control(struct ufs_hba *hba) return err; } -int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask, u16 *other_mask, - u16 set, u16 clr) +int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask, + const u16 *other_mask, u16 set, u16 clr) { u16 new_mask, ee_ctrl_mask; int err = 0; @@ -7388,7 +7394,8 @@ static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd) * * Returns calculated max ICC level for specific regulator */ -static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff) +static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, + const char *buff) { int i; int curr_uA; @@ -7435,7 +7442,7 @@ static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff) * Returns calculated ICC level */ static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba, - u8 *desc_buf, int len) + const u8 *desc_buf, int len) { u32 icc_level = 0; @@ -7585,7 +7592,7 @@ out: return ret; } -static void ufshcd_wb_probe(struct ufs_hba *hba, u8 *desc_buf) +static void ufshcd_wb_probe(struct ufs_hba *hba, const u8 *desc_buf) { struct ufs_dev_info *dev_info = &hba->dev_info; u8 lun; @@ -7656,7 +7663,7 @@ wb_disabled: hba->caps &= ~UFSHCD_CAP_WB_EN; } -static void ufshcd_temp_notif_probe(struct ufs_hba *hba, u8 *desc_buf) +static void ufshcd_temp_notif_probe(struct ufs_hba *hba, const u8 *desc_buf) { struct ufs_dev_info *dev_info = &hba->dev_info; u32 ext_ufs_feature; @@ -7890,7 +7897,7 @@ static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba) u32 granularity, peer_granularity; u32 pa_tactivate, peer_pa_tactivate; u32 pa_tactivate_us, peer_pa_tactivate_us; - u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100}; + static const u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100}; ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY), &granularity); @@ -8007,7 +8014,7 @@ struct ufs_ref_clk { enum ufs_ref_clk_freq val; }; -static struct ufs_ref_clk ufs_ref_clk_freqs[] = { +static const struct ufs_ref_clk ufs_ref_clk_freqs[] = { {19200000, REF_CLK_FREQ_19_2_MHZ}, {26000000, REF_CLK_FREQ_26_MHZ}, {38400000, REF_CLK_FREQ_38_4_MHZ}, @@ -8449,7 +8456,7 @@ static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on) return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on); } -static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg) +int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg) { int ret = 0; @@ -8465,6 +8472,7 @@ static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg) out: return ret; } +EXPORT_SYMBOL_GPL(ufshcd_get_vreg); static int ufshcd_init_vreg(struct ufs_hba *hba) { @@ -8558,6 +8566,19 @@ out: return ret; } +static enum ufs_ref_clk_freq ufshcd_parse_ref_clk_property(struct ufs_hba *hba) +{ + u32 freq; + int ret = device_property_read_u32(hba->dev, "ref-clk-freq", &freq); + + if (ret) { + dev_dbg(hba->dev, "Cannot query 'ref-clk-freq' property = %d", ret); + return REF_CLK_FREQ_INVAL; + } + + return ufs_get_bref_clk_from_hz(freq); +} + static int ufshcd_init_clocks(struct ufs_hba *hba) { int ret = 0; @@ -8651,6 +8672,9 @@ 
static int ufshcd_hba_init(struct ufs_hba *hba) if (err) goto out_disable_hba_vreg; + if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL) + hba->dev_ref_clk_freq = ufshcd_parse_ref_clk_property(hba); + err = ufshcd_setup_clocks(hba, true); if (err) goto out_disable_hba_vreg; diff --git a/drivers/ufs/host/Kconfig b/drivers/ufs/host/Kconfig index 82590224da13..4cc2dbd79ed0 100644 --- a/drivers/ufs/host/Kconfig +++ b/drivers/ufs/host/Kconfig @@ -92,6 +92,18 @@ config SCSI_UFS_HISI Select this if you have UFS controller on Hisilicon chipset. If unsure, say N. +config SCSI_UFS_RENESAS + tristate "Renesas specific hooks to UFS controller platform driver" + depends on (ARCH_RENESAS || COMPILE_TEST) && SCSI_UFSHCD_PLATFORM + help + This selects the Renesas specific additions to UFSHCD platform driver. + UFS host on Renesas needs some vendor specific configuration before + accessing the hardware. + + Select this if you have UFS controller on Renesas chipset. + + If unsure, say N. + config SCSI_UFS_TI_J721E tristate "TI glue layer for Cadence UFS Controller" depends on OF && HAS_IOMEM && (ARCH_K3 || COMPILE_TEST) diff --git a/drivers/ufs/host/Makefile b/drivers/ufs/host/Makefile index e4be54273c98..7717ca93e7d5 100644 --- a/drivers/ufs/host/Makefile +++ b/drivers/ufs/host/Makefile @@ -11,4 +11,5 @@ obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o obj-$(CONFIG_SCSI_UFS_HISI) += ufs-hisi.o obj-$(CONFIG_SCSI_UFS_MEDIATEK) += ufs-mediatek.o +obj-$(CONFIG_SCSI_UFS_RENESAS) += ufs-renesas.o obj-$(CONFIG_SCSI_UFS_TI_J721E) += ti-j721e-ufs.o diff --git a/drivers/ufs/host/ufs-exynos.c b/drivers/ufs/host/ufs-exynos.c index a81d8cbd542f..eced97538082 100644 --- a/drivers/ufs/host/ufs-exynos.c +++ b/drivers/ufs/host/ufs-exynos.c @@ -52,11 +52,12 @@ #define HCI_ERR_EN_DME_LAYER 0x88 #define HCI_CLKSTOP_CTRL 0xB0 #define REFCLKOUT_STOP BIT(4) +#define MPHY_APBCLK_STOP BIT(3) #define REFCLK_STOP BIT(2) #define UNIPRO_MCLK_STOP BIT(1) #define UNIPRO_PCLK_STOP BIT(0) #define CLK_STOP_MASK (REFCLKOUT_STOP | REFCLK_STOP |\ - UNIPRO_MCLK_STOP |\ + UNIPRO_MCLK_STOP | MPHY_APBCLK_STOP|\ UNIPRO_PCLK_STOP) #define HCI_MISC 0xB4 #define REFCLK_CTRL_EN BIT(7) @@ -135,15 +136,9 @@ enum { /* * UNIPRO registers */ -#define UNIPRO_COMP_VERSION 0x000 -#define UNIPRO_DME_PWR_REQ 0x090 -#define UNIPRO_DME_PWR_REQ_POWERMODE 0x094 -#define UNIPRO_DME_PWR_REQ_LOCALL2TIMER0 0x098 -#define UNIPRO_DME_PWR_REQ_LOCALL2TIMER1 0x09C -#define UNIPRO_DME_PWR_REQ_LOCALL2TIMER2 0x0A0 -#define UNIPRO_DME_PWR_REQ_REMOTEL2TIMER0 0x0A4 -#define UNIPRO_DME_PWR_REQ_REMOTEL2TIMER1 0x0A8 -#define UNIPRO_DME_PWR_REQ_REMOTEL2TIMER2 0x0AC +#define UNIPRO_DME_POWERMODE_REQ_REMOTEL2TIMER0 0x78B8 +#define UNIPRO_DME_POWERMODE_REQ_REMOTEL2TIMER1 0x78BC +#define UNIPRO_DME_POWERMODE_REQ_REMOTEL2TIMER2 0x78C0 /* * UFS Protector registers @@ -157,7 +152,6 @@ enum { #define CNTR_DIV_VAL 40 -static struct exynos_ufs_drv_data exynos_ufs_drvs; static void exynos_ufs_auto_ctrl_hcc(struct exynos_ufs *ufs, bool en); static void exynos_ufs_ctrl_clkstop(struct exynos_ufs *ufs, bool en); @@ -657,8 +651,9 @@ static void exynos_ufs_config_phy_cap_attr(struct exynos_ufs *ufs) if (attr->rx_min_actv_time_cap) ufshcd_dme_set(hba, - UIC_ARG_MIB_SEL(RX_MIN_ACTIVATETIME_CAP, - i), attr->rx_min_actv_time_cap); + UIC_ARG_MIB_SEL( + RX_MIN_ACTIVATETIME_CAPABILITY, i), + attr->rx_min_actv_time_cap); if (attr->rx_hibern8_time_cap) ufshcd_dme_set(hba, @@ -910,9 +905,13 @@ static int exynos_ufs_phy_init(struct exynos_ufs *ufs) if (ret) { 
dev_err(hba->dev, "%s: phy init failed, ret = %d\n", __func__, ret); - goto out_exit_phy; + return ret; } + ret = phy_power_on(generic_phy); + if (ret) + goto out_exit_phy; + return 0; out_exit_phy: @@ -1174,10 +1173,6 @@ static int exynos_ufs_init(struct ufs_hba *hba) goto out; } - ret = phy_power_on(ufs->phy); - if (ret) - goto phy_off; - exynos_ufs_priv_init(hba, ufs); if (ufs->drv_data->drv_init) { @@ -1195,8 +1190,6 @@ static int exynos_ufs_init(struct ufs_hba *hba) exynos_ufs_config_smu(ufs); return 0; -phy_off: - phy_power_off(ufs->phy); out: hba->priv = NULL; return ret; @@ -1473,7 +1466,100 @@ static int exynosauto_ufs_vh_init(struct ufs_hba *hba) return 0; } -static struct ufs_hba_variant_ops ufs_hba_exynos_ops = { +static int fsd_ufs_pre_link(struct exynos_ufs *ufs) +{ + int i; + struct ufs_hba *hba = ufs->hba; + + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_CLK_PERIOD), + DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate)); + ufshcd_dme_set(hba, UIC_ARG_MIB(0x201), 0x12); + ufshcd_dme_set(hba, UIC_ARG_MIB(0x200), 0x40); + + for_each_ufs_tx_lane(ufs, i) { + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xAA, i), + DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate)); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8F, i), 0x3F); + } + + for_each_ufs_rx_lane(ufs, i) { + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x12, i), + DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate)); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x5C, i), 0x38); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0F, i), 0x0); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x65, i), 0x1); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x69, i), 0x1); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x21, i), 0x0); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x22, i), 0x0); + } + + ufshcd_dme_set(hba, UIC_ARG_MIB(0x200), 0x0); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_AUTOMODE_THLD), 0x4E20); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_OPTION_SUITE), 0x2e820183); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0x0); + + exynos_ufs_establish_connt(ufs); + + return 0; +} + +static int fsd_ufs_post_link(struct exynos_ufs *ufs) +{ + int i; + struct ufs_hba *hba = ufs->hba; + u32 hw_cap_min_tactivate; + u32 peer_rx_min_actv_time_cap; + u32 max_rx_hibern8_time_cap; + + ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(0x8F, 4), + &hw_cap_min_tactivate); /* HW Capability of MIN_TACTIVATE */ + ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), + &peer_rx_min_actv_time_cap); /* PA_TActivate */ + ufshcd_dme_get(hba, UIC_ARG_MIB(PA_HIBERN8TIME), + &max_rx_hibern8_time_cap); /* PA_Hibern8Time */ + + if (peer_rx_min_actv_time_cap >= hw_cap_min_tactivate) + ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE), + peer_rx_min_actv_time_cap + 1); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), max_rx_hibern8_time_cap + 1); + + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_MODE), 0x01); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_SAVECONFIGTIME), 0xFA); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_MODE), 0x00); + + ufshcd_dme_set(hba, UIC_ARG_MIB(0x200), 0x40); + + for_each_ufs_rx_lane(ufs, i) { + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x35, i), 0x05); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x73, i), 0x01); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x41, i), 0x02); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x42, i), 0xAC); + } + + ufshcd_dme_set(hba, UIC_ARG_MIB(0x200), 0x0); + + return 0; +} + +static int fsd_ufs_pre_pwr_change(struct exynos_ufs *ufs, + struct ufs_pa_layer_attr *pwr) +{ + struct ufs_hba *hba = ufs->hba; + + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), 0x1); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), 0x1); + ufshcd_dme_set(hba, 
UIC_ARG_MIB(PA_PWRMODEUSERDATA0), 12000); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1), 32000); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2), 16000); + + unipro_writel(ufs, 12000, UNIPRO_DME_POWERMODE_REQ_REMOTEL2TIMER0); + unipro_writel(ufs, 32000, UNIPRO_DME_POWERMODE_REQ_REMOTEL2TIMER1); + unipro_writel(ufs, 16000, UNIPRO_DME_POWERMODE_REQ_REMOTEL2TIMER2); + + return 0; +} + +static const struct ufs_hba_variant_ops ufs_hba_exynos_ops = { .name = "exynos_ufs", .init = exynos_ufs_init, .hce_enable_notify = exynos_ufs_hce_enable_notify, @@ -1514,9 +1600,14 @@ static int exynos_ufs_probe(struct platform_device *pdev) static int exynos_ufs_remove(struct platform_device *pdev) { struct ufs_hba *hba = platform_get_drvdata(pdev); + struct exynos_ufs *ufs = ufshcd_get_variant(hba); pm_runtime_get_sync(&(pdev)->dev); ufshcd_remove(hba); + + phy_power_off(ufs->phy); + phy_exit(ufs->phy); + return 0; } @@ -1545,7 +1636,7 @@ static struct exynos_ufs_uic_attr exynos7_uic_attr = { .pa_dbg_option_suite = 0x30103, }; -static struct exynos_ufs_drv_data exynosauto_ufs_drvs = { +static const struct exynos_ufs_drv_data exynosauto_ufs_drvs = { .uic_attr = &exynos7_uic_attr, .quirks = UFSHCD_QUIRK_PRDT_BYTE_GRAN | UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR | @@ -1561,7 +1652,7 @@ static struct exynos_ufs_drv_data exynosauto_ufs_drvs = { .post_pwr_change = exynosauto_ufs_post_pwr_change, }; -static struct exynos_ufs_drv_data exynosauto_ufs_vh_drvs = { +static const struct exynos_ufs_drv_data exynosauto_ufs_vh_drvs = { .vops = &ufs_hba_exynosauto_vh_ops, .quirks = UFSHCD_QUIRK_PRDT_BYTE_GRAN | UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR | @@ -1573,7 +1664,7 @@ static struct exynos_ufs_drv_data exynosauto_ufs_vh_drvs = { .opts = EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX, }; -static struct exynos_ufs_drv_data exynos_ufs_drvs = { +static const struct exynos_ufs_drv_data exynos_ufs_drvs = { .uic_attr = &exynos7_uic_attr, .quirks = UFSHCD_QUIRK_PRDT_BYTE_GRAN | UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR | @@ -1595,6 +1686,47 @@ static struct exynos_ufs_drv_data exynos_ufs_drvs = { .post_pwr_change = exynos7_ufs_post_pwr_change, }; +static struct exynos_ufs_uic_attr fsd_uic_attr = { + .tx_trailingclks = 0x10, + .tx_dif_p_nsec = 3000000, /* unit: ns */ + .tx_dif_n_nsec = 1000000, /* unit: ns */ + .tx_high_z_cnt_nsec = 20000, /* unit: ns */ + .tx_base_unit_nsec = 100000, /* unit: ns */ + .tx_gran_unit_nsec = 4000, /* unit: ns */ + .tx_sleep_cnt = 1000, /* unit: ns */ + .tx_min_activatetime = 0xa, + .rx_filler_enable = 0x2, + .rx_dif_p_nsec = 1000000, /* unit: ns */ + .rx_hibern8_wait_nsec = 4000000, /* unit: ns */ + .rx_base_unit_nsec = 100000, /* unit: ns */ + .rx_gran_unit_nsec = 4000, /* unit: ns */ + .rx_sleep_cnt = 1280, /* unit: ns */ + .rx_stall_cnt = 320, /* unit: ns */ + .rx_hs_g1_sync_len_cap = SYNC_LEN_COARSE(0xf), + .rx_hs_g2_sync_len_cap = SYNC_LEN_COARSE(0xf), + .rx_hs_g3_sync_len_cap = SYNC_LEN_COARSE(0xf), + .rx_hs_g1_prep_sync_len_cap = PREP_LEN(0xf), + .rx_hs_g2_prep_sync_len_cap = PREP_LEN(0xf), + .rx_hs_g3_prep_sync_len_cap = PREP_LEN(0xf), + .pa_dbg_option_suite = 0x2E820183, +}; + +struct exynos_ufs_drv_data fsd_ufs_drvs = { + .uic_attr = &fsd_uic_attr, + .quirks = UFSHCD_QUIRK_PRDT_BYTE_GRAN | + UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR | + UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR | + UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING | + UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR, + .opts = EXYNOS_UFS_OPT_HAS_APB_CLK_CTRL | + EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL | + EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR | + EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX, + 
.pre_link = fsd_ufs_pre_link, + .post_link = fsd_ufs_post_link, + .pre_pwr_change = fsd_ufs_pre_pwr_change, +}; + static const struct of_device_id exynos_ufs_of_match[] = { { .compatible = "samsung,exynos7-ufs", .data = &exynos_ufs_drvs }, @@ -1602,6 +1734,8 @@ static const struct of_device_id exynos_ufs_of_match[] = { .data = &exynosauto_ufs_drvs }, { .compatible = "samsung,exynosautov9-ufs-vh", .data = &exynosauto_ufs_vh_drvs }, + { .compatible = "tesla,fsd-ufs", + .data = &fsd_ufs_drvs }, {}, }; diff --git a/drivers/ufs/host/ufs-exynos.h b/drivers/ufs/host/ufs-exynos.h index 0b0a3d530ca6..a4bd6646d7f1 100644 --- a/drivers/ufs/host/ufs-exynos.h +++ b/drivers/ufs/host/ufs-exynos.h @@ -22,6 +22,7 @@ #define PA_DBG_RXPHY_CFGUPDT 0x9519 #define PA_DBG_MODE 0x9529 #define PA_DBG_SKIP_RESET_PHY 0x9539 +#define PA_DBG_AUTOMODE_THLD 0x9536 #define PA_DBG_OV_TM 0x9540 #define PA_DBG_SKIP_LINE_RESET 0x9541 #define PA_DBG_LINE_RESET_REQ 0x9543 diff --git a/drivers/ufs/host/ufs-mediatek.c b/drivers/ufs/host/ufs-mediatek.c index beabc3ccd30b..c958279bdd8f 100644 --- a/drivers/ufs/host/ufs-mediatek.c +++ b/drivers/ufs/host/ufs-mediatek.c @@ -16,6 +16,7 @@ #include <linux/of_device.h> #include <linux/phy/phy.h> #include <linux/platform_device.h> +#include <linux/pm_qos.h> #include <linux/regulator/consumer.h> #include <linux/reset.h> #include <linux/sched/clock.h> @@ -30,26 +31,11 @@ #define CREATE_TRACE_POINTS #include "ufs-mediatek-trace.h" -#define ufs_mtk_smc(cmd, val, res) \ - arm_smccc_smc(MTK_SIP_UFS_CONTROL, \ - cmd, val, 0, 0, 0, 0, 0, &(res)) - -#define ufs_mtk_va09_pwr_ctrl(res, on) \ - ufs_mtk_smc(UFS_MTK_SIP_VA09_PWR_CTRL, on, res) - -#define ufs_mtk_crypto_ctrl(res, enable) \ - ufs_mtk_smc(UFS_MTK_SIP_CRYPTO_CTRL, enable, res) - -#define ufs_mtk_ref_clk_notify(on, res) \ - ufs_mtk_smc(UFS_MTK_SIP_REF_CLK_NOTIFICATION, on, res) - -#define ufs_mtk_device_reset_ctrl(high, res) \ - ufs_mtk_smc(UFS_MTK_SIP_DEVICE_RESET, high, res) - static const struct ufs_dev_quirk ufs_mtk_dev_fixups[] = { - { .wmanufacturerid = UFS_VENDOR_MICRON, + { .wmanufacturerid = UFS_ANY_VENDOR, .model = UFS_ANY_MODEL, - .quirk = UFS_DEVICE_QUIRK_DELAY_AFTER_LPM }, + .quirk = UFS_DEVICE_QUIRK_DELAY_AFTER_LPM | + UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM }, { .wmanufacturerid = UFS_VENDOR_SKHYNIX, .model = "H9HQ21AFAMZDAR", .quirk = UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES }, @@ -82,6 +68,13 @@ static bool ufs_mtk_is_broken_vcc(struct ufs_hba *hba) return !!(host->caps & UFS_MTK_CAP_BROKEN_VCC); } +static bool ufs_mtk_is_pmc_via_fastauto(struct ufs_hba *hba) +{ + struct ufs_mtk_host *host = ufshcd_get_variant(hba); + + return (host->caps & UFS_MTK_CAP_PMC_VIA_FASTAUTO); +} + static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable) { u32 tmp; @@ -191,6 +184,14 @@ static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba, hba->capabilities &= ~MASK_AUTO_HIBERN8_SUPPORT; hba->ahit = 0; } + + /* + * Turn on CLK_CG early to bypass abnormal ERR_CHK signal + * to prevent host hang issue + */ + ufshcd_writel(hba, + ufshcd_readl(hba, REG_UFS_XOUFS_CTRL) | 0x80, + REG_UFS_XOUFS_CTRL); } return 0; @@ -244,8 +245,9 @@ static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on) if (host->ref_clk_enabled == on) return 0; + ufs_mtk_ref_clk_notify(on, PRE_CHANGE, res); + if (on) { - ufs_mtk_ref_clk_notify(on, res); ufshcd_writel(hba, REFCLK_REQUEST, REG_UFS_REFCLK_CTRL); } else { ufshcd_delay_us(host->ref_clk_gating_wait_us, 10); @@ -267,7 +269,7 @@ static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on) dev_err(hba->dev, 
"missing ack of refclk req, reg: 0x%x\n", value); - ufs_mtk_ref_clk_notify(host->ref_clk_enabled, res); + ufs_mtk_ref_clk_notify(host->ref_clk_enabled, POST_CHANGE, res); return -ETIMEDOUT; @@ -275,8 +277,8 @@ out: host->ref_clk_enabled = on; if (on) ufshcd_delay_us(host->ref_clk_ungating_wait_us, 10); - else - ufs_mtk_ref_clk_notify(on, res); + + ufs_mtk_ref_clk_notify(on, POST_CHANGE, res); return 0; } @@ -579,20 +581,38 @@ static void ufs_mtk_init_host_caps(struct ufs_hba *hba) if (of_property_read_bool(np, "mediatek,ufs-broken-vcc")) host->caps |= UFS_MTK_CAP_BROKEN_VCC; + if (of_property_read_bool(np, "mediatek,ufs-pmc-via-fastauto")) + host->caps |= UFS_MTK_CAP_PMC_VIA_FASTAUTO; + dev_info(hba->dev, "caps: 0x%x", host->caps); } -static void ufs_mtk_scale_perf(struct ufs_hba *hba, bool up) +static void ufs_mtk_boost_pm_qos(struct ufs_hba *hba, bool boost) { struct ufs_mtk_host *host = ufshcd_get_variant(hba); - ufs_mtk_boost_crypt(hba, up); - ufs_mtk_setup_ref_clk(hba, up); + if (!host || !host->pm_qos_init) + return; + + cpu_latency_qos_update_request(&host->pm_qos_req, + boost ? 0 : PM_QOS_DEFAULT_VALUE); +} - if (up) +static void ufs_mtk_pwr_ctrl(struct ufs_hba *hba, bool on) +{ + struct ufs_mtk_host *host = ufshcd_get_variant(hba); + + if (on) { phy_power_on(host->mphy); - else + ufs_mtk_setup_ref_clk(hba, on); + ufs_mtk_boost_crypt(hba, on); + ufs_mtk_boost_pm_qos(hba, on); + } else { + ufs_mtk_boost_pm_qos(hba, on); + ufs_mtk_boost_crypt(hba, on); + ufs_mtk_setup_ref_clk(hba, on); phy_power_off(host->mphy); + } } /** @@ -637,9 +657,9 @@ static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on, } if (clk_pwr_off) - ufs_mtk_scale_perf(hba, false); + ufs_mtk_pwr_ctrl(hba, false); } else if (on && status == POST_CHANGE) { - ufs_mtk_scale_perf(hba, true); + ufs_mtk_pwr_ctrl(hba, true); } return ret; @@ -675,6 +695,73 @@ static u32 ufs_mtk_get_ufs_hci_version(struct ufs_hba *hba) return hba->ufs_version; } +#define MAX_VCC_NAME 30 +static int ufs_mtk_vreg_fix_vcc(struct ufs_hba *hba) +{ + struct ufs_vreg_info *info = &hba->vreg_info; + struct device_node *np = hba->dev->of_node; + struct device *dev = hba->dev; + char vcc_name[MAX_VCC_NAME]; + struct arm_smccc_res res; + int err, ver; + + if (hba->vreg_info.vcc) + return 0; + + if (of_property_read_bool(np, "mediatek,ufs-vcc-by-num")) { + ufs_mtk_get_vcc_num(res); + if (res.a1 > UFS_VCC_NONE && res.a1 < UFS_VCC_MAX) + snprintf(vcc_name, MAX_VCC_NAME, "vcc-opt%lu", res.a1); + else + return -ENODEV; + } else if (of_property_read_bool(np, "mediatek,ufs-vcc-by-ver")) { + ver = (hba->dev_info.wspecversion & 0xF00) >> 8; + snprintf(vcc_name, MAX_VCC_NAME, "vcc-ufs%u", ver); + } else { + return 0; + } + + err = ufshcd_populate_vreg(dev, vcc_name, &info->vcc); + if (err) + return err; + + err = ufshcd_get_vreg(dev, info->vcc); + if (err) + return err; + + err = regulator_enable(info->vcc->reg); + if (!err) { + info->vcc->enabled = true; + dev_info(dev, "%s: %s enabled\n", __func__, vcc_name); + } + + return err; +} + +static void ufs_mtk_vreg_fix_vccqx(struct ufs_hba *hba) +{ + struct ufs_vreg_info *info = &hba->vreg_info; + struct ufs_vreg **vreg_on, **vreg_off; + + if (hba->dev_info.wspecversion >= 0x0300) { + vreg_on = &info->vccq; + vreg_off = &info->vccq2; + } else { + vreg_on = &info->vccq2; + vreg_off = &info->vccq; + } + + if (*vreg_on) + (*vreg_on)->always_on = true; + + if (*vreg_off) { + regulator_disable((*vreg_off)->reg); + devm_kfree(hba->dev, (*vreg_off)->name); + devm_kfree(hba->dev, *vreg_off); + *vreg_off = NULL; + } +} 
+ /** * ufs_mtk_init - find other essential mmio bases * @hba: host controller instance @@ -754,6 +841,26 @@ out: return err; } +static bool ufs_mtk_pmc_via_fastauto(struct ufs_hba *hba, + struct ufs_pa_layer_attr *dev_req_params) +{ + if (!ufs_mtk_is_pmc_via_fastauto(hba)) + return false; + + if (dev_req_params->hs_rate == hba->pwr_info.hs_rate) + return false; + + if (dev_req_params->pwr_tx != FAST_MODE && + dev_req_params->gear_tx < UFS_HS_G4) + return false; + + if (dev_req_params->pwr_rx != FAST_MODE && + dev_req_params->gear_rx < UFS_HS_G4) + return false; + + return true; +} + static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba, struct ufs_pa_layer_attr *dev_max_params, struct ufs_pa_layer_attr *dev_req_params) @@ -763,8 +870,8 @@ static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba, int ret; ufshcd_init_pwr_dev_param(&host_cap); - host_cap.hs_rx_gear = UFS_HS_G4; - host_cap.hs_tx_gear = UFS_HS_G4; + host_cap.hs_rx_gear = UFS_HS_G5; + host_cap.hs_tx_gear = UFS_HS_G5; ret = ufshcd_get_pwr_dev_param(&host_cap, dev_max_params, @@ -774,6 +881,32 @@ static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba, __func__); } + if (ufs_mtk_pmc_via_fastauto(hba, dev_req_params)) { + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), true); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), UFS_HS_G1); + + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), true); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), UFS_HS_G1); + + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES), + dev_req_params->lane_tx); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES), + dev_req_params->lane_rx); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES), + dev_req_params->hs_rate); + + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXHSADAPTTYPE), + PA_NO_ADAPT); + + ret = ufshcd_uic_change_pwr_mode(hba, + FASTAUTO_MODE << 4 | FASTAUTO_MODE); + + if (ret) { + dev_err(hba->dev, "%s: HSG1B FASTAUTO failed ret=%d\n", + __func__, ret); + } + } + if (host->hw_ver.major >= 3) { ret = ufshcd_dme_configure_adapt(hba, dev_req_params->gear_tx, @@ -963,6 +1096,11 @@ static int ufs_mtk_link_set_lpm(struct ufs_hba *hba) { int err; + /* Disable reset confirm feature by UniPro */ + ufshcd_writel(hba, + (ufshcd_readl(hba, REG_UFS_XOUFS_CTRL) & ~0x100), + REG_UFS_XOUFS_CTRL); + err = ufs_mtk_unipro_set_lpm(hba, true); if (err) { /* Resume UniPro state for following error recovery */ @@ -973,17 +1111,52 @@ static int ufs_mtk_link_set_lpm(struct ufs_hba *hba) return 0; } -static void ufs_mtk_vreg_set_lpm(struct ufs_hba *hba, bool lpm) +static void ufs_mtk_vccqx_set_lpm(struct ufs_hba *hba, bool lpm) +{ + struct ufs_vreg *vccqx = NULL; + + if (hba->vreg_info.vccq) + vccqx = hba->vreg_info.vccq; + else + vccqx = hba->vreg_info.vccq2; + + regulator_set_mode(vccqx->reg, + lpm ? 
REGULATOR_MODE_IDLE : REGULATOR_MODE_NORMAL); +} + +static void ufs_mtk_vsx_set_lpm(struct ufs_hba *hba, bool lpm) +{ + struct arm_smccc_res res; + + ufs_mtk_device_pwr_ctrl(!lpm, + (unsigned long)hba->dev_info.wspecversion, + res); +} + +static void ufs_mtk_dev_vreg_set_lpm(struct ufs_hba *hba, bool lpm) { - if (!hba->vreg_info.vccq2 || !hba->vreg_info.vcc) + if (!hba->vreg_info.vccq && !hba->vreg_info.vccq2) + return; + + /* Skip if VCC is assumed always-on */ + if (!hba->vreg_info.vcc) + return; + + /* Bypass LPM when device is still active */ + if (lpm && ufshcd_is_ufs_dev_active(hba)) + return; + + /* Bypass LPM if VCC is enabled */ + if (lpm && hba->vreg_info.vcc->enabled) return; - if (lpm && !hba->vreg_info.vcc->enabled) - regulator_set_mode(hba->vreg_info.vccq2->reg, - REGULATOR_MODE_IDLE); - else if (!lpm) - regulator_set_mode(hba->vreg_info.vccq2->reg, - REGULATOR_MODE_NORMAL); + if (lpm) { + ufs_mtk_vccqx_set_lpm(hba, lpm); + ufs_mtk_vsx_set_lpm(hba, lpm); + } else { + ufs_mtk_vsx_set_lpm(hba, lpm); + ufs_mtk_vccqx_set_lpm(hba, lpm); + } } static void ufs_mtk_auto_hibern8_disable(struct ufs_hba *hba) @@ -1026,7 +1199,6 @@ static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op, * ufshcd_suspend() re-enabling regulators while vreg is still * in low-power mode. */ - ufs_mtk_vreg_set_lpm(hba, true); err = ufs_mtk_mphy_power_on(hba, false); if (err) goto fail; @@ -1035,6 +1207,8 @@ static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op, if (ufshcd_is_link_off(hba)) ufs_mtk_device_reset_ctrl(0, res); + ufs_mtk_host_pwr_ctrl(HOST_PWR_HCI, false, res); + return 0; fail: /* @@ -1049,13 +1223,17 @@ fail: static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) { int err; + struct arm_smccc_res res; + + if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) + ufs_mtk_dev_vreg_set_lpm(hba, false); + + ufs_mtk_host_pwr_ctrl(HOST_PWR_HCI, true, res); err = ufs_mtk_mphy_power_on(hba, true); if (err) goto fail; - ufs_mtk_vreg_set_lpm(hba, false); - if (ufshcd_is_link_hibern8(hba)) { err = ufs_mtk_link_set_hpm(hba); if (err) @@ -1087,8 +1265,10 @@ static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba) struct ufs_dev_info *dev_info = &hba->dev_info; u16 mid = dev_info->wmanufacturerid; - if (mid == UFS_VENDOR_SAMSUNG) + if (mid == UFS_VENDOR_SAMSUNG) { ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 6); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), 10); + } /* * Decide waiting time before gating reference clock and @@ -1104,7 +1284,6 @@ static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba) else ufs_mtk_setup_ref_clk_wait_us(hba, REFCLK_DEFAULT_WAIT_US); - return 0; } @@ -1122,6 +1301,9 @@ static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba) hba->dev_quirks &= ~(UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM | UFS_DEVICE_QUIRK_DELAY_AFTER_LPM); } + + ufs_mtk_vreg_fix_vcc(hba); + ufs_mtk_vreg_fix_vccqx(hba); } static void ufs_mtk_event_notify(struct ufs_hba *hba, @@ -1220,9 +1402,59 @@ static int ufs_mtk_remove(struct platform_device *pdev) return 0; } +#ifdef CONFIG_PM_SLEEP +static int ufs_mtk_system_suspend(struct device *dev) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + int ret; + + ret = ufshcd_system_suspend(dev); + if (ret) + return ret; + + ufs_mtk_dev_vreg_set_lpm(hba, true); + + return 0; +} + +static int ufs_mtk_system_resume(struct device *dev) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + + ufs_mtk_dev_vreg_set_lpm(hba, false); + + return ufshcd_system_resume(dev); +} +#endif + +static int ufs_mtk_runtime_suspend(struct device *dev) +{ + 
struct ufs_hba *hba = dev_get_drvdata(dev); + int ret = 0; + + ret = ufshcd_runtime_suspend(dev); + if (ret) + return ret; + + ufs_mtk_dev_vreg_set_lpm(hba, true); + + return 0; +} + +static int ufs_mtk_runtime_resume(struct device *dev) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + + ufs_mtk_dev_vreg_set_lpm(hba, false); + + return ufshcd_runtime_resume(dev); +} + static const struct dev_pm_ops ufs_mtk_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume) - SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL) + SET_SYSTEM_SLEEP_PM_OPS(ufs_mtk_system_suspend, + ufs_mtk_system_resume) + SET_RUNTIME_PM_OPS(ufs_mtk_runtime_suspend, + ufs_mtk_runtime_resume, NULL) .prepare = ufshcd_suspend_prepare, .complete = ufshcd_resume_complete, }; diff --git a/drivers/ufs/host/ufs-mediatek.h b/drivers/ufs/host/ufs-mediatek.h index 414dca86c09f..aa26d415527b 100644 --- a/drivers/ufs/host/ufs-mediatek.h +++ b/drivers/ufs/host/ufs-mediatek.h @@ -7,11 +7,13 @@ #define _UFS_MEDIATEK_H #include <linux/bitops.h> +#include <linux/pm_qos.h> #include <linux/soc/mediatek/mtk_sip_svc.h> /* * Vendor specific UFSHCI Registers */ +#define REG_UFS_XOUFS_CTRL 0x140 #define REG_UFS_REFCLK_CTRL 0x144 #define REG_UFS_EXTREG 0x2100 #define REG_UFS_MPHYCTRL 0x2200 @@ -83,6 +85,9 @@ enum { #define UFS_MTK_SIP_DEVICE_RESET BIT(1) #define UFS_MTK_SIP_CRYPTO_CTRL BIT(2) #define UFS_MTK_SIP_REF_CLK_NOTIFICATION BIT(3) +#define UFS_MTK_SIP_HOST_PWR_CTRL BIT(5) +#define UFS_MTK_SIP_GET_VCC_NUM BIT(6) +#define UFS_MTK_SIP_DEVICE_PWR_CTRL BIT(7) /* * VS_DEBUGCLOCKENABLE @@ -108,6 +113,7 @@ enum ufs_mtk_host_caps { UFS_MTK_CAP_VA09_PWR_CTRL = 1 << 1, UFS_MTK_CAP_DISABLE_AH8 = 1 << 2, UFS_MTK_CAP_BROKEN_VCC = 1 << 3, + UFS_MTK_CAP_PMC_VIA_FASTAUTO = 1 << 6, }; struct ufs_mtk_crypt_cfg { @@ -126,6 +132,7 @@ struct ufs_mtk_hw_ver { struct ufs_mtk_host { struct phy *mphy; + struct pm_qos_request pm_qos_req; struct regulator *reg_va09; struct reset_control *hci_reset; struct reset_control *unipro_reset; @@ -135,6 +142,7 @@ struct ufs_mtk_host { struct ufs_mtk_hw_ver hw_ver; enum ufs_mtk_host_caps caps; bool mphy_powered_on; + bool pm_qos_init; bool unipro_lpm; bool ref_clk_enabled; u16 ref_clk_ungating_wait_us; @@ -142,4 +150,70 @@ struct ufs_mtk_host { u32 ip_ver; }; +/* + * Multi-VCC by Numbering + */ +enum ufs_mtk_vcc_num { + UFS_VCC_NONE = 0, + UFS_VCC_1, + UFS_VCC_2, + UFS_VCC_MAX +}; + +/* + * Host Power Control options + */ +enum { + HOST_PWR_HCI = 0, + HOST_PWR_MPHY +}; + +/* + * SMC call wrapper function + */ +struct ufs_mtk_smc_arg { + unsigned long cmd; + struct arm_smccc_res *res; + unsigned long v1; + unsigned long v2; + unsigned long v3; + unsigned long v4; + unsigned long v5; + unsigned long v6; + unsigned long v7; +}; + +static void _ufs_mtk_smc(struct ufs_mtk_smc_arg s) +{ + arm_smccc_smc(MTK_SIP_UFS_CONTROL, + s.cmd, s.v1, s.v2, s.v3, s.v4, s.v5, s.v6, s.res); +} + +#define ufs_mtk_smc(...) 
\ + _ufs_mtk_smc((struct ufs_mtk_smc_arg) {__VA_ARGS__}) + +/* + * SMC call interface + */ +#define ufs_mtk_va09_pwr_ctrl(res, on) \ + ufs_mtk_smc(UFS_MTK_SIP_VA09_PWR_CTRL, &(res), on) + +#define ufs_mtk_crypto_ctrl(res, enable) \ + ufs_mtk_smc(UFS_MTK_SIP_CRYPTO_CTRL, &(res), enable) + +#define ufs_mtk_ref_clk_notify(on, stage, res) \ + ufs_mtk_smc(UFS_MTK_SIP_REF_CLK_NOTIFICATION, &(res), on, stage) + +#define ufs_mtk_device_reset_ctrl(high, res) \ + ufs_mtk_smc(UFS_MTK_SIP_DEVICE_RESET, &(res), high) + +#define ufs_mtk_host_pwr_ctrl(opt, on, res) \ + ufs_mtk_smc(UFS_MTK_SIP_HOST_PWR_CTRL, &(res), opt, on) + +#define ufs_mtk_get_vcc_num(res) \ + ufs_mtk_smc(UFS_MTK_SIP_GET_VCC_NUM, &(res)) + +#define ufs_mtk_device_pwr_ctrl(on, ufs_ver, res) \ + ufs_mtk_smc(UFS_MTK_SIP_DEVICE_PWR_CTRL, &(res), on, ufs_ver) + #endif /* !_UFS_MEDIATEK_H */ diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c index f10d4668814c..473fad83701e 100644 --- a/drivers/ufs/host/ufs-qcom.c +++ b/drivers/ufs/host/ufs-qcom.c @@ -58,19 +58,6 @@ static void ufs_qcom_dump_regs_wrapper(struct ufs_hba *hba, int offset, int len, ufshcd_dump_regs(hba, offset, len * 4, prefix); } -static int ufs_qcom_get_connected_tx_lanes(struct ufs_hba *hba, u32 *tx_lanes) -{ - int err = 0; - - err = ufshcd_dme_get(hba, - UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), tx_lanes); - if (err) - dev_err(hba->dev, "%s: couldn't read PA_CONNECTEDTXDATALANES %d\n", - __func__, err); - - return err; -} - static int ufs_qcom_host_clk_get(struct device *dev, const char *name, struct clk **clk_out, bool optional) { @@ -194,13 +181,6 @@ out: return err; } -static int ufs_qcom_link_startup_post_change(struct ufs_hba *hba) -{ - u32 tx_lanes; - - return ufs_qcom_get_connected_tx_lanes(hba, &tx_lanes); -} - static int ufs_qcom_check_hibern8(struct ufs_hba *hba) { int err; @@ -570,9 +550,6 @@ static int ufs_qcom_link_startup_notify(struct ufs_hba *hba, err = ufshcd_disable_host_tx_lcc(hba); break; - case POST_CHANGE: - ufs_qcom_link_startup_post_change(hba); - break; default: break; } diff --git a/drivers/ufs/host/ufs-renesas.c b/drivers/ufs/host/ufs-renesas.c new file mode 100644 index 000000000000..f8a5e79ed3b4 --- /dev/null +++ b/drivers/ufs/host/ufs-renesas.c @@ -0,0 +1,412 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT +/* + * Renesas UFS host controller driver + * + * Copyright (C) 2022 Renesas Electronics Corporation + */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/iopoll.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/pm_runtime.h> +#include <ufs/ufshcd.h> + +#include "ufshcd-pltfrm.h" + +struct ufs_renesas_priv { + bool initialized; /* The hardware needs initialization once */ +}; + +enum { + SET_PHY_INDEX_LO = 0, + SET_PHY_INDEX_HI, + TIMER_INDEX, + MAX_INDEX +}; + +enum ufs_renesas_init_param_mode { + MODE_RESTORE, + MODE_SET, + MODE_SAVE, + MODE_POLL, + MODE_WAIT, + MODE_WRITE, +}; + +#define PARAM_RESTORE(_reg, _index) \ + { .mode = MODE_RESTORE, .reg = _reg, .index = _index } +#define PARAM_SET(_index, _set) \ + { .mode = MODE_SET, .index = _index, .u.set = _set } +#define PARAM_SAVE(_reg, _mask, _index) \ + { .mode = MODE_SAVE, .reg = _reg, .mask = (u32)(_mask), \ + .index = _index } +#define PARAM_POLL(_reg, _expected, _mask) \ + { .mode = MODE_POLL, .reg = _reg, .u.expected = _expected, \ + .mask = (u32)(_mask) } +#define PARAM_WAIT(_delay_us) \ + { .mode = MODE_WAIT, .u.delay_us = _delay_us } + +#define 
PARAM_WRITE(_reg, _val) \ + { .mode = MODE_WRITE, .reg = _reg, .u.val = _val } + +#define PARAM_WRITE_D0_D4(_d0, _d4) \ + PARAM_WRITE(0xd0, _d0), PARAM_WRITE(0xd4, _d4) + +#define PARAM_WRITE_800_80C_POLL(_addr, _data_800) \ + PARAM_WRITE_D0_D4(0x0000080c, 0x00000100), \ + PARAM_WRITE_D0_D4(0x00000800, ((_data_800) << 16) | BIT(8) | (_addr)), \ + PARAM_WRITE(0xd0, 0x0000080c), \ + PARAM_POLL(0xd4, BIT(8), BIT(8)) + +#define PARAM_RESTORE_800_80C_POLL(_index) \ + PARAM_WRITE_D0_D4(0x0000080c, 0x00000100), \ + PARAM_WRITE(0xd0, 0x00000800), \ + PARAM_RESTORE(0xd4, _index), \ + PARAM_WRITE(0xd0, 0x0000080c), \ + PARAM_POLL(0xd4, BIT(8), BIT(8)) + +#define PARAM_WRITE_804_80C_POLL(_addr, _data_804) \ + PARAM_WRITE_D0_D4(0x0000080c, 0x00000100), \ + PARAM_WRITE_D0_D4(0x00000804, ((_data_804) << 16) | BIT(8) | (_addr)), \ + PARAM_WRITE(0xd0, 0x0000080c), \ + PARAM_POLL(0xd4, BIT(8), BIT(8)) + +#define PARAM_WRITE_828_82C_POLL(_data_828) \ + PARAM_WRITE_D0_D4(0x0000082c, 0x0f000000), \ + PARAM_WRITE_D0_D4(0x00000828, _data_828), \ + PARAM_WRITE(0xd0, 0x0000082c), \ + PARAM_POLL(0xd4, _data_828, _data_828) + +#define PARAM_WRITE_PHY(_addr16, _data16) \ + PARAM_WRITE(0xf0, 1), \ + PARAM_WRITE_800_80C_POLL(0x16, (_addr16) & 0xff), \ + PARAM_WRITE_800_80C_POLL(0x17, ((_addr16) >> 8) & 0xff), \ + PARAM_WRITE_800_80C_POLL(0x18, (_data16) & 0xff), \ + PARAM_WRITE_800_80C_POLL(0x19, ((_data16) >> 8) & 0xff), \ + PARAM_WRITE_800_80C_POLL(0x1c, 0x01), \ + PARAM_WRITE_828_82C_POLL(0x0f000000), \ + PARAM_WRITE(0xf0, 0) + +#define PARAM_SET_PHY(_addr16, _data16) \ + PARAM_WRITE(0xf0, 1), \ + PARAM_WRITE_800_80C_POLL(0x16, (_addr16) & 0xff), \ + PARAM_WRITE_800_80C_POLL(0x17, ((_addr16) >> 8) & 0xff), \ + PARAM_WRITE_800_80C_POLL(0x1c, 0x01), \ + PARAM_WRITE_828_82C_POLL(0x0f000000), \ + PARAM_WRITE_804_80C_POLL(0x1a, 0), \ + PARAM_WRITE(0xd0, 0x00000808), \ + PARAM_SAVE(0xd4, 0xff, SET_PHY_INDEX_LO), \ + PARAM_WRITE_804_80C_POLL(0x1b, 0), \ + PARAM_WRITE(0xd0, 0x00000808), \ + PARAM_SAVE(0xd4, 0xff, SET_PHY_INDEX_HI), \ + PARAM_WRITE_828_82C_POLL(0x0f000000), \ + PARAM_WRITE(0xf0, 0), \ + PARAM_WRITE(0xf0, 1), \ + PARAM_WRITE_800_80C_POLL(0x16, (_addr16) & 0xff), \ + PARAM_WRITE_800_80C_POLL(0x17, ((_addr16) >> 8) & 0xff), \ + PARAM_SET(SET_PHY_INDEX_LO, ((_data16 & 0xff) << 16) | BIT(8) | 0x18), \ + PARAM_RESTORE_800_80C_POLL(SET_PHY_INDEX_LO), \ + PARAM_SET(SET_PHY_INDEX_HI, (((_data16 >> 8) & 0xff) << 16) | BIT(8) | 0x19), \ + PARAM_RESTORE_800_80C_POLL(SET_PHY_INDEX_HI), \ + PARAM_WRITE_800_80C_POLL(0x1c, 0x01), \ + PARAM_WRITE_828_82C_POLL(0x0f000000), \ + PARAM_WRITE(0xf0, 0) + +#define PARAM_INDIRECT_WRITE(_gpio, _addr, _data_800) \ + PARAM_WRITE(0xf0, _gpio), \ + PARAM_WRITE_800_80C_POLL(_addr, _data_800), \ + PARAM_WRITE_828_82C_POLL(0x0f000000), \ + PARAM_WRITE(0xf0, 0) + +#define PARAM_INDIRECT_POLL(_gpio, _addr, _expected, _mask) \ + PARAM_WRITE(0xf0, _gpio), \ + PARAM_WRITE_800_80C_POLL(_addr, 0), \ + PARAM_WRITE(0xd0, 0x00000808), \ + PARAM_POLL(0xd4, _expected, _mask), \ + PARAM_WRITE(0xf0, 0) + +struct ufs_renesas_init_param { + enum ufs_renesas_init_param_mode mode; + u32 reg; + union { + u32 expected; + u32 delay_us; + u32 set; + u32 val; + } u; + u32 mask; + u32 index; +}; + +/* This setting is for SERIES B */ +static const struct ufs_renesas_init_param ufs_param[] = { + PARAM_WRITE(0xc0, 0x49425308), + PARAM_WRITE_D0_D4(0x00000104, 0x00000002), + PARAM_WAIT(1), + PARAM_WRITE_D0_D4(0x00000828, 0x00000200), + PARAM_WAIT(1), + PARAM_WRITE_D0_D4(0x00000828, 0x00000000), + 
PARAM_WRITE_D0_D4(0x00000104, 0x00000001), + PARAM_WRITE_D0_D4(0x00000940, 0x00000001), + PARAM_WAIT(1), + PARAM_WRITE_D0_D4(0x00000940, 0x00000000), + + PARAM_WRITE(0xc0, 0x49425308), + PARAM_WRITE(0xc0, 0x41584901), + + PARAM_WRITE_D0_D4(0x0000080c, 0x00000100), + PARAM_WRITE_D0_D4(0x00000804, 0x00000000), + PARAM_WRITE(0xd0, 0x0000080c), + PARAM_POLL(0xd4, BIT(8), BIT(8)), + + PARAM_WRITE(REG_CONTROLLER_ENABLE, 0x00000001), + + PARAM_WRITE(0xd0, 0x00000804), + PARAM_POLL(0xd4, BIT(8) | BIT(6) | BIT(0), BIT(8) | BIT(6) | BIT(0)), + + PARAM_WRITE(0xd0, 0x00000d00), + PARAM_SAVE(0xd4, 0x0000ffff, TIMER_INDEX), + PARAM_WRITE(0xd4, 0x00000000), + PARAM_WRITE_D0_D4(0x0000082c, 0x0f000000), + PARAM_WRITE_D0_D4(0x00000828, 0x08000000), + PARAM_WRITE(0xd0, 0x0000082c), + PARAM_POLL(0xd4, BIT(27), BIT(27)), + PARAM_WRITE(0xd0, 0x00000d2c), + PARAM_POLL(0xd4, BIT(0), BIT(0)), + + /* phy setup */ + PARAM_INDIRECT_WRITE(1, 0x01, 0x001f), + PARAM_INDIRECT_WRITE(7, 0x5d, 0x0014), + PARAM_INDIRECT_WRITE(7, 0x5e, 0x0014), + PARAM_INDIRECT_WRITE(7, 0x0d, 0x0003), + PARAM_INDIRECT_WRITE(7, 0x0e, 0x0007), + PARAM_INDIRECT_WRITE(7, 0x5f, 0x0003), + PARAM_INDIRECT_WRITE(7, 0x60, 0x0003), + PARAM_INDIRECT_WRITE(7, 0x5b, 0x00a6), + PARAM_INDIRECT_WRITE(7, 0x5c, 0x0003), + + PARAM_INDIRECT_POLL(7, 0x3c, 0, BIT(7)), + PARAM_INDIRECT_POLL(7, 0x4c, 0, BIT(4)), + + PARAM_INDIRECT_WRITE(1, 0x32, 0x0080), + PARAM_INDIRECT_WRITE(1, 0x1f, 0x0001), + PARAM_INDIRECT_WRITE(0, 0x2c, 0x0001), + PARAM_INDIRECT_WRITE(0, 0x32, 0x0087), + + PARAM_INDIRECT_WRITE(1, 0x4d, 0x0061), + PARAM_INDIRECT_WRITE(4, 0x9b, 0x0009), + PARAM_INDIRECT_WRITE(4, 0xa6, 0x0005), + PARAM_INDIRECT_WRITE(4, 0xa5, 0x0058), + PARAM_INDIRECT_WRITE(1, 0x39, 0x0027), + PARAM_INDIRECT_WRITE(1, 0x47, 0x004c), + + PARAM_INDIRECT_WRITE(7, 0x0d, 0x0002), + PARAM_INDIRECT_WRITE(7, 0x0e, 0x0007), + + PARAM_WRITE_PHY(0x0028, 0x0061), + PARAM_WRITE_PHY(0x4014, 0x0061), + PARAM_SET_PHY(0x401c, BIT(2)), + PARAM_WRITE_PHY(0x4000, 0x0000), + PARAM_WRITE_PHY(0x4001, 0x0000), + + PARAM_WRITE_PHY(0x10ae, 0x0001), + PARAM_WRITE_PHY(0x10ad, 0x0000), + PARAM_WRITE_PHY(0x10af, 0x0001), + PARAM_WRITE_PHY(0x10b6, 0x0001), + PARAM_WRITE_PHY(0x10ae, 0x0000), + + PARAM_WRITE_PHY(0x10ae, 0x0001), + PARAM_WRITE_PHY(0x10ad, 0x0000), + PARAM_WRITE_PHY(0x10af, 0x0002), + PARAM_WRITE_PHY(0x10b6, 0x0001), + PARAM_WRITE_PHY(0x10ae, 0x0000), + + PARAM_WRITE_PHY(0x10ae, 0x0001), + PARAM_WRITE_PHY(0x10ad, 0x0080), + PARAM_WRITE_PHY(0x10af, 0x0000), + PARAM_WRITE_PHY(0x10b6, 0x0001), + PARAM_WRITE_PHY(0x10ae, 0x0000), + + PARAM_WRITE_PHY(0x10ae, 0x0001), + PARAM_WRITE_PHY(0x10ad, 0x0080), + PARAM_WRITE_PHY(0x10af, 0x001a), + PARAM_WRITE_PHY(0x10b6, 0x0001), + PARAM_WRITE_PHY(0x10ae, 0x0000), + + PARAM_INDIRECT_WRITE(7, 0x70, 0x0016), + PARAM_INDIRECT_WRITE(7, 0x71, 0x0016), + PARAM_INDIRECT_WRITE(7, 0x72, 0x0014), + PARAM_INDIRECT_WRITE(7, 0x73, 0x0014), + PARAM_INDIRECT_WRITE(7, 0x74, 0x0000), + PARAM_INDIRECT_WRITE(7, 0x75, 0x0000), + PARAM_INDIRECT_WRITE(7, 0x76, 0x0010), + PARAM_INDIRECT_WRITE(7, 0x77, 0x0010), + PARAM_INDIRECT_WRITE(7, 0x78, 0x00ff), + PARAM_INDIRECT_WRITE(7, 0x79, 0x0000), + + PARAM_INDIRECT_WRITE(7, 0x19, 0x0007), + + PARAM_INDIRECT_WRITE(7, 0x1a, 0x0007), + + PARAM_INDIRECT_WRITE(7, 0x24, 0x000c), + + PARAM_INDIRECT_WRITE(7, 0x25, 0x000c), + + PARAM_INDIRECT_WRITE(7, 0x62, 0x0000), + PARAM_INDIRECT_WRITE(7, 0x63, 0x0000), + PARAM_INDIRECT_WRITE(7, 0x5d, 0x0014), + PARAM_INDIRECT_WRITE(7, 0x5e, 0x0017), + PARAM_INDIRECT_WRITE(7, 0x5d, 0x0004), + 
PARAM_INDIRECT_WRITE(7, 0x5e, 0x0017), + PARAM_INDIRECT_POLL(7, 0x55, 0, BIT(6)), + PARAM_INDIRECT_POLL(7, 0x41, 0, BIT(7)), + /* end of phy setup */ + + PARAM_WRITE(0xf0, 0), + PARAM_WRITE(0xd0, 0x00000d00), + PARAM_RESTORE(0xd4, TIMER_INDEX), +}; + +static void ufs_renesas_dbg_register_dump(struct ufs_hba *hba) +{ + ufshcd_dump_regs(hba, 0xc0, 0x40, "regs: 0xc0 + "); +} + +static void ufs_renesas_reg_control(struct ufs_hba *hba, + const struct ufs_renesas_init_param *p) +{ + static u32 save[MAX_INDEX]; + int ret; + u32 val; + + WARN_ON(p->index >= MAX_INDEX); + + switch (p->mode) { + case MODE_RESTORE: + ufshcd_writel(hba, save[p->index], p->reg); + break; + case MODE_SET: + save[p->index] |= p->u.set; + break; + case MODE_SAVE: + save[p->index] = ufshcd_readl(hba, p->reg) & p->mask; + break; + case MODE_POLL: + ret = readl_poll_timeout_atomic(hba->mmio_base + p->reg, + val, + (val & p->mask) == p->u.expected, + 10, 1000); + if (ret) + dev_err(hba->dev, "%s: poll failed %d (%08x, %08x, %08x)\n", + __func__, ret, val, p->mask, p->u.expected); + break; + case MODE_WAIT: + if (p->u.delay_us > 1000) + mdelay(DIV_ROUND_UP(p->u.delay_us, 1000)); + else + udelay(p->u.delay_us); + break; + case MODE_WRITE: + ufshcd_writel(hba, p->u.val, p->reg); + break; + default: + break; + } +} + +static void ufs_renesas_pre_init(struct ufs_hba *hba) +{ + const struct ufs_renesas_init_param *p = ufs_param; + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(ufs_param); i++) + ufs_renesas_reg_control(hba, &p[i]); +} + +static int ufs_renesas_hce_enable_notify(struct ufs_hba *hba, + enum ufs_notify_change_status status) +{ + struct ufs_renesas_priv *priv = ufshcd_get_variant(hba); + + if (priv->initialized) + return 0; + + if (status == PRE_CHANGE) + ufs_renesas_pre_init(hba); + + priv->initialized = true; + + return 0; +} + +static int ufs_renesas_setup_clocks(struct ufs_hba *hba, bool on, + enum ufs_notify_change_status status) +{ + if (on && status == PRE_CHANGE) + pm_runtime_get_sync(hba->dev); + else if (!on && status == POST_CHANGE) + pm_runtime_put(hba->dev); + + return 0; +} + +static int ufs_renesas_init(struct ufs_hba *hba) +{ + struct ufs_renesas_priv *priv; + + priv = devm_kmalloc(hba->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + ufshcd_set_variant(hba, priv); + + hba->quirks |= UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS | UFSHCD_QUIRK_HIBERN_FASTAUTO; + + return 0; +} + +static const struct ufs_hba_variant_ops ufs_renesas_vops = { + .name = "renesas", + .init = ufs_renesas_init, + .setup_clocks = ufs_renesas_setup_clocks, + .hce_enable_notify = ufs_renesas_hce_enable_notify, + .dbg_register_dump = ufs_renesas_dbg_register_dump, +}; + +static const struct of_device_id __maybe_unused ufs_renesas_of_match[] = { + { .compatible = "renesas,r8a779f0-ufs" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, ufs_renesas_of_match); + +static int ufs_renesas_probe(struct platform_device *pdev) +{ + return ufshcd_pltfrm_init(pdev, &ufs_renesas_vops); +} + +static int ufs_renesas_remove(struct platform_device *pdev) +{ + struct ufs_hba *hba = platform_get_drvdata(pdev); + + ufshcd_remove(hba); + + return 0; +} + +static struct platform_driver ufs_renesas_platform = { + .probe = ufs_renesas_probe, + .remove = ufs_renesas_remove, + .driver = { + .name = "ufshcd-renesas", + .of_match_table = of_match_ptr(ufs_renesas_of_match), + }, +}; +module_platform_driver(ufs_renesas_platform); + +MODULE_AUTHOR("Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>"); +MODULE_DESCRIPTION("Renesas UFS host controller 
driver"); +MODULE_LICENSE("Dual MIT/GPL"); diff --git a/drivers/ufs/host/ufshcd-pci.c b/drivers/ufs/host/ufshcd-pci.c index 04166bda41da..24af1f389bf2 100644 --- a/drivers/ufs/host/ufshcd-pci.c +++ b/drivers/ufs/host/ufshcd-pci.c @@ -426,6 +426,7 @@ static int ufs_intel_adl_init(struct ufs_hba *hba) { hba->nop_out_timeout = 200; hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8; + hba->caps |= UFSHCD_CAP_WB_EN; return ufs_intel_common_init(hba); } diff --git a/drivers/ufs/host/ufshcd-pltfrm.c b/drivers/ufs/host/ufshcd-pltfrm.c index 173aea8e9997..5739ff007828 100644 --- a/drivers/ufs/host/ufshcd-pltfrm.c +++ b/drivers/ufs/host/ufshcd-pltfrm.c @@ -26,7 +26,7 @@ static int ufshcd_parse_clock_info(struct ufs_hba *hba) int i; struct device *dev = hba->dev; struct device_node *np = dev->of_node; - char *name; + const char *name; u32 *clkfreq = NULL; struct ufs_clk_info *clki; int len = 0; @@ -79,8 +79,8 @@ static int ufshcd_parse_clock_info(struct ufs_hba *hba) } for (i = 0; i < sz; i += 2) { - ret = of_property_read_string_index(np, - "clock-names", i/2, (const char **)&name); + ret = of_property_read_string_index(np, "clock-names", i/2, + &name); if (ret) goto out; @@ -120,8 +120,8 @@ static bool phandle_exists(const struct device_node *np, } #define MAX_PROP_SIZE 32 -static int ufshcd_populate_vreg(struct device *dev, const char *name, - struct ufs_vreg **out_vreg) +int ufshcd_populate_vreg(struct device *dev, const char *name, + struct ufs_vreg **out_vreg) { char prop_name[MAX_PROP_SIZE]; struct ufs_vreg *vreg = NULL; @@ -156,6 +156,7 @@ out: *out_vreg = vreg; return 0; } +EXPORT_SYMBOL_GPL(ufshcd_populate_vreg); /** * ufshcd_parse_regulator_info - get regulator info from device tree @@ -219,8 +220,8 @@ static void ufshcd_init_lanes_per_dir(struct ufs_hba *hba) * * Returns 0 on success, non-zero value on failure */ -int ufshcd_get_pwr_dev_param(struct ufs_dev_params *pltfrm_param, - struct ufs_pa_layer_attr *dev_max, +int ufshcd_get_pwr_dev_param(const struct ufs_dev_params *pltfrm_param, + const struct ufs_pa_layer_attr *dev_max, struct ufs_pa_layer_attr *agreed_pwr) { int min_pltfrm_gear; diff --git a/drivers/ufs/host/ufshcd-pltfrm.h b/drivers/ufs/host/ufshcd-pltfrm.h index 43c2e412bd99..2e4ba2bfbcad 100644 --- a/drivers/ufs/host/ufshcd-pltfrm.h +++ b/drivers/ufs/host/ufshcd-pltfrm.h @@ -25,12 +25,14 @@ struct ufs_dev_params { u32 desired_working_mode; }; -int ufshcd_get_pwr_dev_param(struct ufs_dev_params *dev_param, - struct ufs_pa_layer_attr *dev_max, +int ufshcd_get_pwr_dev_param(const struct ufs_dev_params *dev_param, + const struct ufs_pa_layer_attr *dev_max, struct ufs_pa_layer_attr *agreed_pwr); void ufshcd_init_pwr_dev_param(struct ufs_dev_params *dev_param); int ufshcd_pltfrm_init(struct platform_device *pdev, const struct ufs_hba_variant_ops *vops); void ufshcd_pltfrm_shutdown(struct platform_device *pdev); +int ufshcd_populate_vreg(struct device *dev, const char *name, + struct ufs_vreg **out_vreg); #endif /* UFSHCD_PLTFRM_H_ */ |