author	Linus Torvalds	2023-06-28 14:06:39 -0700
committer	Linus Torvalds	2023-06-28 14:06:39 -0700
commit	89181f544ffa4da682b0145738342f9b78b9e8dc (patch)
tree	0f20994265e7970167d783aa43a761632398c11c /drivers
parent	1364b4068a421d99fb4da8b570e54525096b1cef (diff)
parent	06b5d4fea89cd699408af12c14b6915d77ceffb0 (diff)
Merge tag 'mmc-v6.5' of git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/mmc
Pull MMC updates from Ulf Hansson: "MMC core: - Allow synchronous detection of (e)MMC/SD/SDIO cards - Fixup error check for ioctls for SPI hosts - Disable broken SD-Cache support for Kingston Canvas Go Plus from 2019 - Disable broken eMMC-Trim support for Kingston EMMC04G-M627 - Disable broken eMMC-Trim support for Micron MTFC4GACAJCN-1M MMC host: - bcm2835: Convert DT bindings to YAML - mmci: - Enable asynchronous probe - Transform the ux500 HW-busy detection into a proper state machine - Add support for SW busy-end timeouts for the ux500 variants - mmci_stm32: - Add support for sdm32 variant revision v3.0 used on STM32MP25 - Improve the tuning sequence - mtk-sd: Tune polling-period to improve performance - sdhci: Fixup DMA configuration for 64-bit DMA mode - sdhci-bcm-kona: Convert DT bindings to YAML - sdhci-msm: - Switch to use the new ICE API - Add support for the SC8280XP/IPQ6018/QDU1000/QRU1000 variants - sdhci-pci-gli: - Add support SD Express cards for GL9767 - Add support for the Genesys Logic GL9767 variant" * tag 'mmc-v6.5' of git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/mmc: (42 commits) dt-bindings: mmc: fsl-imx-esdhc: Add imx6ul support mmc: mmci: Add support for SW busy-end timeouts mmc: Add MMC_QUIRK_BROKEN_SD_CACHE for Kingston Canvas Go Plus from 11/2019 mmc: core: disable TRIM on Kingston EMMC04G-M627 mmc: mmci: stm32: add delay block support for STM32MP25 mmc: mmci: stm32: prepare other delay block support mmc: mmci: stm32: manage block gap hardware flow control mmc: mmci: Add support for sdmmc variant revision v3.0 mmc: mmci: add stm32_idmabsize_align parameter dt-bindings: mmc: mmci: Add st,stm32mp25-sdmmc2 compatible mmc: core: disable TRIM on Micron MTFC4GACAJCN-1M mmc: mmci: Break out a helper function mmc: mmci: Use a switch statement machine mmc: mmci: Use state machine state as exit condition mmc: mmci: Retry the busy start condition mmc: mmci: Make busy complete state machine explicit mmc: mmci: Break out error check in busy detect mmc: mmci: Stash status while waiting for busy mmc: mmci: Unwind big if() clause mmc: mmci: Clear busy_status when starting command ...
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/memstick/host/r592.c         |   4
-rw-r--r--  drivers/mmc/core/block.c             |  35
-rw-r--r--  drivers/mmc/core/card.h              |  30
-rw-r--r--  drivers/mmc/core/core.c              |  15
-rw-r--r--  drivers/mmc/core/quirks.h            |  27
-rw-r--r--  drivers/mmc/core/sd.c                |   2
-rw-r--r--  drivers/mmc/host/Kconfig             |   2
-rw-r--r--  drivers/mmc/host/cqhci.h             |   3
-rw-r--r--  drivers/mmc/host/dw_mmc-bluefield.c  |   2
-rw-r--r--  drivers/mmc/host/dw_mmc-k3.c         |   2
-rw-r--r--  drivers/mmc/host/dw_mmc-pltfm.c      |   5
-rw-r--r--  drivers/mmc/host/dw_mmc-pltfm.h      |   2
-rw-r--r--  drivers/mmc/host/dw_mmc-starfive.c   |   2
-rw-r--r--  drivers/mmc/host/meson-mx-sdhc-mmc.c |   8
-rw-r--r--  drivers/mmc/host/mmci.c              | 208
-rw-r--r--  drivers/mmc/host/mmci.h              |  25
-rw-r--r--  drivers/mmc/host/mmci_stm32_sdmmc.c  | 179
-rw-r--r--  drivers/mmc/host/mtk-sd.c            |  48
-rw-r--r--  drivers/mmc/host/sdhci-msm.c         | 223
-rw-r--r--  drivers/mmc/host/sdhci-pci-core.c    |   1
-rw-r--r--  drivers/mmc/host/sdhci-pci-gli.c     | 406
-rw-r--r--  drivers/mmc/host/sdhci-pci.h         |   2
-rw-r--r--  drivers/mmc/host/sdhci.c             |   4
-rw-r--r--  drivers/mmc/host/sdhci.h             |   7
24 files changed, 956 insertions, 286 deletions
diff --git a/drivers/memstick/host/r592.c b/drivers/memstick/host/r592.c
index 42bfc46842b8..461f5ffd02bc 100644
--- a/drivers/memstick/host/r592.c
+++ b/drivers/memstick/host/r592.c
@@ -44,12 +44,10 @@ static const char *tpc_names[] = {
* memstick_debug_get_tpc_name - debug helper that returns string for
* a TPC number
*/
-const char *memstick_debug_get_tpc_name(int tpc)
+static __maybe_unused const char *memstick_debug_get_tpc_name(int tpc)
{
return tpc_names[tpc-1];
}
-EXPORT_SYMBOL(memstick_debug_get_tpc_name);
-
/* Read a register*/
static inline u32 r592_read_reg(struct r592_device *dev, int address)
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 6d01025f1fcf..f701efb1fa78 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -178,6 +178,7 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
int recovery_mode,
struct mmc_queue *mq);
static void mmc_blk_hsq_req_done(struct mmc_request *mrq);
+static int mmc_spi_err_check(struct mmc_card *card);
static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{
@@ -608,6 +609,11 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp)
return 0;
+ if (mmc_host_is_spi(card->host)) {
+ if (idata->ic.write_flag || r1b_resp || cmd.flags & MMC_RSP_SPI_BUSY)
+ return mmc_spi_err_check(card);
+ return err;
+ }
/* Ensure RPMB/R1B command has completed by polling with CMD13. */
if (idata->rpmb || r1b_resp)
err = mmc_poll_for_busy(card, busy_timeout_ms, false,
@@ -2505,9 +2511,9 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
string_get_size((u64)size, 512, STRING_UNITS_2,
cap_str, sizeof(cap_str));
- pr_info("%s: %s %s %s %s\n",
+ pr_info("%s: %s %s %s%s\n",
md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
- cap_str, md->read_only ? "(ro)" : "");
+ cap_str, md->read_only ? " (ro)" : "");
/* used in ->open, must be set before add_disk: */
if (area_type == MMC_BLK_DATA_AREA_MAIN)
@@ -2899,12 +2905,12 @@ static const struct file_operations mmc_dbg_ext_csd_fops = {
.llseek = default_llseek,
};
-static int mmc_blk_add_debugfs(struct mmc_card *card, struct mmc_blk_data *md)
+static void mmc_blk_add_debugfs(struct mmc_card *card, struct mmc_blk_data *md)
{
struct dentry *root;
if (!card->debugfs_root)
- return 0;
+ return;
root = card->debugfs_root;
@@ -2913,19 +2919,13 @@ static int mmc_blk_add_debugfs(struct mmc_card *card, struct mmc_blk_data *md)
debugfs_create_file_unsafe("status", 0400, root,
card,
&mmc_dbg_card_status_fops);
- if (!md->status_dentry)
- return -EIO;
}
if (mmc_card_mmc(card)) {
md->ext_csd_dentry =
debugfs_create_file("ext_csd", S_IRUSR, root, card,
&mmc_dbg_ext_csd_fops);
- if (!md->ext_csd_dentry)
- return -EIO;
}
-
- return 0;
}
static void mmc_blk_remove_debugfs(struct mmc_card *card,
@@ -2934,22 +2934,17 @@ static void mmc_blk_remove_debugfs(struct mmc_card *card,
if (!card->debugfs_root)
return;
- if (!IS_ERR_OR_NULL(md->status_dentry)) {
- debugfs_remove(md->status_dentry);
- md->status_dentry = NULL;
- }
+ debugfs_remove(md->status_dentry);
+ md->status_dentry = NULL;
- if (!IS_ERR_OR_NULL(md->ext_csd_dentry)) {
- debugfs_remove(md->ext_csd_dentry);
- md->ext_csd_dentry = NULL;
- }
+ debugfs_remove(md->ext_csd_dentry);
+ md->ext_csd_dentry = NULL;
}
#else
-static int mmc_blk_add_debugfs(struct mmc_card *card, struct mmc_blk_data *md)
+static void mmc_blk_add_debugfs(struct mmc_card *card, struct mmc_blk_data *md)
{
- return 0;
}
static void mmc_blk_remove_debugfs(struct mmc_card *card,
diff --git a/drivers/mmc/core/card.h b/drivers/mmc/core/card.h
index cfdd1ff40b86..4edf9057fa79 100644
--- a/drivers/mmc/core/card.h
+++ b/drivers/mmc/core/card.h
@@ -53,6 +53,10 @@ struct mmc_fixup {
unsigned int manfid;
unsigned short oemid;
+ /* Manufacturing date */
+ unsigned short year;
+ unsigned char month;
+
/* SDIO-specific fields. You can use SDIO_ANY_ID here of course */
u16 cis_vendor, cis_device;
@@ -68,6 +72,8 @@ struct mmc_fixup {
#define CID_MANFID_ANY (-1u)
#define CID_OEMID_ANY ((unsigned short) -1)
+#define CID_YEAR_ANY ((unsigned short) -1)
+#define CID_MONTH_ANY ((unsigned char) -1)
#define CID_NAME_ANY (NULL)
#define EXT_CSD_REV_ANY (-1u)
@@ -81,17 +87,21 @@ struct mmc_fixup {
#define CID_MANFID_APACER 0x27
#define CID_MANFID_KINGSTON 0x70
#define CID_MANFID_HYNIX 0x90
+#define CID_MANFID_KINGSTON_SD 0x9F
#define CID_MANFID_NUMONYX 0xFE
#define END_FIXUP { NULL }
-#define _FIXUP_EXT(_name, _manfid, _oemid, _rev_start, _rev_end, \
- _cis_vendor, _cis_device, \
- _fixup, _data, _ext_csd_rev) \
+#define _FIXUP_EXT(_name, _manfid, _oemid, _year, _month, \
+ _rev_start, _rev_end, \
+ _cis_vendor, _cis_device, \
+ _fixup, _data, _ext_csd_rev) \
{ \
.name = (_name), \
.manfid = (_manfid), \
.oemid = (_oemid), \
+ .year = (_year), \
+ .month = (_month), \
.rev_start = (_rev_start), \
.rev_end = (_rev_end), \
.cis_vendor = (_cis_vendor), \
@@ -103,8 +113,8 @@ struct mmc_fixup {
#define MMC_FIXUP_REV(_name, _manfid, _oemid, _rev_start, _rev_end, \
_fixup, _data, _ext_csd_rev) \
- _FIXUP_EXT(_name, _manfid, \
- _oemid, _rev_start, _rev_end, \
+ _FIXUP_EXT(_name, _manfid, _oemid, CID_YEAR_ANY, CID_MONTH_ANY, \
+ _rev_start, _rev_end, \
SDIO_ANY_ID, SDIO_ANY_ID, \
_fixup, _data, _ext_csd_rev) \
@@ -118,8 +128,9 @@ struct mmc_fixup {
_ext_csd_rev)
#define SDIO_FIXUP(_vendor, _device, _fixup, _data) \
- _FIXUP_EXT(CID_NAME_ANY, CID_MANFID_ANY, \
- CID_OEMID_ANY, 0, -1ull, \
+ _FIXUP_EXT(CID_NAME_ANY, CID_MANFID_ANY, CID_OEMID_ANY, \
+ CID_YEAR_ANY, CID_MONTH_ANY, \
+ 0, -1ull, \
_vendor, _device, \
_fixup, _data, EXT_CSD_REV_ANY) \
@@ -264,4 +275,9 @@ static inline int mmc_card_broken_sd_discard(const struct mmc_card *c)
return c->quirks & MMC_QUIRK_BROKEN_SD_DISCARD;
}
+static inline int mmc_card_broken_sd_cache(const struct mmc_card *c)
+{
+ return c->quirks & MMC_QUIRK_BROKEN_SD_CACHE;
+}
+
#endif
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 3d3e0ca52614..ec4108a3e5b9 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -2199,10 +2199,8 @@ int mmc_card_alternative_gpt_sector(struct mmc_card *card, sector_t *gpt_sector)
}
EXPORT_SYMBOL(mmc_card_alternative_gpt_sector);
-void mmc_rescan(struct work_struct *work)
+static void __mmc_rescan(struct mmc_host *host)
{
- struct mmc_host *host =
- container_of(work, struct mmc_host, detect.work);
int i;
if (host->rescan_disable)
@@ -2274,6 +2272,14 @@ void mmc_rescan(struct work_struct *work)
mmc_schedule_delayed_work(&host->detect, HZ);
}
+void mmc_rescan(struct work_struct *work)
+{
+ struct mmc_host *host =
+ container_of(work, struct mmc_host, detect.work);
+
+ __mmc_rescan(host);
+}
+
void mmc_start_host(struct mmc_host *host)
{
host->f_init = max(min(freqs[0], host->f_max), host->f_min);
@@ -2286,7 +2292,8 @@ void mmc_start_host(struct mmc_host *host)
}
mmc_gpiod_request_cd_irq(host);
- _mmc_detect_change(host, 0, false);
+ host->detect_change = 1;
+ __mmc_rescan(host);
}
void __mmc_stop_host(struct mmc_host *host)
diff --git a/drivers/mmc/core/quirks.h b/drivers/mmc/core/quirks.h
index 29b9497936df..32b64b564fb1 100644
--- a/drivers/mmc/core/quirks.h
+++ b/drivers/mmc/core/quirks.h
@@ -54,6 +54,15 @@ static const struct mmc_fixup __maybe_unused mmc_blk_fixups[] = {
MMC_QUIRK_BLK_NO_CMD23),
/*
+ * Kingston Canvas Go! Plus microSD cards never finish SD cache flush.
+ * This has so far only been observed on cards from 11/2019, while new
+ * cards from 2023/05 do not exhibit this behavior.
+ */
+ _FIXUP_EXT("SD64G", CID_MANFID_KINGSTON_SD, 0x5449, 2019, 11,
+ 0, -1ull, SDIO_ANY_ID, SDIO_ANY_ID, add_quirk_sd,
+ MMC_QUIRK_BROKEN_SD_CACHE, EXT_CSD_REV_ANY),
+
+ /*
* Some SD cards lockup while using CMD23 multiblock transfers.
*/
MMC_FIXUP("AF SD", CID_MANFID_ATP, CID_OEMID_ANY, add_quirk_sd,
@@ -101,6 +110,20 @@ static const struct mmc_fixup __maybe_unused mmc_blk_fixups[] = {
MMC_QUIRK_TRIM_BROKEN),
/*
+ * Micron MTFC4GACAJCN-1M advertises TRIM but it does not seem to
+ * support being used to offload WRITE_ZEROES.
+ */
+ MMC_FIXUP("Q2J54A", CID_MANFID_MICRON, 0x014e, add_quirk_mmc,
+ MMC_QUIRK_TRIM_BROKEN),
+
+ /*
+ * Kingston EMMC04G-M627 advertises TRIM but it does not seem to
+ * support being used to offload WRITE_ZEROES.
+ */
+ MMC_FIXUP("M62704", CID_MANFID_KINGSTON, 0x0100, add_quirk_mmc,
+ MMC_QUIRK_TRIM_BROKEN),
+
+ /*
* Some SD cards reports discard support while they don't
*/
MMC_FIXUP(CID_NAME_ANY, CID_MANFID_SANDISK_SD, 0x5344, add_quirk_sd,
@@ -209,6 +232,10 @@ static inline void mmc_fixup_device(struct mmc_card *card,
if (f->of_compatible &&
!mmc_fixup_of_compatible_match(card, f->of_compatible))
continue;
+ if (f->year != CID_YEAR_ANY && f->year != card->cid.year)
+ continue;
+ if (f->month != CID_MONTH_ANY && f->month != card->cid.month)
+ continue;
dev_dbg(&card->dev, "calling %ps\n", f->vendor_fixup);
f->vendor_fixup(card, f->data);
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 72b664ed90cf..246ce027ae0a 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -1170,7 +1170,7 @@ static int sd_parse_ext_reg_perf(struct mmc_card *card, u8 fno, u8 page,
card->ext_perf.feature_support |= SD_EXT_PERF_HOST_MAINT;
/* Cache support at bit 0. */
- if (reg_buf[4] & BIT(0))
+ if ((reg_buf[4] & BIT(0)) && !mmc_card_broken_sd_cache(card))
card->ext_perf.feature_support |= SD_EXT_PERF_CACHE;
/* Command queue support indicated via queue depth bits (0 to 4). */
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 9f793892123c..159a3e9490ae 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -550,7 +550,7 @@ config MMC_SDHCI_MSM
depends on MMC_SDHCI_PLTFM
select MMC_SDHCI_IO_ACCESSORS
select MMC_CQHCI
- select QCOM_SCM if MMC_CRYPTO
+ select QCOM_INLINE_CRYPTO_ENGINE if MMC_CRYPTO
help
This selects the Secure Digital Host Controller Interface (SDHCI)
support present in Qualcomm SOCs. The controller supports
diff --git a/drivers/mmc/host/cqhci.h b/drivers/mmc/host/cqhci.h
index ba9387ed90eb..1a12e40a02e6 100644
--- a/drivers/mmc/host/cqhci.h
+++ b/drivers/mmc/host/cqhci.h
@@ -5,6 +5,7 @@
#define LINUX_MMC_CQHCI_H
#include <linux/compiler.h>
+#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/spinlock_types.h>
#include <linux/types.h>
@@ -23,6 +24,8 @@
/* capabilities */
#define CQHCI_CAP 0x04
#define CQHCI_CAP_CS 0x10000000 /* Crypto Support */
+#define CQHCI_CAP_ITCFMUL GENMASK(15, 12)
+#define CQHCI_ITCFMUL(x) FIELD_GET(CQHCI_CAP_ITCFMUL, (x))
/* configuration */
#define CQHCI_CFG 0x08
diff --git a/drivers/mmc/host/dw_mmc-bluefield.c b/drivers/mmc/host/dw_mmc-bluefield.c
index 10baf122bc15..4747e5698f48 100644
--- a/drivers/mmc/host/dw_mmc-bluefield.c
+++ b/drivers/mmc/host/dw_mmc-bluefield.c
@@ -52,7 +52,7 @@ static int dw_mci_bluefield_probe(struct platform_device *pdev)
static struct platform_driver dw_mci_bluefield_pltfm_driver = {
.probe = dw_mci_bluefield_probe,
- .remove = dw_mci_pltfm_remove,
+ .remove_new = dw_mci_pltfm_remove,
.driver = {
.name = "dwmmc_bluefield",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
diff --git a/drivers/mmc/host/dw_mmc-k3.c b/drivers/mmc/host/dw_mmc-k3.c
index 0311a37dd4ab..e8ee7c43f60b 100644
--- a/drivers/mmc/host/dw_mmc-k3.c
+++ b/drivers/mmc/host/dw_mmc-k3.c
@@ -470,7 +470,7 @@ static const struct dev_pm_ops dw_mci_k3_dev_pm_ops = {
static struct platform_driver dw_mci_k3_pltfm_driver = {
.probe = dw_mci_k3_probe,
- .remove = dw_mci_pltfm_remove,
+ .remove_new = dw_mci_pltfm_remove,
.driver = {
.name = "dwmmc_k3",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
diff --git a/drivers/mmc/host/dw_mmc-pltfm.c b/drivers/mmc/host/dw_mmc-pltfm.c
index 48b7da2b86b3..2353fadceda1 100644
--- a/drivers/mmc/host/dw_mmc-pltfm.c
+++ b/drivers/mmc/host/dw_mmc-pltfm.c
@@ -121,18 +121,17 @@ static int dw_mci_pltfm_probe(struct platform_device *pdev)
return dw_mci_pltfm_register(pdev, drv_data);
}
-int dw_mci_pltfm_remove(struct platform_device *pdev)
+void dw_mci_pltfm_remove(struct platform_device *pdev)
{
struct dw_mci *host = platform_get_drvdata(pdev);
dw_mci_remove(host);
- return 0;
}
EXPORT_SYMBOL_GPL(dw_mci_pltfm_remove);
static struct platform_driver dw_mci_pltfm_driver = {
.probe = dw_mci_pltfm_probe,
- .remove = dw_mci_pltfm_remove,
+ .remove_new = dw_mci_pltfm_remove,
.driver = {
.name = "dw_mmc",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
diff --git a/drivers/mmc/host/dw_mmc-pltfm.h b/drivers/mmc/host/dw_mmc-pltfm.h
index 2d50d7da2e36..64cf7522a3d4 100644
--- a/drivers/mmc/host/dw_mmc-pltfm.h
+++ b/drivers/mmc/host/dw_mmc-pltfm.h
@@ -10,7 +10,7 @@
extern int dw_mci_pltfm_register(struct platform_device *pdev,
const struct dw_mci_drv_data *drv_data);
-extern int dw_mci_pltfm_remove(struct platform_device *pdev);
+extern void dw_mci_pltfm_remove(struct platform_device *pdev);
extern const struct dev_pm_ops dw_mci_pltfm_pmops;
#endif /* _DW_MMC_PLTFM_H_ */
diff --git a/drivers/mmc/host/dw_mmc-starfive.c b/drivers/mmc/host/dw_mmc-starfive.c
index dab1508bf83c..fd05a648a8bb 100644
--- a/drivers/mmc/host/dw_mmc-starfive.c
+++ b/drivers/mmc/host/dw_mmc-starfive.c
@@ -172,7 +172,7 @@ static int dw_mci_starfive_probe(struct platform_device *pdev)
static struct platform_driver dw_mci_starfive_driver = {
.probe = dw_mci_starfive_probe,
- .remove = dw_mci_pltfm_remove,
+ .remove_new = dw_mci_pltfm_remove,
.driver = {
.name = "dwmmc_starfive",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
diff --git a/drivers/mmc/host/meson-mx-sdhc-mmc.c b/drivers/mmc/host/meson-mx-sdhc-mmc.c
index da85c2f2acb8..97168cdfa8e9 100644
--- a/drivers/mmc/host/meson-mx-sdhc-mmc.c
+++ b/drivers/mmc/host/meson-mx-sdhc-mmc.c
@@ -776,6 +776,11 @@ static void meson_mx_sdhc_init_hw(struct mmc_host *mmc)
regmap_write(host->regmap, MESON_SDHC_ISTA, MESON_SDHC_ISTA_ALL_IRQS);
}
+static void meason_mx_mmc_free_host(void *data)
+{
+ mmc_free_host(data);
+}
+
static int meson_mx_sdhc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -788,8 +793,7 @@ static int meson_mx_sdhc_probe(struct platform_device *pdev)
if (!mmc)
return -ENOMEM;
- ret = devm_add_action_or_reset(dev, (void(*)(void *))mmc_free_host,
- mmc);
+ ret = devm_add_action_or_reset(dev, meason_mx_mmc_free_host, mmc);
if (ret) {
dev_err(dev, "Failed to register mmc_free_host action\n");
return ret;
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 696cbef3ff7d..769b34afa835 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -37,6 +37,7 @@
#include <linux/pinctrl/consumer.h>
#include <linux/reset.h>
#include <linux/gpio/consumer.h>
+#include <linux/workqueue.h>
#include <asm/div64.h>
#include <asm/io.h>
@@ -270,6 +271,7 @@ static struct variant_data variant_stm32_sdmmc = {
.datactrl_any_blocksz = true,
.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
.stm32_idmabsize_mask = GENMASK(12, 5),
+ .stm32_idmabsize_align = BIT(5),
.busy_timeout = true,
.busy_detect = true,
.busy_detect_flag = MCI_STM32_BUSYD0,
@@ -296,6 +298,35 @@ static struct variant_data variant_stm32_sdmmcv2 = {
.datactrl_any_blocksz = true,
.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
.stm32_idmabsize_mask = GENMASK(16, 5),
+ .stm32_idmabsize_align = BIT(5),
+ .dma_lli = true,
+ .busy_timeout = true,
+ .busy_detect = true,
+ .busy_detect_flag = MCI_STM32_BUSYD0,
+ .busy_detect_mask = MCI_STM32_BUSYD0ENDMASK,
+ .init = sdmmc_variant_init,
+};
+
+static struct variant_data variant_stm32_sdmmcv3 = {
+ .fifosize = 256 * 4,
+ .fifohalfsize = 128 * 4,
+ .f_max = 267000000,
+ .stm32_clkdiv = true,
+ .cmdreg_cpsm_enable = MCI_CPSM_STM32_ENABLE,
+ .cmdreg_lrsp_crc = MCI_CPSM_STM32_LRSP_CRC,
+ .cmdreg_srsp_crc = MCI_CPSM_STM32_SRSP_CRC,
+ .cmdreg_srsp = MCI_CPSM_STM32_SRSP,
+ .cmdreg_stop = MCI_CPSM_STM32_CMDSTOP,
+ .data_cmd_enable = MCI_CPSM_STM32_CMDTRANS,
+ .irq_pio_mask = MCI_IRQ_PIO_STM32_MASK,
+ .datactrl_first = true,
+ .datacnt_useless = true,
+ .datalength_bits = 25,
+ .datactrl_blocksz = 14,
+ .datactrl_any_blocksz = true,
+ .datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
+ .stm32_idmabsize_mask = GENMASK(16, 6),
+ .stm32_idmabsize_align = BIT(6),
.dma_lli = true,
.busy_timeout = true,
.busy_detect = true,
@@ -654,10 +685,52 @@ static u32 ux500v2_get_dctrl_cfg(struct mmci_host *host)
return MCI_DPSM_ENABLE | (host->data->blksz << 16);
}
-static bool ux500_busy_complete(struct mmci_host *host, u32 status, u32 err_msk)
+static void ux500_busy_clear_mask_done(struct mmci_host *host)
{
void __iomem *base = host->base;
+ writel(host->variant->busy_detect_mask, base + MMCICLEAR);
+ writel(readl(base + MMCIMASK0) &
+ ~host->variant->busy_detect_mask, base + MMCIMASK0);
+ host->busy_state = MMCI_BUSY_DONE;
+ host->busy_status = 0;
+}
+
+/*
+ * ux500_busy_complete() - this will wait until the busy status
+ * goes off, saving any status that occur in the meantime into
+ * host->busy_status until we know the card is not busy any more.
+ * The function returns true when the busy detection is ended
+ * and we should continue processing the command.
+ *
+ * The Ux500 typically fires two IRQs over a busy cycle like this:
+ *
+ * DAT0 busy +-----------------+
+ * | |
+ * DAT0 not busy ----+ +--------
+ *
+ * ^ ^
+ * | |
+ * IRQ1 IRQ2
+ */
+static bool ux500_busy_complete(struct mmci_host *host, struct mmc_command *cmd,
+ u32 status, u32 err_msk)
+{
+ void __iomem *base = host->base;
+ int retries = 10;
+
+ if (status & err_msk) {
+ /* Stop any ongoing busy detection if an error occurs */
+ ux500_busy_clear_mask_done(host);
+ goto out_ret_state;
+ }
+
+ /*
+ * The state transitions are encoded in a state machine crossing
+ * the edges in this switch statement.
+ */
+ switch (host->busy_state) {
+
/*
* Before unmasking for the busy end IRQ, confirm that the
* command was sent successfully. To keep track of having a
@@ -667,19 +740,33 @@ static bool ux500_busy_complete(struct mmci_host *host, u32 status, u32 err_msk)
* Note that, the card may need a couple of clock cycles before
* it starts signaling busy on DAT0, hence re-read the
* MMCISTATUS register here, to allow the busy bit to be set.
- * Potentially we may even need to poll the register for a
- * while, to allow it to be set, but tests indicates that it
- * isn't needed.
*/
- if (!host->busy_status && !(status & err_msk) &&
- (readl(base + MMCISTATUS) & host->variant->busy_detect_flag)) {
- writel(readl(base + MMCIMASK0) |
- host->variant->busy_detect_mask,
- base + MMCIMASK0);
-
+ case MMCI_BUSY_DONE:
+ /*
+ * Save the first status register read to be sure to catch
+ * all bits that may be lost while retrying. If the command
+ * is still busy this will result in assigning 0 to
+ * host->busy_status, which is what it should be in IDLE.
+ */
host->busy_status = status & (MCI_CMDSENT | MCI_CMDRESPEND);
- return false;
- }
+ while (retries) {
+ status = readl(base + MMCISTATUS);
+ /* Keep accumulating status bits */
+ host->busy_status |= status & (MCI_CMDSENT | MCI_CMDRESPEND);
+ if (status & host->variant->busy_detect_flag) {
+ writel(readl(base + MMCIMASK0) |
+ host->variant->busy_detect_mask,
+ base + MMCIMASK0);
+ host->busy_state = MMCI_BUSY_WAITING_FOR_START_IRQ;
+ schedule_delayed_work(&host->ux500_busy_timeout_work,
+ msecs_to_jiffies(cmd->busy_timeout));
+ goto out_ret_state;
+ }
+ retries--;
+ }
+ dev_dbg(mmc_dev(host->mmc), "no busy signalling in time\n");
+ ux500_busy_clear_mask_done(host);
+ break;
/*
* If there is a command in-progress that has been successfully
@@ -692,27 +779,39 @@ static bool ux500_busy_complete(struct mmci_host *host, u32 status, u32 err_msk)
* both the start and the end interrupts needs to be cleared,
* one after the other. So, clear the busy start IRQ here.
*/
- if (host->busy_status &&
- (status & host->variant->busy_detect_flag)) {
- writel(host->variant->busy_detect_mask, base + MMCICLEAR);
- return false;
- }
+ case MMCI_BUSY_WAITING_FOR_START_IRQ:
+ if (status & host->variant->busy_detect_flag) {
+ host->busy_status |= status & (MCI_CMDSENT | MCI_CMDRESPEND);
+ writel(host->variant->busy_detect_mask, base + MMCICLEAR);
+ host->busy_state = MMCI_BUSY_WAITING_FOR_END_IRQ;
+ } else {
+ dev_dbg(mmc_dev(host->mmc),
+ "lost busy status when waiting for busy start IRQ\n");
+ cancel_delayed_work(&host->ux500_busy_timeout_work);
+ ux500_busy_clear_mask_done(host);
+ }
+ break;
- /*
- * If there is a command in-progress that has been successfully
- * sent and the busy bit isn't set, it means we have received
- * the busy end IRQ. Clear and mask the IRQ, then continue to
- * process the command.
- */
- if (host->busy_status) {
- writel(host->variant->busy_detect_mask, base + MMCICLEAR);
+ case MMCI_BUSY_WAITING_FOR_END_IRQ:
+ if (!(status & host->variant->busy_detect_flag)) {
+ host->busy_status |= status & (MCI_CMDSENT | MCI_CMDRESPEND);
+ writel(host->variant->busy_detect_mask, base + MMCICLEAR);
+ cancel_delayed_work(&host->ux500_busy_timeout_work);
+ ux500_busy_clear_mask_done(host);
+ } else {
+ dev_dbg(mmc_dev(host->mmc),
+ "busy status still asserted when handling busy end IRQ - will keep waiting\n");
+ }
+ break;
- writel(readl(base + MMCIMASK0) &
- ~host->variant->busy_detect_mask, base + MMCIMASK0);
- host->busy_status = 0;
+ default:
+ dev_dbg(mmc_dev(host->mmc), "fell through on state %d\n",
+ host->busy_state);
+ break;
}
- return true;
+out_ret_state:
+ return (host->busy_state == MMCI_BUSY_DONE);
}
/*
@@ -1214,6 +1313,7 @@ static void
mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
{
void __iomem *base = host->base;
+ bool busy_resp = cmd->flags & MMC_RSP_BUSY;
unsigned long long clks;
dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n",
@@ -1238,10 +1338,14 @@ mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
c |= host->variant->cmdreg_srsp;
}
- if (host->variant->busy_timeout && cmd->flags & MMC_RSP_BUSY) {
- if (!cmd->busy_timeout)
- cmd->busy_timeout = 10 * MSEC_PER_SEC;
+ host->busy_status = 0;
+ host->busy_state = MMCI_BUSY_DONE;
+ /* Assign a default timeout if the core does not provide one */
+ if (busy_resp && !cmd->busy_timeout)
+ cmd->busy_timeout = 10 * MSEC_PER_SEC;
+
+ if (busy_resp && host->variant->busy_timeout) {
if (cmd->busy_timeout > host->mmc->max_busy_timeout)
clks = (unsigned long long)host->mmc->max_busy_timeout * host->cclk;
else
@@ -1382,7 +1486,7 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
/* Handle busy detection on DAT0 if the variant supports it. */
if (busy_resp && host->variant->busy_detect)
- if (!host->ops->busy_complete(host, status, err_msk))
+ if (!host->ops->busy_complete(host, cmd, status, err_msk))
return;
host->cmd = NULL;
@@ -1429,6 +1533,34 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
}
}
+/*
+ * This busy timeout worker is used to "kick" the command IRQ if a
+ * busy detect IRQ fails to appear in reasonable time. Only used on
+ * variants with busy detection IRQ delivery.
+ */
+static void ux500_busy_timeout_work(struct work_struct *work)
+{
+ struct mmci_host *host = container_of(work, struct mmci_host,
+ ux500_busy_timeout_work.work);
+ unsigned long flags;
+ u32 status;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ if (host->cmd) {
+ dev_dbg(mmc_dev(host->mmc), "timeout waiting for busy IRQ\n");
+
+ /* If we are still busy let's tag on a cmd-timeout error. */
+ status = readl(host->base + MMCISTATUS);
+ if (status & host->variant->busy_detect_flag)
+ status |= MCI_CMDTIMEOUT;
+
+ mmci_cmd_irq(host, host->cmd, status);
+ }
+
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
static int mmci_get_rx_fifocnt(struct mmci_host *host, u32 status, int remain)
{
return remain - (readl(host->base + MMCIFIFOCNT) << 2);
@@ -2243,6 +2375,10 @@ static int mmci_probe(struct amba_device *dev,
goto clk_disable;
}
+ if (host->variant->busy_detect)
+ INIT_DELAYED_WORK(&host->ux500_busy_timeout_work,
+ ux500_busy_timeout_work);
+
writel(MCI_IRQENABLE | variant->start_err, host->base + MMCIMASK0);
amba_set_drvdata(dev, mmc);
@@ -2441,6 +2577,11 @@ static const struct amba_id mmci_ids[] = {
.mask = 0xf0ffffff,
.data = &variant_stm32_sdmmcv2,
},
+ {
+ .id = 0x00353180,
+ .mask = 0xf0ffffff,
+ .data = &variant_stm32_sdmmcv3,
+ },
/* Qualcomm variants */
{
.id = 0x00051180,
@@ -2456,6 +2597,7 @@ static struct amba_driver mmci_driver = {
.drv = {
.name = DRIVER_NAME,
.pm = &mmci_dev_pm_ops,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
.probe = mmci_probe,
.remove = mmci_remove,
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index e1a9b96a3396..253197f132fc 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -218,6 +218,11 @@
#define MCI_STM32_BUSYD0ENDMASK BIT(21)
#define MMCIMASK1 0x040
+
+/* STM32 sdmmc data FIFO threshold register */
+#define MMCI_STM32_FIFOTHRR 0x044
+#define MMCI_STM32_THR_MASK GENMASK(3, 0)
+
#define MMCIFIFOCNT 0x048
#define MMCIFIFO 0x080 /* to 0x0bc */
@@ -227,8 +232,6 @@
#define MMCI_STM32_IDMALLIEN BIT(1)
#define MMCI_STM32_IDMABSIZER 0x054
-#define MMCI_STM32_IDMABNDT_SHIFT 5
-#define MMCI_STM32_IDMABNDT_MASK GENMASK(12, 5)
#define MMCI_STM32_IDMABASE0R 0x058
@@ -262,6 +265,19 @@ struct dma_chan;
struct mmci_host;
/**
+ * enum mmci_busy_state - enumerate the busy detect wait states
+ *
+ * This is used for the state machine waiting for different busy detect
+ * interrupts on hardware that fire a single IRQ for start and end of
+ * the busy detect phase on DAT0.
+ */
+enum mmci_busy_state {
+ MMCI_BUSY_WAITING_FOR_START_IRQ,
+ MMCI_BUSY_WAITING_FOR_END_IRQ,
+ MMCI_BUSY_DONE,
+};
+
+/**
* struct variant_data - MMCI variant-specific quirks
* @clkreg: default value for MCICLOCK register
* @clkreg_enable: enable value for MMCICLOCK register
@@ -361,6 +377,7 @@ struct variant_data {
u32 opendrain;
u8 dma_lli:1;
u32 stm32_idmabsize_mask;
+ u32 stm32_idmabsize_align;
void (*init)(struct mmci_host *host);
};
@@ -380,7 +397,7 @@ struct mmci_host_ops {
void (*dma_error)(struct mmci_host *host);
void (*set_clkreg)(struct mmci_host *host, unsigned int desired);
void (*set_pwrreg)(struct mmci_host *host, unsigned int pwr);
- bool (*busy_complete)(struct mmci_host *host, u32 status, u32 err_msk);
+ bool (*busy_complete)(struct mmci_host *host, struct mmc_command *cmd, u32 status, u32 err_msk);
void (*pre_sig_volt_switch)(struct mmci_host *host);
int (*post_sig_volt_switch)(struct mmci_host *host, struct mmc_ios *ios);
};
@@ -409,6 +426,7 @@ struct mmci_host {
u32 clk_reg;
u32 clk_reg_add;
u32 datactrl_reg;
+ enum mmci_busy_state busy_state;
u32 busy_status;
u32 mask1_reg;
u8 vqmmc_enabled:1;
@@ -437,6 +455,7 @@ struct mmci_host {
void *dma_priv;
s32 next_cookie;
+ struct delayed_work ux500_busy_timeout_work;
};
#define dma_inprogress(host) ((host)->dma_in_progress)
diff --git a/drivers/mmc/host/mmci_stm32_sdmmc.c b/drivers/mmc/host/mmci_stm32_sdmmc.c
index 60bca78a72b1..35067e1e6cd8 100644
--- a/drivers/mmc/host/mmci_stm32_sdmmc.c
+++ b/drivers/mmc/host/mmci_stm32_sdmmc.c
@@ -15,7 +15,6 @@
#include "mmci.h"
#define SDMMC_LLI_BUF_LEN PAGE_SIZE
-#define SDMMC_IDMA_BURST BIT(MMCI_STM32_IDMABNDT_SHIFT)
#define DLYB_CR 0x0
#define DLYB_CR_DEN BIT(0)
@@ -34,6 +33,20 @@
#define DLYB_LNG_TIMEOUT_US 1000
#define SDMMC_VSWEND_TIMEOUT_US 10000
+#define SYSCFG_DLYBSD_CR 0x0
+#define DLYBSD_CR_EN BIT(0)
+#define DLYBSD_CR_RXTAPSEL_MASK GENMASK(6, 1)
+#define DLYBSD_TAPSEL_NB 32
+#define DLYBSD_BYP_EN BIT(16)
+#define DLYBSD_BYP_CMD GENMASK(21, 17)
+#define DLYBSD_ANTIGLITCH_EN BIT(22)
+
+#define SYSCFG_DLYBSD_SR 0x4
+#define DLYBSD_SR_LOCK BIT(0)
+#define DLYBSD_SR_RXTAPSEL_ACK BIT(1)
+
+#define DLYBSD_TIMEOUT_1S_IN_US 1000000
+
struct sdmmc_lli_desc {
u32 idmalar;
u32 idmabase;
@@ -48,10 +61,21 @@ struct sdmmc_idma {
bool use_bounce_buffer;
};
+struct sdmmc_dlyb;
+
+struct sdmmc_tuning_ops {
+ int (*dlyb_enable)(struct sdmmc_dlyb *dlyb);
+ void (*set_input_ck)(struct sdmmc_dlyb *dlyb);
+ int (*tuning_prepare)(struct mmci_host *host);
+ int (*set_cfg)(struct sdmmc_dlyb *dlyb, int unit __maybe_unused,
+ int phase, bool sampler __maybe_unused);
+};
+
struct sdmmc_dlyb {
void __iomem *base;
u32 unit;
u32 max;
+ struct sdmmc_tuning_ops *ops;
};
static int sdmmc_idma_validate_data(struct mmci_host *host,
@@ -69,7 +93,8 @@ static int sdmmc_idma_validate_data(struct mmci_host *host,
idma->use_bounce_buffer = false;
for_each_sg(data->sg, sg, data->sg_len - 1, i) {
if (!IS_ALIGNED(sg->offset, sizeof(u32)) ||
- !IS_ALIGNED(sg->length, SDMMC_IDMA_BURST)) {
+ !IS_ALIGNED(sg->length,
+ host->variant->stm32_idmabsize_align)) {
dev_dbg(mmc_dev(host->mmc),
"unaligned scatterlist: ofst:%x length:%d\n",
data->sg->offset, data->sg->length);
@@ -293,23 +318,13 @@ static void mmci_sdmmc_set_clkreg(struct mmci_host *host, unsigned int desired)
clk |= host->clk_reg_add;
clk |= ddr;
- /*
- * SDMMC_FBCK is selected when an external Delay Block is needed
- * with SDR104 or HS200.
- */
- if (host->mmc->ios.timing >= MMC_TIMING_UHS_SDR50) {
+ if (host->mmc->ios.timing >= MMC_TIMING_UHS_SDR50)
clk |= MCI_STM32_CLK_BUSSPEED;
- if (host->mmc->ios.timing == MMC_TIMING_UHS_SDR104 ||
- host->mmc->ios.timing == MMC_TIMING_MMC_HS200) {
- clk &= ~MCI_STM32_CLK_SEL_MSK;
- clk |= MCI_STM32_CLK_SELFBCK;
- }
- }
mmci_write_clkreg(host, clk);
}
-static void sdmmc_dlyb_input_ck(struct sdmmc_dlyb *dlyb)
+static void sdmmc_dlyb_mp15_input_ck(struct sdmmc_dlyb *dlyb)
{
if (!dlyb || !dlyb->base)
return;
@@ -326,7 +341,8 @@ static void mmci_sdmmc_set_pwrreg(struct mmci_host *host, unsigned int pwr)
/* adds OF options */
pwr = host->pwr_reg_add;
- sdmmc_dlyb_input_ck(dlyb);
+ if (dlyb && dlyb->ops->set_input_ck)
+ dlyb->ops->set_input_ck(dlyb);
if (ios.power_mode == MMC_POWER_OFF) {
/* Only a reset could power-off sdmmc */
@@ -371,6 +387,19 @@ static u32 sdmmc_get_dctrl_cfg(struct mmci_host *host)
datactrl = mmci_dctrl_blksz(host);
+ if (host->hw_revision >= 3) {
+ u32 thr = 0;
+
+ if (host->mmc->ios.timing == MMC_TIMING_UHS_SDR104 ||
+ host->mmc->ios.timing == MMC_TIMING_MMC_HS200) {
+ thr = ffs(min_t(unsigned int, host->data->blksz,
+ host->variant->fifosize));
+ thr = min_t(u32, thr, MMCI_STM32_THR_MASK);
+ }
+
+ writel_relaxed(thr, host->base + MMCI_STM32_FIFOTHRR);
+ }
+
if (host->mmc->card && mmc_card_sdio(host->mmc->card) &&
host->data->blocks == 1)
datactrl |= MCI_DPSM_STM32_MODE_SDIO;
@@ -382,7 +411,8 @@ static u32 sdmmc_get_dctrl_cfg(struct mmci_host *host)
return datactrl;
}
-static bool sdmmc_busy_complete(struct mmci_host *host, u32 status, u32 err_msk)
+static bool sdmmc_busy_complete(struct mmci_host *host, struct mmc_command *cmd,
+ u32 status, u32 err_msk)
{
void __iomem *base = host->base;
u32 busy_d0, busy_d0end, mask, sdmmc_status;
@@ -423,8 +453,15 @@ complete:
return true;
}
-static void sdmmc_dlyb_set_cfgr(struct sdmmc_dlyb *dlyb,
- int unit, int phase, bool sampler)
+static int sdmmc_dlyb_mp15_enable(struct sdmmc_dlyb *dlyb)
+{
+ writel_relaxed(DLYB_CR_DEN, dlyb->base + DLYB_CR);
+
+ return 0;
+}
+
+static int sdmmc_dlyb_mp15_set_cfg(struct sdmmc_dlyb *dlyb,
+ int unit, int phase, bool sampler)
{
u32 cfgr;
@@ -436,16 +473,18 @@ static void sdmmc_dlyb_set_cfgr(struct sdmmc_dlyb *dlyb,
if (!sampler)
writel_relaxed(DLYB_CR_DEN, dlyb->base + DLYB_CR);
+
+ return 0;
}
-static int sdmmc_dlyb_lng_tuning(struct mmci_host *host)
+static int sdmmc_dlyb_mp15_prepare(struct mmci_host *host)
{
struct sdmmc_dlyb *dlyb = host->variant_priv;
u32 cfgr;
int i, lng, ret;
for (i = 0; i <= DLYB_CFGR_UNIT_MAX; i++) {
- sdmmc_dlyb_set_cfgr(dlyb, i, DLYB_CFGR_SEL_MAX, true);
+ dlyb->ops->set_cfg(dlyb, i, DLYB_CFGR_SEL_MAX, true);
ret = readl_relaxed_poll_timeout(dlyb->base + DLYB_CFGR, cfgr,
(cfgr & DLYB_CFGR_LNGF),
@@ -471,14 +510,58 @@ static int sdmmc_dlyb_lng_tuning(struct mmci_host *host)
return 0;
}
+static int sdmmc_dlyb_mp25_enable(struct sdmmc_dlyb *dlyb)
+{
+ u32 cr, sr;
+
+ cr = readl_relaxed(dlyb->base + SYSCFG_DLYBSD_CR);
+ cr |= DLYBSD_CR_EN;
+
+ writel_relaxed(cr, dlyb->base + SYSCFG_DLYBSD_CR);
+
+ return readl_relaxed_poll_timeout(dlyb->base + SYSCFG_DLYBSD_SR,
+ sr, sr & DLYBSD_SR_LOCK, 1,
+ DLYBSD_TIMEOUT_1S_IN_US);
+}
+
+static int sdmmc_dlyb_mp25_set_cfg(struct sdmmc_dlyb *dlyb,
+ int unit __maybe_unused, int phase,
+ bool sampler __maybe_unused)
+{
+ u32 cr, sr;
+
+ cr = readl_relaxed(dlyb->base + SYSCFG_DLYBSD_CR);
+ cr &= ~DLYBSD_CR_RXTAPSEL_MASK;
+ cr |= FIELD_PREP(DLYBSD_CR_RXTAPSEL_MASK, phase);
+
+ writel_relaxed(cr, dlyb->base + SYSCFG_DLYBSD_CR);
+
+ return readl_relaxed_poll_timeout(dlyb->base + SYSCFG_DLYBSD_SR,
+ sr, sr & DLYBSD_SR_RXTAPSEL_ACK, 1,
+ DLYBSD_TIMEOUT_1S_IN_US);
+}
+
+static int sdmmc_dlyb_mp25_prepare(struct mmci_host *host)
+{
+ struct sdmmc_dlyb *dlyb = host->variant_priv;
+
+ dlyb->max = DLYBSD_TAPSEL_NB;
+
+ return 0;
+}
+
static int sdmmc_dlyb_phase_tuning(struct mmci_host *host, u32 opcode)
{
struct sdmmc_dlyb *dlyb = host->variant_priv;
int cur_len = 0, max_len = 0, end_of_len = 0;
- int phase;
+ int phase, ret;
for (phase = 0; phase <= dlyb->max; phase++) {
- sdmmc_dlyb_set_cfgr(dlyb, dlyb->unit, phase, false);
+ ret = dlyb->ops->set_cfg(dlyb, dlyb->unit, phase, false);
+ if (ret) {
+ dev_err(mmc_dev(host->mmc), "tuning config failed\n");
+ return ret;
+ }
if (mmc_send_tuning(host->mmc, opcode, NULL)) {
cur_len = 0;
@@ -496,10 +579,15 @@ static int sdmmc_dlyb_phase_tuning(struct mmci_host *host, u32 opcode)
return -EINVAL;
}
- writel_relaxed(0, dlyb->base + DLYB_CR);
+ if (dlyb->ops->set_input_ck)
+ dlyb->ops->set_input_ck(dlyb);
phase = end_of_len - max_len / 2;
- sdmmc_dlyb_set_cfgr(dlyb, dlyb->unit, phase, false);
+ ret = dlyb->ops->set_cfg(dlyb, dlyb->unit, phase, false);
+ if (ret) {
+ dev_err(mmc_dev(host->mmc), "tuning reconfig failed\n");
+ return ret;
+ }
dev_dbg(mmc_dev(host->mmc), "unit:%d max_dly:%d phase:%d\n",
dlyb->unit, dlyb->max, phase);
@@ -511,12 +599,33 @@ static int sdmmc_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
struct mmci_host *host = mmc_priv(mmc);
struct sdmmc_dlyb *dlyb = host->variant_priv;
+ u32 clk;
+ int ret;
+
+ if ((host->mmc->ios.timing != MMC_TIMING_UHS_SDR104 &&
+ host->mmc->ios.timing != MMC_TIMING_MMC_HS200) ||
+ host->mmc->actual_clock <= 50000000)
+ return 0;
if (!dlyb || !dlyb->base)
return -EINVAL;
- if (sdmmc_dlyb_lng_tuning(host))
- return -EINVAL;
+ ret = dlyb->ops->dlyb_enable(dlyb);
+ if (ret)
+ return ret;
+
+ /*
+ * SDMMC_FBCK is selected when an external Delay Block is needed
+ * with SDR104 or HS200.
+ */
+ clk = host->clk_reg;
+ clk &= ~MCI_STM32_CLK_SEL_MSK;
+ clk |= MCI_STM32_CLK_SELFBCK;
+ mmci_write_clkreg(host, clk);
+
+ ret = dlyb->ops->tuning_prepare(host);
+ if (ret)
+ return ret;
return sdmmc_dlyb_phase_tuning(host, opcode);
}
@@ -574,6 +683,19 @@ static struct mmci_host_ops sdmmc_variant_ops = {
.post_sig_volt_switch = sdmmc_post_sig_volt_switch,
};
+static struct sdmmc_tuning_ops dlyb_tuning_mp15_ops = {
+ .dlyb_enable = sdmmc_dlyb_mp15_enable,
+ .set_input_ck = sdmmc_dlyb_mp15_input_ck,
+ .tuning_prepare = sdmmc_dlyb_mp15_prepare,
+ .set_cfg = sdmmc_dlyb_mp15_set_cfg,
+};
+
+static struct sdmmc_tuning_ops dlyb_tuning_mp25_ops = {
+ .dlyb_enable = sdmmc_dlyb_mp25_enable,
+ .tuning_prepare = sdmmc_dlyb_mp25_prepare,
+ .set_cfg = sdmmc_dlyb_mp25_set_cfg,
+};
+
void sdmmc_variant_init(struct mmci_host *host)
{
struct device_node *np = host->mmc->parent->of_node;
@@ -592,6 +714,11 @@ void sdmmc_variant_init(struct mmci_host *host)
return;
dlyb->base = base_dlyb;
+ if (of_device_is_compatible(np, "st,stm32mp25-sdmmc2"))
+ dlyb->ops = &dlyb_tuning_mp25_ops;
+ else
+ dlyb->ops = &dlyb_tuning_mp15_ops;
+
host->variant_priv = dlyb;
host->mmc_ops->execute_tuning = sdmmc_execute_tuning;
}
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 9785ec91654f..02403ff99e0d 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -473,6 +473,7 @@ struct msdc_host {
struct msdc_tune_para def_tune_para; /* default tune setting */
struct msdc_tune_para saved_tune_para; /* tune result of CMD21/CMD19 */
struct cqhci_host *cq_host;
+ u32 cq_ssc1_time;
};
static const struct mtk_mmc_compatible mt2701_compat = {
@@ -2450,9 +2451,49 @@ static void msdc_hs400_enhanced_strobe(struct mmc_host *mmc,
}
}
+static void msdc_cqe_cit_cal(struct msdc_host *host, u64 timer_ns)
+{
+ struct mmc_host *mmc = mmc_from_priv(host);
+ struct cqhci_host *cq_host = mmc->cqe_private;
+ u8 itcfmul;
+ u64 hclk_freq, value;
+
+ /*
+ * On MediaTek SoCs the MSDC controller's CQE uses msdc_hclk as ITCFVAL
+ * so we multiply/divide the HCLK frequency by ITCFMUL to calculate the
+ * Send Status Command Idle Timer (CIT) value.
+ */
+ hclk_freq = (u64)clk_get_rate(host->h_clk);
+ itcfmul = CQHCI_ITCFMUL(cqhci_readl(cq_host, CQHCI_CAP));
+ switch (itcfmul) {
+ case 0x0:
+ do_div(hclk_freq, 1000);
+ break;
+ case 0x1:
+ do_div(hclk_freq, 100);
+ break;
+ case 0x2:
+ do_div(hclk_freq, 10);
+ break;
+ case 0x3:
+ break;
+ case 0x4:
+ hclk_freq = hclk_freq * 10;
+ break;
+ default:
+ host->cq_ssc1_time = 0x40;
+ return;
+ }
+
+ value = hclk_freq * timer_ns;
+ do_div(value, 1000000000);
+ host->cq_ssc1_time = value;
+}
+
static void msdc_cqe_enable(struct mmc_host *mmc)
{
struct msdc_host *host = mmc_priv(mmc);
+ struct cqhci_host *cq_host = mmc->cqe_private;
/* enable cmdq irq */
writel(MSDC_INT_CMDQ, host->base + MSDC_INTEN);
@@ -2462,6 +2503,9 @@ static void msdc_cqe_enable(struct mmc_host *mmc)
msdc_set_busy_timeout(host, 20 * 1000000000ULL, 0);
/* default read data timeout 1s */
msdc_set_timeout(host, 1000000000ULL, 0);
+
+ /* Set the send status command idle timer */
+ cqhci_writel(cq_host, host->cq_ssc1_time, CQHCI_SSC1);
}
static void msdc_cqe_disable(struct mmc_host *mmc, bool recovery)
@@ -2707,7 +2751,7 @@ static int msdc_drv_probe(struct platform_device *pdev)
/* Support for SDIO eint irq ? */
if ((mmc->pm_caps & MMC_PM_WAKE_SDIO_IRQ) && (mmc->pm_caps & MMC_PM_KEEP_POWER)) {
- host->eint_irq = platform_get_irq_byname(pdev, "sdio_wakeup");
+ host->eint_irq = platform_get_irq_byname_optional(pdev, "sdio_wakeup");
if (host->eint_irq > 0) {
host->pins_eint = pinctrl_lookup_state(host->pinctrl, "state_eint");
if (IS_ERR(host->pins_eint)) {
@@ -2803,6 +2847,8 @@ static int msdc_drv_probe(struct platform_device *pdev)
/* cqhci 16bit length */
/* 0 size, means 65536 so we don't have to -1 here */
mmc->max_seg_size = 64 * 1024;
+ /* Reduce CIT to 0x40 that corresponds to 2.35us */
+ msdc_cqe_cit_cal(host, 2350);
}
ret = devm_request_irq(&pdev->dev, host->irq, msdc_irq,
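
The CIT value programmed by msdc_cqe_cit_cal() in the mtk-sd changes above follows directly from the hclk rate and the ITCFMUL capability field. Below is a minimal standalone sketch of that arithmetic; the 273 MHz hclk and ITCFMUL == 0x2 are illustrative assumptions only (the driver reads both from the hardware at probe time):

    /*
     * Standalone sketch of the CIT arithmetic in msdc_cqe_cit_cal().
     * The 273 MHz hclk and ITCFMUL == 0x2 below are assumed values for
     * illustration; the driver obtains both from clk_get_rate() and the
     * CQHCI_CAP register.
     */
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t cit_from_timer_ns(uint64_t hclk_hz, unsigned int itcfmul,
                                      uint64_t timer_ns)
    {
            /* Scale hclk by ITCFMUL, mirroring the switch in msdc_cqe_cit_cal() */
            switch (itcfmul) {
            case 0x0: hclk_hz /= 1000; break;
            case 0x1: hclk_hz /= 100;  break;
            case 0x2: hclk_hz /= 10;   break;
            case 0x3:                  break;
            case 0x4: hclk_hz *= 10;   break;
            default:  return 0x40;     /* same fallback the driver uses */
            }

            /* CIT ticks = scaled clock rate * requested time / 1e9 ns per second */
            return hclk_hz * timer_ns / 1000000000ULL;
    }

    int main(void)
    {
            /* 273 MHz hclk, ITCFMUL = 0x2, 2350 ns -> 64 == 0x40 (about 2.35 us) */
            printf("CIT = 0x%llx\n",
                   (unsigned long long)cit_from_timer_ns(273000000ULL, 0x2, 2350));
            return 0;
    }

With those assumed inputs the result is 64 (0x40), which lines up with the "Reduce CIT to 0x40 that corresponds to 2.35us" comment where msdc_cqe_cit_cal(host, 2350) is called in msdc_drv_probe().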
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 1877d583fe8c..1c935b5bafe1 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -13,12 +13,13 @@
#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/iopoll.h>
-#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/regulator/consumer.h>
#include <linux/interconnect.h>
#include <linux/pinctrl/consumer.h>
#include <linux/reset.h>
+#include <soc/qcom/ice.h>
+
#include "sdhci-cqhci.h"
#include "sdhci-pltfm.h"
#include "cqhci.h"
@@ -258,12 +259,14 @@ struct sdhci_msm_variant_info {
struct sdhci_msm_host {
struct platform_device *pdev;
void __iomem *core_mem; /* MSM SDCC mapped address */
- void __iomem *ice_mem; /* MSM ICE mapped address (if available) */
int pwr_irq; /* power irq */
struct clk *bus_clk; /* SDHC bus voter clock */
struct clk *xo_clk; /* TCXO clk needed for FLL feature of cm_dll*/
- /* core, iface, cal, sleep, and ice clocks */
- struct clk_bulk_data bulk_clks[5];
+ /* core, iface, cal and sleep clocks */
+ struct clk_bulk_data bulk_clks[4];
+#ifdef CONFIG_MMC_CRYPTO
+ struct qcom_ice *ice;
+#endif
unsigned long clk_rate;
struct mmc_host *mmc;
bool use_14lpp_dll_reset;
@@ -1804,164 +1807,51 @@ out:
#ifdef CONFIG_MMC_CRYPTO
-#define AES_256_XTS_KEY_SIZE 64
-
-/* QCOM ICE registers */
-
-#define QCOM_ICE_REG_VERSION 0x0008
-
-#define QCOM_ICE_REG_FUSE_SETTING 0x0010
-#define QCOM_ICE_FUSE_SETTING_MASK 0x1
-#define QCOM_ICE_FORCE_HW_KEY0_SETTING_MASK 0x2
-#define QCOM_ICE_FORCE_HW_KEY1_SETTING_MASK 0x4
-
-#define QCOM_ICE_REG_BIST_STATUS 0x0070
-#define QCOM_ICE_BIST_STATUS_MASK 0xF0000000
-
-#define QCOM_ICE_REG_ADVANCED_CONTROL 0x1000
-
-#define sdhci_msm_ice_writel(host, val, reg) \
- writel((val), (host)->ice_mem + (reg))
-#define sdhci_msm_ice_readl(host, reg) \
- readl((host)->ice_mem + (reg))
-
-static bool sdhci_msm_ice_supported(struct sdhci_msm_host *msm_host)
-{
- struct device *dev = mmc_dev(msm_host->mmc);
- u32 regval = sdhci_msm_ice_readl(msm_host, QCOM_ICE_REG_VERSION);
- int major = regval >> 24;
- int minor = (regval >> 16) & 0xFF;
- int step = regval & 0xFFFF;
-
- /* For now this driver only supports ICE version 3. */
- if (major != 3) {
- dev_warn(dev, "Unsupported ICE version: v%d.%d.%d\n",
- major, minor, step);
- return false;
- }
-
- dev_info(dev, "Found QC Inline Crypto Engine (ICE) v%d.%d.%d\n",
- major, minor, step);
-
- /* If fuses are blown, ICE might not work in the standard way. */
- regval = sdhci_msm_ice_readl(msm_host, QCOM_ICE_REG_FUSE_SETTING);
- if (regval & (QCOM_ICE_FUSE_SETTING_MASK |
- QCOM_ICE_FORCE_HW_KEY0_SETTING_MASK |
- QCOM_ICE_FORCE_HW_KEY1_SETTING_MASK)) {
- dev_warn(dev, "Fuses are blown; ICE is unusable!\n");
- return false;
- }
- return true;
-}
-
-static inline struct clk *sdhci_msm_ice_get_clk(struct device *dev)
-{
- return devm_clk_get(dev, "ice");
-}
-
static int sdhci_msm_ice_init(struct sdhci_msm_host *msm_host,
struct cqhci_host *cq_host)
{
struct mmc_host *mmc = msm_host->mmc;
struct device *dev = mmc_dev(mmc);
- struct resource *res;
+ struct qcom_ice *ice;
if (!(cqhci_readl(cq_host, CQHCI_CAP) & CQHCI_CAP_CS))
return 0;
- res = platform_get_resource_byname(msm_host->pdev, IORESOURCE_MEM,
- "ice");
- if (!res) {
- dev_warn(dev, "ICE registers not found\n");
- goto disable;
- }
-
- if (!qcom_scm_ice_available()) {
- dev_warn(dev, "ICE SCM interface not found\n");
- goto disable;
+ ice = of_qcom_ice_get(dev);
+ if (ice == ERR_PTR(-EOPNOTSUPP)) {
+ dev_warn(dev, "Disabling inline encryption support\n");
+ ice = NULL;
}
- msm_host->ice_mem = devm_ioremap_resource(dev, res);
- if (IS_ERR(msm_host->ice_mem))
- return PTR_ERR(msm_host->ice_mem);
-
- if (!sdhci_msm_ice_supported(msm_host))
- goto disable;
+ if (IS_ERR_OR_NULL(ice))
+ return PTR_ERR_OR_ZERO(ice);
+ msm_host->ice = ice;
mmc->caps2 |= MMC_CAP2_CRYPTO;
- return 0;
-disable:
- dev_warn(dev, "Disabling inline encryption support\n");
return 0;
}
-static void sdhci_msm_ice_low_power_mode_enable(struct sdhci_msm_host *msm_host)
-{
- u32 regval;
-
- regval = sdhci_msm_ice_readl(msm_host, QCOM_ICE_REG_ADVANCED_CONTROL);
- /*
- * Enable low power mode sequence
- * [0]-0, [1]-0, [2]-0, [3]-E, [4]-0, [5]-0, [6]-0, [7]-0
- */
- regval |= 0x7000;
- sdhci_msm_ice_writel(msm_host, regval, QCOM_ICE_REG_ADVANCED_CONTROL);
-}
-
-static void sdhci_msm_ice_optimization_enable(struct sdhci_msm_host *msm_host)
+static void sdhci_msm_ice_enable(struct sdhci_msm_host *msm_host)
{
- u32 regval;
-
- /* ICE Optimizations Enable Sequence */
- regval = sdhci_msm_ice_readl(msm_host, QCOM_ICE_REG_ADVANCED_CONTROL);
- regval |= 0xD807100;
- /* ICE HPG requires delay before writing */
- udelay(5);
- sdhci_msm_ice_writel(msm_host, regval, QCOM_ICE_REG_ADVANCED_CONTROL);
- udelay(5);
+ if (msm_host->mmc->caps2 & MMC_CAP2_CRYPTO)
+ qcom_ice_enable(msm_host->ice);
}
-/*
- * Wait until the ICE BIST (built-in self-test) has completed.
- *
- * This may be necessary before ICE can be used.
- *
- * Note that we don't really care whether the BIST passed or failed; we really
- * just want to make sure that it isn't still running. This is because (a) the
- * BIST is a FIPS compliance thing that never fails in practice, (b) ICE is
- * documented to reject crypto requests if the BIST fails, so we needn't do it
- * in software too, and (c) properly testing storage encryption requires testing
- * the full storage stack anyway, and not relying on hardware-level self-tests.
- */
-static int sdhci_msm_ice_wait_bist_status(struct sdhci_msm_host *msm_host)
+static __maybe_unused int sdhci_msm_ice_resume(struct sdhci_msm_host *msm_host)
{
- u32 regval;
- int err;
+ if (msm_host->mmc->caps2 & MMC_CAP2_CRYPTO)
+ return qcom_ice_resume(msm_host->ice);
- err = readl_poll_timeout(msm_host->ice_mem + QCOM_ICE_REG_BIST_STATUS,
- regval, !(regval & QCOM_ICE_BIST_STATUS_MASK),
- 50, 5000);
- if (err)
- dev_err(mmc_dev(msm_host->mmc),
- "Timed out waiting for ICE self-test to complete\n");
- return err;
+ return 0;
}
-static void sdhci_msm_ice_enable(struct sdhci_msm_host *msm_host)
+static __maybe_unused int sdhci_msm_ice_suspend(struct sdhci_msm_host *msm_host)
{
- if (!(msm_host->mmc->caps2 & MMC_CAP2_CRYPTO))
- return;
- sdhci_msm_ice_low_power_mode_enable(msm_host);
- sdhci_msm_ice_optimization_enable(msm_host);
- sdhci_msm_ice_wait_bist_status(msm_host);
-}
+ if (msm_host->mmc->caps2 & MMC_CAP2_CRYPTO)
+ return qcom_ice_suspend(msm_host->ice);
-static int __maybe_unused sdhci_msm_ice_resume(struct sdhci_msm_host *msm_host)
-{
- if (!(msm_host->mmc->caps2 & MMC_CAP2_CRYPTO))
- return 0;
- return sdhci_msm_ice_wait_bist_status(msm_host);
+ return 0;
}
/*
@@ -1972,48 +1862,28 @@ static int sdhci_msm_program_key(struct cqhci_host *cq_host,
const union cqhci_crypto_cfg_entry *cfg,
int slot)
{
- struct device *dev = mmc_dev(cq_host->mmc);
+ struct sdhci_host *host = mmc_priv(cq_host->mmc);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
union cqhci_crypto_cap_entry cap;
- union {
- u8 bytes[AES_256_XTS_KEY_SIZE];
- u32 words[AES_256_XTS_KEY_SIZE / sizeof(u32)];
- } key;
- int i;
- int err;
-
- if (!(cfg->config_enable & CQHCI_CRYPTO_CONFIGURATION_ENABLE))
- return qcom_scm_ice_invalidate_key(slot);
/* Only AES-256-XTS has been tested so far. */
cap = cq_host->crypto_cap_array[cfg->crypto_cap_idx];
if (cap.algorithm_id != CQHCI_CRYPTO_ALG_AES_XTS ||
- cap.key_size != CQHCI_CRYPTO_KEY_SIZE_256) {
- dev_err_ratelimited(dev,
- "Unhandled crypto capability; algorithm_id=%d, key_size=%d\n",
- cap.algorithm_id, cap.key_size);
+ cap.key_size != CQHCI_CRYPTO_KEY_SIZE_256)
return -EINVAL;
- }
- memcpy(key.bytes, cfg->crypto_key, AES_256_XTS_KEY_SIZE);
-
- /*
- * The SCM call byte-swaps the 32-bit words of the key. So we have to
- * do the same, in order for the final key be correct.
- */
- for (i = 0; i < ARRAY_SIZE(key.words); i++)
- __cpu_to_be32s(&key.words[i]);
-
- err = qcom_scm_ice_set_key(slot, key.bytes, AES_256_XTS_KEY_SIZE,
- QCOM_SCM_ICE_CIPHER_AES_256_XTS,
- cfg->data_unit_size);
- memzero_explicit(&key, sizeof(key));
- return err;
+ if (cfg->config_enable & CQHCI_CRYPTO_CONFIGURATION_ENABLE)
+ return qcom_ice_program_key(msm_host->ice,
+ QCOM_ICE_CRYPTO_ALG_AES_XTS,
+ QCOM_ICE_CRYPTO_KEY_SIZE_256,
+ cfg->crypto_key,
+ cfg->data_unit_size, slot);
+ else
+ return qcom_ice_evict_key(msm_host->ice, slot);
}
+
#else /* CONFIG_MMC_CRYPTO */
-static inline struct clk *sdhci_msm_ice_get_clk(struct device *dev)
-{
- return NULL;
-}
static inline int sdhci_msm_ice_init(struct sdhci_msm_host *msm_host,
struct cqhci_host *cq_host)
@@ -2025,11 +1895,17 @@ static inline void sdhci_msm_ice_enable(struct sdhci_msm_host *msm_host)
{
}
-static inline int __maybe_unused
+static inline __maybe_unused int
sdhci_msm_ice_resume(struct sdhci_msm_host *msm_host)
{
return 0;
}
+
+static inline __maybe_unused int
+sdhci_msm_ice_suspend(struct sdhci_msm_host *msm_host)
+{
+ return 0;
+}
#endif /* !CONFIG_MMC_CRYPTO */
/*****************************************************************************\
@@ -2633,11 +2509,6 @@ static int sdhci_msm_probe(struct platform_device *pdev)
clk = NULL;
msm_host->bulk_clks[3].clk = clk;
- clk = sdhci_msm_ice_get_clk(&pdev->dev);
- if (IS_ERR(clk))
- clk = NULL;
- msm_host->bulk_clks[4].clk = clk;
-
ret = clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks),
msm_host->bulk_clks);
if (ret)
@@ -2830,7 +2701,7 @@ static __maybe_unused int sdhci_msm_runtime_suspend(struct device *dev)
clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks),
msm_host->bulk_clks);
- return 0;
+ return sdhci_msm_ice_suspend(msm_host);
}
static __maybe_unused int sdhci_msm_runtime_resume(struct device *dev)
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 01975d145200..1c2572c0f012 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -1903,6 +1903,7 @@ static const struct pci_device_id pci_ids[] = {
SDHCI_PCI_DEVICE(GLI, 9750, gl9750),
SDHCI_PCI_DEVICE(GLI, 9755, gl9755),
SDHCI_PCI_DEVICE(GLI, 9763E, gl9763e),
+ SDHCI_PCI_DEVICE(GLI, 9767, gl9767),
SDHCI_PCI_DEVICE_CLASS(AMD, SYSTEM_SDHCI, PCI_CLASS_MASK, amd),
/* Generic SD host controller */
{PCI_DEVICE_CLASS(SYSTEM_SDHCI, PCI_CLASS_MASK)},
diff --git a/drivers/mmc/host/sdhci-pci-gli.c b/drivers/mmc/host/sdhci-pci-gli.c
index 633a8ee8f8c5..ae8c307b7aa7 100644
--- a/drivers/mmc/host/sdhci-pci-gli.c
+++ b/drivers/mmc/host/sdhci-pci-gli.c
@@ -149,6 +149,72 @@
#define PCI_GLI_9755_PM_CTRL 0xFC
#define PCI_GLI_9755_PM_STATE GENMASK(1, 0)
+#define SDHCI_GLI_9767_GM_BURST_SIZE 0x510
+#define SDHCI_GLI_9767_GM_BURST_SIZE_AXI_ALWAYS_SET BIT(8)
+
+#define PCIE_GLI_9767_VHS 0x884
+#define GLI_9767_VHS_REV GENMASK(19, 16)
+#define GLI_9767_VHS_REV_R 0x0
+#define GLI_9767_VHS_REV_M 0x1
+#define GLI_9767_VHS_REV_W 0x2
+
+#define PCIE_GLI_9767_COM_MAILBOX 0x888
+#define PCIE_GLI_9767_COM_MAILBOX_SSC_EN BIT(1)
+
+#define PCIE_GLI_9767_CFG 0x8A0
+#define PCIE_GLI_9767_CFG_LOW_PWR_OFF BIT(12)
+
+#define PCIE_GLI_9767_COMBO_MUX_CTL 0x8C8
+#define PCIE_GLI_9767_COMBO_MUX_CTL_RST_EN BIT(6)
+#define PCIE_GLI_9767_COMBO_MUX_CTL_WAIT_PERST_EN BIT(10)
+
+#define PCIE_GLI_9767_PWR_MACRO_CTL 0x8D0
+#define PCIE_GLI_9767_PWR_MACRO_CTL_LOW_VOLTAGE GENMASK(3, 0)
+#define PCIE_GLI_9767_PWR_MACRO_CTL_LD0_LOW_OUTPUT_VOLTAGE GENMASK(15, 12)
+#define PCIE_GLI_9767_PWR_MACRO_CTL_LD0_LOW_OUTPUT_VOLTAGE_VALUE 0x7
+#define PCIE_GLI_9767_PWR_MACRO_CTL_RCLK_AMPLITUDE_CTL GENMASK(29, 28)
+#define PCIE_GLI_9767_PWR_MACRO_CTL_RCLK_AMPLITUDE_CTL_VALUE 0x3
+
+#define PCIE_GLI_9767_SCR 0x8E0
+#define PCIE_GLI_9767_SCR_AUTO_AXI_W_BURST BIT(6)
+#define PCIE_GLI_9767_SCR_AUTO_AXI_R_BURST BIT(7)
+#define PCIE_GLI_9767_SCR_AXI_REQ BIT(9)
+#define PCIE_GLI_9767_SCR_CARD_DET_PWR_SAVING_EN BIT(10)
+#define PCIE_GLI_9767_SCR_SYSTEM_CLK_SELECT_MODE0 BIT(16)
+#define PCIE_GLI_9767_SCR_SYSTEM_CLK_SELECT_MODE1 BIT(17)
+#define PCIE_GLI_9767_SCR_CORE_PWR_D3_OFF BIT(21)
+#define PCIE_GLI_9767_SCR_CFG_RST_DATA_LINK_DOWN BIT(30)
+
+#define PCIE_GLI_9767_SDHC_CAP 0x91C
+#define PCIE_GLI_9767_SDHC_CAP_SDEI_RESULT BIT(5)
+
+#define PCIE_GLI_9767_SD_PLL_CTL 0x938
+#define PCIE_GLI_9767_SD_PLL_CTL_PLL_LDIV GENMASK(9, 0)
+#define PCIE_GLI_9767_SD_PLL_CTL_PLL_PDIV GENMASK(15, 12)
+#define PCIE_GLI_9767_SD_PLL_CTL_PLL_DIR_EN BIT(16)
+#define PCIE_GLI_9767_SD_PLL_CTL_SSC_EN BIT(19)
+#define PCIE_GLI_9767_SD_PLL_CTL_SSC_STEP_SETTING GENMASK(28, 24)
+
+#define PCIE_GLI_9767_SD_PLL_CTL2 0x93C
+#define PCIE_GLI_9767_SD_PLL_CTL2_PLLSSC_PPM GENMASK(31, 16)
+
+#define PCIE_GLI_9767_SD_EXPRESS_CTL 0x940
+#define PCIE_GLI_9767_SD_EXPRESS_CTL_SDEI_EXE BIT(0)
+#define PCIE_GLI_9767_SD_EXPRESS_CTL_SD_EXPRESS_MODE BIT(1)
+
+#define PCIE_GLI_9767_SD_DATA_MULTI_CTL 0x944
+#define PCIE_GLI_9767_SD_DATA_MULTI_CTL_DISCONNECT_TIME GENMASK(23, 16)
+#define PCIE_GLI_9767_SD_DATA_MULTI_CTL_DISCONNECT_TIME_VALUE 0x64
+
+#define PCIE_GLI_9767_NORMAL_ERR_INT_STATUS_REG2 0x950
+#define PCIE_GLI_9767_NORMAL_ERR_INT_STATUS_REG2_SDEI_COMPLETE BIT(0)
+
+#define PCIE_GLI_9767_NORMAL_ERR_INT_STATUS_EN_REG2 0x954
+#define PCIE_GLI_9767_NORMAL_ERR_INT_STATUS_EN_REG2_SDEI_COMPLETE_STATUS_EN BIT(0)
+
+#define PCIE_GLI_9767_NORMAL_ERR_INT_SIGNAL_EN_REG2 0x958
+#define PCIE_GLI_9767_NORMAL_ERR_INT_SIGNAL_EN_REG2_SDEI_COMPLETE_SIGNAL_EN BIT(0)
+
#define GLI_MAX_TUNING_LOOP 40
/* Genesys Logic chipset */
@@ -693,6 +759,293 @@ static void gl9755_hw_setting(struct sdhci_pci_slot *slot)
gl9755_wt_off(pdev);
}
+static inline void gl9767_vhs_read(struct pci_dev *pdev)
+{
+ u32 vhs_enable;
+ u32 vhs_value;
+
+ pci_read_config_dword(pdev, PCIE_GLI_9767_VHS, &vhs_value);
+ vhs_enable = FIELD_GET(GLI_9767_VHS_REV, vhs_value);
+
+ if (vhs_enable == GLI_9767_VHS_REV_R)
+ return;
+
+ vhs_value &= ~GLI_9767_VHS_REV;
+ vhs_value |= FIELD_PREP(GLI_9767_VHS_REV, GLI_9767_VHS_REV_R);
+
+ pci_write_config_dword(pdev, PCIE_GLI_9767_VHS, vhs_value);
+}
+
+static inline void gl9767_vhs_write(struct pci_dev *pdev)
+{
+ u32 vhs_enable;
+ u32 vhs_value;
+
+ pci_read_config_dword(pdev, PCIE_GLI_9767_VHS, &vhs_value);
+ vhs_enable = FIELD_GET(GLI_9767_VHS_REV, vhs_value);
+
+ if (vhs_enable == GLI_9767_VHS_REV_W)
+ return;
+
+ vhs_value &= ~GLI_9767_VHS_REV;
+ vhs_value |= FIELD_PREP(GLI_9767_VHS_REV, GLI_9767_VHS_REV_W);
+
+ pci_write_config_dword(pdev, PCIE_GLI_9767_VHS, vhs_value);
+}
+
+static bool gl9767_ssc_enable(struct pci_dev *pdev)
+{
+ u32 value;
+ u8 enable;
+
+ gl9767_vhs_write(pdev);
+
+ pci_read_config_dword(pdev, PCIE_GLI_9767_COM_MAILBOX, &value);
+ enable = FIELD_GET(PCIE_GLI_9767_COM_MAILBOX_SSC_EN, value);
+
+ gl9767_vhs_read(pdev);
+
+ return enable;
+}
+
+static void gl9767_set_ssc(struct pci_dev *pdev, u8 enable, u8 step, u16 ppm)
+{
+ u32 pll;
+ u32 ssc;
+
+ gl9767_vhs_write(pdev);
+
+ pci_read_config_dword(pdev, PCIE_GLI_9767_SD_PLL_CTL, &pll);
+ pci_read_config_dword(pdev, PCIE_GLI_9767_SD_PLL_CTL2, &ssc);
+ pll &= ~(PCIE_GLI_9767_SD_PLL_CTL_SSC_STEP_SETTING |
+ PCIE_GLI_9767_SD_PLL_CTL_SSC_EN);
+ ssc &= ~PCIE_GLI_9767_SD_PLL_CTL2_PLLSSC_PPM;
+ pll |= FIELD_PREP(PCIE_GLI_9767_SD_PLL_CTL_SSC_STEP_SETTING, step) |
+ FIELD_PREP(PCIE_GLI_9767_SD_PLL_CTL_SSC_EN, enable);
+ ssc |= FIELD_PREP(PCIE_GLI_9767_SD_PLL_CTL2_PLLSSC_PPM, ppm);
+ pci_write_config_dword(pdev, PCIE_GLI_9767_SD_PLL_CTL2, ssc);
+ pci_write_config_dword(pdev, PCIE_GLI_9767_SD_PLL_CTL, pll);
+
+ gl9767_vhs_read(pdev);
+}
+
+static void gl9767_set_pll(struct pci_dev *pdev, u8 dir, u16 ldiv, u8 pdiv)
+{
+ u32 pll;
+
+ gl9767_vhs_write(pdev);
+
+ pci_read_config_dword(pdev, PCIE_GLI_9767_SD_PLL_CTL, &pll);
+ pll &= ~(PCIE_GLI_9767_SD_PLL_CTL_PLL_LDIV |
+ PCIE_GLI_9767_SD_PLL_CTL_PLL_PDIV |
+ PCIE_GLI_9767_SD_PLL_CTL_PLL_DIR_EN);
+ pll |= FIELD_PREP(PCIE_GLI_9767_SD_PLL_CTL_PLL_LDIV, ldiv) |
+ FIELD_PREP(PCIE_GLI_9767_SD_PLL_CTL_PLL_PDIV, pdiv) |
+ FIELD_PREP(PCIE_GLI_9767_SD_PLL_CTL_PLL_DIR_EN, dir);
+ pci_write_config_dword(pdev, PCIE_GLI_9767_SD_PLL_CTL, pll);
+
+ gl9767_vhs_read(pdev);
+
+	/* wait for the PLL to stabilize */
+ usleep_range(1000, 1100);
+}
+
+static void gl9767_set_ssc_pll_205mhz(struct pci_dev *pdev)
+{
+ bool enable = gl9767_ssc_enable(pdev);
+
+	/* set the PLL to 205 MHz and enable SSC */
+ gl9767_set_ssc(pdev, enable, 0x1F, 0xF5C3);
+ gl9767_set_pll(pdev, 0x1, 0x246, 0x0);
+}
+
+static void gl9767_disable_ssc_pll(struct pci_dev *pdev)
+{
+ u32 pll;
+
+ gl9767_vhs_write(pdev);
+
+ pci_read_config_dword(pdev, PCIE_GLI_9767_SD_PLL_CTL, &pll);
+ pll &= ~(PCIE_GLI_9767_SD_PLL_CTL_PLL_DIR_EN | PCIE_GLI_9767_SD_PLL_CTL_SSC_EN);
+ pci_write_config_dword(pdev, PCIE_GLI_9767_SD_PLL_CTL, pll);
+
+ gl9767_vhs_read(pdev);
+}
+
+static void sdhci_gl9767_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+ struct sdhci_pci_slot *slot = sdhci_priv(host);
+ struct mmc_ios *ios = &host->mmc->ios;
+ struct pci_dev *pdev;
+ u32 value;
+ u16 clk;
+
+ pdev = slot->chip->pdev;
+ host->mmc->actual_clock = 0;
+
+ gl9767_vhs_write(pdev);
+
+ pci_read_config_dword(pdev, PCIE_GLI_9767_CFG, &value);
+ value |= PCIE_GLI_9767_CFG_LOW_PWR_OFF;
+ pci_write_config_dword(pdev, PCIE_GLI_9767_CFG, value);
+
+ gl9767_disable_ssc_pll(pdev);
+ sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
+
+ if (clock == 0)
+ return;
+
+ clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
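+	/*
+	 * For SDR104 the card clock presumably comes straight from the vendor
+	 * PLL running at 205 MHz with SSC rather than from the divided base
+	 * clock, so report 205 MHz as the actual clock.
+	 */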
+ if (clock == 200000000 && ios->timing == MMC_TIMING_UHS_SDR104) {
+ host->mmc->actual_clock = 205000000;
+ gl9767_set_ssc_pll_205mhz(pdev);
+ }
+
+ sdhci_enable_clk(host, clk);
+
+ pci_read_config_dword(pdev, PCIE_GLI_9767_CFG, &value);
+ value &= ~PCIE_GLI_9767_CFG_LOW_PWR_OFF;
+ pci_write_config_dword(pdev, PCIE_GLI_9767_CFG, value);
+
+ gl9767_vhs_read(pdev);
+}
+
+static void gli_set_9767(struct sdhci_host *host)
+{
+ u32 value;
+
+ value = sdhci_readl(host, SDHCI_GLI_9767_GM_BURST_SIZE);
+ value &= ~SDHCI_GLI_9767_GM_BURST_SIZE_AXI_ALWAYS_SET;
+ sdhci_writel(host, value, SDHCI_GLI_9767_GM_BURST_SIZE);
+}
+
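+/*
+ * One-time setup of the vendor power-macro and SCR (AXI burst, clock-select,
+ * power-saving) registers.
+ */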
+static void gl9767_hw_setting(struct sdhci_pci_slot *slot)
+{
+ struct pci_dev *pdev = slot->chip->pdev;
+ u32 value;
+
+ gl9767_vhs_write(pdev);
+
+ pci_read_config_dword(pdev, PCIE_GLI_9767_PWR_MACRO_CTL, &value);
+ value &= ~(PCIE_GLI_9767_PWR_MACRO_CTL_LOW_VOLTAGE |
+ PCIE_GLI_9767_PWR_MACRO_CTL_LD0_LOW_OUTPUT_VOLTAGE |
+ PCIE_GLI_9767_PWR_MACRO_CTL_RCLK_AMPLITUDE_CTL);
+
+ value |= PCIE_GLI_9767_PWR_MACRO_CTL_LOW_VOLTAGE |
+ FIELD_PREP(PCIE_GLI_9767_PWR_MACRO_CTL_LD0_LOW_OUTPUT_VOLTAGE,
+ PCIE_GLI_9767_PWR_MACRO_CTL_LD0_LOW_OUTPUT_VOLTAGE_VALUE) |
+ FIELD_PREP(PCIE_GLI_9767_PWR_MACRO_CTL_RCLK_AMPLITUDE_CTL,
+ PCIE_GLI_9767_PWR_MACRO_CTL_RCLK_AMPLITUDE_CTL_VALUE);
+ pci_write_config_dword(pdev, PCIE_GLI_9767_PWR_MACRO_CTL, value);
+
+ pci_read_config_dword(pdev, PCIE_GLI_9767_SCR, &value);
+ value &= ~(PCIE_GLI_9767_SCR_SYSTEM_CLK_SELECT_MODE0 |
+ PCIE_GLI_9767_SCR_SYSTEM_CLK_SELECT_MODE1 |
+ PCIE_GLI_9767_SCR_CFG_RST_DATA_LINK_DOWN);
+
+ value |= PCIE_GLI_9767_SCR_AUTO_AXI_W_BURST |
+ PCIE_GLI_9767_SCR_AUTO_AXI_R_BURST |
+ PCIE_GLI_9767_SCR_AXI_REQ |
+ PCIE_GLI_9767_SCR_CARD_DET_PWR_SAVING_EN |
+ PCIE_GLI_9767_SCR_CORE_PWR_D3_OFF;
+ pci_write_config_dword(pdev, PCIE_GLI_9767_SCR, value);
+
+ gl9767_vhs_read(pdev);
+}
+
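+/* Re-apply the GM burst-size setting, which a controller reset presumably clears. */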
+static void sdhci_gl9767_reset(struct sdhci_host *host, u8 mask)
+{
+ sdhci_reset(host, mask);
+ gli_set_9767(host);
+}
+
+static int gl9767_init_sd_express(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ struct sdhci_pci_slot *slot = sdhci_priv(host);
+ struct pci_dev *pdev;
+ u32 value;
+ int i;
+
+ pdev = slot->chip->pdev;
+
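+	/*
+	 * If the write-protect switch is set, skip SD Express initialization:
+	 * clear the SD Express timings and keep the card in plain SD mode.
+	 */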
+ if (mmc->ops->get_ro(mmc)) {
+ mmc->ios.timing &= ~(MMC_TIMING_SD_EXP | MMC_TIMING_SD_EXP_1_2V);
+ return 0;
+ }
+
+ gl9767_vhs_write(pdev);
+
+ pci_read_config_dword(pdev, PCIE_GLI_9767_COMBO_MUX_CTL, &value);
+ value &= ~(PCIE_GLI_9767_COMBO_MUX_CTL_RST_EN | PCIE_GLI_9767_COMBO_MUX_CTL_WAIT_PERST_EN);
+ pci_write_config_dword(pdev, PCIE_GLI_9767_COMBO_MUX_CTL, value);
+
+ pci_read_config_dword(pdev, PCIE_GLI_9767_SD_DATA_MULTI_CTL, &value);
+ value &= ~PCIE_GLI_9767_SD_DATA_MULTI_CTL_DISCONNECT_TIME;
+ value |= FIELD_PREP(PCIE_GLI_9767_SD_DATA_MULTI_CTL_DISCONNECT_TIME,
+ PCIE_GLI_9767_SD_DATA_MULTI_CTL_DISCONNECT_TIME_VALUE);
+ pci_write_config_dword(pdev, PCIE_GLI_9767_SD_DATA_MULTI_CTL, value);
+
+ pci_read_config_dword(pdev, PCIE_GLI_9767_NORMAL_ERR_INT_STATUS_REG2, &value);
+ value |= PCIE_GLI_9767_NORMAL_ERR_INT_STATUS_REG2_SDEI_COMPLETE;
+ pci_write_config_dword(pdev, PCIE_GLI_9767_NORMAL_ERR_INT_STATUS_REG2, value);
+
+ pci_read_config_dword(pdev, PCIE_GLI_9767_NORMAL_ERR_INT_STATUS_EN_REG2, &value);
+ value |= PCIE_GLI_9767_NORMAL_ERR_INT_STATUS_EN_REG2_SDEI_COMPLETE_STATUS_EN;
+ pci_write_config_dword(pdev, PCIE_GLI_9767_NORMAL_ERR_INT_STATUS_EN_REG2, value);
+
+ pci_read_config_dword(pdev, PCIE_GLI_9767_NORMAL_ERR_INT_SIGNAL_EN_REG2, &value);
+ value |= PCIE_GLI_9767_NORMAL_ERR_INT_SIGNAL_EN_REG2_SDEI_COMPLETE_SIGNAL_EN;
+ pci_write_config_dword(pdev, PCIE_GLI_9767_NORMAL_ERR_INT_SIGNAL_EN_REG2, value);
+
+ pci_read_config_dword(pdev, PCIE_GLI_9767_CFG, &value);
+ value |= PCIE_GLI_9767_CFG_LOW_PWR_OFF;
+ pci_write_config_dword(pdev, PCIE_GLI_9767_CFG, value);
+
+ value = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+ value &= ~(SDHCI_CLOCK_CARD_EN | SDHCI_CLOCK_PLL_EN);
+ sdhci_writew(host, value, SDHCI_CLOCK_CONTROL);
+
+ value = sdhci_readb(host, SDHCI_POWER_CONTROL);
+ value |= (SDHCI_VDD2_POWER_180 | SDHCI_VDD2_POWER_ON);
+ sdhci_writeb(host, value, SDHCI_POWER_CONTROL);
+
+ pci_read_config_dword(pdev, PCIE_GLI_9767_SD_EXPRESS_CTL, &value);
+ value |= PCIE_GLI_9767_SD_EXPRESS_CTL_SDEI_EXE;
+ pci_write_config_dword(pdev, PCIE_GLI_9767_SD_EXPRESS_CTL, value);
+
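+	/*
+	 * Poll for up to ~20 ms for SDEI completion and acknowledge the
+	 * status bit (presumably write-1-to-clear) once it is seen.
+	 */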
+ for (i = 0; i < 2; i++) {
+ usleep_range(10000, 10100);
+ pci_read_config_dword(pdev, PCIE_GLI_9767_NORMAL_ERR_INT_STATUS_REG2, &value);
+ if (value & PCIE_GLI_9767_NORMAL_ERR_INT_STATUS_REG2_SDEI_COMPLETE) {
+ pci_write_config_dword(pdev, PCIE_GLI_9767_NORMAL_ERR_INT_STATUS_REG2,
+ value);
+ break;
+ }
+ }
+
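+	/*
+	 * If the card acknowledged SD Express initialization, switch the
+	 * controller into SD Express mode; otherwise clear the SD Express
+	 * timings, drop VDD2 power and re-enable the SD card clock so the
+	 * card carries on in plain SD mode.
+	 */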
+ pci_read_config_dword(pdev, PCIE_GLI_9767_SDHC_CAP, &value);
+ if (value & PCIE_GLI_9767_SDHC_CAP_SDEI_RESULT) {
+ pci_read_config_dword(pdev, PCIE_GLI_9767_SD_EXPRESS_CTL, &value);
+ value |= PCIE_GLI_9767_SD_EXPRESS_CTL_SD_EXPRESS_MODE;
+ pci_write_config_dword(pdev, PCIE_GLI_9767_SD_EXPRESS_CTL, value);
+ } else {
+ mmc->ios.timing &= ~(MMC_TIMING_SD_EXP | MMC_TIMING_SD_EXP_1_2V);
+
+ value = sdhci_readb(host, SDHCI_POWER_CONTROL);
+ value &= ~(SDHCI_VDD2_POWER_180 | SDHCI_VDD2_POWER_ON);
+ sdhci_writeb(host, value, SDHCI_POWER_CONTROL);
+
+ value = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+ value |= (SDHCI_CLOCK_CARD_EN | SDHCI_CLOCK_PLL_EN);
+ sdhci_writew(host, value, SDHCI_CLOCK_CONTROL);
+ }
+
+ gl9767_vhs_read(pdev);
+
+ return 0;
+}
+
static int gli_probe_slot_gl9750(struct sdhci_pci_slot *slot)
{
struct sdhci_host *host = slot->host;
@@ -717,6 +1070,21 @@ static int gli_probe_slot_gl9755(struct sdhci_pci_slot *slot)
return 0;
}
+static int gli_probe_slot_gl9767(struct sdhci_pci_slot *slot)
+{
+ struct sdhci_host *host = slot->host;
+
+ gli_set_9767(host);
+ gl9767_hw_setting(slot);
+ gli_pcie_enable_msi(slot);
+	host->mmc->caps2 |= MMC_CAP2_NO_SDIO;
+	host->mmc->caps2 |= MMC_CAP2_SD_EXP;
+ host->mmc_host_ops.init_sd_express = gl9767_init_sd_express;
+ sdhci_enable_v4_mode(host);
+
+ return 0;
+}
+
static void sdhci_gli_voltage_switch(struct sdhci_host *host)
{
/*
@@ -740,6 +1108,25 @@ static void sdhci_gli_voltage_switch(struct sdhci_host *host)
usleep_range(100000, 110000);
}
+static void sdhci_gl9767_voltage_switch(struct sdhci_host *host)
+{
+ /*
+	 * According to the signal voltage switch procedure in Section 3.6.1
+	 * of the SD Host Controller Simplified Spec. 4.20, steps 6~8 are as
+	 * follows:
+	 * (6) Set 1.8V Signal Enable in the Host Control 2 register.
+	 * (7) Wait 5ms. 1.8V voltage regulator shall be stable within this
+	 *     period.
+	 * (8) If 1.8V Signal Enable is cleared by Host Controller, go to
+	 *     step (12).
+	 *
+	 * Wait 5ms after setting 1.8V Signal Enable in the Host Control 2
+	 * register to ensure the bit has been set by the GL9767.
+	 */
+ usleep_range(5000, 5500);
+}
+
static void sdhci_gl9750_reset(struct sdhci_host *host, u8 mask)
{
sdhci_reset(host, mask);
@@ -1150,3 +1537,22 @@ const struct sdhci_pci_fixes sdhci_gl9763e = {
#endif
.add_host = gl9763e_add_host,
};
+
+static const struct sdhci_ops sdhci_gl9767_ops = {
+ .set_clock = sdhci_gl9767_set_clock,
+ .enable_dma = sdhci_pci_enable_dma,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_gl9767_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+ .voltage_switch = sdhci_gl9767_voltage_switch,
+};
+
+const struct sdhci_pci_fixes sdhci_gl9767 = {
+ .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+ .quirks2 = SDHCI_QUIRK2_BROKEN_DDR50,
+ .probe_slot = gli_probe_slot_gl9767,
+ .ops = &sdhci_gl9767_ops,
+#ifdef CONFIG_PM_SLEEP
+ .resume = sdhci_pci_gli_resume,
+#endif
+};
diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
index 3661a224fb04..9c8863956381 100644
--- a/drivers/mmc/host/sdhci-pci.h
+++ b/drivers/mmc/host/sdhci-pci.h
@@ -76,6 +76,7 @@
#define PCI_DEVICE_ID_GLI_9755 0x9755
#define PCI_DEVICE_ID_GLI_9750 0x9750
#define PCI_DEVICE_ID_GLI_9763E 0xe763
+#define PCI_DEVICE_ID_GLI_9767 0x9767
/*
* PCI device class and mask
@@ -195,5 +196,6 @@ extern const struct sdhci_pci_fixes sdhci_o2;
extern const struct sdhci_pci_fixes sdhci_gl9750;
extern const struct sdhci_pci_fixes sdhci_gl9755;
extern const struct sdhci_pci_fixes sdhci_gl9763e;
+extern const struct sdhci_pci_fixes sdhci_gl9767;
#endif /* __SDHCI_PCI_H */
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 3241916141d7..ff41aa56564e 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -1167,6 +1167,8 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
}
}
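+	/*
+	 * Select the DMA configuration (e.g. 64-bit ADMA in v4 mode) before
+	 * the descriptor/SDMA addresses are programmed below, presumably so
+	 * that the address registers are written with the final mode in place.
+	 */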
+ sdhci_config_dma(host);
+
if (host->flags & SDHCI_REQ_USE_DMA) {
int sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);
@@ -1186,8 +1188,6 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
}
}
- sdhci_config_dma(host);
-
if (!(host->flags & SDHCI_REQ_USE_DMA)) {
int flags;
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index f4f2085c274c..f219bdea8f28 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -99,6 +99,13 @@
#define SDHCI_POWER_180 0x0A
#define SDHCI_POWER_300 0x0C
#define SDHCI_POWER_330 0x0E
+/*
+ * VDD2 - UHS2 or PCIe/NVMe
+ * VDD2 power on/off and voltage select
+ */
+#define SDHCI_VDD2_POWER_ON 0x10
+#define SDHCI_VDD2_POWER_120 0x80
+#define SDHCI_VDD2_POWER_180 0xA0
#define SDHCI_BLOCK_GAP_CONTROL 0x2A