Diffstat (limited to 'drivers/spi')
-rw-r--r--  drivers/spi/Kconfig               |   28
-rw-r--r--  drivers/spi/Makefile              |    2
-rw-r--r--  drivers/spi/atmel-quadspi.c       |    9
-rw-r--r--  drivers/spi/spi-aspeed-smc.c      | 1210
-rw-r--r--  drivers/spi/spi-au1550.c          |    4
-rw-r--r--  drivers/spi/spi-cadence-quadspi.c |  164
-rw-r--r--  drivers/spi/spi-cadence.c         |   11
-rw-r--r--  drivers/spi/spi-clps711x.c        |    5
-rw-r--r--  drivers/spi/spi-fsl-lpspi.c       |    2
-rw-r--r--  drivers/spi/spi-fsl-qspi.c        |    4
-rw-r--r--  drivers/spi/spi-img-spfi.c        |    2
-rw-r--r--  drivers/spi/spi-imx.c             |  358
-rw-r--r--  drivers/spi/spi-ingenic.c         |   47
-rw-r--r--  drivers/spi/spi-intel-pci.c       |    1
-rw-r--r--  drivers/spi/spi-intel.c           |   51
-rw-r--r--  drivers/spi/spi-mem.c             |   13
-rw-r--r--  drivers/spi/spi-mpc52xx-psc.c     |    1
-rw-r--r--  drivers/spi/spi-mpc52xx.c         |    3
-rw-r--r--  drivers/spi/spi-mt65xx.c          |  702
-rw-r--r--  drivers/spi/spi-mtk-nor.c         |   12
-rw-r--r--  drivers/spi/spi-mtk-snfi.c        | 1472
-rw-r--r--  drivers/spi/spi-mxs.c             |    3
-rw-r--r--  drivers/spi/spi-omap-uwire.c      |   15
-rw-r--r--  drivers/spi/spi-omap2-mcspi.c     |   19
-rw-r--r--  drivers/spi/spi-rockchip.c        |    8
-rw-r--r--  drivers/spi/spi-rspi.c            |   15
-rw-r--r--  drivers/spi/spi-sprd.c            |    3
-rw-r--r--  drivers/spi/spi-stm32-qspi.c      |   41
-rw-r--r--  drivers/spi/spi-stm32.c           |    3
-rw-r--r--  drivers/spi/spi-sunplus-sp7021.c  |    6
-rw-r--r--  drivers/spi/spi-tegra114.c        |    9
-rw-r--r--  drivers/spi/spi-tegra20-sflash.c  |    6
-rw-r--r--  drivers/spi/spi-tegra20-slink.c   |    6
-rw-r--r--  drivers/spi/spi-ti-qspi.c         |    8
-rw-r--r--  drivers/spi/spi.c                 |   25
-rw-r--r--  drivers/spi/spidev.c              |  104
36 files changed, 3748 insertions, 624 deletions
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index d2815eb361c0..3b1044ebc400 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -101,6 +101,17 @@ config SPI_ARMADA_3700
This enables support for the SPI controller present on the
Marvell Armada 3700 SoCs.
+config SPI_ASPEED_SMC
+ tristate "Aspeed flash controllers in SPI mode"
+ depends on ARCH_ASPEED || COMPILE_TEST
+ depends on OF
+ help
+ This enables support for the Firmware Memory controller (FMC)
+ in the Aspeed AST2600, AST2500 and AST2400 SoCs when attached
+ to SPI NOR chips, and support for the SPI flash memory
+ controller (SPI) for the host firmware. The implementation
+ only supports SPI NOR.
+
config SPI_ATMEL
tristate "Atmel SPI Controller"
depends on ARCH_AT91 || COMPILE_TEST
@@ -414,15 +425,14 @@ config SPI_IMG_SPFI
config SPI_IMX
tristate "Freescale i.MX SPI controllers"
depends on ARCH_MXC || COMPILE_TEST
- select SPI_BITBANG
help
This enables support for the Freescale i.MX SPI controllers.
config SPI_INGENIC
- tristate "Ingenic JZ47xx SoCs SPI controller"
+ tristate "Ingenic SoCs SPI controller"
depends on MACH_INGENIC || COMPILE_TEST
help
- This enables support for the Ingenic JZ47xx SoCs SPI controller.
+ This enables support for the Ingenic SoCs SPI controller.
To compile this driver as a module, choose M here: the module
will be called spi-ingenic.
@@ -590,6 +600,16 @@ config SPI_MTK_NOR
SPI interface as well as several SPI NOR specific instructions
via SPI MEM interface.
+config SPI_MTK_SNFI
+ tristate "MediaTek SPI NAND Flash Interface"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ depends on MTD_NAND_ECC_MEDIATEK
+ help
+ This enables support for SPI-NAND mode on the MediaTek NAND
+ Flash Interface found on MediaTek ARM SoCs. This controller
+ is implemented as a SPI-MEM controller with pipelined ECC
+ capability.
+
config SPI_NPCM_FIU
tristate "Nuvoton NPCM FLASH Interface Unit"
depends on ARCH_NPCM || COMPILE_TEST
@@ -631,7 +651,7 @@ config SPI_OCTEON
config SPI_OMAP_UWIRE
tristate "OMAP1 MicroWire"
- depends on ARCH_OMAP1
+ depends on ARCH_OMAP1 || (ARM && COMPILE_TEST)
select SPI_BITBANG
help
This hooks up to the MicroWire controller on OMAP1 chips.
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 3aa28ed3f761..0f44eb6083a5 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_SPI_ALTERA_CORE) += spi-altera-core.o
obj-$(CONFIG_SPI_ALTERA_DFL) += spi-altera-dfl.o
obj-$(CONFIG_SPI_AR934X) += spi-ar934x.o
obj-$(CONFIG_SPI_ARMADA_3700) += spi-armada-3700.o
+obj-$(CONFIG_SPI_ASPEED_SMC) += spi-aspeed-smc.o
obj-$(CONFIG_SPI_ATMEL) += spi-atmel.o
obj-$(CONFIG_SPI_ATMEL_QUADSPI) += atmel-quadspi.o
obj-$(CONFIG_SPI_AT91_USART) += spi-at91-usart.o
@@ -76,6 +77,7 @@ obj-$(CONFIG_SPI_MPC52xx) += spi-mpc52xx.o
obj-$(CONFIG_SPI_MT65XX) += spi-mt65xx.o
obj-$(CONFIG_SPI_MT7621) += spi-mt7621.o
obj-$(CONFIG_SPI_MTK_NOR) += spi-mtk-nor.o
+obj-$(CONFIG_SPI_MTK_SNFI) += spi-mtk-snfi.o
obj-$(CONFIG_SPI_MXIC) += spi-mxic.o
obj-$(CONFIG_SPI_MXS) += spi-mxs.o
obj-$(CONFIG_SPI_NPCM_FIU) += spi-npcm-fiu.o
diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c
index 92d9610df1fd..480c0c8c18e4 100644
--- a/drivers/spi/atmel-quadspi.c
+++ b/drivers/spi/atmel-quadspi.c
@@ -277,6 +277,9 @@ static int atmel_qspi_find_mode(const struct spi_mem_op *op)
static bool atmel_qspi_supports_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
+ if (!spi_mem_default_supports_op(mem, op))
+ return false;
+
if (atmel_qspi_find_mode(op) < 0)
return false;
@@ -285,12 +288,6 @@ static bool atmel_qspi_supports_op(struct spi_mem *mem,
op->dummy.nbytes == 0)
return false;
- /* DTR ops not supported. */
- if (op->cmd.dtr || op->addr.dtr || op->dummy.dtr || op->data.dtr)
- return false;
- if (op->cmd.nbytes != 1)
- return false;
-
return true;
}
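
(The removed checks are not lost: spi_mem_default_supports_op(), now
called first, is expected to reject DTR ops and multi-byte opcodes on
controllers that do not declare support for them, so the driver-local
tests became redundant.)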
diff --git a/drivers/spi/spi-aspeed-smc.c b/drivers/spi/spi-aspeed-smc.c
new file mode 100644
index 000000000000..496f3e1e9079
--- /dev/null
+++ b/drivers/spi/spi-aspeed-smc.c
@@ -0,0 +1,1210 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * ASPEED FMC/SPI Memory Controller Driver
+ *
+ * Copyright (c) 2015-2022, IBM Corporation.
+ * Copyright (c) 2020, ASPEED Corporation.
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi-mem.h>
+
+#define DEVICE_NAME "spi-aspeed-smc"
+
+/* Type setting Register */
+#define CONFIG_REG 0x0
+#define CONFIG_TYPE_SPI 0x2
+
+/* CE Control Register */
+#define CE_CTRL_REG 0x4
+
+/* CEx Control Register */
+#define CE0_CTRL_REG 0x10
+#define CTRL_IO_MODE_MASK GENMASK(30, 28)
+#define CTRL_IO_SINGLE_DATA 0x0
+#define CTRL_IO_DUAL_DATA BIT(29)
+#define CTRL_IO_QUAD_DATA BIT(30)
+#define CTRL_COMMAND_SHIFT 16
+#define CTRL_IO_ADDRESS_4B BIT(13) /* AST2400 SPI only */
+#define CTRL_IO_DUMMY_SET(dummy) \
+ (((((dummy) >> 2) & 0x1) << 14) | (((dummy) & 0x3) << 6))
+#define CTRL_FREQ_SEL_SHIFT 8
+#define CTRL_FREQ_SEL_MASK GENMASK(11, CTRL_FREQ_SEL_SHIFT)
+#define CTRL_CE_STOP_ACTIVE BIT(2)
+#define CTRL_IO_MODE_CMD_MASK GENMASK(1, 0)
+#define CTRL_IO_MODE_NORMAL 0x0
+#define CTRL_IO_MODE_READ 0x1
+#define CTRL_IO_MODE_WRITE 0x2
+#define CTRL_IO_MODE_USER 0x3
+
+#define CTRL_IO_CMD_MASK 0xf0ff40c3
+
+/* CEx Address Decoding Range Register */
+#define CE0_SEGMENT_ADDR_REG 0x30
+
+/* CEx Read timing compensation register */
+#define CE0_TIMING_COMPENSATION_REG 0x94
+
+enum aspeed_spi_ctl_reg_value {
+ ASPEED_SPI_BASE,
+ ASPEED_SPI_READ,
+ ASPEED_SPI_WRITE,
+ ASPEED_SPI_MAX,
+};
+
+struct aspeed_spi;
+
+struct aspeed_spi_chip {
+ struct aspeed_spi *aspi;
+ u32 cs;
+ void __iomem *ctl;
+ void __iomem *ahb_base;
+ u32 ahb_window_size;
+ u32 ctl_val[ASPEED_SPI_MAX];
+ u32 clk_freq;
+};
+
+struct aspeed_spi_data {
+ u32 ctl0;
+ u32 max_cs;
+ bool hastype;
+ u32 mode_bits;
+ u32 we0;
+ u32 timing;
+ u32 hclk_mask;
+ u32 hdiv_max;
+
+ u32 (*segment_start)(struct aspeed_spi *aspi, u32 reg);
+ u32 (*segment_end)(struct aspeed_spi *aspi, u32 reg);
+ u32 (*segment_reg)(struct aspeed_spi *aspi, u32 start, u32 end);
+ int (*calibrate)(struct aspeed_spi_chip *chip, u32 hdiv,
+ const u8 *golden_buf, u8 *test_buf);
+};
+
+#define ASPEED_SPI_MAX_NUM_CS 5
+
+struct aspeed_spi {
+ const struct aspeed_spi_data *data;
+
+ void __iomem *regs;
+ void __iomem *ahb_base;
+ u32 ahb_base_phy;
+ u32 ahb_window_size;
+ struct device *dev;
+
+ struct clk *clk;
+ u32 clk_freq;
+
+ struct aspeed_spi_chip chips[ASPEED_SPI_MAX_NUM_CS];
+};
+
+static u32 aspeed_spi_get_io_mode(const struct spi_mem_op *op)
+{
+ switch (op->data.buswidth) {
+ case 1:
+ return CTRL_IO_SINGLE_DATA;
+ case 2:
+ return CTRL_IO_DUAL_DATA;
+ case 4:
+ return CTRL_IO_QUAD_DATA;
+ default:
+ return CTRL_IO_SINGLE_DATA;
+ }
+}
+
+static void aspeed_spi_set_io_mode(struct aspeed_spi_chip *chip, u32 io_mode)
+{
+ u32 ctl;
+
+ if (io_mode > 0) {
+ ctl = readl(chip->ctl) & ~CTRL_IO_MODE_MASK;
+ ctl |= io_mode;
+ writel(ctl, chip->ctl);
+ }
+}
+
+static void aspeed_spi_start_user(struct aspeed_spi_chip *chip)
+{
+ u32 ctl = chip->ctl_val[ASPEED_SPI_BASE];
+
+ ctl |= CTRL_IO_MODE_USER | CTRL_CE_STOP_ACTIVE;
+ writel(ctl, chip->ctl);
+
+ ctl &= ~CTRL_CE_STOP_ACTIVE;
+ writel(ctl, chip->ctl);
+}
+
+static void aspeed_spi_stop_user(struct aspeed_spi_chip *chip)
+{
+ u32 ctl = chip->ctl_val[ASPEED_SPI_READ] |
+ CTRL_IO_MODE_USER | CTRL_CE_STOP_ACTIVE;
+
+ writel(ctl, chip->ctl);
+
+ /* Restore defaults */
+ writel(chip->ctl_val[ASPEED_SPI_READ], chip->ctl);
+}
+
+static int aspeed_spi_read_from_ahb(void *buf, void __iomem *src, size_t len)
+{
+ size_t offset = 0;
+
+ if (IS_ALIGNED((uintptr_t)src, sizeof(uintptr_t)) &&
+ IS_ALIGNED((uintptr_t)buf, sizeof(uintptr_t))) {
+ ioread32_rep(src, buf, len >> 2);
+ offset = len & ~0x3;
+ len -= offset;
+ }
+ ioread8_rep(src, (u8 *)buf + offset, len);
+ return 0;
+}
+
+static int aspeed_spi_write_to_ahb(void __iomem *dst, const void *buf, size_t len)
+{
+ size_t offset = 0;
+
+ if (IS_ALIGNED((uintptr_t)dst, sizeof(uintptr_t)) &&
+ IS_ALIGNED((uintptr_t)buf, sizeof(uintptr_t))) {
+ iowrite32_rep(dst, buf, len >> 2);
+ offset = len & ~0x3;
+ len -= offset;
+ }
+ iowrite8_rep(dst, (const u8 *)buf + offset, len);
+ return 0;
+}
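
Both helpers follow the same pattern: one 32-bit burst for the aligned
part of the buffer, then byte accesses for the tail. As a worked example
(sizes assumed), a 10-byte transfer with aligned pointers issues
ioread32_rep()/iowrite32_rep() for 2 words (8 bytes) and falls back to
2 single-byte accesses for the remainder.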
+
+static int aspeed_spi_send_cmd_addr(struct aspeed_spi_chip *chip, u8 addr_nbytes,
+ u64 offset, u32 opcode)
+{
+ __be32 temp;
+ u32 cmdaddr;
+
+ switch (addr_nbytes) {
+ case 3:
+ cmdaddr = offset & 0xFFFFFF;
+ cmdaddr |= opcode << 24;
+
+ temp = cpu_to_be32(cmdaddr);
+ aspeed_spi_write_to_ahb(chip->ahb_base, &temp, 4);
+ break;
+ case 4:
+ temp = cpu_to_be32(offset);
+ aspeed_spi_write_to_ahb(chip->ahb_base, &opcode, 1);
+ aspeed_spi_write_to_ahb(chip->ahb_base, &temp, 4);
+ break;
+ default:
+ WARN_ONCE(1, "Unexpected address width %u", addr_nbytes);
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
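
A minimal sketch of the 3-byte case (opcode and offset assumed): a fast
read command 0x0b at offset 0x123456 packs opcode and address into one
big-endian word:

	cmdaddr = 0x123456 | (0x0b << 24);	/* 0x0b123456 */
	temp = cpu_to_be32(cmdaddr);		/* 0b 12 34 56 on the wire */

so the opcode is shifted out first, followed by the address MSB first.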
+
+static int aspeed_spi_read_reg(struct aspeed_spi_chip *chip,
+ const struct spi_mem_op *op)
+{
+ aspeed_spi_start_user(chip);
+ aspeed_spi_write_to_ahb(chip->ahb_base, &op->cmd.opcode, 1);
+ aspeed_spi_read_from_ahb(op->data.buf.in,
+ chip->ahb_base, op->data.nbytes);
+ aspeed_spi_stop_user(chip);
+ return 0;
+}
+
+static int aspeed_spi_write_reg(struct aspeed_spi_chip *chip,
+ const struct spi_mem_op *op)
+{
+ aspeed_spi_start_user(chip);
+ aspeed_spi_write_to_ahb(chip->ahb_base, &op->cmd.opcode, 1);
+ aspeed_spi_write_to_ahb(chip->ahb_base, op->data.buf.out,
+ op->data.nbytes);
+ aspeed_spi_stop_user(chip);
+ return 0;
+}
+
+static ssize_t aspeed_spi_read_user(struct aspeed_spi_chip *chip,
+ const struct spi_mem_op *op,
+ u64 offset, size_t len, void *buf)
+{
+ int io_mode = aspeed_spi_get_io_mode(op);
+ u8 dummy = 0xFF;
+ int i;
+ int ret;
+
+ aspeed_spi_start_user(chip);
+
+ ret = aspeed_spi_send_cmd_addr(chip, op->addr.nbytes, offset, op->cmd.opcode);
+ if (ret < 0)
+ return ret;
+
+ if (op->dummy.buswidth && op->dummy.nbytes) {
+ for (i = 0; i < op->dummy.nbytes / op->dummy.buswidth; i++)
+ aspeed_spi_write_to_ahb(chip->ahb_base, &dummy, sizeof(dummy));
+ }
+
+ aspeed_spi_set_io_mode(chip, io_mode);
+
+ aspeed_spi_read_from_ahb(buf, chip->ahb_base, len);
+ aspeed_spi_stop_user(chip);
+ return 0;
+}
+
+static ssize_t aspeed_spi_write_user(struct aspeed_spi_chip *chip,
+ const struct spi_mem_op *op)
+{
+ int ret;
+
+ aspeed_spi_start_user(chip);
+ ret = aspeed_spi_send_cmd_addr(chip, op->addr.nbytes, op->addr.val, op->cmd.opcode);
+ if (ret < 0)
+ return ret;
+ aspeed_spi_write_to_ahb(chip->ahb_base, op->data.buf.out, op->data.nbytes);
+ aspeed_spi_stop_user(chip);
+ return 0;
+}
+
+/* support for 1-1-1, 1-1-2 or 1-1-4 */
+static bool aspeed_spi_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+ if (op->cmd.buswidth > 1)
+ return false;
+
+ if (op->addr.nbytes != 0) {
+ if (op->addr.buswidth > 1)
+ return false;
+ if (op->addr.nbytes < 3 || op->addr.nbytes > 4)
+ return false;
+ }
+
+ if (op->dummy.nbytes != 0) {
+ if (op->dummy.buswidth > 1 || op->dummy.nbytes > 7)
+ return false;
+ }
+
+ if (op->data.nbytes != 0 && op->data.buswidth > 4)
+ return false;
+
+ return spi_mem_default_supports_op(mem, op);
+}
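
For reference, a 1-1-4 fast read passes these checks; built with the
generic spi-mem helpers it would look like (opcode and lengths assumed):

	struct spi_mem_op op =
		SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1),
			   SPI_MEM_OP_ADDR(3, offs, 1),
			   SPI_MEM_OP_DUMMY(1, 1),
			   SPI_MEM_OP_DATA_IN(len, buf, 4));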
+
+static const struct aspeed_spi_data ast2400_spi_data;
+
+static int do_aspeed_spi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+ struct aspeed_spi *aspi = spi_controller_get_devdata(mem->spi->master);
+ struct aspeed_spi_chip *chip = &aspi->chips[mem->spi->chip_select];
+ u32 addr_mode, addr_mode_backup;
+ u32 ctl_val;
+ int ret = 0;
+
+ dev_dbg(aspi->dev,
+ "CE%d %s OP %#x mode:%d.%d.%d.%d naddr:%#x ndummies:%#x len:%#x",
+ chip->cs, op->data.dir == SPI_MEM_DATA_IN ? "read" : "write",
+ op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
+ op->dummy.buswidth, op->data.buswidth,
+ op->addr.nbytes, op->dummy.nbytes, op->data.nbytes);
+
+ addr_mode = readl(aspi->regs + CE_CTRL_REG);
+ addr_mode_backup = addr_mode;
+
+ ctl_val = chip->ctl_val[ASPEED_SPI_BASE];
+ ctl_val &= ~CTRL_IO_CMD_MASK;
+
+ ctl_val |= op->cmd.opcode << CTRL_COMMAND_SHIFT;
+
+ /* 4BYTE address mode */
+ if (op->addr.nbytes) {
+ if (op->addr.nbytes == 4)
+ addr_mode |= (0x11 << chip->cs);
+ else
+ addr_mode &= ~(0x11 << chip->cs);
+
+ if (op->addr.nbytes == 4 && chip->aspi->data == &ast2400_spi_data)
+ ctl_val |= CTRL_IO_ADDRESS_4B;
+ }
+
+ if (op->dummy.nbytes)
+ ctl_val |= CTRL_IO_DUMMY_SET(op->dummy.nbytes / op->dummy.buswidth);
+
+ if (op->data.nbytes)
+ ctl_val |= aspeed_spi_get_io_mode(op);
+
+ if (op->data.dir == SPI_MEM_DATA_OUT)
+ ctl_val |= CTRL_IO_MODE_WRITE;
+ else
+ ctl_val |= CTRL_IO_MODE_READ;
+
+ if (addr_mode != addr_mode_backup)
+ writel(addr_mode, aspi->regs + CE_CTRL_REG);
+ writel(ctl_val, chip->ctl);
+
+ if (op->data.dir == SPI_MEM_DATA_IN) {
+ if (!op->addr.nbytes)
+ ret = aspeed_spi_read_reg(chip, op);
+ else
+ ret = aspeed_spi_read_user(chip, op, op->addr.val,
+ op->data.nbytes, op->data.buf.in);
+ } else {
+ if (!op->addr.nbytes)
+ ret = aspeed_spi_write_reg(chip, op);
+ else
+ ret = aspeed_spi_write_user(chip, op);
+ }
+
+ /* Restore defaults */
+ if (addr_mode != addr_mode_backup)
+ writel(addr_mode_backup, aspi->regs + CE_CTRL_REG);
+ writel(chip->ctl_val[ASPEED_SPI_READ], chip->ctl);
+ return ret;
+}
+
+static int aspeed_spi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+ int ret;
+
+ ret = do_aspeed_spi_exec_op(mem, op);
+ if (ret)
+ dev_err(&mem->spi->dev, "operation failed: %d\n", ret);
+ return ret;
+}
+
+static const char *aspeed_spi_get_name(struct spi_mem *mem)
+{
+ struct aspeed_spi *aspi = spi_controller_get_devdata(mem->spi->master);
+ struct device *dev = aspi->dev;
+
+ return devm_kasprintf(dev, GFP_KERNEL, "%s.%d", dev_name(dev), mem->spi->chip_select);
+}
+
+struct aspeed_spi_window {
+ u32 cs;
+ u32 offset;
+ u32 size;
+};
+
+static void aspeed_spi_get_windows(struct aspeed_spi *aspi,
+ struct aspeed_spi_window windows[ASPEED_SPI_MAX_NUM_CS])
+{
+ const struct aspeed_spi_data *data = aspi->data;
+ u32 reg_val;
+ u32 cs;
+
+ for (cs = 0; cs < aspi->data->max_cs; cs++) {
+ reg_val = readl(aspi->regs + CE0_SEGMENT_ADDR_REG + cs * 4);
+ windows[cs].cs = cs;
+ windows[cs].size = data->segment_end(aspi, reg_val) -
+ data->segment_start(aspi, reg_val);
+ windows[cs].offset = cs ? windows[cs - 1].offset + windows[cs - 1].size : 0;
+ dev_vdbg(aspi->dev, "CE%d offset=0x%.8x size=0x%x\n", cs,
+ windows[cs].offset, windows[cs].size);
+ }
+}
+
+/*
+ * On the AST2600, some CE windows are closed by default at reset,
+ * but U-Boot is expected to open them all.
+ */
+static int aspeed_spi_chip_set_default_window(struct aspeed_spi_chip *chip)
+{
+ struct aspeed_spi *aspi = chip->aspi;
+ struct aspeed_spi_window windows[ASPEED_SPI_MAX_NUM_CS] = { 0 };
+ struct aspeed_spi_window *win = &windows[chip->cs];
+
+ /* No segment registers for the AST2400 SPI controller */
+ if (aspi->data == &ast2400_spi_data) {
+ win->offset = 0;
+ win->size = aspi->ahb_window_size;
+ } else {
+ aspeed_spi_get_windows(aspi, windows);
+ }
+
+ chip->ahb_base = aspi->ahb_base + win->offset;
+ chip->ahb_window_size = win->size;
+
+ dev_dbg(aspi->dev, "CE%d default window [ 0x%.8x - 0x%.8x ] %dMB",
+ chip->cs, aspi->ahb_base_phy + win->offset,
+ aspi->ahb_base_phy + win->offset + win->size - 1,
+ win->size >> 20);
+
+ return chip->ahb_window_size ? 0 : -1;
+}
+
+static int aspeed_spi_set_window(struct aspeed_spi *aspi,
+ const struct aspeed_spi_window *win)
+{
+ u32 start = aspi->ahb_base_phy + win->offset;
+ u32 end = start + win->size;
+ void __iomem *seg_reg = aspi->regs + CE0_SEGMENT_ADDR_REG + win->cs * 4;
+ u32 seg_val_backup = readl(seg_reg);
+ u32 seg_val = aspi->data->segment_reg(aspi, start, end);
+
+ if (seg_val == seg_val_backup)
+ return 0;
+
+ writel(seg_val, seg_reg);
+
+ /*
+ * Restore the initial value if something goes wrong, else we could
+ * lose access to the chip.
+ */
+ if (seg_val != readl(seg_reg)) {
+ dev_err(aspi->dev, "CE%d invalid window [ 0x%.8x - 0x%.8x ] %dMB",
+ win->cs, start, end - 1, win->size >> 20);
+ writel(seg_val_backup, seg_reg);
+ return -EIO;
+ }
+
+ if (win->size)
+ dev_dbg(aspi->dev, "CE%d new window [ 0x%.8x - 0x%.8x ] %dMB",
+ win->cs, start, end - 1, win->size >> 20);
+ else
+ dev_dbg(aspi->dev, "CE%d window closed", win->cs);
+
+ return 0;
+}
+
+/*
+ * Yet to be done when possible:
+ * - Align mappings on flash size (we don't have the info)
+ * - ioremap each window, not strictly necessary since the overall window
+ * is correct.
+ */
+static const struct aspeed_spi_data ast2500_spi_data;
+static const struct aspeed_spi_data ast2600_spi_data;
+static const struct aspeed_spi_data ast2600_fmc_data;
+
+static int aspeed_spi_chip_adjust_window(struct aspeed_spi_chip *chip,
+ u32 local_offset, u32 size)
+{
+ struct aspeed_spi *aspi = chip->aspi;
+ struct aspeed_spi_window windows[ASPEED_SPI_MAX_NUM_CS] = { 0 };
+ struct aspeed_spi_window *win = &windows[chip->cs];
+ int ret;
+
+ /* No segment registers for the AST2400 SPI controller */
+ if (aspi->data == &ast2400_spi_data)
+ return 0;
+
+ /*
+ * Due to an HW issue on the AST2500 SPI controller, the CE0
+ * window size should be smaller than the maximum 128MB.
+ */
+ if (aspi->data == &ast2500_spi_data && chip->cs == 0 && size == SZ_128M) {
+ size = 120 << 20;
+ dev_info(aspi->dev, "CE%d window resized to %dMB (AST2500 HW quirk)",
+ chip->cs, size >> 20);
+ }
+
+ /*
+ * The decoding range of the AST2600 SPI controller should be
+ * set to at least 2MB.
+ */
+ if ((aspi->data == &ast2600_spi_data || aspi->data == &ast2600_fmc_data) &&
+ size < SZ_2M) {
+ size = SZ_2M;
+ dev_info(aspi->dev, "CE%d window resized to %dMB (AST2600 Decoding)",
+ chip->cs, size >> 20);
+ }
+
+ aspeed_spi_get_windows(aspi, windows);
+
+ /* Adjust this chip window */
+ win->offset += local_offset;
+ win->size = size;
+
+ if (win->offset + win->size > aspi->ahb_window_size) {
+ win->size = aspi->ahb_window_size - win->offset;
+ dev_warn(aspi->dev, "CE%d window resized to %dMB", chip->cs, win->size >> 20);
+ }
+
+ ret = aspeed_spi_set_window(aspi, win);
+ if (ret)
+ return ret;
+
+ /* Update chip mapping info */
+ chip->ahb_base = aspi->ahb_base + win->offset;
+ chip->ahb_window_size = win->size;
+
+ /*
+ * Also adjust next chip window to make sure that it does not
+ * overlap with the current window.
+ */
+ if (chip->cs < aspi->data->max_cs - 1) {
+ struct aspeed_spi_window *next = &windows[chip->cs + 1];
+
+ /* Change offset and size to keep the same end address */
+ if ((next->offset + next->size) > (win->offset + win->size))
+ next->size = (next->offset + next->size) - (win->offset + win->size);
+ else
+ next->size = 0;
+ next->offset = win->offset + win->size;
+
+ aspeed_spi_set_window(aspi, next);
+ }
+ return 0;
+}
+
+static int aspeed_spi_do_calibration(struct aspeed_spi_chip *chip);
+
+static int aspeed_spi_dirmap_create(struct spi_mem_dirmap_desc *desc)
+{
+ struct aspeed_spi *aspi = spi_controller_get_devdata(desc->mem->spi->master);
+ struct aspeed_spi_chip *chip = &aspi->chips[desc->mem->spi->chip_select];
+ struct spi_mem_op *op = &desc->info.op_tmpl;
+ u32 ctl_val;
+ int ret = 0;
+
+ chip->clk_freq = desc->mem->spi->max_speed_hz;
+
+ /* Only for reads */
+ if (op->data.dir != SPI_MEM_DATA_IN)
+ return -EOPNOTSUPP;
+
+ aspeed_spi_chip_adjust_window(chip, desc->info.offset, desc->info.length);
+
+ if (desc->info.length > chip->ahb_window_size)
+ dev_warn(aspi->dev, "CE%d window (%dMB) too small for mapping",
+ chip->cs, chip->ahb_window_size >> 20);
+
+ /* Define the default IO read settings */
+ ctl_val = readl(chip->ctl) & ~CTRL_IO_CMD_MASK;
+ ctl_val |= aspeed_spi_get_io_mode(op) |
+ op->cmd.opcode << CTRL_COMMAND_SHIFT |
+ CTRL_IO_DUMMY_SET(op->dummy.nbytes / op->dummy.buswidth) |
+ CTRL_IO_MODE_READ;
+
+ /* Tune 4BYTE address mode */
+ if (op->addr.nbytes) {
+ u32 addr_mode = readl(aspi->regs + CE_CTRL_REG);
+
+ if (op->addr.nbytes == 4)
+ addr_mode |= (0x11 << chip->cs);
+ else
+ addr_mode &= ~(0x11 << chip->cs);
+ writel(addr_mode, aspi->regs + CE_CTRL_REG);
+
+		/*
+		 * The AST2400 SPI controller sets the 4BYTE address
+		 * mode in the CE0 Control Register.
+		 */
+ if (op->addr.nbytes == 4 && chip->aspi->data == &ast2400_spi_data)
+ ctl_val |= CTRL_IO_ADDRESS_4B;
+ }
+
+ /* READ mode is the controller default setting */
+ chip->ctl_val[ASPEED_SPI_READ] = ctl_val;
+ writel(chip->ctl_val[ASPEED_SPI_READ], chip->ctl);
+
+ ret = aspeed_spi_do_calibration(chip);
+
+ dev_info(aspi->dev, "CE%d read buswidth:%d [0x%08x]\n",
+ chip->cs, op->data.buswidth, chip->ctl_val[ASPEED_SPI_READ]);
+
+ return ret;
+}
+
+static ssize_t aspeed_spi_dirmap_read(struct spi_mem_dirmap_desc *desc,
+ u64 offset, size_t len, void *buf)
+{
+ struct aspeed_spi *aspi = spi_controller_get_devdata(desc->mem->spi->master);
+ struct aspeed_spi_chip *chip = &aspi->chips[desc->mem->spi->chip_select];
+
+ /* Switch to USER command mode if mapping window is too small */
+ if (chip->ahb_window_size < offset + len) {
+ int ret;
+
+ ret = aspeed_spi_read_user(chip, &desc->info.op_tmpl, offset, len, buf);
+ if (ret < 0)
+ return ret;
+ } else {
+ memcpy_fromio(buf, chip->ahb_base + offset, len);
+ }
+
+ return len;
+}
+
+static const struct spi_controller_mem_ops aspeed_spi_mem_ops = {
+ .supports_op = aspeed_spi_supports_op,
+ .exec_op = aspeed_spi_exec_op,
+ .get_name = aspeed_spi_get_name,
+ .dirmap_create = aspeed_spi_dirmap_create,
+ .dirmap_read = aspeed_spi_dirmap_read,
+};
+
+static void aspeed_spi_chip_set_type(struct aspeed_spi *aspi, unsigned int cs, int type)
+{
+ u32 reg;
+
+ reg = readl(aspi->regs + CONFIG_REG);
+ reg &= ~(0x3 << (cs * 2));
+ reg |= type << (cs * 2);
+ writel(reg, aspi->regs + CONFIG_REG);
+}
+
+static void aspeed_spi_chip_enable(struct aspeed_spi *aspi, unsigned int cs, bool enable)
+{
+ u32 we_bit = BIT(aspi->data->we0 + cs);
+ u32 reg = readl(aspi->regs + CONFIG_REG);
+
+ if (enable)
+ reg |= we_bit;
+ else
+ reg &= ~we_bit;
+ writel(reg, aspi->regs + CONFIG_REG);
+}
+
+static int aspeed_spi_setup(struct spi_device *spi)
+{
+ struct aspeed_spi *aspi = spi_controller_get_devdata(spi->master);
+ const struct aspeed_spi_data *data = aspi->data;
+ unsigned int cs = spi->chip_select;
+ struct aspeed_spi_chip *chip = &aspi->chips[cs];
+
+ chip->aspi = aspi;
+ chip->cs = cs;
+ chip->ctl = aspi->regs + data->ctl0 + cs * 4;
+
+ /* The driver only supports SPI type flash */
+ if (data->hastype)
+ aspeed_spi_chip_set_type(aspi, cs, CONFIG_TYPE_SPI);
+
+ if (aspeed_spi_chip_set_default_window(chip) < 0) {
+ dev_warn(aspi->dev, "CE%d window invalid", cs);
+ return -EINVAL;
+ }
+
+ aspeed_spi_chip_enable(aspi, cs, true);
+
+ chip->ctl_val[ASPEED_SPI_BASE] = CTRL_CE_STOP_ACTIVE | CTRL_IO_MODE_USER;
+
+ dev_dbg(aspi->dev, "CE%d setup done\n", cs);
+ return 0;
+}
+
+static void aspeed_spi_cleanup(struct spi_device *spi)
+{
+ struct aspeed_spi *aspi = spi_controller_get_devdata(spi->master);
+ unsigned int cs = spi->chip_select;
+
+ aspeed_spi_chip_enable(aspi, cs, false);
+
+ dev_dbg(aspi->dev, "CE%d cleanup done\n", cs);
+}
+
+static void aspeed_spi_enable(struct aspeed_spi *aspi, bool enable)
+{
+ int cs;
+
+ for (cs = 0; cs < aspi->data->max_cs; cs++)
+ aspeed_spi_chip_enable(aspi, cs, enable);
+}
+
+static int aspeed_spi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct aspeed_spi_data *data;
+ struct spi_controller *ctlr;
+ struct aspeed_spi *aspi;
+ struct resource *res;
+ int ret;
+
+ data = of_device_get_match_data(&pdev->dev);
+ if (!data)
+ return -ENODEV;
+
+ ctlr = devm_spi_alloc_master(dev, sizeof(*aspi));
+ if (!ctlr)
+ return -ENOMEM;
+
+ aspi = spi_controller_get_devdata(ctlr);
+ platform_set_drvdata(pdev, aspi);
+ aspi->data = data;
+ aspi->dev = dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ aspi->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(aspi->regs)) {
+ dev_err(dev, "missing AHB register window\n");
+ return PTR_ERR(aspi->regs);
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ aspi->ahb_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(aspi->ahb_base)) {
+ dev_err(dev, "missing AHB mapping window\n");
+ return PTR_ERR(aspi->ahb_base);
+ }
+
+ aspi->ahb_window_size = resource_size(res);
+ aspi->ahb_base_phy = res->start;
+
+ aspi->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(aspi->clk)) {
+ dev_err(dev, "missing clock\n");
+ return PTR_ERR(aspi->clk);
+ }
+
+ aspi->clk_freq = clk_get_rate(aspi->clk);
+ if (!aspi->clk_freq) {
+ dev_err(dev, "invalid clock\n");
+ return -EINVAL;
+ }
+
+ ret = clk_prepare_enable(aspi->clk);
+ if (ret) {
+ dev_err(dev, "can not enable the clock\n");
+ return ret;
+ }
+
+ /* IRQ is for DMA, which the driver doesn't support yet */
+
+ ctlr->mode_bits = SPI_RX_DUAL | SPI_TX_DUAL | data->mode_bits;
+ ctlr->bus_num = pdev->id;
+ ctlr->mem_ops = &aspeed_spi_mem_ops;
+ ctlr->setup = aspeed_spi_setup;
+ ctlr->cleanup = aspeed_spi_cleanup;
+ ctlr->num_chipselect = data->max_cs;
+ ctlr->dev.of_node = dev->of_node;
+
+ ret = devm_spi_register_controller(dev, ctlr);
+ if (ret) {
+ dev_err(&pdev->dev, "spi_register_controller failed\n");
+ goto disable_clk;
+ }
+ return 0;
+
+disable_clk:
+ clk_disable_unprepare(aspi->clk);
+ return ret;
+}
+
+static int aspeed_spi_remove(struct platform_device *pdev)
+{
+ struct aspeed_spi *aspi = platform_get_drvdata(pdev);
+
+ aspeed_spi_enable(aspi, false);
+ clk_disable_unprepare(aspi->clk);
+ return 0;
+}
+
+/*
+ * AHB mappings
+ */
+
+/*
+ * The Segment Registers of the AST2400 and AST2500 use an 8MB unit.
+ * The address range is encoded with absolute addresses in the overall
+ * mapping window.
+ */
+static u32 aspeed_spi_segment_start(struct aspeed_spi *aspi, u32 reg)
+{
+ return ((reg >> 16) & 0xFF) << 23;
+}
+
+static u32 aspeed_spi_segment_end(struct aspeed_spi *aspi, u32 reg)
+{
+ return ((reg >> 24) & 0xFF) << 23;
+}
+
+static u32 aspeed_spi_segment_reg(struct aspeed_spi *aspi, u32 start, u32 end)
+{
+ return (((start >> 23) & 0xFF) << 16) | (((end >> 23) & 0xFF) << 24);
+}
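
A worked example of the 8MB-unit encoding (window placement assumed):
a 32MB window starting at 0x20000000 gives

	start = 0x20000000, end = 0x22000000
	start >> 23 = 0x40, end >> 23 = 0x44
	reg = (0x40 << 16) | (0x44 << 24) = 0x44400000

which the two decode helpers above map back to 0x20000000..0x21ffffff.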
+
+/*
+ * The Segment Registers of the AST2600 use a 1MB unit. The address
+ * range is encoded with offsets in the overall mapping window.
+ */
+
+#define AST2600_SEG_ADDR_MASK 0x0ff00000
+
+static u32 aspeed_spi_segment_ast2600_start(struct aspeed_spi *aspi,
+ u32 reg)
+{
+ u32 start_offset = (reg << 16) & AST2600_SEG_ADDR_MASK;
+
+ return aspi->ahb_base_phy + start_offset;
+}
+
+static u32 aspeed_spi_segment_ast2600_end(struct aspeed_spi *aspi,
+ u32 reg)
+{
+ u32 end_offset = reg & AST2600_SEG_ADDR_MASK;
+
+ /* segment is disabled */
+ if (!end_offset)
+ return aspi->ahb_base_phy;
+
+ return aspi->ahb_base_phy + end_offset + 0x100000;
+}
+
+static u32 aspeed_spi_segment_ast2600_reg(struct aspeed_spi *aspi,
+ u32 start, u32 end)
+{
+ /* disable zero size segments */
+ if (start == end)
+ return 0;
+
+ return ((start & AST2600_SEG_ADDR_MASK) >> 16) |
+ ((end - 1) & AST2600_SEG_ADDR_MASK);
+}
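
Same idea with the AST2600 1MB unit (base address assumed to be 256MB
aligned): a 2MB segment at the start of the window encodes as

	reg = ((start & 0x0ff00000) >> 16) | ((end - 1) & 0x0ff00000)
	    = 0x0 | 0x00100000 = 0x00100000

and aspeed_spi_segment_ast2600_end() decodes that back to an end of
base + 0x100000 + 0x100000, i.e. base + 2MB.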
+
+/*
+ * Read timing compensation sequences
+ */
+
+#define CALIBRATE_BUF_SIZE SZ_16K
+
+static bool aspeed_spi_check_reads(struct aspeed_spi_chip *chip,
+ const u8 *golden_buf, u8 *test_buf)
+{
+ int i;
+
+ for (i = 0; i < 10; i++) {
+ memcpy_fromio(test_buf, chip->ahb_base, CALIBRATE_BUF_SIZE);
+ if (memcmp(test_buf, golden_buf, CALIBRATE_BUF_SIZE) != 0) {
+#if defined(VERBOSE_DEBUG)
+ print_hex_dump_bytes(DEVICE_NAME " fail: ", DUMP_PREFIX_NONE,
+ test_buf, 0x100);
+#endif
+ return false;
+ }
+ }
+ return true;
+}
+
+#define FREAD_TPASS(i) (((i) / 2) | (((i) & 1) ? 0 : 8))
+
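
The calibration loop folds the HCLK delay count and the optional 4ns DI
delay into this one value; the mapping, spelled out from the macro for
the first few indexes:

	i              : 0  1  2  3  4  5
	FREAD_TPASS(i) : 8  0  9  1 10  2

i.e. even i selects i/2 HCLK cycles with the DI delay bit (bit 3) set,
odd i the same cycle count without it.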
+/*
+ * The timing register is shared by all devices. Only update for CE0.
+ */
+static int aspeed_spi_calibrate(struct aspeed_spi_chip *chip, u32 hdiv,
+ const u8 *golden_buf, u8 *test_buf)
+{
+ struct aspeed_spi *aspi = chip->aspi;
+ const struct aspeed_spi_data *data = aspi->data;
+ int i;
+ int good_pass = -1, pass_count = 0;
+ u32 shift = (hdiv - 1) << 2;
+ u32 mask = ~(0xfu << shift);
+ u32 fread_timing_val = 0;
+
+ /* Try HCLK delay 0..5, each one with/without delay and look for a
+ * good pair.
+ */
+ for (i = 0; i < 12; i++) {
+ bool pass;
+
+ if (chip->cs == 0) {
+ fread_timing_val &= mask;
+ fread_timing_val |= FREAD_TPASS(i) << shift;
+ writel(fread_timing_val, aspi->regs + data->timing);
+ }
+ pass = aspeed_spi_check_reads(chip, golden_buf, test_buf);
+ dev_dbg(aspi->dev,
+ " * [%08x] %d HCLK delay, %dns DI delay : %s",
+ fread_timing_val, i / 2, (i & 1) ? 0 : 4,
+ pass ? "PASS" : "FAIL");
+ if (pass) {
+ pass_count++;
+ if (pass_count == 3) {
+ good_pass = i - 1;
+ break;
+ }
+ } else {
+ pass_count = 0;
+ }
+ }
+
+ /* No good setting for this frequency */
+ if (good_pass < 0)
+ return -1;
+
+ /* We have at least one pass of margin, let's use first pass */
+ if (chip->cs == 0) {
+ fread_timing_val &= mask;
+ fread_timing_val |= FREAD_TPASS(good_pass) << shift;
+ writel(fread_timing_val, aspi->regs + data->timing);
+ }
+ dev_dbg(aspi->dev, " * -> good is pass %d [0x%08x]",
+ good_pass, fread_timing_val);
+ return 0;
+}
+
+static bool aspeed_spi_check_calib_data(const u8 *test_buf, u32 size)
+{
+ const u32 *tb32 = (const u32 *)test_buf;
+ u32 i, cnt = 0;
+
+	/*
+	 * Check that we have enough words that are neither all 0s nor
+	 * all 1s, so that the calibration can be considered valid.
+	 *
+	 * Use an arbitrary threshold of 64 for now.
+	 */
+ size >>= 2;
+ for (i = 0; i < size; i++) {
+ if (tb32[i] != 0 && tb32[i] != 0xffffffff)
+ cnt++;
+ }
+ return cnt >= 64;
+}
+
+static const u32 aspeed_spi_hclk_divs[] = {
+ 0xf, /* HCLK */
+ 0x7, /* HCLK/2 */
+ 0xe, /* HCLK/3 */
+ 0x6, /* HCLK/4 */
+ 0xd, /* HCLK/5 */
+};
+
+#define ASPEED_SPI_HCLK_DIV(i) \
+ (aspeed_spi_hclk_divs[(i) - 1] << CTRL_FREQ_SEL_SHIFT)
+
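So selecting HCLK/4, for instance, would program the divisor field as
(usage assumed):

	ctl |= ASPEED_SPI_HCLK_DIV(4);	/* 0x6 << CTRL_FREQ_SEL_SHIFT */
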
+static int aspeed_spi_do_calibration(struct aspeed_spi_chip *chip)
+{
+ struct aspeed_spi *aspi = chip->aspi;
+ const struct aspeed_spi_data *data = aspi->data;
+ u32 ahb_freq = aspi->clk_freq;
+ u32 max_freq = chip->clk_freq;
+ u32 ctl_val;
+ u8 *golden_buf = NULL;
+ u8 *test_buf = NULL;
+ int i, rc, best_div = -1;
+
+ dev_dbg(aspi->dev, "calculate timing compensation - AHB freq: %d MHz",
+ ahb_freq / 1000000);
+
+ /*
+	 * Use the lowest frequency to read the check pattern and
+	 * capture the golden calibration data.
+ */
+ ctl_val = chip->ctl_val[ASPEED_SPI_READ] & data->hclk_mask;
+ writel(ctl_val, chip->ctl);
+
+ test_buf = kzalloc(CALIBRATE_BUF_SIZE * 2, GFP_KERNEL);
+ if (!test_buf)
+ return -ENOMEM;
+
+ golden_buf = test_buf + CALIBRATE_BUF_SIZE;
+
+ memcpy_fromio(golden_buf, chip->ahb_base, CALIBRATE_BUF_SIZE);
+ if (!aspeed_spi_check_calib_data(golden_buf, CALIBRATE_BUF_SIZE)) {
+ dev_info(aspi->dev, "Calibration area too uniform, using low speed");
+ goto no_calib;
+ }
+
+#if defined(VERBOSE_DEBUG)
+ print_hex_dump_bytes(DEVICE_NAME " good: ", DUMP_PREFIX_NONE,
+ golden_buf, 0x100);
+#endif
+
+ /* Now we iterate the HCLK dividers until we find our breaking point */
+ for (i = ARRAY_SIZE(aspeed_spi_hclk_divs); i > data->hdiv_max - 1; i--) {
+ u32 tv, freq;
+
+ freq = ahb_freq / i;
+ if (freq > max_freq)
+ continue;
+
+ /* Set the timing */
+ tv = chip->ctl_val[ASPEED_SPI_READ] | ASPEED_SPI_HCLK_DIV(i);
+ writel(tv, chip->ctl);
+ dev_dbg(aspi->dev, "Trying HCLK/%d [%08x] ...", i, tv);
+ rc = data->calibrate(chip, i, golden_buf, test_buf);
+ if (rc == 0)
+ best_div = i;
+ }
+
+	/* Nothing found? */
+ if (best_div < 0) {
+ dev_warn(aspi->dev, "No good frequency, using dumb slow");
+ } else {
+ dev_dbg(aspi->dev, "Found good read timings at HCLK/%d", best_div);
+
+ /* Record the freq */
+ for (i = 0; i < ASPEED_SPI_MAX; i++)
+ chip->ctl_val[i] = (chip->ctl_val[i] & data->hclk_mask) |
+ ASPEED_SPI_HCLK_DIV(best_div);
+ }
+
+no_calib:
+ writel(chip->ctl_val[ASPEED_SPI_READ], chip->ctl);
+ kfree(test_buf);
+ return 0;
+}
+
+#define TIMING_DELAY_DI BIT(3)
+#define TIMING_DELAY_HCYCLE_MAX 5
+#define TIMING_REG_AST2600(chip) \
+ ((chip)->aspi->regs + (chip)->aspi->data->timing + \
+ (chip)->cs * 4)
+
+static int aspeed_spi_ast2600_calibrate(struct aspeed_spi_chip *chip, u32 hdiv,
+ const u8 *golden_buf, u8 *test_buf)
+{
+ struct aspeed_spi *aspi = chip->aspi;
+ int hcycle;
+ u32 shift = (hdiv - 2) << 3;
+ u32 mask = ~(0xfu << shift);
+ u32 fread_timing_val = 0;
+
+ for (hcycle = 0; hcycle <= TIMING_DELAY_HCYCLE_MAX; hcycle++) {
+ int delay_ns;
+ bool pass = false;
+
+ fread_timing_val &= mask;
+ fread_timing_val |= hcycle << shift;
+
+ /* no DI input delay first */
+ writel(fread_timing_val, TIMING_REG_AST2600(chip));
+ pass = aspeed_spi_check_reads(chip, golden_buf, test_buf);
+ dev_dbg(aspi->dev,
+ " * [%08x] %d HCLK delay, DI delay none : %s",
+ fread_timing_val, hcycle, pass ? "PASS" : "FAIL");
+ if (pass)
+ return 0;
+
+ /* Add DI input delays */
+ fread_timing_val &= mask;
+ fread_timing_val |= (TIMING_DELAY_DI | hcycle) << shift;
+
+ for (delay_ns = 0; delay_ns < 0x10; delay_ns++) {
+ fread_timing_val &= ~(0xf << (4 + shift));
+ fread_timing_val |= delay_ns << (4 + shift);
+
+ writel(fread_timing_val, TIMING_REG_AST2600(chip));
+ pass = aspeed_spi_check_reads(chip, golden_buf, test_buf);
+ dev_dbg(aspi->dev,
+ " * [%08x] %d HCLK delay, DI delay %d.%dns : %s",
+ fread_timing_val, hcycle, (delay_ns + 1) / 2,
+ (delay_ns + 1) & 1 ? 5 : 0, pass ? "PASS" : "FAIL");
+ /*
+ * TODO: This is optimistic. We should look
+ * for a working interval and save the middle
+ * value in the read timing register.
+ */
+ if (pass)
+ return 0;
+ }
+ }
+
+ /* No good setting for this frequency */
+ return -1;
+}
+
+/*
+ * Platform definitions
+ */
+static const struct aspeed_spi_data ast2400_fmc_data = {
+ .max_cs = 5,
+ .hastype = true,
+ .we0 = 16,
+ .ctl0 = CE0_CTRL_REG,
+ .timing = CE0_TIMING_COMPENSATION_REG,
+ .hclk_mask = 0xfffff0ff,
+ .hdiv_max = 1,
+ .calibrate = aspeed_spi_calibrate,
+ .segment_start = aspeed_spi_segment_start,
+ .segment_end = aspeed_spi_segment_end,
+ .segment_reg = aspeed_spi_segment_reg,
+};
+
+static const struct aspeed_spi_data ast2400_spi_data = {
+ .max_cs = 1,
+ .hastype = false,
+ .we0 = 0,
+ .ctl0 = 0x04,
+ .timing = 0x14,
+ .hclk_mask = 0xfffff0ff,
+ .hdiv_max = 1,
+ .calibrate = aspeed_spi_calibrate,
+ /* No segment registers */
+};
+
+static const struct aspeed_spi_data ast2500_fmc_data = {
+ .max_cs = 3,
+ .hastype = true,
+ .we0 = 16,
+ .ctl0 = CE0_CTRL_REG,
+ .timing = CE0_TIMING_COMPENSATION_REG,
+ .hclk_mask = 0xffffd0ff,
+ .hdiv_max = 1,
+ .calibrate = aspeed_spi_calibrate,
+ .segment_start = aspeed_spi_segment_start,
+ .segment_end = aspeed_spi_segment_end,
+ .segment_reg = aspeed_spi_segment_reg,
+};
+
+static const struct aspeed_spi_data ast2500_spi_data = {
+ .max_cs = 2,
+ .hastype = false,
+ .we0 = 16,
+ .ctl0 = CE0_CTRL_REG,
+ .timing = CE0_TIMING_COMPENSATION_REG,
+ .hclk_mask = 0xffffd0ff,
+ .hdiv_max = 1,
+ .calibrate = aspeed_spi_calibrate,
+ .segment_start = aspeed_spi_segment_start,
+ .segment_end = aspeed_spi_segment_end,
+ .segment_reg = aspeed_spi_segment_reg,
+};
+
+static const struct aspeed_spi_data ast2600_fmc_data = {
+ .max_cs = 3,
+ .hastype = false,
+ .mode_bits = SPI_RX_QUAD | SPI_TX_QUAD,
+ .we0 = 16,
+ .ctl0 = CE0_CTRL_REG,
+ .timing = CE0_TIMING_COMPENSATION_REG,
+ .hclk_mask = 0xf0fff0ff,
+ .hdiv_max = 2,
+ .calibrate = aspeed_spi_ast2600_calibrate,
+ .segment_start = aspeed_spi_segment_ast2600_start,
+ .segment_end = aspeed_spi_segment_ast2600_end,
+ .segment_reg = aspeed_spi_segment_ast2600_reg,
+};
+
+static const struct aspeed_spi_data ast2600_spi_data = {
+ .max_cs = 2,
+ .hastype = false,
+ .mode_bits = SPI_RX_QUAD | SPI_TX_QUAD,
+ .we0 = 16,
+ .ctl0 = CE0_CTRL_REG,
+ .timing = CE0_TIMING_COMPENSATION_REG,
+ .hclk_mask = 0xf0fff0ff,
+ .hdiv_max = 2,
+ .calibrate = aspeed_spi_ast2600_calibrate,
+ .segment_start = aspeed_spi_segment_ast2600_start,
+ .segment_end = aspeed_spi_segment_ast2600_end,
+ .segment_reg = aspeed_spi_segment_ast2600_reg,
+};
+
+static const struct of_device_id aspeed_spi_matches[] = {
+ { .compatible = "aspeed,ast2400-fmc", .data = &ast2400_fmc_data },
+ { .compatible = "aspeed,ast2400-spi", .data = &ast2400_spi_data },
+ { .compatible = "aspeed,ast2500-fmc", .data = &ast2500_fmc_data },
+ { .compatible = "aspeed,ast2500-spi", .data = &ast2500_spi_data },
+ { .compatible = "aspeed,ast2600-fmc", .data = &ast2600_fmc_data },
+ { .compatible = "aspeed,ast2600-spi", .data = &ast2600_spi_data },
+ { }
+};
+MODULE_DEVICE_TABLE(of, aspeed_spi_matches);
+
+static struct platform_driver aspeed_spi_driver = {
+ .probe = aspeed_spi_probe,
+ .remove = aspeed_spi_remove,
+ .driver = {
+ .name = DEVICE_NAME,
+ .of_match_table = aspeed_spi_matches,
+ }
+};
+
+module_platform_driver(aspeed_spi_driver);
+
+MODULE_DESCRIPTION("ASPEED Static Memory Controller Driver");
+MODULE_AUTHOR("Chin-Ting Kuo <chin-ting_kuo@aspeedtech.com>");
+MODULE_AUTHOR("Cedric Le Goater <clg@kaod.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-au1550.c b/drivers/spi/spi-au1550.c
index 4b59a1b1bf7e..e008761298da 100644
--- a/drivers/spi/spi-au1550.c
+++ b/drivers/spi/spi-au1550.c
@@ -405,7 +405,7 @@ static int au1550_spi_dma_txrxb(struct spi_device *spi, struct spi_transfer *t)
dma_unmap_single(hw->dev, dma_tx_addr, t->len,
DMA_TO_DEVICE);
- return hw->rx_count < hw->tx_count ? hw->rx_count : hw->tx_count;
+ return min(hw->rx_count, hw->tx_count);
}
static irqreturn_t au1550_spi_dma_irq_callback(struct au1550_spi *hw)
@@ -539,7 +539,7 @@ static int au1550_spi_pio_txrxb(struct spi_device *spi, struct spi_transfer *t)
wait_for_completion(&hw->master_done);
- return hw->rx_count < hw->tx_count ? hw->rx_count : hw->tx_count;
+ return min(hw->rx_count, hw->tx_count);
}
static irqreturn_t au1550_spi_pio_irq_callback(struct au1550_spi *hw)
diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
index 616ada891974..2b9fc8449a62 100644
--- a/drivers/spi/spi-cadence-quadspi.c
+++ b/drivers/spi/spi-cadence-quadspi.c
@@ -43,6 +43,8 @@
/* Capabilities */
#define CQSPI_SUPPORTS_OCTAL BIT(0)
+#define CQSPI_OP_WIDTH(part) ((part).nbytes ? ilog2((part).buswidth) : 0)
+
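The new helper replaces the cached per-flash widths: an empty phase
(nbytes == 0) encodes as 0, otherwise the buswidth maps through
ilog2(), so 1/2/4/8 lines become the register encodings 0/1/2/3.
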
struct cqspi_st;
struct cqspi_flash_pdata {
@@ -53,16 +55,12 @@ struct cqspi_flash_pdata {
u32 tsd2d_ns;
u32 tchsh_ns;
u32 tslch_ns;
- u8 inst_width;
- u8 addr_width;
- u8 data_width;
- bool dtr;
u8 cs;
};
struct cqspi_st {
struct platform_device *pdev;
-
+ struct spi_master *master;
struct clk *clk;
unsigned int sclk;
@@ -343,18 +341,18 @@ static irqreturn_t cqspi_irq_handler(int this_irq, void *dev)
return IRQ_HANDLED;
}
-static unsigned int cqspi_calc_rdreg(struct cqspi_flash_pdata *f_pdata)
+static unsigned int cqspi_calc_rdreg(const struct spi_mem_op *op)
{
u32 rdreg = 0;
- rdreg |= f_pdata->inst_width << CQSPI_REG_RD_INSTR_TYPE_INSTR_LSB;
- rdreg |= f_pdata->addr_width << CQSPI_REG_RD_INSTR_TYPE_ADDR_LSB;
- rdreg |= f_pdata->data_width << CQSPI_REG_RD_INSTR_TYPE_DATA_LSB;
+ rdreg |= CQSPI_OP_WIDTH(op->cmd) << CQSPI_REG_RD_INSTR_TYPE_INSTR_LSB;
+ rdreg |= CQSPI_OP_WIDTH(op->addr) << CQSPI_REG_RD_INSTR_TYPE_ADDR_LSB;
+ rdreg |= CQSPI_OP_WIDTH(op->data) << CQSPI_REG_RD_INSTR_TYPE_DATA_LSB;
return rdreg;
}
-static unsigned int cqspi_calc_dummy(const struct spi_mem_op *op, bool dtr)
+static unsigned int cqspi_calc_dummy(const struct spi_mem_op *op)
{
unsigned int dummy_clk;
@@ -362,66 +360,12 @@ static unsigned int cqspi_calc_dummy(const struct spi_mem_op *op, bool dtr)
return 0;
dummy_clk = op->dummy.nbytes * (8 / op->dummy.buswidth);
- if (dtr)
+ if (op->cmd.dtr)
dummy_clk /= 2;
return dummy_clk;
}
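
A quick worked example (phase sizes assumed): 3 dummy bytes on a single
line give 3 * (8 / 1) = 24 dummy clocks; for a DTR op the count is
halved to 12, since data toggles on both clock edges.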
-static int cqspi_set_protocol(struct cqspi_flash_pdata *f_pdata,
- const struct spi_mem_op *op)
-{
- /*
- * For an op to be DTR, cmd phase along with every other non-empty
- * phase should have dtr field set to 1. If an op phase has zero
- * nbytes, ignore its dtr field; otherwise, check its dtr field.
- */
- f_pdata->dtr = op->cmd.dtr &&
- (!op->addr.nbytes || op->addr.dtr) &&
- (!op->data.nbytes || op->data.dtr);
-
- f_pdata->inst_width = 0;
- if (op->cmd.buswidth)
- f_pdata->inst_width = ilog2(op->cmd.buswidth);
-
- f_pdata->addr_width = 0;
- if (op->addr.buswidth)
- f_pdata->addr_width = ilog2(op->addr.buswidth);
-
- f_pdata->data_width = 0;
- if (op->data.buswidth)
- f_pdata->data_width = ilog2(op->data.buswidth);
-
- /* Right now we only support 8-8-8 DTR mode. */
- if (f_pdata->dtr) {
- switch (op->cmd.buswidth) {
- case 0:
- case 8:
- break;
- default:
- return -EINVAL;
- }
-
- switch (op->addr.buswidth) {
- case 0:
- case 8:
- break;
- default:
- return -EINVAL;
- }
-
- switch (op->data.buswidth) {
- case 0:
- case 8:
- break;
- default:
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
static int cqspi_wait_idle(struct cqspi_st *cqspi)
{
const unsigned int poll_idle_retry = 3;
@@ -503,8 +447,7 @@ static int cqspi_setup_opcode_ext(struct cqspi_flash_pdata *f_pdata,
}
static int cqspi_enable_dtr(struct cqspi_flash_pdata *f_pdata,
- const struct spi_mem_op *op, unsigned int shift,
- bool enable)
+ const struct spi_mem_op *op, unsigned int shift)
{
struct cqspi_st *cqspi = f_pdata->cqspi;
void __iomem *reg_base = cqspi->iobase;
@@ -517,7 +460,7 @@ static int cqspi_enable_dtr(struct cqspi_flash_pdata *f_pdata,
* We enable dual byte opcode here. The callers have to set up the
* extension opcode based on which type of operation it is.
*/
- if (enable) {
+ if (op->cmd.dtr) {
reg |= CQSPI_REG_CONFIG_DTR_PROTO;
reg |= CQSPI_REG_CONFIG_DUAL_OPCODE;
@@ -549,12 +492,7 @@ static int cqspi_command_read(struct cqspi_flash_pdata *f_pdata,
size_t read_len;
int status;
- status = cqspi_set_protocol(f_pdata, op);
- if (status)
- return status;
-
- status = cqspi_enable_dtr(f_pdata, op, CQSPI_REG_OP_EXT_STIG_LSB,
- f_pdata->dtr);
+ status = cqspi_enable_dtr(f_pdata, op, CQSPI_REG_OP_EXT_STIG_LSB);
if (status)
return status;
@@ -565,17 +503,17 @@ static int cqspi_command_read(struct cqspi_flash_pdata *f_pdata,
return -EINVAL;
}
- if (f_pdata->dtr)
+ if (op->cmd.dtr)
opcode = op->cmd.opcode >> 8;
else
opcode = op->cmd.opcode;
reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;
- rdreg = cqspi_calc_rdreg(f_pdata);
+ rdreg = cqspi_calc_rdreg(op);
writel(rdreg, reg_base + CQSPI_REG_RD_INSTR);
- dummy_clk = cqspi_calc_dummy(op, f_pdata->dtr);
+ dummy_clk = cqspi_calc_dummy(op);
if (dummy_clk > CQSPI_DUMMY_CLKS_MAX)
return -EOPNOTSUPP;
@@ -622,12 +560,7 @@ static int cqspi_command_write(struct cqspi_flash_pdata *f_pdata,
size_t write_len;
int ret;
- ret = cqspi_set_protocol(f_pdata, op);
- if (ret)
- return ret;
-
- ret = cqspi_enable_dtr(f_pdata, op, CQSPI_REG_OP_EXT_STIG_LSB,
- f_pdata->dtr);
+ ret = cqspi_enable_dtr(f_pdata, op, CQSPI_REG_OP_EXT_STIG_LSB);
if (ret)
return ret;
@@ -638,10 +571,10 @@ static int cqspi_command_write(struct cqspi_flash_pdata *f_pdata,
return -EINVAL;
}
- reg = cqspi_calc_rdreg(f_pdata);
+ reg = cqspi_calc_rdreg(op);
writel(reg, reg_base + CQSPI_REG_RD_INSTR);
- if (f_pdata->dtr)
+ if (op->cmd.dtr)
opcode = op->cmd.opcode >> 8;
else
opcode = op->cmd.opcode;
@@ -688,21 +621,20 @@ static int cqspi_read_setup(struct cqspi_flash_pdata *f_pdata,
int ret;
u8 opcode;
- ret = cqspi_enable_dtr(f_pdata, op, CQSPI_REG_OP_EXT_READ_LSB,
- f_pdata->dtr);
+ ret = cqspi_enable_dtr(f_pdata, op, CQSPI_REG_OP_EXT_READ_LSB);
if (ret)
return ret;
- if (f_pdata->dtr)
+ if (op->cmd.dtr)
opcode = op->cmd.opcode >> 8;
else
opcode = op->cmd.opcode;
reg = opcode << CQSPI_REG_RD_INSTR_OPCODE_LSB;
- reg |= cqspi_calc_rdreg(f_pdata);
+ reg |= cqspi_calc_rdreg(op);
/* Setup dummy clock cycles */
- dummy_clk = cqspi_calc_dummy(op, f_pdata->dtr);
+ dummy_clk = cqspi_calc_dummy(op);
if (dummy_clk > CQSPI_DUMMY_CLKS_MAX)
return -EOPNOTSUPP;
@@ -947,22 +879,21 @@ static int cqspi_write_setup(struct cqspi_flash_pdata *f_pdata,
void __iomem *reg_base = cqspi->iobase;
u8 opcode;
- ret = cqspi_enable_dtr(f_pdata, op, CQSPI_REG_OP_EXT_WRITE_LSB,
- f_pdata->dtr);
+ ret = cqspi_enable_dtr(f_pdata, op, CQSPI_REG_OP_EXT_WRITE_LSB);
if (ret)
return ret;
- if (f_pdata->dtr)
+ if (op->cmd.dtr)
opcode = op->cmd.opcode >> 8;
else
opcode = op->cmd.opcode;
/* Set opcode. */
reg = opcode << CQSPI_REG_WR_INSTR_OPCODE_LSB;
- reg |= f_pdata->data_width << CQSPI_REG_WR_INSTR_TYPE_DATA_LSB;
- reg |= f_pdata->addr_width << CQSPI_REG_WR_INSTR_TYPE_ADDR_LSB;
+ reg |= CQSPI_OP_WIDTH(op->data) << CQSPI_REG_WR_INSTR_TYPE_DATA_LSB;
+ reg |= CQSPI_OP_WIDTH(op->addr) << CQSPI_REG_WR_INSTR_TYPE_ADDR_LSB;
writel(reg, reg_base + CQSPI_REG_WR_INSTR);
- reg = cqspi_calc_rdreg(f_pdata);
+ reg = cqspi_calc_rdreg(op);
writel(reg, reg_base + CQSPI_REG_RD_INSTR);
/*
@@ -1244,10 +1175,6 @@ static ssize_t cqspi_write(struct cqspi_flash_pdata *f_pdata,
const u_char *buf = op->data.buf.out;
int ret;
- ret = cqspi_set_protocol(f_pdata, op);
- if (ret)
- return ret;
-
ret = cqspi_write_setup(f_pdata, op);
if (ret)
return ret;
@@ -1260,7 +1187,7 @@ static ssize_t cqspi_write(struct cqspi_flash_pdata *f_pdata,
* mode. So, we can not use direct mode when in DTR mode for writing
* data.
*/
- if (!f_pdata->dtr && cqspi->use_direct_mode &&
+ if (!op->cmd.dtr && cqspi->use_direct_mode &&
((to + len) <= cqspi->ahb_size)) {
memcpy_toio(cqspi->ahb_base + to, buf, len);
return cqspi_wait_idle(cqspi);
@@ -1348,9 +1275,6 @@ static ssize_t cqspi_read(struct cqspi_flash_pdata *f_pdata,
int ret;
ddata = of_device_get_match_data(dev);
- ret = cqspi_set_protocol(f_pdata, op);
- if (ret)
- return ret;
ret = cqspi_read_setup(f_pdata, op);
if (ret)
@@ -1415,9 +1339,18 @@ static bool cqspi_supports_mem_op(struct spi_mem *mem,
all_false = !op->cmd.dtr && !op->addr.dtr && !op->dummy.dtr &&
!op->data.dtr;
- /* Mixed DTR modes not supported. */
- if (!(all_true || all_false))
+ if (all_true) {
+ /* Right now we only support 8-8-8 DTR mode. */
+ if (op->cmd.nbytes && op->cmd.buswidth != 8)
+ return false;
+ if (op->addr.nbytes && op->addr.buswidth != 8)
+ return false;
+ if (op->data.nbytes && op->data.buswidth != 8)
+ return false;
+ } else if (!all_false) {
+ /* Mixed DTR modes are not supported. */
return false;
+ }
return spi_mem_default_supports_op(mem, op);
}
@@ -1548,6 +1481,7 @@ static int cqspi_request_mmap_dma(struct cqspi_st *cqspi)
cqspi->rx_chan = dma_request_chan_by_mask(&mask);
if (IS_ERR(cqspi->rx_chan)) {
int ret = PTR_ERR(cqspi->rx_chan);
+
cqspi->rx_chan = NULL;
return dev_err_probe(&cqspi->pdev->dev, ret, "No Rx DMA available\n");
}
@@ -1624,7 +1558,7 @@ static int cqspi_probe(struct platform_device *pdev)
int ret;
int irq;
- master = spi_alloc_master(&pdev->dev, sizeof(*cqspi));
+ master = devm_spi_alloc_master(&pdev->dev, sizeof(*cqspi));
if (!master) {
dev_err(&pdev->dev, "spi_alloc_master failed\n");
return -ENOMEM;
@@ -1637,6 +1571,7 @@ static int cqspi_probe(struct platform_device *pdev)
cqspi = spi_master_get_devdata(master);
cqspi->pdev = pdev;
+ cqspi->master = master;
platform_set_drvdata(pdev, cqspi);
/* Obtain configuration from OF. */
@@ -1685,11 +1620,9 @@ static int cqspi_probe(struct platform_device *pdev)
}
pm_runtime_enable(dev);
- ret = pm_runtime_get_sync(dev);
- if (ret < 0) {
- pm_runtime_put_noidle(dev);
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
goto probe_master_put;
- }
ret = clk_prepare_enable(cqspi->clk);
if (ret) {
@@ -1769,7 +1702,7 @@ static int cqspi_probe(struct platform_device *pdev)
goto probe_setup_failed;
}
- ret = devm_spi_register_master(dev, master);
+ ret = spi_register_master(master);
if (ret) {
dev_err(&pdev->dev, "failed to register SPI ctlr %d\n", ret);
goto probe_setup_failed;
@@ -1792,6 +1725,7 @@ static int cqspi_remove(struct platform_device *pdev)
{
struct cqspi_st *cqspi = platform_get_drvdata(pdev);
+ spi_unregister_master(cqspi->master);
cqspi_controller_enable(cqspi, 0);
if (cqspi->rx_chan)
@@ -1850,7 +1784,7 @@ static const struct cqspi_driver_platdata intel_lgm_qspi = {
};
static const struct cqspi_driver_platdata socfpga_qspi = {
- .quirks = CQSPI_NO_SUPPORT_WR_COMPLETION,
+ .quirks = CQSPI_DISABLE_DAC_MODE | CQSPI_NO_SUPPORT_WR_COMPLETION,
};
static const struct cqspi_driver_platdata versal_ospi = {
@@ -1879,11 +1813,11 @@ static const struct of_device_id cqspi_dt_ids[] = {
},
{
.compatible = "xlnx,versal-ospi-1.0",
- .data = (void *)&versal_ospi,
+ .data = &versal_ospi,
},
{
.compatible = "intel,socfpga-qspi",
- .data = (void *)&socfpga_qspi,
+ .data = &socfpga_qspi,
},
{ /* end of table */ }
};
diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c
index ceb16e70d235..a23d4f6329f5 100644
--- a/drivers/spi/spi-cadence.c
+++ b/drivers/spi/spi-cadence.c
@@ -342,7 +342,8 @@ static irqreturn_t cdns_spi_irq(int irq, void *dev_id)
{
struct spi_master *master = dev_id;
struct cdns_spi *xspi = spi_master_get_devdata(master);
- u32 intr_status, status;
+ irqreturn_t status;
+ u32 intr_status;
status = IRQ_NONE;
intr_status = cdns_spi_read(xspi, CDNS_SPI_ISR);
@@ -657,7 +658,7 @@ static int __maybe_unused cdns_spi_resume(struct device *dev)
*
* Return: 0 on success and error value on error
*/
-static int __maybe_unused cnds_runtime_resume(struct device *dev)
+static int __maybe_unused cdns_spi_runtime_resume(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct cdns_spi *xspi = spi_master_get_devdata(master);
@@ -686,7 +687,7 @@ static int __maybe_unused cnds_runtime_resume(struct device *dev)
*
* Return: Always 0
*/
-static int __maybe_unused cnds_runtime_suspend(struct device *dev)
+static int __maybe_unused cdns_spi_runtime_suspend(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct cdns_spi *xspi = spi_master_get_devdata(master);
@@ -698,8 +699,8 @@ static int __maybe_unused cnds_runtime_suspend(struct device *dev)
}
static const struct dev_pm_ops cdns_spi_dev_pm_ops = {
- SET_RUNTIME_PM_OPS(cnds_runtime_suspend,
- cnds_runtime_resume, NULL)
+ SET_RUNTIME_PM_OPS(cdns_spi_runtime_suspend,
+ cdns_spi_runtime_resume, NULL)
SET_SYSTEM_SLEEP_PM_OPS(cdns_spi_suspend, cdns_spi_resume)
};
diff --git a/drivers/spi/spi-clps711x.c b/drivers/spi/spi-clps711x.c
index 0bef5ce08094..c005ed26a3e1 100644
--- a/drivers/spi/spi-clps711x.c
+++ b/drivers/spi/spi-clps711x.c
@@ -9,6 +9,7 @@
#include <linux/clk.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
@@ -89,6 +90,7 @@ static irqreturn_t spi_clps711x_isr(int irq, void *dev_id)
static int spi_clps711x_probe(struct platform_device *pdev)
{
+ struct device_node *np = pdev->dev.of_node;
struct spi_clps711x_data *hw;
struct spi_master *master;
int irq, ret;
@@ -117,8 +119,7 @@ static int spi_clps711x_probe(struct platform_device *pdev)
goto err_out;
}
- hw->syscon =
- syscon_regmap_lookup_by_compatible("cirrus,ep7209-syscon3");
+ hw->syscon = syscon_regmap_lookup_by_phandle(np, "syscon");
if (IS_ERR(hw->syscon)) {
ret = PTR_ERR(hw->syscon);
goto err_out;
diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c
index 4c601294f8fa..19b1f3d881b0 100644
--- a/drivers/spi/spi-fsl-lpspi.c
+++ b/drivers/spi/spi-fsl-lpspi.c
@@ -20,7 +20,7 @@
#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
-#include <linux/platform_data/dma-imx.h>
+#include <linux/dma/imx-dma.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
diff --git a/drivers/spi/spi-fsl-qspi.c b/drivers/spi/spi-fsl-qspi.c
index 9851551ebbe0..46ae46a944c5 100644
--- a/drivers/spi/spi-fsl-qspi.c
+++ b/drivers/spi/spi-fsl-qspi.c
@@ -876,6 +876,10 @@ static int fsl_qspi_probe(struct platform_device *pdev)
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"QuadSPI-memory");
+ if (!res) {
+ ret = -EINVAL;
+ goto err_put_ctrl;
+ }
q->memmap_phy = res->start;
/* Since there are 4 cs, map size required is 4 times ahb_buf_size */
q->ahb_addr = devm_ioremap(dev, q->memmap_phy,
diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c
index 5f05d519fbbd..71376b6df89d 100644
--- a/drivers/spi/spi-img-spfi.c
+++ b/drivers/spi/spi-img-spfi.c
@@ -731,7 +731,7 @@ static int img_spfi_resume(struct device *dev)
int ret;
ret = pm_runtime_get_sync(dev);
- if (ret) {
+ if (ret < 0) {
pm_runtime_put_noidle(dev);
return ret;
}
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index b2dd0a4d2446..30d82cc7300b 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -18,13 +18,12 @@
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
-#include <linux/spi/spi_bitbang.h>
#include <linux/types.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/property.h>
-#include <linux/platform_data/dma-imx.h>
+#include <linux/dma/imx-dma.h>
#define DRIVER_NAME "spi_imx"
@@ -32,6 +31,12 @@ static bool use_dma = true;
module_param(use_dma, bool, 0644);
MODULE_PARM_DESC(use_dma, "Enable usage of DMA when available (default)");
+/* define polling limits */
+static unsigned int polling_limit_us = 30;
+module_param(polling_limit_us, uint, 0664);
+MODULE_PARM_DESC(polling_limit_us,
+ "time in us to run a transfer in polling mode\n");
+
#define MXC_RPM_TIMEOUT 2000 /* 2000ms */
#define MXC_CSPIRXDATA 0x00
@@ -64,15 +69,15 @@ enum spi_imx_devtype {
struct spi_imx_data;
struct spi_imx_devtype_data {
- void (*intctrl)(struct spi_imx_data *, int);
- int (*prepare_message)(struct spi_imx_data *, struct spi_message *);
- int (*prepare_transfer)(struct spi_imx_data *, struct spi_device *);
- void (*trigger)(struct spi_imx_data *);
- int (*rx_available)(struct spi_imx_data *);
- void (*reset)(struct spi_imx_data *);
- void (*setup_wml)(struct spi_imx_data *);
- void (*disable)(struct spi_imx_data *);
- void (*disable_dma)(struct spi_imx_data *);
+ void (*intctrl)(struct spi_imx_data *spi_imx, int enable);
+ int (*prepare_message)(struct spi_imx_data *spi_imx, struct spi_message *msg);
+ int (*prepare_transfer)(struct spi_imx_data *spi_imx, struct spi_device *spi);
+ void (*trigger)(struct spi_imx_data *spi_imx);
+ int (*rx_available)(struct spi_imx_data *spi_imx);
+ void (*reset)(struct spi_imx_data *spi_imx);
+ void (*setup_wml)(struct spi_imx_data *spi_imx);
+ void (*disable)(struct spi_imx_data *spi_imx);
+ void (*disable_dma)(struct spi_imx_data *spi_imx);
bool has_dmamode;
bool has_slavemode;
unsigned int fifo_size;
@@ -86,7 +91,7 @@ struct spi_imx_devtype_data {
};
struct spi_imx_data {
- struct spi_bitbang bitbang;
+ struct spi_controller *controller;
struct device *dev;
struct completion xfer_done;
@@ -102,12 +107,13 @@ struct spi_imx_data {
unsigned int spi_drctl;
unsigned int count, remainder;
- void (*tx)(struct spi_imx_data *);
- void (*rx)(struct spi_imx_data *);
+ void (*tx)(struct spi_imx_data *spi_imx);
+ void (*rx)(struct spi_imx_data *spi_imx);
void *rx_buf;
const void *tx_buf;
unsigned int txfifo; /* number of words pushed in tx FIFO */
unsigned int dynamic_burst;
+ bool rx_only;
/* Slave mode */
bool slave_mode;
@@ -225,15 +231,15 @@ static int spi_imx_bytes_per_word(const int bits_per_word)
return 4;
}
-static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
+static bool spi_imx_can_dma(struct spi_controller *controller, struct spi_device *spi,
struct spi_transfer *transfer)
{
- struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
+ struct spi_imx_data *spi_imx = spi_controller_get_devdata(controller);
- if (!use_dma || master->fallback)
+ if (!use_dma || controller->fallback)
return false;
- if (!master->dma_rx)
+ if (!controller->dma_rx)
return false;
if (spi_imx->slave_mode)
@@ -289,17 +295,16 @@ static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
static void spi_imx_buf_rx_swap_u32(struct spi_imx_data *spi_imx)
{
unsigned int val = readl(spi_imx->base + MXC_CSPIRXDATA);
-#ifdef __LITTLE_ENDIAN
- unsigned int bytes_per_word;
-#endif
if (spi_imx->rx_buf) {
#ifdef __LITTLE_ENDIAN
+ unsigned int bytes_per_word;
+
bytes_per_word = spi_imx_bytes_per_word(spi_imx->bits_per_word);
if (bytes_per_word == 1)
- val = cpu_to_be32(val);
+ swab32s(&val);
else if (bytes_per_word == 2)
- val = (val << 16) | (val >> 16);
+ swahw32s(&val);
#endif
*(u32 *)spi_imx->rx_buf = val;
spi_imx->rx_buf += sizeof(u32);
@@ -353,9 +358,9 @@ static void spi_imx_buf_tx_swap_u32(struct spi_imx_data *spi_imx)
bytes_per_word = spi_imx_bytes_per_word(spi_imx->bits_per_word);
if (bytes_per_word == 1)
- val = cpu_to_be32(val);
+ swab32s(&val);
else if (bytes_per_word == 2)
- val = (val << 16) | (val >> 16);
+ swahw32s(&val);
#endif
writel(val, spi_imx->base + MXC_CSPITXDATA);
}
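
The swab32s()/swahw32s() conversions above are behavior-preserving on little-endian: swab32s() reverses all four bytes of a word in place (what cpu_to_be32() produced on LE), and swahw32s() swaps the two 16-bit halfwords (the old (val << 16) | (val >> 16)). A minimal user-space sketch of the equivalent transforms; the demo_* helpers are illustrative stand-ins, the real ones live in <linux/swab.h>:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for swab32s(): reverse all four bytes in place. */
static void demo_swab32s(uint32_t *v)
{
	*v = ((*v & 0x000000ff) << 24) | ((*v & 0x0000ff00) << 8) |
	     ((*v & 0x00ff0000) >> 8) | ((*v & 0xff000000) >> 24);
}

/* Stand-in for swahw32s(): swap the two 16-bit halfwords in place. */
static void demo_swahw32s(uint32_t *v)
{
	*v = (*v << 16) | (*v >> 16);
}

int main(void)
{
	uint32_t a = 0x11223344, b = 0x11223344;

	demo_swab32s(&a);	/* 0x44332211, as cpu_to_be32() gave on LE */
	demo_swahw32s(&b);	/* 0x33441122, as the old open-coded swap */
	printf("%08x %08x\n", a, b);
	return 0;
}
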
@@ -469,7 +474,7 @@ static unsigned int mx51_ecspi_clkdiv(struct spi_imx_data *spi_imx,
static void mx51_ecspi_intctrl(struct spi_imx_data *spi_imx, int enable)
{
- unsigned val = 0;
+ unsigned int val = 0;
if (enable & MXC_INT_TE)
val |= MX51_ECSPI_INT_TEEN;
@@ -515,6 +520,7 @@ static int mx51_ecspi_prepare_message(struct spi_imx_data *spi_imx,
u32 min_speed_hz = ~0U;
u32 testreg, delay;
u32 cfg = readl(spi_imx->base + MX51_ECSPI_CONFIG);
+ u32 current_cfg = cfg;
/* set Master or Slave mode */
if (spi_imx->slave_mode)
@@ -554,11 +560,6 @@ static int mx51_ecspi_prepare_message(struct spi_imx_data *spi_imx,
else
cfg |= MX51_ECSPI_CONFIG_SBBCTRL(spi->chip_select);
- if (spi->mode & SPI_CPHA)
- cfg |= MX51_ECSPI_CONFIG_SCLKPHA(spi->chip_select);
- else
- cfg &= ~MX51_ECSPI_CONFIG_SCLKPHA(spi->chip_select);
-
if (spi->mode & SPI_CPOL) {
cfg |= MX51_ECSPI_CONFIG_SCLKPOL(spi->chip_select);
cfg |= MX51_ECSPI_CONFIG_SCLKCTL(spi->chip_select);
@@ -572,6 +573,9 @@ static int mx51_ecspi_prepare_message(struct spi_imx_data *spi_imx,
else
cfg &= ~MX51_ECSPI_CONFIG_SSBPOL(spi->chip_select);
+ if (cfg == current_cfg)
+ return 0;
+
writel(cfg, spi_imx->base + MX51_ECSPI_CONFIG);
/*
@@ -585,7 +589,7 @@ static int mx51_ecspi_prepare_message(struct spi_imx_data *spi_imx,
* the SPI communication as the device on the other end would consider
* the change of SCLK polarity as a clock tick already.
*
- * Because spi_imx->spi_bus_clk is only set in bitbang prepare_message
+ * Because spi_imx->spi_bus_clk is only set in prepare_message
* callback, iterate over all the transfers in spi_message, find the
* one with lowest bus frequency, and use that bus frequency for the
* delay calculation. In case all transfers have speed_hz == 0, then
@@ -606,6 +610,24 @@ static int mx51_ecspi_prepare_message(struct spi_imx_data *spi_imx,
return 0;
}
+static void mx51_configure_cpha(struct spi_imx_data *spi_imx,
+ struct spi_device *spi)
+{
+ bool cpha = (spi->mode & SPI_CPHA);
+ bool flip_cpha = (spi->mode & SPI_RX_CPHA_FLIP) && spi_imx->rx_only;
+ u32 cfg = readl(spi_imx->base + MX51_ECSPI_CONFIG);
+
+ /* Flip cpha logical value iff flip_cpha */
+ cpha ^= flip_cpha;
+
+ if (cpha)
+ cfg |= MX51_ECSPI_CONFIG_SCLKPHA(spi->chip_select);
+ else
+ cfg &= ~MX51_ECSPI_CONFIG_SCLKPHA(spi->chip_select);
+
+ writel(cfg, spi_imx->base + MX51_ECSPI_CONFIG);
+}
+
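
SPI_RX_CPHA_FLIP requests sampling on the opposite SCLK phase for RX-only transfers; the XOR above applies the flip only when the mode bit is set and the transfer carries no TX data. A minimal sketch of that decision, with an illustrative helper name:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for the cpha ^= flip_cpha logic above. */
static bool effective_cpha(bool cpha, bool rx_cpha_flip, bool rx_only)
{
	return cpha ^ (rx_cpha_flip && rx_only);
}

int main(void)
{
	printf("%d\n", effective_cpha(false, true, true));	/* 1: flipped */
	printf("%d\n", effective_cpha(false, true, false));	/* 0: has TX data */
	printf("%d\n", effective_cpha(true, false, true));	/* 1: mode bit unset */
	return 0;
}
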
static int mx51_ecspi_prepare_transfer(struct spi_imx_data *spi_imx,
struct spi_device *spi)
{
@@ -627,6 +649,8 @@ static int mx51_ecspi_prepare_transfer(struct spi_imx_data *spi_imx,
ctrl |= mx51_ecspi_clkdiv(spi_imx, spi_imx->spi_bus_clk, &clk);
spi_imx->spi_bus_clk = clk;
+ mx51_configure_cpha(spi_imx, spi);
+
/*
* ERR009165: work in XHC mode instead of SMC as PIO on the chips
* before i.mx6ul.
@@ -1153,12 +1177,12 @@ static irqreturn_t spi_imx_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int spi_imx_dma_configure(struct spi_master *master)
+static int spi_imx_dma_configure(struct spi_controller *controller)
{
int ret;
enum dma_slave_buswidth buswidth;
struct dma_slave_config rx = {}, tx = {};
- struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
+ struct spi_imx_data *spi_imx = spi_controller_get_devdata(controller);
switch (spi_imx_bytes_per_word(spi_imx->bits_per_word)) {
case 4:
@@ -1178,7 +1202,7 @@ static int spi_imx_dma_configure(struct spi_master *master)
tx.dst_addr = spi_imx->base_phys + MXC_CSPITXDATA;
tx.dst_addr_width = buswidth;
tx.dst_maxburst = spi_imx->wml;
- ret = dmaengine_slave_config(master->dma_tx, &tx);
+ ret = dmaengine_slave_config(controller->dma_tx, &tx);
if (ret) {
dev_err(spi_imx->dev, "TX dma configuration failed with %d\n", ret);
return ret;
@@ -1188,7 +1212,7 @@ static int spi_imx_dma_configure(struct spi_master *master)
rx.src_addr = spi_imx->base_phys + MXC_CSPIRXDATA;
rx.src_addr_width = buswidth;
rx.src_maxburst = spi_imx->wml;
- ret = dmaengine_slave_config(master->dma_rx, &rx);
+ ret = dmaengine_slave_config(controller->dma_rx, &rx);
if (ret) {
dev_err(spi_imx->dev, "RX dma configuration failed with %d\n", ret);
return ret;
@@ -1200,7 +1224,7 @@ static int spi_imx_dma_configure(struct spi_master *master)
static int spi_imx_setupxfer(struct spi_device *spi,
struct spi_transfer *t)
{
- struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
+ struct spi_imx_data *spi_imx = spi_controller_get_devdata(spi->controller);
if (!t)
return 0;
@@ -1246,11 +1270,14 @@ static int spi_imx_setupxfer(struct spi_device *spi,
spi_imx->dynamic_burst = 0;
}
- if (spi_imx_can_dma(spi_imx->bitbang.master, spi, t))
+ if (spi_imx_can_dma(spi_imx->controller, spi, t))
spi_imx->usedma = true;
else
spi_imx->usedma = false;
+ spi_imx->rx_only = ((t->tx_buf == NULL)
+ || (t->tx_buf == spi->controller->dummy_tx));
+
if (is_imx53_ecspi(spi_imx) && spi_imx->slave_mode) {
spi_imx->rx = mx53_ecspi_rx_slave;
spi_imx->tx = mx53_ecspi_tx_slave;
@@ -1264,50 +1291,50 @@ static int spi_imx_setupxfer(struct spi_device *spi,
static void spi_imx_sdma_exit(struct spi_imx_data *spi_imx)
{
- struct spi_master *master = spi_imx->bitbang.master;
+ struct spi_controller *controller = spi_imx->controller;
- if (master->dma_rx) {
- dma_release_channel(master->dma_rx);
- master->dma_rx = NULL;
+ if (controller->dma_rx) {
+ dma_release_channel(controller->dma_rx);
+ controller->dma_rx = NULL;
}
- if (master->dma_tx) {
- dma_release_channel(master->dma_tx);
- master->dma_tx = NULL;
+ if (controller->dma_tx) {
+ dma_release_channel(controller->dma_tx);
+ controller->dma_tx = NULL;
}
}
static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
- struct spi_master *master)
+ struct spi_controller *controller)
{
int ret;
spi_imx->wml = spi_imx->devtype_data->fifo_size / 2;
/* Prepare for TX DMA: */
- master->dma_tx = dma_request_chan(dev, "tx");
- if (IS_ERR(master->dma_tx)) {
- ret = PTR_ERR(master->dma_tx);
+ controller->dma_tx = dma_request_chan(dev, "tx");
+ if (IS_ERR(controller->dma_tx)) {
+ ret = PTR_ERR(controller->dma_tx);
dev_dbg(dev, "can't get the TX DMA channel, error %d!\n", ret);
- master->dma_tx = NULL;
+ controller->dma_tx = NULL;
goto err;
}
/* Prepare for RX : */
- master->dma_rx = dma_request_chan(dev, "rx");
- if (IS_ERR(master->dma_rx)) {
- ret = PTR_ERR(master->dma_rx);
+ controller->dma_rx = dma_request_chan(dev, "rx");
+ if (IS_ERR(controller->dma_rx)) {
+ ret = PTR_ERR(controller->dma_rx);
dev_dbg(dev, "can't get the RX DMA channel, error %d\n", ret);
- master->dma_rx = NULL;
+ controller->dma_rx = NULL;
goto err;
}
init_completion(&spi_imx->dma_rx_completion);
init_completion(&spi_imx->dma_tx_completion);
- master->can_dma = spi_imx_can_dma;
- master->max_dma_len = MAX_SDMA_BD_BYTES;
- spi_imx->bitbang.master->flags = SPI_MASTER_MUST_RX |
- SPI_MASTER_MUST_TX;
+ controller->can_dma = spi_imx_can_dma;
+ controller->max_dma_len = MAX_SDMA_BD_BYTES;
+ spi_imx->controller->flags = SPI_CONTROLLER_MUST_RX |
+ SPI_CONTROLLER_MUST_TX;
return 0;
err:
@@ -1349,7 +1376,7 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
struct dma_async_tx_descriptor *desc_tx, *desc_rx;
unsigned long transfer_timeout;
unsigned long timeout;
- struct spi_master *master = spi_imx->bitbang.master;
+ struct spi_controller *controller = spi_imx->controller;
struct sg_table *tx = &transfer->tx_sg, *rx = &transfer->rx_sg;
struct scatterlist *last_sg = sg_last(rx->sgl, rx->nents);
unsigned int bytes_per_word, i;
@@ -1367,7 +1394,7 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
spi_imx->wml = i;
- ret = spi_imx_dma_configure(master);
+ ret = spi_imx_dma_configure(controller);
if (ret)
goto dma_failure_no_start;
@@ -1382,7 +1409,7 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
* The TX DMA setup starts the transfer, so make sure RX is configured
* before TX.
*/
- desc_rx = dmaengine_prep_slave_sg(master->dma_rx,
+ desc_rx = dmaengine_prep_slave_sg(controller->dma_rx,
rx->sgl, rx->nents, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc_rx) {
@@ -1394,14 +1421,14 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
desc_rx->callback_param = (void *)spi_imx;
dmaengine_submit(desc_rx);
reinit_completion(&spi_imx->dma_rx_completion);
- dma_async_issue_pending(master->dma_rx);
+ dma_async_issue_pending(controller->dma_rx);
- desc_tx = dmaengine_prep_slave_sg(master->dma_tx,
+ desc_tx = dmaengine_prep_slave_sg(controller->dma_tx,
tx->sgl, tx->nents, DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc_tx) {
- dmaengine_terminate_all(master->dma_tx);
- dmaengine_terminate_all(master->dma_rx);
+ dmaengine_terminate_all(controller->dma_tx);
+ dmaengine_terminate_all(controller->dma_rx);
return -EINVAL;
}
@@ -1409,7 +1436,7 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
desc_tx->callback_param = (void *)spi_imx;
dmaengine_submit(desc_tx);
reinit_completion(&spi_imx->dma_tx_completion);
- dma_async_issue_pending(master->dma_tx);
+ dma_async_issue_pending(controller->dma_tx);
transfer_timeout = spi_imx_calculate_timeout(spi_imx, transfer->len);
@@ -1418,21 +1445,21 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
transfer_timeout);
if (!timeout) {
dev_err(spi_imx->dev, "I/O Error in DMA TX\n");
- dmaengine_terminate_all(master->dma_tx);
- dmaengine_terminate_all(master->dma_rx);
+ dmaengine_terminate_all(controller->dma_tx);
+ dmaengine_terminate_all(controller->dma_rx);
return -ETIMEDOUT;
}
timeout = wait_for_completion_timeout(&spi_imx->dma_rx_completion,
transfer_timeout);
if (!timeout) {
- dev_err(&master->dev, "I/O Error in DMA RX\n");
+ dev_err(&controller->dev, "I/O Error in DMA RX\n");
spi_imx->devtype_data->reset(spi_imx);
- dmaengine_terminate_all(master->dma_rx);
+ dmaengine_terminate_all(controller->dma_rx);
return -ETIMEDOUT;
}
- return transfer->len;
+ return 0;
/* fallback to pio */
dma_failure_no_start:
transfer->error |= SPI_TRANS_FAIL_NO_START;
@@ -1442,7 +1469,7 @@ dma_failure_no_start:
static int spi_imx_pio_transfer(struct spi_device *spi,
struct spi_transfer *transfer)
{
- struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
+ struct spi_imx_data *spi_imx = spi_controller_get_devdata(spi->controller);
unsigned long transfer_timeout;
unsigned long timeout;
@@ -1468,14 +1495,62 @@ static int spi_imx_pio_transfer(struct spi_device *spi,
return -ETIMEDOUT;
}
- return transfer->len;
+ return 0;
+}
+
+static int spi_imx_poll_transfer(struct spi_device *spi,
+ struct spi_transfer *transfer)
+{
+ struct spi_imx_data *spi_imx = spi_controller_get_devdata(spi->controller);
+ unsigned long timeout;
+
+ spi_imx->tx_buf = transfer->tx_buf;
+ spi_imx->rx_buf = transfer->rx_buf;
+ spi_imx->count = transfer->len;
+ spi_imx->txfifo = 0;
+ spi_imx->remainder = 0;
+
+ /* Fill the FIFO before the timeout calculation: if we get
+ * interrupted here, the HW is already transferring the data
+ * while we are away.
+ */
+ spi_imx_push(spi_imx);
+
+ timeout = spi_imx_calculate_timeout(spi_imx, transfer->len) + jiffies;
+ while (spi_imx->txfifo) {
+ /* RX */
+ while (spi_imx->txfifo &&
+ spi_imx->devtype_data->rx_available(spi_imx)) {
+ spi_imx->rx(spi_imx);
+ spi_imx->txfifo--;
+ }
+
+ /* TX */
+ if (spi_imx->count) {
+ spi_imx_push(spi_imx);
+ continue;
+ }
+
+ if (spi_imx->txfifo &&
+ time_after(jiffies, timeout)) {
+
+ dev_err_ratelimited(&spi->dev,
+ "timeout period reached: jiffies: %lu- falling back to interrupt mode\n",
+ jiffies - timeout);
+
+ /* fall back to interrupt mode */
+ return spi_imx_pio_transfer(spi, transfer);
+ }
+ }
+
+ return 0;
}
static int spi_imx_pio_transfer_slave(struct spi_device *spi,
struct spi_transfer *transfer)
{
- struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
- int ret = transfer->len;
+ struct spi_imx_data *spi_imx = spi_controller_get_devdata(spi->controller);
+ int ret = 0;
if (is_imx53_ecspi(spi_imx) &&
transfer->len > MX53_MAX_TRANSFER_BYTES) {
@@ -1515,11 +1590,14 @@ static int spi_imx_pio_transfer_slave(struct spi_device *spi,
return ret;
}
-static int spi_imx_transfer(struct spi_device *spi,
+static int spi_imx_transfer_one(struct spi_controller *controller,
+ struct spi_device *spi,
struct spi_transfer *transfer)
{
- struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
+ struct spi_imx_data *spi_imx = spi_controller_get_devdata(spi->controller);
+ unsigned long hz_per_byte, byte_limit;
+ spi_imx_setupxfer(spi, transfer);
transfer->effective_speed_hz = spi_imx->spi_bus_clk;
/* flush rxfifo before transfer */
@@ -1529,6 +1607,17 @@ static int spi_imx_transfer(struct spi_device *spi,
if (spi_imx->slave_mode)
return spi_imx_pio_transfer_slave(spi, transfer);
+ /*
+ * Calculate the estimated time in us the transfer runs. Find
+ * the number of Hz per byte per polling limit.
+ */
+ hz_per_byte = polling_limit_us ? ((8 + 4) * USEC_PER_SEC) / polling_limit_us : 0;
+ byte_limit = hz_per_byte ? transfer->effective_speed_hz / hz_per_byte : 1;
+
+ /* run in polling mode for short transfers */
+ if (transfer->len < byte_limit)
+ return spi_imx_poll_transfer(spi, transfer);
+
if (spi_imx->usedma)
return spi_imx_dma_transfer(spi_imx, transfer);
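
The heuristic above budgets roughly 12 clock cycles per byte (8 data plus 4 overhead) and polls only when the whole transfer fits within polling_limit_us; longer transfers take the DMA or interrupt path. A worked example of the same arithmetic with illustrative numbers:

#include <stdio.h>

#define USEC_PER_SEC 1000000UL

int main(void)
{
	unsigned long polling_limit_us = 30;	/* module parameter default */
	unsigned long speed_hz = 10000000;	/* assumed 10 MHz bus clock */

	/* ~12 cycles per byte, as in spi_imx_transfer_one() */
	unsigned long hz_per_byte = ((8 + 4) * USEC_PER_SEC) / polling_limit_us;
	unsigned long byte_limit = speed_hz / hz_per_byte;

	/* 10 MHz with a 30 us budget: transfers below 25 bytes are polled */
	printf("hz_per_byte=%lu byte_limit=%lu\n", hz_per_byte, byte_limit);
	return 0;
}
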
@@ -1548,14 +1637,13 @@ static void spi_imx_cleanup(struct spi_device *spi)
}
static int
-spi_imx_prepare_message(struct spi_master *master, struct spi_message *msg)
+spi_imx_prepare_message(struct spi_controller *controller, struct spi_message *msg)
{
- struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
+ struct spi_imx_data *spi_imx = spi_controller_get_devdata(controller);
int ret;
- ret = pm_runtime_get_sync(spi_imx->dev);
+ ret = pm_runtime_resume_and_get(spi_imx->dev);
if (ret < 0) {
- pm_runtime_put_noidle(spi_imx->dev);
dev_err(spi_imx->dev, "failed to enable clock\n");
return ret;
}
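
pm_runtime_resume_and_get() folds the old two-call error handling into one helper: on failure it drops the usage count that pm_runtime_get_sync() would have left elevated, so the pm_runtime_put_noidle() call disappears. A minimal sketch of the pattern this patch applies (dev is any runtime-PM-managed struct device *):

#include <linux/pm_runtime.h>

static int demo_get(struct device *dev)
{
	int ret;

	/*
	 * Replaced idiom:
	 *	ret = pm_runtime_get_sync(dev);
	 *	if (ret < 0) {
	 *		pm_runtime_put_noidle(dev);
	 *		return ret;
	 *	}
	 */
	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)	/* usage count already dropped on failure */
		return ret;

	return 0;
}
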
@@ -1570,18 +1658,18 @@ spi_imx_prepare_message(struct spi_master *master, struct spi_message *msg)
}
static int
-spi_imx_unprepare_message(struct spi_master *master, struct spi_message *msg)
+spi_imx_unprepare_message(struct spi_controller *controller, struct spi_message *msg)
{
- struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
+ struct spi_imx_data *spi_imx = spi_controller_get_devdata(controller);
pm_runtime_mark_last_busy(spi_imx->dev);
pm_runtime_put_autosuspend(spi_imx->dev);
return 0;
}
-static int spi_imx_slave_abort(struct spi_master *master)
+static int spi_imx_slave_abort(struct spi_controller *controller)
{
- struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
+ struct spi_imx_data *spi_imx = spi_controller_get_devdata(controller);
spi_imx->slave_aborted = true;
complete(&spi_imx->xfer_done);
@@ -1592,7 +1680,7 @@ static int spi_imx_slave_abort(struct spi_master *master)
static int spi_imx_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
- struct spi_master *master;
+ struct spi_controller *controller;
struct spi_imx_data *spi_imx;
struct resource *res;
int ret, irq, spi_drctl;
@@ -1604,12 +1692,12 @@ static int spi_imx_probe(struct platform_device *pdev)
slave_mode = devtype_data->has_slavemode &&
of_property_read_bool(np, "spi-slave");
if (slave_mode)
- master = spi_alloc_slave(&pdev->dev,
- sizeof(struct spi_imx_data));
+ controller = spi_alloc_slave(&pdev->dev,
+ sizeof(struct spi_imx_data));
else
- master = spi_alloc_master(&pdev->dev,
- sizeof(struct spi_imx_data));
- if (!master)
+ controller = spi_alloc_master(&pdev->dev,
+ sizeof(struct spi_imx_data));
+ if (!controller)
return -ENOMEM;
ret = of_property_read_u32(np, "fsl,spi-rdy-drctl", &spi_drctl);
@@ -1618,14 +1706,14 @@ static int spi_imx_probe(struct platform_device *pdev)
spi_drctl = 0;
}
- platform_set_drvdata(pdev, master);
+ platform_set_drvdata(pdev, controller);
- master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
- master->bus_num = np ? -1 : pdev->id;
- master->use_gpio_descriptors = true;
+ controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
+ controller->bus_num = np ? -1 : pdev->id;
+ controller->use_gpio_descriptors = true;
- spi_imx = spi_master_get_devdata(master);
- spi_imx->bitbang.master = master;
+ spi_imx = spi_controller_get_devdata(controller);
+ spi_imx->controller = controller;
spi_imx->dev = &pdev->dev;
spi_imx->slave_mode = slave_mode;
@@ -1638,22 +1726,24 @@ static int spi_imx_probe(struct platform_device *pdev)
* board files have <= 3 chip selects.
*/
if (!device_property_read_u32(&pdev->dev, "num-cs", &val))
- master->num_chipselect = val;
+ controller->num_chipselect = val;
else
- master->num_chipselect = 3;
-
- spi_imx->bitbang.setup_transfer = spi_imx_setupxfer;
- spi_imx->bitbang.txrx_bufs = spi_imx_transfer;
- spi_imx->bitbang.master->setup = spi_imx_setup;
- spi_imx->bitbang.master->cleanup = spi_imx_cleanup;
- spi_imx->bitbang.master->prepare_message = spi_imx_prepare_message;
- spi_imx->bitbang.master->unprepare_message = spi_imx_unprepare_message;
- spi_imx->bitbang.master->slave_abort = spi_imx_slave_abort;
- spi_imx->bitbang.master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \
- | SPI_NO_CS;
+ controller->num_chipselect = 3;
+
+ spi_imx->controller->transfer_one = spi_imx_transfer_one;
+ spi_imx->controller->setup = spi_imx_setup;
+ spi_imx->controller->cleanup = spi_imx_cleanup;
+ spi_imx->controller->prepare_message = spi_imx_prepare_message;
+ spi_imx->controller->unprepare_message = spi_imx_unprepare_message;
+ spi_imx->controller->slave_abort = spi_imx_slave_abort;
+ spi_imx->controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_NO_CS;
+
if (is_imx35_cspi(spi_imx) || is_imx51_ecspi(spi_imx) ||
is_imx53_ecspi(spi_imx))
- spi_imx->bitbang.master->mode_bits |= SPI_LOOP | SPI_READY;
+ spi_imx->controller->mode_bits |= SPI_LOOP | SPI_READY;
+
+ if (is_imx51_ecspi(spi_imx) || is_imx53_ecspi(spi_imx))
+ spi_imx->controller->mode_bits |= SPI_RX_CPHA_FLIP;
if (is_imx51_ecspi(spi_imx) &&
device_property_read_u32(&pdev->dev, "cs-gpios", NULL))
@@ -1662,7 +1752,7 @@ static int spi_imx_probe(struct platform_device *pdev)
* setting the burst length to the word size. This is
* considerably faster than manually controlling the CS.
*/
- spi_imx->bitbang.master->mode_bits |= SPI_CS_WORD;
+ spi_imx->controller->mode_bits |= SPI_CS_WORD;
spi_imx->spi_drctl = spi_drctl;
@@ -1672,38 +1762,38 @@ static int spi_imx_probe(struct platform_device *pdev)
spi_imx->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(spi_imx->base)) {
ret = PTR_ERR(spi_imx->base);
- goto out_master_put;
+ goto out_controller_put;
}
spi_imx->base_phys = res->start;
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
ret = irq;
- goto out_master_put;
+ goto out_controller_put;
}
ret = devm_request_irq(&pdev->dev, irq, spi_imx_isr, 0,
dev_name(&pdev->dev), spi_imx);
if (ret) {
dev_err(&pdev->dev, "can't get irq%d: %d\n", irq, ret);
- goto out_master_put;
+ goto out_controller_put;
}
spi_imx->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
if (IS_ERR(spi_imx->clk_ipg)) {
ret = PTR_ERR(spi_imx->clk_ipg);
- goto out_master_put;
+ goto out_controller_put;
}
spi_imx->clk_per = devm_clk_get(&pdev->dev, "per");
if (IS_ERR(spi_imx->clk_per)) {
ret = PTR_ERR(spi_imx->clk_per);
- goto out_master_put;
+ goto out_controller_put;
}
ret = clk_prepare_enable(spi_imx->clk_per);
if (ret)
- goto out_master_put;
+ goto out_controller_put;
ret = clk_prepare_enable(spi_imx->clk_ipg);
if (ret)
@@ -1721,7 +1811,7 @@ static int spi_imx_probe(struct platform_device *pdev)
* if validated on other chips.
*/
if (spi_imx->devtype_data->has_dmamode) {
- ret = spi_imx_sdma_init(&pdev->dev, spi_imx, master);
+ ret = spi_imx_sdma_init(&pdev->dev, spi_imx, controller);
if (ret == -EPROBE_DEFER)
goto out_runtime_pm_put;
@@ -1734,11 +1824,11 @@ static int spi_imx_probe(struct platform_device *pdev)
spi_imx->devtype_data->intctrl(spi_imx, 0);
- master->dev.of_node = pdev->dev.of_node;
- ret = spi_bitbang_start(&spi_imx->bitbang);
+ controller->dev.of_node = pdev->dev.of_node;
+ ret = spi_register_controller(controller);
if (ret) {
- dev_err_probe(&pdev->dev, ret, "bitbang start failed\n");
- goto out_bitbang_start;
+ dev_err_probe(&pdev->dev, ret, "register controller failed\n");
+ goto out_register_controller;
}
pm_runtime_mark_last_busy(spi_imx->dev);
@@ -1746,7 +1836,7 @@ static int spi_imx_probe(struct platform_device *pdev)
return ret;
-out_bitbang_start:
+out_register_controller:
if (spi_imx->devtype_data->has_dmamode)
spi_imx_sdma_exit(spi_imx);
out_runtime_pm_put:
@@ -1757,23 +1847,22 @@ out_runtime_pm_put:
clk_disable_unprepare(spi_imx->clk_ipg);
out_put_per:
clk_disable_unprepare(spi_imx->clk_per);
-out_master_put:
- spi_master_put(master);
+out_controller_put:
+ spi_controller_put(controller);
return ret;
}
static int spi_imx_remove(struct platform_device *pdev)
{
- struct spi_master *master = platform_get_drvdata(pdev);
- struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
+ struct spi_controller *controller = platform_get_drvdata(pdev);
+ struct spi_imx_data *spi_imx = spi_controller_get_devdata(controller);
int ret;
- spi_bitbang_stop(&spi_imx->bitbang);
+ spi_unregister_controller(controller);
- ret = pm_runtime_get_sync(spi_imx->dev);
+ ret = pm_runtime_resume_and_get(spi_imx->dev);
if (ret < 0) {
- pm_runtime_put_noidle(spi_imx->dev);
dev_err(spi_imx->dev, "failed to enable clock\n");
return ret;
}
@@ -1785,18 +1874,17 @@ static int spi_imx_remove(struct platform_device *pdev)
pm_runtime_disable(spi_imx->dev);
spi_imx_sdma_exit(spi_imx);
- spi_master_put(master);
return 0;
}
static int __maybe_unused spi_imx_runtime_resume(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
+ struct spi_controller *controller = dev_get_drvdata(dev);
struct spi_imx_data *spi_imx;
int ret;
- spi_imx = spi_master_get_devdata(master);
+ spi_imx = spi_controller_get_devdata(controller);
ret = clk_prepare_enable(spi_imx->clk_per);
if (ret)
@@ -1813,10 +1901,10 @@ static int __maybe_unused spi_imx_runtime_resume(struct device *dev)
static int __maybe_unused spi_imx_runtime_suspend(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
+ struct spi_controller *controller = dev_get_drvdata(dev);
struct spi_imx_data *spi_imx;
- spi_imx = spi_master_get_devdata(master);
+ spi_imx = spi_controller_get_devdata(controller);
clk_disable_unprepare(spi_imx->clk_per);
clk_disable_unprepare(spi_imx->clk_ipg);
diff --git a/drivers/spi/spi-ingenic.c b/drivers/spi/spi-ingenic.c
index 03077a7e11c8..713a238bee63 100644
--- a/drivers/spi/spi-ingenic.c
+++ b/drivers/spi/spi-ingenic.c
@@ -1,8 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * SPI bus driver for the Ingenic JZ47xx SoCs
+ * SPI bus driver for the Ingenic SoCs
* Copyright (c) 2017-2021 Artur Rojek <contact@artur-rojek.eu>
* Copyright (c) 2017-2021 Paul Cercueil <paul@crapouillou.net>
+ * Copyright (c) 2022 周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com>
*/
#include <linux/clk.h>
@@ -52,6 +53,9 @@ struct jz_soc_info {
u32 bits_per_word_mask;
struct reg_field flen_field;
bool has_trendian;
+
+ unsigned int max_speed_hz;
+ unsigned int max_native_cs;
};
struct ingenic_spi {
@@ -380,7 +384,7 @@ static int spi_ingenic_probe(struct platform_device *pdev)
struct spi_controller *ctlr;
struct ingenic_spi *priv;
void __iomem *base;
- int ret;
+ int num_cs, ret;
pdata = of_device_get_match_data(dev);
if (!pdata) {
@@ -416,6 +420,9 @@ static int spi_ingenic_probe(struct platform_device *pdev)
if (IS_ERR(priv->flen_field))
return PTR_ERR(priv->flen_field);
+ if (device_property_read_u32(dev, "num-cs", &num_cs))
+ num_cs = pdata->max_native_cs;
+
platform_set_drvdata(pdev, ctlr);
ctlr->prepare_transfer_hardware = spi_ingenic_prepare_hardware;
@@ -428,8 +435,10 @@ static int spi_ingenic_probe(struct platform_device *pdev)
ctlr->max_dma_len = SPI_INGENIC_FIFO_SIZE;
ctlr->bits_per_word_mask = pdata->bits_per_word_mask;
ctlr->min_speed_hz = 7200;
- ctlr->max_speed_hz = 54000000;
- ctlr->num_chipselect = 2;
+ ctlr->max_speed_hz = pdata->max_speed_hz;
+ ctlr->use_gpio_descriptors = true;
+ ctlr->max_native_cs = pdata->max_native_cs;
+ ctlr->num_chipselect = num_cs;
ctlr->dev.of_node = pdev->dev.of_node;
if (spi_ingenic_request_dma(ctlr, dev))
@@ -452,17 +461,44 @@ static const struct jz_soc_info jz4750_soc_info = {
.bits_per_word_mask = SPI_BPW_RANGE_MASK(2, 17),
.flen_field = REG_FIELD(REG_SSICR1, 4, 7),
.has_trendian = false,
+
+ .max_speed_hz = 54000000,
+ .max_native_cs = 2,
};
static const struct jz_soc_info jz4780_soc_info = {
.bits_per_word_mask = SPI_BPW_RANGE_MASK(2, 32),
.flen_field = REG_FIELD(REG_SSICR1, 3, 7),
.has_trendian = true,
+
+ .max_speed_hz = 54000000,
+ .max_native_cs = 2,
+};
+
+static const struct jz_soc_info x1000_soc_info = {
+ .bits_per_word_mask = SPI_BPW_RANGE_MASK(2, 32),
+ .flen_field = REG_FIELD(REG_SSICR1, 3, 7),
+ .has_trendian = true,
+
+ .max_speed_hz = 50000000,
+ .max_native_cs = 2,
+};
+
+static const struct jz_soc_info x2000_soc_info = {
+ .bits_per_word_mask = SPI_BPW_RANGE_MASK(2, 32),
+ .flen_field = REG_FIELD(REG_SSICR1, 3, 7),
+ .has_trendian = true,
+
+ .max_speed_hz = 50000000,
+ .max_native_cs = 1,
};
static const struct of_device_id spi_ingenic_of_match[] = {
{ .compatible = "ingenic,jz4750-spi", .data = &jz4750_soc_info },
+ { .compatible = "ingenic,jz4775-spi", .data = &jz4780_soc_info },
{ .compatible = "ingenic,jz4780-spi", .data = &jz4780_soc_info },
+ { .compatible = "ingenic,x1000-spi", .data = &x1000_soc_info },
+ { .compatible = "ingenic,x2000-spi", .data = &x2000_soc_info },
{}
};
MODULE_DEVICE_TABLE(of, spi_ingenic_of_match);
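
Each jz_soc_info entry above carries per-SoC defaults (max_speed_hz, max_native_cs), and the probe path lets an optional "num-cs" devicetree property override the chip-select count. A minimal sketch of that fallback, where soc_default stands in for pdata->max_native_cs:

#include <linux/property.h>

static u32 demo_num_cs(struct device *dev, u32 soc_default)
{
	u32 num_cs;

	/* Property absent: keep the per-SoC default. */
	if (device_property_read_u32(dev, "num-cs", &num_cs))
		num_cs = soc_default;

	return num_cs;
}
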
@@ -476,7 +512,8 @@ static struct platform_driver spi_ingenic_driver = {
};
module_platform_driver(spi_ingenic_driver);
-MODULE_DESCRIPTION("SPI bus driver for the Ingenic JZ47xx SoCs");
+MODULE_DESCRIPTION("SPI bus driver for the Ingenic SoCs");
MODULE_AUTHOR("Artur Rojek <contact@artur-rojek.eu>");
MODULE_AUTHOR("Paul Cercueil <paul@crapouillou.net>");
+MODULE_AUTHOR("周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-intel-pci.c b/drivers/spi/spi-intel-pci.c
index a5ef7a526a7f..f6eec7a869b6 100644
--- a/drivers/spi/spi-intel-pci.c
+++ b/drivers/spi/spi-intel-pci.c
@@ -72,6 +72,7 @@ static const struct pci_device_id intel_spi_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x4da4), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x51a4), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x54a4), (unsigned long)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0x7a24), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x7aa4), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0xa0a4), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0xa1a4), (unsigned long)&bxt_info },
diff --git a/drivers/spi/spi-intel.c b/drivers/spi/spi-intel.c
index e937cfe85559..50f42983b950 100644
--- a/drivers/spi/spi-intel.c
+++ b/drivers/spi/spi-intel.c
@@ -779,10 +779,59 @@ static const char *intel_spi_get_name(struct spi_mem *mem)
return dev_name(ispi->dev);
}
+static int intel_spi_dirmap_create(struct spi_mem_dirmap_desc *desc)
+{
+ struct intel_spi *ispi = spi_master_get_devdata(desc->mem->spi->master);
+ const struct intel_spi_mem_op *iop;
+
+ iop = intel_spi_match_mem_op(ispi, &desc->info.op_tmpl);
+ if (!iop)
+ return -EOPNOTSUPP;
+
+ desc->priv = (void *)iop;
+ return 0;
+}
+
+static ssize_t intel_spi_dirmap_read(struct spi_mem_dirmap_desc *desc, u64 offs,
+ size_t len, void *buf)
+{
+ struct intel_spi *ispi = spi_master_get_devdata(desc->mem->spi->master);
+ const struct intel_spi_mem_op *iop = desc->priv;
+ struct spi_mem_op op = desc->info.op_tmpl;
+ int ret;
+
+ /* Fill in the gaps */
+ op.addr.val = offs;
+ op.data.nbytes = len;
+ op.data.buf.in = buf;
+
+ ret = iop->exec_op(ispi, iop, &op);
+ return ret ? ret : len;
+}
+
+static ssize_t intel_spi_dirmap_write(struct spi_mem_dirmap_desc *desc, u64 offs,
+ size_t len, const void *buf)
+{
+ struct intel_spi *ispi = spi_master_get_devdata(desc->mem->spi->master);
+ const struct intel_spi_mem_op *iop = desc->priv;
+ struct spi_mem_op op = desc->info.op_tmpl;
+ int ret;
+
+ op.addr.val = offs;
+ op.data.nbytes = len;
+ op.data.buf.out = buf;
+
+ ret = iop->exec_op(ispi, iop, &op);
+ return ret ? ret : len;
+}
+
static const struct spi_controller_mem_ops intel_spi_mem_ops = {
.supports_op = intel_spi_supports_mem_op,
.exec_op = intel_spi_exec_mem_op,
.get_name = intel_spi_get_name,
+ .dirmap_create = intel_spi_dirmap_create,
+ .dirmap_read = intel_spi_dirmap_read,
+ .dirmap_write = intel_spi_dirmap_write,
};
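
With dirmap_create/dirmap_read/dirmap_write wired into the mem_ops, spi-mem clients can create a direct-mapping descriptor once and issue repeated accesses against it. A minimal consumer-side sketch using the generic spi-mem dirmap API; the opcode and window size are illustrative, not taken from this driver:

#include <linux/err.h>
#include <linux/sizes.h>
#include <linux/spi/spi-mem.h>

/* Sketch: map a 1 MiB read window and fetch 256 bytes from offset 0. */
static ssize_t demo_dirmap_read(struct spi_mem *mem, void *buf)
{
	struct spi_mem_dirmap_info info = {
		.op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(0x03, 1),	/* READ */
				      SPI_MEM_OP_ADDR(3, 0, 1),
				      SPI_MEM_OP_NO_DUMMY,
				      SPI_MEM_OP_DATA_IN(0, NULL, 1)),
		.offset = 0,
		.length = SZ_1M,
	};
	struct spi_mem_dirmap_desc *desc;
	ssize_t ret;

	desc = spi_mem_dirmap_create(mem, &info);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	ret = spi_mem_dirmap_read(desc, 0, 256, buf);
	spi_mem_dirmap_destroy(desc);
	return ret;
}
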
#define INTEL_SPI_OP_ADDR(__nbytes) \
@@ -1205,7 +1254,7 @@ static int intel_spi_populate_chip(struct intel_spi *ispi)
* intel_spi_probe() - Probe the Intel SPI flash controller
* @dev: Pointer to the parent device
* @mem: MMIO resource
- * @info: Platform spefific information
+ * @info: Platform specific information
*
* Probes Intel SPI flash controller and creates the flash chip device.
* Returns %0 on success and negative errno in case of failure.
diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c
index 0e8dafc62d94..e8de4f5017cd 100644
--- a/drivers/spi/spi-mem.c
+++ b/drivers/spi/spi-mem.c
@@ -10,6 +10,7 @@
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
+#include <linux/sched/task_stack.h>
#include "internals.h"
@@ -211,6 +212,15 @@ static int spi_mem_check_op(const struct spi_mem_op *op)
!spi_mem_buswidth_is_valid(op->data.buswidth))
return -EINVAL;
+ /* Buffers must be DMA-able. */
+ if (WARN_ON_ONCE(op->data.dir == SPI_MEM_DATA_IN &&
+ object_is_on_stack(op->data.buf.in)))
+ return -EINVAL;
+
+ if (WARN_ON_ONCE(op->data.dir == SPI_MEM_DATA_OUT &&
+ object_is_on_stack(op->data.buf.out)))
+ return -EINVAL;
+
return 0;
}
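
The new WARN_ON_ONCE() checks reject on-stack data buffers up front: stack memory may be vmapped or share cachelines with live data, so it cannot be safely handed to the DMA API. Callers must use heap allocations instead. A minimal sketch of the compliant pattern; the opcode is illustrative:

#include <linux/slab.h>
#include <linux/spi/spi-mem.h>

/* Sketch: read a 3-byte ID through a kmalloc'ed (DMA-able) buffer. */
static int demo_read_id(struct spi_mem *mem)
{
	struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0x9f, 1),
					  SPI_MEM_OP_NO_ADDR,
					  SPI_MEM_OP_NO_DUMMY,
					  SPI_MEM_OP_DATA_IN(3, NULL, 1));
	u8 *id;
	int ret;

	id = kmalloc(3, GFP_KERNEL);	/* a stack u8 id[3] would now WARN */
	if (!id)
		return -ENOMEM;

	op.data.buf.in = id;
	ret = spi_mem_exec_op(mem, &op);
	kfree(id);
	return ret;
}
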
@@ -262,9 +272,8 @@ static int spi_mem_access_start(struct spi_mem *mem)
if (ctlr->auto_runtime_pm) {
int ret;
- ret = pm_runtime_get_sync(ctlr->dev.parent);
+ ret = pm_runtime_resume_and_get(ctlr->dev.parent);
if (ret < 0) {
- pm_runtime_put_noidle(ctlr->dev.parent);
dev_err(&ctlr->dev, "Failed to power device: %d\n",
ret);
return ret;
diff --git a/drivers/spi/spi-mpc52xx-psc.c b/drivers/spi/spi-mpc52xx-psc.c
index 21ef5d481faf..7654736c2c0e 100644
--- a/drivers/spi/spi-mpc52xx-psc.c
+++ b/drivers/spi/spi-mpc52xx-psc.c
@@ -20,6 +20,7 @@
#include <linux/spi/spi.h>
#include <linux/fsl_devices.h>
#include <linux/slab.h>
+#include <linux/of_irq.h>
#include <asm/mpc52xx.h>
#include <asm/mpc52xx_psc.h>
diff --git a/drivers/spi/spi-mpc52xx.c b/drivers/spi/spi-mpc52xx.c
index 51041526546d..3ebdce804b90 100644
--- a/drivers/spi/spi-mpc52xx.c
+++ b/drivers/spi/spi-mpc52xx.c
@@ -19,6 +19,9 @@
#include <linux/io.h>
#include <linux/of_gpio.h>
#include <linux/slab.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
#include <asm/time.h>
#include <asm/mpc52xx.h>
diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
index 1a0b3208dfca..0a3b9f7eed30 100644
--- a/drivers/spi/spi-mt65xx.c
+++ b/drivers/spi/spi-mt65xx.c
@@ -17,105 +17,148 @@
#include <linux/platform_data/spi-mt65xx.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
+#include <linux/spi/spi-mem.h>
#include <linux/dma-mapping.h>
-#define SPI_CFG0_REG 0x0000
-#define SPI_CFG1_REG 0x0004
-#define SPI_TX_SRC_REG 0x0008
-#define SPI_RX_DST_REG 0x000c
-#define SPI_TX_DATA_REG 0x0010
-#define SPI_RX_DATA_REG 0x0014
-#define SPI_CMD_REG 0x0018
-#define SPI_STATUS0_REG 0x001c
-#define SPI_PAD_SEL_REG 0x0024
-#define SPI_CFG2_REG 0x0028
-#define SPI_TX_SRC_REG_64 0x002c
-#define SPI_RX_DST_REG_64 0x0030
-#define SPI_CFG3_IPM_REG 0x0040
-
-#define SPI_CFG0_SCK_HIGH_OFFSET 0
-#define SPI_CFG0_SCK_LOW_OFFSET 8
-#define SPI_CFG0_CS_HOLD_OFFSET 16
-#define SPI_CFG0_CS_SETUP_OFFSET 24
-#define SPI_ADJUST_CFG0_CS_HOLD_OFFSET 0
-#define SPI_ADJUST_CFG0_CS_SETUP_OFFSET 16
-
-#define SPI_CFG1_CS_IDLE_OFFSET 0
-#define SPI_CFG1_PACKET_LOOP_OFFSET 8
-#define SPI_CFG1_PACKET_LENGTH_OFFSET 16
-#define SPI_CFG1_GET_TICK_DLY_OFFSET 29
-#define SPI_CFG1_GET_TICK_DLY_OFFSET_V1 30
-
-#define SPI_CFG1_GET_TICK_DLY_MASK 0xe0000000
-#define SPI_CFG1_GET_TICK_DLY_MASK_V1 0xc0000000
-
-#define SPI_CFG1_CS_IDLE_MASK 0xff
-#define SPI_CFG1_PACKET_LOOP_MASK 0xff00
-#define SPI_CFG1_PACKET_LENGTH_MASK 0x3ff0000
-#define SPI_CFG1_IPM_PACKET_LENGTH_MASK GENMASK(31, 16)
-#define SPI_CFG2_SCK_HIGH_OFFSET 0
-#define SPI_CFG2_SCK_LOW_OFFSET 16
-
-#define SPI_CMD_ACT BIT(0)
-#define SPI_CMD_RESUME BIT(1)
-#define SPI_CMD_RST BIT(2)
-#define SPI_CMD_PAUSE_EN BIT(4)
-#define SPI_CMD_DEASSERT BIT(5)
-#define SPI_CMD_SAMPLE_SEL BIT(6)
-#define SPI_CMD_CS_POL BIT(7)
-#define SPI_CMD_CPHA BIT(8)
-#define SPI_CMD_CPOL BIT(9)
-#define SPI_CMD_RX_DMA BIT(10)
-#define SPI_CMD_TX_DMA BIT(11)
-#define SPI_CMD_TXMSBF BIT(12)
-#define SPI_CMD_RXMSBF BIT(13)
-#define SPI_CMD_RX_ENDIAN BIT(14)
-#define SPI_CMD_TX_ENDIAN BIT(15)
-#define SPI_CMD_FINISH_IE BIT(16)
-#define SPI_CMD_PAUSE_IE BIT(17)
-#define SPI_CMD_IPM_NONIDLE_MODE BIT(19)
-#define SPI_CMD_IPM_SPIM_LOOP BIT(21)
-#define SPI_CMD_IPM_GET_TICKDLY_OFFSET 22
+#define SPI_CFG0_REG 0x0000
+#define SPI_CFG1_REG 0x0004
+#define SPI_TX_SRC_REG 0x0008
+#define SPI_RX_DST_REG 0x000c
+#define SPI_TX_DATA_REG 0x0010
+#define SPI_RX_DATA_REG 0x0014
+#define SPI_CMD_REG 0x0018
+#define SPI_STATUS0_REG 0x001c
+#define SPI_PAD_SEL_REG 0x0024
+#define SPI_CFG2_REG 0x0028
+#define SPI_TX_SRC_REG_64 0x002c
+#define SPI_RX_DST_REG_64 0x0030
+#define SPI_CFG3_IPM_REG 0x0040
+
+#define SPI_CFG0_SCK_HIGH_OFFSET 0
+#define SPI_CFG0_SCK_LOW_OFFSET 8
+#define SPI_CFG0_CS_HOLD_OFFSET 16
+#define SPI_CFG0_CS_SETUP_OFFSET 24
+#define SPI_ADJUST_CFG0_CS_HOLD_OFFSET 0
+#define SPI_ADJUST_CFG0_CS_SETUP_OFFSET 16
+
+#define SPI_CFG1_CS_IDLE_OFFSET 0
+#define SPI_CFG1_PACKET_LOOP_OFFSET 8
+#define SPI_CFG1_PACKET_LENGTH_OFFSET 16
+#define SPI_CFG1_GET_TICK_DLY_OFFSET 29
+#define SPI_CFG1_GET_TICK_DLY_OFFSET_V1 30
+
+#define SPI_CFG1_GET_TICK_DLY_MASK 0xe0000000
+#define SPI_CFG1_GET_TICK_DLY_MASK_V1 0xc0000000
+
+#define SPI_CFG1_CS_IDLE_MASK 0xff
+#define SPI_CFG1_PACKET_LOOP_MASK 0xff00
+#define SPI_CFG1_PACKET_LENGTH_MASK 0x3ff0000
+#define SPI_CFG1_IPM_PACKET_LENGTH_MASK GENMASK(31, 16)
+#define SPI_CFG2_SCK_HIGH_OFFSET 0
+#define SPI_CFG2_SCK_LOW_OFFSET 16
+
+#define SPI_CMD_ACT BIT(0)
+#define SPI_CMD_RESUME BIT(1)
+#define SPI_CMD_RST BIT(2)
+#define SPI_CMD_PAUSE_EN BIT(4)
+#define SPI_CMD_DEASSERT BIT(5)
+#define SPI_CMD_SAMPLE_SEL BIT(6)
+#define SPI_CMD_CS_POL BIT(7)
+#define SPI_CMD_CPHA BIT(8)
+#define SPI_CMD_CPOL BIT(9)
+#define SPI_CMD_RX_DMA BIT(10)
+#define SPI_CMD_TX_DMA BIT(11)
+#define SPI_CMD_TXMSBF BIT(12)
+#define SPI_CMD_RXMSBF BIT(13)
+#define SPI_CMD_RX_ENDIAN BIT(14)
+#define SPI_CMD_TX_ENDIAN BIT(15)
+#define SPI_CMD_FINISH_IE BIT(16)
+#define SPI_CMD_PAUSE_IE BIT(17)
+#define SPI_CMD_IPM_NONIDLE_MODE BIT(19)
+#define SPI_CMD_IPM_SPIM_LOOP BIT(21)
+#define SPI_CMD_IPM_GET_TICKDLY_OFFSET 22
#define SPI_CMD_IPM_GET_TICKDLY_MASK GENMASK(24, 22)
-#define SPI_CFG3_IPM_HALF_DUPLEX_DIR BIT(2)
-#define SPI_CFG3_IPM_HALF_DUPLEX_EN BIT(3)
-#define MT8173_SPI_MAX_PAD_SEL 3
-#define MTK_SPI_PAUSE_INT_STATUS 0x2
+#define PIN_MODE_CFG(x) ((x) / 2)
-#define MTK_SPI_IDLE 0
-#define MTK_SPI_PAUSED 1
+#define SPI_CFG3_IPM_HALF_DUPLEX_DIR BIT(2)
+#define SPI_CFG3_IPM_HALF_DUPLEX_EN BIT(3)
+#define SPI_CFG3_IPM_XMODE_EN BIT(4)
+#define SPI_CFG3_IPM_NODATA_FLAG BIT(5)
+#define SPI_CFG3_IPM_CMD_BYTELEN_OFFSET 8
+#define SPI_CFG3_IPM_ADDR_BYTELEN_OFFSET 12
-#define MTK_SPI_MAX_FIFO_SIZE 32U
-#define MTK_SPI_PACKET_SIZE 1024
-#define MTK_SPI_IPM_PACKET_SIZE SZ_64K
-#define MTK_SPI_32BITS_MASK (0xffffffff)
+#define SPI_CFG3_IPM_CMD_PIN_MODE_MASK GENMASK(1, 0)
+#define SPI_CFG3_IPM_CMD_BYTELEN_MASK GENMASK(11, 8)
+#define SPI_CFG3_IPM_ADDR_BYTELEN_MASK GENMASK(15, 12)
-#define DMA_ADDR_EXT_BITS (36)
-#define DMA_ADDR_DEF_BITS (32)
+#define MT8173_SPI_MAX_PAD_SEL 3
+#define MTK_SPI_PAUSE_INT_STATUS 0x2
+
+#define MTK_SPI_MAX_FIFO_SIZE 32U
+#define MTK_SPI_PACKET_SIZE 1024
+#define MTK_SPI_IPM_PACKET_SIZE SZ_64K
+#define MTK_SPI_IPM_PACKET_LOOP SZ_256
+
+#define MTK_SPI_IDLE 0
+#define MTK_SPI_PAUSED 1
+
+#define MTK_SPI_32BITS_MASK (0xffffffff)
+
+#define DMA_ADDR_EXT_BITS (36)
+#define DMA_ADDR_DEF_BITS (32)
+
+/**
+ * struct mtk_spi_compatible - device data structure
+ * @need_pad_sel: Enable pad (pins) selection in SPI controller
+ * @must_tx: Must explicitly send dummy TX bytes to do RX only transfer
+ * @enhance_timing: Enable adjusting cfg register to enhance time accuracy
+ * @dma_ext: DMA address extension supported
+ * @no_need_unprepare: Don't unprepare the SPI clk during runtime
+ * @ipm_design: Adjust/extend registers to support IPM design IP features
+ */
struct mtk_spi_compatible {
bool need_pad_sel;
- /* Must explicitly send dummy Tx bytes to do Rx only transfer */
bool must_tx;
- /* some IC design adjust cfg register to enhance time accuracy */
bool enhance_timing;
- /* some IC support DMA addr extension */
bool dma_ext;
- /* some IC no need unprepare SPI clk */
bool no_need_unprepare;
- /* IPM design adjust and extend register to support more features */
bool ipm_design;
-
};
+/**
+ * struct mtk_spi - SPI driver instance
+ * @base: Start address of the SPI controller registers
+ * @state: SPI controller state
+ * @pad_num: Number of pad_sel entries
+ * @pad_sel: Groups of pins to select
+ * @parent_clk: Parent of sel_clk
+ * @sel_clk: SPI master mux clock
+ * @spi_clk: Peripheral clock
+ * @spi_hclk: AHB bus clock
+ * @cur_transfer: Currently processed SPI transfer
+ * @xfer_len: Number of bytes to transfer
+ * @num_xfered: Number of transferred bytes
+ * @tx_sgl: TX transfer scatterlist
+ * @rx_sgl: RX transfer scatterlist
+ * @tx_sgl_len: Size of TX DMA transfer
+ * @rx_sgl_len: Size of RX DMA transfer
+ * @dev_comp: Device data structure
+ * @spi_clk_hz: Current SPI clock in Hz
+ * @spimem_done: SPI-MEM operation completion
+ * @use_spimem: Enables SPI-MEM
+ * @dev: Device pointer
+ * @tx_dma: DMA start for SPI-MEM TX
+ * @rx_dma: DMA start for SPI-MEM RX
+ */
struct mtk_spi {
void __iomem *base;
u32 state;
int pad_num;
u32 *pad_sel;
- struct clk *parent_clk, *sel_clk, *spi_clk;
+ struct clk *parent_clk, *sel_clk, *spi_clk, *spi_hclk;
struct spi_transfer *cur_transfer;
u32 xfer_len;
u32 num_xfered;
@@ -123,6 +166,11 @@ struct mtk_spi {
u32 tx_sgl_len, rx_sgl_len;
const struct mtk_spi_compatible *dev_comp;
u32 spi_clk_hz;
+ struct completion spimem_done;
+ bool use_spimem;
+ struct device *dev;
+ dma_addr_t tx_dma;
+ dma_addr_t rx_dma;
};
static const struct mtk_spi_compatible mtk_common_compat;
@@ -704,6 +752,12 @@ static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id)
else
mdata->state = MTK_SPI_IDLE;
+ /* SPI-MEM ops */
+ if (mdata->use_spimem) {
+ complete(&mdata->spimem_done);
+ return IRQ_HANDLED;
+ }
+
if (!master->can_dma(master, NULL, trans)) {
if (trans->rx_buf) {
cnt = mdata->xfer_len / 4;
@@ -787,21 +841,287 @@ static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static int mtk_spi_mem_adjust_op_size(struct spi_mem *mem,
+ struct spi_mem_op *op)
+{
+ int opcode_len;
+
+ if (op->data.dir != SPI_MEM_NO_DATA) {
+ opcode_len = 1 + op->addr.nbytes + op->dummy.nbytes;
+ if (opcode_len + op->data.nbytes > MTK_SPI_IPM_PACKET_SIZE) {
+ op->data.nbytes = MTK_SPI_IPM_PACKET_SIZE - opcode_len;
+ /* force data buffer dma-aligned. */
+ op->data.nbytes -= op->data.nbytes % 4;
+ }
+ }
+
+ return 0;
+}
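
adjust_op_size() clamps the data phase so that opcode, address, dummy and data all fit inside one IPM packet, then rounds the data length down to a 4-byte multiple for DMA. A worked example with illustrative numbers:

#include <stdio.h>

#define MTK_SPI_IPM_PACKET_SIZE (64 * 1024)	/* SZ_64K */

int main(void)
{
	unsigned int opcode_len = 1 + 4 + 1;	/* opcode + 4 addr + 1 dummy */
	unsigned int nbytes = 70000;		/* more than one packet holds */

	if (opcode_len + nbytes > MTK_SPI_IPM_PACKET_SIZE) {
		nbytes = MTK_SPI_IPM_PACKET_SIZE - opcode_len;
		nbytes -= nbytes % 4;		/* keep DMA alignment */
	}

	printf("clamped to %u bytes\n", nbytes);	/* 65530 -> 65528 */
	return 0;
}
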
+
+static bool mtk_spi_mem_supports_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ if (!spi_mem_default_supports_op(mem, op))
+ return false;
+
+ if (op->addr.nbytes && op->dummy.nbytes &&
+ op->addr.buswidth != op->dummy.buswidth)
+ return false;
+
+ if (op->addr.nbytes + op->dummy.nbytes > 16)
+ return false;
+
+ if (op->data.nbytes > MTK_SPI_IPM_PACKET_SIZE) {
+ if (op->data.nbytes / MTK_SPI_IPM_PACKET_SIZE >
+ MTK_SPI_IPM_PACKET_LOOP ||
+ op->data.nbytes % MTK_SPI_IPM_PACKET_SIZE != 0)
+ return false;
+ }
+
+ return true;
+}
+
+static void mtk_spi_mem_setup_dma_xfer(struct spi_master *master,
+ const struct spi_mem_op *op)
+{
+ struct mtk_spi *mdata = spi_master_get_devdata(master);
+
+ writel((u32)(mdata->tx_dma & MTK_SPI_32BITS_MASK),
+ mdata->base + SPI_TX_SRC_REG);
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ if (mdata->dev_comp->dma_ext)
+ writel((u32)(mdata->tx_dma >> 32),
+ mdata->base + SPI_TX_SRC_REG_64);
+#endif
+
+ if (op->data.dir == SPI_MEM_DATA_IN) {
+ writel((u32)(mdata->rx_dma & MTK_SPI_32BITS_MASK),
+ mdata->base + SPI_RX_DST_REG);
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ if (mdata->dev_comp->dma_ext)
+ writel((u32)(mdata->rx_dma >> 32),
+ mdata->base + SPI_RX_DST_REG_64);
+#endif
+ }
+}
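
On parts with the dma_ext extension, a DMA address wider than 32 bits is programmed as two writes: the low word into SPI_TX_SRC_REG/SPI_RX_DST_REG and the remaining bits into the *_64 registers. A minimal sketch of the split using the generic kernel helpers (register offsets mirror the defines above):

#include <linux/io.h>
#include <linux/kernel.h>

/* Sketch: program an up-to-36-bit DMA address as a low/high pair. */
static void demo_write_tx_dma_addr(void __iomem *base, dma_addr_t addr)
{
	writel(lower_32_bits(addr), base + 0x0008);	/* SPI_TX_SRC_REG */
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	writel(upper_32_bits(addr), base + 0x002c);	/* SPI_TX_SRC_REG_64 */
#endif
}
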
+
+static int mtk_spi_transfer_wait(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ struct mtk_spi *mdata = spi_master_get_devdata(mem->spi->master);
+ /*
+ * For each byte we wait for 8 cycles of the SPI clock.
+ * Since the speed is defined in Hz and we want milliseconds,
+ * the scale factor is 8 * 1000.
+ */
+ u64 ms = 8000LL;
+
+ if (op->data.dir == SPI_MEM_NO_DATA)
+ ms *= 32; /* prevent a zero timeout for short transfers. */
+ else
+ ms *= op->data.nbytes;
+ ms = div_u64(ms, mem->spi->max_speed_hz);
+ ms += ms + 1000; /* 1s tolerance */
+
+ if (ms > UINT_MAX)
+ ms = UINT_MAX;
+
+ if (!wait_for_completion_timeout(&mdata->spimem_done,
+ msecs_to_jiffies(ms))) {
+ dev_err(mdata->dev, "spi-mem transfer timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
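
The budget works out to 8 cycles per byte converted to milliseconds, doubled, plus one second of slack. A worked example with illustrative numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t ms = 8000ULL;			/* 8 cycles/byte, Hz -> ms */
	uint64_t nbytes = 4096;			/* assumed 4 KiB page read */
	uint64_t max_speed_hz = 26000000;	/* assumed 26 MHz */

	ms *= nbytes;				/* total cycle count * 1000 */
	ms = ms / max_speed_hz;			/* ~1 ms of raw bus time */
	ms += ms + 1000;			/* doubled, plus 1 s tolerance */

	printf("timeout = %llu ms\n", (unsigned long long)ms);	/* 1002 */
	return 0;
}
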
+
+static int mtk_spi_mem_exec_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ struct mtk_spi *mdata = spi_master_get_devdata(mem->spi->master);
+ u32 reg_val, nio, tx_size;
+ char *tx_tmp_buf, *rx_tmp_buf;
+ int ret = 0;
+
+ mdata->use_spimem = true;
+ reinit_completion(&mdata->spimem_done);
+
+ mtk_spi_reset(mdata);
+ mtk_spi_hw_init(mem->spi->master, mem->spi);
+ mtk_spi_prepare_transfer(mem->spi->master, mem->spi->max_speed_hz);
+
+ reg_val = readl(mdata->base + SPI_CFG3_IPM_REG);
+ /* opcode byte len */
+ reg_val &= ~SPI_CFG3_IPM_CMD_BYTELEN_MASK;
+ reg_val |= 1 << SPI_CFG3_IPM_CMD_BYTELEN_OFFSET;
+
+ /* addr & dummy byte len */
+ reg_val &= ~SPI_CFG3_IPM_ADDR_BYTELEN_MASK;
+ if (op->addr.nbytes || op->dummy.nbytes)
+ reg_val |= (op->addr.nbytes + op->dummy.nbytes) <<
+ SPI_CFG3_IPM_ADDR_BYTELEN_OFFSET;
+
+ /* data byte len */
+ if (op->data.dir == SPI_MEM_NO_DATA) {
+ reg_val |= SPI_CFG3_IPM_NODATA_FLAG;
+ writel(0, mdata->base + SPI_CFG1_REG);
+ } else {
+ reg_val &= ~SPI_CFG3_IPM_NODATA_FLAG;
+ mdata->xfer_len = op->data.nbytes;
+ mtk_spi_setup_packet(mem->spi->master);
+ }
+
+ if (op->addr.nbytes || op->dummy.nbytes) {
+ if (op->addr.buswidth == 1 || op->dummy.buswidth == 1)
+ reg_val |= SPI_CFG3_IPM_XMODE_EN;
+ else
+ reg_val &= ~SPI_CFG3_IPM_XMODE_EN;
+ }
+
+ if (op->addr.buswidth == 2 ||
+ op->dummy.buswidth == 2 ||
+ op->data.buswidth == 2)
+ nio = 2;
+ else if (op->addr.buswidth == 4 ||
+ op->dummy.buswidth == 4 ||
+ op->data.buswidth == 4)
+ nio = 4;
+ else
+ nio = 1;
+
+ reg_val &= ~SPI_CFG3_IPM_CMD_PIN_MODE_MASK;
+ reg_val |= PIN_MODE_CFG(nio);
+
+ reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_EN;
+ if (op->data.dir == SPI_MEM_DATA_IN)
+ reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_DIR;
+ else
+ reg_val &= ~SPI_CFG3_IPM_HALF_DUPLEX_DIR;
+ writel(reg_val, mdata->base + SPI_CFG3_IPM_REG);
+
+ tx_size = 1 + op->addr.nbytes + op->dummy.nbytes;
+ if (op->data.dir == SPI_MEM_DATA_OUT)
+ tx_size += op->data.nbytes;
+
+ tx_size = max_t(u32, tx_size, 32);
+
+ tx_tmp_buf = kzalloc(tx_size, GFP_KERNEL | GFP_DMA);
+ if (!tx_tmp_buf) {
+ mdata->use_spimem = false;
+ return -ENOMEM;
+ }
+
+ tx_tmp_buf[0] = op->cmd.opcode;
+
+ if (op->addr.nbytes) {
+ int i;
+
+ for (i = 0; i < op->addr.nbytes; i++)
+ tx_tmp_buf[i + 1] = op->addr.val >>
+ (8 * (op->addr.nbytes - i - 1));
+ }
+
+ if (op->dummy.nbytes)
+ memset(tx_tmp_buf + op->addr.nbytes + 1,
+ 0xff,
+ op->dummy.nbytes);
+
+ if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT)
+ memcpy(tx_tmp_buf + op->dummy.nbytes + op->addr.nbytes + 1,
+ op->data.buf.out,
+ op->data.nbytes);
+
+ mdata->tx_dma = dma_map_single(mdata->dev, tx_tmp_buf,
+ tx_size, DMA_TO_DEVICE);
+ if (dma_mapping_error(mdata->dev, mdata->tx_dma)) {
+ ret = -ENOMEM;
+ goto err_exit;
+ }
+
+ if (op->data.dir == SPI_MEM_DATA_IN) {
+ if (!IS_ALIGNED((size_t)op->data.buf.in, 4)) {
+ rx_tmp_buf = kzalloc(op->data.nbytes,
+ GFP_KERNEL | GFP_DMA);
+ if (!rx_tmp_buf) {
+ ret = -ENOMEM;
+ goto unmap_tx_dma;
+ }
+ } else {
+ rx_tmp_buf = op->data.buf.in;
+ }
+
+ mdata->rx_dma = dma_map_single(mdata->dev,
+ rx_tmp_buf,
+ op->data.nbytes,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(mdata->dev, mdata->rx_dma)) {
+ ret = -ENOMEM;
+ goto kfree_rx_tmp_buf;
+ }
+ }
+
+ reg_val = readl(mdata->base + SPI_CMD_REG);
+ reg_val |= SPI_CMD_TX_DMA;
+ if (op->data.dir == SPI_MEM_DATA_IN)
+ reg_val |= SPI_CMD_RX_DMA;
+ writel(reg_val, mdata->base + SPI_CMD_REG);
+
+ mtk_spi_mem_setup_dma_xfer(mem->spi->master, op);
+
+ mtk_spi_enable_transfer(mem->spi->master);
+
+ /* Wait for the interrupt. */
+ ret = mtk_spi_transfer_wait(mem, op);
+ if (ret)
+ goto unmap_rx_dma;
+
+ /* spi disable dma */
+ reg_val = readl(mdata->base + SPI_CMD_REG);
+ reg_val &= ~SPI_CMD_TX_DMA;
+ if (op->data.dir == SPI_MEM_DATA_IN)
+ reg_val &= ~SPI_CMD_RX_DMA;
+ writel(reg_val, mdata->base + SPI_CMD_REG);
+
+unmap_rx_dma:
+ if (op->data.dir == SPI_MEM_DATA_IN) {
+ dma_unmap_single(mdata->dev, mdata->rx_dma,
+ op->data.nbytes, DMA_FROM_DEVICE);
+ if (!IS_ALIGNED((size_t)op->data.buf.in, 4))
+ memcpy(op->data.buf.in, rx_tmp_buf, op->data.nbytes);
+ }
+kfree_rx_tmp_buf:
+ if (op->data.dir == SPI_MEM_DATA_IN &&
+ !IS_ALIGNED((size_t)op->data.buf.in, 4))
+ kfree(rx_tmp_buf);
+unmap_tx_dma:
+ dma_unmap_single(mdata->dev, mdata->tx_dma,
+ tx_size, DMA_TO_DEVICE);
+err_exit:
+ kfree(tx_tmp_buf);
+ mdata->use_spimem = false;
+
+ return ret;
+}
+
+static const struct spi_controller_mem_ops mtk_spi_mem_ops = {
+ .adjust_op_size = mtk_spi_mem_adjust_op_size,
+ .supports_op = mtk_spi_mem_supports_op,
+ .exec_op = mtk_spi_mem_exec_op,
+};
+
static int mtk_spi_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct spi_master *master;
struct mtk_spi *mdata;
- const struct of_device_id *of_id;
int i, irq, ret, addr_bits;
- master = spi_alloc_master(&pdev->dev, sizeof(*mdata));
- if (!master) {
- dev_err(&pdev->dev, "failed to alloc spi master\n");
- return -ENOMEM;
- }
+ master = devm_spi_alloc_master(dev, sizeof(*mdata));
+ if (!master)
+ return dev_err_probe(dev, -ENOMEM, "failed to alloc spi master\n");
master->auto_runtime_pm = true;
- master->dev.of_node = pdev->dev.of_node;
+ master->dev.of_node = dev->of_node;
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
master->set_cs = mtk_spi_set_cs;
@@ -812,15 +1132,8 @@ static int mtk_spi_probe(struct platform_device *pdev)
master->set_cs_timing = mtk_spi_set_hw_cs_timing;
master->use_gpio_descriptors = true;
- of_id = of_match_node(mtk_spi_of_match, pdev->dev.of_node);
- if (!of_id) {
- dev_err(&pdev->dev, "failed to probe of_node\n");
- ret = -EINVAL;
- goto err_put_master;
- }
-
mdata = spi_master_get_devdata(master);
- mdata->dev_comp = of_id->data;
+ mdata->dev_comp = device_get_match_data(dev);
if (mdata->dev_comp->enhance_timing)
master->mode_bits |= SPI_CS_HIGH;
@@ -830,143 +1143,122 @@ static int mtk_spi_probe(struct platform_device *pdev)
if (mdata->dev_comp->ipm_design)
master->mode_bits |= SPI_LOOP;
+ if (mdata->dev_comp->ipm_design) {
+ mdata->dev = dev;
+ master->mem_ops = &mtk_spi_mem_ops;
+ init_completion(&mdata->spimem_done);
+ }
+
if (mdata->dev_comp->need_pad_sel) {
- mdata->pad_num = of_property_count_u32_elems(
- pdev->dev.of_node,
+ mdata->pad_num = of_property_count_u32_elems(dev->of_node,
"mediatek,pad-select");
- if (mdata->pad_num < 0) {
- dev_err(&pdev->dev,
+ if (mdata->pad_num < 0)
+ return dev_err_probe(dev, -EINVAL,
"No 'mediatek,pad-select' property\n");
- ret = -EINVAL;
- goto err_put_master;
- }
- mdata->pad_sel = devm_kmalloc_array(&pdev->dev, mdata->pad_num,
+ mdata->pad_sel = devm_kmalloc_array(dev, mdata->pad_num,
sizeof(u32), GFP_KERNEL);
- if (!mdata->pad_sel) {
- ret = -ENOMEM;
- goto err_put_master;
- }
+ if (!mdata->pad_sel)
+ return -ENOMEM;
for (i = 0; i < mdata->pad_num; i++) {
- of_property_read_u32_index(pdev->dev.of_node,
+ of_property_read_u32_index(dev->of_node,
"mediatek,pad-select",
i, &mdata->pad_sel[i]);
- if (mdata->pad_sel[i] > MT8173_SPI_MAX_PAD_SEL) {
- dev_err(&pdev->dev, "wrong pad-sel[%d]: %u\n",
- i, mdata->pad_sel[i]);
- ret = -EINVAL;
- goto err_put_master;
- }
+ if (mdata->pad_sel[i] > MT8173_SPI_MAX_PAD_SEL)
+ return dev_err_probe(dev, -EINVAL,
+ "wrong pad-sel[%d]: %u\n",
+ i, mdata->pad_sel[i]);
}
}
platform_set_drvdata(pdev, master);
mdata->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(mdata->base)) {
- ret = PTR_ERR(mdata->base);
- goto err_put_master;
- }
+ if (IS_ERR(mdata->base))
+ return PTR_ERR(mdata->base);
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- ret = irq;
- goto err_put_master;
- }
+ if (irq < 0)
+ return irq;
- if (!pdev->dev.dma_mask)
- pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+ if (!dev->dma_mask)
+ dev->dma_mask = &dev->coherent_dma_mask;
- ret = devm_request_irq(&pdev->dev, irq, mtk_spi_interrupt,
- IRQF_TRIGGER_NONE, dev_name(&pdev->dev), master);
- if (ret) {
- dev_err(&pdev->dev, "failed to register irq (%d)\n", ret);
- goto err_put_master;
- }
+ ret = devm_request_irq(dev, irq, mtk_spi_interrupt,
+ IRQF_TRIGGER_NONE, dev_name(dev), master);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to register irq\n");
- mdata->parent_clk = devm_clk_get(&pdev->dev, "parent-clk");
- if (IS_ERR(mdata->parent_clk)) {
- ret = PTR_ERR(mdata->parent_clk);
- dev_err(&pdev->dev, "failed to get parent-clk: %d\n", ret);
- goto err_put_master;
- }
+ mdata->parent_clk = devm_clk_get(dev, "parent-clk");
+ if (IS_ERR(mdata->parent_clk))
+ return dev_err_probe(dev, PTR_ERR(mdata->parent_clk),
+ "failed to get parent-clk\n");
- mdata->sel_clk = devm_clk_get(&pdev->dev, "sel-clk");
- if (IS_ERR(mdata->sel_clk)) {
- ret = PTR_ERR(mdata->sel_clk);
- dev_err(&pdev->dev, "failed to get sel-clk: %d\n", ret);
- goto err_put_master;
- }
+ mdata->sel_clk = devm_clk_get(dev, "sel-clk");
+ if (IS_ERR(mdata->sel_clk))
+ return dev_err_probe(dev, PTR_ERR(mdata->sel_clk), "failed to get sel-clk\n");
- mdata->spi_clk = devm_clk_get(&pdev->dev, "spi-clk");
- if (IS_ERR(mdata->spi_clk)) {
- ret = PTR_ERR(mdata->spi_clk);
- dev_err(&pdev->dev, "failed to get spi-clk: %d\n", ret);
- goto err_put_master;
- }
+ mdata->spi_clk = devm_clk_get(dev, "spi-clk");
+ if (IS_ERR(mdata->spi_clk))
+ return dev_err_probe(dev, PTR_ERR(mdata->spi_clk), "failed to get spi-clk\n");
- ret = clk_prepare_enable(mdata->spi_clk);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to enable spi_clk (%d)\n", ret);
- goto err_put_master;
- }
+ mdata->spi_hclk = devm_clk_get_optional(dev, "hclk");
+ if (IS_ERR(mdata->spi_hclk))
+ return dev_err_probe(dev, PTR_ERR(mdata->spi_hclk), "failed to get hclk\n");
ret = clk_set_parent(mdata->sel_clk, mdata->parent_clk);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to clk_set_parent\n");
+
+ ret = clk_prepare_enable(mdata->spi_hclk);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to enable hclk\n");
+
+ ret = clk_prepare_enable(mdata->spi_clk);
if (ret < 0) {
- dev_err(&pdev->dev, "failed to clk_set_parent (%d)\n", ret);
- clk_disable_unprepare(mdata->spi_clk);
- goto err_put_master;
+ clk_disable_unprepare(mdata->spi_hclk);
+ return dev_err_probe(dev, ret, "failed to enable spi_clk\n");
}
mdata->spi_clk_hz = clk_get_rate(mdata->spi_clk);
- if (mdata->dev_comp->no_need_unprepare)
+ if (mdata->dev_comp->no_need_unprepare) {
clk_disable(mdata->spi_clk);
- else
+ clk_disable(mdata->spi_hclk);
+ } else {
clk_disable_unprepare(mdata->spi_clk);
-
- pm_runtime_enable(&pdev->dev);
+ clk_disable_unprepare(mdata->spi_hclk);
+ }
if (mdata->dev_comp->need_pad_sel) {
- if (mdata->pad_num != master->num_chipselect) {
- dev_err(&pdev->dev,
+ if (mdata->pad_num != master->num_chipselect)
+ return dev_err_probe(dev, -EINVAL,
"pad_num does not match num_chipselect(%d != %d)\n",
mdata->pad_num, master->num_chipselect);
- ret = -EINVAL;
- goto err_disable_runtime_pm;
- }
- if (!master->cs_gpiods && master->num_chipselect > 1) {
- dev_err(&pdev->dev,
+ if (!master->cs_gpiods && master->num_chipselect > 1)
+ return dev_err_probe(dev, -EINVAL,
"cs_gpios not specified and num_chipselect > 1\n");
- ret = -EINVAL;
- goto err_disable_runtime_pm;
- }
}
if (mdata->dev_comp->dma_ext)
addr_bits = DMA_ADDR_EXT_BITS;
else
addr_bits = DMA_ADDR_DEF_BITS;
- ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(addr_bits));
+ ret = dma_set_mask(dev, DMA_BIT_MASK(addr_bits));
if (ret)
- dev_notice(&pdev->dev, "SPI dma_set_mask(%d) failed, ret:%d\n",
+ dev_notice(dev, "SPI dma_set_mask(%d) failed, ret:%d\n",
addr_bits, ret);
- ret = devm_spi_register_master(&pdev->dev, master);
+ pm_runtime_enable(dev);
+
+ ret = devm_spi_register_master(dev, master);
if (ret) {
- dev_err(&pdev->dev, "failed to register master (%d)\n", ret);
- goto err_disable_runtime_pm;
+ pm_runtime_disable(dev);
+ return dev_err_probe(dev, ret, "failed to register master\n");
}
return 0;
-
-err_disable_runtime_pm:
- pm_runtime_disable(&pdev->dev);
-err_put_master:
- spi_master_put(master);
-
- return ret;
}
static int mtk_spi_remove(struct platform_device *pdev)
@@ -978,8 +1270,10 @@ static int mtk_spi_remove(struct platform_device *pdev)
mtk_spi_reset(mdata);
- if (mdata->dev_comp->no_need_unprepare)
+ if (mdata->dev_comp->no_need_unprepare) {
clk_unprepare(mdata->spi_clk);
+ clk_unprepare(mdata->spi_hclk);
+ }
return 0;
}
@@ -995,8 +1289,10 @@ static int mtk_spi_suspend(struct device *dev)
if (ret)
return ret;
- if (!pm_runtime_suspended(dev))
+ if (!pm_runtime_suspended(dev)) {
clk_disable_unprepare(mdata->spi_clk);
+ clk_disable_unprepare(mdata->spi_hclk);
+ }
return ret;
}
@@ -1013,11 +1309,20 @@ static int mtk_spi_resume(struct device *dev)
dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
return ret;
}
+
+ ret = clk_prepare_enable(mdata->spi_hclk);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable spi_hclk (%d)\n", ret);
+ clk_disable_unprepare(mdata->spi_clk);
+ return ret;
+ }
}
ret = spi_master_resume(master);
- if (ret < 0)
+ if (ret < 0) {
clk_disable_unprepare(mdata->spi_clk);
+ clk_disable_unprepare(mdata->spi_hclk);
+ }
return ret;
}
@@ -1029,10 +1334,13 @@ static int mtk_spi_runtime_suspend(struct device *dev)
struct spi_master *master = dev_get_drvdata(dev);
struct mtk_spi *mdata = spi_master_get_devdata(master);
- if (mdata->dev_comp->no_need_unprepare)
+ if (mdata->dev_comp->no_need_unprepare) {
clk_disable(mdata->spi_clk);
- else
+ clk_disable(mdata->spi_hclk);
+ } else {
clk_disable_unprepare(mdata->spi_clk);
+ clk_disable_unprepare(mdata->spi_hclk);
+ }
return 0;
}
@@ -1043,13 +1351,31 @@ static int mtk_spi_runtime_resume(struct device *dev)
struct mtk_spi *mdata = spi_master_get_devdata(master);
int ret;
- if (mdata->dev_comp->no_need_unprepare)
+ if (mdata->dev_comp->no_need_unprepare) {
ret = clk_enable(mdata->spi_clk);
- else
+ if (ret < 0) {
+ dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
+ return ret;
+ }
+ ret = clk_enable(mdata->spi_hclk);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable spi_hclk (%d)\n", ret);
+ clk_disable(mdata->spi_clk);
+ return ret;
+ }
+ } else {
ret = clk_prepare_enable(mdata->spi_clk);
- if (ret < 0) {
- dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
- return ret;
+ if (ret < 0) {
+ dev_err(dev, "failed to prepare_enable spi_clk (%d)\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(mdata->spi_hclk);
+ if (ret < 0) {
+ dev_err(dev, "failed to prepare_enable spi_hclk (%d)\n", ret);
+ clk_disable_unprepare(mdata->spi_clk);
+ return ret;
+ }
}
return 0;
diff --git a/drivers/spi/spi-mtk-nor.c b/drivers/spi/spi-mtk-nor.c
index 94fb09696677..d167699a1a96 100644
--- a/drivers/spi/spi-mtk-nor.c
+++ b/drivers/spi/spi-mtk-nor.c
@@ -960,7 +960,17 @@ static int __maybe_unused mtk_nor_suspend(struct device *dev)
static int __maybe_unused mtk_nor_resume(struct device *dev)
{
- return pm_runtime_force_resume(dev);
+ struct spi_controller *ctlr = dev_get_drvdata(dev);
+ struct mtk_nor *sp = spi_controller_get_devdata(ctlr);
+ int ret;
+
+ ret = pm_runtime_force_resume(dev);
+ if (ret)
+ return ret;
+
+ mtk_nor_init(sp);
+
+ return 0;
}
static const struct dev_pm_ops mtk_nor_pm_ops = {
diff --git a/drivers/spi/spi-mtk-snfi.c b/drivers/spi/spi-mtk-snfi.c
new file mode 100644
index 000000000000..d66bf9762557
--- /dev/null
+++ b/drivers/spi/spi-mtk-snfi.c
@@ -0,0 +1,1472 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Driver for the SPI-NAND mode of Mediatek NAND Flash Interface
+//
+// Copyright (c) 2022 Chuanhong Guo <gch981213@gmail.com>
+//
+// This driver is based on the SPI-NAND mtd driver from Mediatek SDK:
+//
+// Copyright (C) 2020 MediaTek Inc.
+// Author: Weijie Gao <weijie.gao@mediatek.com>
+//
+// This controller organizes the page data as several interleaved sectors
+// like the following: (sizeof(FDM + ECC) = snf->nfi_cfg.spare_size)
+// +---------+------+------+---------+------+------+-----+
+// | Sector1 | FDM1 | ECC1 | Sector2 | FDM2 | ECC2 | ... |
+// +---------+------+------+---------+------+------+-----+
+// With auto-format turned on, DMA only returns this part:
+// +---------+---------+-----+
+// | Sector1 | Sector2 | ... |
+// +---------+---------+-----+
+// The FDM data is returned through dedicated registers, and the ECC parity
+// data isn't accessible.
+// With auto-format off, all ((Sector+FDM+ECC)*nsectors) bytes are read over
+// DMA in their original order, as shown in the first table. ECC can't be
+// turned on when auto-format is off.
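+//
+// A worked example with a hypothetical geometry (2048-byte page, 512-byte
+// sectors, 16-byte spare areas, 8-byte FDM): nsectors = 4, so the raw
+// interleaved layout is 4 * (512 + 16) = 2112 bytes, while auto-format DMA
+// returns only 4 * 512 = 2048 bytes of sector data, with the 4 * 8 = 32
+// bytes of FDM delivered through registers instead.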
+//
+// However, the Linux SPI-NAND driver expects the data returned as:
+// +------+-----+
+// | Page | OOB |
+// +------+-----+
+// where the page data is stored contiguously instead of interleaved.
+// So we assume all instructions matching the page_op template between ECC
+// prepare_io_req and finish_io_req are for page cache r/w.
+// Here's how this spi-mem driver operates when reading:
+// 1. Always set snf->autofmt = true in prepare_io_req (even when ECC is off).
+// 2. Perform page ops and let the controller fill the DMA bounce buffer with
+// de-interleaved sector data and set FDM registers.
+// 3. Return the data as:
+// +---------+---------+-----+------+------+-----+
+// | Sector1 | Sector2 | ... | FDM1 | FDM2 | ... |
+// +---------+---------+-----+------+------+-----+
+// 4. For other matching spi_mem ops outside a prepare/finish_io_req pair,
+// read the data with auto-format off into the bounce buffer and copy
+// needed data to the buffer specified in the request.
+//
+// Write requests operate in a similar manner.
+// As a limitation of this strategy, we won't be able to access any ECC parity
+// data at all in Linux.
+//
+// Here's the bad block mark situation on MTK chips:
+// In older chips like mt7622, MTK uses the first FDM byte in the first sector
+// as the bad block mark. After de-interleaving, this byte appears at [pagesize]
+// in the returned data, which is the BBM position expected by the kernel. However,
+// the conventional bad block mark is the first byte of the OOB, which is part
+// of the last sector data in the interleaved layout. Instead of fixing their
+// hardware, MTK decided to address this inconsistency in software. On chips
+// newer than the mt7622, the BootROM expects the following:
+// 1. The [pagesize] byte on a nand page is used as BBM, which will appear at
+// (page_size - (nsectors - 1) * spare_size) in the DMA buffer.
+// 2. The original byte stored at that position in the DMA buffer will be stored
+// as the first byte of the FDM section in the last sector.
+// We can't disagree with the BootROM, so after de-interleaving, we need to
+// perform the following swaps on reads:
+// 1. Store the BBM at [page_size - (nsectors - 1) * spare_size] to [page_size],
+// which is the BBM position expected by the kernel.
+// 2. Store the page data byte at [page_size + (nsectors - 1) * fdm_size] back
+// to [page_size - (nsectors - 1) * spare_size].
+// Similarly, when writing, we need to perform swaps in the other direction.
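+//
+// Worked example, reusing the hypothetical geometry above (2048-byte page,
+// 4 sectors, 16-byte spare areas, 8-byte FDM). The offsets involved are:
+//   page_size - (nsectors - 1) * spare_size = 2048 - 3 * 16 = 2000
+//   page_size + (nsectors - 1) * fdm_size   = 2048 + 3 * 8  = 2072
+// On a read, mtk_snand_bm_swap() exchanges offsets 2000 and 2072, then
+// mtk_snand_fdm_bm_swap() exchanges offsets 2048 and 2072, leaving the BBM
+// at [page_size] = 2048, where the kernel expects it, and restoring the
+// displaced page-data byte to offset 2000.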
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/iopoll.h>
+#include <linux/of_platform.h>
+#include <linux/mtd/nand-ecc-mtk.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi-mem.h>
+#include <linux/mtd/nand.h>
+
+// NFI registers
+#define NFI_CNFG 0x000
+#define CNFG_OP_MODE_S 12
+#define CNFG_OP_MODE_CUST 6
+#define CNFG_OP_MODE_PROGRAM 3
+#define CNFG_AUTO_FMT_EN BIT(9)
+#define CNFG_HW_ECC_EN BIT(8)
+#define CNFG_DMA_BURST_EN BIT(2)
+#define CNFG_READ_MODE BIT(1)
+#define CNFG_DMA_MODE BIT(0)
+
+#define NFI_PAGEFMT 0x0004
+#define NFI_SPARE_SIZE_LS_S 16
+#define NFI_FDM_ECC_NUM_S 12
+#define NFI_FDM_NUM_S 8
+#define NFI_SPARE_SIZE_S 4
+#define NFI_SEC_SEL_512 BIT(2)
+#define NFI_PAGE_SIZE_S 0
+#define NFI_PAGE_SIZE_512_2K 0
+#define NFI_PAGE_SIZE_2K_4K 1
+#define NFI_PAGE_SIZE_4K_8K 2
+#define NFI_PAGE_SIZE_8K_16K 3
+
+#define NFI_CON 0x008
+#define CON_SEC_NUM_S 12
+#define CON_BWR BIT(9)
+#define CON_BRD BIT(8)
+#define CON_NFI_RST BIT(1)
+#define CON_FIFO_FLUSH BIT(0)
+
+#define NFI_INTR_EN 0x010
+#define NFI_INTR_STA 0x014
+#define NFI_IRQ_INTR_EN BIT(31)
+#define NFI_IRQ_CUS_READ BIT(8)
+#define NFI_IRQ_CUS_PG BIT(7)
+
+#define NFI_CMD 0x020
+#define NFI_CMD_DUMMY_READ 0x00
+#define NFI_CMD_DUMMY_WRITE 0x80
+
+#define NFI_STRDATA 0x040
+#define STR_DATA BIT(0)
+
+#define NFI_STA 0x060
+#define NFI_NAND_FSM GENMASK(28, 24)
+#define NFI_FSM GENMASK(19, 16)
+#define READ_EMPTY BIT(12)
+
+#define NFI_FIFOSTA 0x064
+#define FIFO_WR_REMAIN_S 8
+#define FIFO_RD_REMAIN_S 0
+
+#define NFI_ADDRCNTR 0x070
+#define SEC_CNTR GENMASK(16, 12)
+#define SEC_CNTR_S 12
+#define NFI_SEC_CNTR(val) (((val)&SEC_CNTR) >> SEC_CNTR_S)
+
+#define NFI_STRADDR 0x080
+
+#define NFI_BYTELEN 0x084
+#define BUS_SEC_CNTR(val) (((val)&SEC_CNTR) >> SEC_CNTR_S)
+
+#define NFI_FDM0L 0x0a0
+#define NFI_FDM0M 0x0a4
+#define NFI_FDML(n) (NFI_FDM0L + (n)*8)
+#define NFI_FDMM(n) (NFI_FDM0M + (n)*8)
+
+#define NFI_DEBUG_CON1 0x220
+#define WBUF_EN BIT(2)
+
+#define NFI_MASTERSTA 0x224
+#define MAS_ADDR GENMASK(11, 9)
+#define MAS_RD GENMASK(8, 6)
+#define MAS_WR GENMASK(5, 3)
+#define MAS_RDDLY GENMASK(2, 0)
+#define NFI_MASTERSTA_MASK_7622 (MAS_ADDR | MAS_RD | MAS_WR | MAS_RDDLY)
+
+// SNFI registers
+#define SNF_MAC_CTL 0x500
+#define MAC_XIO_SEL BIT(4)
+#define SF_MAC_EN BIT(3)
+#define SF_TRIG BIT(2)
+#define WIP_READY BIT(1)
+#define WIP BIT(0)
+
+#define SNF_MAC_OUTL 0x504
+#define SNF_MAC_INL 0x508
+
+#define SNF_RD_CTL2 0x510
+#define DATA_READ_DUMMY_S 8
+#define DATA_READ_MAX_DUMMY 0xf
+#define DATA_READ_CMD_S 0
+
+#define SNF_RD_CTL3 0x514
+
+#define SNF_PG_CTL1 0x524
+#define PG_LOAD_CMD_S 8
+
+#define SNF_PG_CTL2 0x528
+
+#define SNF_MISC_CTL 0x538
+#define SW_RST BIT(28)
+#define FIFO_RD_LTC_S 25
+#define PG_LOAD_X4_EN BIT(20)
+#define DATA_READ_MODE_S 16
+#define DATA_READ_MODE GENMASK(18, 16)
+#define DATA_READ_MODE_X1 0
+#define DATA_READ_MODE_X2 1
+#define DATA_READ_MODE_X4 2
+#define DATA_READ_MODE_DUAL 5
+#define DATA_READ_MODE_QUAD 6
+#define PG_LOAD_CUSTOM_EN BIT(7)
+#define DATARD_CUSTOM_EN BIT(6)
+#define CS_DESELECT_CYC_S 0
+
+#define SNF_MISC_CTL2 0x53c
+#define PROGRAM_LOAD_BYTE_NUM_S 16
+#define READ_DATA_BYTE_NUM_S 11
+
+#define SNF_DLY_CTL3 0x548
+#define SFCK_SAM_DLY_S 0
+
+#define SNF_STA_CTL1 0x550
+#define CUS_PG_DONE BIT(28)
+#define CUS_READ_DONE BIT(27)
+#define SPI_STATE_S 0
+#define SPI_STATE GENMASK(3, 0)
+
+#define SNF_CFG 0x55c
+#define SPI_MODE BIT(0)
+
+#define SNF_GPRAM 0x800
+#define SNF_GPRAM_SIZE 0xa0
+
+#define SNFI_POLL_INTERVAL 1000000
+
+static const u8 mt7622_spare_sizes[] = { 16, 26, 27, 28 };
+
+struct mtk_snand_caps {
+ u16 sector_size;
+ u16 max_sectors;
+ u16 fdm_size;
+ u16 fdm_ecc_size;
+ u16 fifo_size;
+
+ bool bbm_swap;
+ bool empty_page_check;
+ u32 mastersta_mask;
+
+ const u8 *spare_sizes;
+ u32 num_spare_size;
+};
+
+static const struct mtk_snand_caps mt7622_snand_caps = {
+ .sector_size = 512,
+ .max_sectors = 8,
+ .fdm_size = 8,
+ .fdm_ecc_size = 1,
+ .fifo_size = 32,
+ .bbm_swap = false,
+ .empty_page_check = false,
+ .mastersta_mask = NFI_MASTERSTA_MASK_7622,
+ .spare_sizes = mt7622_spare_sizes,
+ .num_spare_size = ARRAY_SIZE(mt7622_spare_sizes)
+};
+
+static const struct mtk_snand_caps mt7629_snand_caps = {
+ .sector_size = 512,
+ .max_sectors = 8,
+ .fdm_size = 8,
+ .fdm_ecc_size = 1,
+ .fifo_size = 32,
+ .bbm_swap = true,
+ .empty_page_check = false,
+ .mastersta_mask = NFI_MASTERSTA_MASK_7622,
+ .spare_sizes = mt7622_spare_sizes,
+ .num_spare_size = ARRAY_SIZE(mt7622_spare_sizes)
+};
+
+struct mtk_snand_conf {
+ size_t page_size;
+ size_t oob_size;
+ u8 nsectors;
+ u8 spare_size;
+};
+
+struct mtk_snand {
+ struct spi_controller *ctlr;
+ struct device *dev;
+ struct clk *nfi_clk;
+ struct clk *pad_clk;
+ void __iomem *nfi_base;
+ int irq;
+ struct completion op_done;
+ const struct mtk_snand_caps *caps;
+ struct mtk_ecc_config *ecc_cfg;
+ struct mtk_ecc *ecc;
+ struct mtk_snand_conf nfi_cfg;
+ struct mtk_ecc_stats ecc_stats;
+ struct nand_ecc_engine ecc_eng;
+ bool autofmt;
+ u8 *buf;
+ size_t buf_len;
+};
+
+static struct mtk_snand *nand_to_mtk_snand(struct nand_device *nand)
+{
+ struct nand_ecc_engine *eng = nand->ecc.engine;
+
+ return container_of(eng, struct mtk_snand, ecc_eng);
+}
+
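+// Ensure the DMA bounce buffer can hold at least size bytes; it is only
+// (re)allocated, and filled with 0xff (the erased-flash pattern), when it
+// has to grow.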
+static inline int snand_prepare_bouncebuf(struct mtk_snand *snf, size_t size)
+{
+ if (snf->buf_len >= size)
+ return 0;
+ kfree(snf->buf);
+ snf->buf = kmalloc(size, GFP_KERNEL);
+ if (!snf->buf)
+ return -ENOMEM;
+ snf->buf_len = size;
+ memset(snf->buf, 0xff, snf->buf_len);
+ return 0;
+}
+
+static inline u32 nfi_read32(struct mtk_snand *snf, u32 reg)
+{
+ return readl(snf->nfi_base + reg);
+}
+
+static inline void nfi_write32(struct mtk_snand *snf, u32 reg, u32 val)
+{
+ writel(val, snf->nfi_base + reg);
+}
+
+static inline void nfi_write16(struct mtk_snand *snf, u32 reg, u16 val)
+{
+ writew(val, snf->nfi_base + reg);
+}
+
+static inline void nfi_rmw32(struct mtk_snand *snf, u32 reg, u32 clr, u32 set)
+{
+ u32 val;
+
+ val = readl(snf->nfi_base + reg);
+ val &= ~clr;
+ val |= set;
+ writel(val, snf->nfi_base + reg);
+}
+
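+// Copy len bytes out of the 32-bit register window starting at byte offset
+// reg, picking the right byte lane out of each readl() so unaligned starts
+// and lengths are handled.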
+static void nfi_read_data(struct mtk_snand *snf, u32 reg, u8 *data, u32 len)
+{
+ u32 i, val = 0, es = sizeof(u32);
+
+ for (i = reg; i < reg + len; i++) {
+ if (i == reg || i % es == 0)
+ val = nfi_read32(snf, i & ~(es - 1));
+
+ *data++ = (u8)(val >> (8 * (i % es)));
+ }
+}
+
+static int mtk_nfi_reset(struct mtk_snand *snf)
+{
+ u32 val, fifo_mask;
+ int ret;
+
+ nfi_write32(snf, NFI_CON, CON_FIFO_FLUSH | CON_NFI_RST);
+
+ ret = readw_poll_timeout(snf->nfi_base + NFI_MASTERSTA, val,
+ !(val & snf->caps->mastersta_mask), 0,
+ SNFI_POLL_INTERVAL);
+ if (ret) {
+ dev_err(snf->dev, "NFI master is still busy after reset\n");
+ return ret;
+ }
+
+ ret = readl_poll_timeout(snf->nfi_base + NFI_STA, val,
+ !(val & (NFI_FSM | NFI_NAND_FSM)), 0,
+ SNFI_POLL_INTERVAL);
+ if (ret) {
+ dev_err(snf->dev, "Failed to reset NFI\n");
+ return ret;
+ }
+
+ fifo_mask = ((snf->caps->fifo_size - 1) << FIFO_RD_REMAIN_S) |
+ ((snf->caps->fifo_size - 1) << FIFO_WR_REMAIN_S);
+ ret = readw_poll_timeout(snf->nfi_base + NFI_FIFOSTA, val,
+ !(val & fifo_mask), 0, SNFI_POLL_INTERVAL);
+ if (ret) {
+ dev_err(snf->dev, "NFI FIFOs are not empty\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mtk_snand_mac_reset(struct mtk_snand *snf)
+{
+ int ret;
+ u32 val;
+
+ nfi_rmw32(snf, SNF_MISC_CTL, 0, SW_RST);
+
+ ret = readl_poll_timeout(snf->nfi_base + SNF_STA_CTL1, val,
+ !(val & SPI_STATE), 0, SNFI_POLL_INTERVAL);
+ if (ret)
+ dev_err(snf->dev, "Failed to reset SNFI MAC\n");
+
+ nfi_write32(snf, SNF_MISC_CTL,
+ (2 << FIFO_RD_LTC_S) | (10 << CS_DESELECT_CYC_S));
+
+ return ret;
+}
+
+static int mtk_snand_mac_trigger(struct mtk_snand *snf, u32 outlen, u32 inlen)
+{
+ int ret;
+ u32 val;
+
+ nfi_write32(snf, SNF_MAC_CTL, SF_MAC_EN);
+ nfi_write32(snf, SNF_MAC_OUTL, outlen);
+ nfi_write32(snf, SNF_MAC_INL, inlen);
+
+ nfi_write32(snf, SNF_MAC_CTL, SF_MAC_EN | SF_TRIG);
+
+ ret = readl_poll_timeout(snf->nfi_base + SNF_MAC_CTL, val,
+ val & WIP_READY, 0, SNFI_POLL_INTERVAL);
+ if (ret) {
+ dev_err(snf->dev, "Timed out waiting for WIP_READY\n");
+ goto cleanup;
+ }
+
+ ret = readl_poll_timeout(snf->nfi_base + SNF_MAC_CTL, val, !(val & WIP),
+ 0, SNFI_POLL_INTERVAL);
+ if (ret)
+ dev_err(snf->dev, "Timed out waiting for WIP cleared\n");
+
+cleanup:
+ nfi_write32(snf, SNF_MAC_CTL, 0);
+
+ return ret;
+}
+
+static int mtk_snand_mac_io(struct mtk_snand *snf, const struct spi_mem_op *op)
+{
+ u32 rx_len = 0;
+ u32 reg_offs = 0;
+ u32 val = 0;
+ const u8 *tx_buf = NULL;
+ u8 *rx_buf = NULL;
+ int i, ret;
+ u8 b;
+
+ if (op->data.dir == SPI_MEM_DATA_IN) {
+ rx_len = op->data.nbytes;
+ rx_buf = op->data.buf.in;
+ } else {
+ tx_buf = op->data.buf.out;
+ }
+
+ mtk_snand_mac_reset(snf);
+
+ for (i = 0; i < op->cmd.nbytes; i++, reg_offs++) {
+ b = (op->cmd.opcode >> ((op->cmd.nbytes - i - 1) * 8)) & 0xff;
+ val |= b << (8 * (reg_offs % 4));
+ if (reg_offs % 4 == 3) {
+ nfi_write32(snf, SNF_GPRAM + reg_offs - 3, val);
+ val = 0;
+ }
+ }
+
+ for (i = 0; i < op->addr.nbytes; i++, reg_offs++) {
+ b = (op->addr.val >> ((op->addr.nbytes - i - 1) * 8)) & 0xff;
+ val |= b << (8 * (reg_offs % 4));
+ if (reg_offs % 4 == 3) {
+ nfi_write32(snf, SNF_GPRAM + reg_offs - 3, val);
+ val = 0;
+ }
+ }
+
+ for (i = 0; i < op->dummy.nbytes; i++, reg_offs++) {
+ if (reg_offs % 4 == 3) {
+ nfi_write32(snf, SNF_GPRAM + reg_offs - 3, val);
+ val = 0;
+ }
+ }
+
+ if (op->data.dir == SPI_MEM_DATA_OUT) {
+ for (i = 0; i < op->data.nbytes; i++, reg_offs++) {
+ val |= tx_buf[i] << (8 * (reg_offs % 4));
+ if (reg_offs % 4 == 3) {
+ nfi_write32(snf, SNF_GPRAM + reg_offs - 3, val);
+ val = 0;
+ }
+ }
+ }
+
+ if (reg_offs % 4)
+ nfi_write32(snf, SNF_GPRAM + (reg_offs & ~3), val);
+
+ for (i = 0; i < reg_offs; i += 4)
+ dev_dbg(snf->dev, "%d: %08X", i,
+ nfi_read32(snf, SNF_GPRAM + i));
+
+ dev_dbg(snf->dev, "SNF TX: %u RX: %u", reg_offs, rx_len);
+
+ ret = mtk_snand_mac_trigger(snf, reg_offs, rx_len);
+ if (ret)
+ return ret;
+
+ if (!rx_len)
+ return 0;
+
+ nfi_read_data(snf, SNF_GPRAM + reg_offs, rx_buf, rx_len);
+ return 0;
+}
+
+static int mtk_snand_setup_pagefmt(struct mtk_snand *snf, u32 page_size,
+ u32 oob_size)
+{
+ int spare_idx = -1;
+ u32 spare_size, spare_size_shift, pagesize_idx;
+ u32 sector_size_512;
+ u8 nsectors;
+ int i;
+
+ // skip if it's already configured as required.
+ if (snf->nfi_cfg.page_size == page_size &&
+ snf->nfi_cfg.oob_size == oob_size)
+ return 0;
+
+ nsectors = page_size / snf->caps->sector_size;
+ if (nsectors > snf->caps->max_sectors) {
+ dev_err(snf->dev, "too many sectors required.\n");
+ goto err;
+ }
+
+ if (snf->caps->sector_size == 512) {
+ sector_size_512 = NFI_SEC_SEL_512;
+ spare_size_shift = NFI_SPARE_SIZE_S;
+ } else {
+ sector_size_512 = 0;
+ spare_size_shift = NFI_SPARE_SIZE_LS_S;
+ }
+
+ switch (page_size) {
+ case SZ_512:
+ pagesize_idx = NFI_PAGE_SIZE_512_2K;
+ break;
+ case SZ_2K:
+ if (snf->caps->sector_size == 512)
+ pagesize_idx = NFI_PAGE_SIZE_2K_4K;
+ else
+ pagesize_idx = NFI_PAGE_SIZE_512_2K;
+ break;
+ case SZ_4K:
+ if (snf->caps->sector_size == 512)
+ pagesize_idx = NFI_PAGE_SIZE_4K_8K;
+ else
+ pagesize_idx = NFI_PAGE_SIZE_2K_4K;
+ break;
+ case SZ_8K:
+ if (snf->caps->sector_size == 512)
+ pagesize_idx = NFI_PAGE_SIZE_8K_16K;
+ else
+ pagesize_idx = NFI_PAGE_SIZE_4K_8K;
+ break;
+ case SZ_16K:
+ pagesize_idx = NFI_PAGE_SIZE_8K_16K;
+ break;
+ default:
+ dev_err(snf->dev, "unsupported page size.\n");
+ goto err;
+ }
+
+ spare_size = oob_size / nsectors;
+ // If we're using the 1KB sector size, HW will automatically double the
+ // spare size. We should only use half of the value in this case.
+ if (snf->caps->sector_size == 1024)
+ spare_size /= 2;
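+ // e.g. (hypothetical geometry): oob_size = 128 with four 1KiB sectors
+ // gives spare_size = 32; the table below is matched against 16, and the
+ // doubled value, 32, ends up in nfi_cfg.spare_size.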
+
+ for (i = snf->caps->num_spare_size - 1; i >= 0; i--) {
+ if (snf->caps->spare_sizes[i] <= spare_size) {
+ spare_size = snf->caps->spare_sizes[i];
+ if (snf->caps->sector_size == 1024)
+ spare_size *= 2;
+ spare_idx = i;
+ break;
+ }
+ }
+
+ if (spare_idx < 0) {
+ dev_err(snf->dev, "unsupported spare size: %u\n", spare_size);
+ goto err;
+ }
+
+ nfi_write32(snf, NFI_PAGEFMT,
+ (snf->caps->fdm_ecc_size << NFI_FDM_ECC_NUM_S) |
+ (snf->caps->fdm_size << NFI_FDM_NUM_S) |
+ (spare_idx << spare_size_shift) |
+ (pagesize_idx << NFI_PAGE_SIZE_S) |
+ sector_size_512);
+
+ snf->nfi_cfg.page_size = page_size;
+ snf->nfi_cfg.oob_size = oob_size;
+ snf->nfi_cfg.nsectors = nsectors;
+ snf->nfi_cfg.spare_size = spare_size;
+
+ dev_dbg(snf->dev, "page format: (%u + %u) * %u\n",
+ snf->caps->sector_size, spare_size, nsectors);
+ return snand_prepare_bouncebuf(snf, page_size + oob_size);
+err:
+ dev_err(snf->dev, "page size %u + %u is not supported\n", page_size,
+ oob_size);
+ return -EOPNOTSUPP;
+}
+
+static int mtk_snand_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobecc)
+{
+ // ECC area is not accessible
+ return -ERANGE;
+}
+
+static int mtk_snand_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobfree)
+{
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ struct mtk_snand *ms = nand_to_mtk_snand(nand);
+
+ if (section >= ms->nfi_cfg.nsectors)
+ return -ERANGE;
+
+ oobfree->length = ms->caps->fdm_size - 1;
+ oobfree->offset = section * ms->caps->fdm_size + 1;
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops mtk_snand_ooblayout = {
+ .ecc = mtk_snand_ooblayout_ecc,
+ .free = mtk_snand_ooblayout_free,
+};
+
+static int mtk_snand_ecc_init_ctx(struct nand_device *nand)
+{
+ struct mtk_snand *snf = nand_to_mtk_snand(nand);
+ struct nand_ecc_props *conf = &nand->ecc.ctx.conf;
+ struct nand_ecc_props *reqs = &nand->ecc.requirements;
+ struct nand_ecc_props *user = &nand->ecc.user_conf;
+ struct mtd_info *mtd = nanddev_to_mtd(nand);
+ int step_size = 0, strength = 0, desired_correction = 0, steps;
+ bool ecc_user = false;
+ int ret;
+ u32 parity_bits, max_ecc_bytes;
+ struct mtk_ecc_config *ecc_cfg;
+
+ ret = mtk_snand_setup_pagefmt(snf, nand->memorg.pagesize,
+ nand->memorg.oobsize);
+ if (ret)
+ return ret;
+
+ ecc_cfg = kzalloc(sizeof(*ecc_cfg), GFP_KERNEL);
+ if (!ecc_cfg)
+ return -ENOMEM;
+
+ nand->ecc.ctx.priv = ecc_cfg;
+
+ if (user->step_size && user->strength) {
+ step_size = user->step_size;
+ strength = user->strength;
+ ecc_user = true;
+ } else if (reqs->step_size && reqs->strength) {
+ step_size = reqs->step_size;
+ strength = reqs->strength;
+ }
+
+ if (step_size && strength) {
+ steps = mtd->writesize / step_size;
+ desired_correction = steps * strength;
+ strength = desired_correction / snf->nfi_cfg.nsectors;
+ }
+
+ ecc_cfg->mode = ECC_NFI_MODE;
+ ecc_cfg->sectors = snf->nfi_cfg.nsectors;
+ ecc_cfg->len = snf->caps->sector_size + snf->caps->fdm_ecc_size;
+
+ // calculate the max possible strength under the current page format
+ parity_bits = mtk_ecc_get_parity_bits(snf->ecc);
+ max_ecc_bytes = snf->nfi_cfg.spare_size - snf->caps->fdm_size;
+ ecc_cfg->strength = max_ecc_bytes * 8 / parity_bits;
+ mtk_ecc_adjust_strength(snf->ecc, &ecc_cfg->strength);
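+ // e.g. (hypothetical numbers): a 28-byte spare area and an 8-byte FDM
+ // leave max_ecc_bytes = 20; at 14 parity bits per step this gives
+ // 20 * 8 / 14 = 11 bits, which mtk_ecc_adjust_strength() rounds to a
+ // strength the ECC engine actually supports.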
+
+ // if there's a user-requested strength, find the minimum strength that
+ // meets the requirement. Otherwise use the maximum strength, which is
+ // what the BootROM expects.
+ if (ecc_user && strength) {
+ u32 s_next = ecc_cfg->strength - 1;
+
+ while (1) {
+ mtk_ecc_adjust_strength(snf->ecc, &s_next);
+ if (s_next >= ecc_cfg->strength)
+ break;
+ if (s_next < strength)
+ break;
+ s_next = ecc_cfg->strength - 1;
+ }
+ }
+
+ mtd_set_ooblayout(mtd, &mtk_snand_ooblayout);
+
+ conf->step_size = snf->caps->sector_size;
+ conf->strength = ecc_cfg->strength;
+
+ if (ecc_cfg->strength < strength)
+ dev_warn(snf->dev, "unable to fulfill ECC of %u bits.\n",
+ strength);
+ dev_info(snf->dev, "ECC strength: %u bits per %u bytes\n",
+ ecc_cfg->strength, snf->caps->sector_size);
+
+ return 0;
+}
+
+static void mtk_snand_ecc_cleanup_ctx(struct nand_device *nand)
+{
+ struct mtk_ecc_config *ecc_cfg = nand_to_ecc_ctx(nand);
+
+ kfree(ecc_cfg);
+}
+
+static int mtk_snand_ecc_prepare_io_req(struct nand_device *nand,
+ struct nand_page_io_req *req)
+{
+ struct mtk_snand *snf = nand_to_mtk_snand(nand);
+ struct mtk_ecc_config *ecc_cfg = nand_to_ecc_ctx(nand);
+ int ret;
+
+ ret = mtk_snand_setup_pagefmt(snf, nand->memorg.pagesize,
+ nand->memorg.oobsize);
+ if (ret)
+ return ret;
+ snf->autofmt = true;
+ snf->ecc_cfg = ecc_cfg;
+ return 0;
+}
+
+static int mtk_snand_ecc_finish_io_req(struct nand_device *nand,
+ struct nand_page_io_req *req)
+{
+ struct mtk_snand *snf = nand_to_mtk_snand(nand);
+ struct mtd_info *mtd = nanddev_to_mtd(nand);
+
+ snf->ecc_cfg = NULL;
+ snf->autofmt = false;
+ if ((req->mode == MTD_OPS_RAW) || (req->type != NAND_PAGE_READ))
+ return 0;
+
+ if (snf->ecc_stats.failed)
+ mtd->ecc_stats.failed += snf->ecc_stats.failed;
+ mtd->ecc_stats.corrected += snf->ecc_stats.corrected;
+ return snf->ecc_stats.failed ? -EBADMSG : snf->ecc_stats.bitflips;
+}
+
+static struct nand_ecc_engine_ops mtk_snfi_ecc_engine_ops = {
+ .init_ctx = mtk_snand_ecc_init_ctx,
+ .cleanup_ctx = mtk_snand_ecc_cleanup_ctx,
+ .prepare_io_req = mtk_snand_ecc_prepare_io_req,
+ .finish_io_req = mtk_snand_ecc_finish_io_req,
+};
+
+static void mtk_snand_read_fdm(struct mtk_snand *snf, u8 *buf)
+{
+ u32 vall, valm;
+ u8 *oobptr = buf;
+ int i, j;
+
+ for (i = 0; i < snf->nfi_cfg.nsectors; i++) {
+ vall = nfi_read32(snf, NFI_FDML(i));
+ valm = nfi_read32(snf, NFI_FDMM(i));
+
+ for (j = 0; j < snf->caps->fdm_size; j++)
+ oobptr[j] = (j >= 4 ? valm : vall) >> ((j % 4) * 8);
+
+ oobptr += snf->caps->fdm_size;
+ }
+}
+
+static void mtk_snand_write_fdm(struct mtk_snand *snf, const u8 *buf)
+{
+ u32 fdm_size = snf->caps->fdm_size;
+ const u8 *oobptr = buf;
+ u32 vall, valm;
+ int i, j;
+
+ for (i = 0; i < snf->nfi_cfg.nsectors; i++) {
+ vall = 0;
+ valm = 0;
+
+ for (j = 0; j < 8; j++) {
+ if (j < 4)
+ vall |= (j < fdm_size ? oobptr[j] : 0xff)
+ << (j * 8);
+ else
+ valm |= (j < fdm_size ? oobptr[j] : 0xff)
+ << ((j - 4) * 8);
+ }
+
+ nfi_write32(snf, NFI_FDML(i), vall);
+ nfi_write32(snf, NFI_FDMM(i), valm);
+
+ oobptr += fdm_size;
+ }
+}
+
+static void mtk_snand_bm_swap(struct mtk_snand *snf, u8 *buf)
+{
+ u32 buf_bbm_pos, fdm_bbm_pos;
+
+ if (!snf->caps->bbm_swap || snf->nfi_cfg.nsectors == 1)
+ return;
+
+ // swap the [pagesize] byte on NAND with the first FDM byte
+ // in the last sector.
+ buf_bbm_pos = snf->nfi_cfg.page_size -
+ (snf->nfi_cfg.nsectors - 1) * snf->nfi_cfg.spare_size;
+ fdm_bbm_pos = snf->nfi_cfg.page_size +
+ (snf->nfi_cfg.nsectors - 1) * snf->caps->fdm_size;
+
+ swap(snf->buf[fdm_bbm_pos], buf[buf_bbm_pos]);
+}
+
+static void mtk_snand_fdm_bm_swap(struct mtk_snand *snf)
+{
+ u32 fdm_bbm_pos1, fdm_bbm_pos2;
+
+ if (!snf->caps->bbm_swap || snf->nfi_cfg.nsectors == 1)
+ return;
+
+ // swap the first fdm byte in the first and the last sector.
+ fdm_bbm_pos1 = snf->nfi_cfg.page_size;
+ fdm_bbm_pos2 = snf->nfi_cfg.page_size +
+ (snf->nfi_cfg.nsectors - 1) * snf->caps->fdm_size;
+ swap(snf->buf[fdm_bbm_pos1], snf->buf[fdm_bbm_pos2]);
+}
+
+static int mtk_snand_read_page_cache(struct mtk_snand *snf,
+ const struct spi_mem_op *op)
+{
+ u8 *buf = snf->buf;
+ u8 *buf_fdm = buf + snf->nfi_cfg.page_size;
+ // the address part to be sent by the controller
+ u32 op_addr = op->addr.val;
+ // where to start copying data from bounce buffer
+ u32 rd_offset = 0;
+ u32 dummy_clk = (op->dummy.nbytes * BITS_PER_BYTE / op->dummy.buswidth);
+ u32 op_mode = 0;
+ u32 dma_len = snf->buf_len;
+ int ret = 0;
+ u32 rd_mode, rd_bytes, val;
+ dma_addr_t buf_dma;
+
+ if (snf->autofmt) {
+ u32 last_bit;
+ u32 mask;
+
+ dma_len = snf->nfi_cfg.page_size;
+ op_mode = CNFG_AUTO_FMT_EN;
+ if (op->data.ecc)
+ op_mode |= CNFG_HW_ECC_EN;
+ // extract the plane bit:
+ // Find the highest bit set in (pagesize + oobsize).
+ // Bits higher than that in op->addr are kept and sent over SPI;
+ // lower bits are used as an offset for copying data from the DMA
+ // bounce buffer.
+ last_bit = fls(snf->nfi_cfg.page_size + snf->nfi_cfg.oob_size);
+ mask = (1 << last_bit) - 1;
+ rd_offset = op_addr & mask;
+ op_addr &= ~mask;
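+ // e.g. (hypothetical): with a 2048 + 64 byte page, fls() returns 12 and
+ // mask = 0xfff, so for op->addr.val = 0x1000 the plane-select bit stays
+ // in op_addr and rd_offset = 0.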
+
+ // check if we can DMA directly into the caller's buffer
+ if (rd_offset == 0 && op->data.nbytes >= snf->nfi_cfg.page_size)
+ buf = op->data.buf.in;
+ }
+ mtk_snand_mac_reset(snf);
+ mtk_nfi_reset(snf);
+
+ // command and dummy cycles
+ nfi_write32(snf, SNF_RD_CTL2,
+ (dummy_clk << DATA_READ_DUMMY_S) |
+ (op->cmd.opcode << DATA_READ_CMD_S));
+
+ // read address
+ nfi_write32(snf, SNF_RD_CTL3, op_addr);
+
+ // Set read op_mode
+ if (op->data.buswidth == 4)
+ rd_mode = op->addr.buswidth == 4 ? DATA_READ_MODE_QUAD :
+ DATA_READ_MODE_X4;
+ else if (op->data.buswidth == 2)
+ rd_mode = op->addr.buswidth == 2 ? DATA_READ_MODE_DUAL :
+ DATA_READ_MODE_X2;
+ else
+ rd_mode = DATA_READ_MODE_X1;
+ rd_mode <<= DATA_READ_MODE_S;
+ nfi_rmw32(snf, SNF_MISC_CTL, DATA_READ_MODE,
+ rd_mode | DATARD_CUSTOM_EN);
+
+ // Set bytes to read
+ rd_bytes = (snf->nfi_cfg.spare_size + snf->caps->sector_size) *
+ snf->nfi_cfg.nsectors;
+ nfi_write32(snf, SNF_MISC_CTL2,
+ (rd_bytes << PROGRAM_LOAD_BYTE_NUM_S) | rd_bytes);
+
+ // NFI read prepare
+ nfi_write16(snf, NFI_CNFG,
+ (CNFG_OP_MODE_CUST << CNFG_OP_MODE_S) | CNFG_DMA_BURST_EN |
+ CNFG_READ_MODE | CNFG_DMA_MODE | op_mode);
+
+ nfi_write32(snf, NFI_CON, (snf->nfi_cfg.nsectors << CON_SEC_NUM_S));
+
+ buf_dma = dma_map_single(snf->dev, buf, dma_len, DMA_FROM_DEVICE);
+ ret = dma_mapping_error(snf->dev, buf_dma);
+ if (ret) {
+ dev_err(snf->dev, "DMA mapping failed.\n");
+ goto cleanup;
+ }
+ nfi_write32(snf, NFI_STRADDR, buf_dma);
+ if (op->data.ecc) {
+ snf->ecc_cfg->op = ECC_DECODE;
+ ret = mtk_ecc_enable(snf->ecc, snf->ecc_cfg);
+ if (ret)
+ goto cleanup_dma;
+ }
+ // Prepare for custom read interrupt
+ nfi_write32(snf, NFI_INTR_EN, NFI_IRQ_INTR_EN | NFI_IRQ_CUS_READ);
+ reinit_completion(&snf->op_done);
+
+ // Trigger NFI into custom mode
+ nfi_write16(snf, NFI_CMD, NFI_CMD_DUMMY_READ);
+
+ // Start DMA read
+ nfi_rmw32(snf, NFI_CON, 0, CON_BRD);
+ nfi_write16(snf, NFI_STRDATA, STR_DATA);
+
+ if (!wait_for_completion_timeout(
+ &snf->op_done, usecs_to_jiffies(SNFI_POLL_INTERVAL))) {
+ dev_err(snf->dev, "DMA timed out for reading from cache.\n");
+ ret = -ETIMEDOUT;
+ goto cleanup;
+ }
+
+ // Wait for BUS_SEC_CNTR returning expected value
+ ret = readl_poll_timeout(snf->nfi_base + NFI_BYTELEN, val,
+ BUS_SEC_CNTR(val) >= snf->nfi_cfg.nsectors, 0,
+ SNFI_POLL_INTERVAL);
+ if (ret) {
+ dev_err(snf->dev, "Timed out waiting for BUS_SEC_CNTR\n");
+ goto cleanup2;
+ }
+
+ // Wait for bus becoming idle
+ ret = readl_poll_timeout(snf->nfi_base + NFI_MASTERSTA, val,
+ !(val & snf->caps->mastersta_mask), 0,
+ SNFI_POLL_INTERVAL);
+ if (ret) {
+ dev_err(snf->dev, "Timed out waiting for bus becoming idle\n");
+ goto cleanup2;
+ }
+
+ if (op->data.ecc) {
+ ret = mtk_ecc_wait_done(snf->ecc, ECC_DECODE);
+ if (ret) {
+ dev_err(snf->dev, "wait ecc done timeout\n");
+ goto cleanup2;
+ }
+ // save status before disabling ecc
+ mtk_ecc_get_stats(snf->ecc, &snf->ecc_stats,
+ snf->nfi_cfg.nsectors);
+ }
+
+ dma_unmap_single(snf->dev, buf_dma, dma_len, DMA_FROM_DEVICE);
+
+ if (snf->autofmt) {
+ mtk_snand_read_fdm(snf, buf_fdm);
+ if (snf->caps->bbm_swap) {
+ mtk_snand_bm_swap(snf, buf);
+ mtk_snand_fdm_bm_swap(snf);
+ }
+ }
+
+ // copy data back
+ if (nfi_read32(snf, NFI_STA) & READ_EMPTY) {
+ memset(op->data.buf.in, 0xff, op->data.nbytes);
+ snf->ecc_stats.bitflips = 0;
+ snf->ecc_stats.failed = 0;
+ snf->ecc_stats.corrected = 0;
+ } else {
+ if (buf == op->data.buf.in) {
+ u32 cap_len = snf->buf_len - snf->nfi_cfg.page_size;
+ u32 req_left = op->data.nbytes - snf->nfi_cfg.page_size;
+
+ if (req_left)
+ memcpy(op->data.buf.in + snf->nfi_cfg.page_size,
+ buf_fdm,
+ cap_len < req_left ? cap_len : req_left);
+ } else if (rd_offset < snf->buf_len) {
+ u32 cap_len = snf->buf_len - rd_offset;
+
+ if (op->data.nbytes < cap_len)
+ cap_len = op->data.nbytes;
+ memcpy(op->data.buf.in, snf->buf + rd_offset, cap_len);
+ }
+ }
+cleanup2:
+ if (op->data.ecc)
+ mtk_ecc_disable(snf->ecc);
+cleanup_dma:
+ // unmap dma only if any error happens. (otherwise it's done before
+ // data copying)
+ if (ret)
+ dma_unmap_single(snf->dev, buf_dma, dma_len, DMA_FROM_DEVICE);
+cleanup:
+ // Stop read
+ nfi_write32(snf, NFI_CON, 0);
+ nfi_write16(snf, NFI_CNFG, 0);
+
+ // Clear SNF done flag
+ nfi_rmw32(snf, SNF_STA_CTL1, 0, CUS_READ_DONE);
+ nfi_write32(snf, SNF_STA_CTL1, 0);
+
+ // Disable interrupt
+ nfi_read32(snf, NFI_INTR_STA);
+ nfi_write32(snf, NFI_INTR_EN, 0);
+
+ nfi_rmw32(snf, SNF_MISC_CTL, DATARD_CUSTOM_EN, 0);
+ return ret;
+}
+
+static int mtk_snand_write_page_cache(struct mtk_snand *snf,
+ const struct spi_mem_op *op)
+{
+ // the address part to be sent by the controller
+ u32 op_addr = op->addr.val;
+ // where to start copying data from bounce buffer
+ u32 wr_offset = 0;
+ u32 op_mode = 0;
+ int ret = 0;
+ u32 wr_mode = 0;
+ u32 dma_len = snf->buf_len;
+ u32 wr_bytes, val;
+ size_t cap_len;
+ dma_addr_t buf_dma;
+
+ if (snf->autofmt) {
+ u32 last_bit;
+ u32 mask;
+
+ dma_len = snf->nfi_cfg.page_size;
+ op_mode = CNFG_AUTO_FMT_EN;
+ if (op->data.ecc)
+ op_mode |= CNFG_HW_ECC_EN;
+
+ last_bit = fls(snf->nfi_cfg.page_size + snf->nfi_cfg.oob_size);
+ mask = (1 << last_bit) - 1;
+ wr_offset = op_addr & mask;
+ op_addr &= ~mask;
+ }
+ mtk_snand_mac_reset(snf);
+ mtk_nfi_reset(snf);
+
+ if (wr_offset)
+ memset(snf->buf, 0xff, wr_offset);
+
+ cap_len = snf->buf_len - wr_offset;
+ if (op->data.nbytes < cap_len)
+ cap_len = op->data.nbytes;
+ memcpy(snf->buf + wr_offset, op->data.buf.out, cap_len);
+ if (snf->autofmt) {
+ if (snf->caps->bbm_swap) {
+ mtk_snand_fdm_bm_swap(snf);
+ mtk_snand_bm_swap(snf, snf->buf);
+ }
+ mtk_snand_write_fdm(snf, snf->buf + snf->nfi_cfg.page_size);
+ }
+
+ // Command
+ nfi_write32(snf, SNF_PG_CTL1, (op->cmd.opcode << PG_LOAD_CMD_S));
+
+ // write address
+ nfi_write32(snf, SNF_PG_CTL2, op_addr);
+
+ // Set read op_mode
+ if (op->data.buswidth == 4)
+ wr_mode = PG_LOAD_X4_EN;
+
+ nfi_rmw32(snf, SNF_MISC_CTL, PG_LOAD_X4_EN,
+ wr_mode | PG_LOAD_CUSTOM_EN);
+
+ // Set bytes to write
+ wr_bytes = (snf->nfi_cfg.spare_size + snf->caps->sector_size) *
+ snf->nfi_cfg.nsectors;
+ nfi_write32(snf, SNF_MISC_CTL2,
+ (wr_bytes << PROGRAM_LOAD_BYTE_NUM_S) | wr_bytes);
+
+ // NFI write prepare
+ nfi_write16(snf, NFI_CNFG,
+ (CNFG_OP_MODE_PROGRAM << CNFG_OP_MODE_S) |
+ CNFG_DMA_BURST_EN | CNFG_DMA_MODE | op_mode);
+
+ nfi_write32(snf, NFI_CON, (snf->nfi_cfg.nsectors << CON_SEC_NUM_S));
+ buf_dma = dma_map_single(snf->dev, snf->buf, dma_len, DMA_TO_DEVICE);
+ ret = dma_mapping_error(snf->dev, buf_dma);
+ if (ret) {
+ dev_err(snf->dev, "DMA mapping failed.\n");
+ goto cleanup;
+ }
+ nfi_write32(snf, NFI_STRADDR, buf_dma);
+ if (op->data.ecc) {
+ snf->ecc_cfg->op = ECC_ENCODE;
+ ret = mtk_ecc_enable(snf->ecc, snf->ecc_cfg);
+ if (ret)
+ goto cleanup_dma;
+ }
+ // Prepare for custom write interrupt
+ nfi_write32(snf, NFI_INTR_EN, NFI_IRQ_INTR_EN | NFI_IRQ_CUS_PG);
+ reinit_completion(&snf->op_done);
+ ;
+
+ // Trigger NFI into custom mode
+ nfi_write16(snf, NFI_CMD, NFI_CMD_DUMMY_WRITE);
+
+ // Start DMA write
+ nfi_rmw32(snf, NFI_CON, 0, CON_BWR);
+ nfi_write16(snf, NFI_STRDATA, STR_DATA);
+
+ if (!wait_for_completion_timeout(
+ &snf->op_done, usecs_to_jiffies(SNFI_POLL_INTERVAL))) {
+ dev_err(snf->dev, "DMA timed out for program load.\n");
+ ret = -ETIMEDOUT;
+ goto cleanup_ecc;
+ }
+
+ // Wait for NFI_SEC_CNTR returning expected value
+ ret = readl_poll_timeout(snf->nfi_base + NFI_ADDRCNTR, val,
+ NFI_SEC_CNTR(val) >= snf->nfi_cfg.nsectors, 0,
+ SNFI_POLL_INTERVAL);
+ if (ret)
+ dev_err(snf->dev, "Timed out waiting for NFI_SEC_CNTR\n");
+
+cleanup_ecc:
+ if (op->data.ecc)
+ mtk_ecc_disable(snf->ecc);
+cleanup_dma:
+ dma_unmap_single(snf->dev, buf_dma, dma_len, DMA_TO_DEVICE);
+cleanup:
+ // Stop write
+ nfi_write32(snf, NFI_CON, 0);
+ nfi_write16(snf, NFI_CNFG, 0);
+
+ // Clear SNF done flag
+ nfi_rmw32(snf, SNF_STA_CTL1, 0, CUS_PG_DONE);
+ nfi_write32(snf, SNF_STA_CTL1, 0);
+
+ // Disable interrupt
+ nfi_read32(snf, NFI_INTR_STA);
+ nfi_write32(snf, NFI_INTR_EN, 0);
+
+ nfi_rmw32(snf, SNF_MISC_CTL, PG_LOAD_CUSTOM_EN, 0);
+
+ return ret;
+}
+
+/**
+ * mtk_snand_is_page_ops() - check if the op is a controller-supported page op.
+ * @op: spi-mem op to check
+ *
+ * Check whether op can be executed with read_from_cache or program_load
+ * mode in the controller.
+ * This controller can execute typical Read From Cache and Program Load
+ * instructions found on SPI-NAND with 2-byte address.
+ * DTR and cmd buswidth & nbytes should be checked before calling this.
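+ *
+ * For illustration (not a list this function checks against), the typical
+ * SPI-NAND instructions matching the template are 03h/0Bh read from cache,
+ * 3Bh/6Bh dual/quad output, BBh/EBh dual/quad I/O, and 02h/32h program
+ * load x1/x4.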
+ *
+ * Return: true if the op matches the instruction template
+ */
+static bool mtk_snand_is_page_ops(const struct spi_mem_op *op)
+{
+ if (op->addr.nbytes != 2)
+ return false;
+
+ if (op->addr.buswidth != 1 && op->addr.buswidth != 2 &&
+ op->addr.buswidth != 4)
+ return false;
+
+ // match read from cache instructions
+ if (op->data.dir == SPI_MEM_DATA_IN) {
+ // check dummy cycle first
+ if (op->dummy.nbytes * BITS_PER_BYTE / op->dummy.buswidth >
+ DATA_READ_MAX_DUMMY)
+ return false;
+ // quad io / quad out
+ if ((op->addr.buswidth == 4 || op->addr.buswidth == 1) &&
+ op->data.buswidth == 4)
+ return true;
+
+ // dual io / dual out
+ if ((op->addr.buswidth == 2 || op->addr.buswidth == 1) &&
+ op->data.buswidth == 2)
+ return true;
+
+ // standard spi
+ if (op->addr.buswidth == 1 && op->data.buswidth == 1)
+ return true;
+ } else if (op->data.dir == SPI_MEM_DATA_OUT) {
+ // check dummy cycle first
+ if (op->dummy.nbytes)
+ return false;
+ // program load quad out
+ if (op->addr.buswidth == 1 && op->data.buswidth == 4)
+ return true;
+ // standard spi
+ if (op->addr.buswidth == 1 && op->data.buswidth == 1)
+ return true;
+ }
+ return false;
+}
+
+static bool mtk_snand_supports_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ if (!spi_mem_default_supports_op(mem, op))
+ return false;
+ if (op->cmd.nbytes != 1 || op->cmd.buswidth != 1)
+ return false;
+ if (mtk_snand_is_page_ops(op))
+ return true;
+ return ((op->addr.nbytes == 0 || op->addr.buswidth == 1) &&
+ (op->dummy.nbytes == 0 || op->dummy.buswidth == 1) &&
+ (op->data.nbytes == 0 || op->data.buswidth == 1));
+}
+
+static int mtk_snand_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
+{
+ struct mtk_snand *ms = spi_controller_get_devdata(mem->spi->master);
+ // page ops transfer size must be exactly ((sector_size + spare_size) *
+ // nsectors). Limit the op size if the caller requests more than that.
+ // exec_op will read more than needed and discard the leftover if the
+ // caller requests less data.
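+ // e.g. (hypothetical geometry): 512-byte sectors with 16-byte spare
+ // areas and 4 sectors cap a non-autofmt page op at (512 + 16) * 4 =
+ // 2112 bytes.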
+ if (mtk_snand_is_page_ops(op)) {
+ size_t l;
+ // skip adjust_op_size for page ops
+ if (ms->autofmt)
+ return 0;
+ l = ms->caps->sector_size + ms->nfi_cfg.spare_size;
+ l *= ms->nfi_cfg.nsectors;
+ if (op->data.nbytes > l)
+ op->data.nbytes = l;
+ } else {
+ size_t hl = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes;
+
+ if (hl >= SNF_GPRAM_SIZE)
+ return -EOPNOTSUPP;
+ if (op->data.nbytes > SNF_GPRAM_SIZE - hl)
+ op->data.nbytes = SNF_GPRAM_SIZE - hl;
+ }
+ return 0;
+}
+
+static int mtk_snand_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+ struct mtk_snand *ms = spi_controller_get_devdata(mem->spi->master);
+
+ dev_dbg(ms->dev, "OP %02x ADDR %08llX@%d:%u DATA %d:%u", op->cmd.opcode,
+ op->addr.val, op->addr.buswidth, op->addr.nbytes,
+ op->data.buswidth, op->data.nbytes);
+ if (mtk_snand_is_page_ops(op)) {
+ if (op->data.dir == SPI_MEM_DATA_IN)
+ return mtk_snand_read_page_cache(ms, op);
+ else
+ return mtk_snand_write_page_cache(ms, op);
+ } else {
+ return mtk_snand_mac_io(ms, op);
+ }
+}
+
+static const struct spi_controller_mem_ops mtk_snand_mem_ops = {
+ .adjust_op_size = mtk_snand_adjust_op_size,
+ .supports_op = mtk_snand_supports_op,
+ .exec_op = mtk_snand_exec_op,
+};
+
+static const struct spi_controller_mem_caps mtk_snand_mem_caps = {
+ .ecc = true,
+};
+
+static irqreturn_t mtk_snand_irq(int irq, void *id)
+{
+ struct mtk_snand *snf = id;
+ u32 sta, ien;
+
+ sta = nfi_read32(snf, NFI_INTR_STA);
+ ien = nfi_read32(snf, NFI_INTR_EN);
+
+ if (!(sta & ien))
+ return IRQ_NONE;
+
+ nfi_write32(snf, NFI_INTR_EN, 0);
+ complete(&snf->op_done);
+ return IRQ_HANDLED;
+}
+
+static const struct of_device_id mtk_snand_ids[] = {
+ { .compatible = "mediatek,mt7622-snand", .data = &mt7622_snand_caps },
+ { .compatible = "mediatek,mt7629-snand", .data = &mt7629_snand_caps },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, mtk_snand_ids);
+
+static int mtk_snand_enable_clk(struct mtk_snand *ms)
+{
+ int ret;
+
+ ret = clk_prepare_enable(ms->nfi_clk);
+ if (ret) {
+ dev_err(ms->dev, "unable to enable nfi clk\n");
+ return ret;
+ }
+ ret = clk_prepare_enable(ms->pad_clk);
+ if (ret) {
+ dev_err(ms->dev, "unable to enable pad clk\n");
+ goto err1;
+ }
+ return 0;
+err1:
+ clk_disable_unprepare(ms->nfi_clk);
+ return ret;
+}
+
+static void mtk_snand_disable_clk(struct mtk_snand *ms)
+{
+ clk_disable_unprepare(ms->pad_clk);
+ clk_disable_unprepare(ms->nfi_clk);
+}
+
+static int mtk_snand_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ const struct of_device_id *dev_id;
+ struct spi_controller *ctlr;
+ struct mtk_snand *ms;
+ int ret;
+
+ dev_id = of_match_node(mtk_snand_ids, np);
+ if (!dev_id)
+ return -EINVAL;
+
+ ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(*ms));
+ if (!ctlr)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, ctlr);
+
+ ms = spi_controller_get_devdata(ctlr);
+
+ ms->ctlr = ctlr;
+ ms->caps = dev_id->data;
+
+ ms->ecc = of_mtk_ecc_get(np);
+ if (IS_ERR(ms->ecc))
+ return PTR_ERR(ms->ecc);
+ else if (!ms->ecc)
+ return -ENODEV;
+
+ ms->nfi_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ms->nfi_base)) {
+ ret = PTR_ERR(ms->nfi_base);
+ goto release_ecc;
+ }
+
+ ms->dev = &pdev->dev;
+
+ ms->nfi_clk = devm_clk_get(&pdev->dev, "nfi_clk");
+ if (IS_ERR(ms->nfi_clk)) {
+ ret = PTR_ERR(ms->nfi_clk);
+ dev_err(&pdev->dev, "unable to get nfi_clk, err = %d\n", ret);
+ goto release_ecc;
+ }
+
+ ms->pad_clk = devm_clk_get(&pdev->dev, "pad_clk");
+ if (IS_ERR(ms->pad_clk)) {
+ ret = PTR_ERR(ms->pad_clk);
+ dev_err(&pdev->dev, "unable to get pad_clk, err = %d\n", ret);
+ goto release_ecc;
+ }
+
+ ret = mtk_snand_enable_clk(ms);
+ if (ret)
+ goto release_ecc;
+
+ init_completion(&ms->op_done);
+
+ ms->irq = platform_get_irq(pdev, 0);
+ if (ms->irq < 0) {
+ ret = ms->irq;
+ goto disable_clk;
+ }
+ ret = devm_request_irq(ms->dev, ms->irq, mtk_snand_irq, 0x0,
+ "mtk-snand", ms);
+ if (ret) {
+ dev_err(ms->dev, "failed to request snfi irq\n");
+ goto disable_clk;
+ }
+
+ ret = dma_set_mask(ms->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(ms->dev, "failed to set dma mask\n");
+ goto disable_clk;
+ }
+
+ // switch to SNFI mode
+ nfi_write32(ms, SNF_CFG, SPI_MODE);
+
+ // configure an initial page format for ops matching the page_cache_op
+ // template issued before the ECC engine is initialized.
+ ret = mtk_snand_setup_pagefmt(ms, ms->caps->sector_size,
+ ms->caps->spare_sizes[0]);
+ if (ret) {
+ dev_err(ms->dev, "failed to set initial page format\n");
+ goto disable_clk;
+ }
+
+ // setup ECC engine
+ ms->ecc_eng.dev = &pdev->dev;
+ ms->ecc_eng.integration = NAND_ECC_ENGINE_INTEGRATION_PIPELINED;
+ ms->ecc_eng.ops = &mtk_snfi_ecc_engine_ops;
+ ms->ecc_eng.priv = ms;
+
+ ret = nand_ecc_register_on_host_hw_engine(&ms->ecc_eng);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register ecc engine.\n");
+ goto disable_clk;
+ }
+
+ ctlr->num_chipselect = 1;
+ ctlr->mem_ops = &mtk_snand_mem_ops;
+ ctlr->mem_caps = &mtk_snand_mem_caps;
+ ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
+ ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_DUAL | SPI_TX_QUAD;
+ ctlr->dev.of_node = pdev->dev.of_node;
+ ret = spi_register_controller(ctlr);
+ if (ret) {
+ dev_err(&pdev->dev, "spi_register_controller failed.\n");
+ goto disable_clk;
+ }
+
+ return 0;
+disable_clk:
+ mtk_snand_disable_clk(ms);
+release_ecc:
+ mtk_ecc_release(ms->ecc);
+ return ret;
+}
+
+static int mtk_snand_remove(struct platform_device *pdev)
+{
+ struct spi_controller *ctlr = platform_get_drvdata(pdev);
+ struct mtk_snand *ms = spi_controller_get_devdata(ctlr);
+
+ spi_unregister_controller(ctlr);
+ mtk_snand_disable_clk(ms);
+ mtk_ecc_release(ms->ecc);
+ kfree(ms->buf);
+ return 0;
+}
+
+static struct platform_driver mtk_snand_driver = {
+ .probe = mtk_snand_probe,
+ .remove = mtk_snand_remove,
+ .driver = {
+ .name = "mtk-snand",
+ .of_match_table = mtk_snand_ids,
+ },
+};
+
+module_platform_driver(mtk_snand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Chuanhong Guo <gch981213@gmail.com>");
+MODULE_DESCRIPTION("MeidaTek SPI-NAND Flash Controller Driver");
diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c
index 435309b09227..55178579f3c6 100644
--- a/drivers/spi/spi-mxs.c
+++ b/drivers/spi/spi-mxs.c
@@ -605,9 +605,8 @@ static int mxs_spi_probe(struct platform_device *pdev)
}
}
- ret = pm_runtime_get_sync(ssp->dev);
+ ret = pm_runtime_resume_and_get(ssp->dev);
if (ret < 0) {
- pm_runtime_put_noidle(ssp->dev);
dev_err(ssp->dev, "runtime_get_sync failed\n");
goto out_pm_runtime_disable;
}
diff --git a/drivers/spi/spi-omap-uwire.c b/drivers/spi/spi-omap-uwire.c
index 087172a193fa..29198e6815b2 100644
--- a/drivers/spi/spi-omap-uwire.c
+++ b/drivers/spi/spi-omap-uwire.c
@@ -44,13 +44,10 @@
#include <linux/module.h>
#include <linux/io.h>
-#include <mach/hardware.h>
#include <asm/mach-types.h>
-
-#include <mach/mux.h>
-
-#include <mach/omap7xx.h> /* OMAP7XX_IO_CONF registers */
-
+#include <linux/soc/ti/omap1-io.h>
+#include <linux/soc/ti/omap1-soc.h>
+#include <linux/soc/ti/omap1-mux.h>
/* FIXME address is now a platform device resource,
* and irqs should show there too...
@@ -548,12 +545,6 @@ static int __init omap_uwire_init(void)
omap_cfg_reg(N14_1610_UWIRE_CS0);
omap_cfg_reg(N15_1610_UWIRE_CS1);
}
- if (machine_is_omap_perseus2()) {
- /* configure pins: MPU_UW_nSCS1, MPU_UW_SDO, MPU_UW_SCLK */
- int val = omap_readl(OMAP7XX_IO_CONF_9) & ~0x00EEE000;
- omap_writel(val | 0x00AAA000, OMAP7XX_IO_CONF_9);
- }
-
return platform_driver_register(&uwire_driver);
}
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 60c9cdf1c94b..c42e59df38fe 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -246,9 +246,8 @@ static void omap2_mcspi_set_cs(struct spi_device *spi, bool enable)
enable = !enable;
if (spi->controller_state) {
- int err = pm_runtime_get_sync(mcspi->dev);
+ int err = pm_runtime_resume_and_get(mcspi->dev);
if (err < 0) {
- pm_runtime_put_noidle(mcspi->dev);
dev_err(mcspi->dev, "failed to get sync: %d\n", err);
return;
}
@@ -758,6 +757,8 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
dev_vdbg(&spi->dev, "read-%d %02x\n",
word_len, *(rx - 1));
}
+ /* Add word delay between each word */
+ spi_delay_exec(&xfer->word_delay, xfer);
} while (c);
} else if (word_len <= 16) {
u16 *rx;
@@ -805,6 +806,8 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
dev_vdbg(&spi->dev, "read-%d %04x\n",
word_len, *(rx - 1));
}
+ /* Add word delay between each word */
+ spi_delay_exec(&xfer->word_delay, xfer);
} while (c >= 2);
} else if (word_len <= 32) {
u32 *rx;
@@ -852,6 +855,8 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
dev_vdbg(&spi->dev, "read-%d %08x\n",
word_len, *(rx - 1));
}
+ /* Add word delay between each word */
+ spi_delay_exec(&xfer->word_delay, xfer);
} while (c >= 4);
}
@@ -1068,9 +1073,8 @@ static int omap2_mcspi_setup(struct spi_device *spi)
initial_setup = true;
}
- ret = pm_runtime_get_sync(mcspi->dev);
+ ret = pm_runtime_resume_and_get(mcspi->dev);
if (ret < 0) {
- pm_runtime_put_noidle(mcspi->dev);
if (initial_setup)
omap2_mcspi_cleanup(spi);
@@ -1317,12 +1321,9 @@ static int omap2_mcspi_controller_setup(struct omap2_mcspi *mcspi)
struct omap2_mcspi_regs *ctx = &mcspi->ctx;
int ret = 0;
- ret = pm_runtime_get_sync(mcspi->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(mcspi->dev);
-
+ ret = pm_runtime_resume_and_get(mcspi->dev);
+ if (ret < 0)
return ret;
- }
mcspi_write_reg(master, OMAP2_MCSPI_WAKEUPENABLE,
OMAP2_MCSPI_WAKEUPENABLE_WKEN);
diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
index cdc16eecaf6b..a08215eb9e14 100644
--- a/drivers/spi/spi-rockchip.c
+++ b/drivers/spi/spi-rockchip.c
@@ -196,6 +196,8 @@ struct rockchip_spi {
bool slave_abort;
bool cs_inactive; /* spi slave tansmition stop when cs inactive */
+ bool cs_high_supported; /* native CS supports active-high polarity */
+
struct spi_transfer *xfer; /* Store xfer temporarily */
};
@@ -719,6 +721,11 @@ static int rockchip_spi_setup(struct spi_device *spi)
struct rockchip_spi *rs = spi_controller_get_devdata(spi->controller);
u32 cr0;
+ if (!spi->cs_gpiod && (spi->mode & SPI_CS_HIGH) && !rs->cs_high_supported) {
+ dev_warn(&spi->dev, "setup: non GPIO CS can't be active-high\n");
+ return -EINVAL;
+ }
+
pm_runtime_get_sync(rs->dev);
cr0 = readl_relaxed(rs->regs + ROCKCHIP_SPI_CTRLR0);
@@ -899,6 +906,7 @@ static int rockchip_spi_probe(struct platform_device *pdev)
switch (readl_relaxed(rs->regs + ROCKCHIP_SPI_VERSION)) {
case ROCKCHIP_SPI_VER2_TYPE2:
+ rs->cs_high_supported = true;
ctlr->mode_bits |= SPI_CS_HIGH;
if (ctlr->can_dma && slave_mode)
rs->cs_inactive = true;
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index bd5708d7e5a1..7a014eeec2d0 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -1108,14 +1108,11 @@ static struct dma_chan *rspi_request_dma_chan(struct device *dev,
}
memset(&cfg, 0, sizeof(cfg));
+ cfg.dst_addr = port_addr + RSPI_SPDR;
+ cfg.src_addr = port_addr + RSPI_SPDR;
+ cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
cfg.direction = dir;
- if (dir == DMA_MEM_TO_DEV) {
- cfg.dst_addr = port_addr;
- cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
- } else {
- cfg.src_addr = port_addr;
- cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
- }
ret = dmaengine_slave_config(chan, &cfg);
if (ret) {
@@ -1146,12 +1143,12 @@ static int rspi_request_dma(struct device *dev, struct spi_controller *ctlr,
}
ctlr->dma_tx = rspi_request_dma_chan(dev, DMA_MEM_TO_DEV, dma_tx_id,
- res->start + RSPI_SPDR);
+ res->start);
if (!ctlr->dma_tx)
return -ENODEV;
ctlr->dma_rx = rspi_request_dma_chan(dev, DMA_DEV_TO_MEM, dma_rx_id,
- res->start + RSPI_SPDR);
+ res->start);
if (!ctlr->dma_rx) {
dma_release_channel(ctlr->dma_tx);
ctlr->dma_tx = NULL;
diff --git a/drivers/spi/spi-sprd.c b/drivers/spi/spi-sprd.c
index 28e70db9bbba..65b8075da4eb 100644
--- a/drivers/spi/spi-sprd.c
+++ b/drivers/spi/spi-sprd.c
@@ -1008,9 +1008,8 @@ static int sprd_spi_remove(struct platform_device *pdev)
struct sprd_spi *ss = spi_controller_get_devdata(sctlr);
int ret;
- ret = pm_runtime_get_sync(ss->dev);
+ ret = pm_runtime_resume_and_get(ss->dev);
if (ret < 0) {
- pm_runtime_put_noidle(ss->dev);
dev_err(ss->dev, "failed to resume SPI controller\n");
return ret;
}
diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c
index ffdc55f87e82..c0239e405c39 100644
--- a/drivers/spi/spi-stm32-qspi.c
+++ b/drivers/spi/spi-stm32-qspi.c
@@ -305,10 +305,8 @@ static int stm32_qspi_wait_cmd(struct stm32_qspi *qspi,
u32 cr, sr;
int err = 0;
- if (!op->data.nbytes)
- goto wait_nobusy;
-
- if (readl_relaxed(qspi->io_base + QSPI_SR) & SR_TCF)
+ if ((readl_relaxed(qspi->io_base + QSPI_SR) & SR_TCF) ||
+ qspi->fmode == CCR_FMODE_APM)
goto out;
reinit_completion(&qspi->data_completion);
@@ -327,7 +325,6 @@ static int stm32_qspi_wait_cmd(struct stm32_qspi *qspi,
out:
/* clear flags */
writel_relaxed(FCR_CTCF | FCR_CTEF, qspi->io_base + QSPI_FCR);
-wait_nobusy:
if (!err)
err = stm32_qspi_wait_nobusy(qspi);
@@ -372,10 +369,6 @@ static int stm32_qspi_send(struct spi_mem *mem, const struct spi_mem_op *op)
op->dummy.buswidth, op->data.buswidth,
op->addr.val, op->data.nbytes);
- err = stm32_qspi_wait_nobusy(qspi);
- if (err)
- goto abort;
-
cr = readl_relaxed(qspi->io_base + QSPI_CR);
cr &= ~CR_PRESC_MASK & ~CR_FSEL;
cr |= FIELD_PREP(CR_PRESC_MASK, flash->presc);
@@ -463,11 +456,9 @@ static int stm32_qspi_poll_status(struct spi_mem *mem, const struct spi_mem_op *
if (!spi_mem_supports_op(mem, op))
return -EOPNOTSUPP;
- ret = pm_runtime_get_sync(qspi->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(qspi->dev);
+ ret = pm_runtime_resume_and_get(qspi->dev);
+ if (ret < 0)
return ret;
- }
mutex_lock(&qspi->lock);
@@ -490,11 +481,9 @@ static int stm32_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
struct stm32_qspi *qspi = spi_controller_get_devdata(mem->spi->master);
int ret;
- ret = pm_runtime_get_sync(qspi->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(qspi->dev);
+ ret = pm_runtime_resume_and_get(qspi->dev);
+ if (ret < 0)
return ret;
- }
mutex_lock(&qspi->lock);
if (op->data.dir == SPI_MEM_DATA_IN && op->data.nbytes)
@@ -536,11 +525,9 @@ static ssize_t stm32_qspi_dirmap_read(struct spi_mem_dirmap_desc *desc,
u32 addr_max;
int ret;
- ret = pm_runtime_get_sync(qspi->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(qspi->dev);
+ ret = pm_runtime_resume_and_get(qspi->dev);
+ if (ret < 0)
return ret;
- }
mutex_lock(&qspi->lock);
/* make a local copy of desc op_tmpl and complete dirmap rdesc
@@ -583,11 +570,9 @@ static int stm32_qspi_setup(struct spi_device *spi)
if (!spi->max_speed_hz)
return -EINVAL;
- ret = pm_runtime_get_sync(qspi->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(qspi->dev);
+ ret = pm_runtime_resume_and_get(qspi->dev);
+ if (ret < 0)
return ret;
- }
presc = DIV_ROUND_UP(qspi->clk_rate, spi->max_speed_hz) - 1;
@@ -851,11 +836,9 @@ static int __maybe_unused stm32_qspi_resume(struct device *dev)
pinctrl_pm_select_default_state(dev);
- ret = pm_runtime_get_sync(dev);
- if (ret < 0) {
- pm_runtime_put_noidle(dev);
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
return ret;
- }
writel_relaxed(qspi->cr_reg, qspi->io_base + QSPI_CR);
writel_relaxed(qspi->dcr_reg, qspi->io_base + QSPI_DCR);
diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
index a6adc20f6862..6fe617b445a5 100644
--- a/drivers/spi/spi-stm32.c
+++ b/drivers/spi/spi-stm32.c
@@ -2000,9 +2000,8 @@ static int __maybe_unused stm32_spi_resume(struct device *dev)
return ret;
}
- ret = pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret < 0) {
- pm_runtime_put_noidle(dev);
dev_err(dev, "Unable to power device:%d\n", ret);
return ret;
}
diff --git a/drivers/spi/spi-sunplus-sp7021.c b/drivers/spi/spi-sunplus-sp7021.c
index f989f7b99296..f1fa88777575 100644
--- a/drivers/spi/spi-sunplus-sp7021.c
+++ b/drivers/spi/spi-sunplus-sp7021.c
@@ -85,8 +85,6 @@ struct sp7021_spi_ctlr {
int s_irq;
struct clk *spi_clk;
struct reset_control *rstc;
- // irq spin lock
- spinlock_t lock;
// data xfer lock
struct mutex buf_lock;
struct completion isr_done;
@@ -199,8 +197,6 @@ static irqreturn_t sp7021_spi_master_irq(int irq, void *dev)
if (tx_len == 0 && total_len == 0)
return IRQ_NONE;
- spin_lock_irq(&pspim->lock);
-
rx_cnt = FIELD_GET(SP7021_RX_CNT_MASK, fd_status);
if (fd_status & SP7021_RX_FULL_FLAG)
rx_cnt = pspim->data_unit;
@@ -239,7 +235,6 @@ static irqreturn_t sp7021_spi_master_irq(int irq, void *dev)
if (isrdone)
complete(&pspim->isr_done);
- spin_unlock_irq(&pspim->lock);
return IRQ_HANDLED;
}
@@ -446,7 +441,6 @@ static int sp7021_spi_controller_probe(struct platform_device *pdev)
pspim->mode = mode;
pspim->ctlr = ctlr;
pspim->dev = dev;
- spin_lock_init(&pspim->lock);
mutex_init(&pspim->buf_lock);
init_completion(&pspim->isr_done);
init_completion(&pspim->slave_isr);
diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c
index 8f345247a8c3..d9be80e3e1bc 100644
--- a/drivers/spi/spi-tegra114.c
+++ b/drivers/spi/spi-tegra114.c
@@ -964,9 +964,8 @@ static int tegra_spi_setup(struct spi_device *spi)
spi->controller_data = cdata;
}
- ret = pm_runtime_get_sync(tspi->dev);
+ ret = pm_runtime_resume_and_get(tspi->dev);
if (ret < 0) {
- pm_runtime_put_noidle(tspi->dev);
dev_err(tspi->dev, "pm runtime failed, e = %d\n", ret);
if (cdata)
tegra_spi_cleanup(spi);
@@ -1394,10 +1393,9 @@ static int tegra_spi_probe(struct platform_device *pdev)
goto exit_pm_disable;
}
- ret = pm_runtime_get_sync(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
if (ret < 0) {
dev_err(&pdev->dev, "pm runtime get failed, e = %d\n", ret);
- pm_runtime_put_noidle(&pdev->dev);
goto exit_pm_disable;
}
@@ -1476,9 +1474,8 @@ static int tegra_spi_resume(struct device *dev)
struct tegra_spi_data *tspi = spi_master_get_devdata(master);
int ret;
- ret = pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret < 0) {
- pm_runtime_put_noidle(dev);
dev_err(dev, "pm runtime failed, e = %d\n", ret);
return ret;
}
diff --git a/drivers/spi/spi-tegra20-sflash.c b/drivers/spi/spi-tegra20-sflash.c
index 2888d8a8dc6d..220ee08c4a06 100644
--- a/drivers/spi/spi-tegra20-sflash.c
+++ b/drivers/spi/spi-tegra20-sflash.c
@@ -486,10 +486,9 @@ static int tegra_sflash_probe(struct platform_device *pdev)
goto exit_pm_disable;
}
- ret = pm_runtime_get_sync(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
if (ret < 0) {
dev_err(&pdev->dev, "pm runtime get failed, e = %d\n", ret);
- pm_runtime_put_noidle(&pdev->dev);
goto exit_pm_disable;
}
@@ -549,9 +548,8 @@ static int tegra_sflash_resume(struct device *dev)
struct tegra_sflash_data *tsd = spi_master_get_devdata(master);
int ret;
- ret = pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret < 0) {
- pm_runtime_put_noidle(dev);
dev_err(dev, "pm runtime failed, e = %d\n", ret);
return ret;
}
diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c
index 80c3787deea9..38360434d6e9 100644
--- a/drivers/spi/spi-tegra20-slink.c
+++ b/drivers/spi/spi-tegra20-slink.c
@@ -749,9 +749,8 @@ static int tegra_slink_setup(struct spi_device *spi)
spi->mode & SPI_CPHA ? "" : "~",
spi->max_speed_hz);
- ret = pm_runtime_get_sync(tspi->dev);
+ ret = pm_runtime_resume_and_get(tspi->dev);
if (ret < 0) {
- pm_runtime_put_noidle(tspi->dev);
dev_err(tspi->dev, "pm runtime failed, e = %d\n", ret);
return ret;
}
@@ -1169,9 +1168,8 @@ static int tegra_slink_resume(struct device *dev)
struct tegra_slink_data *tspi = spi_master_get_devdata(master);
int ret;
- ret = pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret < 0) {
- pm_runtime_put_noidle(dev);
dev_err(dev, "pm runtime failed, e = %d\n", ret);
return ret;
}
diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
index e06aafe169e0..b5b65d882d7a 100644
--- a/drivers/spi/spi-ti-qspi.c
+++ b/drivers/spi/spi-ti-qspi.c
@@ -172,9 +172,8 @@ static int ti_qspi_setup(struct spi_device *spi)
dev_dbg(qspi->dev, "hz: %d, clock divider %d\n",
qspi->spi_max_frequency, clk_div);
- ret = pm_runtime_get_sync(qspi->dev);
+ ret = pm_runtime_resume_and_get(qspi->dev);
if (ret < 0) {
- pm_runtime_put_noidle(qspi->dev);
dev_err(qspi->dev, "pm_runtime_get_sync() failed\n");
return ret;
}
@@ -448,6 +447,7 @@ static int ti_qspi_dma_xfer(struct ti_qspi *qspi, dma_addr_t dma_dst,
enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
struct dma_async_tx_descriptor *tx;
int ret;
+ unsigned long time_left;
tx = dmaengine_prep_dma_memcpy(chan, dma_dst, dma_src, len, flags);
if (!tx) {
@@ -467,9 +467,9 @@ static int ti_qspi_dma_xfer(struct ti_qspi *qspi, dma_addr_t dma_dst,
}
dma_async_issue_pending(chan);
- ret = wait_for_completion_timeout(&qspi->transfer_complete,
+ time_left = wait_for_completion_timeout(&qspi->transfer_complete,
msecs_to_jiffies(len));
- if (ret <= 0) {
+ if (time_left == 0) {
dmaengine_terminate_sync(chan);
dev_err(qspi->dev, "DMA wait_for_completion_timeout\n");
return -ETIMEDOUT;
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 2e6d6bbeb784..fe252a8075a7 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1611,9 +1611,8 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
mutex_lock(&ctlr->io_mutex);
if (!was_busy && ctlr->auto_runtime_pm) {
- ret = pm_runtime_get_sync(ctlr->dev.parent);
+ ret = pm_runtime_resume_and_get(ctlr->dev.parent);
if (ret < 0) {
- pm_runtime_put_noidle(ctlr->dev.parent);
dev_err(&ctlr->dev, "Failed to power device: %d\n",
ret);
mutex_unlock(&ctlr->io_mutex);
@@ -3475,7 +3474,7 @@ static int __spi_validate_bits_per_word(struct spi_controller *ctlr,
int spi_setup(struct spi_device *spi)
{
unsigned bad_bits, ugly_bits;
- int status;
+ int status = 0;
/*
* Check mode to prevent that any two of DUAL, QUAD and NO_MOSI/MISO
@@ -3518,13 +3517,18 @@ int spi_setup(struct spi_device *spi)
return -EINVAL;
}
- if (!spi->bits_per_word)
+ if (!spi->bits_per_word) {
spi->bits_per_word = 8;
-
- status = __spi_validate_bits_per_word(spi->controller,
- spi->bits_per_word);
- if (status)
- return status;
+ } else {
+ /*
+ * Some controllers may not support the default 8 bits-per-word,
+ * so only perform the check when this is explicitly provided.
+ */
+ status = __spi_validate_bits_per_word(spi->controller,
+ spi->bits_per_word);
+ if (status)
+ return status;
+ }

if (spi->controller->max_speed_hz &&
(!spi->max_speed_hz ||
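
The restructured block treats bits_per_word == 0 as "use the default of 8" and trusts every controller to handle that, so validation against the controller's bits_per_word_mask now runs only for explicitly requested word sizes. The skipped check amounts to roughly the following, a simplified rendering of __spi_validate_bits_per_word():

	static int validate_bpw(struct spi_controller *ctlr, u8 bits_per_word)
	{
		if (ctlr->bits_per_word_mask) {
			/* only 1..32 bit words fit in the mask */
			if (bits_per_word > 32)
				return -EINVAL;
			if (!(ctlr->bits_per_word_mask &
			      SPI_BPW_MASK(bits_per_word)))
				return -EINVAL;
		}
		return 0;
	}
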
@@ -3544,10 +3548,9 @@ int spi_setup(struct spi_device *spi)
}

if (spi->controller->auto_runtime_pm && spi->controller->set_cs) {
- status = pm_runtime_get_sync(spi->controller->dev.parent);
+ status = pm_runtime_resume_and_get(spi->controller->dev.parent);
if (status < 0) {
mutex_unlock(&spi->controller->io_mutex);
- pm_runtime_put_noidle(spi->controller->dev.parent);
dev_err(&spi->controller->dev, "Failed to power device: %d\n",
status);
return status;
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 53a551714265..b2775d82d2d7 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -8,19 +8,18 @@
*/
#include <linux/init.h>
-#include <linux/module.h>
#include <linux/ioctl.h>
#include <linux/fs.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/errno.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/property.h>
#include <linux/slab.h>
#include <linux/compat.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/acpi.h>
#include <linux/spi/spi.h>
#include <linux/spi/spidev.h>
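
The include shuffle anticipates the refactor at the bottom of the file: of.h, of_device.h and acpi.h go away because no firmware-specific calls remain, mod_devicetable.h supplies the match-table types, property.h supplies the firmware-agnostic device_property_*() and device_get_match_data() helpers, and the list ends up roughly alphabetical.
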
@@ -46,6 +45,7 @@
static DECLARE_BITMAP(minors, N_SPI_MINORS);

+static_assert(N_SPI_MINORS > 0 && N_SPI_MINORS <= 256);

/* Bit masks for spi_device.mode management. Note that incorrect
* settings for some settings can cause *lots* of trouble for other
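
The file-scope static_assert() replaces the BUILD_BUG_ON(N_SPI_MINORS > 256) removed from spidev_init() in the final hunk: the constraint now lives next to the bitmap it protects, and it additionally rejects a zero minor count.
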
@@ -63,7 +63,8 @@ static DECLARE_BITMAP(minors, N_SPI_MINORS);
| SPI_LSB_FIRST | SPI_3WIRE | SPI_LOOP \
| SPI_NO_CS | SPI_READY | SPI_TX_DUAL \
| SPI_TX_QUAD | SPI_TX_OCTAL | SPI_RX_DUAL \
- | SPI_RX_QUAD | SPI_RX_OCTAL)
+ | SPI_RX_QUAD | SPI_RX_OCTAL \
+ | SPI_RX_CPHA_FLIP)

struct spidev_data {
dev_t devt;
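
SPI_RX_CPHA_FLIP, which requests sampling on the opposite CPHA edge for receive-only transfers, is newly whitelisted so userspace may set it. Since the bit sits above the low byte of the mode word, only the 32-bit ioctl can carry it; a hypothetical userspace sketch (fd assumed open on a /dev/spidevB.C node, on a kernel new enough to define the flag):

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <linux/spi/spidev.h>

	static int enable_rx_cpha_flip(int fd)
	{
		uint32_t mode = SPI_MODE_0 | SPI_RX_CPHA_FLIP;

		/* the 8-bit SPI_IOC_WR_MODE cannot express this bit */
		return ioctl(fd, SPI_IOC_WR_MODE32, &mode);
	}
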
@@ -568,19 +569,20 @@ spidev_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
static int spidev_open(struct inode *inode, struct file *filp)
{
{
- struct spidev_data *spidev;
+ struct spidev_data *spidev = NULL, *iter;
int status = -ENXIO;

mutex_lock(&device_list_lock);

- list_for_each_entry(spidev, &device_list, device_entry) {
- if (spidev->devt == inode->i_rdev) {
+ list_for_each_entry(iter, &device_list, device_entry) {
+ if (iter->devt == inode->i_rdev) {
status = 0;
+ spidev = iter;
break;
}
}

- if (status) {
+ if (!spidev) {
pr_debug("spidev: nothing for minor %d\n", iminor(inode));
goto err_find_dev;
}
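
This follows the tree-wide effort to stop using the list_for_each_entry() cursor after the loop: when no entry matches, the cursor ends up as a bogus pointer computed from the list head rather than a real element, so the hit is published through a second variable and the post-loop test keys off that instead of the status code. The pattern in isolation, with hypothetical types:

	struct foo *found = NULL, *iter;

	list_for_each_entry(iter, &foo_list, node) {
		if (iter->id == wanted_id) {
			found = iter;	/* publish the match */
			break;
		}
	}
	if (!found)
		return -ENXIO;	/* loop ran to completion: no match */
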
@@ -693,25 +695,38 @@ static const struct spi_device_id spidev_spi_ids[] = {
};
MODULE_DEVICE_TABLE(spi, spidev_spi_ids);

-#ifdef CONFIG_OF
+/*
+ * spidev should never be referenced in DT without a specific compatible string,
+ * it is a Linux implementation thing rather than a description of the hardware.
+ */
+static int spidev_of_check(struct device *dev)
+{
+ if (device_property_match_string(dev, "compatible", "spidev") < 0)
+ return 0;
+
+ dev_err(dev, "spidev listed directly in DT is not supported\n");
+ return -EINVAL;
+}
+
static const struct of_device_id spidev_dt_ids[] = {
- { .compatible = "rohm,dh2228fv" },
- { .compatible = "lineartechnology,ltc2488" },
- { .compatible = "semtech,sx1301" },
- { .compatible = "lwn,bk4" },
- { .compatible = "dh,dhcom-board" },
- { .compatible = "menlo,m53cpld" },
- { .compatible = "cisco,spi-petra" },
- { .compatible = "micron,spi-authenta" },
+ { .compatible = "rohm,dh2228fv", .data = &spidev_of_check },
+ { .compatible = "lineartechnology,ltc2488", .data = &spidev_of_check },
+ { .compatible = "semtech,sx1301", .data = &spidev_of_check },
+ { .compatible = "lwn,bk4", .data = &spidev_of_check },
+ { .compatible = "dh,dhcom-board", .data = &spidev_of_check },
+ { .compatible = "menlo,m53cpld", .data = &spidev_of_check },
+ { .compatible = "cisco,spi-petra", .data = &spidev_of_check },
+ { .compatible = "micron,spi-authenta", .data = &spidev_of_check },
{},
};
MODULE_DEVICE_TABLE(of, spidev_dt_ids);
-#endif
-
-#ifdef CONFIG_ACPI
/* Dummy SPI devices not to be used in production systems */
-#define SPIDEV_ACPI_DUMMY 1
+static int spidev_acpi_check(struct device *dev)
+{
+ dev_warn(dev, "do not use this driver in production systems!\n");
+ return 0;
+}

static const struct acpi_device_id spidev_acpi_ids[] = {
/*
@@ -720,51 +735,29 @@ static const struct acpi_device_id spidev_acpi_ids[] = {
* description of the connected peripheral and they should also use
* a proper driver instead of poking directly to the SPI bus.
*/
- { "SPT0001", SPIDEV_ACPI_DUMMY },
- { "SPT0002", SPIDEV_ACPI_DUMMY },
- { "SPT0003", SPIDEV_ACPI_DUMMY },
+ { "SPT0001", (kernel_ulong_t)&spidev_acpi_check },
+ { "SPT0002", (kernel_ulong_t)&spidev_acpi_check },
+ { "SPT0003", (kernel_ulong_t)&spidev_acpi_check },
{},
};
MODULE_DEVICE_TABLE(acpi, spidev_acpi_ids);

-static void spidev_probe_acpi(struct spi_device *spi)
-{
- const struct acpi_device_id *id;
-
- if (!has_acpi_companion(&spi->dev))
- return;
-
- id = acpi_match_device(spidev_acpi_ids, &spi->dev);
- if (WARN_ON(!id))
- return;
-
- if (id->driver_data == SPIDEV_ACPI_DUMMY)
- dev_warn(&spi->dev, "do not use this driver in production systems!\n");
-}
-#else
-static inline void spidev_probe_acpi(struct spi_device *spi) {}
-#endif
-
/*-------------------------------------------------------------------------*/

static int spidev_probe(struct spi_device *spi)
{
+ int (*match)(struct device *dev);
struct spidev_data *spidev;
int status;
unsigned long minor;

- /*
- * spidev should never be referenced in DT without a specific
- * compatible string, it is a Linux implementation thing
- * rather than a description of the hardware.
- */
- if (spi->dev.of_node && of_device_is_compatible(spi->dev.of_node, "spidev")) {
- dev_err(&spi->dev, "spidev listed directly in DT is not supported\n");
- return -EINVAL;
+ match = device_get_match_data(&spi->dev);
+ if (match) {
+ status = match(&spi->dev);
+ if (status)
+ return status;
}

- spidev_probe_acpi(spi);
-
/* Allocate driver data */
spidev = kzalloc(sizeof(*spidev), GFP_KERNEL);
if (!spidev)
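
device_get_match_data() returns the .data cookie of the OF entry, or the .driver_data of the ACPI entry, that matched the device, which is what lets a single probe path absorb both #ifdef blocks: each table entry carries a check callback and probe just invokes whatever it is handed. Reduced to a skeleton with hypothetical names:

	static int foo_check(struct device *dev)
	{
		dev_warn(dev, "bound via a generic compatible\n");
		return 0;
	}

	static const struct of_device_id foo_of_ids[] = {
		{ .compatible = "vendor,foo", .data = &foo_check },
		{ }
	};

	static int foo_probe(struct spi_device *spi)
	{
		int (*match)(struct device *dev);

		/* NULL when matched by spi_device_id rather than OF/ACPI */
		match = device_get_match_data(&spi->dev);
		return match ? match(&spi->dev) : 0;
	}
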
@@ -832,8 +825,8 @@ static void spidev_remove(struct spi_device *spi)
static struct spi_driver spidev_spi_driver = {
.driver = {
.name = "spidev",
- .of_match_table = of_match_ptr(spidev_dt_ids),
- .acpi_match_table = ACPI_PTR(spidev_acpi_ids),
+ .of_match_table = spidev_dt_ids,
+ .acpi_match_table = spidev_acpi_ids,
},
.probe = spidev_probe,
.remove = spidev_remove,
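
Making the tables unconditional is what allows of_match_ptr() and ACPI_PTR() to go: those wrappers collapse to NULL when CONFIG_OF or CONFIG_ACPI is disabled, which would now leave the always-built tables unreferenced and provoke defined-but-unused warnings, so they are wired up directly instead.
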
@@ -856,7 +849,6 @@ static int __init spidev_init(void)
* that will key udev/mdev to add/remove /dev nodes. Last, register
* the driver which manages those device numbers.
*/
- BUILD_BUG_ON(N_SPI_MINORS > 256);
status = register_chrdev(SPIDEV_MAJOR, "spi", &spidev_fops);
if (status < 0)
return status;