author    | Linus Torvalds | 2019-11-25 11:55:30 -0800
committer | Linus Torvalds | 2019-11-25 11:55:30 -0800
commit    | 1b88176b9c72fb4edd5920969aef94c5cd358337 (patch)
tree      | 5f37e6afefb80731911d1d5d6cfa471e802dba18 /drivers
parent    | eeee2827ae75ca58a6965e1b6d208576a5a01920 (diff)
parent    | 589e1b6c47ce72fcae103c2e45d899610c92c11e (diff)
Merge tag 'mtd/for-5.5' of git://git.kernel.org/pub/scm/linux/kernel/git/mtd/linux
Pull MTD updates from Miquel Raynal:
"MTD core:
- drop inactive maintainers, update the repositories and add IRC
channel
- debugfs function improvements
- initialize more structure parameters
- misc fixes reported by robots
MTD devices:
- spear_smi: fix Write Burst mode
- new Intel IXP4xx flash probing hook
Raw NAND core:
- useless extra checks dropped
- update the detection of the bad block marker position
Raw NAND controller drivers:
- Cadence: new driver
- Brcmnand: support for flash-dma v0 + fixes
- Denali: drop support for the legacy controller/chip DT representation
- superfluous dev_err() calls removed
SPI NOR core changes:
- introduce 'struct spi_nor_controller_ops' (see the sketch after this message)
- clean the Register Operations methods
- use dev_dbg() instead of dev_err() for low-level info
- fix retlen handling in sst_write()
- fix silent truncations in spi_nor_read() and spi_nor_read_raw()
- fix the clearing of QE bit on lock()/unlock()
- rework the disabling of the block write protection
- rework the Quad Enable methods
- make sure nor->spimem and nor->controller_ops are mutually exclusive
- set default Quad Enable method for ISSI flashes
- add support for a few flashes
SPI NOR controller drivers changes:
- intel-spi:
- support chips without software sequencer
- add support for Intel Cannon Lake and Intel Comet Lake-H flashes
CFI core changes:
- code cleanups related to useless initializers and coding style issues
- fix for a possible double free problem in cfi_cmdset_0002
- improved HyperFlash error reporting and handling in cfi_cmdset_0002 core"
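The SPI NOR rework summarized above funnels every register and data access through a single table of function pointers, and then requires that a flash be driven either through the spi-mem layer or through that table, never both. Here is a minimal, self-contained C sketch of the pattern, written as a user-space program; nor_flash, nor_controller_ops and the dummy back end are illustrative stand-ins, not the kernel's actual struct spi_nor definitions.

#include <stddef.h>
#include <stdio.h>

struct spi_mem;	/* opaque stand-in for a SPI-MEM backed flash */

/* Hypothetical ops table mirroring the idea of spi_nor_controller_ops:
 * every register access goes through one struct of function pointers. */
struct nor_controller_ops {
	int (*read_reg)(void *priv, unsigned char opcode,
			unsigned char *buf, size_t len);
	int (*write_reg)(void *priv, unsigned char opcode,
			 const unsigned char *buf, size_t len);
};

struct nor_flash {
	struct spi_mem *spimem;			/* one back end ... */
	const struct nor_controller_ops *ops;	/* ... or the other */
	void *priv;
};

/* The rework makes the back ends mutually exclusive: reject a flash
 * that sets both pointers, or neither. */
static int nor_check_backend(const struct nor_flash *nor)
{
	return (!!nor->spimem == !!nor->ops) ? -1 : 0;
}

static int dummy_read_reg(void *priv, unsigned char opcode,
			  unsigned char *buf, size_t len)
{
	(void)priv; (void)opcode;
	while (len--)
		*buf++ = 0;	/* pretend the register reads back zero */
	return 0;
}

static const struct nor_controller_ops dummy_ops = {
	.read_reg = dummy_read_reg,
};

int main(void)
{
	struct nor_flash nor = { .spimem = NULL, .ops = &dummy_ops };

	printf("back end check: %s\n",
	       nor_check_backend(&nor) ? "invalid" : "ok");
	return 0;
}

The point of a registration-time check like this is that the core can then commit to a single dispatch path per flash instead of testing both pointers at every call site.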
* tag 'mtd/for-5.5' of git://git.kernel.org/pub/scm/linux/kernel/git/mtd/linux: (73 commits)
mtd: devices: fix mchp23k256 read and write
mtd: no need to check return value of debugfs_create functions (see the sketch after the diffstat)
mtd: spi-nor: Set default Quad Enable method for ISSI flashes
mtd: spi-nor: Add support for is25wp256
mtd: spi-nor: Add support for w25q256jw
mtd: spi-nor: Move condition to avoid a NULL check
mtd: spi-nor: Make sure nor->spimem and nor->controller_ops are mutually exclusive
mtd: spi-nor: Rename Quad Enable methods
mtd: spi-nor: Merge spansion Quad Enable methods
mtd: spi-nor: Rename CR_QUAD_EN_SPAN to SR2_QUAD_EN_BIT1
mtd: spi-nor: Extend the SR Read Back test
mtd: spi-nor: Rework the disabling of block write protection
mtd: spi-nor: Fix clearing of QE bit on lock()/unlock()
mtd: cfi_cmdset_0002: fix delayed error detection on HyperFlash
mtd: cfi_cmdset_0002: only check errors when ready in cfi_check_err_status()
mtd: cfi_cmdset_0002: don't free cfi->cfiq in error path of cfi_amdstd_setup()
mtd: cfi_cmdset_*: kill useless 'ret' variable initializers
mtd: cfi_util: use DIV_ROUND_UP() in cfi_udelay() (see the sketch after this list)
mtd: spi-nor: Print debug message when the read back test fails
mtd: spi-nor: Check all the bits written, not just the BP ones
...
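The cfi_udelay() cleanup flagged above replaces the open-coded round-up msleep((us + 999) / 1000) with the named macro. The equivalence is easy to check in user space; the macro body below matches the kernel's DIV_ROUND_UP() definition from linux/kernel.h, and the sample values are arbitrary.

#include <stdio.h>

/* Same definition as the kernel's DIV_ROUND_UP() */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* cfi_udelay() only takes the msleep() path for us >= 1000 */
	int samples[] = { 1000, 1001, 1999, 2500 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		int us = samples[i];

		/* DIV_ROUND_UP(us, 1000) computes exactly (us + 999) / 1000 */
		printf("us=%4d -> msleep(%d)\n", us, DIV_ROUND_UP(us, 1000));
	}
	return 0;
}

For any us, DIV_ROUND_UP(us, 1000) and (us + 999) / 1000 expand to the same arithmetic, so the patch is purely a readability change.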
Diffstat (limited to 'drivers')
43 files changed, 4401 insertions, 986 deletions
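Several hunks below (mtdcore.c and mtdswap.c in particular) rely on the debugfs_create_*() helpers being safe to call without checking their return values: on failure they return an ERR_PTR, which later debugfs calls accept and handle internally. A minimal sketch of the resulting idiom, assuming a hypothetical my_dev device and my_stats_fops file operations:

#include <linux/debugfs.h>
#include <linux/fs.h>

struct my_dev { int dummy; };			/* hypothetical device */
static const struct file_operations my_stats_fops; /* hypothetical fops, elided */

static void my_dev_debugfs_populate(struct my_dev *dev, struct dentry *parent)
{
	struct dentry *root;

	root = debugfs_create_dir("my_dev", parent);

	/* No IS_ERR_OR_NULL(root) check and no saved per-file dentry:
	 * debugfs copes with an error-valued parent internally, so the
	 * error-handling boilerplate the diff removes buys nothing. */
	debugfs_create_file("stats", 0400, root, dev, &my_stats_fops);
}

This mirrors the shape mtd_debugfs_populate() takes after the patch: create the directory, create the files, and let debugfs itself worry about failures.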
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c index 79a53cb8507b..00a79489067c 100644 --- a/drivers/mtd/chips/cfi_cmdset_0001.c +++ b/drivers/mtd/chips/cfi_cmdset_0001.c @@ -1353,7 +1353,7 @@ static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t a { unsigned long cmd_addr; struct cfi_private *cfi = map->fldrv_priv; - int ret = 0; + int ret; adr += chip->start; @@ -1383,7 +1383,7 @@ static int cfi_intelext_point(struct mtd_info *mtd, loff_t from, size_t len, struct cfi_private *cfi = map->fldrv_priv; unsigned long ofs, last_end = 0; int chipnum; - int ret = 0; + int ret; if (!map->virt) return -EINVAL; @@ -1550,7 +1550,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, { struct cfi_private *cfi = map->fldrv_priv; map_word status, write_cmd; - int ret=0; + int ret; adr += chip->start; @@ -1624,7 +1624,7 @@ static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t le { struct map_info *map = mtd->priv; struct cfi_private *cfi = map->fldrv_priv; - int ret = 0; + int ret; int chipnum; unsigned long ofs; @@ -1871,7 +1871,7 @@ static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs, struct map_info *map = mtd->priv; struct cfi_private *cfi = map->fldrv_priv; int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize; - int ret = 0; + int ret; int chipnum; unsigned long ofs, vec_seek, i; size_t len = 0; diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c index cf8c8be40a9c..04b383bc3947 100644 --- a/drivers/mtd/chips/cfi_cmdset_0002.c +++ b/drivers/mtd/chips/cfi_cmdset_0002.c @@ -123,19 +123,23 @@ static int cfi_use_status_reg(struct cfi_private *cfi) (extp->SoftwareFeatures & poll_mask) == CFI_POLL_STATUS_REG; } -static void cfi_check_err_status(struct map_info *map, struct flchip *chip, - unsigned long adr) +static int cfi_check_err_status(struct map_info *map, struct flchip *chip, + unsigned long adr) { struct cfi_private *cfi = map->fldrv_priv; map_word status; if (!cfi_use_status_reg(cfi)) - return; + return 0; cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); status = map_read(map, adr); + /* The error bits are invalid while the chip's busy */ + if (!map_word_bitsset(map, status, CMD(CFI_SR_DRB))) + return 0; + if (map_word_bitsset(map, status, CMD(0x3a))) { unsigned long chipstatus = MERGESTATUS(status); @@ -151,7 +155,12 @@ static void cfi_check_err_status(struct map_info *map, struct flchip *chip, if (chipstatus & CFI_SR_SLSB) pr_err("%s sector write protected, status %lx\n", map->name, chipstatus); + + /* Erase/Program status bits are set on the operation failure */ + if (chipstatus & (CFI_SR_ESB | CFI_SR_PSB)) + return 1; } + return 0; } /* #define DEBUG_CFI_FEATURES */ @@ -785,7 +794,6 @@ static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd) kfree(mtd->eraseregions); kfree(mtd); kfree(cfi->cmdset_priv); - kfree(cfi->cfiq); return NULL; } @@ -848,20 +856,16 @@ static int __xipram chip_good(struct map_info *map, struct flchip *chip, if (cfi_use_status_reg(cfi)) { map_word ready = CMD(CFI_SR_DRB); - map_word err = CMD(CFI_SR_PSB | CFI_SR_ESB); + /* * For chips that support status register, check device - * ready bit and Erase/Program status bit to know if - * operation succeeded. 
+ * ready bit */ cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); curd = map_read(map, addr); - if (map_word_andequal(map, curd, ready, ready)) - return !map_word_bitsset(map, curd, err); - - return 0; + return map_word_andequal(map, curd, ready, ready); } oldd = map_read(map, addr); @@ -1699,8 +1703,11 @@ static int __xipram do_write_oneword_once(struct map_info *map, break; } - if (chip_good(map, chip, adr, datum)) + if (chip_good(map, chip, adr, datum)) { + if (cfi_check_err_status(map, chip, adr)) + ret = -EIO; break; + } /* Latency issues. Drop the lock, wait a while and retry */ UDELAY(map, chip, adr, 1); @@ -1713,7 +1720,7 @@ static int __xipram do_write_oneword_start(struct map_info *map, struct flchip *chip, unsigned long adr, int mode) { - int ret = 0; + int ret; mutex_lock(&chip->mutex); @@ -1773,7 +1780,6 @@ static int __xipram do_write_oneword_retry(struct map_info *map, ret = do_write_oneword_once(map, chip, adr, datum, mode, cfi); if (ret) { /* reset on all failures. */ - cfi_check_err_status(map, chip, adr); map_write(map, CMD(0xF0), chip->start); /* FIXME - should have reset delay before continuing */ @@ -1791,7 +1797,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, map_word datum, int mode) { - int ret = 0; + int ret; adr += chip->start; @@ -1815,7 +1821,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len, { struct map_info *map = mtd->priv; struct cfi_private *cfi = map->fldrv_priv; - int ret = 0; + int ret; int chipnum; unsigned long ofs, chipstart; DECLARE_WAITQUEUE(wait, current); @@ -1970,12 +1976,17 @@ static int __xipram do_write_buffer_wait(struct map_info *map, */ if (time_after(jiffies, timeo) && !chip_good(map, chip, adr, datum)) { + pr_err("MTD %s(): software timeout, address:0x%.8lx.\n", + __func__, adr); ret = -EIO; break; } - if (chip_good(map, chip, adr, datum)) + if (chip_good(map, chip, adr, datum)) { + if (cfi_check_err_status(map, chip, adr)) + ret = -EIO; break; + } /* Latency issues. Drop the lock, wait a while and retry */ UDELAY(map, chip, adr, 1); @@ -2014,7 +2025,7 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip, int len) { struct cfi_private *cfi = map->fldrv_priv; - int ret = -EIO; + int ret; unsigned long cmd_adr; int z, words; map_word datum; @@ -2071,12 +2082,8 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip, chip->word_write_time); ret = do_write_buffer_wait(map, chip, adr, datum); - if (ret) { - cfi_check_err_status(map, chip, adr); + if (ret) do_write_buffer_reset(map, chip, cfi); - pr_err("MTD %s(): software timeout, address:0x%.8lx.\n", - __func__, adr); - } xip_enable(map, chip, adr); @@ -2095,7 +2102,7 @@ static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len, struct map_info *map = mtd->priv; struct cfi_private *cfi = map->fldrv_priv; int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize; - int ret = 0; + int ret; int chipnum; unsigned long ofs; @@ -2232,7 +2239,7 @@ static int do_panic_write_oneword(struct map_info *map, struct flchip *chip, struct cfi_private *cfi = map->fldrv_priv; int retry_cnt = 0; map_word oldd; - int ret = 0; + int ret; int i; adr += chip->start; @@ -2271,9 +2278,9 @@ retry: udelay(1); } - if (!chip_good(map, chip, adr, datum)) { + if (!chip_good(map, chip, adr, datum) || + cfi_check_err_status(map, chip, adr)) { /* reset on all failures. 
*/ - cfi_check_err_status(map, chip, adr); map_write(map, CMD(0xF0), chip->start); /* FIXME - should have reset delay before continuing */ @@ -2307,7 +2314,7 @@ static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, struct map_info *map = mtd->priv; struct cfi_private *cfi = map->fldrv_priv; unsigned long ofs, chipstart; - int ret = 0; + int ret; int chipnum; chipnum = to >> cfi->chipshift; @@ -2411,7 +2418,7 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip) unsigned long timeo = jiffies + HZ; unsigned long int adr; DECLARE_WAITQUEUE(wait, current); - int ret = 0; + int ret; int retry_cnt = 0; adr = cfi->addr_unlock1; @@ -2467,8 +2474,11 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip) chip->erase_suspended = 0; } - if (chip_good(map, chip, adr, map_word_ff(map))) + if (chip_good(map, chip, adr, map_word_ff(map))) { + if (cfi_check_err_status(map, chip, adr)) + ret = -EIO; break; + } if (time_after(jiffies, timeo)) { printk(KERN_WARNING "MTD %s(): software timeout\n", @@ -2483,7 +2493,6 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip) /* Did we succeed? */ if (ret) { /* reset on all failures. */ - cfi_check_err_status(map, chip, adr); map_write(map, CMD(0xF0), chip->start); /* FIXME - should have reset delay before continuing */ @@ -2508,7 +2517,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, struct cfi_private *cfi = map->fldrv_priv; unsigned long timeo = jiffies + HZ; DECLARE_WAITQUEUE(wait, current); - int ret = 0; + int ret; int retry_cnt = 0; adr += chip->start; @@ -2564,8 +2573,11 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, chip->erase_suspended = 0; } - if (chip_good(map, chip, adr, map_word_ff(map))) + if (chip_good(map, chip, adr, map_word_ff(map))) { + if (cfi_check_err_status(map, chip, adr)) + ret = -EIO; break; + } if (time_after(jiffies, timeo)) { printk(KERN_WARNING "MTD %s(): software timeout\n", @@ -2580,7 +2592,6 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, /* Did we succeed? */ if (ret) { /* reset on all failures. 
*/ - cfi_check_err_status(map, chip, adr); map_write(map, CMD(0xF0), chip->start); /* FIXME - should have reset delay before continuing */ diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c index e752067526a5..54edae63b92d 100644 --- a/drivers/mtd/chips/cfi_cmdset_0020.c +++ b/drivers/mtd/chips/cfi_cmdset_0020.c @@ -611,7 +611,7 @@ static int cfi_staa_write_buffers (struct mtd_info *mtd, loff_t to, struct map_info *map = mtd->priv; struct cfi_private *cfi = map->fldrv_priv; int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize; - int ret = 0; + int ret; int chipnum; unsigned long ofs; @@ -895,7 +895,7 @@ static int cfi_staa_erase_varsize(struct mtd_info *mtd, { struct map_info *map = mtd->priv; struct cfi_private *cfi = map->fldrv_priv; unsigned long adr, len; - int chipnum, ret = 0; + int chipnum, ret; int i, first; struct mtd_erase_region_info *regions = mtd->eraseregions; @@ -1132,7 +1132,7 @@ static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) struct map_info *map = mtd->priv; struct cfi_private *cfi = map->fldrv_priv; unsigned long adr; - int chipnum, ret = 0; + int chipnum, ret; #ifdef DEBUG_LOCK_BITS int ofs_factor = cfi->interleave * cfi->device_type; #endif @@ -1279,7 +1279,7 @@ static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) struct map_info *map = mtd->priv; struct cfi_private *cfi = map->fldrv_priv; unsigned long adr; - int chipnum, ret = 0; + int chipnum, ret; #ifdef DEBUG_LOCK_BITS int ofs_factor = cfi->interleave * cfi->device_type; #endif diff --git a/drivers/mtd/chips/cfi_util.c b/drivers/mtd/chips/cfi_util.c index e3b266ee06af..e2d4db05aeb3 100644 --- a/drivers/mtd/chips/cfi_util.c +++ b/drivers/mtd/chips/cfi_util.c @@ -26,7 +26,7 @@ void cfi_udelay(int us) { if (us >= 1000) { - msleep((us+999)/1000); + msleep(DIV_ROUND_UP(us, 1000)); } else { udelay(us); cond_resched(); diff --git a/drivers/mtd/devices/mchp23k256.c b/drivers/mtd/devices/mchp23k256.c index b20d02b4f830..77c872fd3d83 100644 --- a/drivers/mtd/devices/mchp23k256.c +++ b/drivers/mtd/devices/mchp23k256.c @@ -64,15 +64,17 @@ static int mchp23k256_write(struct mtd_info *mtd, loff_t to, size_t len, struct spi_transfer transfer[2] = {}; struct spi_message message; unsigned char command[MAX_CMD_SIZE]; - int ret; + int ret, cmd_len; spi_message_init(&message); + cmd_len = mchp23k256_cmdsz(flash); + command[0] = MCHP23K256_CMD_WRITE; mchp23k256_addr2cmd(flash, to, command); transfer[0].tx_buf = command; - transfer[0].len = mchp23k256_cmdsz(flash); + transfer[0].len = cmd_len; spi_message_add_tail(&transfer[0], &message); transfer[1].tx_buf = buf; @@ -88,8 +90,8 @@ static int mchp23k256_write(struct mtd_info *mtd, loff_t to, size_t len, if (ret) return ret; - if (retlen && message.actual_length > sizeof(command)) - *retlen += message.actual_length - sizeof(command); + if (retlen && message.actual_length > cmd_len) + *retlen += message.actual_length - cmd_len; return 0; } @@ -101,16 +103,18 @@ static int mchp23k256_read(struct mtd_info *mtd, loff_t from, size_t len, struct spi_transfer transfer[2] = {}; struct spi_message message; unsigned char command[MAX_CMD_SIZE]; - int ret; + int ret, cmd_len; spi_message_init(&message); + cmd_len = mchp23k256_cmdsz(flash); + memset(&transfer, 0, sizeof(transfer)); command[0] = MCHP23K256_CMD_READ; mchp23k256_addr2cmd(flash, from, command); transfer[0].tx_buf = command; - transfer[0].len = mchp23k256_cmdsz(flash); + transfer[0].len = cmd_len; spi_message_add_tail(&transfer[0], &message); 
transfer[1].rx_buf = buf; @@ -126,8 +130,8 @@ static int mchp23k256_read(struct mtd_info *mtd, loff_t from, size_t len, if (ret) return ret; - if (retlen && message.actual_length > sizeof(command)) - *retlen += message.actual_length - sizeof(command); + if (retlen && message.actual_length > cmd_len) + *retlen += message.actual_length - cmd_len; return 0; } diff --git a/drivers/mtd/devices/spear_smi.c b/drivers/mtd/devices/spear_smi.c index 986f81d2f93e..79dcca16481d 100644 --- a/drivers/mtd/devices/spear_smi.c +++ b/drivers/mtd/devices/spear_smi.c @@ -592,6 +592,26 @@ static int spear_mtd_read(struct mtd_info *mtd, loff_t from, size_t len, return 0; } +/* + * The purpose of this function is to ensure a memcpy_toio() with byte writes + * only. Its structure is inspired from the ARM implementation of _memcpy_toio() + * which also does single byte writes but cannot be used here as this is just an + * implementation detail and not part of the API. Not mentioning the comment + * stating that _memcpy_toio() should be optimized. + */ +static void spear_smi_memcpy_toio_b(volatile void __iomem *dest, + const void *src, size_t len) +{ + const unsigned char *from = src; + + while (len) { + len--; + writeb(*from, dest); + from++; + dest++; + } +} + static inline int spear_smi_cpy_toio(struct spear_smi *dev, u32 bank, void __iomem *dest, const void *src, size_t len) { @@ -614,7 +634,23 @@ static inline int spear_smi_cpy_toio(struct spear_smi *dev, u32 bank, ctrlreg1 = readl(dev->io_base + SMI_CR1); writel((ctrlreg1 | WB_MODE) & ~SW_MODE, dev->io_base + SMI_CR1); - memcpy_toio(dest, src, len); + /* + * In Write Burst mode (WB_MODE), the specs states that writes must be: + * - incremental + * - of the same size + * The ARM implementation of memcpy_toio() will optimize the number of + * I/O by using as much 4-byte writes as possible, surrounded by + * 2-byte/1-byte access if: + * - the destination is not 4-byte aligned + * - the length is not a multiple of 4-byte. + * Avoid this alternance of write access size by using our own 'byte + * access' helper if at least one of the two conditions above is true. + */ + if (IS_ALIGNED(len, sizeof(u32)) && + IS_ALIGNED((uintptr_t)dest, sizeof(u32))) + memcpy_toio(dest, src, len); + else + spear_smi_memcpy_toio_b(dest, src, len); writel(ctrlreg1, dev->io_base + SMI_CR1); @@ -777,9 +813,6 @@ static int spear_smi_probe_config_dt(struct platform_device *pdev, /* Fill structs for each subnode (flash device) */ while ((pp = of_get_next_child(np, pp))) { - struct spear_smi_flash_info *flash_info; - - flash_info = &pdata->board_flash_info[i]; pdata->np[i] = pp; /* Read base-addr and size from DT */ @@ -933,7 +966,6 @@ static int spear_smi_probe(struct platform_device *pdev) irq = platform_get_irq(pdev, 0); if (irq < 0) { ret = -ENODEV; - dev_err(&pdev->dev, "invalid smi irq\n"); goto err; } diff --git a/drivers/mtd/devices/st_spi_fsm.c b/drivers/mtd/devices/st_spi_fsm.c index f4d1667daaf9..1888523d9745 100644 --- a/drivers/mtd/devices/st_spi_fsm.c +++ b/drivers/mtd/devices/st_spi_fsm.c @@ -255,7 +255,6 @@ struct stfsm_seq { struct stfsm { struct device *dev; void __iomem *base; - struct resource *region; struct mtd_info mtd; struct mutex lock; struct flash_info *info; diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig index bc82305ebb4c..b28225a7c4f3 100644 --- a/drivers/mtd/maps/Kconfig +++ b/drivers/mtd/maps/Kconfig @@ -96,6 +96,17 @@ config MTD_PHYSMAP_GEMINI platforms, some detection and setting up parallel mode on the external interface. 
+config MTD_PHYSMAP_IXP4XX + bool "Intel IXP4xx OF-based physical memory map handling" + depends on MTD_PHYSMAP_OF + depends on ARM + select MTD_COMPLEX_MAPPINGS + select MTD_CFI_BE_BYTE_SWAP if CPU_BIG_ENDIAN + default ARCH_IXP4XX + help + This provides some extra DT physmap parsing for the Intel IXP4xx + platforms, some elaborate endianness handling in particular. + config MTD_PHYSMAP_GPIO_ADDR bool "GPIO-assisted Flash Chip Support" depends on MTD_PHYSMAP diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile index 1146009f41df..c0da86a5d26f 100644 --- a/drivers/mtd/maps/Makefile +++ b/drivers/mtd/maps/Makefile @@ -20,6 +20,7 @@ obj-$(CONFIG_MTD_PXA2XX) += pxa2xx-flash.o physmap-objs-y += physmap-core.o physmap-objs-$(CONFIG_MTD_PHYSMAP_VERSATILE) += physmap-versatile.o physmap-objs-$(CONFIG_MTD_PHYSMAP_GEMINI) += physmap-gemini.o +physmap-objs-$(CONFIG_MTD_PHYSMAP_IXP4XX) += physmap-ixp4xx.o physmap-objs := $(physmap-objs-y) obj-$(CONFIG_MTD_PHYSMAP) += physmap.o obj-$(CONFIG_MTD_PISMO) += pismo.o diff --git a/drivers/mtd/maps/l440gx.c b/drivers/mtd/maps/l440gx.c index 876f12f40018..0eeadfeb620d 100644 --- a/drivers/mtd/maps/l440gx.c +++ b/drivers/mtd/maps/l440gx.c @@ -86,7 +86,7 @@ static int __init init_l440gx(void) return -ENOMEM; } simple_map_init(&l440gx_map); - printk(KERN_NOTICE "window_addr = 0x%08lx\n", (unsigned long)l440gx_map.virt); + pr_debug("window_addr = %p\n", l440gx_map.virt); /* Setup the pm iobase resource * This code should move into some kind of generic bridge diff --git a/drivers/mtd/maps/physmap-core.c b/drivers/mtd/maps/physmap-core.c index 21b556afc305..a9f7964e2edb 100644 --- a/drivers/mtd/maps/physmap-core.c +++ b/drivers/mtd/maps/physmap-core.c @@ -41,6 +41,7 @@ #include <linux/gpio/consumer.h> #include "physmap-gemini.h" +#include "physmap-ixp4xx.h" #include "physmap-versatile.h" struct physmap_flash_info { @@ -370,6 +371,10 @@ static int physmap_flash_of_init(struct platform_device *dev) if (err) return err; + err = of_flash_probe_ixp4xx(dev, dp, &info->maps[i]); + if (err) + return err; + err = of_flash_probe_versatile(dev, dp, &info->maps[i]); if (err) return err; diff --git a/drivers/mtd/maps/physmap-ixp4xx.c b/drivers/mtd/maps/physmap-ixp4xx.c new file mode 100644 index 000000000000..6a054229a8a0 --- /dev/null +++ b/drivers/mtd/maps/physmap-ixp4xx.c @@ -0,0 +1,132 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Intel IXP4xx OF physmap add-on + * Copyright (C) 2019 Linus Walleij <linus.walleij@linaro.org> + * + * Based on the ixp4xx.c map driver, originally written by: + * Intel Corporation + * Deepak Saxena <dsaxena@mvista.com> + * Copyright (C) 2002 Intel Corporation + * Copyright (C) 2003-2004 MontaVista Software, Inc. + */ +#include <linux/export.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/mtd/map.h> +#include <linux/mtd/xip.h> +#include "physmap-ixp4xx.h" + +/* + * Read/write a 16 bit word from flash address 'addr'. + * + * When the cpu is in little-endian mode it swizzles the address lines + * ('address coherency') so we need to undo the swizzling to ensure commands + * and the like end up on the correct flash address. + * + * To further complicate matters, due to the way the expansion bus controller + * handles 32 bit reads, the byte stream ABCD is stored on the flash as: + * D15 D0 + * +---+---+ + * | A | B | 0 + * +---+---+ + * | C | D | 2 + * +---+---+ + * This means that on LE systems each 16 bit word must be swapped. 
Note that + * this requires CONFIG_MTD_CFI_BE_BYTE_SWAP to be enabled to 'unswap' the CFI + * data and other flash commands which are always in D7-D0. + */ +#ifndef CONFIG_CPU_BIG_ENDIAN + +static inline u16 flash_read16(void __iomem *addr) +{ + return be16_to_cpu(__raw_readw((void __iomem *)((unsigned long)addr ^ 0x2))); +} + +static inline void flash_write16(u16 d, void __iomem *addr) +{ + __raw_writew(cpu_to_be16(d), (void __iomem *)((unsigned long)addr ^ 0x2)); +} + +#define BYTE0(h) ((h) & 0xFF) +#define BYTE1(h) (((h) >> 8) & 0xFF) + +#else + +static inline u16 flash_read16(const void __iomem *addr) +{ + return __raw_readw(addr); +} + +static inline void flash_write16(u16 d, void __iomem *addr) +{ + __raw_writew(d, addr); +} + +#define BYTE0(h) (((h) >> 8) & 0xFF) +#define BYTE1(h) ((h) & 0xFF) +#endif + +static map_word ixp4xx_read16(struct map_info *map, unsigned long ofs) +{ + map_word val; + + val.x[0] = flash_read16(map->virt + ofs); + return val; +} + +/* + * The IXP4xx expansion bus only allows 16-bit wide acceses + * when attached to a 16-bit wide device (such as the 28F128J3A), + * so we can't just memcpy_fromio(). + */ +static void ixp4xx_copy_from(struct map_info *map, void *to, + unsigned long from, ssize_t len) +{ + u8 *dest = (u8 *) to; + void __iomem *src = map->virt + from; + + if (len <= 0) + return; + + if (from & 1) { + *dest++ = BYTE1(flash_read16(src-1)); + src++; + --len; + } + + while (len >= 2) { + u16 data = flash_read16(src); + *dest++ = BYTE0(data); + *dest++ = BYTE1(data); + src += 2; + len -= 2; + } + + if (len > 0) + *dest++ = BYTE0(flash_read16(src)); +} + +static void ixp4xx_write16(struct map_info *map, map_word d, unsigned long adr) +{ + flash_write16(d.x[0], map->virt + adr); +} + +int of_flash_probe_ixp4xx(struct platform_device *pdev, + struct device_node *np, + struct map_info *map) +{ + struct device *dev = &pdev->dev; + + /* Multiplatform guard */ + if (!of_device_is_compatible(np, "intel,ixp4xx-flash")) + return 0; + + map->read = ixp4xx_read16; + map->write = ixp4xx_write16; + map->copy_from = ixp4xx_copy_from; + map->copy_to = NULL; + + dev_info(dev, "initialized Intel IXP4xx-specific physmap control\n"); + + return 0; +} diff --git a/drivers/mtd/maps/physmap-ixp4xx.h b/drivers/mtd/maps/physmap-ixp4xx.h new file mode 100644 index 000000000000..b0fc49b7f3ed --- /dev/null +++ b/drivers/mtd/maps/physmap-ixp4xx.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include <linux/of.h> +#include <linux/mtd/map.h> + +#ifdef CONFIG_MTD_PHYSMAP_IXP4XX +int of_flash_probe_ixp4xx(struct platform_device *pdev, + struct device_node *np, + struct map_info *map); +#else +static inline +int of_flash_probe_ixp4xx(struct platform_device *pdev, + struct device_node *np, + struct map_info *map) +{ + return 0; +} +#endif diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c index 975aed94f06c..b841008a9eb7 100644 --- a/drivers/mtd/mtdchar.c +++ b/drivers/mtd/mtdchar.c @@ -174,7 +174,7 @@ static ssize_t mtdchar_read(struct file *file, char __user *buf, size_t count, break; case MTD_FILE_MODE_RAW: { - struct mtd_oob_ops ops; + struct mtd_oob_ops ops = {}; ops.mode = MTD_OPS_RAW; ops.datbuf = kbuf; @@ -268,7 +268,7 @@ static ssize_t mtdchar_write(struct file *file, const char __user *buf, size_t c case MTD_FILE_MODE_RAW: { - struct mtd_oob_ops ops; + struct mtd_oob_ops ops = {}; ops.mode = MTD_OPS_RAW; ops.datbuf = kbuf; @@ -350,7 +350,7 @@ static int mtdchar_writeoob(struct file *file, struct mtd_info *mtd, uint32_t __user *retp) { struct 
mtd_file_info *mfi = file->private_data; - struct mtd_oob_ops ops; + struct mtd_oob_ops ops = {}; uint32_t retlen; int ret = 0; @@ -394,7 +394,7 @@ static int mtdchar_readoob(struct file *file, struct mtd_info *mtd, uint32_t __user *retp) { struct mtd_file_info *mfi = file->private_data; - struct mtd_oob_ops ops; + struct mtd_oob_ops ops = {}; int ret = 0; if (length > 4096) @@ -587,7 +587,7 @@ static int mtdchar_write_ioctl(struct mtd_info *mtd, struct mtd_write_req __user *argp) { struct mtd_write_req req; - struct mtd_oob_ops ops; + struct mtd_oob_ops ops = {}; const void __user *usr_data, *usr_oob; int ret; diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c index 6cc7ecb0c788..5fac4355b9c2 100644 --- a/drivers/mtd/mtdcore.c +++ b/drivers/mtd/mtdcore.c @@ -382,33 +382,21 @@ static struct dentry *dfs_dir_mtd; static void mtd_debugfs_populate(struct mtd_info *mtd) { struct device *dev = &mtd->dev; - struct dentry *root, *dent; + struct dentry *root; if (IS_ERR_OR_NULL(dfs_dir_mtd)) return; root = debugfs_create_dir(dev_name(dev), dfs_dir_mtd); - if (IS_ERR_OR_NULL(root)) { - dev_dbg(dev, "won't show data in debugfs\n"); - return; - } - mtd->dbg.dfs_dir = root; - if (mtd->dbg.partid) { - dent = debugfs_create_file("partid", 0400, root, mtd, - &mtd_partid_debug_fops); - if (IS_ERR_OR_NULL(dent)) - dev_err(dev, "can't create debugfs entry for partid\n"); - } + if (mtd->dbg.partid) + debugfs_create_file("partid", 0400, root, mtd, + &mtd_partid_debug_fops); - if (mtd->dbg.partname) { - dent = debugfs_create_file("partname", 0400, root, mtd, - &mtd_partname_debug_fops); - if (IS_ERR_OR_NULL(dent)) - dev_err(dev, - "can't create debugfs entry for partname\n"); - } + if (mtd->dbg.partname) + debugfs_create_file("partname", 0400, root, mtd, + &mtd_partname_debug_fops); } #ifndef CONFIG_MMU diff --git a/drivers/mtd/mtdswap.c b/drivers/mtd/mtdswap.c index f92414eb4c86..58eefa43af14 100644 --- a/drivers/mtd/mtdswap.c +++ b/drivers/mtd/mtdswap.c @@ -1257,7 +1257,6 @@ DEFINE_SHOW_ATTRIBUTE(mtdswap); static int mtdswap_add_debugfs(struct mtdswap_dev *d) { struct dentry *root = d->mtd->dbg.dfs_dir; - struct dentry *dent; if (!IS_ENABLED(CONFIG_DEBUG_FS)) return 0; @@ -1265,12 +1264,7 @@ static int mtdswap_add_debugfs(struct mtdswap_dev *d) if (IS_ERR_OR_NULL(root)) return -1; - dent = debugfs_create_file("mtdswap_stats", S_IRUSR, root, d, - &mtdswap_fops); - if (!dent) { - dev_err(d->dev, "debugfs_create_file failed\n"); - return -1; - } + debugfs_create_file("mtdswap_stats", S_IRUSR, root, d, &mtdswap_fops); return 0; } diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig index e59de3f60cf6..74fb91adeb46 100644 --- a/drivers/mtd/nand/raw/Kconfig +++ b/drivers/mtd/nand/raw/Kconfig @@ -450,6 +450,13 @@ config MTD_NAND_PLATFORM devices. You will need to provide platform-specific functions via platform_data. +config MTD_NAND_CADENCE + tristate "Support Cadence NAND (HPNFC) controller" + depends on OF || COMPILE_TEST + help + Enable the driver for NAND flash on platforms using a Cadence NAND + controller. 
+ comment "Misc" config MTD_SM_COMMON diff --git a/drivers/mtd/nand/raw/Makefile b/drivers/mtd/nand/raw/Makefile index a98721988e61..2d136b158fb7 100644 --- a/drivers/mtd/nand/raw/Makefile +++ b/drivers/mtd/nand/raw/Makefile @@ -57,6 +57,7 @@ obj-$(CONFIG_MTD_NAND_MXIC) += mxic_nand.o obj-$(CONFIG_MTD_NAND_TEGRA) += tegra_nand.o obj-$(CONFIG_MTD_NAND_STM32_FMC2) += stm32_fmc2_nand.o obj-$(CONFIG_MTD_NAND_MESON) += meson_nand.o +obj-$(CONFIG_MTD_NAND_CADENCE) += cadence-nand-controller.o nand-objs := nand_base.o nand_legacy.o nand_bbt.o nand_timings.o nand_ids.o nand-objs += nand_onfi.o diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c index 15ef30b368a5..1a66b1cd51c0 100644 --- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c +++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c @@ -117,6 +117,18 @@ enum flash_dma_reg { FLASH_DMA_CURRENT_DESC_EXT, }; +/* flash_dma registers v0*/ +static const u16 flash_dma_regs_v0[] = { + [FLASH_DMA_REVISION] = 0x00, + [FLASH_DMA_FIRST_DESC] = 0x04, + [FLASH_DMA_CTRL] = 0x08, + [FLASH_DMA_MODE] = 0x0c, + [FLASH_DMA_STATUS] = 0x10, + [FLASH_DMA_INTERRUPT_DESC] = 0x14, + [FLASH_DMA_ERROR_STATUS] = 0x18, + [FLASH_DMA_CURRENT_DESC] = 0x1c, +}; + /* flash_dma registers v1*/ static const u16 flash_dma_regs_v1[] = { [FLASH_DMA_REVISION] = 0x00, @@ -597,6 +609,8 @@ static void brcmnand_flash_dma_revision_init(struct brcmnand_controller *ctrl) /* flash_dma register offsets */ if (ctrl->nand_version >= 0x0703) ctrl->flash_dma_offsets = flash_dma_regs_v4; + else if (ctrl->nand_version == 0x0602) + ctrl->flash_dma_offsets = flash_dma_regs_v0; else ctrl->flash_dma_offsets = flash_dma_regs_v1; } @@ -918,7 +932,7 @@ static inline void disable_ctrl_irqs(struct brcmnand_controller *ctrl) return; if (has_flash_dma(ctrl)) { - ctrl->flash_dma_base = 0; + ctrl->flash_dma_base = NULL; disable_irq(ctrl->dma_irq); } @@ -1673,8 +1687,11 @@ static void brcmnand_dma_run(struct brcmnand_host *host, dma_addr_t desc) flash_dma_writel(ctrl, FLASH_DMA_FIRST_DESC, lower_32_bits(desc)); (void)flash_dma_readl(ctrl, FLASH_DMA_FIRST_DESC); - flash_dma_writel(ctrl, FLASH_DMA_FIRST_DESC_EXT, upper_32_bits(desc)); - (void)flash_dma_readl(ctrl, FLASH_DMA_FIRST_DESC_EXT); + if (ctrl->nand_version > 0x0602) { + flash_dma_writel(ctrl, FLASH_DMA_FIRST_DESC_EXT, + upper_32_bits(desc)); + (void)flash_dma_readl(ctrl, FLASH_DMA_FIRST_DESC_EXT); + } /* Start FLASH_DMA engine */ ctrl->dma_pending = true; diff --git a/drivers/mtd/nand/raw/cadence-nand-controller.c b/drivers/mtd/nand/raw/cadence-nand-controller.c new file mode 100644 index 000000000000..3a36285a8d8a --- /dev/null +++ b/drivers/mtd/nand/raw/cadence-nand-controller.c @@ -0,0 +1,3030 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Cadence NAND flash controller driver + * + * Copyright (C) 2019 Cadence + * + * Author: Piotr Sroka <piotrs@cadence.com> + */ + +#include <linux/bitfield.h> +#include <linux/clk.h> +#include <linux/dma-mapping.h> +#include <linux/dmaengine.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/mtd/mtd.h> +#include <linux/mtd/rawnand.h> +#include <linux/of_device.h> +#include <linux/iopoll.h> + +/* + * HPNFC can work in 3 modes: + * - PIO - can work in master or slave DMA + * - CDMA - needs Master DMA for accessing command descriptors. + * - Generic mode - can use only slave DMA. + * CDMA and PIO modes can be used to execute only base commands. + * Generic mode can be used to execute any command + * on NAND flash memory. 
Driver uses CDMA mode for + * block erasing, page reading, page programing. + * Generic mode is used for executing rest of commands. + */ + +#define MAX_OOB_SIZE_PER_SECTOR 32 +#define MAX_ADDRESS_CYC 6 +#define MAX_ERASE_ADDRESS_CYC 3 +#define MAX_DATA_SIZE 0xFFFC +#define DMA_DATA_SIZE_ALIGN 8 + +/* Register definition. */ +/* + * Command register 0. + * Writing data to this register will initiate a new transaction + * of the NF controller. + */ +#define CMD_REG0 0x0000 +/* Command type field mask. */ +#define CMD_REG0_CT GENMASK(31, 30) +/* Command type CDMA. */ +#define CMD_REG0_CT_CDMA 0uL +/* Command type generic. */ +#define CMD_REG0_CT_GEN 3uL +/* Command thread number field mask. */ +#define CMD_REG0_TN GENMASK(27, 24) + +/* Command register 2. */ +#define CMD_REG2 0x0008 +/* Command register 3. */ +#define CMD_REG3 0x000C +/* Pointer register to select which thread status will be selected. */ +#define CMD_STATUS_PTR 0x0010 +/* Command status register for selected thread. */ +#define CMD_STATUS 0x0014 + +/* Interrupt status register. */ +#define INTR_STATUS 0x0110 +#define INTR_STATUS_SDMA_ERR BIT(22) +#define INTR_STATUS_SDMA_TRIGG BIT(21) +#define INTR_STATUS_UNSUPP_CMD BIT(19) +#define INTR_STATUS_DDMA_TERR BIT(18) +#define INTR_STATUS_CDMA_TERR BIT(17) +#define INTR_STATUS_CDMA_IDL BIT(16) + +/* Interrupt enable register. */ +#define INTR_ENABLE 0x0114 +#define INTR_ENABLE_INTR_EN BIT(31) +#define INTR_ENABLE_SDMA_ERR_EN BIT(22) +#define INTR_ENABLE_SDMA_TRIGG_EN BIT(21) +#define INTR_ENABLE_UNSUPP_CMD_EN BIT(19) +#define INTR_ENABLE_DDMA_TERR_EN BIT(18) +#define INTR_ENABLE_CDMA_TERR_EN BIT(17) +#define INTR_ENABLE_CDMA_IDLE_EN BIT(16) + +/* Controller internal state. */ +#define CTRL_STATUS 0x0118 +#define CTRL_STATUS_INIT_COMP BIT(9) +#define CTRL_STATUS_CTRL_BUSY BIT(8) + +/* Command Engine threads state. */ +#define TRD_STATUS 0x0120 + +/* Command Engine interrupt thread error status. */ +#define TRD_ERR_INT_STATUS 0x0128 +/* Command Engine interrupt thread error enable. */ +#define TRD_ERR_INT_STATUS_EN 0x0130 +/* Command Engine interrupt thread complete status. */ +#define TRD_COMP_INT_STATUS 0x0138 + +/* + * Transfer config 0 register. + * Configures data transfer parameters. + */ +#define TRAN_CFG_0 0x0400 +/* Offset value from the beginning of the page. */ +#define TRAN_CFG_0_OFFSET GENMASK(31, 16) +/* Numbers of sectors to transfer within singlNF device's page. */ +#define TRAN_CFG_0_SEC_CNT GENMASK(7, 0) + +/* + * Transfer config 1 register. + * Configures data transfer parameters. + */ +#define TRAN_CFG_1 0x0404 +/* Size of last data sector. */ +#define TRAN_CFG_1_LAST_SEC_SIZE GENMASK(31, 16) +/* Size of not-last data sector. */ +#define TRAN_CFG_1_SECTOR_SIZE GENMASK(15, 0) + +/* ECC engine configuration register 0. */ +#define ECC_CONFIG_0 0x0428 +/* Correction strength. */ +#define ECC_CONFIG_0_CORR_STR GENMASK(10, 8) +/* Enable erased pages detection mechanism. */ +#define ECC_CONFIG_0_ERASE_DET_EN BIT(1) +/* Enable controller ECC check bits generation and correction. */ +#define ECC_CONFIG_0_ECC_EN BIT(0) + +/* ECC engine configuration register 1. */ +#define ECC_CONFIG_1 0x042C + +/* Multiplane settings register. */ +#define MULTIPLANE_CFG 0x0434 +/* Cache operation settings. */ +#define CACHE_CFG 0x0438 + +/* DMA settings register. */ +#define DMA_SETINGS 0x043C +/* Enable SDMA error report on access unprepared slave DMA interface. */ +#define DMA_SETINGS_SDMA_ERR_RSP BIT(17) + +/* Transferred data block size for the slave DMA module. 
*/ +#define SDMA_SIZE 0x0440 + +/* Thread number associated with transferred data block + * for the slave DMA module. + */ +#define SDMA_TRD_NUM 0x0444 +/* Thread number mask. */ +#define SDMA_TRD_NUM_SDMA_TRD GENMASK(2, 0) + +#define CONTROL_DATA_CTRL 0x0494 +/* Thread number mask. */ +#define CONTROL_DATA_CTRL_SIZE GENMASK(15, 0) + +#define CTRL_VERSION 0x800 +#define CTRL_VERSION_REV GENMASK(7, 0) + +/* Available hardware features of the controller. */ +#define CTRL_FEATURES 0x804 +/* Support for NV-DDR2/3 work mode. */ +#define CTRL_FEATURES_NVDDR_2_3 BIT(28) +/* Support for NV-DDR work mode. */ +#define CTRL_FEATURES_NVDDR BIT(27) +/* Support for asynchronous work mode. */ +#define CTRL_FEATURES_ASYNC BIT(26) +/* Support for asynchronous work mode. */ +#define CTRL_FEATURES_N_BANKS GENMASK(25, 24) +/* Slave and Master DMA data width. */ +#define CTRL_FEATURES_DMA_DWITH64 BIT(21) +/* Availability of Control Data feature.*/ +#define CTRL_FEATURES_CONTROL_DATA BIT(10) + +/* BCH Engine identification register 0 - correction strengths. */ +#define BCH_CFG_0 0x838 +#define BCH_CFG_0_CORR_CAP_0 GENMASK(7, 0) +#define BCH_CFG_0_CORR_CAP_1 GENMASK(15, 8) +#define BCH_CFG_0_CORR_CAP_2 GENMASK(23, 16) +#define BCH_CFG_0_CORR_CAP_3 GENMASK(31, 24) + +/* BCH Engine identification register 1 - correction strengths. */ +#define BCH_CFG_1 0x83C +#define BCH_CFG_1_CORR_CAP_4 GENMASK(7, 0) +#define BCH_CFG_1_CORR_CAP_5 GENMASK(15, 8) +#define BCH_CFG_1_CORR_CAP_6 GENMASK(23, 16) +#define BCH_CFG_1_CORR_CAP_7 GENMASK(31, 24) + +/* BCH Engine identification register 2 - sector sizes. */ +#define BCH_CFG_2 0x840 +#define BCH_CFG_2_SECT_0 GENMASK(15, 0) +#define BCH_CFG_2_SECT_1 GENMASK(31, 16) + +/* BCH Engine identification register 3. */ +#define BCH_CFG_3 0x844 + +/* Ready/Busy# line status. */ +#define RBN_SETINGS 0x1004 + +/* Common settings. */ +#define COMMON_SET 0x1008 +/* 16 bit device connected to the NAND Flash interface. */ +#define COMMON_SET_DEVICE_16BIT BIT(8) + +/* Skip_bytes registers. */ +#define SKIP_BYTES_CONF 0x100C +#define SKIP_BYTES_MARKER_VALUE GENMASK(31, 16) +#define SKIP_BYTES_NUM_OF_BYTES GENMASK(7, 0) + +#define SKIP_BYTES_OFFSET 0x1010 +#define SKIP_BYTES_OFFSET_VALUE GENMASK(23, 0) + +/* Timings configuration. */ +#define ASYNC_TOGGLE_TIMINGS 0x101c +#define ASYNC_TOGGLE_TIMINGS_TRH GENMASK(28, 24) +#define ASYNC_TOGGLE_TIMINGS_TRP GENMASK(20, 16) +#define ASYNC_TOGGLE_TIMINGS_TWH GENMASK(12, 8) +#define ASYNC_TOGGLE_TIMINGS_TWP GENMASK(4, 0) + +#define TIMINGS0 0x1024 +#define TIMINGS0_TADL GENMASK(31, 24) +#define TIMINGS0_TCCS GENMASK(23, 16) +#define TIMINGS0_TWHR GENMASK(15, 8) +#define TIMINGS0_TRHW GENMASK(7, 0) + +#define TIMINGS1 0x1028 +#define TIMINGS1_TRHZ GENMASK(31, 24) +#define TIMINGS1_TWB GENMASK(23, 16) +#define TIMINGS1_TVDLY GENMASK(7, 0) + +#define TIMINGS2 0x102c +#define TIMINGS2_TFEAT GENMASK(25, 16) +#define TIMINGS2_CS_HOLD_TIME GENMASK(13, 8) +#define TIMINGS2_CS_SETUP_TIME GENMASK(5, 0) + +/* Configuration of the resynchronization of slave DLL of PHY. */ +#define DLL_PHY_CTRL 0x1034 +#define DLL_PHY_CTRL_DLL_RST_N BIT(24) +#define DLL_PHY_CTRL_EXTENDED_WR_MODE BIT(17) +#define DLL_PHY_CTRL_EXTENDED_RD_MODE BIT(16) +#define DLL_PHY_CTRL_RS_HIGH_WAIT_CNT GENMASK(11, 8) +#define DLL_PHY_CTRL_RS_IDLE_CNT GENMASK(7, 0) + +/* Register controlling DQ related timing. */ +#define PHY_DQ_TIMING 0x2000 +/* Register controlling DSQ related timing. 
*/ +#define PHY_DQS_TIMING 0x2004 +#define PHY_DQS_TIMING_DQS_SEL_OE_END GENMASK(3, 0) +#define PHY_DQS_TIMING_PHONY_DQS_SEL BIT(16) +#define PHY_DQS_TIMING_USE_PHONY_DQS BIT(20) + +/* Register controlling the gate and loopback control related timing. */ +#define PHY_GATE_LPBK_CTRL 0x2008 +#define PHY_GATE_LPBK_CTRL_RDS GENMASK(24, 19) + +/* Register holds the control for the master DLL logic. */ +#define PHY_DLL_MASTER_CTRL 0x200C +#define PHY_DLL_MASTER_CTRL_BYPASS_MODE BIT(23) + +/* Register holds the control for the slave DLL logic. */ +#define PHY_DLL_SLAVE_CTRL 0x2010 + +/* This register handles the global control settings for the PHY. */ +#define PHY_CTRL 0x2080 +#define PHY_CTRL_SDR_DQS BIT(14) +#define PHY_CTRL_PHONY_DQS GENMASK(9, 4) + +/* + * This register handles the global control settings + * for the termination selects for reads. + */ +#define PHY_TSEL 0x2084 + +/* Generic command layout. */ +#define GCMD_LAY_CS GENMASK_ULL(11, 8) +/* + * This bit informs the minicotroller if it has to wait for tWB + * after sending the last CMD/ADDR/DATA in the sequence. + */ +#define GCMD_LAY_TWB BIT_ULL(6) +/* Type of generic instruction. */ +#define GCMD_LAY_INSTR GENMASK_ULL(5, 0) + +/* Generic CMD sequence type. */ +#define GCMD_LAY_INSTR_CMD 0 +/* Generic ADDR sequence type. */ +#define GCMD_LAY_INSTR_ADDR 1 +/* Generic data transfer sequence type. */ +#define GCMD_LAY_INSTR_DATA 2 + +/* Input part of generic command type of input is command. */ +#define GCMD_LAY_INPUT_CMD GENMASK_ULL(23, 16) + +/* Generic command address sequence - address fields. */ +#define GCMD_LAY_INPUT_ADDR GENMASK_ULL(63, 16) +/* Generic command address sequence - address size. */ +#define GCMD_LAY_INPUT_ADDR_SIZE GENMASK_ULL(13, 11) + +/* Transfer direction field of generic command data sequence. */ +#define GCMD_DIR BIT_ULL(11) +/* Read transfer direction of generic command data sequence. */ +#define GCMD_DIR_READ 0 +/* Write transfer direction of generic command data sequence. */ +#define GCMD_DIR_WRITE 1 + +/* ECC enabled flag of generic command data sequence - ECC enabled. */ +#define GCMD_ECC_EN BIT_ULL(12) +/* Generic command data sequence - sector size. */ +#define GCMD_SECT_SIZE GENMASK_ULL(31, 16) +/* Generic command data sequence - sector count. */ +#define GCMD_SECT_CNT GENMASK_ULL(39, 32) +/* Generic command data sequence - last sector size. */ +#define GCMD_LAST_SIZE GENMASK_ULL(55, 40) + +/* CDMA descriptor fields. */ +/* Erase command type of CDMA descriptor. */ +#define CDMA_CT_ERASE 0x1000 +/* Program page command type of CDMA descriptor. */ +#define CDMA_CT_WR 0x2100 +/* Read page command type of CDMA descriptor. */ +#define CDMA_CT_RD 0x2200 + +/* Flash pointer memory shift. */ +#define CDMA_CFPTR_MEM_SHIFT 24 +/* Flash pointer memory mask. */ +#define CDMA_CFPTR_MEM GENMASK(26, 24) + +/* + * Command DMA descriptor flags. If set causes issue interrupt after + * the completion of descriptor processing. + */ +#define CDMA_CF_INT BIT(8) +/* + * Command DMA descriptor flags - the next descriptor + * address field is valid and descriptor processing should continue. + */ +#define CDMA_CF_CONT BIT(9) +/* DMA master flag of command DMA descriptor. */ +#define CDMA_CF_DMA_MASTER BIT(10) + +/* Operation complete status of command descriptor. */ +#define CDMA_CS_COMP BIT(15) +/* Operation complete status of command descriptor. */ +/* Command descriptor status - operation fail. */ +#define CDMA_CS_FAIL BIT(14) +/* Command descriptor status - page erased. 
*/ +#define CDMA_CS_ERP BIT(11) +/* Command descriptor status - timeout occurred. */ +#define CDMA_CS_TOUT BIT(10) +/* + * Maximum amount of correction applied to one ECC sector. + * It is part of command descriptor status. + */ +#define CDMA_CS_MAXERR GENMASK(9, 2) +/* Command descriptor status - uncorrectable ECC error. */ +#define CDMA_CS_UNCE BIT(1) +/* Command descriptor status - descriptor error. */ +#define CDMA_CS_ERR BIT(0) + +/* Status of operation - OK. */ +#define STAT_OK 0 +/* Status of operation - FAIL. */ +#define STAT_FAIL 2 +/* Status of operation - uncorrectable ECC error. */ +#define STAT_ECC_UNCORR 3 +/* Status of operation - page erased. */ +#define STAT_ERASED 5 +/* Status of operation - correctable ECC error. */ +#define STAT_ECC_CORR 6 +/* Status of operation - unsuspected state. */ +#define STAT_UNKNOWN 7 +/* Status of operation - operation is not completed yet. */ +#define STAT_BUSY 0xFF + +#define BCH_MAX_NUM_CORR_CAPS 8 +#define BCH_MAX_NUM_SECTOR_SIZES 2 + +struct cadence_nand_timings { + u32 async_toggle_timings; + u32 timings0; + u32 timings1; + u32 timings2; + u32 dll_phy_ctrl; + u32 phy_ctrl; + u32 phy_dqs_timing; + u32 phy_gate_lpbk_ctrl; +}; + +/* Command DMA descriptor. */ +struct cadence_nand_cdma_desc { + /* Next descriptor address. */ + u64 next_pointer; + + /* Flash address is a 32-bit address comprising of BANK and ROW ADDR. */ + u32 flash_pointer; + /*field appears in HPNFC version 13*/ + u16 bank; + u16 rsvd0; + + /* Operation the controller needs to perform. */ + u16 command_type; + u16 rsvd1; + /* Flags for operation of this command. */ + u16 command_flags; + u16 rsvd2; + + /* System/host memory address required for data DMA commands. */ + u64 memory_pointer; + + /* Status of operation. */ + u32 status; + u32 rsvd3; + + /* Address pointer to sync buffer location. */ + u64 sync_flag_pointer; + + /* Controls the buffer sync mechanism. */ + u32 sync_arguments; + u32 rsvd4; + + /* Control data pointer. */ + u64 ctrl_data_ptr; +}; + +/* Interrupt status. */ +struct cadence_nand_irq_status { + /* Thread operation complete status. */ + u32 trd_status; + /* Thread operation error. */ + u32 trd_error; + /* Controller status. */ + u32 status; +}; + +/* Cadence NAND flash controller capabilities get from driver data. */ +struct cadence_nand_dt_devdata { + /* Skew value of the output signals of the NAND Flash interface. */ + u32 if_skew; + /* It informs if slave DMA interface is connected to DMA engine. */ + unsigned int has_dma:1; +}; + +/* Cadence NAND flash controller capabilities read from registers. */ +struct cdns_nand_caps { + /* Maximum number of banks supported by hardware. */ + u8 max_banks; + /* Slave and Master DMA data width in bytes (4 or 8). */ + u8 data_dma_width; + /* Control Data feature supported. */ + bool data_control_supp; + /* Is PHY type DLL. */ + bool is_phy_type_dll; +}; + +struct cdns_nand_ctrl { + struct device *dev; + struct nand_controller controller; + struct cadence_nand_cdma_desc *cdma_desc; + /* IP capability. */ + const struct cadence_nand_dt_devdata *caps1; + struct cdns_nand_caps caps2; + u8 ctrl_rev; + dma_addr_t dma_cdma_desc; + u8 *buf; + u32 buf_size; + u8 curr_corr_str_idx; + + /* Register interface. */ + void __iomem *reg; + + struct { + void __iomem *virt; + dma_addr_t dma; + } io; + + int irq; + /* Interrupts that have happened. */ + struct cadence_nand_irq_status irq_status; + /* Interrupts we are waiting for. 
*/ + struct cadence_nand_irq_status irq_mask; + struct completion complete; + /* Protect irq_mask and irq_status. */ + spinlock_t irq_lock; + + int ecc_strengths[BCH_MAX_NUM_CORR_CAPS]; + struct nand_ecc_step_info ecc_stepinfos[BCH_MAX_NUM_SECTOR_SIZES]; + struct nand_ecc_caps ecc_caps; + + int curr_trans_type; + + struct dma_chan *dmac; + + u32 nf_clk_rate; + /* + * Estimated Board delay. The value includes the total + * round trip delay for the signals and is used for deciding on values + * associated with data read capture. + */ + u32 board_delay; + + struct nand_chip *selected_chip; + + unsigned long assigned_cs; + struct list_head chips; +}; + +struct cdns_nand_chip { + struct cadence_nand_timings timings; + struct nand_chip chip; + u8 nsels; + struct list_head node; + + /* + * part of oob area of NAND flash memory page. + * This part is available for user to read or write. + */ + u32 avail_oob_size; + + /* Sector size. There are few sectors per mtd->writesize */ + u32 sector_size; + u32 sector_count; + + /* Offset of BBM. */ + u8 bbm_offs; + /* Number of bytes reserved for BBM. */ + u8 bbm_len; + /* ECC strength index. */ + u8 corr_str_idx; + + u8 cs[]; +}; + +struct ecc_info { + int (*calc_ecc_bytes)(int step_size, int strength); + int max_step_size; +}; + +static inline struct +cdns_nand_chip *to_cdns_nand_chip(struct nand_chip *chip) +{ + return container_of(chip, struct cdns_nand_chip, chip); +} + +static inline struct +cdns_nand_ctrl *to_cdns_nand_ctrl(struct nand_controller *controller) +{ + return container_of(controller, struct cdns_nand_ctrl, controller); +} + +static bool +cadence_nand_dma_buf_ok(struct cdns_nand_ctrl *cdns_ctrl, const void *buf, + u32 buf_len) +{ + u8 data_dma_width = cdns_ctrl->caps2.data_dma_width; + + return buf && virt_addr_valid(buf) && + likely(IS_ALIGNED((uintptr_t)buf, data_dma_width)) && + likely(IS_ALIGNED(buf_len, DMA_DATA_SIZE_ALIGN)); +} + +static int cadence_nand_wait_for_value(struct cdns_nand_ctrl *cdns_ctrl, + u32 reg_offset, u32 timeout_us, + u32 mask, bool is_clear) +{ + u32 val; + int ret; + + ret = readl_relaxed_poll_timeout(cdns_ctrl->reg + reg_offset, + val, !(val & mask) == is_clear, + 10, timeout_us); + + if (ret < 0) { + dev_err(cdns_ctrl->dev, + "Timeout while waiting for reg %x with mask %x is clear %d\n", + reg_offset, mask, is_clear); + } + + return ret; +} + +static int cadence_nand_set_ecc_enable(struct cdns_nand_ctrl *cdns_ctrl, + bool enable) +{ + u32 reg; + + if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS, + 1000000, + CTRL_STATUS_CTRL_BUSY, true)) + return -ETIMEDOUT; + + reg = readl_relaxed(cdns_ctrl->reg + ECC_CONFIG_0); + + if (enable) + reg |= ECC_CONFIG_0_ECC_EN; + else + reg &= ~ECC_CONFIG_0_ECC_EN; + + writel_relaxed(reg, cdns_ctrl->reg + ECC_CONFIG_0); + + return 0; +} + +static void cadence_nand_set_ecc_strength(struct cdns_nand_ctrl *cdns_ctrl, + u8 corr_str_idx) +{ + u32 reg; + + if (cdns_ctrl->curr_corr_str_idx == corr_str_idx) + return; + + reg = readl_relaxed(cdns_ctrl->reg + ECC_CONFIG_0); + reg &= ~ECC_CONFIG_0_CORR_STR; + reg |= FIELD_PREP(ECC_CONFIG_0_CORR_STR, corr_str_idx); + writel_relaxed(reg, cdns_ctrl->reg + ECC_CONFIG_0); + + cdns_ctrl->curr_corr_str_idx = corr_str_idx; +} + +static int cadence_nand_get_ecc_strength_idx(struct cdns_nand_ctrl *cdns_ctrl, + u8 strength) +{ + int i, corr_str_idx = -1; + + for (i = 0; i < BCH_MAX_NUM_CORR_CAPS; i++) { + if (cdns_ctrl->ecc_strengths[i] == strength) { + corr_str_idx = i; + break; + } + } + + return corr_str_idx; +} + +static int 
cadence_nand_set_skip_marker_val(struct cdns_nand_ctrl *cdns_ctrl, + u16 marker_value) +{ + u32 reg; + + if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS, + 1000000, + CTRL_STATUS_CTRL_BUSY, true)) + return -ETIMEDOUT; + + reg = readl_relaxed(cdns_ctrl->reg + SKIP_BYTES_CONF); + reg &= ~SKIP_BYTES_MARKER_VALUE; + reg |= FIELD_PREP(SKIP_BYTES_MARKER_VALUE, + marker_value); + + writel_relaxed(reg, cdns_ctrl->reg + SKIP_BYTES_CONF); + + return 0; +} + +static int cadence_nand_set_skip_bytes_conf(struct cdns_nand_ctrl *cdns_ctrl, + u8 num_of_bytes, + u32 offset_value, + int enable) +{ + u32 reg, skip_bytes_offset; + + if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS, + 1000000, + CTRL_STATUS_CTRL_BUSY, true)) + return -ETIMEDOUT; + + if (!enable) { + num_of_bytes = 0; + offset_value = 0; + } + + reg = readl_relaxed(cdns_ctrl->reg + SKIP_BYTES_CONF); + reg &= ~SKIP_BYTES_NUM_OF_BYTES; + reg |= FIELD_PREP(SKIP_BYTES_NUM_OF_BYTES, + num_of_bytes); + skip_bytes_offset = FIELD_PREP(SKIP_BYTES_OFFSET_VALUE, + offset_value); + + writel_relaxed(reg, cdns_ctrl->reg + SKIP_BYTES_CONF); + writel_relaxed(skip_bytes_offset, cdns_ctrl->reg + SKIP_BYTES_OFFSET); + + return 0; +} + +/* Functions enables/disables hardware detection of erased data */ +static void cadence_nand_set_erase_detection(struct cdns_nand_ctrl *cdns_ctrl, + bool enable, + u8 bitflips_threshold) +{ + u32 reg; + + reg = readl_relaxed(cdns_ctrl->reg + ECC_CONFIG_0); + + if (enable) + reg |= ECC_CONFIG_0_ERASE_DET_EN; + else + reg &= ~ECC_CONFIG_0_ERASE_DET_EN; + + writel_relaxed(reg, cdns_ctrl->reg + ECC_CONFIG_0); + writel_relaxed(bitflips_threshold, cdns_ctrl->reg + ECC_CONFIG_1); +} + +static int cadence_nand_set_access_width16(struct cdns_nand_ctrl *cdns_ctrl, + bool bit_bus16) +{ + u32 reg; + + if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS, + 1000000, + CTRL_STATUS_CTRL_BUSY, true)) + return -ETIMEDOUT; + + reg = readl_relaxed(cdns_ctrl->reg + COMMON_SET); + + if (!bit_bus16) + reg &= ~COMMON_SET_DEVICE_16BIT; + else + reg |= COMMON_SET_DEVICE_16BIT; + writel_relaxed(reg, cdns_ctrl->reg + COMMON_SET); + + return 0; +} + +static void +cadence_nand_clear_interrupt(struct cdns_nand_ctrl *cdns_ctrl, + struct cadence_nand_irq_status *irq_status) +{ + writel_relaxed(irq_status->status, cdns_ctrl->reg + INTR_STATUS); + writel_relaxed(irq_status->trd_status, + cdns_ctrl->reg + TRD_COMP_INT_STATUS); + writel_relaxed(irq_status->trd_error, + cdns_ctrl->reg + TRD_ERR_INT_STATUS); +} + +static void +cadence_nand_read_int_status(struct cdns_nand_ctrl *cdns_ctrl, + struct cadence_nand_irq_status *irq_status) +{ + irq_status->status = readl_relaxed(cdns_ctrl->reg + INTR_STATUS); + irq_status->trd_status = readl_relaxed(cdns_ctrl->reg + + TRD_COMP_INT_STATUS); + irq_status->trd_error = readl_relaxed(cdns_ctrl->reg + + TRD_ERR_INT_STATUS); +} + +static u32 irq_detected(struct cdns_nand_ctrl *cdns_ctrl, + struct cadence_nand_irq_status *irq_status) +{ + cadence_nand_read_int_status(cdns_ctrl, irq_status); + + return irq_status->status || irq_status->trd_status || + irq_status->trd_error; +} + +static void cadence_nand_reset_irq(struct cdns_nand_ctrl *cdns_ctrl) +{ + unsigned long flags; + + spin_lock_irqsave(&cdns_ctrl->irq_lock, flags); + memset(&cdns_ctrl->irq_status, 0, sizeof(cdns_ctrl->irq_status)); + memset(&cdns_ctrl->irq_mask, 0, sizeof(cdns_ctrl->irq_mask)); + spin_unlock_irqrestore(&cdns_ctrl->irq_lock, flags); +} + +/* + * This is the interrupt service routine. It handles all interrupts + * sent to this device. 
+ */ +static irqreturn_t cadence_nand_isr(int irq, void *dev_id) +{ + struct cdns_nand_ctrl *cdns_ctrl = dev_id; + struct cadence_nand_irq_status irq_status; + irqreturn_t result = IRQ_NONE; + + spin_lock(&cdns_ctrl->irq_lock); + + if (irq_detected(cdns_ctrl, &irq_status)) { + /* Handle interrupt. */ + /* First acknowledge it. */ + cadence_nand_clear_interrupt(cdns_ctrl, &irq_status); + /* Status in the device context for someone to read. */ + cdns_ctrl->irq_status.status |= irq_status.status; + cdns_ctrl->irq_status.trd_status |= irq_status.trd_status; + cdns_ctrl->irq_status.trd_error |= irq_status.trd_error; + /* Notify anyone who cares that it happened. */ + complete(&cdns_ctrl->complete); + /* Tell the OS that we've handled this. */ + result = IRQ_HANDLED; + } + spin_unlock(&cdns_ctrl->irq_lock); + + return result; +} + +static void cadence_nand_set_irq_mask(struct cdns_nand_ctrl *cdns_ctrl, + struct cadence_nand_irq_status *irq_mask) +{ + writel_relaxed(INTR_ENABLE_INTR_EN | irq_mask->status, + cdns_ctrl->reg + INTR_ENABLE); + + writel_relaxed(irq_mask->trd_error, + cdns_ctrl->reg + TRD_ERR_INT_STATUS_EN); +} + +static void +cadence_nand_wait_for_irq(struct cdns_nand_ctrl *cdns_ctrl, + struct cadence_nand_irq_status *irq_mask, + struct cadence_nand_irq_status *irq_status) +{ + unsigned long timeout = msecs_to_jiffies(10000); + unsigned long time_left; + + time_left = wait_for_completion_timeout(&cdns_ctrl->complete, + timeout); + + *irq_status = cdns_ctrl->irq_status; + if (time_left == 0) { + /* Timeout error. */ + dev_err(cdns_ctrl->dev, "timeout occurred:\n"); + dev_err(cdns_ctrl->dev, "\tstatus = 0x%x, mask = 0x%x\n", + irq_status->status, irq_mask->status); + dev_err(cdns_ctrl->dev, + "\ttrd_status = 0x%x, trd_status mask = 0x%x\n", + irq_status->trd_status, irq_mask->trd_status); + dev_err(cdns_ctrl->dev, + "\t trd_error = 0x%x, trd_error mask = 0x%x\n", + irq_status->trd_error, irq_mask->trd_error); + } +} + +/* Execute generic command on NAND controller. */ +static int cadence_nand_generic_cmd_send(struct cdns_nand_ctrl *cdns_ctrl, + u8 chip_nr, + u64 mini_ctrl_cmd) +{ + u32 mini_ctrl_cmd_l, mini_ctrl_cmd_h, reg; + + mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_CS, chip_nr); + mini_ctrl_cmd_l = mini_ctrl_cmd & 0xFFFFFFFF; + mini_ctrl_cmd_h = mini_ctrl_cmd >> 32; + + if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS, + 1000000, + CTRL_STATUS_CTRL_BUSY, true)) + return -ETIMEDOUT; + + cadence_nand_reset_irq(cdns_ctrl); + + writel_relaxed(mini_ctrl_cmd_l, cdns_ctrl->reg + CMD_REG2); + writel_relaxed(mini_ctrl_cmd_h, cdns_ctrl->reg + CMD_REG3); + + /* Select generic command. */ + reg = FIELD_PREP(CMD_REG0_CT, CMD_REG0_CT_GEN); + /* Thread number. */ + reg |= FIELD_PREP(CMD_REG0_TN, 0); + + /* Issue command. */ + writel_relaxed(reg, cdns_ctrl->reg + CMD_REG0); + + return 0; +} + +/* Wait for data on slave DMA interface. 
*/ +static int cadence_nand_wait_on_sdma(struct cdns_nand_ctrl *cdns_ctrl, + u8 *out_sdma_trd, + u32 *out_sdma_size) +{ + struct cadence_nand_irq_status irq_mask, irq_status; + + irq_mask.trd_status = 0; + irq_mask.trd_error = 0; + irq_mask.status = INTR_STATUS_SDMA_TRIGG + | INTR_STATUS_SDMA_ERR + | INTR_STATUS_UNSUPP_CMD; + + cadence_nand_set_irq_mask(cdns_ctrl, &irq_mask); + cadence_nand_wait_for_irq(cdns_ctrl, &irq_mask, &irq_status); + if (irq_status.status == 0) { + dev_err(cdns_ctrl->dev, "Timeout while waiting for SDMA\n"); + return -ETIMEDOUT; + } + + if (irq_status.status & INTR_STATUS_SDMA_TRIGG) { + *out_sdma_size = readl_relaxed(cdns_ctrl->reg + SDMA_SIZE); + *out_sdma_trd = readl_relaxed(cdns_ctrl->reg + SDMA_TRD_NUM); + *out_sdma_trd = + FIELD_GET(SDMA_TRD_NUM_SDMA_TRD, *out_sdma_trd); + } else { + dev_err(cdns_ctrl->dev, "SDMA error - irq_status %x\n", + irq_status.status); + return -EIO; + } + + return 0; +} + +static void cadence_nand_get_caps(struct cdns_nand_ctrl *cdns_ctrl) +{ + u32 reg; + + reg = readl_relaxed(cdns_ctrl->reg + CTRL_FEATURES); + + cdns_ctrl->caps2.max_banks = 1 << FIELD_GET(CTRL_FEATURES_N_BANKS, reg); + + if (FIELD_GET(CTRL_FEATURES_DMA_DWITH64, reg)) + cdns_ctrl->caps2.data_dma_width = 8; + else + cdns_ctrl->caps2.data_dma_width = 4; + + if (reg & CTRL_FEATURES_CONTROL_DATA) + cdns_ctrl->caps2.data_control_supp = true; + + if (reg & (CTRL_FEATURES_NVDDR_2_3 + | CTRL_FEATURES_NVDDR)) + cdns_ctrl->caps2.is_phy_type_dll = true; +} + +/* Prepare CDMA descriptor. */ +static void +cadence_nand_cdma_desc_prepare(struct cdns_nand_ctrl *cdns_ctrl, + char nf_mem, u32 flash_ptr, char *mem_ptr, + char *ctrl_data_ptr, u16 ctype) +{ + struct cadence_nand_cdma_desc *cdma_desc = cdns_ctrl->cdma_desc; + + memset(cdma_desc, 0, sizeof(struct cadence_nand_cdma_desc)); + + /* Set fields for one descriptor. */ + cdma_desc->flash_pointer = flash_ptr; + if (cdns_ctrl->ctrl_rev >= 13) + cdma_desc->bank = nf_mem; + else + cdma_desc->flash_pointer |= (nf_mem << CDMA_CFPTR_MEM_SHIFT); + + cdma_desc->command_flags |= CDMA_CF_DMA_MASTER; + cdma_desc->command_flags |= CDMA_CF_INT; + + cdma_desc->memory_pointer = (uintptr_t)mem_ptr; + cdma_desc->status = 0; + cdma_desc->sync_flag_pointer = 0; + cdma_desc->sync_arguments = 0; + + cdma_desc->command_type = ctype; + cdma_desc->ctrl_data_ptr = (uintptr_t)ctrl_data_ptr; +} + +static u8 cadence_nand_check_desc_error(struct cdns_nand_ctrl *cdns_ctrl, + u32 desc_status) +{ + if (desc_status & CDMA_CS_ERP) + return STAT_ERASED; + + if (desc_status & CDMA_CS_UNCE) + return STAT_ECC_UNCORR; + + if (desc_status & CDMA_CS_ERR) { + dev_err(cdns_ctrl->dev, ":CDMA desc error flag detected.\n"); + return STAT_FAIL; + } + + if (FIELD_GET(CDMA_CS_MAXERR, desc_status)) + return STAT_ECC_CORR; + + return STAT_FAIL; +} + +static int cadence_nand_cdma_finish(struct cdns_nand_ctrl *cdns_ctrl) +{ + struct cadence_nand_cdma_desc *desc_ptr = cdns_ctrl->cdma_desc; + u8 status = STAT_BUSY; + + if (desc_ptr->status & CDMA_CS_FAIL) { + status = cadence_nand_check_desc_error(cdns_ctrl, + desc_ptr->status); + dev_err(cdns_ctrl->dev, ":CDMA error %x\n", desc_ptr->status); + } else if (desc_ptr->status & CDMA_CS_COMP) { + /* Descriptor finished with no errors. */ + if (desc_ptr->command_flags & CDMA_CF_CONT) { + dev_info(cdns_ctrl->dev, "DMA unsupported flag is set"); + status = STAT_UNKNOWN; + } else { + /* Last descriptor. 
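+ * The chain ends here, so the whole operation completed successfully.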
*/ + status = STAT_OK; + } + } + + return status; +} + +static int cadence_nand_cdma_send(struct cdns_nand_ctrl *cdns_ctrl, + u8 thread) +{ + u32 reg; + int status; + + /* Wait for thread ready. */ + status = cadence_nand_wait_for_value(cdns_ctrl, TRD_STATUS, + 1000000, + BIT(thread), true); + if (status) + return status; + + cadence_nand_reset_irq(cdns_ctrl); + + writel_relaxed((u32)cdns_ctrl->dma_cdma_desc, + cdns_ctrl->reg + CMD_REG2); + writel_relaxed(0, cdns_ctrl->reg + CMD_REG3); + + /* Select CDMA mode. */ + reg = FIELD_PREP(CMD_REG0_CT, CMD_REG0_CT_CDMA); + /* Thread number. */ + reg |= FIELD_PREP(CMD_REG0_TN, thread); + /* Issue command. */ + writel_relaxed(reg, cdns_ctrl->reg + CMD_REG0); + + return 0; +} + +/* Send a CDMA command and wait for it to finish. */ +static u32 +cadence_nand_cdma_send_and_wait(struct cdns_nand_ctrl *cdns_ctrl, + u8 thread) +{ + struct cadence_nand_irq_status irq_mask, irq_status = {0}; + int status; + + irq_mask.trd_status = BIT(thread); + irq_mask.trd_error = BIT(thread); + irq_mask.status = INTR_STATUS_CDMA_TERR; + + cadence_nand_set_irq_mask(cdns_ctrl, &irq_mask); + + status = cadence_nand_cdma_send(cdns_ctrl, thread); + if (status) + return status; + + cadence_nand_wait_for_irq(cdns_ctrl, &irq_mask, &irq_status); + + if (irq_status.status == 0 && irq_status.trd_status == 0 && + irq_status.trd_error == 0) { + dev_err(cdns_ctrl->dev, "CDMA command timeout\n"); + return -ETIMEDOUT; + } + if (irq_status.status & irq_mask.status) { + dev_err(cdns_ctrl->dev, "CDMA command failed\n"); + return -EIO; + } + + return 0; +} + +/* + * ECC size depends on configured ECC strength and on maximum supported + * ECC step size. + */ +static int cadence_nand_calc_ecc_bytes(int max_step_size, int strength) +{ + int nbytes = DIV_ROUND_UP(fls(8 * max_step_size) * strength, 8); + + return ALIGN(nbytes, 2); +} + +#define CADENCE_NAND_CALC_ECC_BYTES(max_step_size) \ + static int \ + cadence_nand_calc_ecc_bytes_##max_step_size(int step_size, \ + int strength)\ + {\ + return cadence_nand_calc_ecc_bytes(max_step_size, strength);\ + } + +CADENCE_NAND_CALC_ECC_BYTES(256) +CADENCE_NAND_CALC_ECC_BYTES(512) +CADENCE_NAND_CALC_ECC_BYTES(1024) +CADENCE_NAND_CALC_ECC_BYTES(2048) +CADENCE_NAND_CALC_ECC_BYTES(4096) + +/* Read the BCH capabilities.
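+ * The controller advertises up to 8 correction strengths (BCH_CFG_0/1) + * and up to 2 ECC sector sizes (BCH_CFG_2); a zero entry means the slot + * is unused.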
*/ +static int cadence_nand_read_bch_caps(struct cdns_nand_ctrl *cdns_ctrl) +{ + struct nand_ecc_caps *ecc_caps = &cdns_ctrl->ecc_caps; + int max_step_size = 0, nstrengths, i; + u32 reg; + + reg = readl_relaxed(cdns_ctrl->reg + BCH_CFG_0); + cdns_ctrl->ecc_strengths[0] = FIELD_GET(BCH_CFG_0_CORR_CAP_0, reg); + cdns_ctrl->ecc_strengths[1] = FIELD_GET(BCH_CFG_0_CORR_CAP_1, reg); + cdns_ctrl->ecc_strengths[2] = FIELD_GET(BCH_CFG_0_CORR_CAP_2, reg); + cdns_ctrl->ecc_strengths[3] = FIELD_GET(BCH_CFG_0_CORR_CAP_3, reg); + + reg = readl_relaxed(cdns_ctrl->reg + BCH_CFG_1); + cdns_ctrl->ecc_strengths[4] = FIELD_GET(BCH_CFG_1_CORR_CAP_4, reg); + cdns_ctrl->ecc_strengths[5] = FIELD_GET(BCH_CFG_1_CORR_CAP_5, reg); + cdns_ctrl->ecc_strengths[6] = FIELD_GET(BCH_CFG_1_CORR_CAP_6, reg); + cdns_ctrl->ecc_strengths[7] = FIELD_GET(BCH_CFG_1_CORR_CAP_7, reg); + + reg = readl_relaxed(cdns_ctrl->reg + BCH_CFG_2); + cdns_ctrl->ecc_stepinfos[0].stepsize = + FIELD_GET(BCH_CFG_2_SECT_0, reg); + + cdns_ctrl->ecc_stepinfos[1].stepsize = + FIELD_GET(BCH_CFG_2_SECT_1, reg); + + nstrengths = 0; + for (i = 0; i < BCH_MAX_NUM_CORR_CAPS; i++) { + if (cdns_ctrl->ecc_strengths[i] != 0) + nstrengths++; + } + + ecc_caps->nstepinfos = 0; + for (i = 0; i < BCH_MAX_NUM_SECTOR_SIZES; i++) { + /* ECC strengths are common for all step infos. */ + cdns_ctrl->ecc_stepinfos[i].nstrengths = nstrengths; + cdns_ctrl->ecc_stepinfos[i].strengths = + cdns_ctrl->ecc_strengths; + + if (cdns_ctrl->ecc_stepinfos[i].stepsize != 0) + ecc_caps->nstepinfos++; + + if (cdns_ctrl->ecc_stepinfos[i].stepsize > max_step_size) + max_step_size = cdns_ctrl->ecc_stepinfos[i].stepsize; + } + ecc_caps->stepinfos = &cdns_ctrl->ecc_stepinfos[0]; + + switch (max_step_size) { + case 256: + ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_256; + break; + case 512: + ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_512; + break; + case 1024: + ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_1024; + break; + case 2048: + ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_2048; + break; + case 4096: + ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_4096; + break; + default: + dev_err(cdns_ctrl->dev, + "Unsupported sector size (ecc step size) %d\n", + max_step_size); + return -EIO; + } + + return 0; +} + +/* Hardware initialization. */ +static int cadence_nand_hw_init(struct cdns_nand_ctrl *cdns_ctrl) +{ + int status; + u32 reg; + + status = cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS, + 1000000, + CTRL_STATUS_INIT_COMP, false); + if (status) + return status; + + reg = readl_relaxed(cdns_ctrl->reg + CTRL_VERSION); + cdns_ctrl->ctrl_rev = FIELD_GET(CTRL_VERSION_REV, reg); + + dev_info(cdns_ctrl->dev, + "%s: cadence nand controller version reg %x\n", + __func__, reg); + + /* Disable cache and multiplane. */ + writel_relaxed(0, cdns_ctrl->reg + MULTIPLANE_CFG); + writel_relaxed(0, cdns_ctrl->reg + CACHE_CFG); + + /* Clear all interrupts. */ + writel_relaxed(0xFFFFFFFF, cdns_ctrl->reg + INTR_STATUS); + + cadence_nand_get_caps(cdns_ctrl); + cadence_nand_read_bch_caps(cdns_ctrl); + + /* + * Set I/O width access to 8 bit. This is because an 8-bit bus + * width is expected during software device discovery. + */ + status = cadence_nand_set_access_width16(cdns_ctrl, false); + + return status; +} + +#define TT_MAIN_OOB_AREAS 2 +#define TT_RAW_PAGE 3 +#define TT_BBM 4 +#define TT_MAIN_OOB_AREA_EXT 5 + +/* Prepare size of data to transfer.
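+ * TRAN_CFG_0/1 are programmed according to the transfer type: main data + * plus OOB areas, raw page, bad block marker only, or main data with + * the OOB passed as extra control data.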
*/ +static void +cadence_nand_prepare_data_size(struct nand_chip *chip, + int transfer_type) +{ + struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller); + struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip); + struct mtd_info *mtd = nand_to_mtd(chip); + u32 sec_size = 0, offset = 0, sec_cnt = 1; + u32 last_sec_size = cdns_chip->sector_size; + u32 data_ctrl_size = 0; + u32 reg = 0; + + if (cdns_ctrl->curr_trans_type == transfer_type) + return; + + switch (transfer_type) { + case TT_MAIN_OOB_AREA_EXT: + sec_cnt = cdns_chip->sector_count; + sec_size = cdns_chip->sector_size; + data_ctrl_size = cdns_chip->avail_oob_size; + break; + case TT_MAIN_OOB_AREAS: + sec_cnt = cdns_chip->sector_count; + last_sec_size = cdns_chip->sector_size + + cdns_chip->avail_oob_size; + sec_size = cdns_chip->sector_size; + break; + case TT_RAW_PAGE: + last_sec_size = mtd->writesize + mtd->oobsize; + break; + case TT_BBM: + offset = mtd->writesize + cdns_chip->bbm_offs; + last_sec_size = 8; + break; + } + + reg = 0; + reg |= FIELD_PREP(TRAN_CFG_0_OFFSET, offset); + reg |= FIELD_PREP(TRAN_CFG_0_SEC_CNT, sec_cnt); + writel_relaxed(reg, cdns_ctrl->reg + TRAN_CFG_0); + + reg = 0; + reg |= FIELD_PREP(TRAN_CFG_1_LAST_SEC_SIZE, last_sec_size); + reg |= FIELD_PREP(TRAN_CFG_1_SECTOR_SIZE, sec_size); + writel_relaxed(reg, cdns_ctrl->reg + TRAN_CFG_1); + + if (cdns_ctrl->caps2.data_control_supp) { + reg = readl_relaxed(cdns_ctrl->reg + CONTROL_DATA_CTRL); + reg &= ~CONTROL_DATA_CTRL_SIZE; + reg |= FIELD_PREP(CONTROL_DATA_CTRL_SIZE, data_ctrl_size); + writel_relaxed(reg, cdns_ctrl->reg + CONTROL_DATA_CTRL); + } + + cdns_ctrl->curr_trans_type = transfer_type; +} + +static int +cadence_nand_cdma_transfer(struct cdns_nand_ctrl *cdns_ctrl, u8 chip_nr, + int page, void *buf, void *ctrl_dat, u32 buf_size, + u32 ctrl_dat_size, enum dma_data_direction dir, + bool with_ecc) +{ + dma_addr_t dma_buf, dma_ctrl_dat = 0; + u8 thread_nr = chip_nr; + int status; + u16 ctype; + + if (dir == DMA_FROM_DEVICE) + ctype = CDMA_CT_RD; + else + ctype = CDMA_CT_WR; + + cadence_nand_set_ecc_enable(cdns_ctrl, with_ecc); + + dma_buf = dma_map_single(cdns_ctrl->dev, buf, buf_size, dir); + if (dma_mapping_error(cdns_ctrl->dev, dma_buf)) { + dev_err(cdns_ctrl->dev, "Failed to map DMA buffer\n"); + return -EIO; + } + + if (ctrl_dat && ctrl_dat_size) { + dma_ctrl_dat = dma_map_single(cdns_ctrl->dev, ctrl_dat, + ctrl_dat_size, dir); + if (dma_mapping_error(cdns_ctrl->dev, dma_ctrl_dat)) { + dma_unmap_single(cdns_ctrl->dev, dma_buf, + buf_size, dir); + dev_err(cdns_ctrl->dev, "Failed to map DMA buffer\n"); + return -EIO; + } + } + + cadence_nand_cdma_desc_prepare(cdns_ctrl, chip_nr, page, + (void *)dma_buf, (void *)dma_ctrl_dat, + ctype); + + status = cadence_nand_cdma_send_and_wait(cdns_ctrl, thread_nr); + + dma_unmap_single(cdns_ctrl->dev, dma_buf, + buf_size, dir); + + if (ctrl_dat && ctrl_dat_size) + dma_unmap_single(cdns_ctrl->dev, dma_ctrl_dat, + ctrl_dat_size, dir); + if (status) + return status; + + return cadence_nand_cdma_finish(cdns_ctrl); +} + +static void cadence_nand_set_timings(struct cdns_nand_ctrl *cdns_ctrl, + struct cadence_nand_timings *t) +{ + writel_relaxed(t->async_toggle_timings, + cdns_ctrl->reg + ASYNC_TOGGLE_TIMINGS); + writel_relaxed(t->timings0, cdns_ctrl->reg + TIMINGS0); + writel_relaxed(t->timings1, cdns_ctrl->reg + TIMINGS1); + writel_relaxed(t->timings2, cdns_ctrl->reg + TIMINGS2); + + if (cdns_ctrl->caps2.is_phy_type_dll) + writel_relaxed(t->dll_phy_ctrl, cdns_ctrl->reg + DLL_PHY_CTRL); + + 
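/* PHY_CTRL is written for both PHY types; the registers below are DLL-only. */ +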
writel_relaxed(t->phy_ctrl, cdns_ctrl->reg + PHY_CTRL); + + if (cdns_ctrl->caps2.is_phy_type_dll) { + writel_relaxed(0, cdns_ctrl->reg + PHY_TSEL); + writel_relaxed(2, cdns_ctrl->reg + PHY_DQ_TIMING); + writel_relaxed(t->phy_dqs_timing, + cdns_ctrl->reg + PHY_DQS_TIMING); + writel_relaxed(t->phy_gate_lpbk_ctrl, + cdns_ctrl->reg + PHY_GATE_LPBK_CTRL); + writel_relaxed(PHY_DLL_MASTER_CTRL_BYPASS_MODE, + cdns_ctrl->reg + PHY_DLL_MASTER_CTRL); + writel_relaxed(0, cdns_ctrl->reg + PHY_DLL_SLAVE_CTRL); + } +} + +static int cadence_nand_select_target(struct nand_chip *chip) +{ + struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller); + struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip); + + if (chip == cdns_ctrl->selected_chip) + return 0; + + if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS, + 1000000, + CTRL_STATUS_CTRL_BUSY, true)) + return -ETIMEDOUT; + + cadence_nand_set_timings(cdns_ctrl, &cdns_chip->timings); + + cadence_nand_set_ecc_strength(cdns_ctrl, + cdns_chip->corr_str_idx); + + cadence_nand_set_erase_detection(cdns_ctrl, true, + chip->ecc.strength); + + cdns_ctrl->curr_trans_type = -1; + cdns_ctrl->selected_chip = chip; + + return 0; +} + +static int cadence_nand_erase(struct nand_chip *chip, u32 page) +{ + struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller); + struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip); + int status; + u8 thread_nr = cdns_chip->cs[chip->cur_cs]; + + cadence_nand_cdma_desc_prepare(cdns_ctrl, + cdns_chip->cs[chip->cur_cs], + page, NULL, NULL, + CDMA_CT_ERASE); + status = cadence_nand_cdma_send_and_wait(cdns_ctrl, thread_nr); + if (status) { + dev_err(cdns_ctrl->dev, "erase operation failed\n"); + return -EIO; + } + + status = cadence_nand_cdma_finish(cdns_ctrl); + if (status) + return status; + + return 0; +} + +static int cadence_nand_read_bbm(struct nand_chip *chip, int page, u8 *buf) +{ + int status; + struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller); + struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip); + struct mtd_info *mtd = nand_to_mtd(chip); + + cadence_nand_prepare_data_size(chip, TT_BBM); + + cadence_nand_set_skip_bytes_conf(cdns_ctrl, 0, 0, 0); + + /* + * Read only bad block marker from offset + * defined by a memory manufacturer. + */ + status = cadence_nand_cdma_transfer(cdns_ctrl, + cdns_chip->cs[chip->cur_cs], + page, cdns_ctrl->buf, NULL, + mtd->oobsize, + 0, DMA_FROM_DEVICE, false); + if (status) { + dev_err(cdns_ctrl->dev, "read BBM failed\n"); + return -EIO; + } + + memcpy(buf + cdns_chip->bbm_offs, cdns_ctrl->buf, cdns_chip->bbm_len); + + return 0; +} + +static int cadence_nand_write_page(struct nand_chip *chip, + const u8 *buf, int oob_required, + int page) +{ + struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller); + struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip); + struct mtd_info *mtd = nand_to_mtd(chip); + int status; + u16 marker_val = 0xFFFF; + + status = cadence_nand_select_target(chip); + if (status) + return status; + + cadence_nand_set_skip_bytes_conf(cdns_ctrl, cdns_chip->bbm_len, + mtd->writesize + + cdns_chip->bbm_offs, + 1); + + if (oob_required) { + marker_val = *(u16 *)(chip->oob_poi + + cdns_chip->bbm_offs); + } else { + /* Set oob data to 0xFF. 
*/ + memset(cdns_ctrl->buf + mtd->writesize, 0xFF, + cdns_chip->avail_oob_size); + } + + cadence_nand_set_skip_marker_val(cdns_ctrl, marker_val); + + cadence_nand_prepare_data_size(chip, TT_MAIN_OOB_AREA_EXT); + + if (cadence_nand_dma_buf_ok(cdns_ctrl, buf, mtd->writesize) && + cdns_ctrl->caps2.data_control_supp) { + u8 *oob; + + if (oob_required) + oob = chip->oob_poi; + else + oob = cdns_ctrl->buf + mtd->writesize; + + status = cadence_nand_cdma_transfer(cdns_ctrl, + cdns_chip->cs[chip->cur_cs], + page, (void *)buf, oob, + mtd->writesize, + cdns_chip->avail_oob_size, + DMA_TO_DEVICE, true); + if (status) { + dev_err(cdns_ctrl->dev, "write page failed\n"); + return -EIO; + } + + return 0; + } + + if (oob_required) { + /* Transfer the data to the oob area. */ + memcpy(cdns_ctrl->buf + mtd->writesize, chip->oob_poi, + cdns_chip->avail_oob_size); + } + + memcpy(cdns_ctrl->buf, buf, mtd->writesize); + + cadence_nand_prepare_data_size(chip, TT_MAIN_OOB_AREAS); + + return cadence_nand_cdma_transfer(cdns_ctrl, + cdns_chip->cs[chip->cur_cs], + page, cdns_ctrl->buf, NULL, + mtd->writesize + + cdns_chip->avail_oob_size, + 0, DMA_TO_DEVICE, true); +} + +static int cadence_nand_write_oob(struct nand_chip *chip, int page) +{ + struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller); + struct mtd_info *mtd = nand_to_mtd(chip); + + memset(cdns_ctrl->buf, 0xFF, mtd->writesize); + + return cadence_nand_write_page(chip, cdns_ctrl->buf, 1, page); +} + +static int cadence_nand_write_page_raw(struct nand_chip *chip, + const u8 *buf, int oob_required, + int page) +{ + struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller); + struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip); + struct mtd_info *mtd = nand_to_mtd(chip); + int writesize = mtd->writesize; + int oobsize = mtd->oobsize; + int ecc_steps = chip->ecc.steps; + int ecc_size = chip->ecc.size; + int ecc_bytes = chip->ecc.bytes; + void *tmp_buf = cdns_ctrl->buf; + int oob_skip = cdns_chip->bbm_len; + size_t size = writesize + oobsize; + int i, pos, len; + int status = 0; + + status = cadence_nand_select_target(chip); + if (status) + return status; + + /* + * Fill the buffer with 0xff first, except for a full page transfer. + * This simplifies the logic. + */ + if (!buf || !oob_required) + memset(tmp_buf, 0xff, size); + + cadence_nand_set_skip_bytes_conf(cdns_ctrl, 0, 0, 0); + + /* Arrange the buffer for syndrome payload/ecc layout. */ + if (buf) { + for (i = 0; i < ecc_steps; i++) { + pos = i * (ecc_size + ecc_bytes); + len = ecc_size; + + if (pos >= writesize) + pos += oob_skip; + else if (pos + len > writesize) + len = writesize - pos; + + memcpy(tmp_buf + pos, buf, len); + buf += len; + if (len < ecc_size) { + len = ecc_size - len; + memcpy(tmp_buf + writesize + oob_skip, buf, + len); + buf += len; + } + } + } + + if (oob_required) { + const u8 *oob = chip->oob_poi; + u32 oob_data_offset = (cdns_chip->sector_count - 1) * + (cdns_chip->sector_size + chip->ecc.bytes) + + cdns_chip->sector_size + oob_skip; + + /* BBM at the beginning of the OOB area. */ + memcpy(tmp_buf + writesize, oob, oob_skip); + + /* OOB free. */ + memcpy(tmp_buf + oob_data_offset, oob, + cdns_chip->avail_oob_size); + oob += cdns_chip->avail_oob_size; + + /* OOB ECC.
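+ * ECC bytes follow every sector; the last sector's ECC is additionally + * shifted by the available OOB size.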
*/ + for (i = 0; i < ecc_steps; i++) { + pos = ecc_size + i * (ecc_size + ecc_bytes); + if (i == (ecc_steps - 1)) + pos += cdns_chip->avail_oob_size; + + len = ecc_bytes; + + if (pos >= writesize) + pos += oob_skip; + else if (pos + len > writesize) + len = writesize - pos; + + memcpy(tmp_buf + pos, oob, len); + oob += len; + if (len < ecc_bytes) { + len = ecc_bytes - len; + memcpy(tmp_buf + writesize + oob_skip, oob, + len); + oob += len; + } + } + } + + cadence_nand_prepare_data_size(chip, TT_RAW_PAGE); + + return cadence_nand_cdma_transfer(cdns_ctrl, + cdns_chip->cs[chip->cur_cs], + page, cdns_ctrl->buf, NULL, + mtd->writesize + + mtd->oobsize, + 0, DMA_TO_DEVICE, false); +} + +static int cadence_nand_write_oob_raw(struct nand_chip *chip, + int page) +{ + return cadence_nand_write_page_raw(chip, NULL, true, page); +} + +static int cadence_nand_read_page(struct nand_chip *chip, + u8 *buf, int oob_required, int page) +{ + struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller); + struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip); + struct mtd_info *mtd = nand_to_mtd(chip); + int status = 0; + int ecc_err_count = 0; + + status = cadence_nand_select_target(chip); + if (status) + return status; + + cadence_nand_set_skip_bytes_conf(cdns_ctrl, cdns_chip->bbm_len, + mtd->writesize + + cdns_chip->bbm_offs, 1); + + /* + * If data buffer can be accessed by DMA and data_control feature + * is supported then transfer data and oob directly. + */ + if (cadence_nand_dma_buf_ok(cdns_ctrl, buf, mtd->writesize) && + cdns_ctrl->caps2.data_control_supp) { + u8 *oob; + + if (oob_required) + oob = chip->oob_poi; + else + oob = cdns_ctrl->buf + mtd->writesize; + + cadence_nand_prepare_data_size(chip, TT_MAIN_OOB_AREA_EXT); + status = cadence_nand_cdma_transfer(cdns_ctrl, + cdns_chip->cs[chip->cur_cs], + page, buf, oob, + mtd->writesize, + cdns_chip->avail_oob_size, + DMA_FROM_DEVICE, true); + /* Otherwise use bounce buffer. */ + } else { + cadence_nand_prepare_data_size(chip, TT_MAIN_OOB_AREAS); + status = cadence_nand_cdma_transfer(cdns_ctrl, + cdns_chip->cs[chip->cur_cs], + page, cdns_ctrl->buf, + NULL, mtd->writesize + + cdns_chip->avail_oob_size, + 0, DMA_FROM_DEVICE, true); + + memcpy(buf, cdns_ctrl->buf, mtd->writesize); + if (oob_required) + memcpy(chip->oob_poi, + cdns_ctrl->buf + mtd->writesize, + mtd->oobsize); + } + + switch (status) { + case STAT_ECC_UNCORR: + mtd->ecc_stats.failed++; + ecc_err_count++; + break; + case STAT_ECC_CORR: + ecc_err_count = FIELD_GET(CDMA_CS_MAXERR, + cdns_ctrl->cdma_desc->status); + mtd->ecc_stats.corrected += ecc_err_count; + break; + case STAT_ERASED: + case STAT_OK: + break; + default: + dev_err(cdns_ctrl->dev, "read page failed\n"); + return -EIO; + } + + if (oob_required) + if (cadence_nand_read_bbm(chip, page, chip->oob_poi)) + return -EIO; + + return ecc_err_count; +} + +/* Reads OOB data from the device. 
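+ * This is done with a full ECC page read into the bounce buffer; the + * OOB bytes end up in chip->oob_poi.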
*/ +static int cadence_nand_read_oob(struct nand_chip *chip, int page) +{ + struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller); + + return cadence_nand_read_page(chip, cdns_ctrl->buf, 1, page); +} + +static int cadence_nand_read_page_raw(struct nand_chip *chip, + u8 *buf, int oob_required, int page) +{ + struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller); + struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip); + struct mtd_info *mtd = nand_to_mtd(chip); + int oob_skip = cdns_chip->bbm_len; + int writesize = mtd->writesize; + int ecc_steps = chip->ecc.steps; + int ecc_size = chip->ecc.size; + int ecc_bytes = chip->ecc.bytes; + void *tmp_buf = cdns_ctrl->buf; + int i, pos, len; + int status = 0; + + status = cadence_nand_select_target(chip); + if (status) + return status; + + cadence_nand_set_skip_bytes_conf(cdns_ctrl, 0, 0, 0); + + cadence_nand_prepare_data_size(chip, TT_RAW_PAGE); + status = cadence_nand_cdma_transfer(cdns_ctrl, + cdns_chip->cs[chip->cur_cs], + page, cdns_ctrl->buf, NULL, + mtd->writesize + + mtd->oobsize, + 0, DMA_FROM_DEVICE, false); + + switch (status) { + case STAT_ERASED: + case STAT_OK: + break; + default: + dev_err(cdns_ctrl->dev, "read raw page failed\n"); + return -EIO; + } + + /* Arrange the buffer for syndrome payload/ecc layout. */ + if (buf) { + for (i = 0; i < ecc_steps; i++) { + pos = i * (ecc_size + ecc_bytes); + len = ecc_size; + + if (pos >= writesize) + pos += oob_skip; + else if (pos + len > writesize) + len = writesize - pos; + + memcpy(buf, tmp_buf + pos, len); + buf += len; + if (len < ecc_size) { + len = ecc_size - len; + memcpy(buf, tmp_buf + writesize + oob_skip, + len); + buf += len; + } + } + } + + if (oob_required) { + u8 *oob = chip->oob_poi; + u32 oob_data_offset = (cdns_chip->sector_count - 1) * + (cdns_chip->sector_size + chip->ecc.bytes) + + cdns_chip->sector_size + oob_skip; + + /* OOB free. */ + memcpy(oob, tmp_buf + oob_data_offset, + cdns_chip->avail_oob_size); + + /* BBM at the beginning of the OOB area. 
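+ * This intentionally overwrites the first bbm_len bytes copied just + * above.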
*/ + memcpy(oob, tmp_buf + writesize, oob_skip); + + oob += cdns_chip->avail_oob_size; + + /* OOB ECC */ + for (i = 0; i < ecc_steps; i++) { + pos = ecc_size + i * (ecc_size + ecc_bytes); + len = ecc_bytes; + + if (i == (ecc_steps - 1)) + pos += cdns_chip->avail_oob_size; + + if (pos >= writesize) + pos += oob_skip; + else if (pos + len > writesize) + len = writesize - pos; + + memcpy(oob, tmp_buf + pos, len); + oob += len; + if (len < ecc_bytes) { + len = ecc_bytes - len; + memcpy(oob, tmp_buf + writesize + oob_skip, + len); + oob += len; + } + } + } + + return 0; +} + +static int cadence_nand_read_oob_raw(struct nand_chip *chip, + int page) +{ + return cadence_nand_read_page_raw(chip, NULL, true, page); +} + +static void cadence_nand_slave_dma_transfer_finished(void *data) +{ + struct completion *finished = data; + + complete(finished); +} + +static int cadence_nand_slave_dma_transfer(struct cdns_nand_ctrl *cdns_ctrl, + void *buf, + dma_addr_t dev_dma, size_t len, + enum dma_data_direction dir) +{ + DECLARE_COMPLETION_ONSTACK(finished); + struct dma_chan *chan; + struct dma_device *dma_dev; + dma_addr_t src_dma, dst_dma, buf_dma; + struct dma_async_tx_descriptor *tx; + dma_cookie_t cookie; + + chan = cdns_ctrl->dmac; + dma_dev = chan->device; + + buf_dma = dma_map_single(dma_dev->dev, buf, len, dir); + if (dma_mapping_error(dma_dev->dev, buf_dma)) { + dev_err(cdns_ctrl->dev, "Failed to map DMA buffer\n"); + goto err; + } + + if (dir == DMA_FROM_DEVICE) { + src_dma = cdns_ctrl->io.dma; + dst_dma = buf_dma; + } else { + src_dma = buf_dma; + dst_dma = cdns_ctrl->io.dma; + } + + tx = dmaengine_prep_dma_memcpy(cdns_ctrl->dmac, dst_dma, src_dma, len, + DMA_CTRL_ACK | DMA_PREP_INTERRUPT); + if (!tx) { + dev_err(cdns_ctrl->dev, "Failed to prepare DMA memcpy\n"); + goto err_unmap; + } + + tx->callback = cadence_nand_slave_dma_transfer_finished; + tx->callback_param = &finished; + + cookie = dmaengine_submit(tx); + if (dma_submit_error(cookie)) { + dev_err(cdns_ctrl->dev, "Failed to do DMA tx_submit\n"); + goto err_unmap; + } + + dma_async_issue_pending(cdns_ctrl->dmac); + wait_for_completion(&finished); + + dma_unmap_single(cdns_ctrl->dev, buf_dma, len, dir); + + return 0; + +err_unmap: + dma_unmap_single(cdns_ctrl->dev, buf_dma, len, dir); + +err: + dev_dbg(cdns_ctrl->dev, "Fall back to CPU I/O\n"); + + return -EIO; +} + +static int cadence_nand_read_buf(struct cdns_nand_ctrl *cdns_ctrl, + u8 *buf, int len) +{ + u8 thread_nr = 0; + u32 sdma_size; + int status; + + /* Wait until the slave DMA interface is ready for data transfer. */ + status = cadence_nand_wait_on_sdma(cdns_ctrl, &thread_nr, &sdma_size); + if (status) + return status; + + if (!cdns_ctrl->caps1->has_dma) { + int len_in_words = len >> 2; + + /* Read the word-aligned part of the data. */ + ioread32_rep(cdns_ctrl->io.virt, buf, len_in_words); + if (sdma_size > len) { + /* Read any remaining data from the slave DMA interface. */ + ioread32_rep(cdns_ctrl->io.virt, cdns_ctrl->buf, + sdma_size / 4 - len_in_words); + /* Copy the rest of the data. */ + memcpy(buf + (len_in_words << 2), cdns_ctrl->buf, + len - (len_in_words << 2)); + } + return 0; + } + + if (cadence_nand_dma_buf_ok(cdns_ctrl, buf, len)) { + status = cadence_nand_slave_dma_transfer(cdns_ctrl, buf, + cdns_ctrl->io.dma, + len, DMA_FROM_DEVICE); + if (status == 0) + return 0; + + dev_warn(cdns_ctrl->dev, + "Slave DMA transfer failed. Try again using bounce buffer."); + } + + /* If the DMA transfer is not possible or has failed, use the bounce buffer.
*/ + status = cadence_nand_slave_dma_transfer(cdns_ctrl, cdns_ctrl->buf, + cdns_ctrl->io.dma, + sdma_size, DMA_FROM_DEVICE); + + if (status) { + dev_err(cdns_ctrl->dev, "Slave DMA transfer failed"); + return status; + } + + memcpy(buf, cdns_ctrl->buf, len); + + return 0; +} + +static int cadence_nand_write_buf(struct cdns_nand_ctrl *cdns_ctrl, + const u8 *buf, int len) +{ + u8 thread_nr = 0; + u32 sdma_size; + int status; + + /* Wait until the slave DMA interface is ready for data transfer. */ + status = cadence_nand_wait_on_sdma(cdns_ctrl, &thread_nr, &sdma_size); + if (status) + return status; + + if (!cdns_ctrl->caps1->has_dma) { + int len_in_words = len >> 2; + + iowrite32_rep(cdns_ctrl->io.virt, buf, len_in_words); + if (sdma_size > len) { + /* Copy the rest of the data. */ + memcpy(cdns_ctrl->buf, buf + (len_in_words << 2), + len - (len_in_words << 2)); + /* Write all the data expected by the NAND controller. */ + iowrite32_rep(cdns_ctrl->io.virt, cdns_ctrl->buf, + sdma_size / 4 - len_in_words); + } + + return 0; + } + + if (cadence_nand_dma_buf_ok(cdns_ctrl, buf, len)) { + status = cadence_nand_slave_dma_transfer(cdns_ctrl, (void *)buf, + cdns_ctrl->io.dma, + len, DMA_TO_DEVICE); + if (status == 0) + return 0; + + dev_warn(cdns_ctrl->dev, + "Slave DMA transfer failed. Try again using bounce buffer."); + } + + /* If the DMA transfer is not possible or has failed, use the bounce buffer. */ + memcpy(cdns_ctrl->buf, buf, len); + + status = cadence_nand_slave_dma_transfer(cdns_ctrl, cdns_ctrl->buf, + cdns_ctrl->io.dma, + sdma_size, DMA_TO_DEVICE); + + if (status) + dev_err(cdns_ctrl->dev, "Slave DMA transfer failed"); + + return status; +} + +static int cadence_nand_force_byte_access(struct nand_chip *chip, + bool force_8bit) +{ + struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller); + int status; + + /* + * Callers of this function do not verify if the NAND is using a 16-bit + * or an 8-bit bus for normal operations, so we need to take care of that + * here by leaving the configuration unchanged if the NAND does not have + * the NAND_BUSWIDTH_16 flag set.
+ */ + if (!(chip->options & NAND_BUSWIDTH_16)) + return 0; + + status = cadence_nand_set_access_width16(cdns_ctrl, !force_8bit); + + return status; +} + +static int cadence_nand_cmd_opcode(struct nand_chip *chip, + const struct nand_subop *subop) +{ + struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller); + struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip); + const struct nand_op_instr *instr; + unsigned int op_id = 0; + u64 mini_ctrl_cmd = 0; + int ret; + + instr = &subop->instrs[op_id]; + + if (instr->delay_ns > 0) + mini_ctrl_cmd |= GCMD_LAY_TWB; + + mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INSTR, + GCMD_LAY_INSTR_CMD); + mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INPUT_CMD, + instr->ctx.cmd.opcode); + + ret = cadence_nand_generic_cmd_send(cdns_ctrl, + cdns_chip->cs[chip->cur_cs], + mini_ctrl_cmd); + if (ret) + dev_err(cdns_ctrl->dev, "send cmd %x failed\n", + instr->ctx.cmd.opcode); + + return ret; +} + +static int cadence_nand_cmd_address(struct nand_chip *chip, + const struct nand_subop *subop) +{ + struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller); + struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip); + const struct nand_op_instr *instr; + unsigned int op_id = 0; + u64 mini_ctrl_cmd = 0; + unsigned int offset, naddrs; + u64 address = 0; + const u8 *addrs; + int ret; + int i; + + instr = &subop->instrs[op_id]; + + if (instr->delay_ns > 0) + mini_ctrl_cmd |= GCMD_LAY_TWB; + + mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INSTR, + GCMD_LAY_INSTR_ADDR); + + offset = nand_subop_get_addr_start_off(subop, op_id); + naddrs = nand_subop_get_num_addr_cyc(subop, op_id); + addrs = &instr->ctx.addr.addrs[offset]; + + for (i = 0; i < naddrs; i++) + address |= (u64)addrs[i] << (8 * i); + + mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INPUT_ADDR, + address); + mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INPUT_ADDR_SIZE, + naddrs - 1); + + ret = cadence_nand_generic_cmd_send(cdns_ctrl, + cdns_chip->cs[chip->cur_cs], + mini_ctrl_cmd); + if (ret) + dev_err(cdns_ctrl->dev, "send address %llx failed\n", address); + + return ret; +} + +static int cadence_nand_cmd_erase(struct nand_chip *chip, + const struct nand_subop *subop) +{ + unsigned int op_id; + + if (subop->instrs[0].ctx.cmd.opcode == NAND_CMD_ERASE1) { + int i; + const struct nand_op_instr *instr = NULL; + unsigned int offset, naddrs; + const u8 *addrs; + u32 page = 0; + + instr = &subop->instrs[1]; + offset = nand_subop_get_addr_start_off(subop, 1); + naddrs = nand_subop_get_num_addr_cyc(subop, 1); + addrs = &instr->ctx.addr.addrs[offset]; + + for (i = 0; i < naddrs; i++) + page |= (u32)addrs[i] << (8 * i); + + return cadence_nand_erase(chip, page); + } + + /* + * If it is not an erase operation, handle the operation + * by calling the exec_op function.
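+ * Each remaining instruction is wrapped in a single-instruction + * nand_operation and replayed through the controller's exec_op hook.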
+ */ + for (op_id = 0; op_id < subop->ninstrs; op_id++) { + int ret; + const struct nand_operation nand_op = { + .cs = chip->cur_cs, + .instrs = &subop->instrs[op_id], + .ninstrs = 1}; + ret = chip->controller->ops->exec_op(chip, &nand_op, false); + if (ret) + return ret; + } + + return 0; +} + +static int cadence_nand_cmd_data(struct nand_chip *chip, + const struct nand_subop *subop) +{ + struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller); + struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip); + const struct nand_op_instr *instr; + unsigned int offset, op_id = 0; + u64 mini_ctrl_cmd = 0; + int len = 0; + int ret; + + instr = &subop->instrs[op_id]; + + if (instr->delay_ns > 0) + mini_ctrl_cmd |= GCMD_LAY_TWB; + + mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INSTR, + GCMD_LAY_INSTR_DATA); + + if (instr->type == NAND_OP_DATA_OUT_INSTR) + mini_ctrl_cmd |= FIELD_PREP(GCMD_DIR, + GCMD_DIR_WRITE); + + len = nand_subop_get_data_len(subop, op_id); + offset = nand_subop_get_data_start_off(subop, op_id); + mini_ctrl_cmd |= FIELD_PREP(GCMD_SECT_CNT, 1); + mini_ctrl_cmd |= FIELD_PREP(GCMD_LAST_SIZE, len); + if (instr->ctx.data.force_8bit) { + ret = cadence_nand_force_byte_access(chip, true); + if (ret) { + dev_err(cdns_ctrl->dev, + "cannot change byte access generic data cmd failed\n"); + return ret; + } + } + + ret = cadence_nand_generic_cmd_send(cdns_ctrl, + cdns_chip->cs[chip->cur_cs], + mini_ctrl_cmd); + if (ret) { + dev_err(cdns_ctrl->dev, "send generic data cmd failed\n"); + return ret; + } + + if (instr->type == NAND_OP_DATA_IN_INSTR) { + void *buf = instr->ctx.data.buf.in + offset; + + ret = cadence_nand_read_buf(cdns_ctrl, buf, len); + } else { + const void *buf = instr->ctx.data.buf.out + offset; + + ret = cadence_nand_write_buf(cdns_ctrl, buf, len); + } + + if (ret) { + dev_err(cdns_ctrl->dev, "data transfer failed for generic command\n"); + return ret; + } + + if (instr->ctx.data.force_8bit) { + ret = cadence_nand_force_byte_access(chip, false); + if (ret) { + dev_err(cdns_ctrl->dev, + "cannot change byte access generic data cmd failed\n"); + } + } + + return ret; +} + +static int cadence_nand_cmd_waitrdy(struct nand_chip *chip, + const struct nand_subop *subop) +{ + int status; + unsigned int op_id = 0; + struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller); + struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip); + const struct nand_op_instr *instr = &subop->instrs[op_id]; + u32 timeout_us = instr->ctx.waitrdy.timeout_ms * 1000; + + status = cadence_nand_wait_for_value(cdns_ctrl, RBN_SETINGS, + timeout_us, + BIT(cdns_chip->cs[chip->cur_cs]), + false); + return status; +} + +static const struct nand_op_parser cadence_nand_op_parser = NAND_OP_PARSER( + NAND_OP_PARSER_PATTERN( + cadence_nand_cmd_erase, + NAND_OP_PARSER_PAT_CMD_ELEM(false), + NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ERASE_ADDRESS_CYC), + NAND_OP_PARSER_PAT_CMD_ELEM(false), + NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)), + NAND_OP_PARSER_PATTERN( + cadence_nand_cmd_opcode, + NAND_OP_PARSER_PAT_CMD_ELEM(false)), + NAND_OP_PARSER_PATTERN( + cadence_nand_cmd_address, + NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYC)), + NAND_OP_PARSER_PATTERN( + cadence_nand_cmd_data, + NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, MAX_DATA_SIZE)), + NAND_OP_PARSER_PATTERN( + cadence_nand_cmd_data, + NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, MAX_DATA_SIZE)), + NAND_OP_PARSER_PATTERN( + cadence_nand_cmd_waitrdy, + NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)) + ); + +static int cadence_nand_exec_op(struct nand_chip 
*chip, + const struct nand_operation *op, + bool check_only) +{ + int status = cadence_nand_select_target(chip); + + if (status) + return status; + + return nand_op_parser_exec_op(chip, &cadence_nand_op_parser, op, + check_only); +} + +static int cadence_nand_ooblayout_free(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip); + + if (section) + return -ERANGE; + + oobregion->offset = cdns_chip->bbm_len; + oobregion->length = cdns_chip->avail_oob_size + - cdns_chip->bbm_len; + + return 0; +} + +static int cadence_nand_ooblayout_ecc(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip); + + if (section) + return -ERANGE; + + oobregion->offset = cdns_chip->avail_oob_size; + oobregion->length = chip->ecc.total; + + return 0; +} + +static const struct mtd_ooblayout_ops cadence_nand_ooblayout_ops = { + .free = cadence_nand_ooblayout_free, + .ecc = cadence_nand_ooblayout_ecc, +}; + +static int calc_cycl(u32 timing, u32 clock) +{ + if (timing == 0 || clock == 0) + return 0; + + if ((timing % clock) > 0) + return timing / clock; + else + return timing / clock - 1; +} + +/* Calculate max data valid window. */ +static inline u32 calc_tdvw_max(u32 trp_cnt, u32 clk_period, u32 trhoh_min, + u32 board_delay_skew_min, u32 ext_mode) +{ + if (ext_mode == 0) + clk_period /= 2; + + return (trp_cnt + 1) * clk_period + trhoh_min + + board_delay_skew_min; +} + +/* Calculate data valid window. */ +static inline u32 calc_tdvw(u32 trp_cnt, u32 clk_period, u32 trhoh_min, + u32 trea_max, u32 ext_mode) +{ + if (ext_mode == 0) + clk_period /= 2; + + return (trp_cnt + 1) * clk_period + trhoh_min - trea_max; +} + +static int +cadence_nand_setup_data_interface(struct nand_chip *chip, int chipnr, + const struct nand_data_interface *conf) +{ + const struct nand_sdr_timings *sdr; + struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller); + struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip); + struct cadence_nand_timings *t = &cdns_chip->timings; + u32 reg; + u32 board_delay = cdns_ctrl->board_delay; + u32 clk_period = DIV_ROUND_DOWN_ULL(1000000000000ULL, + cdns_ctrl->nf_clk_rate); + u32 tceh_cnt, tcs_cnt, tadl_cnt, tccs_cnt; + u32 tfeat_cnt, trhz_cnt, tvdly_cnt; + u32 trhw_cnt, twb_cnt, twh_cnt = 0, twhr_cnt; + u32 twp_cnt = 0, trp_cnt = 0, trh_cnt = 0; + u32 if_skew = cdns_ctrl->caps1->if_skew; + u32 board_delay_skew_min = board_delay - if_skew; + u32 board_delay_skew_max = board_delay + if_skew; + u32 dqs_sampl_res, phony_dqs_mod; + u32 tdvw, tdvw_min, tdvw_max; + u32 ext_rd_mode, ext_wr_mode; + u32 dll_phy_dqs_timing = 0, phony_dqs_timing = 0, rd_del_sel = 0; + u32 sampling_point; + + sdr = nand_get_sdr_timings(conf); + if (IS_ERR(sdr)) + return PTR_ERR(sdr); + + memset(t, 0, sizeof(*t)); + /* Sampling point calculation. */ + + if (cdns_ctrl->caps2.is_phy_type_dll) + phony_dqs_mod = 2; + else + phony_dqs_mod = 1; + + dqs_sampl_res = clk_period / phony_dqs_mod; + + tdvw_min = sdr->tREA_max + board_delay_skew_max; + /* + * The idea of these calculations is to get the optimum values + * for the tRP and tRH timings. If it is NOT possible to sample data + * with optimal tRP/tRH settings, the parameters will be extended. + * If clk_period is 50ns (the lowest value) this condition is met + * for asynchronous timing modes 1, 2, 3, 4 and 5.
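+ * (For instance, ONFI SDR timing mode 5 specifies tRC_min = 20 ns, + * tRP_min = 10 ns and tREH_min = 7 ns, which all fit in a 50 ns clock + * period.)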
+ * If clk_period is 20ns the condition is met only + * for asynchronous timing mode 5. + */ + if (sdr->tRC_min <= clk_period && + sdr->tRP_min <= (clk_period / 2) && + sdr->tREH_min <= (clk_period / 2)) { + /* Performance mode. */ + ext_rd_mode = 0; + tdvw = calc_tdvw(trp_cnt, clk_period, sdr->tRHOH_min, + sdr->tREA_max, ext_rd_mode); + tdvw_max = calc_tdvw_max(trp_cnt, clk_period, sdr->tRHOH_min, + board_delay_skew_min, + ext_rd_mode); + /* + * Check if the data valid window and a sampling point can be + * found, and that the point is not on the edge (i.e. we have + * hold margin). If not, extend the tRP timing. + */ + if (tdvw > 0) { + if (tdvw_max <= tdvw_min || + (tdvw_max % dqs_sampl_res) == 0) { + /* + * No valid sampling point, so the RE pulse + * needs to be widened by half a clock cycle. + */ + ext_rd_mode = 1; + } + } else { + /* + * There is no valid window in which to sample the + * data, so tRP needs to be widened. Very conservative + * calculations are performed here. + */ + trp_cnt = (sdr->tREA_max + board_delay_skew_max + + dqs_sampl_res) / clk_period; + ext_rd_mode = 1; + } + + } else { + /* Extended read mode. */ + u32 trh; + + ext_rd_mode = 1; + trp_cnt = calc_cycl(sdr->tRP_min, clk_period); + trh = sdr->tRC_min - ((trp_cnt + 1) * clk_period); + if (sdr->tREH_min >= trh) + trh_cnt = calc_cycl(sdr->tREH_min, clk_period); + else + trh_cnt = calc_cycl(trh, clk_period); + + tdvw = calc_tdvw(trp_cnt, clk_period, sdr->tRHOH_min, + sdr->tREA_max, ext_rd_mode); + /* + * Check if the data valid window and a sampling point can be + * found, or, if the point is at the edge, whether the previous + * one is valid; if not, extend the tRP timing. + */ + if (tdvw > 0) { + tdvw_max = calc_tdvw_max(trp_cnt, clk_period, + sdr->tRHOH_min, + board_delay_skew_min, + ext_rd_mode); + + if ((((tdvw_max / dqs_sampl_res) + * dqs_sampl_res) <= tdvw_min) || + (((tdvw_max % dqs_sampl_res) == 0) && + (((tdvw_max / dqs_sampl_res - 1) + * dqs_sampl_res) <= tdvw_min))) { + /* + * The data valid window is narrower than the + * sampling resolution and does not cover any + * sampling point; to make sure a sampling + * point is found, the RE low pulse width is + * extended by one clock cycle. + */ + trp_cnt = trp_cnt + 1; + } + } else { + /* + * There is no valid window in which to sample the + * data, so tRP needs to be widened. Very conservative + * calculations are performed here.
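+ * The fallback widens tRP so that it covers tREA_max plus the + * worst-case board skew plus one sampling-resolution step.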
*/ + trp_cnt = (sdr->tREA_max + board_delay_skew_max + + dqs_sampl_res) / clk_period; + } + } + + tdvw_max = calc_tdvw_max(trp_cnt, clk_period, + sdr->tRHOH_min, + board_delay_skew_min, ext_rd_mode); + + if (sdr->tWC_min <= clk_period && + (sdr->tWP_min + if_skew) <= (clk_period / 2) && + (sdr->tWH_min + if_skew) <= (clk_period / 2)) { + ext_wr_mode = 0; + } else { + u32 twh; + + ext_wr_mode = 1; + twp_cnt = calc_cycl(sdr->tWP_min + if_skew, clk_period); + if ((twp_cnt + 1) * clk_period < (sdr->tALS_min + if_skew)) + twp_cnt = calc_cycl(sdr->tALS_min + if_skew, + clk_period); + + twh = (sdr->tWC_min - (twp_cnt + 1) * clk_period); + if (sdr->tWH_min >= twh) + twh = sdr->tWH_min; + + twh_cnt = calc_cycl(twh + if_skew, clk_period); + } + + reg = FIELD_PREP(ASYNC_TOGGLE_TIMINGS_TRH, trh_cnt); + reg |= FIELD_PREP(ASYNC_TOGGLE_TIMINGS_TRP, trp_cnt); + reg |= FIELD_PREP(ASYNC_TOGGLE_TIMINGS_TWH, twh_cnt); + reg |= FIELD_PREP(ASYNC_TOGGLE_TIMINGS_TWP, twp_cnt); + t->async_toggle_timings = reg; + dev_dbg(cdns_ctrl->dev, "ASYNC_TOGGLE_TIMINGS_SDR\t%x\n", reg); + + tadl_cnt = calc_cycl((sdr->tADL_min + if_skew), clk_period); + tccs_cnt = calc_cycl((sdr->tCCS_min + if_skew), clk_period); + twhr_cnt = calc_cycl((sdr->tWHR_min + if_skew), clk_period); + trhw_cnt = calc_cycl((sdr->tRHW_min + if_skew), clk_period); + reg = FIELD_PREP(TIMINGS0_TADL, tadl_cnt); + + /* + * If the timing exceeds the delay field in the timing register, + * use the maximum value. + */ + if (FIELD_FIT(TIMINGS0_TCCS, tccs_cnt)) + reg |= FIELD_PREP(TIMINGS0_TCCS, tccs_cnt); + else + reg |= TIMINGS0_TCCS; + + reg |= FIELD_PREP(TIMINGS0_TWHR, twhr_cnt); + reg |= FIELD_PREP(TIMINGS0_TRHW, trhw_cnt); + t->timings0 = reg; + dev_dbg(cdns_ctrl->dev, "TIMINGS0_SDR\t%x\n", reg); + + /* The following is related to a single signal, so skew is not needed. */ + trhz_cnt = calc_cycl(sdr->tRHZ_max, clk_period); + trhz_cnt = trhz_cnt + 1; + twb_cnt = calc_cycl((sdr->tWB_max + board_delay), clk_period); + /* + * Because of the two-stage syncflop the value must be increased by + * 3 + 5: the first term is related to the synchronization stages, the + * second to the output interface delay. + */ + twb_cnt = twb_cnt + 3 + 5; + /* + * The following is related to the WE edge of the random data input + * sequence, so skew is not needed. + */ + tvdly_cnt = calc_cycl(500000 + if_skew, clk_period); + reg = FIELD_PREP(TIMINGS1_TRHZ, trhz_cnt); + reg |= FIELD_PREP(TIMINGS1_TWB, twb_cnt); + reg |= FIELD_PREP(TIMINGS1_TVDLY, tvdly_cnt); + t->timings1 = reg; + dev_dbg(cdns_ctrl->dev, "TIMINGS1_SDR\t%x\n", reg); + + tfeat_cnt = calc_cycl(sdr->tFEAT_max, clk_period); + if (tfeat_cnt < twb_cnt) + tfeat_cnt = twb_cnt; + + tceh_cnt = calc_cycl(sdr->tCEH_min, clk_period); + tcs_cnt = calc_cycl((sdr->tCS_min + if_skew), clk_period); + + reg = FIELD_PREP(TIMINGS2_TFEAT, tfeat_cnt); + reg |= FIELD_PREP(TIMINGS2_CS_HOLD_TIME, tceh_cnt); + reg |= FIELD_PREP(TIMINGS2_CS_SETUP_TIME, tcs_cnt); + t->timings2 = reg; + dev_dbg(cdns_ctrl->dev, "TIMINGS2_SDR\t%x\n", reg); + + if (cdns_ctrl->caps2.is_phy_type_dll) { + reg = DLL_PHY_CTRL_DLL_RST_N; + if (ext_wr_mode) + reg |= DLL_PHY_CTRL_EXTENDED_WR_MODE; + if (ext_rd_mode) + reg |= DLL_PHY_CTRL_EXTENDED_RD_MODE; + + reg |= FIELD_PREP(DLL_PHY_CTRL_RS_HIGH_WAIT_CNT, 7); + reg |= FIELD_PREP(DLL_PHY_CTRL_RS_IDLE_CNT, 7); + t->dll_phy_ctrl = reg; + dev_dbg(cdns_ctrl->dev, "DLL_PHY_CTRL_SDR\t%x\n", reg); + } + + /* Sampling point calculation.
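+ * The sampling point is put on the last sampling-resolution boundary + * below tdvw_max and then checked against the start of the window.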
*/ + if ((tdvw_max % dqs_sampl_res) > 0) + sampling_point = tdvw_max / dqs_sampl_res; + else + sampling_point = (tdvw_max / dqs_sampl_res - 1); + + if (sampling_point * dqs_sampl_res > tdvw_min) { + dll_phy_dqs_timing = + FIELD_PREP(PHY_DQS_TIMING_DQS_SEL_OE_END, 4); + dll_phy_dqs_timing |= PHY_DQS_TIMING_USE_PHONY_DQS; + phony_dqs_timing = sampling_point / phony_dqs_mod; + + if ((sampling_point % 2) > 0) { + dll_phy_dqs_timing |= PHY_DQS_TIMING_PHONY_DQS_SEL; + if ((tdvw_max % dqs_sampl_res) == 0) + /* + * Calculation for a sampling point that lies + * at the edge of the data window and is an + * odd number. + */ + phony_dqs_timing = (tdvw_max / dqs_sampl_res) + / phony_dqs_mod - 1; + + if (!cdns_ctrl->caps2.is_phy_type_dll) + phony_dqs_timing--; + + } else { + phony_dqs_timing--; + } + rd_del_sel = phony_dqs_timing + 3; + } else { + dev_warn(cdns_ctrl->dev, + "ERROR : cannot find valid sampling point\n"); + } + + reg = FIELD_PREP(PHY_CTRL_PHONY_DQS, phony_dqs_timing); + if (cdns_ctrl->caps2.is_phy_type_dll) + reg |= PHY_CTRL_SDR_DQS; + t->phy_ctrl = reg; + dev_dbg(cdns_ctrl->dev, "PHY_CTRL_REG_SDR\t%x\n", reg); + + if (cdns_ctrl->caps2.is_phy_type_dll) { + dev_dbg(cdns_ctrl->dev, "PHY_TSEL_REG_SDR\t%x\n", 0); + dev_dbg(cdns_ctrl->dev, "PHY_DQ_TIMING_REG_SDR\t%x\n", 2); + dev_dbg(cdns_ctrl->dev, "PHY_DQS_TIMING_REG_SDR\t%x\n", + dll_phy_dqs_timing); + t->phy_dqs_timing = dll_phy_dqs_timing; + + reg = FIELD_PREP(PHY_GATE_LPBK_CTRL_RDS, rd_del_sel); + dev_dbg(cdns_ctrl->dev, "PHY_GATE_LPBK_CTRL_REG_SDR\t%x\n", + reg); + t->phy_gate_lpbk_ctrl = reg; + + dev_dbg(cdns_ctrl->dev, "PHY_DLL_MASTER_CTRL_REG_SDR\t%lx\n", + PHY_DLL_MASTER_CTRL_BYPASS_MODE); + dev_dbg(cdns_ctrl->dev, "PHY_DLL_SLAVE_CTRL_REG_SDR\t%x\n", 0); + } + + return 0; +} + +int cadence_nand_attach_chip(struct nand_chip *chip) +{ + struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller); + struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip); + u32 ecc_size = cdns_chip->sector_count * chip->ecc.bytes; + struct mtd_info *mtd = nand_to_mtd(chip); + u32 max_oob_data_size; + int ret; + + if (chip->options & NAND_BUSWIDTH_16) { + ret = cadence_nand_set_access_width16(cdns_ctrl, true); + if (ret) + return ret; + } + + chip->bbt_options |= NAND_BBT_USE_FLASH; + chip->bbt_options |= NAND_BBT_NO_OOB; + chip->ecc.mode = NAND_ECC_HW; + + chip->options |= NAND_NO_SUBPAGE_WRITE; + + cdns_chip->bbm_offs = chip->badblockpos; + if (chip->options & NAND_BUSWIDTH_16) { + cdns_chip->bbm_offs &= ~0x01; + cdns_chip->bbm_len = 2; + } else { + cdns_chip->bbm_len = 1; + } + + ret = nand_ecc_choose_conf(chip, + &cdns_ctrl->ecc_caps, + mtd->oobsize - cdns_chip->bbm_len); + if (ret) { + dev_err(cdns_ctrl->dev, "ECC configuration failed\n"); + return ret; + } + + dev_dbg(cdns_ctrl->dev, + "chosen ECC settings: step=%d, strength=%d, bytes=%d\n", + chip->ecc.size, chip->ecc.strength, chip->ecc.bytes); + + /* Error correction configuration.
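+ * The page is split into ECC sectors; whatever OOB space is left after + * ECC and the bad block marker is exposed as user-available OOB.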
*/ + cdns_chip->sector_size = chip->ecc.size; + cdns_chip->sector_count = mtd->writesize / cdns_chip->sector_size; + + cdns_chip->avail_oob_size = mtd->oobsize - ecc_size; + + max_oob_data_size = MAX_OOB_SIZE_PER_SECTOR; + + if (cdns_chip->avail_oob_size > max_oob_data_size) + cdns_chip->avail_oob_size = max_oob_data_size; + + if ((cdns_chip->avail_oob_size + cdns_chip->bbm_len + ecc_size) + > mtd->oobsize) + cdns_chip->avail_oob_size -= 4; + + ret = cadence_nand_get_ecc_strength_idx(cdns_ctrl, chip->ecc.strength); + if (ret < 0) + return -EINVAL; + + cdns_chip->corr_str_idx = (u8)ret; + + if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS, + 1000000, + CTRL_STATUS_CTRL_BUSY, true)) + return -ETIMEDOUT; + + cadence_nand_set_ecc_strength(cdns_ctrl, + cdns_chip->corr_str_idx); + + cadence_nand_set_erase_detection(cdns_ctrl, true, + chip->ecc.strength); + + /* Override the default read operations. */ + chip->ecc.read_page = cadence_nand_read_page; + chip->ecc.read_page_raw = cadence_nand_read_page_raw; + chip->ecc.write_page = cadence_nand_write_page; + chip->ecc.write_page_raw = cadence_nand_write_page_raw; + chip->ecc.read_oob = cadence_nand_read_oob; + chip->ecc.write_oob = cadence_nand_write_oob; + chip->ecc.read_oob_raw = cadence_nand_read_oob_raw; + chip->ecc.write_oob_raw = cadence_nand_write_oob_raw; + + if ((mtd->writesize + mtd->oobsize) > cdns_ctrl->buf_size) + cdns_ctrl->buf_size = mtd->writesize + mtd->oobsize; + + /* Is 32-bit DMA supported? */ + ret = dma_set_mask(cdns_ctrl->dev, DMA_BIT_MASK(32)); + if (ret) { + dev_err(cdns_ctrl->dev, "no usable DMA configuration\n"); + return ret; + } + + mtd_set_ooblayout(mtd, &cadence_nand_ooblayout_ops); + + return 0; +} + +static const struct nand_controller_ops cadence_nand_controller_ops = { + .attach_chip = cadence_nand_attach_chip, + .exec_op = cadence_nand_exec_op, + .setup_data_interface = cadence_nand_setup_data_interface, +}; + +static int cadence_nand_chip_init(struct cdns_nand_ctrl *cdns_ctrl, + struct device_node *np) +{ + struct cdns_nand_chip *cdns_chip; + struct mtd_info *mtd; + struct nand_chip *chip; + int nsels, ret, i; + u32 cs; + + nsels = of_property_count_elems_of_size(np, "reg", sizeof(u32)); + if (nsels <= 0) { + dev_err(cdns_ctrl->dev, "missing/invalid reg property\n"); + return -EINVAL; + } + + /* Allocate the nand chip structure. */ + cdns_chip = devm_kzalloc(cdns_ctrl->dev, sizeof(*cdns_chip) + + (nsels * sizeof(u8)), + GFP_KERNEL); + if (!cdns_chip) { + dev_err(cdns_ctrl->dev, "could not allocate chip structure\n"); + return -ENOMEM; + } + + cdns_chip->nsels = nsels; + + for (i = 0; i < nsels; i++) { + /* Retrieve CS id. */ + ret = of_property_read_u32_index(np, "reg", i, &cs); + if (ret) { + dev_err(cdns_ctrl->dev, + "could not retrieve reg property: %d\n", + ret); + return ret; + } + + if (cs >= cdns_ctrl->caps2.max_banks) { + dev_err(cdns_ctrl->dev, + "invalid reg value: %u (max CS = %d)\n", + cs, cdns_ctrl->caps2.max_banks); + return -EINVAL; + } + + if (test_and_set_bit(cs, &cdns_ctrl->assigned_cs)) { + dev_err(cdns_ctrl->dev, + "CS %d already assigned\n", cs); + return -EINVAL; + } + + cdns_chip->cs[i] = cs; + } + + chip = &cdns_chip->chip; + chip->controller = &cdns_ctrl->controller; + nand_set_flash_node(chip, np); + + mtd = nand_to_mtd(chip); + mtd->dev.parent = cdns_ctrl->dev; + + /* + * Default to HW ECC engine mode. If the nand-ecc-mode property is given + * in the DT node, this entry will be overwritten in nand_scan_ident(). 
+ */ + chip->ecc.mode = NAND_ECC_HW; + + ret = nand_scan(chip, cdns_chip->nsels); + if (ret) { + dev_err(cdns_ctrl->dev, "could not scan the nand chip\n"); + return ret; + } + + ret = mtd_device_register(mtd, NULL, 0); + if (ret) { + dev_err(cdns_ctrl->dev, + "failed to register mtd device: %d\n", ret); + nand_cleanup(chip); + return ret; + } + + list_add_tail(&cdns_chip->node, &cdns_ctrl->chips); + + return 0; +} + +static void cadence_nand_chips_cleanup(struct cdns_nand_ctrl *cdns_ctrl) +{ + struct cdns_nand_chip *entry, *temp; + + list_for_each_entry_safe(entry, temp, &cdns_ctrl->chips, node) { + nand_release(&entry->chip); + list_del(&entry->node); + } +} + +static int cadence_nand_chips_init(struct cdns_nand_ctrl *cdns_ctrl) +{ + struct device_node *np = cdns_ctrl->dev->of_node; + struct device_node *nand_np; + int max_cs = cdns_ctrl->caps2.max_banks; + int nchips, ret; + + nchips = of_get_child_count(np); + + if (nchips > max_cs) { + dev_err(cdns_ctrl->dev, + "too many NAND chips: %d (max = %d CS)\n", + nchips, max_cs); + return -EINVAL; + } + + for_each_child_of_node(np, nand_np) { + ret = cadence_nand_chip_init(cdns_ctrl, nand_np); + if (ret) { + of_node_put(nand_np); + cadence_nand_chips_cleanup(cdns_ctrl); + return ret; + } + } + + return 0; +} + +static void +cadence_nand_irq_cleanup(int irqnum, struct cdns_nand_ctrl *cdns_ctrl) +{ + /* Disable interrupts. */ + writel_relaxed(INTR_ENABLE_INTR_EN, cdns_ctrl->reg + INTR_ENABLE); +} + +static int cadence_nand_init(struct cdns_nand_ctrl *cdns_ctrl) +{ + dma_cap_mask_t mask; + int ret; + + cdns_ctrl->cdma_desc = dma_alloc_coherent(cdns_ctrl->dev, + sizeof(*cdns_ctrl->cdma_desc), + &cdns_ctrl->dma_cdma_desc, + GFP_KERNEL); + if (!cdns_ctrl->dma_cdma_desc) + return -ENOMEM; + + cdns_ctrl->buf_size = SZ_16K; + cdns_ctrl->buf = kmalloc(cdns_ctrl->buf_size, GFP_KERNEL); + if (!cdns_ctrl->buf) { + ret = -ENOMEM; + goto free_buf_desc; + } + + if (devm_request_irq(cdns_ctrl->dev, cdns_ctrl->irq, cadence_nand_isr, + IRQF_SHARED, "cadence-nand-controller", + cdns_ctrl)) { + dev_err(cdns_ctrl->dev, "Unable to allocate IRQ\n"); + ret = -ENODEV; + goto free_buf; + } + + spin_lock_init(&cdns_ctrl->irq_lock); + init_completion(&cdns_ctrl->complete); + + ret = cadence_nand_hw_init(cdns_ctrl); + if (ret) + goto disable_irq; + + dma_cap_zero(mask); + dma_cap_set(DMA_MEMCPY, mask); + + if (cdns_ctrl->caps1->has_dma) { + cdns_ctrl->dmac = dma_request_channel(mask, NULL, NULL); + if (!cdns_ctrl->dmac) { + dev_err(cdns_ctrl->dev, + "Unable to get a DMA channel\n"); + ret = -EBUSY; + goto disable_irq; + } + } + + nand_controller_init(&cdns_ctrl->controller); + INIT_LIST_HEAD(&cdns_ctrl->chips); + + cdns_ctrl->controller.ops = &cadence_nand_controller_ops; + cdns_ctrl->curr_corr_str_idx = 0xFF; + + ret = cadence_nand_chips_init(cdns_ctrl); + if (ret) { + dev_err(cdns_ctrl->dev, "Failed to register MTD: %d\n", + ret); + goto dma_release_chnl; + } + + kfree(cdns_ctrl->buf); + cdns_ctrl->buf = kzalloc(cdns_ctrl->buf_size, GFP_KERNEL); + if (!cdns_ctrl->buf) { + ret = -ENOMEM; + goto dma_release_chnl; + } + + return 0; + +dma_release_chnl: + if (cdns_ctrl->dmac) + dma_release_channel(cdns_ctrl->dmac); + +disable_irq: + cadence_nand_irq_cleanup(cdns_ctrl->irq, cdns_ctrl); + +free_buf: + kfree(cdns_ctrl->buf); + +free_buf_desc: + dma_free_coherent(cdns_ctrl->dev, sizeof(struct cadence_nand_cdma_desc), + cdns_ctrl->cdma_desc, cdns_ctrl->dma_cdma_desc); + + return ret; +} + +/* Driver exit point. 
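+ * Releases everything acquired in cadence_nand_init().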
*/ +static void cadence_nand_remove(struct cdns_nand_ctrl *cdns_ctrl) +{ + cadence_nand_chips_cleanup(cdns_ctrl); + cadence_nand_irq_cleanup(cdns_ctrl->irq, cdns_ctrl); + kfree(cdns_ctrl->buf); + dma_free_coherent(cdns_ctrl->dev, sizeof(struct cadence_nand_cdma_desc), + cdns_ctrl->cdma_desc, cdns_ctrl->dma_cdma_desc); + + if (cdns_ctrl->dmac) + dma_release_channel(cdns_ctrl->dmac); +} + +struct cadence_nand_dt { + struct cdns_nand_ctrl cdns_ctrl; + struct clk *clk; +}; + +static const struct cadence_nand_dt_devdata cadence_nand_default = { + .if_skew = 0, + .has_dma = 1, +}; + +static const struct of_device_id cadence_nand_dt_ids[] = { + { + .compatible = "cdns,hp-nfc", + .data = &cadence_nand_default + }, {} +}; + +MODULE_DEVICE_TABLE(of, cadence_nand_dt_ids); + +static int cadence_nand_dt_probe(struct platform_device *ofdev) +{ + struct resource *res; + struct cadence_nand_dt *dt; + struct cdns_nand_ctrl *cdns_ctrl; + int ret; + const struct of_device_id *of_id; + const struct cadence_nand_dt_devdata *devdata; + u32 val; + + of_id = of_match_device(cadence_nand_dt_ids, &ofdev->dev); + if (of_id) { + ofdev->id_entry = of_id->data; + devdata = of_id->data; + } else { + pr_err("Failed to find the right device id.\n"); + return -ENOMEM; + } + + dt = devm_kzalloc(&ofdev->dev, sizeof(*dt), GFP_KERNEL); + if (!dt) + return -ENOMEM; + + cdns_ctrl = &dt->cdns_ctrl; + cdns_ctrl->caps1 = devdata; + + cdns_ctrl->dev = &ofdev->dev; + cdns_ctrl->irq = platform_get_irq(ofdev, 0); + if (cdns_ctrl->irq < 0) + return cdns_ctrl->irq; + + dev_info(cdns_ctrl->dev, "IRQ: nr %d\n", cdns_ctrl->irq); + + cdns_ctrl->reg = devm_platform_ioremap_resource(ofdev, 0); + if (IS_ERR(cdns_ctrl->reg)) { + dev_err(&ofdev->dev, "devm_ioremap_resource res 0 failed\n"); + return PTR_ERR(cdns_ctrl->reg); + } + + res = platform_get_resource(ofdev, IORESOURCE_MEM, 1); + cdns_ctrl->io.dma = res->start; + cdns_ctrl->io.virt = devm_ioremap_resource(&ofdev->dev, res); + if (IS_ERR(cdns_ctrl->io.virt)) { + dev_err(cdns_ctrl->dev, "devm_ioremap_resource res 1 failed\n"); + return PTR_ERR(cdns_ctrl->io.virt); + } + + dt->clk = devm_clk_get(cdns_ctrl->dev, "nf_clk"); + if (IS_ERR(dt->clk)) + return PTR_ERR(dt->clk); + + cdns_ctrl->nf_clk_rate = clk_get_rate(dt->clk); + + ret = of_property_read_u32(ofdev->dev.of_node, + "cdns,board-delay-ps", &val); + if (ret) { + val = 4830; + dev_info(cdns_ctrl->dev, + "missing cdns,board-delay-ps property, %d was set\n", + val); + } + cdns_ctrl->board_delay = val; + + ret = cadence_nand_init(cdns_ctrl); + if (ret) + return ret; + + platform_set_drvdata(ofdev, dt); + return 0; +} + +static int cadence_nand_dt_remove(struct platform_device *ofdev) +{ + struct cadence_nand_dt *dt = platform_get_drvdata(ofdev); + + cadence_nand_remove(&dt->cdns_ctrl); + + return 0; +} + +static struct platform_driver cadence_nand_dt_driver = { + .probe = cadence_nand_dt_probe, + .remove = cadence_nand_dt_remove, + .driver = { + .name = "cadence-nand-controller", + .of_match_table = cadence_nand_dt_ids, + }, +}; + +module_platform_driver(cadence_nand_dt_driver); + +MODULE_AUTHOR("Piotr Sroka <piotrs@cadence.com>"); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Driver for Cadence NAND flash controller"); + diff --git a/drivers/mtd/nand/raw/denali_dt.c b/drivers/mtd/nand/raw/denali_dt.c index 5e14836f6bd5..8b779a899dcf 100644 --- a/drivers/mtd/nand/raw/denali_dt.c +++ b/drivers/mtd/nand/raw/denali_dt.c @@ -102,47 +102,6 @@ static int denali_dt_chip_init(struct denali_controller *denali, return denali_chip_init(denali, 
dchip); } -/* Backward compatibility for old platforms */ -static int denali_dt_legacy_chip_init(struct denali_controller *denali) -{ - struct denali_chip *dchip; - int nsels, i; - - nsels = denali->nbanks; - - dchip = devm_kzalloc(denali->dev, struct_size(dchip, sels, nsels), - GFP_KERNEL); - if (!dchip) - return -ENOMEM; - - dchip->nsels = nsels; - - for (i = 0; i < nsels; i++) - dchip->sels[i].bank = i; - - nand_set_flash_node(&dchip->chip, denali->dev->of_node); - - return denali_chip_init(denali, dchip); -} - -/* - * Check the DT binding. - * The new binding expects chip subnodes in the controller node. - * So, #address-cells = <1>; #size-cells = <0>; are required. - * Check the #size-cells to distinguish the binding. - */ -static bool denali_dt_is_legacy_binding(struct device_node *np) -{ - u32 cells; - int ret; - - ret = of_property_read_u32(np, "#size-cells", &cells); - if (ret) - return true; - - return cells != 0; -} - static int denali_dt_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -167,10 +126,8 @@ static int denali_dt_probe(struct platform_device *pdev) denali->dev = dev; denali->irq = platform_get_irq(pdev, 0); - if (denali->irq < 0) { - dev_err(dev, "no irq defined\n"); + if (denali->irq < 0) return denali->irq; - } res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "denali_reg"); denali->reg = devm_ioremap_resource(dev, res); @@ -213,17 +170,11 @@ static int denali_dt_probe(struct platform_device *pdev) if (ret) goto out_disable_clk_ecc; - if (denali_dt_is_legacy_binding(dev->of_node)) { - ret = denali_dt_legacy_chip_init(denali); - if (ret) + for_each_child_of_node(dev->of_node, np) { + ret = denali_dt_chip_init(denali, np); + if (ret) { + of_node_put(np); goto out_remove_denali; - } else { - for_each_child_of_node(dev->of_node, np) { - ret = denali_dt_chip_init(denali, np); - if (ret) { - of_node_put(np); - goto out_remove_denali; - } } } diff --git a/drivers/mtd/nand/raw/hisi504_nand.c b/drivers/mtd/nand/raw/hisi504_nand.c index 6a4626a8bf95..0b48be54ba6f 100644 --- a/drivers/mtd/nand/raw/hisi504_nand.c +++ b/drivers/mtd/nand/raw/hisi504_nand.c @@ -751,10 +751,8 @@ static int hisi_nfc_probe(struct platform_device *pdev) mtd = nand_to_mtd(chip); irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(dev, "no IRQ resource defined\n"); + if (irq < 0) return -ENXIO; - } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); host->iobase = devm_ioremap_resource(dev, res); diff --git a/drivers/mtd/nand/raw/lpc32xx_mlc.c b/drivers/mtd/nand/raw/lpc32xx_mlc.c index 78b31f845c50..241b58b83240 100644 --- a/drivers/mtd/nand/raw/lpc32xx_mlc.c +++ b/drivers/mtd/nand/raw/lpc32xx_mlc.c @@ -773,7 +773,6 @@ static int lpc32xx_nand_probe(struct platform_device *pdev) host->irq = platform_get_irq(pdev, 0); if (host->irq < 0) { - dev_err(&pdev->dev, "failed to get platform irq\n"); res = -EINVAL; goto release_dma_chan; } diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c index fc49e13d81ec..fb5abdcfb007 100644 --- a/drivers/mtd/nand/raw/marvell_nand.c +++ b/drivers/mtd/nand/raw/marvell_nand.c @@ -2862,10 +2862,8 @@ static int marvell_nfc_probe(struct platform_device *pdev) return PTR_ERR(nfc->regs); irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(dev, "failed to retrieve irq\n"); + if (irq < 0) return irq; - } nfc->core_clk = devm_clk_get(&pdev->dev, "core"); diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c index 1b82b687e5a5..9f17b5b8efbf 100644 --- 
a/drivers/mtd/nand/raw/meson_nand.c +++ b/drivers/mtd/nand/raw/meson_nand.c @@ -1399,10 +1399,8 @@ static int meson_nfc_probe(struct platform_device *pdev) } irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(dev, "no NFC IRQ resource\n"); + if (irq < 0) return -EINVAL; - } ret = meson_nfc_clk_init(nfc); if (ret) { diff --git a/drivers/mtd/nand/raw/mtk_ecc.c b/drivers/mtd/nand/raw/mtk_ecc.c index 74595b644b7c..75f1fa3d4d35 100644 --- a/drivers/mtd/nand/raw/mtk_ecc.c +++ b/drivers/mtd/nand/raw/mtk_ecc.c @@ -527,10 +527,8 @@ static int mtk_ecc_probe(struct platform_device *pdev) } irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(dev, "failed to get irq: %d\n", irq); + if (irq < 0) return irq; - } ret = dma_set_mask(dev, DMA_BIT_MASK(32)); if (ret) { diff --git a/drivers/mtd/nand/raw/mtk_nand.c b/drivers/mtd/nand/raw/mtk_nand.c index 373d47d1ba4c..b8305e39ab51 100644 --- a/drivers/mtd/nand/raw/mtk_nand.c +++ b/drivers/mtd/nand/raw/mtk_nand.c @@ -1540,7 +1540,6 @@ static int mtk_nfc_probe(struct platform_device *pdev) irq = platform_get_irq(pdev, 0); if (irq < 0) { - dev_err(dev, "no nfi irq resource\n"); ret = -EINVAL; goto clk_disable; } diff --git a/drivers/mtd/nand/raw/mxic_nand.c b/drivers/mtd/nand/raw/mxic_nand.c index 9d49e6c845e1..ed7a4e021bf5 100644 --- a/drivers/mtd/nand/raw/mxic_nand.c +++ b/drivers/mtd/nand/raw/mxic_nand.c @@ -524,10 +524,8 @@ static int mxic_nfc_probe(struct platform_device *pdev) nand_chip->controller = &nfc->controller; irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(&pdev->dev, "failed to retrieve irq\n"); + if (irq < 0) return irq; - } mxic_nfc_hw_init(nfc); diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index 5c2c30a7dffa..f64e3b6605c6 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -292,12 +292,16 @@ int nand_bbm_get_next_page(struct nand_chip *chip, int page) struct mtd_info *mtd = nand_to_mtd(chip); int last_page = ((mtd->erasesize - mtd->writesize) >> chip->page_shift) & chip->pagemask; + unsigned int bbm_flags = NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE + | NAND_BBM_LASTPAGE; + if (page == 0 && !(chip->options & bbm_flags)) + return 0; if (page == 0 && chip->options & NAND_BBM_FIRSTPAGE) return 0; - else if (page <= 1 && chip->options & NAND_BBM_SECONDPAGE) + if (page <= 1 && chip->options & NAND_BBM_SECONDPAGE) return 1; - else if (page <= last_page && chip->options & NAND_BBM_LASTPAGE) + if (page <= last_page && chip->options & NAND_BBM_LASTPAGE) return last_page; return -EINVAL; diff --git a/drivers/mtd/nand/raw/nand_micron.c b/drivers/mtd/nand/raw/nand_micron.c index 8ca9fad6e6ad..56654030ec7f 100644 --- a/drivers/mtd/nand/raw/nand_micron.c +++ b/drivers/mtd/nand/raw/nand_micron.c @@ -446,8 +446,10 @@ static int micron_nand_init(struct nand_chip *chip) if (ret) goto err_free_manuf_data; + chip->options |= NAND_BBM_FIRSTPAGE; + if (mtd->writesize == 2048) - chip->options |= NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE; + chip->options |= NAND_BBM_SECONDPAGE; ondie = micron_supports_on_die_ecc(chip); diff --git a/drivers/mtd/nand/raw/omap2.c b/drivers/mtd/nand/raw/omap2.c index 6ec65f48501c..ad77c112a78a 100644 --- a/drivers/mtd/nand/raw/omap2.c +++ b/drivers/mtd/nand/raw/omap2.c @@ -1967,10 +1967,8 @@ static int omap_nand_attach_chip(struct nand_chip *chip) case NAND_OMAP_PREFETCH_IRQ: info->gpmc_irq_fifo = platform_get_irq(info->pdev, 0); - if (info->gpmc_irq_fifo <= 0) { - dev_err(dev, "Error getting fifo IRQ\n"); + if (info->gpmc_irq_fifo <= 0) 
return -ENODEV; - } err = devm_request_irq(dev, info->gpmc_irq_fifo, omap_nand_irq, IRQF_SHARED, "gpmc-nand-fifo", info); @@ -1982,10 +1980,8 @@ static int omap_nand_attach_chip(struct nand_chip *chip) } info->gpmc_irq_count = platform_get_irq(info->pdev, 1); - if (info->gpmc_irq_count <= 0) { - dev_err(dev, "Error getting IRQ count\n"); + if (info->gpmc_irq_count <= 0) return -ENODEV; - } err = devm_request_irq(dev, info->gpmc_irq_count, omap_nand_irq, IRQF_SHARED, "gpmc-nand-count", info); diff --git a/drivers/mtd/nand/raw/sh_flctl.c b/drivers/mtd/nand/raw/sh_flctl.c index e509c93737c4..058e99d0cbcf 100644 --- a/drivers/mtd/nand/raw/sh_flctl.c +++ b/drivers/mtd/nand/raw/sh_flctl.c @@ -1129,10 +1129,8 @@ static int flctl_probe(struct platform_device *pdev) flctl->fifo = res->start + 0x24; /* FLDTFIFO */ irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(&pdev->dev, "failed to get flste irq data: %d\n", irq); + if (irq < 0) return irq; - } ret = devm_request_irq(&pdev->dev, irq, flctl_handle_flste, IRQF_SHARED, "flste", flctl); diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c index 8cc852dc7d54..9e63800f768a 100644 --- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c +++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c @@ -1880,11 +1880,8 @@ static int stm32_fmc2_probe(struct platform_device *pdev) } irq = platform_get_irq(pdev, 0); - if (irq < 0) { - if (irq != -EPROBE_DEFER) - dev_err(dev, "IRQ error missing or invalid\n"); + if (irq < 0) return irq; - } ret = devm_request_irq(dev, irq, stm32_fmc2_irq, 0, dev_name(dev), fmc2); diff --git a/drivers/mtd/nand/raw/sunxi_nand.c b/drivers/mtd/nand/raw/sunxi_nand.c index 89773293c64d..37a4ac0dd85b 100644 --- a/drivers/mtd/nand/raw/sunxi_nand.c +++ b/drivers/mtd/nand/raw/sunxi_nand.c @@ -2071,10 +2071,8 @@ static int sunxi_nfc_probe(struct platform_device *pdev) return PTR_ERR(nfc->regs); irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(dev, "failed to retrieve irq\n"); + if (irq < 0) return irq; - } nfc->ahb_clk = devm_clk_get(dev, "ahb"); if (IS_ERR(nfc->ahb_clk)) { diff --git a/drivers/mtd/spi-nor/aspeed-smc.c b/drivers/mtd/spi-nor/aspeed-smc.c index 009c1da8574c..2b7cabbb680c 100644 --- a/drivers/mtd/spi-nor/aspeed-smc.c +++ b/drivers/mtd/spi-nor/aspeed-smc.c @@ -320,7 +320,8 @@ static void aspeed_smc_unprep(struct spi_nor *nor, enum spi_nor_ops ops) mutex_unlock(&chip->controller->mutex); } -static int aspeed_smc_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len) +static int aspeed_smc_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, + size_t len) { struct aspeed_smc_chip *chip = nor->priv; @@ -331,8 +332,8 @@ static int aspeed_smc_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len) return 0; } -static int aspeed_smc_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, - int len) +static int aspeed_smc_write_reg(struct spi_nor *nor, u8 opcode, const u8 *buf, + size_t len) { struct aspeed_smc_chip *chip = nor->priv; @@ -746,6 +747,15 @@ static int aspeed_smc_chip_setup_finish(struct aspeed_smc_chip *chip) return 0; } +static const struct spi_nor_controller_ops aspeed_smc_controller_ops = { + .prepare = aspeed_smc_prep, + .unprepare = aspeed_smc_unprep, + .read_reg = aspeed_smc_read_reg, + .write_reg = aspeed_smc_write_reg, + .read = aspeed_smc_read_user, + .write = aspeed_smc_write_user, +}; + static int aspeed_smc_setup_flash(struct aspeed_smc_controller *controller, struct device_node *np, struct resource *r) { @@ -805,12 +815,7 @@ static int aspeed_smc_setup_flash(struct 
aspeed_smc_controller *controller, nor->dev = dev; nor->priv = chip; spi_nor_set_flash_node(nor, child); - nor->read = aspeed_smc_read_user; - nor->write = aspeed_smc_write_user; - nor->read_reg = aspeed_smc_read_reg; - nor->write_reg = aspeed_smc_write_reg; - nor->prepare = aspeed_smc_prep; - nor->unprepare = aspeed_smc_unprep; + nor->controller_ops = &aspeed_smc_controller_ops; ret = aspeed_smc_chip_setup_init(chip, r); if (ret) diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c index 7bef63947b29..06f997247d0f 100644 --- a/drivers/mtd/spi-nor/cadence-quadspi.c +++ b/drivers/mtd/spi-nor/cadence-quadspi.c @@ -285,7 +285,7 @@ static irqreturn_t cqspi_irq_handler(int this_irq, void *dev) return IRQ_HANDLED; } -static unsigned int cqspi_calc_rdreg(struct spi_nor *nor, const u8 opcode) +static unsigned int cqspi_calc_rdreg(struct spi_nor *nor) { struct cqspi_flash_pdata *f_pdata = nor->priv; u32 rdreg = 0; @@ -354,27 +354,27 @@ static int cqspi_exec_flash_cmd(struct cqspi_st *cqspi, unsigned int reg) return cqspi_wait_idle(cqspi); } -static int cqspi_command_read(struct spi_nor *nor, - const u8 *txbuf, const unsigned n_tx, - u8 *rxbuf, const unsigned n_rx) +static int cqspi_command_read(struct spi_nor *nor, u8 opcode, + u8 *rxbuf, size_t n_rx) { struct cqspi_flash_pdata *f_pdata = nor->priv; struct cqspi_st *cqspi = f_pdata->cqspi; void __iomem *reg_base = cqspi->iobase; unsigned int rdreg; unsigned int reg; - unsigned int read_len; + size_t read_len; int status; if (!n_rx || n_rx > CQSPI_STIG_DATA_LEN_MAX || !rxbuf) { - dev_err(nor->dev, "Invalid input argument, len %d rxbuf 0x%p\n", + dev_err(nor->dev, + "Invalid input argument, len %zu rxbuf 0x%p\n", n_rx, rxbuf); return -EINVAL; } - reg = txbuf[0] << CQSPI_REG_CMDCTRL_OPCODE_LSB; + reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB; - rdreg = cqspi_calc_rdreg(nor, txbuf[0]); + rdreg = cqspi_calc_rdreg(nor); writel(rdreg, reg_base + CQSPI_REG_RD_INSTR); reg |= (0x1 << CQSPI_REG_CMDCTRL_RD_EN_LSB); @@ -404,19 +404,19 @@ static int cqspi_command_read(struct spi_nor *nor, } static int cqspi_command_write(struct spi_nor *nor, const u8 opcode, - const u8 *txbuf, const unsigned n_tx) + const u8 *txbuf, size_t n_tx) { struct cqspi_flash_pdata *f_pdata = nor->priv; struct cqspi_st *cqspi = f_pdata->cqspi; void __iomem *reg_base = cqspi->iobase; unsigned int reg; unsigned int data; - u32 write_len; + size_t write_len; int ret; if (n_tx > CQSPI_STIG_DATA_LEN_MAX || (n_tx && !txbuf)) { dev_err(nor->dev, - "Invalid input argument, cmdlen %d txbuf 0x%p\n", + "Invalid input argument, cmdlen %zu txbuf 0x%p\n", n_tx, txbuf); return -EINVAL; } @@ -470,7 +470,7 @@ static int cqspi_read_setup(struct spi_nor *nor) unsigned int reg; reg = nor->read_opcode << CQSPI_REG_RD_INSTR_OPCODE_LSB; - reg |= cqspi_calc_rdreg(nor, nor->read_opcode); + reg |= cqspi_calc_rdreg(nor); /* Setup dummy clock cycles */ dummy_clk = nor->read_dummy; @@ -603,7 +603,7 @@ static int cqspi_write_setup(struct spi_nor *nor) /* Set opcode. */ reg = nor->program_opcode << CQSPI_REG_WR_INSTR_OPCODE_LSB; writel(reg, reg_base + CQSPI_REG_WR_INSTR); - reg = cqspi_calc_rdreg(nor, nor->program_opcode); + reg = cqspi_calc_rdreg(nor); writel(reg, reg_base + CQSPI_REG_RD_INSTR); reg = readl(reg_base + CQSPI_REG_SIZE); @@ -1050,7 +1050,7 @@ static int cqspi_erase(struct spi_nor *nor, loff_t offs) return ret; /* Send write enable, then erase commands. 
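A recurring shape in these SPI NOR hunks: the per-instance hook fields (nor->read, nor->write_reg, ...) are replaced by one pointer to a shared const operations table such as aspeed_smc_controller_ops. The table can live in read-only memory and every flash instance shares one canonical set of callbacks. A plain-C sketch of the pattern; struct dev_ops and the demo functions are illustrative names only:

#include <stdio.h>

struct device;

/* Shared, immutable table of callbacks. */
struct dev_ops {
	int (*read)(struct device *d);
	int (*write)(struct device *d, int v);
};

struct device {
	int value;
	const struct dev_ops *ops;	/* single hook-up point */
};

static int demo_read(struct device *d) { return d->value; }
static int demo_write(struct device *d, int v) { d->value = v; return 0; }

static const struct dev_ops demo_ops = {
	.read	= demo_read,
	.write	= demo_write,
};

int main(void)
{
	struct device d = { .value = 0, .ops = &demo_ops };

	d.ops->write(&d, 42);
	printf("%d\n", d.ops->read(&d));
	return 0;
}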
*/ - ret = nor->write_reg(nor, SPINOR_OP_WREN, NULL, 0); + ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WREN, NULL, 0); if (ret) return ret; @@ -1080,18 +1080,19 @@ static void cqspi_unprep(struct spi_nor *nor, enum spi_nor_ops ops) mutex_unlock(&cqspi->bus_mutex); } -static int cqspi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len) +static int cqspi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, size_t len) { int ret; ret = cqspi_set_protocol(nor, 0); if (!ret) - ret = cqspi_command_read(nor, &opcode, 1, buf, len); + ret = cqspi_command_read(nor, opcode, buf, len); return ret; } -static int cqspi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len) +static int cqspi_write_reg(struct spi_nor *nor, u8 opcode, const u8 *buf, + size_t len) { int ret; @@ -1216,6 +1217,16 @@ static void cqspi_request_mmap_dma(struct cqspi_st *cqspi) init_completion(&cqspi->rx_dma_complete); } +static const struct spi_nor_controller_ops cqspi_controller_ops = { + .prepare = cqspi_prep, + .unprepare = cqspi_unprep, + .read_reg = cqspi_read_reg, + .write_reg = cqspi_write_reg, + .read = cqspi_read, + .write = cqspi_write, + .erase = cqspi_erase, +}; + static int cqspi_setup_flash(struct cqspi_st *cqspi, struct device_node *np) { struct platform_device *pdev = cqspi->pdev; @@ -1265,14 +1276,7 @@ static int cqspi_setup_flash(struct cqspi_st *cqspi, struct device_node *np) nor->dev = dev; spi_nor_set_flash_node(nor, np); nor->priv = f_pdata; - - nor->read_reg = cqspi_read_reg; - nor->write_reg = cqspi_write_reg; - nor->read = cqspi_read; - nor->write = cqspi_write; - nor->erase = cqspi_erase; - nor->prepare = cqspi_prep; - nor->unprepare = cqspi_unprep; + nor->controller_ops = &cqspi_controller_ops; mtd->name = devm_kasprintf(dev, GFP_KERNEL, "%s.%d", dev_name(dev), cs); @@ -1366,10 +1370,8 @@ static int cqspi_probe(struct platform_device *pdev) /* Obtain IRQ line. */ irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(dev, "Cannot obtain IRQ.\n"); + if (irq < 0) return -ENXIO; - } pm_runtime_enable(dev); ret = pm_runtime_get_sync(dev); diff --git a/drivers/mtd/spi-nor/hisi-sfc.c b/drivers/mtd/spi-nor/hisi-sfc.c index 6dac9dd8bf42..a1258216f89d 100644 --- a/drivers/mtd/spi-nor/hisi-sfc.c +++ b/drivers/mtd/spi-nor/hisi-sfc.c @@ -177,7 +177,7 @@ static void hisi_spi_nor_unprep(struct spi_nor *nor, enum spi_nor_ops ops) } static int hisi_spi_nor_op_reg(struct spi_nor *nor, - u8 opcode, int len, u8 optype) + u8 opcode, size_t len, u8 optype) { struct hifmc_priv *priv = nor->priv; struct hifmc_host *host = priv->host; @@ -200,7 +200,7 @@ static int hisi_spi_nor_op_reg(struct spi_nor *nor, } static int hisi_spi_nor_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, - int len) + size_t len) { struct hifmc_priv *priv = nor->priv; struct hifmc_host *host = priv->host; @@ -215,7 +215,7 @@ static int hisi_spi_nor_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, } static int hisi_spi_nor_write_reg(struct spi_nor *nor, u8 opcode, - u8 *buf, int len) + const u8 *buf, size_t len) { struct hifmc_priv *priv = nor->priv; struct hifmc_host *host = priv->host; @@ -311,6 +311,15 @@ static ssize_t hisi_spi_nor_write(struct spi_nor *nor, loff_t to, return len; } +static const struct spi_nor_controller_ops hisi_controller_ops = { + .prepare = hisi_spi_nor_prep, + .unprepare = hisi_spi_nor_unprep, + .read_reg = hisi_spi_nor_read_reg, + .write_reg = hisi_spi_nor_write_reg, + .read = hisi_spi_nor_read, + .write = hisi_spi_nor_write, +}; + /** * Get spi flash device information and register it as a mtd device. 
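Note how the register-op prototypes in these hunks widen their length parameter from int to size_t. A size_t length squeezed through an int silently truncates large values on LP64 targets, which is the failure mode the widening removes. A small demo, assuming a 64-bit size_t; the function names are made up:

#include <stdio.h>
#include <stddef.h>

static void takes_int(int len)       { printf("int:    %d\n", len); }
static void takes_size_t(size_t len) { printf("size_t: %zu\n", len); }

int main(void)
{
	size_t len = 0x100000000UL;	/* 4 GiB, assumes 64-bit size_t */

	takes_int((int)len);		/* prints 0: high bits are lost */
	takes_size_t(len);		/* prints 4294967296: preserved */
	return 0;
}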
*/ @@ -357,14 +366,8 @@ static int hisi_spi_nor_register(struct device_node *np, } priv->host = host; nor->priv = priv; + nor->controller_ops = &hisi_controller_ops; - nor->prepare = hisi_spi_nor_prep; - nor->unprepare = hisi_spi_nor_unprep; - nor->read_reg = hisi_spi_nor_read_reg; - nor->write_reg = hisi_spi_nor_write_reg; - nor->read = hisi_spi_nor_read; - nor->write = hisi_spi_nor_write; - nor->erase = NULL; ret = spi_nor_scan(nor, NULL, &hwcaps); if (ret) return ret; diff --git a/drivers/mtd/spi-nor/intel-spi-pci.c b/drivers/mtd/spi-nor/intel-spi-pci.c index 3cda8e7a68f8..3d8987baea2a 100644 --- a/drivers/mtd/spi-nor/intel-spi-pci.c +++ b/drivers/mtd/spi-nor/intel-spi-pci.c @@ -20,6 +20,10 @@ static const struct intel_spi_boardinfo bxt_info = { .type = INTEL_SPI_BXT, }; +static const struct intel_spi_boardinfo cnl_info = { + .type = INTEL_SPI_CNL, +}; + static int intel_spi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { @@ -61,6 +65,7 @@ static void intel_spi_pci_remove(struct pci_dev *pdev) static const struct pci_device_id intel_spi_pci_ids[] = { { PCI_VDEVICE(INTEL, 0x02a4), (unsigned long)&bxt_info }, + { PCI_VDEVICE(INTEL, 0x06a4), (unsigned long)&bxt_info }, { PCI_VDEVICE(INTEL, 0x18e0), (unsigned long)&bxt_info }, { PCI_VDEVICE(INTEL, 0x19e0), (unsigned long)&bxt_info }, { PCI_VDEVICE(INTEL, 0x34a4), (unsigned long)&bxt_info }, @@ -68,6 +73,7 @@ static const struct pci_device_id intel_spi_pci_ids[] = { { PCI_VDEVICE(INTEL, 0xa0a4), (unsigned long)&bxt_info }, { PCI_VDEVICE(INTEL, 0xa1a4), (unsigned long)&bxt_info }, { PCI_VDEVICE(INTEL, 0xa224), (unsigned long)&bxt_info }, + { PCI_VDEVICE(INTEL, 0xa324), (unsigned long)&cnl_info }, { }, }; MODULE_DEVICE_TABLE(pci, intel_spi_pci_ids); diff --git a/drivers/mtd/spi-nor/intel-spi.c b/drivers/mtd/spi-nor/intel-spi.c index 43e55a2e9b27..61d2a0ad2131 100644 --- a/drivers/mtd/spi-nor/intel-spi.c +++ b/drivers/mtd/spi-nor/intel-spi.c @@ -108,6 +108,10 @@ #define BXT_FREG_NUM 12 #define BXT_PR_NUM 6 +#define CNL_PR 0x84 +#define CNL_FREG_NUM 6 +#define CNL_PR_NUM 5 + #define LVSCC 0xc4 #define UVSCC 0xc8 #define ERASE_OPCODE_SHIFT 8 @@ -187,12 +191,16 @@ static void intel_spi_dump_regs(struct intel_spi *ispi) dev_dbg(ispi->dev, "PR(%d)=0x%08x\n", i, readl(ispi->pregs + PR(i))); - value = readl(ispi->sregs + SSFSTS_CTL); - dev_dbg(ispi->dev, "SSFSTS_CTL=0x%08x\n", value); - dev_dbg(ispi->dev, "PREOP_OPTYPE=0x%08x\n", - readl(ispi->sregs + PREOP_OPTYPE)); - dev_dbg(ispi->dev, "OPMENU0=0x%08x\n", readl(ispi->sregs + OPMENU0)); - dev_dbg(ispi->dev, "OPMENU1=0x%08x\n", readl(ispi->sregs + OPMENU1)); + if (ispi->sregs) { + value = readl(ispi->sregs + SSFSTS_CTL); + dev_dbg(ispi->dev, "SSFSTS_CTL=0x%08x\n", value); + dev_dbg(ispi->dev, "PREOP_OPTYPE=0x%08x\n", + readl(ispi->sregs + PREOP_OPTYPE)); + dev_dbg(ispi->dev, "OPMENU0=0x%08x\n", + readl(ispi->sregs + OPMENU0)); + dev_dbg(ispi->dev, "OPMENU1=0x%08x\n", + readl(ispi->sregs + OPMENU1)); + } if (ispi->info->type == INTEL_SPI_BYT) dev_dbg(ispi->dev, "BCR=0x%08x\n", readl(ispi->base + BYT_BCR)); @@ -340,6 +348,13 @@ static int intel_spi_init(struct intel_spi *ispi) ispi->erase_64k = true; break; + case INTEL_SPI_CNL: + ispi->sregs = NULL; + ispi->pregs = ispi->base + CNL_PR; + ispi->nregions = CNL_FREG_NUM; + ispi->pr_num = CNL_PR_NUM; + break; + default: return -EINVAL; } @@ -367,6 +382,11 @@ static int intel_spi_init(struct intel_spi *ispi) !(uvscc & ERASE_64K_OPCODE_MASK)) ispi->erase_64k = false; + if (ispi->sregs == NULL && (ispi->swseq_reg || ispi->swseq_erase)) { + 
dev_err(ispi->dev, "software sequencer not supported, but required\n"); + return -EINVAL; + } + /* * Some controllers can only do basic operations using hardware * sequencer. All other operations are supposed to be carried out @@ -383,7 +403,7 @@ static int intel_spi_init(struct intel_spi *ispi) val = readl(ispi->base + HSFSTS_CTL); ispi->locked = !!(val & HSFSTS_CTL_FLOCKDN); - if (ispi->locked) { + if (ispi->locked && ispi->sregs) { /* * BIOS programs allowed opcodes and then locks down the * register. So read back what opcodes it decided to support. @@ -426,7 +446,7 @@ static int intel_spi_opcode_index(struct intel_spi *ispi, u8 opcode, int optype) return 0; } -static int intel_spi_hw_cycle(struct intel_spi *ispi, u8 opcode, int len) +static int intel_spi_hw_cycle(struct intel_spi *ispi, u8 opcode, size_t len) { u32 val, status; int ret; @@ -469,7 +489,7 @@ static int intel_spi_hw_cycle(struct intel_spi *ispi, u8 opcode, int len) return 0; } -static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, int len, +static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, size_t len, int optype) { u32 val = 0, status; @@ -535,7 +555,8 @@ static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, int len, return 0; } -static int intel_spi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len) +static int intel_spi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, + size_t len) { struct intel_spi *ispi = nor->priv; int ret; @@ -555,7 +576,8 @@ static int intel_spi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len) return intel_spi_read_block(ispi, buf, len); } -static int intel_spi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len) +static int intel_spi_write_reg(struct spi_nor *nor, u8 opcode, const u8 *buf, + size_t len) { struct intel_spi *ispi = nor->priv; int ret; @@ -864,6 +886,14 @@ static void intel_spi_fill_partition(struct intel_spi *ispi, } } +static const struct spi_nor_controller_ops intel_spi_controller_ops = { + .read_reg = intel_spi_read_reg, + .write_reg = intel_spi_write_reg, + .read = intel_spi_read, + .write = intel_spi_write, + .erase = intel_spi_erase, +}; + struct intel_spi *intel_spi_probe(struct device *dev, struct resource *mem, const struct intel_spi_boardinfo *info) { @@ -897,11 +927,7 @@ struct intel_spi *intel_spi_probe(struct device *dev, ispi->nor.dev = ispi->dev; ispi->nor.priv = ispi; - ispi->nor.read_reg = intel_spi_read_reg; - ispi->nor.write_reg = intel_spi_write_reg; - ispi->nor.read = intel_spi_read; - ispi->nor.write = intel_spi_write; - ispi->nor.erase = intel_spi_erase; + ispi->nor.controller_ops = &intel_spi_controller_ops; ret = spi_nor_scan(&ispi->nor, NULL, &hwcaps); if (ret) { diff --git a/drivers/mtd/spi-nor/mtk-quadspi.c b/drivers/mtd/spi-nor/mtk-quadspi.c index 34db01ab6cab..b1691680d174 100644 --- a/drivers/mtd/spi-nor/mtk-quadspi.c +++ b/drivers/mtd/spi-nor/mtk-quadspi.c @@ -151,9 +151,9 @@ static int mtk_nor_execute_cmd(struct mtk_nor *mtk_nor, u8 cmdval) } static int mtk_nor_do_tx_rx(struct mtk_nor *mtk_nor, u8 op, - u8 *tx, int txlen, u8 *rx, int rxlen) + const u8 *tx, size_t txlen, u8 *rx, size_t rxlen) { - int len = 1 + txlen + rxlen; + size_t len = 1 + txlen + rxlen; int i, ret, idx; if (len > MTK_NOR_MAX_SHIFT) @@ -193,7 +193,7 @@ static int mtk_nor_do_tx_rx(struct mtk_nor *mtk_nor, u8 op, } /* Do a WRSR (Write Status Register) command */ -static int mtk_nor_wr_sr(struct mtk_nor *mtk_nor, u8 sr) +static int mtk_nor_wr_sr(struct mtk_nor *mtk_nor, const u8 sr) { writeb(sr, mtk_nor->base + 
MTK_NOR_PRGDATA5_REG); writeb(8, mtk_nor->base + MTK_NOR_CNT_REG); @@ -354,7 +354,7 @@ static ssize_t mtk_nor_write(struct spi_nor *nor, loff_t to, size_t len, return len; } -static int mtk_nor_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len) +static int mtk_nor_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, size_t len) { int ret; struct mtk_nor *mtk_nor = nor->priv; @@ -376,8 +376,8 @@ static int mtk_nor_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len) return ret; } -static int mtk_nor_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, - int len) +static int mtk_nor_write_reg(struct spi_nor *nor, u8 opcode, const u8 *buf, + size_t len) { int ret; struct mtk_nor *mtk_nor = nor->priv; @@ -419,6 +419,13 @@ static int mtk_nor_enable_clk(struct mtk_nor *mtk_nor) return 0; } +static const struct spi_nor_controller_ops mtk_controller_ops = { + .read_reg = mtk_nor_read_reg, + .write_reg = mtk_nor_write_reg, + .read = mtk_nor_read, + .write = mtk_nor_write, +}; + static int mtk_nor_init(struct mtk_nor *mtk_nor, struct device_node *flash_node) { @@ -438,12 +445,8 @@ static int mtk_nor_init(struct mtk_nor *mtk_nor, nor->dev = mtk_nor->dev; nor->priv = mtk_nor; spi_nor_set_flash_node(nor, flash_node); + nor->controller_ops = &mtk_controller_ops; - /* fill the hooks to spi nor */ - nor->read = mtk_nor_read; - nor->read_reg = mtk_nor_read_reg; - nor->write = mtk_nor_write; - nor->write_reg = mtk_nor_write_reg; nor->mtd.name = "mtk_nor"; /* initialized with NULL */ ret = spi_nor_scan(nor, NULL, &hwcaps); diff --git a/drivers/mtd/spi-nor/nxp-spifi.c b/drivers/mtd/spi-nor/nxp-spifi.c index 4a871587392b..9a5b1a7c636a 100644 --- a/drivers/mtd/spi-nor/nxp-spifi.c +++ b/drivers/mtd/spi-nor/nxp-spifi.c @@ -123,7 +123,8 @@ static int nxp_spifi_set_memory_mode_on(struct nxp_spifi *spifi) return ret; } -static int nxp_spifi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len) +static int nxp_spifi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, + size_t len) { struct nxp_spifi *spifi = nor->priv; u32 cmd; @@ -145,7 +146,8 @@ static int nxp_spifi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len) return nxp_spifi_wait_for_cmd(spifi); } -static int nxp_spifi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len) +static int nxp_spifi_write_reg(struct spi_nor *nor, u8 opcode, const u8 *buf, + size_t len) { struct nxp_spifi *spifi = nor->priv; u32 cmd; @@ -263,9 +265,18 @@ static int nxp_spifi_setup_memory_cmd(struct nxp_spifi *spifi) static void nxp_spifi_dummy_id_read(struct spi_nor *nor) { u8 id[SPI_NOR_MAX_ID_LEN]; - nor->read_reg(nor, SPINOR_OP_RDID, id, SPI_NOR_MAX_ID_LEN); + nor->controller_ops->read_reg(nor, SPINOR_OP_RDID, id, + SPI_NOR_MAX_ID_LEN); } +static const struct spi_nor_controller_ops nxp_spifi_controller_ops = { + .read_reg = nxp_spifi_read_reg, + .write_reg = nxp_spifi_write_reg, + .read = nxp_spifi_read, + .write = nxp_spifi_write, + .erase = nxp_spifi_erase, +}; + static int nxp_spifi_setup_flash(struct nxp_spifi *spifi, struct device_node *np) { @@ -332,11 +343,7 @@ static int nxp_spifi_setup_flash(struct nxp_spifi *spifi, spifi->nor.dev = spifi->dev; spi_nor_set_flash_node(&spifi->nor, np); spifi->nor.priv = spifi; - spifi->nor.read = nxp_spifi_read; - spifi->nor.write = nxp_spifi_write; - spifi->nor.erase = nxp_spifi_erase; - spifi->nor.read_reg = nxp_spifi_read_reg; - spifi->nor.write_reg = nxp_spifi_write_reg; + spifi->nor.controller_ops = &nxp_spifi_controller_ops; /* * The first read on a hard reset isn't reliable so do a diff --git 
a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c index 7acf4a93b592..f4afe123e9dc 100644 --- a/drivers/mtd/spi-nor/spi-nor.c +++ b/drivers/mtd/spi-nor/spi-nor.c @@ -338,7 +338,7 @@ static ssize_t spi_nor_read_data(struct spi_nor *nor, loff_t from, size_t len, if (nor->spimem) return spi_nor_spimem_read_data(nor, from, len, buf); - return nor->read(nor, from, len, buf); + return nor->controller_ops->read(nor, from, len, buf); } /** @@ -385,239 +385,172 @@ static ssize_t spi_nor_write_data(struct spi_nor *nor, loff_t to, size_t len, if (nor->spimem) return spi_nor_spimem_write_data(nor, to, len, buf); - return nor->write(nor, to, len, buf); + return nor->controller_ops->write(nor, to, len, buf); } -/* - * Read the status register, returning its value in the location - * Return the status register value. - * Returns negative if error occurred. +/** + * spi_nor_write_enable() - Set write enable latch with Write Enable command. + * @nor: pointer to 'struct spi_nor'. + * + * Return: 0 on success, -errno otherwise. */ -static int read_sr(struct spi_nor *nor) +static int spi_nor_write_enable(struct spi_nor *nor) { int ret; if (nor->spimem) { struct spi_mem_op op = - SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR, 1), + SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREN, 1), SPI_MEM_OP_NO_ADDR, SPI_MEM_OP_NO_DUMMY, - SPI_MEM_OP_DATA_IN(1, nor->bouncebuf, 1)); + SPI_MEM_OP_NO_DATA); ret = spi_mem_exec_op(nor->spimem, &op); } else { - ret = nor->read_reg(nor, SPINOR_OP_RDSR, nor->bouncebuf, 1); + ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WREN, + NULL, 0); } - if (ret < 0) { - pr_err("error %d reading SR\n", (int) ret); - return ret; - } + if (ret) + dev_dbg(nor->dev, "error %d on Write Enable\n", ret); - return nor->bouncebuf[0]; + return ret; } -/* - * Read the flag status register, returning its value in the location - * Return the status register value. - * Returns negative if error occurred. +/** + * spi_nor_write_disable() - Send Write Disable instruction to the chip. + * @nor: pointer to 'struct spi_nor'. + * + * Return: 0 on success, -errno otherwise. */ -static int read_fsr(struct spi_nor *nor) +static int spi_nor_write_disable(struct spi_nor *nor) { int ret; if (nor->spimem) { struct spi_mem_op op = - SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDFSR, 1), + SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRDI, 1), SPI_MEM_OP_NO_ADDR, SPI_MEM_OP_NO_DUMMY, - SPI_MEM_OP_DATA_IN(1, nor->bouncebuf, 1)); + SPI_MEM_OP_NO_DATA); ret = spi_mem_exec_op(nor->spimem, &op); } else { - ret = nor->read_reg(nor, SPINOR_OP_RDFSR, nor->bouncebuf, 1); + ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WRDI, + NULL, 0); } - if (ret < 0) { - pr_err("error %d reading FSR\n", ret); - return ret; - } + if (ret) + dev_dbg(nor->dev, "error %d on Write Disable\n", ret); - return nor->bouncebuf[0]; + return ret; } -/* - * Read configuration register, returning its value in the - * location. Return the configuration register value. - * Returns negative if error occurred. +/** + * spi_nor_read_sr() - Read the Status Register. + * @nor: pointer to 'struct spi_nor'. + * @sr: pointer to a DMA-able buffer where the value of the + * Status Register will be written. + * + * Return: 0 on success, -errno otherwise. 
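The reworked register helpers here (spi_nor_write_enable(), spi_nor_write_disable(), spi_nor_read_sr(), ...) all adopt one calling convention: the register value travels through a caller-supplied DMA-able buffer and the return carries only 0 or -errno, instead of multiplexing value and error into a single int. A plain-C sketch of the two conventions; read_reg_old and read_reg_new are hypothetical:

#include <stdio.h>
#include <errno.h>

/* Old style: value and error share the return; callers test < 0. */
static int read_reg_old(int fail)
{
	return fail ? -EIO : 0x42;
}

/* New style: the return is only a status, the value lands in *out. */
static int read_reg_new(unsigned char *out, int fail)
{
	if (fail)
		return -EIO;
	*out = 0x42;
	return 0;
}

int main(void)
{
	unsigned char v;

	if (!read_reg_new(&v, 0))
		printf("new: 0x%02x, old: 0x%02x\n", v, read_reg_old(0));
	return 0;
}

The out-parameter form also lets the buffer be the driver's bounce buffer, which the spi-mem layer requires to be DMA-able.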
*/ -static int read_cr(struct spi_nor *nor) +static int spi_nor_read_sr(struct spi_nor *nor, u8 *sr) { int ret; if (nor->spimem) { struct spi_mem_op op = - SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDCR, 1), + SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR, 1), SPI_MEM_OP_NO_ADDR, SPI_MEM_OP_NO_DUMMY, - SPI_MEM_OP_DATA_IN(1, nor->bouncebuf, 1)); + SPI_MEM_OP_DATA_IN(1, sr, 1)); ret = spi_mem_exec_op(nor->spimem, &op); } else { - ret = nor->read_reg(nor, SPINOR_OP_RDCR, nor->bouncebuf, 1); + ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDSR, + sr, 1); } - if (ret < 0) { - dev_err(nor->dev, "error %d reading CR\n", ret); - return ret; - } + if (ret) + dev_dbg(nor->dev, "error %d reading SR\n", ret); - return nor->bouncebuf[0]; + return ret; } -/* - * Write status register 1 byte - * Returns negative if error occurred. +/** + * spi_nor_read_fsr() - Read the Flag Status Register. + * @nor: pointer to 'struct spi_nor' + * @fsr: pointer to a DMA-able buffer where the value of the + * Flag Status Register will be written. + * + * Return: 0 on success, -errno otherwise. */ -static int write_sr(struct spi_nor *nor, u8 val) +static int spi_nor_read_fsr(struct spi_nor *nor, u8 *fsr) { - nor->bouncebuf[0] = val; - if (nor->spimem) { - struct spi_mem_op op = - SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR, 1), - SPI_MEM_OP_NO_ADDR, - SPI_MEM_OP_NO_DUMMY, - SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 1)); - - return spi_mem_exec_op(nor->spimem, &op); - } - - return nor->write_reg(nor, SPINOR_OP_WRSR, nor->bouncebuf, 1); -} + int ret; -/* - * Set write enable latch with Write Enable command. - * Returns negative if error occurred. - */ -static int write_enable(struct spi_nor *nor) -{ if (nor->spimem) { struct spi_mem_op op = - SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREN, 1), + SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDFSR, 1), SPI_MEM_OP_NO_ADDR, SPI_MEM_OP_NO_DUMMY, - SPI_MEM_OP_NO_DATA); + SPI_MEM_OP_DATA_IN(1, fsr, 1)); - return spi_mem_exec_op(nor->spimem, &op); + ret = spi_mem_exec_op(nor->spimem, &op); + } else { + ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDFSR, + fsr, 1); } - return nor->write_reg(nor, SPINOR_OP_WREN, NULL, 0); + if (ret) + dev_dbg(nor->dev, "error %d reading FSR\n", ret); + + return ret; } -/* - * Send write disable instruction to the chip. +/** + * spi_nor_read_cr() - Read the Configuration Register using the + * SPINOR_OP_RDCR (35h) command. + * @nor: pointer to 'struct spi_nor' + * @cr: pointer to a DMA-able buffer where the value of the + * Configuration Register will be written. + * + * Return: 0 on success, -errno otherwise. */ -static int write_disable(struct spi_nor *nor) +static int spi_nor_read_cr(struct spi_nor *nor, u8 *cr) { + int ret; + if (nor->spimem) { struct spi_mem_op op = - SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRDI, 1), + SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDCR, 1), SPI_MEM_OP_NO_ADDR, SPI_MEM_OP_NO_DUMMY, - SPI_MEM_OP_NO_DATA); + SPI_MEM_OP_DATA_IN(1, cr, 1)); - return spi_mem_exec_op(nor->spimem, &op); + ret = spi_mem_exec_op(nor->spimem, &op); + } else { + ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDCR, cr, 1); } - return nor->write_reg(nor, SPINOR_OP_WRDI, NULL, 0); -} - -static struct spi_nor *mtd_to_spi_nor(struct mtd_info *mtd) -{ - return mtd->priv; -} - - -static u8 spi_nor_convert_opcode(u8 opcode, const u8 table[][2], size_t size) -{ - size_t i; - - for (i = 0; i < size; i++) - if (table[i][0] == opcode) - return table[i][1]; - - /* No conversion found, keep input op code. 
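The spi_nor_convert_opcode() block being relocated in this hunk is a plain pair-table lookup: scan {from, to} rows and fall back to the input opcode when nothing matches, so unknown commands pass through untouched. The same logic, runnable standalone; the opcode values below are illustrative rather than authoritative SPINOR_OP_* constants:

#include <stdio.h>
#include <stddef.h>

typedef unsigned char u8;

static u8 convert_opcode(u8 opcode, const u8 table[][2], size_t size)
{
	size_t i;

	for (i = 0; i < size; i++)
		if (table[i][0] == opcode)
			return table[i][1];

	return opcode;	/* no conversion found, keep input op code */
}

int main(void)
{
	/* Example 3-byte -> 4-byte address opcode pairs. */
	static const u8 demo_3to4[][2] = {
		{ 0x03, 0x13 },	/* read -> read, 4-byte address */
		{ 0x02, 0x12 },	/* page program -> 4-byte variant */
	};

	printf("0x%02x\n",
	       convert_opcode(0x03, demo_3to4,
			      sizeof(demo_3to4) / sizeof(demo_3to4[0])));
	return 0;
}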
*/ - return opcode; -} - -static u8 spi_nor_convert_3to4_read(u8 opcode) -{ - static const u8 spi_nor_3to4_read[][2] = { - { SPINOR_OP_READ, SPINOR_OP_READ_4B }, - { SPINOR_OP_READ_FAST, SPINOR_OP_READ_FAST_4B }, - { SPINOR_OP_READ_1_1_2, SPINOR_OP_READ_1_1_2_4B }, - { SPINOR_OP_READ_1_2_2, SPINOR_OP_READ_1_2_2_4B }, - { SPINOR_OP_READ_1_1_4, SPINOR_OP_READ_1_1_4_4B }, - { SPINOR_OP_READ_1_4_4, SPINOR_OP_READ_1_4_4_4B }, - { SPINOR_OP_READ_1_1_8, SPINOR_OP_READ_1_1_8_4B }, - { SPINOR_OP_READ_1_8_8, SPINOR_OP_READ_1_8_8_4B }, - - { SPINOR_OP_READ_1_1_1_DTR, SPINOR_OP_READ_1_1_1_DTR_4B }, - { SPINOR_OP_READ_1_2_2_DTR, SPINOR_OP_READ_1_2_2_DTR_4B }, - { SPINOR_OP_READ_1_4_4_DTR, SPINOR_OP_READ_1_4_4_DTR_4B }, - }; - - return spi_nor_convert_opcode(opcode, spi_nor_3to4_read, - ARRAY_SIZE(spi_nor_3to4_read)); -} - -static u8 spi_nor_convert_3to4_program(u8 opcode) -{ - static const u8 spi_nor_3to4_program[][2] = { - { SPINOR_OP_PP, SPINOR_OP_PP_4B }, - { SPINOR_OP_PP_1_1_4, SPINOR_OP_PP_1_1_4_4B }, - { SPINOR_OP_PP_1_4_4, SPINOR_OP_PP_1_4_4_4B }, - { SPINOR_OP_PP_1_1_8, SPINOR_OP_PP_1_1_8_4B }, - { SPINOR_OP_PP_1_8_8, SPINOR_OP_PP_1_8_8_4B }, - }; - - return spi_nor_convert_opcode(opcode, spi_nor_3to4_program, - ARRAY_SIZE(spi_nor_3to4_program)); -} - -static u8 spi_nor_convert_3to4_erase(u8 opcode) -{ - static const u8 spi_nor_3to4_erase[][2] = { - { SPINOR_OP_BE_4K, SPINOR_OP_BE_4K_4B }, - { SPINOR_OP_BE_32K, SPINOR_OP_BE_32K_4B }, - { SPINOR_OP_SE, SPINOR_OP_SE_4B }, - }; - - return spi_nor_convert_opcode(opcode, spi_nor_3to4_erase, - ARRAY_SIZE(spi_nor_3to4_erase)); -} - -static void spi_nor_set_4byte_opcodes(struct spi_nor *nor) -{ - nor->read_opcode = spi_nor_convert_3to4_read(nor->read_opcode); - nor->program_opcode = spi_nor_convert_3to4_program(nor->program_opcode); - nor->erase_opcode = spi_nor_convert_3to4_erase(nor->erase_opcode); - - if (!spi_nor_has_uniform_erase(nor)) { - struct spi_nor_erase_map *map = &nor->params.erase_map; - struct spi_nor_erase_type *erase; - int i; + if (ret) + dev_dbg(nor->dev, "error %d reading CR\n", ret); - for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) { - erase = &map->erase_type[i]; - erase->opcode = - spi_nor_convert_3to4_erase(erase->opcode); - } - } + return ret; } +/** + * macronix_set_4byte() - Set 4-byte address mode for Macronix flashes. + * @nor: pointer to 'struct spi_nor'. + * @enable: true to enter the 4-byte address mode, false to exit the 4-byte + * address mode. + * + * Return: 0 on success, -errno otherwise. + */ static int macronix_set_4byte(struct spi_nor *nor, bool enable) { + int ret; + if (nor->spimem) { struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(enable ? @@ -628,26 +561,55 @@ static int macronix_set_4byte(struct spi_nor *nor, bool enable) SPI_MEM_OP_NO_DUMMY, SPI_MEM_OP_NO_DATA); - return spi_mem_exec_op(nor->spimem, &op); + ret = spi_mem_exec_op(nor->spimem, &op); + } else { + ret = nor->controller_ops->write_reg(nor, + enable ? SPINOR_OP_EN4B : + SPINOR_OP_EX4B, + NULL, 0); } - return nor->write_reg(nor, enable ? SPINOR_OP_EN4B : SPINOR_OP_EX4B, - NULL, 0); + if (ret) + dev_dbg(nor->dev, "error %d setting 4-byte mode\n", ret); + + return ret; } +/** + * st_micron_set_4byte() - Set 4-byte address mode for ST and Micron flashes. + * @nor: pointer to 'struct spi_nor'. + * @enable: true to enter the 4-byte address mode, false to exit the 4-byte + * address mode. + * + * Return: 0 on success, -errno otherwise. 
+ */ static int st_micron_set_4byte(struct spi_nor *nor, bool enable) { int ret; - write_enable(nor); + ret = spi_nor_write_enable(nor); + if (ret) + return ret; + ret = macronix_set_4byte(nor, enable); - write_disable(nor); + if (ret) + return ret; - return ret; + return spi_nor_write_disable(nor); } +/** + * spansion_set_4byte() - Set 4-byte address mode for Spansion flashes. + * @nor: pointer to 'struct spi_nor'. + * @enable: true to enter the 4-byte address mode, false to exit the 4-byte + * address mode. + * + * Return: 0 on success, -errno otherwise. + */ static int spansion_set_4byte(struct spi_nor *nor, bool enable) { + int ret; + nor->bouncebuf[0] = enable << 7; if (nor->spimem) { @@ -657,14 +619,29 @@ static int spansion_set_4byte(struct spi_nor *nor, bool enable) SPI_MEM_OP_NO_DUMMY, SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 1)); - return spi_mem_exec_op(nor->spimem, &op); + ret = spi_mem_exec_op(nor->spimem, &op); + } else { + ret = nor->controller_ops->write_reg(nor, SPINOR_OP_BRWR, + nor->bouncebuf, 1); } - return nor->write_reg(nor, SPINOR_OP_BRWR, nor->bouncebuf, 1); + if (ret) + dev_dbg(nor->dev, "error %d setting 4-byte mode\n", ret); + + return ret; } +/** + * spi_nor_write_ear() - Write Extended Address Register. + * @nor: pointer to 'struct spi_nor'. + * @ear: value to write to the Extended Address Register. + * + * Return: 0 on success, -errno otherwise. + */ static int spi_nor_write_ear(struct spi_nor *nor, u8 ear) { + int ret; + nor->bouncebuf[0] = ear; if (nor->spimem) { @@ -674,12 +651,26 @@ static int spi_nor_write_ear(struct spi_nor *nor, u8 ear) SPI_MEM_OP_NO_DUMMY, SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 1)); - return spi_mem_exec_op(nor->spimem, &op); + ret = spi_mem_exec_op(nor->spimem, &op); + } else { + ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WREAR, + nor->bouncebuf, 1); } - return nor->write_reg(nor, SPINOR_OP_WREAR, nor->bouncebuf, 1); + if (ret) + dev_dbg(nor->dev, "error %d writing EAR\n", ret); + + return ret; } +/** + * winbond_set_4byte() - Set 4-byte address mode for Winbond flashes. + * @nor: pointer to 'struct spi_nor'. + * @enable: true to enter the 4-byte address mode, false to exit the 4-byte + * address mode. + * + * Return: 0 on success, -errno otherwise. + */ static int winbond_set_4byte(struct spi_nor *nor, bool enable) { int ret; @@ -693,15 +684,29 @@ static int winbond_set_4byte(struct spi_nor *nor, bool enable) * Register to be set to 1, so all 3-byte-address reads come from the * second 16M. We must clear the register to enable normal behavior. */ - write_enable(nor); + ret = spi_nor_write_enable(nor); + if (ret) + return ret; + ret = spi_nor_write_ear(nor, 0); - write_disable(nor); + if (ret) + return ret; - return ret; + return spi_nor_write_disable(nor); } +/** + * spi_nor_xread_sr() - Read the Status Register on S3AN flashes. + * @nor: pointer to 'struct spi_nor'. + * @sr: pointer to a DMA-able buffer where the value of the + * Status Register will be written. + * + * Return: 0 on success, -errno otherwise. 
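st_micron_set_4byte() and winbond_set_4byte() above share the reworked bracket: Write Enable, the operation itself, then Write Disable, aborting on the first failure instead of discarding the intermediate return codes as the old bodies did. A sketch of the sequencing in plain C; the three step functions are stand-ins, not kernel APIs:

#include <stdio.h>

static int write_enable(void)  { return 0; }
static int do_operation(void)  { return 0; }
static int write_disable(void) { return 0; }

static int bracketed_op(void)
{
	int ret;

	ret = write_enable();
	if (ret)
		return ret;

	ret = do_operation();
	if (ret)
		return ret;	/* mirrors the helpers above: no
				 * write_disable() on this path */

	return write_disable();
}

int main(void)
{
	printf("%d\n", bracketed_op());
	return 0;
}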
+ */ static int spi_nor_xread_sr(struct spi_nor *nor, u8 *sr) { + int ret; + if (nor->spimem) { struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_XRDSR, 1), @@ -709,27 +714,44 @@ static int spi_nor_xread_sr(struct spi_nor *nor, u8 *sr) SPI_MEM_OP_NO_DUMMY, SPI_MEM_OP_DATA_IN(1, sr, 1)); - return spi_mem_exec_op(nor->spimem, &op); + ret = spi_mem_exec_op(nor->spimem, &op); + } else { + ret = nor->controller_ops->read_reg(nor, SPINOR_OP_XRDSR, + sr, 1); } - return nor->read_reg(nor, SPINOR_OP_XRDSR, sr, 1); + if (ret) + dev_dbg(nor->dev, "error %d reading XRDSR\n", ret); + + return ret; } +/** + * s3an_sr_ready() - Query the Status Register of the S3AN flash to see if the + * flash is ready for new commands. + * @nor: pointer to 'struct spi_nor'. + * + * Return: 0 on success, -errno otherwise. + */ static int s3an_sr_ready(struct spi_nor *nor) { int ret; ret = spi_nor_xread_sr(nor, nor->bouncebuf); - if (ret < 0) { - dev_err(nor->dev, "error %d reading XRDSR\n", (int) ret); + if (ret) return ret; - } return !!(nor->bouncebuf[0] & XSR_RDY); } -static int spi_nor_clear_sr(struct spi_nor *nor) +/** + * spi_nor_clear_sr() - Clear the Status Register. + * @nor: pointer to 'struct spi_nor'. + */ +static void spi_nor_clear_sr(struct spi_nor *nor) { + int ret; + if (nor->spimem) { struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLSR, 1), @@ -737,20 +759,33 @@ static int spi_nor_clear_sr(struct spi_nor *nor) SPI_MEM_OP_NO_DUMMY, SPI_MEM_OP_NO_DATA); - return spi_mem_exec_op(nor->spimem, &op); + ret = spi_mem_exec_op(nor->spimem, &op); + } else { + ret = nor->controller_ops->write_reg(nor, SPINOR_OP_CLSR, + NULL, 0); } - return nor->write_reg(nor, SPINOR_OP_CLSR, NULL, 0); + if (ret) + dev_dbg(nor->dev, "error %d clearing SR\n", ret); } +/** + * spi_nor_sr_ready() - Query the Status Register to see if the flash is ready + * for new commands. + * @nor: pointer to 'struct spi_nor'. + * + * Return: 0 on success, -errno otherwise. + */ static int spi_nor_sr_ready(struct spi_nor *nor) { - int sr = read_sr(nor); - if (sr < 0) - return sr; + int ret = spi_nor_read_sr(nor, nor->bouncebuf); + + if (ret) + return ret; - if (nor->flags & SNOR_F_USE_CLSR && sr & (SR_E_ERR | SR_P_ERR)) { - if (sr & SR_E_ERR) + if (nor->flags & SNOR_F_USE_CLSR && + nor->bouncebuf[0] & (SR_E_ERR | SR_P_ERR)) { + if (nor->bouncebuf[0] & SR_E_ERR) dev_err(nor->dev, "Erase Error occurred\n"); else dev_err(nor->dev, "Programming Error occurred\n"); @@ -759,11 +794,17 @@ static int spi_nor_sr_ready(struct spi_nor *nor) return -EIO; } - return !(sr & SR_WIP); + return !(nor->bouncebuf[0] & SR_WIP); } -static int spi_nor_clear_fsr(struct spi_nor *nor) +/** + * spi_nor_clear_fsr() - Clear the Flag Status Register. + * @nor: pointer to 'struct spi_nor'. + */ +static void spi_nor_clear_fsr(struct spi_nor *nor) { + int ret; + if (nor->spimem) { struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLFSR, 1), @@ -771,25 +812,37 @@ static int spi_nor_clear_fsr(struct spi_nor *nor) SPI_MEM_OP_NO_DUMMY, SPI_MEM_OP_NO_DATA); - return spi_mem_exec_op(nor->spimem, &op); + ret = spi_mem_exec_op(nor->spimem, &op); + } else { + ret = nor->controller_ops->write_reg(nor, SPINOR_OP_CLFSR, + NULL, 0); } - return nor->write_reg(nor, SPINOR_OP_CLFSR, NULL, 0); + if (ret) + dev_dbg(nor->dev, "error %d clearing FSR\n", ret); } +/** + * spi_nor_fsr_ready() - Query the Flag Status Register to see if the flash is + * ready for new commands. + * @nor: pointer to 'struct spi_nor'. + * + * Return: 0 on success, -errno otherwise. 
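spi_nor_sr_ready() above answers three-way: sticky erase/program error bits force -EIO (after being cleared), otherwise the busy bit decides between 0 and 1. A compact userspace model of the decode; the bit positions are illustrative, not datasheet values:

#include <stdio.h>
#include <errno.h>

#define DEMO_WIP	(1 << 0)	/* write in progress */
#define DEMO_E_ERR	(1 << 5)	/* latched erase error */
#define DEMO_P_ERR	(1 << 6)	/* latched program error */

/* Returns 1 = ready, 0 = busy, -EIO = error latched. */
static int sr_ready(unsigned char sr)
{
	if (sr & (DEMO_E_ERR | DEMO_P_ERR)) {
		fputs(sr & DEMO_E_ERR ? "Erase Error occurred\n"
				      : "Programming Error occurred\n",
		      stderr);
		/* the real helper clears the sticky bits here */
		return -EIO;
	}

	return !(sr & DEMO_WIP);
}

int main(void)
{
	printf("%d %d %d\n", sr_ready(0), sr_ready(DEMO_WIP),
	       sr_ready(DEMO_P_ERR));
	return 0;
}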
+ */ static int spi_nor_fsr_ready(struct spi_nor *nor) { - int fsr = read_fsr(nor); - if (fsr < 0) - return fsr; + int ret = spi_nor_read_fsr(nor, nor->bouncebuf); - if (fsr & (FSR_E_ERR | FSR_P_ERR)) { - if (fsr & FSR_E_ERR) + if (ret) + return ret; + + if (nor->bouncebuf[0] & (FSR_E_ERR | FSR_P_ERR)) { + if (nor->bouncebuf[0] & FSR_E_ERR) dev_err(nor->dev, "Erase operation failed.\n"); else dev_err(nor->dev, "Program operation failed.\n"); - if (fsr & FSR_PT_ERR) + if (nor->bouncebuf[0] & FSR_PT_ERR) dev_err(nor->dev, "Attempted to modify a protected sector.\n"); @@ -797,9 +850,15 @@ static int spi_nor_fsr_ready(struct spi_nor *nor) return -EIO; } - return fsr & FSR_READY; + return nor->bouncebuf[0] & FSR_READY; } +/** + * spi_nor_ready() - Query the flash to see if it is ready for new commands. + * @nor: pointer to 'struct spi_nor'. + * + * Return: 0 on success, -errno otherwise. + */ static int spi_nor_ready(struct spi_nor *nor) { int sr, fsr; @@ -816,9 +875,13 @@ static int spi_nor_ready(struct spi_nor *nor) return sr && fsr; } -/* - * Service routine to read status register until ready, or timeout occurs. - * Returns non-zero if error. +/** + * spi_nor_wait_till_ready_with_timeout() - Service routine to read the + * Status Register until ready, or timeout occurs. + * @nor: pointer to "struct spi_nor". + * @timeout_jiffies: jiffies to wait until timeout. + * + * Return: 0 on success, -errno otherwise. */ static int spi_nor_wait_till_ready_with_timeout(struct spi_nor *nor, unsigned long timeout_jiffies) @@ -841,24 +904,305 @@ static int spi_nor_wait_till_ready_with_timeout(struct spi_nor *nor, cond_resched(); } - dev_err(nor->dev, "flash operation timed out\n"); + dev_dbg(nor->dev, "flash operation timed out\n"); return -ETIMEDOUT; } +/** + * spi_nor_wait_till_ready() - Wait for a predefined amount of time for the + * flash to be ready, or timeout occurs. + * @nor: pointer to "struct spi_nor". + * + * Return: 0 on success, -errno otherwise. + */ static int spi_nor_wait_till_ready(struct spi_nor *nor) { return spi_nor_wait_till_ready_with_timeout(nor, DEFAULT_READY_WAIT_JIFFIES); } -/* - * Erase the whole flash memory +/** + * spi_nor_write_sr() - Write the Status Register. + * @nor: pointer to 'struct spi_nor'. + * @sr: pointer to DMA-able buffer to write to the Status Register. + * @len: number of bytes to write to the Status Register. + * + * Return: 0 on success, -errno otherwise. + */ +static int spi_nor_write_sr(struct spi_nor *nor, const u8 *sr, size_t len) +{ + int ret; + + ret = spi_nor_write_enable(nor); + if (ret) + return ret; + + if (nor->spimem) { + struct spi_mem_op op = + SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR, 1), + SPI_MEM_OP_NO_ADDR, + SPI_MEM_OP_NO_DUMMY, + SPI_MEM_OP_DATA_OUT(len, sr, 1)); + + ret = spi_mem_exec_op(nor->spimem, &op); + } else { + ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WRSR, + sr, len); + } + + if (ret) { + dev_dbg(nor->dev, "error %d writing SR\n", ret); + return ret; + } + + return spi_nor_wait_till_ready(nor); +} + +/** + * spi_nor_write_sr1_and_check() - Write one byte to the Status Register 1 and + * ensure that the byte written matches the received value. + * @nor: pointer to a 'struct spi_nor'. + * @sr1: byte value to be written to the Status Register. + * + * Return: 0 on success, -errno otherwise. 
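spi_nor_wait_till_ready_with_timeout() above is a classic deadline loop: poll the ready helper, and only give up with -ETIMEDOUT once the deadline has passed, yielding between samples via cond_resched(). The same shape in portable C, with wall-clock seconds standing in for jiffies; poll_device is a placeholder:

#include <stdio.h>
#include <time.h>
#include <errno.h>

static int poll_device(void)
{
	static int countdown = 3;	/* pretend: ready after 3 polls */
	return --countdown <= 0;
}

static int wait_till_ready(int timeout_sec)
{
	time_t deadline = time(NULL) + timeout_sec;

	for (;;) {
		if (poll_device())
			return 0;
		if (time(NULL) > deadline)
			return -ETIMEDOUT;
		/* the kernel loop sleeps/cond_resched()s here */
	}
}

int main(void)
{
	printf("%d\n", wait_till_ready(1));
	return 0;
}

Polling once before every deadline check ensures a device that becomes ready at the last moment is still reported as ready.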
+ */ +static int spi_nor_write_sr1_and_check(struct spi_nor *nor, u8 sr1) +{ + int ret; + + nor->bouncebuf[0] = sr1; + + ret = spi_nor_write_sr(nor, nor->bouncebuf, 1); + if (ret) + return ret; + + ret = spi_nor_read_sr(nor, nor->bouncebuf); + if (ret) + return ret; + + if (nor->bouncebuf[0] != sr1) { + dev_dbg(nor->dev, "SR1: read back test failed\n"); + return -EIO; + } + + return 0; +} + +/** + * spi_nor_write_16bit_sr_and_check() - Write the Status Register 1 and the + * Status Register 2 in one shot. Ensure that the byte written in the Status + * Register 1 matches the received value, and that the 16-bit Write did not + * affect what was already in the Status Register 2. + * @nor: pointer to a 'struct spi_nor'. + * @sr1: byte value to be written to the Status Register 1. + * + * Return: 0 on success, -errno otherwise. + */ +static int spi_nor_write_16bit_sr_and_check(struct spi_nor *nor, u8 sr1) +{ + int ret; + u8 *sr_cr = nor->bouncebuf; + u8 cr_written; + + /* Make sure we don't overwrite the contents of Status Register 2. */ + if (!(nor->flags & SNOR_F_NO_READ_CR)) { + ret = spi_nor_read_cr(nor, &sr_cr[1]); + if (ret) + return ret; + } else if (nor->params.quad_enable) { + /* + * If the Status Register 2 Read command (35h) is not + * supported, we should at least be sure we don't + * change the value of the SR2 Quad Enable bit. + * + * We can safely assume that when the Quad Enable method is + * set, the value of the QE bit is one, as a consequence of the + * nor->params.quad_enable() call. + * + * We can safely assume that the Quad Enable bit is present in + * the Status Register 2 at BIT(1). According to the JESD216 + * revB standard, BFPT DWORDS[15], bits 22:20, the 16-bit + * Write Status (01h) command is available just for the cases + * in which the QE bit is described in SR2 at BIT(1). + */ + sr_cr[1] = SR2_QUAD_EN_BIT1; + } else { + sr_cr[1] = 0; + } + + sr_cr[0] = sr1; + + ret = spi_nor_write_sr(nor, sr_cr, 2); + if (ret) + return ret; + + if (nor->flags & SNOR_F_NO_READ_CR) + return 0; + + cr_written = sr_cr[1]; + + ret = spi_nor_read_cr(nor, &sr_cr[1]); + if (ret) + return ret; + + if (cr_written != sr_cr[1]) { + dev_dbg(nor->dev, "CR: read back test failed\n"); + return -EIO; + } + + return 0; +} + +/** + * spi_nor_write_16bit_cr_and_check() - Write the Status Register 1 and the + * Configuration Register in one shot. Ensure that the byte written in the + * Configuration Register matches the received value, and that the 16-bit Write + * did not affect what was already in the Status Register 1. + * @nor: pointer to a 'struct spi_nor'. + * @cr: byte value to be written to the Configuration Register. + * + * Return: 0 on success, -errno otherwise. + */ +static int spi_nor_write_16bit_cr_and_check(struct spi_nor *nor, u8 cr) +{ + int ret; + u8 *sr_cr = nor->bouncebuf; + u8 sr_written; + + /* Keep the current value of the Status Register 1. 
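spi_nor_write_sr1_and_check() above establishes the read-back discipline used throughout this series: after writing a register, read it back and return -EIO when the device disagrees, because a flash with protection latched can silently ignore the write. A minimal model; demo_reg and write_protect are stand-ins for device state:

#include <stdio.h>
#include <errno.h>

static unsigned char demo_reg;		/* the 'device' register */
static unsigned char write_protect;	/* pretend hardware protection */

static int write_and_check(unsigned char val)
{
	if (!write_protect)
		demo_reg = val;		/* may be silently dropped */

	if (demo_reg != val)
		return -EIO;		/* read back test failed */
	return 0;
}

int main(void)
{
	printf("%d\n", write_and_check(0xa5));	/* 0 */
	write_protect = 1;
	printf("%d\n", write_and_check(0x5a));	/* -EIO (-5) */
	return 0;
}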
*/ + ret = spi_nor_read_sr(nor, sr_cr); + if (ret) + return ret; + + sr_cr[1] = cr; + + ret = spi_nor_write_sr(nor, sr_cr, 2); + if (ret) + return ret; + + sr_written = sr_cr[0]; + + ret = spi_nor_read_sr(nor, sr_cr); + if (ret) + return ret; + + if (sr_written != sr_cr[0]) { + dev_dbg(nor->dev, "SR: Read back test failed\n"); + return -EIO; + } + + if (nor->flags & SNOR_F_NO_READ_CR) + return 0; + + ret = spi_nor_read_cr(nor, &sr_cr[1]); + if (ret) + return ret; + + if (cr != sr_cr[1]) { + dev_dbg(nor->dev, "CR: read back test failed\n"); + return -EIO; + } + + return 0; +} + +/** + * spi_nor_write_sr_and_check() - Write the Status Register 1 and ensure that + * the byte written matches the received value without affecting other bits in the + * Status Registers 1 and 2. + * @nor: pointer to a 'struct spi_nor'. + * @sr1: byte value to be written to the Status Register. + * + * Return: 0 on success, -errno otherwise. + */ +static int spi_nor_write_sr_and_check(struct spi_nor *nor, u8 sr1) +{ + if (nor->flags & SNOR_F_HAS_16BIT_SR) + return spi_nor_write_16bit_sr_and_check(nor, sr1); + + return spi_nor_write_sr1_and_check(nor, sr1); +} + +/** + * spi_nor_write_sr2() - Write the Status Register 2 using the + * SPINOR_OP_WRSR2 (3eh) command. + * @nor: pointer to 'struct spi_nor'. + * @sr2: pointer to DMA-able buffer to write to the Status Register 2. + * + * Return: 0 on success, -errno otherwise. + */ +static int spi_nor_write_sr2(struct spi_nor *nor, const u8 *sr2) +{ + int ret; + + ret = spi_nor_write_enable(nor); + if (ret) + return ret; + + if (nor->spimem) { + struct spi_mem_op op = + SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR2, 1), + SPI_MEM_OP_NO_ADDR, + SPI_MEM_OP_NO_DUMMY, + SPI_MEM_OP_DATA_OUT(1, sr2, 1)); + + ret = spi_mem_exec_op(nor->spimem, &op); + } else { + ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WRSR2, + sr2, 1); + } + + if (ret) { + dev_dbg(nor->dev, "error %d writing SR2\n", ret); + return ret; + } + + return spi_nor_wait_till_ready(nor); +} + +/** + * spi_nor_read_sr2() - Read the Status Register 2 using the + * SPINOR_OP_RDSR2 (3fh) command. + * @nor: pointer to 'struct spi_nor'. + * @sr2: pointer to DMA-able buffer where the value of the + * Status Register 2 will be written. + * + * Return: 0 on success, -errno otherwise. + */ +static int spi_nor_read_sr2(struct spi_nor *nor, u8 *sr2) +{ + int ret; + + if (nor->spimem) { + struct spi_mem_op op = + SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR2, 1), + SPI_MEM_OP_NO_ADDR, + SPI_MEM_OP_NO_DUMMY, + SPI_MEM_OP_DATA_IN(1, sr2, 1)); + + ret = spi_mem_exec_op(nor->spimem, &op); + } else { + ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDSR2, + sr2, 1); + } + + if (ret) + dev_dbg(nor->dev, "error %d reading SR2\n", ret); + + return ret; +} + +/** + * spi_nor_erase_chip() - Erase the entire flash memory. + * @nor: pointer to 'struct spi_nor'. + * - * Returns 0 if successful, non-zero otherwise. 
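The body above is a read-modify-write: the live Status Register byte is captured into sr_cr[0] first, so the 2-byte write can update the Configuration Register half without clobbering SR1, and both halves are verified afterwards. The same idea against a fake two-byte register file; regs[] and the function name are illustrative:

#include <stdio.h>
#include <errno.h>

static unsigned char regs[2];	/* [0] = SR, [1] = CR on the fake device */

static int write_cr_preserving_sr(unsigned char cr)
{
	unsigned char buf[2];

	buf[0] = regs[0];	/* keep the current SR value */
	buf[1] = cr;

	regs[0] = buf[0];	/* the 16-bit write hits both halves */
	regs[1] = buf[1];

	if (regs[0] != buf[0] || regs[1] != cr)
		return -EIO;	/* read back test failed */
	return 0;
}

int main(void)
{
	regs[0] = 0x02;		/* pre-existing SR content to preserve */
	printf("%d sr=0x%02x cr=0x%02x\n",
	       write_cr_preserving_sr(0x60), regs[0], regs[1]);
	return 0;
}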
*/ -static int erase_chip(struct spi_nor *nor) +static int spi_nor_erase_chip(struct spi_nor *nor) { + int ret; + dev_dbg(nor->dev, " %lldKiB\n", (long long)(nor->mtd.size >> 10)); if (nor->spimem) { @@ -868,10 +1212,99 @@ static int erase_chip(struct spi_nor *nor) SPI_MEM_OP_NO_DUMMY, SPI_MEM_OP_NO_DATA); - return spi_mem_exec_op(nor->spimem, &op); + ret = spi_mem_exec_op(nor->spimem, &op); + } else { + ret = nor->controller_ops->write_reg(nor, SPINOR_OP_CHIP_ERASE, + NULL, 0); } - return nor->write_reg(nor, SPINOR_OP_CHIP_ERASE, NULL, 0); + if (ret) + dev_dbg(nor->dev, "error %d erasing chip\n", ret); + + return ret; +} + +static struct spi_nor *mtd_to_spi_nor(struct mtd_info *mtd) +{ + return mtd->priv; +} + +static u8 spi_nor_convert_opcode(u8 opcode, const u8 table[][2], size_t size) +{ + size_t i; + + for (i = 0; i < size; i++) + if (table[i][0] == opcode) + return table[i][1]; + + /* No conversion found, keep input op code. */ + return opcode; +} + +static u8 spi_nor_convert_3to4_read(u8 opcode) +{ + static const u8 spi_nor_3to4_read[][2] = { + { SPINOR_OP_READ, SPINOR_OP_READ_4B }, + { SPINOR_OP_READ_FAST, SPINOR_OP_READ_FAST_4B }, + { SPINOR_OP_READ_1_1_2, SPINOR_OP_READ_1_1_2_4B }, + { SPINOR_OP_READ_1_2_2, SPINOR_OP_READ_1_2_2_4B }, + { SPINOR_OP_READ_1_1_4, SPINOR_OP_READ_1_1_4_4B }, + { SPINOR_OP_READ_1_4_4, SPINOR_OP_READ_1_4_4_4B }, + { SPINOR_OP_READ_1_1_8, SPINOR_OP_READ_1_1_8_4B }, + { SPINOR_OP_READ_1_8_8, SPINOR_OP_READ_1_8_8_4B }, + + { SPINOR_OP_READ_1_1_1_DTR, SPINOR_OP_READ_1_1_1_DTR_4B }, + { SPINOR_OP_READ_1_2_2_DTR, SPINOR_OP_READ_1_2_2_DTR_4B }, + { SPINOR_OP_READ_1_4_4_DTR, SPINOR_OP_READ_1_4_4_DTR_4B }, + }; + + return spi_nor_convert_opcode(opcode, spi_nor_3to4_read, + ARRAY_SIZE(spi_nor_3to4_read)); +} + +static u8 spi_nor_convert_3to4_program(u8 opcode) +{ + static const u8 spi_nor_3to4_program[][2] = { + { SPINOR_OP_PP, SPINOR_OP_PP_4B }, + { SPINOR_OP_PP_1_1_4, SPINOR_OP_PP_1_1_4_4B }, + { SPINOR_OP_PP_1_4_4, SPINOR_OP_PP_1_4_4_4B }, + { SPINOR_OP_PP_1_1_8, SPINOR_OP_PP_1_1_8_4B }, + { SPINOR_OP_PP_1_8_8, SPINOR_OP_PP_1_8_8_4B }, + }; + + return spi_nor_convert_opcode(opcode, spi_nor_3to4_program, + ARRAY_SIZE(spi_nor_3to4_program)); +} + +static u8 spi_nor_convert_3to4_erase(u8 opcode) +{ + static const u8 spi_nor_3to4_erase[][2] = { + { SPINOR_OP_BE_4K, SPINOR_OP_BE_4K_4B }, + { SPINOR_OP_BE_32K, SPINOR_OP_BE_32K_4B }, + { SPINOR_OP_SE, SPINOR_OP_SE_4B }, + }; + + return spi_nor_convert_opcode(opcode, spi_nor_3to4_erase, + ARRAY_SIZE(spi_nor_3to4_erase)); +} + +static void spi_nor_set_4byte_opcodes(struct spi_nor *nor) +{ + nor->read_opcode = spi_nor_convert_3to4_read(nor->read_opcode); + nor->program_opcode = spi_nor_convert_3to4_program(nor->program_opcode); + nor->erase_opcode = spi_nor_convert_3to4_erase(nor->erase_opcode); + + if (!spi_nor_has_uniform_erase(nor)) { + struct spi_nor_erase_map *map = &nor->params.erase_map; + struct spi_nor_erase_type *erase; + int i; + + for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) { + erase = &map->erase_type[i]; + erase->opcode = + spi_nor_convert_3to4_erase(erase->opcode); + } + } } static int spi_nor_lock_and_prep(struct spi_nor *nor, enum spi_nor_ops ops) @@ -880,10 +1313,9 @@ static int spi_nor_lock_and_prep(struct spi_nor *nor, enum spi_nor_ops ops) mutex_lock(&nor->lock); - if (nor->prepare) { - ret = nor->prepare(nor, ops); + if (nor->controller_ops && nor->controller_ops->prepare) { + ret = nor->controller_ops->prepare(nor, ops); if (ret) { - dev_err(nor->dev, "failed in the preparation.\n"); 
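spi_nor_lock_and_prep() in the hunk above pairs a mutex with an optional controller prepare hook and, when prepare fails, drops the mutex before returning, so no error path escapes with the lock held; spi_nor_unlock_and_unprep() mirrors it. A pthread sketch of the pairing; prepare_hook is a placeholder:

#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int prepare_hook(void) { return 0; }	/* optional in real drivers */

static int lock_and_prep(void)
{
	int ret;

	pthread_mutex_lock(&lock);

	ret = prepare_hook();
	if (ret)
		pthread_mutex_unlock(&lock);	/* never fail holding it */

	return ret;
}

static void unlock_and_unprep(void)
{
	/* an unprepare hook would run here, before the unlock */
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	if (!lock_and_prep()) {
		puts("critical section");
		unlock_and_unprep();
	}
	return 0;
}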
mutex_unlock(&nor->lock); return ret; } @@ -893,8 +1325,8 @@ static int spi_nor_lock_and_prep(struct spi_nor *nor, enum spi_nor_ops ops) static void spi_nor_unlock_and_unprep(struct spi_nor *nor, enum spi_nor_ops ops) { - if (nor->unprepare) - nor->unprepare(nor, ops); + if (nor->controller_ops && nor->controller_ops->unprepare) + nor->controller_ops->unprepare(nor, ops); mutex_unlock(&nor->lock); } @@ -935,9 +1367,6 @@ static int spi_nor_erase_sector(struct spi_nor *nor, u32 addr) addr = spi_nor_convert_addr(nor, addr); - if (nor->erase) - return nor->erase(nor, addr); - if (nor->spimem) { struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->erase_opcode, 1), @@ -946,6 +1375,8 @@ static int spi_nor_erase_sector(struct spi_nor *nor, u32 addr) SPI_MEM_OP_NO_DATA); return spi_mem_exec_op(nor->spimem, &op); + } else if (nor->controller_ops->erase) { + return nor->controller_ops->erase(nor, addr); } /* @@ -957,8 +1388,8 @@ static int spi_nor_erase_sector(struct spi_nor *nor, u32 addr) addr >>= 8; } - return nor->write_reg(nor, nor->erase_opcode, nor->bouncebuf, - nor->addr_width); + return nor->controller_ops->write_reg(nor, nor->erase_opcode, + nor->bouncebuf, nor->addr_width); } /** @@ -1208,7 +1639,9 @@ static int spi_nor_erase_multi_sectors(struct spi_nor *nor, u64 addr, u32 len) list_for_each_entry_safe(cmd, next, &erase_list, list) { nor->erase_opcode = cmd->opcode; while (cmd->count) { - write_enable(nor); + ret = spi_nor_write_enable(nor); + if (ret) + goto destroy_erase_cmd_list; ret = spi_nor_erase_sector(nor, addr); if (ret) @@ -1263,12 +1696,13 @@ static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr) if (len == mtd->size && !(nor->flags & SNOR_F_NO_OP_CHIP_ERASE)) { unsigned long timeout; - write_enable(nor); + ret = spi_nor_write_enable(nor); + if (ret) + goto erase_err; - if (erase_chip(nor)) { - ret = -EIO; + ret = spi_nor_erase_chip(nor); + if (ret) goto erase_err; - } /* * Scale the timeout linearly with the size of the flash, with @@ -1291,7 +1725,9 @@ static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr) /* "sector"-at-a-time erase */ } else if (spi_nor_has_uniform_erase(nor)) { while (len) { - write_enable(nor); + ret = spi_nor_write_enable(nor); + if (ret) + goto erase_err; ret = spi_nor_erase_sector(nor, addr); if (ret) @@ -1312,7 +1748,7 @@ static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr) goto erase_err; } - write_disable(nor); + ret = spi_nor_write_disable(nor); erase_err: spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_ERASE); @@ -1320,27 +1756,6 @@ erase_err: return ret; } -/* Write status register and ensure bits in mask match written values */ -static int write_sr_and_check(struct spi_nor *nor, u8 status_new, u8 mask) -{ - int ret; - - write_enable(nor); - ret = write_sr(nor, status_new); - if (ret) - return ret; - - ret = spi_nor_wait_till_ready(nor); - if (ret) - return ret; - - ret = read_sr(nor); - if (ret < 0) - return ret; - - return ((ret & mask) != (status_new & mask)) ? 
-EIO : 0; -} - static void stm_get_locked_range(struct spi_nor *nor, u8 sr, loff_t *ofs, uint64_t *len) { @@ -1433,16 +1848,18 @@ static int stm_is_unlocked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len, static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len) { struct mtd_info *mtd = &nor->mtd; - int status_old, status_new; + int ret, status_old, status_new; u8 mask = SR_BP2 | SR_BP1 | SR_BP0; u8 shift = ffs(mask) - 1, pow, val; loff_t lock_len; bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB; bool use_top; - status_old = read_sr(nor); - if (status_old < 0) - return status_old; + ret = spi_nor_read_sr(nor, nor->bouncebuf); + if (ret) + return ret; + + status_old = nor->bouncebuf[0]; /* If nothing in our range is unlocked, we don't need to do anything */ if (stm_is_locked_sr(nor, ofs, len, status_old)) @@ -1502,7 +1919,7 @@ static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len) if ((status_new & mask) < (status_old & mask)) return -EINVAL; - return write_sr_and_check(nor, status_new, mask); + return spi_nor_write_sr_and_check(nor, status_new); } /* @@ -1513,16 +1930,18 @@ static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len) static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len) { struct mtd_info *mtd = &nor->mtd; - int status_old, status_new; + int ret, status_old, status_new; u8 mask = SR_BP2 | SR_BP1 | SR_BP0; u8 shift = ffs(mask) - 1, pow, val; loff_t lock_len; bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB; bool use_top; - status_old = read_sr(nor); - if (status_old < 0) - return status_old; + ret = spi_nor_read_sr(nor, nor->bouncebuf); + if (ret) + return ret; + + status_old = nor->bouncebuf[0]; /* If nothing in our range is locked, we don't need to do anything */ if (stm_is_unlocked_sr(nor, ofs, len, status_old)) @@ -1585,7 +2004,7 @@ static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len) if ((status_new & mask) > (status_old & mask)) return -EINVAL; - return write_sr_and_check(nor, status_new, mask); + return spi_nor_write_sr_and_check(nor, status_new); } /* @@ -1597,13 +2016,13 @@ static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len) */ static int stm_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len) { - int status; + int ret; - status = read_sr(nor); - if (status < 0) - return status; + ret = spi_nor_read_sr(nor, nor->bouncebuf); + if (ret) + return ret; - return stm_is_locked_sr(nor, ofs, len, status); + return stm_is_locked_sr(nor, ofs, len, nor->bouncebuf[0]); } static const struct spi_nor_locking_ops stm_locking_ops = { @@ -1657,242 +2076,59 @@ static int spi_nor_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len) return ret; } -/* - * Write status Register and configuration register with 2 bytes - * The first byte will be written to the status register, while the - * second byte will be written to the configuration register. - * Return negative if error occurred. 
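stm_lock() and stm_unlock() above encode the protected region in the three SR_BP bits: a non-zero value selects a power-of-two fraction of the array, counted from the top by default or from the bottom when the Top/Bottom bit applies. A sketch of the decode arithmetic; the bit positions and the shift trick are my reading of the stm_get_locked_range() helper, whose body is not shown in this hunk, so treat the constants as assumptions:

#include <stdint.h>
#include <stdio.h>

#define SR_BP_MASK	0x1c	/* BP2..BP0 */
#define SR_BP_SHIFT	2
#define SR_TB		0x20	/* Top/Bottom select */

/* Decode SR BP/TB bits into a locked [ofs, ofs+len) byte range. */
static void get_locked_range(uint8_t sr, uint64_t size,
			     uint64_t *ofs, uint64_t *len)
{
	uint8_t pow;

	if (!(sr & SR_BP_MASK)) {
		*ofs = 0;
		*len = 0;
		return;
	}

	/* BP value N locks size >> (7 - N) bytes. */
	pow = ((sr & SR_BP_MASK) ^ SR_BP_MASK) >> SR_BP_SHIFT;
	*len = size >> pow;
	*ofs = (sr & SR_TB) ? 0 : size - *len;
}

int main(void)
{
	uint64_t ofs, len;

	get_locked_range(0x1c, 16 << 20, &ofs, &len);	/* BP=7: whole array */
	printf("BP=7: ofs=%llu len=%llu\n",
	       (unsigned long long)ofs, (unsigned long long)len);

	get_locked_range(0x04, 16 << 20, &ofs, &len);	/* BP=1: top 1/64 */
	printf("BP=1: ofs=%llu len=%llu\n",
	       (unsigned long long)ofs, (unsigned long long)len);
	return 0;
}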
- */ -static int write_sr_cr(struct spi_nor *nor, u8 *sr_cr) -{ - int ret; - - write_enable(nor); - - if (nor->spimem) { - struct spi_mem_op op = - SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR, 1), - SPI_MEM_OP_NO_ADDR, - SPI_MEM_OP_NO_DUMMY, - SPI_MEM_OP_DATA_OUT(2, sr_cr, 1)); - - ret = spi_mem_exec_op(nor->spimem, &op); - } else { - ret = nor->write_reg(nor, SPINOR_OP_WRSR, sr_cr, 2); - } - - if (ret < 0) { - dev_err(nor->dev, - "error while writing configuration register\n"); - return -EINVAL; - } - - ret = spi_nor_wait_till_ready(nor); - if (ret) { - dev_err(nor->dev, - "timeout while writing configuration register\n"); - return ret; - } - - return 0; -} - -/** - * macronix_quad_enable() - set QE bit in Status Register. - * @nor: pointer to a 'struct spi_nor' - * - * Set the Quad Enable (QE) bit in the Status Register. - * - * bit 6 of the Status Register is the QE bit for Macronix like QSPI memories. - * - * Return: 0 on success, -errno otherwise. - */ -static int macronix_quad_enable(struct spi_nor *nor) -{ - int ret, val; - - val = read_sr(nor); - if (val < 0) - return val; - if (val & SR_QUAD_EN_MX) - return 0; - - write_enable(nor); - - write_sr(nor, val | SR_QUAD_EN_MX); - - ret = spi_nor_wait_till_ready(nor); - if (ret) - return ret; - - ret = read_sr(nor); - if (!(ret > 0 && (ret & SR_QUAD_EN_MX))) { - dev_err(nor->dev, "Macronix Quad bit not set\n"); - return -EINVAL; - } - - return 0; -} - /** - * spansion_quad_enable() - set QE bit in Configuraiton Register. + * spi_nor_sr1_bit6_quad_enable() - Set the Quad Enable BIT(6) in the Status + * Register 1. * @nor: pointer to a 'struct spi_nor' * - * Set the Quad Enable (QE) bit in the Configuration Register. - * This function is kept for legacy purpose because it has been used for a - * long time without anybody complaining but it should be considered as - * deprecated and maybe buggy. - * First, this function doesn't care about the previous values of the Status - * and Configuration Registers when it sets the QE bit (bit 1) in the - * Configuration Register: all other bits are cleared, which may have unwanted - * side effects like removing some block protections. - * Secondly, it uses the Read Configuration Register (35h) instruction though - * some very old and few memories don't support this instruction. If a pull-up - * resistor is present on the MISO/IO1 line, we might still be able to pass the - * "read back" test because the QSPI memory doesn't recognize the command, - * so leaves the MISO/IO1 line state unchanged, hence read_cr() returns 0xFF. - * - * bit 1 of the Configuration Register is the QE bit for Spansion like QSPI - * memories. + * Bit 6 of the Status Register 1 is the QE bit for Macronix like QSPI memories. * * Return: 0 on success, -errno otherwise. */ -static int spansion_quad_enable(struct spi_nor *nor) +static int spi_nor_sr1_bit6_quad_enable(struct spi_nor *nor) { - u8 *sr_cr = nor->bouncebuf; int ret; - sr_cr[0] = 0; - sr_cr[1] = CR_QUAD_EN_SPAN; - ret = write_sr_cr(nor, sr_cr); + ret = spi_nor_read_sr(nor, nor->bouncebuf); if (ret) return ret; - /* read back and check it */ - ret = read_cr(nor); - if (!(ret > 0 && (ret & CR_QUAD_EN_SPAN))) { - dev_err(nor->dev, "Spansion Quad bit not set\n"); - return -EINVAL; - } - - return 0; -} - -/** - * spansion_no_read_cr_quad_enable() - set QE bit in Configuration Register. - * @nor: pointer to a 'struct spi_nor' - * - * Set the Quad Enable (QE) bit in the Configuration Register. 
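The deleted spansion_quad_enable() is described in the comment above as deprecated because it rewrote SR and CR without preserving their prior contents, and because a 1-byte Write Status can clear CR on some parts. A toy simulation of that hazard (sim_sr, sim_cr and write_sr() are invented for this sketch):

#include <stdint.h>
#include <stdio.h>

/* Simulated SR1/CR pair of a part where a 1-byte Write Status (01h)
 * has the side effect of clearing the Configuration Register, the
 * BFPT_DWORD15_QER_SR2_BIT1_BUGGY case later in this diff. */
static uint8_t sim_sr, sim_cr;

static void write_sr(const uint8_t *buf, int len)
{
	sim_sr = buf[0];
	sim_cr = (len >= 2) ? buf[1] : 0;	/* 1-byte write wipes CR */
}

int main(void)
{
	uint8_t buf[2];

	sim_cr = 0x02;				/* QE already set */
	buf[0] = 0x00;				/* clear BP bits ... */
	write_sr(buf, 1);			/* ... with a 1-byte write */
	printf("after 1-byte write: CR=0x%02x (QE lost)\n", sim_cr);

	sim_cr = 0x02;
	buf[0] = 0x00;
	buf[1] = sim_cr;			/* carry CR along: 16-bit sequence */
	write_sr(buf, 2);
	printf("after 2-byte write: CR=0x%02x (QE kept)\n", sim_cr);
	return 0;
}

This is the QE-clearing bug on lock()/unlock() that the series fixes, and it is why the rework defaults to the 16-bit Write Status sequence (SNOR_F_HAS_16BIT_SR) unless SFDP proves it unnecessary.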
- * This function should be used with QSPI memories not supporting the Read - * Configuration Register (35h) instruction. - * - * bit 1 of the Configuration Register is the QE bit for Spansion like QSPI - * memories. - * - * Return: 0 on success, -errno otherwise. - */ -static int spansion_no_read_cr_quad_enable(struct spi_nor *nor) -{ - u8 *sr_cr = nor->bouncebuf; - int ret; + if (nor->bouncebuf[0] & SR1_QUAD_EN_BIT6) + return 0; - /* Keep the current value of the Status Register. */ - ret = read_sr(nor); - if (ret < 0) { - dev_err(nor->dev, "error while reading status register\n"); - return -EINVAL; - } - sr_cr[0] = ret; - sr_cr[1] = CR_QUAD_EN_SPAN; + nor->bouncebuf[0] |= SR1_QUAD_EN_BIT6; - return write_sr_cr(nor, sr_cr); + return spi_nor_write_sr1_and_check(nor, nor->bouncebuf[0]); } /** - * spansion_read_cr_quad_enable() - set QE bit in Configuration Register. - * @nor: pointer to a 'struct spi_nor' - * - * Set the Quad Enable (QE) bit in the Configuration Register. - * This function should be used with QSPI memories supporting the Read - * Configuration Register (35h) instruction. + * spi_nor_sr2_bit1_quad_enable() - set the Quad Enable BIT(1) in the Status + * Register 2. + * @nor: pointer to a 'struct spi_nor'. * - * bit 1 of the Configuration Register is the QE bit for Spansion like QSPI - * memories. + * Bit 1 of the Status Register 2 is the QE bit for Spansion like QSPI memories. * * Return: 0 on success, -errno otherwise. */ -static int spansion_read_cr_quad_enable(struct spi_nor *nor) +static int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor) { - struct device *dev = nor->dev; - u8 *sr_cr = nor->bouncebuf; int ret; - /* Check current Quad Enable bit value. */ - ret = read_cr(nor); - if (ret < 0) { - dev_err(dev, "error while reading configuration register\n"); - return -EINVAL; - } - - if (ret & CR_QUAD_EN_SPAN) - return 0; + if (nor->flags & SNOR_F_NO_READ_CR) + return spi_nor_write_16bit_cr_and_check(nor, SR2_QUAD_EN_BIT1); - sr_cr[1] = ret | CR_QUAD_EN_SPAN; - - /* Keep the current value of the Status Register. */ - ret = read_sr(nor); - if (ret < 0) { - dev_err(dev, "error while reading status register\n"); - return -EINVAL; - } - sr_cr[0] = ret; - - ret = write_sr_cr(nor, sr_cr); + ret = spi_nor_read_cr(nor, nor->bouncebuf); if (ret) return ret; - /* Read back and check it. */ - ret = read_cr(nor); - if (!(ret > 0 && (ret & CR_QUAD_EN_SPAN))) { - dev_err(nor->dev, "Spansion Quad bit not set\n"); - return -EINVAL; - } - - return 0; -} - -static int spi_nor_write_sr2(struct spi_nor *nor, u8 *sr2) -{ - if (nor->spimem) { - struct spi_mem_op op = - SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR2, 1), - SPI_MEM_OP_NO_ADDR, - SPI_MEM_OP_NO_DUMMY, - SPI_MEM_OP_DATA_OUT(1, sr2, 1)); - - return spi_mem_exec_op(nor->spimem, &op); - } - - return nor->write_reg(nor, SPINOR_OP_WRSR2, sr2, 1); -} - -static int spi_nor_read_sr2(struct spi_nor *nor, u8 *sr2) -{ - if (nor->spimem) { - struct spi_mem_op op = - SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR2, 1), - SPI_MEM_OP_NO_ADDR, - SPI_MEM_OP_NO_DUMMY, - SPI_MEM_OP_DATA_IN(1, sr2, 1)); - - return spi_mem_exec_op(nor->spimem, &op); - } + if (nor->bouncebuf[0] & SR2_QUAD_EN_BIT1) + return 0; - return nor->read_reg(nor, SPINOR_OP_RDSR2, sr2, 1); + return spi_nor_write_16bit_cr_and_check(nor, nor->bouncebuf[0]); } /** - * sr2_bit7_quad_enable() - set QE bit in Status Register 2. + * spi_nor_sr2_bit7_quad_enable() - set QE bit in Status Register 2. * @nor: pointer to a 'struct spi_nor' * * Set the Quad Enable (QE) bit in the Status Register 2. 
@@ -1903,10 +2139,11 @@ static int spi_nor_read_sr2(struct spi_nor *nor, u8 *sr2) * * Return: 0 on success, -errno otherwise. */ -static int sr2_bit7_quad_enable(struct spi_nor *nor) +static int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor) { u8 *sr2 = nor->bouncebuf; int ret; + u8 sr2_written; /* Check current Quad Enable bit value. */ ret = spi_nor_read_sr2(nor, sr2); @@ -1918,117 +2155,23 @@ static int sr2_bit7_quad_enable(struct spi_nor *nor) /* Update the Quad Enable bit. */ *sr2 |= SR2_QUAD_EN_BIT7; - write_enable(nor); - ret = spi_nor_write_sr2(nor, sr2); - if (ret < 0) { - dev_err(nor->dev, "error while writing status register 2\n"); - return -EINVAL; - } - - ret = spi_nor_wait_till_ready(nor); - if (ret < 0) { - dev_err(nor->dev, "timeout while writing status register 2\n"); + if (ret) return ret; - } + + sr2_written = *sr2; /* Read back and check it. */ ret = spi_nor_read_sr2(nor, sr2); - if (!(ret > 0 && (*sr2 & SR2_QUAD_EN_BIT7))) { - dev_err(nor->dev, "SR2 Quad bit not set\n"); - return -EINVAL; - } - - return 0; -} - -/** - * spi_nor_clear_sr_bp() - clear the Status Register Block Protection bits. - * @nor: pointer to a 'struct spi_nor' - * - * Read-modify-write function that clears the Block Protection bits from the - * Status Register without affecting other bits. - * - * Return: 0 on success, -errno otherwise. - */ -static int spi_nor_clear_sr_bp(struct spi_nor *nor) -{ - int ret; - u8 mask = SR_BP2 | SR_BP1 | SR_BP0; - - ret = read_sr(nor); - if (ret < 0) { - dev_err(nor->dev, "error while reading status register\n"); - return ret; - } - - write_enable(nor); - - ret = write_sr(nor, ret & ~mask); - if (ret) { - dev_err(nor->dev, "write to status register failed\n"); - return ret; - } - - ret = spi_nor_wait_till_ready(nor); if (ret) - dev_err(nor->dev, "timeout while writing status register\n"); - return ret; -} - -/** - * spi_nor_spansion_clear_sr_bp() - clear the Status Register Block Protection - * bits on spansion flashes. - * @nor: pointer to a 'struct spi_nor' - * - * Read-modify-write function that clears the Block Protection bits from the - * Status Register without affecting other bits. The function is tightly - * coupled with the spansion_quad_enable() function. Both assume that the Write - * Register with 16 bits, together with the Read Configuration Register (35h) - * instructions are supported. - * - * Return: 0 on success, -errno otherwise. - */ -static int spi_nor_spansion_clear_sr_bp(struct spi_nor *nor) -{ - int ret; - u8 mask = SR_BP2 | SR_BP1 | SR_BP0; - u8 *sr_cr = nor->bouncebuf; - - /* Check current Quad Enable bit value. */ - ret = read_cr(nor); - if (ret < 0) { - dev_err(nor->dev, - "error while reading configuration register\n"); return ret; - } - - /* - * When the configuration register Quad Enable bit is one, only the - * Write Status (01h) command with two data bytes may be used. - */ - if (ret & CR_QUAD_EN_SPAN) { - sr_cr[1] = ret; - ret = read_sr(nor); - if (ret < 0) { - dev_err(nor->dev, - "error while reading status register\n"); - return ret; - } - sr_cr[0] = ret & ~mask; - - ret = write_sr_cr(nor, sr_cr); - if (ret) - dev_err(nor->dev, "16-bit write register failed\n"); - return ret; + if (*sr2 != sr2_written) { + dev_dbg(nor->dev, "SR2: Read back test failed\n"); + return -EIO; } - /* - * If the Quad Enable bit is zero, use the Write Status (01h) command - * with one data byte. 
- */ - return spi_nor_clear_sr_bp(nor); + return 0; } /* Used when the "_ext_id" is two bytes at most */ @@ -2136,7 +2279,7 @@ static void gd25q256_default_init(struct spi_nor *nor) * indicate the quad_enable method for this case, we need * to set it in the default_init fixup hook. */ - nor->params.quad_enable = macronix_quad_enable; + nor->params.quad_enable = spi_nor_sr1_bit6_quad_enable; } static struct spi_nor_fixups gd25q256_fixups = { @@ -2179,6 +2322,8 @@ static const struct flash_info spi_nor_ids[] = { { "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128, SECT_4K) }, { "en25q80a", INFO(0x1c3014, 0, 64 * 1024, 16, SECT_4K | SPI_NOR_DUAL_READ) }, + { "en25qh16", INFO(0x1c7015, 0, 64 * 1024, 32, + SECT_4K | SPI_NOR_DUAL_READ) }, { "en25qh32", INFO(0x1c7016, 0, 64 * 1024, 64, 0) }, { "en25qh64", INFO(0x1c7017, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ) }, @@ -2267,6 +2412,10 @@ static const struct flash_info spi_nor_ids[] = { SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, { "is25wp128", INFO(0x9d7018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, + { "is25wp256", INFO(0x9d7019, 0, 64 * 1024, 512, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | + SPI_NOR_4B_OPCODES) + .fixups = &is25lp256_fixups }, /* Macronix */ { "mx25l512e", INFO(0xc22010, 0, 64 * 1024, 1, SECT_4K) }, @@ -2482,6 +2631,8 @@ static const struct flash_info spi_nor_ids[] = { { "w25q256", INFO(0xef4019, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, { "w25q256jvm", INFO(0xef7019, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, + { "w25q256jw", INFO(0xef6019, 0, 64 * 1024, 512, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, { "w25m512jv", INFO(0xef7119, 0, 64 * 1024, 1024, SECT_4K | SPI_NOR_QUAD_READ | SPI_NOR_DUAL_READ) }, @@ -2520,11 +2671,11 @@ static const struct flash_info *spi_nor_read_id(struct spi_nor *nor) tmp = spi_mem_exec_op(nor->spimem, &op); } else { - tmp = nor->read_reg(nor, SPINOR_OP_RDID, id, - SPI_NOR_MAX_ID_LEN); + tmp = nor->controller_ops->read_reg(nor, SPINOR_OP_RDID, id, + SPI_NOR_MAX_ID_LEN); } - if (tmp < 0) { - dev_err(nor->dev, "error %d reading JEDEC ID\n", tmp); + if (tmp) { + dev_dbg(nor->dev, "error %d reading JEDEC ID\n", tmp); return ERR_PTR(tmp); } @@ -2544,7 +2695,7 @@ static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf) { struct spi_nor *nor = mtd_to_spi_nor(mtd); - int ret; + ssize_t ret; dev_dbg(nor->dev, "from 0x%08x, len %zd\n", (u32)from, len); @@ -2583,7 +2734,7 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf) { struct spi_nor *nor = mtd_to_spi_nor(mtd); - size_t actual; + size_t actual = 0; int ret; dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len); @@ -2592,26 +2743,28 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len, if (ret) return ret; - write_enable(nor); + ret = spi_nor_write_enable(nor); + if (ret) + goto out; nor->sst_write_second = false; - actual = to % 2; /* Start write from odd address. */ - if (actual) { + if (to % 2) { nor->program_opcode = SPINOR_OP_BP; /* write one byte. */ ret = spi_nor_write_data(nor, to, 1, buf); if (ret < 0) - goto sst_write_err; - WARN(ret != 1, "While writing 1 byte written %i bytes\n", - (int)ret); + goto out; + WARN(ret != 1, "While writing 1 byte written %i bytes\n", ret); ret = spi_nor_wait_till_ready(nor); if (ret) - goto sst_write_err; + goto out; + + to++; + actual++; } - to += actual; /* Write out most of the data here. 
*/ for (; actual < len - 1; actual += 2) { @@ -2620,39 +2773,44 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len, /* write two bytes. */ ret = spi_nor_write_data(nor, to, 2, buf + actual); if (ret < 0) - goto sst_write_err; - WARN(ret != 2, "While writing 2 bytes written %i bytes\n", - (int)ret); + goto out; + WARN(ret != 2, "While writing 2 bytes written %i bytes\n", ret); ret = spi_nor_wait_till_ready(nor); if (ret) - goto sst_write_err; + goto out; to += 2; nor->sst_write_second = true; } nor->sst_write_second = false; - write_disable(nor); + ret = spi_nor_write_disable(nor); + if (ret) + goto out; + ret = spi_nor_wait_till_ready(nor); if (ret) - goto sst_write_err; + goto out; /* Write out trailing byte if it exists. */ if (actual != len) { - write_enable(nor); + ret = spi_nor_write_enable(nor); + if (ret) + goto out; nor->program_opcode = SPINOR_OP_BP; ret = spi_nor_write_data(nor, to, 1, buf + actual); if (ret < 0) - goto sst_write_err; - WARN(ret != 1, "While writing 1 byte written %i bytes\n", - (int)ret); + goto out; + WARN(ret != 1, "While writing 1 byte written %i bytes\n", ret); ret = spi_nor_wait_till_ready(nor); if (ret) - goto sst_write_err; - write_disable(nor); + goto out; + actual += 1; + + ret = spi_nor_write_disable(nor); } -sst_write_err: +out: *retlen += actual; spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE); return ret; @@ -2701,7 +2859,10 @@ static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len, addr = spi_nor_convert_addr(nor, addr); - write_enable(nor); + ret = spi_nor_write_enable(nor); + if (ret) + goto write_err; + ret = spi_nor_write_data(nor, addr, page_remain, buf + i); if (ret < 0) goto write_err; @@ -2722,13 +2883,21 @@ write_err: static int spi_nor_check(struct spi_nor *nor) { if (!nor->dev || - (!nor->spimem && - (!nor->read || !nor->write || !nor->read_reg || - !nor->write_reg))) { + (!nor->spimem && !nor->controller_ops) || + (!nor->spimem && nor->controller_ops && + (!nor->controller_ops->read || + !nor->controller_ops->write || + !nor->controller_ops->read_reg || + !nor->controller_ops->write_reg))) { pr_err("spi-nor: please fill all the necessary fields!\n"); return -EINVAL; } + if (nor->spimem && nor->controller_ops) { + dev_err(nor->dev, "nor->spimem and nor->controller_ops are mutually exclusive, please set just one of them.\n"); + return -EINVAL; + } + return 0; } @@ -2738,10 +2907,8 @@ static int s3an_nor_setup(struct spi_nor *nor, int ret; ret = spi_nor_xread_sr(nor, nor->bouncebuf); - if (ret < 0) { - dev_err(nor->dev, "error %d reading XRDSR\n", (int) ret); + if (ret) return ret; - } nor->erase_opcode = SPINOR_OP_XSE; nor->program_opcode = SPINOR_OP_XPP; @@ -2865,7 +3032,7 @@ static int spi_nor_hwcaps_pp2cmd(u32 hwcaps) */ static int spi_nor_read_raw(struct spi_nor *nor, u32 addr, size_t len, u8 *buf) { - int ret; + ssize_t ret; while (len) { ret = spi_nor_read_data(nor, addr, len, buf); @@ -3489,20 +3656,39 @@ static int spi_nor_parse_bfpt(struct spi_nor *nor, break; case BFPT_DWORD15_QER_SR2_BIT1_BUGGY: + /* + * Writing only one byte to the Status Register has the + * side-effect of clearing Status Register 2. + */ case BFPT_DWORD15_QER_SR2_BIT1_NO_RD: - params->quad_enable = spansion_no_read_cr_quad_enable; + /* + * Read Configuration Register (35h) instruction is not + * supported. 
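sst_write() above has to honor the SST programming model: an odd start address forces a single byte program (SPINOR_OP_BP), the body is programmed two bytes at a time, and an odd trailing byte goes out last. A small planner that reproduces just the address arithmetic (sst_plan() is an illustration, not the driver function):

#include <inttypes.h>
#include <stdio.h>

/* Model of the sst_write() address handling: head byte for an odd
 * start, word-at-a-time body, optional trailing byte. */
static void sst_plan(uint32_t to, size_t len)
{
	size_t actual = 0;

	if (!len)
		return;		/* guard the len - 1 below */

	if (to % 2) {		/* odd start: single byte first */
		printf("byte @0x%06" PRIx32 "\n", to);
		to++;
		actual++;
	}

	for (; actual < len - 1; actual += 2) {	/* word-at-a-time body */
		printf("word @0x%06" PRIx32 "\n", to);
		to += 2;
	}

	if (actual != len)	/* odd trailing byte, if any */
		printf("byte @0x%06" PRIx32 "\n", to);
}

int main(void)
{
	sst_plan(0x1001, 5);	/* head byte, two words, no tail */
	return 0;
}

Counting the head byte into `actual` up front, instead of folding it into `to` afterwards, is what lets the rewritten sst_write() report a correct *retlen on every early exit, which is the retlen fix mentioned in the merge summary.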
+ */ + nor->flags |= SNOR_F_HAS_16BIT_SR | SNOR_F_NO_READ_CR; + params->quad_enable = spi_nor_sr2_bit1_quad_enable; break; case BFPT_DWORD15_QER_SR1_BIT6: - params->quad_enable = macronix_quad_enable; + nor->flags &= ~SNOR_F_HAS_16BIT_SR; + params->quad_enable = spi_nor_sr1_bit6_quad_enable; break; case BFPT_DWORD15_QER_SR2_BIT7: - params->quad_enable = sr2_bit7_quad_enable; + nor->flags &= ~SNOR_F_HAS_16BIT_SR; + params->quad_enable = spi_nor_sr2_bit7_quad_enable; break; case BFPT_DWORD15_QER_SR2_BIT1: - params->quad_enable = spansion_read_cr_quad_enable; + /* + * JESD216 rev B or later does not specify if writing only one + * byte to the Status Register clears or not the Status + * Register 2, so let's be cautious and keep the default + * assumption of a 16-bit Write Status (01h) command. + */ + nor->flags |= SNOR_F_HAS_16BIT_SR; + + params->quad_enable = spi_nor_sr2_bit1_quad_enable; break; default: @@ -4101,7 +4287,7 @@ static int spi_nor_parse_sfdp(struct spi_nor *nor, err = spi_nor_read_sfdp(nor, sizeof(header), psize, param_headers); if (err < 0) { - dev_err(dev, "failed to read SFDP parameter headers\n"); + dev_dbg(dev, "failed to read SFDP parameter headers\n"); goto exit; } } @@ -4348,7 +4534,7 @@ static int spi_nor_default_setup(struct spi_nor *nor, /* Select the (Fast) Read command. */ err = spi_nor_select_read(nor, shared_mask); if (err) { - dev_err(nor->dev, + dev_dbg(nor->dev, "can't select read settings supported by both the SPI controller and memory.\n"); return err; } @@ -4356,7 +4542,7 @@ static int spi_nor_default_setup(struct spi_nor *nor, /* Select the Page Program command. */ err = spi_nor_select_pp(nor, shared_mask); if (err) { - dev_err(nor->dev, + dev_dbg(nor->dev, "can't select write settings supported by both the SPI controller and memory.\n"); return err; } @@ -4364,7 +4550,7 @@ static int spi_nor_default_setup(struct spi_nor *nor, /* Select the Sector Erase command. 
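The BFPT_DWORD15_QER_* cases above reduce to choosing a quad-enable function pointer and adjusting two flags. A compact model of that decision table (the enum values and flag fields are stand-ins for this sketch, not the JESD216 encodings):

#include <stdio.h>

/* Stand-in QER encodings; the real values live in JESD216 BFPT
 * DWORD15 and are not reproduced here. */
enum qer { QER_NONE, QER_SR1_BIT6, QER_SR2_BIT7, QER_SR2_BIT1,
	   QER_SR2_BIT1_NO_RD };

static int sr1_bit6_qe(void) { return 0; }
static int sr2_bit7_qe(void) { return 0; }
static int sr2_bit1_qe(void) { return 0; }

struct params {
	int (*quad_enable)(void);
	int has_16bit_sr;
	int no_read_cr;
};

/* Same decision table as the spi_nor_parse_bfpt() switch above. */
static void select_qe(enum qer qer, struct params *p)
{
	switch (qer) {
	case QER_NONE:
		p->quad_enable = NULL;
		break;
	case QER_SR1_BIT6:
		p->has_16bit_sr = 0;
		p->quad_enable = sr1_bit6_qe;
		break;
	case QER_SR2_BIT7:
		p->has_16bit_sr = 0;
		p->quad_enable = sr2_bit7_qe;
		break;
	case QER_SR2_BIT1_NO_RD:
		p->has_16bit_sr = 1;
		p->no_read_cr = 1;	/* RDCR (35h) unusable */
		p->quad_enable = sr2_bit1_qe;
		break;
	case QER_SR2_BIT1:
		p->has_16bit_sr = 1;	/* cautious default, see comment above */
		p->quad_enable = sr2_bit1_qe;
		break;
	}
}

int main(void)
{
	struct params p = { 0 };

	select_qe(QER_SR2_BIT1_NO_RD, &p);
	printf("16bit_sr=%d no_read_cr=%d\n", p.has_16bit_sr, p.no_read_cr);
	return 0;
}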
*/ err = spi_nor_select_erase(nor); if (err) { - dev_err(nor->dev, + dev_dbg(nor->dev, "can't select erase settings supported by both the SPI controller and memory.\n"); return err; } @@ -4381,12 +4567,32 @@ static int spi_nor_setup(struct spi_nor *nor, return nor->params.setup(nor, hwcaps); } +static void atmel_set_default_init(struct spi_nor *nor) +{ + nor->flags |= SNOR_F_HAS_LOCK; +} + +static void intel_set_default_init(struct spi_nor *nor) +{ + nor->flags |= SNOR_F_HAS_LOCK; +} + +static void issi_set_default_init(struct spi_nor *nor) +{ + nor->params.quad_enable = spi_nor_sr1_bit6_quad_enable; +} + static void macronix_set_default_init(struct spi_nor *nor) { - nor->params.quad_enable = macronix_quad_enable; + nor->params.quad_enable = spi_nor_sr1_bit6_quad_enable; nor->params.set_4byte = macronix_set_4byte; } +static void sst_set_default_init(struct spi_nor *nor) +{ + nor->flags |= SNOR_F_HAS_LOCK; +} + static void st_micron_set_default_init(struct spi_nor *nor) { nor->flags |= SNOR_F_HAS_LOCK; @@ -4408,6 +4614,18 @@ static void spi_nor_manufacturer_init_params(struct spi_nor *nor) { /* Init flash parameters based on MFR */ switch (JEDEC_MFR(nor->info)) { + case SNOR_MFR_ATMEL: + atmel_set_default_init(nor); + break; + + case SNOR_MFR_INTEL: + intel_set_default_init(nor); + break; + + case SNOR_MFR_ISSI: + issi_set_default_init(nor); + break; + case SNOR_MFR_MACRONIX: macronix_set_default_init(nor); break; @@ -4417,6 +4635,10 @@ static void spi_nor_manufacturer_init_params(struct spi_nor *nor) st_micron_set_default_init(nor); break; + case SNOR_MFR_SST: + sst_set_default_init(nor); + break; + case SNOR_MFR_WINBOND: winbond_set_default_init(nor); break; @@ -4465,9 +4687,11 @@ static void spi_nor_info_init_params(struct spi_nor *nor) u8 i, erase_mask; /* Initialize legacy flash parameters and settings. */ - params->quad_enable = spansion_quad_enable; + params->quad_enable = spi_nor_sr2_bit1_quad_enable; params->set_4byte = spansion_set_4byte; params->setup = spi_nor_default_setup; + /* Default to 16-bit Write Status (01h) Command */ + nor->flags |= SNOR_F_HAS_16BIT_SR; /* Set SPI NOR sizes. */ params->size = (u64)info->sector_size * info->n_sectors; @@ -4675,25 +4899,36 @@ static int spi_nor_quad_enable(struct spi_nor *nor) return nor->params.quad_enable(nor); } +/** + * spi_nor_unlock_all() - Unlocks the entire flash memory array. + * @nor: pointer to a 'struct spi_nor'. + * + * Some SPI NOR flashes are write protected by default after a power-on reset + * cycle, in order to avoid inadvertent writes during power-up. Backward + * compatibility imposes to unlock the entire flash memory array at power-up + * by default. 
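The per-manufacturer hooks above are dispatched from a single switch on the JEDEC manufacturer byte, and each hook only overrides the defaults that differ from the generic ones. A condensed model (it merges the Atmel, Intel, and SST hooks, which all just set the lock flag, and ignores Macronix's extra set_4byte override):

#include <stdio.h>

/* Real JEDEC manufacturer IDs for the vendors handled above. */
#define MFR_ATMEL	0x1f
#define MFR_INTEL	0x89
#define MFR_ISSI	0x9d
#define MFR_MACRONIX	0xc2
#define MFR_SST		0xbf

#define F_HAS_LOCK	0x01

struct nor {
	unsigned int flags;
	const char *quad_enable;	/* method name only, for the model */
};

/* Same shape as spi_nor_manufacturer_init_params(): vendor hooks
 * adjust defaults before info- and SFDP-based init runs. */
static void manufacturer_init(unsigned char mfr, struct nor *nor)
{
	switch (mfr) {
	case MFR_ATMEL:
	case MFR_INTEL:
	case MFR_SST:
		nor->flags |= F_HAS_LOCK;	/* powers up write-protected */
		break;
	case MFR_ISSI:
	case MFR_MACRONIX:
		nor->quad_enable = "sr1_bit6";
		break;
	}
}

int main(void)
{
	struct nor nor = { 0, "sr2_bit1" };	/* generic defaults */

	manufacturer_init(MFR_ISSI, &nor);
	printf("flags=%#x quad_enable=%s\n", nor.flags, nor.quad_enable);
	return 0;
}

Moving the "powers up locked" knowledge into these hooks is what allows the hard-coded JEDEC_MFR() checks to be deleted from spi_nor_scan() further down.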
+ */ +static int spi_nor_unlock_all(struct spi_nor *nor) +{ + if (nor->flags & SNOR_F_HAS_LOCK) + return spi_nor_unlock(&nor->mtd, 0, nor->params.size); + + return 0; +} + static int spi_nor_init(struct spi_nor *nor) { int err; - if (nor->clear_sr_bp) { - if (nor->params.quad_enable == spansion_quad_enable) - nor->clear_sr_bp = spi_nor_spansion_clear_sr_bp; - - err = nor->clear_sr_bp(nor); - if (err) { - dev_err(nor->dev, - "fail to clear block protection bits\n"); - return err; - } + err = spi_nor_quad_enable(nor); + if (err) { + dev_dbg(nor->dev, "quad mode not supported\n"); + return err; } - err = spi_nor_quad_enable(nor); + err = spi_nor_unlock_all(nor); if (err) { - dev_err(nor->dev, "quad mode not supported\n"); + dev_dbg(nor->dev, "Failed to unlock the entire flash memory array\n"); return err; } @@ -4761,7 +4996,7 @@ static int spi_nor_set_addr_width(struct spi_nor *nor) } if (nor->addr_width > SPI_NOR_MAX_ADDR_WIDTH) { - dev_err(nor->dev, "address width is too large: %u\n", + dev_dbg(nor->dev, "address width is too large: %u\n", nor->addr_width); return -EINVAL; } @@ -4879,16 +5114,6 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, if (info->flags & SPI_NOR_HAS_LOCK) nor->flags |= SNOR_F_HAS_LOCK; - /* - * Atmel, SST, Intel/Numonyx, and others serial NOR tend to power up - * with the software protection bits set. - */ - if (JEDEC_MFR(nor->info) == SNOR_MFR_ATMEL || - JEDEC_MFR(nor->info) == SNOR_MFR_INTEL || - JEDEC_MFR(nor->info) == SNOR_MFR_SST || - nor->info->flags & SPI_NOR_HAS_LOCK) - nor->clear_sr_bp = spi_nor_clear_sr_bp; - /* Init flash parameters based on flash_info struct and SFDP */ spi_nor_init_params(nor); diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c index a1dff92ceedf..0f847d510950 100644 --- a/drivers/mtd/ubi/debug.c +++ b/drivers/mtd/ubi/debug.c @@ -509,11 +509,9 @@ static const struct file_operations eraseblk_count_fops = { */ int ubi_debugfs_init_dev(struct ubi_device *ubi) { - int err, n; unsigned long ubi_num = ubi->ubi_num; - const char *fname; - struct dentry *dent; struct ubi_debug_info *d = &ubi->dbg; + int n; if (!IS_ENABLED(CONFIG_DEBUG_FS)) return 0; @@ -522,95 +520,52 @@ int ubi_debugfs_init_dev(struct ubi_device *ubi) ubi->ubi_num); if (n == UBI_DFS_DIR_LEN) { /* The array size is too small */ - fname = UBI_DFS_DIR_NAME; - dent = ERR_PTR(-EINVAL); - goto out; + return -EINVAL; } - fname = d->dfs_dir_name; - dent = debugfs_create_dir(fname, dfs_rootdir); - if (IS_ERR_OR_NULL(dent)) - goto out; - d->dfs_dir = dent; - - fname = "chk_gen"; - dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num, - &dfs_fops); - if (IS_ERR_OR_NULL(dent)) - goto out_remove; - d->dfs_chk_gen = dent; - - fname = "chk_io"; - dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num, - &dfs_fops); - if (IS_ERR_OR_NULL(dent)) - goto out_remove; - d->dfs_chk_io = dent; - - fname = "chk_fastmap"; - dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num, - &dfs_fops); - if (IS_ERR_OR_NULL(dent)) - goto out_remove; - d->dfs_chk_fastmap = dent; - - fname = "tst_disable_bgt"; - dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num, - &dfs_fops); - if (IS_ERR_OR_NULL(dent)) - goto out_remove; - d->dfs_disable_bgt = dent; - - fname = "tst_emulate_bitflips"; - dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num, - &dfs_fops); - if (IS_ERR_OR_NULL(dent)) - goto out_remove; - d->dfs_emulate_bitflips = dent; - - fname = "tst_emulate_io_failures"; - dent = debugfs_create_file(fname, 
S_IWUSR, d->dfs_dir, (void *)ubi_num, - &dfs_fops); - if (IS_ERR_OR_NULL(dent)) - goto out_remove; - d->dfs_emulate_io_failures = dent; - - fname = "tst_emulate_power_cut"; - dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num, - &dfs_fops); - if (IS_ERR_OR_NULL(dent)) - goto out_remove; - d->dfs_emulate_power_cut = dent; - - fname = "tst_emulate_power_cut_min"; - dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num, - &dfs_fops); - if (IS_ERR_OR_NULL(dent)) - goto out_remove; - d->dfs_power_cut_min = dent; - - fname = "tst_emulate_power_cut_max"; - dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num, - &dfs_fops); - if (IS_ERR_OR_NULL(dent)) - goto out_remove; - d->dfs_power_cut_max = dent; - - fname = "detailed_erase_block_info"; - dent = debugfs_create_file(fname, S_IRUSR, d->dfs_dir, (void *)ubi_num, - &eraseblk_count_fops); - if (IS_ERR_OR_NULL(dent)) - goto out_remove; + d->dfs_dir = debugfs_create_dir(d->dfs_dir_name, dfs_rootdir); - return 0; + d->dfs_chk_gen = debugfs_create_file("chk_gen", S_IWUSR, d->dfs_dir, + (void *)ubi_num, &dfs_fops); -out_remove: - debugfs_remove_recursive(d->dfs_dir); -out: - err = dent ? PTR_ERR(dent) : -ENODEV; - ubi_err(ubi, "cannot create \"%s\" debugfs file or directory, error %d\n", - fname, err); - return err; + d->dfs_chk_io = debugfs_create_file("chk_io", S_IWUSR, d->dfs_dir, + (void *)ubi_num, &dfs_fops); + + d->dfs_chk_fastmap = debugfs_create_file("chk_fastmap", S_IWUSR, + d->dfs_dir, (void *)ubi_num, + &dfs_fops); + + d->dfs_disable_bgt = debugfs_create_file("tst_disable_bgt", S_IWUSR, + d->dfs_dir, (void *)ubi_num, + &dfs_fops); + + d->dfs_emulate_bitflips = debugfs_create_file("tst_emulate_bitflips", + S_IWUSR, d->dfs_dir, + (void *)ubi_num, + &dfs_fops); + + d->dfs_emulate_io_failures = debugfs_create_file("tst_emulate_io_failures", + S_IWUSR, d->dfs_dir, + (void *)ubi_num, + &dfs_fops); + + d->dfs_emulate_power_cut = debugfs_create_file("tst_emulate_power_cut", + S_IWUSR, d->dfs_dir, + (void *)ubi_num, + &dfs_fops); + + d->dfs_power_cut_min = debugfs_create_file("tst_emulate_power_cut_min", + S_IWUSR, d->dfs_dir, + (void *)ubi_num, &dfs_fops); + + d->dfs_power_cut_max = debugfs_create_file("tst_emulate_power_cut_max", + S_IWUSR, d->dfs_dir, + (void *)ubi_num, &dfs_fops); + + debugfs_create_file("detailed_erase_block_info", S_IRUSR, d->dfs_dir, + (void *)ubi_num, &eraseblk_count_fops); + + return 0; } /**
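The ubi_debugfs_init_dev() rewrite above drops all return-value checking, relying on the debugfs convention that creation failures come back as error pointers which later calls absorb instead of dereferencing. A userspace model of that convention; err_ptr() and is_err() imitate the kernel's ERR_PTR machinery, and create_dir()/create_file() are stand-ins for the debugfs calls, so the exact propagation behavior is an assumption of the sketch:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Userspace model of the ERR_PTR convention: errnos are encoded in
 * the last page of the address space. */
static inline void *err_ptr(long err) { return (void *)err; }
static inline long ptr_err(const void *p) { return (long)(intptr_t)p; }
static inline int is_err(const void *p)
{
	return (uintptr_t)p >= (uintptr_t)-4095;
}

static void *create_dir(const char *name)
{
	printf("mkdir %s\n", name);
	return err_ptr(-ENODEV);	/* pretend debugfs is disabled */
}

/* Like the modeled debugfs_create_file(): an error-pointer parent is
 * passed through harmlessly, so callers need not check it. */
static void *create_file(const char *name, void *parent)
{
	if (is_err(parent))
		return parent;	/* propagate, don't dereference */
	printf("create %s\n", name);
	return parent;
}

int main(void)
{
	void *dir = create_dir("ubi0");

	create_file("chk_gen", dir);	/* safe even though dir is an error */
	printf("dir err=%ld\n", ptr_err(dir));
	return 0;
}

Because nothing needs to be unwound on failure, the fname bookkeeping, the IS_ERR_OR_NULL() checks, and the out_remove error path could all be deleted, which is the entire diff above.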