author     Masahiro Yamada   2017-11-22 02:38:32 +0900
committer  Masahiro Yamada   2017-11-29 00:28:59 +0900
commit     350d052dab99986571c03951f0aed621dacfb9d9 (patch)
tree       36b0f355a87ad5962336299cb961ed991fa23b0d /drivers/mtd
parent     e6001371d1603f00a98af9e6dd73abdbbffd2784 (diff)
mtd: nand: denali: sync with Linux 4.15-rc1
I largely reworked the Denali NAND controller driver in Linux.
This commit imports those improvements into U-Boot; the code is
now almost in sync with Linux 4.15-rc1.
Signed-off-by: Masahiro Yamada <yamada.masahiro@socionext.com>
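
U-Boot has no generic DMA-mapping layer, so the imported denali.c carries small
local stand-ins for Linux's dma_map_single()/dma_unmap_single() that reduce to
cache maintenance (see the top of the denali.c hunk below). A minimal sketch of
that idea, using a hypothetical example_dma_map_single() name rather than the
exact code in this patch:

#include <common.h>              /* flush_dcache_range(), invalidate_dcache_range() */
#include <linux/dma-direction.h> /* enum dma_data_direction */
#include <linux/types.h>         /* dma_addr_t */

/* Illustrative shim: "mapping" a buffer is just a cache flush/invalidate. */
static dma_addr_t example_dma_map_single(void *ptr, size_t size,
					 enum dma_data_direction dir)
{
	unsigned long addr = (unsigned long)ptr;

	if (dir == DMA_FROM_DEVICE)
		/* device will write the buffer: drop stale CPU cache lines */
		invalidate_dcache_range(addr, addr + size);
	else
		/* device will read the buffer: write dirty cache lines back */
		flush_dcache_range(addr, addr + size);

	return addr;	/* 1:1 mapping, no IOMMU */
}

The matching dma_mapping_error() stub always reports success for the same reason.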
Diffstat (limited to 'drivers/mtd')
-rw-r--r--  drivers/mtd/nand/Kconfig       |   11
-rw-r--r--  drivers/mtd/nand/denali.c      | 2027
-rw-r--r--  drivers/mtd/nand/denali.h      |  473
-rw-r--r--  drivers/mtd/nand/denali_dt.c   |   17
-rw-r--r--  drivers/mtd/nand/denali_spl.c  |   14
5 files changed, 1247 insertions, 1295 deletions
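
One change visible throughout the new code is the use of the FIELD_GET()/FIELD_PREP()
helpers from <linux/bitfield.h>, for example FIELD_GET(FEATURES__N_BANKS, features) in
denali_detect_max_banks(), where the old driver open-coded masks and shifts. A rough
illustration of what these helpers do, using a made-up EXAMPLE_MASK rather than any
register field of this controller:

#include <linux/bitfield.h>
#include <linux/bitops.h>	/* GENMASK() */
#include <linux/types.h>

#define EXAMPLE_MASK	GENMASK(11, 8)	/* hypothetical 4-bit field at bits 11:8 */

static u32 example_roundtrip(u32 reg)
{
	/* FIELD_GET(mask, reg) == (reg & mask) >> (lowest set bit of mask) */
	u32 val = FIELD_GET(EXAMPLE_MASK, reg);		/* 0x0a5f -> 0xa */

	/* FIELD_PREP(mask, val) == (val << shift) & mask, the inverse */
	return (reg & ~EXAMPLE_MASK) | FIELD_PREP(EXAMPLE_MASK, val);
}

Because the register field macros are plain bit masks, the shift amount is derived
from the mask itself and never has to be spelled out separately.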
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig index ca9819344ec..cbdbd2f973f 100644 --- a/drivers/mtd/nand/Kconfig +++ b/drivers/mtd/nand/Kconfig @@ -23,17 +23,6 @@ config NAND_DENALI_DT Enable the driver for NAND flash on platforms using a Denali NAND controller as a DT device. -config SYS_NAND_DENALI_64BIT - bool "Use 64-bit variant of Denali NAND controller" - depends on NAND_DENALI - help - The Denali NAND controller IP has some variations in terms of - the bus interface. The DMA setup sequence is completely differenct - between 32bit / 64bit AXI bus variants. - - If your Denali NAND controller is the 64-bit variant, say Y. - Otherwise (32 bit), say N. - config NAND_DENALI_SPARE_AREA_SKIP_BYTES int "Number of bytes skipped in OOB area" depends on NAND_DENALI diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c index 54718f418c6..b116d3a17c4 100644 --- a/drivers/mtd/nand/denali.c +++ b/drivers/mtd/nand/denali.c @@ -9,1144 +9,1076 @@ #include <common.h> #include <malloc.h> #include <nand.h> +#include <dm.h> +#include <linux/bitfield.h> +#include <linux/dma-direction.h> #include <linux/errno.h> #include <linux/io.h> #include "denali.h" -#define NAND_DEFAULT_TIMINGS -1 - -static int onfi_timing_mode = NAND_DEFAULT_TIMINGS; - -/* - * We define a macro here that combines all interrupts this driver uses into - * a single constant value, for convenience. - */ -#define DENALI_IRQ_ALL (INTR_STATUS__DMA_CMD_COMP | \ - INTR_STATUS__ECC_TRANSACTION_DONE | \ - INTR_STATUS__ECC_ERR | \ - INTR_STATUS__PROGRAM_FAIL | \ - INTR_STATUS__LOAD_COMP | \ - INTR_STATUS__PROGRAM_COMP | \ - INTR_STATUS__TIME_OUT | \ - INTR_STATUS__ERASE_FAIL | \ - INTR_STATUS__RST_COMP | \ - INTR_STATUS__ERASE_COMP | \ - INTR_STATUS__ECC_UNCOR_ERR | \ - INTR_STATUS__INT_ACT | \ - INTR_STATUS__LOCKED_BLK) +static dma_addr_t dma_map_single(void *dev, void *ptr, size_t size, + enum dma_data_direction dir) +{ + unsigned long addr = (unsigned long)ptr; -/* - * indicates whether or not the internal value for the flash bank is - * valid or not - */ -#define CHIP_SELECT_INVALID -1 + if (dir == DMA_FROM_DEVICE) + invalidate_dcache_range(addr, addr + size); + else + flush_dcache_range(addr, addr + size); -#define SUPPORT_8BITECC 1 + return addr; +} -/* - * this macro allows us to convert from an MTD structure to our own - * device context (denali) structure. - */ -static inline struct denali_nand_info *mtd_to_denali(struct mtd_info *mtd) +static void dma_unmap_single(void *dev, dma_addr_t addr, size_t size, + enum dma_data_direction dir) { - return container_of(mtd_to_nand(mtd), struct denali_nand_info, nand); + if (dir != DMA_TO_DEVICE) + invalidate_dcache_range(addr, addr + size); } -/* - * These constants are defined by the driver to enable common driver - * configuration options. - */ -#define SPARE_ACCESS 0x41 -#define MAIN_ACCESS 0x42 -#define MAIN_SPARE_ACCESS 0x43 -#define PIPELINE_ACCESS 0x2000 - -#define DENALI_UNLOCK_START 0x10 -#define DENALI_UNLOCK_END 0x11 -#define DENALI_LOCK 0x21 -#define DENALI_LOCK_TIGHT 0x31 -#define DENALI_BUFFER_LOAD 0x60 -#define DENALI_BUFFER_WRITE 0x62 - -#define DENALI_READ 0 -#define DENALI_WRITE 0x100 - -/* types of device accesses. 
We can issue commands and get status */ -#define COMMAND_CYCLE 0 -#define ADDR_CYCLE 1 -#define STATUS_CYCLE 2 - -/* - * this is a helper macro that allows us to - * format the bank into the proper bits for the controller - */ -#define BANK(x) ((x) << 24) - -/* Interrupts are cleared by writing a 1 to the appropriate status bit */ -static inline void clear_interrupt(struct denali_nand_info *denali, - uint32_t irq_mask) +static int dma_mapping_error(void *dev, dma_addr_t addr) { - uint32_t intr_status_reg; - - intr_status_reg = INTR_STATUS(denali->flash_bank); - - writel(irq_mask, denali->flash_reg + intr_status_reg); + return 0; } -static uint32_t read_interrupt_status(struct denali_nand_info *denali) -{ - uint32_t intr_status_reg; +#define DENALI_NAND_NAME "denali-nand" - intr_status_reg = INTR_STATUS(denali->flash_bank); +/* for Indexed Addressing */ +#define DENALI_INDEXED_CTRL 0x00 +#define DENALI_INDEXED_DATA 0x10 - return readl(denali->flash_reg + intr_status_reg); -} +#define DENALI_MAP00 (0 << 26) /* direct access to buffer */ +#define DENALI_MAP01 (1 << 26) /* read/write pages in PIO */ +#define DENALI_MAP10 (2 << 26) /* high-level control plane */ +#define DENALI_MAP11 (3 << 26) /* direct controller access */ -static void clear_interrupts(struct denali_nand_info *denali) -{ - uint32_t status; +/* MAP11 access cycle type */ +#define DENALI_MAP11_CMD ((DENALI_MAP11) | 0) /* command cycle */ +#define DENALI_MAP11_ADDR ((DENALI_MAP11) | 1) /* address cycle */ +#define DENALI_MAP11_DATA ((DENALI_MAP11) | 2) /* data cycle */ - status = read_interrupt_status(denali); - clear_interrupt(denali, status); +/* MAP10 commands */ +#define DENALI_ERASE 0x01 - denali->irq_status = 0; -} +#define DENALI_BANK(denali) ((denali)->active_bank << 24) -static void denali_irq_enable(struct denali_nand_info *denali, - uint32_t int_mask) -{ - int i; +#define DENALI_INVALID_BANK -1 +#define DENALI_NR_BANKS 4 - for (i = 0; i < denali->max_banks; ++i) - writel(int_mask, denali->flash_reg + INTR_EN(i)); -} +/* + * The bus interface clock, clk_x, is phase aligned with the core clock. The + * clk_x is an integral multiple N of the core clk. The value N is configured + * at IP delivery time, and its available value is 4, 5, or 6. We need to align + * to the largest value to make it work with any possible configuration. + */ +#define DENALI_CLK_X_MULT 6 -static uint32_t wait_for_irq(struct denali_nand_info *denali, uint32_t irq_mask) +static inline struct denali_nand_info *mtd_to_denali(struct mtd_info *mtd) { - unsigned long timeout = 1000000; - uint32_t intr_status; - - do { - intr_status = read_interrupt_status(denali) & DENALI_IRQ_ALL; - if (intr_status & irq_mask) { - denali->irq_status &= ~irq_mask; - /* our interrupt was detected */ - break; - } - udelay(1); - timeout--; - } while (timeout != 0); - - if (timeout == 0) { - /* timeout */ - printf("Denali timeout with interrupt status %08x\n", - read_interrupt_status(denali)); - intr_status = 0; - } - return intr_status; + return container_of(mtd_to_nand(mtd), struct denali_nand_info, nand); } /* - * Certain operations for the denali NAND controller use an indexed mode to - * read/write data. The operation is performed by writing the address value - * of the command to the device memory followed by the data. This function - * abstracts this common operation. + * Direct Addressing - the slave address forms the control information (command + * type, bank, block, and page address). The slave data is the actual data to + * be transferred. 
This mode requires 28 bits of address region allocated. */ -static void index_addr(struct denali_nand_info *denali, - uint32_t address, uint32_t data) +static u32 denali_direct_read(struct denali_nand_info *denali, u32 addr) { - writel(address, denali->flash_mem + INDEX_CTRL_REG); - writel(data, denali->flash_mem + INDEX_DATA_REG); + return ioread32(denali->host + addr); } -/* Perform an indexed read of the device */ -static void index_addr_read_data(struct denali_nand_info *denali, - uint32_t address, uint32_t *pdata) +static void denali_direct_write(struct denali_nand_info *denali, u32 addr, + u32 data) { - writel(address, denali->flash_mem + INDEX_CTRL_REG); - *pdata = readl(denali->flash_mem + INDEX_DATA_REG); + iowrite32(data, denali->host + addr); } /* - * We need to buffer some data for some of the NAND core routines. - * The operations manage buffering that data. + * Indexed Addressing - address translation module intervenes in passing the + * control information. This mode reduces the required address range. The + * control information and transferred data are latched by the registers in + * the translation module. */ -static void reset_buf(struct denali_nand_info *denali) +static u32 denali_indexed_read(struct denali_nand_info *denali, u32 addr) { - denali->buf.head = 0; - denali->buf.tail = 0; + iowrite32(addr, denali->host + DENALI_INDEXED_CTRL); + return ioread32(denali->host + DENALI_INDEXED_DATA); } -static void write_byte_to_buf(struct denali_nand_info *denali, uint8_t byte) +static void denali_indexed_write(struct denali_nand_info *denali, u32 addr, + u32 data) { - denali->buf.buf[denali->buf.tail++] = byte; + iowrite32(addr, denali->host + DENALI_INDEXED_CTRL); + iowrite32(data, denali->host + DENALI_INDEXED_DATA); } -/* resets a specific device connected to the core */ -static void reset_bank(struct denali_nand_info *denali) +/* + * Use the configuration feature register to determine the maximum number of + * banks that the hardware supports. 
+ */ +static void denali_detect_max_banks(struct denali_nand_info *denali) { - uint32_t irq_status; - uint32_t irq_mask = INTR_STATUS__RST_COMP | INTR_STATUS__TIME_OUT; - - clear_interrupts(denali); + uint32_t features = ioread32(denali->reg + FEATURES); - writel(1 << denali->flash_bank, denali->flash_reg + DEVICE_RESET); + denali->max_banks = 1 << FIELD_GET(FEATURES__N_BANKS, features); - irq_status = wait_for_irq(denali, irq_mask); - if (irq_status & INTR_STATUS__TIME_OUT) - debug("reset bank failed.\n"); + /* the encoding changed from rev 5.0 to 5.1 */ + if (denali->revision < 0x0501) + denali->max_banks <<= 1; } -/* Reset the flash controller */ -static uint32_t denali_nand_reset(struct denali_nand_info *denali) +static void __maybe_unused denali_enable_irq(struct denali_nand_info *denali) { int i; - for (i = 0; i < denali->max_banks; i++) - writel(INTR_STATUS__RST_COMP | INTR_STATUS__TIME_OUT, - denali->flash_reg + INTR_STATUS(i)); + for (i = 0; i < DENALI_NR_BANKS; i++) + iowrite32(U32_MAX, denali->reg + INTR_EN(i)); + iowrite32(GLOBAL_INT_EN_FLAG, denali->reg + GLOBAL_INT_ENABLE); +} - for (i = 0; i < denali->max_banks; i++) { - writel(1 << i, denali->flash_reg + DEVICE_RESET); - while (!(readl(denali->flash_reg + INTR_STATUS(i)) & - (INTR_STATUS__RST_COMP | INTR_STATUS__TIME_OUT))) - if (readl(denali->flash_reg + INTR_STATUS(i)) & - INTR_STATUS__TIME_OUT) - debug("NAND Reset operation timed out on bank" - " %d\n", i); - } +static void __maybe_unused denali_disable_irq(struct denali_nand_info *denali) +{ + int i; - for (i = 0; i < denali->max_banks; i++) - writel(INTR_STATUS__RST_COMP | INTR_STATUS__TIME_OUT, - denali->flash_reg + INTR_STATUS(i)); + for (i = 0; i < DENALI_NR_BANKS; i++) + iowrite32(0, denali->reg + INTR_EN(i)); + iowrite32(0, denali->reg + GLOBAL_INT_ENABLE); +} - return 0; +static void denali_clear_irq(struct denali_nand_info *denali, + int bank, uint32_t irq_status) +{ + /* write one to clear bits */ + iowrite32(irq_status, denali->reg + INTR_STATUS(bank)); } -/* - * this routine calculates the ONFI timing values for a given mode and - * programs the clocking register accordingly. The mode is determined by - * the get_onfi_nand_para routine. 
- */ -static void nand_onfi_timing_set(struct denali_nand_info *denali, - uint16_t mode) +static void denali_clear_irq_all(struct denali_nand_info *denali) { - uint32_t trea[6] = {40, 30, 25, 20, 20, 16}; - uint32_t trp[6] = {50, 25, 17, 15, 12, 10}; - uint32_t treh[6] = {30, 15, 15, 10, 10, 7}; - uint32_t trc[6] = {100, 50, 35, 30, 25, 20}; - uint32_t trhoh[6] = {0, 15, 15, 15, 15, 15}; - uint32_t trloh[6] = {0, 0, 0, 0, 5, 5}; - uint32_t tcea[6] = {100, 45, 30, 25, 25, 25}; - uint32_t tadl[6] = {200, 100, 100, 100, 70, 70}; - uint32_t trhw[6] = {200, 100, 100, 100, 100, 100}; - uint32_t trhz[6] = {200, 100, 100, 100, 100, 100}; - uint32_t twhr[6] = {120, 80, 80, 60, 60, 60}; - uint32_t tcs[6] = {70, 35, 25, 25, 20, 15}; - - uint32_t data_invalid_rhoh, data_invalid_rloh, data_invalid; - uint32_t dv_window = 0; - uint32_t en_lo, en_hi; - uint32_t acc_clks; - uint32_t addr_2_data, re_2_we, re_2_re, we_2_re, cs_cnt; - - en_lo = DIV_ROUND_UP(trp[mode], CLK_X); - en_hi = DIV_ROUND_UP(treh[mode], CLK_X); - if ((en_hi * CLK_X) < (treh[mode] + 2)) - en_hi++; - - if ((en_lo + en_hi) * CLK_X < trc[mode]) - en_lo += DIV_ROUND_UP((trc[mode] - (en_lo + en_hi) * CLK_X), - CLK_X); - - if ((en_lo + en_hi) < CLK_MULTI) - en_lo += CLK_MULTI - en_lo - en_hi; - - while (dv_window < 8) { - data_invalid_rhoh = en_lo * CLK_X + trhoh[mode]; - - data_invalid_rloh = (en_lo + en_hi) * CLK_X + trloh[mode]; - - data_invalid = data_invalid_rhoh < data_invalid_rloh ? - data_invalid_rhoh : data_invalid_rloh; - - dv_window = data_invalid - trea[mode]; - - if (dv_window < 8) - en_lo++; - } + int i; - acc_clks = DIV_ROUND_UP(trea[mode], CLK_X); + for (i = 0; i < DENALI_NR_BANKS; i++) + denali_clear_irq(denali, i, U32_MAX); +} - while (acc_clks * CLK_X - trea[mode] < 3) - acc_clks++; +static void __denali_check_irq(struct denali_nand_info *denali) +{ + uint32_t irq_status; + int i; - if (data_invalid - acc_clks * CLK_X < 2) - debug("%s, Line %d: Warning!\n", __FILE__, __LINE__); + for (i = 0; i < DENALI_NR_BANKS; i++) { + irq_status = ioread32(denali->reg + INTR_STATUS(i)); + denali_clear_irq(denali, i, irq_status); - addr_2_data = DIV_ROUND_UP(tadl[mode], CLK_X); - re_2_we = DIV_ROUND_UP(trhw[mode], CLK_X); - re_2_re = DIV_ROUND_UP(trhz[mode], CLK_X); - we_2_re = DIV_ROUND_UP(twhr[mode], CLK_X); - cs_cnt = DIV_ROUND_UP((tcs[mode] - trp[mode]), CLK_X); - if (cs_cnt == 0) - cs_cnt = 1; + if (i != denali->active_bank) + continue; - if (tcea[mode]) { - while (cs_cnt * CLK_X + trea[mode] < tcea[mode]) - cs_cnt++; + denali->irq_status |= irq_status; } +} - /* Sighting 3462430: Temporary hack for MT29F128G08CJABAWP:B */ - if (readl(denali->flash_reg + MANUFACTURER_ID) == 0 && - readl(denali->flash_reg + DEVICE_ID) == 0x88) - acc_clks = 6; - - writel(acc_clks, denali->flash_reg + ACC_CLKS); - writel(re_2_we, denali->flash_reg + RE_2_WE); - writel(re_2_re, denali->flash_reg + RE_2_RE); - writel(we_2_re, denali->flash_reg + WE_2_RE); - writel(addr_2_data, denali->flash_reg + ADDR_2_DATA); - writel(en_lo, denali->flash_reg + RDWR_EN_LO_CNT); - writel(en_hi, denali->flash_reg + RDWR_EN_HI_CNT); - writel(cs_cnt, denali->flash_reg + CS_SETUP_CNT); +static void denali_reset_irq(struct denali_nand_info *denali) +{ + denali->irq_status = 0; + denali->irq_mask = 0; } -/* queries the NAND device to see what ONFI modes it supports. 
*/ -static uint32_t get_onfi_nand_para(struct denali_nand_info *denali) +static uint32_t denali_wait_for_irq(struct denali_nand_info *denali, + uint32_t irq_mask) { - int i; + unsigned long time_left = 1000000; - /* - * we needn't to do a reset here because driver has already - * reset all the banks before - */ - if (!(readl(denali->flash_reg + ONFI_TIMING_MODE) & - ONFI_TIMING_MODE__VALUE)) - return -EIO; + while (time_left) { + __denali_check_irq(denali); - for (i = 5; i > 0; i--) { - if (readl(denali->flash_reg + ONFI_TIMING_MODE) & - (0x01 << i)) - break; + if (irq_mask & denali->irq_status) + return denali->irq_status; + udelay(1); + time_left--; } - nand_onfi_timing_set(denali, i); - - /* - * By now, all the ONFI devices we know support the page cache - * rw feature. So here we enable the pipeline_rw_ahead feature - */ + if (!time_left) { + dev_err(denali->dev, "timeout while waiting for irq 0x%x\n", + irq_mask); + return 0; + } - return 0; + return denali->irq_status; } -static void get_samsung_nand_para(struct denali_nand_info *denali, - uint8_t device_id) +static uint32_t denali_check_irq(struct denali_nand_info *denali) { - if (device_id == 0xd3) { /* Samsung K9WAG08U1A */ - /* Set timing register values according to datasheet */ - writel(5, denali->flash_reg + ACC_CLKS); - writel(20, denali->flash_reg + RE_2_WE); - writel(12, denali->flash_reg + WE_2_RE); - writel(14, denali->flash_reg + ADDR_2_DATA); - writel(3, denali->flash_reg + RDWR_EN_LO_CNT); - writel(2, denali->flash_reg + RDWR_EN_HI_CNT); - writel(2, denali->flash_reg + CS_SETUP_CNT); - } + __denali_check_irq(denali); + + return denali->irq_status; } -static void get_toshiba_nand_para(struct denali_nand_info *denali) +static void denali_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) { - uint32_t tmp; + struct denali_nand_info *denali = mtd_to_denali(mtd); + u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali); + int i; - /* - * Workaround to fix a controller bug which reports a wrong - * spare area size for some kind of Toshiba NAND device - */ - if ((readl(denali->flash_reg + DEVICE_MAIN_AREA_SIZE) == 4096) && - (readl(denali->flash_reg + DEVICE_SPARE_AREA_SIZE) == 64)) { - writel(216, denali->flash_reg + DEVICE_SPARE_AREA_SIZE); - tmp = readl(denali->flash_reg + DEVICES_CONNECTED) * - readl(denali->flash_reg + DEVICE_SPARE_AREA_SIZE); - writel(tmp, denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE); - } + for (i = 0; i < len; i++) + buf[i] = denali->host_read(denali, addr); } -static void get_hynix_nand_para(struct denali_nand_info *denali, - uint8_t device_id) +static void denali_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len) { - uint32_t main_size, spare_size; - - switch (device_id) { - case 0xD5: /* Hynix H27UAG8T2A, H27UBG8U5A or H27UCG8VFA */ - case 0xD7: /* Hynix H27UDG8VEM, H27UCG8UDM or H27UCG8V5A */ - writel(128, denali->flash_reg + PAGES_PER_BLOCK); - writel(4096, denali->flash_reg + DEVICE_MAIN_AREA_SIZE); - writel(224, denali->flash_reg + DEVICE_SPARE_AREA_SIZE); - main_size = 4096 * - readl(denali->flash_reg + DEVICES_CONNECTED); - spare_size = 224 * - readl(denali->flash_reg + DEVICES_CONNECTED); - writel(main_size, denali->flash_reg + LOGICAL_PAGE_DATA_SIZE); - writel(spare_size, denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE); - writel(0, denali->flash_reg + DEVICE_WIDTH); - break; - default: - debug("Spectra: Unknown Hynix NAND (Device ID: 0x%x).\n" - "Will use default parameter values instead.\n", - device_id); - } + struct denali_nand_info *denali = mtd_to_denali(mtd); + u32 addr = 
DENALI_MAP11_DATA | DENALI_BANK(denali); + int i; + + for (i = 0; i < len; i++) + denali->host_write(denali, addr, buf[i]); } -/* - * determines how many NAND chips are connected to the controller. Note for - * Intel CE4100 devices we don't support more than one device. - */ -static void find_valid_banks(struct denali_nand_info *denali) +static void denali_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len) { - uint32_t id[denali->max_banks]; + struct denali_nand_info *denali = mtd_to_denali(mtd); + u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali); + uint16_t *buf16 = (uint16_t *)buf; int i; - denali->total_used_banks = 1; - for (i = 0; i < denali->max_banks; i++) { - index_addr(denali, MODE_11 | (i << 24) | 0, 0x90); - index_addr(denali, MODE_11 | (i << 24) | 1, 0); - index_addr_read_data(denali, MODE_11 | (i << 24) | 2, &id[i]); + for (i = 0; i < len / 2; i++) + buf16[i] = denali->host_read(denali, addr); +} - if (i == 0) { - if (!(id[i] & 0x0ff)) - break; - } else { - if ((id[i] & 0x0ff) == (id[0] & 0x0ff)) - denali->total_used_banks++; - else - break; - } - } +static void denali_write_buf16(struct mtd_info *mtd, const uint8_t *buf, + int len) +{ + struct denali_nand_info *denali = mtd_to_denali(mtd); + u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali); + const uint16_t *buf16 = (const uint16_t *)buf; + int i; + + for (i = 0; i < len / 2; i++) + denali->host_write(denali, addr, buf16[i]); } -/* - * Use the configuration feature register to determine the maximum number of - * banks that the hardware supports. - */ -static void detect_max_banks(struct denali_nand_info *denali) +static uint8_t denali_read_byte(struct mtd_info *mtd) { - uint32_t features = ioread32(denali->flash_reg + FEATURES); + uint8_t byte; - denali->max_banks = 1 << (features & FEATURES__N_BANKS); + denali_read_buf(mtd, &byte, 1); - /* the encoding changed from rev 5.0 to 5.1 */ - if (denali->revision < 0x0501) - denali->max_banks <<= 1; + return byte; } -static void detect_partition_feature(struct denali_nand_info *denali) +static void denali_write_byte(struct mtd_info *mtd, uint8_t byte) { - /* - * For MRST platform, denali->fwblks represent the - * number of blocks firmware is taken, - * FW is in protect partition and MTD driver has no - * permission to access it. So let driver know how many - * blocks it can't touch. - */ - if (readl(denali->flash_reg + FEATURES) & FEATURES__PARTITION) { - if ((readl(denali->flash_reg + PERM_SRC_ID(1)) & - PERM_SRC_ID__SRCID) == SPECTRA_PARTITION_ID) { - denali->fwblks = - ((readl(denali->flash_reg + MIN_MAX_BANK(1)) & - MIN_MAX_BANK__MIN_VALUE) * - denali->blksperchip) - + - (readl(denali->flash_reg + MIN_BLK_ADDR(1)) & - MIN_BLK_ADDR__VALUE); - } else { - denali->fwblks = SPECTRA_START_BLOCK; - } - } else { - denali->fwblks = SPECTRA_START_BLOCK; - } + denali_write_buf(mtd, &byte, 1); } -static uint32_t denali_nand_timing_set(struct denali_nand_info *denali) +static uint16_t denali_read_word(struct mtd_info *mtd) { - uint32_t id_bytes[8], addr; - uint8_t maf_id, device_id; - int i; + uint16_t word; - /* - * Use read id method to get device ID and other params. 
- * For some NAND chips, controller can't report the correct - * device ID by reading from DEVICE_ID register - */ - addr = MODE_11 | BANK(denali->flash_bank); - index_addr(denali, addr | 0, 0x90); - index_addr(denali, addr | 1, 0); - for (i = 0; i < 8; i++) - index_addr_read_data(denali, addr | 2, &id_bytes[i]); - maf_id = id_bytes[0]; - device_id = id_bytes[1]; - - if (readl(denali->flash_reg + ONFI_DEVICE_NO_OF_LUNS) & - ONFI_DEVICE_NO_OF_LUNS__ONFI_DEVICE) { /* ONFI 1.0 NAND */ - if (get_onfi_nand_para(denali)) - return -EIO; - } else if (maf_id == 0xEC) { /* Samsung NAND */ - get_samsung_nand_para(denali, device_id); - } else if (maf_id == 0x98) { /* Toshiba NAND */ - get_toshiba_nand_para(denali); - } else if (maf_id == 0xAD) { /* Hynix NAND */ - get_hynix_nand_para(denali, device_id); - } + denali_read_buf16(mtd, (uint8_t *)&word, 2); - find_valid_banks(denali); + return word; +} - detect_partition_feature(denali); +static void denali_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl) +{ + struct denali_nand_info *denali = mtd_to_denali(mtd); + uint32_t type; + + if (ctrl & NAND_CLE) + type = DENALI_MAP11_CMD; + else if (ctrl & NAND_ALE) + type = DENALI_MAP11_ADDR; + else + return; /* - * If the user specified to override the default timings - * with a specific ONFI mode, we apply those changes here. + * Some commands are followed by chip->dev_ready or chip->waitfunc. + * irq_status must be cleared here to catch the R/B# interrupt later. */ - if (onfi_timing_mode != NAND_DEFAULT_TIMINGS) - nand_onfi_timing_set(denali, onfi_timing_mode); + if (ctrl & NAND_CTRL_CHANGE) + denali_reset_irq(denali); - return 0; + denali->host_write(denali, DENALI_BANK(denali) | type, dat); } -/* - * validation function to verify that the controlling software is making - * a valid request - */ -static inline bool is_flash_bank_valid(int flash_bank) -{ - return flash_bank >= 0 && flash_bank < 4; -} - -static void denali_irq_init(struct denali_nand_info *denali) +static int denali_dev_ready(struct mtd_info *mtd) { - uint32_t int_mask; - int i; - - /* Disable global interrupts */ - writel(0, denali->flash_reg + GLOBAL_INT_ENABLE); - - int_mask = DENALI_IRQ_ALL; - - /* Clear all status bits */ - for (i = 0; i < denali->max_banks; ++i) - writel(0xFFFF, denali->flash_reg + INTR_STATUS(i)); + struct denali_nand_info *denali = mtd_to_denali(mtd); - denali_irq_enable(denali, int_mask); + return !!(denali_check_irq(denali) & INTR__INT_ACT); } -/* - * This helper function setups the registers for ECC and whether or not - * the spare area will be transferred. 
- */ -static void setup_ecc_for_xfer(struct denali_nand_info *denali, bool ecc_en, - bool transfer_spare) +static int denali_check_erased_page(struct mtd_info *mtd, + struct nand_chip *chip, uint8_t *buf, + unsigned long uncor_ecc_flags, + unsigned int max_bitflips) { - int ecc_en_flag, transfer_spare_flag; + uint8_t *ecc_code = chip->buffers->ecccode; + int ecc_steps = chip->ecc.steps; + int ecc_size = chip->ecc.size; + int ecc_bytes = chip->ecc.bytes; + int i, ret, stat; + + ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0, + chip->ecc.total); + if (ret) + return ret; + + for (i = 0; i < ecc_steps; i++) { + if (!(uncor_ecc_flags & BIT(i))) + continue; + + stat = nand_check_erased_ecc_chunk(buf, ecc_size, + ecc_code, ecc_bytes, + NULL, 0, + chip->ecc.strength); + if (stat < 0) { + mtd->ecc_stats.failed++; + } else { + mtd->ecc_stats.corrected += stat; + max_bitflips = max_t(unsigned int, max_bitflips, stat); + } - /* set ECC, transfer spare bits if needed */ - ecc_en_flag = ecc_en ? ECC_ENABLE__FLAG : 0; - transfer_spare_flag = transfer_spare ? TRANSFER_SPARE_REG__FLAG : 0; + buf += ecc_size; + ecc_code += ecc_bytes; + } - /* Enable spare area/ECC per user's request. */ - writel(ecc_en_flag, denali->flash_reg + ECC_ENABLE); - /* applicable for MAP01 only */ - writel(transfer_spare_flag, denali->flash_reg + TRANSFER_SPARE_REG); + return max_bitflips; } -/* - * sends a pipeline command operation to the controller. See the Denali NAND - * controller's user guide for more information (section 4.2.3.6). - */ -static int denali_send_pipeline_cmd(struct denali_nand_info *denali, - bool ecc_en, bool transfer_spare, - int access_type, int op) +static int denali_hw_ecc_fixup(struct mtd_info *mtd, + struct denali_nand_info *denali, + unsigned long *uncor_ecc_flags) { - uint32_t addr, cmd, irq_status; - static uint32_t page_count = 1; - - setup_ecc_for_xfer(denali, ecc_en, transfer_spare); + struct nand_chip *chip = mtd_to_nand(mtd); + int bank = denali->active_bank; + uint32_t ecc_cor; + unsigned int max_bitflips; - clear_interrupts(denali); + ecc_cor = ioread32(denali->reg + ECC_COR_INFO(bank)); + ecc_cor >>= ECC_COR_INFO__SHIFT(bank); - addr = BANK(denali->flash_bank) | denali->page; + if (ecc_cor & ECC_COR_INFO__UNCOR_ERR) { + /* + * This flag is set when uncorrectable error occurs at least in + * one ECC sector. We can not know "how many sectors", or + * "which sector(s)". We need erase-page check for all sectors. + */ + *uncor_ecc_flags = GENMASK(chip->ecc.steps - 1, 0); + return 0; + } - /* setup the acccess type */ - cmd = MODE_10 | addr; - index_addr(denali, cmd, access_type); + max_bitflips = FIELD_GET(ECC_COR_INFO__MAX_ERRORS, ecc_cor); - /* setup the pipeline command */ - index_addr(denali, cmd, 0x2000 | op | page_count); + /* + * The register holds the maximum of per-sector corrected bitflips. + * This is suitable for the return value of the ->read_page() callback. + * Unfortunately, we can not know the total number of corrected bits in + * the page. Increase the stats by max_bitflips. 
(compromised solution) + */ + mtd->ecc_stats.corrected += max_bitflips; - cmd = MODE_01 | addr; - writel(cmd, denali->flash_mem + INDEX_CTRL_REG); + return max_bitflips; +} - if (op == DENALI_READ) { - /* wait for command to be accepted */ - irq_status = wait_for_irq(denali, INTR_STATUS__LOAD_COMP); +static int denali_sw_ecc_fixup(struct mtd_info *mtd, + struct denali_nand_info *denali, + unsigned long *uncor_ecc_flags, uint8_t *buf) +{ + unsigned int ecc_size = denali->nand.ecc.size; + unsigned int bitflips = 0; + unsigned int max_bitflips = 0; + uint32_t err_addr, err_cor_info; + unsigned int err_byte, err_sector, err_device; + uint8_t err_cor_value; + unsigned int prev_sector = 0; + uint32_t irq_status; - if (irq_status == 0) - return -EIO; - } + denali_reset_irq(denali); - return 0; -} + do { + err_addr = ioread32(denali->reg + ECC_ERROR_ADDRESS); + err_sector = FIELD_GET(ECC_ERROR_ADDRESS__SECTOR, err_addr); + err_byte = FIELD_GET(ECC_ERROR_ADDRESS__OFFSET, err_addr); + + err_cor_info = ioread32(denali->reg + ERR_CORRECTION_INFO); + err_cor_value = FIELD_GET(ERR_CORRECTION_INFO__BYTE, + err_cor_info); + err_device = FIELD_GET(ERR_CORRECTION_INFO__DEVICE, + err_cor_info); + + /* reset the bitflip counter when crossing ECC sector */ + if (err_sector != prev_sector) + bitflips = 0; + + if (err_cor_info & ERR_CORRECTION_INFO__UNCOR) { + /* + * Check later if this is a real ECC error, or + * an erased sector. + */ + *uncor_ecc_flags |= BIT(err_sector); + } else if (err_byte < ecc_size) { + /* + * If err_byte is larger than ecc_size, means error + * happened in OOB, so we ignore it. It's no need for + * us to correct it err_device is represented the NAND + * error bits are happened in if there are more than + * one NAND connected. + */ + int offset; + unsigned int flips_in_byte; + + offset = (err_sector * ecc_size + err_byte) * + denali->devs_per_cs + err_device; + + /* correct the ECC error */ + flips_in_byte = hweight8(buf[offset] ^ err_cor_value); + buf[offset] ^= err_cor_value; + mtd->ecc_stats.corrected += flips_in_byte; + bitflips += flips_in_byte; + + max_bitflips = max(max_bitflips, bitflips); + } -/* helper function that simply writes a buffer to the flash */ -static int write_data_to_flash_mem(struct denali_nand_info *denali, - const uint8_t *buf, int len) -{ - uint32_t *buf32; - int i; + prev_sector = err_sector; + } while (!(err_cor_info & ERR_CORRECTION_INFO__LAST_ERR)); /* - * verify that the len is a multiple of 4. - * see comment in read_data_from_flash_mem() + * Once handle all ECC errors, controller will trigger an + * ECC_TRANSACTION_DONE interrupt. */ - BUG_ON((len % 4) != 0); + irq_status = denali_wait_for_irq(denali, INTR__ECC_TRANSACTION_DONE); + if (!(irq_status & INTR__ECC_TRANSACTION_DONE)) + return -EIO; - /* write the data to the flash memory */ - buf32 = (uint32_t *)buf; - for (i = 0; i < len / 4; i++) - writel(*buf32++, denali->flash_mem + INDEX_DATA_REG); - return i * 4; /* intent is to return the number of bytes read */ + return max_bitflips; } -/* helper function that simply reads a buffer from the flash */ -static int read_data_from_flash_mem(struct denali_nand_info *denali, - uint8_t *buf, int len) +static void denali_setup_dma64(struct denali_nand_info *denali, + dma_addr_t dma_addr, int page, int write) { - uint32_t *buf32; - int i; + uint32_t mode; + const int page_count = 1; - /* - * we assume that len will be a multiple of 4, if not it would be nice - * to know about it ASAP rather than have random failures... 
- * This assumption is based on the fact that this function is designed - * to be used to read flash pages, which are typically multiples of 4. - */ - BUG_ON((len % 4) != 0); + mode = DENALI_MAP10 | DENALI_BANK(denali) | page; - /* transfer the data from the flash */ - buf32 = (uint32_t *)buf; - for (i = 0; i < len / 4; i++) - *buf32++ = readl(denali->flash_mem + INDEX_DATA_REG); + /* DMA is a three step process */ - return i * 4; /* intent is to return the number of bytes read */ -} + /* + * 1. setup transfer type, interrupt when complete, + * burst len = 64 bytes, the number of pages + */ + denali->host_write(denali, mode, + 0x01002000 | (64 << 16) | (write << 8) | page_count); -static void denali_mode_main_access(struct denali_nand_info *denali) -{ - uint32_t addr, cmd; + /* 2. set memory low address */ + denali->host_write(denali, mode, lower_32_bits(dma_addr)); - addr = BANK(denali->flash_bank) | denali->page; - cmd = MODE_10 | addr; - index_addr(denali, cmd, MAIN_ACCESS); + /* 3. set memory high address */ + denali->host_write(denali, mode, upper_32_bits(dma_addr)); } -static void denali_mode_main_spare_access(struct denali_nand_info *denali) +static void denali_setup_dma32(struct denali_nand_info *denali, + dma_addr_t dma_addr, int page, int write) { - uint32_t addr, cmd; + uint32_t mode; + const int page_count = 1; - addr = BANK(denali->flash_bank) | denali->page; - cmd = MODE_10 | addr; - index_addr(denali, cmd, MAIN_SPARE_ACCESS); -} + mode = DENALI_MAP10 | DENALI_BANK(denali); -/* writes OOB data to the device */ -static int write_oob_data(struct mtd_info *mtd, uint8_t *buf, int page) -{ - struct denali_nand_info *denali = mtd_to_denali(mtd); - uint32_t irq_status; - uint32_t irq_mask = INTR_STATUS__PROGRAM_COMP | - INTR_STATUS__PROGRAM_FAIL; - int status = 0; + /* DMA is a four step process */ - denali->page = page; + /* 1. setup transfer type and # of pages */ + denali->host_write(denali, mode | page, + 0x2000 | (write << 8) | page_count); - if (denali_send_pipeline_cmd(denali, false, true, SPARE_ACCESS, - DENALI_WRITE) == 0) { - write_data_to_flash_mem(denali, buf, mtd->oobsize); + /* 2. set memory high address bits 23:8 */ + denali->host_write(denali, mode | ((dma_addr >> 16) << 8), 0x2200); - /* wait for operation to complete */ - irq_status = wait_for_irq(denali, irq_mask); + /* 3. set memory low address bits 23:8 */ + denali->host_write(denali, mode | ((dma_addr & 0xffff) << 8), 0x2300); - if (irq_status == 0) { - dev_err(denali->dev, "OOB write failed\n"); - status = -EIO; - } - } else { - printf("unable to send pipeline command\n"); - status = -EIO; - } - return status; + /* 4. 
interrupt when complete, burst len = 64 bytes */ + denali->host_write(denali, mode | 0x14000, 0x2400); } -/* reads OOB data from the device */ -static void read_oob_data(struct mtd_info *mtd, uint8_t *buf, int page) +static int denali_pio_read(struct denali_nand_info *denali, void *buf, + size_t size, int page, int raw) { - struct denali_nand_info *denali = mtd_to_denali(mtd); - uint32_t irq_mask = INTR_STATUS__LOAD_COMP; - uint32_t irq_status, addr, cmd; + u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page; + uint32_t *buf32 = (uint32_t *)buf; + uint32_t irq_status, ecc_err_mask; + int i; - denali->page = page; + if (denali->caps & DENALI_CAP_HW_ECC_FIXUP) + ecc_err_mask = INTR__ECC_UNCOR_ERR; + else + ecc_err_mask = INTR__ECC_ERR; - if (denali_send_pipeline_cmd(denali, false, true, SPARE_ACCESS, - DENALI_READ) == 0) { - read_data_from_flash_mem(denali, buf, mtd->oobsize); + denali_reset_irq(denali); - /* - * wait for command to be accepted - * can always use status0 bit as the - * mask is identical for each bank. - */ - irq_status = wait_for_irq(denali, irq_mask); + for (i = 0; i < size / 4; i++) + *buf32++ = denali->host_read(denali, addr); - if (irq_status == 0) - printf("page on OOB timeout %d\n", denali->page); + irq_status = denali_wait_for_irq(denali, INTR__PAGE_XFER_INC); + if (!(irq_status & INTR__PAGE_XFER_INC)) + return -EIO; - /* - * We set the device back to MAIN_ACCESS here as I observed - * instability with the controller if you do a block erase - * and the last transaction was a SPARE_ACCESS. Block erase - * is reliable (according to the MTD test infrastructure) - * if you are in MAIN_ACCESS. - */ - addr = BANK(denali->flash_bank) | denali->page; - cmd = MODE_10 | addr; - index_addr(denali, cmd, MAIN_ACCESS); - } + if (irq_status & INTR__ERASED_PAGE) + memset(buf, 0xff, size); + + return irq_status & ecc_err_mask ? -EBADMSG : 0; } -/* - * this function examines buffers to see if they contain data that - * indicate that the buffer is part of an erased region of flash. - */ -static bool is_erased(uint8_t *buf, int len) +static int denali_pio_write(struct denali_nand_info *denali, + const void *buf, size_t size, int page, int raw) { + u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page; + const uint32_t *buf32 = (uint32_t *)buf; + uint32_t irq_status; int i; - for (i = 0; i < len; i++) - if (buf[i] != 0xFF) - return false; - return true; + denali_reset_irq(denali); + + for (i = 0; i < size / 4; i++) + denali->host_write(denali, addr, *buf32++); + + irq_status = denali_wait_for_irq(denali, + INTR__PROGRAM_COMP | INTR__PROGRAM_FAIL); + if (!(irq_status & INTR__PROGRAM_COMP)) + return -EIO; + + return 0; } -/* programs the controller to either enable/disable DMA transfers */ -static void denali_enable_dma(struct denali_nand_info *denali, bool en) +static int denali_pio_xfer(struct denali_nand_info *denali, void *buf, + size_t size, int page, int raw, int write) { - writel(en ? 
DMA_ENABLE__FLAG : 0, denali->flash_reg + DMA_ENABLE); - readl(denali->flash_reg + DMA_ENABLE); + if (write) + return denali_pio_write(denali, buf, size, page, raw); + else + return denali_pio_read(denali, buf, size, page, raw); } -/* setups the HW to perform the data DMA */ -static void denali_setup_dma(struct denali_nand_info *denali, int op) +static int denali_dma_xfer(struct denali_nand_info *denali, void *buf, + size_t size, int page, int raw, int write) { - uint32_t mode; - const int page_count = 1; - uint64_t addr = (unsigned long)denali->buf.dma_buf; - - flush_dcache_range(addr, addr + sizeof(denali->buf.dma_buf)); - -/* For Denali controller that is 64 bit bus IP core */ -#ifdef CONFIG_SYS_NAND_DENALI_64BIT - mode = MODE_10 | BANK(denali->flash_bank) | denali->page; - - /* DMA is a three step process */ + dma_addr_t dma_addr; + uint32_t irq_mask, irq_status, ecc_err_mask; + enum dma_data_direction dir = write ? DMA_TO_DEVICE : DMA_FROM_DEVICE; + int ret = 0; + + dma_addr = dma_map_single(denali->dev, buf, size, dir); + if (dma_mapping_error(denali->dev, dma_addr)) { + dev_dbg(denali->dev, "Failed to DMA-map buffer. Trying PIO.\n"); + return denali_pio_xfer(denali, buf, size, page, raw, write); + } - /* 1. setup transfer type, interrupt when complete, - burst len = 64 bytes, the number of pages */ - index_addr(denali, mode, 0x01002000 | (64 << 16) | op | page_count); + if (write) { + /* + * INTR__PROGRAM_COMP is never asserted for the DMA transfer. + * We can use INTR__DMA_CMD_COMP instead. This flag is asserted + * when the page program is completed. + */ + irq_mask = INTR__DMA_CMD_COMP | INTR__PROGRAM_FAIL; + ecc_err_mask = 0; + } else if (denali->caps & DENALI_CAP_HW_ECC_FIXUP) { + irq_mask = INTR__DMA_CMD_COMP; + ecc_err_mask = INTR__ECC_UNCOR_ERR; + } else { + irq_mask = INTR__DMA_CMD_COMP; + ecc_err_mask = INTR__ECC_ERR; + } - /* 2. set memory low address bits 31:0 */ - index_addr(denali, mode, addr); + iowrite32(DMA_ENABLE__FLAG, denali->reg + DMA_ENABLE); - /* 3. set memory high address bits 64:32 */ - index_addr(denali, mode, addr >> 32); -#else - mode = MODE_10 | BANK(denali->flash_bank); + denali_reset_irq(denali); + denali->setup_dma(denali, dma_addr, page, write); - /* DMA is a four step process */ + irq_status = denali_wait_for_irq(denali, irq_mask); + if (!(irq_status & INTR__DMA_CMD_COMP)) + ret = -EIO; + else if (irq_status & ecc_err_mask) + ret = -EBADMSG; - /* 1. setup transfer type and # of pages */ - index_addr(denali, mode | denali->page, 0x2000 | op | page_count); + iowrite32(0, denali->reg + DMA_ENABLE); - /* 2. set memory high address bits 23:8 */ - index_addr(denali, mode | (((addr >> 16) & 0xffff) << 8), 0x2200); + dma_unmap_single(denali->dev, dma_addr, size, dir); - /* 3. set memory low address bits 23:8 */ - index_addr(denali, mode | ((addr & 0xffff) << 8), 0x2300); + if (irq_status & INTR__ERASED_PAGE) + memset(buf, 0xff, size); - /* 4. 
interrupt when complete, burst len = 64 bytes */ - index_addr(denali, mode | 0x14000, 0x2400); -#endif + return ret; } -/* Common DMA function */ -static uint32_t denali_dma_configuration(struct denali_nand_info *denali, - uint32_t ops, bool raw_xfer, - uint32_t irq_mask, int oob_required) +static int denali_data_xfer(struct denali_nand_info *denali, void *buf, + size_t size, int page, int raw, int write) { - uint32_t irq_status = 0; - /* setup_ecc_for_xfer(bool ecc_en, bool transfer_spare) */ - setup_ecc_for_xfer(denali, !raw_xfer, oob_required); - - /* clear any previous interrupt flags */ - clear_interrupts(denali); - - /* enable the DMA */ - denali_enable_dma(denali, true); - - /* setup the DMA */ - denali_setup_dma(denali, ops); - - /* wait for operation to complete */ - irq_status = wait_for_irq(denali, irq_mask); + iowrite32(raw ? 0 : ECC_ENABLE__FLAG, denali->reg + ECC_ENABLE); + iowrite32(raw ? TRANSFER_SPARE_REG__FLAG : 0, + denali->reg + TRANSFER_SPARE_REG); - /* if ECC fault happen, seems we need delay before turning off DMA. - * If not, the controller will go into non responsive condition */ - if (irq_status & INTR_STATUS__ECC_UNCOR_ERR) - udelay(100); - - /* disable the DMA */ - denali_enable_dma(denali, false); - - return irq_status; + if (denali->dma_avail) + return denali_dma_xfer(denali, buf, size, page, raw, write); + else + return denali_pio_xfer(denali, buf, size, page, raw, write); } -static int write_page(struct mtd_info *mtd, struct nand_chip *chip, - const uint8_t *buf, bool raw_xfer, int oob_required) +static void denali_oob_xfer(struct mtd_info *mtd, struct nand_chip *chip, + int page, int write) { struct denali_nand_info *denali = mtd_to_denali(mtd); + unsigned int start_cmd = write ? NAND_CMD_SEQIN : NAND_CMD_READ0; + unsigned int rnd_cmd = write ? 
NAND_CMD_RNDIN : NAND_CMD_RNDOUT; + int writesize = mtd->writesize; + int oobsize = mtd->oobsize; + uint8_t *bufpoi = chip->oob_poi; + int ecc_steps = chip->ecc.steps; + int ecc_size = chip->ecc.size; + int ecc_bytes = chip->ecc.bytes; + int oob_skip = denali->oob_skip_bytes; + size_t size = writesize + oobsize; + int i, pos, len; + + /* BBM at the beginning of the OOB area */ + chip->cmdfunc(mtd, start_cmd, writesize, page); + if (write) + chip->write_buf(mtd, bufpoi, oob_skip); + else + chip->read_buf(mtd, bufpoi, oob_skip); + bufpoi += oob_skip; + + /* OOB ECC */ + for (i = 0; i < ecc_steps; i++) { + pos = ecc_size + i * (ecc_size + ecc_bytes); + len = ecc_bytes; + + if (pos >= writesize) + pos += oob_skip; + else if (pos + len > writesize) + len = writesize - pos; + + chip->cmdfunc(mtd, rnd_cmd, pos, -1); + if (write) + chip->write_buf(mtd, bufpoi, len); + else + chip->read_buf(mtd, bufpoi, len); + bufpoi += len; + if (len < ecc_bytes) { + len = ecc_bytes - len; + chip->cmdfunc(mtd, rnd_cmd, writesize + oob_skip, -1); + if (write) + chip->write_buf(mtd, bufpoi, len); + else + chip->read_buf(mtd, bufpoi, len); + bufpoi += len; + } + } - uint32_t irq_status = 0; - uint32_t irq_mask = INTR_STATUS__DMA_CMD_COMP; - - denali->status = 0; - - /* copy buffer into DMA buffer */ - memcpy(denali->buf.dma_buf, buf, mtd->writesize); + /* OOB free */ + len = oobsize - (bufpoi - chip->oob_poi); + chip->cmdfunc(mtd, rnd_cmd, size - len, -1); + if (write) + chip->write_buf(mtd, bufpoi, len); + else + chip->read_buf(mtd, bufpoi, len); +} - /* need extra memcpy for raw transfer */ - if (raw_xfer) - memcpy(denali->buf.dma_buf + mtd->writesize, - chip->oob_poi, mtd->oobsize); +static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip, + uint8_t *buf, int oob_required, int page) +{ + struct denali_nand_info *denali = mtd_to_denali(mtd); + int writesize = mtd->writesize; + int oobsize = mtd->oobsize; + int ecc_steps = chip->ecc.steps; + int ecc_size = chip->ecc.size; + int ecc_bytes = chip->ecc.bytes; + void *tmp_buf = denali->buf; + int oob_skip = denali->oob_skip_bytes; + size_t size = writesize + oobsize; + int ret, i, pos, len; + + ret = denali_data_xfer(denali, tmp_buf, size, page, 1, 0); + if (ret) + return ret; + + /* Arrange the buffer for syndrome payload/ecc layout */ + if (buf) { + for (i = 0; i < ecc_steps; i++) { + pos = i * (ecc_size + ecc_bytes); + len = ecc_size; + + if (pos >= writesize) + pos += oob_skip; + else if (pos + len > writesize) + len = writesize - pos; + + memcpy(buf, tmp_buf + pos, len); + buf += len; + if (len < ecc_size) { + len = ecc_size - len; + memcpy(buf, tmp_buf + writesize + oob_skip, + len); + buf += len; + } + } + } - /* setting up DMA */ - irq_status = denali_dma_configuration(denali, DENALI_WRITE, raw_xfer, - irq_mask, oob_required); + if (oob_required) { + uint8_t *oob = chip->oob_poi; + + /* BBM at the beginning of the OOB area */ + memcpy(oob, tmp_buf + writesize, oob_skip); + oob += oob_skip; + + /* OOB ECC */ + for (i = 0; i < ecc_steps; i++) { + pos = ecc_size + i * (ecc_size + ecc_bytes); + len = ecc_bytes; + + if (pos >= writesize) + pos += oob_skip; + else if (pos + len > writesize) + len = writesize - pos; + + memcpy(oob, tmp_buf + pos, len); + oob += len; + if (len < ecc_bytes) { + len = ecc_bytes - len; + memcpy(oob, tmp_buf + writesize + oob_skip, + len); + oob += len; + } + } - /* if timeout happen, error out */ - if (!(irq_status & INTR_STATUS__DMA_CMD_COMP)) { - debug("DMA timeout for denali write_page\n"); - denali->status = 
NAND_STATUS_FAIL; - return -EIO; + /* OOB free */ + len = oobsize - (oob - chip->oob_poi); + memcpy(oob, tmp_buf + size - len, len); } - if (irq_status & INTR_STATUS__LOCKED_BLK) { - debug("Failed as write to locked block\n"); - denali->status = NAND_STATUS_FAIL; - return -EIO; - } return 0; } -/* NAND core entry points */ - -/* - * this is the callback that the NAND core calls to write a page. Since - * writing a page with ECC or without is similar, all the work is done - * by write_page above. - */ -static int denali_write_page(struct mtd_info *mtd, struct nand_chip *chip, - const uint8_t *buf, int oob_required, int page) +static int denali_read_oob(struct mtd_info *mtd, struct nand_chip *chip, + int page) { - struct denali_nand_info *denali = mtd_to_denali(mtd); - - /* - * for regular page writes, we let HW handle all the ECC - * data written to the device. - */ - if (oob_required) - /* switch to main + spare access */ - denali_mode_main_spare_access(denali); - else - /* switch to main access only */ - denali_mode_main_access(denali); + denali_oob_xfer(mtd, chip, page, 0); - return write_page(mtd, chip, buf, false, oob_required); + return 0; } -/* - * This is the callback that the NAND core calls to write a page without ECC. - * raw access is similar to ECC page writes, so all the work is done in the - * write_page() function above. - */ -static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip, - const uint8_t *buf, int oob_required, - int page) +static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip, + int page) { struct denali_nand_info *denali = mtd_to_denali(mtd); + int status; - /* - * for raw page writes, we want to disable ECC and simply write - * whatever data is in the buffer. - */ + denali_reset_irq(denali); - if (oob_required) - /* switch to main + spare access */ - denali_mode_main_spare_access(denali); - else - /* switch to main access only */ - denali_mode_main_access(denali); + denali_oob_xfer(mtd, chip, page, 1); - return write_page(mtd, chip, buf, true, oob_required); -} + chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1); + status = chip->waitfunc(mtd, chip); -static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip, - int page) -{ - return write_oob_data(mtd, chip->oob_poi, page); + return status & NAND_STATUS_FAIL ? 
-EIO : 0; } -/* raw include ECC value and all the spare area */ -static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip, - uint8_t *buf, int oob_required, int page) +static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip, + uint8_t *buf, int oob_required, int page) { struct denali_nand_info *denali = mtd_to_denali(mtd); + unsigned long uncor_ecc_flags = 0; + int stat = 0; + int ret; - uint32_t irq_status, irq_mask = INTR_STATUS__DMA_CMD_COMP; + ret = denali_data_xfer(denali, buf, mtd->writesize, page, 0, 0); + if (ret && ret != -EBADMSG) + return ret; - if (denali->page != page) { - debug("Missing NAND_CMD_READ0 command\n"); - return -EIO; - } + if (denali->caps & DENALI_CAP_HW_ECC_FIXUP) + stat = denali_hw_ecc_fixup(mtd, denali, &uncor_ecc_flags); + else if (ret == -EBADMSG) + stat = denali_sw_ecc_fixup(mtd, denali, &uncor_ecc_flags, buf); - if (oob_required) - /* switch to main + spare access */ - denali_mode_main_spare_access(denali); - else - /* switch to main access only */ - denali_mode_main_access(denali); + if (stat < 0) + return stat; - /* setting up the DMA where ecc_enable is false */ - irq_status = denali_dma_configuration(denali, DENALI_READ, true, - irq_mask, oob_required); + if (uncor_ecc_flags) { + ret = denali_read_oob(mtd, chip, page); + if (ret) + return ret; - /* if timeout happen, error out */ - if (!(irq_status & INTR_STATUS__DMA_CMD_COMP)) { - debug("DMA timeout for denali_read_page_raw\n"); - return -EIO; + stat = denali_check_erased_page(mtd, chip, buf, + uncor_ecc_flags, stat); } - /* splitting the content to destination buffer holder */ - memcpy(chip->oob_poi, (denali->buf.dma_buf + mtd->writesize), - mtd->oobsize); - memcpy(buf, denali->buf.dma_buf, mtd->writesize); - - return 0; + return stat; } -static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip, - uint8_t *buf, int oob_required, int page) +static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip, + const uint8_t *buf, int oob_required, int page) { struct denali_nand_info *denali = mtd_to_denali(mtd); - uint32_t irq_status, irq_mask = INTR_STATUS__DMA_CMD_COMP; + int writesize = mtd->writesize; + int oobsize = mtd->oobsize; + int ecc_steps = chip->ecc.steps; + int ecc_size = chip->ecc.size; + int ecc_bytes = chip->ecc.bytes; + void *tmp_buf = denali->buf; + int oob_skip = denali->oob_skip_bytes; + size_t size = writesize + oobsize; + int i, pos, len; - if (denali->page != page) { - debug("Missing NAND_CMD_READ0 command\n"); - return -EIO; + /* + * Fill the buffer with 0xff first except the full page transfer. + * This simplifies the logic. 
+ */ + if (!buf || !oob_required) + memset(tmp_buf, 0xff, size); + + /* Arrange the buffer for syndrome payload/ecc layout */ + if (buf) { + for (i = 0; i < ecc_steps; i++) { + pos = i * (ecc_size + ecc_bytes); + len = ecc_size; + + if (pos >= writesize) + pos += oob_skip; + else if (pos + len > writesize) + len = writesize - pos; + + memcpy(tmp_buf + pos, buf, len); + buf += len; + if (len < ecc_size) { + len = ecc_size - len; + memcpy(tmp_buf + writesize + oob_skip, buf, + len); + buf += len; + } + } } - if (oob_required) - /* switch to main + spare access */ - denali_mode_main_spare_access(denali); - else - /* switch to main access only */ - denali_mode_main_access(denali); - - /* setting up the DMA where ecc_enable is true */ - irq_status = denali_dma_configuration(denali, DENALI_READ, false, - irq_mask, oob_required); - - memcpy(buf, denali->buf.dma_buf, mtd->writesize); - - /* check whether any ECC error */ - if (irq_status & INTR_STATUS__ECC_UNCOR_ERR) { - /* is the ECC cause by erase page, check using read_page_raw */ - debug(" Uncorrected ECC detected\n"); - denali_read_page_raw(mtd, chip, buf, oob_required, - denali->page); - - if (is_erased(buf, mtd->writesize) == true && - is_erased(chip->oob_poi, mtd->oobsize) == true) { - debug(" ECC error cause by erased block\n"); - /* false alarm, return the 0xFF */ - } else { - return -EBADMSG; + if (oob_required) { + const uint8_t *oob = chip->oob_poi; + + /* BBM at the beginning of the OOB area */ + memcpy(tmp_buf + writesize, oob, oob_skip); + oob += oob_skip; + + /* OOB ECC */ + for (i = 0; i < ecc_steps; i++) { + pos = ecc_size + i * (ecc_size + ecc_bytes); + len = ecc_bytes; + + if (pos >= writesize) + pos += oob_skip; + else if (pos + len > writesize) + len = writesize - pos; + + memcpy(tmp_buf + pos, oob, len); + oob += len; + if (len < ecc_bytes) { + len = ecc_bytes - len; + memcpy(tmp_buf + writesize + oob_skip, oob, + len); + oob += len; + } } + + /* OOB free */ + len = oobsize - (oob - chip->oob_poi); + memcpy(tmp_buf + size - len, oob, len); } - memcpy(buf, denali->buf.dma_buf, mtd->writesize); - return 0; + + return denali_data_xfer(denali, tmp_buf, size, page, 1, 1); } -static int denali_read_oob(struct mtd_info *mtd, struct nand_chip *chip, - int page) +static int denali_write_page(struct mtd_info *mtd, struct nand_chip *chip, + const uint8_t *buf, int oob_required, int page) { - read_oob_data(mtd, chip->oob_poi, page); + struct denali_nand_info *denali = mtd_to_denali(mtd); - return 0; + return denali_data_xfer(denali, (void *)buf, mtd->writesize, + page, 0, 1); } -static uint8_t denali_read_byte(struct mtd_info *mtd) +static void denali_select_chip(struct mtd_info *mtd, int chip) { struct denali_nand_info *denali = mtd_to_denali(mtd); - uint32_t addr, result; - addr = (uint32_t)MODE_11 | BANK(denali->flash_bank); - index_addr_read_data(denali, addr | 2, &result); - return (uint8_t)result & 0xFF; + denali->active_bank = chip; } -static void denali_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) +static int denali_waitfunc(struct mtd_info *mtd, struct nand_chip *chip) { struct denali_nand_info *denali = mtd_to_denali(mtd); - uint32_t i, addr, result; - - /* delay for tR (data transfer from Flash array to data register) */ - udelay(25); + uint32_t irq_status; - /* ensure device completed else additional delay and polling */ - wait_for_irq(denali, INTR_STATUS__INT_ACT); + /* R/B# pin transitioned from low to high? 
*/ + irq_status = denali_wait_for_irq(denali, INTR__INT_ACT); - addr = (uint32_t)MODE_11 | BANK(denali->flash_bank); - for (i = 0; i < len; i++) { - index_addr_read_data(denali, (uint32_t)addr | 2, &result); - write_byte_to_buf(denali, result); - } - memcpy(buf, denali->buf.buf, len); + return irq_status & INTR__INT_ACT ? 0 : NAND_STATUS_FAIL; } -static void denali_select_chip(struct mtd_info *mtd, int chip) +static int denali_erase(struct mtd_info *mtd, int page) { struct denali_nand_info *denali = mtd_to_denali(mtd); + uint32_t irq_status; - denali->flash_bank = chip; -} + denali_reset_irq(denali); -static int denali_waitfunc(struct mtd_info *mtd, struct nand_chip *chip) -{ - struct denali_nand_info *denali = mtd_to_denali(mtd); - int status = denali->status; + denali->host_write(denali, DENALI_MAP10 | DENALI_BANK(denali) | page, + DENALI_ERASE); - denali->status = 0; + /* wait for erase to complete or failure to occur */ + irq_status = denali_wait_for_irq(denali, + INTR__ERASE_COMP | INTR__ERASE_FAIL); - return status; + return irq_status & INTR__ERASE_COMP ? 0 : NAND_STATUS_FAIL; } -static int denali_erase(struct mtd_info *mtd, int page) +static int __maybe_unused denali_setup_data_interface(struct mtd_info *mtd, int chipnr, + const struct nand_data_interface *conf) { struct denali_nand_info *denali = mtd_to_denali(mtd); + const struct nand_sdr_timings *timings; + unsigned long t_clk; + int acc_clks, re_2_we, re_2_re, we_2_re, addr_2_data; + int rdwr_en_lo, rdwr_en_hi, rdwr_en_lo_hi, cs_setup; + int addr_2_data_mask; + uint32_t tmp; - uint32_t cmd, irq_status; + timings = nand_get_sdr_timings(conf); + if (IS_ERR(timings)) + return PTR_ERR(timings); - clear_interrupts(denali); + /* clk_x period in picoseconds */ + t_clk = DIV_ROUND_DOWN_ULL(1000000000000ULL, denali->clk_x_rate); + if (!t_clk) + return -EINVAL; - /* setup page read request for access type */ - cmd = MODE_10 | BANK(denali->flash_bank) | page; - index_addr(denali, cmd, 0x1); + if (chipnr == NAND_DATA_IFACE_CHECK_ONLY) + return 0; - /* wait for erase to complete or failure to occur */ - irq_status = wait_for_irq(denali, INTR_STATUS__ERASE_COMP | - INTR_STATUS__ERASE_FAIL); + /* tREA -> ACC_CLKS */ + acc_clks = DIV_ROUND_UP(timings->tREA_max, t_clk); + acc_clks = min_t(int, acc_clks, ACC_CLKS__VALUE); + + tmp = ioread32(denali->reg + ACC_CLKS); + tmp &= ~ACC_CLKS__VALUE; + tmp |= FIELD_PREP(ACC_CLKS__VALUE, acc_clks); + iowrite32(tmp, denali->reg + ACC_CLKS); + + /* tRWH -> RE_2_WE */ + re_2_we = DIV_ROUND_UP(timings->tRHW_min, t_clk); + re_2_we = min_t(int, re_2_we, RE_2_WE__VALUE); + + tmp = ioread32(denali->reg + RE_2_WE); + tmp &= ~RE_2_WE__VALUE; + tmp |= FIELD_PREP(RE_2_WE__VALUE, re_2_we); + iowrite32(tmp, denali->reg + RE_2_WE); + + /* tRHZ -> RE_2_RE */ + re_2_re = DIV_ROUND_UP(timings->tRHZ_max, t_clk); + re_2_re = min_t(int, re_2_re, RE_2_RE__VALUE); + + tmp = ioread32(denali->reg + RE_2_RE); + tmp &= ~RE_2_RE__VALUE; + tmp |= FIELD_PREP(RE_2_RE__VALUE, re_2_re); + iowrite32(tmp, denali->reg + RE_2_RE); + + /* + * tCCS, tWHR -> WE_2_RE + * + * With WE_2_RE properly set, the Denali controller automatically takes + * care of the delay; the driver need not set NAND_WAIT_TCCS. 
+ */ + we_2_re = DIV_ROUND_UP(max(timings->tCCS_min, timings->tWHR_min), + t_clk); + we_2_re = min_t(int, we_2_re, TWHR2_AND_WE_2_RE__WE_2_RE); + + tmp = ioread32(denali->reg + TWHR2_AND_WE_2_RE); + tmp &= ~TWHR2_AND_WE_2_RE__WE_2_RE; + tmp |= FIELD_PREP(TWHR2_AND_WE_2_RE__WE_2_RE, we_2_re); + iowrite32(tmp, denali->reg + TWHR2_AND_WE_2_RE); + + /* tADL -> ADDR_2_DATA */ - if (irq_status & INTR_STATUS__ERASE_FAIL || - irq_status & INTR_STATUS__LOCKED_BLK) - return NAND_STATUS_FAIL; + /* for older versions, ADDR_2_DATA is only 6 bit wide */ + addr_2_data_mask = TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA; + if (denali->revision < 0x0501) + addr_2_data_mask >>= 1; + + addr_2_data = DIV_ROUND_UP(timings->tADL_min, t_clk); + addr_2_data = min_t(int, addr_2_data, addr_2_data_mask); + + tmp = ioread32(denali->reg + TCWAW_AND_ADDR_2_DATA); + tmp &= ~TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA; + tmp |= FIELD_PREP(TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA, addr_2_data); + iowrite32(tmp, denali->reg + TCWAW_AND_ADDR_2_DATA); + + /* tREH, tWH -> RDWR_EN_HI_CNT */ + rdwr_en_hi = DIV_ROUND_UP(max(timings->tREH_min, timings->tWH_min), + t_clk); + rdwr_en_hi = min_t(int, rdwr_en_hi, RDWR_EN_HI_CNT__VALUE); + + tmp = ioread32(denali->reg + RDWR_EN_HI_CNT); + tmp &= ~RDWR_EN_HI_CNT__VALUE; + tmp |= FIELD_PREP(RDWR_EN_HI_CNT__VALUE, rdwr_en_hi); + iowrite32(tmp, denali->reg + RDWR_EN_HI_CNT); + + /* tRP, tWP -> RDWR_EN_LO_CNT */ + rdwr_en_lo = DIV_ROUND_UP(max(timings->tRP_min, timings->tWP_min), + t_clk); + rdwr_en_lo_hi = DIV_ROUND_UP(max(timings->tRC_min, timings->tWC_min), + t_clk); + rdwr_en_lo_hi = max(rdwr_en_lo_hi, DENALI_CLK_X_MULT); + rdwr_en_lo = max(rdwr_en_lo, rdwr_en_lo_hi - rdwr_en_hi); + rdwr_en_lo = min_t(int, rdwr_en_lo, RDWR_EN_LO_CNT__VALUE); + + tmp = ioread32(denali->reg + RDWR_EN_LO_CNT); + tmp &= ~RDWR_EN_LO_CNT__VALUE; + tmp |= FIELD_PREP(RDWR_EN_LO_CNT__VALUE, rdwr_en_lo); + iowrite32(tmp, denali->reg + RDWR_EN_LO_CNT); + + /* tCS, tCEA -> CS_SETUP_CNT */ + cs_setup = max3((int)DIV_ROUND_UP(timings->tCS_min, t_clk) - rdwr_en_lo, + (int)DIV_ROUND_UP(timings->tCEA_max, t_clk) - acc_clks, + 0); + cs_setup = min_t(int, cs_setup, CS_SETUP_CNT__VALUE); + + tmp = ioread32(denali->reg + CS_SETUP_CNT); + tmp &= ~CS_SETUP_CNT__VALUE; + tmp |= FIELD_PREP(CS_SETUP_CNT__VALUE, cs_setup); + iowrite32(tmp, denali->reg + CS_SETUP_CNT); return 0; } -static void denali_cmdfunc(struct mtd_info *mtd, unsigned int cmd, int col, - int page) +static void denali_reset_banks(struct denali_nand_info *denali) { - struct denali_nand_info *denali = mtd_to_denali(mtd); - uint32_t addr; - - switch (cmd) { - case NAND_CMD_PAGEPROG: - break; - case NAND_CMD_STATUS: - addr = MODE_11 | BANK(denali->flash_bank); - index_addr(denali, addr | 0, cmd); - break; - case NAND_CMD_READID: - case NAND_CMD_PARAM: - reset_buf(denali); - /* - * sometimes ManufactureId read from register is not right - * e.g. 
some of Micron MT29F32G08QAA MLC NAND chips - * So here we send READID cmd to NAND insteand - */ - addr = MODE_11 | BANK(denali->flash_bank); - index_addr(denali, addr | 0, cmd); - index_addr(denali, addr | 1, col & 0xFF); - if (cmd == NAND_CMD_PARAM) - udelay(50); - break; - case NAND_CMD_RNDOUT: - addr = MODE_11 | BANK(denali->flash_bank); - index_addr(denali, addr | 0, cmd); - index_addr(denali, addr | 1, col & 0xFF); - index_addr(denali, addr | 1, col >> 8); - index_addr(denali, addr | 0, NAND_CMD_RNDOUTSTART); - break; - case NAND_CMD_READ0: - case NAND_CMD_SEQIN: - denali->page = page; - break; - case NAND_CMD_RESET: - reset_bank(denali); - break; - case NAND_CMD_READOOB: - /* TODO: Read OOB data */ - break; - case NAND_CMD_ERASE1: - /* - * supporting block erase only, not multiblock erase as - * it will cross plane and software need complex calculation - * to identify the block count for the cross plane - */ - denali_erase(mtd, page); - break; - case NAND_CMD_ERASE2: - /* nothing to do here as it was done during NAND_CMD_ERASE1 */ - break; - case NAND_CMD_UNLOCK1: - addr = MODE_10 | BANK(denali->flash_bank) | page; - index_addr(denali, addr | 0, DENALI_UNLOCK_START); - break; - case NAND_CMD_UNLOCK2: - addr = MODE_10 | BANK(denali->flash_bank) | page; - index_addr(denali, addr | 0, DENALI_UNLOCK_END); - break; - case NAND_CMD_LOCK: - addr = MODE_10 | BANK(denali->flash_bank); - index_addr(denali, addr | 0, DENALI_LOCK); - break; - default: - printf(": unsupported command received 0x%x\n", cmd); - break; + u32 irq_status; + int i; + + for (i = 0; i < denali->max_banks; i++) { + denali->active_bank = i; + + denali_reset_irq(denali); + + iowrite32(DEVICE_RESET__BANK(i), + denali->reg + DEVICE_RESET); + + irq_status = denali_wait_for_irq(denali, + INTR__RST_COMP | INTR__INT_ACT | INTR__TIME_OUT); + if (!(irq_status & INTR__INT_ACT)) + break; } + + dev_dbg(denali->dev, "%d chips connected\n", i); + denali->max_banks = i; } -/* end NAND core entry points */ -/* Initialization code to bring the device up to a known good state */ static void denali_hw_init(struct denali_nand_info *denali) { /* @@ -1154,125 +1086,284 @@ static void denali_hw_init(struct denali_nand_info *denali) * override it. */ if (!denali->revision) - denali->revision = swab16(ioread32(denali->flash_reg + REVISION)); + denali->revision = swab16(ioread32(denali->reg + REVISION)); /* * tell driver how many bit controller will skip before writing * ECC code in OOB. This is normally used for bad block marker */ - writel(CONFIG_NAND_DENALI_SPARE_AREA_SKIP_BYTES, - denali->flash_reg + SPARE_AREA_SKIP_BYTES); - detect_max_banks(denali); - denali_nand_reset(denali); - writel(0x0F, denali->flash_reg + RB_PIN_ENABLED); - writel(CHIP_EN_DONT_CARE__FLAG, - denali->flash_reg + CHIP_ENABLE_DONT_CARE); - writel(0xffff, denali->flash_reg + SPARE_AREA_MARKER); - - /* Should set value for these registers when init */ - writel(0, denali->flash_reg + TWO_ROW_ADDR_CYCLES); - writel(1, denali->flash_reg + ECC_ENABLE); - denali_nand_timing_set(denali); - denali_irq_init(denali); + denali->oob_skip_bytes = CONFIG_NAND_DENALI_SPARE_AREA_SKIP_BYTES; + iowrite32(denali->oob_skip_bytes, denali->reg + SPARE_AREA_SKIP_BYTES); + denali_detect_max_banks(denali); + iowrite32(0x0F, denali->reg + RB_PIN_ENABLED); + iowrite32(CHIP_EN_DONT_CARE__FLAG, denali->reg + CHIP_ENABLE_DONT_CARE); + + iowrite32(0xffff, denali->reg + SPARE_AREA_MARKER); } -static struct nand_ecclayout nand_oob; +int denali_calc_ecc_bytes(int step_size, int strength) +{ + /* BCH code. 
Denali requires ecc.bytes to be multiple of 2 */ + return DIV_ROUND_UP(strength * fls(step_size * 8), 16) * 2; +} +EXPORT_SYMBOL(denali_calc_ecc_bytes); -int denali_init(struct denali_nand_info *denali) +static int denali_ecc_setup(struct mtd_info *mtd, struct nand_chip *chip, + struct denali_nand_info *denali) { - struct mtd_info *mtd = nand_to_mtd(&denali->nand); + int oobavail = mtd->oobsize - denali->oob_skip_bytes; int ret; - denali_hw_init(denali); + /* + * If .size and .strength are already set (usually by DT), + * check if they are supported by this controller. + */ + if (chip->ecc.size && chip->ecc.strength) + return nand_check_ecc_caps(chip, denali->ecc_caps, oobavail); + + /* + * We want .size and .strength closest to the chip's requirement + * unless NAND_ECC_MAXIMIZE is requested. + */ + if (!(chip->ecc.options & NAND_ECC_MAXIMIZE)) { + ret = nand_match_ecc_req(chip, denali->ecc_caps, oobavail); + if (!ret) + return 0; + } + + /* Max ECC strength is the last thing we can do */ + return nand_maximize_ecc(chip, denali->ecc_caps, oobavail); +} + +static struct nand_ecclayout nand_oob; + +static int denali_ooblayout_ecc(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + struct denali_nand_info *denali = mtd_to_denali(mtd); + struct nand_chip *chip = mtd_to_nand(mtd); + + if (section) + return -ERANGE; + + oobregion->offset = denali->oob_skip_bytes; + oobregion->length = chip->ecc.total; + + return 0; +} - mtd->name = "denali-nand"; - mtd->owner = THIS_MODULE; +static int denali_ooblayout_free(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + struct denali_nand_info *denali = mtd_to_denali(mtd); + struct nand_chip *chip = mtd_to_nand(mtd); + + if (section) + return -ERANGE; + + oobregion->offset = chip->ecc.total + denali->oob_skip_bytes; + oobregion->length = mtd->oobsize - oobregion->offset; + + return 0; +} - /* register the driver with the NAND core subsystem */ - denali->nand.select_chip = denali_select_chip; - denali->nand.cmdfunc = denali_cmdfunc; - denali->nand.read_byte = denali_read_byte; - denali->nand.read_buf = denali_read_buf; - denali->nand.waitfunc = denali_waitfunc; +static const struct mtd_ooblayout_ops denali_ooblayout_ops = { + .ecc = denali_ooblayout_ecc, + .free = denali_ooblayout_free, +}; + +static int denali_multidev_fixup(struct denali_nand_info *denali) +{ + struct nand_chip *chip = &denali->nand; + struct mtd_info *mtd = nand_to_mtd(chip); /* - * scan for NAND devices attached to the controller - * this is the first stage in a two step process to register - * with the nand subsystem + * Support for multi device: + * When the IP configuration is x16 capable and two x8 chips are + * connected in parallel, DEVICES_CONNECTED should be set to 2. + * In this case, the core framework knows nothing about this fact, + * so we should tell it the _logical_ pagesize and anything necessary. */ - if (nand_scan_ident(mtd, denali->max_banks, NULL)) { - ret = -ENXIO; - goto fail; - } + denali->devs_per_cs = ioread32(denali->reg + DEVICES_CONNECTED); -#ifdef CONFIG_SYS_NAND_USE_FLASH_BBT - /* check whether flash got BBT table (located at end of flash). As we - * use NAND_BBT_NO_OOB, the BBT page will start with - * bbt_pattern. We will have mirror pattern too */ - denali->nand.bbt_options |= NAND_BBT_USE_FLASH; /* - * We are using main + spare with ECC support. As BBT need ECC support, - * we need to ensure BBT code don't write to OOB for the BBT pattern. - * All BBT info will be stored into data area with ECC support. 
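The ecc.bytes formula in denali_calc_ecc_bytes() is easier to check with numbers plugged in. Using the SoCFPGA capability added to denali_dt.c later in this patch (512-byte ECC steps, 8-bit strength) as a worked example:

/*
 * Worked example: denali_calc_ecc_bytes(512, 8)
 *
 *   fls(512 * 8) = fls(4096)          = 13   parity bits per corrected bit
 *   8 * 13                            = 104  parity bits per 512-byte step
 *   DIV_ROUND_UP(104, 16) * 2 = 7 * 2 = 14   bytes, rounded up to an even count
 *
 * On a 2 KiB-page chip, for instance, that is 4 steps * 14 = 56 OOB bytes
 * consumed by ECC, which is the length denali_ooblayout_ecc() reports
 * starting at the oob_skip_bytes offset.
 */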
+ * On some SoCs, DEVICES_CONNECTED is not auto-detected. + * For those, DEVICES_CONNECTED is left to 0. Set 1 if it is the case. */ - denali->nand.bbt_options |= NAND_BBT_NO_OOB; -#endif + if (denali->devs_per_cs == 0) { + denali->devs_per_cs = 1; + iowrite32(1, denali->reg + DEVICES_CONNECTED); + } + + if (denali->devs_per_cs == 1) + return 0; + + if (denali->devs_per_cs != 2) { + dev_err(denali->dev, "unsupported number of devices %d\n", + denali->devs_per_cs); + return -EINVAL; + } + + /* 2 chips in parallel */ + mtd->size <<= 1; + mtd->erasesize <<= 1; + mtd->writesize <<= 1; + mtd->oobsize <<= 1; + chip->chipsize <<= 1; + chip->page_shift += 1; + chip->phys_erase_shift += 1; + chip->bbt_erase_shift += 1; + chip->chip_shift += 1; + chip->pagemask <<= 1; + chip->ecc.size <<= 1; + chip->ecc.bytes <<= 1; + chip->ecc.strength <<= 1; + denali->oob_skip_bytes <<= 1; + + return 0; +} + +int denali_init(struct denali_nand_info *denali) +{ + struct nand_chip *chip = &denali->nand; + struct mtd_info *mtd = nand_to_mtd(chip); + u32 features = ioread32(denali->reg + FEATURES); + int ret; + + denali_hw_init(denali); + + denali_clear_irq_all(denali); - denali->nand.ecc.mode = NAND_ECC_HW; - denali->nand.ecc.size = CONFIG_NAND_DENALI_ECC_SIZE; + denali_reset_banks(denali); + + denali->active_bank = DENALI_INVALID_BANK; + + chip->flash_node = dev_of_offset(denali->dev); + /* Fallback to the default name if DT did not give "label" property */ + if (!mtd->name) + mtd->name = "denali-nand"; + + chip->select_chip = denali_select_chip; + chip->read_byte = denali_read_byte; + chip->write_byte = denali_write_byte; + chip->read_word = denali_read_word; + chip->cmd_ctrl = denali_cmd_ctrl; + chip->dev_ready = denali_dev_ready; + chip->waitfunc = denali_waitfunc; + + if (features & FEATURES__INDEX_ADDR) { + denali->host_read = denali_indexed_read; + denali->host_write = denali_indexed_write; + } else { + denali->host_read = denali_direct_read; + denali->host_write = denali_direct_write; + } + + /* clk rate info is needed for setup_data_interface */ + if (denali->clk_x_rate) + chip->setup_data_interface = denali_setup_data_interface; + + ret = nand_scan_ident(mtd, denali->max_banks, NULL); + if (ret) + return ret; + + if (ioread32(denali->reg + FEATURES) & FEATURES__DMA) + denali->dma_avail = 1; + + if (denali->dma_avail) { + chip->buf_align = 16; + if (denali->caps & DENALI_CAP_DMA_64BIT) + denali->setup_dma = denali_setup_dma64; + else + denali->setup_dma = denali_setup_dma32; + } else { + chip->buf_align = 4; + } + + chip->options |= NAND_USE_BOUNCE_BUFFER; + chip->bbt_options |= NAND_BBT_USE_FLASH; + chip->bbt_options |= NAND_BBT_NO_OOB; + denali->nand.ecc.mode = NAND_ECC_HW_SYNDROME; /* no subpage writes on denali */ - denali->nand.options |= NAND_NO_SUBPAGE_WRITE; + chip->options |= NAND_NO_SUBPAGE_WRITE; - /* - * Tell driver the ecc strength. This register may be already set - * correctly. So we read this value out. 
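To make the doubling in denali_multidev_fixup() concrete, here is an illustrative (not board-specific) before/after view for two x8 chips behind one chip select. Note that denali_init() calls this fixup only after programming the controller registers, so the hardware keeps the per-chip values while the NAND core is told the doubled, logical ones:

/*
 * Illustration: DEVICES_CONNECTED = 2, two identical x8 chips in parallel
 * (example geometry: 2 KiB pages, 128 KiB blocks, 512/8 ECC per chip):
 *
 *                            per chip     logical device seen by the core
 *   mtd->writesize               2048         4096
 *   mtd->oobsize                   64          128
 *   mtd->erasesize             131072       262144
 *   chip->ecc.size                512         1024
 *   chip->ecc.bytes                14           28
 *   denali->oob_skip_bytes          2            4
 */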
- */ - denali->nand.ecc.strength = readl(denali->flash_reg + ECC_CORRECTION); - switch (denali->nand.ecc.size) { - case 512: - denali->nand.ecc.bytes = - (denali->nand.ecc.strength * 13 + 15) / 16 * 2; - break; - case 1024: - denali->nand.ecc.bytes = - (denali->nand.ecc.strength * 14 + 15) / 16 * 2; - break; - default: - pr_err("Unsupported ECC size\n"); - ret = -EINVAL; - goto fail; + ret = denali_ecc_setup(mtd, chip, denali); + if (ret) { + dev_err(denali->dev, "Failed to setup ECC settings.\n"); + return ret; } + + dev_dbg(denali->dev, + "chosen ECC settings: step=%d, strength=%d, bytes=%d\n", + chip->ecc.size, chip->ecc.strength, chip->ecc.bytes); + + iowrite32(FIELD_PREP(ECC_CORRECTION__ERASE_THRESHOLD, 1) | + FIELD_PREP(ECC_CORRECTION__VALUE, chip->ecc.strength), + denali->reg + ECC_CORRECTION); + iowrite32(mtd->erasesize / mtd->writesize, + denali->reg + PAGES_PER_BLOCK); + iowrite32(chip->options & NAND_BUSWIDTH_16 ? 1 : 0, + denali->reg + DEVICE_WIDTH); + iowrite32(chip->options & NAND_ROW_ADDR_3 ? 0 : TWO_ROW_ADDR_CYCLES__FLAG, + denali->reg + TWO_ROW_ADDR_CYCLES); + iowrite32(mtd->writesize, denali->reg + DEVICE_MAIN_AREA_SIZE); + iowrite32(mtd->oobsize, denali->reg + DEVICE_SPARE_AREA_SIZE); + + iowrite32(chip->ecc.size, denali->reg + CFG_DATA_BLOCK_SIZE); + iowrite32(chip->ecc.size, denali->reg + CFG_LAST_DATA_BLOCK_SIZE); + /* chip->ecc.steps is set by nand_scan_tail(); not available here */ + iowrite32(mtd->writesize / chip->ecc.size, + denali->reg + CFG_NUM_DATA_BLOCKS); + + mtd_set_ooblayout(mtd, &denali_ooblayout_ops); + nand_oob.eccbytes = denali->nand.ecc.bytes; denali->nand.ecc.layout = &nand_oob; - writel(mtd->erasesize / mtd->writesize, - denali->flash_reg + PAGES_PER_BLOCK); - writel(denali->nand.options & NAND_BUSWIDTH_16 ? 1 : 0, - denali->flash_reg + DEVICE_WIDTH); - writel(mtd->writesize, - denali->flash_reg + DEVICE_MAIN_AREA_SIZE); - writel(mtd->oobsize, - denali->flash_reg + DEVICE_SPARE_AREA_SIZE); - if (readl(denali->flash_reg + DEVICES_CONNECTED) == 0) - writel(1, denali->flash_reg + DEVICES_CONNECTED); - - /* override the default operations */ - denali->nand.ecc.read_page = denali_read_page; - denali->nand.ecc.read_page_raw = denali_read_page_raw; - denali->nand.ecc.write_page = denali_write_page; - denali->nand.ecc.write_page_raw = denali_write_page_raw; - denali->nand.ecc.read_oob = denali_read_oob; - denali->nand.ecc.write_oob = denali_write_oob; - - if (nand_scan_tail(mtd)) { - ret = -ENXIO; - goto fail; + if (chip->options & NAND_BUSWIDTH_16) { + chip->read_buf = denali_read_buf16; + chip->write_buf = denali_write_buf16; + } else { + chip->read_buf = denali_read_buf; + chip->write_buf = denali_write_buf; } + chip->ecc.options |= NAND_ECC_CUSTOM_PAGE_ACCESS; + chip->ecc.read_page = denali_read_page; + chip->ecc.read_page_raw = denali_read_page_raw; + chip->ecc.write_page = denali_write_page; + chip->ecc.write_page_raw = denali_write_page_raw; + chip->ecc.read_oob = denali_read_oob; + chip->ecc.write_oob = denali_write_oob; + chip->erase = denali_erase; + + ret = denali_multidev_fixup(denali); + if (ret) + return ret; + + /* + * This buffer is DMA-mapped by denali_{read,write}_page_raw. Do not + * use devm_kmalloc() because the memory allocated by devm_ does not + * guarantee DMA-safe alignment. 
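A note on the host_read/host_write hooks selected from FEATURES__INDEX_ADDR a little earlier in denali_init(): whether the data/command interface is memory-mapped directly or reached through an index/data register pair is a synthesis-time option of the IP. The SPL code at the end of this patch uses the same indexed mechanism explicitly; as a rough sketch (helper name is invented, register names are borrowed from denali_spl.c, and the real implementations live in a part of denali.c not shown in this excerpt):

/* Sketch only: indexed access goes through a control/data register pair. */
static void denali_indexed_write_sketch(struct denali_nand_info *denali,
					u32 addr, u32 data)
{
	iowrite32(addr, denali->host + INDEX_CTRL_REG);	/* 0x00: select the target */
	iowrite32(data, denali->host + INDEX_DATA_REG);	/* 0x10: move the payload  */
}

/* The direct variant simply uses addr as an offset into denali->host. */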
+ */ + denali->buf = kmalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL); + if (!denali->buf) + return -ENOMEM; + + ret = nand_scan_tail(mtd); + if (ret) + goto free_buf; ret = nand_register(0, mtd); + if (ret) { + dev_err(denali->dev, "Failed to register MTD: %d\n", ret); + goto free_buf; + } + return 0; + +free_buf: + kfree(denali->buf); -fail: return ret; } @@ -1289,8 +1380,8 @@ static int __board_nand_init(void) * In the future, these base addresses should be taken from * Device Tree or platform data. */ - denali->flash_reg = (void __iomem *)CONFIG_SYS_NAND_REGS_BASE; - denali->flash_mem = (void __iomem *)CONFIG_SYS_NAND_DATA_BASE; + denali->reg = (void __iomem *)CONFIG_SYS_NAND_REGS_BASE; + denali->host = (void __iomem *)CONFIG_SYS_NAND_DATA_BASE; return denali_init(denali); } diff --git a/drivers/mtd/nand/denali.h b/drivers/mtd/nand/denali.h index f796f0dad11..04b4ae26834 100644 --- a/drivers/mtd/nand/denali.h +++ b/drivers/mtd/nand/denali.h @@ -8,466 +8,319 @@ #ifndef __DENALI_H__ #define __DENALI_H__ +#include <linux/bitops.h> #include <linux/mtd/nand.h> +#include <linux/types.h> #define DEVICE_RESET 0x0 -#define DEVICE_RESET__BANK0 0x0001 -#define DEVICE_RESET__BANK1 0x0002 -#define DEVICE_RESET__BANK2 0x0004 -#define DEVICE_RESET__BANK3 0x0008 +#define DEVICE_RESET__BANK(bank) BIT(bank) #define TRANSFER_SPARE_REG 0x10 -#define TRANSFER_SPARE_REG__FLAG 0x0001 +#define TRANSFER_SPARE_REG__FLAG BIT(0) #define LOAD_WAIT_CNT 0x20 -#define LOAD_WAIT_CNT__VALUE 0xffff +#define LOAD_WAIT_CNT__VALUE GENMASK(15, 0) #define PROGRAM_WAIT_CNT 0x30 -#define PROGRAM_WAIT_CNT__VALUE 0xffff +#define PROGRAM_WAIT_CNT__VALUE GENMASK(15, 0) #define ERASE_WAIT_CNT 0x40 -#define ERASE_WAIT_CNT__VALUE 0xffff +#define ERASE_WAIT_CNT__VALUE GENMASK(15, 0) #define INT_MON_CYCCNT 0x50 -#define INT_MON_CYCCNT__VALUE 0xffff +#define INT_MON_CYCCNT__VALUE GENMASK(15, 0) #define RB_PIN_ENABLED 0x60 -#define RB_PIN_ENABLED__BANK0 0x0001 -#define RB_PIN_ENABLED__BANK1 0x0002 -#define RB_PIN_ENABLED__BANK2 0x0004 -#define RB_PIN_ENABLED__BANK3 0x0008 +#define RB_PIN_ENABLED__BANK(bank) BIT(bank) #define MULTIPLANE_OPERATION 0x70 -#define MULTIPLANE_OPERATION__FLAG 0x0001 +#define MULTIPLANE_OPERATION__FLAG BIT(0) #define MULTIPLANE_READ_ENABLE 0x80 -#define MULTIPLANE_READ_ENABLE__FLAG 0x0001 +#define MULTIPLANE_READ_ENABLE__FLAG BIT(0) #define COPYBACK_DISABLE 0x90 -#define COPYBACK_DISABLE__FLAG 0x0001 +#define COPYBACK_DISABLE__FLAG BIT(0) #define CACHE_WRITE_ENABLE 0xa0 -#define CACHE_WRITE_ENABLE__FLAG 0x0001 +#define CACHE_WRITE_ENABLE__FLAG BIT(0) #define CACHE_READ_ENABLE 0xb0 -#define CACHE_READ_ENABLE__FLAG 0x0001 +#define CACHE_READ_ENABLE__FLAG BIT(0) #define PREFETCH_MODE 0xc0 -#define PREFETCH_MODE__PREFETCH_EN 0x0001 -#define PREFETCH_MODE__PREFETCH_BURST_LENGTH 0xfff0 +#define PREFETCH_MODE__PREFETCH_EN BIT(0) +#define PREFETCH_MODE__PREFETCH_BURST_LENGTH GENMASK(15, 4) #define CHIP_ENABLE_DONT_CARE 0xd0 -#define CHIP_EN_DONT_CARE__FLAG 0x01 +#define CHIP_EN_DONT_CARE__FLAG BIT(0) #define ECC_ENABLE 0xe0 -#define ECC_ENABLE__FLAG 0x0001 +#define ECC_ENABLE__FLAG BIT(0) #define GLOBAL_INT_ENABLE 0xf0 -#define GLOBAL_INT_EN_FLAG 0x01 +#define GLOBAL_INT_EN_FLAG BIT(0) -#define WE_2_RE 0x100 -#define WE_2_RE__VALUE 0x003f +#define TWHR2_AND_WE_2_RE 0x100 +#define TWHR2_AND_WE_2_RE__WE_2_RE GENMASK(5, 0) +#define TWHR2_AND_WE_2_RE__TWHR2 GENMASK(13, 8) -#define ADDR_2_DATA 0x110 -#define ADDR_2_DATA__VALUE 0x003f +#define TCWAW_AND_ADDR_2_DATA 0x110 +/* The width of ADDR_2_DATA is 6 bit for old IP, 7 
bit for new IP */ +#define TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA GENMASK(6, 0) +#define TCWAW_AND_ADDR_2_DATA__TCWAW GENMASK(13, 8) #define RE_2_WE 0x120 -#define RE_2_WE__VALUE 0x003f +#define RE_2_WE__VALUE GENMASK(5, 0) #define ACC_CLKS 0x130 -#define ACC_CLKS__VALUE 0x000f +#define ACC_CLKS__VALUE GENMASK(3, 0) #define NUMBER_OF_PLANES 0x140 -#define NUMBER_OF_PLANES__VALUE 0x0007 +#define NUMBER_OF_PLANES__VALUE GENMASK(2, 0) #define PAGES_PER_BLOCK 0x150 -#define PAGES_PER_BLOCK__VALUE 0xffff +#define PAGES_PER_BLOCK__VALUE GENMASK(15, 0) #define DEVICE_WIDTH 0x160 -#define DEVICE_WIDTH__VALUE 0x0003 +#define DEVICE_WIDTH__VALUE GENMASK(1, 0) #define DEVICE_MAIN_AREA_SIZE 0x170 -#define DEVICE_MAIN_AREA_SIZE__VALUE 0xffff +#define DEVICE_MAIN_AREA_SIZE__VALUE GENMASK(15, 0) #define DEVICE_SPARE_AREA_SIZE 0x180 -#define DEVICE_SPARE_AREA_SIZE__VALUE 0xffff +#define DEVICE_SPARE_AREA_SIZE__VALUE GENMASK(15, 0) #define TWO_ROW_ADDR_CYCLES 0x190 -#define TWO_ROW_ADDR_CYCLES__FLAG 0x0001 +#define TWO_ROW_ADDR_CYCLES__FLAG BIT(0) #define MULTIPLANE_ADDR_RESTRICT 0x1a0 -#define MULTIPLANE_ADDR_RESTRICT__FLAG 0x0001 +#define MULTIPLANE_ADDR_RESTRICT__FLAG BIT(0) #define ECC_CORRECTION 0x1b0 -#define ECC_CORRECTION__VALUE 0x001f +#define ECC_CORRECTION__VALUE GENMASK(4, 0) +#define ECC_CORRECTION__ERASE_THRESHOLD GENMASK(31, 16) #define READ_MODE 0x1c0 -#define READ_MODE__VALUE 0x000f +#define READ_MODE__VALUE GENMASK(3, 0) #define WRITE_MODE 0x1d0 -#define WRITE_MODE__VALUE 0x000f +#define WRITE_MODE__VALUE GENMASK(3, 0) #define COPYBACK_MODE 0x1e0 -#define COPYBACK_MODE__VALUE 0x000f +#define COPYBACK_MODE__VALUE GENMASK(3, 0) #define RDWR_EN_LO_CNT 0x1f0 -#define RDWR_EN_LO_CNT__VALUE 0x001f +#define RDWR_EN_LO_CNT__VALUE GENMASK(4, 0) #define RDWR_EN_HI_CNT 0x200 -#define RDWR_EN_HI_CNT__VALUE 0x001f +#define RDWR_EN_HI_CNT__VALUE GENMASK(4, 0) #define MAX_RD_DELAY 0x210 -#define MAX_RD_DELAY__VALUE 0x000f +#define MAX_RD_DELAY__VALUE GENMASK(3, 0) #define CS_SETUP_CNT 0x220 -#define CS_SETUP_CNT__VALUE 0x001f +#define CS_SETUP_CNT__VALUE GENMASK(4, 0) +#define CS_SETUP_CNT__TWB GENMASK(17, 12) #define SPARE_AREA_SKIP_BYTES 0x230 -#define SPARE_AREA_SKIP_BYTES__VALUE 0x003f +#define SPARE_AREA_SKIP_BYTES__VALUE GENMASK(5, 0) #define SPARE_AREA_MARKER 0x240 -#define SPARE_AREA_MARKER__VALUE 0xffff +#define SPARE_AREA_MARKER__VALUE GENMASK(15, 0) #define DEVICES_CONNECTED 0x250 -#define DEVICES_CONNECTED__VALUE 0x0007 +#define DEVICES_CONNECTED__VALUE GENMASK(2, 0) #define DIE_MASK 0x260 -#define DIE_MASK__VALUE 0x00ff +#define DIE_MASK__VALUE GENMASK(7, 0) #define FIRST_BLOCK_OF_NEXT_PLANE 0x270 -#define FIRST_BLOCK_OF_NEXT_PLANE__VALUE 0xffff +#define FIRST_BLOCK_OF_NEXT_PLANE__VALUE GENMASK(15, 0) #define WRITE_PROTECT 0x280 -#define WRITE_PROTECT__FLAG 0x0001 +#define WRITE_PROTECT__FLAG BIT(0) #define RE_2_RE 0x290 -#define RE_2_RE__VALUE 0x003f +#define RE_2_RE__VALUE GENMASK(5, 0) #define MANUFACTURER_ID 0x300 -#define MANUFACTURER_ID__VALUE 0x00ff +#define MANUFACTURER_ID__VALUE GENMASK(7, 0) #define DEVICE_ID 0x310 -#define DEVICE_ID__VALUE 0x00ff +#define DEVICE_ID__VALUE GENMASK(7, 0) #define DEVICE_PARAM_0 0x320 -#define DEVICE_PARAM_0__VALUE 0x00ff +#define DEVICE_PARAM_0__VALUE GENMASK(7, 0) #define DEVICE_PARAM_1 0x330 -#define DEVICE_PARAM_1__VALUE 0x00ff +#define DEVICE_PARAM_1__VALUE GENMASK(7, 0) #define DEVICE_PARAM_2 0x340 -#define DEVICE_PARAM_2__VALUE 0x00ff +#define DEVICE_PARAM_2__VALUE GENMASK(7, 0) #define LOGICAL_PAGE_DATA_SIZE 0x350 -#define 
LOGICAL_PAGE_DATA_SIZE__VALUE 0xffff +#define LOGICAL_PAGE_DATA_SIZE__VALUE GENMASK(15, 0) #define LOGICAL_PAGE_SPARE_SIZE 0x360 -#define LOGICAL_PAGE_SPARE_SIZE__VALUE 0xffff +#define LOGICAL_PAGE_SPARE_SIZE__VALUE GENMASK(15, 0) #define REVISION 0x370 -#define REVISION__VALUE 0xffff +#define REVISION__VALUE GENMASK(15, 0) #define ONFI_DEVICE_FEATURES 0x380 -#define ONFI_DEVICE_FEATURES__VALUE 0x003f +#define ONFI_DEVICE_FEATURES__VALUE GENMASK(5, 0) #define ONFI_OPTIONAL_COMMANDS 0x390 -#define ONFI_OPTIONAL_COMMANDS__VALUE 0x003f +#define ONFI_OPTIONAL_COMMANDS__VALUE GENMASK(5, 0) #define ONFI_TIMING_MODE 0x3a0 -#define ONFI_TIMING_MODE__VALUE 0x003f +#define ONFI_TIMING_MODE__VALUE GENMASK(5, 0) #define ONFI_PGM_CACHE_TIMING_MODE 0x3b0 -#define ONFI_PGM_CACHE_TIMING_MODE__VALUE 0x003f +#define ONFI_PGM_CACHE_TIMING_MODE__VALUE GENMASK(5, 0) #define ONFI_DEVICE_NO_OF_LUNS 0x3c0 -#define ONFI_DEVICE_NO_OF_LUNS__NO_OF_LUNS 0x00ff -#define ONFI_DEVICE_NO_OF_LUNS__ONFI_DEVICE 0x0100 +#define ONFI_DEVICE_NO_OF_LUNS__NO_OF_LUNS GENMASK(7, 0) +#define ONFI_DEVICE_NO_OF_LUNS__ONFI_DEVICE BIT(8) #define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L 0x3d0 -#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L__VALUE 0xffff +#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L__VALUE GENMASK(15, 0) #define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U 0x3e0 -#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U__VALUE 0xffff - -#define FEATURES 0x3f0 -#define FEATURES__N_BANKS 0x0003 -#define FEATURES__ECC_MAX_ERR 0x003c -#define FEATURES__DMA 0x0040 -#define FEATURES__CMD_DMA 0x0080 -#define FEATURES__PARTITION 0x0100 -#define FEATURES__XDMA_SIDEBAND 0x0200 -#define FEATURES__GPREG 0x0400 -#define FEATURES__INDEX_ADDR 0x0800 +#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U__VALUE GENMASK(15, 0) + +#define FEATURES 0x3f0 +#define FEATURES__N_BANKS GENMASK(1, 0) +#define FEATURES__ECC_MAX_ERR GENMASK(5, 2) +#define FEATURES__DMA BIT(6) +#define FEATURES__CMD_DMA BIT(7) +#define FEATURES__PARTITION BIT(8) +#define FEATURES__XDMA_SIDEBAND BIT(9) +#define FEATURES__GPREG BIT(10) +#define FEATURES__INDEX_ADDR BIT(11) #define TRANSFER_MODE 0x400 -#define TRANSFER_MODE__VALUE 0x0003 - -#define INTR_STATUS(__bank) (0x410 + ((__bank) * 0x50)) -#define INTR_EN(__bank) (0x420 + ((__bank) * 0x50)) - -/* - * Some versions of the IP have the ECC fixup handled in hardware. In this - * configuration we only get interrupted when the error is uncorrectable. - * Unfortunately this bit replaces INTR_STATUS__ECC_TRANSACTION_DONE from the - * old IP. 
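The comment removed here survives in spirit as the /* new IP */ and /* old IP */ annotations on the INTR__* definitions just below: bit[1:0] changes meaning depending on whether the IP fixes ECC errors in hardware. Code waiting on an ECC event therefore has to derive its mask from the capability flag instead of hard-coding a bit; a sketch (not taken from the patch):

/* Sketch only: pick the ECC-related interrupt bits for this IP variant. */
static u32 denali_ecc_irq_bits(struct denali_nand_info *denali)
{
	if (denali->caps & DENALI_CAP_HW_ECC_FIXUP)
		/* new IP: only uncorrectable errors raise an interrupt */
		return INTR__ECC_UNCOR_ERR;

	/* old IP: bit 0 signals transaction done, bit 1 a correctable error */
	return INTR__ECC_TRANSACTION_DONE | INTR__ECC_ERR;
}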
- */ -#define INTR_STATUS__ECC_UNCOR_ERR 0x0001 -#define INTR_STATUS__ECC_TRANSACTION_DONE 0x0001 -#define INTR_STATUS__ECC_ERR 0x0002 -#define INTR_STATUS__DMA_CMD_COMP 0x0004 -#define INTR_STATUS__TIME_OUT 0x0008 -#define INTR_STATUS__PROGRAM_FAIL 0x0010 -#define INTR_STATUS__ERASE_FAIL 0x0020 -#define INTR_STATUS__LOAD_COMP 0x0040 -#define INTR_STATUS__PROGRAM_COMP 0x0080 -#define INTR_STATUS__ERASE_COMP 0x0100 -#define INTR_STATUS__PIPE_CPYBCK_CMD_COMP 0x0200 -#define INTR_STATUS__LOCKED_BLK 0x0400 -#define INTR_STATUS__UNSUP_CMD 0x0800 -#define INTR_STATUS__INT_ACT 0x1000 -#define INTR_STATUS__RST_COMP 0x2000 -#define INTR_STATUS__PIPE_CMD_ERR 0x4000 -#define INTR_STATUS__PAGE_XFER_INC 0x8000 - -#define INTR_EN__ECC_TRANSACTION_DONE 0x0001 -#define INTR_EN__ECC_ERR 0x0002 -#define INTR_EN__DMA_CMD_COMP 0x0004 -#define INTR_EN__TIME_OUT 0x0008 -#define INTR_EN__PROGRAM_FAIL 0x0010 -#define INTR_EN__ERASE_FAIL 0x0020 -#define INTR_EN__LOAD_COMP 0x0040 -#define INTR_EN__PROGRAM_COMP 0x0080 -#define INTR_EN__ERASE_COMP 0x0100 -#define INTR_EN__PIPE_CPYBCK_CMD_COMP 0x0200 -#define INTR_EN__LOCKED_BLK 0x0400 -#define INTR_EN__UNSUP_CMD 0x0800 -#define INTR_EN__INT_ACT 0x1000 -#define INTR_EN__RST_COMP 0x2000 -#define INTR_EN__PIPE_CMD_ERR 0x4000 -#define INTR_EN__PAGE_XFER_INC 0x8000 - -#define PAGE_CNT(__bank) (0x430 + ((__bank) * 0x50)) -#define ERR_PAGE_ADDR(__bank) (0x440 + ((__bank) * 0x50)) -#define ERR_BLOCK_ADDR(__bank) (0x450 + ((__bank) * 0x50)) - -#define DATA_INTR 0x550 -#define DATA_INTR__WRITE_SPACE_AV 0x0001 -#define DATA_INTR__READ_DATA_AV 0x0002 - -#define DATA_INTR_EN 0x560 -#define DATA_INTR_EN__WRITE_SPACE_AV 0x0001 -#define DATA_INTR_EN__READ_DATA_AV 0x0002 - -#define GPREG_0 0x570 -#define GPREG_0__VALUE 0xffff - -#define GPREG_1 0x580 -#define GPREG_1__VALUE 0xffff - -#define GPREG_2 0x590 -#define GPREG_2__VALUE 0xffff - -#define GPREG_3 0x5a0 -#define GPREG_3__VALUE 0xffff +#define TRANSFER_MODE__VALUE GENMASK(1, 0) + +#define INTR_STATUS(bank) (0x410 + (bank) * 0x50) +#define INTR_EN(bank) (0x420 + (bank) * 0x50) +/* bit[1:0] is used differently depending on IP version */ +#define INTR__ECC_UNCOR_ERR BIT(0) /* new IP */ +#define INTR__ECC_TRANSACTION_DONE BIT(0) /* old IP */ +#define INTR__ECC_ERR BIT(1) /* old IP */ +#define INTR__DMA_CMD_COMP BIT(2) +#define INTR__TIME_OUT BIT(3) +#define INTR__PROGRAM_FAIL BIT(4) +#define INTR__ERASE_FAIL BIT(5) +#define INTR__LOAD_COMP BIT(6) +#define INTR__PROGRAM_COMP BIT(7) +#define INTR__ERASE_COMP BIT(8) +#define INTR__PIPE_CPYBCK_CMD_COMP BIT(9) +#define INTR__LOCKED_BLK BIT(10) +#define INTR__UNSUP_CMD BIT(11) +#define INTR__INT_ACT BIT(12) +#define INTR__RST_COMP BIT(13) +#define INTR__PIPE_CMD_ERR BIT(14) +#define INTR__PAGE_XFER_INC BIT(15) +#define INTR__ERASED_PAGE BIT(16) + +#define PAGE_CNT(bank) (0x430 + (bank) * 0x50) +#define ERR_PAGE_ADDR(bank) (0x440 + (bank) * 0x50) +#define ERR_BLOCK_ADDR(bank) (0x450 + (bank) * 0x50) #define ECC_THRESHOLD 0x600 -#define ECC_THRESHOLD__VALUE 0x03ff +#define ECC_THRESHOLD__VALUE GENMASK(9, 0) #define ECC_ERROR_BLOCK_ADDRESS 0x610 -#define ECC_ERROR_BLOCK_ADDRESS__VALUE 0xffff +#define ECC_ERROR_BLOCK_ADDRESS__VALUE GENMASK(15, 0) #define ECC_ERROR_PAGE_ADDRESS 0x620 -#define ECC_ERROR_PAGE_ADDRESS__VALUE 0x0fff -#define ECC_ERROR_PAGE_ADDRESS__BANK 0xf000 +#define ECC_ERROR_PAGE_ADDRESS__VALUE GENMASK(11, 0) +#define ECC_ERROR_PAGE_ADDRESS__BANK GENMASK(15, 12) #define ECC_ERROR_ADDRESS 0x630 -#define ECC_ERROR_ADDRESS__OFFSET 0x0fff -#define ECC_ERROR_ADDRESS__SECTOR_NR 
0xf000 +#define ECC_ERROR_ADDRESS__OFFSET GENMASK(11, 0) +#define ECC_ERROR_ADDRESS__SECTOR GENMASK(15, 12) #define ERR_CORRECTION_INFO 0x640 -#define ERR_CORRECTION_INFO__BYTEMASK 0x00ff -#define ERR_CORRECTION_INFO__DEVICE_NR 0x0f00 -#define ERR_CORRECTION_INFO__ERROR_TYPE 0x4000 -#define ERR_CORRECTION_INFO__LAST_ERR_INFO 0x8000 +#define ERR_CORRECTION_INFO__BYTE GENMASK(7, 0) +#define ERR_CORRECTION_INFO__DEVICE GENMASK(11, 8) +#define ERR_CORRECTION_INFO__UNCOR BIT(14) +#define ERR_CORRECTION_INFO__LAST_ERR BIT(15) + +#define ECC_COR_INFO(bank) (0x650 + (bank) / 2 * 0x10) +#define ECC_COR_INFO__SHIFT(bank) ((bank) % 2 * 8) +#define ECC_COR_INFO__MAX_ERRORS GENMASK(6, 0) +#define ECC_COR_INFO__UNCOR_ERR BIT(7) + +#define CFG_DATA_BLOCK_SIZE 0x6b0 + +#define CFG_LAST_DATA_BLOCK_SIZE 0x6c0 + +#define CFG_NUM_DATA_BLOCKS 0x6d0 + +#define CFG_META_DATA_SIZE 0x6e0 #define DMA_ENABLE 0x700 -#define DMA_ENABLE__FLAG 0x0001 +#define DMA_ENABLE__FLAG BIT(0) #define IGNORE_ECC_DONE 0x710 -#define IGNORE_ECC_DONE__FLAG 0x0001 +#define IGNORE_ECC_DONE__FLAG BIT(0) #define DMA_INTR 0x720 -#define DMA_INTR__TARGET_ERROR 0x0001 -#define DMA_INTR__DESC_COMP_CHANNEL0 0x0002 -#define DMA_INTR__DESC_COMP_CHANNEL1 0x0004 -#define DMA_INTR__DESC_COMP_CHANNEL2 0x0008 -#define DMA_INTR__DESC_COMP_CHANNEL3 0x0010 -#define DMA_INTR__MEMCOPY_DESC_COMP 0x0020 - #define DMA_INTR_EN 0x730 -#define DMA_INTR_EN__TARGET_ERROR 0x0001 -#define DMA_INTR_EN__DESC_COMP_CHANNEL0 0x0002 -#define DMA_INTR_EN__DESC_COMP_CHANNEL1 0x0004 -#define DMA_INTR_EN__DESC_COMP_CHANNEL2 0x0008 -#define DMA_INTR_EN__DESC_COMP_CHANNEL3 0x0010 -#define DMA_INTR_EN__MEMCOPY_DESC_COMP 0x0020 +#define DMA_INTR__TARGET_ERROR BIT(0) +#define DMA_INTR__DESC_COMP_CHANNEL0 BIT(1) +#define DMA_INTR__DESC_COMP_CHANNEL1 BIT(2) +#define DMA_INTR__DESC_COMP_CHANNEL2 BIT(3) +#define DMA_INTR__DESC_COMP_CHANNEL3 BIT(4) +#define DMA_INTR__MEMCOPY_DESC_COMP BIT(5) #define TARGET_ERR_ADDR_LO 0x740 -#define TARGET_ERR_ADDR_LO__VALUE 0xffff +#define TARGET_ERR_ADDR_LO__VALUE GENMASK(15, 0) #define TARGET_ERR_ADDR_HI 0x750 -#define TARGET_ERR_ADDR_HI__VALUE 0xffff +#define TARGET_ERR_ADDR_HI__VALUE GENMASK(15, 0) #define CHNL_ACTIVE 0x760 -#define CHNL_ACTIVE__CHANNEL0 0x0001 -#define CHNL_ACTIVE__CHANNEL1 0x0002 -#define CHNL_ACTIVE__CHANNEL2 0x0004 -#define CHNL_ACTIVE__CHANNEL3 0x0008 - -#define ACTIVE_SRC_ID 0x800 -#define ACTIVE_SRC_ID__VALUE 0x00ff - -#define PTN_INTR 0x810 -#define PTN_INTR__CONFIG_ERROR 0x0001 -#define PTN_INTR__ACCESS_ERROR_BANK0 0x0002 -#define PTN_INTR__ACCESS_ERROR_BANK1 0x0004 -#define PTN_INTR__ACCESS_ERROR_BANK2 0x0008 -#define PTN_INTR__ACCESS_ERROR_BANK3 0x0010 -#define PTN_INTR__REG_ACCESS_ERROR 0x0020 - -#define PTN_INTR_EN 0x820 -#define PTN_INTR_EN__CONFIG_ERROR 0x0001 -#define PTN_INTR_EN__ACCESS_ERROR_BANK0 0x0002 -#define PTN_INTR_EN__ACCESS_ERROR_BANK1 0x0004 -#define PTN_INTR_EN__ACCESS_ERROR_BANK2 0x0008 -#define PTN_INTR_EN__ACCESS_ERROR_BANK3 0x0010 -#define PTN_INTR_EN__REG_ACCESS_ERROR 0x0020 - -#define PERM_SRC_ID(__bank) (0x830 + ((__bank) * 0x40)) -#define PERM_SRC_ID__SRCID 0x00ff -#define PERM_SRC_ID__DIRECT_ACCESS_ACTIVE 0x0800 -#define PERM_SRC_ID__WRITE_ACTIVE 0x2000 -#define PERM_SRC_ID__READ_ACTIVE 0x4000 -#define PERM_SRC_ID__PARTITION_VALID 0x8000 - -#define MIN_BLK_ADDR(__bank) (0x840 + ((__bank) * 0x40)) -#define MIN_BLK_ADDR__VALUE 0xffff - -#define MAX_BLK_ADDR(__bank) (0x850 + ((__bank) * 0x40)) -#define MAX_BLK_ADDR__VALUE 0xffff - -#define MIN_MAX_BANK(__bank) (0x860 + ((__bank) * 0x40)) 
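Converting the field definitions to BIT()/GENMASK() lets the C file use FIELD_PREP()/FIELD_GET() from <linux/bitfield.h> instead of hand-written shift-and-mask code. As an illustration (a sketch along the lines of what the hardware ECC fixup path does, not a verbatim excerpt), reading the per-bank correction count from the new ECC_COR_INFO register could look like:

#include <linux/bitfield.h>
#include "denali.h"

/* Sketch: bitflips corrected in the last transaction on the given bank. */
static unsigned int denali_corrected_errors(struct denali_nand_info *denali,
					    int bank)
{
	u32 info = ioread32(denali->reg + ECC_COR_INFO(bank));

	/* two banks share one register; shift this bank's byte down first */
	info >>= ECC_COR_INFO__SHIFT(bank);

	return FIELD_GET(ECC_COR_INFO__MAX_ERRORS, info);
}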
-#define MIN_MAX_BANK__MIN_VALUE 0x0003 -#define MIN_MAX_BANK__MAX_VALUE 0x000c - -/* lld.h */ -#define GOOD_BLOCK 0 -#define DEFECTIVE_BLOCK 1 -#define READ_ERROR 2 - -#define CLK_X 5 -#define CLK_MULTI 4 - -/* spectraswconfig.h */ -#define CMD_DMA 0 - -#define SPECTRA_PARTITION_ID 0 -/**** Block Table and Reserved Block Parameters *****/ -#define SPECTRA_START_BLOCK 3 -#define NUM_FREE_BLOCKS_GATE 30 - -/* KBV - Updated to LNW scratch register address */ -#define SCRATCH_REG_ADDR CONFIG_MTD_NAND_DENALI_SCRATCH_REG_ADDR -#define SCRATCH_REG_SIZE 64 - -#define GLOB_HWCTL_DEFAULT_BLKS 2048 - -#define CUSTOM_CONF_PARAMS 0 - -#define INDEX_CTRL_REG 0x0 -#define INDEX_DATA_REG 0x10 - -#define MODE_00 0x00000000 -#define MODE_01 0x04000000 -#define MODE_10 0x08000000 -#define MODE_11 0x0C000000 - - -#define DATA_TRANSFER_MODE 0 -#define PROTECTION_PER_BLOCK 1 -#define LOAD_WAIT_COUNT 2 -#define PROGRAM_WAIT_COUNT 3 -#define ERASE_WAIT_COUNT 4 -#define INT_MONITOR_CYCLE_COUNT 5 -#define READ_BUSY_PIN_ENABLED 6 -#define MULTIPLANE_OPERATION_SUPPORT 7 -#define PRE_FETCH_MODE 8 -#define CE_DONT_CARE_SUPPORT 9 -#define COPYBACK_SUPPORT 10 -#define CACHE_WRITE_SUPPORT 11 -#define CACHE_READ_SUPPORT 12 -#define NUM_PAGES_IN_BLOCK 13 -#define ECC_ENABLE_SELECT 14 -#define WRITE_ENABLE_2_READ_ENABLE 15 -#define ADDRESS_2_DATA 16 -#define READ_ENABLE_2_WRITE_ENABLE 17 -#define TWO_ROW_ADDRESS_CYCLES 18 -#define MULTIPLANE_ADDRESS_RESTRICT 19 -#define ACC_CLOCKS 20 -#define READ_WRITE_ENABLE_LOW_COUNT 21 -#define READ_WRITE_ENABLE_HIGH_COUNT 22 - -#define ECC_SECTOR_SIZE 512 - -#define DENALI_BUF_SIZE (NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE) - -struct nand_buf { - int head; - int tail; - /* seprating dma_buf as buf can be used for status read purpose */ - uint8_t dma_buf[DENALI_BUF_SIZE] __aligned(64); - uint8_t buf[DENALI_BUF_SIZE]; -}; +#define CHNL_ACTIVE__CHANNEL0 BIT(0) +#define CHNL_ACTIVE__CHANNEL1 BIT(1) +#define CHNL_ACTIVE__CHANNEL2 BIT(2) +#define CHNL_ACTIVE__CHANNEL3 BIT(3) -#define INTEL_CE4100 1 -#define INTEL_MRST 2 -#define DT 3 +struct udevice; struct denali_nand_info { struct nand_chip nand; unsigned long clk_x_rate; /* bus interface clock rate */ - int flash_bank; /* currently selected chip */ - int status; - int platform; - struct nand_buf buf; - struct device *dev; - int total_used_banks; - uint32_t block; /* stored for future use */ + int active_bank; /* currently selected bank */ + struct udevice *dev; uint32_t page; - void __iomem *flash_reg; /* Mapped io reg base address */ - void __iomem *flash_mem; /* Mapped io reg base address */ - - /* elements used by ISR */ - /*struct completion complete;*/ - - uint32_t irq_status; - int irq_debug_array[32]; - int idx; + void __iomem *reg; /* Register Interface */ + void __iomem *host; /* Host Data/Command Interface */ + u32 irq_mask; /* interrupts we are waiting for */ + u32 irq_status; /* interrupts that have happened */ int irq; - - uint32_t devnum; /* represent how many nands connected */ - uint32_t fwblks; /* represent how many blocks FW used */ - uint32_t totalblks; - uint32_t blksperchip; - uint32_t bbtskipbytes; - uint32_t max_banks; - unsigned int revision; - unsigned int caps; + void *buf; /* for syndrome layout conversion */ + dma_addr_t dma_addr; + int dma_avail; /* can support DMA? 
*/ + int devs_per_cs; /* devices connected in parallel */ + int oob_skip_bytes; /* number of bytes reserved for BBM */ + int max_banks; + unsigned int revision; /* IP revision */ + unsigned int caps; /* IP capability (or quirk) */ + const struct nand_ecc_caps *ecc_caps; + u32 (*host_read)(struct denali_nand_info *denali, u32 addr); + void (*host_write)(struct denali_nand_info *denali, u32 addr, u32 data); + void (*setup_dma)(struct denali_nand_info *denali, dma_addr_t dma_addr, + int page, int write); }; #define DENALI_CAP_HW_ECC_FIXUP BIT(0) #define DENALI_CAP_DMA_64BIT BIT(1) +int denali_calc_ecc_bytes(int step_size, int strength); int denali_init(struct denali_nand_info *denali); #endif /* __DENALI_H__ */ diff --git a/drivers/mtd/nand/denali_dt.c b/drivers/mtd/nand/denali_dt.c index 805c066b405..9d6cb09b422 100644 --- a/drivers/mtd/nand/denali_dt.c +++ b/drivers/mtd/nand/denali_dt.c @@ -16,21 +16,31 @@ struct denali_dt_data { unsigned int revision; unsigned int caps; + const struct nand_ecc_caps *ecc_caps; }; +NAND_ECC_CAPS_SINGLE(denali_socfpga_ecc_caps, denali_calc_ecc_bytes, + 512, 8, 15); static const struct denali_dt_data denali_socfpga_data = { .caps = DENALI_CAP_HW_ECC_FIXUP, + .ecc_caps = &denali_socfpga_ecc_caps, }; +NAND_ECC_CAPS_SINGLE(denali_uniphier_v5a_ecc_caps, denali_calc_ecc_bytes, + 1024, 8, 16, 24); static const struct denali_dt_data denali_uniphier_v5a_data = { .caps = DENALI_CAP_HW_ECC_FIXUP | DENALI_CAP_DMA_64BIT, + .ecc_caps = &denali_uniphier_v5a_ecc_caps, }; +NAND_ECC_CAPS_SINGLE(denali_uniphier_v5b_ecc_caps, denali_calc_ecc_bytes, + 1024, 8, 16); static const struct denali_dt_data denali_uniphier_v5b_data = { .revision = 0x0501, .caps = DENALI_CAP_HW_ECC_FIXUP | DENALI_CAP_DMA_64BIT, + .ecc_caps = &denali_uniphier_v5b_ecc_caps, }; static const struct udevice_id denali_nand_dt_ids[] = { @@ -61,19 +71,22 @@ static int denali_dt_probe(struct udevice *dev) if (data) { denali->revision = data->revision; denali->caps = data->caps; + denali->ecc_caps = data->ecc_caps; } + denali->dev = dev; + ret = dev_read_resource_byname(dev, "denali_reg", &res); if (ret) return ret; - denali->flash_reg = devm_ioremap(dev, res.start, resource_size(&res)); + denali->reg = devm_ioremap(dev, res.start, resource_size(&res)); ret = dev_read_resource_byname(dev, "nand_data", &res); if (ret) return ret; - denali->flash_mem = devm_ioremap(dev, res.start, resource_size(&res)); + denali->host = devm_ioremap(dev, res.start, resource_size(&res)); ret = clk_get_by_index(dev, 0, &clk); if (ret) diff --git a/drivers/mtd/nand/denali_spl.c b/drivers/mtd/nand/denali_spl.c index c6930325307..3cb98497353 100644 --- a/drivers/mtd/nand/denali_spl.c +++ b/drivers/mtd/nand/denali_spl.c @@ -11,6 +11,12 @@ #include <linux/mtd/nand.h> #include "denali.h" +#define DENALI_MAP01 (1 << 26) /* read/write pages in PIO */ +#define DENALI_MAP10 (2 << 26) /* high-level control plane */ + +#define INDEX_CTRL_REG 0x0 +#define INDEX_DATA_REG 0x10 + #define SPARE_ACCESS 0x41 #define MAIN_ACCESS 0x42 #define PIPELINE_ACCESS 0x2000 @@ -39,7 +45,7 @@ static int wait_for_irq(uint32_t irq_mask) do { intr_status = readl(denali_flash_reg + INTR_STATUS(flash_bank)); - if (intr_status & INTR_STATUS__ECC_UNCOR_ERR) { + if (intr_status & INTR__ECC_UNCOR_ERR) { debug("Uncorrected ECC detected\n"); return -EBADMSG; } @@ -106,16 +112,16 @@ int denali_send_pipeline_cmd(int page, int ecc_en, int access_type) addr = BANK(flash_bank) | page; /* setup the acccess type */ - cmd = MODE_10 | addr; + cmd = DENALI_MAP10 | addr; index_addr(cmd, 
access_type); /* setup the pipeline command */ index_addr(cmd, PIPELINE_ACCESS | page_count); - cmd = MODE_01 | addr; + cmd = DENALI_MAP01 | addr; writel(cmd, denali_flash_mem + INDEX_CTRL_REG); - return wait_for_irq(INTR_STATUS__LOAD_COMP); + return wait_for_irq(INTR__LOAD_COMP); } static int nand_read_oob(void *buf, int page) |
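Returning to the denali_dt.c hunk above: with ecc_caps now part of denali_dt_data, supporting another SoC variant is mostly a matter of describing its ECC geometry and pointing the match data at it. A sketch, with the compatible string and numbers invented purely for illustration; only the mechanism (NAND_ECC_CAPS_SINGLE plus the .ecc_caps pointer) comes from the patch:

NAND_ECC_CAPS_SINGLE(denali_example_ecc_caps, denali_calc_ecc_bytes,
		     1024, 8, 16, 24);
static const struct denali_dt_data denali_example_data = {
	.caps	  = DENALI_CAP_HW_ECC_FIXUP,
	.ecc_caps = &denali_example_ecc_caps,
};

/*
 * ...plus a matching { .compatible = "vendor,example-denali-nand",
 *                      .data = (ulong)&denali_example_data }
 * entry in denali_nand_dt_ids[].
 */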