 drivers/mtd/nand/spi/core.c | 176 +++++++++++++++++++++++++++++++++++++++---
 include/linux/mtd/spinand.h |  16 ++++
 2 files changed, 184 insertions(+), 8 deletions(-)
diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
index 1f468ed93c8e..04041287b129 100644
--- a/drivers/mtd/nand/spi/core.c
+++ b/drivers/mtd/nand/spi/core.c
@@ -200,6 +200,12 @@ static int spinand_ecc_enable(struct spinand_device *spinand,
enable ? CFG_ECC_ENABLE : 0);
}
+static int spinand_cont_read_enable(struct spinand_device *spinand,
+ bool enable)
+{
+ return spinand->set_cont_read(spinand, enable);
+}
+
static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status)
{
struct nand_device *nand = spinand_to_nand(spinand);
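The hook itself is supplied by the manufacturer driver. Below is a minimal sketch of what such a hook could look like, assuming a chip that exposes continuous reads through a configuration-register bit; the CFG_CONT_READ name and bit position are illustrative assumptions, not taken from any real datasheet:

/* Assumed bit position; real chips define their own (or use a
 * dedicated feature register instead of the configuration register). */
#define CFG_CONT_READ		BIT(2)

static int example_set_cont_read(struct spinand_device *spinand, bool enable)
{
	return spinand_upd_cfg(spinand, CFG_CONT_READ,
			       enable ? CFG_CONT_READ : 0);
}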
@@ -311,10 +317,22 @@ static int spinand_ondie_ecc_finish_io_req(struct nand_device *nand,
/* Finish a page read: check the status, report errors/bitflips */
ret = spinand_check_ecc_status(spinand, engine_conf->status);
- if (ret == -EBADMSG)
+ if (ret == -EBADMSG) {
mtd->ecc_stats.failed++;
- else if (ret > 0)
- mtd->ecc_stats.corrected += ret;
+ } else if (ret > 0) {
+ unsigned int pages;
+
+ /*
+ * Continuous reads don't allow us to get the per-page detail,
+ * so we may exaggerate the actual number of corrected bitflips.
+ */
+ if (!req->continuous)
+ pages = 1;
+ else
+ pages = req->datalen / nanddev_page_size(nand);
+
+ mtd->ecc_stats.corrected += ret * pages;
+ }
return ret;
}
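To make the scaling concrete: on a hypothetical chip with 2 KiB pages, a continuous read returning 16 KiB (8 pages) whose status reports up to 3 corrected bitflips bumps ecc_stats.corrected by 3 * (16384 / 2048) = 24, even if fewer corrections actually happened; the per-page breakdown is simply not available in this mode.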
@@ -369,7 +387,11 @@ static int spinand_read_from_cache_op(struct spinand_device *spinand,
if (req->datalen) {
buf = spinand->databuf;
- nbytes = nanddev_page_size(nand);
+ if (!req->continuous)
+ nbytes = nanddev_page_size(nand);
+ else
+ nbytes = round_up(req->dataoffs + req->datalen,
+ nanddev_page_size(nand));
column = 0;
}
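Worked example, again assuming a 2 KiB page size: a request with dataoffs = 512 and datalen = 6144 gives nbytes = round_up(512 + 6144, 2048) = 8192, i.e. four full pages clocked out in one transfer, since a continuous read cannot stop mid-page without the output getting out of sequence.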
@@ -397,6 +419,13 @@ static int spinand_read_from_cache_op(struct spinand_device *spinand,
nbytes -= ret;
column += ret;
buf += ret;
+
+ /*
+ * Dirmap accesses are allowed to toggle the CS.
+ * Toggling the CS during a continuous read is forbidden.
+ */
+ if (nbytes && req->continuous)
+ return -EIO;
}
if (req->datalen)
@@ -672,6 +701,125 @@ static int spinand_mtd_regular_page_read(struct mtd_info *mtd, loff_t from,
return ret;
}
+static int spinand_mtd_continuous_page_read(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops,
+ unsigned int *max_bitflips)
+{
+ struct spinand_device *spinand = mtd_to_spinand(mtd);
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ struct nand_io_iter iter;
+ u8 status;
+ int ret;
+
+ ret = spinand_cont_read_enable(spinand, true);
+ if (ret)
+ return ret;
+
+ /*
+ * The cache is divided into two halves. While one half of the cache has
+ * the requested data, the other half is loaded with the next chunk of data.
+ * Therefore, the host can read out the data continuously from page to page.
+ * Each data read must be a multiple of 4 bytes, and full pages should be read;
+ * otherwise, the data output might get out of sequence from one read command
+ * to another.
+ */
+ nanddev_io_for_each_block(nand, NAND_PAGE_READ, from, ops, &iter) {
+ ret = spinand_select_target(spinand, iter.req.pos.target);
+ if (ret)
+ goto end_cont_read;
+
+ ret = nand_ecc_prepare_io_req(nand, &iter.req);
+ if (ret)
+ goto end_cont_read;
+
+ ret = spinand_load_page_op(spinand, &iter.req);
+ if (ret)
+ goto end_cont_read;
+
+ ret = spinand_wait(spinand, SPINAND_READ_INITIAL_DELAY_US,
+ SPINAND_READ_POLL_DELAY_US, NULL);
+ if (ret < 0)
+ goto end_cont_read;
+
+ ret = spinand_read_from_cache_op(spinand, &iter.req);
+ if (ret)
+ goto end_cont_read;
+
+ ops->retlen += iter.req.datalen;
+
+ ret = spinand_read_status(spinand, &status);
+ if (ret)
+ goto end_cont_read;
+
+ spinand_ondie_ecc_save_status(nand, status);
+
+ ret = nand_ecc_finish_io_req(nand, &iter.req);
+ if (ret < 0)
+ goto end_cont_read;
+
+ *max_bitflips = max_t(unsigned int, *max_bitflips, ret);
+ ret = 0;
+ }
+
+end_cont_read:
+ /*
+ * Once all the data has been read out, the host can either pull CS#
+ * high and wait for tRST or manually clear the bit in the configuration
+ * register to terminate the continuous read operation. We have no
+ * guarantee that SPI controller drivers will actually deassert CS
+ * when we expect them to, so take the register-based approach.
+ */
+ spinand_cont_read_enable(spinand, false);
+
+ return ret;
+}
+
+static void spinand_cont_read_init(struct spinand_device *spinand)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ enum nand_ecc_engine_type engine_type = nand->ecc.ctx.conf.engine_type;
+
+ /* OOBs cannot be retrieved, so external/on-host ECC engines won't work */
+ if (spinand->set_cont_read &&
+ (engine_type == NAND_ECC_ENGINE_TYPE_ON_DIE ||
+ engine_type == NAND_ECC_ENGINE_TYPE_NONE)) {
+ spinand->cont_read_possible = true;
+ }
+}
+
+static bool spinand_use_cont_read(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ struct spinand_device *spinand = nand_to_spinand(nand);
+ struct nand_pos start_pos, end_pos;
+
+ if (!spinand->cont_read_possible)
+ return false;
+
+ /* OOBs won't be retrieved */
+ if (ops->ooblen || ops->oobbuf)
+ return false;
+
+ nanddev_offs_to_pos(nand, from, &start_pos);
+ nanddev_offs_to_pos(nand, from + ops->len - 1, &end_pos);
+
+ /*
+ * Continuous reads never cross LUN boundaries. Some devices don't
+ * support crossing plane boundaries. Some devices don't even support
+ * crossing block boundaries. Since the common case is reading through
+ * UBI, we will very rarely read two or more consecutive blocks, so it
+ * is safer and easier (this can be improved later) to only enable
+ * continuous reads when reading within a single erase block.
+ */
+ if (start_pos.target != end_pos.target ||
+ start_pos.plane != end_pos.plane ||
+ start_pos.eraseblock != end_pos.eraseblock)
+ return false;
+
+ return start_pos.page < end_pos.page;
+}
+
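As an illustration, take an assumed geometry of 2 KiB pages and 64 pages (128 KiB) per eraseblock: a 6 KiB read from offset 0 spans pages 0-2 of the same block and takes the continuous path; a single-page read (start_pos.page == end_pos.page) or any read crossing the 128 KiB boundary falls back to the regular page-by-page path.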
static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops)
{
@@ -684,7 +832,10 @@ static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
old_stats = mtd->ecc_stats;
- ret = spinand_mtd_regular_page_read(mtd, from, ops, &max_bitflips);
+ if (spinand_use_cont_read(mtd, from, ops))
+ ret = spinand_mtd_continuous_page_read(mtd, from, ops, &max_bitflips);
+ else
+ ret = spinand_mtd_regular_page_read(mtd, from, ops, &max_bitflips);
if (ops->stats) {
ops->stats->uncorrectable_errors +=
@@ -874,6 +1025,9 @@ static int spinand_create_dirmap(struct spinand_device *spinand,
};
struct spi_mem_dirmap_desc *desc;
+ if (spinand->cont_read_possible)
+ info.length = nanddev_eraseblock_size(nand);
+
/* The plane number is passed in MSB just above the column address */
info.offset = plane << fls(nand->memorg.pagesize);
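With the 2 KiB + 64 B geometry assumed above, this grows the dirmap window from one page plus its OOB area (2112 bytes) to a full 128 KiB eraseblock, so a whole continuous read can be served through a single dirmap descriptor.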
@@ -1107,6 +1261,7 @@ int spinand_match_and_init(struct spinand_device *spinand,
spinand->flags = table[i].flags;
spinand->id.len = 1 + table[i].devid.len;
spinand->select_target = table[i].select_target;
+ spinand->set_cont_read = table[i].set_cont_read;
op = spinand_select_op_variant(spinand,
info->op_variants.read_cache);
@@ -1248,9 +1403,8 @@ static int spinand_init(struct spinand_device *spinand)
* may use this buffer for DMA access.
* Memory allocated by devm_ does not guarantee DMA-safe alignment.
*/
- spinand->databuf = kzalloc(nanddev_page_size(nand) +
- nanddev_per_page_oobsize(nand),
- GFP_KERNEL);
+ spinand->databuf = kzalloc(nanddev_eraseblock_size(nand),
+ GFP_KERNEL);
if (!spinand->databuf) {
ret = -ENOMEM;
goto err_free_bufs;
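Note the trade-off here: the DMA-safe bounce buffer grows from one page plus its OOB area (2112 bytes with the assumed geometry) to a full eraseblock (128 KiB in the example), and the larger allocation is made unconditionally, whether or not continuous reads end up being usable on the chip.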
@@ -1279,6 +1433,12 @@ static int spinand_init(struct spinand_device *spinand)
if (ret)
goto err_cleanup_nanddev;
+ /*
+ * Continuous read can only be enabled with an on-die ECC engine, so the
+ * ECC initialization must have happened previously.
+ */
+ spinand_cont_read_init(spinand);
+
mtd->_read_oob = spinand_mtd_read;
mtd->_write_oob = spinand_mtd_write;
mtd->_block_isbad = spinand_mtd_block_isbad;
diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h
index 5c19ead60499..14dce347dc46 100644
--- a/include/linux/mtd/spinand.h
+++ b/include/linux/mtd/spinand.h
@@ -336,6 +336,7 @@ struct spinand_ondie_ecc_conf {
* @op_variants.update_cache: variants of the update-cache operation
* @select_target: function used to select a target/die. Required only for
* multi-die chips
+ * @set_cont_read: enable/disable continuous cached reads
*
* Each SPI NAND manufacturer driver should have a spinand_info table
* describing all the chips supported by the driver.
@@ -354,6 +355,8 @@ struct spinand_info {
} op_variants;
int (*select_target)(struct spinand_device *spinand,
unsigned int target);
+ int (*set_cont_read)(struct spinand_device *spinand,
+ bool enable);
};
#define SPINAND_ID(__method, ...) \
@@ -379,6 +382,9 @@ struct spinand_info {
#define SPINAND_SELECT_TARGET(__func) \
.select_target = __func,
+#define SPINAND_CONT_READ(__set_cont_read) \
+ .set_cont_read = __set_cont_read,
+
#define SPINAND_INFO(__model, __id, __memorg, __eccreq, __op_variants, \
__flags, ...) \
{ \
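A hypothetical manufacturer table entry wiring the hook in, reusing the example_set_cont_read() sketch from earlier; every EXAMPLE_*/example_* identifier is made up, and the op-variant lists are assumed to be defined in the driver as usual:

static const struct spinand_info example_spinand_table[] = {
	SPINAND_INFO("EXAMPLE-CHIP",
		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xab),
		     NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
		     NAND_ECCREQ(8, 512),
		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
					      &write_cache_variants,
					      &update_cache_variants),
		     0,
		     SPINAND_CONT_READ(example_set_cont_read)),
};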
@@ -422,6 +428,12 @@ struct spinand_dirmap {
* passed in spi_mem_op be DMA-able, so we can't base the buffers on
* the stack
* @manufacturer: SPI NAND manufacturer information
+ * @cont_read_possible: Filled by the core once the whole system configuration
+ *	is known, to tell whether continuous reads are suitable for this
+ *	chip/configuration in general. A per-transfer check must still be
+ *	done to decide whether the feature is actually worth enabling.
+ * @set_cont_read: Enable/disable the continuous read feature
* @priv: manufacturer private data
*/
struct spinand_device {
@@ -451,6 +463,10 @@ struct spinand_device {
u8 *scratchbuf;
const struct spinand_manufacturer *manufacturer;
void *priv;
+
+ bool cont_read_possible;
+ int (*set_cont_read)(struct spinand_device *spinand,
+ bool enable);
};
/**