Diffstat (limited to 'drivers/mtd/nand/spi/core.c')
-rw-r--r--  drivers/mtd/nand/spi/core.c | 286
1 file changed, 184 insertions(+), 102 deletions(-)
diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
index c35221794645..8ea545bb924d 100644
--- a/drivers/mtd/nand/spi/core.c
+++ b/drivers/mtd/nand/spi/core.c
@@ -193,6 +193,135 @@ static int spinand_ecc_enable(struct spinand_device *spinand,
enable ? CFG_ECC_ENABLE : 0);
}
+static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+
+ if (spinand->eccinfo.get_status)
+ return spinand->eccinfo.get_status(spinand, status);
+
+ switch (status & STATUS_ECC_MASK) {
+ case STATUS_ECC_NO_BITFLIPS:
+ return 0;
+
+ case STATUS_ECC_HAS_BITFLIPS:
+ /*
+ * We have no way to know exactly how many bitflips have been
+ * fixed, so let's return the maximum possible value so that
+ * wear-leveling layers move the data immediately.
+ */
+ return nanddev_get_ecc_conf(nand)->strength;
+
+ case STATUS_ECC_UNCOR_ERROR:
+ return -EBADMSG;
+
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
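
Chips that report more than this generic two-bit scheme can override the default through the eccinfo.get_status hook checked at the top of the function. A rough sketch of such a hook follows; the bit layout is invented for illustration only (real decoders live in the vendor drivers under drivers/mtd/nand/spi/):

	static int example_get_ecc_status(struct spinand_device *spinand,
					  u8 status)
	{
		switch (status & STATUS_ECC_MASK) {
		case STATUS_ECC_NO_BITFLIPS:
			return 0;

		case STATUS_ECC_UNCOR_ERROR:
			return -EBADMSG;

		default:
			/*
			 * Hypothetical: the remaining status codes encode
			 * the number of corrected bitflips directly, so
			 * report an exact count instead of the maximum.
			 */
			return (status & STATUS_ECC_MASK) >> 4;
		}
	}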
+
+static int spinand_noecc_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ return -ERANGE;
+}
+
+static int spinand_noecc_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section)
+ return -ERANGE;
+
+ /* Reserve 2 bytes for the BBM. */
+ region->offset = 2;
+ region->length = 62;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops spinand_noecc_ooblayout = {
+ .ecc = spinand_noecc_ooblayout_ecc,
+ .free = spinand_noecc_ooblayout_free,
+};
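
This no-ECC layout is only the fallback used when a chip driver supplies no layout of its own (see spinand_ondie_ecc_init_ctx() below). For contrast, a chip-specific layout normally implements both callbacks; a minimal sketch with invented offsets (real chips define these in their datasheets):

	static int example_ooblayout_ecc(struct mtd_info *mtd, int section,
					 struct mtd_oob_region *region)
	{
		if (section)
			return -ERANGE;

		/* Hypothetical chip: last 32 of 64 OOB bytes hold ECC */
		region->offset = 32;
		region->length = 32;

		return 0;
	}

	static int example_ooblayout_free(struct mtd_info *mtd, int section,
					  struct mtd_oob_region *region)
	{
		if (section)
			return -ERANGE;

		/* Bytes 2..31 remain free; the first two are the BBM */
		region->offset = 2;
		region->length = 30;

		return 0;
	}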
+
+static int spinand_ondie_ecc_init_ctx(struct nand_device *nand)
+{
+ struct spinand_device *spinand = nand_to_spinand(nand);
+ struct mtd_info *mtd = nanddev_to_mtd(nand);
+ struct spinand_ondie_ecc_conf *engine_conf;
+
+ nand->ecc.ctx.conf.engine_type = NAND_ECC_ENGINE_TYPE_ON_DIE;
+ nand->ecc.ctx.conf.step_size = nand->ecc.requirements.step_size;
+ nand->ecc.ctx.conf.strength = nand->ecc.requirements.strength;
+
+ engine_conf = kzalloc(sizeof(*engine_conf), GFP_KERNEL);
+ if (!engine_conf)
+ return -ENOMEM;
+
+ nand->ecc.ctx.priv = engine_conf;
+
+ if (spinand->eccinfo.ooblayout)
+ mtd_set_ooblayout(mtd, spinand->eccinfo.ooblayout);
+ else
+ mtd_set_ooblayout(mtd, &spinand_noecc_ooblayout);
+
+ return 0;
+}
+
+static void spinand_ondie_ecc_cleanup_ctx(struct nand_device *nand)
+{
+ kfree(nand->ecc.ctx.priv);
+}
+
+static int spinand_ondie_ecc_prepare_io_req(struct nand_device *nand,
+ struct nand_page_io_req *req)
+{
+ struct spinand_device *spinand = nand_to_spinand(nand);
+ bool enable = (req->mode != MTD_OPS_RAW);
+
+ /* Only enable or disable the engine */
+ return spinand_ecc_enable(spinand, enable);
+}
+
+static int spinand_ondie_ecc_finish_io_req(struct nand_device *nand,
+ struct nand_page_io_req *req)
+{
+ struct spinand_ondie_ecc_conf *engine_conf = nand->ecc.ctx.priv;
+ struct spinand_device *spinand = nand_to_spinand(nand);
+
+ if (req->mode == MTD_OPS_RAW)
+ return 0;
+
+ /* Nothing to do when finishing a page write */
+ if (req->type == NAND_PAGE_WRITE)
+ return 0;
+
+ /* Finish a page read: check the status, report errors/bitflips */
+ return spinand_check_ecc_status(spinand, engine_conf->status);
+}
+
+static struct nand_ecc_engine_ops spinand_ondie_ecc_engine_ops = {
+ .init_ctx = spinand_ondie_ecc_init_ctx,
+ .cleanup_ctx = spinand_ondie_ecc_cleanup_ctx,
+ .prepare_io_req = spinand_ondie_ecc_prepare_io_req,
+ .finish_io_req = spinand_ondie_ecc_finish_io_req,
+};
+
+static struct nand_ecc_engine spinand_ondie_ecc_engine = {
+ .ops = &spinand_ondie_ecc_engine_ops,
+};
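
These ops are not called directly by the SPI-NAND core: spinand_read_page() and spinand_write_page() below go through the generic nand_ecc_prepare_io_req()/nand_ecc_finish_io_req() helpers, which are assumed to simply forward to the registered engine, roughly:

	int nand_ecc_prepare_io_req(struct nand_device *nand,
				    struct nand_page_io_req *req)
	{
		/* Sketch of the generic helper; see drivers/mtd/nand/ecc.c */
		if (nand->ecc.engine && nand->ecc.engine->ops->prepare_io_req)
			return nand->ecc.engine->ops->prepare_io_req(nand, req);

		return 0;
	}

nand_ecc_finish_io_req() is assumed to follow the same pattern with ->finish_io_req().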
+
+static void spinand_ondie_ecc_save_status(struct nand_device *nand, u8 status)
+{
+ struct spinand_ondie_ecc_conf *engine_conf = nand->ecc.ctx.priv;
+
+ if (nand->ecc.ctx.conf.engine_type == NAND_ECC_ENGINE_TYPE_ON_DIE &&
+ engine_conf)
+ engine_conf->status = status;
+}
+
static int spinand_write_enable_op(struct spinand_device *spinand)
{
struct spi_mem_op op = SPINAND_WR_EN_DIS_OP(true);
@@ -214,7 +343,6 @@ static int spinand_read_from_cache_op(struct spinand_device *spinand,
const struct nand_page_io_req *req)
{
struct nand_device *nand = spinand_to_nand(spinand);
- struct mtd_info *mtd = nanddev_to_mtd(nand);
struct spi_mem_dirmap_desc *rdesc;
unsigned int nbytes = 0;
void *buf = NULL;
@@ -254,16 +382,9 @@ static int spinand_read_from_cache_op(struct spinand_device *spinand,
memcpy(req->databuf.in, spinand->databuf + req->dataoffs,
req->datalen);
- if (req->ooblen) {
- if (req->mode == MTD_OPS_AUTO_OOB)
- mtd_ooblayout_get_databytes(mtd, req->oobbuf.in,
- spinand->oobbuf,
- req->ooboffs,
- req->ooblen);
- else
- memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs,
- req->ooblen);
- }
+ if (req->ooblen)
+ memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs,
+ req->ooblen);
return 0;
}
@@ -272,7 +393,7 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand,
const struct nand_page_io_req *req)
{
struct nand_device *nand = spinand_to_nand(spinand);
- struct mtd_info *mtd = nanddev_to_mtd(nand);
+ struct mtd_info *mtd = spinand_to_mtd(spinand);
struct spi_mem_dirmap_desc *wdesc;
unsigned int nbytes, column = 0;
void *buf = spinand->databuf;
@@ -284,9 +405,12 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand,
* must fill the page cache entirely even if we only want to program
* the data portion of the page, otherwise we might corrupt the BBM or
* user data previously programmed in OOB area.
+ *
+ * Only reset the data buffer manually; the OOB buffer is prepared by
+ * the ECC engine's ->prepare_io_req() callback.
*/
nbytes = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
- memset(spinand->databuf, 0xff, nbytes);
+ memset(spinand->databuf, 0xff, nanddev_page_size(nand));
if (req->datalen)
memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
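
The narrowed memset() relies on the single-allocation buffer layout set up in spinand_init(), where spinand->oobbuf points at spinand->databuf + nanddev_page_size(nand). Resetting only the data portion therefore leaves the OOB bytes exactly as the ECC engine prepared them:

	/*
	 *   databuf                         oobbuf = databuf + page size
	 *   |<--------- page data --------->|<---------- OOB ---------->|
	 *   |     memset() to 0xff here     |  left as prepared by the  |
	 *   |                               |  ->prepare_io_req() hook  |
	 */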
@@ -402,42 +526,17 @@ static int spinand_lock_block(struct spinand_device *spinand, u8 lock)
return spinand_write_reg_op(spinand, REG_BLOCK_LOCK, lock);
}
-static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status)
-{
- struct nand_device *nand = spinand_to_nand(spinand);
-
- if (spinand->eccinfo.get_status)
- return spinand->eccinfo.get_status(spinand, status);
-
- switch (status & STATUS_ECC_MASK) {
- case STATUS_ECC_NO_BITFLIPS:
- return 0;
-
- case STATUS_ECC_HAS_BITFLIPS:
- /*
- * We have no way to know exactly how many bitflips have been
- * fixed, so let's return the maximum possible value so that
- * wear-leveling layers move the data immediately.
- */
- return nanddev_get_ecc_conf(nand)->strength;
-
- case STATUS_ECC_UNCOR_ERROR:
- return -EBADMSG;
-
- default:
- break;
- }
-
- return -EINVAL;
-}
-
static int spinand_read_page(struct spinand_device *spinand,
- const struct nand_page_io_req *req,
- bool ecc_enabled)
+ const struct nand_page_io_req *req)
{
+ struct nand_device *nand = spinand_to_nand(spinand);
u8 status;
int ret;
+ ret = nand_ecc_prepare_io_req(nand, (struct nand_page_io_req *)req);
+ if (ret)
+ return ret;
+
ret = spinand_load_page_op(spinand, req);
if (ret)
return ret;
@@ -446,22 +545,26 @@ static int spinand_read_page(struct spinand_device *spinand,
if (ret < 0)
return ret;
+ spinand_ondie_ecc_save_status(nand, status);
+
ret = spinand_read_from_cache_op(spinand, req);
if (ret)
return ret;
- if (!ecc_enabled)
- return 0;
-
- return spinand_check_ecc_status(spinand, status);
+ return nand_ecc_finish_io_req(nand, (struct nand_page_io_req *)req);
}
static int spinand_write_page(struct spinand_device *spinand,
const struct nand_page_io_req *req)
{
+ struct nand_device *nand = spinand_to_nand(spinand);
u8 status;
int ret;
+ ret = nand_ecc_prepare_io_req(nand, (struct nand_page_io_req *)req);
+ if (ret)
+ return ret;
+
ret = spinand_write_enable_op(spinand);
if (ret)
return ret;
@@ -476,9 +579,9 @@ static int spinand_write_page(struct spinand_device *spinand,
ret = spinand_wait(spinand, &status);
if (!ret && (status & STATUS_PROG_FAILED))
- ret = -EIO;
+ return -EIO;
- return ret;
+ return nand_ecc_finish_io_req(nand, (struct nand_page_io_req *)req);
}
static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
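
Read and write now share the same bracketing: prepare the engine, run the raw operation, then let the engine finish. The casts drop the const qualifier only because the generic helpers take a non-const request. In outline:

	/*
	 * spinand_read_page()                spinand_write_page()
	 *   nand_ecc_prepare_io_req()          nand_ecc_prepare_io_req()
	 *   spinand_load_page_op()             spinand_write_enable_op()
	 *   spinand_wait() + save ECC status   ...program + spinand_wait()
	 *   spinand_read_from_cache_op()       check STATUS_PROG_FAILED
	 *   nand_ecc_finish_io_req()           nand_ecc_finish_io_req()
	 */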
@@ -488,25 +591,24 @@ static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
struct nand_device *nand = mtd_to_nanddev(mtd);
unsigned int max_bitflips = 0;
struct nand_io_iter iter;
- bool enable_ecc = false;
+ bool disable_ecc = false;
bool ecc_failed = false;
int ret = 0;
- if (ops->mode != MTD_OPS_RAW && spinand->eccinfo.ooblayout)
- enable_ecc = true;
+ if (ops->mode == MTD_OPS_RAW || !spinand->eccinfo.ooblayout)
+ disable_ecc = true;
mutex_lock(&spinand->lock);
nanddev_io_for_each_page(nand, NAND_PAGE_READ, from, ops, &iter) {
- ret = spinand_select_target(spinand, iter.req.pos.target);
- if (ret)
- break;
+ if (disable_ecc)
+ iter.req.mode = MTD_OPS_RAW;
- ret = spinand_ecc_enable(spinand, enable_ecc);
+ ret = spinand_select_target(spinand, iter.req.pos.target);
if (ret)
break;
- ret = spinand_read_page(spinand, &iter.req, enable_ecc);
+ ret = spinand_read_page(spinand, &iter.req);
if (ret < 0 && ret != -EBADMSG)
break;
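
Forcing iter.req.mode to MTD_OPS_RAW here is what later makes spinand_ondie_ecc_prepare_io_req() disable the engine. For example, a raw OOB read issued through the standard MTD API travels this path (a sketch; mtd, from and oob_buffer are assumed to exist, error handling omitted):

	struct mtd_oob_ops ops = {
		.mode	= MTD_OPS_RAW,	/* keeps ECC off for this access */
		.ooblen	= mtd->oobsize,
		.oobbuf	= oob_buffer,
	};

	/* Ends up in spinand_mtd_read() with disable_ecc == true */
	ret = mtd_read_oob(mtd, from, &ops);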
@@ -537,20 +639,19 @@ static int spinand_mtd_write(struct mtd_info *mtd, loff_t to,
struct spinand_device *spinand = mtd_to_spinand(mtd);
struct nand_device *nand = mtd_to_nanddev(mtd);
struct nand_io_iter iter;
- bool enable_ecc = false;
+ bool disable_ecc = false;
int ret = 0;
- if (ops->mode != MTD_OPS_RAW && mtd->ooblayout)
- enable_ecc = true;
+ if (ops->mode == MTD_OPS_RAW || !mtd->ooblayout)
+ disable_ecc = true;
mutex_lock(&spinand->lock);
nanddev_io_for_each_page(nand, NAND_PAGE_WRITE, to, ops, &iter) {
- ret = spinand_select_target(spinand, iter.req.pos.target);
- if (ret)
- break;
+ if (disable_ecc)
+ iter.req.mode = MTD_OPS_RAW;
- ret = spinand_ecc_enable(spinand, enable_ecc);
+ ret = spinand_select_target(spinand, iter.req.pos.target);
if (ret)
break;
@@ -580,7 +681,7 @@ static bool spinand_isbad(struct nand_device *nand, const struct nand_pos *pos)
};
spinand_select_target(spinand, pos->target);
- spinand_read_page(spinand, &req, false);
+ spinand_read_page(spinand, &req);
if (marker[0] != 0xff || marker[1] != 0xff)
return true;
@@ -965,30 +1066,6 @@ static int spinand_detect(struct spinand_device *spinand)
return 0;
}
-static int spinand_noecc_ooblayout_ecc(struct mtd_info *mtd, int section,
- struct mtd_oob_region *region)
-{
- return -ERANGE;
-}
-
-static int spinand_noecc_ooblayout_free(struct mtd_info *mtd, int section,
- struct mtd_oob_region *region)
-{
- if (section)
- return -ERANGE;
-
- /* Reserve 2 bytes for the BBM. */
- region->offset = 2;
- region->length = 62;
-
- return 0;
-}
-
-static const struct mtd_ooblayout_ops spinand_noecc_ooblayout = {
- .ecc = spinand_noecc_ooblayout_ecc,
- .free = spinand_noecc_ooblayout_free,
-};
-
static int spinand_init(struct spinand_device *spinand)
{
struct device *dev = &spinand->spimem->spi->dev;
@@ -1066,10 +1143,15 @@ static int spinand_init(struct spinand_device *spinand)
if (ret)
goto err_manuf_cleanup;
- /*
- * Right now, we don't support ECC, so let the whole oob
- * area is available for user.
- */
+ /* SPI-NAND default ECC engine is on-die */
+ nand->ecc.defaults.engine_type = NAND_ECC_ENGINE_TYPE_ON_DIE;
+ nand->ecc.ondie_engine = &spinand_ondie_ecc_engine;
+
+ spinand_ecc_enable(spinand, false);
+ ret = nanddev_ecc_engine_init(nand);
+ if (ret)
+ goto err_cleanup_nanddev;
+
mtd->_read_oob = spinand_mtd_read;
mtd->_write_oob = spinand_mtd_write;
mtd->_block_isbad = spinand_mtd_block_isbad;
@@ -1078,14 +1160,11 @@ static int spinand_init(struct spinand_device *spinand)
mtd->_erase = spinand_mtd_erase;
mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks;
- if (spinand->eccinfo.ooblayout)
- mtd_set_ooblayout(mtd, spinand->eccinfo.ooblayout);
- else
- mtd_set_ooblayout(mtd, &spinand_noecc_ooblayout);
-
- ret = mtd_ooblayout_count_freebytes(mtd);
- if (ret < 0)
- goto err_cleanup_nanddev;
+ if (nand->ecc.engine) {
+ ret = mtd_ooblayout_count_freebytes(mtd);
+ if (ret < 0)
+ goto err_cleanup_ecc_engine;
+ }
mtd->oobavail = ret;
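
nanddev_ecc_engine_init() is assumed to resolve the engine from the defaults registered just above and to call its ->init_ctx() hook; a simplified sketch of that behaviour (not the actual implementation, see drivers/mtd/nand/core.c):

	static int sketch_ecc_engine_init(struct nand_device *nand)
	{
		/* Defaults set in spinand_init(): the on-die engine */
		if (nand->ecc.defaults.engine_type ==
		    NAND_ECC_ENGINE_TYPE_ON_DIE)
			nand->ecc.engine = nand->ecc.ondie_engine;

		if (!nand->ecc.engine)
			return -EINVAL;

		/* For SPI-NAND this is spinand_ondie_ecc_init_ctx() */
		return nand->ecc.engine->ops->init_ctx(nand);
	}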
@@ -1095,6 +1174,9 @@ static int spinand_init(struct spinand_device *spinand)
return 0;
+err_cleanup_ecc_engine:
+ nanddev_ecc_engine_cleanup(nand);
+
err_cleanup_nanddev:
nanddev_cleanup(nand);