[PATCH v4 24/24] mtd: spinand: repeat reading in regular mode if continuous reading fails
Mikhail Kshevetskiy
mikhail.kshevetskiy at iopsys.eu
Sat Aug 9 03:04:56 CEST 2025
Continuous reading may result in multiple flash pages being read in one
operation. Unfortunately, not all SPI NAND controllers support such
large reads: they will return less data than requested, and the
continuous-read operation cannot be resumed from where it stopped.
In this case:
* disable continuous reading on this (insufficiently capable) SPI controller
* repeat the read in regular mode.
Signed-off-by: Mikhail Kshevetskiy <mikhail.kshevetskiy at iopsys.eu>
---
drivers/mtd/nand/spi/core.c | 40 ++++++++++++++++++++++++++++---------
1 file changed, 31 insertions(+), 9 deletions(-)
diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
index 31926e37104..72ff516661f 100644
--- a/drivers/mtd/nand/spi/core.c
+++ b/drivers/mtd/nand/spi/core.c
@@ -375,7 +375,8 @@ static int spinand_load_page_op(struct spinand_device *spinand,
}
static int spinand_read_from_cache_op(struct spinand_device *spinand,
- const struct nand_page_io_req *req)
+ const struct nand_page_io_req *req,
+ bool *controller_is_buggy)
{
struct nand_device *nand = spinand_to_nand(spinand);
struct mtd_info *mtd = spinand_to_mtd(spinand);
@@ -427,8 +428,11 @@ static int spinand_read_from_cache_op(struct spinand_device *spinand,
* Dirmap accesses are allowed to toggle the CS.
* Toggling the CS during a continuous read is forbidden.
*/
- if (nbytes && req->continuous)
+ if (nbytes && req->continuous) {
+ if (controller_is_buggy)
+ *controller_is_buggy = true;
return -EIO;
+ }
}
if (req->datalen)
@@ -649,7 +653,7 @@ int spinand_read_page(struct spinand_device *spinand,
spinand_ondie_ecc_save_status(nand, status);
- ret = spinand_read_from_cache_op(spinand, req);
+ ret = spinand_read_from_cache_op(spinand, req, NULL);
if (ret)
return ret;
@@ -774,7 +778,8 @@ read_retry:
static int spinand_mtd_continuous_page_read(struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops,
- unsigned int *max_bitflips)
+ unsigned int *max_bitflips,
+ bool *controller_is_buggy)
{
struct spinand_device *spinand = mtd_to_spinand(mtd);
struct nand_device *nand = mtd_to_nanddev(mtd);
@@ -813,7 +818,7 @@ static int spinand_mtd_continuous_page_read(struct mtd_info *mtd, loff_t from,
if (ret < 0)
goto end_cont_read;
- ret = spinand_read_from_cache_op(spinand, &iter.req);
+ ret = spinand_read_from_cache_op(spinand, &iter.req, controller_is_buggy);
if (ret)
goto end_cont_read;
@@ -890,6 +895,8 @@ static bool spinand_use_cont_read(struct mtd_info *mtd, loff_t from,
static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops)
{
+ struct spinand_device *spinand = mtd_to_spinand(mtd);
+ bool controller_is_buggy = false;
unsigned int max_bitflips = 0;
int ret;
@@ -897,10 +904,25 @@ static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
mutex_lock(&spinand->lock);
#endif
- if (spinand_use_cont_read(mtd, from, ops))
- ret = spinand_mtd_continuous_page_read(mtd, from, ops, &max_bitflips);
- else
- ret = spinand_mtd_regular_page_read(mtd, from, ops, &max_bitflips);
+ if (spinand_use_cont_read(mtd, from, ops)) {
+ ret = spinand_mtd_continuous_page_read(mtd, from, ops,
+ &max_bitflips,
+ &controller_is_buggy);
+ if (controller_is_buggy) {
+ /*
+ * Some spi controllers may not support reading up to
+ * erase block size. They will read less data than
+			 * expected. If this happens, disable continuous mode
+ * and repeat reading in normal mode.
+ */
+ spinand->cont_read_possible = false;
+ ret = spinand_mtd_regular_page_read(mtd, from, ops,
+ &max_bitflips);
+ }
+ } else {
+ ret = spinand_mtd_regular_page_read(mtd, from, ops,
+ &max_bitflips);
+ }
#ifndef __UBOOT__
mutex_unlock(&spinand->lock);
--
2.47.2
More information about the U-Boot
mailing list