@@ -0,0 +1,97 @@
+From 9d13b7857de8834c6246fa5bc83f767675360240 Mon Sep 17 00:00:00 2001
+From: Mikhail Kshevetskiy <[email protected]>
+Date: Sun, 3 Aug 2025 19:06:40 +0300
+Subject: [PATCH RESEND v5 2/3] mtd: spinand: try a regular dirmap if creating
+ a dirmap for continuous reading fails
+
+Continuous reading may read multiple flash pages in a single operation.
+Typically only one flash page is read or written at a time (a little
+more than 2-4 KiB), but continuous reading requires the SPI controller
+to read up to 512 KiB in one operation without toggling CS in between.
+
+Roughly speaking, SPI controllers can be divided into two categories:
+ * SPI controllers without dirmap acceleration support
+ * SPI controllers with dirmap acceleration support
+
+The first group will have issues with continuous reading if a restriction
+on the transfer length is implemented in the adjust_op_size() handler.
+The second group often supports acceleration of single-page reads only.
+Thus enabling continuous reading can break flash reading.
+
+This patch tries to create a dirmap for continuous reading first and
+falls back to a regular dirmap if the SPI controller refuses to create it.
+
+Signed-off-by: Mikhail Kshevetskiy <[email protected]>
+---
+ drivers/mtd/nand/spi/core.c | 43 ++++++++++++++++++++++++++++++-------
+ 1 file changed, 35 insertions(+), 8 deletions(-)
+
+--- a/drivers/mtd/nand/spi/core.c
++++ b/drivers/mtd/nand/spi/core.c
+@@ -1024,6 +1024,39 @@ static int spinand_mtd_block_isreserved(
+ 	return ret;
+ }
+
++static struct spi_mem_dirmap_desc *spinand_create_rdesc(
++							struct spinand_device *spinand,
++							struct spi_mem_dirmap_info *info)
++{
++	struct nand_device *nand = spinand_to_nand(spinand);
++	struct spi_mem_dirmap_desc *desc = NULL;
++
++	if (spinand->cont_read_possible) {
++		/*
++		 * spi controller may return an error if info->length is
++		 * too large
++		 */
++		info->length = nanddev_eraseblock_size(nand);
++		desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
++						  spinand->spimem, info);
++	}
++
++	if (IS_ERR_OR_NULL(desc)) {
++		/*
++		 * continuous reading is not supported by flash or
++		 * its spi controller, use regular reading
++		 */
++		spinand->cont_read_possible = false;
++
++		info->length = nanddev_page_size(nand) +
++			       nanddev_per_page_oobsize(nand);
++		desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
++						  spinand->spimem, info);
++	}
++
++	return desc;
++}
++
+ static int spinand_create_dirmap(struct spinand_device *spinand,
+ 				 unsigned int plane)
+ {
+@@ -1043,11 +1076,8 @@ static int spinand_create_dirmap(struct
+
+ 	spinand->dirmaps[plane].wdesc = desc;
+
+-	if (spinand->cont_read_possible)
+-		info.length = nanddev_eraseblock_size(nand);
+ 	info.op_tmpl = *spinand->op_templates.read_cache;
+-	desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
+-					  spinand->spimem, &info);
++	desc = spinand_create_rdesc(spinand, &info);
+ 	if (IS_ERR(desc))
+ 		return PTR_ERR(desc);
+
+@@ -1070,12 +1100,9 @@ static int spinand_create_dirmap(struct
+
+ 	spinand->dirmaps[plane].wdesc_ecc = desc;
+
+-	if (spinand->cont_read_possible)
+-		info.length = nanddev_eraseblock_size(nand);
+ 	info.op_tmpl = *spinand->op_templates.read_cache;
+ 	info.op_tmpl.data.ecc = true;
+-	desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
+-					  spinand->spimem, &info);
++	desc = spinand_create_rdesc(spinand, &info);
+ 	if (IS_ERR(desc))
+ 		return PTR_ERR(desc);
+
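
Note on the fallback path: a controller's dirmap_create handler may refuse a
direct-mapping window it cannot handle, and the eraseblock-sized window
requested for continuous reading is the most likely one to be refused. The
sketch below is not part of the patch; it only illustrates what such a
controller-side rejection could look like. foo_spi_dirmap_create() and
FOO_MAX_DIRMAP_LEN are invented names, and the Linux <linux/spi/spi-mem.h>
API is assumed (U-Boot's spi-mem mirrors the same ops structure). A refusal
like this may surface as a failed descriptor from devm_spi_mem_dirmap_create(),
which spinand_create_rdesc() above recovers from by retrying with a
page-sized (plus OOB) window.

#include <linux/errno.h>
#include <linux/spi/spi-mem.h>

/* Assumed limit of this hypothetical controller: it can map at most
 * one flash page plus its OOB area per direct mapping. */
#define FOO_MAX_DIRMAP_LEN	(4096 + 256)

static int foo_spi_dirmap_create(struct spi_mem_dirmap_desc *desc)
{
	/*
	 * Reject windows larger than the hardware can map, e.g. the
	 * eraseblock-sized window requested for continuous reading.
	 * A real driver would also program the mapping here and
	 * provide dirmap_read()/dirmap_write() handlers.
	 */
	if (desc->info.length > FOO_MAX_DIRMAP_LEN)
		return -EINVAL;

	return 0;
}

static const struct spi_controller_mem_ops foo_mem_ops = {
	.dirmap_create = foo_spi_dirmap_create,
};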