
100-07-mtd-nmbm-add-support-for-mtd.patch 22 KB

From 0524995f07fcd216a1a7e267fdb5cf2b0ede8489 Mon Sep 17 00:00:00 2001
From: Weijie Gao <[email protected]>
Date: Mon, 25 Jul 2022 10:42:12 +0800
Subject: [PATCH 41/71] mtd: nmbm: add support for mtd

Add support to create NMBM based on MTD devices

Signed-off-by: Weijie Gao <[email protected]>
---
 drivers/mtd/nmbm/Kconfig    |   5 +
 drivers/mtd/nmbm/Makefile   |   1 +
 drivers/mtd/nmbm/nmbm-mtd.c | 890 ++++++++++++++++++++++++++++++++++++
 include/nmbm/nmbm-mtd.h     |  27 ++
 4 files changed, 923 insertions(+)
 create mode 100644 drivers/mtd/nmbm/nmbm-mtd.c
 create mode 100644 include/nmbm/nmbm-mtd.h
  15. --- a/drivers/mtd/nmbm/Kconfig
  16. +++ b/drivers/mtd/nmbm/Kconfig
  17. @@ -27,3 +27,8 @@ config NMBM_LOG_LEVEL_NONE
  18. bool "5 - None"
  19. endchoice
  20. +
  21. +config NMBM_MTD
  22. + bool "Enable MTD based NAND mapping block management"
  23. + default n
  24. + depends on NMBM
  25. --- a/drivers/mtd/nmbm/Makefile
  26. +++ b/drivers/mtd/nmbm/Makefile
  27. @@ -3,3 +3,4 @@
  28. # (C) Copyright 2020 MediaTek Inc. All rights reserved.
  29. obj-$(CONFIG_NMBM) += nmbm-core.o
  30. +obj-$(CONFIG_NMBM_MTD) += nmbm-mtd.o
  31. --- /dev/null
  32. +++ b/drivers/mtd/nmbm/nmbm-mtd.c
  33. @@ -0,0 +1,890 @@
  34. +// SPDX-License-Identifier: GPL-2.0
  35. +/*
  36. + * Copyright (C) 2020 MediaTek Inc. All Rights Reserved.
  37. + *
  38. + * Author: Weijie Gao <[email protected]>
  39. + */
  40. +
  41. +#include <linux/list.h>
  42. +#include <linux/bitops.h>
  43. +#include <linux/kernel.h>
  44. +#include <linux/types.h>
  45. +#include <linux/mtd/mtd.h>
  46. +#include <jffs2/load_kernel.h>
  47. +#include <watchdog.h>
  48. +
  49. +#include "nmbm-debug.h"
  50. +
  51. +#define NMBM_UPPER_MTD_NAME "nmbm"
  52. +
  53. +static uint32_t nmbm_id_cnt;
  54. +static LIST_HEAD(nmbm_devs);
  55. +
  56. +struct nmbm_mtd {
  57. + struct mtd_info upper;
  58. + char *name;
  59. + uint32_t id;
  60. +
  61. + struct mtd_info *lower;
  62. +
  63. + struct nmbm_instance *ni;
  64. + uint8_t *page_cache;
  65. +
  66. + struct list_head node;
  67. +};
  68. +
  69. +static int nmbm_lower_read_page(void *arg, uint64_t addr, void *buf, void *oob,
  70. + enum nmbm_oob_mode mode)
  71. +{
  72. + struct nmbm_mtd *nm = arg;
  73. + struct mtd_oob_ops ops;
  74. + int ret;
  75. +
  76. + memset(&ops, 0, sizeof(ops));
  77. +
  78. + switch (mode) {
  79. + case NMBM_MODE_PLACE_OOB:
  80. + ops.mode = MTD_OPS_PLACE_OOB;
  81. + break;
  82. + case NMBM_MODE_AUTO_OOB:
  83. + ops.mode = MTD_OPS_AUTO_OOB;
  84. + break;
  85. + case NMBM_MODE_RAW:
  86. + ops.mode = MTD_OPS_RAW;
  87. + break;
  88. + default:
  89. + pr_debug("%s: unsupported NMBM mode: %u\n", __func__, mode);
  90. + return -ENOTSUPP;
  91. + }
  92. +
  93. + if (buf) {
  94. + ops.datbuf = buf;
  95. + ops.len = nm->lower->writesize;
  96. + }
  97. +
  98. + if (oob) {
  99. + ops.oobbuf = oob;
  100. + ops.ooblen = mtd_oobavail(nm->lower, &ops);
  101. + }
  102. +
  103. + ret = mtd_read_oob(nm->lower, addr, &ops);
  104. + nm->upper.ecc_stats.corrected = nm->lower->ecc_stats.corrected;
  105. + nm->upper.ecc_stats.failed = nm->lower->ecc_stats.failed;
  106. +
  107. + /* Report error on failure (including ecc error) */
  108. + if (ret < 0 && ret != -EUCLEAN)
  109. + return ret;
  110. +
  111. + /*
  112. + * Since mtd_read_oob() won't report exact bitflips, what we can know
  113. + * is whether bitflips exceeds the threshold.
  114. + * We want the -EUCLEAN to be passed to the upper layer, but not the
  115. + * error value itself. To achieve this, report bitflips above the
  116. + * threshold.
  117. + */
  118. +
  119. + if (ret == -EUCLEAN) {
  120. + return min_t(u32, nm->lower->bitflip_threshold + 1,
  121. + nm->lower->ecc_strength);
  122. + }
  123. +
  124. + /* For bitflips less than the threshold, return 0 */
  125. +
  126. + return 0;
  127. +}
  128. +
  129. +static int nmbm_lower_write_page(void *arg, uint64_t addr, const void *buf,
  130. + const void *oob, enum nmbm_oob_mode mode)
  131. +{
  132. + struct nmbm_mtd *nm = arg;
  133. + struct mtd_oob_ops ops;
  134. +
  135. + memset(&ops, 0, sizeof(ops));
  136. +
  137. + switch (mode) {
  138. + case NMBM_MODE_PLACE_OOB:
  139. + ops.mode = MTD_OPS_PLACE_OOB;
  140. + break;
  141. + case NMBM_MODE_AUTO_OOB:
  142. + ops.mode = MTD_OPS_AUTO_OOB;
  143. + break;
  144. + case NMBM_MODE_RAW:
  145. + ops.mode = MTD_OPS_RAW;
  146. + break;
  147. + default:
  148. + pr_debug("%s: unsupported NMBM mode: %u\n", __func__, mode);
  149. + return -ENOTSUPP;
  150. + }
  151. +
  152. + if (buf) {
  153. + ops.datbuf = (uint8_t *)buf;
  154. + ops.len = nm->lower->writesize;
  155. + }
  156. +
  157. + if (oob) {
  158. + ops.oobbuf = (uint8_t *)oob;
  159. + ops.ooblen = mtd_oobavail(nm->lower, &ops);
  160. + }
  161. +
  162. + return mtd_write_oob(nm->lower, addr, &ops);
  163. +}
  164. +
  165. +static int nmbm_lower_erase_block(void *arg, uint64_t addr)
  166. +{
  167. + struct nmbm_mtd *nm = arg;
  168. + struct erase_info ei;
  169. +
  170. + memset(&ei, 0, sizeof(ei));
  171. +
  172. + ei.mtd = nm->lower;
  173. + ei.addr = addr;
  174. + ei.len = nm->lower->erasesize;
  175. +
  176. + return mtd_erase(nm->lower, &ei);
  177. +}
  178. +
  179. +static int nmbm_lower_is_bad_block(void *arg, uint64_t addr)
  180. +{
  181. + struct nmbm_mtd *nm = arg;
  182. +
  183. + return mtd_block_isbad(nm->lower, addr);
  184. +}
  185. +
  186. +static int nmbm_lower_mark_bad_block(void *arg, uint64_t addr)
  187. +{
  188. + struct nmbm_mtd *nm = arg;
  189. +
  190. + return mtd_block_markbad(nm->lower, addr);
  191. +}
  192. +
  193. +static void nmbm_lower_log(void *arg, enum nmbm_log_category level,
  194. + const char *fmt, va_list ap)
  195. +{
  196. + vprintf(fmt, ap);
  197. +}
  198. +
  199. +static int nmbm_mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
  200. + size_t *retlen, u_char *buf)
  201. +{
  202. + struct nmbm_mtd *nm = container_of(mtd, struct nmbm_mtd, upper);
  203. +
  204. + /* Do not allow read past end of device */
  205. + if ((from + len) > mtd->size) {
  206. + pr_debug("%s: attempt to read beyond end of device\n",
  207. + __func__);
  208. + return -EINVAL;
  209. + }
  210. +
  211. + return nmbm_read_range(nm->ni, from, len, buf, MTD_OPS_PLACE_OOB,
  212. + retlen);
  213. +}
  214. +
  215. +static int nmbm_mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
  216. + size_t *retlen, const u_char *buf)
  217. +{
  218. + struct nmbm_mtd *nm = container_of(mtd, struct nmbm_mtd, upper);
  219. +
  220. + /* Do not allow write past end of device */
  221. + if ((to + len) > mtd->size) {
  222. + pr_debug("%s: attempt to write beyond end of device\n",
  223. + __func__);
  224. + return -EINVAL;
  225. + }
  226. +
  227. + return nmbm_write_range(nm->ni, to, len, buf, MTD_OPS_PLACE_OOB,
  228. + retlen);
  229. +}
  230. +
  231. +static int nmbm_mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
  232. +{
  233. + struct nmbm_mtd *nm = container_of(mtd, struct nmbm_mtd, upper);
  234. + int ret;
  235. +
  236. + instr->state = MTD_ERASING;
  237. +
  238. + ret = nmbm_erase_block_range(nm->ni, instr->addr, instr->len,
  239. + &instr->fail_addr);
  240. + if (ret)
  241. + instr->state = MTD_ERASE_FAILED;
  242. + else
  243. + instr->state = MTD_ERASE_DONE;
  244. +
  245. + if (!ret)
  246. + /* FIXME */
  247. + /* mtd_erase_callback(instr); */
  248. + return ret;
  249. + else
  250. + ret = -EIO;
  251. +
  252. + return ret;
  253. +}
  254. +
  255. +static int nmbm_mtd_read_data(struct nmbm_mtd *nm, uint64_t addr,
  256. + struct mtd_oob_ops *ops, enum nmbm_oob_mode mode)
  257. +{
  258. + size_t len, ooblen, maxooblen, chklen;
  259. + uint32_t col, ooboffs;
  260. + uint8_t *datcache, *oobcache;
  261. + bool has_ecc_err = false;
  262. + int ret, max_bitflips = 0;
  263. +
  264. + col = addr & nm->lower->writesize_mask;
  265. + addr &= ~nm->lower->writesize_mask;
  266. + maxooblen = mtd_oobavail(nm->lower, ops);
  267. + ooboffs = ops->ooboffs;
  268. + ooblen = ops->ooblen;
  269. + len = ops->len;
  270. +
  271. + datcache = len ? nm->page_cache : NULL;
  272. + oobcache = ooblen ? nm->page_cache + nm->lower->writesize : NULL;
  273. +
  274. + ops->oobretlen = 0;
  275. + ops->retlen = 0;
  276. +
  277. + while (len || ooblen) {
  278. + schedule();
  279. +
  280. + ret = nmbm_read_single_page(nm->ni, addr, datcache, oobcache,
  281. + mode);
  282. + if (ret < 0 && ret != -EBADMSG)
  283. + return ret;
  284. +
  285. + /* Continue reading on ecc error */
  286. + if (ret == -EBADMSG)
  287. + has_ecc_err = true;
  288. +
  289. + /* Record the maximum bitflips between pages */
  290. + if (ret > max_bitflips)
  291. + max_bitflips = ret;
  292. +
  293. + if (len) {
  294. + /* Move data */
  295. + chklen = nm->lower->writesize - col;
  296. + if (chklen > len)
  297. + chklen = len;
  298. +
  299. + memcpy(ops->datbuf + ops->retlen, datcache + col,
  300. + chklen);
  301. + len -= chklen;
  302. + col = 0; /* (col + chklen) % */
  303. + ops->retlen += chklen;
  304. + }
  305. +
  306. + if (ooblen) {
  307. + /* Move oob */
  308. + chklen = maxooblen - ooboffs;
  309. + if (chklen > ooblen)
  310. + chklen = ooblen;
  311. +
  312. + memcpy(ops->oobbuf + ops->oobretlen, oobcache + ooboffs,
  313. + chklen);
  314. + ooblen -= chklen;
  315. + ooboffs = 0; /* (ooboffs + chklen) % maxooblen; */
  316. + ops->oobretlen += chklen;
  317. + }
  318. +
  319. + addr += nm->lower->writesize;
  320. + }
  321. +
  322. + if (has_ecc_err)
  323. + return -EBADMSG;
  324. +
  325. + return max_bitflips;
  326. +}
  327. +
  328. +static int nmbm_mtd_read_oob(struct mtd_info *mtd, loff_t from,
  329. + struct mtd_oob_ops *ops)
  330. +{
  331. + struct nmbm_mtd *nm = container_of(mtd, struct nmbm_mtd, upper);
  332. + uint32_t maxooblen;
  333. + enum nmbm_oob_mode mode;
  334. +
  335. + if (!ops->oobbuf && !ops->datbuf) {
  336. + if (ops->ooblen || ops->len)
  337. + return -EINVAL;
  338. +
  339. + return 0;
  340. + }
  341. +
  342. + switch (ops->mode) {
  343. + case MTD_OPS_PLACE_OOB:
  344. + mode = NMBM_MODE_PLACE_OOB;
  345. + break;
  346. + case MTD_OPS_AUTO_OOB:
  347. + mode = NMBM_MODE_AUTO_OOB;
  348. + break;
  349. + case MTD_OPS_RAW:
  350. + mode = NMBM_MODE_RAW;
  351. + break;
  352. + default:
  353. + pr_debug("%s: unsupported oob mode: %u\n", __func__, ops->mode);
  354. + return -ENOTSUPP;
  355. + }
  356. +
  357. + maxooblen = mtd_oobavail(mtd, ops);
  358. +
  359. + /* Do not allow read past end of device */
  360. + if (ops->datbuf && (from + ops->len) > mtd->size) {
  361. + pr_debug("%s: attempt to read beyond end of device\n",
  362. + __func__);
  363. + return -EINVAL;
  364. + }
  365. +
  366. + if (!ops->oobbuf) {
  367. + /* Optimized for reading data only */
  368. + return nmbm_read_range(nm->ni, from, ops->len, ops->datbuf,
  369. + mode, &ops->retlen);
  370. + }
  371. +
  372. + if (unlikely(ops->ooboffs >= maxooblen)) {
  373. + pr_debug("%s: attempt to start read outside oob\n",
  374. + __func__);
  375. + return -EINVAL;
  376. + }
  377. +
  378. + if (unlikely(from >= mtd->size ||
  379. + ops->ooboffs + ops->ooblen > ((mtd->size >> mtd->writesize_shift) -
  380. + (from >> mtd->writesize_shift)) * maxooblen)) {
  381. + pr_debug("%s: attempt to read beyond end of device\n",
  382. + __func__);
  383. + return -EINVAL;
  384. + }
  385. +
  386. + return nmbm_mtd_read_data(nm, from, ops, mode);
  387. +}
  388. +
  389. +static int nmbm_mtd_write_data(struct nmbm_mtd *nm, uint64_t addr,
  390. + struct mtd_oob_ops *ops, enum nmbm_oob_mode mode)
  391. +{
  392. + size_t len, ooblen, maxooblen, chklen;
  393. + uint32_t col, ooboffs;
  394. + uint8_t *datcache, *oobcache;
  395. + int ret;
  396. +
  397. + col = addr & nm->lower->writesize_mask;
  398. + addr &= ~nm->lower->writesize_mask;
  399. + maxooblen = mtd_oobavail(nm->lower, ops);
  400. + ooboffs = ops->ooboffs;
  401. + ooblen = ops->ooblen;
  402. + len = ops->len;
  403. +
  404. + datcache = len ? nm->page_cache : NULL;
  405. + oobcache = ooblen ? nm->page_cache + nm->lower->writesize : NULL;
  406. +
  407. + ops->oobretlen = 0;
  408. + ops->retlen = 0;
  409. +
  410. + while (len || ooblen) {
  411. + schedule();
  412. +
  413. + if (len) {
  414. + /* Move data */
  415. + chklen = nm->lower->writesize - col;
  416. + if (chklen > len)
  417. + chklen = len;
  418. +
  419. + memset(datcache, 0xff, col);
  420. + memcpy(datcache + col, ops->datbuf + ops->retlen,
  421. + chklen);
  422. + memset(datcache + col + chklen, 0xff,
  423. + nm->lower->writesize - col - chklen);
  424. + len -= chklen;
  425. + col = 0; /* (col + chklen) % */
  426. + ops->retlen += chklen;
  427. + }
  428. +
  429. + if (ooblen) {
  430. + /* Move oob */
  431. + chklen = maxooblen - ooboffs;
  432. + if (chklen > ooblen)
  433. + chklen = ooblen;
  434. +
  435. + memset(oobcache, 0xff, ooboffs);
  436. + memcpy(oobcache + ooboffs,
  437. + ops->oobbuf + ops->oobretlen, chklen);
  438. + memset(oobcache + ooboffs + chklen, 0xff,
  439. + nm->lower->oobsize - ooboffs - chklen);
  440. + ooblen -= chklen;
  441. + ooboffs = 0; /* (ooboffs + chklen) % maxooblen; */
  442. + ops->oobretlen += chklen;
  443. + }
  444. +
  445. + ret = nmbm_write_single_page(nm->ni, addr, datcache, oobcache,
  446. + mode);
  447. + if (ret)
  448. + return ret;
  449. +
  450. + addr += nm->lower->writesize;
  451. + }
  452. +
  453. + return 0;
  454. +}
  455. +
  456. +static int nmbm_mtd_write_oob(struct mtd_info *mtd, loff_t to,
  457. + struct mtd_oob_ops *ops)
  458. +{
  459. + struct nmbm_mtd *nm = container_of(mtd, struct nmbm_mtd, upper);
  460. + enum nmbm_oob_mode mode;
  461. + uint32_t maxooblen;
  462. +
  463. + if (!ops->oobbuf && !ops->datbuf) {
  464. + if (ops->ooblen || ops->len)
  465. + return -EINVAL;
  466. +
  467. + return 0;
  468. + }
  469. +
  470. + switch (ops->mode) {
  471. + case MTD_OPS_PLACE_OOB:
  472. + mode = NMBM_MODE_PLACE_OOB;
  473. + break;
  474. + case MTD_OPS_AUTO_OOB:
  475. + mode = NMBM_MODE_AUTO_OOB;
  476. + break;
  477. + case MTD_OPS_RAW:
  478. + mode = NMBM_MODE_RAW;
  479. + break;
  480. + default:
  481. + pr_debug("%s: unsupported oob mode: %u\n", __func__,
  482. + ops->mode);
  483. + return -ENOTSUPP;
  484. + }
  485. +
  486. + maxooblen = mtd_oobavail(mtd, ops);
  487. +
  488. + /* Do not allow write past end of device */
  489. + if (ops->datbuf && (to + ops->len) > mtd->size) {
  490. + pr_debug("%s: attempt to write beyond end of device\n",
  491. + __func__);
  492. + return -EINVAL;
  493. + }
  494. +
  495. + if (!ops->oobbuf) {
  496. + /* Optimized for writing data only */
  497. + return nmbm_write_range(nm->ni, to, ops->len, ops->datbuf,
  498. + mode, &ops->retlen);
  499. + }
  500. +
  501. + if (unlikely(ops->ooboffs >= maxooblen)) {
  502. + pr_debug("%s: attempt to start write outside oob\n",
  503. + __func__);
  504. + return -EINVAL;
  505. + }
  506. +
  507. + if (unlikely(to >= mtd->size ||
  508. + ops->ooboffs + ops->ooblen > ((mtd->size >> mtd->writesize_shift) -
  509. + (to >> mtd->writesize_shift)) * maxooblen)) {
  510. + pr_debug("%s: attempt to write beyond end of device\n",
  511. + __func__);
  512. + return -EINVAL;
  513. + }
  514. +
  515. + return nmbm_mtd_write_data(nm, to, ops, mode);
  516. +}
  517. +
  518. +static int nmbm_mtd_block_isbad(struct mtd_info *mtd, loff_t offs)
  519. +{
  520. + struct nmbm_mtd *nm = container_of(mtd, struct nmbm_mtd, upper);
  521. +
  522. + return nmbm_check_bad_block(nm->ni, offs);
  523. +}
  524. +
  525. +static int nmbm_mtd_block_markbad(struct mtd_info *mtd, loff_t offs)
  526. +{
  527. + struct nmbm_mtd *nm = container_of(mtd, struct nmbm_mtd, upper);
  528. +
  529. + return nmbm_mark_bad_block(nm->ni, offs);
  530. +}
  531. +
  532. +int nmbm_attach_mtd(struct mtd_info *lower, int flags, uint32_t max_ratio,
  533. + uint32_t max_reserved_blocks, struct mtd_info **upper)
  534. +{
  535. + struct nmbm_lower_device nld;
  536. + struct nmbm_instance *ni;
  537. + struct mtd_info *mtd;
  538. + struct nmbm_mtd *nm;
  539. + size_t namelen, alloc_size;
  540. + int ret;
  541. +
  542. + if (!lower)
  543. + return -EINVAL;
  544. +
  545. + if (lower->type != MTD_NANDFLASH || lower->flags != MTD_CAP_NANDFLASH)
  546. + return -ENOTSUPP;
  547. +
  548. + namelen = strlen(NMBM_UPPER_MTD_NAME) + 16;
  549. +
  550. + nm = calloc(sizeof(*nm) + lower->writesize + lower->oobsize + namelen + 1, 1);
  551. + if (!nm)
  552. + return -ENOMEM;
  553. +
  554. + nm->lower = lower;
  555. + nm->name = (char *)nm + sizeof(*nm);
  556. + nm->page_cache = (uint8_t *)nm->name + namelen + 1;
  557. +
  558. + nm->id = nmbm_id_cnt++;
  559. + snprintf(nm->name, namelen + 1, "%s%u", NMBM_UPPER_MTD_NAME, nm->id);
  560. +
  561. + memset(&nld, 0, sizeof(nld));
  562. +
  563. + nld.flags = flags;
  564. + nld.max_ratio = max_ratio;
  565. + nld.max_reserved_blocks = max_reserved_blocks;
  566. +
  567. + nld.size = lower->size;
  568. + nld.erasesize = lower->erasesize;
  569. + nld.writesize = lower->writesize;
  570. + nld.oobsize = lower->oobsize;
  571. + nld.oobavail = lower->oobavail;
  572. +
  573. + nld.arg = nm;
  574. + nld.read_page = nmbm_lower_read_page;
  575. + nld.write_page = nmbm_lower_write_page;
  576. + nld.erase_block = nmbm_lower_erase_block;
  577. + nld.is_bad_block = nmbm_lower_is_bad_block;
  578. + nld.mark_bad_block = nmbm_lower_mark_bad_block;
  579. +
  580. + nld.logprint = nmbm_lower_log;
  581. +
  582. + alloc_size = nmbm_calc_structure_size(&nld);
  583. + ni = calloc(alloc_size, 1);
  584. + if (!ni) {
  585. + free(nm);
  586. + return -ENOMEM;
  587. + }
  588. +
  589. + ret = nmbm_attach(&nld, ni);
  590. + if (ret) {
  591. + free(ni);
  592. + free(nm);
  593. + return ret;
  594. + }
  595. +
  596. + nm->ni = ni;
  597. +
  598. + /* Initialize upper mtd */
  599. + mtd = &nm->upper;
  600. +
  601. + mtd->name = nm->name;
  602. + mtd->type = MTD_DEV_TYPE_NMBM;
  603. + mtd->flags = lower->flags;
  604. +
  605. + mtd->size = (uint64_t)ni->data_block_count * ni->lower.erasesize;
  606. + mtd->erasesize = lower->erasesize;
  607. + mtd->writesize = lower->writesize;
  608. + mtd->writebufsize = lower->writesize;
  609. + mtd->oobsize = lower->oobsize;
  610. + mtd->oobavail = lower->oobavail;
  611. +
  612. + mtd->erasesize_shift = lower->erasesize_shift;
  613. + mtd->writesize_shift = lower->writesize_shift;
  614. + mtd->erasesize_mask = lower->erasesize_mask;
  615. + mtd->writesize_mask = lower->writesize_mask;
  616. +
  617. + mtd->bitflip_threshold = lower->bitflip_threshold;
  618. +
  619. + /* XXX: should this be duplicated? */
  620. + mtd->ooblayout = lower->ooblayout;
  621. + mtd->ecclayout = lower->ecclayout;
  622. +
  623. + mtd->ecc_step_size = lower->ecc_step_size;
  624. + mtd->ecc_strength = lower->ecc_strength;
  625. +
  626. + mtd->numeraseregions = lower->numeraseregions;
  627. + mtd->eraseregions = lower->eraseregions;
  628. +
  629. + mtd->_read = nmbm_mtd_read;
  630. + mtd->_write = nmbm_mtd_write;
  631. + mtd->_erase = nmbm_mtd_erase;
  632. + mtd->_read_oob = nmbm_mtd_read_oob;
  633. + mtd->_write_oob = nmbm_mtd_write_oob;
  634. + mtd->_block_isbad = nmbm_mtd_block_isbad;
  635. + mtd->_block_markbad = nmbm_mtd_block_markbad;
  636. +
  637. + *upper = mtd;
  638. +
  639. + list_add_tail(&nm->node, &nmbm_devs);
  640. +
  641. + return 0;
  642. +}
  643. +
  644. +int nmbm_free_mtd(struct mtd_info *upper)
  645. +{
  646. + struct nmbm_mtd *pos;
  647. +
  648. + if (!upper)
  649. + return -EINVAL;
  650. +
  651. + list_for_each_entry(pos, &nmbm_devs, node) {
  652. + if (&pos->upper == upper) {
  653. + list_del(&pos->node);
  654. +
  655. + nmbm_detach(pos->ni);
  656. + free(pos->ni);
  657. + free(pos);
  658. +
  659. + return 0;
  660. + }
  661. + }
  662. +
  663. + return -ENODEV;
  664. +}
  665. +
  666. +struct mtd_info *nmbm_mtd_get_upper_by_index(uint32_t index)
  667. +{
  668. + struct nmbm_mtd *nm;
  669. +
  670. + list_for_each_entry(nm, &nmbm_devs, node) {
  671. + if (nm->id == index)
  672. + return &nm->upper;
  673. + }
  674. +
  675. + return NULL;
  676. +}
  677. +
  678. +struct mtd_info *nmbm_mtd_get_upper(struct mtd_info *lower)
  679. +{
  680. + struct nmbm_mtd *nm;
  681. +
  682. + list_for_each_entry(nm, &nmbm_devs, node) {
  683. + if (nm->lower == lower)
  684. + return &nm->upper;
  685. + }
  686. +
  687. + return NULL;
  688. +}
  689. +
  690. +void nmbm_mtd_list_devices(void)
  691. +{
  692. + struct nmbm_mtd *nm;
  693. +
  694. + printf("Index NMBM device Lower device\n");
  695. + printf("========================================\n");
  696. +
  697. + list_for_each_entry(nm, &nmbm_devs, node) {
  698. + printf("%-8u%-20s%s\n", nm->id, nm->name, nm->lower->name);
  699. + }
  700. +}
  701. +
  702. +int nmbm_mtd_print_info(const char *name)
  703. +{
  704. + struct nmbm_mtd *nm;
  705. + bool found = false;
  706. +
  707. + list_for_each_entry(nm, &nmbm_devs, node) {
  708. + if (!strcmp(nm->name, name)) {
  709. + found = true;
  710. + break;
  711. + }
  712. + }
  713. +
  714. + if (!found) {
  715. + printf("Error: NMBM device '%s' not found\n", name);
  716. + return -ENODEV;
  717. + }
  718. +
  719. + printf("%s:\n", name);
  720. + printf("Total blocks: %u\n", nm->ni->block_count);
  721. + printf("Data blocks: %u\n", nm->ni->data_block_count);
  722. + printf("Management start block: %u\n", nm->ni->mgmt_start_ba);
  723. + printf("Info table size: 0x%x\n", nm->ni->info_table_size);
  724. +
  725. + if (nm->ni->main_table_ba)
  726. + printf("Main info table start block: %u\n", nm->ni->main_table_ba);
  727. + else
  728. + printf("Main info table start block: Not exist\n");
  729. +
  730. + if (nm->ni->backup_table_ba)
  731. + printf("Backup info table start block: %u\n", nm->ni->backup_table_ba);
  732. + else
  733. + printf("Backup info table start block: Not exist\n");
  734. +
  735. + printf("Signature block: %u\n", nm->ni->signature_ba);
  736. + printf("Mapping blocks top address: %u\n", nm->ni->mapping_blocks_top_ba);
  737. + printf("Mapping blocks limit address: %u\n", nm->ni->mapping_blocks_ba);
  738. +
  739. + return 0;
  740. +}
  741. +
  742. +static const char nmbm_block_legends[] = {
  743. + [NMBM_BLOCK_GOOD_DATA] = '-',
  744. + [NMBM_BLOCK_GOOD_MGMT] = '+',
  745. + [NMBM_BLOCK_BAD] = 'B',
  746. + [NMBM_BLOCK_MAIN_INFO_TABLE] = 'I',
  747. + [NMBM_BLOCK_BACKUP_INFO_TABLE] = 'i',
  748. + [NMBM_BLOCK_REMAPPED] = 'M',
  749. + [NMBM_BLOCK_SIGNATURE] = 'S',
  750. +};
  751. +
  752. +int nmbm_mtd_print_states(const char *name)
  753. +{
  754. + struct nmbm_mtd *nm;
  755. + enum nmmb_block_type bt;
  756. + bool found = false;
  757. + uint32_t i;
  758. +
  759. + list_for_each_entry(nm, &nmbm_devs, node) {
  760. + if (!strcmp(nm->name, name)) {
  761. + found = true;
  762. + break;
  763. + }
  764. + }
  765. +
  766. + if (!found) {
  767. + printf("Error: NMBM device '%s' not found\n", name);
  768. + return -ENODEV;
  769. + }
  770. +
  771. + printf("Physical blocks:\n");
  772. + printf("\n");
  773. +
  774. + printf("Legends:\n");
  775. + printf(" - Good data block\n");
  776. + printf(" + Good management block\n");
  777. + printf(" B Bad block\n");
  778. + printf(" I Main info table\n");
  779. + printf(" i Backup info table\n");
  780. + printf(" M Remapped spare block\n");
  781. + printf(" S Signature block\n");
  782. + printf("\n");
  783. +
  784. + for (i = 0; i < nm->ni->block_count; i++) {
  785. + if (i % 64 == 0)
  786. + printf(" ");
  787. +
  788. + bt = nmbm_debug_get_phys_block_type(nm->ni, i);
  789. + if (bt < __NMBM_BLOCK_TYPE_MAX)
  790. + putc(nmbm_block_legends[bt]);
  791. + else
  792. + putc('?');
  793. +
  794. + if (i % 64 == 63)
  795. + printf("\n");
  796. + }
  797. +
  798. + printf("\n");
  799. + printf("Logical blocks:\n");
  800. + printf("\n");
  801. +
  802. + printf("Legends:\n");
  803. + printf(" - Good block\n");
  804. + printf(" + Initially remapped block\n");
  805. + printf(" M Remapped block\n");
  806. + printf(" B Bad/Unmapped block\n");
  807. + printf("\n");
  808. +
  809. + for (i = 0; i < nm->ni->data_block_count; i++) {
  810. + if (i % 64 == 0)
  811. + printf(" ");
  812. +
  813. + if (nm->ni->block_mapping[i] < 0)
  814. + putc('B');
  815. + else if (nm->ni->block_mapping[i] == i)
  816. + putc('-');
  817. + else if (nm->ni->block_mapping[i] < nm->ni->data_block_count)
  818. + putc('+');
  819. + else if (nm->ni->block_mapping[i] > nm->ni->mapping_blocks_top_ba &&
  820. + nm->ni->block_mapping[i] < nm->ni->signature_ba)
  821. + putc('M');
  822. + else
  823. + putc('?');
  824. +
  825. + if (i % 64 == 63)
  826. + printf("\n");
  827. + }
  828. +
  829. + return 0;
  830. +}
  831. +
  832. +int nmbm_mtd_print_bad_blocks(const char *name)
  833. +{
  834. + struct nmbm_mtd *nm;
  835. + bool found = false;
  836. + uint32_t i;
  837. +
  838. + list_for_each_entry(nm, &nmbm_devs, node) {
  839. + if (!strcmp(nm->name, name)) {
  840. + found = true;
  841. + break;
  842. + }
  843. + }
  844. +
  845. + if (!found) {
  846. + printf("Error: NMBM device '%s' not found\n", name);
  847. + return -ENODEV;
  848. + }
  849. +
  850. + printf("Physical blocks:\n");
  851. +
  852. + for (i = 0; i < nm->ni->block_count; i++) {
  853. + switch (nmbm_debug_get_block_state(nm->ni, i)) {
  854. + case BLOCK_ST_BAD:
  855. + printf("%-12u [0x%08llx] - Bad\n", i,
  856. + (uint64_t)i << nm->ni->erasesize_shift);
  857. + break;
  858. + case BLOCK_ST_NEED_REMAP:
  859. + printf("%-12u [0x%08llx] - Awaiting remapping\n", i,
  860. + (uint64_t)i << nm->ni->erasesize_shift);
  861. + break;
  862. + }
  863. + }
  864. +
  865. + printf("\n");
  866. + printf("Logical blocks:\n");
  867. +
  868. + for (i = 0; i < nm->ni->data_block_count; i++) {
  869. + if (nm->ni->block_mapping[i] < 0) {
  870. + printf("%-12u [0x%08llx] - Bad\n", i,
  871. + (uint64_t)i << nm->ni->erasesize_shift);
  872. + }
  873. + }
  874. +
  875. + return 0;
  876. +}
  877. +
  878. +int nmbm_mtd_print_mappings(const char *name, int printall)
  879. +{
  880. + struct nmbm_mtd *nm;
  881. + bool found = false;
  882. + int32_t pb;
  883. + uint32_t i;
  884. +
  885. + list_for_each_entry(nm, &nmbm_devs, node) {
  886. + if (!strcmp(nm->name, name)) {
  887. + found = true;
  888. + break;
  889. + }
  890. + }
  891. +
  892. + if (!found) {
  893. + printf("Error: NMBM device '%s' not found\n", name);
  894. + return -ENODEV;
  895. + }
  896. +
  897. + printf("Logical Block Physical Block\n");
  898. + printf("==================================\n");
  899. +
  900. + if (!printall) {
  901. + for (i = 0; i < nm->ni->data_block_count; i++) {
  902. + pb = nm->ni->block_mapping[i];
  903. + if (pb < 0)
  904. + printf("%-20uUnmapped\n", i);
  905. + else if ((uint32_t)pb > nm->ni->mapping_blocks_top_ba &&
  906. + (uint32_t)pb < nm->ni->signature_ba)
  907. + printf("%-20u%u\n", i, pb);
  908. + }
  909. +
  910. + return 0;
  911. + }
  912. +
  913. + for (i = 0; i < nm->ni->data_block_count; i++) {
  914. + pb = nm->ni->block_mapping[i];
  915. +
  916. + if (pb >= 0)
  917. + printf("%-20u%u\n", i, pb);
  918. + else
  919. + printf("%-20uUnmapped\n", i);
  920. + }
  921. +
  922. + return 0;
  923. +}
  924. --- /dev/null
  925. +++ b/include/nmbm/nmbm-mtd.h
  926. @@ -0,0 +1,27 @@
  927. +/* SPDX-License-Identifier: GPL-2.0 */
  928. +/*
  929. + * Copyright (C) 2020 MediaTek Inc. All Rights Reserved.
  930. + *
  931. + * Author: Weijie Gao <[email protected]>
  932. + */
  933. +
  934. +#ifndef _NMBM_MTD_H_
  935. +#define _NMBM_MTD_H_
  936. +
  937. +#include <linux/mtd/mtd.h>
  938. +
  939. +int nmbm_attach_mtd(struct mtd_info *lower, int flags, uint32_t max_ratio,
  940. + uint32_t max_reserved_blocks, struct mtd_info **upper);
  941. +
  942. +int nmbm_free_mtd(struct mtd_info *upper);
  943. +
  944. +struct mtd_info *nmbm_mtd_get_upper_by_index(uint32_t index);
  945. +struct mtd_info *nmbm_mtd_get_upper(struct mtd_info *lower);
  946. +
  947. +void nmbm_mtd_list_devices(void);
  948. +int nmbm_mtd_print_info(const char *name);
  949. +int nmbm_mtd_print_states(const char *name);
  950. +int nmbm_mtd_print_bad_blocks(const char *name);
  951. +int nmbm_mtd_print_mappings(const char *name, int printall);
  952. +
  953. +#endif /* _NMBM_MTD_H_ */
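
Note (not part of the patch): the entry points declared in nmbm-mtd.h above are intended to be called from board code or an MTD setup command elsewhere in this series. The following is a minimal caller sketch only; the NMBM_F_CREATE flag (expected from the core <nmbm/nmbm.h> header added earlier in the series), the example ratio/block-count limits, and the use of get_nand_dev_by_index()/add_mtd_device() for lower-device lookup and upper-device registration are assumptions, not something this patch defines.

/*
 * Illustrative caller - NOT part of this patch.
 * Assumes NMBM_F_CREATE is provided by <nmbm/nmbm.h> and that the lower
 * device is U-Boot's first raw NAND MTD.
 */
#include <nand.h>
#include <linux/errno.h>
#include <linux/mtd/mtd.h>
#include <nmbm/nmbm.h>
#include <nmbm/nmbm-mtd.h>

static int board_attach_nmbm(void)
{
	struct mtd_info *lower, *upper;
	int ret;

	lower = get_nand_dev_by_index(0);	/* raw NAND below NMBM */
	if (!lower)
		return -ENODEV;

	/*
	 * Example limits for the reserved management area (ratio cap and
	 * absolute block-count cap); the values here are placeholders.
	 * NMBM_F_CREATE asks the core to build the info tables if none
	 * are found on flash.
	 */
	ret = nmbm_attach_mtd(lower, NMBM_F_CREATE, 1, 256, &upper);
	if (ret)
		return ret;

	/* Register the "nmbm0" upper MTD so mtdparts/UBI see only good blocks */
	return add_mtd_device(upper);
}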