330-mtk-bmt-support.patch

--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -15,6 +15,10 @@ config MTD_NAND_ECC
 	bool
 	depends on MTD_NAND_CORE
 
+config MTD_NAND_MTK_BMT
+	bool "Support MediaTek NAND Bad-block Management Table"
+	default n
+
 endmenu
 
 endmenu
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -2,6 +2,7 @@
 
 nandcore-objs := core.o bbt.o
 obj-$(CONFIG_MTD_NAND_CORE) += nandcore.o
+obj-$(CONFIG_MTD_NAND_MTK_BMT) += mtk_bmt.o
 
 obj-y	+= onenand/
 obj-y	+= raw/
--- /dev/null
+++ b/drivers/mtd/nand/mtk_bmt.c
@@ -0,0 +1,788 @@
+/*
+ * Copyright (c) 2017 MediaTek Inc.
+ * Author: Xiangsheng Hou <[email protected]>
+ * Copyright (c) 2020 Felix Fietkau <[email protected]>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/mtk_bmt.h>
+#include <linux/module.h>
+#include <linux/debugfs.h>
+
+#define MAIN_SIGNATURE_OFFSET	0
+#define OOB_SIGNATURE_OFFSET	1
+#define BBPOOL_RATIO		2
+
+#define BBT_LOG(fmt, ...) pr_debug("[BBT][%s|%d] " fmt "\n", __func__, __LINE__, ##__VA_ARGS__)
+
+/* The table size is configurable at runtime; the default covers 8k (0x2000) blocks */
+#define BB_TABLE_MAX	bmtd.table_size
+#define BMT_TABLE_MAX	(BB_TABLE_MAX * BBPOOL_RATIO / 100)
+#define BMT_TBL_DEF_VAL	0x0
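+
+/*
+ * Worked example (illustrative values, not mandated by the driver): with
+ * the default "mediatek,bmt-table-size" of 0x2000, BB_TABLE_MAX is 8192
+ * entries and BMT_TABLE_MAX is 8192 * 2 / 100 = 163 pool entries.
+ */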
+
+/*
+ * Burner Bad Block Table
+ * --------- Only supports SLC NAND chips! ---------
+ */
+
+struct bbbt {
+	char signature[3];
+	/* This version byte distinguishes the legacy and the new algorithm */
+#define BBMT_VERSION 2
+	unsigned char version;
+	/* The two tables below are written to SLC */
+	u16 bb_tbl[];
+};
+
+struct bbmt {
+	u16 block;
+#define NO_MAPPED 0
+#define NORMAL_MAPPED 1
+#define BMT_MAPPED 2
+	u16 mapped;
+};
+
+static struct bmt_desc {
+	struct mtd_info *mtd;
+
+	int (*_read_oob) (struct mtd_info *mtd, loff_t from,
+			  struct mtd_oob_ops *ops);
+	int (*_write_oob) (struct mtd_info *mtd, loff_t to,
+			   struct mtd_oob_ops *ops);
+	int (*_erase) (struct mtd_info *mtd, struct erase_info *instr);
+	int (*_block_isbad) (struct mtd_info *mtd, loff_t ofs);
+	int (*_block_markbad) (struct mtd_info *mtd, loff_t ofs);
+
+	struct bbbt *bbt;
+
+	struct dentry *debugfs_dir;
+
+	u32 table_size;
+	u32 pg_size;
+	u32 blk_size;
+	u16 pg_shift;
+	u16 blk_shift;
+	/* bbt logical address */
+	u16 pool_lba;
+	/* bbt physical address */
+	u16 pool_pba;
+	/* Maximum number of bad blocks the vendor guarantees */
+	u16 bb_max;
+	/* Total number of blocks on the NAND chip */
+	u16 total_blks;
+	/* The BMT is located in block bmt_tbl[bmt_blk_idx].block */
+	u16 bmt_blk_idx;
+	/* Number of pages needed to store 'struct bbbt' */
+	u32 bmt_pgs;
+
+	/* to compensate for driver-level remapping */
+	u8 oob_offset;
+} bmtd = {0};
+
+static unsigned char *nand_bbt_buf;
+static unsigned char *nand_data_buf;
+
+/* -------- Unit conversions -------- */
+static inline u32 blk_pg(u16 block)
+{
+	return (u32)(block << (bmtd.blk_shift - bmtd.pg_shift));
+}
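+
+/*
+ * Example with an assumed geometry of 128 KiB blocks (blk_shift = 17)
+ * and 2 KiB pages (pg_shift = 11): blk_pg() shifts by 6, i.e. 64 pages
+ * per block, so blk_pg(5) returns page 320.
+ */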
+
+/* -------- NAND operation wrappers -------- */
+static inline int
+bbt_nand_read(u32 page, unsigned char *dat, int dat_len,
+	      unsigned char *fdm, int fdm_len)
+{
+	struct mtd_oob_ops ops = {
+		.mode = MTD_OPS_PLACE_OOB,
+		.ooboffs = bmtd.oob_offset,
+		.oobbuf = fdm,
+		.ooblen = fdm_len,
+		.datbuf = dat,
+		.len = dat_len,
+	};
+
+	return bmtd._read_oob(bmtd.mtd, (loff_t)page << bmtd.pg_shift, &ops);
+}
+
+static inline int bbt_nand_erase(u16 block)
+{
+	struct mtd_info *mtd = bmtd.mtd;
+	struct erase_info instr = {
+		.addr = (loff_t)block << bmtd.blk_shift,
+		.len = bmtd.blk_size,
+	};
+
+	return bmtd._erase(mtd, &instr);
+}
+
+/* -------- Bad Block Management -------- */
+static inline struct bbmt *bmt_tbl(struct bbbt *bbbt)
+{
+	return (struct bbmt *)&bbbt->bb_tbl[bmtd.table_size];
+}
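+
+/*
+ * Memory layout behind bmt_tbl(), assuming the default 0x2000-entry table:
+ *
+ *   offset 0      signature[3] + version           4 bytes
+ *   offset 4      bb_tbl[0..0x1fff] (u16 each)     16384 bytes
+ *   offset 16388  bmt_tbl entries (struct bbmt)    4 bytes each
+ *
+ * i.e. bmt_tbl(bbt) simply points just past the last bb_tbl entry.
+ */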
+
+static int
+read_bmt(u16 block, unsigned char *dat, unsigned char *fdm, int fdm_len)
+{
+	u32 len = bmtd.bmt_pgs << bmtd.pg_shift;
+
+	return bbt_nand_read(blk_pg(block), dat, len, fdm, fdm_len);
+}
+
+static int write_bmt(u16 block, unsigned char *dat)
+{
+	struct mtd_oob_ops ops = {
+		.mode = MTD_OPS_PLACE_OOB,
+		.ooboffs = OOB_SIGNATURE_OFFSET + bmtd.oob_offset,
+		.oobbuf = "bmt",
+		.ooblen = 3,
+		.datbuf = dat,
+		.len = bmtd.bmt_pgs << bmtd.pg_shift,
+	};
+	loff_t addr = (loff_t)block << bmtd.blk_shift;
+
+	return bmtd._write_oob(bmtd.mtd, addr, &ops);
+}
+
+static u16 find_valid_block(u16 block)
+{
+	u8 fdm[4];
+	int ret;
+	int loop = 0;
+
+retry:
+	if (block >= bmtd.total_blks)
+		return 0;
+
+	ret = bbt_nand_read(blk_pg(block), nand_data_buf, bmtd.pg_size,
+			    fdm, sizeof(fdm));
+	/* Check the first FDM byte to judge whether the block is bad */
+	if (ret || fdm[0] != 0xff) {
+		pr_info("nand: found bad block 0x%x\n", block);
+		if (loop >= bmtd.bb_max) {
+			pr_info("nand: FATAL ERR: too many bad blocks!!\n");
+			return 0;
+		}
+
+		loop++;
+		block++;
+		goto retry;
+	}
+
+	return block;
+}
+
+/* Find all bad blocks and fill in the mapping table */
+static int scan_bad_blocks(struct bbbt *bbt)
+{
+	int i;
+	u16 block = 0;
+
+	/* On first-time programming, block 0 MUST NOT be a bad block;
+	 * this is guaranteed by the vendor
+	 */
+	bbt->bb_tbl[0] = 0;
+
+	/*
+	 * Construct the mapping table of the normal data area (non-PMT/BMTPOOL)
+	 * G - Good block; B - Bad block
+	 *	    ---------------------------
+	 * physical |G|G|B|G|B|B|G|G|G|G|B|G|B|
+	 *	    ---------------------------
+	 * What bb_tbl[i] looks like:
+	 *   physical block(i):
+	 *	0 1 2 3 4 5 6 7 8 9 a b c
+	 *   mapped block(bb_tbl[i]):
+	 *	0 1 3 6 7 8 9 b ......
+	 * ATTENTION:
+	 *	If a new bad block (n) occurs, search bmt_tbl for an
+	 *	available block (x) and set bb_tbl[n] = x
+	 */
+	for (i = 1; i < bmtd.pool_lba; i++) {
+		bbt->bb_tbl[i] = find_valid_block(bbt->bb_tbl[i - 1] + 1);
+		BBT_LOG("bb_tbl[0x%x] = 0x%x", i, bbt->bb_tbl[i]);
+		if (bbt->bb_tbl[i] == 0)
+			return -1;
+	}
+
+	/* Physical start block of the BMT pool */
+	bmtd.pool_pba = bbt->bb_tbl[i - 1] + 1;
+	if (bmtd.pool_pba >= bmtd.total_blks - 2) {
+		pr_info("nand: FATAL ERR: Too many bad blocks!!\n");
+		return -1;
+	}
+
+	BBT_LOG("pool_pba=0x%x", bmtd.pool_pba);
+	i = 0;
+	block = bmtd.pool_pba;
+	/*
+	 * The bmt table is used for runtime bad block mapping
+	 * G - Good block; B - Bad block
+	 *	    ---------------------------
+	 * physical |G|G|B|G|B|B|G|G|G|G|B|G|B|
+	 *	    ---------------------------
+	 * block:    0 1 2 3 4 5 6 7 8 9 a b c
+	 * What bmt_tbl[i] looks like in the initial state:
+	 *   i:
+	 *	0 1 2 3 4 5 6 7
+	 *   bmt_tbl[i].block:
+	 *	0 1 3 6 7 8 9 b
+	 *   bmt_tbl[i].mapped:
+	 *	N N N N N N N B
+	 *	N - Not mapped (available)
+	 *	M - Mapped
+	 *	B - BMT
+	 * ATTENTION:
+	 *	The BMT is always kept in the last valid block of the pool
+	 */
+	while ((block = find_valid_block(block)) != 0) {
+		bmt_tbl(bbt)[i].block = block;
+		bmt_tbl(bbt)[i].mapped = NO_MAPPED;
+		BBT_LOG("bmt_tbl[%d].block = 0x%x", i, block);
+		block++;
+		i++;
+	}
+
+	if (i < 1) {
+		pr_info("nand: FATAL ERR: no space to store BMT!!\n");
+		return -1;
+	}
+
+	/* i - the number of available blocks in the pool, i.e. the length of bmt_tbl[]
+	 * bmtd.bmt_blk_idx - bmt_tbl[bmtd.bmt_blk_idx].block => the BMT block
+	 * (check i first: with an empty pool, i - 1 would index out of bounds)
+	 */
+	bmtd.bmt_blk_idx = i - 1;
+	bmt_tbl(bbt)[bmtd.bmt_blk_idx].mapped = BMT_MAPPED;
+
+	pr_info("[BBT] %d available blocks in BMT pool\n", i);
+
+	return 0;
+}
+
+static bool is_valid_bmt(unsigned char *buf, unsigned char *fdm)
+{
+	struct bbbt *bbt = (struct bbbt *)buf;
+	u8 *sig = (u8 *)bbt->signature + MAIN_SIGNATURE_OFFSET;
+
+	if (memcmp(bbt->signature + MAIN_SIGNATURE_OFFSET, "BMT", 3) == 0 &&
+	    memcmp(fdm + OOB_SIGNATURE_OFFSET, "bmt", 3) == 0) {
+		if (bbt->version == BBMT_VERSION)
+			return true;
+	}
+	BBT_LOG("BMT version mismatch, please upgrade preloader and U-Boot! sig=%02x%02x%02x, fdm=%02x%02x%02x",
+		sig[0], sig[1], sig[2],
+		fdm[1], fdm[2], fdm[3]);
+	return false;
+}
+
+static u16 get_bmt_index(struct bbmt *bmt)
+{
+	int i = 0;
+
+	while (bmt[i].block != BMT_TBL_DEF_VAL) {
+		if (bmt[i].mapped == BMT_MAPPED)
+			return i;
+		i++;
+	}
+	return 0;
+}
+
+static struct bbbt *scan_bmt(u16 block)
+{
+	u8 fdm[4];
+
+	if (block < bmtd.pool_lba)
+		return NULL;
+
+	if (read_bmt(block, nand_bbt_buf, fdm, sizeof(fdm)))
+		return scan_bmt(block - 1);
+
+	if (is_valid_bmt(nand_bbt_buf, fdm)) {
+		bmtd.bmt_blk_idx = get_bmt_index(bmt_tbl((struct bbbt *)nand_bbt_buf));
+		if (bmtd.bmt_blk_idx == 0) {
+			pr_info("[BBT] FATAL ERR: bmt block index is wrong!\n");
+			return NULL;
+		}
+		pr_info("[BBT] BMT.v2 is found at 0x%x\n", block);
+		return (struct bbbt *)nand_bbt_buf;
+	} else
+		return scan_bmt(block - 1);
+}
+
+/* Write the Burner Bad Block Table to NAND flash
+ * n - write the BMT to bmt_tbl[n]
+ */
+static u16 upload_bmt(struct bbbt *bbt, int n)
+{
+	u16 block;
+
+retry:
+	if (n < 0 || bmt_tbl(bbt)[n].mapped == NORMAL_MAPPED) {
+		pr_info("nand: FATAL ERR: no space to store BMT!\n");
+		return (u16)-1;
+	}
+
+	block = bmt_tbl(bbt)[n].block;
+	BBT_LOG("n = 0x%x, block = 0x%x", n, block);
+	if (bbt_nand_erase(block)) {
+		bmt_tbl(bbt)[n].block = 0;
+		/* erase failed, try the previous block: bmt_tbl[n - 1].block */
+		n--;
+		goto retry;
+	}
+
+	/* The main signature offset is fixed at 0,
+	 * the OOB signature offset is fixed at 1
+	 */
+	memcpy(bbt->signature + MAIN_SIGNATURE_OFFSET, "BMT", 3);
+	bbt->version = BBMT_VERSION;
+
+	if (write_bmt(block, (unsigned char *)bbt)) {
+		bmt_tbl(bbt)[n].block = 0;
+
+		/* write failed, try the previous block: bmt_tbl[n - 1].block */
+		n--;
+		goto retry;
+	}
+
+	/* Return the current index (n) into the BMT pool (bmt_tbl[n]) */
+	return n;
+}
+
+static u16 find_valid_block_in_pool(struct bbbt *bbt)
+{
+	int i;
+
+	if (bmtd.bmt_blk_idx == 0)
+		goto error;
+
+	for (i = 0; i < bmtd.bmt_blk_idx; i++) {
+		if (bmt_tbl(bbt)[i].block != 0 && bmt_tbl(bbt)[i].mapped == NO_MAPPED) {
+			bmt_tbl(bbt)[i].mapped = NORMAL_MAPPED;
+			return bmt_tbl(bbt)[i].block;
+		}
+	}
+
+error:
+	pr_info("nand: FATAL ERR: the BMT pool has run out!\n");
+	return 0;
+}
+
+/* We hit a bad block: mark it bad and map it to a valid block in the pool;
+ * on a write failure the caller must rewrite the data to the mapped block
+ */
+static bool update_bmt(u16 block)
+{
+	u16 mapped_blk;
+	struct bbbt *bbt;
+
+	bbt = bmtd.bbt;
+	mapped_blk = find_valid_block_in_pool(bbt);
+	if (mapped_blk == 0)
+		return false;
+
+	/* Map the new bad block to an available block in the pool */
+	bbt->bb_tbl[block] = mapped_blk;
+	bmtd.bmt_blk_idx = upload_bmt(bbt, bmtd.bmt_blk_idx);
+
+	return true;
+}
+
+u16 get_mapping_block_index(int block)
+{
+	int mapping_block;
+
+	if (block < bmtd.pool_lba)
+		mapping_block = bmtd.bbt->bb_tbl[block];
+	else
+		mapping_block = block;
+	BBT_LOG("0x%x mapped to 0x%x", block, mapping_block);
+
+	return mapping_block;
+}
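+
+/*
+ * Example: if block 2 went bad during the initial scan, bb_tbl[2] might
+ * hold 3, so get_mapping_block_index(2) returns 3 and every access to
+ * block 2 lands on block 3. Blocks at or above pool_lba (the BMT pool
+ * itself) are never remapped here.
+ */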
+
+static int
+mtk_bmt_read(struct mtd_info *mtd, loff_t from,
+	     struct mtd_oob_ops *ops)
+{
+	struct mtd_oob_ops cur_ops = *ops;
+	int retry_count = 0;
+	loff_t cur_from;
+	int ret;
+
+	ops->retlen = 0;
+	ops->oobretlen = 0;
+
+	while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) {
+		u32 offset = from & (bmtd.blk_size - 1);
+		u32 block = from >> bmtd.blk_shift;
+		u32 cur_block;
+
+		cur_block = get_mapping_block_index(block);
+		cur_from = ((loff_t)cur_block << bmtd.blk_shift) + offset;
+
+		cur_ops.oobretlen = 0;
+		cur_ops.retlen = 0;
+		cur_ops.len = min_t(u32, mtd->erasesize - offset,
+				    ops->len - ops->retlen);
+		ret = bmtd._read_oob(mtd, cur_from, &cur_ops);
+		if (ret < 0) {
+			update_bmt(block);
+			if (retry_count++ < 10)
+				continue;
+
+			return ret;
+		}
+
+		ops->retlen += cur_ops.retlen;
+		ops->oobretlen += cur_ops.oobretlen;
+
+		cur_ops.ooboffs = 0;
+		cur_ops.datbuf += cur_ops.retlen;
+		cur_ops.oobbuf += cur_ops.oobretlen;
+		cur_ops.ooblen -= cur_ops.oobretlen;
+
+		if (!cur_ops.len)
+			cur_ops.len = mtd->erasesize - offset;
+
+		from += cur_ops.len;
+		retry_count = 0;
+	}
+
+	return 0;
+}
+
+static int
+mtk_bmt_write(struct mtd_info *mtd, loff_t to,
+	      struct mtd_oob_ops *ops)
+{
+	struct mtd_oob_ops cur_ops = *ops;
+	int retry_count = 0;
+	loff_t cur_to;
+	int ret;
+
+	ops->retlen = 0;
+	ops->oobretlen = 0;
+
+	while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) {
+		u32 offset = to & (bmtd.blk_size - 1);
+		u32 block = to >> bmtd.blk_shift;
+		u32 cur_block;
+
+		cur_block = get_mapping_block_index(block);
+		cur_to = ((loff_t)cur_block << bmtd.blk_shift) + offset;
+
+		cur_ops.oobretlen = 0;
+		cur_ops.retlen = 0;
+		cur_ops.len = min_t(u32, bmtd.blk_size - offset,
+				    ops->len - ops->retlen);
+		ret = bmtd._write_oob(mtd, cur_to, &cur_ops);
+		if (ret < 0) {
+			update_bmt(block);
+			if (retry_count++ < 10)
+				continue;
+
+			return ret;
+		}
+
+		ops->retlen += cur_ops.retlen;
+		ops->oobretlen += cur_ops.oobretlen;
+
+		cur_ops.ooboffs = 0;
+		cur_ops.datbuf += cur_ops.retlen;
+		cur_ops.oobbuf += cur_ops.oobretlen;
+		cur_ops.ooblen -= cur_ops.oobretlen;
+
+		if (!cur_ops.len)
+			cur_ops.len = mtd->erasesize - offset;
+
+		to += cur_ops.len;
+		retry_count = 0;
+	}
+
+	return 0;
+}
+
+static int
+mtk_bmt_mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+	struct erase_info mapped_instr = {
+		.len = bmtd.blk_size,
+	};
+	int retry_count = 0;
+	u64 start_addr, end_addr;
+	int ret = 0;	/* a zero-length request erases nothing and succeeds */
+	u16 orig_block, block;
+
+	start_addr = instr->addr & (~mtd->erasesize_mask);
+	end_addr = instr->addr + instr->len;
+
+	while (start_addr < end_addr) {
+		orig_block = start_addr >> bmtd.blk_shift;
+		block = get_mapping_block_index(orig_block);
+		mapped_instr.addr = (loff_t)block << bmtd.blk_shift;
+		ret = bmtd._erase(mtd, &mapped_instr);
+		if (ret) {
+			update_bmt(orig_block);
+			if (retry_count++ < 10)
+				continue;
+			instr->fail_addr = start_addr;
+			break;
+		}
+		start_addr += mtd->erasesize;
+		retry_count = 0;
+	}
+
+	return ret;
+}
+
+static int
+mtk_bmt_block_isbad(struct mtd_info *mtd, loff_t ofs)
+{
+	int retry_count = 0;
+	u16 orig_block = ofs >> bmtd.blk_shift;
+	u16 block;
+	int ret;
+
+retry:
+	block = get_mapping_block_index(orig_block);
+	ret = bmtd._block_isbad(mtd, (loff_t)block << bmtd.blk_shift);
+	if (ret) {
+		update_bmt(orig_block);
+		if (retry_count++ < 10)
+			goto retry;
+	}
+	return ret;
+}
+
+static int
+mtk_bmt_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+	u16 orig_block = ofs >> bmtd.blk_shift;
+	u16 block = get_mapping_block_index(orig_block);
+
+	update_bmt(orig_block);
+
+	return bmtd._block_markbad(mtd, (loff_t)block << bmtd.blk_shift);
+}
+
+static void
+mtk_bmt_replace_ops(struct mtd_info *mtd)
+{
+	bmtd._read_oob = mtd->_read_oob;
+	bmtd._write_oob = mtd->_write_oob;
+	bmtd._erase = mtd->_erase;
+	bmtd._block_isbad = mtd->_block_isbad;
+	bmtd._block_markbad = mtd->_block_markbad;
+
+	mtd->_read_oob = mtk_bmt_read;
+	mtd->_write_oob = mtk_bmt_write;
+	mtd->_erase = mtk_bmt_mtd_erase;
+	mtd->_block_isbad = mtk_bmt_block_isbad;
+	mtd->_block_markbad = mtk_bmt_block_markbad;
+}
+
+static int mtk_bmt_debug_mark_good(void *data, u64 val)
+{
+	u32 block = val >> bmtd.blk_shift;
+
+	bmtd.bbt->bb_tbl[block] = block;
+	bmtd.bmt_blk_idx = upload_bmt(bmtd.bbt, bmtd.bmt_blk_idx);
+
+	return 0;
+}
+
+static int mtk_bmt_debug_mark_bad(void *data, u64 val)
+{
+	u32 block = val >> bmtd.blk_shift;
+
+	update_bmt(block);
+
+	return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_mark_good, NULL, mtk_bmt_debug_mark_good, "%llu\n");
+DEFINE_DEBUGFS_ATTRIBUTE(fops_mark_bad, NULL, mtk_bmt_debug_mark_bad, "%llu\n");
+
+static void
+mtk_bmt_add_debugfs(void)
+{
+	struct dentry *dir;
+
+	dir = bmtd.debugfs_dir = debugfs_create_dir("mtk-bmt", NULL);
+	if (!dir)
+		return;
+
+	debugfs_create_file_unsafe("mark_good", S_IWUSR, dir, NULL, &fops_mark_good);
+	debugfs_create_file_unsafe("mark_bad", S_IWUSR, dir, NULL, &fops_mark_bad);
+}
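+
+/*
+ * Usage sketch (assumes debugfs is mounted at /sys/kernel/debug): writing
+ * a flash byte offset to these files remaps or restores the block that
+ * contains it, e.g.
+ *
+ *   echo 8257536 > /sys/kernel/debug/mtk-bmt/mark_bad
+ *
+ * marks the block containing offset 8257536 (0x7e0000) bad and maps it to
+ * a spare block from the BMT pool.
+ */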
+
+void mtk_bmt_detach(struct mtd_info *mtd)
+{
+	if (bmtd.mtd != mtd)
+		return;
+
+	if (bmtd.debugfs_dir)
+		debugfs_remove_recursive(bmtd.debugfs_dir);
+	bmtd.debugfs_dir = NULL;
+
+	kfree(nand_bbt_buf);
+	kfree(nand_data_buf);
+
+	mtd->_read_oob = bmtd._read_oob;
+	mtd->_write_oob = bmtd._write_oob;
+	mtd->_erase = bmtd._erase;
+	mtd->_block_isbad = bmtd._block_isbad;
+	mtd->_block_markbad = bmtd._block_markbad;
+	mtd->size = (loff_t)bmtd.total_blks << bmtd.blk_shift;
+
+	memset(&bmtd, 0, sizeof(bmtd));
+}
+
+/* Attach the bad-block mapping layer to @mtd and reserve the BMT pool
+ * at the end of the chip
+ */
+int mtk_bmt_attach(struct mtd_info *mtd)
+{
+	struct device_node *np;
+	struct bbbt *bbt;
+	u32 bufsz;
+	u32 block;
+	u16 total_blocks, pmt_block;
+	int ret = 0;
+	u32 bmt_pool_size, bmt_table_size;
+
+	/* only a single attached instance is supported */
+	if (bmtd.mtd)
+		return -ENOSPC;
+
+	np = mtd_get_of_node(mtd);
+	if (!np)
+		return 0;
+
+	if (!of_property_read_bool(np, "mediatek,bmt-v2"))
+		return 0;
+
+	if (of_property_read_u32(np, "mediatek,bmt-pool-size",
+				 &bmt_pool_size) != 0)
+		bmt_pool_size = 80;
+
+	if (of_property_read_u8(np, "mediatek,bmt-oob-offset",
+				&bmtd.oob_offset) != 0)
+		bmtd.oob_offset = 0;
+
+	if (of_property_read_u32(np, "mediatek,bmt-table-size",
+				 &bmt_table_size) != 0)
+		bmt_table_size = 0x2000U;
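+
+	/*
+	 * Illustrative device-tree snippet (node name and values are
+	 * examples, not requirements):
+	 *
+	 *	&snand {
+	 *		mediatek,bmt-v2;
+	 *		mediatek,bmt-pool-size = <80>;
+	 *		mediatek,bmt-table-size = <0x2000>;
+	 *		mediatek,bmt-oob-offset = /bits/ 8 <0>;
+	 *	};
+	 */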
+
+	bmtd.mtd = mtd;
+	mtk_bmt_replace_ops(mtd);
+
+	bmtd.table_size = bmt_table_size;
+	bmtd.blk_size = mtd->erasesize;
+	bmtd.blk_shift = ffs(bmtd.blk_size) - 1;
+	bmtd.pg_size = mtd->writesize;
+	bmtd.pg_shift = ffs(bmtd.pg_size) - 1;
+	total_blocks = mtd->size >> bmtd.blk_shift;
+	pmt_block = total_blocks - bmt_pool_size - 2;
+
+	mtd->size = (loff_t)pmt_block << bmtd.blk_shift;
+
+	/*
+	 *  ---------------------------------------
+	 * | PMT(2blks) | BMT POOL(totalblks * 2%) |
+	 *  ---------------------------------------
+	 * ^            ^
+	 * |            |
+	 * pmt_block    pmt_block + 2 blocks (pool_lba)
+	 *
+	 * ATTENTION:
+	 * The blocks ahead of the boundary block are stored in bb_tbl,
+	 * and the blocks behind it are stored in bmt_tbl
+	 */
+
+	bmtd.pool_lba = (u16)(pmt_block + 2);
+	bmtd.total_blks = total_blocks;
+	bmtd.bb_max = bmtd.total_blks * BBPOOL_RATIO / 100;
+
+	/* Size of the in-memory BMT table buffer, rounded up to whole pages */
+	bufsz = round_up(sizeof(struct bbbt) +
+			 bmt_table_size * sizeof(struct bbmt), bmtd.pg_size);
+	bmtd.bmt_pgs = bufsz >> bmtd.pg_shift;
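+
+	/*
+	 * Worked example (illustrative): with 2 KiB pages and the default
+	 * 0x2000-entry table, bufsz = round_up(4 + 8192 * 4, 2048) = 34816
+	 * bytes, so bmt_pgs = 17 pages.
+	 */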
+
+	nand_bbt_buf = kzalloc(bufsz, GFP_KERNEL);
+	nand_data_buf = kzalloc(bmtd.pg_size, GFP_KERNEL);
+
+	if (!nand_bbt_buf || !nand_data_buf) {
+		pr_info("nand: FATAL ERR: buffer allocation failed!\n");
+		ret = -1;
+		goto error;
+	}
+
+	memset(nand_bbt_buf, 0xff, bufsz);
+	memset(nand_data_buf, 0xff, bmtd.pg_size);
+
+	BBT_LOG("bbtbuf=0x%p(0x%x) dat=0x%p(0x%x)",
+		nand_bbt_buf, bufsz, nand_data_buf, bmtd.pg_size);
+	BBT_LOG("pool_lba=0x%x total_blks=0x%x bb_max=0x%x",
+		bmtd.pool_lba, bmtd.total_blks, bmtd.bb_max);
+
+	/* Scanning starts from the first page of the last block
+	 * of the whole flash
+	 */
+	bbt = scan_bmt(bmtd.total_blks - 1);
+	if (!bbt) {
+		/* BMT not found */
+		if (bmtd.total_blks > BB_TABLE_MAX + BMT_TABLE_MAX) {
+			pr_info("nand: FATAL: Too many blocks, can not support!\n");
+			ret = -1;
+			goto error;
+		}
+
+		bbt = (struct bbbt *)nand_bbt_buf;
+		/* Clear only the pool entries that fit into the buffer */
+		memset(bmt_tbl(bbt), BMT_TBL_DEF_VAL,
+		       bufsz - sizeof(struct bbbt) - bmtd.table_size * sizeof(u16));
+
+		if (scan_bad_blocks(bbt)) {
+			ret = -1;
+			goto error;
+		}
+
+		/* The BMT always lives in the last valid block of the pool */
+		bmtd.bmt_blk_idx = upload_bmt(bbt, bmtd.bmt_blk_idx);
+		if (bmtd.bmt_blk_idx == (u16)-1) {
+			ret = -1;
+			goto error;
+		}
+		if (bmtd.bmt_blk_idx == 0)
+			pr_info("nand: Warning: no available block in BMT pool!\n");
+
+		block = bmt_tbl(bbt)[bmtd.bmt_blk_idx].block;
+		pr_notice("[BBT] BMT.v2 is written into PBA:0x%x\n", block);
+	}
+	mtk_bmt_add_debugfs();
+
+	bmtd.bbt = bbt;
+	return 0;
+
+error:
+	mtk_bmt_detach(mtd);
+	return ret;
+}
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Xiangsheng Hou <[email protected]>, Felix Fietkau <[email protected]>");
+MODULE_DESCRIPTION("Bad Block mapping management v2 for MediaTek NAND Flash Driver");
+
--- /dev/null
+++ b/include/linux/mtd/mtk_bmt.h
@@ -0,0 +1,18 @@
+#ifndef __MTK_BMT_H
+#define __MTK_BMT_H
+
+#ifdef CONFIG_MTD_NAND_MTK_BMT
+int mtk_bmt_attach(struct mtd_info *mtd);
+void mtk_bmt_detach(struct mtd_info *mtd);
+#else
+static inline int mtk_bmt_attach(struct mtd_info *mtd)
+{
+	return 0;
+}
+
+static inline void mtk_bmt_detach(struct mtd_info *mtd)
+{
+}
+#endif
+
+#endif
--- a/drivers/mtd/mtk-snand/mtk-snand-mtd.c
+++ b/drivers/mtd/mtk-snand/mtk-snand-mtd.c
@@ -16,6 +16,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/wait.h>
 #include <linux/mtd/mtd.h>
+#include <linux/mtd/mtk_bmt.h>
 #include <linux/mtd/partitions.h>
 #include <linux/of_platform.h>
@@ -612,6 +613,8 @@ static int mtk_snand_probe(struct platfo
 	mtd->_block_isbad = mtk_snand_mtd_block_isbad;
 	mtd->_block_markbad = mtk_snand_mtd_block_markbad;
 
+	mtk_bmt_attach(mtd);
+
 	ret = mtd_device_register(mtd, NULL, 0);
 	if (ret) {
 		dev_err(msm->pdev.dev, "failed to register mtd partition\n");
@@ -623,6 +626,7 @@ static int mtk_snand_probe(struct platfo
 	return 0;
 errout4:
+	mtk_bmt_detach(mtd);
 	devm_kfree(msm->pdev.dev, msm->page_cache);
 errout3:
@@ -650,6 +654,8 @@ static int mtk_snand_remove(struct platf
 	if (ret)
 		return ret;
 
+	mtk_bmt_detach(mtd);
+
 	mtk_snand_cleanup(msm->snf);
 	if (msm->irq >= 0)