mtk_bmt.c
/*
 * Copyright (c) 2017 MediaTek Inc.
 * Author: Xiangsheng Hou <[email protected]>
 * Copyright (c) 2020 Felix Fietkau <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/slab.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/mtk_bmt.h>
#include <linux/module.h>
#include <linux/debugfs.h>

#define MAIN_SIGNATURE_OFFSET	0
#define OOB_SIGNATURE_OFFSET	1
#define BBPOOL_RATIO		2

#define BBT_LOG(fmt, ...) pr_debug("[BBT][%s|%d] " fmt "\n", __func__, __LINE__, ##__VA_ARGS__)

/* Maximum 8k blocks */
#define BB_TABLE_MAX	bmtd.table_size
#define BMT_TABLE_MAX	(BB_TABLE_MAX * BBPOOL_RATIO / 100)
#define BMT_TBL_DEF_VAL	0x0
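/*
 * Illustrative sizing (worked example, not part of the original code):
 * with the default mediatek,bmt-table-size of 0x2000 (8192 entries, see
 * mtk_bmt_attach() below), BB_TABLE_MAX is 8192 and BMT_TABLE_MAX is
 * 8192 * 2 / 100 = 163 remap entries reserved for the BMT pool.
 */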
/*
 * Burner Bad Block Table
 * --------- Only supports SLC NAND chips! ----------
 */
struct bbbt {
	char signature[3];
	/* This version is used to distinguish the legacy and new algorithm */
#define BBMT_VERSION 2
	unsigned char version;
	/* The two tables below will be written to SLC */
	u16 bb_tbl[];
};

struct bbmt {
	u16 block;
#define NO_MAPPED 0
#define NORMAL_MAPPED 1
#define BMT_MAPPED 2
	u16 mapped;
};
static struct bmt_desc {
	struct mtd_info *mtd;

	int (*_read_oob) (struct mtd_info *mtd, loff_t from,
			  struct mtd_oob_ops *ops);
	int (*_write_oob) (struct mtd_info *mtd, loff_t to,
			   struct mtd_oob_ops *ops);
	int (*_erase) (struct mtd_info *mtd, struct erase_info *instr);
	int (*_block_isbad) (struct mtd_info *mtd, loff_t ofs);
	int (*_block_markbad) (struct mtd_info *mtd, loff_t ofs);

	struct bbbt *bbt;

	struct dentry *debugfs_dir;

	u32 table_size;
	u32 pg_size;
	u32 blk_size;
	u16 pg_shift;
	u16 blk_shift;
	/* BMT pool start, logical block address */
	u16 pool_lba;
	/* BMT pool start, physical block address */
	u16 pool_pba;
	/* Maximum count of bad blocks guaranteed by the vendor */
	u16 bb_max;
	/* Total number of blocks of the NAND chip */
	u16 total_blks;
	/* Index n such that the BMT lives in bmt_tbl[n].block */
	u16 bmt_blk_idx;
	/* How many pages are needed to store 'struct bbbt' */
	u32 bmt_pgs;

	/* to compensate for driver level remapping */
	u8 oob_offset;
} bmtd = {0};

static unsigned char *nand_bbt_buf;
static unsigned char *nand_data_buf;
/* -------- Unit conversions -------- */
static inline u32 blk_pg(u16 block)
{
	return (u32)(block << (bmtd.blk_shift - bmtd.pg_shift));
}
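/*
 * Worked example (illustrative geometry, not taken from a specific chip):
 * with a 128 KiB erase block (blk_shift = 17) and a 2 KiB page
 * (pg_shift = 11), blk_pg(b) == b << 6, i.e. the first page of block b
 * is page b * 64.
 */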
/* -------- Nand operations wrapper -------- */
static inline int
bbt_nand_read(u32 page, unsigned char *dat, int dat_len,
	      unsigned char *fdm, int fdm_len)
{
	struct mtd_oob_ops ops = {
		.mode = MTD_OPS_PLACE_OOB,
		.ooboffs = bmtd.oob_offset,
		.oobbuf = fdm,
		.ooblen = fdm_len,
		.datbuf = dat,
		.len = dat_len,
	};

	return bmtd._read_oob(bmtd.mtd, page << bmtd.pg_shift, &ops);
}

static inline int bbt_nand_erase(u16 block)
{
	struct mtd_info *mtd = bmtd.mtd;
	struct erase_info instr = {
		.addr = (loff_t)block << bmtd.blk_shift,
		.len = bmtd.blk_size,
	};

	return bmtd._erase(mtd, &instr);
}
/* -------- Bad Blocks Management -------- */
static inline struct bbmt *bmt_tbl(struct bbbt *bbbt)
{
	return (struct bbmt *)&bbbt->bb_tbl[bmtd.table_size];
}
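/*
 * In-memory layout of the single BMT buffer, as implied by the structs
 * above: the 4-byte 'struct bbbt' header ("BMT" signature plus version)
 * is followed by table_size u16 bb_tbl[] entries, and bmt_tbl() points
 * just past bb_tbl[], where the array of 'struct bbmt' pool entries
 * begins.
 */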
static int
read_bmt(u16 block, unsigned char *dat, unsigned char *fdm, int fdm_len)
{
	u32 len = bmtd.bmt_pgs << bmtd.pg_shift;

	return bbt_nand_read(blk_pg(block), dat, len, fdm, fdm_len);
}

static int write_bmt(u16 block, unsigned char *dat)
{
	struct mtd_oob_ops ops = {
		.mode = MTD_OPS_PLACE_OOB,
		.ooboffs = OOB_SIGNATURE_OFFSET + bmtd.oob_offset,
		.oobbuf = "bmt",
		.ooblen = 3,
		.datbuf = dat,
		.len = bmtd.bmt_pgs << bmtd.pg_shift,
	};
	loff_t addr = (loff_t)block << bmtd.blk_shift;

	return bmtd._write_oob(bmtd.mtd, addr, &ops);
}
static u16 find_valid_block(u16 block)
{
	u8 fdm[4];
	int ret;
	int loop = 0;

retry:
	if (block >= bmtd.total_blks)
		return 0;

	ret = bbt_nand_read(blk_pg(block), nand_data_buf, bmtd.pg_size,
			    fdm, sizeof(fdm));
	/* Read the 1st byte of the FDM to judge whether the block is bad or not */
	if (ret || fdm[0] != 0xff) {
		pr_info("nand: found bad block 0x%x\n", block);
		if (loop >= bmtd.bb_max) {
			pr_info("nand: FATAL ERR: too many bad blocks!!\n");
			return 0;
		}

		loop++;
		block++;
		goto retry;
	}

	return block;
}
/* Find out all bad blocks, and fill in the mapping table */
static int scan_bad_blocks(struct bbbt *bbt)
{
	int i;
	u16 block = 0;

	/* First time download, block 0 MUST NOT be a bad block,
	 * this is guaranteed by the vendor
	 */
	bbt->bb_tbl[0] = 0;

	/*
	 * Construct the mapping table of the normal data area (non-PMT/BMTPOOL)
	 * G - Good block; B - Bad block
	 *	       ---------------------------
	 * physical:  |G|G|B|G|B|B|G|G|G|G|B|G|B|
	 *	       ---------------------------
	 * What bb_tbl[i] looks like:
	 *   physical block(i):
	 *	0 1 2 3 4 5 6 7 8 9 a b c
	 *   mapped block(bb_tbl[i]):
	 *	0 1 3 6 7 8 9 b ......
	 * ATTENTION:
	 *	If a new bad block(n) occurs, search bmt_tbl to find an
	 *	available block(x), and fill in bb_tbl[n] = x;
	 */
	for (i = 1; i < bmtd.pool_lba; i++) {
		bbt->bb_tbl[i] = find_valid_block(bbt->bb_tbl[i - 1] + 1);
		BBT_LOG("bb_tbl[0x%x] = 0x%x", i, bbt->bb_tbl[i]);
		if (bbt->bb_tbl[i] == 0)
			return -1;
	}

	/* Physical block start address of the BMT pool */
	bmtd.pool_pba = bbt->bb_tbl[i - 1] + 1;
	if (bmtd.pool_pba >= bmtd.total_blks - 2) {
		pr_info("nand: FATAL ERR: Too many bad blocks!!\n");
		return -1;
	}

	BBT_LOG("pool_pba=0x%x", bmtd.pool_pba);
	i = 0;
	block = bmtd.pool_pba;
	/*
	 * The bmt table is used for runtime bad block mapping
	 * G - Good block; B - Bad block
	 *	       ---------------------------
	 * physical:  |G|G|B|G|B|B|G|G|G|G|B|G|B|
	 *	       ---------------------------
	 * block:      0 1 2 3 4 5 6 7 8 9 a b c
	 * What bmt_tbl[i] looks like in the initial state:
	 *   i:
	 *	0 1 2 3 4 5 6 7
	 *   bmt_tbl[i].block:
	 *	0 1 3 6 7 8 9 b
	 *   bmt_tbl[i].mapped:
	 *	N N N N N N N B
	 *	N - Not mapped (available)
	 *	M - Mapped
	 *	B - BMT
	 * ATTENTION:
	 *	The BMT is always kept in the last valid block of the pool
	 */
	while ((block = find_valid_block(block)) != 0) {
		bmt_tbl(bbt)[i].block = block;
		bmt_tbl(bbt)[i].mapped = NO_MAPPED;
		BBT_LOG("bmt_tbl[%d].block = 0x%x", i, block);
		block++;
		i++;
	}

	/* i - How many available blocks in the pool, which is the length of bmt_tbl[]
	 * bmtd.bmt_blk_idx - bmt_tbl[bmtd.bmt_blk_idx].block => the BMT block
	 */
	if (i < 1) {
		pr_info("nand: FATAL ERR: no space to store BMT!!\n");
		return -1;
	}
	bmtd.bmt_blk_idx = i - 1;
	bmt_tbl(bbt)[bmtd.bmt_blk_idx].mapped = BMT_MAPPED;

	pr_info("[BBT] %d available blocks in BMT pool\n", i);
	return 0;
}
static bool is_valid_bmt(unsigned char *buf, unsigned char *fdm)
{
	struct bbbt *bbt = (struct bbbt *)buf;
	u8 *sig = (u8 *)bbt->signature + MAIN_SIGNATURE_OFFSET;

	if (memcmp(bbt->signature + MAIN_SIGNATURE_OFFSET, "BMT", 3) == 0 &&
	    memcmp(fdm + OOB_SIGNATURE_OFFSET, "bmt", 3) == 0) {
		if (bbt->version == BBMT_VERSION)
			return true;
	}
	BBT_LOG("[BBT] BMT version mismatch, please upgrade the preloader and U-Boot! sig=%02x%02x%02x, fdm=%02x%02x%02x",
		sig[0], sig[1], sig[2],
		fdm[1], fdm[2], fdm[3]);
	return false;
}
static u16 get_bmt_index(struct bbmt *bmt)
{
	int i = 0;

	while (bmt[i].block != BMT_TBL_DEF_VAL) {
		if (bmt[i].mapped == BMT_MAPPED)
			return i;
		i++;
	}
	return 0;
}
static struct bbbt *scan_bmt(u16 block)
{
	u8 fdm[4];

	if (block < bmtd.pool_lba)
		return NULL;

	if (read_bmt(block, nand_bbt_buf, fdm, sizeof(fdm)))
		return scan_bmt(block - 1);

	if (is_valid_bmt(nand_bbt_buf, fdm)) {
		bmtd.bmt_blk_idx = get_bmt_index(bmt_tbl((struct bbbt *)nand_bbt_buf));
		if (bmtd.bmt_blk_idx == 0) {
			pr_info("[BBT] FATAL ERR: bmt block index is wrong!\n");
			return NULL;
		}
		pr_info("[BBT] BMT.v2 is found at 0x%x\n", block);
		return (struct bbbt *)nand_bbt_buf;
	} else
		return scan_bmt(block - 1);
}
/* Write the Burner Bad Block Table to NAND flash
 * n - write the BMT to bmt_tbl[n]
 */
static u16 upload_bmt(struct bbbt *bbt, int n)
{
	u16 block;

retry:
	if (n < 0 || bmt_tbl(bbt)[n].mapped == NORMAL_MAPPED) {
		pr_info("nand: FATAL ERR: no space to store BMT!\n");
		return (u16)-1;
	}

	block = bmt_tbl(bbt)[n].block;
	BBT_LOG("n = 0x%x, block = 0x%x", n, block);
	if (bbt_nand_erase(block)) {
		bmt_tbl(bbt)[n].block = 0;
		/* erase failed, try the previous block: bmt_tbl[n - 1].block */
		n--;
		goto retry;
	}

	/* The signature offset is fixed at 0,
	 * the OOB signature offset is fixed at 1
	 */
	memcpy(bbt->signature + MAIN_SIGNATURE_OFFSET, "BMT", 3);
	bbt->version = BBMT_VERSION;

	if (write_bmt(block, (unsigned char *)bbt)) {
		bmt_tbl(bbt)[n].block = 0;

		/* write failed, try the previous block in bmt_tbl[n - 1] */
		n--;
		goto retry;
	}

	/* Return the current index(n) of the BMT pool (bmt_tbl[n]) */
	return n;
}
static u16 find_valid_block_in_pool(struct bbbt *bbt)
{
	int i;

	if (bmtd.bmt_blk_idx == 0)
		goto error;

	for (i = 0; i < bmtd.bmt_blk_idx; i++) {
		if (bmt_tbl(bbt)[i].block != 0 && bmt_tbl(bbt)[i].mapped == NO_MAPPED) {
			bmt_tbl(bbt)[i].mapped = NORMAL_MAPPED;
			return bmt_tbl(bbt)[i].block;
		}
	}

error:
	pr_info("nand: FATAL ERR: BMT pool is exhausted!\n");
	return 0;
}
/* We hit a bad block: mark it as bad and map it to a valid block in the pool.
 * If it was a write failure, the data still needs to be written to the
 * mapped block.
 */
static bool update_bmt(u16 block)
{
	u16 mapped_blk;
	struct bbbt *bbt;

	bbt = bmtd.bbt;
	mapped_blk = find_valid_block_in_pool(bbt);
	if (mapped_blk == 0)
		return false;

	/* Map the new bad block to an available block in the pool */
	bbt->bb_tbl[block] = mapped_blk;
	bmtd.bmt_blk_idx = upload_bmt(bbt, bmtd.bmt_blk_idx);

	return true;
}
u16 get_mapping_block_index(int block)
{
	int mapping_block;

	if (block < bmtd.pool_lba)
		mapping_block = bmtd.bbt->bb_tbl[block];
	else
		mapping_block = block;
	BBT_LOG("0x%x mapped to 0x%x", block, mapping_block);

	return mapping_block;
}
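/*
 * Illustrative example (hypothetical block numbers): if logical block 5
 * was remapped by update_bmt() to pool block 0x3f0, then bb_tbl[5] ==
 * 0x3f0 and get_mapping_block_index(5) returns 0x3f0. A block that was
 * never remapped returns the physical block recorded at scan time (which
 * already skips factory bad blocks), and anything at or beyond pool_lba
 * is returned unchanged.
 */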
static int
mtk_bmt_read(struct mtd_info *mtd, loff_t from,
	     struct mtd_oob_ops *ops)
{
	struct mtd_oob_ops cur_ops = *ops;
	int retry_count = 0;
	loff_t cur_from;
	int ret;

	ops->retlen = 0;
	ops->oobretlen = 0;

	while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) {
		u32 offset = from & (bmtd.blk_size - 1);
		u32 block = from >> bmtd.blk_shift;
		u32 cur_block;

		cur_block = get_mapping_block_index(block);
		cur_from = ((loff_t)cur_block << bmtd.blk_shift) + offset;

		cur_ops.oobretlen = 0;
		cur_ops.retlen = 0;
		cur_ops.len = min_t(u32, mtd->erasesize - offset,
				    ops->len - ops->retlen);
		ret = bmtd._read_oob(mtd, cur_from, &cur_ops);
		if (ret < 0) {
			update_bmt(block);
			if (retry_count++ < 10)
				continue;

			return ret;
		}

		ops->retlen += cur_ops.retlen;
		ops->oobretlen += cur_ops.oobretlen;

		cur_ops.ooboffs = 0;
		cur_ops.datbuf += cur_ops.retlen;
		cur_ops.oobbuf += cur_ops.oobretlen;
		cur_ops.ooblen -= cur_ops.oobretlen;

		if (!cur_ops.len)
			cur_ops.len = mtd->erasesize - offset;

		from += cur_ops.len;
		retry_count = 0;
	}

	return 0;
}
static int
mtk_bmt_write(struct mtd_info *mtd, loff_t to,
	      struct mtd_oob_ops *ops)
{
	struct mtd_oob_ops cur_ops = *ops;
	int retry_count = 0;
	loff_t cur_to;
	int ret;

	ops->retlen = 0;
	ops->oobretlen = 0;

	while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) {
		u32 offset = to & (bmtd.blk_size - 1);
		u32 block = to >> bmtd.blk_shift;
		u32 cur_block;

		cur_block = get_mapping_block_index(block);
		cur_to = ((loff_t)cur_block << bmtd.blk_shift) + offset;

		cur_ops.oobretlen = 0;
		cur_ops.retlen = 0;
		cur_ops.len = min_t(u32, bmtd.blk_size - offset,
				    ops->len - ops->retlen);
		ret = bmtd._write_oob(mtd, cur_to, &cur_ops);
		if (ret < 0) {
			update_bmt(block);
			if (retry_count++ < 10)
				continue;

			return ret;
		}

		ops->retlen += cur_ops.retlen;
		ops->oobretlen += cur_ops.oobretlen;

		cur_ops.ooboffs = 0;
		cur_ops.datbuf += cur_ops.retlen;
		cur_ops.oobbuf += cur_ops.oobretlen;
		cur_ops.ooblen -= cur_ops.oobretlen;

		if (!cur_ops.len)
			cur_ops.len = mtd->erasesize - offset;

		to += cur_ops.len;
		retry_count = 0;
	}

	return 0;
}
static int
mtk_bmt_mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct erase_info mapped_instr = {
		.len = bmtd.blk_size,
	};
	int retry_count = 0;
	u64 start_addr, end_addr;
	int ret = 0;	/* initialized so an empty erase range returns success */
	u16 orig_block, block;

	start_addr = instr->addr & (~mtd->erasesize_mask);
	end_addr = instr->addr + instr->len;

	while (start_addr < end_addr) {
		orig_block = start_addr >> bmtd.blk_shift;
		block = get_mapping_block_index(orig_block);
		mapped_instr.addr = (loff_t)block << bmtd.blk_shift;
		ret = bmtd._erase(mtd, &mapped_instr);
		if (ret) {
			update_bmt(orig_block);
			if (retry_count++ < 10)
				continue;
			instr->fail_addr = start_addr;
			break;
		}
		start_addr += mtd->erasesize;
		retry_count = 0;
	}

	return ret;
}
static int
mtk_bmt_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	int retry_count = 0;
	u16 orig_block = ofs >> bmtd.blk_shift;
	u16 block;
	int ret;

retry:
	block = get_mapping_block_index(orig_block);
	ret = bmtd._block_isbad(mtd, (loff_t)block << bmtd.blk_shift);
	if (ret) {
		update_bmt(orig_block);
		if (retry_count++ < 10)
			goto retry;
	}
	return ret;
}
static int
mtk_bmt_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	u16 orig_block = ofs >> bmtd.blk_shift;
	u16 block = get_mapping_block_index(orig_block);

	update_bmt(orig_block);

	return bmtd._block_markbad(mtd, (loff_t)block << bmtd.blk_shift);
}
static void
mtk_bmt_replace_ops(struct mtd_info *mtd)
{
	bmtd._read_oob = mtd->_read_oob;
	bmtd._write_oob = mtd->_write_oob;
	bmtd._erase = mtd->_erase;
	bmtd._block_isbad = mtd->_block_isbad;
	bmtd._block_markbad = mtd->_block_markbad;

	mtd->_read_oob = mtk_bmt_read;
	mtd->_write_oob = mtk_bmt_write;
	mtd->_erase = mtk_bmt_mtd_erase;
	mtd->_block_isbad = mtk_bmt_block_isbad;
	mtd->_block_markbad = mtk_bmt_block_markbad;
}
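/*
 * Note: the original mtd callbacks saved above are restored verbatim by
 * mtk_bmt_detach() below, so attaching and detaching the BMT layer is
 * symmetric from the underlying NAND driver's point of view.
 */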
static int mtk_bmt_debug_mark_good(void *data, u64 val)
{
	u32 block = val >> bmtd.blk_shift;

	bmtd.bbt->bb_tbl[block] = block;
	bmtd.bmt_blk_idx = upload_bmt(bmtd.bbt, bmtd.bmt_blk_idx);

	return 0;
}

static int mtk_bmt_debug_mark_bad(void *data, u64 val)
{
	u32 block = val >> bmtd.blk_shift;

	update_bmt(block);

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(fops_mark_good, NULL, mtk_bmt_debug_mark_good, "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(fops_mark_bad, NULL, mtk_bmt_debug_mark_bad, "%llu\n");

static void
mtk_bmt_add_debugfs(void)
{
	struct dentry *dir;

	dir = bmtd.debugfs_dir = debugfs_create_dir("mtk-bmt", NULL);
	if (!dir)
		return;

	debugfs_create_file_unsafe("mark_good", S_IWUSR, dir, NULL, &fops_mark_good);
	debugfs_create_file_unsafe("mark_bad", S_IWUSR, dir, NULL, &fops_mark_bad);
}
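/*
 * Usage sketch (assuming debugfs is mounted at its usual location):
 * writing a raw flash byte offset to these files acts on the block that
 * contains it, e.g.
 *
 *	echo 4194304 > /sys/kernel/debug/mtk-bmt/mark_bad
 *
 * would run update_bmt() on the block covering offset 0x400000.
 */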
void mtk_bmt_detach(struct mtd_info *mtd)
{
	if (bmtd.mtd != mtd)
		return;

	if (bmtd.debugfs_dir)
		debugfs_remove_recursive(bmtd.debugfs_dir);
	bmtd.debugfs_dir = NULL;

	kfree(nand_bbt_buf);
	kfree(nand_data_buf);

	mtd->_read_oob = bmtd._read_oob;
	mtd->_write_oob = bmtd._write_oob;
	mtd->_erase = bmtd._erase;
	mtd->_block_isbad = bmtd._block_isbad;
	mtd->_block_markbad = bmtd._block_markbad;
	mtd->size = (loff_t)bmtd.total_blks << bmtd.blk_shift;

	memset(&bmtd, 0, sizeof(bmtd));
}
/* total_blocks - The total count of blocks that the NAND chip has */
int mtk_bmt_attach(struct mtd_info *mtd)
{
	struct device_node *np;
	struct bbbt *bbt;
	u32 bufsz;
	u32 block;
	u16 total_blocks, pmt_block;
	int ret = 0;
	u32 bmt_pool_size, bmt_table_size;

	if (bmtd.mtd)
		return -ENOSPC;

	np = mtd_get_of_node(mtd);
	if (!np)
		return 0;

	if (!of_property_read_bool(np, "mediatek,bmt-v2"))
		return 0;

	if (of_property_read_u32(np, "mediatek,bmt-pool-size",
				 &bmt_pool_size) != 0)
		bmt_pool_size = 80;

	if (of_property_read_u8(np, "mediatek,bmt-oob-offset",
				&bmtd.oob_offset) != 0)
		bmtd.oob_offset = 0;

	if (of_property_read_u32(np, "mediatek,bmt-table-size",
				 &bmt_table_size) != 0)
		bmt_table_size = 0x2000U;

	bmtd.mtd = mtd;
	mtk_bmt_replace_ops(mtd);

	bmtd.table_size = bmt_table_size;
	bmtd.blk_size = mtd->erasesize;
	bmtd.blk_shift = ffs(bmtd.blk_size) - 1;
	bmtd.pg_size = mtd->writesize;
	bmtd.pg_shift = ffs(bmtd.pg_size) - 1;
	total_blocks = mtd->size >> bmtd.blk_shift;
	pmt_block = total_blocks - bmt_pool_size - 2;
	mtd->size = (loff_t)pmt_block << bmtd.blk_shift;

	/*
	 *  ------------------------------------------
	 * | PMT(2blks) | BMT POOL(totalblks * 2%)     |
	 *  ------------------------------------------
	 * ^            ^
	 * |            |
	 * pmt_block    pmt_block + 2 blocks (pool_lba)
	 *
	 * ATTENTION:
	 *	The blocks ahead of the boundary block are stored in bb_tbl
	 *	and the blocks behind it are stored in bmt_tbl
	 */
	bmtd.pool_lba = (u16)(pmt_block + 2);
	bmtd.total_blks = total_blocks;
	bmtd.bb_max = bmtd.total_blks * BBPOOL_RATIO / 100;

	/* Buffers for the BMT table and for single-page reads */
	bufsz = round_up(sizeof(struct bbbt) +
			 bmt_table_size * sizeof(struct bbmt), bmtd.pg_size);
	bmtd.bmt_pgs = bufsz >> bmtd.pg_shift;
	nand_bbt_buf = kzalloc(bufsz, GFP_KERNEL);
	nand_data_buf = kzalloc(bmtd.pg_size, GFP_KERNEL);

	if (!nand_bbt_buf || !nand_data_buf) {
		pr_info("nand: FATAL ERR: allocate buffer failed!\n");
		ret = -1;
		goto error;
	}

	memset(nand_bbt_buf, 0xff, bufsz);
	memset(nand_data_buf, 0xff, bmtd.pg_size);

	BBT_LOG("bbtbuf=0x%p(0x%x) dat=0x%p(0x%x)",
		nand_bbt_buf, bufsz, nand_data_buf, bmtd.pg_size);
	BBT_LOG("pool_lba=0x%x total_blks=0x%x bb_max=0x%x",
		bmtd.pool_lba, bmtd.total_blks, bmtd.bb_max);

	/* Scanning starts from the first page of the last block
	 * of the whole flash
	 */
	bbt = scan_bmt(bmtd.total_blks - 1);
	if (!bbt) {
		/* BMT not found */
		if (bmtd.total_blks > BB_TABLE_MAX + BMT_TABLE_MAX) {
			pr_info("nand: FATAL: Too many blocks, can not support!\n");
			ret = -1;
			goto error;
		}

		bbt = (struct bbbt *)nand_bbt_buf;
		memset(bmt_tbl(bbt), BMT_TBL_DEF_VAL, bmtd.table_size * sizeof(struct bbmt));

		if (scan_bad_blocks(bbt)) {
			ret = -1;
			goto error;
		}

		/* The BMT is always kept in the last valid block of the pool */
		bmtd.bmt_blk_idx = upload_bmt(bbt, bmtd.bmt_blk_idx);
		if (bmtd.bmt_blk_idx == (u16)-1) {
			ret = -1;
			goto error;
		}

		block = bmt_tbl(bbt)[bmtd.bmt_blk_idx].block;
		pr_notice("[BBT] BMT.v2 is written into PBA:0x%x\n", block);

		if (bmtd.bmt_blk_idx == 0)
			pr_info("nand: Warning: no available block in BMT pool!\n");
	}
	mtk_bmt_add_debugfs();

	bmtd.bbt = bbt;
	return 0;

error:
	mtk_bmt_detach(mtd);
	return ret;
}
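/*
 * Illustrative device-tree fragment (node and controller labels are
 * placeholders; only the mediatek,* properties are read by this file):
 *
 *	&snfi {
 *		nand@0 {
 *			mediatek,bmt-v2;
 *			mediatek,bmt-pool-size = <80>;
 *			mediatek,bmt-table-size = <0x2000>;
 *		};
 *	};
 *
 * Without the "mediatek,bmt-v2" property mtk_bmt_attach() returns early
 * and leaves the mtd callbacks untouched; the other properties fall back
 * to the defaults shown above when absent.
 */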
  649. MODULE_LICENSE("GPL");
  650. MODULE_AUTHOR("Xiangsheng Hou <[email protected]>, Felix Fietkau <[email protected]>");
  651. MODULE_DESCRIPTION("Bad Block mapping management v2 for MediaTek NAND Flash Driver");