mtk_bmt.c

/*
 * Copyright (c) 2017 MediaTek Inc.
 * Author: Xiangsheng Hou <[email protected]>
 * Copyright (c) 2020-2022 Felix Fietkau <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/bits.h>

#include "mtk_bmt.h"

struct bmt_desc bmtd = {};
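
/*
 * Note: bmtd is a single global descriptor, so this layer manages at most
 * one attached MTD device at a time; mtk_bmt_attach() below rejects a
 * second device with -ENOSPC while bmtd.mtd is set.
 */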

/* -------- Nand operations wrapper -------- */
int bbt_nand_copy(u16 dest_blk, u16 src_blk, loff_t max_offset)
{
	int pages = bmtd.blk_size >> bmtd.pg_shift;
	loff_t src = (loff_t)src_blk << bmtd.blk_shift;
	loff_t dest = (loff_t)dest_blk << bmtd.blk_shift;
	loff_t offset = 0;
	uint8_t oob[64];
	int i, ret;

	for (i = 0; i < pages; i++) {
		struct mtd_oob_ops rd_ops = {
			.mode = MTD_OPS_PLACE_OOB,
			.oobbuf = oob,
			.ooblen = min_t(int, bmtd.mtd->oobsize / pages, sizeof(oob)),
			.datbuf = bmtd.data_buf,
			.len = bmtd.pg_size,
		};
		struct mtd_oob_ops wr_ops = {
			.mode = MTD_OPS_PLACE_OOB,
			.oobbuf = oob,
			.datbuf = bmtd.data_buf,
			.len = bmtd.pg_size,
		};

		if (offset >= max_offset)
			break;

		ret = bmtd._read_oob(bmtd.mtd, src + offset, &rd_ops);
		if (ret < 0 && !mtd_is_bitflip(ret))
			return ret;

		if (!rd_ops.retlen)
			break;

		/*
		 * Write back only as much OOB data as the read returned.
		 * This must be set before the write; setting it afterwards
		 * (as the original ordering did) is a no-op, since wr_ops
		 * is re-initialized on every iteration and no OOB data
		 * would ever be copied.
		 */
		wr_ops.ooblen = rd_ops.oobretlen;

		ret = bmtd._write_oob(bmtd.mtd, dest + offset, &wr_ops);
		if (ret < 0)
			return ret;

		offset += rd_ops.retlen;
	}

	return 0;
}
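
/*
 * Callers are expected to erase dest_blk before copying into it; see
 * mtk_bmt_debug_repair() below, which pairs bbt_nand_erase() with
 * bbt_nand_copy(). The per-variant remap_block() implementations
 * presumably follow the same erase-then-copy pattern.
 */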

/* -------- Bad Blocks Management -------- */
bool mapping_block_in_range(int block, int *start, int *end)
{
	const __be32 *cur = bmtd.remap_range;
	u32 addr = block << bmtd.blk_shift;
	int i;

	if (!cur || !bmtd.remap_range_len) {
		*start = 0;
		*end = bmtd.total_blks;
		return true;
	}

	for (i = 0; i < bmtd.remap_range_len; i++, cur += 2) {
		if (addr < be32_to_cpu(cur[0]) || addr >= be32_to_cpu(cur[1]))
			continue;

		/*
		 * Convert the byte-address window back to block numbers, so
		 * *start/*end match the block-based values of the default
		 * (no remap range) branch above.
		 */
		*start = be32_to_cpu(cur[0]) >> bmtd.blk_shift;
		*end = be32_to_cpu(cur[1]) >> bmtd.blk_shift;
		return true;
	}

	return false;
}
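
/*
 * The remap-range property parsed in mtk_bmt_attach() is a list of
 * <start end> byte-address pairs (8 bytes each, hence the division by 8
 * there); a block may only be remapped if its byte address falls inside
 * one of those windows. With no property present, the whole device is
 * eligible.
 */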

static bool
mtk_bmt_remap_block(u32 block, u32 mapped_block, int copy_len)
{
	int start, end;

	if (!mapping_block_in_range(block, &start, &end))
		return false;

	return bmtd.ops->remap_block(block, mapped_block, copy_len);
}
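
/*
 * Read path: translate each logical erase block through the variant's
 * mapping table, split the request at erase-block boundaries, and retry
 * through a fresh mapping (up to 10 times per block) on hard read errors.
 * Blocks whose bitflip count reaches mtd->bitflip_threshold are remapped
 * proactively, before they degrade into uncorrectable errors.
 */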

static int
mtk_bmt_read(struct mtd_info *mtd, loff_t from,
	     struct mtd_oob_ops *ops)
{
	struct mtd_oob_ops cur_ops = *ops;
	int retry_count = 0;
	loff_t cur_from;
	int ret = 0;
	int max_bitflips = 0;

	ops->retlen = 0;
	ops->oobretlen = 0;

	while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) {
		int cur_ret;
		u32 offset = from & (bmtd.blk_size - 1);
		u32 block = from >> bmtd.blk_shift;
		int cur_block;

		cur_block = bmtd.ops->get_mapping_block(block);
		if (cur_block < 0)
			return -EIO;

		cur_from = ((loff_t)cur_block << bmtd.blk_shift) + offset;

		cur_ops.oobretlen = 0;
		cur_ops.retlen = 0;
		cur_ops.len = min_t(u32, mtd->erasesize - offset,
				    ops->len - ops->retlen);
		cur_ret = bmtd._read_oob(mtd, cur_from, &cur_ops);
		if (cur_ret < 0)
			ret = cur_ret;
		else
			max_bitflips = max_t(int, max_bitflips, cur_ret);
		if (cur_ret < 0 && !mtd_is_bitflip(cur_ret)) {
			if (mtk_bmt_remap_block(block, cur_block, mtd->erasesize) &&
			    retry_count++ < 10)
				continue;

			goto out;
		}

		if (mtd->bitflip_threshold && cur_ret >= mtd->bitflip_threshold)
			mtk_bmt_remap_block(block, cur_block, mtd->erasesize);

		ops->retlen += cur_ops.retlen;
		ops->oobretlen += cur_ops.oobretlen;

		cur_ops.ooboffs = 0;
		cur_ops.datbuf += cur_ops.retlen;
		cur_ops.oobbuf += cur_ops.oobretlen;
		cur_ops.ooblen -= cur_ops.oobretlen;

		if (!cur_ops.len)
			cur_ops.len = mtd->erasesize - offset;

		from += cur_ops.len;
		retry_count = 0;
	}

out:
	if (ret < 0)
		return ret;

	return max_bitflips;
}
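
/*
 * Write path: same block translation and split as the read path, but a
 * failed write remaps with copy_len = offset, so only the data already
 * written into the failing block is migrated to its replacement before
 * the retry.
 */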

static int
mtk_bmt_write(struct mtd_info *mtd, loff_t to,
	      struct mtd_oob_ops *ops)
{
	struct mtd_oob_ops cur_ops = *ops;
	int retry_count = 0;
	loff_t cur_to;
	int ret;

	ops->retlen = 0;
	ops->oobretlen = 0;

	while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) {
		u32 offset = to & (bmtd.blk_size - 1);
		u32 block = to >> bmtd.blk_shift;
		int cur_block;

		cur_block = bmtd.ops->get_mapping_block(block);
		if (cur_block < 0)
			return -EIO;

		cur_to = ((loff_t)cur_block << bmtd.blk_shift) + offset;

		cur_ops.oobretlen = 0;
		cur_ops.retlen = 0;
		cur_ops.len = min_t(u32, bmtd.blk_size - offset,
				    ops->len - ops->retlen);
		ret = bmtd._write_oob(mtd, cur_to, &cur_ops);
		if (ret < 0) {
			if (mtk_bmt_remap_block(block, cur_block, offset) &&
			    retry_count++ < 10)
				continue;

			return ret;
		}

		ops->retlen += cur_ops.retlen;
		ops->oobretlen += cur_ops.oobretlen;

		cur_ops.ooboffs = 0;
		cur_ops.datbuf += cur_ops.retlen;
		cur_ops.oobbuf += cur_ops.oobretlen;
		cur_ops.ooblen -= cur_ops.oobretlen;

		if (!cur_ops.len)
			cur_ops.len = mtd->erasesize - offset;

		to += cur_ops.len;
		retry_count = 0;
	}

	return 0;
}
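
/*
 * Erase is performed one erase block at a time so that each block can be
 * translated (and, on failure, remapped and retried) independently.
 */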

static int
mtk_bmt_mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct erase_info mapped_instr = {
		.len = bmtd.blk_size,
	};
	int retry_count = 0;
	u64 start_addr, end_addr;
	int ret = 0;	/* initialized: the loop body may never run */
	u16 orig_block;
	int block;

	start_addr = instr->addr & (~mtd->erasesize_mask);
	end_addr = instr->addr + instr->len;

	while (start_addr < end_addr) {
		orig_block = start_addr >> bmtd.blk_shift;
		block = bmtd.ops->get_mapping_block(orig_block);
		if (block < 0)
			return -EIO;

		mapped_instr.addr = (loff_t)block << bmtd.blk_shift;
		ret = bmtd._erase(mtd, &mapped_instr);
		if (ret) {
			if (mtk_bmt_remap_block(orig_block, block, 0) &&
			    retry_count++ < 10)
				continue;

			instr->fail_addr = start_addr;
			break;
		}

		start_addr += mtd->erasesize;
		retry_count = 0;
	}

	return ret;
}
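
/*
 * Bad-block queries and marks also go through the mapping, so callers
 * always act on the physical block currently backing the logical one.
 */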

static int
mtk_bmt_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	int retry_count = 0;
	u16 orig_block = ofs >> bmtd.blk_shift;
	int block;
	int ret;

retry:
	/*
	 * Use a signed block so mapping failures are not truncated away,
	 * and bail out on them like the other wrappers do.
	 */
	block = bmtd.ops->get_mapping_block(orig_block);
	if (block < 0)
		return -EIO;

	ret = bmtd._block_isbad(mtd, (loff_t)block << bmtd.blk_shift);
	if (ret) {
		if (mtk_bmt_remap_block(orig_block, block, bmtd.blk_size) &&
		    retry_count++ < 10)
			goto retry;
	}

	return ret;
}

static int
mtk_bmt_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	u16 orig_block = ofs >> bmtd.blk_shift;
	int block;

	block = bmtd.ops->get_mapping_block(orig_block);
	if (block < 0)
		return -EIO;

	mtk_bmt_remap_block(orig_block, block, bmtd.blk_size);

	return bmtd._block_markbad(mtd, (loff_t)block << bmtd.blk_shift);
}
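
/*
 * Hook installation: the original mtd_info callbacks are saved in bmtd and
 * substituted with the remapping wrappers above; mtk_bmt_detach() restores
 * them, so the BMT layer is transparent to the rest of the MTD stack.
 */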

static void
mtk_bmt_replace_ops(struct mtd_info *mtd)
{
	bmtd._read_oob = mtd->_read_oob;
	bmtd._write_oob = mtd->_write_oob;
	bmtd._erase = mtd->_erase;
	bmtd._block_isbad = mtd->_block_isbad;
	bmtd._block_markbad = mtd->_block_markbad;

	mtd->_read_oob = mtk_bmt_read;
	mtd->_write_oob = mtk_bmt_write;
	mtd->_erase = mtk_bmt_mtd_erase;
	mtd->_block_isbad = mtk_bmt_block_isbad;
	mtd->_block_markbad = mtk_bmt_block_markbad;
}
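
/*
 * Debugfs interface: each file takes a byte address within the logical mtd
 * and operates on the erase block containing it. Assuming debugfs is
 * mounted at the usual /sys/kernel/debug, a block can be force-remapped
 * with something like:
 *
 *	echo 0x80000 > /sys/kernel/debug/mtk-bmt/mark_bad
 */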

static int mtk_bmt_debug_repair(void *data, u64 val)
{
	int block = val >> bmtd.blk_shift;
	int prev_block, new_block;

	prev_block = bmtd.ops->get_mapping_block(block);
	if (prev_block < 0)
		return -EIO;

	bmtd.ops->unmap_block(block);
	new_block = bmtd.ops->get_mapping_block(block);
	if (new_block < 0)
		return -EIO;

	if (prev_block == new_block)
		return 0;

	bbt_nand_erase(new_block);
	bbt_nand_copy(new_block, prev_block, bmtd.blk_size);

	return 0;
}

static int mtk_bmt_debug_mark_good(void *data, u64 val)
{
	bmtd.ops->unmap_block(val >> bmtd.blk_shift);

	return 0;
}

static int mtk_bmt_debug_mark_bad(void *data, u64 val)
{
	u32 block = val >> bmtd.blk_shift;
	int cur_block;

	cur_block = bmtd.ops->get_mapping_block(block);
	if (cur_block < 0)
		return -EIO;

	mtk_bmt_remap_block(block, cur_block, bmtd.blk_size);

	return 0;
}

static int mtk_bmt_debug(void *data, u64 val)
{
	return bmtd.ops->debug(data, val);
}

DEFINE_DEBUGFS_ATTRIBUTE(fops_repair, NULL, mtk_bmt_debug_repair, "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(fops_mark_good, NULL, mtk_bmt_debug_mark_good, "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(fops_mark_bad, NULL, mtk_bmt_debug_mark_bad, "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(fops_debug, NULL, mtk_bmt_debug, "%llu\n");

static void
mtk_bmt_add_debugfs(void)
{
	struct dentry *dir;

	dir = bmtd.debugfs_dir = debugfs_create_dir("mtk-bmt", NULL);
	if (!dir)
		return;

	debugfs_create_file_unsafe("repair", S_IWUSR, dir, NULL, &fops_repair);
	debugfs_create_file_unsafe("mark_good", S_IWUSR, dir, NULL, &fops_mark_good);
	debugfs_create_file_unsafe("mark_bad", S_IWUSR, dir, NULL, &fops_mark_bad);
	debugfs_create_file_unsafe("debug", S_IWUSR, dir, NULL, &fops_debug);
}

void mtk_bmt_detach(struct mtd_info *mtd)
{
	if (bmtd.mtd != mtd)
		return;

	debugfs_remove_recursive(bmtd.debugfs_dir);
	bmtd.debugfs_dir = NULL;

	kfree(bmtd.bbt_buf);
	kfree(bmtd.data_buf);

	mtd->_read_oob = bmtd._read_oob;
	mtd->_write_oob = bmtd._write_oob;
	mtd->_erase = bmtd._erase;
	mtd->_block_isbad = bmtd._block_isbad;
	mtd->_block_markbad = bmtd._block_markbad;
	mtd->size = bmtd.total_blks << bmtd.blk_shift;

	memset(&bmtd, 0, sizeof(bmtd));
}
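
/*
 * Hypothetical device tree sketch for attaching this layer, based on the
 * properties parsed below (one variant-selection property plus optional
 * tweaks; the parent node label and values are illustrative only):
 *
 *	&snand {
 *		mediatek,bmt-v2;
 *		mediatek,bmt-remap-range = <0x000000 0x6c0000>;
 *		mediatek,bmt-mtd-overridden-oobsize = <64>;
 *	};
 */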

int mtk_bmt_attach(struct mtd_info *mtd)
{
	struct device_node *np;
	int ret = 0;
	u32 overridden_oobsize = 0;

	if (bmtd.mtd)
		return -ENOSPC;

	np = mtd_get_of_node(mtd);
	if (!np)
		return 0;

	if (of_property_read_bool(np, "mediatek,bmt-v2"))
		bmtd.ops = &mtk_bmt_v2_ops;
	else if (of_property_read_bool(np, "mediatek,nmbm"))
		bmtd.ops = &mtk_bmt_nmbm_ops;
	else if (of_property_read_bool(np, "mediatek,bbt"))
		bmtd.ops = &mtk_bmt_bbt_ops;
	else
		return 0;

	bmtd.remap_range = of_get_property(np, "mediatek,bmt-remap-range",
					   &bmtd.remap_range_len);
	bmtd.remap_range_len /= 8;

	bmtd.mtd = mtd;
	mtk_bmt_replace_ops(mtd);

	if (!of_property_read_u32(np, "mediatek,bmt-mtd-overridden-oobsize",
				  &overridden_oobsize) &&
	    overridden_oobsize < bmtd.mtd->oobsize) {
		bmtd.mtd->oobsize = overridden_oobsize;
		pr_info("NMBM: mtd OOB size has been overridden to %uB\n",
			bmtd.mtd->oobsize);
	}

	bmtd.blk_size = mtd->erasesize;
	bmtd.blk_shift = ffs(bmtd.blk_size) - 1;
	bmtd.pg_size = mtd->writesize;
	bmtd.pg_shift = ffs(bmtd.pg_size) - 1;
	bmtd.total_blks = mtd->size >> bmtd.blk_shift;

	bmtd.data_buf = kzalloc(bmtd.pg_size + bmtd.mtd->oobsize, GFP_KERNEL);
	if (!bmtd.data_buf) {
		pr_info("nand: FATAL ERR: buffer allocation failed!\n");
		ret = -ENOMEM;
		goto error;
	}

	memset(bmtd.data_buf, 0xff, bmtd.pg_size + bmtd.mtd->oobsize);

	ret = bmtd.ops->init(np);
	if (ret)
		goto error;

	mtk_bmt_add_debugfs();
	return 0;

error:
	mtk_bmt_detach(mtd);
	return ret;
}

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Xiangsheng Hou <[email protected]>, Felix Fietkau <[email protected]>");
MODULE_DESCRIPTION("Bad Block mapping management v2 for MediaTek NAND Flash Driver");