/*
 * Copyright (c) 2017 MediaTek Inc.
 * Author: Xiangsheng Hou <[email protected]>
 * Copyright (c) 2020-2022 Felix Fietkau <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/bits.h>

#include "mtk_bmt.h"

struct bmt_desc bmtd = {};

/* -------- Nand operations wrapper -------- */
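
/*
 * Copy one eraseblock to another, page by page, together with whatever OOB
 * data each page read returns. Copying stops at max_offset or at the first
 * page that returns no data; used for migrating data off a failing block
 * when it gets remapped.
 */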
int bbt_nand_copy(u16 dest_blk, u16 src_blk, loff_t max_offset)
{
	int pages = bmtd.blk_size >> bmtd.pg_shift;
	loff_t src = (loff_t)src_blk << bmtd.blk_shift;
	loff_t dest = (loff_t)dest_blk << bmtd.blk_shift;
	loff_t offset = 0;
	uint8_t oob[64];
	int i, ret;

	for (i = 0; i < pages; i++) {
		struct mtd_oob_ops rd_ops = {
			.mode = MTD_OPS_PLACE_OOB,
			.oobbuf = oob,
			.ooblen = min_t(int, bmtd.mtd->oobsize / pages, sizeof(oob)),
			.datbuf = bmtd.data_buf,
			.len = bmtd.pg_size,
		};
		struct mtd_oob_ops wr_ops = {
			.mode = MTD_OPS_PLACE_OOB,
			.oobbuf = oob,
			.datbuf = bmtd.data_buf,
			.len = bmtd.pg_size,
		};

		if (offset >= max_offset)
			break;

		ret = bmtd._read_oob(bmtd.mtd, src + offset, &rd_ops);
		if (ret < 0 && !mtd_is_bitflip(ret))
			return ret;

		if (!rd_ops.retlen)
			break;

		/* write back exactly as much OOB data as the read returned */
		wr_ops.ooblen = rd_ops.oobretlen;

		ret = bmtd._write_oob(bmtd.mtd, dest + offset, &wr_ops);
		if (ret < 0)
			return ret;

		offset += rd_ops.retlen;
	}

	return 0;
}

/* -------- Bad Blocks Management -------- */
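
/*
 * Return true if a block may be remapped, i.e. if it lies inside one of the
 * "mediatek,bmt-remap-range" windows from the device tree, or if no such
 * ranges are configured at all (in which case the whole device qualifies).
 */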
bool mapping_block_in_range(int block, int *start, int *end)
{
	const __be32 *cur = bmtd.remap_range;
	u32 addr = block << bmtd.blk_shift;
	int i;

	if (!cur || !bmtd.remap_range_len) {
		*start = 0;
		*end = bmtd.total_blks;
		return true;
	}

	for (i = 0; i < bmtd.remap_range_len; i++, cur += 2) {
		if (addr < be32_to_cpu(cur[0]) || addr >= be32_to_cpu(cur[1]))
			continue;

		*start = be32_to_cpu(cur[0]);
		*end = be32_to_cpu(cur[1]);
		return true;
	}

	return false;
}
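
/*
 * Ask the active BMT backend to remap a logical block to a spare physical
 * block, but only if the block is allowed to be remapped at all. copy_len is
 * how many bytes of the old block should be migrated to the replacement.
 */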
static bool
mtk_bmt_remap_block(u32 block, u32 mapped_block, int copy_len)
{
	int start, end;

	if (!mapping_block_in_range(block, &start, &end))
		return false;

	return bmtd.ops->remap_block(block, mapped_block, copy_len);
}
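
/*
 * Replacement for mtd->_read_oob: translate each logical block through the
 * BMT before reading. A hard read failure remaps the block and retries (up
 * to 10 times per block); a read whose bitflip count reaches the bitflip
 * threshold also triggers a remap. Returns the maximum number of bitflips
 * seen, as a normal MTD read would.
 */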
static int
mtk_bmt_read(struct mtd_info *mtd, loff_t from,
	     struct mtd_oob_ops *ops)
{
	struct mtd_oob_ops cur_ops = *ops;
	int retry_count = 0;
	loff_t cur_from;
	int ret = 0;
	int max_bitflips = 0;

	ops->retlen = 0;
	ops->oobretlen = 0;

	while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) {
		int cur_ret;

		u32 offset = from & (bmtd.blk_size - 1);
		u32 block = from >> bmtd.blk_shift;
		int cur_block;

		cur_block = bmtd.ops->get_mapping_block(block);
		if (cur_block < 0)
			return -EIO;

		cur_from = ((loff_t)cur_block << bmtd.blk_shift) + offset;

		cur_ops.oobretlen = 0;
		cur_ops.retlen = 0;
		cur_ops.len = min_t(u32, mtd->erasesize - offset,
				    ops->len - ops->retlen);
		cur_ret = bmtd._read_oob(mtd, cur_from, &cur_ops);
		if (cur_ret < 0)
			ret = cur_ret;
		else
			max_bitflips = max_t(int, max_bitflips, cur_ret);
		if (cur_ret < 0 && !mtd_is_bitflip(cur_ret)) {
			if (mtk_bmt_remap_block(block, cur_block, mtd->erasesize) &&
			    retry_count++ < 10)
				continue;

			goto out;
		}

		if (cur_ret >= mtd->bitflip_threshold)
			mtk_bmt_remap_block(block, cur_block, mtd->erasesize);

		ops->retlen += cur_ops.retlen;
		ops->oobretlen += cur_ops.oobretlen;

		cur_ops.ooboffs = 0;
		cur_ops.datbuf += cur_ops.retlen;
		cur_ops.oobbuf += cur_ops.oobretlen;
		cur_ops.ooblen -= cur_ops.oobretlen;

		if (!cur_ops.len)
			cur_ops.len = mtd->erasesize - offset;

		from += cur_ops.len;
		retry_count = 0;
	}

out:
	if (ret < 0)
		return ret;

	return max_bitflips;
}
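
/*
 * Replacement for mtd->_write_oob: same block translation as mtk_bmt_read().
 * A failed write remaps the target block, copying the data that already
 * precedes the failed offset, and retries up to 10 times per block.
 */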
static int
mtk_bmt_write(struct mtd_info *mtd, loff_t to,
	      struct mtd_oob_ops *ops)
{
	struct mtd_oob_ops cur_ops = *ops;
	int retry_count = 0;
	loff_t cur_to;
	int ret;

	ops->retlen = 0;
	ops->oobretlen = 0;

	while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) {
		u32 offset = to & (bmtd.blk_size - 1);
		u32 block = to >> bmtd.blk_shift;
		int cur_block;

		cur_block = bmtd.ops->get_mapping_block(block);
		if (cur_block < 0)
			return -EIO;

		cur_to = ((loff_t)cur_block << bmtd.blk_shift) + offset;

		cur_ops.oobretlen = 0;
		cur_ops.retlen = 0;
		cur_ops.len = min_t(u32, bmtd.blk_size - offset,
				    ops->len - ops->retlen);
		ret = bmtd._write_oob(mtd, cur_to, &cur_ops);
		if (ret < 0) {
			if (mtk_bmt_remap_block(block, cur_block, offset) &&
			    retry_count++ < 10)
				continue;

			return ret;
		}

		ops->retlen += cur_ops.retlen;
		ops->oobretlen += cur_ops.oobretlen;

		cur_ops.ooboffs = 0;
		cur_ops.datbuf += cur_ops.retlen;
		cur_ops.oobbuf += cur_ops.oobretlen;
		cur_ops.ooblen -= cur_ops.oobretlen;

		if (!cur_ops.len)
			cur_ops.len = mtd->erasesize - offset;

		to += cur_ops.len;
		retry_count = 0;
	}

	return 0;
}
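
/*
 * Replacement for mtd->_erase: erase the requested range one eraseblock at a
 * time, resolving each block through the BMT. A failed erase remaps the
 * block and retries; if remapping is not possible, the failing address is
 * reported in instr->fail_addr.
 */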
static int
mtk_bmt_mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct erase_info mapped_instr = {
		.len = bmtd.blk_size,
	};
	int retry_count = 0;
	u64 start_addr, end_addr;
	int ret = 0;
	u16 orig_block;
	int block;

	start_addr = instr->addr & (~mtd->erasesize_mask);
	end_addr = instr->addr + instr->len;

	while (start_addr < end_addr) {
		orig_block = start_addr >> bmtd.blk_shift;
		block = bmtd.ops->get_mapping_block(orig_block);
		if (block < 0)
			return -EIO;
		mapped_instr.addr = (loff_t)block << bmtd.blk_shift;
		ret = bmtd._erase(mtd, &mapped_instr);
		if (ret) {
			if (mtk_bmt_remap_block(orig_block, block, 0) &&
			    retry_count++ < 10)
				continue;
			instr->fail_addr = start_addr;
			break;
		}
		start_addr += mtd->erasesize;
		retry_count = 0;
	}

	return ret;
}
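
/*
 * Replacement for mtd->_block_isbad: check the physical block that the
 * logical offset currently maps to, remapping and retrying if that block is
 * reported bad.
 */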
static int
mtk_bmt_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	int retry_count = 0;
	u16 orig_block = ofs >> bmtd.blk_shift;
	int block;
	int ret;

retry:
	block = bmtd.ops->get_mapping_block(orig_block);
	if (block < 0)
		return -EIO;

	ret = bmtd._block_isbad(mtd, (loff_t)block << bmtd.blk_shift);
	if (ret) {
		if (mtk_bmt_remap_block(orig_block, block, bmtd.blk_size) &&
		    retry_count++ < 10)
			goto retry;
	}

	return ret;
}
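
/*
 * Replacement for mtd->_block_markbad: remap the logical block first so its
 * contents are preserved, then mark the underlying physical block bad.
 */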
static int
mtk_bmt_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	u16 orig_block = ofs >> bmtd.blk_shift;
	int block;

	block = bmtd.ops->get_mapping_block(orig_block);
	if (block < 0)
		return -EIO;

	mtk_bmt_remap_block(orig_block, block, bmtd.blk_size);

	return bmtd._block_markbad(mtd, (loff_t)block << bmtd.blk_shift);
}
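
/*
 * Save the device's original low-level callbacks in bmtd and install the BMT
 * wrappers in their place; mtk_bmt_detach() restores the originals.
 */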
static void
mtk_bmt_replace_ops(struct mtd_info *mtd)
{
	bmtd._read_oob = mtd->_read_oob;
	bmtd._write_oob = mtd->_write_oob;
	bmtd._erase = mtd->_erase;
	bmtd._block_isbad = mtd->_block_isbad;
	bmtd._block_markbad = mtd->_block_markbad;

	mtd->_read_oob = mtk_bmt_read;
	mtd->_write_oob = mtk_bmt_write;
	mtd->_erase = mtk_bmt_mtd_erase;
	mtd->_block_isbad = mtk_bmt_block_isbad;
	mtd->_block_markbad = mtk_bmt_block_markbad;
}
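
/*
 * Debugfs hooks in the "mtk-bmt" directory: writing a flash byte offset to
 * "mark_good" unmaps that block, writing one to "mark_bad" forces a remap,
 * and "debug" forwards the raw value to the active backend.
 */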
static int mtk_bmt_debug_mark_good(void *data, u64 val)
{
	bmtd.ops->unmap_block(val >> bmtd.blk_shift);

	return 0;
}

static int mtk_bmt_debug_mark_bad(void *data, u64 val)
{
	u32 block = val >> bmtd.blk_shift;
	int cur_block;

	cur_block = bmtd.ops->get_mapping_block(block);
	if (cur_block < 0)
		return -EIO;

	mtk_bmt_remap_block(block, cur_block, bmtd.blk_size);

	return 0;
}

static int mtk_bmt_debug(void *data, u64 val)
{
	return bmtd.ops->debug(data, val);
}

DEFINE_DEBUGFS_ATTRIBUTE(fops_mark_good, NULL, mtk_bmt_debug_mark_good, "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(fops_mark_bad, NULL, mtk_bmt_debug_mark_bad, "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(fops_debug, NULL, mtk_bmt_debug, "%llu\n");

static void
mtk_bmt_add_debugfs(void)
{
	struct dentry *dir;

	dir = bmtd.debugfs_dir = debugfs_create_dir("mtk-bmt", NULL);
	if (!dir)
		return;

	debugfs_create_file_unsafe("mark_good", S_IWUSR, dir, NULL, &fops_mark_good);
	debugfs_create_file_unsafe("mark_bad", S_IWUSR, dir, NULL, &fops_mark_bad);
	debugfs_create_file_unsafe("debug", S_IWUSR, dir, NULL, &fops_debug);
}
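
/*
 * Undo mtk_bmt_attach(): remove the debugfs entries, free the bbt_buf and
 * data_buf allocations, restore the original mtd callbacks, reset mtd->size
 * to cover all blocks, and clear the global state.
 */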
void mtk_bmt_detach(struct mtd_info *mtd)
{
	if (bmtd.mtd != mtd)
		return;

	if (bmtd.debugfs_dir)
		debugfs_remove_recursive(bmtd.debugfs_dir);
	bmtd.debugfs_dir = NULL;

	kfree(bmtd.bbt_buf);
	kfree(bmtd.data_buf);

	mtd->_read_oob = bmtd._read_oob;
	mtd->_write_oob = bmtd._write_oob;
	mtd->_erase = bmtd._erase;
	mtd->_block_isbad = bmtd._block_isbad;
	mtd->_block_markbad = bmtd._block_markbad;
	mtd->size = bmtd.total_blks << bmtd.blk_shift;

	memset(&bmtd, 0, sizeof(bmtd));
}
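
/*
 * Attach BMT handling to an mtd device. The backend (bmt-v2, nmbm or bbt) is
 * chosen from the device tree node; if none of those properties is present,
 * the device is left untouched. Only one device can be attached at a time
 * because all state lives in the global bmtd.
 */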
int mtk_bmt_attach(struct mtd_info *mtd)
{
	struct device_node *np;
	int ret = 0;

	if (bmtd.mtd)
		return -ENOSPC;

	np = mtd_get_of_node(mtd);
	if (!np)
		return 0;

	if (of_property_read_bool(np, "mediatek,bmt-v2"))
		bmtd.ops = &mtk_bmt_v2_ops;
	else if (of_property_read_bool(np, "mediatek,nmbm"))
		bmtd.ops = &mtk_bmt_nmbm_ops;
	else if (of_property_read_bool(np, "mediatek,bbt"))
		bmtd.ops = &mtk_bmt_bbt_ops;
	else
		return 0;

	bmtd.remap_range = of_get_property(np, "mediatek,bmt-remap-range",
					   &bmtd.remap_range_len);
	/* each range entry is a pair of 32-bit big-endian byte addresses */
	bmtd.remap_range_len /= 8;

	bmtd.mtd = mtd;
	mtk_bmt_replace_ops(mtd);

	bmtd.blk_size = mtd->erasesize;
	bmtd.blk_shift = ffs(bmtd.blk_size) - 1;
	bmtd.pg_size = mtd->writesize;
	bmtd.pg_shift = ffs(bmtd.pg_size) - 1;
	bmtd.total_blks = mtd->size >> bmtd.blk_shift;

	bmtd.data_buf = kzalloc(bmtd.pg_size + bmtd.mtd->oobsize, GFP_KERNEL);
	if (!bmtd.data_buf) {
		pr_info("nand: FATAL ERR: allocate buffer failed!\n");
		ret = -ENOMEM;
		goto error;
	}

	memset(bmtd.data_buf, 0xff, bmtd.pg_size + bmtd.mtd->oobsize);

	ret = bmtd.ops->init(np);
	if (ret)
		goto error;

	mtk_bmt_add_debugfs();
	return 0;

error:
	mtk_bmt_detach(mtd);
	return ret;
}

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Xiangsheng Hou <[email protected]>, Felix Fietkau <[email protected]>");
MODULE_DESCRIPTION("Bad Block mapping management v2 for MediaTek NAND Flash Driver");