12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871 |
- --- a/drivers/mtd/nand/Kconfig
- +++ b/drivers/mtd/nand/Kconfig
- @@ -15,6 +15,10 @@ config MTD_NAND_ECC
- bool
- depends on MTD_NAND_CORE
-
- +config MTD_NAND_MTK_BMT
- + bool "Support MediaTek NAND Bad-block Management Table"
- + default n
- +
- endmenu
-
- endmenu
- --- a/drivers/mtd/nand/Makefile
- +++ b/drivers/mtd/nand/Makefile
- @@ -2,6 +2,7 @@
-
- nandcore-objs := core.o bbt.o
- obj-$(CONFIG_MTD_NAND_CORE) += nandcore.o
- +obj-$(CONFIG_MTD_NAND_MTK_BMT) += mtk_bmt.o
-
- obj-y += onenand/
- obj-y += raw/
- --- /dev/null
- +++ b/drivers/mtd/nand/mtk_bmt.c
- @@ -0,0 +1,788 @@
- +/*
- + * Copyright (c) 2017 MediaTek Inc.
- + * Author: Xiangsheng Hou <[email protected]>
- + * Copyright (c) 2020 Felix Fietkau <[email protected]>
- + *
- + * This program is free software; you can redistribute it and/or modify
- + * it under the terms of the GNU General Public License version 2 as
- + * published by the Free Software Foundation.
- + *
- + * This program is distributed in the hope that it will be useful,
- + * but WITHOUT ANY WARRANTY; without even the implied warranty of
- + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- + * GNU General Public License for more details.
- + */
- +
- +#include <linux/slab.h>
- +#include <linux/gfp.h>
- +#include <linux/kernel.h>
- +#include <linux/of.h>
- +#include <linux/mtd/mtd.h>
- +#include <linux/mtd/partitions.h>
- +#include <linux/mtd/mtk_bmt.h>
- +#include <linux/module.h>
- +#include <linux/debugfs.h>
- +
- +#define MAIN_SIGNATURE_OFFSET 0
- +#define OOB_SIGNATURE_OFFSET 1
- +#define BBPOOL_RATIO 2
- +
- +#define BBT_LOG(fmt, ...) pr_debug("[BBT][%s|%d] "fmt"\n", __func__, __LINE__, ##__VA_ARGS__)
- +
- +/* Maximum 8k blocks */
- +#define BB_TABLE_MAX bmtd.table_size
- +#define BMT_TABLE_MAX (BB_TABLE_MAX * BBPOOL_RATIO / 100)
- +#define BMT_TBL_DEF_VAL 0x0
- +
- +/*
- + * Burner Bad Block Table
- + * --------- Only support SLC Nand Chips!!!!!!!!!!! ----------
- + */
- +
- +/* On-flash bad block table header; bb_tbl[] (and bmt_tbl[] after it,
- + * see bmt_tbl()) follow as a flexible array. */
- +struct bbbt {
- +	char signature[3];
- +	/* This version is used to distinguish the legacy and new algorithm */
- +#define BBMT_VERSION 2
- +	unsigned char version;
- +	/* Below 2 tables will be written in SLC */
- +	u16 bb_tbl[];
- +};
- +
- +/* One BMT pool entry: a physical block number and its mapping state */
- +struct bbmt {
- +	u16 block;
- +#define NO_MAPPED 0
- +#define NORMAL_MAPPED 1
- +#define BMT_MAPPED 2
- +	u16 mapped;
- +};
- +
- +/* Global driver state; a single NAND device is supported at a time
- + * (mtk_bmt_attach() rejects a second device while one is attached).
- + */
- +static struct bmt_desc {
- +	struct mtd_info *mtd;
- +
- +	/* original mtd ops, saved before the BMT wrappers are installed */
- +	int (*_read_oob) (struct mtd_info *mtd, loff_t from,
- +			  struct mtd_oob_ops *ops);
- +	int (*_write_oob) (struct mtd_info *mtd, loff_t to,
- +			   struct mtd_oob_ops *ops);
- +	int (*_erase) (struct mtd_info *mtd, struct erase_info *instr);
- +	int (*_block_isbad) (struct mtd_info *mtd, loff_t ofs);
- +	int (*_block_markbad) (struct mtd_info *mtd, loff_t ofs);
- +
- +	/* in-memory copy of the bad block table (points into nand_bbt_buf) */
- +	struct bbbt *bbt;
- +
- +	struct dentry *debugfs_dir;
- +
- +	u32 table_size;
- +	u32 pg_size;
- +	u32 blk_size;
- +	u16 pg_shift;
- +	u16 blk_shift;
- +	/* bbt logical address */
- +	u16 pool_lba;
- +	/* bbt physical address */
- +	u16 pool_pba;
- +	/* Maximum count of bad blocks that the vendor guaranteed */
- +	u16 bb_max;
- +	/* Total blocks of the Nand Chip */
- +	u16 total_blks;
- +	/* The block(n) BMT is located at (bmt_tbl[n]) */
- +	u16 bmt_blk_idx;
- +	/* How many pages needs to store 'struct bbbt' */
- +	u32 bmt_pgs;
- +
- +	/* to compensate for driver level remapping */
- +	u8 oob_offset;
- +} bmtd = {0};
- +
- +static unsigned char *nand_bbt_buf;
- +static unsigned char *nand_data_buf;
- +
- +/* -------- Unit conversions -------- */
- +/* Convert a block number to the number of its first page. */
- +static inline u32 blk_pg(u16 block)
- +{
- +	return (u32)(block << (bmtd.blk_shift - bmtd.pg_shift));
- +}
- +
- +/* -------- Nand operations wrapper -------- */
- +/* Read @dat_len data bytes plus @fdm_len FDM/OOB bytes starting at @page.
- + * Returns the underlying driver's _read_oob() result (0 on success).
- + */
- +static inline int
- +bbt_nand_read(u32 page, unsigned char *dat, int dat_len,
- +	      unsigned char *fdm, int fdm_len)
- +{
- +	struct mtd_oob_ops ops = {
- +		.mode = MTD_OPS_PLACE_OOB,
- +		.ooboffs = bmtd.oob_offset,
- +		.oobbuf = fdm,
- +		.ooblen = fdm_len,
- +		.datbuf = dat,
- +		.len = dat_len,
- +	};
- +
- +	return bmtd._read_oob(bmtd.mtd, page << bmtd.pg_shift, &ops);
- +}
- +
- +/* Erase one physical block. Returns the driver's _erase() result. */
- +static inline int bbt_nand_erase(u16 block)
- +{
- +	struct mtd_info *mtd = bmtd.mtd;
- +	struct erase_info instr = {
- +		.addr = (loff_t)block << bmtd.blk_shift,
- +		.len = bmtd.blk_size,
- +	};
- +
- +	return bmtd._erase(mtd, &instr);
- +}
- +
- +/* -------- Bad Blocks Management -------- */
- +/* The pool mapping table (bmt_tbl[]) lives directly after the
- + * table_size entries of bb_tbl[] in the same buffer. */
- +static inline struct bbmt *bmt_tbl(struct bbbt *bbbt)
- +{
- +	return (struct bbmt *)&bbbt->bb_tbl[bmtd.table_size];
- +}
- +
- +/* Read the whole on-flash BMT (bmt_pgs pages) from @block into @dat. */
- +static int
- +read_bmt(u16 block, unsigned char *dat, unsigned char *fdm, int fdm_len)
- +{
- +	u32 len = bmtd.bmt_pgs << bmtd.pg_shift;
- +
- +	return bbt_nand_read(blk_pg(block), dat, len, fdm, fdm_len);
- +}
- +
- +/* Write the BMT image in @dat to @block, tagging the OOB area with the
- + * "bmt" signature at OOB_SIGNATURE_OFFSET so scan_bmt() can find it. */
- +static int write_bmt(u16 block, unsigned char *dat)
- +{
- +	struct mtd_oob_ops ops = {
- +		.mode = MTD_OPS_PLACE_OOB,
- +		.ooboffs = OOB_SIGNATURE_OFFSET + bmtd.oob_offset,
- +		.oobbuf = "bmt",
- +		.ooblen = 3,
- +		.datbuf = dat,
- +		.len = bmtd.bmt_pgs << bmtd.pg_shift,
- +	};
- +	loff_t addr = (loff_t)block << bmtd.blk_shift;
- +
- +	return bmtd._write_oob(bmtd.mtd, addr, &ops);
- +}
- +
- +/* Return the first good block at or after @block, skipping bad blocks
- + * (first FDM byte != 0xff, or read failure). Returns 0 on fatal error:
- + * end of device reached or more than bb_max bad blocks skipped.
- + */
- +static u16 find_valid_block(u16 block)
- +{
- +	u8 fdm[4];
- +	int ret;
- +	int loop = 0;
- +
- +retry:
- +	if (block >= bmtd.total_blks)
- +		return 0;
- +
- +	ret = bbt_nand_read(blk_pg(block), nand_data_buf, bmtd.pg_size,
- +			    fdm, sizeof(fdm));
- +	/* Read the 1st byte of FDM to judge whether it's a bad
- +	 * or not
- +	 */
- +	if (ret || fdm[0] != 0xff) {
- +		pr_info("nand: found bad block 0x%x\n", block);
- +		if (loop >= bmtd.bb_max) {
- +			pr_info("nand: FATAL ERR: too many bad blocks!!\n");
- +			return 0;
- +		}
- +
- +		loop++;
- +		block++;
- +		goto retry;
- +	}
- +
- +	return block;
- +}
- +
- +/* Find out all bad blocks, and fill in the mapping table */
- +/* Find out all bad blocks, and fill in the mapping table.
- + * Builds bb_tbl[] for the data area and bmt_tbl[] for the BMT pool.
- + * Returns 0 on success, -1 on fatal error (too many bad blocks or an
- + * empty pool).
- + */
- +static int scan_bad_blocks(struct bbbt *bbt)
- +{
- +	int i;
- +	u16 block = 0;
- +
- +	/* First time download, the block0 MUST NOT be a bad block,
- +	 * this is guaranteed by vendor
- +	 */
- +	bbt->bb_tbl[0] = 0;
- +
- +	/*
- +	 * Construct the mapping table of Normal data area(non-PMT/BMTPOOL)
- +	 * G - Good block; B - Bad block
- +	 *			---------------------------
- +	 * physical |G|G|B|G|B|B|G|G|G|G|B|G|B|
- +	 *			---------------------------
- +	 * What bb_tbl[i] looks like:
- +	 *   physical block(i):
- +	 *	 0 1 2 3 4 5 6 7 8 9 a b c
- +	 *   mapped block(bb_tbl[i]):
- +	 *	 0 1 3 6 7 8 9 b ......
- +	 * ATTENTION:
- +	 *	If new bad block ocurred(n), search bmt_tbl to find
- +	 *	a available block(x), and fill in the bb_tbl[n] = x;
- +	 */
- +	for (i = 1; i < bmtd.pool_lba; i++) {
- +		bbt->bb_tbl[i] = find_valid_block(bbt->bb_tbl[i - 1] + 1);
- +		BBT_LOG("bb_tbl[0x%x] = 0x%x", i, bbt->bb_tbl[i]);
- +		if (bbt->bb_tbl[i] == 0)
- +			return -1;
- +	}
- +
- +	/* Physical Block start Address of BMT pool */
- +	bmtd.pool_pba = bbt->bb_tbl[i - 1] + 1;
- +	if (bmtd.pool_pba >= bmtd.total_blks - 2) {
- +		pr_info("nand: FATAL ERR: Too many bad blocks!!\n");
- +		return -1;
- +	}
- +
- +	BBT_LOG("pool_pba=0x%x", bmtd.pool_pba);
- +	i = 0;
- +	block = bmtd.pool_pba;
- +	/*
- +	 * The bmt table is used for runtime bad block mapping
- +	 * G - Good block; B - Bad block
- +	 *			---------------------------
- +	 * physical |G|G|B|G|B|B|G|G|G|G|B|G|B|
- +	 *			---------------------------
- +	 * block:	0 1 2 3 4 5 6 7 8 9 a b c
- +	 * What bmt_tbl[i] looks like in initial state:
- +	 *   i:
- +	 *	 0 1 2 3 4 5 6 7
- +	 *   bmt_tbl[i].block:
- +	 *	 0 1 3 6 7 8 9 b
- +	 *   bmt_tbl[i].mapped:
- +	 *	 N N N N N N N B
- +	 *	   N - Not mapped(Available)
- +	 *	   M - Mapped
- +	 *	   B - BMT
- +	 * ATTENTION:
- +	 *	BMT always in the last valid block in pool
- +	 */
- +	while ((block = find_valid_block(block)) != 0) {
- +		bmt_tbl(bbt)[i].block = block;
- +		bmt_tbl(bbt)[i].mapped = NO_MAPPED;
- +		BBT_LOG("bmt_tbl[%d].block = 0x%x", i, block);
- +		block++;
- +		i++;
- +	}
- +
- +	/* check the pool is non-empty BEFORE indexing bmt_tbl[i - 1]:
- +	 * with i == 0 the old code wrote bmt_tbl[0xffff] out of bounds
- +	 */
- +	if (i < 1) {
- +		pr_info("nand: FATAL ERR: no space to store BMT!!\n");
- +		return -1;
- +	}
- +
- +	/* i - How many available blocks in pool, which is the length of bmt_tbl[]
- +	 * bmtd.bmt_blk_idx - bmt_tbl[bmtd.bmt_blk_idx].block => the BMT block
- +	 */
- +	bmtd.bmt_blk_idx = i - 1;
- +	bmt_tbl(bbt)[bmtd.bmt_blk_idx].mapped = BMT_MAPPED;
- +
- +	pr_info("[BBT] %d available blocks in BMT pool\n", i);
- +
- +	return 0;
- +}
- +
- +/* Check whether @buf holds a valid v2 BMT: "BMT" signature in the data,
- + * "bmt" signature in the FDM bytes, and a matching version field. */
- +static bool is_valid_bmt(unsigned char *buf, unsigned char *fdm)
- +{
- +	struct bbbt *bbt = (struct bbbt *)buf;
- +	u8 *sig = (u8 *)bbt->signature + MAIN_SIGNATURE_OFFSET;
- +
- +	if (!memcmp(sig, "BMT", 3) &&
- +	    !memcmp(fdm + OOB_SIGNATURE_OFFSET, "bmt", 3) &&
- +	    bbt->version == BBMT_VERSION)
- +		return true;
- +
- +	BBT_LOG("[BBT] BMT Version not match,upgrage preloader and uboot please! sig=%02x%02x%02x, fdm=%02x%02x%02x",
- +		sig[0], sig[1], sig[2],
- +		fdm[1], fdm[2], fdm[3]);
- +	return false;
- +}
- +
- +/* Scan @bmt until the first default (0) entry and return the index of
- + * the entry flagged BMT_MAPPED. Returns 0 when none is found; callers
- + * treat 0 as failure.
- + */
- +static u16 get_bmt_index(struct bbmt *bmt)
- +{
- +	int i = 0;
- +
- +	while (bmt[i].block != BMT_TBL_DEF_VAL) {
- +		if (bmt[i].mapped == BMT_MAPPED)
- +			return i;
- +		i++;
- +	}
- +	return 0;
- +}
- +
- +/* Search downwards from @block (towards pool_lba) for a valid on-flash
- + * BMT; on success the table is left in nand_bbt_buf and bmt_blk_idx is
- + * set. Returns NULL when no valid BMT is found.
- + *
- + * Iterative rewrite: the original recursed once per scanned block,
- + * which can nest thousands of frames on the kernel stack.
- + */
- +static struct bbbt *scan_bmt(u16 block)
- +{
- +	u8 fdm[4];
- +
- +	for (; block >= bmtd.pool_lba; block--) {
- +		if (read_bmt(block, nand_bbt_buf, fdm, sizeof(fdm)))
- +			continue;
- +
- +		if (!is_valid_bmt(nand_bbt_buf, fdm))
- +			continue;
- +
- +		bmtd.bmt_blk_idx = get_bmt_index(bmt_tbl((struct bbbt *)nand_bbt_buf));
- +		if (bmtd.bmt_blk_idx == 0) {
- +			pr_info("[BBT] FATAL ERR: bmt block index is wrong!\n");
- +			return NULL;
- +		}
- +		pr_info("[BBT] BMT.v2 is found at 0x%x\n", block);
- +		return (struct bbbt *)nand_bbt_buf;
- +	}
- +
- +	return NULL;
- +}
- +
- +/* Write the Burner Bad Block Table to Nand Flash.
- + * n - write BMT to bmt_tbl[n]; on erase/write failure the entry is
- + * zeroed and the previous pool entry (n - 1) is tried.
- + * Returns the index actually written, or (u16)-1 when the pool is
- + * exhausted (n went negative or hit a NORMAL_MAPPED entry).
- + */
- +static u16 upload_bmt(struct bbbt *bbt, int n)
- +{
- +	u16 block;
- +
- +retry:
- +	if (n < 0 || bmt_tbl(bbt)[n].mapped == NORMAL_MAPPED) {
- +		pr_info("nand: FATAL ERR: no space to store BMT!\n");
- +		return (u16)-1;
- +	}
- +
- +	block = bmt_tbl(bbt)[n].block;
- +	BBT_LOG("n = 0x%x, block = 0x%x", n, block);
- +	if (bbt_nand_erase(block)) {
- +		bmt_tbl(bbt)[n].block = 0;
- +		/* erase failed, try the previous block: bmt_tbl[n - 1].block */
- +		n--;
- +		goto retry;
- +	}
- +
- +	/* The signature offset is fixed set to 0,
- +	 * oob signature offset is fixed set to 1
- +	 */
- +	memcpy(bbt->signature + MAIN_SIGNATURE_OFFSET, "BMT", 3);
- +	bbt->version = BBMT_VERSION;
- +
- +	if (write_bmt(block, (unsigned char *)bbt)) {
- +		bmt_tbl(bbt)[n].block = 0;
- +
- +		/* write failed, try the previous block in bmt_tbl[n - 1] */
- +		n--;
- +		goto retry;
- +	}
- +
- +	/* Return the current index(n) of BMT pool (bmt_tbl[n]) */
- +	return n;
- +}
- +
- +/* Claim the next unmapped block from the BMT pool, marking the entry
- + * NORMAL_MAPPED as a side effect. Returns the physical block number,
- + * or 0 when the pool is exhausted.
- + */
- +static u16 find_valid_block_in_pool(struct bbbt *bbt)
- +{
- +	int i;
- +
- +	if (bmtd.bmt_blk_idx == 0)
- +		goto error;
- +
- +	for (i = 0; i < bmtd.bmt_blk_idx; i++) {
- +		if (bmt_tbl(bbt)[i].block != 0 && bmt_tbl(bbt)[i].mapped == NO_MAPPED) {
- +			bmt_tbl(bbt)[i].mapped = NORMAL_MAPPED;
- +			return bmt_tbl(bbt)[i].block;
- +		}
- +	}
- +
- +error:
- +	pr_info("nand: FATAL ERR: BMT pool is run out!\n");
- +	return 0;
- +}
- +
- +/* We met a bad block, mark it as bad and map it to a valid block in pool,
- + * if it's a write failure, we need to write the data to mapped block.
- + * Returns false when no spare block is left in the pool.
- + * NOTE(review): upload_bmt() can return (u16)-1 on failure and that value
- + * is stored into bmt_blk_idx unchecked here — confirm callers tolerate it.
- + */
- +static bool update_bmt(u16 block)
- +{
- +	u16 mapped_blk;
- +	struct bbbt *bbt;
- +
- +	bbt = bmtd.bbt;
- +	mapped_blk = find_valid_block_in_pool(bbt);
- +	if (mapped_blk == 0)
- +		return false;
- +
- +	/* Map new bad block to available block in pool */
- +	bbt->bb_tbl[block] = mapped_blk;
- +	bmtd.bmt_blk_idx = upload_bmt(bbt, bmtd.bmt_blk_idx);
- +
- +	return true;
- +}
- +
- +/* Map a logical block to its physical block: blocks below pool_lba go
- + * through bb_tbl[]; blocks at/above pool_lba map to themselves.
- + */
- +u16 get_mapping_block_index(int block)
- +{
- +	int mapping_block;
- +
- +	if (block < bmtd.pool_lba)
- +		mapping_block = bmtd.bbt->bb_tbl[block];
- +	else
- +		mapping_block = block;
- +	BBT_LOG("0x%x mapped to 0x%x", block, mapping_block);
- +
- +	return mapping_block;
- +}
- +
- +/* Read wrapper installed as mtd->_read_oob: remaps each logical block
- + * through the BMT before reading, and on read failure remaps the block
- + * via update_bmt() and retries, up to 10 times per block.
- + */
- +static int
- +mtk_bmt_read(struct mtd_info *mtd, loff_t from,
- +	     struct mtd_oob_ops *ops)
- +{
- +	struct mtd_oob_ops cur_ops = *ops;
- +	int retry_count = 0;
- +	loff_t cur_from;
- +	int ret;
- +
- +	ops->retlen = 0;
- +	ops->oobretlen = 0;
- +
- +	while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) {
- +		u32 offset = from & (bmtd.blk_size - 1);
- +		u32 block = from >> bmtd.blk_shift;
- +		u32 cur_block;
- +
- +		cur_block = get_mapping_block_index(block);
- +		cur_from = ((loff_t)cur_block << bmtd.blk_shift) + offset;
- +
- +		cur_ops.oobretlen = 0;
- +		cur_ops.retlen = 0;
- +		/* bmtd.blk_size == mtd->erasesize (set at attach time);
- +		 * use blk_size for consistency with mtk_bmt_write()
- +		 */
- +		cur_ops.len = min_t(u32, bmtd.blk_size - offset,
- +				    ops->len - ops->retlen);
- +		ret = bmtd._read_oob(mtd, cur_from, &cur_ops);
- +		if (ret < 0) {
- +			update_bmt(block);
- +			if (retry_count++ < 10)
- +				continue;
- +
- +			return ret;
- +		}
- +
- +		ops->retlen += cur_ops.retlen;
- +		ops->oobretlen += cur_ops.oobretlen;
- +
- +		cur_ops.ooboffs = 0;
- +		cur_ops.datbuf += cur_ops.retlen;
- +		cur_ops.oobbuf += cur_ops.oobretlen;
- +		cur_ops.ooblen -= cur_ops.oobretlen;
- +
- +		/* OOB-only read: advance by the remainder of the block */
- +		if (!cur_ops.len)
- +			cur_ops.len = mtd->erasesize - offset;
- +
- +		from += cur_ops.len;
- +		retry_count = 0;
- +	}
- +
- +	return 0;
- +}
- +
- +/* Write wrapper installed as mtd->_write_oob: remaps each logical block
- + * through the BMT before writing, and on write failure remaps the block
- + * via update_bmt() and retries, up to 10 times per block.
- + */
- +static int
- +mtk_bmt_write(struct mtd_info *mtd, loff_t to,
- +	      struct mtd_oob_ops *ops)
- +{
- +	struct mtd_oob_ops cur_ops = *ops;
- +	int retry_count = 0;
- +	loff_t cur_to;
- +	int ret;
- +
- +	ops->retlen = 0;
- +	ops->oobretlen = 0;
- +
- +	while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) {
- +		u32 offset = to & (bmtd.blk_size - 1);
- +		u32 block = to >> bmtd.blk_shift;
- +		u32 cur_block;
- +
- +		cur_block = get_mapping_block_index(block);
- +		cur_to = ((loff_t)cur_block << bmtd.blk_shift) + offset;
- +
- +		cur_ops.oobretlen = 0;
- +		cur_ops.retlen = 0;
- +		cur_ops.len = min_t(u32, bmtd.blk_size - offset,
- +				    ops->len - ops->retlen);
- +		ret = bmtd._write_oob(mtd, cur_to, &cur_ops);
- +		if (ret < 0) {
- +			update_bmt(block);
- +			if (retry_count++ < 10)
- +				continue;
- +
- +			return ret;
- +		}
- +
- +		ops->retlen += cur_ops.retlen;
- +		ops->oobretlen += cur_ops.oobretlen;
- +
- +		cur_ops.ooboffs = 0;
- +		cur_ops.datbuf += cur_ops.retlen;
- +		cur_ops.oobbuf += cur_ops.oobretlen;
- +		cur_ops.ooblen -= cur_ops.oobretlen;
- +
- +		/* OOB-only write: advance by the remainder of the block */
- +		if (!cur_ops.len)
- +			cur_ops.len = mtd->erasesize - offset;
- +
- +		to += cur_ops.len;
- +		retry_count = 0;
- +	}
- +
- +	return 0;
- +}
- +
- +/* Erase wrapper installed as mtd->_erase: erases each mapped physical
- + * block in [addr, addr + len), remapping and retrying (max 10 times per
- + * block) on failure. Returns the last driver _erase() result.
- + */
- +static int
- +mtk_bmt_mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
- +{
- +	struct erase_info mapped_instr = {
- +		.len = bmtd.blk_size,
- +	};
- +	int retry_count = 0;
- +	u64 start_addr, end_addr;
- +	/* initialize: with instr->len == 0 the loop never runs and the
- +	 * old code returned an uninitialized value
- +	 */
- +	int ret = 0;
- +	u16 orig_block, block;
- +
- +	start_addr = instr->addr & (~mtd->erasesize_mask);
- +	end_addr = instr->addr + instr->len;
- +
- +	while (start_addr < end_addr) {
- +		orig_block = start_addr >> bmtd.blk_shift;
- +		block = get_mapping_block_index(orig_block);
- +		mapped_instr.addr = (loff_t)block << bmtd.blk_shift;
- +		ret = bmtd._erase(mtd, &mapped_instr);
- +		if (ret) {
- +			update_bmt(orig_block);
- +			if (retry_count++ < 10)
- +				continue;
- +			instr->fail_addr = start_addr;
- +			break;
- +		}
- +		start_addr += mtd->erasesize;
- +		retry_count = 0;
- +	}
- +
- +	return ret;
- +}
- +/* isbad wrapper: checks the mapped physical block; on a positive result
- + * remaps the logical block and re-checks, up to 10 times.
- + */
- +static int
- +mtk_bmt_block_isbad(struct mtd_info *mtd, loff_t ofs)
- +{
- +	int retry_count = 0;
- +	u16 orig_block = ofs >> bmtd.blk_shift;
- +	u16 block;
- +	int ret;
- +
- +retry:
- +	block = get_mapping_block_index(orig_block);
- +	ret = bmtd._block_isbad(mtd, (loff_t)block << bmtd.blk_shift);
- +	if (ret) {
- +		update_bmt(orig_block);
- +		if (retry_count++ < 10)
- +			goto retry;
- +	}
- +	return ret;
- +}
- +
- +/* markbad wrapper: remap the logical block to a spare pool block, then
- + * mark the previously-mapped physical block bad in the lower driver.
- + */
- +static int
- +mtk_bmt_block_markbad(struct mtd_info *mtd, loff_t ofs)
- +{
- +	u16 orig_block = ofs >> bmtd.blk_shift;
- +	u16 block = get_mapping_block_index(orig_block);
- +	update_bmt(orig_block);
- +	return bmtd._block_markbad(mtd, (loff_t)block << bmtd.blk_shift);
- +}
- +
- +/* Save the driver's original mtd ops in bmtd and install the
- + * BMT-remapping wrappers (restored by mtk_bmt_detach()).
- + */
- +static void
- +mtk_bmt_replace_ops(struct mtd_info *mtd)
- +{
- +	bmtd._read_oob = mtd->_read_oob;
- +	bmtd._write_oob = mtd->_write_oob;
- +	bmtd._erase = mtd->_erase;
- +	bmtd._block_isbad = mtd->_block_isbad;
- +	bmtd._block_markbad = mtd->_block_markbad;
- +
- +	mtd->_read_oob = mtk_bmt_read;
- +	mtd->_write_oob = mtk_bmt_write;
- +	mtd->_erase = mtk_bmt_mtd_erase;
- +	mtd->_block_isbad = mtk_bmt_block_isbad;
- +	mtd->_block_markbad = mtk_bmt_block_markbad;
- +}
- +
- +/* debugfs "mark_good": reset the mapping of the block containing offset
- + * @val back to identity and persist the table.
- + */
- +static int mtk_bmt_debug_mark_good(void *data, u64 val)
- +{
- +	u32 block = val >> bmtd.blk_shift;
- +
- +	bmtd.bbt->bb_tbl[block] = block;
- +	bmtd.bmt_blk_idx = upload_bmt(bmtd.bbt, bmtd.bmt_blk_idx);
- +
- +	return 0;
- +}
- +
- +/* debugfs "mark_bad": force-remap the block containing offset @val. */
- +static int mtk_bmt_debug_mark_bad(void *data, u64 val)
- +{
- +	u32 block = val >> bmtd.blk_shift;
- +
- +	update_bmt(block);
- +
- +	return 0;
- +}
- +
- +DEFINE_DEBUGFS_ATTRIBUTE(fops_mark_good, NULL, mtk_bmt_debug_mark_good, "%llu\n");
- +DEFINE_DEBUGFS_ATTRIBUTE(fops_mark_bad, NULL, mtk_bmt_debug_mark_bad, "%llu\n");
- +
- +/* Create the mtk-bmt debugfs directory with mark_good/mark_bad knobs.
- + * Best-effort: silently does nothing if debugfs is unavailable.
- + */
- +static void
- +mtk_bmt_add_debugfs(void)
- +{
- +	struct dentry *dir;
- +
- +	/* debugfs_create_dir() reports failure via ERR_PTR(), not NULL,
- +	 * so a plain !dir check never fires; also only store a valid
- +	 * dentry so mtk_bmt_detach() never removes an error pointer
- +	 */
- +	dir = debugfs_create_dir("mtk-bmt", NULL);
- +	if (IS_ERR_OR_NULL(dir))
- +		return;
- +
- +	bmtd.debugfs_dir = dir;
- +	debugfs_create_file_unsafe("mark_good", S_IWUSR, dir, NULL, &fops_mark_good);
- +	debugfs_create_file_unsafe("mark_bad", S_IWUSR, dir, NULL, &fops_mark_bad);
- +}
- +
- +/* Undo mtk_bmt_attach(): remove debugfs entries, free the BMT buffers,
- + * restore the driver's original mtd ops and full device size, and reset
- + * the global state.
- + */
- +void mtk_bmt_detach(struct mtd_info *mtd)
- +{
- +	if (bmtd.mtd != mtd)
- +		return;
- +
- +	if (bmtd.debugfs_dir)
- +		debugfs_remove_recursive(bmtd.debugfs_dir);
- +	bmtd.debugfs_dir = NULL;
- +
- +	kfree(nand_bbt_buf);
- +	kfree(nand_data_buf);
- +	/* clear the freed pointers so a later attach/detach cycle can
- +	 * never touch stale memory
- +	 */
- +	nand_bbt_buf = NULL;
- +	nand_data_buf = NULL;
- +
- +	mtd->_read_oob = bmtd._read_oob;
- +	mtd->_write_oob = bmtd._write_oob;
- +	mtd->_erase = bmtd._erase;
- +	mtd->_block_isbad = bmtd._block_isbad;
- +	mtd->_block_markbad = bmtd._block_markbad;
- +	mtd->size = bmtd.total_blks << bmtd.blk_shift;
- +
- +	memset(&bmtd, 0, sizeof(bmtd));
- +}
- +
- +/* Attach BMT remapping to @mtd: parse the DT properties, reserve the
- + * PMT/BMT pool area at the end of the device (shrinking mtd->size),
- + * install the op wrappers and load — or build and flash — the bad block
- + * table. Returns 0 on success or when BMT is not enabled in DT.
- + */
- +int mtk_bmt_attach(struct mtd_info *mtd)
- +{
- +	struct device_node *np;
- +	struct bbbt *bbt;
- +	u32 bufsz;
- +	u32 block;
- +	u16 total_blocks, pmt_block;
- +	int ret = 0;
- +	u32 bmt_pool_size, bmt_table_size;
- +
- +	/* only a single attached device is supported */
- +	if (bmtd.mtd)
- +		return -ENOSPC;
- +
- +	np = mtd_get_of_node(mtd);
- +	if (!np)
- +		return 0;
- +
- +	if (!of_property_read_bool(np, "mediatek,bmt-v2"))
- +		return 0;
- +
- +	if (of_property_read_u32(np, "mediatek,bmt-pool-size",
- +				 &bmt_pool_size) != 0)
- +		bmt_pool_size = 80;
- +
- +	if (of_property_read_u8(np, "mediatek,bmt-oob-offset",
- +				&bmtd.oob_offset) != 0)
- +		bmtd.oob_offset = 0;
- +
- +	if (of_property_read_u32(np, "mediatek,bmt-table-size",
- +				 &bmt_table_size) != 0)
- +		bmt_table_size = 0x2000U;
- +
- +	bmtd.mtd = mtd;
- +	mtk_bmt_replace_ops(mtd);
- +
- +	bmtd.table_size = bmt_table_size;
- +	bmtd.blk_size = mtd->erasesize;
- +	bmtd.blk_shift = ffs(bmtd.blk_size) - 1;
- +	bmtd.pg_size = mtd->writesize;
- +	bmtd.pg_shift = ffs(bmtd.pg_size) - 1;
- +	total_blocks = mtd->size >> bmtd.blk_shift;
- +	pmt_block = total_blocks - bmt_pool_size - 2;
- +
- +	/* widen before shifting: a u16 << blk_shift is done in int and
- +	 * can overflow for large devices
- +	 */
- +	mtd->size = (loff_t)pmt_block << bmtd.blk_shift;
- +
- +	/*
- +	 *  ---------------------------------------
- +	 * | PMT(2blks) | BMT POOL(totalblks * 2%) |
- +	 *  ---------------------------------------
- +	 * ^            ^
- +	 * |            |
- +	 * pmt_block	pmt_block + 2blocks(pool_lba)
- +	 *
- +	 * ATTETION!!!!!!
- +	 *     The blocks ahead of the boundary block are stored in bb_tbl
- +	 *     and blocks behind are stored in bmt_tbl
- +	 */
- +
- +	bmtd.pool_lba = (u16)(pmt_block + 2);
- +	bmtd.total_blks = total_blocks;
- +	bmtd.bb_max = bmtd.total_blks * BBPOOL_RATIO / 100;
- +
- +	/* 3 buffers we need */
- +	bufsz = round_up(sizeof(struct bbbt) +
- +			 bmt_table_size * sizeof(struct bbmt), bmtd.pg_size);
- +	bmtd.bmt_pgs = bufsz >> bmtd.pg_shift;
- +
- +	nand_bbt_buf = kzalloc(bufsz, GFP_KERNEL);
- +	nand_data_buf = kzalloc(bmtd.pg_size, GFP_KERNEL);
- +
- +	if (!nand_bbt_buf || !nand_data_buf) {
- +		pr_info("nand: FATAL ERR: allocate buffer failed!\n");
- +		ret = -1;
- +		goto error;
- +	}
- +
- +	memset(nand_bbt_buf, 0xff, bufsz);
- +	memset(nand_data_buf, 0xff, bmtd.pg_size);
- +
- +	BBT_LOG("bbtbuf=0x%p(0x%x) dat=0x%p(0x%x)",
- +		nand_bbt_buf, bufsz, nand_data_buf, bmtd.pg_size);
- +	BBT_LOG("pool_lba=0x%x total_blks=0x%x bb_max=0x%x",
- +		bmtd.pool_lba, bmtd.total_blks, bmtd.bb_max);
- +
- +	/* Scanning start from the first page of the last block
- +	 * of whole flash
- +	 */
- +	bbt = scan_bmt(bmtd.total_blks - 1);
- +	if (!bbt) {
- +		/* BMT not found */
- +		if (bmtd.total_blks > BB_TABLE_MAX + BMT_TABLE_MAX) {
- +			pr_info("nand: FATAL: Too many blocks, can not support!\n");
- +			ret = -1;
- +			goto error;
- +		}
- +
- +		bbt = (struct bbbt *)nand_bbt_buf;
- +		memset(bmt_tbl(bbt), BMT_TBL_DEF_VAL, bmtd.table_size * sizeof(struct bbmt));
- +
- +		if (scan_bad_blocks(bbt)) {
- +			ret = -1;
- +			goto error;
- +		}
- +
- +		/* BMT always in the last valid block in pool */
- +		bmtd.bmt_blk_idx = upload_bmt(bbt, bmtd.bmt_blk_idx);
- +		/* check for failure BEFORE dereferencing bmt_tbl[] with the
- +		 * index: (u16)-1 would read far out of bounds
- +		 */
- +		if (bmtd.bmt_blk_idx == (u16)-1) {
- +			ret = -1;
- +			goto error;
- +		}
- +
- +		block = bmt_tbl(bbt)[bmtd.bmt_blk_idx].block;
- +		pr_notice("[BBT] BMT.v2 is written into PBA:0x%x\n", block);
- +
- +		if (bmtd.bmt_blk_idx == 0)
- +			pr_info("nand: Warning: no available block in BMT pool!\n");
- +	}
- +	mtk_bmt_add_debugfs();
- +
- +	bmtd.bbt = bbt;
- +	return 0;
- +
- +error:
- +	mtk_bmt_detach(mtd);
- +	return ret;
- +}
- +
- +
- +MODULE_LICENSE("GPL");
- +MODULE_AUTHOR("Xiangsheng Hou <[email protected]>, Felix Fietkau <[email protected]>");
- +MODULE_DESCRIPTION("Bad Block mapping management v2 for MediaTek NAND Flash Driver");
- +
- --- /dev/null
- +++ b/include/linux/mtd/mtk_bmt.h
- @@ -0,0 +1,18 @@
- +#ifndef __MTK_BMT_H
- +#define __MTK_BMT_H
- +
- +/* forward declaration so this header does not depend on <linux/mtd/mtd.h>
- + * being included first
- + */
- +struct mtd_info;
- +
- +#ifdef CONFIG_MTD_NAND_MTK_BMT
- +int mtk_bmt_attach(struct mtd_info *mtd);
- +void mtk_bmt_detach(struct mtd_info *mtd);
- +#else
- +static inline int mtk_bmt_attach(struct mtd_info *mtd)
- +{
- +	return 0;
- +}
- +
- +static inline void mtk_bmt_detach(struct mtd_info *mtd)
- +{
- +}
- +#endif
- +
- +#endif
- --- a/drivers/mtd/mtk-snand/mtk-snand-mtd.c
- +++ b/drivers/mtd/mtk-snand/mtk-snand-mtd.c
- @@ -16,6 +16,7 @@
- #include <linux/dma-mapping.h>
- #include <linux/wait.h>
- #include <linux/mtd/mtd.h>
- +#include <linux/mtd/mtk_bmt.h>
- #include <linux/mtd/partitions.h>
- #include <linux/of_platform.h>
-
- @@ -612,6 +613,8 @@ static int mtk_snand_probe(struct platfo
- mtd->_block_isbad = mtk_snand_mtd_block_isbad;
- mtd->_block_markbad = mtk_snand_mtd_block_markbad;
-
- + mtk_bmt_attach(mtd);
- +
- ret = mtd_device_register(mtd, NULL, 0);
- if (ret) {
- dev_err(msm->pdev.dev, "failed to register mtd partition\n");
- @@ -623,6 +626,7 @@ static int mtk_snand_probe(struct platfo
- return 0;
-
- errout4:
- + mtk_bmt_detach(mtd);
- devm_kfree(msm->pdev.dev, msm->page_cache);
-
- errout3:
- @@ -650,6 +654,8 @@ static int mtk_snand_remove(struct platf
- if (ret)
- return ret;
-
- + mtk_bmt_detach(mtd);
- +
- mtk_snand_cleanup(msm->snf);
-
- if (msm->irq >= 0)
|