#include <linux/mtd/mtk_bmt.h>
#include <linux/module.h>
#include <linux/debugfs.h>
+#include <linux/bits.h>
#define MAIN_SIGNATURE_OFFSET 0
#define OOB_SIGNATURE_OFFSET 1
/* How many pages are needed to store 'struct bbbt' */
u32 bmt_pgs;
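+
+ /* optional <start end> address ranges that limit remapping; empty means
+ * all blocks are eligible
+ */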
+ const __be32 *remap_range;
+ int remap_range_len;
+
/* to compensate for driver-level remapping */
u8 oob_offset;
} bmtd = {0};
return bmtd._erase(mtd, &instr);
}
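+
+/* Copy a block page by page, data and OOB, from src_blk to dest_blk,
+ * stopping at max_offset or once a read returns no data.
+ */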
+static int
+bbt_nand_copy(u16 dest_blk, u16 src_blk, loff_t max_offset)
+{
+ int pages = bmtd.blk_size >> bmtd.pg_shift;
+ loff_t src = (loff_t)src_blk << bmtd.blk_shift;
+ loff_t dest = (loff_t)dest_blk << bmtd.blk_shift;
+ loff_t offset = 0;
+ uint8_t oob[64];
+ int i, ret;
+
+ for (i = 0; i < pages; i++) {
+ struct mtd_oob_ops rd_ops = {
+ .mode = MTD_OPS_PLACE_OOB,
+ .oobbuf = oob,
+ .ooblen = min_t(int, bmtd.mtd->oobsize / pages, sizeof(oob)),
+ .datbuf = nand_data_buf,
+ .len = bmtd.pg_size,
+ };
+ struct mtd_oob_ops wr_ops = {
+ .mode = MTD_OPS_PLACE_OOB,
+ .oobbuf = oob,
+ .datbuf = nand_data_buf,
+ .len = bmtd.pg_size,
+ };
+
+ if (offset >= max_offset)
+ break;
+
+ ret = bmtd._read_oob(bmtd.mtd, src + offset, &rd_ops);
+ if (ret < 0 && !mtd_is_bitflip(ret))
+ return ret;
+
+ if (!rd_ops.retlen)
+ break;
+
+ /* write out the OOB bytes that were actually read */
+ wr_ops.ooblen = rd_ops.oobretlen;
+ ret = bmtd._write_oob(bmtd.mtd, dest + offset, &wr_ops);
+ if (ret < 0)
+ return ret;
+
+ offset += rd_ops.retlen;
+ }
+
+ return 0;
+}
+
/* -------- Bad Blocks Management -------- */
static inline struct bbmt *bmt_tbl(struct bbbt *bbbt)
{
/* We hit a bad block: mark it as bad and map it to a valid block in the
 * pool. If this was a write failure, the data also needs to be copied to
 * the mapped block.
 */
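+/* copy_len gives the amount of data in the failing block that is still
+ * valid and must be copied to the replacement: callers pass the full block
+ * size for read and bad-block cases, the failed offset for writes, and 0
+ * for erase failures.
+ */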
-static bool update_bmt(u16 block)
+static bool update_bmt(u16 block, int copy_len)
{
u16 mapped_blk;
struct bbbt *bbt;
/* Map new bad block to available block in pool */
bbt->bb_tbl[block] = mapped_blk;
+
+ /* Erase new block */
+ bbt_nand_erase(mapped_blk);
+ if (copy_len > 0)
+ bbt_nand_copy(mapped_blk, block, copy_len);
+
bmtd.bmt_blk_idx = upload_bmt(bbt, bmtd.bmt_blk_idx);
return true;
}
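+
+/* A block may be remapped only if its address falls inside one of the
+ * optional DT-provided remap ranges; with no ranges configured, every
+ * block is eligible.
+ */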
+static bool
+mapping_block_in_range(int block)
+{
+ const __be32 *cur = bmtd.remap_range;
+ u32 addr = block << bmtd.blk_shift;
+ int i;
+
+ if (!cur || !bmtd.remap_range_len)
+ return true;
+
+ for (i = 0; i < bmtd.remap_range_len; i++, cur += 2)
+ if (addr >= be32_to_cpu(cur[0]) && addr < be32_to_cpu(cur[1]))
+ return true;
+
+ return false;
+}
+
u16 get_mapping_block_index(int block)
{
- int mapping_block;
- if (block < bmtd.pool_lba)
- mapping_block = bmtd.bbt->bb_tbl[block];
- else
- mapping_block = block;
- BBT_LOG("0x%x mapped to 0x%x", block, mapping_block);
- return mapping_block;
+ if (block >= bmtd.pool_lba)
+ return block;
+
+ if (!mapping_block_in_range(block))
+ return block;
+
+ return bmtd.bbt->bb_tbl[block];
}
static int
struct mtd_oob_ops cur_ops = *ops;
int retry_count = 0;
loff_t cur_from;
- int ret;
+ int ret = 0;
ops->retlen = 0;
ops->oobretlen = 0;
while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) {
+ int cur_ret;
+
u32 offset = from & (bmtd.blk_size - 1);
u32 block = from >> bmtd.blk_shift;
u32 cur_block;
cur_ops.retlen = 0;
cur_ops.len = min_t(u32, mtd->erasesize - offset,
ops->len - ops->retlen);
- ret = bmtd._read_oob(mtd, cur_from, &cur_ops);
- if (ret < 0) {
- update_bmt(block);
+ cur_ret = bmtd._read_oob(mtd, cur_from, &cur_ops);
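+ /* record the error (including -EUCLEAN bitflips) so it reaches the caller */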
+ if (cur_ret < 0)
+ ret = cur_ret;
+ if (cur_ret < 0 && !mtd_is_bitflip(cur_ret)) {
+ update_bmt(block, mtd->erasesize);
if (retry_count++ < 10)
continue;
retry_count = 0;
}
- return 0;
+ return ret;
}
static int
ops->len - ops->retlen);
ret = bmtd._write_oob(mtd, cur_to, &cur_ops);
if (ret < 0) {
- update_bmt(block);
+ update_bmt(block, offset);
if (retry_count++ < 10)
continue;
mapped_instr.addr = (loff_t)block << bmtd.blk_shift;
ret = bmtd._erase(mtd, &mapped_instr);
if (ret) {
- update_bmt(orig_block);
+ update_bmt(orig_block, 0);
if (retry_count++ < 10)
continue;
instr->fail_addr = start_addr;
block = get_mapping_block_index(orig_block);
ret = bmtd._block_isbad(mtd, (loff_t)block << bmtd.blk_shift);
if (ret) {
- update_bmt(orig_block);
+ update_bmt(orig_block, bmtd.blk_size);
if (retry_count++ < 10)
goto retry;
}
{
u16 orig_block = ofs >> bmtd.blk_shift;
u16 block = get_mapping_block_index(orig_block);
- update_bmt(orig_block);
+ update_bmt(orig_block, bmtd.blk_size);
return bmtd._block_markbad(mtd, (loff_t)block << bmtd.blk_shift);
}
{
u32 block = val >> bmtd.blk_shift;
- update_bmt(block);
+ update_bmt(block, bmtd.blk_size);
+
+ return 0;
+}
+
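+/* Build a bitmap marking which bmt_tbl pool entries are still referenced
+ * by the block remap table.
+ */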
+static unsigned long *
+mtk_bmt_get_mapping_mask(void)
+{
+ struct bbmt *bbmt = bmt_tbl(bmtd.bbt);
+ int main_blocks = bmtd.mtd->size >> bmtd.blk_shift;
+ unsigned long *used;
+ int i, k;
+
+ used = kcalloc(BIT_WORD(bmtd.bmt_blk_idx) + 1, sizeof(unsigned long), GFP_KERNEL);
+ if (!used)
+ return NULL;
+
+ for (i = 1; i < main_blocks; i++) {
+ if (bmtd.bbt->bb_tbl[i] == i)
+ continue;
+
+ for (k = 0; k < bmtd.bmt_blk_idx; k++) {
+ if (bmtd.bbt->bb_tbl[i] != bbmt[k].block)
+ continue;
+
+ set_bit(k, used);
+ break;
+ }
+ }
+
+ return used;
+}
+
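+/* debugfs handler: writing 0 dumps the remap table and pool state, writing
+ * 100 releases pool entries that are mapped but no longer referenced.
+ */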
+static int mtk_bmt_debug(void *data, u64 val)
+{
+ struct bbmt *bbmt = bmt_tbl(bmtd.bbt);
+ struct mtd_info *mtd = bmtd.mtd;
+ unsigned long *used;
+ int main_blocks = mtd->size >> bmtd.blk_shift;
+ int n_remap = 0;
+ int i;
+
+ used = mtk_bmt_get_mapping_mask();
+ if (!used)
+ return -ENOMEM;
+
+ switch (val) {
+ case 0:
+ for (i = 1; i < main_blocks; i++) {
+ if (bmtd.bbt->bb_tbl[i] == i)
+ continue;
+
+ printk("remap [%x->%x]\n", i, bmtd.bbt->bb_tbl[i]);
+ n_remap++;
+ }
+ for (i = 0; i <= bmtd.bmt_blk_idx; i++) {
+ char c;
+
+ switch (bbmt[i].mapped) {
+ case NO_MAPPED:
+ continue;
+ case NORMAL_MAPPED:
+ c = 'm';
+ if (test_bit(i, used))
+ c = 'M';
+ break;
+ case BMT_MAPPED:
+ c = 'B';
+ break;
+ default:
+ c = 'X';
+ break;
+ }
+ printk("[%x:%c] = 0x%x\n", i, c, bbmt[i].block);
+ }
+ break;
+ case 100:
+ for (i = 0; i <= bmtd.bmt_blk_idx; i++) {
+ if (bbmt[i].mapped != NORMAL_MAPPED)
+ continue;
+
+ if (test_bit(i, used))
+ continue;
+
+ n_remap++;
+ bbmt[i].mapped = NO_MAPPED;
+ printk("free block [%d:%x]\n", i, bbmt[i].block);
+ }
+ if (n_remap)
+ bmtd.bmt_blk_idx = upload_bmt(bmtd.bbt, bmtd.bmt_blk_idx);
+ break;
+ }
+
+ kfree(used);
return 0;
}
+
DEFINE_DEBUGFS_ATTRIBUTE(fops_mark_good, NULL, mtk_bmt_debug_mark_good, "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(fops_mark_bad, NULL, mtk_bmt_debug_mark_bad, "%llu\n");
+DEFINE_DEBUGFS_ATTRIBUTE(fops_debug, NULL, mtk_bmt_debug, "%llu\n");
static void
mtk_bmt_add_debugfs(void)
debugfs_create_file_unsafe("mark_good", S_IWUSR, dir, NULL, &fops_mark_good);
debugfs_create_file_unsafe("mark_bad", S_IWUSR, dir, NULL, &fops_mark_bad);
+ debugfs_create_file_unsafe("debug", S_IWUSR, dir, NULL, &fops_debug);
}
void mtk_bmt_detach(struct mtd_info *mtd)
&bmt_table_size) != 0)
bmt_table_size = 0x2000U;
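+
+ /* Optional property restricting remapping to the listed address windows.
+ * Hypothetical example, one <start end> pair (two be32 cells = 8 bytes,
+ * hence the division below):
+ *
+ * mediatek,bmt-remap-range = <0x000000 0x580000>;
+ */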
+ bmtd.remap_range = of_get_property(np, "mediatek,bmt-remap-range",
+ &bmtd.remap_range_len);
+ bmtd.remap_range_len /= 8;
+
bmtd.mtd = mtd;
mtk_bmt_replace_ops(mtd);