kernel: mtk_bmt: skip bitflip check if threshold isn't set

If mtd->bitflip_threshold is left at 0, the existing check
"cur_ret >= mtd->bitflip_threshold" is true for every successful read,
so every block inside the mapped range gets remapped on read. Only
compare against the threshold when it is actually set.
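Note: the mtk_bmt_remap_block() helper added below relies on the per-format
mapping ops (bbt, bmt-v2, nmbm) reporting whether a remap actually took place.
A rough sketch of that contract follows, with names and prototypes inferred
from the call sites in this diff rather than copied from the real definitions
in mtk_bmt.h:

    #include <linux/types.h>

    /* Sketch only: field names and types are assumptions based on this
     * diff's call sites, not the driver's actual struct definition. */
    struct device_node;

    struct mtk_bmt_mapping_ops_sketch {
            /* build the remap table from the devicetree node; 0 on success */
            int (*init)(struct device_node *np);
            /* physical block currently backing a logical block, or <0 on error */
            int (*get_mapping_block)(int block);
            /* drop an existing remap so the logical block maps to itself again */
            void (*unmap_block)(int block);
            /*
             * Map a logical block away from its failing physical block and
             * copy copy_len bytes of old data over; returning false (e.g. no
             * spare block left) is what lets mtk_bmt_remap_block() tell its
             * callers to give up instead of retrying.
             */
            bool (*remap_block)(int block, int mapped_block, int copy_len);
    };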
[openwrt/staging/nbd.git] / target/linux/generic/files/drivers/mtd/nand/mtk_bmt.c
index 721eb0e6f0507df1c74e356abafa1282bd8715a8..bcff7d6ac8207644d667a6e35aca5a12dcfab828 100644
@@ -92,6 +92,17 @@ bool mapping_block_in_range(int block, int *start, int *end)
        return false;
 }
 
+static bool
+mtk_bmt_remap_block(u32 block, u32 mapped_block, int copy_len)
+{
+       int start, end;
+
+       if (!mapping_block_in_range(block, &start, &end))
+               return false;
+
+       return bmtd.ops->remap_block(block, mapped_block, copy_len);
+}
+
 static int
 mtk_bmt_read(struct mtd_info *mtd, loff_t from,
             struct mtd_oob_ops *ops)
@@ -101,7 +112,6 @@ mtk_bmt_read(struct mtd_info *mtd, loff_t from,
        loff_t cur_from;
        int ret = 0;
        int max_bitflips = 0;
-       int start, end;
 
        ops->retlen = 0;
        ops->oobretlen = 0;
@@ -129,16 +139,15 @@ mtk_bmt_read(struct mtd_info *mtd, loff_t from,
                else
                        max_bitflips = max_t(int, max_bitflips, cur_ret);
                if (cur_ret < 0 && !mtd_is_bitflip(cur_ret)) {
-                       bmtd.ops->remap_block(block, cur_block, mtd->erasesize);
-                       if (retry_count++ < 10)
+                       if (mtk_bmt_remap_block(block, cur_block, mtd->erasesize) &&
+                           retry_count++ < 10)
                                continue;
 
                        goto out;
                }
 
-               if (cur_ret >= mtd->bitflip_threshold &&
-                   mapping_block_in_range(block, &start, &end))
-                       bmtd.ops->remap_block(block, cur_block, mtd->erasesize);
+               if (mtd->bitflip_threshold && cur_ret >= mtd->bitflip_threshold)
+                       mtk_bmt_remap_block(block, cur_block, mtd->erasesize);
 
                ops->retlen += cur_ops.retlen;
                ops->oobretlen += cur_ops.oobretlen;
@@ -191,8 +200,8 @@ mtk_bmt_write(struct mtd_info *mtd, loff_t to,
                                         ops->len - ops->retlen);
                ret = bmtd._write_oob(mtd, cur_to, &cur_ops);
                if (ret < 0) {
-                       bmtd.ops->remap_block(block, cur_block, offset);
-                       if (retry_count++ < 10)
+                       if (mtk_bmt_remap_block(block, cur_block, offset) &&
+                           retry_count++ < 10)
                                continue;
 
                        return ret;
@@ -239,8 +248,8 @@ mtk_bmt_mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
                mapped_instr.addr = (loff_t)block << bmtd.blk_shift;
                ret = bmtd._erase(mtd, &mapped_instr);
                if (ret) {
-                       bmtd.ops->remap_block(orig_block, block, 0);
-                       if (retry_count++ < 10)
+                       if (mtk_bmt_remap_block(orig_block, block, 0) &&
+                           retry_count++ < 10)
                                continue;
                        instr->fail_addr = start_addr;
                        break;
@@ -263,8 +272,8 @@ retry:
        block = bmtd.ops->get_mapping_block(orig_block);
        ret = bmtd._block_isbad(mtd, (loff_t)block << bmtd.blk_shift);
        if (ret) {
-               bmtd.ops->remap_block(orig_block, block, bmtd.blk_size);
-               if (retry_count++ < 10)
+               if (mtk_bmt_remap_block(orig_block, block, bmtd.blk_size) &&
+                   retry_count++ < 10)
                        goto retry;
        }
        return ret;
@@ -280,7 +289,7 @@ mtk_bmt_block_markbad(struct mtd_info *mtd, loff_t ofs)
        if (block < 0)
                return -EIO;
 
-       bmtd.ops->remap_block(orig_block, block, bmtd.blk_size);
+       mtk_bmt_remap_block(orig_block, block, bmtd.blk_size);
 
        return bmtd._block_markbad(mtd, (loff_t)block << bmtd.blk_shift);
 }
@@ -301,9 +310,32 @@ mtk_bmt_replace_ops(struct mtd_info *mtd)
        mtd->_block_markbad = mtk_bmt_block_markbad;
 }
 
+static int mtk_bmt_debug_repair(void *data, u64 val)
+{
+       int block = val >> bmtd.blk_shift;
+       int prev_block, new_block;
+
+       prev_block = bmtd.ops->get_mapping_block(block);
+       if (prev_block < 0)
+               return -EIO;
+
+       bmtd.ops->unmap_block(block);
+       new_block = bmtd.ops->get_mapping_block(block);
+       if (new_block < 0)
+               return -EIO;
+
+       if (prev_block == new_block)
+               return 0;
+
+       bbt_nand_erase(new_block);
+       bbt_nand_copy(new_block, prev_block, bmtd.blk_size);
+
+       return 0;
+}
+
 static int mtk_bmt_debug_mark_good(void *data, u64 val)
 {
-        bmtd.ops->unmap_block(val >> bmtd.blk_shift);
+       bmtd.ops->unmap_block(val >> bmtd.blk_shift);
 
        return 0;
 }
@@ -317,7 +349,7 @@ static int mtk_bmt_debug_mark_bad(void *data, u64 val)
        if (cur_block < 0)
                return -EIO;
 
-       bmtd.ops->remap_block(block, cur_block, bmtd.blk_size);
+       mtk_bmt_remap_block(block, cur_block, bmtd.blk_size);
 
        return 0;
 }
@@ -328,6 +360,7 @@ static int mtk_bmt_debug(void *data, u64 val)
 }
 
 
+DEFINE_DEBUGFS_ATTRIBUTE(fops_repair, NULL, mtk_bmt_debug_repair, "%llu\n");
 DEFINE_DEBUGFS_ATTRIBUTE(fops_mark_good, NULL, mtk_bmt_debug_mark_good, "%llu\n");
 DEFINE_DEBUGFS_ATTRIBUTE(fops_mark_bad, NULL, mtk_bmt_debug_mark_bad, "%llu\n");
 DEFINE_DEBUGFS_ATTRIBUTE(fops_debug, NULL, mtk_bmt_debug, "%llu\n");
@@ -341,6 +374,7 @@ mtk_bmt_add_debugfs(void)
        if (!dir)
                return;
 
+       debugfs_create_file_unsafe("repair", S_IWUSR, dir, NULL, &fops_repair);
        debugfs_create_file_unsafe("mark_good", S_IWUSR, dir, NULL, &fops_mark_good);
        debugfs_create_file_unsafe("mark_bad", S_IWUSR, dir, NULL, &fops_mark_bad);
        debugfs_create_file_unsafe("debug", S_IWUSR, dir, NULL, &fops_debug);
@@ -383,6 +417,8 @@ int mtk_bmt_attach(struct mtd_info *mtd)
 
        if (of_property_read_bool(np, "mediatek,bmt-v2"))
                bmtd.ops = &mtk_bmt_v2_ops;
+       else if (of_property_read_bool(np, "mediatek,nmbm"))
+               bmtd.ops = &mtk_bmt_nmbm_ops;
        else if (of_property_read_bool(np, "mediatek,bbt"))
                bmtd.ops = &mtk_bmt_bbt_ops;
        else
@@ -401,14 +437,14 @@ int mtk_bmt_attach(struct mtd_info *mtd)
        bmtd.pg_shift = ffs(bmtd.pg_size) - 1;
        bmtd.total_blks = mtd->size >> bmtd.blk_shift;
 
-       bmtd.data_buf = kzalloc(bmtd.pg_size, GFP_KERNEL);
+       bmtd.data_buf = kzalloc(bmtd.pg_size + bmtd.mtd->oobsize, GFP_KERNEL);
        if (!bmtd.data_buf) {
                pr_info("nand: FATAL ERR: allocate buffer failed!\n");
                ret = -1;
                goto error;
        }
 
-       memset(bmtd.data_buf, 0xff, bmtd.pg_size);
+       memset(bmtd.data_buf, 0xff, bmtd.pg_size + bmtd.mtd->oobsize);
 
        ret = bmtd.ops->init(np);
        if (ret)