/*
 * Copyright (c) 2017 MediaTek Inc.
 * Author: Xiangsheng Hou <xiangsheng.hou@mediatek.com>
 * Copyright (c) 2020-2022 Felix Fietkau <nbd@nbd.name>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
16 #include <linux/module.h>
17 #include <linux/gfp.h>
18 #include <linux/slab.h>
19 #include <linux/bits.h>
22 struct bmt_desc bmtd
= {};
24 /* -------- Nand operations wrapper -------- */
25 int bbt_nand_copy(u16 dest_blk
, u16 src_blk
, loff_t max_offset
)
27 int pages
= bmtd
.blk_size
>> bmtd
.pg_shift
;
28 loff_t src
= (loff_t
)src_blk
<< bmtd
.blk_shift
;
29 loff_t dest
= (loff_t
)dest_blk
<< bmtd
.blk_shift
;
34 for (i
= 0; i
< pages
; i
++) {
35 struct mtd_oob_ops rd_ops
= {
36 .mode
= MTD_OPS_PLACE_OOB
,
38 .ooblen
= min_t(int, bmtd
.mtd
->oobsize
/ pages
, sizeof(oob
)),
39 .datbuf
= bmtd
.data_buf
,
42 struct mtd_oob_ops wr_ops
= {
43 .mode
= MTD_OPS_PLACE_OOB
,
45 .datbuf
= bmtd
.data_buf
,
49 if (offset
>= max_offset
)
52 ret
= bmtd
._read_oob(bmtd
.mtd
, src
+ offset
, &rd_ops
);
53 if (ret
< 0 && !mtd_is_bitflip(ret
))
59 ret
= bmtd
._write_oob(bmtd
.mtd
, dest
+ offset
, &wr_ops
);
63 wr_ops
.ooblen
= rd_ops
.oobretlen
;
64 offset
+= rd_ops
.retlen
;
70 /* -------- Bad Blocks Management -------- */
71 bool mapping_block_in_range(int block
, int *start
, int *end
)
73 const __be32
*cur
= bmtd
.remap_range
;
74 u32 addr
= block
<< bmtd
.blk_shift
;
77 if (!cur
|| !bmtd
.remap_range_len
) {
79 *end
= bmtd
.total_blks
;
83 for (i
= 0; i
< bmtd
.remap_range_len
; i
++, cur
+= 2) {
84 if (addr
< be32_to_cpu(cur
[0]) || addr
>= be32_to_cpu(cur
[1]))
87 *start
= be32_to_cpu(cur
[0]);
88 *end
= be32_to_cpu(cur
[1]);
96 mtk_bmt_remap_block(u32 block
, u32 mapped_block
, int copy_len
)
100 if (!mapping_block_in_range(block
, &start
, &end
))
103 return bmtd
.ops
->remap_block(block
, mapped_block
, copy_len
);
107 mtk_bmt_read(struct mtd_info
*mtd
, loff_t from
,
108 struct mtd_oob_ops
*ops
)
110 struct mtd_oob_ops cur_ops
= *ops
;
114 int max_bitflips
= 0;
119 while (ops
->retlen
< ops
->len
|| ops
->oobretlen
< ops
->ooblen
) {
122 u32 offset
= from
& (bmtd
.blk_size
- 1);
123 u32 block
= from
>> bmtd
.blk_shift
;
126 cur_block
= bmtd
.ops
->get_mapping_block(block
);
130 cur_from
= ((loff_t
)cur_block
<< bmtd
.blk_shift
) + offset
;
132 cur_ops
.oobretlen
= 0;
134 cur_ops
.len
= min_t(u32
, mtd
->erasesize
- offset
,
135 ops
->len
- ops
->retlen
);
136 cur_ret
= bmtd
._read_oob(mtd
, cur_from
, &cur_ops
);
140 max_bitflips
= max_t(int, max_bitflips
, cur_ret
);
141 if (cur_ret
< 0 && !mtd_is_bitflip(cur_ret
)) {
142 if (mtk_bmt_remap_block(block
, cur_block
, mtd
->erasesize
) &&
149 if (mtd
->bitflip_threshold
&& cur_ret
>= mtd
->bitflip_threshold
)
150 mtk_bmt_remap_block(block
, cur_block
, mtd
->erasesize
);
152 ops
->retlen
+= cur_ops
.retlen
;
153 ops
->oobretlen
+= cur_ops
.oobretlen
;
156 cur_ops
.datbuf
+= cur_ops
.retlen
;
157 cur_ops
.oobbuf
+= cur_ops
.oobretlen
;
158 cur_ops
.ooblen
-= cur_ops
.oobretlen
;
161 cur_ops
.len
= mtd
->erasesize
- offset
;
175 mtk_bmt_write(struct mtd_info
*mtd
, loff_t to
,
176 struct mtd_oob_ops
*ops
)
178 struct mtd_oob_ops cur_ops
= *ops
;
186 while (ops
->retlen
< ops
->len
|| ops
->oobretlen
< ops
->ooblen
) {
187 u32 offset
= to
& (bmtd
.blk_size
- 1);
188 u32 block
= to
>> bmtd
.blk_shift
;
191 cur_block
= bmtd
.ops
->get_mapping_block(block
);
195 cur_to
= ((loff_t
)cur_block
<< bmtd
.blk_shift
) + offset
;
197 cur_ops
.oobretlen
= 0;
199 cur_ops
.len
= min_t(u32
, bmtd
.blk_size
- offset
,
200 ops
->len
- ops
->retlen
);
201 ret
= bmtd
._write_oob(mtd
, cur_to
, &cur_ops
);
203 if (mtk_bmt_remap_block(block
, cur_block
, offset
) &&
210 ops
->retlen
+= cur_ops
.retlen
;
211 ops
->oobretlen
+= cur_ops
.oobretlen
;
214 cur_ops
.datbuf
+= cur_ops
.retlen
;
215 cur_ops
.oobbuf
+= cur_ops
.oobretlen
;
216 cur_ops
.ooblen
-= cur_ops
.oobretlen
;
219 cur_ops
.len
= mtd
->erasesize
- offset
;
229 mtk_bmt_mtd_erase(struct mtd_info
*mtd
, struct erase_info
*instr
)
231 struct erase_info mapped_instr
= {
232 .len
= bmtd
.blk_size
,
235 u64 start_addr
, end_addr
;
240 start_addr
= instr
->addr
& (~mtd
->erasesize_mask
);
241 end_addr
= instr
->addr
+ instr
->len
;
243 while (start_addr
< end_addr
) {
244 orig_block
= start_addr
>> bmtd
.blk_shift
;
245 block
= bmtd
.ops
->get_mapping_block(orig_block
);
248 mapped_instr
.addr
= (loff_t
)block
<< bmtd
.blk_shift
;
249 ret
= bmtd
._erase(mtd
, &mapped_instr
);
251 if (mtk_bmt_remap_block(orig_block
, block
, 0) &&
254 instr
->fail_addr
= start_addr
;
257 start_addr
+= mtd
->erasesize
;
264 mtk_bmt_block_isbad(struct mtd_info
*mtd
, loff_t ofs
)
267 u16 orig_block
= ofs
>> bmtd
.blk_shift
;
272 block
= bmtd
.ops
->get_mapping_block(orig_block
);
273 ret
= bmtd
._block_isbad(mtd
, (loff_t
)block
<< bmtd
.blk_shift
);
275 if (mtk_bmt_remap_block(orig_block
, block
, bmtd
.blk_size
) &&
283 mtk_bmt_block_markbad(struct mtd_info
*mtd
, loff_t ofs
)
285 u16 orig_block
= ofs
>> bmtd
.blk_shift
;
288 block
= bmtd
.ops
->get_mapping_block(orig_block
);
292 mtk_bmt_remap_block(orig_block
, block
, bmtd
.blk_size
);
294 return bmtd
._block_markbad(mtd
, (loff_t
)block
<< bmtd
.blk_shift
);
298 mtk_bmt_replace_ops(struct mtd_info
*mtd
)
300 bmtd
._read_oob
= mtd
->_read_oob
;
301 bmtd
._write_oob
= mtd
->_write_oob
;
302 bmtd
._erase
= mtd
->_erase
;
303 bmtd
._block_isbad
= mtd
->_block_isbad
;
304 bmtd
._block_markbad
= mtd
->_block_markbad
;
306 mtd
->_read_oob
= mtk_bmt_read
;
307 mtd
->_write_oob
= mtk_bmt_write
;
308 mtd
->_erase
= mtk_bmt_mtd_erase
;
309 mtd
->_block_isbad
= mtk_bmt_block_isbad
;
310 mtd
->_block_markbad
= mtk_bmt_block_markbad
;
313 static int mtk_bmt_debug_repair(void *data
, u64 val
)
315 int block
= val
>> bmtd
.blk_shift
;
316 int prev_block
, new_block
;
318 prev_block
= bmtd
.ops
->get_mapping_block(block
);
322 bmtd
.ops
->unmap_block(block
);
323 new_block
= bmtd
.ops
->get_mapping_block(block
);
327 if (prev_block
== new_block
)
330 bbt_nand_erase(new_block
);
331 bbt_nand_copy(new_block
, prev_block
, bmtd
.blk_size
);
336 static int mtk_bmt_debug_mark_good(void *data
, u64 val
)
338 bmtd
.ops
->unmap_block(val
>> bmtd
.blk_shift
);
343 static int mtk_bmt_debug_mark_bad(void *data
, u64 val
)
345 u32 block
= val
>> bmtd
.blk_shift
;
348 cur_block
= bmtd
.ops
->get_mapping_block(block
);
352 mtk_bmt_remap_block(block
, cur_block
, bmtd
.blk_size
);
357 static int mtk_bmt_debug(void *data
, u64 val
)
359 return bmtd
.ops
->debug(data
, val
);
363 DEFINE_DEBUGFS_ATTRIBUTE(fops_repair
, NULL
, mtk_bmt_debug_repair
, "%llu\n");
364 DEFINE_DEBUGFS_ATTRIBUTE(fops_mark_good
, NULL
, mtk_bmt_debug_mark_good
, "%llu\n");
365 DEFINE_DEBUGFS_ATTRIBUTE(fops_mark_bad
, NULL
, mtk_bmt_debug_mark_bad
, "%llu\n");
366 DEFINE_DEBUGFS_ATTRIBUTE(fops_debug
, NULL
, mtk_bmt_debug
, "%llu\n");
369 mtk_bmt_add_debugfs(void)
373 dir
= bmtd
.debugfs_dir
= debugfs_create_dir("mtk-bmt", NULL
);
377 debugfs_create_file_unsafe("repair", S_IWUSR
, dir
, NULL
, &fops_repair
);
378 debugfs_create_file_unsafe("mark_good", S_IWUSR
, dir
, NULL
, &fops_mark_good
);
379 debugfs_create_file_unsafe("mark_bad", S_IWUSR
, dir
, NULL
, &fops_mark_bad
);
380 debugfs_create_file_unsafe("debug", S_IWUSR
, dir
, NULL
, &fops_debug
);
383 void mtk_bmt_detach(struct mtd_info
*mtd
)
388 if (bmtd
.debugfs_dir
)
389 debugfs_remove_recursive(bmtd
.debugfs_dir
);
390 bmtd
.debugfs_dir
= NULL
;
393 kfree(bmtd
.data_buf
);
395 mtd
->_read_oob
= bmtd
._read_oob
;
396 mtd
->_write_oob
= bmtd
._write_oob
;
397 mtd
->_erase
= bmtd
._erase
;
398 mtd
->_block_isbad
= bmtd
._block_isbad
;
399 mtd
->_block_markbad
= bmtd
._block_markbad
;
400 mtd
->size
= bmtd
.total_blks
<< bmtd
.blk_shift
;
402 memset(&bmtd
, 0, sizeof(bmtd
));
406 int mtk_bmt_attach(struct mtd_info
*mtd
)
408 struct device_node
*np
;
414 np
= mtd_get_of_node(mtd
);
418 if (of_property_read_bool(np
, "mediatek,bmt-v2"))
419 bmtd
.ops
= &mtk_bmt_v2_ops
;
420 else if (of_property_read_bool(np
, "mediatek,nmbm"))
421 bmtd
.ops
= &mtk_bmt_nmbm_ops
;
422 else if (of_property_read_bool(np
, "mediatek,bbt"))
423 bmtd
.ops
= &mtk_bmt_bbt_ops
;
427 bmtd
.remap_range
= of_get_property(np
, "mediatek,bmt-remap-range",
428 &bmtd
.remap_range_len
);
429 bmtd
.remap_range_len
/= 8;
432 mtk_bmt_replace_ops(mtd
);
434 bmtd
.blk_size
= mtd
->erasesize
;
435 bmtd
.blk_shift
= ffs(bmtd
.blk_size
) - 1;
436 bmtd
.pg_size
= mtd
->writesize
;
437 bmtd
.pg_shift
= ffs(bmtd
.pg_size
) - 1;
438 bmtd
.total_blks
= mtd
->size
>> bmtd
.blk_shift
;
440 bmtd
.data_buf
= kzalloc(bmtd
.pg_size
+ bmtd
.mtd
->oobsize
, GFP_KERNEL
);
441 if (!bmtd
.data_buf
) {
442 pr_info("nand: FATAL ERR: allocate buffer failed!\n");
447 memset(bmtd
.data_buf
, 0xff, bmtd
.pg_size
+ bmtd
.mtd
->oobsize
);
449 ret
= bmtd
.ops
->init(np
);
453 mtk_bmt_add_debugfs();
462 MODULE_LICENSE("GPL");
463 MODULE_AUTHOR("Xiangsheng Hou <xiangsheng.hou@mediatek.com>, Felix Fietkau <nbd@nbd.name>");
464 MODULE_DESCRIPTION("Bad Block mapping management v2 for MediaTek NAND Flash Driver");