/*
 * Copyright (c) 2017 MediaTek Inc.
 * Author: Xiangsheng Hou <xiangsheng.hou@mediatek.com>
 * Copyright (c) 2020-2022 Felix Fietkau <nbd@nbd.name>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/bits.h>
#include <linux/debugfs.h>
#include <linux/of.h>
#include <linux/mtd/mtd.h>
#include "mtk_bmt.h"
22 struct bmt_desc bmtd
= {};
24 /* -------- Nand operations wrapper -------- */
25 int bbt_nand_copy(u16 dest_blk
, u16 src_blk
, loff_t max_offset
)
27 int pages
= bmtd
.blk_size
>> bmtd
.pg_shift
;
28 loff_t src
= (loff_t
)src_blk
<< bmtd
.blk_shift
;
29 loff_t dest
= (loff_t
)dest_blk
<< bmtd
.blk_shift
;
34 for (i
= 0; i
< pages
; i
++) {
35 struct mtd_oob_ops rd_ops
= {
36 .mode
= MTD_OPS_PLACE_OOB
,
38 .ooblen
= min_t(int, bmtd
.mtd
->oobsize
/ pages
, sizeof(oob
)),
39 .datbuf
= bmtd
.data_buf
,
42 struct mtd_oob_ops wr_ops
= {
43 .mode
= MTD_OPS_PLACE_OOB
,
45 .datbuf
= bmtd
.data_buf
,
49 if (offset
>= max_offset
)
52 ret
= bmtd
._read_oob(bmtd
.mtd
, src
+ offset
, &rd_ops
);
53 if (ret
< 0 && !mtd_is_bitflip(ret
))
59 ret
= bmtd
._write_oob(bmtd
.mtd
, dest
+ offset
, &wr_ops
);
63 wr_ops
.ooblen
= rd_ops
.oobretlen
;
64 offset
+= rd_ops
.retlen
;
70 /* -------- Bad Blocks Management -------- */
71 bool mapping_block_in_range(int block
, int *start
, int *end
)
73 const __be32
*cur
= bmtd
.remap_range
;
74 u32 addr
= block
<< bmtd
.blk_shift
;
77 if (!cur
|| !bmtd
.remap_range_len
) {
79 *end
= bmtd
.total_blks
;
83 for (i
= 0; i
< bmtd
.remap_range_len
; i
++, cur
+= 2) {
84 if (addr
< be32_to_cpu(cur
[0]) || addr
>= be32_to_cpu(cur
[1]))
87 *start
= be32_to_cpu(cur
[0]);
88 *end
= be32_to_cpu(cur
[1]);
96 mtk_bmt_remap_block(u32 block
, u32 mapped_block
, int copy_len
)
100 if (!mapping_block_in_range(block
, &start
, &end
))
103 return bmtd
.ops
->remap_block(block
, mapped_block
, copy_len
);
107 mtk_bmt_read(struct mtd_info
*mtd
, loff_t from
,
108 struct mtd_oob_ops
*ops
)
110 struct mtd_oob_ops cur_ops
= *ops
;
114 int max_bitflips
= 0;
119 while (ops
->retlen
< ops
->len
|| ops
->oobretlen
< ops
->ooblen
) {
122 u32 offset
= from
& (bmtd
.blk_size
- 1);
123 u32 block
= from
>> bmtd
.blk_shift
;
126 cur_block
= bmtd
.ops
->get_mapping_block(block
);
130 cur_from
= ((loff_t
)cur_block
<< bmtd
.blk_shift
) + offset
;
132 cur_ops
.oobretlen
= 0;
134 cur_ops
.len
= min_t(u32
, mtd
->erasesize
- offset
,
135 ops
->len
- ops
->retlen
);
136 cur_ret
= bmtd
._read_oob(mtd
, cur_from
, &cur_ops
);
140 max_bitflips
= max_t(int, max_bitflips
, cur_ret
);
141 if (cur_ret
< 0 && !mtd_is_bitflip(cur_ret
)) {
142 if (mtk_bmt_remap_block(block
, cur_block
, mtd
->erasesize
) &&
149 if (cur_ret
>= mtd
->bitflip_threshold
)
150 mtk_bmt_remap_block(block
, cur_block
, mtd
->erasesize
);
152 ops
->retlen
+= cur_ops
.retlen
;
153 ops
->oobretlen
+= cur_ops
.oobretlen
;
156 cur_ops
.datbuf
+= cur_ops
.retlen
;
157 cur_ops
.oobbuf
+= cur_ops
.oobretlen
;
158 cur_ops
.ooblen
-= cur_ops
.oobretlen
;
161 cur_ops
.len
= mtd
->erasesize
- offset
;
175 mtk_bmt_write(struct mtd_info
*mtd
, loff_t to
,
176 struct mtd_oob_ops
*ops
)
178 struct mtd_oob_ops cur_ops
= *ops
;
186 while (ops
->retlen
< ops
->len
|| ops
->oobretlen
< ops
->ooblen
) {
187 u32 offset
= to
& (bmtd
.blk_size
- 1);
188 u32 block
= to
>> bmtd
.blk_shift
;
191 cur_block
= bmtd
.ops
->get_mapping_block(block
);
195 cur_to
= ((loff_t
)cur_block
<< bmtd
.blk_shift
) + offset
;
197 cur_ops
.oobretlen
= 0;
199 cur_ops
.len
= min_t(u32
, bmtd
.blk_size
- offset
,
200 ops
->len
- ops
->retlen
);
201 ret
= bmtd
._write_oob(mtd
, cur_to
, &cur_ops
);
203 if (mtk_bmt_remap_block(block
, cur_block
, offset
) &&
210 ops
->retlen
+= cur_ops
.retlen
;
211 ops
->oobretlen
+= cur_ops
.oobretlen
;
214 cur_ops
.datbuf
+= cur_ops
.retlen
;
215 cur_ops
.oobbuf
+= cur_ops
.oobretlen
;
216 cur_ops
.ooblen
-= cur_ops
.oobretlen
;
219 cur_ops
.len
= mtd
->erasesize
- offset
;
229 mtk_bmt_mtd_erase(struct mtd_info
*mtd
, struct erase_info
*instr
)
231 struct erase_info mapped_instr
= {
232 .len
= bmtd
.blk_size
,
235 u64 start_addr
, end_addr
;
240 start_addr
= instr
->addr
& (~mtd
->erasesize_mask
);
241 end_addr
= instr
->addr
+ instr
->len
;
243 while (start_addr
< end_addr
) {
244 orig_block
= start_addr
>> bmtd
.blk_shift
;
245 block
= bmtd
.ops
->get_mapping_block(orig_block
);
248 mapped_instr
.addr
= (loff_t
)block
<< bmtd
.blk_shift
;
249 ret
= bmtd
._erase(mtd
, &mapped_instr
);
251 if (mtk_bmt_remap_block(orig_block
, block
, 0) &&
254 instr
->fail_addr
= start_addr
;
257 start_addr
+= mtd
->erasesize
;
264 mtk_bmt_block_isbad(struct mtd_info
*mtd
, loff_t ofs
)
267 u16 orig_block
= ofs
>> bmtd
.blk_shift
;
272 block
= bmtd
.ops
->get_mapping_block(orig_block
);
273 ret
= bmtd
._block_isbad(mtd
, (loff_t
)block
<< bmtd
.blk_shift
);
275 if (mtk_bmt_remap_block(orig_block
, block
, bmtd
.blk_size
) &&
283 mtk_bmt_block_markbad(struct mtd_info
*mtd
, loff_t ofs
)
285 u16 orig_block
= ofs
>> bmtd
.blk_shift
;
288 block
= bmtd
.ops
->get_mapping_block(orig_block
);
292 mtk_bmt_remap_block(orig_block
, block
, bmtd
.blk_size
);
294 return bmtd
._block_markbad(mtd
, (loff_t
)block
<< bmtd
.blk_shift
);
298 mtk_bmt_replace_ops(struct mtd_info
*mtd
)
300 bmtd
._read_oob
= mtd
->_read_oob
;
301 bmtd
._write_oob
= mtd
->_write_oob
;
302 bmtd
._erase
= mtd
->_erase
;
303 bmtd
._block_isbad
= mtd
->_block_isbad
;
304 bmtd
._block_markbad
= mtd
->_block_markbad
;
306 mtd
->_read_oob
= mtk_bmt_read
;
307 mtd
->_write_oob
= mtk_bmt_write
;
308 mtd
->_erase
= mtk_bmt_mtd_erase
;
309 mtd
->_block_isbad
= mtk_bmt_block_isbad
;
310 mtd
->_block_markbad
= mtk_bmt_block_markbad
;
313 static int mtk_bmt_debug_mark_good(void *data
, u64 val
)
315 bmtd
.ops
->unmap_block(val
>> bmtd
.blk_shift
);
320 static int mtk_bmt_debug_mark_bad(void *data
, u64 val
)
322 u32 block
= val
>> bmtd
.blk_shift
;
325 cur_block
= bmtd
.ops
->get_mapping_block(block
);
329 mtk_bmt_remap_block(block
, cur_block
, bmtd
.blk_size
);
334 static int mtk_bmt_debug(void *data
, u64 val
)
336 return bmtd
.ops
->debug(data
, val
);
340 DEFINE_DEBUGFS_ATTRIBUTE(fops_mark_good
, NULL
, mtk_bmt_debug_mark_good
, "%llu\n");
341 DEFINE_DEBUGFS_ATTRIBUTE(fops_mark_bad
, NULL
, mtk_bmt_debug_mark_bad
, "%llu\n");
342 DEFINE_DEBUGFS_ATTRIBUTE(fops_debug
, NULL
, mtk_bmt_debug
, "%llu\n");
345 mtk_bmt_add_debugfs(void)
349 dir
= bmtd
.debugfs_dir
= debugfs_create_dir("mtk-bmt", NULL
);
353 debugfs_create_file_unsafe("mark_good", S_IWUSR
, dir
, NULL
, &fops_mark_good
);
354 debugfs_create_file_unsafe("mark_bad", S_IWUSR
, dir
, NULL
, &fops_mark_bad
);
355 debugfs_create_file_unsafe("debug", S_IWUSR
, dir
, NULL
, &fops_debug
);
358 void mtk_bmt_detach(struct mtd_info
*mtd
)
363 if (bmtd
.debugfs_dir
)
364 debugfs_remove_recursive(bmtd
.debugfs_dir
);
365 bmtd
.debugfs_dir
= NULL
;
368 kfree(bmtd
.data_buf
);
370 mtd
->_read_oob
= bmtd
._read_oob
;
371 mtd
->_write_oob
= bmtd
._write_oob
;
372 mtd
->_erase
= bmtd
._erase
;
373 mtd
->_block_isbad
= bmtd
._block_isbad
;
374 mtd
->_block_markbad
= bmtd
._block_markbad
;
375 mtd
->size
= bmtd
.total_blks
<< bmtd
.blk_shift
;
377 memset(&bmtd
, 0, sizeof(bmtd
));
381 int mtk_bmt_attach(struct mtd_info
*mtd
)
383 struct device_node
*np
;
389 np
= mtd_get_of_node(mtd
);
393 if (of_property_read_bool(np
, "mediatek,bmt-v2"))
394 bmtd
.ops
= &mtk_bmt_v2_ops
;
395 else if (of_property_read_bool(np
, "mediatek,bbt"))
396 bmtd
.ops
= &mtk_bmt_bbt_ops
;
400 bmtd
.remap_range
= of_get_property(np
, "mediatek,bmt-remap-range",
401 &bmtd
.remap_range_len
);
402 bmtd
.remap_range_len
/= 8;
405 mtk_bmt_replace_ops(mtd
);
407 bmtd
.blk_size
= mtd
->erasesize
;
408 bmtd
.blk_shift
= ffs(bmtd
.blk_size
) - 1;
409 bmtd
.pg_size
= mtd
->writesize
;
410 bmtd
.pg_shift
= ffs(bmtd
.pg_size
) - 1;
411 bmtd
.total_blks
= mtd
->size
>> bmtd
.blk_shift
;
413 bmtd
.data_buf
= kzalloc(bmtd
.pg_size
, GFP_KERNEL
);
414 if (!bmtd
.data_buf
) {
415 pr_info("nand: FATAL ERR: allocate buffer failed!\n");
420 memset(bmtd
.data_buf
, 0xff, bmtd
.pg_size
);
422 ret
= bmtd
.ops
->init(np
);
426 mtk_bmt_add_debugfs();
435 MODULE_LICENSE("GPL");
436 MODULE_AUTHOR("Xiangsheng Hou <xiangsheng.hou@mediatek.com>, Felix Fietkau <nbd@nbd.name>");
437 MODULE_DESCRIPTION("Bad Block mapping management v2 for MediaTek NAND Flash Driver");