/*
 * Copyright (c) 2017 MediaTek Inc.
 * Author: Xiangsheng Hou <xiangsheng.hou@mediatek.com>
 * Copyright (c) 2020-2022 Felix Fietkau <nbd@nbd.name>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bits.h>
#include <linux/debugfs.h>
#include <linux/gfp.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include "mtk_bmt.h"
22 struct bmt_desc bmtd
= {};
/* -------- Nand operations wrapper -------- */
25 int bbt_nand_copy(u16 dest_blk
, u16 src_blk
, loff_t max_offset
)
27 int pages
= bmtd
.blk_size
>> bmtd
.pg_shift
;
28 loff_t src
= (loff_t
)src_blk
<< bmtd
.blk_shift
;
29 loff_t dest
= (loff_t
)dest_blk
<< bmtd
.blk_shift
;
34 for (i
= 0; i
< pages
; i
++) {
35 struct mtd_oob_ops rd_ops
= {
36 .mode
= MTD_OPS_PLACE_OOB
,
38 .ooblen
= min_t(int, bmtd
.mtd
->oobsize
/ pages
, sizeof(oob
)),
39 .datbuf
= bmtd
.data_buf
,
42 struct mtd_oob_ops wr_ops
= {
43 .mode
= MTD_OPS_PLACE_OOB
,
45 .datbuf
= bmtd
.data_buf
,
49 if (offset
>= max_offset
)
52 ret
= bmtd
._read_oob(bmtd
.mtd
, src
+ offset
, &rd_ops
);
53 if (ret
< 0 && !mtd_is_bitflip(ret
))
59 ret
= bmtd
._write_oob(bmtd
.mtd
, dest
+ offset
, &wr_ops
);
63 wr_ops
.ooblen
= rd_ops
.oobretlen
;
64 offset
+= rd_ops
.retlen
;
/* -------- Bad Blocks Management -------- */
71 bool mapping_block_in_range(int block
, int *start
, int *end
)
73 const __be32
*cur
= bmtd
.remap_range
;
74 u32 addr
= block
<< bmtd
.blk_shift
;
77 if (!cur
|| !bmtd
.remap_range_len
) {
79 *end
= bmtd
.total_blks
;
83 for (i
= 0; i
< bmtd
.remap_range_len
; i
++, cur
+= 2) {
84 if (addr
< be32_to_cpu(cur
[0]) || addr
>= be32_to_cpu(cur
[1]))
87 *start
= be32_to_cpu(cur
[0]);
88 *end
= be32_to_cpu(cur
[1]);
96 mtk_bmt_read(struct mtd_info
*mtd
, loff_t from
,
97 struct mtd_oob_ops
*ops
)
99 struct mtd_oob_ops cur_ops
= *ops
;
103 int max_bitflips
= 0;
109 while (ops
->retlen
< ops
->len
|| ops
->oobretlen
< ops
->ooblen
) {
112 u32 offset
= from
& (bmtd
.blk_size
- 1);
113 u32 block
= from
>> bmtd
.blk_shift
;
116 cur_block
= bmtd
.ops
->get_mapping_block(block
);
120 cur_from
= ((loff_t
)cur_block
<< bmtd
.blk_shift
) + offset
;
122 cur_ops
.oobretlen
= 0;
124 cur_ops
.len
= min_t(u32
, mtd
->erasesize
- offset
,
125 ops
->len
- ops
->retlen
);
126 cur_ret
= bmtd
._read_oob(mtd
, cur_from
, &cur_ops
);
130 max_bitflips
= max_t(int, max_bitflips
, cur_ret
);
131 if (cur_ret
< 0 && !mtd_is_bitflip(cur_ret
)) {
132 bmtd
.ops
->remap_block(block
, cur_block
, mtd
->erasesize
);
133 if (retry_count
++ < 10)
139 if (cur_ret
>= mtd
->bitflip_threshold
&&
140 mapping_block_in_range(block
, &start
, &end
))
141 bmtd
.ops
->remap_block(block
, cur_block
, mtd
->erasesize
);
143 ops
->retlen
+= cur_ops
.retlen
;
144 ops
->oobretlen
+= cur_ops
.oobretlen
;
147 cur_ops
.datbuf
+= cur_ops
.retlen
;
148 cur_ops
.oobbuf
+= cur_ops
.oobretlen
;
149 cur_ops
.ooblen
-= cur_ops
.oobretlen
;
152 cur_ops
.len
= mtd
->erasesize
- offset
;
166 mtk_bmt_write(struct mtd_info
*mtd
, loff_t to
,
167 struct mtd_oob_ops
*ops
)
169 struct mtd_oob_ops cur_ops
= *ops
;
177 while (ops
->retlen
< ops
->len
|| ops
->oobretlen
< ops
->ooblen
) {
178 u32 offset
= to
& (bmtd
.blk_size
- 1);
179 u32 block
= to
>> bmtd
.blk_shift
;
182 cur_block
= bmtd
.ops
->get_mapping_block(block
);
186 cur_to
= ((loff_t
)cur_block
<< bmtd
.blk_shift
) + offset
;
188 cur_ops
.oobretlen
= 0;
190 cur_ops
.len
= min_t(u32
, bmtd
.blk_size
- offset
,
191 ops
->len
- ops
->retlen
);
192 ret
= bmtd
._write_oob(mtd
, cur_to
, &cur_ops
);
194 bmtd
.ops
->remap_block(block
, cur_block
, offset
);
195 if (retry_count
++ < 10)
201 ops
->retlen
+= cur_ops
.retlen
;
202 ops
->oobretlen
+= cur_ops
.oobretlen
;
205 cur_ops
.datbuf
+= cur_ops
.retlen
;
206 cur_ops
.oobbuf
+= cur_ops
.oobretlen
;
207 cur_ops
.ooblen
-= cur_ops
.oobretlen
;
210 cur_ops
.len
= mtd
->erasesize
- offset
;
220 mtk_bmt_mtd_erase(struct mtd_info
*mtd
, struct erase_info
*instr
)
222 struct erase_info mapped_instr
= {
223 .len
= bmtd
.blk_size
,
226 u64 start_addr
, end_addr
;
231 start_addr
= instr
->addr
& (~mtd
->erasesize_mask
);
232 end_addr
= instr
->addr
+ instr
->len
;
234 while (start_addr
< end_addr
) {
235 orig_block
= start_addr
>> bmtd
.blk_shift
;
236 block
= bmtd
.ops
->get_mapping_block(orig_block
);
239 mapped_instr
.addr
= (loff_t
)block
<< bmtd
.blk_shift
;
240 ret
= bmtd
._erase(mtd
, &mapped_instr
);
242 bmtd
.ops
->remap_block(orig_block
, block
, 0);
243 if (retry_count
++ < 10)
245 instr
->fail_addr
= start_addr
;
248 start_addr
+= mtd
->erasesize
;
255 mtk_bmt_block_isbad(struct mtd_info
*mtd
, loff_t ofs
)
258 u16 orig_block
= ofs
>> bmtd
.blk_shift
;
263 block
= bmtd
.ops
->get_mapping_block(orig_block
);
264 ret
= bmtd
._block_isbad(mtd
, (loff_t
)block
<< bmtd
.blk_shift
);
266 bmtd
.ops
->remap_block(orig_block
, block
, bmtd
.blk_size
);
267 if (retry_count
++ < 10)
274 mtk_bmt_block_markbad(struct mtd_info
*mtd
, loff_t ofs
)
276 u16 orig_block
= ofs
>> bmtd
.blk_shift
;
279 block
= bmtd
.ops
->get_mapping_block(orig_block
);
283 bmtd
.ops
->remap_block(orig_block
, block
, bmtd
.blk_size
);
285 return bmtd
._block_markbad(mtd
, (loff_t
)block
<< bmtd
.blk_shift
);
289 mtk_bmt_replace_ops(struct mtd_info
*mtd
)
291 bmtd
._read_oob
= mtd
->_read_oob
;
292 bmtd
._write_oob
= mtd
->_write_oob
;
293 bmtd
._erase
= mtd
->_erase
;
294 bmtd
._block_isbad
= mtd
->_block_isbad
;
295 bmtd
._block_markbad
= mtd
->_block_markbad
;
297 mtd
->_read_oob
= mtk_bmt_read
;
298 mtd
->_write_oob
= mtk_bmt_write
;
299 mtd
->_erase
= mtk_bmt_mtd_erase
;
300 mtd
->_block_isbad
= mtk_bmt_block_isbad
;
301 mtd
->_block_markbad
= mtk_bmt_block_markbad
;
304 static int mtk_bmt_debug_mark_good(void *data
, u64 val
)
306 bmtd
.ops
->unmap_block(val
>> bmtd
.blk_shift
);
311 static int mtk_bmt_debug_mark_bad(void *data
, u64 val
)
313 u32 block
= val
>> bmtd
.blk_shift
;
316 cur_block
= bmtd
.ops
->get_mapping_block(block
);
320 bmtd
.ops
->remap_block(block
, cur_block
, bmtd
.blk_size
);
325 static int mtk_bmt_debug(void *data
, u64 val
)
327 return bmtd
.ops
->debug(data
, val
);
331 DEFINE_DEBUGFS_ATTRIBUTE(fops_mark_good
, NULL
, mtk_bmt_debug_mark_good
, "%llu\n");
332 DEFINE_DEBUGFS_ATTRIBUTE(fops_mark_bad
, NULL
, mtk_bmt_debug_mark_bad
, "%llu\n");
333 DEFINE_DEBUGFS_ATTRIBUTE(fops_debug
, NULL
, mtk_bmt_debug
, "%llu\n");
336 mtk_bmt_add_debugfs(void)
340 dir
= bmtd
.debugfs_dir
= debugfs_create_dir("mtk-bmt", NULL
);
344 debugfs_create_file_unsafe("mark_good", S_IWUSR
, dir
, NULL
, &fops_mark_good
);
345 debugfs_create_file_unsafe("mark_bad", S_IWUSR
, dir
, NULL
, &fops_mark_bad
);
346 debugfs_create_file_unsafe("debug", S_IWUSR
, dir
, NULL
, &fops_debug
);
349 void mtk_bmt_detach(struct mtd_info
*mtd
)
354 if (bmtd
.debugfs_dir
)
355 debugfs_remove_recursive(bmtd
.debugfs_dir
);
356 bmtd
.debugfs_dir
= NULL
;
359 kfree(bmtd
.data_buf
);
361 mtd
->_read_oob
= bmtd
._read_oob
;
362 mtd
->_write_oob
= bmtd
._write_oob
;
363 mtd
->_erase
= bmtd
._erase
;
364 mtd
->_block_isbad
= bmtd
._block_isbad
;
365 mtd
->_block_markbad
= bmtd
._block_markbad
;
366 mtd
->size
= bmtd
.total_blks
<< bmtd
.blk_shift
;
368 memset(&bmtd
, 0, sizeof(bmtd
));
372 int mtk_bmt_attach(struct mtd_info
*mtd
)
374 struct device_node
*np
;
380 np
= mtd_get_of_node(mtd
);
384 if (of_property_read_bool(np
, "mediatek,bmt-v2"))
385 bmtd
.ops
= &mtk_bmt_v2_ops
;
386 else if (of_property_read_bool(np
, "mediatek,bbt"))
387 bmtd
.ops
= &mtk_bmt_bbt_ops
;
391 bmtd
.remap_range
= of_get_property(np
, "mediatek,bmt-remap-range",
392 &bmtd
.remap_range_len
);
393 bmtd
.remap_range_len
/= 8;
396 mtk_bmt_replace_ops(mtd
);
398 bmtd
.blk_size
= mtd
->erasesize
;
399 bmtd
.blk_shift
= ffs(bmtd
.blk_size
) - 1;
400 bmtd
.pg_size
= mtd
->writesize
;
401 bmtd
.pg_shift
= ffs(bmtd
.pg_size
) - 1;
402 bmtd
.total_blks
= mtd
->size
>> bmtd
.blk_shift
;
404 bmtd
.data_buf
= kzalloc(bmtd
.pg_size
, GFP_KERNEL
);
405 if (!bmtd
.data_buf
) {
406 pr_info("nand: FATAL ERR: allocate buffer failed!\n");
411 memset(bmtd
.data_buf
, 0xff, bmtd
.pg_size
);
413 ret
= bmtd
.ops
->init(np
);
417 mtk_bmt_add_debugfs();
426 MODULE_LICENSE("GPL");
427 MODULE_AUTHOR("Xiangsheng Hou <xiangsheng.hou@mediatek.com>, Felix Fietkau <nbd@nbd.name>");
428 MODULE_DESCRIPTION("Bad Block mapping management v2 for MediaTek NAND Flash Driver");