/*
 * Copyright (c) 2017 MediaTek Inc.
 * Author: Xiangsheng Hou <xiangsheng.hou@mediatek.com>
 * Copyright (c) 2020-2022 Felix Fietkau <nbd@nbd.name>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
16 #include <linux/module.h>
17 #include <linux/gfp.h>
18 #include <linux/slab.h>
19 #include <linux/bits.h>
/*
 * Global driver state: flash geometry, the chip's original mtd callbacks
 * (saved by mtk_bmt_replace_ops()), the active remapping strategy (.ops)
 * and the shared page bounce buffer. Zero here; populated by
 * mtk_bmt_attach() and wiped again by mtk_bmt_detach().
 */
22 struct bmt_desc bmtd
= {};
/* -------- Nand operations wrapper -------- */
/*
 * Copy one eraseblock to another, page by page, through the chip's
 * original (un-wrapped) OOB read/write callbacks saved in bmtd.
 * Copying stops once the running offset reaches @max_offset. Read
 * bitflips are tolerated (mtd_is_bitflip()); hard read errors abort.
 *
 * NOTE(review): this view of the file is incomplete (several physical
 * lines are missing between the visible statements), so the comments
 * below describe only what is visible.
 */
25 int bbt_nand_copy(u16 dest_blk
, u16 src_blk
, loff_t max_offset
)
/* pages per eraseblock: block size divided by page size */
27 int pages
= bmtd
.blk_size
>> bmtd
.pg_shift
;
/* byte address of the source block */
28 loff_t src
= (loff_t
)src_blk
<< bmtd
.blk_shift
;
/* byte address of the destination block */
29 loff_t dest
= (loff_t
)dest_blk
<< bmtd
.blk_shift
;
/* copy the block one page at a time */
34 for (i
= 0; i
< pages
; i
++) {
/* read request: data into the shared bounce buffer, plus this page's
 * share of the OOB area (capped at sizeof(oob)) */
35 struct mtd_oob_ops rd_ops
= {
36 .mode
= MTD_OPS_PLACE_OOB
,
38 .ooblen
= min_t(int, bmtd
.mtd
->oobsize
/ pages
, sizeof(oob
)),
39 .datbuf
= bmtd
.data_buf
,
/* matching write request reusing the same data buffer */
42 struct mtd_oob_ops wr_ops
= {
43 .mode
= MTD_OPS_PLACE_OOB
,
45 .datbuf
= bmtd
.data_buf
,
/* stop once the requested range has been fully copied */
49 if (offset
>= max_offset
)
/* raw page read via the saved callback; bitflips are not fatal */
52 ret
= bmtd
._read_oob(bmtd
.mtd
, src
+ offset
, &rd_ops
);
53 if (ret
< 0 && !mtd_is_bitflip(ret
))
/* raw page write of the data just read */
59 ret
= bmtd
._write_oob(bmtd
.mtd
, dest
+ offset
, &wr_ops
);
/* NOTE(review): wr_ops is assigned *after* the write and is re-created
 * on the next loop iteration — this looks like dead code; confirm
 * against the full file before removing */
63 wr_ops
.ooblen
= rd_ops
.oobretlen
;
/* advance by the amount actually read */
64 offset
+= rd_ops
.retlen
;
/* -------- Bad Blocks Management -------- */
/*
 * Decide whether @block lies inside one of the remappable address ranges
 * supplied by the "mediatek,bmt-remap-range" device-tree property
 * (pairs of big-endian start/end byte addresses). On a hit, *start and
 * *end are set to that range's boundaries. With no ranges configured,
 * the whole device (up to total_blks) is treated as remappable.
 *
 * NOTE(review): some physical lines of this function are missing from
 * this view (e.g. the early return and *start assignment in the
 * no-range path).
 */
71 bool mapping_block_in_range(int block
, int *start
, int *end
)
/* cur walks the DT range table as (start, end) big-endian pairs */
73 const __be32
*cur
= bmtd
.remap_range
;
/* byte address of the queried block */
74 u32 addr
= block
<< bmtd
.blk_shift
;
/* no table configured: everything up to the end of the device counts */
77 if (!cur
|| !bmtd
.remap_range_len
) {
79 *end
= bmtd
.total_blks
;
/* scan each (start, end) pair for one containing addr */
83 for (i
= 0; i
< bmtd
.remap_range_len
; i
++, cur
+= 2) {
/* skip ranges that do not contain addr (end is exclusive) */
84 if (addr
< be32_to_cpu(cur
[0]) || addr
>= be32_to_cpu(cur
[1]))
87 *start
= be32_to_cpu(cur
[0]);
88 *end
= be32_to_cpu(cur
[1]);
/*
 * Replacement for mtd->_read_oob (installed by mtk_bmt_replace_ops()).
 * Splits the request at eraseblock boundaries, translates each logical
 * block through bmtd.ops->get_mapping_block() and reads via the saved
 * raw callback. A hard read error remaps the block and retries (up to
 * 10 times, per the visible retry_count check); a read whose bitflip
 * count reaches mtd->bitflip_threshold proactively remaps the block if
 * it lies in the remappable range.
 *
 * NOTE(review): several physical lines are missing from this view
 * (local declarations, loop advance, return path), so comments cover
 * only the visible statements.
 */
96 mtk_bmt_read(struct mtd_info
*mtd
, loff_t from
,
97 struct mtd_oob_ops
*ops
)
/* per-chunk request, seeded from the caller's ops */
99 struct mtd_oob_ops cur_ops
= *ops
;
/* worst bitflip count seen across all chunks (the usual mtd return) */
103 int max_bitflips
= 0;
/* loop until both data and OOB requests are satisfied */
109 while (ops
->retlen
< ops
->len
|| ops
->oobretlen
< ops
->ooblen
) {
/* position within the current eraseblock */
112 u32 offset
= from
& (bmtd
.blk_size
- 1);
/* logical block index of the current position */
113 u32 block
= from
>> bmtd
.blk_shift
;
/* translate logical block -> physical block */
116 cur_block
= bmtd
.ops
->get_mapping_block(block
);
117 cur_from
= ((loff_t
)cur_block
<< bmtd
.blk_shift
) + offset
;
119 cur_ops
.oobretlen
= 0;
/* chunk length: stay inside this eraseblock and the caller's request */
121 cur_ops
.len
= min_t(u32
, mtd
->erasesize
- offset
,
122 ops
->len
- ops
->retlen
);
/* raw read through the saved callback */
123 cur_ret
= bmtd
._read_oob(mtd
, cur_from
, &cur_ops
);
127 max_bitflips
= max_t(int, max_bitflips
, cur_ret
);
/* hard error (not a correctable bitflip): remap and retry the block */
128 if (cur_ret
< 0 && !mtd_is_bitflip(cur_ret
)) {
129 bmtd
.ops
->remap_block(block
, cur_block
, mtd
->erasesize
);
130 if (retry_count
++ < 10)
/* too many bitflips: preventively migrate the block while its data is
 * still readable, but only if it is in the remappable range */
136 if (cur_ret
>= mtd
->bitflip_threshold
&&
137 mapping_block_in_range(block
, &start
, &end
))
138 bmtd
.ops
->remap_block(block
, cur_block
, mtd
->erasesize
);
/* account this chunk in the caller-visible totals */
140 ops
->retlen
+= cur_ops
.retlen
;
141 ops
->oobretlen
+= cur_ops
.oobretlen
;
/* advance the chunk buffers past what was consumed */
144 cur_ops
.datbuf
+= cur_ops
.retlen
;
145 cur_ops
.oobbuf
+= cur_ops
.oobretlen
;
146 cur_ops
.ooblen
-= cur_ops
.oobretlen
;
/* reset chunk length for the next eraseblock (intervening lines not
 * visible in this view) */
149 cur_ops
.len
= mtd
->erasesize
- offset
;
/*
 * Replacement for mtd->_write_oob. Mirrors mtk_bmt_read(): splits the
 * request at eraseblock boundaries, translates each logical block via
 * bmtd.ops->get_mapping_block() and writes through the saved raw
 * callback. On a write failure the block is remapped — passing the
 * current @offset so the already-written prefix can be carried over —
 * and the chunk is retried up to 10 times.
 *
 * NOTE(review): several physical lines are missing from this view
 * (local declarations, error check around _write_oob, loop advance,
 * return path).
 */
163 mtk_bmt_write(struct mtd_info
*mtd
, loff_t to
,
164 struct mtd_oob_ops
*ops
)
/* per-chunk request, seeded from the caller's ops */
166 struct mtd_oob_ops cur_ops
= *ops
;
/* loop until both data and OOB requests are satisfied */
174 while (ops
->retlen
< ops
->len
|| ops
->oobretlen
< ops
->ooblen
) {
/* position within the current eraseblock */
175 u32 offset
= to
& (bmtd
.blk_size
- 1);
/* logical block index of the current position */
176 u32 block
= to
>> bmtd
.blk_shift
;
/* translate logical block -> physical block */
179 cur_block
= bmtd
.ops
->get_mapping_block(block
);
180 cur_to
= ((loff_t
)cur_block
<< bmtd
.blk_shift
) + offset
;
182 cur_ops
.oobretlen
= 0;
/* chunk length: stay inside this eraseblock and the caller's request */
184 cur_ops
.len
= min_t(u32
, bmtd
.blk_size
- offset
,
185 ops
->len
- ops
->retlen
);
/* raw write through the saved callback */
186 ret
= bmtd
._write_oob(mtd
, cur_to
, &cur_ops
);
/* (error path, partially visible) remap the failed block; offset tells
 * the strategy how much data to migrate */
188 bmtd
.ops
->remap_block(block
, cur_block
, offset
);
189 if (retry_count
++ < 10)
/* account this chunk in the caller-visible totals */
195 ops
->retlen
+= cur_ops
.retlen
;
196 ops
->oobretlen
+= cur_ops
.oobretlen
;
/* advance the chunk buffers past what was consumed */
199 cur_ops
.datbuf
+= cur_ops
.retlen
;
200 cur_ops
.oobbuf
+= cur_ops
.oobretlen
;
201 cur_ops
.ooblen
-= cur_ops
.oobretlen
;
/* reset chunk length for the next eraseblock (intervening lines not
 * visible in this view) */
204 cur_ops
.len
= mtd
->erasesize
- offset
;
/*
 * Replacement for mtd->_erase. Erases the requested range one
 * eraseblock at a time, translating each logical block to its mapped
 * physical block first. A failed erase remaps the block and retries
 * (up to 10 times per the visible retry_count check); on final failure
 * instr->fail_addr records where the erase stopped.
 *
 * NOTE(review): some physical lines are missing from this view
 * (retry_count/ret declarations, success/error returns).
 */
214 mtk_bmt_mtd_erase(struct mtd_info
*mtd
, struct erase_info
*instr
)
/* per-block erase request; only .addr changes per iteration */
216 struct erase_info mapped_instr
= {
217 .len
= bmtd
.blk_size
,
220 u64 start_addr
, end_addr
;
222 u16 orig_block
, block
;
/* align the start down to an eraseblock boundary */
224 start_addr
= instr
->addr
& (~mtd
->erasesize_mask
);
225 end_addr
= instr
->addr
+ instr
->len
;
/* erase block by block */
227 while (start_addr
< end_addr
) {
228 orig_block
= start_addr
>> bmtd
.blk_shift
;
/* logical -> physical block translation */
229 block
= bmtd
.ops
->get_mapping_block(orig_block
);
230 mapped_instr
.addr
= (loff_t
)block
<< bmtd
.blk_shift
;
/* raw erase through the saved callback */
231 ret
= bmtd
._erase(mtd
, &mapped_instr
);
/* (error path, partially visible) remap with 0 bytes to preserve —
 * an erased block has no data worth migrating — then retry */
233 bmtd
.ops
->remap_block(orig_block
, block
, 0);
234 if (retry_count
++ < 10)
/* give up: report where the erase failed */
236 instr
->fail_addr
= start_addr
;
/* next eraseblock */
239 start_addr
+= mtd
->erasesize
;
/*
 * Replacement for mtd->_block_isbad. Translates the queried offset to
 * the mapped physical block and asks the saved raw callback. The
 * visible remap_block()/retry_count tail suggests a bad verdict
 * triggers a remap-and-retry loop (up to 10 attempts) — the lines in
 * between are missing from this view, so confirm against the full file.
 */
246 mtk_bmt_block_isbad(struct mtd_info
*mtd
, loff_t ofs
)
/* logical block containing ofs */
249 u16 orig_block
= ofs
>> bmtd
.blk_shift
;
/* logical -> physical block translation */
254 block
= bmtd
.ops
->get_mapping_block(orig_block
);
/* raw bad-block query at the mapped physical address */
255 ret
= bmtd
._block_isbad(mtd
, (loff_t
)block
<< bmtd
.blk_shift
);
/* (partially visible) remap the bad block, preserving a full block's
 * worth of data, then retry */
257 bmtd
.ops
->remap_block(orig_block
, block
, bmtd
.blk_size
);
258 if (retry_count
++ < 10)
/*
 * Replacement for mtd->_block_markbad. Remaps the logical block away
 * from the physical block being condemned (migrating a full block of
 * data), then marks the old physical block bad via the saved raw
 * callback.
 */
265 mtk_bmt_block_markbad(struct mtd_info
*mtd
, loff_t ofs
)
/* logical block containing ofs */
267 u16 orig_block
= ofs
>> bmtd
.blk_shift
;
/* current physical block backing it */
268 u16 block
= bmtd
.ops
->get_mapping_block(orig_block
);
/* move the logical block elsewhere before condemning this one */
270 bmtd
.ops
->remap_block(orig_block
, block
, bmtd
.blk_size
);
/* mark the old physical block bad on the chip */
272 return bmtd
._block_markbad(mtd
, (loff_t
)block
<< bmtd
.blk_shift
);
/*
 * Interpose the BMT layer: save the chip's original mtd callbacks into
 * bmtd (so the wrappers and bbt_nand_copy() can reach the raw device),
 * then install the mtk_bmt_* wrappers in their place. Reversed by
 * mtk_bmt_detach().
 */
276 mtk_bmt_replace_ops(struct mtd_info
*mtd
)
/* save the originals */
278 bmtd
._read_oob
= mtd
->_read_oob
;
279 bmtd
._write_oob
= mtd
->_write_oob
;
280 bmtd
._erase
= mtd
->_erase
;
281 bmtd
._block_isbad
= mtd
->_block_isbad
;
282 bmtd
._block_markbad
= mtd
->_block_markbad
;
/* install the remapping wrappers */
284 mtd
->_read_oob
= mtk_bmt_read
;
285 mtd
->_write_oob
= mtk_bmt_write
;
286 mtd
->_erase
= mtk_bmt_mtd_erase
;
287 mtd
->_block_isbad
= mtk_bmt_block_isbad
;
288 mtd
->_block_markbad
= mtk_bmt_block_markbad
;
/*
 * Debugfs "mark_good" handler: @val is a byte address; drop any
 * remapping for the block containing it.
 */
291 static int mtk_bmt_debug_mark_good(void *data
, u64 val
)
293 bmtd
.ops
->unmap_block(val
>> bmtd
.blk_shift
);
/*
 * Debugfs "mark_bad" handler: @val is a byte address; force-remap the
 * block containing it, migrating a full block of data, as if it had
 * failed.
 */
298 static int mtk_bmt_debug_mark_bad(void *data
, u64 val
)
/* logical block containing val */
300 u32 block
= val
>> bmtd
.blk_shift
;
/* its current physical backing block */
301 u16 cur_block
= bmtd
.ops
->get_mapping_block(block
);
/* force a remap away from cur_block */
303 bmtd
.ops
->remap_block(block
, cur_block
, bmtd
.blk_size
);
/*
 * Debugfs "debug" handler: delegate to the active strategy's debug hook.
 */
308 static int mtk_bmt_debug(void *data
, u64 val
)
310 return bmtd
.ops
->debug(data
, val
);
/* Write-only u64 debugfs attributes backing the files created in
 * mtk_bmt_add_debugfs() (no read callback, "%llu\n" format). */
314 DEFINE_DEBUGFS_ATTRIBUTE(fops_mark_good
, NULL
, mtk_bmt_debug_mark_good
, "%llu\n");
315 DEFINE_DEBUGFS_ATTRIBUTE(fops_mark_bad
, NULL
, mtk_bmt_debug_mark_bad
, "%llu\n");
316 DEFINE_DEBUGFS_ATTRIBUTE(fops_debug
, NULL
, mtk_bmt_debug
, "%llu\n");
/*
 * Create the "mtk-bmt" debugfs directory with three root-writable
 * control files (mark_good, mark_bad, debug). The directory handle is
 * kept in bmtd.debugfs_dir so mtk_bmt_detach() can remove it.
 */
319 mtk_bmt_add_debugfs(void)
323 dir
= bmtd
.debugfs_dir
= debugfs_create_dir("mtk-bmt", NULL
);
327 debugfs_create_file_unsafe("mark_good", S_IWUSR
, dir
, NULL
, &fops_mark_good
);
328 debugfs_create_file_unsafe("mark_bad", S_IWUSR
, dir
, NULL
, &fops_mark_bad
);
329 debugfs_create_file_unsafe("debug", S_IWUSR
, dir
, NULL
, &fops_debug
);
/*
 * Tear down the BMT layer: remove the debugfs tree, free the page
 * bounce buffer, restore the chip's original mtd callbacks saved by
 * mtk_bmt_replace_ops(), restore the full device size hidden while
 * attached, and wipe the global state.
 */
332 void mtk_bmt_detach(struct mtd_info
*mtd
)
/* remove debugfs files, if they were created */
337 if (bmtd
.debugfs_dir
)
338 debugfs_remove_recursive(bmtd
.debugfs_dir
);
339 bmtd
.debugfs_dir
= NULL
;
/* release the page bounce buffer allocated in mtk_bmt_attach() */
342 kfree(bmtd
.data_buf
);
/* restore the original callbacks */
344 mtd
->_read_oob
= bmtd
._read_oob
;
345 mtd
->_write_oob
= bmtd
._write_oob
;
346 mtd
->_erase
= bmtd
._erase
;
347 mtd
->_block_isbad
= bmtd
._block_isbad
;
348 mtd
->_block_markbad
= bmtd
._block_markbad
;
/* restore the full device size (total_blks was derived from it at
 * attach time) */
349 mtd
->size
= bmtd
.total_blks
<< bmtd
.blk_shift
;
/* forget everything so a later attach starts clean */
351 memset(&bmtd
, 0, sizeof(bmtd
));
/*
 * Attach the BMT layer to @mtd. Selects the remapping strategy from
 * device-tree properties ("mediatek,bmt-v2" -> v2 table,
 * "mediatek,bbt" -> bad-block-table mode), reads the optional
 * "mediatek,bmt-remap-range" property (pairs of 32-bit start/end
 * addresses, hence len /= 8), installs the wrapper callbacks, derives
 * the geometry fields, allocates the page bounce buffer and calls the
 * strategy's init hook.
 *
 * NOTE(review): error-path lines (returns, cleanup on init failure)
 * are missing from this view.
 */
355 int mtk_bmt_attach(struct mtd_info
*mtd
)
357 struct device_node
*np
;
363 np
= mtd_get_of_node(mtd
);
/* pick the remapping strategy from the device tree */
367 if (of_property_read_bool(np
, "mediatek,bmt-v2"))
368 bmtd
.ops
= &mtk_bmt_v2_ops
;
369 else if (of_property_read_bool(np
, "mediatek,bbt"))
370 bmtd
.ops
= &mtk_bmt_bbt_ops
;
/* optional remap ranges; length comes back in bytes, each entry is a
 * pair of __be32, so divide by 8 to get an entry count */
374 bmtd
.remap_range
= of_get_property(np
, "mediatek,bmt-remap-range",
375 &bmtd
.remap_range_len
);
376 bmtd
.remap_range_len
/= 8;
/* save originals and install the wrappers */
379 mtk_bmt_replace_ops(mtd
);
/* derive geometry; blk_size/pg_size are assumed powers of two (shift
 * computed via ffs() - 1) */
381 bmtd
.blk_size
= mtd
->erasesize
;
382 bmtd
.blk_shift
= ffs(bmtd
.blk_size
) - 1;
383 bmtd
.pg_size
= mtd
->writesize
;
384 bmtd
.pg_shift
= ffs(bmtd
.pg_size
) - 1;
385 bmtd
.total_blks
= mtd
->size
>> bmtd
.blk_shift
;
/* one-page bounce buffer shared by the copy/remap paths */
387 bmtd
.data_buf
= kzalloc(bmtd
.pg_size
, GFP_KERNEL
);
388 if (!bmtd
.data_buf
) {
389 pr_info("nand: FATAL ERR: allocate buffer failed!\n");
/* pre-fill with 0xff (NAND erased state) */
394 memset(bmtd
.data_buf
, 0xff, bmtd
.pg_size
);
/* let the selected strategy load/build its mapping table */
396 ret
= bmtd
.ops
->init(np
);
400 mtk_bmt_add_debugfs();
/* Kernel module metadata. */
409 MODULE_LICENSE("GPL");
410 MODULE_AUTHOR("Xiangsheng Hou <xiangsheng.hou@mediatek.com>, Felix Fietkau <nbd@nbd.name>");
411 MODULE_DESCRIPTION("Bad Block mapping management v2 for MediaTek NAND Flash Driver");