/*
 * Copyright (c) 2017 MediaTek Inc.
 * Author: Xiangsheng Hou <xiangsheng.hou@mediatek.com>
 * Copyright (c) 2020-2022 Felix Fietkau <nbd@nbd.name>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
21 bbt_block_is_bad(u16 block
)
23 u8 cur
= bmtd
.bbt_buf
[block
/ 4];
25 return cur
& (3 << ((block
% 4) * 2));
29 bbt_set_block_state(u16 block
, bool bad
)
31 u8 mask
= (3 << ((block
% 4) * 2));
34 bmtd
.bbt_buf
[block
/ 4] |= mask
;
36 bmtd
.bbt_buf
[block
/ 4] &= ~mask
;
38 bbt_nand_erase(bmtd
.bmt_blk_idx
);
39 write_bmt(bmtd
.bmt_blk_idx
, bmtd
.bbt_buf
);
43 get_mapping_block_index_bbt(int block
)
49 if (!mapping_block_in_range(block
, &start
, &end
))
52 start
>>= bmtd
.blk_shift
;
53 end
>>= bmtd
.blk_shift
;
54 /* skip bad blocks within the mapping range */
56 for (i
= start
; i
< end
; i
++) {
57 if (bbt_block_is_bad(i
))
68 /* when overflowing, remap remaining blocks to bad ones */
69 for (i
= end
- 1; bad_blocks
> 0; i
--) {
70 if (!bbt_block_is_bad(i
))
74 if (bad_blocks
<= ofs
)
81 static bool remap_block_bbt(u16 block
, u16 mapped_blk
, int copy_len
)
86 if (!mapping_block_in_range(block
, &start
, &end
))
89 bbt_set_block_state(mapped_blk
, true);
91 new_blk
= get_mapping_block_index_bbt(block
);
92 bbt_nand_erase(new_blk
);
94 bbt_nand_copy(new_blk
, mapped_blk
, copy_len
);
100 unmap_block_bbt(u16 block
)
102 bbt_set_block_state(block
, false);
106 mtk_bmt_read_bbt(void)
111 for (i
= bmtd
.total_blks
- 1; i
>= bmtd
.total_blks
- 5; i
--) {
112 u32 page
= i
<< (bmtd
.blk_shift
- bmtd
.pg_shift
);
114 if (bbt_nand_read(page
, bmtd
.bbt_buf
, bmtd
.pg_size
,
115 oob_buf
, sizeof(oob_buf
))) {
116 pr_info("read_bbt: could not read block %d\n", i
);
120 if (oob_buf
[0] != 0xff) {
121 pr_info("read_bbt: bad block at %d\n", i
);
125 if (memcmp(&oob_buf
[1], "mtknand", 7) != 0) {
126 pr_info("read_bbt: signature mismatch in block %d\n", i
);
127 print_hex_dump(KERN_INFO
, "", DUMP_PREFIX_OFFSET
, 16, 1, oob_buf
, 8, 1);
131 pr_info("read_bbt: found bbt at block %d\n", i
);
132 bmtd
.bmt_blk_idx
= i
;
141 mtk_bmt_init_bbt(struct device_node
*np
)
143 int buf_size
= round_up(bmtd
.total_blks
>> 2, bmtd
.blk_size
);
146 bmtd
.bbt_buf
= kmalloc(buf_size
, GFP_KERNEL
);
150 memset(bmtd
.bbt_buf
, 0xff, buf_size
);
151 bmtd
.mtd
->size
-= 4 * bmtd
.mtd
->erasesize
;
153 ret
= mtk_bmt_read_bbt();
157 bmtd
.bmt_pgs
= buf_size
/ bmtd
.pg_size
;
162 static int mtk_bmt_debug_bbt(void *data
, u64 val
)
169 for (i
= 0; i
< bmtd
.total_blks
; i
+= 4) {
170 u8 cur
= bmtd
.bbt_buf
[i
/ 4];
172 for (k
= 0; k
< 4; k
++, cur
>>= 2)
173 buf
[k
] = (cur
& 3) ? 'B' : '.';
176 printk("[%06x] %s\n", i
* bmtd
.blk_size
, buf
);
181 for (i
= bmtd
.bmt_blk_idx
; i
< bmtd
.total_blks
- 1; i
++)
182 bbt_nand_erase(bmtd
.bmt_blk_idx
);
185 bmtd
.bmt_blk_idx
= bmtd
.total_blks
- 1;
186 bbt_nand_erase(bmtd
.bmt_blk_idx
);
187 write_bmt(bmtd
.bmt_blk_idx
, bmtd
.bbt_buf
);
195 const struct mtk_bmt_ops mtk_bmt_bbt_ops
= {
198 .init
= mtk_bmt_init_bbt
,
199 .remap_block
= remap_block_bbt
,
200 .unmap_block
= unmap_block_bbt
,
201 .get_mapping_block
= get_mapping_block_index_bbt
,
202 .debug
= mtk_bmt_debug_bbt
,