/*
 * Copyright (c) 2017 MediaTek Inc.
 * Author: Xiangsheng Hou <xiangsheng.hou@mediatek.com>
 * Copyright (c) 2020-2022 Felix Fietkau <nbd@nbd.name>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
16 #include <linux/kernel.h>
17 #include <linux/slab.h>
22 /* This version is used to distinguish the legacy and new algorithm */
23 #define BBMT_VERSION 2
24 unsigned char version
;
25 /* Below 2 tables will be written in SLC */
32 #define NORMAL_MAPPED 1
37 /* Maximum 8k blocks */
38 #define BBPOOL_RATIO 2
39 #define BB_TABLE_MAX bmtd.table_size
40 #define BMT_TABLE_MAX (BB_TABLE_MAX * BBPOOL_RATIO / 100)
41 #define BMT_TBL_DEF_VAL 0x0
43 static inline struct bbmt
*bmt_tbl(struct bbbt
*bbbt
)
45 return (struct bbmt
*)&bbbt
->bb_tbl
[bmtd
.table_size
];
48 static u16
find_valid_block(u16 block
)
55 if (block
>= bmtd
.total_blks
)
58 ret
= bbt_nand_read(blk_pg(block
), bmtd
.data_buf
, bmtd
.pg_size
,
60 /* Read the 1st byte of FDM to judge whether it's a bad
63 if (ret
|| fdm
[0] != 0xff) {
64 pr_info("nand: found bad block 0x%x\n", block
);
65 if (loop
>= bmtd
.bb_max
) {
66 pr_info("nand: FATAL ERR: too many bad blocks!!\n");
78 /* Find out all bad blocks, and fill in the mapping table */
79 static int scan_bad_blocks(struct bbbt
*bbt
)
84 /* First time download, the block0 MUST NOT be a bad block,
85 * this is guaranteed by vendor
90 * Construct the mapping table of Normal data area(non-PMT/BMTPOOL)
91 * G - Good block; B - Bad block
92 * ---------------------------
93 * physical |G|G|B|G|B|B|G|G|G|G|B|G|B|
94 * ---------------------------
95 * What bb_tbl[i] looks like:
97 * 0 1 2 3 4 5 6 7 8 9 a b c
98 * mapped block(bb_tbl[i]):
99 * 0 1 3 6 7 8 9 b ......
101 * If new bad block ocurred(n), search bmt_tbl to find
102 * a available block(x), and fill in the bb_tbl[n] = x;
104 for (i
= 1; i
< bmtd
.pool_lba
; i
++) {
105 bbt
->bb_tbl
[i
] = find_valid_block(bbt
->bb_tbl
[i
- 1] + 1);
106 BBT_LOG("bb_tbl[0x%x] = 0x%x", i
, bbt
->bb_tbl
[i
]);
107 if (bbt
->bb_tbl
[i
] == 0)
111 /* Physical Block start Address of BMT pool */
112 bmtd
.pool_pba
= bbt
->bb_tbl
[i
- 1] + 1;
113 if (bmtd
.pool_pba
>= bmtd
.total_blks
- 2) {
114 pr_info("nand: FATAL ERR: Too many bad blocks!!\n");
118 BBT_LOG("pool_pba=0x%x", bmtd
.pool_pba
);
120 block
= bmtd
.pool_pba
;
122 * The bmt table is used for runtime bad block mapping
123 * G - Good block; B - Bad block
124 * ---------------------------
125 * physical |G|G|B|G|B|B|G|G|G|G|B|G|B|
126 * ---------------------------
127 * block: 0 1 2 3 4 5 6 7 8 9 a b c
128 * What bmt_tbl[i] looks like in initial state:
135 * N - Not mapped(Available)
139 * BMT always in the last valid block in pool
141 while ((block
= find_valid_block(block
)) != 0) {
142 bmt_tbl(bbt
)[i
].block
= block
;
143 bmt_tbl(bbt
)[i
].mapped
= NO_MAPPED
;
144 BBT_LOG("bmt_tbl[%d].block = 0x%x", i
, block
);
149 /* i - How many available blocks in pool, which is the length of bmt_tbl[]
150 * bmtd.bmt_blk_idx - bmt_tbl[bmtd.bmt_blk_idx].block => the BMT block
152 bmtd
.bmt_blk_idx
= i
- 1;
153 bmt_tbl(bbt
)[bmtd
.bmt_blk_idx
].mapped
= BMT_MAPPED
;
156 pr_info("nand: FATAL ERR: no space to store BMT!!\n");
160 pr_info("[BBT] %d available blocks in BMT pool\n", i
);
165 static bool is_valid_bmt(unsigned char *buf
, unsigned char *fdm
)
167 struct bbbt
*bbt
= (struct bbbt
*)buf
;
168 u8
*sig
= (u8
*)bbt
->signature
+ MAIN_SIGNATURE_OFFSET
;
171 if (memcmp(bbt
->signature
+ MAIN_SIGNATURE_OFFSET
, "BMT", 3) == 0 &&
172 memcmp(fdm
+ OOB_SIGNATURE_OFFSET
, "bmt", 3) == 0) {
173 if (bbt
->version
== BBMT_VERSION
)
176 BBT_LOG("[BBT] BMT Version not match,upgrage preloader and uboot please! sig=%02x%02x%02x, fdm=%02x%02x%02x",
177 sig
[0], sig
[1], sig
[2],
178 fdm
[1], fdm
[2], fdm
[3]);
182 static u16
get_bmt_index(struct bbmt
*bmt
)
186 while (bmt
[i
].block
!= BMT_TBL_DEF_VAL
) {
187 if (bmt
[i
].mapped
== BMT_MAPPED
)
195 read_bmt(u16 block
, unsigned char *dat
, unsigned char *fdm
, int fdm_len
)
197 u32 len
= bmtd
.bmt_pgs
<< bmtd
.pg_shift
;
199 return bbt_nand_read(blk_pg(block
), dat
, len
, fdm
, fdm_len
);
202 static struct bbbt
*scan_bmt(u16 block
)
206 if (block
< bmtd
.pool_lba
)
209 if (read_bmt(block
, bmtd
.bbt_buf
, fdm
, sizeof(fdm
)))
210 return scan_bmt(block
- 1);
212 if (is_valid_bmt(bmtd
.bbt_buf
, fdm
)) {
213 bmtd
.bmt_blk_idx
= get_bmt_index(bmt_tbl((struct bbbt
*)bmtd
.bbt_buf
));
214 if (bmtd
.bmt_blk_idx
== 0) {
215 pr_info("[BBT] FATAL ERR: bmt block index is wrong!\n");
218 pr_info("[BBT] BMT.v2 is found at 0x%x\n", block
);
219 return (struct bbbt
*)bmtd
.bbt_buf
;
221 return scan_bmt(block
- 1);
224 /* Write the Burner Bad Block Table to Nand Flash
225 * n - write BMT to bmt_tbl[n]
227 static u16
upload_bmt(struct bbbt
*bbt
, int n
)
232 if (n
< 0 || bmt_tbl(bbt
)[n
].mapped
== NORMAL_MAPPED
) {
233 pr_info("nand: FATAL ERR: no space to store BMT!\n");
237 block
= bmt_tbl(bbt
)[n
].block
;
238 BBT_LOG("n = 0x%x, block = 0x%x", n
, block
);
239 if (bbt_nand_erase(block
)) {
240 bmt_tbl(bbt
)[n
].block
= 0;
241 /* erase failed, try the previous block: bmt_tbl[n - 1].block */
246 /* The signature offset is fixed set to 0,
247 * oob signature offset is fixed set to 1
249 memcpy(bbt
->signature
+ MAIN_SIGNATURE_OFFSET
, "BMT", 3);
250 bbt
->version
= BBMT_VERSION
;
252 if (write_bmt(block
, (unsigned char *)bbt
)) {
253 bmt_tbl(bbt
)[n
].block
= 0;
255 /* write failed, try the previous block in bmt_tbl[n - 1] */
260 /* Return the current index(n) of BMT pool (bmt_tbl[n]) */
264 static u16
find_valid_block_in_pool(struct bbbt
*bbt
)
268 if (bmtd
.bmt_blk_idx
== 0)
271 for (i
= 0; i
< bmtd
.bmt_blk_idx
; i
++) {
272 if (bmt_tbl(bbt
)[i
].block
!= 0 && bmt_tbl(bbt
)[i
].mapped
== NO_MAPPED
) {
273 bmt_tbl(bbt
)[i
].mapped
= NORMAL_MAPPED
;
274 return bmt_tbl(bbt
)[i
].block
;
279 pr_info("nand: FATAL ERR: BMT pool is run out!\n");
283 /* We met a bad block, mark it as bad and map it to a valid block in pool,
284 * if it's a write failure, we need to write the data to mapped block
286 static bool remap_block_v2(u16 block
, u16 mapped_block
, int copy_len
)
292 mapped_blk
= find_valid_block_in_pool(bbt
);
296 /* Map new bad block to available block in pool */
297 bbt
->bb_tbl
[block
] = mapped_blk
;
299 /* Erase new block */
300 bbt_nand_erase(mapped_blk
);
302 bbt_nand_copy(mapped_blk
, block
, copy_len
);
304 bmtd
.bmt_blk_idx
= upload_bmt(bbt
, bmtd
.bmt_blk_idx
);
309 static u16
get_mapping_block_index_v2(int block
)
313 if (block
>= bmtd
.pool_lba
)
316 if (!mapping_block_in_range(block
, &start
, &end
))
319 return bmtd
.bbt
->bb_tbl
[block
];
323 unmap_block_v2(u16 block
)
325 bmtd
.bbt
->bb_tbl
[block
] = block
;
326 bmtd
.bmt_blk_idx
= upload_bmt(bmtd
.bbt
, bmtd
.bmt_blk_idx
);
329 static unsigned long *
330 mtk_bmt_get_mapping_mask(void)
332 struct bbmt
*bbmt
= bmt_tbl(bmtd
.bbt
);
333 int main_blocks
= bmtd
.mtd
->size
>> bmtd
.blk_shift
;
337 used
= kcalloc(sizeof(unsigned long), BIT_WORD(bmtd
.bmt_blk_idx
) + 1, GFP_KERNEL
);
341 for (i
= 1; i
< main_blocks
; i
++) {
342 if (bmtd
.bbt
->bb_tbl
[i
] == i
)
345 for (k
= 0; k
< bmtd
.bmt_blk_idx
; k
++) {
346 if (bmtd
.bbt
->bb_tbl
[i
] != bbmt
[k
].block
)
357 static int mtk_bmt_debug_v2(void *data
, u64 val
)
359 struct bbmt
*bbmt
= bmt_tbl(bmtd
.bbt
);
360 struct mtd_info
*mtd
= bmtd
.mtd
;
362 int main_blocks
= mtd
->size
>> bmtd
.blk_shift
;
366 used
= mtk_bmt_get_mapping_mask();
372 for (i
= 1; i
< main_blocks
; i
++) {
373 if (bmtd
.bbt
->bb_tbl
[i
] == i
)
376 printk("remap [%x->%x]\n", i
, bmtd
.bbt
->bb_tbl
[i
]);
379 for (i
= 0; i
<= bmtd
.bmt_blk_idx
; i
++) {
382 switch (bbmt
[i
].mapped
) {
387 if (test_bit(i
, used
))
397 printk("[%x:%c] = 0x%x\n", i
, c
, bbmt
[i
].block
);
401 for (i
= 0; i
<= bmtd
.bmt_blk_idx
; i
++) {
402 if (bbmt
[i
].mapped
!= NORMAL_MAPPED
)
405 if (test_bit(i
, used
))
409 bbmt
[i
].mapped
= NO_MAPPED
;
410 printk("free block [%d:%x]\n", i
, bbmt
[i
].block
);
413 bmtd
.bmt_blk_idx
= upload_bmt(bmtd
.bbt
, bmtd
.bmt_blk_idx
);
422 static int mtk_bmt_init_v2(struct device_node
*np
)
424 u32 bmt_pool_size
, bmt_table_size
;
428 if (of_property_read_u32(np
, "mediatek,bmt-pool-size",
429 &bmt_pool_size
) != 0)
432 if (of_property_read_u8(np
, "mediatek,bmt-oob-offset",
433 &bmtd
.oob_offset
) != 0)
436 if (of_property_read_u32(np
, "mediatek,bmt-table-size",
437 &bmt_table_size
) != 0)
438 bmt_table_size
= 0x2000U
;
440 bmtd
.table_size
= bmt_table_size
;
442 pmt_block
= bmtd
.total_blks
- bmt_pool_size
- 2;
444 bmtd
.mtd
->size
= pmt_block
<< bmtd
.blk_shift
;
447 * ---------------------------------------
448 * | PMT(2blks) | BMT POOL(totalblks * 2%) |
449 * ---------------------------------------
452 * pmt_block pmt_block + 2blocks(pool_lba)
455 * The blocks ahead of the boundary block are stored in bb_tbl
456 * and blocks behind are stored in bmt_tbl
459 bmtd
.pool_lba
= (u16
)(pmt_block
+ 2);
460 bmtd
.bb_max
= bmtd
.total_blks
* BBPOOL_RATIO
/ 100;
462 bufsz
= round_up(sizeof(struct bbbt
) +
463 bmt_table_size
* sizeof(struct bbmt
), bmtd
.pg_size
);
464 bmtd
.bmt_pgs
= bufsz
>> bmtd
.pg_shift
;
466 bmtd
.bbt_buf
= kzalloc(bufsz
, GFP_KERNEL
);
470 memset(bmtd
.bbt_buf
, 0xff, bufsz
);
472 /* Scanning start from the first page of the last block
475 bmtd
.bbt
= scan_bmt(bmtd
.total_blks
- 1);
478 if (bmtd
.total_blks
> BB_TABLE_MAX
+ BMT_TABLE_MAX
) {
479 pr_info("nand: FATAL: Too many blocks, can not support!\n");
483 bmtd
.bbt
= (struct bbbt
*)bmtd
.bbt_buf
;
484 memset(bmt_tbl(bmtd
.bbt
), BMT_TBL_DEF_VAL
,
485 bmtd
.table_size
* sizeof(struct bbmt
));
487 if (scan_bad_blocks(bmtd
.bbt
))
490 /* BMT always in the last valid block in pool */
491 bmtd
.bmt_blk_idx
= upload_bmt(bmtd
.bbt
, bmtd
.bmt_blk_idx
);
492 block
= bmt_tbl(bmtd
.bbt
)[bmtd
.bmt_blk_idx
].block
;
493 pr_notice("[BBT] BMT.v2 is written into PBA:0x%x\n", block
);
495 if (bmtd
.bmt_blk_idx
== 0)
496 pr_info("nand: Warning: no available block in BMT pool!\n");
497 else if (bmtd
.bmt_blk_idx
== (u16
)-1)
505 const struct mtk_bmt_ops mtk_bmt_v2_ops
= {
508 .init
= mtk_bmt_init_v2
,
509 .remap_block
= remap_block_v2
,
510 .unmap_block
= unmap_block_v2
,
511 .get_mapping_block
= get_mapping_block_index_v2
,
512 .debug
= mtk_bmt_debug_v2
,