/*
 * Copyright (c) 2017 MediaTek Inc.
 * Author: Xiangsheng Hou <xiangsheng.hou@mediatek.com>
 * Copyright (c) 2020 Felix Fietkau <nbd@nbd.name>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/slab.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/mtk_bmt.h>
#include <linux/module.h>
#include <linux/debugfs.h>
#define MAIN_SIGNATURE_OFFSET	0
#define OOB_SIGNATURE_OFFSET	1
#define BBPOOL_RATIO		2

#define BBT_LOG(fmt, ...) pr_debug("[BBT][%s|%d] "fmt"\n", __func__, __LINE__, ##__VA_ARGS__)

/* Maximum 8k blocks */
#define BB_TABLE_MAX	bmtd.table_size
#define BMT_TABLE_MAX	(BB_TABLE_MAX * BBPOOL_RATIO / 100)
#define BMT_TBL_DEF_VAL	0x0
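
/*
 * Worked example (illustrative, using the driver's default table size of
 * 0x2000): with BB_TABLE_MAX = 8192 blocks, BMT_TABLE_MAX evaluates to
 * 8192 * 2 / 100 = 163 entries in the remap pool table.
 */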
/*
 * Burner Bad Block Table
 * --------- Only supports SLC NAND chips! ----------
 */
struct bbbt {
	char signature[3];
	/* This version is used to distinguish the legacy and new algorithm */
#define BBMT_VERSION		2
	unsigned char version;
	/* Below 2 tables will be written in SLC */
	u16 bb_tbl[];
};

struct bbmt {
	u16 block;
#define NO_MAPPED		0
#define NORMAL_MAPPED		1
#define BMT_MAPPED		2
	u16 mapped;
};
static struct bmt_desc {
	struct mtd_info *mtd;

	int (*_read_oob) (struct mtd_info *mtd, loff_t from,
			  struct mtd_oob_ops *ops);
	int (*_write_oob) (struct mtd_info *mtd, loff_t to,
			   struct mtd_oob_ops *ops);
	int (*_erase) (struct mtd_info *mtd, struct erase_info *instr);
	int (*_block_isbad) (struct mtd_info *mtd, loff_t ofs);
	int (*_block_markbad) (struct mtd_info *mtd, loff_t ofs);

	struct bbbt *bbt;

	struct dentry *debugfs_dir;

	u32 table_size;
	u32 pg_size;
	u32 blk_size;
	u16 pg_shift;
	u16 blk_shift;
	/* bbt logical address */
	u16 pool_lba;
	/* bbt physical address */
	u16 pool_pba;
	/* Maximum count of bad blocks that the vendor guaranteed */
	u16 bb_max;
	/* Total blocks of the Nand Chip */
	u16 total_blks;
	/* The block(n) BMT is located at (bmt_tbl[n]) */
	u16 bmt_blk_idx;
	/* How many pages are needed to store 'struct bbbt' */
	u32 bmt_pgs;

	/* to compensate for driver level remapping */
	u8 oob_offset;
} bmtd = {0};
static unsigned char *nand_bbt_buf;
static unsigned char *nand_data_buf;
/* -------- Unit conversions -------- */
static inline u32 blk_pg(u16 block)
{
	return (u32)(block << (bmtd.blk_shift - bmtd.pg_shift));
}
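
/*
 * Example (illustrative geometry, not queried from hardware): with
 * 128 KiB erase blocks (blk_shift = 17) and 2 KiB pages (pg_shift = 11),
 * blk_pg(block) == block << 6, i.e. the page number of a block's first
 * page, at 64 pages per block.
 */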
/* -------- Nand operations wrapper -------- */
static int
bbt_nand_read(u32 page, unsigned char *dat, int dat_len,
	      unsigned char *fdm, int fdm_len)
{
	struct mtd_oob_ops ops = {
		.mode = MTD_OPS_PLACE_OOB,
		.ooboffs = bmtd.oob_offset,
		.oobbuf = fdm,
		.ooblen = fdm_len,
		.datbuf = dat,
		.len = dat_len,
	};

	return bmtd._read_oob(bmtd.mtd, page << bmtd.pg_shift, &ops);
}
static inline int bbt_nand_erase(u16 block)
{
	struct mtd_info *mtd = bmtd.mtd;
	struct erase_info instr = {
		.addr = (loff_t)block << bmtd.blk_shift,
		.len = bmtd.blk_size,
	};

	return bmtd._erase(mtd, &instr);
}
/* -------- Bad Blocks Management -------- */
static inline struct bbmt *bmt_tbl(struct bbbt *bbbt)
{
	return (struct bbmt *)&bbbt->bb_tbl[bmtd.table_size];
}
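
/*
 * Layout note: the pool table returned by bmt_tbl() lives in the same
 * buffer as struct bbbt itself, immediately after the table_size entries
 * of bb_tbl[], so both tables are read from and written to flash together.
 */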
static int
read_bmt(u16 block, unsigned char *dat, unsigned char *fdm, int fdm_len)
{
	u32 len = bmtd.bmt_pgs << bmtd.pg_shift;

	return bbt_nand_read(blk_pg(block), dat, len, fdm, fdm_len);
}
static int write_bmt(u16 block, unsigned char *dat)
{
	struct mtd_oob_ops ops = {
		.mode = MTD_OPS_PLACE_OOB,
		.ooboffs = OOB_SIGNATURE_OFFSET + bmtd.oob_offset,
		.oobbuf = "bmt",
		.ooblen = 3,
		.datbuf = dat,
		.len = bmtd.bmt_pgs << bmtd.pg_shift,
	};
	loff_t addr = (loff_t)block << bmtd.blk_shift;

	return bmtd._write_oob(bmtd.mtd, addr, &ops);
}
static u16 find_valid_block(u16 block)
{
	u8 fdm[4];
	int ret;
	int loop = 0;

retry:
	if (block >= bmtd.total_blks)
		return 0;

	ret = bbt_nand_read(blk_pg(block), nand_data_buf, bmtd.pg_size,
			    fdm, sizeof(fdm));
	/* Read the 1st byte of FDM to judge whether it's a bad
	 * block or not
	 */
	if (ret || fdm[0] != 0xff) {
		pr_info("nand: found bad block 0x%x\n", block);
		if (loop >= bmtd.bb_max) {
			pr_info("nand: FATAL ERR: too many bad blocks!!\n");
			return 0;
		}

		loop++;
		block++;
		goto retry;
	}

	return block;
}
/* Find out all bad blocks, and fill in the mapping table */
static int scan_bad_blocks(struct bbbt *bbt)
{
	int i;
	u16 block = 0;

	/* First time download, block0 MUST NOT be a bad block,
	 * this is guaranteed by the vendor
	 */
	bbt->bb_tbl[0] = 0;

	/*
	 * Construct the mapping table of the normal data area (non-PMT/BMTPOOL)
	 * G - Good block; B - Bad block
	 *           ---------------------------
	 * physical |G|G|B|G|B|B|G|G|G|G|B|G|B|
	 *           ---------------------------
	 * What bb_tbl[i] looks like:
	 *   physical block(i):
	 *           0 1 2 3 4 5 6 7 8 9 a b c
	 *   mapped block(bb_tbl[i]):
	 *           0 1 3 6 7 8 9 b ......
	 * If a new bad block occurs(n), search bmt_tbl to find
	 * an available block(x), and fill in bb_tbl[n] = x;
	 */
	for (i = 1; i < bmtd.pool_lba; i++) {
		bbt->bb_tbl[i] = find_valid_block(bbt->bb_tbl[i - 1] + 1);
		BBT_LOG("bb_tbl[0x%x] = 0x%x", i, bbt->bb_tbl[i]);
		if (bbt->bb_tbl[i] == 0)
			return -1;
	}

	/* Physical Block start Address of BMT pool */
	bmtd.pool_pba = bbt->bb_tbl[i - 1] + 1;
	if (bmtd.pool_pba >= bmtd.total_blks - 2) {
		pr_info("nand: FATAL ERR: Too many bad blocks!!\n");
		return -1;
	}

	BBT_LOG("pool_pba=0x%x", bmtd.pool_pba);
	i = 0;
	block = bmtd.pool_pba;
	/*
	 * The bmt table is used for runtime bad block mapping
	 * G - Good block; B - Bad block
	 *           ---------------------------
	 * physical |G|G|B|G|B|B|G|G|G|G|B|G|B|
	 *           ---------------------------
	 * block:    0 1 2 3 4 5 6 7 8 9 a b c
	 * What bmt_tbl[i] looks like in the initial state:
	 *   i:
	 *           0 1 2 3 4 5 6 7
	 *   bmt_tbl[i].block:
	 *           0 1 3 6 7 8 9 b
	 *   bmt_tbl[i].mapped:
	 *           N N N N N N N B
	 *       B - Block is the BMT
	 *       N - Not mapped(Available)
	 *
	 * The BMT is always kept in the last valid block of the pool
	 */
	while ((block = find_valid_block(block)) != 0) {
		bmt_tbl(bbt)[i].block = block;
		bmt_tbl(bbt)[i].mapped = NO_MAPPED;
		BBT_LOG("bmt_tbl[%d].block = 0x%x", i, block);
		block++;
		i++;
	}

	/* i - How many available blocks in the pool, which is the length of bmt_tbl[]
	 * bmtd.bmt_blk_idx - bmt_tbl[bmtd.bmt_blk_idx].block => the BMT block
	 */
	bmtd.bmt_blk_idx = i - 1;
	bmt_tbl(bbt)[bmtd.bmt_blk_idx].mapped = BMT_MAPPED;

	if (i < 1) {
		pr_info("nand: FATAL ERR: no space to store BMT!!\n");
		return -1;
	}

	pr_info("[BBT] %d available blocks in BMT pool\n", i);

	return 0;
}
static bool is_valid_bmt(unsigned char *buf, unsigned char *fdm)
{
	struct bbbt *bbt = (struct bbbt *)buf;
	u8 *sig = (u8 *)bbt->signature + MAIN_SIGNATURE_OFFSET;

	if (memcmp(bbt->signature + MAIN_SIGNATURE_OFFSET, "BMT", 3) == 0 &&
	    memcmp(fdm + OOB_SIGNATURE_OFFSET, "bmt", 3) == 0) {
		if (bbt->version == BBMT_VERSION)
			return true;
	}
	BBT_LOG("[BBT] BMT version mismatch, please upgrade preloader and uboot! sig=%02x%02x%02x, fdm=%02x%02x%02x",
		sig[0], sig[1], sig[2],
		fdm[1], fdm[2], fdm[3]);
	return false;
}
static u16 get_bmt_index(struct bbmt *bmt)
{
	int i = 0;

	while (bmt[i].block != BMT_TBL_DEF_VAL) {
		if (bmt[i].mapped == BMT_MAPPED)
			return i;
		i++;
	}
	return 0;
}
static struct bbbt *scan_bmt(u16 block)
{
	u8 fdm[4];

	if (block < bmtd.pool_lba)
		return NULL;

	if (read_bmt(block, nand_bbt_buf, fdm, sizeof(fdm)))
		return scan_bmt(block - 1);

	if (is_valid_bmt(nand_bbt_buf, fdm)) {
		bmtd.bmt_blk_idx = get_bmt_index(bmt_tbl((struct bbbt *)nand_bbt_buf));
		if (bmtd.bmt_blk_idx == 0) {
			pr_info("[BBT] FATAL ERR: bmt block index is wrong!\n");
			return NULL;
		}
		pr_info("[BBT] BMT.v2 is found at 0x%x\n", block);
		return (struct bbbt *)nand_bbt_buf;
	} else
		return scan_bmt(block - 1);
}
/* Write the Burner Bad Block Table to Nand Flash
 * n - write BMT to bmt_tbl[n]
 */
static u16 upload_bmt(struct bbbt *bbt, int n)
{
	u16 block;

retry:
	if (n < 0 || bmt_tbl(bbt)[n].mapped == NORMAL_MAPPED) {
		pr_info("nand: FATAL ERR: no space to store BMT!\n");
		return (u16)-1;
	}

	block = bmt_tbl(bbt)[n].block;
	BBT_LOG("n = 0x%x, block = 0x%x", n, block);
	if (bbt_nand_erase(block)) {
		bmt_tbl(bbt)[n].block = 0;
		/* erase failed, try the previous block: bmt_tbl[n - 1].block */
		n--;
		goto retry;
	}

	/* The signature offset is fixed at 0,
	 * the oob signature offset is fixed at 1
	 */
	memcpy(bbt->signature + MAIN_SIGNATURE_OFFSET, "BMT", 3);
	bbt->version = BBMT_VERSION;

	if (write_bmt(block, (unsigned char *)bbt)) {
		bmt_tbl(bbt)[n].block = 0;

		/* write failed, try the previous block in bmt_tbl[n - 1] */
		n--;
		goto retry;
	}

	/* Return the current index(n) of BMT pool (bmt_tbl[n]) */
	return n;
}
static u16 find_valid_block_in_pool(struct bbbt *bbt)
{
	int i;

	if (bmtd.bmt_blk_idx == 0)
		goto error;

	for (i = 0; i < bmtd.bmt_blk_idx; i++) {
		if (bmt_tbl(bbt)[i].block != 0 && bmt_tbl(bbt)[i].mapped == NO_MAPPED) {
			bmt_tbl(bbt)[i].mapped = NORMAL_MAPPED;
			return bmt_tbl(bbt)[i].block;
		}
	}

error:
	pr_info("nand: FATAL ERR: the BMT pool has run out!\n");
	return 0;
}
/* We met a bad block, mark it as bad and map it to a valid block in the pool;
 * if it was a write failure, the data still needs to be written to the mapped block
 */
static bool update_bmt(u16 block)
{
	u16 mapped_blk;
	struct bbbt *bbt;

	bbt = bmtd.bbt;
	mapped_blk = find_valid_block_in_pool(bbt);
	if (mapped_blk == 0)
		return false;

	/* Map the new bad block to an available block in the pool */
	bbt->bb_tbl[block] = mapped_blk;
	bmtd.bmt_blk_idx = upload_bmt(bbt, bmtd.bmt_blk_idx);

	return true;
}
u16 get_mapping_block_index(int block)
{
	int mapping_block;

	if (block < bmtd.pool_lba)
		mapping_block = bmtd.bbt->bb_tbl[block];
	else
		mapping_block = block;
	BBT_LOG("0x%x mapped to 0x%x", block, mapping_block);

	return mapping_block;
}
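
/*
 * The wrappers below replace the mtd_info callbacks installed by the parent
 * driver. Each request is split at erase-block boundaries, every block is
 * remapped through get_mapping_block_index(), and on failure the block is
 * retired via update_bmt() and the operation retried, up to 10 times.
 */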
static int
mtk_bmt_read(struct mtd_info *mtd, loff_t from,
	     struct mtd_oob_ops *ops)
{
	struct mtd_oob_ops cur_ops = *ops;
	int retry_count = 0;
	loff_t cur_from;
	int ret;

	ops->retlen = 0;
	ops->oobretlen = 0;

	while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) {
		u32 offset = from & (bmtd.blk_size - 1);
		u32 block = from >> bmtd.blk_shift;
		u32 cur_block;

		cur_block = get_mapping_block_index(block);
		cur_from = ((loff_t)cur_block << bmtd.blk_shift) + offset;

		cur_ops.oobretlen = 0;
		cur_ops.retlen = 0;
		cur_ops.len = min_t(u32, mtd->erasesize - offset,
				    ops->len - ops->retlen);
		ret = bmtd._read_oob(mtd, cur_from, &cur_ops);
		if (ret < 0) {
			update_bmt(block);
			if (retry_count++ < 10)
				continue;

			return ret;
		}

		ops->retlen += cur_ops.retlen;
		ops->oobretlen += cur_ops.oobretlen;

		cur_ops.ooboffs = 0;
		cur_ops.datbuf += cur_ops.retlen;
		cur_ops.oobbuf += cur_ops.oobretlen;
		cur_ops.ooblen -= cur_ops.oobretlen;

		if (!cur_ops.len)
			cur_ops.len = mtd->erasesize - offset;

		from += cur_ops.len;
		retry_count = 0;
	}

	return 0;
}
static int
mtk_bmt_write(struct mtd_info *mtd, loff_t to,
	      struct mtd_oob_ops *ops)
{
	struct mtd_oob_ops cur_ops = *ops;
	int retry_count = 0;
	loff_t cur_to;
	int ret;

	ops->retlen = 0;
	ops->oobretlen = 0;

	while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) {
		u32 offset = to & (bmtd.blk_size - 1);
		u32 block = to >> bmtd.blk_shift;
		u32 cur_block;

		cur_block = get_mapping_block_index(block);
		cur_to = ((loff_t)cur_block << bmtd.blk_shift) + offset;

		cur_ops.oobretlen = 0;
		cur_ops.retlen = 0;
		cur_ops.len = min_t(u32, bmtd.blk_size - offset,
				    ops->len - ops->retlen);
		ret = bmtd._write_oob(mtd, cur_to, &cur_ops);
		if (ret < 0) {
			update_bmt(block);
			if (retry_count++ < 10)
				continue;

			return ret;
		}

		ops->retlen += cur_ops.retlen;
		ops->oobretlen += cur_ops.oobretlen;

		cur_ops.ooboffs = 0;
		cur_ops.datbuf += cur_ops.retlen;
		cur_ops.oobbuf += cur_ops.oobretlen;
		cur_ops.ooblen -= cur_ops.oobretlen;

		if (!cur_ops.len)
			cur_ops.len = mtd->erasesize - offset;

		to += cur_ops.len;
		retry_count = 0;
	}

	return 0;
}
static int
mtk_bmt_mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct erase_info mapped_instr = {
		.len = bmtd.blk_size,
	};
	int retry_count = 0;
	u64 start_addr, end_addr;
	int ret = 0;
	u16 orig_block, block;

	start_addr = instr->addr & (~mtd->erasesize_mask);
	end_addr = instr->addr + instr->len;

	while (start_addr < end_addr) {
		orig_block = start_addr >> bmtd.blk_shift;
		block = get_mapping_block_index(orig_block);
		mapped_instr.addr = (loff_t)block << bmtd.blk_shift;
		ret = bmtd._erase(mtd, &mapped_instr);
		if (ret) {
			update_bmt(orig_block);
			if (retry_count++ < 10)
				continue;
			instr->fail_addr = start_addr;
			break;
		}
		start_addr += mtd->erasesize;
		retry_count = 0;
	}

	return ret;
}
static int
mtk_bmt_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	int retry_count = 0;
	u16 orig_block = ofs >> bmtd.blk_shift;
	u16 block;
	int ret;

retry:
	block = get_mapping_block_index(orig_block);
	ret = bmtd._block_isbad(mtd, (loff_t)block << bmtd.blk_shift);
	if (ret) {
		update_bmt(orig_block);
		if (retry_count++ < 10)
			goto retry;
	}
	return ret;
}
static int
mtk_bmt_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	u16 orig_block = ofs >> bmtd.blk_shift;
	u16 block = get_mapping_block_index(orig_block);

	update_bmt(orig_block);

	return bmtd._block_markbad(mtd, (loff_t)block << bmtd.blk_shift);
}
static void
mtk_bmt_replace_ops(struct mtd_info *mtd)
{
	bmtd._read_oob = mtd->_read_oob;
	bmtd._write_oob = mtd->_write_oob;
	bmtd._erase = mtd->_erase;
	bmtd._block_isbad = mtd->_block_isbad;
	bmtd._block_markbad = mtd->_block_markbad;

	mtd->_read_oob = mtk_bmt_read;
	mtd->_write_oob = mtk_bmt_write;
	mtd->_erase = mtk_bmt_mtd_erase;
	mtd->_block_isbad = mtk_bmt_block_isbad;
	mtd->_block_markbad = mtk_bmt_block_markbad;
}
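
/*
 * Note: the original callbacks saved above are restored by
 * mtk_bmt_detach() when the BMT layer is torn down.
 */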
static int mtk_bmt_debug_mark_good(void *data, u64 val)
{
	u32 block = val >> bmtd.blk_shift;

	bmtd.bbt->bb_tbl[block] = block;
	bmtd.bmt_blk_idx = upload_bmt(bmtd.bbt, bmtd.bmt_blk_idx);

	return 0;
}
static int mtk_bmt_debug_mark_bad(void *data, u64 val)
{
	u32 block = val >> bmtd.blk_shift;

	update_bmt(block);

	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_mark_good, NULL, mtk_bmt_debug_mark_good, "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(fops_mark_bad, NULL, mtk_bmt_debug_mark_bad, "%llu\n");
static void
mtk_bmt_add_debugfs(void)
{
	struct dentry *dir;

	dir = bmtd.debugfs_dir = debugfs_create_dir("mtk-bmt", NULL);
	if (!dir)
		return;

	debugfs_create_file_unsafe("mark_good", S_IWUSR, dir, NULL, &fops_mark_good);
	debugfs_create_file_unsafe("mark_bad", S_IWUSR, dir, NULL, &fops_mark_bad);
}
void mtk_bmt_detach(struct mtd_info *mtd)
{
	if (bmtd.mtd != mtd)
		return;

	if (bmtd.debugfs_dir)
		debugfs_remove_recursive(bmtd.debugfs_dir);
	bmtd.debugfs_dir = NULL;

	kfree(nand_bbt_buf);
	kfree(nand_data_buf);

	mtd->_read_oob = bmtd._read_oob;
	mtd->_write_oob = bmtd._write_oob;
	mtd->_erase = bmtd._erase;
	mtd->_block_isbad = bmtd._block_isbad;
	mtd->_block_markbad = bmtd._block_markbad;
	mtd->size = (u64)bmtd.total_blks << bmtd.blk_shift;

	memset(&bmtd, 0, sizeof(bmtd));
}
/* total_blocks - The total count of blocks that the Nand Chip has */
int mtk_bmt_attach(struct mtd_info *mtd)
{
	struct device_node *np;
	struct bbbt *bbt;
	u32 bufsz;
	u32 block;
	u16 total_blocks, pmt_block;
	int ret = 0;
	u32 bmt_pool_size, bmt_table_size;

	if (bmtd.mtd)
		return -ENOSPC;
	np = mtd_get_of_node(mtd);
	if (!np)
		return 0;

	if (!of_property_read_bool(np, "mediatek,bmt-v2"))
		return 0;

	if (of_property_read_u32(np, "mediatek,bmt-pool-size",
				 &bmt_pool_size) != 0)
		bmt_pool_size = 80;

	if (of_property_read_u8(np, "mediatek,bmt-oob-offset",
				&bmtd.oob_offset) != 0)
		bmtd.oob_offset = 0;

	if (of_property_read_u32(np, "mediatek,bmt-table-size",
				 &bmt_table_size) != 0)
		bmt_table_size = 0x2000U;
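
	/*
	 * Example device tree fragment (illustrative only; the property
	 * names match the lookups above, but the node label and values
	 * are assumptions, not defaults mandated by a binding):
	 *
	 *   &snfi {
	 *           mediatek,bmt-v2;
	 *           mediatek,bmt-pool-size = <80>;
	 *           mediatek,bmt-table-size = <0x2000>;
	 *   };
	 */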
	bmtd.mtd = mtd;
	mtk_bmt_replace_ops(mtd);

	bmtd.table_size = bmt_table_size;
	bmtd.blk_size = mtd->erasesize;
	bmtd.blk_shift = ffs(bmtd.blk_size) - 1;
	bmtd.pg_size = mtd->writesize;
	bmtd.pg_shift = ffs(bmtd.pg_size) - 1;
	total_blocks = mtd->size >> bmtd.blk_shift;
	pmt_block = total_blocks - bmt_pool_size - 2;

	mtd->size = (u64)pmt_block << bmtd.blk_shift;
	/*
	 *  ---------------------------------------
	 * | PMT(2blks) | BMT POOL(totalblks * 2%) |
	 *  ---------------------------------------
	 * ^            ^
	 * |            |
	 * pmt_block    pmt_block + 2blocks(pool_lba)
	 *
	 * ATTENTION:
	 *     The blocks ahead of the boundary block are stored in bb_tbl
	 *     and blocks behind are stored in bmt_tbl
	 */
	bmtd.pool_lba = (u16)(pmt_block + 2);
	bmtd.total_blks = total_blocks;
	bmtd.bb_max = bmtd.total_blks * BBPOOL_RATIO / 100;

	/* 3 buffers we need */
	bufsz = round_up(sizeof(struct bbbt) +
			 bmt_table_size * sizeof(struct bbmt), bmtd.pg_size);
	bmtd.bmt_pgs = bufsz >> bmtd.pg_shift;
	nand_bbt_buf = kzalloc(bufsz, GFP_KERNEL);
	nand_data_buf = kzalloc(bmtd.pg_size, GFP_KERNEL);

	if (!nand_bbt_buf || !nand_data_buf) {
		pr_info("nand: FATAL ERR: buffer allocation failed!\n");
		ret = -1;
		goto error;
	}

	memset(nand_bbt_buf, 0xff, bufsz);
	memset(nand_data_buf, 0xff, bmtd.pg_size);

	BBT_LOG("bbtbuf=0x%p(0x%x) dat=0x%p(0x%x)",
		nand_bbt_buf, bufsz, nand_data_buf, bmtd.pg_size);
	BBT_LOG("pool_lba=0x%x total_blks=0x%x bb_max=0x%x",
		bmtd.pool_lba, bmtd.total_blks, bmtd.bb_max);
	/* Scanning starts from the first page of the last block
	 * of the whole flash
	 */
	bbt = scan_bmt(bmtd.total_blks - 1);
	if (!bbt) {
		/* BMT not found: build one from scratch */
		if (bmtd.total_blks > BB_TABLE_MAX + BMT_TABLE_MAX) {
			pr_info("nand: FATAL: Too many blocks, can not support!\n");
			ret = -1;
			goto error;
		}

		bbt = (struct bbbt *)nand_bbt_buf;
		memset(bmt_tbl(bbt), BMT_TBL_DEF_VAL,
		       bmtd.table_size * sizeof(struct bbmt));

		if (scan_bad_blocks(bbt)) {
			ret = -1;
			goto error;
		}

		/* The BMT always goes into the last valid block in the pool */
		bmtd.bmt_blk_idx = upload_bmt(bbt, bmtd.bmt_blk_idx);
		block = bmt_tbl(bbt)[bmtd.bmt_blk_idx].block;
		pr_notice("[BBT] BMT.v2 is written into PBA:0x%x\n", block);

		if (bmtd.bmt_blk_idx == 0)
			pr_info("nand: Warning: no available block in BMT pool!\n");
		else if (bmtd.bmt_blk_idx == (u16)-1) {
			ret = -1;
			goto error;
		}
	}

	bmtd.bbt = bbt;

	mtk_bmt_add_debugfs();
	return 0;

error:
	mtk_bmt_detach(mtd);
	return ret;
}
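
/*
 * Integration sketch (hypothetical caller, not part of this file): a NAND
 * controller driver would typically call mtk_bmt_attach(mtd) once its
 * mtd_info is fully initialized, before mtd_device_register(), and
 * mtk_bmt_detach(mtd) from its remove path.
 */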
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Xiangsheng Hou <xiangsheng.hou@mediatek.com>, Felix Fietkau <nbd@nbd.name>");
MODULE_DESCRIPTION("Bad Block mapping management v2 for MediaTek NAND Flash Driver");