/*
 * Copyright (c) 2017 MediaTek Inc.
 * Author: Xiangsheng Hou <xiangsheng.hou@mediatek.com>
 * Copyright (c) 2020 Felix Fietkau <nbd@nbd.name>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/slab.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/mtk_bmt.h>
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/bits.h>
#define MAIN_SIGNATURE_OFFSET	0
#define OOB_SIGNATURE_OFFSET	1
#define BBPOOL_RATIO		2

#define BBT_LOG(fmt, ...)	pr_debug("[BBT][%s|%d] " fmt "\n", __func__, __LINE__, ##__VA_ARGS__)
/* Maximum 8k blocks */
#define BB_TABLE_MAX	bmtd.table_size
#define BMT_TABLE_MAX	(BB_TABLE_MAX * BBPOOL_RATIO / 100)
#define BMT_TBL_DEF_VAL	0x0
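/*
 * Backend operations for bad-block handling. Two implementations are
 * registered in mtk_bmt_attach() below: the MediaTek BMT v2 remapping
 * table (v2_ops) and a simple flash-resident bad block table (bbt_ops).
 */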
struct mtk_bmt_ops {
	char *sig;
	unsigned int sig_len;
	int (*init)(struct device_node *np);
	bool (*remap_block)(u16 block, u16 mapped_block, int copy_len);
	void (*unmap_block)(u16 block);
	u16 (*get_mapping_block)(int block);
	int (*debug)(void *data, u64 val);
};
struct bbbt {
	char signature[3];
	/* This version is used to distinguish the legacy and the new algorithm */
#define BBMT_VERSION 2
	unsigned char version;
	/* Below 2 tables will be written in SLC */
	u16 bb_tbl[];
};
struct bbmt {
	u16 block;
#define NO_MAPPED	0
#define NORMAL_MAPPED	1
#define BMT_MAPPED	2
	u16 mapped;
};
static struct bmt_desc {
	struct mtd_info *mtd;

	int (*_read_oob)(struct mtd_info *mtd, loff_t from,
			 struct mtd_oob_ops *ops);
	int (*_write_oob)(struct mtd_info *mtd, loff_t to,
			  struct mtd_oob_ops *ops);
	int (*_erase)(struct mtd_info *mtd, struct erase_info *instr);
	int (*_block_isbad)(struct mtd_info *mtd, loff_t ofs);
	int (*_block_markbad)(struct mtd_info *mtd, loff_t ofs);

	const struct mtk_bmt_ops *ops;

	struct bbbt *bbt;

	struct dentry *debugfs_dir;

	u32 table_size;
	u32 pg_size;
	u32 blk_size;
	u16 pg_shift;
	u16 blk_shift;
	/* bbt logical address */
	u16 pool_lba;
	/* bbt physical address */
	u16 pool_pba;
	/* Maximum count of bad blocks that the vendor guaranteed */
	u16 bb_max;
	/* Total blocks of the NAND chip */
	u16 total_blks;
	/* The block(n) BMT is located at (bmt_tbl[n]) */
	u16 bmt_blk_idx;
	/* How many pages are needed to store 'struct bbbt' */
	u32 bmt_pgs;

	const __be32 *remap_range;
	int remap_range_len;

	/* to compensate for driver level remapping */
	u8 oob_offset;
} bmtd;
static unsigned char *nand_bbt_buf;
static unsigned char *nand_data_buf;
/* -------- Unit conversions -------- */
static inline u32 blk_pg(u16 block)
{
	return (u32)(block << (bmtd.blk_shift - bmtd.pg_shift));
}
/* -------- NAND operations wrapper -------- */
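/*
 * Read one page of main data plus the first fdm_len free OOB (FDM) bytes
 * through the saved low-level _read_oob callback; bmtd.oob_offset shifts
 * the OOB window to compensate for driver level remapping.
 */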
static int
bbt_nand_read(u32 page, unsigned char *dat, int dat_len,
	      unsigned char *fdm, int fdm_len)
{
	struct mtd_oob_ops ops = {
		.mode = MTD_OPS_PLACE_OOB,
		.ooboffs = bmtd.oob_offset,
		.oobbuf = fdm,
		.ooblen = fdm_len,
		.datbuf = dat,
		.len = dat_len,
	};

	return bmtd._read_oob(bmtd.mtd, page << bmtd.pg_shift, &ops);
}
static inline int bbt_nand_erase(u16 block)
{
	struct mtd_info *mtd = bmtd.mtd;
	struct erase_info instr = {
		.addr = (loff_t)block << bmtd.blk_shift,
		.len = bmtd.blk_size,
	};

	return bmtd._erase(mtd, &instr);
}
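/*
 * Copy the contents of one eraseblock to another, page by page, including
 * the free OOB bytes. Copying stops once max_offset bytes have been
 * transferred, so only the already written part of a block is moved.
 */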
static inline int bbt_nand_copy(u16 dest_blk, u16 src_blk, loff_t max_offset)
{
	int pages = bmtd.blk_size >> bmtd.pg_shift;
	loff_t src = (loff_t)src_blk << bmtd.blk_shift;
	loff_t dest = (loff_t)dest_blk << bmtd.blk_shift;
	loff_t offset = 0;
	u8 oob[64];
	int i, ret;

	for (i = 0; i < pages; i++) {
		struct mtd_oob_ops rd_ops = {
			.mode = MTD_OPS_PLACE_OOB,
			.oobbuf = oob,
			.ooblen = min_t(int, bmtd.mtd->oobsize / pages, sizeof(oob)),
			.datbuf = nand_data_buf,
			.len = bmtd.pg_size,
		};
		struct mtd_oob_ops wr_ops = {
			.mode = MTD_OPS_PLACE_OOB,
			.oobbuf = oob,
			.ooblen = min_t(int, bmtd.mtd->oobsize / pages, sizeof(oob)),
			.datbuf = nand_data_buf,
			.len = bmtd.pg_size,
		};

		if (offset >= max_offset)
			break;

		ret = bmtd._read_oob(bmtd.mtd, src + offset, &rd_ops);
		if (ret < 0 && !mtd_is_bitflip(ret))
			return ret;

		ret = bmtd._write_oob(bmtd.mtd, dest + offset, &wr_ops);
		if (ret < 0)
			return ret;

		wr_ops.ooblen = rd_ops.oobretlen;
		offset += rd_ops.retlen;
	}

	return 0;
}
/* -------- Bad Blocks Management -------- */
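/*
 * bb_tbl[] and bmt_tbl[] share the nand_bbt_buf buffer: the bad block
 * mapping table occupies the first table_size u16 entries, and the BMT
 * pool table (struct bbmt entries) starts right behind it.
 */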
static inline struct bbmt *bmt_tbl(struct bbbt *bbbt)
{
	return (struct bbmt *)&bbbt->bb_tbl[bmtd.table_size];
}
static int
read_bmt(u16 block, unsigned char *dat, unsigned char *fdm, int fdm_len)
{
	u32 len = bmtd.bmt_pgs << bmtd.pg_shift;

	return bbt_nand_read(blk_pg(block), dat, len, fdm, fdm_len);
}
static int write_bmt(u16 block, unsigned char *dat)
{
	struct mtd_oob_ops ops = {
		.mode = MTD_OPS_PLACE_OOB,
		.ooboffs = OOB_SIGNATURE_OFFSET + bmtd.oob_offset,
		.oobbuf = bmtd.ops->sig,
		.ooblen = bmtd.ops->sig_len,
		.datbuf = dat,
		.len = bmtd.bmt_pgs << bmtd.pg_shift,
	};
	loff_t addr = (loff_t)block << bmtd.blk_shift;

	return bmtd._write_oob(bmtd.mtd, addr, &ops);
}
static u16 find_valid_block(u16 block)
{
	u8 fdm[4];
	int ret;
	int loop = 0;

retry:
	if (block >= bmtd.total_blks)
		return 0;

	ret = bbt_nand_read(blk_pg(block), nand_data_buf, bmtd.pg_size,
			    fdm, sizeof(fdm));
	/* Read the 1st byte of the FDM to judge whether it's a bad
	 * block or not
	 */
	if (ret || fdm[0] != 0xff) {
		pr_info("nand: found bad block 0x%x\n", block);
		if (loop >= bmtd.bb_max) {
			pr_info("nand: FATAL ERR: too many bad blocks!!\n");
			return 0;
		}

		loop++;
		block++;
		goto retry;
	}

	return block;
}
/* Find out all bad blocks, and fill in the mapping table */
static int scan_bad_blocks(struct bbbt *bbt)
{
	int i;
	u16 block;

	/* First time download, block 0 MUST NOT be a bad block,
	 * this is guaranteed by the vendor
	 */
	bbt->bb_tbl[0] = 0;

	/*
	 * Construct the mapping table of the normal data area (non-PMT/BMTPOOL)
	 * G - Good block; B - Bad block
	 *	     ---------------------------
	 * physical |G|G|B|G|B|B|G|G|G|G|B|G|B|
	 *	     ---------------------------
	 * What bb_tbl[i] looks like:
	 *   logical block(i):
	 *	0 1 2 3 4 5 6 7 8 9 a b c
	 *   mapped block(bb_tbl[i]):
	 *	0 1 3 6 7 8 9 b ......
	 * If a new bad block(n) occurs, search bmt_tbl to find
	 * an available block(x) and fill in bb_tbl[n] = x;
	 */
	for (i = 1; i < bmtd.pool_lba; i++) {
		bbt->bb_tbl[i] = find_valid_block(bbt->bb_tbl[i - 1] + 1);
		BBT_LOG("bb_tbl[0x%x] = 0x%x", i, bbt->bb_tbl[i]);
		if (bbt->bb_tbl[i] == 0)
			return -1;
	}

	/* Physical block start address of the BMT pool */
	bmtd.pool_pba = bbt->bb_tbl[i - 1] + 1;
	if (bmtd.pool_pba >= bmtd.total_blks - 2) {
		pr_info("nand: FATAL ERR: Too many bad blocks!!\n");
		return -1;
	}

	BBT_LOG("pool_pba=0x%x", bmtd.pool_pba);
	i = 0;
	block = bmtd.pool_pba;
	/*
	 * The bmt table is used for runtime bad block mapping
	 * G - Good block; B - Bad block
	 *	     ---------------------------
	 * physical |G|G|B|G|B|B|G|G|G|G|B|G|B|
	 *	     ---------------------------
	 * block:    0 1 2 3 4 5 6 7 8 9 a b c
	 * What bmt_tbl[i] looks like in the initial state:
	 *   N - Not mapped (Available)
	 * If a new bad block occurs, search bmt_tbl for an
	 * available block and occupy it.
	 *
	 * The BMT itself always lives in the last valid block of the pool.
	 */
	while ((block = find_valid_block(block)) != 0) {
		bmt_tbl(bbt)[i].block = block;
		bmt_tbl(bbt)[i].mapped = NO_MAPPED;
		BBT_LOG("bmt_tbl[%d].block = 0x%x", i, block);
		block++;
		i++;
	}

	/* i - How many available blocks in the pool, which is the length of bmt_tbl[]
	 * bmtd.bmt_blk_idx - bmt_tbl[bmtd.bmt_blk_idx].block => the BMT block
	 */
	bmtd.bmt_blk_idx = i - 1;
	bmt_tbl(bbt)[bmtd.bmt_blk_idx].mapped = BMT_MAPPED;

	if (i < 1) {
		pr_info("nand: FATAL ERR: no space to store BMT!!\n");
		return -1;
	}

	pr_info("[BBT] %d available blocks in BMT pool\n", i);

	return 0;
}
static bool is_valid_bmt(unsigned char *buf, unsigned char *fdm)
{
	struct bbbt *bbt = (struct bbbt *)buf;
	u8 *sig = (u8 *)bbt->signature + MAIN_SIGNATURE_OFFSET;

	if (memcmp(bbt->signature + MAIN_SIGNATURE_OFFSET, "BMT", 3) == 0 &&
	    memcmp(fdm + OOB_SIGNATURE_OFFSET, "bmt", 3) == 0) {
		if (bbt->version == BBMT_VERSION)
			return true;
	}
	BBT_LOG("[BBT] BMT version mismatch, please upgrade preloader and uboot! sig=%02x%02x%02x, fdm=%02x%02x%02x",
		sig[0], sig[1], sig[2],
		fdm[1], fdm[2], fdm[3]);
	return false;
}
static u16 get_bmt_index(struct bbmt *bmt)
{
	int i = 0;

	while (bmt[i].block != BMT_TBL_DEF_VAL) {
		if (bmt[i].mapped == BMT_MAPPED)
			return i;
		i++;
	}

	return 0;
}
static struct bbbt *scan_bmt(u16 block)
{
	u8 fdm[4];

	if (block < bmtd.pool_lba)
		return NULL;

	if (read_bmt(block, nand_bbt_buf, fdm, sizeof(fdm)))
		return scan_bmt(block - 1);

	if (is_valid_bmt(nand_bbt_buf, fdm)) {
		bmtd.bmt_blk_idx = get_bmt_index(bmt_tbl((struct bbbt *)nand_bbt_buf));
		if (bmtd.bmt_blk_idx == 0) {
			pr_info("[BBT] FATAL ERR: bmt block index is wrong!\n");
			return NULL;
		}
		pr_info("[BBT] BMT.v2 is found at 0x%x\n", block);
		return (struct bbbt *)nand_bbt_buf;
	}

	return scan_bmt(block - 1);
}
/* Write the Burner Bad Block Table to NAND flash,
 * n - write the BMT to bmt_tbl[n]
 */
static u16 upload_bmt(struct bbbt *bbt, int n)
{
	u16 block;

retry:
	if (n < 0 || bmt_tbl(bbt)[n].mapped == NORMAL_MAPPED) {
		pr_info("nand: FATAL ERR: no space to store BMT!\n");
		return (u16)-1;
	}

	block = bmt_tbl(bbt)[n].block;
	BBT_LOG("n = 0x%x, block = 0x%x", n, block);
	if (bbt_nand_erase(block)) {
		bmt_tbl(bbt)[n].block = 0;
		/* erase failed, try the previous block: bmt_tbl[n - 1].block */
		n--;
		goto retry;
	}

	/* The main signature offset is fixed at 0,
	 * the OOB signature offset is fixed at 1
	 */
	memcpy(bbt->signature + MAIN_SIGNATURE_OFFSET, "BMT", 3);
	bbt->version = BBMT_VERSION;

	if (write_bmt(block, (unsigned char *)bbt)) {
		bmt_tbl(bbt)[n].block = 0;
		/* write failed, try the previous block in bmt_tbl[n - 1] */
		n--;
		goto retry;
	}

	/* Return the current index(n) of the BMT pool (bmt_tbl[n]) */
	return n;
}
static u16 find_valid_block_in_pool(struct bbbt *bbt)
{
	int i;

	if (bmtd.bmt_blk_idx == 0)
		goto error;

	for (i = 0; i < bmtd.bmt_blk_idx; i++) {
		if (bmt_tbl(bbt)[i].block != 0 && bmt_tbl(bbt)[i].mapped == NO_MAPPED) {
			bmt_tbl(bbt)[i].mapped = NORMAL_MAPPED;
			return bmt_tbl(bbt)[i].block;
		}
	}

error:
	pr_info("nand: FATAL ERR: the BMT pool has run out!\n");
	return 0;
}
/* We met a bad block; mark it as bad and map it to a valid block in the pool.
 * If it was a write failure, the data also has to be copied to the mapped block.
 */
static bool remap_block_v2(u16 block, u16 mapped_block, int copy_len)
{
	struct bbbt *bbt = bmtd.bbt;
	u16 mapped_blk;

	mapped_blk = find_valid_block_in_pool(bbt);
	if (mapped_blk == 0)
		return false;

	/* Map the new bad block to an available block in the pool */
	bbt->bb_tbl[block] = mapped_blk;

	/* Erase the new block */
	bbt_nand_erase(mapped_blk);
	bbt_nand_copy(mapped_blk, block, copy_len);

	bmtd.bmt_blk_idx = upload_bmt(bbt, bmtd.bmt_blk_idx);

	return true;
}
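/*
 * "mediatek,bmt-remap-range" is parsed as pairs of big-endian <start end>
 * byte addresses. A block is only subject to remapping when it falls into
 * one of these windows; with no property present the whole device is covered.
 */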
static bool
mapping_block_in_range(int block, int *start, int *end)
{
	const __be32 *cur = bmtd.remap_range;
	u32 addr = block << bmtd.blk_shift;
	int i;

	if (!cur || !bmtd.remap_range_len) {
		*start = 0;
		*end = bmtd.total_blks;
		return true;
	}

	for (i = 0; i < bmtd.remap_range_len; i++, cur += 2) {
		if (addr < be32_to_cpu(cur[0]) || addr >= be32_to_cpu(cur[1]))
			continue;

		*start = be32_to_cpu(cur[0]);
		*end = be32_to_cpu(cur[1]);
		return true;
	}

	return false;
}
static u16
get_mapping_block_index_v2(int block)
{
	int start, end;

	if (block >= bmtd.pool_lba)
		return block;

	if (!mapping_block_in_range(block, &start, &end))
		return block;

	return bmtd.bbt->bb_tbl[block];
}
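/*
 * Wrapped mtd read path: requests are split at eraseblock boundaries,
 * each chunk is redirected through get_mapping_block(), and a block is
 * remapped (and the chunk retried up to 10 times) when the low-level read
 * fails or reports bitflips at or above mtd->bitflip_threshold.
 */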
static int
mtk_bmt_read(struct mtd_info *mtd, loff_t from,
	     struct mtd_oob_ops *ops)
{
	struct mtd_oob_ops cur_ops = *ops;
	int retry_count = 0;
	loff_t cur_from;
	int max_bitflips = 0;
	int start, end;

	ops->retlen = 0;
	ops->oobretlen = 0;

	while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) {
		int cur_ret;

		u32 offset = from & (bmtd.blk_size - 1);
		u32 block = from >> bmtd.blk_shift;
		u32 cur_block;

		cur_block = bmtd.ops->get_mapping_block(block);
		cur_from = ((loff_t)cur_block << bmtd.blk_shift) + offset;

		cur_ops.oobretlen = 0;
		cur_ops.retlen = 0;
		cur_ops.len = min_t(u32, mtd->erasesize - offset,
				    ops->len - ops->retlen);
		cur_ret = bmtd._read_oob(mtd, cur_from, &cur_ops);
		max_bitflips = max_t(int, max_bitflips, cur_ret);
		if (cur_ret < 0 && !mtd_is_bitflip(cur_ret)) {
			bmtd.ops->remap_block(block, cur_block, mtd->erasesize);
			if (retry_count++ < 10)
				continue;

			return cur_ret;
		}

		if (cur_ret >= mtd->bitflip_threshold &&
		    mapping_block_in_range(block, &start, &end))
			bmtd.ops->remap_block(block, cur_block, mtd->erasesize);

		ops->retlen += cur_ops.retlen;
		ops->oobretlen += cur_ops.oobretlen;

		cur_ops.datbuf += cur_ops.retlen;
		cur_ops.oobbuf += cur_ops.oobretlen;
		cur_ops.ooblen -= cur_ops.oobretlen;

		if (!cur_ops.len)
			cur_ops.len = mtd->erasesize - offset;

		from += cur_ops.len;
		retry_count = 0;
	}

	return max_bitflips;
}
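/*
 * Wrapped mtd write path: mirrors mtk_bmt_read(), but a failed write
 * triggers a remap of the target block (copying the data written so far,
 * i.e. up to 'offset' bytes) before the chunk is retried.
 */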
static int
mtk_bmt_write(struct mtd_info *mtd, loff_t to,
	      struct mtd_oob_ops *ops)
{
	struct mtd_oob_ops cur_ops = *ops;
	int retry_count = 0;
	loff_t cur_to;
	int ret;

	ops->retlen = 0;
	ops->oobretlen = 0;

	while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) {
		u32 offset = to & (bmtd.blk_size - 1);
		u32 block = to >> bmtd.blk_shift;
		u32 cur_block;

		cur_block = bmtd.ops->get_mapping_block(block);
		cur_to = ((loff_t)cur_block << bmtd.blk_shift) + offset;

		cur_ops.oobretlen = 0;
		cur_ops.retlen = 0;
		cur_ops.len = min_t(u32, bmtd.blk_size - offset,
				    ops->len - ops->retlen);
		ret = bmtd._write_oob(mtd, cur_to, &cur_ops);
		if (ret < 0) {
			bmtd.ops->remap_block(block, cur_block, offset);
			if (retry_count++ < 10)
				continue;

			return ret;
		}

		ops->retlen += cur_ops.retlen;
		ops->oobretlen += cur_ops.oobretlen;

		cur_ops.datbuf += cur_ops.retlen;
		cur_ops.oobbuf += cur_ops.oobretlen;
		cur_ops.ooblen -= cur_ops.oobretlen;

		if (!cur_ops.len)
			cur_ops.len = mtd->erasesize - offset;

		to += cur_ops.len;
		retry_count = 0;
	}

	return 0;
}
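/*
 * Wrapped mtd erase path: the requested range is erased one eraseblock at
 * a time through the current mapping; a failing block is remapped (without
 * copying, copy_len = 0) and the erase is retried up to 10 times.
 */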
static int
mtk_bmt_mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct erase_info mapped_instr = {
		.len = bmtd.blk_size,
	};
	int retry_count = 0;
	int ret = 0;
	u64 start_addr, end_addr;
	u16 orig_block, block;

	start_addr = instr->addr & (~mtd->erasesize_mask);
	end_addr = instr->addr + instr->len;

	while (start_addr < end_addr) {
		orig_block = start_addr >> bmtd.blk_shift;
		block = bmtd.ops->get_mapping_block(orig_block);
		mapped_instr.addr = (loff_t)block << bmtd.blk_shift;
		ret = bmtd._erase(mtd, &mapped_instr);
		if (ret) {
			bmtd.ops->remap_block(orig_block, block, 0);
			if (retry_count++ < 10)
				continue;
			instr->fail_addr = start_addr;
			break;
		}
		start_addr += mtd->erasesize;
		retry_count = 0;
	}

	return ret;
}
static int
mtk_bmt_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	u16 orig_block = ofs >> bmtd.blk_shift;
	int retry_count = 0;
	u16 block;
	int ret;

retry:
	block = bmtd.ops->get_mapping_block(orig_block);
	ret = bmtd._block_isbad(mtd, (loff_t)block << bmtd.blk_shift);
	if (ret) {
		bmtd.ops->remap_block(orig_block, block, bmtd.blk_size);
		if (retry_count++ < 10)
			goto retry;
	}

	return ret;
}
static int
mtk_bmt_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	u16 orig_block = ofs >> bmtd.blk_shift;
	u16 block = bmtd.ops->get_mapping_block(orig_block);

	bmtd.ops->remap_block(orig_block, block, bmtd.blk_size);

	return bmtd._block_markbad(mtd, (loff_t)block << bmtd.blk_shift);
}
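/*
 * Save the mtd callbacks installed by the NAND driver and replace them
 * with the remapping wrappers above; mtk_bmt_detach() restores them.
 */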
static void
mtk_bmt_replace_ops(struct mtd_info *mtd)
{
	bmtd._read_oob = mtd->_read_oob;
	bmtd._write_oob = mtd->_write_oob;
	bmtd._erase = mtd->_erase;
	bmtd._block_isbad = mtd->_block_isbad;
	bmtd._block_markbad = mtd->_block_markbad;

	mtd->_read_oob = mtk_bmt_read;
	mtd->_write_oob = mtk_bmt_write;
	mtd->_erase = mtk_bmt_mtd_erase;
	mtd->_block_isbad = mtk_bmt_block_isbad;
	mtd->_block_markbad = mtk_bmt_block_markbad;
}
static void
unmap_block_v2(u16 block)
{
	bmtd.bbt->bb_tbl[block] = block;
	bmtd.bmt_blk_idx = upload_bmt(bmtd.bbt, bmtd.bmt_blk_idx);
}
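/*
 * debugfs hooks (mtk-bmt/mark_good, mark_bad, debug): the value written to
 * mark_good/mark_bad is a byte offset into the flash, which is converted to
 * a block number and then unmapped or remapped through the active backend.
 */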
static int mtk_bmt_debug_mark_good(void *data, u64 val)
{
	bmtd.ops->unmap_block(val >> bmtd.blk_shift);

	return 0;
}
static int mtk_bmt_debug_mark_bad(void *data, u64 val)
{
	u32 block = val >> bmtd.blk_shift;
	u16 cur_block = bmtd.ops->get_mapping_block(block);

	bmtd.ops->remap_block(block, cur_block, bmtd.blk_size);

	return 0;
}
static unsigned long *
mtk_bmt_get_mapping_mask(void)
{
	struct bbmt *bbmt = bmt_tbl(bmtd.bbt);
	int main_blocks = bmtd.mtd->size >> bmtd.blk_shift;
	unsigned long *used;
	int i, k;

	used = kcalloc(BIT_WORD(bmtd.bmt_blk_idx) + 1, sizeof(unsigned long), GFP_KERNEL);
	if (!used)
		return NULL;

	for (i = 1; i < main_blocks; i++) {
		if (bmtd.bbt->bb_tbl[i] == i)
			continue;

		for (k = 0; k < bmtd.bmt_blk_idx; k++) {
			if (bmtd.bbt->bb_tbl[i] != bbmt[k].block)
				continue;

			set_bit(k, used);
			break;
		}
	}

	return used;
}
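/*
 * debugfs "debug" hook for the v2 backend: prints the active bb_tbl
 * remappings and the state of every bmt_tbl entry, and releases pool
 * entries that are marked NORMAL_MAPPED but no longer referenced by any
 * bb_tbl slot (the "free block" path), then rewrites the BMT via upload_bmt().
 */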
static int mtk_bmt_debug_v2(void *data, u64 val)
{
	struct bbmt *bbmt = bmt_tbl(bmtd.bbt);
	struct mtd_info *mtd = bmtd.mtd;
	unsigned long *used;
	int main_blocks = mtd->size >> bmtd.blk_shift;
	int i;

	used = mtk_bmt_get_mapping_mask();
	if (!used)
		return -ENOMEM;

	for (i = 1; i < main_blocks; i++) {
		if (bmtd.bbt->bb_tbl[i] == i)
			continue;

		printk("remap [%x->%x]\n", i, bmtd.bbt->bb_tbl[i]);
	}

	for (i = 0; i <= bmtd.bmt_blk_idx; i++) {
		char c = ' ';

		switch (bbmt[i].mapped) {
		case NORMAL_MAPPED:
			c = 'm';
			if (test_bit(i, used))
				c = 'M';
			break;
		case BMT_MAPPED:
			c = 'B';
			break;
		}

		printk("[%x:%c] = 0x%x\n", i, c, bbmt[i].block);
	}

	for (i = 0; i <= bmtd.bmt_blk_idx; i++) {
		if (bbmt[i].mapped != NORMAL_MAPPED)
			continue;

		if (test_bit(i, used))
			continue;

		bbmt[i].mapped = NO_MAPPED;
		printk("free block [%d:%x]\n", i, bbmt[i].block);
	}

	bmtd.bmt_blk_idx = upload_bmt(bmtd.bbt, bmtd.bmt_blk_idx);

	kfree(used);

	return 0;
}
static int mtk_bmt_debug(void *data, u64 val)
{
	return bmtd.ops->debug(data, val);
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_mark_good, NULL, mtk_bmt_debug_mark_good, "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(fops_mark_bad, NULL, mtk_bmt_debug_mark_bad, "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(fops_debug, NULL, mtk_bmt_debug, "%llu\n");
static void
mtk_bmt_add_debugfs(void)
{
	struct dentry *dir;

	dir = bmtd.debugfs_dir = debugfs_create_dir("mtk-bmt", NULL);
	if (!dir)
		return;

	debugfs_create_file_unsafe("mark_good", S_IWUSR, dir, NULL, &fops_mark_good);
	debugfs_create_file_unsafe("mark_bad", S_IWUSR, dir, NULL, &fops_mark_bad);
	debugfs_create_file_unsafe("debug", S_IWUSR, dir, NULL, &fops_debug);
}
void mtk_bmt_detach(struct mtd_info *mtd)
{
	if (bmtd.mtd != mtd)
		return;

	if (bmtd.debugfs_dir)
		debugfs_remove_recursive(bmtd.debugfs_dir);
	bmtd.debugfs_dir = NULL;

	kfree(nand_bbt_buf);
	kfree(nand_data_buf);

	mtd->_read_oob = bmtd._read_oob;
	mtd->_write_oob = bmtd._write_oob;
	mtd->_erase = bmtd._erase;
	mtd->_block_isbad = bmtd._block_isbad;
	mtd->_block_markbad = bmtd._block_markbad;
	mtd->size = (loff_t)bmtd.total_blks << bmtd.blk_shift;

	memset(&bmtd, 0, sizeof(bmtd));
}
static int mtk_bmt_init_v2(struct device_node *np)
{
	u32 bmt_pool_size, bmt_table_size;
	u32 bufsz, block;
	u16 pmt_block;

	if (of_property_read_u32(np, "mediatek,bmt-pool-size",
				 &bmt_pool_size) != 0)
		bmt_pool_size = 80;

	if (of_property_read_u8(np, "mediatek,bmt-oob-offset",
				&bmtd.oob_offset) != 0)
		bmtd.oob_offset = 0;

	if (of_property_read_u32(np, "mediatek,bmt-table-size",
				 &bmt_table_size) != 0)
		bmt_table_size = 0x2000U;
	bmtd.table_size = bmt_table_size;

	pmt_block = bmtd.total_blks - bmt_pool_size - 2;

	bmtd.mtd->size = (loff_t)pmt_block << bmtd.blk_shift;

	/*
	 *  ---------------------------------------
	 * | PMT(2blks) |  BMT POOL(totalblks * 2%) |
	 *  ---------------------------------------
	 * pmt_block	pmt_block + 2 blocks (pool_lba)
	 *
	 * The blocks ahead of the boundary block are stored in bb_tbl
	 * and the blocks behind it are stored in bmt_tbl
	 */
	bmtd.pool_lba = (u16)(pmt_block + 2);
	bmtd.bb_max = bmtd.total_blks * BBPOOL_RATIO / 100;

	bufsz = round_up(sizeof(struct bbbt) +
			 bmt_table_size * sizeof(struct bbmt), bmtd.pg_size);
	bmtd.bmt_pgs = bufsz >> bmtd.pg_shift;

	nand_bbt_buf = kzalloc(bufsz, GFP_KERNEL);
	if (!nand_bbt_buf)
		return -ENOMEM;

	memset(nand_bbt_buf, 0xff, bufsz);

	/* Scanning starts from the first page of the last block
	 * of the whole flash
	 */
	bmtd.bbt = scan_bmt(bmtd.total_blks - 1);
	if (!bmtd.bbt) {
		/* BMT not found: build a fresh one */
		if (bmtd.total_blks > BB_TABLE_MAX + BMT_TABLE_MAX) {
			pr_info("nand: FATAL: Too many blocks, cannot support!\n");
			return -1;
		}

		bmtd.bbt = (struct bbbt *)nand_bbt_buf;
		memset(bmt_tbl(bmtd.bbt), BMT_TBL_DEF_VAL,
		       bmtd.table_size * sizeof(struct bbmt));

		if (scan_bad_blocks(bmtd.bbt))
			return -1;

		/* The BMT always lives in the last valid block of the pool */
		bmtd.bmt_blk_idx = upload_bmt(bmtd.bbt, bmtd.bmt_blk_idx);
		block = bmt_tbl(bmtd.bbt)[bmtd.bmt_blk_idx].block;
		pr_notice("[BBT] BMT.v2 is written into PBA:0x%x\n", block);

		if (bmtd.bmt_blk_idx == 0)
			pr_info("nand: Warning: no available block in BMT pool!\n");
		else if (bmtd.bmt_blk_idx == (u16)-1)
			return -1;
	}

	return 0;
}
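/*
 * bbt backend: block state is kept in nand_bbt_buf as a packed array with
 * two bits per block (byte index block / 4, bit offset (block % 4) * 2);
 * a non-zero field marks the block as bad.
 */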
static bool
bbt_block_is_bad(u16 block)
{
	u8 cur = nand_bbt_buf[block / 4];

	return cur & (3 << ((block % 4) * 2));
}

static void
bbt_set_block_state(u16 block, bool bad)
{
	u8 mask = (3 << ((block % 4) * 2));

	if (bad)
		nand_bbt_buf[block / 4] |= mask;
	else
		nand_bbt_buf[block / 4] &= ~mask;

	bbt_nand_erase(bmtd.bmt_blk_idx);
	write_bmt(bmtd.bmt_blk_idx, nand_bbt_buf);
}
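/*
 * Map a logical block to a physical block for the bbt backend: bad blocks
 * inside the remap window are skipped, and once the window overflows the
 * remaining logical blocks are remapped onto the bad blocks themselves.
 */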
static u16
get_mapping_block_index_bbt(int block)
{
	int start, end, ofs;
	int bad_blocks = 0;
	int i;

	if (!mapping_block_in_range(block, &start, &end))
		return block;

	start >>= bmtd.blk_shift;
	end >>= bmtd.blk_shift;
	/* skip bad blocks within the mapping range */
	ofs = block - start;
	for (i = start; i < end; i++) {
		if (bbt_block_is_bad(i)) {
			bad_blocks++;
			continue;
		}

		if (!ofs)
			return i;
		ofs--;
	}

	/* when overflowing, remap remaining blocks to bad ones */
	for (i = end - 1; bad_blocks > 0; i--) {
		if (!bbt_block_is_bad(i))
			continue;

		bad_blocks--;
		if (bad_blocks <= ofs)
			return i;
	}

	return block;
}
static bool remap_block_bbt(u16 block, u16 mapped_blk, int copy_len)
{
	int start, end;
	u16 new_blk;

	if (!mapping_block_in_range(block, &start, &end))
		return false;

	bbt_set_block_state(mapped_blk, true);

	new_blk = get_mapping_block_index_bbt(block);
	bbt_nand_erase(new_blk);
	bbt_nand_copy(new_blk, mapped_blk, copy_len);

	return true;
}
static void
unmap_block_bbt(u16 block)
{
	bbt_set_block_state(block, false);
}
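/*
 * Locate the factory bad block table: scan the last five blocks of the
 * flash for a page whose first OOB byte is 0xff (not marked bad) and whose
 * OOB bytes 1..7 carry the "mtknand" signature; that block holds the bbt
 * and is remembered in bmtd.bmt_blk_idx.
 */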
static int
mtk_bmt_read_bbt(void)
{
	u8 oob_buf[8];
	int i;

	for (i = bmtd.total_blks - 1; i >= bmtd.total_blks - 5; i--) {
		u32 page = i << (bmtd.blk_shift - bmtd.pg_shift);

		if (bbt_nand_read(page, nand_bbt_buf, bmtd.pg_size,
				  oob_buf, sizeof(oob_buf))) {
			pr_info("read_bbt: could not read block %d\n", i);
			continue;
		}

		if (oob_buf[0] != 0xff) {
			pr_info("read_bbt: bad block at %d\n", i);
			continue;
		}

		if (memcmp(&oob_buf[1], "mtknand", 7) != 0) {
			pr_info("read_bbt: signature mismatch in block %d\n", i);
			print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1, oob_buf, 8, 1);
			continue;
		}

		pr_info("read_bbt: found bbt at block %d\n", i);
		bmtd.bmt_blk_idx = i;
		return 0;
	}

	return -EIO;
}
static int
mtk_bmt_init_bbt(struct device_node *np)
{
	int buf_size = round_up(bmtd.total_blks >> 2, bmtd.blk_size);
	int ret;

	nand_bbt_buf = kmalloc(buf_size, GFP_KERNEL);
	if (!nand_bbt_buf)
		return -ENOMEM;

	memset(nand_bbt_buf, 0xff, buf_size);
	bmtd.mtd->size -= 4 * bmtd.mtd->erasesize;

	ret = mtk_bmt_read_bbt();
	if (ret)
		return ret;

	bmtd.bmt_pgs = buf_size / bmtd.pg_size;

	return 0;
}
static int mtk_bmt_debug_bbt(void *data, u64 val)
{
	char buf[5];
	int i, k;

	for (i = 0; i < bmtd.total_blks; i += 4) {
		u8 cur = nand_bbt_buf[i / 4];

		for (k = 0; k < 4; k++, cur >>= 2)
			buf[k] = (cur & 3) ? 'B' : '.';

		buf[4] = 0;
		printk("[%06x] %s\n", i * bmtd.blk_size, buf);
	}

	for (i = bmtd.bmt_blk_idx; i < bmtd.total_blks - 1; i++)
		bbt_nand_erase(bmtd.bmt_blk_idx);

	bmtd.bmt_blk_idx = bmtd.total_blks - 1;
	bbt_nand_erase(bmtd.bmt_blk_idx);
	write_bmt(bmtd.bmt_blk_idx, nand_bbt_buf);

	return 0;
}
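/*
 * Entry point called by the NAND driver: the backend is selected from the
 * device tree ("mediatek,bmt-v2" or "mediatek,bbt"), an optional
 * "mediatek,bmt-remap-range" limits which blocks may be remapped, the mtd
 * callbacks are wrapped, and the chosen backend's init routine builds or
 * loads its table. On failure mtk_bmt_detach() undoes the attach.
 */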
int mtk_bmt_attach(struct mtd_info *mtd)
{
	static const struct mtk_bmt_ops v2_ops = {
		.sig = "bmt",
		.sig_len = 3,
		.init = mtk_bmt_init_v2,
		.remap_block = remap_block_v2,
		.unmap_block = unmap_block_v2,
		.get_mapping_block = get_mapping_block_index_v2,
		.debug = mtk_bmt_debug_v2,
	};
	static const struct mtk_bmt_ops bbt_ops = {
		.sig = "mtknand",
		.sig_len = 7,
		.init = mtk_bmt_init_bbt,
		.remap_block = remap_block_bbt,
		.unmap_block = unmap_block_bbt,
		.get_mapping_block = get_mapping_block_index_bbt,
		.debug = mtk_bmt_debug_bbt,
	};
	struct device_node *np;
	int ret;

	np = mtd_get_of_node(mtd);
	if (!np)
		return 0;

	if (of_property_read_bool(np, "mediatek,bmt-v2"))
		bmtd.ops = &v2_ops;
	else if (of_property_read_bool(np, "mediatek,bbt"))
		bmtd.ops = &bbt_ops;
	else
		return 0;

	bmtd.remap_range = of_get_property(np, "mediatek,bmt-remap-range",
					   &bmtd.remap_range_len);
	bmtd.remap_range_len /= 8;

	bmtd.mtd = mtd;
	mtk_bmt_replace_ops(mtd);

	bmtd.blk_size = mtd->erasesize;
	bmtd.blk_shift = ffs(bmtd.blk_size) - 1;
	bmtd.pg_size = mtd->writesize;
	bmtd.pg_shift = ffs(bmtd.pg_size) - 1;
	bmtd.total_blks = mtd->size >> bmtd.blk_shift;

	nand_data_buf = kzalloc(bmtd.pg_size, GFP_KERNEL);
	if (!nand_data_buf) {
		pr_info("nand: FATAL ERR: allocate buffer failed!\n");
		ret = -1;
		goto error;
	}

	memset(nand_data_buf, 0xff, bmtd.pg_size);

	ret = bmtd.ops->init(np);
	if (ret)
		goto error;

	mtk_bmt_add_debugfs();
	return 0;

error:
	mtk_bmt_detach(mtd);
	return ret;
}
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Xiangsheng Hou <xiangsheng.hou@mediatek.com>, Felix Fietkau <nbd@nbd.name>");
MODULE_DESCRIPTION("Bad block mapping management v2 for MediaTek NAND flash driver");