1 #include <linux/crc32.h>
2 #include <linux/slab.h>
5 #define nlog_err(ni, ...) printk(KERN_ERR __VA_ARGS__)
6 #define nlog_info(ni, ...) printk(KERN_INFO __VA_ARGS__)
7 #define nlog_debug(ni, ...) printk(KERN_INFO __VA_ARGS__)
8 #define nlog_warn(ni, ...) printk(KERN_WARNING __VA_ARGS__)
/* On-flash magics: "NMM0" marks the signature, "NMM1" marks an info table */
#define NMBM_MAGIC_SIGNATURE 0x304d4d4e /* NMM0 */
#define NMBM_MAGIC_INFO_TABLE 0x314d4d4e /* NMM1 */

/* Version word layout: major in bits 0-15, minor in bits 16-31 */
#define NMBM_VERSION_MAJOR_S 0
#define NMBM_VERSION_MAJOR_M 0xffff
#define NMBM_VERSION_MINOR_S 16
#define NMBM_VERSION_MINOR_M 0xffff
#define NMBM_VERSION_MAKE(major, minor) (((major) & NMBM_VERSION_MAJOR_M) | \
					 (((minor) & NMBM_VERSION_MINOR_M) << \
					  NMBM_VERSION_MINOR_S))
#define NMBM_VERSION_MAJOR_GET(ver) (((ver) >> NMBM_VERSION_MAJOR_S) & \
				     NMBM_VERSION_MAJOR_M)
#define NMBM_VERSION_MINOR_GET(ver) (((ver) >> NMBM_VERSION_MINOR_S) & \
				     NMBM_VERSION_MINOR_M)

/* Block state bitmap: 2 bits per block packed into u32 units */
#define NMBM_BITMAP_UNIT_SIZE (sizeof(u32))
#define NMBM_BITMAP_BITS_PER_BLOCK 2
#define NMBM_BITMAP_BITS_PER_UNIT (8 * sizeof(u32))
#define NMBM_BITMAP_BLOCKS_PER_UNIT (NMBM_BITMAP_BITS_PER_UNIT / \
				     NMBM_BITMAP_BITS_PER_BLOCK)

/* Spare-block sizing: rounded (count / DIV) * MULTI, clamped to MIN */
#define NMBM_SPARE_BLOCK_MULTI 1
#define NMBM_SPARE_BLOCK_DIV 2
#define NMBM_SPARE_BLOCK_MIN 2

/* Management area sizing parameters */
#define NMBM_MGMT_DIV 16
#define NMBM_MGMT_BLOCKS_MIN 32

/* Retry count used by the page/block read, write and erase helpers */
#define NMBM_TRY_COUNT 3

/* 2-bit block states stored in the block state bitmap */
#define BLOCK_ST_BAD 0
#define BLOCK_ST_NEED_REMAP 2
#define BLOCK_ST_GOOD 3
#define BLOCK_ST_MASK 3

/* Current on-flash format version */
#define NMBM_VER_MAJOR 1
#define NMBM_VER_MINOR 0
#define NMBM_VER NMBM_VERSION_MAKE(NMBM_VER_MAJOR, \
				   NMBM_VER_MINOR)
/*
 * NOTE(review): the struct declarations below are fragmentary in this
 * extraction -- the embedded original line numbers (57, 68, 76, ...) skip,
 * so closing braces and many members are missing. struct nmbm_header is
 * referenced but its definition is not visible here. Kept byte-identical;
 * comments only. Do not hand-edit without the complete source.
 */
57 struct nmbm_signature
{
58 struct nmbm_header header
;
/* On-flash header of an info table; mapping_table_off locates the
 * block mapping table inside the cached table image (see
 * nmbm_generate_info_table_cache). */
68 struct nmbm_info_table_header
{
69 struct nmbm_header header
;
72 u32 mapping_table_off
;
/* In-memory state of one NMBM instance. Fields below are the subset
 * visible in this extraction. */
76 struct nmbm_instance
{
81 struct nmbm_signature signature
;
85 u32 info_table_spare_blocks
;
86 struct nmbm_info_table_header info_table
;
/* bumped by nmbm_set_block_state() when a block's 2-bit state changes */
89 u32 block_state_changed
;
/* logical->physical block map; -1 marks unusable/management entries */
92 int32_t *block_mapping
;
93 u32 block_mapping_changed
;
94 u32 mapping_table_size
;
101 u32 data_block_count
;
106 u32 mapping_blocks_ba
;
107 u32 mapping_blocks_top_ba
;
111 u32 max_reserved_blocks
;
/* whether erased (0xff) pages are still ECC-protected on this chip */
112 bool empty_page_ecc_ok
;
/*
 * nmbm_crc32 - CRC32 (little-endian) of a buffer, delegating to crc32_le().
 * NOTE(review): interior lines are missing from this extraction (embedded
 * numbering jumps 119 -> 127): `chksz` is used below but its computation
 * and the surrounding chunking loop are not visible. Kept byte-identical.
 */
116 static inline u32
nmbm_crc32(u32 crcval
, const void *buf
, size_t size
)
119 const unsigned char *p
= buf
;
127 crcval
= crc32_le(crcval
, p
, chksz
);
135 * nlog_table_creation - Print log of table creation event
136 * @ni: NMBM instance structure
137 * @main_table: whether the table is main info table
138 * @start_ba: start block address of the table
139 * @end_ba: block address after the end of the table
141 static void nlog_table_creation(struct nmbm_instance
*ni
, bool main_table
,
142 uint32_t start_ba
, uint32_t end_ba
)
144 if (start_ba
== end_ba
- 1)
145 nlog_info(ni
, "%s info table has been written to block %u\n",
146 main_table
? "Main" : "Backup", start_ba
);
148 nlog_info(ni
, "%s info table has been written to block %u-%u\n",
149 main_table
? "Main" : "Backup", start_ba
, end_ba
- 1);
153 * nlog_table_update - Print log of table update event
154 * @ni: NMBM instance structure
155 * @main_table: whether the table is main info table
156 * @start_ba: start block address of the table
157 * @end_ba: block address after the end of the table
159 static void nlog_table_update(struct nmbm_instance
*ni
, bool main_table
,
160 uint32_t start_ba
, uint32_t end_ba
)
162 if (start_ba
== end_ba
- 1)
163 nlog_debug(ni
, "%s info table has been updated in block %u\n",
164 main_table
? "Main" : "Backup", start_ba
);
166 nlog_debug(ni
, "%s info table has been updated in block %u-%u\n",
167 main_table
? "Main" : "Backup", start_ba
, end_ba
- 1);
171 * nlog_table_found - Print log of table found event
172 * @ni: NMBM instance structure
173 * @first_table: whether the table is first found info table
174 * @write_count: write count of the info table
175 * @start_ba: start block address of the table
176 * @end_ba: block address after the end of the table
178 static void nlog_table_found(struct nmbm_instance
*ni
, bool first_table
,
179 uint32_t write_count
, uint32_t start_ba
,
182 if (start_ba
== end_ba
- 1)
183 nlog_info(ni
, "%s info table with writecount %u found in block %u\n",
184 first_table
? "First" : "Second", write_count
,
187 nlog_info(ni
, "%s info table with writecount %u found in block %u-%u\n",
188 first_table
? "First" : "Second", write_count
,
189 start_ba
, end_ba
- 1);
192 /*****************************************************************************/
193 /* Address conversion functions */
194 /*****************************************************************************/
197 * ba2addr - Convert a block address to linear address
198 * @ni: NMBM instance structure
201 static uint64_t ba2addr(struct nmbm_instance
*ni
, uint32_t ba
)
203 return (uint64_t)ba
<< bmtd
.blk_shift
;
206 * size2blk - Get minimum required blocks for storing specific size of data
207 * @ni: NMBM instance structure
208 * @size: size for storing
210 static uint32_t size2blk(struct nmbm_instance
*ni
, uint64_t size
)
212 return (size
+ bmtd
.blk_size
- 1) >> bmtd
.blk_shift
;
215 /*****************************************************************************/
216 /* High level NAND chip APIs */
217 /*****************************************************************************/
/**
220 * nmbm_read_phys_page - Read page with retry
221 * @ni: NMBM instance structure
222 * @addr: linear address where the data will be read from
223 * @data: the main data to be read
224 * @oob: the oob data to be read
226 * Read a page for at most NMBM_TRY_COUNT times.
228 * Return 0 for success, positive value for corrected bitflip count,
229 * -EBADMSG for ecc error, other negative values for other errors
 */
/*
 * NOTE(review): fragmentary extraction -- embedded original numbering skips
 * (238 -> 244, 251 -> 257), so parts of the ops initializer, the result
 * checks and the retry/return flow are missing. Kept byte-identical.
 */
231 static int nmbm_read_phys_page(struct nmbm_instance
*ni
, uint64_t addr
,
232 void *data
, void *oob
)
236 for (tries
= 0; tries
< NMBM_TRY_COUNT
; tries
++) {
237 struct mtd_oob_ops ops
= {
238 .mode
= MTD_OPS_PLACE_OOB
,
244 ops
.len
= bmtd
.pg_size
;
246 ops
.ooblen
= mtd_oobavail(bmtd
.mtd
, &ops
);
248 ret
= bmtd
._read_oob(bmtd
.mtd
, addr
, &ops
);
/* Cap the reported bitflip count at threshold + 1, bounded by strength */
250 return min_t(u32
, bmtd
.mtd
->bitflip_threshold
+ 1,
251 bmtd
.mtd
->ecc_strength
);
257 nlog_err(ni
, "Page read failed at address 0x%08llx\n", addr
);
/**
263 * nmbm_write_phys_page - Write page with retry
264 * @ni: NMBM instance structure
265 * @addr: linear address where the data will be written to
266 * @data: the main data to be written
267 * @oob: the oob data to be written
269 * Write a page for at most NMBM_TRY_COUNT times.
 */
/*
 * NOTE(review): fragmentary extraction -- embedded numbering skips
 * (280 -> 284, 288 -> 293), so the result checks and retry/return flow
 * are missing here. Kept byte-identical.
 */
271 static bool nmbm_write_phys_page(struct nmbm_instance
*ni
, uint64_t addr
,
272 const void *data
, const void *oob
)
276 for (tries
= 0; tries
< NMBM_TRY_COUNT
; tries
++) {
277 struct mtd_oob_ops ops
= {
278 .mode
= MTD_OPS_PLACE_OOB
,
279 .oobbuf
= (void *)oob
,
280 .datbuf
= (void *)data
,
284 ops
.len
= bmtd
.pg_size
;
286 ops
.ooblen
= mtd_oobavail(bmtd
.mtd
, &ops
);
288 ret
= bmtd
._write_oob(bmtd
.mtd
, addr
, &ops
);
293 nlog_err(ni
, "Page write failed at address 0x%08llx\n", addr
);
/**
299 * nmbm_erase_phys_block - Erase a block with retry
300 * @ni: NMBM instance structure
301 * @addr: Linear address
303 * Erase a block for at most NMBM_TRY_COUNT times.
 */
/*
 * NOTE(review): fragmentary extraction -- embedded numbering skips
 * (310 -> 312, 315 -> 320), so part of the erase_info initializer
 * (likely .addr) and the result/retry flow are missing. Kept byte-identical.
 */
305 static bool nmbm_erase_phys_block(struct nmbm_instance
*ni
, uint64_t addr
)
309 for (tries
= 0; tries
< NMBM_TRY_COUNT
; tries
++) {
310 struct erase_info ei
= {
312 .len
= bmtd
.mtd
->erasesize
,
315 ret
= bmtd
._erase(bmtd
.mtd
, &ei
);
320 nlog_err(ni
, "Block erasure failed at address 0x%08llx\n", addr
);
326 * nmbm_check_bad_phys_block - Check whether a block is marked bad in OOB
327 * @ni: NMBM instance structure
330 static bool nmbm_check_bad_phys_block(struct nmbm_instance
*ni
, uint32_t ba
)
332 uint64_t addr
= ba2addr(ni
, ba
);
334 return bmtd
._block_isbad(bmtd
.mtd
, addr
);
338 * nmbm_mark_phys_bad_block - Mark a block bad
339 * @ni: NMBM instance structure
340 * @addr: Linear address
342 static int nmbm_mark_phys_bad_block(struct nmbm_instance
*ni
, uint32_t ba
)
344 uint64_t addr
= ba2addr(ni
, ba
);
346 nlog_info(ni
, "Block %u [0x%08llx] will be marked bad\n", ba
, addr
);
348 return bmtd
._block_markbad(bmtd
.mtd
, addr
);
351 /*****************************************************************************/
352 /* NMBM related functions */
353 /*****************************************************************************/
356 * nmbm_check_header - Check whether a NMBM structure is valid
357 * @data: pointer to a NMBM structure with a NMBM header at beginning
358 * @size: Size of the buffer pointed by @header
360 * The size of the NMBM structure may be larger than NMBM header,
361 * e.g. block mapping table and block state table.
363 static bool nmbm_check_header(const void *data
, uint32_t size
)
365 const struct nmbm_header
*header
= data
;
366 struct nmbm_header nhdr
;
367 uint32_t new_checksum
;
370 * Make sure expected structure size is equal or smaller than
373 if (header
->size
> size
)
376 memcpy(&nhdr
, data
, sizeof(nhdr
));
379 new_checksum
= nmbm_crc32(0, &nhdr
, sizeof(nhdr
));
380 if (header
->size
> sizeof(nhdr
))
381 new_checksum
= nmbm_crc32(new_checksum
,
382 (const uint8_t *)data
+ sizeof(nhdr
),
383 header
->size
- sizeof(nhdr
));
385 if (header
->checksum
!= new_checksum
)
392 * nmbm_update_checksum - Update checksum of a NMBM structure
393 * @header: pointer to a NMBM structure with a NMBM header at beginning
395 * The size of the NMBM structure must be specified by @header->size
397 static void nmbm_update_checksum(struct nmbm_header
*header
)
399 header
->checksum
= 0;
400 header
->checksum
= nmbm_crc32(0, header
, header
->size
);
404 * nmbm_get_spare_block_count - Calculate number of blocks should be reserved
405 * @block_count: number of blocks of data
407 * Calculate number of blocks should be reserved for data
409 static uint32_t nmbm_get_spare_block_count(uint32_t block_count
)
413 val
= (block_count
+ NMBM_SPARE_BLOCK_DIV
/ 2) / NMBM_SPARE_BLOCK_DIV
;
414 val
*= NMBM_SPARE_BLOCK_MULTI
;
416 if (val
< NMBM_SPARE_BLOCK_MIN
)
417 val
= NMBM_SPARE_BLOCK_MIN
;
423 * nmbm_get_block_state_raw - Get state of a block from raw block state table
424 * @block_state: pointer to raw block state table (bitmap)
427 static uint32_t nmbm_get_block_state_raw(u32
*block_state
,
430 uint32_t unit
, shift
;
432 unit
= ba
/ NMBM_BITMAP_BLOCKS_PER_UNIT
;
433 shift
= (ba
% NMBM_BITMAP_BLOCKS_PER_UNIT
) * NMBM_BITMAP_BITS_PER_BLOCK
;
435 return (block_state
[unit
] >> shift
) & BLOCK_ST_MASK
;
439 * nmbm_get_block_state - Get state of a block from block state table
440 * @ni: NMBM instance structure
443 static uint32_t nmbm_get_block_state(struct nmbm_instance
*ni
, uint32_t ba
)
445 return nmbm_get_block_state_raw(ni
->block_state
, ba
);
449 * nmbm_set_block_state - Set state of a block to block state table
450 * @ni: NMBM instance structure
452 * @state: block state
454 * Set state of a block. If the block state changed, ni->block_state_changed
457 static bool nmbm_set_block_state(struct nmbm_instance
*ni
, uint32_t ba
,
460 uint32_t unit
, shift
, orig
;
463 unit
= ba
/ NMBM_BITMAP_BLOCKS_PER_UNIT
;
464 shift
= (ba
% NMBM_BITMAP_BLOCKS_PER_UNIT
) * NMBM_BITMAP_BITS_PER_BLOCK
;
466 orig
= (ni
->block_state
[unit
] >> shift
) & BLOCK_ST_MASK
;
467 state
&= BLOCK_ST_MASK
;
469 uv
= ni
->block_state
[unit
] & (~(BLOCK_ST_MASK
<< shift
));
470 uv
|= state
<< shift
;
471 ni
->block_state
[unit
] = uv
;
474 ni
->block_state_changed
++;
/**
482 * nmbm_block_walk_asc - Skip specified number of good blocks, ascending addr.
483 * @ni: NMBM instance structure
484 * @ba: start physical block address
485 * @nba: return physical block address after walk
486 * @count: number of good blocks to be skipped
487 * @limit: highest block address allowed for walking
489 * Start from @ba, skipping any bad blocks, counting @count good blocks, and
490 * return the next good block address.
492 * If no enough good blocks counted while @limit reached, false will be returned.
494 * If @count == 0, nearest good block address will be returned.
495 * @limit is not counted in walking.
 */
/*
 * NOTE(review): fragmentary extraction -- the `limit` parameter line and
 * the walk loop body (embedded numbering skips 498 -> 501, 507 -> 522)
 * are missing here. Kept byte-identical.
 */
497 static bool nmbm_block_walk_asc(struct nmbm_instance
*ni
, uint32_t ba
,
498 uint32_t *nba
, uint32_t count
,
501 int32_t nblock
= count
;
503 if (limit
>= ni
->block_count
)
504 limit
= ni
->block_count
- 1;
507 if (nmbm_get_block_state(ni
, ba
) == BLOCK_ST_GOOD
)
/**
522 * nmbm_block_walk_desc - Skip specified number of good blocks, descending addr
523 * @ni: NMBM instance structure
524 * @ba: start physical block address
525 * @nba: return physical block address after walk
526 * @count: number of good blocks to be skipped
527 * @limit: lowest block address allowed for walking
529 * Start from @ba, skipping any bad blocks, counting @count good blocks, and
530 * return the next good block address.
532 * If no enough good blocks counted while @limit reached, false will be returned.
534 * If @count == 0, nearest good block address will be returned.
535 * @limit is not counted in walking.
 */
/*
 * NOTE(review): fragmentary extraction -- the walk loop body (embedded
 * numbering skips 546 -> 561) is missing here. Kept byte-identical.
 */
537 static bool nmbm_block_walk_desc(struct nmbm_instance
*ni
, uint32_t ba
,
538 uint32_t *nba
, uint32_t count
, uint32_t limit
)
540 int32_t nblock
= count
;
542 if (limit
>= ni
->block_count
)
543 limit
= ni
->block_count
- 1;
546 if (nmbm_get_block_state(ni
, ba
) == BLOCK_ST_GOOD
)
561 * nmbm_block_walk - Skip specified number of good blocks from curr. block addr
562 * @ni: NMBM instance structure
563 * @ascending: whether to walk ascending
564 * @ba: start physical block address
565 * @nba: return physical block address after walk
566 * @count: number of good blocks to be skipped
567 * @limit: highest/lowest block address allowed for walking
569 * Start from @ba, skipping any bad blocks, counting @count good blocks, and
570 * return the next good block address.
572 * If no enough good blocks counted while @limit reached, false will be returned.
574 * If @count == 0, nearest good block address will be returned.
575 * @limit can be set to negative if no limit required.
576 * @limit is not counted in walking.
578 static bool nmbm_block_walk(struct nmbm_instance
*ni
, bool ascending
,
579 uint32_t ba
, uint32_t *nba
, int32_t count
,
583 return nmbm_block_walk_asc(ni
, ba
, nba
, count
, limit
);
585 return nmbm_block_walk_desc(ni
, ba
, nba
, count
, limit
);
589 * nmbm_scan_badblocks - Scan and record all bad blocks
590 * @ni: NMBM instance structure
592 * Scan the entire lower NAND chip and record all bad blocks in to block state
595 static void nmbm_scan_badblocks(struct nmbm_instance
*ni
)
599 for (ba
= 0; ba
< ni
->block_count
; ba
++) {
600 if (nmbm_check_bad_phys_block(ni
, ba
)) {
601 nmbm_set_block_state(ni
, ba
, BLOCK_ST_BAD
);
602 nlog_info(ni
, "Bad block %u [0x%08llx]\n", ba
,
609 * nmbm_build_mapping_table - Build initial block mapping table
610 * @ni: NMBM instance structure
612 * The initial mapping table will be compatible with the stratage of
613 * factory production.
615 static void nmbm_build_mapping_table(struct nmbm_instance
*ni
)
619 for (pb
= 0, lb
= 0; pb
< ni
->mgmt_start_ba
; pb
++) {
620 if (nmbm_get_block_state(ni
, pb
) == BLOCK_ST_BAD
)
623 /* Always map to the next good block */
624 ni
->block_mapping
[lb
++] = pb
;
627 ni
->data_block_count
= lb
;
629 /* Unusable/Management blocks */
630 for (pb
= lb
; pb
< ni
->block_count
; pb
++)
631 ni
->block_mapping
[pb
] = -1;
/**
635 * nmbm_erase_block_and_check - Erase a block and check its usability
636 * @ni: NMBM instance structure
637 * @ba: block address to be erased
639 * Erase a block and check its usability
641 * Return true if the block is usable, false if erasure failure or the block
642 * has too many bitflips.
 */
/*
 * NOTE(review): fragmentary extraction -- the early-return paths and the
 * loop's success/failure handling (embedded numbering skips 650 -> 654,
 * 663 -> 665, 672 -> 682) are missing here. Kept byte-identical.
 */
644 static bool nmbm_erase_block_and_check(struct nmbm_instance
*ni
, uint32_t ba
)
650 success
= nmbm_erase_phys_block(ni
, ba2addr(ni
, ba
));
654 if (!ni
->empty_page_ecc_ok
)
657 /* Check every page to make sure there aren't too many bitflips */
659 addr
= ba2addr(ni
, ba
);
661 for (off
= 0; off
< bmtd
.blk_size
; off
+= bmtd
.pg_size
) {
662 ret
= nmbm_read_phys_page(ni
, addr
+ off
, ni
->page_cache
, NULL
);
663 if (ret
== -EBADMSG
) {
/*
665 * empty_page_ecc_ok means the empty page is
666 * still protected by ECC. So reading pages with ECC
667 * enabled and -EBADMSG means there are too many
668 * bitflips that can't be recovered, and the block
669 * containing the page should be marked bad.
 */
672 "Too many bitflips in empty page at 0x%llx\n",
/**
682 * nmbm_erase_range - Erase a range of blocks
683 * @ni: NMBM instance structure
684 * @ba: block address where the erasure will start
685 * @limit: top block address allowed for erasure
687 * Erase blocks within the specific range. Newly-found bad blocks will be
690 * @limit is not counted into the allowed erasure address.
 */
/*
 * NOTE(review): fragmentary extraction -- the `limit` parameter line, the
 * enclosing loop and the branch glue between the visible statements
 * (embedded numbering skips 692 -> 698, 703 -> 707, 707 -> 711) are
 * missing here. Kept byte-identical.
 */
692 static void nmbm_erase_range(struct nmbm_instance
*ni
, uint32_t ba
,
698 if (nmbm_get_block_state(ni
, ba
) != BLOCK_ST_GOOD
)
701 /* Insurance to detect unexpected bad block marked by user */
702 if (nmbm_check_bad_phys_block(ni
, ba
)) {
703 nmbm_set_block_state(ni
, ba
, BLOCK_ST_BAD
);
707 success
= nmbm_erase_block_and_check(ni
, ba
);
711 nmbm_mark_phys_bad_block(ni
, ba
);
712 nmbm_set_block_state(ni
, ba
, BLOCK_ST_BAD
);
/**
720 * nmbm_write_repeated_data - Write critical data to a block with retry
721 * @ni: NMBM instance structure
722 * @ba: block address where the data will be written to
723 * @data: the data to be written
724 * @size: size of the data
726 * Write data to every page of the block. Success only if all pages within
727 * this block have been successfully written.
729 * Make sure data size is not bigger than one page.
731 * This function will write and verify every page for at most
732 * NMBM_TRY_COUNT times.
 */
/*
 * NOTE(review): fragmentary extraction -- local declarations, the
 * per-page retry control flow and the return paths (embedded numbering
 * skips 741 -> 744, 751 -> 755, 760 -> 768) are missing here.
 * Kept byte-identical.
 */
734 static bool nmbm_write_repeated_data(struct nmbm_instance
*ni
, uint32_t ba
,
735 const void *data
, uint32_t size
)
741 if (size
> bmtd
.pg_size
)
744 addr
= ba2addr(ni
, ba
);
746 for (off
= 0; off
< bmtd
.blk_size
; off
+= bmtd
.pg_size
) {
747 /* Prepare page data. fill 0xff to unused region */
748 memcpy(ni
->page_cache
, data
, size
);
749 memset(ni
->page_cache
+ size
, 0xff, ni
->rawpage_size
- size
);
751 success
= nmbm_write_phys_page(ni
, addr
+ off
, ni
->page_cache
, NULL
);
755 /* Verify the data just written. ECC error indicates failure */
756 ret
= nmbm_read_phys_page(ni
, addr
+ off
, ni
->page_cache
, NULL
);
760 if (memcmp(ni
->page_cache
, data
, size
))
/**
768 * nmbm_write_signature - Write signature to NAND chip
769 * @ni: NMBM instance structure
770 * @limit: top block address allowed for writing
771 * @signature: the signature to be written
772 * @signature_ba: the actual block address where signature is written to
774 * Write signature within a specific range, from chip bottom to limit.
775 * At most one block will be written.
777 * @limit is not counted into the allowed write address.
 */
/*
 * NOTE(review): fragmentary extraction -- the descending-block loop, the
 * trailing write_repeated_data() arguments and the success/failure paths
 * (embedded numbering skips 783 -> 787, 800 -> 808, 809 -> 819) are
 * missing here. Kept byte-identical.
 */
779 static bool nmbm_write_signature(struct nmbm_instance
*ni
, uint32_t limit
,
780 const struct nmbm_signature
*signature
,
781 uint32_t *signature_ba
)
783 uint32_t ba
= ni
->block_count
- 1;
787 if (nmbm_get_block_state(ni
, ba
) != BLOCK_ST_GOOD
)
790 /* Insurance to detect unexpected bad block marked by user */
791 if (nmbm_check_bad_phys_block(ni
, ba
)) {
792 nmbm_set_block_state(ni
, ba
, BLOCK_ST_BAD
);
796 success
= nmbm_erase_block_and_check(ni
, ba
);
800 success
= nmbm_write_repeated_data(ni
, ba
, signature
,
808 nmbm_mark_phys_bad_block(ni
, ba
);
809 nmbm_set_block_state(ni
, ba
, BLOCK_ST_BAD
);
/**
819 * nmbn_read_data - Read data
820 * @ni: NMBM instance structure
821 * @addr: linear address where the data will be read from
822 * @data: the data to be read
823 * @size: the size of data
826 * Every page will be tried for at most NMBM_TRY_COUNT times.
828 * Return 0 for success, positive value for corrected bitflip count,
829 * -EBADMSG for ecc error, other negative values for other errors
 */
/*
 * NOTE(review): fragmentary extraction -- the `size` parameter line, the
 * `off`/`ptr` declarations, the enclosing while loop and the error/return
 * paths (embedded numbering skips 831 -> 836, 846 -> 850, 855 -> 860)
 * are missing here. Partial-page reads bounce through ni->page_cache;
 * whole pages go straight into the caller's buffer. Kept byte-identical.
 */
831 static int nmbn_read_data(struct nmbm_instance
*ni
, uint64_t addr
, void *data
,
836 uint32_t sizeremain
= size
, chunksize
, leading
;
840 leading
= off
& (bmtd
.pg_size
- 1);
841 chunksize
= bmtd
.pg_size
- leading
;
842 if (chunksize
> sizeremain
)
843 chunksize
= sizeremain
;
845 if (chunksize
== bmtd
.pg_size
) {
846 ret
= nmbm_read_phys_page(ni
, off
- leading
, ptr
, NULL
);
850 ret
= nmbm_read_phys_page(ni
, off
- leading
,
851 ni
->page_cache
, NULL
);
855 memcpy(ptr
, ni
->page_cache
+ leading
, chunksize
);
860 sizeremain
-= chunksize
;
/**
867 * nmbn_write_verify_data - Write data with validation
868 * @ni: NMBM instance structure
869 * @addr: linear address where the data will be written to
870 * @data: the data to be written
871 * @size: the size of data
873 * Write data and verify.
874 * Every page will be tried for at most NMBM_TRY_COUNT times.
 */
/*
 * NOTE(review): fragmentary extraction -- the `off` declaration, the
 * enclosing while loop and the retry/return paths (embedded numbering
 * skips 881 -> 886, 896 -> 900, 905 -> 910) are missing here.
 * Kept byte-identical.
 */
876 static bool nmbn_write_verify_data(struct nmbm_instance
*ni
, uint64_t addr
,
877 const void *data
, uint32_t size
)
880 const uint8_t *ptr
= data
;
881 uint32_t sizeremain
= size
, chunksize
, leading
;
886 leading
= off
& (bmtd
.pg_size
- 1);
887 chunksize
= bmtd
.pg_size
- leading
;
888 if (chunksize
> sizeremain
)
889 chunksize
= sizeremain
;
891 /* Prepare page data. fill 0xff to unused region */
892 memset(ni
->page_cache
, 0xff, ni
->rawpage_size
);
893 memcpy(ni
->page_cache
+ leading
, ptr
, chunksize
);
895 success
= nmbm_write_phys_page(ni
, off
- leading
,
896 ni
->page_cache
, NULL
);
900 /* Verify the data just written. ECC error indicates failure */
901 ret
= nmbm_read_phys_page(ni
, off
- leading
, ni
->page_cache
, NULL
);
905 if (memcmp(ni
->page_cache
+ leading
, ptr
, chunksize
))
910 sizeremain
-= chunksize
;
/**
917 * nmbm_write_mgmt_range - Write management data into NAND within a range
918 * @ni: NMBM instance structure
919 * @addr: preferred start block address for writing
920 * @limit: highest block address allowed for writing
921 * @data: the data to be written
922 * @size: the size of data
923 * @actual_start_ba: actual start block address of data
924 * @actual_end_ba: block address after the end of data
926 * @limit is not counted into the allowed write address.
 */
/*
 * NOTE(review): fragmentary extraction -- the per-block skip/advance logic,
 * the trailing nmbn_write_verify_data() arguments, the remap-on-failure
 * flow and the return paths (embedded numbering skips 942 -> 945,
 * 955 -> 960, 964 -> 969, 970 -> 985) are missing here.
 * Kept byte-identical.
 */
928 static bool nmbm_write_mgmt_range(struct nmbm_instance
*ni
, uint32_t ba
,
929 uint32_t limit
, const void *data
,
930 uint32_t size
, uint32_t *actual_start_ba
,
931 uint32_t *actual_end_ba
)
933 const uint8_t *ptr
= data
;
934 uint32_t sizeremain
= size
, chunksize
;
937 while (sizeremain
&& ba
< limit
) {
938 chunksize
= sizeremain
;
939 if (chunksize
> bmtd
.blk_size
)
940 chunksize
= bmtd
.blk_size
;
942 if (nmbm_get_block_state(ni
, ba
) != BLOCK_ST_GOOD
)
945 /* Insurance to detect unexpected bad block marked by user */
946 if (nmbm_check_bad_phys_block(ni
, ba
)) {
947 nmbm_set_block_state(ni
, ba
, BLOCK_ST_BAD
);
951 success
= nmbm_erase_block_and_check(ni
, ba
);
955 success
= nmbn_write_verify_data(ni
, ba2addr(ni
, ba
), ptr
,
960 if (sizeremain
== size
)
961 *actual_start_ba
= ba
;
964 sizeremain
-= chunksize
;
969 nmbm_mark_phys_bad_block(ni
, ba
);
970 nmbm_set_block_state(ni
, ba
, BLOCK_ST_BAD
);
/**
985 * nmbm_generate_info_table_cache - Generate info table cache data
986 * @ni: NMBM instance structure
988 * Generate info table cache data to be written into flash.
 */
/*
 * NOTE(review): fragmentary extraction -- the `changed` bookkeeping inside
 * the if-block and the final return (embedded numbering skips 1007 -> 1011,
 * 1013 -> 1019) are missing here. The cache image is: header + block state
 * table (at state_table_off) + block mapping table (at mapping_table_off),
 * checksummed via nmbm_update_checksum(). Kept byte-identical.
 */
990 static bool nmbm_generate_info_table_cache(struct nmbm_instance
*ni
)
992 bool changed
= false;
994 memset(ni
->info_table_cache
, 0xff, ni
->info_table_size
);
996 memcpy(ni
->info_table_cache
+ ni
->info_table
.state_table_off
,
997 ni
->block_state
, ni
->state_table_size
);
999 memcpy(ni
->info_table_cache
+ ni
->info_table
.mapping_table_off
,
1000 ni
->block_mapping
, ni
->mapping_table_size
);
1002 ni
->info_table
.header
.magic
= NMBM_MAGIC_INFO_TABLE
;
1003 ni
->info_table
.header
.version
= NMBM_VER
;
1004 ni
->info_table
.header
.size
= ni
->info_table_size
;
1006 if (ni
->block_state_changed
|| ni
->block_mapping_changed
) {
1007 ni
->info_table
.write_count
++;
1011 memcpy(ni
->info_table_cache
, &ni
->info_table
, sizeof(ni
->info_table
));
1013 nmbm_update_checksum((struct nmbm_header
*)ni
->info_table_cache
);
1019 * nmbm_write_info_table - Write info table into NAND within a range
1020 * @ni: NMBM instance structure
1021 * @ba: preferred start block address for writing
1022 * @limit: highest block address allowed for writing
1023 * @actual_start_ba: actual start block address of info table
1024 * @actual_end_ba: block address after the end of info table
1026 * @limit is counted into the allowed write address.
1028 static bool nmbm_write_info_table(struct nmbm_instance
*ni
, uint32_t ba
,
1029 uint32_t limit
, uint32_t *actual_start_ba
,
1030 uint32_t *actual_end_ba
)
1032 return nmbm_write_mgmt_range(ni
, ba
, limit
, ni
->info_table_cache
,
1033 ni
->info_table_size
, actual_start_ba
,
1038 * nmbm_mark_tables_clean - Mark info table `clean'
1039 * @ni: NMBM instance structure
1041 static void nmbm_mark_tables_clean(struct nmbm_instance
*ni
)
1043 ni
->block_state_changed
= 0;
1044 ni
->block_mapping_changed
= 0;
/**
1048 * nmbm_try_reserve_blocks - Reserve blocks with compromise
1049 * @ni: NMBM instance structure
1050 * @ba: start physical block address
1051 * @nba: return physical block address after reservation
1052 * @count: number of good blocks to be skipped
1053 * @min_count: minimum number of good blocks to be skipped
1054 * @limit: highest/lowest block address allowed for walking
1056 * Reserve specific blocks. If failed, try to reserve as many as possible.
 */
/*
 * NOTE(review): fragmentary extraction -- the loop body after the walk
 * call (success handling, nblocks decrement, return paths; embedded
 * numbering skips 1066 -> 1077) is missing here. Kept byte-identical.
 */
1058 static bool nmbm_try_reserve_blocks(struct nmbm_instance
*ni
, uint32_t ba
,
1059 uint32_t *nba
, uint32_t count
,
1060 int32_t min_count
, int32_t limit
)
1062 int32_t nblocks
= count
;
1065 while (nblocks
>= min_count
) {
1066 success
= nmbm_block_walk(ni
, true, ba
, nba
, nblocks
, limit
);
/**
1077 * nmbm_rebuild_info_table - Build main & backup info table from scratch
1078 * @ni: NMBM instance structure
1079 * @allow_no_gap: allow no spare blocks between two tables
 */
/*
 * NOTE(review): fragmentary extraction -- the branch structure around the
 * visible statements (failure checks after each write, early returns;
 * embedded numbering skips 1099 -> 1104, 1120 -> 1122, 1123 -> 1127,
 * 1133 -> 1137, 1146 -> 1152) is missing here. The @allow_no_gap
 * parameter documented above is not visible in the signature fragment --
 * confirm against the complete source. Kept byte-identical.
 */
1081 static bool nmbm_rebuild_info_table(struct nmbm_instance
*ni
)
1083 uint32_t table_start_ba
, table_end_ba
, next_start_ba
;
1084 uint32_t main_table_end_ba
;
1087 /* Set initial value */
1088 ni
->main_table_ba
= 0;
1089 ni
->backup_table_ba
= 0;
1090 ni
->mapping_blocks_ba
= ni
->mapping_blocks_top_ba
;
1092 /* Write main table */
1093 success
= nmbm_write_info_table(ni
, ni
->mgmt_start_ba
,
1094 ni
->mapping_blocks_top_ba
,
1095 &table_start_ba
, &table_end_ba
);
1097 /* Failed to write main table, data will be lost */
1098 nlog_err(ni
, "Unable to write at least one info table!\n");
1099 nlog_err(ni
, "Please save your data before power off!\n");
1104 /* Main info table is successfully written, record its offset */
1105 ni
->main_table_ba
= table_start_ba
;
1106 main_table_end_ba
= table_end_ba
;
1108 /* Adjust mapping_blocks_ba */
1109 ni
->mapping_blocks_ba
= table_end_ba
;
1111 nmbm_mark_tables_clean(ni
);
1113 nlog_table_creation(ni
, true, table_start_ba
, table_end_ba
);
1115 /* Reserve spare blocks for main info table. */
1116 success
= nmbm_try_reserve_blocks(ni
, table_end_ba
,
1118 ni
->info_table_spare_blocks
, 0,
1119 ni
->mapping_blocks_top_ba
-
1120 size2blk(ni
, ni
->info_table_size
));
1122 /* There is no spare block. */
1123 nlog_debug(ni
, "No room for backup info table\n");
1127 /* Write backup info table. */
1128 success
= nmbm_write_info_table(ni
, next_start_ba
,
1129 ni
->mapping_blocks_top_ba
,
1130 &table_start_ba
, &table_end_ba
);
1132 /* There is no enough blocks for backup table. */
1133 nlog_debug(ni
, "No room for backup info table\n");
1137 /* Backup table is successfully written, record its offset */
1138 ni
->backup_table_ba
= table_start_ba
;
1140 /* Adjust mapping_blocks_off */
1141 ni
->mapping_blocks_ba
= table_end_ba
;
1143 /* Erase spare blocks of main table to clean possible interference data */
1144 nmbm_erase_range(ni
, main_table_end_ba
, ni
->backup_table_ba
);
1146 nlog_table_creation(ni
, false, table_start_ba
, table_end_ba
);
/**
1152 * nmbm_rescue_single_info_table - Rescue when there is only one info table
1153 * @ni: NMBM instance structure
1155 * This function is called when there is only one info table exists.
1156 * This function may fail if we can't write new info table
 */
/*
 * NOTE(review): fragmentary extraction -- trailing arguments to the first
 * write_info_table() call, failure branches and the while-loop body
 * (embedded numbering skips 1164 -> 1170, 1199 -> 1209, 1218 -> 1223)
 * are missing here. Kept byte-identical.
 */
1158 static bool nmbm_rescue_single_info_table(struct nmbm_instance
*ni
)
1160 uint32_t table_start_ba
, table_end_ba
, write_ba
;
1163 /* Try to write new info table in front of existing table */
1164 success
= nmbm_write_info_table(ni
, ni
->mgmt_start_ba
,
/*
1170 * New table becomes the main table, existing table becomes
 * the backup table (continuation of this comment is missing here).
 */
1173 ni
->backup_table_ba
= ni
->main_table_ba
;
1174 ni
->main_table_ba
= table_start_ba
;
1176 nmbm_mark_tables_clean(ni
);
1178 /* Erase spare blocks of main table to clean possible interference data */
1179 nmbm_erase_range(ni
, table_end_ba
, ni
->backup_table_ba
);
1181 nlog_table_creation(ni
, true, table_start_ba
, table_end_ba
);
1186 /* Try to reserve spare blocks for existing table */
1187 success
= nmbm_try_reserve_blocks(ni
, ni
->mapping_blocks_ba
, &write_ba
,
1188 ni
->info_table_spare_blocks
, 0,
1189 ni
->mapping_blocks_top_ba
-
1190 size2blk(ni
, ni
->info_table_size
));
1192 nlog_warn(ni
, "Failed to rescue single info table\n");
1196 /* Try to write new info table next to the existing table */
1197 while (write_ba
>= ni
->mapping_blocks_ba
) {
1198 success
= nmbm_write_info_table(ni
, write_ba
,
1199 ni
->mapping_blocks_top_ba
,
1209 /* Erase spare blocks of main table to clean possible interference data */
1210 nmbm_erase_range(ni
, ni
->mapping_blocks_ba
, table_start_ba
);
1212 /* New table becomes the backup table */
1213 ni
->backup_table_ba
= table_start_ba
;
1214 ni
->mapping_blocks_ba
= table_end_ba
;
1216 nmbm_mark_tables_clean(ni
);
1218 nlog_table_creation(ni
, false, table_start_ba
, table_end_ba
);
1223 nlog_warn(ni
, "Failed to rescue single info table\n");
/**
1228 * nmbm_update_single_info_table - Update specific one info table
1229 * @ni: NMBM instance structure
 * @update_main_table: whether to update the main (vs. backup) table
 */
/*
 * NOTE(review): fragmentary extraction -- the else keywords, the
 * success/failure branch structure and the trailing nlog_table_update()
 * argument (embedded numbering skips 1258 -> 1264, 1274 -> 1277) are
 * missing here. Kept byte-identical.
 */
1231 static bool nmbm_update_single_info_table(struct nmbm_instance
*ni
,
1232 bool update_main_table
)
1234 uint32_t write_start_ba
, write_limit
, table_start_ba
, table_end_ba
;
1237 /* Determine the write range */
1238 if (update_main_table
) {
1239 write_start_ba
= ni
->main_table_ba
;
1240 write_limit
= ni
->backup_table_ba
;
1242 write_start_ba
= ni
->backup_table_ba
;
1243 write_limit
= ni
->mapping_blocks_top_ba
;
1246 success
= nmbm_write_info_table(ni
, write_start_ba
, write_limit
,
1247 &table_start_ba
, &table_end_ba
);
1249 if (update_main_table
) {
1250 ni
->main_table_ba
= table_start_ba
;
1252 ni
->backup_table_ba
= table_start_ba
;
1253 ni
->mapping_blocks_ba
= table_end_ba
;
1256 nmbm_mark_tables_clean(ni
);
1258 nlog_table_update(ni
, update_main_table
, table_start_ba
,
1264 if (update_main_table
) {
/*
1266 * If failed to update main table, make backup table the new
1267 * main table, and call nmbm_rescue_single_info_table()
 */
1269 nlog_warn(ni
, "Unable to update %s info table\n",
1270 update_main_table
? "Main" : "Backup");
1272 ni
->main_table_ba
= ni
->backup_table_ba
;
1273 ni
->backup_table_ba
= 0;
1274 return nmbm_rescue_single_info_table(ni
);
1277 /* Only one table left */
1278 ni
->mapping_blocks_ba
= ni
->backup_table_ba
;
1279 ni
->backup_table_ba
= 0;
1285 * nmbm_rescue_main_info_table - Rescue when failed to write main info table
1286 * @ni: NMBM instance structure
1288 * This function is called when main info table failed to be written, and
1289 * backup info table exists.
1291 static bool nmbm_rescue_main_info_table(struct nmbm_instance
*ni
)
1293 uint32_t tmp_table_start_ba
, tmp_table_end_ba
, main_table_start_ba
;
1294 uint32_t main_table_end_ba
, write_ba
;
1295 uint32_t info_table_erasesize
= size2blk(ni
, ni
->info_table_size
);
1298 /* Try to reserve spare blocks for existing backup info table */
1299 success
= nmbm_try_reserve_blocks(ni
, ni
->mapping_blocks_ba
, &write_ba
,
1300 ni
->info_table_spare_blocks
, 0,
1301 ni
->mapping_blocks_top_ba
-
1302 info_table_erasesize
);
1304 /* There is no spare block. Backup info table becomes the main table. */
1305 nlog_err(ni
, "No room for temporary info table\n");
1306 ni
->main_table_ba
= ni
->backup_table_ba
;
1307 ni
->backup_table_ba
= 0;
1311 /* Try to write temporary info table into spare unmapped blocks */
1312 while (write_ba
>= ni
->mapping_blocks_ba
) {
1313 success
= nmbm_write_info_table(ni
, write_ba
,
1314 ni
->mapping_blocks_top_ba
,
1315 &tmp_table_start_ba
,
1324 /* Backup info table becomes the main table */
1325 nlog_err(ni
, "Failed to update main info table\n");
1326 ni
->main_table_ba
= ni
->backup_table_ba
;
1327 ni
->backup_table_ba
= 0;
1331 /* Adjust mapping_blocks_off */
1332 ni
->mapping_blocks_ba
= tmp_table_end_ba
;
1335 * Now write main info table at the beginning of management area.
1336 * This operation will generally destroy the original backup info
1339 success
= nmbm_write_info_table(ni
, ni
->mgmt_start_ba
,
1341 &main_table_start_ba
,
1342 &main_table_end_ba
);
1344 /* Temporary info table becomes the main table */
1345 ni
->main_table_ba
= tmp_table_start_ba
;
1346 ni
->backup_table_ba
= 0;
1348 nmbm_mark_tables_clean(ni
);
1350 nlog_err(ni
, "Failed to update main info table\n");
1355 /* Main info table has been successfully written, record its offset */
1356 ni
->main_table_ba
= main_table_start_ba
;
1358 nmbm_mark_tables_clean(ni
);
1360 nlog_table_creation(ni
, true, main_table_start_ba
, main_table_end_ba
);
1363 * Temporary info table becomes the new backup info table if it's
1366 if (main_table_end_ba
<= tmp_table_start_ba
) {
1367 ni
->backup_table_ba
= tmp_table_start_ba
;
1369 nlog_table_creation(ni
, false, tmp_table_start_ba
,
1375 /* Adjust mapping_blocks_off */
1376 ni
->mapping_blocks_ba
= main_table_end_ba
;
1378 /* Try to reserve spare blocks for new main info table */
1379 success
= nmbm_try_reserve_blocks(ni
, main_table_end_ba
, &write_ba
,
1380 ni
->info_table_spare_blocks
, 0,
1381 ni
->mapping_blocks_top_ba
-
1382 info_table_erasesize
);
1384 /* There is no spare block. Only main table exists. */
1385 nlog_err(ni
, "No room for backup info table\n");
1386 ni
->backup_table_ba
= 0;
1390 /* Write new backup info table. */
1391 while (write_ba
>= main_table_end_ba
) {
1392 success
= nmbm_write_info_table(ni
, write_ba
,
1393 ni
->mapping_blocks_top_ba
,
1394 &tmp_table_start_ba
,
1403 nlog_err(ni
, "No room for backup info table\n");
1404 ni
->backup_table_ba
= 0;
1408 /* Backup info table has been successfully written, record its offset */
1409 ni
->backup_table_ba
= tmp_table_start_ba
;
1411 /* Adjust mapping_blocks_off */
1412 ni
->mapping_blocks_ba
= tmp_table_end_ba
;
1414 /* Erase spare blocks of main table to clean possible interference data */
1415 nmbm_erase_range(ni
, main_table_end_ba
, ni
->backup_table_ba
);
1417 nlog_table_creation(ni
, false, tmp_table_start_ba
, tmp_table_end_ba
);
1423 * nmbm_update_info_table_once - Update info table once
1424 * @ni: NMBM instance structure
1425 * @force: force update
1427 * Update both main and backup info table. Return true if at least one info
1428 * table has been successfully written.
1429 * This function only try to update info table once regard less of the result.
1431 static bool nmbm_update_info_table_once(struct nmbm_instance
*ni
, bool force
)
1433 uint32_t table_start_ba
, table_end_ba
;
1434 uint32_t main_table_limit
;
1437 /* Do nothing if there is no change */
1438 if (!nmbm_generate_info_table_cache(ni
) && !force
)
1441 /* Check whether both two tables exist */
1442 if (!ni
->backup_table_ba
) {
1443 main_table_limit
= ni
->mapping_blocks_top_ba
;
1444 goto write_main_table
;
1448 * Write backup info table in its current range.
1449 * Note that limit is set to mapping_blocks_top_off to provide as many
1450 * spare blocks as possible for the backup table. If at last
1451 * unmapped blocks are used by backup table, mapping_blocks_off will
1454 success
= nmbm_write_info_table(ni
, ni
->backup_table_ba
,
1455 ni
->mapping_blocks_top_ba
,
1456 &table_start_ba
, &table_end_ba
);
1459 * There is nothing to do if failed to write backup table.
1460 * Write the main table now.
1462 nlog_err(ni
, "No room for backup table\n");
1463 ni
->mapping_blocks_ba
= ni
->backup_table_ba
;
1464 ni
->backup_table_ba
= 0;
1465 main_table_limit
= ni
->mapping_blocks_top_ba
;
1466 goto write_main_table
;
1469 /* Backup table is successfully written, record its offset */
1470 ni
->backup_table_ba
= table_start_ba
;
1472 /* Adjust mapping_blocks_off */
1473 ni
->mapping_blocks_ba
= table_end_ba
;
1475 nmbm_mark_tables_clean(ni
);
1477 /* The normal limit of main table */
1478 main_table_limit
= ni
->backup_table_ba
;
1480 nlog_table_update(ni
, false, table_start_ba
, table_end_ba
);
1483 if (!ni
->main_table_ba
)
1484 goto rebuild_tables
;
1486 /* Write main info table in its current range */
1487 success
= nmbm_write_info_table(ni
, ni
->main_table_ba
,
1488 main_table_limit
, &table_start_ba
,
1491 /* If failed to write main table, go rescue procedure */
1492 if (!ni
->backup_table_ba
)
1493 goto rebuild_tables
;
1495 return nmbm_rescue_main_info_table(ni
);
1498 /* Main info table is successfully written, record its offset */
1499 ni
->main_table_ba
= table_start_ba
;
1501 /* Adjust mapping_blocks_off */
1502 if (!ni
->backup_table_ba
)
1503 ni
->mapping_blocks_ba
= table_end_ba
;
1505 nmbm_mark_tables_clean(ni
);
1507 nlog_table_update(ni
, true, table_start_ba
, table_end_ba
);
1512 return nmbm_rebuild_info_table(ni
);
1516 * nmbm_update_info_table - Update info table
1517 * @ni: NMBM instance structure
1519 * Update both main and backup info table. Return true if at least one table
1520 * has been successfully written.
1521 * This function will try to update info table repeatedly until no new bad
1522 * block found during updating.
1524 static bool nmbm_update_info_table(struct nmbm_instance
*ni
)
1531 while (ni
->block_state_changed
|| ni
->block_mapping_changed
) {
1532 success
= nmbm_update_info_table_once(ni
, false);
1534 nlog_err(ni
, "Failed to update info table\n");
1543 * nmbm_map_block - Map a bad block to a unused spare block
1544 * @ni: NMBM instance structure
1545 * @lb: logic block addr to map
1547 static bool nmbm_map_block(struct nmbm_instance
*ni
, uint32_t lb
)
1552 if (ni
->mapping_blocks_ba
== ni
->mapping_blocks_top_ba
) {
1553 nlog_warn(ni
, "No spare unmapped blocks.\n");
1557 success
= nmbm_block_walk(ni
, false, ni
->mapping_blocks_top_ba
, &pb
, 0,
1558 ni
->mapping_blocks_ba
);
1560 nlog_warn(ni
, "No spare unmapped blocks.\n");
1561 nmbm_update_info_table(ni
);
1562 ni
->mapping_blocks_top_ba
= ni
->mapping_blocks_ba
;
1566 ni
->block_mapping
[lb
] = pb
;
1567 ni
->mapping_blocks_top_ba
--;
1568 ni
->block_mapping_changed
++;
1570 nlog_info(ni
, "Logic block %u mapped to physical block %u\n", lb
, pb
);
1576 * nmbm_create_info_table - Create info table(s)
1577 * @ni: NMBM instance structure
1579 * This function assumes that the chip has no existing info table(s)
1581 static bool nmbm_create_info_table(struct nmbm_instance
*ni
)
1586 /* Set initial mapping_blocks_top_off */
1587 success
= nmbm_block_walk(ni
, false, ni
->signature_ba
,
1588 &ni
->mapping_blocks_top_ba
, 1,
1591 nlog_err(ni
, "No room for spare blocks\n");
1595 /* Generate info table cache */
1596 nmbm_generate_info_table_cache(ni
);
1598 /* Write info table */
1599 success
= nmbm_rebuild_info_table(ni
);
1601 nlog_err(ni
, "Failed to build info tables\n");
1605 /* Remap bad block(s) at end of data area */
1606 for (lb
= ni
->data_block_count
; lb
< ni
->mgmt_start_ba
; lb
++) {
1607 success
= nmbm_map_block(ni
, lb
);
1611 ni
->data_block_count
++;
1614 /* If state table and/or mapping table changed, update info table. */
1615 success
= nmbm_update_info_table(ni
);
1623 * nmbm_create_new - Create NMBM on a new chip
1624 * @ni: NMBM instance structure
1626 static bool nmbm_create_new(struct nmbm_instance
*ni
)
1630 /* Determine the boundary of management blocks */
1631 ni
->mgmt_start_ba
= ni
->block_count
* (NMBM_MGMT_DIV
- ni
->max_ratio
) / NMBM_MGMT_DIV
;
1633 if (ni
->max_reserved_blocks
&& ni
->block_count
- ni
->mgmt_start_ba
> ni
->max_reserved_blocks
)
1634 ni
->mgmt_start_ba
= ni
->block_count
- ni
->max_reserved_blocks
;
1636 nlog_info(ni
, "NMBM management region starts at block %u [0x%08llx]\n",
1637 ni
->mgmt_start_ba
, ba2addr(ni
, ni
->mgmt_start_ba
));
1639 /* Fill block state table & mapping table */
1640 nmbm_scan_badblocks(ni
);
1641 nmbm_build_mapping_table(ni
);
1643 /* Write signature */
1644 ni
->signature
.header
.magic
= NMBM_MAGIC_SIGNATURE
;
1645 ni
->signature
.header
.version
= NMBM_VER
;
1646 ni
->signature
.header
.size
= sizeof(ni
->signature
);
1647 ni
->signature
.nand_size
= bmtd
.total_blks
<< bmtd
.blk_shift
;
1648 ni
->signature
.block_size
= bmtd
.blk_size
;
1649 ni
->signature
.page_size
= bmtd
.pg_size
;
1650 ni
->signature
.spare_size
= bmtd
.mtd
->oobsize
;
1651 ni
->signature
.mgmt_start_pb
= ni
->mgmt_start_ba
;
1652 ni
->signature
.max_try_count
= NMBM_TRY_COUNT
;
1653 nmbm_update_checksum(&ni
->signature
.header
);
1655 success
= nmbm_write_signature(ni
, ni
->mgmt_start_ba
,
1656 &ni
->signature
, &ni
->signature_ba
);
1658 nlog_err(ni
, "Failed to write signature to a proper offset\n");
1662 nlog_info(ni
, "Signature has been written to block %u [0x%08llx]\n",
1663 ni
->signature_ba
, ba2addr(ni
, ni
->signature_ba
));
1665 /* Write info table(s) */
1666 success
= nmbm_create_info_table(ni
);
1668 nlog_info(ni
, "NMBM has been successfully created\n");
1676 * nmbm_check_info_table_header - Check if a info table header is valid
1677 * @ni: NMBM instance structure
1678 * @data: pointer to the info table header
1680 static bool nmbm_check_info_table_header(struct nmbm_instance
*ni
, void *data
)
1682 struct nmbm_info_table_header
*ifthdr
= data
;
1684 if (ifthdr
->header
.magic
!= NMBM_MAGIC_INFO_TABLE
)
1687 if (ifthdr
->header
.size
!= ni
->info_table_size
)
1690 if (ifthdr
->mapping_table_off
- ifthdr
->state_table_off
< ni
->state_table_size
)
1693 if (ni
->info_table_size
- ifthdr
->mapping_table_off
< ni
->mapping_table_size
)
1700 * nmbm_check_info_table - Check if a whole info table is valid
1701 * @ni: NMBM instance structure
1702 * @start_ba: start block address of this table
1703 * @end_ba: end block address of this table
1704 * @data: pointer to the info table header
1705 * @mapping_blocks_top_ba: return the block address of top remapped block
1707 static bool nmbm_check_info_table(struct nmbm_instance
*ni
, uint32_t start_ba
,
1708 uint32_t end_ba
, void *data
,
1709 uint32_t *mapping_blocks_top_ba
)
1711 struct nmbm_info_table_header
*ifthdr
= data
;
1712 int32_t *block_mapping
= (int32_t *)((uintptr_t)data
+ ifthdr
->mapping_table_off
);
1713 u32
*block_state
= (u32
*)((uintptr_t)data
+ ifthdr
->state_table_off
);
1714 uint32_t minimum_mapping_pb
= ni
->signature_ba
;
1717 for (ba
= 0; ba
< ni
->data_block_count
; ba
++) {
1718 if ((block_mapping
[ba
] >= ni
->data_block_count
&& block_mapping
[ba
] < end_ba
) ||
1719 block_mapping
[ba
] == ni
->signature_ba
)
1722 if (block_mapping
[ba
] >= end_ba
&& block_mapping
[ba
] < minimum_mapping_pb
)
1723 minimum_mapping_pb
= block_mapping
[ba
];
1726 for (ba
= start_ba
; ba
< end_ba
; ba
++) {
1727 if (nmbm_get_block_state(ni
, ba
) != BLOCK_ST_GOOD
)
1730 if (nmbm_get_block_state_raw(block_state
, ba
) != BLOCK_ST_GOOD
)
1734 *mapping_blocks_top_ba
= minimum_mapping_pb
- 1;
1740 * nmbm_try_load_info_table - Try to load info table from a address
1741 * @ni: NMBM instance structure
1742 * @ba: start block address of the info table
1743 * @eba: return the block address after end of the table
1744 * @write_count: return the write count of this table
1745 * @mapping_blocks_top_ba: return the block address of top remapped block
1746 * @table_loaded: used to record whether ni->info_table has valid data
1748 static bool nmbm_try_load_info_table(struct nmbm_instance
*ni
, uint32_t ba
,
1749 uint32_t *eba
, uint32_t *write_count
,
1750 uint32_t *mapping_blocks_top_ba
,
1753 struct nmbm_info_table_header
*ifthdr
= (void *)ni
->info_table_cache
;
1754 uint8_t *off
= ni
->info_table_cache
;
1755 uint32_t limit
= ba
+ size2blk(ni
, ni
->info_table_size
);
1756 uint32_t start_ba
= 0, chunksize
, sizeremain
= ni
->info_table_size
;
1757 bool success
, checkhdr
= true;
1760 while (sizeremain
&& ba
< limit
) {
1761 if (nmbm_get_block_state(ni
, ba
) != BLOCK_ST_GOOD
)
1764 if (nmbm_check_bad_phys_block(ni
, ba
)) {
1765 nmbm_set_block_state(ni
, ba
, BLOCK_ST_BAD
);
1769 chunksize
= sizeremain
;
1770 if (chunksize
> bmtd
.blk_size
)
1771 chunksize
= bmtd
.blk_size
;
1773 /* Assume block with ECC error has no info table data */
1774 ret
= nmbn_read_data(ni
, ba2addr(ni
, ba
), off
, chunksize
);
1776 goto skip_bad_block
;
1781 success
= nmbm_check_info_table_header(ni
, off
);
1790 sizeremain
-= chunksize
;
1795 /* Only mark bad in memory */
1796 nmbm_set_block_state(ni
, ba
, BLOCK_ST_BAD
);
1805 success
= nmbm_check_header(ni
->info_table_cache
, ni
->info_table_size
);
1810 *write_count
= ifthdr
->write_count
;
1812 success
= nmbm_check_info_table(ni
, start_ba
, ba
, ni
->info_table_cache
,
1813 mapping_blocks_top_ba
);
1817 if (!table_loaded
|| ifthdr
->write_count
> ni
->info_table
.write_count
) {
1818 memcpy(&ni
->info_table
, ifthdr
, sizeof(ni
->info_table
));
1819 memcpy(ni
->block_state
,
1820 (uint8_t *)ifthdr
+ ifthdr
->state_table_off
,
1821 ni
->state_table_size
);
1822 memcpy(ni
->block_mapping
,
1823 (uint8_t *)ifthdr
+ ifthdr
->mapping_table_off
,
1824 ni
->mapping_table_size
);
1825 ni
->info_table
.write_count
= ifthdr
->write_count
;
1832 * nmbm_search_info_table - Search info table from specific address
1833 * @ni: NMBM instance structure
1834 * @ba: start block address to search
1835 * @limit: highest block address allowed for searching
1836 * @table_start_ba: return the start block address of this table
1837 * @table_end_ba: return the block address after end of this table
1838 * @write_count: return the write count of this table
1839 * @mapping_blocks_top_ba: return the block address of top remapped block
1840 * @table_loaded: used to record whether ni->info_table has valid data
1842 static bool nmbm_search_info_table(struct nmbm_instance
*ni
, uint32_t ba
,
1843 uint32_t limit
, uint32_t *table_start_ba
,
1844 uint32_t *table_end_ba
,
1845 uint32_t *write_count
,
1846 uint32_t *mapping_blocks_top_ba
,
1851 while (ba
< limit
- size2blk(ni
, ni
->info_table_size
)) {
1852 success
= nmbm_try_load_info_table(ni
, ba
, table_end_ba
,
1854 mapping_blocks_top_ba
,
1857 *table_start_ba
= ba
;
1868 * nmbm_load_info_table - Load info table(s) from a chip
1869 * @ni: NMBM instance structure
1870 * @ba: start block address to search info table
1871 * @limit: highest block address allowed for searching
1873 static bool nmbm_load_info_table(struct nmbm_instance
*ni
, uint32_t ba
,
1876 uint32_t main_table_end_ba
, backup_table_end_ba
, table_end_ba
;
1877 uint32_t main_mapping_blocks_top_ba
, backup_mapping_blocks_top_ba
;
1878 uint32_t main_table_write_count
, backup_table_write_count
;
1882 /* Set initial value */
1883 ni
->main_table_ba
= 0;
1884 ni
->backup_table_ba
= 0;
1885 ni
->info_table
.write_count
= 0;
1886 ni
->mapping_blocks_top_ba
= ni
->signature_ba
- 1;
1887 ni
->data_block_count
= ni
->signature
.mgmt_start_pb
;
1889 /* Find first info table */
1890 success
= nmbm_search_info_table(ni
, ba
, limit
, &ni
->main_table_ba
,
1891 &main_table_end_ba
, &main_table_write_count
,
1892 &main_mapping_blocks_top_ba
, false);
1894 nlog_warn(ni
, "No valid info table found\n");
1898 table_end_ba
= main_table_end_ba
;
1900 nlog_table_found(ni
, true, main_table_write_count
, ni
->main_table_ba
,
1903 /* Find second info table */
1904 success
= nmbm_search_info_table(ni
, main_table_end_ba
, limit
,
1905 &ni
->backup_table_ba
, &backup_table_end_ba
,
1906 &backup_table_write_count
, &backup_mapping_blocks_top_ba
, true);
1908 nlog_warn(ni
, "Second info table not found\n");
1910 table_end_ba
= backup_table_end_ba
;
1912 nlog_table_found(ni
, false, backup_table_write_count
,
1913 ni
->backup_table_ba
, backup_table_end_ba
);
1916 /* Pick mapping_blocks_top_ba */
1917 if (!ni
->backup_table_ba
) {
1918 ni
->mapping_blocks_top_ba
= main_mapping_blocks_top_ba
;
1920 if (main_table_write_count
>= backup_table_write_count
)
1921 ni
->mapping_blocks_top_ba
= main_mapping_blocks_top_ba
;
1923 ni
->mapping_blocks_top_ba
= backup_mapping_blocks_top_ba
;
1926 /* Set final mapping_blocks_ba */
1927 ni
->mapping_blocks_ba
= table_end_ba
;
1929 /* Set final data_block_count */
1930 for (i
= ni
->signature
.mgmt_start_pb
; i
> 0; i
--) {
1931 if (ni
->block_mapping
[i
- 1] >= 0) {
1932 ni
->data_block_count
= i
;
1937 /* Regenerate the info table cache from the final selected info table */
1938 nmbm_generate_info_table_cache(ni
);
1941 * If only one table exists, try to write another table.
1942 * If two tables have different write count, try to update info table
1944 if (!ni
->backup_table_ba
) {
1945 success
= nmbm_rescue_single_info_table(ni
);
1946 } else if (main_table_write_count
!= backup_table_write_count
) {
1947 /* Mark state & mapping tables changed */
1948 ni
->block_state_changed
= 1;
1949 ni
->block_mapping_changed
= 1;
1951 success
= nmbm_update_single_info_table(ni
,
1952 main_table_write_count
< backup_table_write_count
);
1958 * If there is no spare unmapped blocks, or still only one table
1959 * exists, set the chip to read-only
1961 if (ni
->mapping_blocks_ba
== ni
->mapping_blocks_top_ba
) {
1962 nlog_warn(ni
, "No spare unmapped blocks. Device is now read-only\n");
1964 } else if (!success
) {
1965 nlog_warn(ni
, "Only one info table found. Device is now read-only\n");
1973 * nmbm_load_existing - Load NMBM from a new chip
1974 * @ni: NMBM instance structure
1976 static bool nmbm_load_existing(struct nmbm_instance
*ni
)
1980 /* Calculate the boundary of management blocks */
1981 ni
->mgmt_start_ba
= ni
->signature
.mgmt_start_pb
;
1983 nlog_debug(ni
, "NMBM management region starts at block %u [0x%08llx]\n",
1984 ni
->mgmt_start_ba
, ba2addr(ni
, ni
->mgmt_start_ba
));
1986 /* Look for info table(s) */
1987 success
= nmbm_load_info_table(ni
, ni
->mgmt_start_ba
,
1990 nlog_info(ni
, "NMBM has been successfully attached\n");
1994 if (!ni
->force_create
) {
1995 printk("not creating NMBM table\n");
1999 /* Fill block state table & mapping table */
2000 nmbm_scan_badblocks(ni
);
2001 nmbm_build_mapping_table(ni
);
2003 /* Write info table(s) */
2004 success
= nmbm_create_info_table(ni
);
2006 nlog_info(ni
, "NMBM has been successfully created\n");
2014 * nmbm_find_signature - Find signature in the lower NAND chip
2015 * @ni: NMBM instance structure
2016 * @signature_ba: used for storing block address of the signature
2017 * @signature_ba: return the actual block address of signature block
2019 * Find a valid signature from a specific range in the lower NAND chip,
2020 * from bottom (highest address) to top (lowest address)
2022 * Return true if found.
2024 static bool nmbm_find_signature(struct nmbm_instance
*ni
,
2025 struct nmbm_signature
*signature
,
2026 uint32_t *signature_ba
)
2028 struct nmbm_signature sig
;
2030 uint32_t block_count
, ba
, limit
;
2034 /* Calculate top and bottom block address */
2035 block_count
= bmtd
.total_blks
;
2037 limit
= (block_count
/ NMBM_MGMT_DIV
) * (NMBM_MGMT_DIV
- ni
->max_ratio
);
2038 if (ni
->max_reserved_blocks
&& block_count
- limit
> ni
->max_reserved_blocks
)
2039 limit
= block_count
- ni
->max_reserved_blocks
;
2041 while (ba
>= limit
) {
2043 addr
= ba2addr(ni
, ba
);
2045 if (nmbm_check_bad_phys_block(ni
, ba
))
2048 /* Check every page.
2049 * As long as at leaset one page contains valid signature,
2050 * the block is treated as a valid signature block.
2052 for (off
= 0; off
< bmtd
.blk_size
;
2053 off
+= bmtd
.pg_size
) {
2054 ret
= nmbn_read_data(ni
, addr
+ off
, &sig
,
2059 /* Check for header size and checksum */
2060 success
= nmbm_check_header(&sig
, sizeof(sig
));
2064 /* Check for header magic */
2065 if (sig
.header
.magic
== NMBM_MAGIC_SIGNATURE
) {
2067 memcpy(signature
, &sig
, sizeof(sig
));
2078 * nmbm_calc_structure_size - Calculate the instance structure size
2079 * @nld: NMBM lower device structure
2081 static size_t nmbm_calc_structure_size(void)
2083 uint32_t state_table_size
, mapping_table_size
, info_table_size
;
2084 uint32_t block_count
;
2086 block_count
= bmtd
.total_blks
;
2088 /* Calculate info table size */
2089 state_table_size
= ((block_count
+ NMBM_BITMAP_BLOCKS_PER_UNIT
- 1) /
2090 NMBM_BITMAP_BLOCKS_PER_UNIT
) * NMBM_BITMAP_UNIT_SIZE
;
2091 mapping_table_size
= block_count
* sizeof(int32_t);
2093 info_table_size
= ALIGN(sizeof(struct nmbm_info_table_header
),
2095 info_table_size
+= ALIGN(state_table_size
, bmtd
.pg_size
);
2096 info_table_size
+= ALIGN(mapping_table_size
, bmtd
.pg_size
);
2098 return info_table_size
+ state_table_size
+ mapping_table_size
+
2099 sizeof(struct nmbm_instance
);
2103 * nmbm_init_structure - Initialize members of instance structure
2104 * @ni: NMBM instance structure
2106 static void nmbm_init_structure(struct nmbm_instance
*ni
)
2108 uint32_t pages_per_block
, blocks_per_chip
;
2111 pages_per_block
= bmtd
.blk_size
/ bmtd
.pg_size
;
2112 blocks_per_chip
= bmtd
.total_blks
;
2114 ni
->rawpage_size
= bmtd
.pg_size
+ bmtd
.mtd
->oobsize
;
2115 ni
->rawblock_size
= pages_per_block
* ni
->rawpage_size
;
2116 ni
->rawchip_size
= blocks_per_chip
* ni
->rawblock_size
;
2118 /* Calculate number of block this chip */
2119 ni
->block_count
= blocks_per_chip
;
2121 /* Calculate info table size */
2122 ni
->state_table_size
= ((ni
->block_count
+ NMBM_BITMAP_BLOCKS_PER_UNIT
- 1) /
2123 NMBM_BITMAP_BLOCKS_PER_UNIT
) * NMBM_BITMAP_UNIT_SIZE
;
2124 ni
->mapping_table_size
= ni
->block_count
* sizeof(*ni
->block_mapping
);
2126 ni
->info_table_size
= ALIGN(sizeof(ni
->info_table
),
2128 ni
->info_table
.state_table_off
= ni
->info_table_size
;
2130 ni
->info_table_size
+= ALIGN(ni
->state_table_size
,
2132 ni
->info_table
.mapping_table_off
= ni
->info_table_size
;
2134 ni
->info_table_size
+= ALIGN(ni
->mapping_table_size
,
2137 ni
->info_table_spare_blocks
= nmbm_get_spare_block_count(
2138 size2blk(ni
, ni
->info_table_size
));
2140 /* Assign memory to members */
2141 ptr
= (uintptr_t)ni
+ sizeof(*ni
);
2143 ni
->info_table_cache
= (void *)ptr
;
2144 ptr
+= ni
->info_table_size
;
2146 ni
->block_state
= (void *)ptr
;
2147 ptr
+= ni
->state_table_size
;
2149 ni
->block_mapping
= (void *)ptr
;
2150 ptr
+= ni
->mapping_table_size
;
2152 ni
->page_cache
= bmtd
.data_buf
;
2154 /* Initialize block state table */
2155 ni
->block_state_changed
= 0;
2156 memset(ni
->block_state
, 0xff, ni
->state_table_size
);
2158 /* Initialize block mapping table */
2159 ni
->block_mapping_changed
= 0;
2163 * nmbm_attach - Attach to a lower device
2164 * @ni: NMBM instance structure
2166 static int nmbm_attach(struct nmbm_instance
*ni
)
2173 /* Initialize NMBM instance */
2174 nmbm_init_structure(ni
);
2176 success
= nmbm_find_signature(ni
, &ni
->signature
, &ni
->signature_ba
);
2178 if (!ni
->force_create
) {
2179 nlog_err(ni
, "Signature not found\n");
2183 success
= nmbm_create_new(ni
);
2190 nlog_info(ni
, "Signature found at block %u [0x%08llx]\n",
2191 ni
->signature_ba
, ba2addr(ni
, ni
->signature_ba
));
2193 if (ni
->signature
.header
.version
!= NMBM_VER
) {
2194 nlog_err(ni
, "NMBM version %u.%u is not supported\n",
2195 NMBM_VERSION_MAJOR_GET(ni
->signature
.header
.version
),
2196 NMBM_VERSION_MINOR_GET(ni
->signature
.header
.version
));
2200 if (ni
->signature
.nand_size
!= bmtd
.total_blks
<< bmtd
.blk_shift
||
2201 ni
->signature
.block_size
!= bmtd
.blk_size
||
2202 ni
->signature
.page_size
!= bmtd
.pg_size
||
2203 ni
->signature
.spare_size
!= bmtd
.mtd
->oobsize
) {
2204 nlog_err(ni
, "NMBM configuration mismatch\n");
2208 success
= nmbm_load_existing(ni
);
2215 static bool remap_block_nmbm(u16 block
, u16 mapped_block
, int copy_len
)
2217 struct nmbm_instance
*ni
= bmtd
.ni
;
2220 if (block
>= ni
->data_block_count
)
2223 nmbm_set_block_state(ni
, mapped_block
, BLOCK_ST_BAD
);
2224 if (!nmbm_map_block(ni
, block
))
2227 new_block
= ni
->block_mapping
[block
];
2228 bbt_nand_erase(new_block
);
2230 bbt_nand_copy(new_block
, mapped_block
, copy_len
);
2231 nmbm_update_info_table(ni
);
2236 static int get_mapping_block_index_nmbm(int block
)
2238 struct nmbm_instance
*ni
= bmtd
.ni
;
2240 if (block
>= ni
->data_block_count
)
2243 return ni
->block_mapping
[block
];
2246 static int mtk_bmt_init_nmbm(struct device_node
*np
)
2248 struct nmbm_instance
*ni
;
2251 ni
= kzalloc(nmbm_calc_structure_size(), GFP_KERNEL
);
2257 if (of_property_read_u32(np
, "mediatek,bmt-max-ratio", &ni
->max_ratio
))
2259 if (of_property_read_u32(np
, "mediatek,bmt-max-reserved-blocks",
2260 &ni
->max_reserved_blocks
))
2261 ni
->max_reserved_blocks
= 256;
2262 if (of_property_read_bool(np
, "mediatek,empty-page-ecc-protected"))
2263 ni
->empty_page_ecc_ok
= true;
2264 if (of_property_read_bool(np
, "mediatek,bmt-force-create"))
2265 ni
->force_create
= true;
2267 ret
= nmbm_attach(ni
);
2271 bmtd
.mtd
->size
= ni
->data_block_count
<< bmtd
.blk_shift
;
2282 static int mtk_bmt_debug_nmbm(void *data
, u64 val
)
2284 struct nmbm_instance
*ni
= bmtd
.ni
;
2289 for (i
= 1; i
< ni
->data_block_count
; i
++) {
2290 if (ni
->block_mapping
[i
] < ni
->mapping_blocks_ba
)
2293 printk("remap [%x->%x]\n", i
, ni
->block_mapping
[i
]);
2300 static void unmap_block_nmbm(u16 block
)
2302 struct nmbm_instance
*ni
= bmtd
.ni
;
2306 if (block
>= ni
->data_block_count
)
2311 while (ni
->block_mapping
[start
] >= ni
->mapping_blocks_ba
) {
2321 new_block
= ni
->block_mapping
[start
] + offset
;
2322 nmbm_set_block_state(ni
, new_block
, BLOCK_ST_GOOD
);
2323 ni
->block_mapping
[block
] = new_block
;
2324 ni
->block_mapping_changed
++;
2326 new_block
= ni
->signature_ba
- 1;
2327 for (block
= 0; block
< ni
->data_block_count
; block
++) {
2328 int cur
= ni
->block_mapping
[block
];
2330 if (cur
< ni
->mapping_blocks_ba
)
2333 if (cur
<= new_block
)
2334 new_block
= cur
- 1;
2337 ni
->mapping_blocks_top_ba
= new_block
;
2339 nmbm_update_info_table(ni
);
2342 const struct mtk_bmt_ops mtk_bmt_nmbm_ops
= {
2343 .init
= mtk_bmt_init_nmbm
,
2344 .remap_block
= remap_block_nmbm
,
2345 .unmap_block
= unmap_block_nmbm
,
2346 .get_mapping_block
= get_mapping_block_index_nmbm
,
2347 .debug
= mtk_bmt_debug_nmbm
,