kernel: add support for MediaTek NMBM flash mapping
[openwrt/staging/pepe2k.git] / target / linux / generic / files / drivers / mtd / nand / mtk_bmt_nmbm.c
1 #include <linux/crc32.h>
2 #include <linux/slab.h>
3 #include "mtk_bmt.h"
4
/*
 * Logging helpers. @ni is accepted by every logging call for interface
 * compatibility, but is unused here; messages go straight to printk().
 */
#define nlog_err(ni, ...) printk(KERN_ERR __VA_ARGS__)
#define nlog_info(ni, ...) printk(KERN_INFO __VA_ARGS__)
#define nlog_debug(ni, ...) printk(KERN_INFO __VA_ARGS__)
#define nlog_warn(ni, ...) printk(KERN_WARNING __VA_ARGS__)

/* On-flash magic values: little-endian ASCII "NMM0" / "NMM1" */
#define NMBM_MAGIC_SIGNATURE 0x304d4d4e /* NMM0 */
#define NMBM_MAGIC_INFO_TABLE 0x314d4d4e /* NMM1 */

/* Version word layout: major in bits 15..0, minor in bits 31..16 */
#define NMBM_VERSION_MAJOR_S 0
#define NMBM_VERSION_MAJOR_M 0xffff
#define NMBM_VERSION_MINOR_S 16
#define NMBM_VERSION_MINOR_M 0xffff
#define NMBM_VERSION_MAKE(major, minor) (((major) & NMBM_VERSION_MAJOR_M) | \
					 (((minor) & NMBM_VERSION_MINOR_M) << \
					  NMBM_VERSION_MINOR_S))
#define NMBM_VERSION_MAJOR_GET(ver) (((ver) >> NMBM_VERSION_MAJOR_S) & \
				     NMBM_VERSION_MAJOR_M)
#define NMBM_VERSION_MINOR_GET(ver) (((ver) >> NMBM_VERSION_MINOR_S) & \
				     NMBM_VERSION_MINOR_M)

/* Block state bitmap: 2 bits per block, packed into u32 units */
#define NMBM_BITMAP_UNIT_SIZE (sizeof(u32))
#define NMBM_BITMAP_BITS_PER_BLOCK 2
#define NMBM_BITMAP_BITS_PER_UNIT (8 * sizeof(u32))
#define NMBM_BITMAP_BLOCKS_PER_UNIT (NMBM_BITMAP_BITS_PER_UNIT / \
				     NMBM_BITMAP_BITS_PER_BLOCK)

/*
 * Spare-block reservation for the info tables:
 * max(round(block_count / DIV) * MULTI, MIN) — see nmbm_get_spare_block_count()
 */
#define NMBM_SPARE_BLOCK_MULTI 1
#define NMBM_SPARE_BLOCK_DIV 2
#define NMBM_SPARE_BLOCK_MIN 2

/* Management-area sizing parameters (used by setup code outside this chunk) */
#define NMBM_MGMT_DIV 16
#define NMBM_MGMT_BLOCKS_MIN 32

/* Retry limit for physical page read/write and block erase operations */
#define NMBM_TRY_COUNT 3

/* 2-bit block states stored in the block state bitmap */
#define BLOCK_ST_BAD 0
#define BLOCK_ST_NEED_REMAP 2
#define BLOCK_ST_GOOD 3
#define BLOCK_ST_MASK 3

/* Current on-flash format version */
#define NMBM_VER_MAJOR 1
#define NMBM_VER_MINOR 0
#define NMBM_VER NMBM_VERSION_MAKE(NMBM_VER_MAJOR, \
				   NMBM_VER_MINOR)
49
/* Common header at the beginning of every on-flash NMBM structure */
struct nmbm_header {
	u32 magic;	/* NMBM_MAGIC_* identifying the structure */
	u32 version;	/* NMBM_VERSION_MAKE() encoded format version */
	u32 size;	/* total structure size, this header included */
	u32 checksum;	/* CRC32 of the whole structure, computed with this field zeroed */
};
56
/* On-flash signature describing the NAND geometry the tables were built for */
struct nmbm_signature {
	struct nmbm_header header;
	uint64_t nand_size;	/* total chip size in bytes */
	u32 block_size;
	u32 page_size;
	u32 spare_size;		/* OOB size — TODO confirm per-page vs per-block */
	u32 mgmt_start_pb;	/* first physical block of the management area */
	u8 max_try_count;
	u8 padding[3];		/* keep structure size 4-byte aligned */
};
67
/* Header of the on-flash info table (block states + mapping table) */
struct nmbm_info_table_header {
	struct nmbm_header header;
	u32 write_count;	/* incremented each time the table is rewritten */
	u32 state_table_off;	/* offset of the block state bitmap in the table */
	u32 mapping_table_off;	/* offset of the block mapping table in the table */
	u32 padding;
};
75
/* In-memory state for one NMBM-managed NAND chip */
struct nmbm_instance {
	/* Raw geometry; rawpage_size bounds fills of page_cache */
	u32 rawpage_size;
	u32 rawblock_size;
	u32 rawchip_size;

	/* Copy of the on-flash signature */
	struct nmbm_signature signature;

	/* Serialized info table image written to / read from flash */
	u8 *info_table_cache;
	u32 info_table_size;
	u32 info_table_spare_blocks;	/* spare blocks reserved after main table */
	struct nmbm_info_table_header info_table;

	/* Block state bitmap (2 bits per block) and its change counter */
	u32 *block_state;
	u32 block_state_changed;
	u32 state_table_size;

	/* Logical -> physical block mapping (-1 = unmapped) and change counter */
	int32_t *block_mapping;
	u32 block_mapping_changed;
	u32 mapping_table_size;

	/* Scratch buffer holding one raw page */
	u8 *page_cache;

	/* Set when no info table could be written; data can no longer be protected */
	int protected;

	/* Total physical blocks / blocks usable for data mapping */
	u32 block_count;
	u32 data_block_count;

	/* Management-area layout, all in block addresses */
	u32 mgmt_start_ba;
	u32 main_table_ba;
	u32 backup_table_ba;
	u32 mapping_blocks_ba;
	u32 mapping_blocks_top_ba;
	u32 signature_ba;

	/* Configuration limits and feature flags */
	u32 max_ratio;
	u32 max_reserved_blocks;
	bool empty_page_ecc_ok;	/* erased pages still read back without ECC error */
	bool force_create;
};
115
116 static inline u32 nmbm_crc32(u32 crcval, const void *buf, size_t size)
117 {
118 unsigned int chksz;
119 const unsigned char *p = buf;
120
121 while (size) {
122 if (size > UINT_MAX)
123 chksz = UINT_MAX;
124 else
125 chksz = (uint)size;
126
127 crcval = crc32_le(crcval, p, chksz);
128 size -= chksz;
129 p += chksz;
130 }
131
132 return crcval;
133 }
/*
 * nlog_table_creation - Print log of table creation event
 * @ni: NMBM instance structure
 * @main_table: whether the table is main info table
 * @start_ba: start block address of the table
 * @end_ba: block address after the end of the table
 */
static void nlog_table_creation(struct nmbm_instance *ni, bool main_table,
				uint32_t start_ba, uint32_t end_ba)
{
	const char *name = main_table ? "Main" : "Backup";

	/* Single-block tables get the short form of the message */
	if (end_ba - start_ba == 1)
		nlog_info(ni, "%s info table has been written to block %u\n",
			  name, start_ba);
	else
		nlog_info(ni, "%s info table has been written to block %u-%u\n",
			  name, start_ba, end_ba - 1);
}
151
/*
 * nlog_table_update - Print log of table update event
 * @ni: NMBM instance structure
 * @main_table: whether the table is main info table
 * @start_ba: start block address of the table
 * @end_ba: block address after the end of the table
 */
static void nlog_table_update(struct nmbm_instance *ni, bool main_table,
			      uint32_t start_ba, uint32_t end_ba)
{
	const char *name = main_table ? "Main" : "Backup";

	/* Single-block tables get the short form of the message */
	if (end_ba - start_ba == 1)
		nlog_debug(ni, "%s info table has been updated in block %u\n",
			   name, start_ba);
	else
		nlog_debug(ni, "%s info table has been updated in block %u-%u\n",
			   name, start_ba, end_ba - 1);
}
169
/*
 * nlog_table_found - Print log of table found event
 * @ni: NMBM instance structure
 * @first_table: whether the table is first found info table
 * @write_count: write count of the info table
 * @start_ba: start block address of the table
 * @end_ba: block address after the end of the table
 */
static void nlog_table_found(struct nmbm_instance *ni, bool first_table,
			     uint32_t write_count, uint32_t start_ba,
			     uint32_t end_ba)
{
	const char *name = first_table ? "First" : "Second";

	/* Single-block tables get the short form of the message */
	if (end_ba - start_ba == 1)
		nlog_info(ni, "%s info table with writecount %u found in block %u\n",
			  name, write_count, start_ba);
	else
		nlog_info(ni, "%s info table with writecount %u found in block %u-%u\n",
			  name, write_count, start_ba, end_ba - 1);
}
191
192 /*****************************************************************************/
193 /* Address conversion functions */
194 /*****************************************************************************/
195
/*
 * ba2addr - Convert a block address to linear address
 * @ni: NMBM instance structure (unused; kept for API symmetry)
 * @ba: Block address
 *
 * Returns the byte offset of block @ba on the lower NAND chip.
 */
static uint64_t ba2addr(struct nmbm_instance *ni, uint32_t ba)
{
	/* Widen before shifting so large block addresses do not overflow */
	return (uint64_t)ba << bmtd.blk_shift;
}
205 /*
206 * size2blk - Get minimum required blocks for storing specific size of data
207 * @ni: NMBM instance structure
208 * @size: size for storing
209 */
210 static uint32_t size2blk(struct nmbm_instance *ni, uint64_t size)
211 {
212 return (size + bmtd.blk_size - 1) >> bmtd.blk_shift;
213 }
214
215 /*****************************************************************************/
216 /* High level NAND chip APIs */
217 /*****************************************************************************/
218
/*
 * nmbm_read_phys_page - Read page with retry
 * @ni: NMBM instance structure
 * @addr: linear address where the data will be read from
 * @data: the main data to be read (may be NULL)
 * @oob: the oob data to be read (may be NULL)
 *
 * Read a page for at most NMBM_TRY_COUNT times.
 *
 * Return 0 for success, positive value for corrected bitflip count,
 * -EBADMSG for ecc error, other negative values for other errors
 */
static int nmbm_read_phys_page(struct nmbm_instance *ni, uint64_t addr,
			       void *data, void *oob)
{
	int tries, ret;

	for (tries = 0; tries < NMBM_TRY_COUNT; tries++) {
		struct mtd_oob_ops ops = {
			.mode = MTD_OPS_PLACE_OOB,
			.oobbuf = oob,
			.datbuf = data,
		};

		/* Only request the regions the caller supplied buffers for */
		if (data)
			ops.len = bmtd.pg_size;
		if (oob)
			ops.ooblen = mtd_oobavail(bmtd.mtd, &ops);

		ret = bmtd._read_oob(bmtd.mtd, addr, &ops);
		if (ret == -EUCLEAN)
			/*
			 * Data recovered with bitflips: report a count just
			 * above the bitflip threshold (capped at the ECC
			 * strength) so callers can decide to remap the block.
			 */
			return min_t(u32, bmtd.mtd->bitflip_threshold + 1,
				     bmtd.mtd->ecc_strength);
		if (ret >= 0)
			return 0;
	}

	/* -EBADMSG (uncorrectable ECC) is an expected outcome; stay quiet */
	if (ret != -EBADMSG)
		nlog_err(ni, "Page read failed at address 0x%08llx\n", addr);

	return ret;
}
261
/*
 * nmbm_write_phys_page - Write page with retry
 * @ni: NMBM instance structure
 * @addr: linear address where the data will be written to
 * @data: the main data to be written (may be NULL)
 * @oob: the oob data to be written (may be NULL)
 *
 * Write a page for at most NMBM_TRY_COUNT times.
 *
 * Returns true on success, false after all retries failed.
 */
static bool nmbm_write_phys_page(struct nmbm_instance *ni, uint64_t addr,
				 const void *data, const void *oob)
{
	int tries, ret;

	for (tries = 0; tries < NMBM_TRY_COUNT; tries++) {
		struct mtd_oob_ops ops = {
			.mode = MTD_OPS_PLACE_OOB,
			/* mtd_oob_ops has non-const buffers; drop const here */
			.oobbuf = (void *)oob,
			.datbuf = (void *)data,
		};

		/* Only write the regions the caller supplied buffers for */
		if (data)
			ops.len = bmtd.pg_size;
		if (oob)
			ops.ooblen = mtd_oobavail(bmtd.mtd, &ops);

		ret = bmtd._write_oob(bmtd.mtd, addr, &ops);
		if (!ret)
			return true;
	}

	nlog_err(ni, "Page write failed at address 0x%08llx\n", addr);

	return false;
}
297
/*
 * nmbm_erase_phys_block - Erase a block with retry
 * @ni: NMBM instance structure
 * @addr: Linear address
 *
 * Erase a block for at most NMBM_TRY_COUNT times.
 *
 * Returns true on success, false after all retries failed.
 */
static bool nmbm_erase_phys_block(struct nmbm_instance *ni, uint64_t addr)
{
	int tries, ret;

	for (tries = 0; tries < NMBM_TRY_COUNT; tries++) {
		struct erase_info ei = {
			.addr = addr,
			.len = bmtd.mtd->erasesize,
		};

		ret = bmtd._erase(bmtd.mtd, &ei);
		if (!ret)
			return true;
	}

	nlog_err(ni, "Block erasure failed at address 0x%08llx\n", addr);

	return false;
}
324
/*
 * nmbm_check_bad_phys_block - Check whether a block is marked bad in OOB
 * @ni: NMBM instance structure
 * @ba: block address
 *
 * Thin wrapper around the lower driver's bad-block check.
 */
static bool nmbm_check_bad_phys_block(struct nmbm_instance *ni, uint32_t ba)
{
	uint64_t addr = ba2addr(ni, ba);

	return bmtd._block_isbad(bmtd.mtd, addr);
}
336
/*
 * nmbm_mark_phys_bad_block - Mark a block bad
 * @ni: NMBM instance structure
 * @ba: block address to be marked bad
 *
 * Returns the lower driver's status code (0 on success).
 */
static int nmbm_mark_phys_bad_block(struct nmbm_instance *ni, uint32_t ba)
{
	uint64_t addr = ba2addr(ni, ba);

	nlog_info(ni, "Block %u [0x%08llx] will be marked bad\n", ba, addr);

	return bmtd._block_markbad(bmtd.mtd, addr);
}
350
351 /*****************************************************************************/
352 /* NMBM related functions */
353 /*****************************************************************************/
354
355 /*
356 * nmbm_check_header - Check whether a NMBM structure is valid
357 * @data: pointer to a NMBM structure with a NMBM header at beginning
358 * @size: Size of the buffer pointed by @header
359 *
360 * The size of the NMBM structure may be larger than NMBM header,
361 * e.g. block mapping table and block state table.
362 */
363 static bool nmbm_check_header(const void *data, uint32_t size)
364 {
365 const struct nmbm_header *header = data;
366 struct nmbm_header nhdr;
367 uint32_t new_checksum;
368
369 /*
370 * Make sure expected structure size is equal or smaller than
371 * buffer size.
372 */
373 if (header->size > size)
374 return false;
375
376 memcpy(&nhdr, data, sizeof(nhdr));
377
378 nhdr.checksum = 0;
379 new_checksum = nmbm_crc32(0, &nhdr, sizeof(nhdr));
380 if (header->size > sizeof(nhdr))
381 new_checksum = nmbm_crc32(new_checksum,
382 (const uint8_t *)data + sizeof(nhdr),
383 header->size - sizeof(nhdr));
384
385 if (header->checksum != new_checksum)
386 return false;
387
388 return true;
389 }
390
/*
 * nmbm_update_checksum - Update checksum of a NMBM structure
 * @header: pointer to a NMBM structure with a NMBM header at beginning
 *
 * The size of the NMBM structure must be specified by @header->size
 */
static void nmbm_update_checksum(struct nmbm_header *header)
{
	/* Zero the field first so the CRC covers a deterministic value */
	header->checksum = 0;
	header->checksum = nmbm_crc32(0, header, header->size);
}
402
403 /*
404 * nmbm_get_spare_block_count - Calculate number of blocks should be reserved
405 * @block_count: number of blocks of data
406 *
407 * Calculate number of blocks should be reserved for data
408 */
409 static uint32_t nmbm_get_spare_block_count(uint32_t block_count)
410 {
411 uint32_t val;
412
413 val = (block_count + NMBM_SPARE_BLOCK_DIV / 2) / NMBM_SPARE_BLOCK_DIV;
414 val *= NMBM_SPARE_BLOCK_MULTI;
415
416 if (val < NMBM_SPARE_BLOCK_MIN)
417 val = NMBM_SPARE_BLOCK_MIN;
418
419 return val;
420 }
421
422 /*
423 * nmbm_get_block_state_raw - Get state of a block from raw block state table
424 * @block_state: pointer to raw block state table (bitmap)
425 * @ba: block address
426 */
427 static uint32_t nmbm_get_block_state_raw(u32 *block_state,
428 uint32_t ba)
429 {
430 uint32_t unit, shift;
431
432 unit = ba / NMBM_BITMAP_BLOCKS_PER_UNIT;
433 shift = (ba % NMBM_BITMAP_BLOCKS_PER_UNIT) * NMBM_BITMAP_BITS_PER_BLOCK;
434
435 return (block_state[unit] >> shift) & BLOCK_ST_MASK;
436 }
437
/*
 * nmbm_get_block_state - Get state of a block from block state table
 * @ni: NMBM instance structure
 * @ba: block address
 *
 * Returns one of the BLOCK_ST_* values.
 */
static uint32_t nmbm_get_block_state(struct nmbm_instance *ni, uint32_t ba)
{
	return nmbm_get_block_state_raw(ni->block_state, ba);
}
447
448 /*
449 * nmbm_set_block_state - Set state of a block to block state table
450 * @ni: NMBM instance structure
451 * @ba: block address
452 * @state: block state
453 *
454 * Set state of a block. If the block state changed, ni->block_state_changed
455 * will be increased.
456 */
457 static bool nmbm_set_block_state(struct nmbm_instance *ni, uint32_t ba,
458 uint32_t state)
459 {
460 uint32_t unit, shift, orig;
461 u32 uv;
462
463 unit = ba / NMBM_BITMAP_BLOCKS_PER_UNIT;
464 shift = (ba % NMBM_BITMAP_BLOCKS_PER_UNIT) * NMBM_BITMAP_BITS_PER_BLOCK;
465
466 orig = (ni->block_state[unit] >> shift) & BLOCK_ST_MASK;
467 state &= BLOCK_ST_MASK;
468
469 uv = ni->block_state[unit] & (~(BLOCK_ST_MASK << shift));
470 uv |= state << shift;
471 ni->block_state[unit] = uv;
472
473 if (orig != state) {
474 ni->block_state_changed++;
475 return true;
476 }
477
478 return false;
479 }
480
481 /*
482 * nmbm_block_walk_asc - Skip specified number of good blocks, ascending addr.
483 * @ni: NMBM instance structure
484 * @ba: start physical block address
485 * @nba: return physical block address after walk
486 * @count: number of good blocks to be skipped
487 * @limit: highest block address allowed for walking
488 *
489 * Start from @ba, skipping any bad blocks, counting @count good blocks, and
490 * return the next good block address.
491 *
492 * If no enough good blocks counted while @limit reached, false will be returned.
493 *
494 * If @count == 0, nearest good block address will be returned.
495 * @limit is not counted in walking.
496 */
497 static bool nmbm_block_walk_asc(struct nmbm_instance *ni, uint32_t ba,
498 uint32_t *nba, uint32_t count,
499 uint32_t limit)
500 {
501 int32_t nblock = count;
502
503 if (limit >= ni->block_count)
504 limit = ni->block_count - 1;
505
506 while (ba < limit) {
507 if (nmbm_get_block_state(ni, ba) == BLOCK_ST_GOOD)
508 nblock--;
509
510 if (nblock < 0) {
511 *nba = ba;
512 return true;
513 }
514
515 ba++;
516 }
517
518 return false;
519 }
520
521 /*
522 * nmbm_block_walk_desc - Skip specified number of good blocks, descending addr
523 * @ni: NMBM instance structure
524 * @ba: start physical block address
525 * @nba: return physical block address after walk
526 * @count: number of good blocks to be skipped
527 * @limit: lowest block address allowed for walking
528 *
529 * Start from @ba, skipping any bad blocks, counting @count good blocks, and
530 * return the next good block address.
531 *
532 * If no enough good blocks counted while @limit reached, false will be returned.
533 *
534 * If @count == 0, nearest good block address will be returned.
535 * @limit is not counted in walking.
536 */
537 static bool nmbm_block_walk_desc(struct nmbm_instance *ni, uint32_t ba,
538 uint32_t *nba, uint32_t count, uint32_t limit)
539 {
540 int32_t nblock = count;
541
542 if (limit >= ni->block_count)
543 limit = ni->block_count - 1;
544
545 while (ba > limit) {
546 if (nmbm_get_block_state(ni, ba) == BLOCK_ST_GOOD)
547 nblock--;
548
549 if (nblock < 0) {
550 *nba = ba;
551 return true;
552 }
553
554 ba--;
555 }
556
557 return false;
558 }
559
/*
 * nmbm_block_walk - Skip specified number of good blocks from curr. block addr
 * @ni: NMBM instance structure
 * @ascending: whether to walk ascending
 * @ba: start physical block address
 * @nba: return physical block address after walk
 * @count: number of good blocks to be skipped
 * @limit: highest/lowest block address allowed for walking
 *
 * Start from @ba, skipping any bad blocks, counting @count good blocks, and
 * return the next good block address.
 *
 * If no enough good blocks counted while @limit reached, false will be returned.
 *
 * If @count == 0, nearest good block address will be returned.
 * @limit can be set to negative if no limit required.
 * @limit is not counted in walking.
 */
static bool nmbm_block_walk(struct nmbm_instance *ni, bool ascending,
			    uint32_t ba, uint32_t *nba, int32_t count,
			    int32_t limit)
{
	/* Dispatch to the direction-specific walker */
	if (!ascending)
		return nmbm_block_walk_desc(ni, ba, nba, count, limit);

	return nmbm_block_walk_asc(ni, ba, nba, count, limit);
}
587
588 /*
589 * nmbm_scan_badblocks - Scan and record all bad blocks
590 * @ni: NMBM instance structure
591 *
592 * Scan the entire lower NAND chip and record all bad blocks in to block state
593 * table.
594 */
595 static void nmbm_scan_badblocks(struct nmbm_instance *ni)
596 {
597 uint32_t ba;
598
599 for (ba = 0; ba < ni->block_count; ba++) {
600 if (nmbm_check_bad_phys_block(ni, ba)) {
601 nmbm_set_block_state(ni, ba, BLOCK_ST_BAD);
602 nlog_info(ni, "Bad block %u [0x%08llx]\n", ba,
603 ba2addr(ni, ba));
604 }
605 }
606 }
607
/*
 * nmbm_build_mapping_table - Build initial block mapping table
 * @ni: NMBM instance structure
 *
 * The initial mapping table will be compatible with the strategy of
 * factory production: each logical block maps to the next good physical
 * block below the management area.
 */
static void nmbm_build_mapping_table(struct nmbm_instance *ni)
{
	uint32_t pb, lb;

	for (pb = 0, lb = 0; pb < ni->mgmt_start_ba; pb++) {
		if (nmbm_get_block_state(ni, pb) == BLOCK_ST_BAD)
			continue;

		/* Always map to the next good block */
		ni->block_mapping[lb++] = pb;
	}

	/* Only the good blocks below mgmt_start_ba carry data */
	ni->data_block_count = lb;

	/* Unusable/Management blocks */
	for (pb = lb; pb < ni->block_count; pb++)
		ni->block_mapping[pb] = -1;
}
633
/*
 * nmbm_erase_block_and_check - Erase a block and check its usability
 * @ni: NMBM instance structure
 * @ba: block address to be erased
 *
 * Erase a block and check its usability
 *
 * Return true if the block is usable, false if erasure failure or the block
 * has too many bitflips.
 */
static bool nmbm_erase_block_and_check(struct nmbm_instance *ni, uint32_t ba)
{
	uint64_t addr, off;
	bool success;
	int ret;

	success = nmbm_erase_phys_block(ni, ba2addr(ni, ba));
	if (!success)
		return false;

	/* Without ECC-clean empty pages, a successful erase is all we can check */
	if (!ni->empty_page_ecc_ok)
		return true;

	/* Check every page to make sure there aren't too many bitflips */

	addr = ba2addr(ni, ba);

	for (off = 0; off < bmtd.blk_size; off += bmtd.pg_size) {
		ret = nmbm_read_phys_page(ni, addr + off, ni->page_cache, NULL);
		if (ret == -EBADMSG) {
			/*
			 * empty_page_ecc_ok means the empty page is
			 * still protected by ECC. So reading pages with ECC
			 * enabled and -EBADMSG means there are too many
			 * bitflips that can't be recovered, and the block
			 * containing the page should be marked bad.
			 */
			nlog_err(ni,
				 "Too many bitflips in empty page at 0x%llx\n",
				 addr + off);
			return false;
		}
	}

	return true;
}
680
/*
 * nmbm_erase_range - Erase a range of blocks
 * @ni: NMBM instance structure
 * @ba: block address where the erasure will start
 * @limit: top block address allowed for erasure (exclusive)
 *
 * Erase blocks within the specific range. Newly-found bad blocks will be
 * marked.
 *
 * @limit is not counted into the allowed erasure address.
 */
static void nmbm_erase_range(struct nmbm_instance *ni, uint32_t ba,
			     uint32_t limit)
{
	bool success;

	while (ba < limit) {
		/* Blocks already known bad (or needing remap) are left alone */
		if (nmbm_get_block_state(ni, ba) != BLOCK_ST_GOOD)
			goto next_block;

		/* Insurance to detect unexpected bad block marked by user */
		if (nmbm_check_bad_phys_block(ni, ba)) {
			nmbm_set_block_state(ni, ba, BLOCK_ST_BAD);
			goto next_block;
		}

		success = nmbm_erase_block_and_check(ni, ba);
		if (success)
			goto next_block;

		/* Erase failed: record the block as bad in OOB and in the table */
		nmbm_mark_phys_bad_block(ni, ba);
		nmbm_set_block_state(ni, ba, BLOCK_ST_BAD);

	next_block:
		ba++;
	}
}
718
/*
 * nmbm_write_repeated_data - Write critical data to a block with retry
 * @ni: NMBM instance structure
 * @ba: block address where the data will be written to
 * @data: the data to be written
 * @size: size of the data
 *
 * Write data to every page of the block. Success only if all pages within
 * this block have been successfully written.
 *
 * Make sure data size is not bigger than one page.
 *
 * This function will write and verify every page for at most
 * NMBM_TRY_COUNT times.
 */
static bool nmbm_write_repeated_data(struct nmbm_instance *ni, uint32_t ba,
				     const void *data, uint32_t size)
{
	uint64_t addr, off;
	bool success;
	int ret;

	/* The payload must fit into a single page */
	if (size > bmtd.pg_size)
		return false;

	addr = ba2addr(ni, ba);

	for (off = 0; off < bmtd.blk_size; off += bmtd.pg_size) {
		/* Prepare page data. fill 0xff to unused region */
		memcpy(ni->page_cache, data, size);
		memset(ni->page_cache + size, 0xff, ni->rawpage_size - size);

		success = nmbm_write_phys_page(ni, addr + off, ni->page_cache, NULL);
		if (!success)
			return false;

		/* Verify the data just written. ECC error indicates failure */
		ret = nmbm_read_phys_page(ni, addr + off, ni->page_cache, NULL);
		if (ret < 0)
			return false;

		if (memcmp(ni->page_cache, data, size))
			return false;
	}

	return true;
}
766
/*
 * nmbm_write_signature - Write signature to NAND chip
 * @ni: NMBM instance structure
 * @limit: top block address allowed for writing
 * @signature: the signature to be written
 * @signature_ba: the actual block address where signature is written to
 *
 * Write signature within a specific range, from chip bottom to limit.
 * At most one block will be written.
 *
 * @limit is not counted into the allowed write address.
 */
static bool nmbm_write_signature(struct nmbm_instance *ni, uint32_t limit,
				 const struct nmbm_signature *signature,
				 uint32_t *signature_ba)
{
	/* Walk downwards from the last block of the chip */
	uint32_t ba = ni->block_count - 1;
	bool success;

	while (ba > limit) {
		if (nmbm_get_block_state(ni, ba) != BLOCK_ST_GOOD)
			goto next_block;

		/* Insurance to detect unexpected bad block marked by user */
		if (nmbm_check_bad_phys_block(ni, ba)) {
			nmbm_set_block_state(ni, ba, BLOCK_ST_BAD);
			goto next_block;
		}

		success = nmbm_erase_block_and_check(ni, ba);
		if (!success)
			goto skip_bad_block;

		success = nmbm_write_repeated_data(ni, ba, signature,
						   sizeof(*signature));
		if (success) {
			*signature_ba = ba;
			return true;
		}

	skip_bad_block:
		/* Erase or write failed: record the block as bad, keep walking */
		nmbm_mark_phys_bad_block(ni, ba);
		nmbm_set_block_state(ni, ba, BLOCK_ST_BAD);

	next_block:
		ba--;
	};

	return false;
}
817
/*
 * nmbn_read_data - Read data
 * @ni: NMBM instance structure
 * @addr: linear address where the data will be read from
 * @data: the data to be read
 * @size: the size of data
 *
 * Read data range, handling reads that do not start or end on a page
 * boundary via the instance page cache.
 * Every page will be tried for at most NMBM_TRY_COUNT times.
 *
 * Return 0 for success, positive value for corrected bitflip count,
 * -EBADMSG for ecc error, other negative values for other errors
 */
static int nmbn_read_data(struct nmbm_instance *ni, uint64_t addr, void *data,
			  uint32_t size)
{
	uint64_t off = addr;
	uint8_t *ptr = data;
	uint32_t sizeremain = size, chunksize, leading;
	int ret;

	while (sizeremain) {
		/* Offset of this read inside its page, and bytes up to page end */
		leading = off & (bmtd.pg_size - 1);
		chunksize = bmtd.pg_size - leading;
		if (chunksize > sizeremain)
			chunksize = sizeremain;

		if (chunksize == bmtd.pg_size) {
			/* Whole aligned page: read directly into the caller buffer */
			ret = nmbm_read_phys_page(ni, off - leading, ptr, NULL);
			if (ret < 0)
				return ret;
		} else {
			/* Partial page: bounce through the page cache */
			ret = nmbm_read_phys_page(ni, off - leading,
						  ni->page_cache, NULL);
			if (ret < 0)
				return ret;

			memcpy(ptr, ni->page_cache + leading, chunksize);
		}

		off += chunksize;
		ptr += chunksize;
		sizeremain -= chunksize;
	}

	return 0;
}
865
/*
 * nmbn_write_verify_data - Write data with validation
 * @ni: NMBM instance structure
 * @addr: linear address where the data will be written to
 * @data: the data to be written
 * @size: the size of data
 *
 * Write data and verify by reading back every page.
 * Every page will be tried for at most NMBM_TRY_COUNT times.
 */
static bool nmbn_write_verify_data(struct nmbm_instance *ni, uint64_t addr,
				   const void *data, uint32_t size)
{
	uint64_t off = addr;
	const uint8_t *ptr = data;
	uint32_t sizeremain = size, chunksize, leading;
	bool success;
	int ret;

	while (sizeremain) {
		/* Offset of this write inside its page, and bytes up to page end */
		leading = off & (bmtd.pg_size - 1);
		chunksize = bmtd.pg_size - leading;
		if (chunksize > sizeremain)
			chunksize = sizeremain;

		/* Prepare page data. fill 0xff to unused region */
		memset(ni->page_cache, 0xff, ni->rawpage_size);
		memcpy(ni->page_cache + leading, ptr, chunksize);

		success = nmbm_write_phys_page(ni, off - leading,
					       ni->page_cache, NULL);
		if (!success)
			return false;

		/* Verify the data just written. ECC error indicates failure */
		ret = nmbm_read_phys_page(ni, off - leading, ni->page_cache, NULL);
		if (ret < 0)
			return false;

		if (memcmp(ni->page_cache + leading, ptr, chunksize))
			return false;

		off += chunksize;
		ptr += chunksize;
		sizeremain -= chunksize;
	}

	return true;
}
915
/*
 * nmbm_write_mgmt_range - Write management data into NAND within a range
 * @ni: NMBM instance structure
 * @ba: preferred start block address for writing
 * @limit: highest block address allowed for writing
 * @data: the data to be written
 * @size: the size of data
 * @actual_start_ba: actual start block address of data
 * @actual_end_ba: block address after the end of data
 *
 * Writes @data block by block, skipping bad blocks and marking newly
 * discovered bad blocks along the way.
 *
 * @limit is not counted into the allowed write address.
 */
static bool nmbm_write_mgmt_range(struct nmbm_instance *ni, uint32_t ba,
				  uint32_t limit, const void *data,
				  uint32_t size, uint32_t *actual_start_ba,
				  uint32_t *actual_end_ba)
{
	const uint8_t *ptr = data;
	uint32_t sizeremain = size, chunksize;
	bool success;

	while (sizeremain && ba < limit) {
		/* Write at most one erase block per iteration */
		chunksize = sizeremain;
		if (chunksize > bmtd.blk_size)
			chunksize = bmtd.blk_size;

		if (nmbm_get_block_state(ni, ba) != BLOCK_ST_GOOD)
			goto next_block;

		/* Insurance to detect unexpected bad block marked by user */
		if (nmbm_check_bad_phys_block(ni, ba)) {
			nmbm_set_block_state(ni, ba, BLOCK_ST_BAD);
			goto next_block;
		}

		success = nmbm_erase_block_and_check(ni, ba);
		if (!success)
			goto skip_bad_block;

		success = nmbn_write_verify_data(ni, ba2addr(ni, ba), ptr,
						 chunksize);
		if (!success)
			goto skip_bad_block;

		/* Record the first block that actually took data */
		if (sizeremain == size)
			*actual_start_ba = ba;

		ptr += chunksize;
		sizeremain -= chunksize;

		goto next_block;

	skip_bad_block:
		nmbm_mark_phys_bad_block(ni, ba);
		nmbm_set_block_state(ni, ba, BLOCK_ST_BAD);

	next_block:
		ba++;
	}

	/* Ran out of range before all data was written */
	if (sizeremain)
		return false;

	*actual_end_ba = ba;

	return true;
}
983
/*
 * nmbm_generate_info_table_cache - Generate info table cache data
 * @ni: NMBM instance structure
 *
 * Generate info table cache data to be written into flash.
 *
 * Returns true if the table content changed since the last generation
 * (i.e. block state or mapping was modified), false otherwise.
 */
static bool nmbm_generate_info_table_cache(struct nmbm_instance *ni)
{
	bool changed = false;

	/* Unused regions of the image stay 0xff (erased-flash value) */
	memset(ni->info_table_cache, 0xff, ni->info_table_size);

	memcpy(ni->info_table_cache + ni->info_table.state_table_off,
	       ni->block_state, ni->state_table_size);

	memcpy(ni->info_table_cache + ni->info_table.mapping_table_off,
	       ni->block_mapping, ni->mapping_table_size);

	ni->info_table.header.magic = NMBM_MAGIC_INFO_TABLE;
	ni->info_table.header.version = NMBM_VER;
	ni->info_table.header.size = ni->info_table_size;

	/* Bump the write count only when something actually changed */
	if (ni->block_state_changed || ni->block_mapping_changed) {
		ni->info_table.write_count++;
		changed = true;
	}

	memcpy(ni->info_table_cache, &ni->info_table, sizeof(ni->info_table));

	nmbm_update_checksum((struct nmbm_header *)ni->info_table_cache);

	return changed;
}
1017
/*
 * nmbm_write_info_table - Write info table into NAND within a range
 * @ni: NMBM instance structure
 * @ba: preferred start block address for writing
 * @limit: highest block address allowed for writing
 * @actual_start_ba: actual start block address of info table
 * @actual_end_ba: block address after the end of info table
 *
 * Convenience wrapper: writes the pre-generated info table cache via
 * nmbm_write_mgmt_range().
 *
 * @limit is counted into the allowed write address.
 */
static bool nmbm_write_info_table(struct nmbm_instance *ni, uint32_t ba,
				  uint32_t limit, uint32_t *actual_start_ba,
				  uint32_t *actual_end_ba)
{
	return nmbm_write_mgmt_range(ni, ba, limit, ni->info_table_cache,
				     ni->info_table_size, actual_start_ba,
				     actual_end_ba);
}
1036
/*
 * nmbm_mark_tables_clean - Mark info table `clean'
 * @ni: NMBM instance structure
 *
 * Reset the dirty counters after the tables have been flushed to flash.
 */
static void nmbm_mark_tables_clean(struct nmbm_instance *ni)
{
	ni->block_state_changed = 0;
	ni->block_mapping_changed = 0;
}
1046
/*
 * nmbm_try_reserve_blocks - Reserve blocks with compromisation
 * @ni: NMBM instance structure
 * @ba: start physical block address
 * @nba: return physical block address after reservation
 * @count: number of good blocks to be skipped
 * @min_count: minimum number of good blocks to be skipped
 * @limit: highest/lowest block address allowed for walking
 *
 * Reserve specific blocks. If failed, try to reserve as many as possible.
 */
static bool nmbm_try_reserve_blocks(struct nmbm_instance *ni, uint32_t ba,
				    uint32_t *nba, uint32_t count,
				    int32_t min_count, int32_t limit)
{
	int32_t want;

	/* Try the requested count first, then progressively fewer blocks */
	for (want = count; want >= min_count; want--) {
		if (nmbm_block_walk(ni, true, ba, nba, want, limit))
			return true;
	}

	return false;
}
1075
/*
 * nmbm_rebuild_info_table - Build main & backup info table from scratch
 * @ni: NMBM instance structure
 *
 * Writes the main info table starting at the management area, reserves
 * spare blocks behind it, then writes the backup table. The backup table
 * is optional: its absence is not a failure.
 */
static bool nmbm_rebuild_info_table(struct nmbm_instance *ni)
{
	uint32_t table_start_ba, table_end_ba, next_start_ba;
	uint32_t main_table_end_ba;
	bool success;

	/* Set initial value */
	ni->main_table_ba = 0;
	ni->backup_table_ba = 0;
	ni->mapping_blocks_ba = ni->mapping_blocks_top_ba;

	/* Write main table */
	success = nmbm_write_info_table(ni, ni->mgmt_start_ba,
					ni->mapping_blocks_top_ba,
					&table_start_ba, &table_end_ba);
	if (!success) {
		/* Failed to write main table, data will be lost */
		nlog_err(ni, "Unable to write at least one info table!\n");
		nlog_err(ni, "Please save your data before power off!\n");
		ni->protected = 1;
		return false;
	}

	/* Main info table is successfully written, record its offset */
	ni->main_table_ba = table_start_ba;
	main_table_end_ba = table_end_ba;

	/* Adjust mapping_blocks_ba */
	ni->mapping_blocks_ba = table_end_ba;

	nmbm_mark_tables_clean(ni);

	nlog_table_creation(ni, true, table_start_ba, table_end_ba);

	/* Reserve spare blocks for main info table. */
	success = nmbm_try_reserve_blocks(ni, table_end_ba,
					  &next_start_ba,
					  ni->info_table_spare_blocks, 0,
					  ni->mapping_blocks_top_ba -
					  size2blk(ni, ni->info_table_size));
	if (!success) {
		/* There is no spare block. */
		nlog_debug(ni, "No room for backup info table\n");
		return true;
	}

	/* Write backup info table. */
	success = nmbm_write_info_table(ni, next_start_ba,
					ni->mapping_blocks_top_ba,
					&table_start_ba, &table_end_ba);
	if (!success) {
		/* There is no enough blocks for backup table. */
		nlog_debug(ni, "No room for backup info table\n");
		return true;
	}

	/* Backup table is successfully written, record its offset */
	ni->backup_table_ba = table_start_ba;

	/* Adjust mapping_blocks_off */
	ni->mapping_blocks_ba = table_end_ba;

	/* Erase spare blocks of main table to clean possible interference data */
	nmbm_erase_range(ni, main_table_end_ba, ni->backup_table_ba);

	nlog_table_creation(ni, false, table_start_ba, table_end_ba);

	return true;
}
1150
/*
 * nmbm_rescue_single_info_table - Rescue when there is only one info table
 * @ni: NMBM instance structure
 *
 * This function is called when only one info table exists.
 * It first tries to write a new table in front of the surviving one (the
 * new copy then becomes the main table); failing that, it tries to write
 * one behind it (the new copy then becomes the backup table).
 * This function may fail if we can't write a new info table.
 */
static bool nmbm_rescue_single_info_table(struct nmbm_instance *ni)
{
	uint32_t table_start_ba, table_end_ba, write_ba;
	bool success;

	/* Try to write new info table in front of existing table */
	success = nmbm_write_info_table(ni, ni->mgmt_start_ba,
					ni->main_table_ba,
					&table_start_ba,
					&table_end_ba);
	if (success) {
		/*
		 * New table becomes the main table, existing table becomes
		 * the backup table.
		 */
		ni->backup_table_ba = ni->main_table_ba;
		ni->main_table_ba = table_start_ba;

		nmbm_mark_tables_clean(ni);

		/* Erase spare blocks of main table to clean possible interference data */
		nmbm_erase_range(ni, table_end_ba, ni->backup_table_ba);

		nlog_table_creation(ni, true, table_start_ba, table_end_ba);

		return true;
	}

	/* Try to reserve spare blocks for existing table */
	success = nmbm_try_reserve_blocks(ni, ni->mapping_blocks_ba, &write_ba,
					  ni->info_table_spare_blocks, 0,
					  ni->mapping_blocks_top_ba -
					  size2blk(ni, ni->info_table_size));
	if (!success) {
		nlog_warn(ni, "Failed to rescue single info table\n");
		return false;
	}

	/*
	 * Try to write new info table next to the existing table.
	 * NOTE(review): if write_ba were below mapping_blocks_ba the loop
	 * body would never run and `success` (still true from the reserve
	 * call above) would make the branch below read uninitialized
	 * table_start_ba/table_end_ba. Presumably the reserve walk starts at
	 * mapping_blocks_ba so this cannot happen - confirm.
	 */
	while (write_ba >= ni->mapping_blocks_ba) {
		success = nmbm_write_info_table(ni, write_ba,
						ni->mapping_blocks_top_ba,
						&table_start_ba,
						&table_end_ba);
		if (success)
			break;

		write_ba--;
	}

	if (success) {
		/* Erase spare blocks of main table to clean possible interference data */
		nmbm_erase_range(ni, ni->mapping_blocks_ba, table_start_ba);

		/* New table becomes the backup table */
		ni->backup_table_ba = table_start_ba;
		ni->mapping_blocks_ba = table_end_ba;

		nmbm_mark_tables_clean(ni);

		nlog_table_creation(ni, false, table_start_ba, table_end_ba);

		return true;
	}

	nlog_warn(ni, "Failed to rescue single info table\n");
	return false;
}
1226
1227 /*
1228 * nmbm_update_single_info_table - Update specific one info table
1229 * @ni: NMBM instance structure
1230 */
1231 static bool nmbm_update_single_info_table(struct nmbm_instance *ni,
1232 bool update_main_table)
1233 {
1234 uint32_t write_start_ba, write_limit, table_start_ba, table_end_ba;
1235 bool success;
1236
1237 /* Determine the write range */
1238 if (update_main_table) {
1239 write_start_ba = ni->main_table_ba;
1240 write_limit = ni->backup_table_ba;
1241 } else {
1242 write_start_ba = ni->backup_table_ba;
1243 write_limit = ni->mapping_blocks_top_ba;
1244 }
1245
1246 success = nmbm_write_info_table(ni, write_start_ba, write_limit,
1247 &table_start_ba, &table_end_ba);
1248 if (success) {
1249 if (update_main_table) {
1250 ni->main_table_ba = table_start_ba;
1251 } else {
1252 ni->backup_table_ba = table_start_ba;
1253 ni->mapping_blocks_ba = table_end_ba;
1254 }
1255
1256 nmbm_mark_tables_clean(ni);
1257
1258 nlog_table_update(ni, update_main_table, table_start_ba,
1259 table_end_ba);
1260
1261 return true;
1262 }
1263
1264 if (update_main_table) {
1265 /*
1266 * If failed to update main table, make backup table the new
1267 * main table, and call nmbm_rescue_single_info_table()
1268 */
1269 nlog_warn(ni, "Unable to update %s info table\n",
1270 update_main_table ? "Main" : "Backup");
1271
1272 ni->main_table_ba = ni->backup_table_ba;
1273 ni->backup_table_ba = 0;
1274 return nmbm_rescue_single_info_table(ni);
1275 }
1276
1277 /* Only one table left */
1278 ni->mapping_blocks_ba = ni->backup_table_ba;
1279 ni->backup_table_ba = 0;
1280
1281 return false;
1282 }
1283
/*
 * nmbm_rescue_main_info_table - Rescue when failed to write main info table
 * @ni: NMBM instance structure
 *
 * This function is called when main info table failed to be written, and
 * backup info table exists. It first stashes the data in a temporary table
 * in the spare area, rewrites the main table at the start of the management
 * area (which may destroy the old backup table), and finally tries to
 * establish a new backup table. Always returns true: the worst outcome is
 * that only a single table remains.
 */
static bool nmbm_rescue_main_info_table(struct nmbm_instance *ni)
{
	uint32_t tmp_table_start_ba, tmp_table_end_ba, main_table_start_ba;
	uint32_t main_table_end_ba, write_ba;
	uint32_t info_table_erasesize = size2blk(ni, ni->info_table_size);
	bool success;

	/* Try to reserve spare blocks for existing backup info table */
	success = nmbm_try_reserve_blocks(ni, ni->mapping_blocks_ba, &write_ba,
					  ni->info_table_spare_blocks, 0,
					  ni->mapping_blocks_top_ba -
					  info_table_erasesize);
	if (!success) {
		/* There is no spare block. Backup info table becomes the main table. */
		nlog_err(ni, "No room for temporary info table\n");
		ni->main_table_ba = ni->backup_table_ba;
		ni->backup_table_ba = 0;
		return true;
	}

	/*
	 * Try to write temporary info table into spare unmapped blocks.
	 * NOTE(review): if write_ba were below mapping_blocks_ba this loop
	 * would not run and `success` (still true from the reserve call)
	 * would leave tmp_table_*_ba uninitialized below. Presumably the
	 * reserve walk starts at mapping_blocks_ba so this cannot happen -
	 * confirm.
	 */
	while (write_ba >= ni->mapping_blocks_ba) {
		success = nmbm_write_info_table(ni, write_ba,
						ni->mapping_blocks_top_ba,
						&tmp_table_start_ba,
						&tmp_table_end_ba);
		if (success)
			break;

		write_ba--;
	}

	if (!success) {
		/* Backup info table becomes the main table */
		nlog_err(ni, "Failed to update main info table\n");
		ni->main_table_ba = ni->backup_table_ba;
		ni->backup_table_ba = 0;
		return true;
	}

	/* Adjust mapping_blocks_off */
	ni->mapping_blocks_ba = tmp_table_end_ba;

	/*
	 * Now write main info table at the beginning of management area.
	 * This operation will generally destroy the original backup info
	 * table.
	 */
	success = nmbm_write_info_table(ni, ni->mgmt_start_ba,
					tmp_table_start_ba,
					&main_table_start_ba,
					&main_table_end_ba);
	if (!success) {
		/* Temporary info table becomes the main table */
		ni->main_table_ba = tmp_table_start_ba;
		ni->backup_table_ba = 0;

		nmbm_mark_tables_clean(ni);

		nlog_err(ni, "Failed to update main info table\n");

		return true;
	}

	/* Main info table has been successfully written, record its offset */
	ni->main_table_ba = main_table_start_ba;

	nmbm_mark_tables_clean(ni);

	nlog_table_creation(ni, true, main_table_start_ba, main_table_end_ba);

	/*
	 * Temporary info table becomes the new backup info table if it's
	 * not overwritten.
	 */
	if (main_table_end_ba <= tmp_table_start_ba) {
		ni->backup_table_ba = tmp_table_start_ba;

		nlog_table_creation(ni, false, tmp_table_start_ba,
				    tmp_table_end_ba);

		return true;
	}

	/* Adjust mapping_blocks_off */
	ni->mapping_blocks_ba = main_table_end_ba;

	/* Try to reserve spare blocks for new main info table */
	success = nmbm_try_reserve_blocks(ni, main_table_end_ba, &write_ba,
					  ni->info_table_spare_blocks, 0,
					  ni->mapping_blocks_top_ba -
					  info_table_erasesize);
	if (!success) {
		/* There is no spare block. Only main table exists. */
		nlog_err(ni, "No room for backup info table\n");
		ni->backup_table_ba = 0;
		return true;
	}

	/* Write new backup info table. */
	while (write_ba >= main_table_end_ba) {
		success = nmbm_write_info_table(ni, write_ba,
						ni->mapping_blocks_top_ba,
						&tmp_table_start_ba,
						&tmp_table_end_ba);
		if (success)
			break;

		write_ba--;
	}

	if (!success) {
		nlog_err(ni, "No room for backup info table\n");
		ni->backup_table_ba = 0;
		return true;
	}

	/* Backup info table has been successfully written, record its offset */
	ni->backup_table_ba = tmp_table_start_ba;

	/* Adjust mapping_blocks_off */
	ni->mapping_blocks_ba = tmp_table_end_ba;

	/* Erase spare blocks of main table to clean possible interference data */
	nmbm_erase_range(ni, main_table_end_ba, ni->backup_table_ba);

	nlog_table_creation(ni, false, tmp_table_start_ba, tmp_table_end_ba);

	return true;
}
1421
/*
 * nmbm_update_info_table_once - Update info table once
 * @ni: NMBM instance structure
 * @force: force update even if nothing changed
 *
 * Update both main and backup info table. Return true if at least one info
 * table has been successfully written.
 * This function only tries to update the info table once regardless of the
 * result. The backup table is written first so the main table's write limit
 * is known before it is rewritten.
 */
static bool nmbm_update_info_table_once(struct nmbm_instance *ni, bool force)
{
	uint32_t table_start_ba, table_end_ba;
	uint32_t main_table_limit;
	bool success;

	/* Do nothing if there is no change */
	if (!nmbm_generate_info_table_cache(ni) && !force)
		return true;

	/* Check whether both two tables exist */
	if (!ni->backup_table_ba) {
		/* No backup: main table may use the whole management range */
		main_table_limit = ni->mapping_blocks_top_ba;
		goto write_main_table;
	}

	/*
	 * Write backup info table in its current range.
	 * Note that limit is set to mapping_blocks_top_off to provide as many
	 * spare blocks as possible for the backup table. If at last
	 * unmapped blocks are used by backup table, mapping_blocks_off will
	 * be adjusted.
	 */
	success = nmbm_write_info_table(ni, ni->backup_table_ba,
					ni->mapping_blocks_top_ba,
					&table_start_ba, &table_end_ba);
	if (!success) {
		/*
		 * There is nothing to do if failed to write backup table.
		 * Write the main table now.
		 */
		nlog_err(ni, "No room for backup table\n");
		ni->mapping_blocks_ba = ni->backup_table_ba;
		ni->backup_table_ba = 0;
		main_table_limit = ni->mapping_blocks_top_ba;
		goto write_main_table;
	}

	/* Backup table is successfully written, record its offset */
	ni->backup_table_ba = table_start_ba;

	/* Adjust mapping_blocks_off */
	ni->mapping_blocks_ba = table_end_ba;

	nmbm_mark_tables_clean(ni);

	/* The normal limit of main table */
	main_table_limit = ni->backup_table_ba;

	nlog_table_update(ni, false, table_start_ba, table_end_ba);

write_main_table:
	/* Main table missing entirely: rebuild both from scratch */
	if (!ni->main_table_ba)
		goto rebuild_tables;

	/* Write main info table in its current range */
	success = nmbm_write_info_table(ni, ni->main_table_ba,
					main_table_limit, &table_start_ba,
					&table_end_ba);
	if (!success) {
		/* If failed to write main table, go rescue procedure */
		if (!ni->backup_table_ba)
			goto rebuild_tables;

		return nmbm_rescue_main_info_table(ni);
	}

	/* Main info table is successfully written, record its offset */
	ni->main_table_ba = table_start_ba;

	/* Adjust mapping_blocks_off (only if no backup table follows it) */
	if (!ni->backup_table_ba)
		ni->mapping_blocks_ba = table_end_ba;

	nmbm_mark_tables_clean(ni);

	nlog_table_update(ni, true, table_start_ba, table_end_ba);

	return true;

rebuild_tables:
	return nmbm_rebuild_info_table(ni);
}
1514
1515 /*
1516 * nmbm_update_info_table - Update info table
1517 * @ni: NMBM instance structure
1518 *
1519 * Update both main and backup info table. Return true if at least one table
1520 * has been successfully written.
1521 * This function will try to update info table repeatedly until no new bad
1522 * block found during updating.
1523 */
1524 static bool nmbm_update_info_table(struct nmbm_instance *ni)
1525 {
1526 bool success;
1527
1528 if (ni->protected)
1529 return true;
1530
1531 while (ni->block_state_changed || ni->block_mapping_changed) {
1532 success = nmbm_update_info_table_once(ni, false);
1533 if (!success) {
1534 nlog_err(ni, "Failed to update info table\n");
1535 return false;
1536 }
1537 }
1538
1539 return true;
1540 }
1541
1542 /*
1543 * nmbm_map_block - Map a bad block to a unused spare block
1544 * @ni: NMBM instance structure
1545 * @lb: logic block addr to map
1546 */
1547 static bool nmbm_map_block(struct nmbm_instance *ni, uint32_t lb)
1548 {
1549 uint32_t pb;
1550 bool success;
1551
1552 if (ni->mapping_blocks_ba == ni->mapping_blocks_top_ba) {
1553 nlog_warn(ni, "No spare unmapped blocks.\n");
1554 return false;
1555 }
1556
1557 success = nmbm_block_walk(ni, false, ni->mapping_blocks_top_ba, &pb, 0,
1558 ni->mapping_blocks_ba);
1559 if (!success) {
1560 nlog_warn(ni, "No spare unmapped blocks.\n");
1561 nmbm_update_info_table(ni);
1562 ni->mapping_blocks_top_ba = ni->mapping_blocks_ba;
1563 return false;
1564 }
1565
1566 ni->block_mapping[lb] = pb;
1567 ni->mapping_blocks_top_ba--;
1568 ni->block_mapping_changed++;
1569
1570 nlog_info(ni, "Logic block %u mapped to physical block %u\n", lb, pb);
1571
1572 return true;
1573 }
1574
1575 /*
1576 * nmbm_create_info_table - Create info table(s)
1577 * @ni: NMBM instance structure
1578 *
1579 * This function assumes that the chip has no existing info table(s)
1580 */
1581 static bool nmbm_create_info_table(struct nmbm_instance *ni)
1582 {
1583 uint32_t lb;
1584 bool success;
1585
1586 /* Set initial mapping_blocks_top_off */
1587 success = nmbm_block_walk(ni, false, ni->signature_ba,
1588 &ni->mapping_blocks_top_ba, 1,
1589 ni->mgmt_start_ba);
1590 if (!success) {
1591 nlog_err(ni, "No room for spare blocks\n");
1592 return false;
1593 }
1594
1595 /* Generate info table cache */
1596 nmbm_generate_info_table_cache(ni);
1597
1598 /* Write info table */
1599 success = nmbm_rebuild_info_table(ni);
1600 if (!success) {
1601 nlog_err(ni, "Failed to build info tables\n");
1602 return false;
1603 }
1604
1605 /* Remap bad block(s) at end of data area */
1606 for (lb = ni->data_block_count; lb < ni->mgmt_start_ba; lb++) {
1607 success = nmbm_map_block(ni, lb);
1608 if (!success)
1609 break;
1610
1611 ni->data_block_count++;
1612 }
1613
1614 /* If state table and/or mapping table changed, update info table. */
1615 success = nmbm_update_info_table(ni);
1616 if (!success)
1617 return false;
1618
1619 return true;
1620 }
1621
1622 /*
1623 * nmbm_create_new - Create NMBM on a new chip
1624 * @ni: NMBM instance structure
1625 */
1626 static bool nmbm_create_new(struct nmbm_instance *ni)
1627 {
1628 bool success;
1629
1630 /* Determine the boundary of management blocks */
1631 ni->mgmt_start_ba = ni->block_count * (NMBM_MGMT_DIV - ni->max_ratio) / NMBM_MGMT_DIV;
1632
1633 if (ni->max_reserved_blocks && ni->block_count - ni->mgmt_start_ba > ni->max_reserved_blocks)
1634 ni->mgmt_start_ba = ni->block_count - ni->max_reserved_blocks;
1635
1636 nlog_info(ni, "NMBM management region starts at block %u [0x%08llx]\n",
1637 ni->mgmt_start_ba, ba2addr(ni, ni->mgmt_start_ba));
1638
1639 /* Fill block state table & mapping table */
1640 nmbm_scan_badblocks(ni);
1641 nmbm_build_mapping_table(ni);
1642
1643 /* Write signature */
1644 ni->signature.header.magic = NMBM_MAGIC_SIGNATURE;
1645 ni->signature.header.version = NMBM_VER;
1646 ni->signature.header.size = sizeof(ni->signature);
1647 ni->signature.nand_size = bmtd.total_blks << bmtd.blk_shift;
1648 ni->signature.block_size = bmtd.blk_size;
1649 ni->signature.page_size = bmtd.pg_size;
1650 ni->signature.spare_size = bmtd.mtd->oobsize;
1651 ni->signature.mgmt_start_pb = ni->mgmt_start_ba;
1652 ni->signature.max_try_count = NMBM_TRY_COUNT;
1653 nmbm_update_checksum(&ni->signature.header);
1654
1655 success = nmbm_write_signature(ni, ni->mgmt_start_ba,
1656 &ni->signature, &ni->signature_ba);
1657 if (!success) {
1658 nlog_err(ni, "Failed to write signature to a proper offset\n");
1659 return false;
1660 }
1661
1662 nlog_info(ni, "Signature has been written to block %u [0x%08llx]\n",
1663 ni->signature_ba, ba2addr(ni, ni->signature_ba));
1664
1665 /* Write info table(s) */
1666 success = nmbm_create_info_table(ni);
1667 if (success) {
1668 nlog_info(ni, "NMBM has been successfully created\n");
1669 return true;
1670 }
1671
1672 return false;
1673 }
1674
1675 /*
1676 * nmbm_check_info_table_header - Check if a info table header is valid
1677 * @ni: NMBM instance structure
1678 * @data: pointer to the info table header
1679 */
1680 static bool nmbm_check_info_table_header(struct nmbm_instance *ni, void *data)
1681 {
1682 struct nmbm_info_table_header *ifthdr = data;
1683
1684 if (ifthdr->header.magic != NMBM_MAGIC_INFO_TABLE)
1685 return false;
1686
1687 if (ifthdr->header.size != ni->info_table_size)
1688 return false;
1689
1690 if (ifthdr->mapping_table_off - ifthdr->state_table_off < ni->state_table_size)
1691 return false;
1692
1693 if (ni->info_table_size - ifthdr->mapping_table_off < ni->mapping_table_size)
1694 return false;
1695
1696 return true;
1697 }
1698
/*
 * nmbm_check_info_table - Check if a whole info table is valid
 * @ni: NMBM instance structure
 * @start_ba: start block address of this table
 * @end_ba: end block address of this table
 * @data: pointer to the info table header
 * @mapping_blocks_top_ba: return the block address of top remapped block
 */
static bool nmbm_check_info_table(struct nmbm_instance *ni, uint32_t start_ba,
				  uint32_t end_ba, void *data,
				  uint32_t *mapping_blocks_top_ba)
{
	struct nmbm_info_table_header *ifthdr = data;
	int32_t *block_mapping = (int32_t *)((uintptr_t)data + ifthdr->mapping_table_off);
	u32 *block_state = (u32 *)((uintptr_t)data + ifthdr->state_table_off);
	uint32_t minimum_mapping_pb = ni->signature_ba;
	uint32_t ba;

	/*
	 * Reject tables with mapping entries that point into the management
	 * region below the table's end, or at the signature block.
	 * NOTE(review): block_mapping entries are int32_t compared against
	 * unsigned values, so negative entries convert to large unsigned
	 * numbers and fall through both checks - presumably this is how
	 * unmapped entries are skipped; confirm against the table builder.
	 */
	for (ba = 0; ba < ni->data_block_count; ba++) {
		if ((block_mapping[ba] >= ni->data_block_count && block_mapping[ba] < end_ba) ||
		    block_mapping[ba] == ni->signature_ba)
			return false;

		/* Track the lowest remapped physical block above the table */
		if (block_mapping[ba] >= end_ba && block_mapping[ba] < minimum_mapping_pb)
			minimum_mapping_pb = block_mapping[ba];
	}

	/*
	 * Every block we know to be good must also be recorded good in the
	 * table being checked, otherwise the table is inconsistent.
	 */
	for (ba = start_ba; ba < end_ba; ba++) {
		if (nmbm_get_block_state(ni, ba) != BLOCK_ST_GOOD)
			continue;

		if (nmbm_get_block_state_raw(block_state, ba) != BLOCK_ST_GOOD)
			return false;
	}

	/* Spare-pool top sits just below the lowest remapped block */
	*mapping_blocks_top_ba = minimum_mapping_pb - 1;

	return true;
}
1738
/*
 * nmbm_try_load_info_table - Try to load info table from a address
 * @ni: NMBM instance structure
 * @ba: start block address of the info table
 * @eba: return the block address after end of the table
 * @write_count: return the write count of this table
 * @mapping_blocks_top_ba: return the block address of top remapped block
 * @table_loaded: used to record whether ni->info_table has valid data
 *
 * Reads info_table_size bytes block by block into the info table cache,
 * skipping blocks marked bad (and marking newly-found bad blocks in RAM
 * only). The table is then validated (header checksum, contents) and, if
 * it is newer than what is already loaded, copied into ni->info_table,
 * ni->block_state and ni->block_mapping.
 */
static bool nmbm_try_load_info_table(struct nmbm_instance *ni, uint32_t ba,
				     uint32_t *eba, uint32_t *write_count,
				     uint32_t *mapping_blocks_top_ba,
				     bool table_loaded)
{
	struct nmbm_info_table_header *ifthdr = (void *)ni->info_table_cache;
	uint8_t *off = ni->info_table_cache;
	uint32_t limit = ba + size2blk(ni, ni->info_table_size);
	uint32_t start_ba = 0, chunksize, sizeremain = ni->info_table_size;
	bool success, checkhdr = true;
	int ret;

	while (sizeremain && ba < limit) {
		/* Skip blocks already known bad */
		if (nmbm_get_block_state(ni, ba) != BLOCK_ST_GOOD)
			goto next_block;

		if (nmbm_check_bad_phys_block(ni, ba)) {
			nmbm_set_block_state(ni, ba, BLOCK_ST_BAD);
			goto next_block;
		}

		/* Read at most one block worth of data per iteration */
		chunksize = sizeremain;
		if (chunksize > bmtd.blk_size)
			chunksize = bmtd.blk_size;

		/* Assume block with ECC error has no info table data */
		ret = nmbn_read_data(ni, ba2addr(ni, ba), off, chunksize);
		if (ret < 0)
			goto skip_bad_block;
		else if (ret > 0)
			return false;

		/* Validate the header as soon as the first chunk is in */
		if (checkhdr) {
			success = nmbm_check_info_table_header(ni, off);
			if (!success)
				return false;

			start_ba = ba;
			checkhdr = false;
		}

		off += chunksize;
		sizeremain -= chunksize;

		goto next_block;

	skip_bad_block:
		/* Only mark bad in memory */
		nmbm_set_block_state(ni, ba, BLOCK_ST_BAD);

	next_block:
		ba++;
	}

	/* Ran out of blocks before the whole table could be read */
	if (sizeremain)
		return false;

	/* Verify the table checksum over the full cached image */
	success = nmbm_check_header(ni->info_table_cache, ni->info_table_size);
	if (!success)
		return false;

	*eba = ba;
	*write_count = ifthdr->write_count;

	success = nmbm_check_info_table(ni, start_ba, ba, ni->info_table_cache,
					mapping_blocks_top_ba);
	if (!success)
		return false;

	/* Adopt this table if it is the first or the newest one seen */
	if (!table_loaded || ifthdr->write_count > ni->info_table.write_count) {
		memcpy(&ni->info_table, ifthdr, sizeof(ni->info_table));
		memcpy(ni->block_state,
		       (uint8_t *)ifthdr + ifthdr->state_table_off,
		       ni->state_table_size);
		memcpy(ni->block_mapping,
		       (uint8_t *)ifthdr + ifthdr->mapping_table_off,
		       ni->mapping_table_size);
		ni->info_table.write_count = ifthdr->write_count;
	}

	return true;
}
1830
1831 /*
1832 * nmbm_search_info_table - Search info table from specific address
1833 * @ni: NMBM instance structure
1834 * @ba: start block address to search
1835 * @limit: highest block address allowed for searching
1836 * @table_start_ba: return the start block address of this table
1837 * @table_end_ba: return the block address after end of this table
1838 * @write_count: return the write count of this table
1839 * @mapping_blocks_top_ba: return the block address of top remapped block
1840 * @table_loaded: used to record whether ni->info_table has valid data
1841 */
1842 static bool nmbm_search_info_table(struct nmbm_instance *ni, uint32_t ba,
1843 uint32_t limit, uint32_t *table_start_ba,
1844 uint32_t *table_end_ba,
1845 uint32_t *write_count,
1846 uint32_t *mapping_blocks_top_ba,
1847 bool table_loaded)
1848 {
1849 bool success;
1850
1851 while (ba < limit - size2blk(ni, ni->info_table_size)) {
1852 success = nmbm_try_load_info_table(ni, ba, table_end_ba,
1853 write_count,
1854 mapping_blocks_top_ba,
1855 table_loaded);
1856 if (success) {
1857 *table_start_ba = ba;
1858 return true;
1859 }
1860
1861 ba++;
1862 }
1863
1864 return false;
1865 }
1866
/*
 * nmbm_load_info_table - Load info table(s) from a chip
 * @ni: NMBM instance structure
 * @ba: start block address to search info table
 * @limit: highest block address allowed for searching
 *
 * Locates up to two info tables, keeps the one with the higher write count
 * (done inside nmbm_try_load_info_table), and tries to repair the situation
 * when the tables disagree or only one exists. Returns false only when no
 * valid table was found at all.
 */
static bool nmbm_load_info_table(struct nmbm_instance *ni, uint32_t ba,
				 uint32_t limit)
{
	uint32_t main_table_end_ba, backup_table_end_ba, table_end_ba;
	uint32_t main_mapping_blocks_top_ba, backup_mapping_blocks_top_ba;
	uint32_t main_table_write_count, backup_table_write_count;
	uint32_t i;
	bool success;

	/* Set initial value */
	ni->main_table_ba = 0;
	ni->backup_table_ba = 0;
	ni->info_table.write_count = 0;
	ni->mapping_blocks_top_ba = ni->signature_ba - 1;
	ni->data_block_count = ni->signature.mgmt_start_pb;

	/* Find first info table */
	success = nmbm_search_info_table(ni, ba, limit, &ni->main_table_ba,
		&main_table_end_ba, &main_table_write_count,
		&main_mapping_blocks_top_ba, false);
	if (!success) {
		nlog_warn(ni, "No valid info table found\n");
		return false;
	}

	table_end_ba = main_table_end_ba;

	nlog_table_found(ni, true, main_table_write_count, ni->main_table_ba,
			 main_table_end_ba);

	/* Find second info table */
	success = nmbm_search_info_table(ni, main_table_end_ba, limit,
		&ni->backup_table_ba, &backup_table_end_ba,
		&backup_table_write_count, &backup_mapping_blocks_top_ba, true);
	if (!success) {
		nlog_warn(ni, "Second info table not found\n");
	} else {
		table_end_ba = backup_table_end_ba;

		nlog_table_found(ni, false, backup_table_write_count,
				 ni->backup_table_ba, backup_table_end_ba);
	}

	/* Pick mapping_blocks_top_ba from the table with the newer data */
	if (!ni->backup_table_ba) {
		ni->mapping_blocks_top_ba= main_mapping_blocks_top_ba;
	} else {
		if (main_table_write_count >= backup_table_write_count)
			ni->mapping_blocks_top_ba = main_mapping_blocks_top_ba;
		else
			ni->mapping_blocks_top_ba = backup_mapping_blocks_top_ba;
	}

	/* Set final mapping_blocks_ba */
	ni->mapping_blocks_ba = table_end_ba;

	/* Set final data_block_count: highest logic block that is mapped */
	for (i = ni->signature.mgmt_start_pb; i > 0; i--) {
		if (ni->block_mapping[i - 1] >= 0) {
			ni->data_block_count = i;
			break;
		}
	}

	/* Regenerate the info table cache from the final selected info table */
	nmbm_generate_info_table_cache(ni);

	/*
	 * If only one table exists, try to write another table.
	 * If two tables have different write count, try to update info table
	 */
	if (!ni->backup_table_ba) {
		success = nmbm_rescue_single_info_table(ni);
	} else if (main_table_write_count != backup_table_write_count) {
		/* Mark state & mapping tables changed */
		ni->block_state_changed = 1;
		ni->block_mapping_changed = 1;

		/* Rewrite the older of the two tables */
		success = nmbm_update_single_info_table(ni,
			main_table_write_count < backup_table_write_count);
	} else {
		success = true;
	}

	/*
	 * If there is no spare unmapped blocks, or still only one table
	 * exists, set the chip to read-only
	 */
	if (ni->mapping_blocks_ba == ni->mapping_blocks_top_ba) {
		nlog_warn(ni, "No spare unmapped blocks. Device is now read-only\n");
		ni->protected = 1;
	} else if (!success) {
		nlog_warn(ni, "Only one info table found. Device is now read-only\n");
		ni->protected = 1;
	}

	return true;
}
1971
1972 /*
1973 * nmbm_load_existing - Load NMBM from a new chip
1974 * @ni: NMBM instance structure
1975 */
1976 static bool nmbm_load_existing(struct nmbm_instance *ni)
1977 {
1978 bool success;
1979
1980 /* Calculate the boundary of management blocks */
1981 ni->mgmt_start_ba = ni->signature.mgmt_start_pb;
1982
1983 nlog_debug(ni, "NMBM management region starts at block %u [0x%08llx]\n",
1984 ni->mgmt_start_ba, ba2addr(ni, ni->mgmt_start_ba));
1985
1986 /* Look for info table(s) */
1987 success = nmbm_load_info_table(ni, ni->mgmt_start_ba,
1988 ni->signature_ba);
1989 if (success) {
1990 nlog_info(ni, "NMBM has been successfully attached\n");
1991 return true;
1992 }
1993
1994 if (!ni->force_create) {
1995 printk("not creating NMBM table\n");
1996 return false;
1997 }
1998
1999 /* Fill block state table & mapping table */
2000 nmbm_scan_badblocks(ni);
2001 nmbm_build_mapping_table(ni);
2002
2003 /* Write info table(s) */
2004 success = nmbm_create_info_table(ni);
2005 if (success) {
2006 nlog_info(ni, "NMBM has been successfully created\n");
2007 return true;
2008 }
2009
2010 return false;
2011 }
2012
2013 /*
2014 * nmbm_find_signature - Find signature in the lower NAND chip
2015 * @ni: NMBM instance structure
2016 * @signature_ba: used for storing block address of the signature
2017 * @signature_ba: return the actual block address of signature block
2018 *
2019 * Find a valid signature from a specific range in the lower NAND chip,
2020 * from bottom (highest address) to top (lowest address)
2021 *
2022 * Return true if found.
2023 */
2024 static bool nmbm_find_signature(struct nmbm_instance *ni,
2025 struct nmbm_signature *signature,
2026 uint32_t *signature_ba)
2027 {
2028 struct nmbm_signature sig;
2029 uint64_t off, addr;
2030 uint32_t block_count, ba, limit;
2031 bool success;
2032 int ret;
2033
2034 /* Calculate top and bottom block address */
2035 block_count = bmtd.total_blks;
2036 ba = block_count;
2037 limit = (block_count / NMBM_MGMT_DIV) * (NMBM_MGMT_DIV - ni->max_ratio);
2038 if (ni->max_reserved_blocks && block_count - limit > ni->max_reserved_blocks)
2039 limit = block_count - ni->max_reserved_blocks;
2040
2041 while (ba >= limit) {
2042 ba--;
2043 addr = ba2addr(ni, ba);
2044
2045 if (nmbm_check_bad_phys_block(ni, ba))
2046 continue;
2047
2048 /* Check every page.
2049 * As long as at leaset one page contains valid signature,
2050 * the block is treated as a valid signature block.
2051 */
2052 for (off = 0; off < bmtd.blk_size;
2053 off += bmtd.pg_size) {
2054 ret = nmbn_read_data(ni, addr + off, &sig,
2055 sizeof(sig));
2056 if (ret)
2057 continue;
2058
2059 /* Check for header size and checksum */
2060 success = nmbm_check_header(&sig, sizeof(sig));
2061 if (!success)
2062 continue;
2063
2064 /* Check for header magic */
2065 if (sig.header.magic == NMBM_MAGIC_SIGNATURE) {
2066 /* Found it */
2067 memcpy(signature, &sig, sizeof(sig));
2068 *signature_ba = ba;
2069 return true;
2070 }
2071 }
2072 };
2073
2074 return false;
2075 }
2076
2077 /*
2078 * nmbm_calc_structure_size - Calculate the instance structure size
2079 * @nld: NMBM lower device structure
2080 */
2081 static size_t nmbm_calc_structure_size(void)
2082 {
2083 uint32_t state_table_size, mapping_table_size, info_table_size;
2084 uint32_t block_count;
2085
2086 block_count = bmtd.total_blks;
2087
2088 /* Calculate info table size */
2089 state_table_size = ((block_count + NMBM_BITMAP_BLOCKS_PER_UNIT - 1) /
2090 NMBM_BITMAP_BLOCKS_PER_UNIT) * NMBM_BITMAP_UNIT_SIZE;
2091 mapping_table_size = block_count * sizeof(int32_t);
2092
2093 info_table_size = ALIGN(sizeof(struct nmbm_info_table_header),
2094 bmtd.pg_size);
2095 info_table_size += ALIGN(state_table_size, bmtd.pg_size);
2096 info_table_size += ALIGN(mapping_table_size, bmtd.pg_size);
2097
2098 return info_table_size + state_table_size + mapping_table_size +
2099 sizeof(struct nmbm_instance);
2100 }
2101
2102 /*
2103 * nmbm_init_structure - Initialize members of instance structure
2104 * @ni: NMBM instance structure
2105 */
2106 static void nmbm_init_structure(struct nmbm_instance *ni)
2107 {
2108 uint32_t pages_per_block, blocks_per_chip;
2109 uintptr_t ptr;
2110
2111 pages_per_block = bmtd.blk_size / bmtd.pg_size;
2112 blocks_per_chip = bmtd.total_blks;
2113
2114 ni->rawpage_size = bmtd.pg_size + bmtd.mtd->oobsize;
2115 ni->rawblock_size = pages_per_block * ni->rawpage_size;
2116 ni->rawchip_size = blocks_per_chip * ni->rawblock_size;
2117
2118 /* Calculate number of block this chip */
2119 ni->block_count = blocks_per_chip;
2120
2121 /* Calculate info table size */
2122 ni->state_table_size = ((ni->block_count + NMBM_BITMAP_BLOCKS_PER_UNIT - 1) /
2123 NMBM_BITMAP_BLOCKS_PER_UNIT) * NMBM_BITMAP_UNIT_SIZE;
2124 ni->mapping_table_size = ni->block_count * sizeof(*ni->block_mapping);
2125
2126 ni->info_table_size = ALIGN(sizeof(ni->info_table),
2127 bmtd.pg_size);
2128 ni->info_table.state_table_off = ni->info_table_size;
2129
2130 ni->info_table_size += ALIGN(ni->state_table_size,
2131 bmtd.pg_size);
2132 ni->info_table.mapping_table_off = ni->info_table_size;
2133
2134 ni->info_table_size += ALIGN(ni->mapping_table_size,
2135 bmtd.pg_size);
2136
2137 ni->info_table_spare_blocks = nmbm_get_spare_block_count(
2138 size2blk(ni, ni->info_table_size));
2139
2140 /* Assign memory to members */
2141 ptr = (uintptr_t)ni + sizeof(*ni);
2142
2143 ni->info_table_cache = (void *)ptr;
2144 ptr += ni->info_table_size;
2145
2146 ni->block_state = (void *)ptr;
2147 ptr += ni->state_table_size;
2148
2149 ni->block_mapping = (void *)ptr;
2150 ptr += ni->mapping_table_size;
2151
2152 ni->page_cache = bmtd.data_buf;
2153
2154 /* Initialize block state table */
2155 ni->block_state_changed = 0;
2156 memset(ni->block_state, 0xff, ni->state_table_size);
2157
2158 /* Initialize block mapping table */
2159 ni->block_mapping_changed = 0;
2160 }
2161
2162 /*
2163 * nmbm_attach - Attach to a lower device
2164 * @ni: NMBM instance structure
2165 */
2166 static int nmbm_attach(struct nmbm_instance *ni)
2167 {
2168 bool success;
2169
2170 if (!ni)
2171 return -EINVAL;
2172
2173 /* Initialize NMBM instance */
2174 nmbm_init_structure(ni);
2175
2176 success = nmbm_find_signature(ni, &ni->signature, &ni->signature_ba);
2177 if (!success) {
2178 if (!ni->force_create) {
2179 nlog_err(ni, "Signature not found\n");
2180 return -ENODEV;
2181 }
2182
2183 success = nmbm_create_new(ni);
2184 if (!success)
2185 return -ENODEV;
2186
2187 return 0;
2188 }
2189
2190 nlog_info(ni, "Signature found at block %u [0x%08llx]\n",
2191 ni->signature_ba, ba2addr(ni, ni->signature_ba));
2192
2193 if (ni->signature.header.version != NMBM_VER) {
2194 nlog_err(ni, "NMBM version %u.%u is not supported\n",
2195 NMBM_VERSION_MAJOR_GET(ni->signature.header.version),
2196 NMBM_VERSION_MINOR_GET(ni->signature.header.version));
2197 return -EINVAL;
2198 }
2199
2200 if (ni->signature.nand_size != bmtd.total_blks << bmtd.blk_shift ||
2201 ni->signature.block_size != bmtd.blk_size ||
2202 ni->signature.page_size != bmtd.pg_size ||
2203 ni->signature.spare_size != bmtd.mtd->oobsize) {
2204 nlog_err(ni, "NMBM configuration mismatch\n");
2205 return -EINVAL;
2206 }
2207
2208 success = nmbm_load_existing(ni);
2209 if (!success)
2210 return -ENODEV;
2211
2212 return 0;
2213 }
2214
2215 static bool remap_block_nmbm(u16 block, u16 mapped_block, int copy_len)
2216 {
2217 struct nmbm_instance *ni = bmtd.ni;
2218 int new_block;
2219
2220 if (block >= ni->data_block_count)
2221 return false;
2222
2223 nmbm_set_block_state(ni, mapped_block, BLOCK_ST_BAD);
2224 if (!nmbm_map_block(ni, block))
2225 return false;
2226
2227 new_block = ni->block_mapping[block];
2228 bbt_nand_erase(new_block);
2229 if (copy_len > 0)
2230 bbt_nand_copy(new_block, mapped_block, copy_len);
2231 nmbm_update_info_table(ni);
2232
2233 return true;
2234 }
2235
2236 static int get_mapping_block_index_nmbm(int block)
2237 {
2238 struct nmbm_instance *ni = bmtd.ni;
2239
2240 if (block >= ni->data_block_count)
2241 return -1;
2242
2243 return ni->block_mapping[block];
2244 }
2245
2246 static int mtk_bmt_init_nmbm(struct device_node *np)
2247 {
2248 struct nmbm_instance *ni;
2249 int ret;
2250
2251 ni = kzalloc(nmbm_calc_structure_size(), GFP_KERNEL);
2252 if (!ni)
2253 return -ENOMEM;
2254
2255 bmtd.ni = ni;
2256
2257 if (of_property_read_u32(np, "mediatek,bmt-max-ratio", &ni->max_ratio))
2258 ni->max_ratio = 1;
2259 if (of_property_read_u32(np, "mediatek,bmt-max-reserved-blocks",
2260 &ni->max_reserved_blocks))
2261 ni->max_reserved_blocks = 256;
2262 if (of_property_read_bool(np, "mediatek,empty-page-ecc-protected"))
2263 ni->empty_page_ecc_ok = true;
2264 if (of_property_read_bool(np, "mediatek,bmt-force-create"))
2265 ni->force_create = true;
2266
2267 ret = nmbm_attach(ni);
2268 if (ret)
2269 goto out;
2270
2271 bmtd.mtd->size = ni->data_block_count << bmtd.blk_shift;
2272
2273 return 0;
2274
2275 out:
2276 kfree(ni);
2277 bmtd.ni = NULL;
2278
2279 return ret;
2280 }
2281
2282 static int mtk_bmt_debug_nmbm(void *data, u64 val)
2283 {
2284 struct nmbm_instance *ni = bmtd.ni;
2285 int i;
2286
2287 switch (val) {
2288 case 0:
2289 for (i = 1; i < ni->data_block_count; i++) {
2290 if (ni->block_mapping[i] < ni->mapping_blocks_ba)
2291 continue;
2292
2293 printk("remap [%x->%x]\n", i, ni->block_mapping[i]);
2294 }
2295 }
2296
2297 return 0;
2298 }
2299
/*
 * unmap_block_nmbm - Undo the remapping of a logical block
 * @block: logical block index to restore
 *
 * Walks backwards from @block to the nearest logical block that is not
 * remapped (mapping below mapping_blocks_ba), then rebinds @block to the
 * physical block at the matching offset after that run's mapping. The
 * top of the remap pool (mapping_blocks_top_ba) is then recomputed as
 * the highest block below the signature not referenced by any mapping
 * entry, and the info table is rewritten.
 *
 * NOTE(review): the rebind assumes that consecutively remapped logical
 * blocks originally occupied consecutive physical blocks — confirm this
 * invariant against nmbm_map_block() before modifying.
 */
static void unmap_block_nmbm(u16 block)
{
	struct nmbm_instance *ni = bmtd.ni;
	int start, offset;
	int new_block;

	if (block >= ni->data_block_count)
		return;

	/* Scan back to the first non-remapped block at or below @block */
	start = block;
	offset = 0;
	while (ni->block_mapping[start] >= ni->mapping_blocks_ba) {
		start--;
		offset++;
		if (start < 0)
			return;
	}

	/* @block itself was not remapped; nothing to undo */
	if (!offset)
		return;

	/* Rebind @block relative to the run start and mark the block good */
	new_block = ni->block_mapping[start] + offset;
	nmbm_set_block_state(ni, new_block, BLOCK_ST_GOOD);
	ni->block_mapping[block] = new_block;
	ni->block_mapping_changed++;

	/*
	 * Recompute the top of the remap pool: start just below the
	 * signature block and move below any still-referenced mapping.
	 */
	new_block = ni->signature_ba - 1;
	for (block = 0; block < ni->data_block_count; block++) {
		int cur = ni->block_mapping[block];

		if (cur < ni->mapping_blocks_ba)
			continue;

		if (cur <= new_block)
			new_block = cur - 1;
	}

	ni->mapping_blocks_top_ba = new_block;

	nmbm_update_info_table(ni);
}
2341
/* NMBM backend registration for the generic mtk_bmt bad-block framework */
const struct mtk_bmt_ops mtk_bmt_nmbm_ops = {
	.init = mtk_bmt_init_nmbm,
	.remap_block = remap_block_nmbm,
	.unmap_block = unmap_block_nmbm,
	.get_mapping_block = get_mapping_block_index_nmbm,
	.debug = mtk_bmt_debug_nmbm,
};