kernel: move mediatek BMT support patch to generic patches
target/linux/generic/files/drivers/mtd/nand/mtk_bmt.c
/*
 * Copyright (c) 2017 MediaTek Inc.
 * Author: Xiangsheng Hou <xiangsheng.hou@mediatek.com>
 * Copyright (c) 2020 Felix Fietkau <nbd@nbd.name>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/slab.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/mtk_bmt.h>
#include <linux/module.h>
#include <linux/debugfs.h>

#define MAIN_SIGNATURE_OFFSET	0
#define OOB_SIGNATURE_OFFSET	1
#define BBPOOL_RATIO		2

#define BBT_LOG(fmt, ...) pr_debug("[BBT][%s|%d] "fmt"\n", __func__, __LINE__, ##__VA_ARGS__)

/* Maximum 8k blocks */
#define BB_TABLE_MAX	bmtd.table_size
#define BMT_TABLE_MAX	(BB_TABLE_MAX * BBPOOL_RATIO / 100)
#define BMT_TBL_DEF_VAL	0x0
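/*
 * BB_TABLE_MAX tracks the runtime table size (0x2000, i.e. 8k blocks, unless
 * overridden via the "mediatek,bmt-table-size" DT property); BMT_TABLE_MAX is
 * BBPOOL_RATIO percent of that and is used together with BB_TABLE_MAX to
 * sanity-check the total block count in mtk_bmt_attach().
 */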

/*
 * Burner Bad Block Table
 * --------- Only SLC NAND chips are supported ----------
 */

struct bbbt {
	char signature[3];
	/* This version is used to distinguish the legacy and new algorithm */
#define BBMT_VERSION		2
	unsigned char version;
	/* The two tables below are written to SLC */
	u16 bb_tbl[];
};

struct bbmt {
	u16 block;
#define NO_MAPPED		0
#define NORMAL_MAPPED		1
#define BMT_MAPPED		2
	u16 mapped;
};

static struct bmt_desc {
	struct mtd_info *mtd;

	int (*_read_oob) (struct mtd_info *mtd, loff_t from,
			  struct mtd_oob_ops *ops);
	int (*_write_oob) (struct mtd_info *mtd, loff_t to,
			   struct mtd_oob_ops *ops);
	int (*_erase) (struct mtd_info *mtd, struct erase_info *instr);
	int (*_block_isbad) (struct mtd_info *mtd, loff_t ofs);
	int (*_block_markbad) (struct mtd_info *mtd, loff_t ofs);

	struct bbbt *bbt;

	struct dentry *debugfs_dir;

	u32 table_size;
	u32 pg_size;
	u32 blk_size;
	u16 pg_shift;
	u16 blk_shift;
	/* bbt logical address */
	u16 pool_lba;
	/* bbt physical address */
	u16 pool_pba;
	/* Maximum number of bad blocks guaranteed by the vendor */
	u16 bb_max;
	/* Total number of blocks on the NAND chip */
	u16 total_blks;
	/* Index n such that the BMT itself is stored in bmt_tbl[n].block */
	u16 bmt_blk_idx;
	/* How many pages are needed to store 'struct bbbt' */
	u32 bmt_pgs;

	/* to compensate for driver level remapping */
	u8 oob_offset;
} bmtd = {0};

static unsigned char *nand_bbt_buf;
static unsigned char *nand_data_buf;

/* -------- Unit conversions -------- */
static inline u32 blk_pg(u16 block)
{
	return (u32)(block << (bmtd.blk_shift - bmtd.pg_shift));
}

/* -------- Nand operations wrapper -------- */
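/*
 * Read one page: dat_len bytes of main data into 'dat' plus fdm_len bytes of
 * OOB/FDM data (starting at bmtd.oob_offset) into 'fdm'.
 */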
static inline int
bbt_nand_read(u32 page, unsigned char *dat, int dat_len,
	      unsigned char *fdm, int fdm_len)
{
	struct mtd_oob_ops ops = {
		.mode = MTD_OPS_PLACE_OOB,
		.ooboffs = bmtd.oob_offset,
		.oobbuf = fdm,
		.ooblen = fdm_len,
		.datbuf = dat,
		.len = dat_len,
	};

	return bmtd._read_oob(bmtd.mtd, page << bmtd.pg_shift, &ops);
}

static inline int bbt_nand_erase(u16 block)
{
	struct mtd_info *mtd = bmtd.mtd;
	struct erase_info instr = {
		.addr = (loff_t)block << bmtd.blk_shift,
		.len = bmtd.blk_size,
	};

	return bmtd._erase(mtd, &instr);
}

/* -------- Bad Blocks Management -------- */
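/*
 * The pool remap table (struct bbmt entries) is stored directly behind
 * bb_tbl[bmtd.table_size] in the same 'struct bbbt' buffer; bmt_tbl()
 * returns a pointer to it.
 */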
static inline struct bbmt *bmt_tbl(struct bbbt *bbbt)
{
	return (struct bbmt *)&bbbt->bb_tbl[bmtd.table_size];
}

static int
read_bmt(u16 block, unsigned char *dat, unsigned char *fdm, int fdm_len)
{
	u32 len = bmtd.bmt_pgs << bmtd.pg_shift;

	return bbt_nand_read(blk_pg(block), dat, len, fdm, fdm_len);
}

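/*
 * Write the BMT pages to 'block' and stamp the "bmt" signature into the OOB
 * area at OOB_SIGNATURE_OFFSET, so is_valid_bmt() can recognise the block
 * again on the next scan.
 */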
static int write_bmt(u16 block, unsigned char *dat)
{
	struct mtd_oob_ops ops = {
		.mode = MTD_OPS_PLACE_OOB,
		.ooboffs = OOB_SIGNATURE_OFFSET + bmtd.oob_offset,
		.oobbuf = "bmt",
		.ooblen = 3,
		.datbuf = dat,
		.len = bmtd.bmt_pgs << bmtd.pg_shift,
	};
	loff_t addr = (loff_t)block << bmtd.blk_shift;

	return bmtd._write_oob(bmtd.mtd, addr, &ops);
}

static u16 find_valid_block(u16 block)
{
	u8 fdm[4];
	int ret;
	int loop = 0;

retry:
	if (block >= bmtd.total_blks)
		return 0;

	ret = bbt_nand_read(blk_pg(block), nand_data_buf, bmtd.pg_size,
			    fdm, sizeof(fdm));
	/* Read the 1st byte of FDM to judge whether it's a bad block
	 * or not
	 */
	if (ret || fdm[0] != 0xff) {
		pr_info("nand: found bad block 0x%x\n", block);
		if (loop >= bmtd.bb_max) {
			pr_info("nand: FATAL ERR: too many bad blocks!!\n");
			return 0;
		}

		loop++;
		block++;
		goto retry;
	}

	return block;
}

/* Find out all bad blocks, and fill in the mapping table */
static int scan_bad_blocks(struct bbbt *bbt)
{
	int i;
	u16 block = 0;

	/* First time download, the block0 MUST NOT be a bad block,
	 * this is guaranteed by vendor
	 */
	bbt->bb_tbl[0] = 0;

	/*
	 * Construct the mapping table of the normal data area (non-PMT/BMTPOOL)
	 * G - Good block; B - Bad block
	 *	     ---------------------------
	 * physical |G|G|B|G|B|B|G|G|G|G|B|G|B|
	 *	     ---------------------------
	 * What bb_tbl[i] looks like:
	 *   physical block(i):
	 *	0 1 2 3 4 5 6 7 8 9 a b c
	 *   mapped block(bb_tbl[i]):
	 *	0 1 3 6 7 8 9 b ......
	 * ATTENTION:
	 *	If a new bad block (n) occurs, search bmt_tbl to find
	 *	an available block (x), and fill in bb_tbl[n] = x;
	 */
	for (i = 1; i < bmtd.pool_lba; i++) {
		bbt->bb_tbl[i] = find_valid_block(bbt->bb_tbl[i - 1] + 1);
		BBT_LOG("bb_tbl[0x%x] = 0x%x", i, bbt->bb_tbl[i]);
		if (bbt->bb_tbl[i] == 0)
			return -1;
	}

	/* Physical Block start Address of BMT pool */
	bmtd.pool_pba = bbt->bb_tbl[i - 1] + 1;
	if (bmtd.pool_pba >= bmtd.total_blks - 2) {
		pr_info("nand: FATAL ERR: Too many bad blocks!!\n");
		return -1;
	}

	BBT_LOG("pool_pba=0x%x", bmtd.pool_pba);
	i = 0;
	block = bmtd.pool_pba;
	/*
	 * The bmt table is used for runtime bad block mapping
	 * G - Good block; B - Bad block
	 *	     ---------------------------
	 * physical |G|G|B|G|B|B|G|G|G|G|B|G|B|
	 *	     ---------------------------
	 * block:    0 1 2 3 4 5 6 7 8 9 a b c
	 * What bmt_tbl[i] looks like in initial state:
	 *   i:
	 *	0 1 2 3 4 5 6 7
	 *   bmt_tbl[i].block:
	 *	0 1 3 6 7 8 9 b
	 *   bmt_tbl[i].mapped:
	 *	N N N N N N N B
	 *	N - Not mapped (Available)
	 *	M - Mapped
	 *	B - BMT
	 * ATTENTION:
	 *	The BMT is always kept in the last valid block of the pool
	 */
	while ((block = find_valid_block(block)) != 0) {
		bmt_tbl(bbt)[i].block = block;
		bmt_tbl(bbt)[i].mapped = NO_MAPPED;
		BBT_LOG("bmt_tbl[%d].block = 0x%x", i, block);
		block++;
		i++;
	}

	if (i < 1) {
		pr_info("nand: FATAL ERR: no space to store BMT!!\n");
		return -1;
	}

	/* i - How many available blocks in pool, which is the length of bmt_tbl[]
	 * bmtd.bmt_blk_idx - bmt_tbl[bmtd.bmt_blk_idx].block => the BMT block
	 */
	bmtd.bmt_blk_idx = i - 1;
	bmt_tbl(bbt)[bmtd.bmt_blk_idx].mapped = BMT_MAPPED;

	pr_info("[BBT] %d available blocks in BMT pool\n", i);

	return 0;
}

static bool is_valid_bmt(unsigned char *buf, unsigned char *fdm)
{
	struct bbbt *bbt = (struct bbbt *)buf;
	u8 *sig = (u8 *)bbt->signature + MAIN_SIGNATURE_OFFSET;

	if (memcmp(bbt->signature + MAIN_SIGNATURE_OFFSET, "BMT", 3) == 0 &&
	    memcmp(fdm + OOB_SIGNATURE_OFFSET, "bmt", 3) == 0) {
		if (bbt->version == BBMT_VERSION)
			return true;
	}
	BBT_LOG("[BBT] BMT version mismatch, please upgrade the preloader and u-boot! sig=%02x%02x%02x, fdm=%02x%02x%02x",
		sig[0], sig[1], sig[2],
		fdm[1], fdm[2], fdm[3]);
	return false;
}

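/*
 * Walk the pool table until the BMT_TBL_DEF_VAL terminator and return the
 * index of the entry marked BMT_MAPPED; 0 means no BMT entry was found and
 * is treated as an error by the caller.
 */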
static u16 get_bmt_index(struct bbmt *bmt)
{
	int i = 0;

	while (bmt[i].block != BMT_TBL_DEF_VAL) {
		if (bmt[i].mapped == BMT_MAPPED)
			return i;
		i++;
	}
	return 0;
}

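/*
 * Look for a valid BMT, scanning downwards from 'block' (the caller passes
 * the last block of the flash) and giving up once the scan drops below
 * bmtd.pool_lba.
 */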
static struct bbbt *scan_bmt(u16 block)
{
	u8 fdm[4];

	if (block < bmtd.pool_lba)
		return NULL;

	if (read_bmt(block, nand_bbt_buf, fdm, sizeof(fdm)))
		return scan_bmt(block - 1);

	if (is_valid_bmt(nand_bbt_buf, fdm)) {
		bmtd.bmt_blk_idx = get_bmt_index(bmt_tbl((struct bbbt *)nand_bbt_buf));
		if (bmtd.bmt_blk_idx == 0) {
			pr_info("[BBT] FATAL ERR: bmt block index is wrong!\n");
			return NULL;
		}
		pr_info("[BBT] BMT.v2 is found at 0x%x\n", block);
		return (struct bbbt *)nand_bbt_buf;
	} else
		return scan_bmt(block - 1);
}

/* Write the Burner Bad Block Table to Nand Flash
 * n - write BMT to bmt_tbl[n]
 */
static u16 upload_bmt(struct bbbt *bbt, int n)
{
	u16 block;

retry:
	if (n < 0 || bmt_tbl(bbt)[n].mapped == NORMAL_MAPPED) {
		pr_info("nand: FATAL ERR: no space to store BMT!\n");
		return (u16)-1;
	}

	block = bmt_tbl(bbt)[n].block;
	BBT_LOG("n = 0x%x, block = 0x%x", n, block);
	if (bbt_nand_erase(block)) {
		bmt_tbl(bbt)[n].block = 0;
		/* erase failed, try the previous block: bmt_tbl[n - 1].block */
		n--;
		goto retry;
	}

	/* The main signature offset is fixed at 0,
	 * the OOB signature offset is fixed at 1
	 */
	memcpy(bbt->signature + MAIN_SIGNATURE_OFFSET, "BMT", 3);
	bbt->version = BBMT_VERSION;

	if (write_bmt(block, (unsigned char *)bbt)) {
		bmt_tbl(bbt)[n].block = 0;

		/* write failed, try the previous block in bmt_tbl[n - 1] */
		n--;
		goto retry;
	}

	/* Return the current index(n) of BMT pool (bmt_tbl[n]) */
	return n;
}

static u16 find_valid_block_in_pool(struct bbbt *bbt)
{
	int i;

	if (bmtd.bmt_blk_idx == 0)
		goto error;

	for (i = 0; i < bmtd.bmt_blk_idx; i++) {
		if (bmt_tbl(bbt)[i].block != 0 && bmt_tbl(bbt)[i].mapped == NO_MAPPED) {
			bmt_tbl(bbt)[i].mapped = NORMAL_MAPPED;
			return bmt_tbl(bbt)[i].block;
		}
	}

error:
	pr_info("nand: FATAL ERR: BMT pool is exhausted!\n");
	return 0;
}

/* We hit a bad block: mark it as bad and map it to a valid block in the pool.
 * On a write failure, the caller still needs to write the data to the mapped
 * block.
 */
static bool update_bmt(u16 block)
{
	u16 mapped_blk;
	struct bbbt *bbt;

	bbt = bmtd.bbt;
	mapped_blk = find_valid_block_in_pool(bbt);
	if (mapped_blk == 0)
		return false;

	/* Map new bad block to available block in pool */
	bbt->bb_tbl[block] = mapped_blk;
	bmtd.bmt_blk_idx = upload_bmt(bbt, bmtd.bmt_blk_idx);

	return true;
}

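/*
 * Worked example, using the bb_tbl layout sketched in scan_bad_blocks():
 * with bb_tbl = {0, 1, 3, 6, 7, ...}, logical block 2 resolves to physical
 * block 3; blocks at or above pool_lba are passed through unchanged.
 */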
u16 get_mapping_block_index(int block)
{
	int mapping_block;

	if (block < bmtd.pool_lba)
		mapping_block = bmtd.bbt->bb_tbl[block];
	else
		mapping_block = block;
	BBT_LOG("0x%x mapped to 0x%x", block, mapping_block);

	return mapping_block;
}

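/*
 * The mtd hooks below split each request at erase-block boundaries, remap
 * every block through get_mapping_block_index() and, on an I/O error, remap
 * the failing block via update_bmt() and retry up to 10 times before giving
 * up.
 */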
static int
mtk_bmt_read(struct mtd_info *mtd, loff_t from,
	     struct mtd_oob_ops *ops)
{
	struct mtd_oob_ops cur_ops = *ops;
	int retry_count = 0;
	loff_t cur_from;
	int ret;

	ops->retlen = 0;
	ops->oobretlen = 0;

	while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) {
		u32 offset = from & (bmtd.blk_size - 1);
		u32 block = from >> bmtd.blk_shift;
		u32 cur_block;

		cur_block = get_mapping_block_index(block);
		cur_from = ((loff_t)cur_block << bmtd.blk_shift) + offset;

		cur_ops.oobretlen = 0;
		cur_ops.retlen = 0;
		cur_ops.len = min_t(u32, mtd->erasesize - offset,
				    ops->len - ops->retlen);
		ret = bmtd._read_oob(mtd, cur_from, &cur_ops);
		if (ret < 0) {
			update_bmt(block);
			if (retry_count++ < 10)
				continue;

			return ret;
		}

		ops->retlen += cur_ops.retlen;
		ops->oobretlen += cur_ops.oobretlen;

		cur_ops.ooboffs = 0;
		cur_ops.datbuf += cur_ops.retlen;
		cur_ops.oobbuf += cur_ops.oobretlen;
		cur_ops.ooblen -= cur_ops.oobretlen;

		if (!cur_ops.len)
			cur_ops.len = mtd->erasesize - offset;

		from += cur_ops.len;
		retry_count = 0;
	}

	return 0;
}

static int
mtk_bmt_write(struct mtd_info *mtd, loff_t to,
	      struct mtd_oob_ops *ops)
{
	struct mtd_oob_ops cur_ops = *ops;
	int retry_count = 0;
	loff_t cur_to;
	int ret;

	ops->retlen = 0;
	ops->oobretlen = 0;

	while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) {
		u32 offset = to & (bmtd.blk_size - 1);
		u32 block = to >> bmtd.blk_shift;
		u32 cur_block;

		cur_block = get_mapping_block_index(block);
		cur_to = ((loff_t)cur_block << bmtd.blk_shift) + offset;

		cur_ops.oobretlen = 0;
		cur_ops.retlen = 0;
		cur_ops.len = min_t(u32, bmtd.blk_size - offset,
				    ops->len - ops->retlen);
		ret = bmtd._write_oob(mtd, cur_to, &cur_ops);
		if (ret < 0) {
			update_bmt(block);
			if (retry_count++ < 10)
				continue;

			return ret;
		}

		ops->retlen += cur_ops.retlen;
		ops->oobretlen += cur_ops.oobretlen;

		cur_ops.ooboffs = 0;
		cur_ops.datbuf += cur_ops.retlen;
		cur_ops.oobbuf += cur_ops.oobretlen;
		cur_ops.ooblen -= cur_ops.oobretlen;

		if (!cur_ops.len)
			cur_ops.len = mtd->erasesize - offset;

		to += cur_ops.len;
		retry_count = 0;
	}

	return 0;
}

static int
mtk_bmt_mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct erase_info mapped_instr = {
		.len = bmtd.blk_size,
	};
	int retry_count = 0;
	u64 start_addr, end_addr;
	int ret = 0;
	u16 orig_block, block;

	start_addr = instr->addr & (~mtd->erasesize_mask);
	end_addr = instr->addr + instr->len;

	while (start_addr < end_addr) {
		orig_block = start_addr >> bmtd.blk_shift;
		block = get_mapping_block_index(orig_block);
		mapped_instr.addr = (loff_t)block << bmtd.blk_shift;
		ret = bmtd._erase(mtd, &mapped_instr);
		if (ret) {
			update_bmt(orig_block);
			if (retry_count++ < 10)
				continue;
			instr->fail_addr = start_addr;
			break;
		}
		start_addr += mtd->erasesize;
		retry_count = 0;
	}

	return ret;
}

static int
mtk_bmt_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	int retry_count = 0;
	u16 orig_block = ofs >> bmtd.blk_shift;
	u16 block;
	int ret;

retry:
	block = get_mapping_block_index(orig_block);
	ret = bmtd._block_isbad(mtd, (loff_t)block << bmtd.blk_shift);
	if (ret) {
		update_bmt(orig_block);
		if (retry_count++ < 10)
			goto retry;
	}
	return ret;
}

static int
mtk_bmt_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	u16 orig_block = ofs >> bmtd.blk_shift;
	u16 block = get_mapping_block_index(orig_block);

	update_bmt(orig_block);
	return bmtd._block_markbad(mtd, (loff_t)block << bmtd.blk_shift);
}

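/*
 * Save the driver's original mtd callbacks in bmtd and install the BMT
 * wrappers, so remapping stays transparent to the upper MTD layers;
 * mtk_bmt_detach() restores the originals.
 */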
static void
mtk_bmt_replace_ops(struct mtd_info *mtd)
{
	bmtd._read_oob = mtd->_read_oob;
	bmtd._write_oob = mtd->_write_oob;
	bmtd._erase = mtd->_erase;
	bmtd._block_isbad = mtd->_block_isbad;
	bmtd._block_markbad = mtd->_block_markbad;

	mtd->_read_oob = mtk_bmt_read;
	mtd->_write_oob = mtk_bmt_write;
	mtd->_erase = mtk_bmt_mtd_erase;
	mtd->_block_isbad = mtk_bmt_block_isbad;
	mtd->_block_markbad = mtk_bmt_block_markbad;
}

static int mtk_bmt_debug_mark_good(void *data, u64 val)
{
	u32 block = val >> bmtd.blk_shift;

	bmtd.bbt->bb_tbl[block] = block;
	bmtd.bmt_blk_idx = upload_bmt(bmtd.bbt, bmtd.bmt_blk_idx);

	return 0;
}

static int mtk_bmt_debug_mark_bad(void *data, u64 val)
{
	u32 block = val >> bmtd.blk_shift;

	update_bmt(block);

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(fops_mark_good, NULL, mtk_bmt_debug_mark_good, "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(fops_mark_bad, NULL, mtk_bmt_debug_mark_bad, "%llu\n");

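/*
 * Illustrative debugfs usage (assuming debugfs is mounted at
 * /sys/kernel/debug):
 *
 *	echo <flash offset> > /sys/kernel/debug/mtk-bmt/mark_bad
 *	echo <flash offset> > /sys/kernel/debug/mtk-bmt/mark_good
 *
 * The written value is a byte offset; it is shifted down by blk_shift to get
 * the block that is remapped (mark_bad) or mapped back to itself (mark_good).
 */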
static void
mtk_bmt_add_debugfs(void)
{
	struct dentry *dir;

	dir = bmtd.debugfs_dir = debugfs_create_dir("mtk-bmt", NULL);
	if (!dir)
		return;

	debugfs_create_file_unsafe("mark_good", S_IWUSR, dir, NULL, &fops_mark_good);
	debugfs_create_file_unsafe("mark_bad", S_IWUSR, dir, NULL, &fops_mark_bad);
}

void mtk_bmt_detach(struct mtd_info *mtd)
{
	if (bmtd.mtd != mtd)
		return;

	if (bmtd.debugfs_dir)
		debugfs_remove_recursive(bmtd.debugfs_dir);
	bmtd.debugfs_dir = NULL;

	kfree(nand_bbt_buf);
	kfree(nand_data_buf);

	mtd->_read_oob = bmtd._read_oob;
	mtd->_write_oob = bmtd._write_oob;
	mtd->_erase = bmtd._erase;
	mtd->_block_isbad = bmtd._block_isbad;
	mtd->_block_markbad = bmtd._block_markbad;
	mtd->size = bmtd.total_blks << bmtd.blk_shift;

	memset(&bmtd, 0, sizeof(bmtd));
}

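/*
 * Intended usage (assumption based on the attach/detach pair): the NAND
 * controller driver calls mtk_bmt_attach() once the mtd device has been set
 * up, and mtk_bmt_detach() when it is torn down.
 */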
/* total_blocks - the total number of blocks on the NAND chip */
int mtk_bmt_attach(struct mtd_info *mtd)
{
	struct device_node *np;
	struct bbbt *bbt;
	u32 bufsz;
	u32 block;
	u16 total_blocks, pmt_block;
	int ret = 0;
	u32 bmt_pool_size, bmt_table_size;

	if (bmtd.mtd)
		return -ENOSPC;

	np = mtd_get_of_node(mtd);
	if (!np)
		return 0;

	if (!of_property_read_bool(np, "mediatek,bmt-v2"))
		return 0;

	if (of_property_read_u32(np, "mediatek,bmt-pool-size",
				 &bmt_pool_size) != 0)
		bmt_pool_size = 80;

	if (of_property_read_u8(np, "mediatek,bmt-oob-offset",
				&bmtd.oob_offset) != 0)
		bmtd.oob_offset = 0;

	if (of_property_read_u32(np, "mediatek,bmt-table-size",
				 &bmt_table_size) != 0)
		bmt_table_size = 0x2000U;

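	/*
	 * Illustrative device tree fragment for the properties parsed above
	 * (node names are placeholders; an optional u8 property
	 * "mediatek,bmt-oob-offset" may also be given):
	 *
	 *	nand@0 {
	 *		mediatek,bmt-v2;
	 *		mediatek,bmt-pool-size = <80>;
	 *		mediatek,bmt-table-size = <0x2000>;
	 *	};
	 */
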
	bmtd.mtd = mtd;
	mtk_bmt_replace_ops(mtd);

	bmtd.table_size = bmt_table_size;
	bmtd.blk_size = mtd->erasesize;
	bmtd.blk_shift = ffs(bmtd.blk_size) - 1;
	bmtd.pg_size = mtd->writesize;
	bmtd.pg_shift = ffs(bmtd.pg_size) - 1;
	total_blocks = mtd->size >> bmtd.blk_shift;
	pmt_block = total_blocks - bmt_pool_size - 2;

	mtd->size = pmt_block << bmtd.blk_shift;

	/*
	 *  -----------------------------------------
	 * | PMT(2blks) | BMT POOL(totalblks * 2%)   |
	 *  -----------------------------------------
	 *  ^            ^
	 *  |            |
	 *  pmt_block    pmt_block + 2 blocks (pool_lba)
	 *
	 * ATTENTION!!!
	 *	The blocks ahead of the boundary block are mapped via bb_tbl,
	 *	and the blocks behind it are managed in bmt_tbl
	 */

	bmtd.pool_lba = (u16)(pmt_block + 2);
	bmtd.total_blks = total_blocks;
	bmtd.bb_max = bmtd.total_blks * BBPOOL_RATIO / 100;

	/* Two working buffers are needed: the BMT table buffer and a
	 * single-page data buffer
	 */
	bufsz = round_up(sizeof(struct bbbt) +
			 bmt_table_size * sizeof(struct bbmt), bmtd.pg_size);
	bmtd.bmt_pgs = bufsz >> bmtd.pg_shift;

	nand_bbt_buf = kzalloc(bufsz, GFP_KERNEL);
	nand_data_buf = kzalloc(bmtd.pg_size, GFP_KERNEL);

	if (!nand_bbt_buf || !nand_data_buf) {
		pr_info("nand: FATAL ERR: allocate buffer failed!\n");
		ret = -1;
		goto error;
	}

	memset(nand_bbt_buf, 0xff, bufsz);
	memset(nand_data_buf, 0xff, bmtd.pg_size);

	BBT_LOG("bbtbuf=0x%p(0x%x) dat=0x%p(0x%x)",
		nand_bbt_buf, bufsz, nand_data_buf, bmtd.pg_size);
	BBT_LOG("pool_lba=0x%x total_blks=0x%x bb_max=0x%x",
		bmtd.pool_lba, bmtd.total_blks, bmtd.bb_max);

	/* Scanning starts from the first page of the last block
	 * of the whole flash
	 */
	bbt = scan_bmt(bmtd.total_blks - 1);
	if (!bbt) {
		/* BMT not found */
		if (bmtd.total_blks > BB_TABLE_MAX + BMT_TABLE_MAX) {
			pr_info("nand: FATAL: Too many blocks, cannot support!\n");
			ret = -1;
			goto error;
		}

		bbt = (struct bbbt *)nand_bbt_buf;
		memset(bmt_tbl(bbt), BMT_TBL_DEF_VAL, bmtd.table_size * sizeof(struct bbmt));

		if (scan_bad_blocks(bbt)) {
			ret = -1;
			goto error;
		}

		/* The BMT always lives in the last valid block of the pool */
		bmtd.bmt_blk_idx = upload_bmt(bbt, bmtd.bmt_blk_idx);
		block = bmt_tbl(bbt)[bmtd.bmt_blk_idx].block;
		pr_notice("[BBT] BMT.v2 is written into PBA:0x%x\n", block);

		if (bmtd.bmt_blk_idx == 0)
			pr_info("nand: Warning: no available block in BMT pool!\n");
		else if (bmtd.bmt_blk_idx == (u16)-1) {
			ret = -1;
			goto error;
		}
	}
	mtk_bmt_add_debugfs();

	bmtd.bbt = bbt;
	return 0;

error:
	mtk_bmt_detach(mtd);
	return ret;
}

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Xiangsheng Hou <xiangsheng.hou@mediatek.com>, Felix Fietkau <nbd@nbd.name>");
MODULE_DESCRIPTION("Bad Block mapping management v2 for MediaTek NAND Flash Driver");