kernel: mtk_bmt: add support for limiting range of remapping
target/linux/generic/files/drivers/mtd/nand/mtk_bmt.c
1 /*
2 * Copyright (c) 2017 MediaTek Inc.
3 * Author: Xiangsheng Hou <xiangsheng.hou@mediatek.com>
4 * Copyright (c) 2020 Felix Fietkau <nbd@nbd.name>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16 #include <linux/slab.h>
17 #include <linux/gfp.h>
18 #include <linux/kernel.h>
19 #include <linux/of.h>
20 #include <linux/mtd/mtd.h>
21 #include <linux/mtd/partitions.h>
22 #include <linux/mtd/mtk_bmt.h>
23 #include <linux/module.h>
24 #include <linux/debugfs.h>
25 #include <linux/bits.h>
26
27 #define MAIN_SIGNATURE_OFFSET 0
28 #define OOB_SIGNATURE_OFFSET 1
29 #define BBPOOL_RATIO 2
30
31 #define BBT_LOG(fmt, ...) pr_debug("[BBT][%s|%d] "fmt"\n", __func__, __LINE__, ##__VA_ARGS__)
32
33 /* Maximum 8k blocks */
34 #define BB_TABLE_MAX bmtd.table_size
35 #define BMT_TABLE_MAX (BB_TABLE_MAX * BBPOOL_RATIO / 100)
36 #define BMT_TBL_DEF_VAL 0x0
37
38 /*
39 * Burner Bad Block Table
40 * --------- Only SLC NAND chips are supported ----------
41 */
42
43 struct bbbt {
44 char signature[3];
45 /* This version is used to distinguish the legacy and new algorithm */
46 #define BBMT_VERSION 2
47 unsigned char version;
48 /* The two tables below (bb_tbl[] and the bmt table that follows it) are written to SLC */
49 u16 bb_tbl[];
50 };
51
52 struct bbmt {
53 u16 block;
54 #define NO_MAPPED 0
55 #define NORMAL_MAPPED 1
56 #define BMT_MAPPED 2
57 u16 mapped;
58 };
59
60 static struct bmt_desc {
61 struct mtd_info *mtd;
62
63 int (*_read_oob) (struct mtd_info *mtd, loff_t from,
64 struct mtd_oob_ops *ops);
65 int (*_write_oob) (struct mtd_info *mtd, loff_t to,
66 struct mtd_oob_ops *ops);
67 int (*_erase) (struct mtd_info *mtd, struct erase_info *instr);
68 int (*_block_isbad) (struct mtd_info *mtd, loff_t ofs);
69 int (*_block_markbad) (struct mtd_info *mtd, loff_t ofs);
70
71 struct bbbt *bbt;
72
73 struct dentry *debugfs_dir;
74
75 u32 table_size;
76 u32 pg_size;
77 u32 blk_size;
78 u16 pg_shift;
79 u16 blk_shift;
80 /* bbt logical address */
81 u16 pool_lba;
82 /* bbt physical address */
83 u16 pool_pba;
84 /* Maximum number of bad blocks guaranteed by the vendor */
85 u16 bb_max;
86 /* Total number of blocks of the NAND chip */
87 u16 total_blks;
88 /* The BMT is stored in block bmt_tbl[bmt_blk_idx].block */
89 u16 bmt_blk_idx;
90 /* How many pages are needed to store 'struct bbbt' */
91 u32 bmt_pgs;
92
93 const __be32 *remap_range;
94 int remap_range_len;
95
96 /* to compensate for driver level remapping */
97 u8 oob_offset;
98 } bmtd = {0};
99
100 static unsigned char *nand_bbt_buf;
101 static unsigned char *nand_data_buf;
102
103 /* -------- Unit conversions -------- */
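/* Convert a block number into the page number of its first page */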
104 static inline u32 blk_pg(u16 block)
105 {
106 return (u32)(block << (bmtd.blk_shift - bmtd.pg_shift));
107 }
108
109 /* -------- Nand operations wrapper -------- */
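/*
 * Read 'dat_len' bytes of page data starting at 'page', together with the
 * first 'fdm_len' bytes of the OOB/FDM area.
 */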
110 static inline int
111 bbt_nand_read(u32 page, unsigned char *dat, int dat_len,
112 unsigned char *fdm, int fdm_len)
113 {
114 struct mtd_oob_ops ops = {
115 .mode = MTD_OPS_PLACE_OOB,
116 .ooboffs = bmtd.oob_offset,
117 .oobbuf = fdm,
118 .ooblen = fdm_len,
119 .datbuf = dat,
120 .len = dat_len,
121 };
122
123 return bmtd._read_oob(bmtd.mtd, page << bmtd.pg_shift, &ops);
124 }
125
126 static inline int bbt_nand_erase(u16 block)
127 {
128 struct mtd_info *mtd = bmtd.mtd;
129 struct erase_info instr = {
130 .addr = (loff_t)block << bmtd.blk_shift,
131 .len = bmtd.blk_size,
132 };
133
134 return bmtd._erase(mtd, &instr);
135 }
136
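/*
 * Copy page data and OOB from src_blk to dest_blk, stopping at max_offset
 * bytes or at the first page that returns no data.
 */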
137 static inline int bbt_nand_copy(u16 dest_blk, u16 src_blk, loff_t max_offset)
138 {
139 int pages = bmtd.blk_size >> bmtd.pg_shift;
140 loff_t src = (loff_t)src_blk << bmtd.blk_shift;
141 loff_t dest = (loff_t)dest_blk << bmtd.blk_shift;
142 loff_t offset = 0;
143 uint8_t oob[64];
144 int i, ret;
145
146 for (i = 0; i < pages; i++) {
147 struct mtd_oob_ops rd_ops = {
148 .mode = MTD_OPS_PLACE_OOB,
149 .oobbuf = oob,
150 .ooblen = min_t(int, bmtd.mtd->oobsize / pages, sizeof(oob)),
151 .datbuf = nand_data_buf,
152 .len = bmtd.pg_size,
153 };
154 struct mtd_oob_ops wr_ops = {
155 .mode = MTD_OPS_PLACE_OOB,
156 .oobbuf = oob,
157 .datbuf = nand_data_buf,
158 .len = bmtd.pg_size,
159 };
160
161 if (offset >= max_offset)
162 break;
163
164 ret = bmtd._read_oob(bmtd.mtd, src + offset, &rd_ops);
165 if (ret < 0 && !mtd_is_bitflip(ret))
166 return ret;
167
168 if (!rd_ops.retlen)
169 break;
170
171 wr_ops.ooblen = rd_ops.oobretlen;
172 ret = bmtd._write_oob(bmtd.mtd, dest + offset, &wr_ops);
173 if (ret < 0)
174 return ret;
175
176 offset += rd_ops.retlen;
177 }
178
179 return 0;
180 }
181
182 /* -------- Bad Blocks Management -------- */
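/* The bmt_tbl[] entries live directly after bb_tbl[] in the same buffer */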
183 static inline struct bbmt *bmt_tbl(struct bbbt *bbbt)
184 {
185 return (struct bbmt *)&bbbt->bb_tbl[bmtd.table_size];
186 }
187
188 static int
189 read_bmt(u16 block, unsigned char *dat, unsigned char *fdm, int fdm_len)
190 {
191 u32 len = bmtd.bmt_pgs << bmtd.pg_shift;
192
193 return bbt_nand_read(blk_pg(block), dat, len, fdm, fdm_len);
194 }
195
196 static int write_bmt(u16 block, unsigned char *dat)
197 {
198 struct mtd_oob_ops ops = {
199 .mode = MTD_OPS_PLACE_OOB,
200 .ooboffs = OOB_SIGNATURE_OFFSET + bmtd.oob_offset,
201 .oobbuf = "bmt",
202 .ooblen = 3,
203 .datbuf = dat,
204 .len = bmtd.bmt_pgs << bmtd.pg_shift,
205 };
206 loff_t addr = (loff_t)block << bmtd.blk_shift;
207
208 return bmtd._write_oob(bmtd.mtd, addr, &ops);
209 }
210
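/*
 * Starting at 'block', skip over bad blocks (read failure or first FDM byte
 * != 0xff) until a good block is found. Returns 0 when the end of flash is
 * reached or more than bb_max bad blocks have been skipped.
 */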
211 static u16 find_valid_block(u16 block)
212 {
213 u8 fdm[4];
214 int ret;
215 int loop = 0;
216
217 retry:
218 if (block >= bmtd.total_blks)
219 return 0;
220
221 ret = bbt_nand_read(blk_pg(block), nand_data_buf, bmtd.pg_size,
222 fdm, sizeof(fdm));
223 /* Read the 1st FDM byte to judge whether the block is bad
224 * or not
225 */
226 if (ret || fdm[0] != 0xff) {
227 pr_info("nand: found bad block 0x%x\n", block);
228 if (loop >= bmtd.bb_max) {
229 pr_info("nand: FATAL ERR: too many bad blocks!!\n");
230 return 0;
231 }
232
233 loop++;
234 block++;
235 goto retry;
236 }
237
238 return block;
239 }
240
241 /* Find out all bad blocks, and fill in the mapping table */
242 static int scan_bad_blocks(struct bbbt *bbt)
243 {
244 int i;
245 u16 block = 0;
246
247 /* On the first-time download, block 0 MUST NOT be a bad block;
248 * this is guaranteed by the vendor
249 */
250 bbt->bb_tbl[0] = 0;
251
252 /*
253 * Construct the mapping table of Normal data area(non-PMT/BMTPOOL)
254 * G - Good block; B - Bad block
255 * ---------------------------
256 * physical |G|G|B|G|B|B|G|G|G|G|B|G|B|
257 * ---------------------------
258 * What bb_tbl[i] looks like:
259 * physical block(i):
260 * 0 1 2 3 4 5 6 7 8 9 a b c
261 * mapped block(bb_tbl[i]):
262 * 0 1 3 6 7 8 9 b ......
263 * ATTENTION:
264 * If a new bad block(n) occurs, search bmt_tbl to find
265 * an available block(x), and fill in bb_tbl[n] = x;
266 */
267 for (i = 1; i < bmtd.pool_lba; i++) {
268 bbt->bb_tbl[i] = find_valid_block(bbt->bb_tbl[i - 1] + 1);
269 BBT_LOG("bb_tbl[0x%x] = 0x%x", i, bbt->bb_tbl[i]);
270 if (bbt->bb_tbl[i] == 0)
271 return -1;
272 }
273
274 /* Physical Block start Address of BMT pool */
275 bmtd.pool_pba = bbt->bb_tbl[i - 1] + 1;
276 if (bmtd.pool_pba >= bmtd.total_blks - 2) {
277 pr_info("nand: FATAL ERR: Too many bad blocks!!\n");
278 return -1;
279 }
280
281 BBT_LOG("pool_pba=0x%x", bmtd.pool_pba);
282 i = 0;
283 block = bmtd.pool_pba;
284 /*
285 * The bmt table is used for runtime bad block mapping
286 * G - Good block; B - Bad block
287 * ---------------------------
288 * physical |G|G|B|G|B|B|G|G|G|G|B|G|B|
289 * ---------------------------
290 * block: 0 1 2 3 4 5 6 7 8 9 a b c
291 * What bmt_tbl[i] looks like in initial state:
292 * i:
293 * 0 1 2 3 4 5 6 7
294 * bmt_tbl[i].block:
295 * 0 1 3 6 7 8 9 b
296 * bmt_tbl[i].mapped:
297 * N N N N N N N B
298 * N - Not mapped(Available)
299 * M - Mapped
300 * B - BMT
301 * ATTENTION:
302 * The BMT always lives in the last valid block of the pool
303 */
304 while ((block = find_valid_block(block)) != 0) {
305 bmt_tbl(bbt)[i].block = block;
306 bmt_tbl(bbt)[i].mapped = NO_MAPPED;
307 BBT_LOG("bmt_tbl[%d].block = 0x%x", i, block);
308 block++;
309 i++;
310 }
311
312 /* i - the number of available blocks in the pool, i.e. the length of bmt_tbl[]
313 * bmtd.bmt_blk_idx - bmt_tbl[bmtd.bmt_blk_idx].block => the BMT block
314 */
315 if (i < 1) {
316 pr_info("nand: FATAL ERR: no space to store BMT!!\n");
317 return -1;
318 }
319
320 bmtd.bmt_blk_idx = i - 1;
321 bmt_tbl(bbt)[bmtd.bmt_blk_idx].mapped = BMT_MAPPED;
322
323 pr_info("[BBT] %d available blocks in BMT pool\n", i);
324
325 return 0;
326 }
327
328 static bool is_valid_bmt(unsigned char *buf, unsigned char *fdm)
329 {
330 struct bbbt *bbt = (struct bbbt *)buf;
331 u8 *sig = (u8*)bbt->signature + MAIN_SIGNATURE_OFFSET;
332
333
334 if (memcmp(bbt->signature + MAIN_SIGNATURE_OFFSET, "BMT", 3) == 0 &&
335 memcmp(fdm + OOB_SIGNATURE_OFFSET, "bmt", 3) == 0) {
336 if (bbt->version == BBMT_VERSION)
337 return true;
338 }
339 BBT_LOG("[BBT] BMT version mismatch, please upgrade preloader and uboot! sig=%02x%02x%02x, fdm=%02x%02x%02x",
340 sig[0], sig[1], sig[2],
341 fdm[1], fdm[2], fdm[3]);
342 return false;
343 }
344
345 static u16 get_bmt_index(struct bbmt *bmt)
346 {
347 int i = 0;
348
349 while (bmt[i].block != BMT_TBL_DEF_VAL) {
350 if (bmt[i].mapped == BMT_MAPPED)
351 return i;
352 i++;
353 }
354 return 0;
355 }
356
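/*
 * Look for an existing BMT, scanning backwards from 'block' down to
 * pool_lba. Returns the table (held in nand_bbt_buf) or NULL if none is
 * found.
 */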
357 static struct bbbt *scan_bmt(u16 block)
358 {
359 u8 fdm[4];
360
361 if (block < bmtd.pool_lba)
362 return NULL;
363
364 if (read_bmt(block, nand_bbt_buf, fdm, sizeof(fdm)))
365 return scan_bmt(block - 1);
366
367 if (is_valid_bmt(nand_bbt_buf, fdm)) {
368 bmtd.bmt_blk_idx = get_bmt_index(bmt_tbl((struct bbbt *)nand_bbt_buf));
369 if (bmtd.bmt_blk_idx == 0) {
370 pr_info("[BBT] FATAL ERR: bmt block index is wrong!\n");
371 return NULL;
372 }
373 pr_info("[BBT] BMT.v2 is found at 0x%x\n", block);
374 return (struct bbbt *)nand_bbt_buf;
375 } else
376 return scan_bmt(block - 1);
377 }
378
379 /* Write the Burner Bad Block Table to Nand Flash
380 * n - write BMT to bmt_tbl[n]
381 */
382 static u16 upload_bmt(struct bbbt *bbt, int n)
383 {
384 u16 block;
385
386 retry:
387 if (n < 0 || bmt_tbl(bbt)[n].mapped == NORMAL_MAPPED) {
388 pr_info("nand: FATAL ERR: no space to store BMT!\n");
389 return (u16)-1;
390 }
391
392 block = bmt_tbl(bbt)[n].block;
393 BBT_LOG("n = 0x%x, block = 0x%x", n, block);
394 if (bbt_nand_erase(block)) {
395 bmt_tbl(bbt)[n].block = 0;
396 /* erase failed, try the previous block: bmt_tbl[n - 1].block */
397 n--;
398 goto retry;
399 }
400
401 /* The main signature offset is fixed at 0,
402 * the OOB signature offset is fixed at 1
403 */
404 memcpy(bbt->signature + MAIN_SIGNATURE_OFFSET, "BMT", 3);
405 bbt->version = BBMT_VERSION;
406
407 if (write_bmt(block, (unsigned char *)bbt)) {
408 bmt_tbl(bbt)[n].block = 0;
409
410 /* write failed, try the previous block in bmt_tbl[n - 1] */
411 n--;
412 goto retry;
413 }
414
415 /* Return the current index(n) of BMT pool (bmt_tbl[n]) */
416 return n;
417 }
418
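/* Grab the first unused block in the BMT pool and mark it NORMAL_MAPPED */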
419 static u16 find_valid_block_in_pool(struct bbbt *bbt)
420 {
421 int i;
422
423 if (bmtd.bmt_blk_idx == 0)
424 goto error;
425
426 for (i = 0; i < bmtd.bmt_blk_idx; i++) {
427 if (bmt_tbl(bbt)[i].block != 0 && bmt_tbl(bbt)[i].mapped == NO_MAPPED) {
428 bmt_tbl(bbt)[i].mapped = NORMAL_MAPPED;
429 return bmt_tbl(bbt)[i].block;
430 }
431 }
432
433 error:
434 pr_info("nand: FATAL ERR: BMT pool has run out!\n");
435 return 0;
436 }
437
438 /* We hit a bad block: mark it as bad and map it to a valid block in the pool.
439 * If this was a write failure, copy the existing data over to the mapped block.
440 */
441 static bool update_bmt(u16 block, int copy_len)
442 {
443 u16 mapped_blk;
444 struct bbbt *bbt;
445
446 bbt = bmtd.bbt;
447 mapped_blk = find_valid_block_in_pool(bbt);
448 if (mapped_blk == 0)
449 return false;
450
451 /* Map new bad block to available block in pool */
452 bbt->bb_tbl[block] = mapped_blk;
453
454 /* Erase new block */
455 bbt_nand_erase(mapped_blk);
456 if (copy_len > 0)
457 bbt_nand_copy(mapped_blk, block, copy_len);
458
459 bmtd.bmt_blk_idx = upload_bmt(bbt, bmtd.bmt_blk_idx);
460
461 return true;
462 }
463
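/*
 * Check whether 'block' lies inside one of the configured remap ranges.
 * Without a "mediatek,bmt-remap-range" property every block is eligible
 * for remapping.
 */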
464 static bool
465 mapping_block_in_range(int block)
466 {
467 const __be32 *cur = bmtd.remap_range;
468 u32 addr = block << bmtd.blk_shift;
469 int i;
470
471 if (!cur || !bmtd.remap_range_len)
472 return true;
473
474 for (i = 0; i < bmtd.remap_range_len; i++, cur += 2)
475 if (addr >= be32_to_cpu(cur[0]) && addr < be32_to_cpu(cur[1]))
476 return true;
477
478 return false;
479 }
480
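/*
 * Translate a logical block number into the physical block to access.
 * Blocks in the BMT pool area and blocks outside the remap range map to
 * themselves.
 */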
481 u16 get_mapping_block_index(int block)
482 {
483 if (block >= bmtd.pool_lba)
484 return block;
485
486 if (!mapping_block_in_range(block))
487 return block;
488
489 return bmtd.bbt->bb_tbl[block];
490 }
491
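/*
 * Read path: split the request at erase-block boundaries, remap each block
 * through the BMT, and on an uncorrectable read error remap the block and
 * retry up to 10 times before giving up.
 */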
492 static int
493 mtk_bmt_read(struct mtd_info *mtd, loff_t from,
494 struct mtd_oob_ops *ops)
495 {
496 struct mtd_oob_ops cur_ops = *ops;
497 int retry_count = 0;
498 loff_t cur_from;
499 int ret = 0;
500
501 ops->retlen = 0;
502 ops->oobretlen = 0;
503
504 while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) {
505 int cur_ret;
506
507 u32 offset = from & (bmtd.blk_size - 1);
508 u32 block = from >> bmtd.blk_shift;
509 u32 cur_block;
510
511 cur_block = get_mapping_block_index(block);
512 cur_from = ((loff_t)cur_block << bmtd.blk_shift) + offset;
513
514 cur_ops.oobretlen = 0;
515 cur_ops.retlen = 0;
516 cur_ops.len = min_t(u32, mtd->erasesize - offset,
517 ops->len - ops->retlen);
518 cur_ret = bmtd._read_oob(mtd, cur_from, &cur_ops);
519 if (cur_ret < 0)
520 ret = cur_ret;
521 if (cur_ret < 0 && !mtd_is_bitflip(cur_ret)) {
522 update_bmt(block, mtd->erasesize);
523 if (retry_count++ < 10)
524 continue;
525
526 return ret;
527 }
528
529 ops->retlen += cur_ops.retlen;
530 ops->oobretlen += cur_ops.oobretlen;
531
532 cur_ops.ooboffs = 0;
533 cur_ops.datbuf += cur_ops.retlen;
534 cur_ops.oobbuf += cur_ops.oobretlen;
535 cur_ops.ooblen -= cur_ops.oobretlen;
536
537 if (!cur_ops.len)
538 cur_ops.len = mtd->erasesize - offset;
539
540 from += cur_ops.len;
541 retry_count = 0;
542 }
543
544 return ret;
545 }
546
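/*
 * Write path: same block-by-block remapping as mtk_bmt_read(); on a write
 * failure the data already written to the block is copied to a spare block
 * and the write is retried (up to 10 times).
 */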
547 static int
548 mtk_bmt_write(struct mtd_info *mtd, loff_t to,
549 struct mtd_oob_ops *ops)
550 {
551 struct mtd_oob_ops cur_ops = *ops;
552 int retry_count = 0;
553 loff_t cur_to;
554 int ret;
555
556 ops->retlen = 0;
557 ops->oobretlen = 0;
558
559 while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) {
560 u32 offset = to & (bmtd.blk_size - 1);
561 u32 block = to >> bmtd.blk_shift;
562 u32 cur_block;
563
564 cur_block = get_mapping_block_index(block);
565 cur_to = ((loff_t)cur_block << bmtd.blk_shift) + offset;
566
567 cur_ops.oobretlen = 0;
568 cur_ops.retlen = 0;
569 cur_ops.len = min_t(u32, bmtd.blk_size - offset,
570 ops->len - ops->retlen);
571 ret = bmtd._write_oob(mtd, cur_to, &cur_ops);
572 if (ret < 0) {
573 update_bmt(block, offset);
574 if (retry_count++ < 10)
575 continue;
576
577 return ret;
578 }
579
580 ops->retlen += cur_ops.retlen;
581 ops->oobretlen += cur_ops.oobretlen;
582
583 cur_ops.ooboffs = 0;
584 cur_ops.datbuf += cur_ops.retlen;
585 cur_ops.oobbuf += cur_ops.oobretlen;
586 cur_ops.ooblen -= cur_ops.oobretlen;
587
588 if (!cur_ops.len)
589 cur_ops.len = mtd->erasesize - offset;
590
591 to += cur_ops.len;
592 retry_count = 0;
593 }
594
595 return 0;
596 }
597
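/*
 * Erase path: erase one mapped block at a time; a block that fails to erase
 * is remapped and the erase retried up to 10 times.
 */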
598 static int
599 mtk_bmt_mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
600 {
601 struct erase_info mapped_instr = {
602 .len = bmtd.blk_size,
603 };
604 int retry_count = 0;
605 u64 start_addr, end_addr;
606 int ret = 0;
607 u16 orig_block, block;
608
609 start_addr = instr->addr & (~mtd->erasesize_mask);
610 end_addr = instr->addr + instr->len;
611
612 while (start_addr < end_addr) {
613 orig_block = start_addr >> bmtd.blk_shift;
614 block = get_mapping_block_index(orig_block);
615 mapped_instr.addr = (loff_t)block << bmtd.blk_shift;
616 ret = bmtd._erase(mtd, &mapped_instr);
617 if (ret) {
618 update_bmt(orig_block, 0);
619 if (retry_count++ < 10)
620 continue;
621 instr->fail_addr = start_addr;
622 break;
623 }
624 start_addr += mtd->erasesize;
625 retry_count = 0;
626 }
627
628 return ret;
629 }

630 static int
631 mtk_bmt_block_isbad(struct mtd_info *mtd, loff_t ofs)
632 {
633 int retry_count = 0;
634 u16 orig_block = ofs >> bmtd.blk_shift;
635 u16 block;
636 int ret;
637
638 retry:
639 block = get_mapping_block_index(orig_block);
640 ret = bmtd._block_isbad(mtd, (loff_t)block << bmtd.blk_shift);
641 if (ret) {
642 update_bmt(orig_block, bmtd.blk_size);
643 if (retry_count++ < 10)
644 goto retry;
645 }
646 return ret;
647 }
648
649 static int
650 mtk_bmt_block_markbad(struct mtd_info *mtd, loff_t ofs)
651 {
652 u16 orig_block = ofs >> bmtd.blk_shift;
653 u16 block = get_mapping_block_index(orig_block);
654 update_bmt(orig_block, bmtd.blk_size);
655 return bmtd._block_markbad(mtd, (loff_t)block << bmtd.blk_shift);
656 }
657
658 static void
659 mtk_bmt_replace_ops(struct mtd_info *mtd)
660 {
661 bmtd._read_oob = mtd->_read_oob;
662 bmtd._write_oob = mtd->_write_oob;
663 bmtd._erase = mtd->_erase;
664 bmtd._block_isbad = mtd->_block_isbad;
665 bmtd._block_markbad = mtd->_block_markbad;
666
667 mtd->_read_oob = mtk_bmt_read;
668 mtd->_write_oob = mtk_bmt_write;
669 mtd->_erase = mtk_bmt_mtd_erase;
670 mtd->_block_isbad = mtk_bmt_block_isbad;
671 mtd->_block_markbad = mtk_bmt_block_markbad;
672 }
673
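/*
 * debugfs hooks: the value written to mark_good/mark_bad is a byte offset;
 * mark_good restores an identity mapping for that block, mark_bad forces
 * the block to be remapped.
 */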
674 static int mtk_bmt_debug_mark_good(void *data, u64 val)
675 {
676 u32 block = val >> bmtd.blk_shift;
677
678 bmtd.bbt->bb_tbl[block] = block;
679 bmtd.bmt_blk_idx = upload_bmt(bmtd.bbt, bmtd.bmt_blk_idx);
680
681 return 0;
682 }
683
684 static int mtk_bmt_debug_mark_bad(void *data, u64 val)
685 {
686 u32 block = val >> bmtd.blk_shift;
687
688 update_bmt(block, bmtd.blk_size);
689
690 return 0;
691 }
692
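/*
 * Build a bitmap of BMT pool entries that are still referenced by bb_tbl[],
 * so that stale NORMAL_MAPPED entries can be told apart in the debug output.
 */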
693 static unsigned long *
694 mtk_bmt_get_mapping_mask(void)
695 {
696 struct bbmt *bbmt = bmt_tbl(bmtd.bbt);
697 int main_blocks = bmtd.mtd->size >> bmtd.blk_shift;
698 unsigned long *used;
699 int i, k;
700
701 used = kcalloc(BIT_WORD(bmtd.bmt_blk_idx) + 1, sizeof(unsigned long), GFP_KERNEL);
702 if (!used)
703 return NULL;
704
705 for (i = 1; i < main_blocks; i++) {
706 if (bmtd.bbt->bb_tbl[i] == i)
707 continue;
708
709 for (k = 0; k < bmtd.bmt_blk_idx; k++) {
710 if (bmtd.bbt->bb_tbl[i] != bbmt[k].block)
711 continue;
712
713 set_bit(k, used);
714 break;
715 }
716 }
717
718 return used;
719 }
720
721 static int mtk_bmt_debug(void *data, u64 val)
722 {
723 struct bbmt *bbmt = bmt_tbl(bmtd.bbt);
724 struct mtd_info *mtd = bmtd.mtd;
725 unsigned long *used;
726 int main_blocks = mtd->size >> bmtd.blk_shift;
727 int n_remap = 0;
728 int i;
729
730 used = mtk_bmt_get_mapping_mask();
731 if (!used)
732 return -ENOMEM;
733
734 switch (val) {
735 case 0:
736 for (i = 1; i < main_blocks; i++) {
737 if (bmtd.bbt->bb_tbl[i] == i)
738 continue;
739
740 printk("remap [%x->%x]\n", i, bmtd.bbt->bb_tbl[i]);
741 n_remap++;
742 }
743 for (i = 0; i <= bmtd.bmt_blk_idx; i++) {
744 char c;
745
746 switch (bbmt[i].mapped) {
747 case NO_MAPPED:
748 continue;
749 case NORMAL_MAPPED:
750 c = 'm';
751 if (test_bit(i, used))
752 c = 'M';
753 break;
754 case BMT_MAPPED:
755 c = 'B';
756 break;
757 default:
758 c = 'X';
759 break;
760 }
761 printk("[%x:%c] = 0x%x\n", i, c, bbmt[i].block);
762 }
763 break;
764 case 100:
765 for (i = 0; i <= bmtd.bmt_blk_idx; i++) {
766 if (bbmt[i].mapped != NORMAL_MAPPED)
767 continue;
768
769 if (test_bit(i, used))
770 continue;
771
772 n_remap++;
773 bbmt[i].mapped = NO_MAPPED;
774 printk("free block [%d:%x]\n", i, bbmt[i].block);
775 }
776 if (n_remap)
777 bmtd.bmt_blk_idx = upload_bmt(bmtd.bbt, bmtd.bmt_blk_idx);
778 break;
779 }
780
781 kfree(used);
782
783 return 0;
784 }
785
786
787 DEFINE_DEBUGFS_ATTRIBUTE(fops_mark_good, NULL, mtk_bmt_debug_mark_good, "%llu\n");
788 DEFINE_DEBUGFS_ATTRIBUTE(fops_mark_bad, NULL, mtk_bmt_debug_mark_bad, "%llu\n");
789 DEFINE_DEBUGFS_ATTRIBUTE(fops_debug, NULL, mtk_bmt_debug, "%llu\n");
790
791 static void
792 mtk_bmt_add_debugfs(void)
793 {
794 struct dentry *dir;
795
796 dir = bmtd.debugfs_dir = debugfs_create_dir("mtk-bmt", NULL);
797 if (!dir)
798 return;
799
800 debugfs_create_file_unsafe("mark_good", S_IWUSR, dir, NULL, &fops_mark_good);
801 debugfs_create_file_unsafe("mark_bad", S_IWUSR, dir, NULL, &fops_mark_bad);
802 debugfs_create_file_unsafe("debug", S_IWUSR, dir, NULL, &fops_debug);
803 }
804
805 void mtk_bmt_detach(struct mtd_info *mtd)
806 {
807 if (bmtd.mtd != mtd)
808 return;
809
810 if (bmtd.debugfs_dir)
811 debugfs_remove_recursive(bmtd.debugfs_dir);
812 bmtd.debugfs_dir = NULL;
813
814 kfree(nand_bbt_buf);
815 kfree(nand_data_buf);
816
817 mtd->_read_oob = bmtd._read_oob;
818 mtd->_write_oob = bmtd._write_oob;
819 mtd->_erase = bmtd._erase;
820 mtd->_block_isbad = bmtd._block_isbad;
821 mtd->_block_markbad = bmtd._block_markbad;
822 mtd->size = bmtd.total_blks << bmtd.blk_shift;
823
824 memset(&bmtd, 0, sizeof(bmtd));
825 }
826
827 /* total_blocks - the total number of blocks of the NAND chip */
828 int mtk_bmt_attach(struct mtd_info *mtd)
829 {
830 struct device_node *np;
831 struct bbbt *bbt;
832 u32 bufsz;
833 u32 block;
834 u16 total_blocks, pmt_block;
835 int ret = 0;
836 u32 bmt_pool_size, bmt_table_size;
837
838 if (bmtd.mtd)
839 return -ENOSPC;
840
841 np = mtd_get_of_node(mtd);
842 if (!np)
843 return 0;
844
845 if (!of_property_read_bool(np, "mediatek,bmt-v2"))
846 return 0;
847
848 if (of_property_read_u32(np, "mediatek,bmt-pool-size",
849 &bmt_pool_size) != 0)
850 bmt_pool_size = 80;
851
852 if (of_property_read_u8(np, "mediatek,bmt-oob-offset",
853 &bmtd.oob_offset) != 0)
854 bmtd.oob_offset = 0;
855
856 if (of_property_read_u32(np, "mediatek,bmt-table-size",
857 &bmt_table_size) != 0)
858 bmt_table_size = 0x2000U;
859
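/*
 * "mediatek,bmt-remap-range" is an optional list of <start end> pairs of
 * byte addresses; only blocks inside one of the listed ranges get remapped.
 * Hypothetical devicetree example (values are illustrative only):
 *	mediatek,bmt-remap-range = <0x0 0x580000>;
 * Each pair occupies two be32 cells, i.e. 8 bytes, hence the division below.
 */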
860 bmtd.remap_range = of_get_property(np, "mediatek,bmt-remap-range",
861 &bmtd.remap_range_len);
862 bmtd.remap_range_len /= 8;
863
864 bmtd.mtd = mtd;
865 mtk_bmt_replace_ops(mtd);
866
867 bmtd.table_size = bmt_table_size;
868 bmtd.blk_size = mtd->erasesize;
869 bmtd.blk_shift = ffs(bmtd.blk_size) - 1;
870 bmtd.pg_size = mtd->writesize;
871 bmtd.pg_shift = ffs(bmtd.pg_size) - 1;
872 total_blocks = mtd->size >> bmtd.blk_shift;
873 pmt_block = total_blocks - bmt_pool_size - 2;
874
875 mtd->size = pmt_block << bmtd.blk_shift;
876
877 /*
878 * ---------------------------------------
879 * | PMT(2blks) | BMT POOL(bmt_pool_size blks) |
880 * ---------------------------------------
881 * ^ ^
882 * | |
883 * pmt_block pmt_block + 2blocks(pool_lba)
884 *
885 * ATTENTION!!!
886 * Blocks ahead of the boundary block are tracked in bb_tbl,
887 * and blocks behind it are tracked in bmt_tbl
888 */
889
890 bmtd.pool_lba = (u16)(pmt_block + 2);
891 bmtd.total_blks = total_blocks;
892 bmtd.bb_max = bmtd.total_blks * BBPOOL_RATIO / 100;
893
894 /* The two buffers we need */
895 bufsz = round_up(sizeof(struct bbbt) +
896 bmt_table_size * sizeof(struct bbmt), bmtd.pg_size);
897 bmtd.bmt_pgs = bufsz >> bmtd.pg_shift;
898
899 nand_bbt_buf = kzalloc(bufsz, GFP_KERNEL);
900 nand_data_buf = kzalloc(bmtd.pg_size, GFP_KERNEL);
901
902 if (!nand_bbt_buf || !nand_data_buf) {
903 pr_info("nand: FATAL ERR: allocate buffer failed!\n");
904 ret = -1;
905 goto error;
906 }
907
908 memset(nand_bbt_buf, 0xff, bufsz);
909 memset(nand_data_buf, 0xff, bmtd.pg_size);
910
911 BBT_LOG("bbtbuf=0x%p(0x%x) dat=0x%p(0x%x)",
912 nand_bbt_buf, bufsz, nand_data_buf, bmtd.pg_size);
913 BBT_LOG("pool_lba=0x%x total_blks=0x%x bb_max=0x%x",
914 bmtd.pool_lba, bmtd.total_blks, bmtd.bb_max);
915
916 /* Scanning starts from the first page of the last block
917 * of the whole flash
918 */
919 bbt = scan_bmt(bmtd.total_blks - 1);
920 if (!bbt) {
921 /* BMT not found */
922 if (bmtd.total_blks > BB_TABLE_MAX + BMT_TABLE_MAX) {
923 pr_info("nand: FATAL: Too many blocks, can not support!\n");
924 ret = -1;
925 goto error;
926 }
927
928 bbt = (struct bbbt *)nand_bbt_buf;
929 memset(bmt_tbl(bbt), BMT_TBL_DEF_VAL, bmtd.table_size * sizeof(struct bbmt));
930
931 if (scan_bad_blocks(bbt)) {
932 ret = -1;
933 goto error;
934 }
935
936 /* BMT always in the last valid block in pool */
937 bmtd.bmt_blk_idx = upload_bmt(bbt, bmtd.bmt_blk_idx);
938 block = bmt_tbl(bbt)[bmtd.bmt_blk_idx].block;
939 pr_notice("[BBT] BMT.v2 is written into PBA:0x%x\n", block);
940
941 if (bmtd.bmt_blk_idx == 0)
942 pr_info("nand: Warning: no available block in BMT pool!\n");
943 else if (bmtd.bmt_blk_idx == (u16)-1) {
944 ret = -1;
945 goto error;
946 }
947 }
948 mtk_bmt_add_debugfs();
949
950 bmtd.bbt = bbt;
951 return 0;
952
953 error:
954 mtk_bmt_detach(mtd);
955 return ret;
956 }
957
958
959 MODULE_LICENSE("GPL");
960 MODULE_AUTHOR("Xiangsheng Hou <xiangsheng.hou@mediatek.com>, Felix Fietkau <nbd@nbd.name>");
961 MODULE_DESCRIPTION("Bad Block mapping management v2 for MediaTek NAND Flash Driver");
962