1 /*
2 * Copyright (c) 2017 MediaTek Inc.
3 * Author: Xiangsheng Hou <xiangsheng.hou@mediatek.com>
4 * Copyright (c) 2020 Felix Fietkau <nbd@nbd.name>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16 #include <linux/slab.h>
17 #include <linux/gfp.h>
18 #include <linux/kernel.h>
19 #include <linux/of.h>
20 #include <linux/mtd/mtd.h>
21 #include <linux/mtd/partitions.h>
22 #include <linux/mtd/mtk_bmt.h>
23 #include <linux/module.h>
24 #include <linux/debugfs.h>
25 #include <linux/bits.h>
26
27 #define MAIN_SIGNATURE_OFFSET 0
28 #define OOB_SIGNATURE_OFFSET 1
29 #define BBPOOL_RATIO 2
30
31 #define BBT_LOG(fmt, ...) pr_debug("[BBT][%s|%d] "fmt"\n", __func__, __LINE__, ##__VA_ARGS__)
32
33 /* Maximum 8k blocks */
34 #define BB_TABLE_MAX bmtd.table_size
35 #define BMT_TABLE_MAX (BB_TABLE_MAX * BBPOOL_RATIO / 100)
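/* With the default table size of 0x2000 (8192) entries (see mtk_bmt_attach
 * below), BMT_TABLE_MAX works out to 8192 * 2 / 100 = 163 pool entries.
 */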
36 #define BMT_TBL_DEF_VAL 0x0
37
38 /*
39 * Burner Bad Block Table
 40  * --------- Only SLC NAND chips are supported ----------
41 */
42
43 struct bbbt {
44 char signature[3];
45 /* This version is used to distinguish the legacy and new algorithm */
46 #define BBMT_VERSION 2
47 unsigned char version;
48 /* Below 2 tables will be written in SLC */
49 u16 bb_tbl[];
50 };
51
52 struct bbmt {
53 u16 block;
54 #define NO_MAPPED 0
55 #define NORMAL_MAPPED 1
56 #define BMT_MAPPED 2
57 u16 mapped;
58 };
59
60 static struct bmt_desc {
61 struct mtd_info *mtd;
62
63 int (*_read_oob) (struct mtd_info *mtd, loff_t from,
64 struct mtd_oob_ops *ops);
65 int (*_write_oob) (struct mtd_info *mtd, loff_t to,
66 struct mtd_oob_ops *ops);
67 int (*_erase) (struct mtd_info *mtd, struct erase_info *instr);
68 int (*_block_isbad) (struct mtd_info *mtd, loff_t ofs);
69 int (*_block_markbad) (struct mtd_info *mtd, loff_t ofs);
70
71 struct bbbt *bbt;
72
73 struct dentry *debugfs_dir;
74
75 u32 table_size;
76 u32 pg_size;
77 u32 blk_size;
78 u16 pg_shift;
79 u16 blk_shift;
 80 	/* logical start block of the BMT pool */
 81 	u16 pool_lba;
 82 	/* physical start block of the BMT pool */
 83 	u16 pool_pba;
84 /* Maximum count of bad blocks that the vendor guaranteed */
85 u16 bb_max;
86 /* Total blocks of the Nand Chip */
87 u16 total_blks;
 88 	/* The BMT itself is stored in block bmt_tbl[bmt_blk_idx].block */
 89 	u16 bmt_blk_idx;
 90 	/* How many pages are needed to store 'struct bbbt' */
91 u32 bmt_pgs;
92
93 const __be32 *remap_range;
94 int remap_range_len;
95
96 /* to compensate for driver level remapping */
97 u8 oob_offset;
98 } bmtd = {0};
99
100 static unsigned char *nand_bbt_buf;
101 static unsigned char *nand_data_buf;
102
103 /* -------- Unit conversions -------- */
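/*
 * Convert an eraseblock number to its first page number. For example
 * (illustrative sizes only): with 128 KiB blocks and 2 KiB pages,
 * blk_shift - pg_shift = 17 - 11 = 6, so block 5 maps to page 320.
 */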
104 static inline u32 blk_pg(u16 block)
105 {
106 return (u32)(block << (bmtd.blk_shift - bmtd.pg_shift));
107 }
108
109 /* -------- Nand operations wrapper -------- */
110 static inline int
111 bbt_nand_read(u32 page, unsigned char *dat, int dat_len,
112 unsigned char *fdm, int fdm_len)
113 {
114 struct mtd_oob_ops ops = {
115 .mode = MTD_OPS_PLACE_OOB,
116 .ooboffs = bmtd.oob_offset,
117 .oobbuf = fdm,
118 .ooblen = fdm_len,
119 .datbuf = dat,
120 .len = dat_len,
121 };
122
123 return bmtd._read_oob(bmtd.mtd, page << bmtd.pg_shift, &ops);
124 }
125
126 static inline int bbt_nand_erase(u16 block)
127 {
128 struct mtd_info *mtd = bmtd.mtd;
129 struct erase_info instr = {
130 .addr = (loff_t)block << bmtd.blk_shift,
131 .len = bmtd.blk_size,
132 };
133
134 return bmtd._erase(mtd, &instr);
135 }
136
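/*
 * Copy the contents of src_blk to dest_blk page by page, including the
 * per-page OOB data, stopping once max_offset bytes have been copied or
 * an unwritten page is reached.
 */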
137 static inline int bbt_nand_copy(u16 dest_blk, u16 src_blk, loff_t max_offset)
138 {
139 int pages = bmtd.blk_size >> bmtd.pg_shift;
140 loff_t src = (loff_t)src_blk << bmtd.blk_shift;
141 loff_t dest = (loff_t)dest_blk << bmtd.blk_shift;
142 loff_t offset = 0;
143 uint8_t oob[64];
144 int i, ret;
145
146 for (i = 0; i < pages; i++) {
147 struct mtd_oob_ops rd_ops = {
148 .mode = MTD_OPS_PLACE_OOB,
149 .oobbuf = oob,
150 .ooblen = min_t(int, bmtd.mtd->oobsize / pages, sizeof(oob)),
151 .datbuf = nand_data_buf,
152 .len = bmtd.pg_size,
153 };
154 struct mtd_oob_ops wr_ops = {
155 .mode = MTD_OPS_PLACE_OOB,
156 .oobbuf = oob,
157 .datbuf = nand_data_buf,
158 .len = bmtd.pg_size,
159 };
160
161 if (offset >= max_offset)
162 break;
163
164 ret = bmtd._read_oob(bmtd.mtd, src + offset, &rd_ops);
165 if (ret < 0 && !mtd_is_bitflip(ret))
166 return ret;
167
168 if (!rd_ops.retlen)
169 break;
170
 171 		wr_ops.ooblen = rd_ops.oobretlen;
 172 		ret = bmtd._write_oob(bmtd.mtd, dest + offset, &wr_ops);
 173 		if (ret < 0)
 174 			return ret;
 175 
176 offset += rd_ops.retlen;
177 }
178
179 return 0;
180 }
181
182 /* -------- Bad Blocks Management -------- */
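/*
 * In-memory/on-flash layout of the BMT buffer (nand_bbt_buf):
 *   struct bbbt header - "BMT" signature and version
 *   bb_tbl[table_size] - one u16 remap entry per logical block
 *   bmt_tbl[]          - struct bbmt entries describing the pool blocks,
 *                        starting directly after bb_tbl[]
 * bmt_tbl() below simply returns a pointer just past the bb_tbl[] array.
 */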
183 static inline struct bbmt *bmt_tbl(struct bbbt *bbbt)
184 {
185 return (struct bbmt *)&bbbt->bb_tbl[bmtd.table_size];
186 }
187
188 static int
189 read_bmt(u16 block, unsigned char *dat, unsigned char *fdm, int fdm_len)
190 {
191 u32 len = bmtd.bmt_pgs << bmtd.pg_shift;
192
193 return bbt_nand_read(blk_pg(block), dat, len, fdm, fdm_len);
194 }
195
196 static int write_bmt(u16 block, unsigned char *dat)
197 {
198 struct mtd_oob_ops ops = {
199 .mode = MTD_OPS_PLACE_OOB,
200 .ooboffs = OOB_SIGNATURE_OFFSET + bmtd.oob_offset,
201 .oobbuf = "bmt",
202 .ooblen = 3,
203 .datbuf = dat,
204 .len = bmtd.bmt_pgs << bmtd.pg_shift,
205 };
206 loff_t addr = (loff_t)block << bmtd.blk_shift;
207
208 return bmtd._write_oob(bmtd.mtd, addr, &ops);
209 }
210
211 static u16 find_valid_block(u16 block)
212 {
213 u8 fdm[4];
214 int ret;
215 int loop = 0;
216
217 retry:
218 if (block >= bmtd.total_blks)
219 return 0;
220
221 ret = bbt_nand_read(blk_pg(block), nand_data_buf, bmtd.pg_size,
222 fdm, sizeof(fdm));
 223 	/* Check the first FDM (OOB) byte to judge whether the block
 224 	 * is bad or not
 225 	 */
226 if (ret || fdm[0] != 0xff) {
227 pr_info("nand: found bad block 0x%x\n", block);
228 if (loop >= bmtd.bb_max) {
229 pr_info("nand: FATAL ERR: too many bad blocks!!\n");
230 return 0;
231 }
232
233 loop++;
234 block++;
235 goto retry;
236 }
237
238 return block;
239 }
240
241 /* Find out all bad blocks, and fill in the mapping table */
242 static int scan_bad_blocks(struct bbbt *bbt)
243 {
244 int i;
245 u16 block = 0;
246
 247 	/* On the first-time download, block 0 MUST NOT be a bad block;
 248 	 * this is guaranteed by the vendor
 249 	 */
250 bbt->bb_tbl[0] = 0;
251
252 /*
253 * Construct the mapping table of Normal data area(non-PMT/BMTPOOL)
254 * G - Good block; B - Bad block
255 * ---------------------------
256 * physical |G|G|B|G|B|B|G|G|G|G|B|G|B|
257 * ---------------------------
258 * What bb_tbl[i] looks like:
259 * physical block(i):
260 * 0 1 2 3 4 5 6 7 8 9 a b c
261 * mapped block(bb_tbl[i]):
262 * 0 1 3 6 7 8 9 b ......
263 * ATTENTION:
 264 	 *	If a new bad block(n) occurs, search bmt_tbl to find
 265 	 *	an available block(x), and fill in bb_tbl[n] = x;
266 */
267 for (i = 1; i < bmtd.pool_lba; i++) {
268 bbt->bb_tbl[i] = find_valid_block(bbt->bb_tbl[i - 1] + 1);
269 BBT_LOG("bb_tbl[0x%x] = 0x%x", i, bbt->bb_tbl[i]);
270 if (bbt->bb_tbl[i] == 0)
271 return -1;
272 }
273
 274 	/* Physical start block of the BMT pool */
275 bmtd.pool_pba = bbt->bb_tbl[i - 1] + 1;
276 if (bmtd.pool_pba >= bmtd.total_blks - 2) {
277 pr_info("nand: FATAL ERR: Too many bad blocks!!\n");
278 return -1;
279 }
280
281 BBT_LOG("pool_pba=0x%x", bmtd.pool_pba);
282 i = 0;
283 block = bmtd.pool_pba;
284 /*
285 * The bmt table is used for runtime bad block mapping
286 * G - Good block; B - Bad block
287 * ---------------------------
288 * physical |G|G|B|G|B|B|G|G|G|G|B|G|B|
289 * ---------------------------
290 * block: 0 1 2 3 4 5 6 7 8 9 a b c
291 * What bmt_tbl[i] looks like in initial state:
292 * i:
293 * 0 1 2 3 4 5 6 7
294 * bmt_tbl[i].block:
295 * 0 1 3 6 7 8 9 b
296 * bmt_tbl[i].mapped:
297 * N N N N N N N B
298 * N - Not mapped(Available)
299 * M - Mapped
300 * B - BMT
301 * ATTENTION:
 302 	 *	The BMT always lives in the last valid block of the pool
303 */
304 while ((block = find_valid_block(block)) != 0) {
305 bmt_tbl(bbt)[i].block = block;
306 bmt_tbl(bbt)[i].mapped = NO_MAPPED;
307 BBT_LOG("bmt_tbl[%d].block = 0x%x", i, block);
308 block++;
309 i++;
310 }
311
312 /* i - How many available blocks in pool, which is the length of bmt_tbl[]
313 * bmtd.bmt_blk_idx - bmt_tbl[bmtd.bmt_blk_idx].block => the BMT block
314 */
315 bmtd.bmt_blk_idx = i - 1;
316 bmt_tbl(bbt)[bmtd.bmt_blk_idx].mapped = BMT_MAPPED;
317
318 if (i < 1) {
319 pr_info("nand: FATAL ERR: no space to store BMT!!\n");
320 return -1;
321 }
322
323 pr_info("[BBT] %d available blocks in BMT pool\n", i);
324
325 return 0;
326 }
327
328 static bool is_valid_bmt(unsigned char *buf, unsigned char *fdm)
329 {
330 struct bbbt *bbt = (struct bbbt *)buf;
331 u8 *sig = (u8*)bbt->signature + MAIN_SIGNATURE_OFFSET;
332
333
334 if (memcmp(bbt->signature + MAIN_SIGNATURE_OFFSET, "BMT", 3) == 0 &&
335 memcmp(fdm + OOB_SIGNATURE_OFFSET, "bmt", 3) == 0) {
336 if (bbt->version == BBMT_VERSION)
337 return true;
338 }
 339 	BBT_LOG("[BBT] BMT version mismatch, please upgrade preloader and uboot! sig=%02x%02x%02x, fdm=%02x%02x%02x",
340 sig[0], sig[1], sig[2],
341 fdm[1], fdm[2], fdm[3]);
342 return false;
343 }
344
345 static u16 get_bmt_index(struct bbmt *bmt)
346 {
347 int i = 0;
348
349 while (bmt[i].block != BMT_TBL_DEF_VAL) {
350 if (bmt[i].mapped == BMT_MAPPED)
351 return i;
352 i++;
353 }
354 return 0;
355 }
356
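/*
 * Search backwards from 'block' down to pool_lba for a block that holds a
 * valid BMT: "BMT" signature in the main data and "bmt" in the OOB area.
 */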
357 static struct bbbt *scan_bmt(u16 block)
358 {
359 u8 fdm[4];
360
361 if (block < bmtd.pool_lba)
362 return NULL;
363
364 if (read_bmt(block, nand_bbt_buf, fdm, sizeof(fdm)))
365 return scan_bmt(block - 1);
366
367 if (is_valid_bmt(nand_bbt_buf, fdm)) {
368 bmtd.bmt_blk_idx = get_bmt_index(bmt_tbl((struct bbbt *)nand_bbt_buf));
369 if (bmtd.bmt_blk_idx == 0) {
370 pr_info("[BBT] FATAL ERR: bmt block index is wrong!\n");
371 return NULL;
372 }
373 pr_info("[BBT] BMT.v2 is found at 0x%x\n", block);
374 return (struct bbbt *)nand_bbt_buf;
375 } else
376 return scan_bmt(block - 1);
377 }
378
379 /* Write the Burner Bad Block Table to Nand Flash
380 * n - write BMT to bmt_tbl[n]
381 */
382 static u16 upload_bmt(struct bbbt *bbt, int n)
383 {
384 u16 block;
385
386 retry:
387 if (n < 0 || bmt_tbl(bbt)[n].mapped == NORMAL_MAPPED) {
388 pr_info("nand: FATAL ERR: no space to store BMT!\n");
389 return (u16)-1;
390 }
391
392 block = bmt_tbl(bbt)[n].block;
393 BBT_LOG("n = 0x%x, block = 0x%x", n, block);
394 if (bbt_nand_erase(block)) {
395 bmt_tbl(bbt)[n].block = 0;
396 /* erase failed, try the previous block: bmt_tbl[n - 1].block */
397 n--;
398 goto retry;
399 }
400
 401 	/* The main signature offset is fixed at 0,
 402 	 * the OOB signature offset is fixed at 1
 403 	 */
404 memcpy(bbt->signature + MAIN_SIGNATURE_OFFSET, "BMT", 3);
405 bbt->version = BBMT_VERSION;
406
407 if (write_bmt(block, (unsigned char *)bbt)) {
408 bmt_tbl(bbt)[n].block = 0;
409
410 /* write failed, try the previous block in bmt_tbl[n - 1] */
411 n--;
412 goto retry;
413 }
414
415 /* Return the current index(n) of BMT pool (bmt_tbl[n]) */
416 return n;
417 }
418
419 static u16 find_valid_block_in_pool(struct bbbt *bbt)
420 {
421 int i;
422
423 if (bmtd.bmt_blk_idx == 0)
424 goto error;
425
426 for (i = 0; i < bmtd.bmt_blk_idx; i++) {
427 if (bmt_tbl(bbt)[i].block != 0 && bmt_tbl(bbt)[i].mapped == NO_MAPPED) {
428 bmt_tbl(bbt)[i].mapped = NORMAL_MAPPED;
429 return bmt_tbl(bbt)[i].block;
430 }
431 }
432
433 error:
 434 	pr_info("nand: FATAL ERR: BMT pool has run out!\n");
435 return 0;
436 }
437
 438 /* We hit a bad block: mark it as bad and map it to a valid block in the pool.
 439  * If it was a write failure, the data already written is copied to the mapped block.
 440  */
441 static bool update_bmt(u16 block, int copy_len)
442 {
443 u16 mapped_blk;
444 struct bbbt *bbt;
445
446 bbt = bmtd.bbt;
447 mapped_blk = find_valid_block_in_pool(bbt);
448 if (mapped_blk == 0)
449 return false;
450
451 /* Map new bad block to available block in pool */
452 bbt->bb_tbl[block] = mapped_blk;
453
454 /* Erase new block */
455 bbt_nand_erase(mapped_blk);
456 if (copy_len > 0)
457 bbt_nand_copy(mapped_blk, block, copy_len);
458
459 bmtd.bmt_blk_idx = upload_bmt(bbt, bmtd.bmt_blk_idx);
460
461 return true;
462 }
463
464 static bool
465 mapping_block_in_range(int block)
466 {
467 const __be32 *cur = bmtd.remap_range;
468 u32 addr = block << bmtd.blk_shift;
469 int i;
470
471 if (!cur || !bmtd.remap_range_len)
472 return true;
473
474 for (i = 0; i < bmtd.remap_range_len; i++, cur += 2)
475 if (addr >= be32_to_cpu(cur[0]) && addr < be32_to_cpu(cur[1]))
476 return true;
477
478 return false;
479 }
480
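/*
 * Translate a logical block number into the physical block to use: blocks
 * inside the BMT pool or outside the remap range pass through unchanged,
 * everything else is looked up in bb_tbl[].
 */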
481 u16 get_mapping_block_index(int block)
482 {
483 if (block >= bmtd.pool_lba)
484 return block;
485
486 if (!mapping_block_in_range(block))
487 return block;
488
489 return bmtd.bbt->bb_tbl[block];
490 }
491
492 static int
493 mtk_bmt_read(struct mtd_info *mtd, loff_t from,
494 struct mtd_oob_ops *ops)
495 {
496 struct mtd_oob_ops cur_ops = *ops;
497 int retry_count = 0;
498 loff_t cur_from;
499 int ret = 0;
500 int max_bitflips = 0;
501
502 ops->retlen = 0;
503 ops->oobretlen = 0;
504
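	/* Process the request one eraseblock at a time: remap the logical block,
	 * issue the low-level read, and remap the block on uncorrectable errors
	 * or when the bitflip threshold is reached.
	 */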
505 while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) {
506 int cur_ret;
507
508 u32 offset = from & (bmtd.blk_size - 1);
509 u32 block = from >> bmtd.blk_shift;
510 u32 cur_block;
511
512 cur_block = get_mapping_block_index(block);
513 cur_from = ((loff_t)cur_block << bmtd.blk_shift) + offset;
514
515 cur_ops.oobretlen = 0;
516 cur_ops.retlen = 0;
517 cur_ops.len = min_t(u32, mtd->erasesize - offset,
518 ops->len - ops->retlen);
519 cur_ret = bmtd._read_oob(mtd, cur_from, &cur_ops);
520 if (cur_ret < 0)
521 ret = cur_ret;
522 else
523 max_bitflips = max_t(int, max_bitflips, cur_ret);
524 if (cur_ret < 0 && !mtd_is_bitflip(cur_ret)) {
525 update_bmt(block, mtd->erasesize);
526 if (retry_count++ < 10)
527 continue;
528
529 goto out;
530 }
531
532 if (cur_ret >= mtd->bitflip_threshold &&
533 mapping_block_in_range(block))
534 update_bmt(block, mtd->erasesize);
535
536 ops->retlen += cur_ops.retlen;
537 ops->oobretlen += cur_ops.oobretlen;
538
539 cur_ops.ooboffs = 0;
540 cur_ops.datbuf += cur_ops.retlen;
541 cur_ops.oobbuf += cur_ops.oobretlen;
542 cur_ops.ooblen -= cur_ops.oobretlen;
543
544 if (!cur_ops.len)
545 cur_ops.len = mtd->erasesize - offset;
546
547 from += cur_ops.len;
548 retry_count = 0;
549 }
550
551 out:
552 if (ret < 0)
553 return ret;
554
555 return max_bitflips;
556 }
557
558 static int
559 mtk_bmt_write(struct mtd_info *mtd, loff_t to,
560 struct mtd_oob_ops *ops)
561 {
562 struct mtd_oob_ops cur_ops = *ops;
563 int retry_count = 0;
564 loff_t cur_to;
565 int ret;
566
567 ops->retlen = 0;
568 ops->oobretlen = 0;
569
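	/* Same per-eraseblock loop as the read path: remap, write, and on a write
	 * failure migrate the data already written and retry with the new mapping.
	 */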
570 while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) {
571 u32 offset = to & (bmtd.blk_size - 1);
572 u32 block = to >> bmtd.blk_shift;
573 u32 cur_block;
574
575 cur_block = get_mapping_block_index(block);
576 cur_to = ((loff_t)cur_block << bmtd.blk_shift) + offset;
577
578 cur_ops.oobretlen = 0;
579 cur_ops.retlen = 0;
580 cur_ops.len = min_t(u32, bmtd.blk_size - offset,
581 ops->len - ops->retlen);
582 ret = bmtd._write_oob(mtd, cur_to, &cur_ops);
583 if (ret < 0) {
584 update_bmt(block, offset);
585 if (retry_count++ < 10)
586 continue;
587
588 return ret;
589 }
590
591 ops->retlen += cur_ops.retlen;
592 ops->oobretlen += cur_ops.oobretlen;
593
594 cur_ops.ooboffs = 0;
595 cur_ops.datbuf += cur_ops.retlen;
596 cur_ops.oobbuf += cur_ops.oobretlen;
597 cur_ops.ooblen -= cur_ops.oobretlen;
598
599 if (!cur_ops.len)
600 cur_ops.len = mtd->erasesize - offset;
601
602 to += cur_ops.len;
603 retry_count = 0;
604 }
605
606 return 0;
607 }
608
609 static int
610 mtk_bmt_mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
611 {
612 struct erase_info mapped_instr = {
613 .len = bmtd.blk_size,
614 };
615 int retry_count = 0;
616 u64 start_addr, end_addr;
 617 	int ret = 0;
618 u16 orig_block, block;
619
620 start_addr = instr->addr & (~mtd->erasesize_mask);
621 end_addr = instr->addr + instr->len;
622
623 while (start_addr < end_addr) {
624 orig_block = start_addr >> bmtd.blk_shift;
625 block = get_mapping_block_index(orig_block);
626 mapped_instr.addr = (loff_t)block << bmtd.blk_shift;
627 ret = bmtd._erase(mtd, &mapped_instr);
628 if (ret) {
629 update_bmt(orig_block, 0);
630 if (retry_count++ < 10)
631 continue;
632 instr->fail_addr = start_addr;
633 break;
634 }
635 start_addr += mtd->erasesize;
636 retry_count = 0;
637 }
638
639 return ret;
640 }
641 static int
642 mtk_bmt_block_isbad(struct mtd_info *mtd, loff_t ofs)
643 {
644 int retry_count = 0;
645 u16 orig_block = ofs >> bmtd.blk_shift;
646 u16 block;
647 int ret;
648
649 retry:
650 block = get_mapping_block_index(orig_block);
651 ret = bmtd._block_isbad(mtd, (loff_t)block << bmtd.blk_shift);
652 if (ret) {
653 update_bmt(orig_block, bmtd.blk_size);
654 if (retry_count++ < 10)
655 goto retry;
656 }
657 return ret;
658 }
659
660 static int
661 mtk_bmt_block_markbad(struct mtd_info *mtd, loff_t ofs)
662 {
663 u16 orig_block = ofs >> bmtd.blk_shift;
664 u16 block = get_mapping_block_index(orig_block);
665 update_bmt(orig_block, bmtd.blk_size);
666 return bmtd._block_markbad(mtd, (loff_t)block << bmtd.blk_shift);
667 }
668
669 static void
670 mtk_bmt_replace_ops(struct mtd_info *mtd)
671 {
672 bmtd._read_oob = mtd->_read_oob;
673 bmtd._write_oob = mtd->_write_oob;
674 bmtd._erase = mtd->_erase;
675 bmtd._block_isbad = mtd->_block_isbad;
676 bmtd._block_markbad = mtd->_block_markbad;
677
678 mtd->_read_oob = mtk_bmt_read;
679 mtd->_write_oob = mtk_bmt_write;
680 mtd->_erase = mtk_bmt_mtd_erase;
681 mtd->_block_isbad = mtk_bmt_block_isbad;
682 mtd->_block_markbad = mtk_bmt_block_markbad;
683 }
684
685 static int mtk_bmt_debug_mark_good(void *data, u64 val)
686 {
687 u32 block = val >> bmtd.blk_shift;
688
689 bmtd.bbt->bb_tbl[block] = block;
690 bmtd.bmt_blk_idx = upload_bmt(bmtd.bbt, bmtd.bmt_blk_idx);
691
692 return 0;
693 }
694
695 static int mtk_bmt_debug_mark_bad(void *data, u64 val)
696 {
697 u32 block = val >> bmtd.blk_shift;
698
699 update_bmt(block, bmtd.blk_size);
700
701 return 0;
702 }
703
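/*
 * Build a bitmap of BMT pool entries that are still referenced by bb_tbl[],
 * so that stale NORMAL_MAPPED entries can be spotted and released.
 */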
704 static unsigned long *
705 mtk_bmt_get_mapping_mask(void)
706 {
707 struct bbmt *bbmt = bmt_tbl(bmtd.bbt);
708 int main_blocks = bmtd.mtd->size >> bmtd.blk_shift;
709 unsigned long *used;
710 int i, k;
711
 712 	used = kcalloc(BIT_WORD(bmtd.bmt_blk_idx) + 1, sizeof(unsigned long), GFP_KERNEL);
713 if (!used)
714 return NULL;
715
716 for (i = 1; i < main_blocks; i++) {
717 if (bmtd.bbt->bb_tbl[i] == i)
718 continue;
719
720 for (k = 0; k < bmtd.bmt_blk_idx; k++) {
721 if (bmtd.bbt->bb_tbl[i] != bbmt[k].block)
722 continue;
723
724 set_bit(k, used);
725 break;
726 }
727 }
728
729 return used;
730 }
731
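/*
 * debugfs "debug" handler: writing 0 dumps the remap table and pool state,
 * writing 100 frees pool entries no longer referenced by bb_tbl[].
 */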
732 static int mtk_bmt_debug(void *data, u64 val)
733 {
734 struct bbmt *bbmt = bmt_tbl(bmtd.bbt);
735 struct mtd_info *mtd = bmtd.mtd;
736 unsigned long *used;
737 int main_blocks = mtd->size >> bmtd.blk_shift;
738 int n_remap = 0;
739 int i;
740
741 used = mtk_bmt_get_mapping_mask();
742 if (!used)
743 return -ENOMEM;
744
745 switch (val) {
746 case 0:
747 for (i = 1; i < main_blocks; i++) {
748 if (bmtd.bbt->bb_tbl[i] == i)
749 continue;
750
751 printk("remap [%x->%x]\n", i, bmtd.bbt->bb_tbl[i]);
752 n_remap++;
753 }
754 for (i = 0; i <= bmtd.bmt_blk_idx; i++) {
755 char c;
756
757 switch (bbmt[i].mapped) {
758 case NO_MAPPED:
759 continue;
760 case NORMAL_MAPPED:
761 c = 'm';
762 if (test_bit(i, used))
763 c = 'M';
764 break;
765 case BMT_MAPPED:
766 c = 'B';
767 break;
768 default:
769 c = 'X';
770 break;
771 }
772 printk("[%x:%c] = 0x%x\n", i, c, bbmt[i].block);
773 }
774 break;
775 case 100:
776 for (i = 0; i <= bmtd.bmt_blk_idx; i++) {
777 if (bbmt[i].mapped != NORMAL_MAPPED)
778 continue;
779
780 if (test_bit(i, used))
781 continue;
782
783 n_remap++;
784 bbmt[i].mapped = NO_MAPPED;
785 printk("free block [%d:%x]\n", i, bbmt[i].block);
786 }
787 if (n_remap)
788 bmtd.bmt_blk_idx = upload_bmt(bmtd.bbt, bmtd.bmt_blk_idx);
789 break;
790 }
791
792 kfree(used);
793
794 return 0;
795 }
796
797
798 DEFINE_DEBUGFS_ATTRIBUTE(fops_mark_good, NULL, mtk_bmt_debug_mark_good, "%llu\n");
799 DEFINE_DEBUGFS_ATTRIBUTE(fops_mark_bad, NULL, mtk_bmt_debug_mark_bad, "%llu\n");
800 DEFINE_DEBUGFS_ATTRIBUTE(fops_debug, NULL, mtk_bmt_debug, "%llu\n");
801
802 static void
803 mtk_bmt_add_debugfs(void)
804 {
805 struct dentry *dir;
806
807 dir = bmtd.debugfs_dir = debugfs_create_dir("mtk-bmt", NULL);
808 if (!dir)
809 return;
810
811 debugfs_create_file_unsafe("mark_good", S_IWUSR, dir, NULL, &fops_mark_good);
812 debugfs_create_file_unsafe("mark_bad", S_IWUSR, dir, NULL, &fops_mark_bad);
813 debugfs_create_file_unsafe("debug", S_IWUSR, dir, NULL, &fops_debug);
814 }
815
816 void mtk_bmt_detach(struct mtd_info *mtd)
817 {
818 if (bmtd.mtd != mtd)
819 return;
820
821 if (bmtd.debugfs_dir)
822 debugfs_remove_recursive(bmtd.debugfs_dir);
823 bmtd.debugfs_dir = NULL;
824
825 kfree(nand_bbt_buf);
826 kfree(nand_data_buf);
827
828 mtd->_read_oob = bmtd._read_oob;
829 mtd->_write_oob = bmtd._write_oob;
830 mtd->_erase = bmtd._erase;
831 mtd->_block_isbad = bmtd._block_isbad;
832 mtd->_block_markbad = bmtd._block_markbad;
833 mtd->size = bmtd.total_blks << bmtd.blk_shift;
834
835 memset(&bmtd, 0, sizeof(bmtd));
836 }
837
 838 /* total_blocks below is the total number of blocks the NAND chip has */
839 int mtk_bmt_attach(struct mtd_info *mtd)
840 {
841 struct device_node *np;
842 struct bbbt *bbt;
843 u32 bufsz;
844 u32 block;
845 u16 total_blocks, pmt_block;
846 int ret = 0;
847 u32 bmt_pool_size, bmt_table_size;
848
849 if (bmtd.mtd)
850 return -ENOSPC;
851
852 np = mtd_get_of_node(mtd);
853 if (!np)
854 return 0;
855
856 if (!of_property_read_bool(np, "mediatek,bmt-v2"))
857 return 0;
858
859 if (of_property_read_u32(np, "mediatek,bmt-pool-size",
860 &bmt_pool_size) != 0)
861 bmt_pool_size = 80;
862
863 if (of_property_read_u8(np, "mediatek,bmt-oob-offset",
864 &bmtd.oob_offset) != 0)
865 bmtd.oob_offset = 0;
866
867 if (of_property_read_u32(np, "mediatek,bmt-table-size",
868 &bmt_table_size) != 0)
869 bmt_table_size = 0x2000U;
870
871 bmtd.remap_range = of_get_property(np, "mediatek,bmt-remap-range",
872 &bmtd.remap_range_len);
873 bmtd.remap_range_len /= 8;
874
875 bmtd.mtd = mtd;
876 mtk_bmt_replace_ops(mtd);
877
878 bmtd.table_size = bmt_table_size;
879 bmtd.blk_size = mtd->erasesize;
880 bmtd.blk_shift = ffs(bmtd.blk_size) - 1;
881 bmtd.pg_size = mtd->writesize;
882 bmtd.pg_shift = ffs(bmtd.pg_size) - 1;
883 total_blocks = mtd->size >> bmtd.blk_shift;
884 pmt_block = total_blocks - bmt_pool_size - 2;
885
886 mtd->size = pmt_block << bmtd.blk_shift;
887
888 /*
889 * ---------------------------------------
890 * | PMT(2blks) | BMT POOL(totalblks * 2%) |
891 * ---------------------------------------
892 * ^ ^
893 * | |
894 * pmt_block pmt_block + 2blocks(pool_lba)
895 *
 896 	 * ATTENTION!
 897 	 * Blocks below pool_lba are remapped through bb_tbl[], blocks from
 898 	 * pool_lba onward form the BMT pool and are managed via bmt_tbl[]
899 */
900
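	/*
	 * Worked example (illustrative numbers only): for a chip with 1024
	 * eraseblocks and the default bmt-pool-size of 80, pmt_block becomes
	 * 1024 - 80 - 2 = 942, the visible mtd->size shrinks to 942 blocks,
	 * pool_lba = 944 and bb_max = 1024 * 2 / 100 = 20.
	 */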
901 bmtd.pool_lba = (u16)(pmt_block + 2);
902 bmtd.total_blks = total_blocks;
903 bmtd.bb_max = bmtd.total_blks * BBPOOL_RATIO / 100;
904
 905 	/* the two buffers we need */
906 bufsz = round_up(sizeof(struct bbbt) +
907 bmt_table_size * sizeof(struct bbmt), bmtd.pg_size);
908 bmtd.bmt_pgs = bufsz >> bmtd.pg_shift;
909
910 nand_bbt_buf = kzalloc(bufsz, GFP_KERNEL);
911 nand_data_buf = kzalloc(bmtd.pg_size, GFP_KERNEL);
912
913 if (!nand_bbt_buf || !nand_data_buf) {
914 pr_info("nand: FATAL ERR: allocate buffer failed!\n");
915 ret = -1;
916 goto error;
917 }
918
919 memset(nand_bbt_buf, 0xff, bufsz);
920 memset(nand_data_buf, 0xff, bmtd.pg_size);
921
922 BBT_LOG("bbtbuf=0x%p(0x%x) dat=0x%p(0x%x)",
923 nand_bbt_buf, bufsz, nand_data_buf, bmtd.pg_size);
924 BBT_LOG("pool_lba=0x%x total_blks=0x%x bb_max=0x%x",
925 bmtd.pool_lba, bmtd.total_blks, bmtd.bb_max);
926
 927 	/* Scanning starts from the first page of the last block
 928 	 * of the whole flash
 929 	 */
930 bbt = scan_bmt(bmtd.total_blks - 1);
931 if (!bbt) {
932 /* BMT not found */
933 if (bmtd.total_blks > BB_TABLE_MAX + BMT_TABLE_MAX) {
934 pr_info("nand: FATAL: Too many blocks, can not support!\n");
935 ret = -1;
936 goto error;
937 }
938
939 bbt = (struct bbbt *)nand_bbt_buf;
 940 		memset(bmt_tbl(bbt), BMT_TBL_DEF_VAL, BMT_TABLE_MAX * sizeof(struct bbmt));
941
942 if (scan_bad_blocks(bbt)) {
943 ret = -1;
944 goto error;
945 }
946
947 /* BMT always in the last valid block in pool */
948 bmtd.bmt_blk_idx = upload_bmt(bbt, bmtd.bmt_blk_idx);
949 block = bmt_tbl(bbt)[bmtd.bmt_blk_idx].block;
950 pr_notice("[BBT] BMT.v2 is written into PBA:0x%x\n", block);
951
952 if (bmtd.bmt_blk_idx == 0)
953 pr_info("nand: Warning: no available block in BMT pool!\n");
954 else if (bmtd.bmt_blk_idx == (u16)-1) {
955 ret = -1;
956 goto error;
957 }
958 }
959 mtk_bmt_add_debugfs();
960
961 bmtd.bbt = bbt;
962 return 0;
963
964 error:
965 mtk_bmt_detach(mtd);
966 return ret;
967 }
968
969
970 MODULE_LICENSE("GPL");
971 MODULE_AUTHOR("Xiangsheng Hou <xiangsheng.hou@mediatek.com>, Felix Fietkau <nbd@nbd.name>");
972 MODULE_DESCRIPTION("Bad Block mapping management v2 for MediaTek NAND Flash Driver");
973