1 /*
2 * Copyright (c) 2017 MediaTek Inc.
3 * Author: Xiangsheng Hou <xiangsheng.hou@mediatek.com>
4 * Copyright (c) 2020 Felix Fietkau <nbd@nbd.name>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16 #include <linux/slab.h>
17 #include <linux/gfp.h>
18 #include <linux/kernel.h>
19 #include <linux/of.h>
20 #include <linux/mtd/mtd.h>
21 #include <linux/mtd/partitions.h>
22 #include <linux/mtd/mtk_bmt.h>
23 #include <linux/module.h>
24 #include <linux/debugfs.h>
25 #include <linux/bits.h>
26
27 #define MAIN_SIGNATURE_OFFSET 0
28 #define OOB_SIGNATURE_OFFSET 1
29 #define BBPOOL_RATIO 2
30
31 #define BBT_LOG(fmt, ...) pr_debug("[BBT][%s|%d] "fmt"\n", __func__, __LINE__, ##__VA_ARGS__)
32
33 /* Maximum 8k blocks */
34 #define BB_TABLE_MAX bmtd.table_size
35 #define BMT_TABLE_MAX (BB_TABLE_MAX * BBPOOL_RATIO / 100)
36 #define BMT_TBL_DEF_VAL 0x0
37
38 struct mtk_bmt_ops {
39 char *sig;
40 unsigned int sig_len;
41 int (*init)(struct device_node *np);
42 bool (*remap_block)(u16 block, u16 mapped_block, int copy_len);
43 void (*unmap_block)(u16 block);
44 u16 (*get_mapping_block)(int block);
45 int (*debug)(void *data, u64 val);
46 };
47
48 struct bbbt {
49 char signature[3];
50 /* This version is used to distinguish between the legacy and the new algorithm */
51 #define BBMT_VERSION 2
52 unsigned char version;
53 /* The two tables below will be written to SLC */
54 u16 bb_tbl[];
55 };
56
57 struct bbmt {
58 u16 block;
59 #define NO_MAPPED 0
60 #define NORMAL_MAPPED 1
61 #define BMT_MAPPED 2
62 u16 mapped;
63 };
64
65 static struct bmt_desc {
66 struct mtd_info *mtd;
67
68 int (*_read_oob) (struct mtd_info *mtd, loff_t from,
69 struct mtd_oob_ops *ops);
70 int (*_write_oob) (struct mtd_info *mtd, loff_t to,
71 struct mtd_oob_ops *ops);
72 int (*_erase) (struct mtd_info *mtd, struct erase_info *instr);
73 int (*_block_isbad) (struct mtd_info *mtd, loff_t ofs);
74 int (*_block_markbad) (struct mtd_info *mtd, loff_t ofs);
75
76 const struct mtk_bmt_ops *ops;
77
78 struct bbbt *bbt;
79
80 struct dentry *debugfs_dir;
81
82 u32 table_size;
83 u32 pg_size;
84 u32 blk_size;
85 u16 pg_shift;
86 u16 blk_shift;
87 /* bbt logical address */
88 u16 pool_lba;
89 /* bbt physical address */
90 u16 pool_pba;
91 /* Maximum number of bad blocks guaranteed by the vendor */
92 u16 bb_max;
93 /* Total number of blocks in the NAND chip */
94 u16 total_blks;
95 /* The BMT is stored in block bmt_tbl[bmt_blk_idx].block */
96 u16 bmt_blk_idx;
97 /* How many pages are needed to store 'struct bbbt' */
98 u32 bmt_pgs;
99
100 const __be32 *remap_range;
101 int remap_range_len;
102
103 /* to compensate for driver level remapping */
104 u8 oob_offset;
105 } bmtd = {0};
106
107 static unsigned char *nand_bbt_buf;
108 static unsigned char *nand_data_buf;
109
110 /* -------- Unit conversions -------- */
111 static inline u32 blk_pg(u16 block)
112 {
113 return (u32)(block << (bmtd.blk_shift - bmtd.pg_shift));
114 }
115
116 /* -------- Nand operations wrapper -------- */
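/* Read one page of data plus fdm_len OOB (FDM) bytes through the saved lower-level read_oob hook */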
117 static inline int
118 bbt_nand_read(u32 page, unsigned char *dat, int dat_len,
119 unsigned char *fdm, int fdm_len)
120 {
121 struct mtd_oob_ops ops = {
122 .mode = MTD_OPS_PLACE_OOB,
123 .ooboffs = bmtd.oob_offset,
124 .oobbuf = fdm,
125 .ooblen = fdm_len,
126 .datbuf = dat,
127 .len = dat_len,
128 };
129
130 return bmtd._read_oob(bmtd.mtd, page << bmtd.pg_shift, &ops);
131 }
132
133 static inline int bbt_nand_erase(u16 block)
134 {
135 struct mtd_info *mtd = bmtd.mtd;
136 struct erase_info instr = {
137 .addr = (loff_t)block << bmtd.blk_shift,
138 .len = bmtd.blk_size,
139 };
140
141 return bmtd._erase(mtd, &instr);
142 }
143
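/* Copy data and OOB from src_blk to dest_blk page by page, stopping at max_offset or at the first empty/unreadable page */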
144 static inline int bbt_nand_copy(u16 dest_blk, u16 src_blk, loff_t max_offset)
145 {
146 int pages = bmtd.blk_size >> bmtd.pg_shift;
147 loff_t src = (loff_t)src_blk << bmtd.blk_shift;
148 loff_t dest = (loff_t)dest_blk << bmtd.blk_shift;
149 loff_t offset = 0;
150 uint8_t oob[64];
151 int i, ret;
152
153 for (i = 0; i < pages; i++) {
154 struct mtd_oob_ops rd_ops = {
155 .mode = MTD_OPS_PLACE_OOB,
156 .oobbuf = oob,
157 .ooblen = min_t(int, bmtd.mtd->oobsize / pages, sizeof(oob)),
158 .datbuf = nand_data_buf,
159 .len = bmtd.pg_size,
160 };
161 struct mtd_oob_ops wr_ops = {
162 .mode = MTD_OPS_PLACE_OOB,
163 .oobbuf = oob,
164 .datbuf = nand_data_buf,
165 .len = bmtd.pg_size,
166 };
167
168 if (offset >= max_offset)
169 break;
170
171 ret = bmtd._read_oob(bmtd.mtd, src + offset, &rd_ops);
172 if (ret < 0 && !mtd_is_bitflip(ret))
173 return ret;
174
175 if (!rd_ops.retlen)
176 break;
177
178 wr_ops.ooblen = rd_ops.oobretlen;
179 ret = bmtd._write_oob(bmtd.mtd, dest + offset, &wr_ops);
180 if (ret < 0)
181 return ret;
182
183 offset += rd_ops.retlen;
184 }
185
186 return 0;
187 }
188
189 /* -------- Bad Blocks Management -------- */
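/* bmt_tbl[] (the pool mapping entries) is stored directly after the bb_tbl[] entries in the same buffer */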
190 static inline struct bbmt *bmt_tbl(struct bbbt *bbbt)
191 {
192 return (struct bbmt *)&bbbt->bb_tbl[bmtd.table_size];
193 }
194
195 static int
196 read_bmt(u16 block, unsigned char *dat, unsigned char *fdm, int fdm_len)
197 {
198 u32 len = bmtd.bmt_pgs << bmtd.pg_shift;
199
200 return bbt_nand_read(blk_pg(block), dat, len, fdm, fdm_len);
201 }
202
203 static int write_bmt(u16 block, unsigned char *dat)
204 {
205 struct mtd_oob_ops ops = {
206 .mode = MTD_OPS_PLACE_OOB,
207 .ooboffs = OOB_SIGNATURE_OFFSET + bmtd.oob_offset,
208 .oobbuf = bmtd.ops->sig,
209 .ooblen = bmtd.ops->sig_len,
210 .datbuf = dat,
211 .len = bmtd.bmt_pgs << bmtd.pg_shift,
212 };
213 loff_t addr = (loff_t)block << bmtd.blk_shift;
214
215 return bmtd._write_oob(bmtd.mtd, addr, &ops);
216 }
217
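/* Return the first good block at or after 'block', or 0 if the end of flash or the bad-block limit is reached */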
218 static u16 find_valid_block(u16 block)
219 {
220 u8 fdm[4];
221 int ret;
222 int loop = 0;
223
224 retry:
225 if (block >= bmtd.total_blks)
226 return 0;
227
228 ret = bbt_nand_read(blk_pg(block), nand_data_buf, bmtd.pg_size,
229 fdm, sizeof(fdm));
230 /* Read the 1st byte of FDM to judge whether it's a bad
231 * block or not
232 */
233 if (ret || fdm[0] != 0xff) {
234 pr_info("nand: found bad block 0x%x\n", block);
235 if (loop >= bmtd.bb_max) {
236 pr_info("nand: FATAL ERR: too many bad blocks!!\n");
237 return 0;
238 }
239
240 loop++;
241 block++;
242 goto retry;
243 }
244
245 return block;
246 }
247
248 /* Find out all bad blocks, and fill in the mapping table */
249 static int scan_bad_blocks(struct bbbt *bbt)
250 {
251 int i;
252 u16 block = 0;
253
254 /* On first-time download, block 0 MUST NOT be a bad block;
255 * this is guaranteed by the vendor
256 */
257 bbt->bb_tbl[0] = 0;
258
259 /*
260 * Construct the mapping table of Normal data area(non-PMT/BMTPOOL)
261 * G - Good block; B - Bad block
262 * ---------------------------
263 * physical |G|G|B|G|B|B|G|G|G|G|B|G|B|
264 * ---------------------------
265 * What bb_tbl[i] looks like:
266 * physical block(i):
267 * 0 1 2 3 4 5 6 7 8 9 a b c
268 * mapped block(bb_tbl[i]):
269 * 0 1 3 6 7 8 9 b ......
270 * ATTENTION:
271 * If a new bad block (n) occurs, search bmt_tbl to find
272 * an available block (x), and fill in bb_tbl[n] = x;
273 */
274 for (i = 1; i < bmtd.pool_lba; i++) {
275 bbt->bb_tbl[i] = find_valid_block(bbt->bb_tbl[i - 1] + 1);
276 BBT_LOG("bb_tbl[0x%x] = 0x%x", i, bbt->bb_tbl[i]);
277 if (bbt->bb_tbl[i] == 0)
278 return -1;
279 }
280
281 /* Physical Block start Address of BMT pool */
282 bmtd.pool_pba = bbt->bb_tbl[i - 1] + 1;
283 if (bmtd.pool_pba >= bmtd.total_blks - 2) {
284 pr_info("nand: FATAL ERR: Too many bad blocks!!\n");
285 return -1;
286 }
287
288 BBT_LOG("pool_pba=0x%x", bmtd.pool_pba);
289 i = 0;
290 block = bmtd.pool_pba;
291 /*
292 * The bmt table is used for runtime bad block mapping
293 * G - Good block; B - Bad block
294 * ---------------------------
295 * physical |G|G|B|G|B|B|G|G|G|G|B|G|B|
296 * ---------------------------
297 * block: 0 1 2 3 4 5 6 7 8 9 a b c
298 * What bmt_tbl[i] looks like in initial state:
299 * i:
300 * 0 1 2 3 4 5 6 7
301 * bmt_tbl[i].block:
302 * 0 1 3 6 7 8 9 b
303 * bmt_tbl[i].mapped:
304 * N N N N N N N B
305 * N - Not mapped(Available)
306 * M - Mapped
307 * B - BMT
308 * ATTENTION:
309 * The BMT always occupies the last valid block in the pool
310 */
311 while ((block = find_valid_block(block)) != 0) {
312 bmt_tbl(bbt)[i].block = block;
313 bmt_tbl(bbt)[i].mapped = NO_MAPPED;
314 BBT_LOG("bmt_tbl[%d].block = 0x%x", i, block);
315 block++;
316 i++;
317 }
318
319 /* i - How many available blocks in pool, which is the length of bmt_tbl[]
320 * bmtd.bmt_blk_idx - bmt_tbl[bmtd.bmt_blk_idx].block => the BMT block
321 */
322 if (i < 1) {
323 pr_info("nand: FATAL ERR: no space to store BMT!!\n");
324 return -1;
325 }
326
327 bmtd.bmt_blk_idx = i - 1;
328 bmt_tbl(bbt)[bmtd.bmt_blk_idx].mapped = BMT_MAPPED;
329
330 pr_info("[BBT] %d available blocks in BMT pool\n", i);
331
332 return 0;
333 }
334
335 static bool is_valid_bmt(unsigned char *buf, unsigned char *fdm)
336 {
337 struct bbbt *bbt = (struct bbbt *)buf;
338 u8 *sig = (u8*)bbt->signature + MAIN_SIGNATURE_OFFSET;
339
341 if (memcmp(bbt->signature + MAIN_SIGNATURE_OFFSET, "BMT", 3) == 0 &&
342 memcmp(fdm + OOB_SIGNATURE_OFFSET, "bmt", 3) == 0) {
343 if (bbt->version == BBMT_VERSION)
344 return true;
345 }
346 BBT_LOG("[BBT] BMT version mismatch, please upgrade preloader and U-Boot! sig=%02x%02x%02x, fdm=%02x%02x%02x",
347 sig[0], sig[1], sig[2],
348 fdm[1], fdm[2], fdm[3]);
349 return false;
350 }
351
352 static u16 get_bmt_index(struct bbmt *bmt)
353 {
354 int i = 0;
355
356 while (bmt[i].block != BMT_TBL_DEF_VAL) {
357 if (bmt[i].mapped == BMT_MAPPED)
358 return i;
359 i++;
360 }
361 return 0;
362 }
363
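/* Search downwards from 'block' for a block containing a valid BMT; give up once the search drops below pool_lba */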
364 static struct bbbt *scan_bmt(u16 block)
365 {
366 u8 fdm[4];
367
368 if (block < bmtd.pool_lba)
369 return NULL;
370
371 if (read_bmt(block, nand_bbt_buf, fdm, sizeof(fdm)))
372 return scan_bmt(block - 1);
373
374 if (is_valid_bmt(nand_bbt_buf, fdm)) {
375 bmtd.bmt_blk_idx = get_bmt_index(bmt_tbl((struct bbbt *)nand_bbt_buf));
376 if (bmtd.bmt_blk_idx == 0) {
377 pr_info("[BBT] FATAL ERR: bmt block index is wrong!\n");
378 return NULL;
379 }
380 pr_info("[BBT] BMT.v2 is found at 0x%x\n", block);
381 return (struct bbbt *)nand_bbt_buf;
382 } else
383 return scan_bmt(block - 1);
384 }
385
386 /* Write the Burner Bad Block Table to Nand Flash
387 * n - write BMT to bmt_tbl[n]
388 */
389 static u16 upload_bmt(struct bbbt *bbt, int n)
390 {
391 u16 block;
392
393 retry:
394 if (n < 0 || bmt_tbl(bbt)[n].mapped == NORMAL_MAPPED) {
395 pr_info("nand: FATAL ERR: no space to store BMT!\n");
396 return (u16)-1;
397 }
398
399 block = bmt_tbl(bbt)[n].block;
400 BBT_LOG("n = 0x%x, block = 0x%x", n, block);
401 if (bbt_nand_erase(block)) {
402 bmt_tbl(bbt)[n].block = 0;
403 /* erase failed, try the previous block: bmt_tbl[n - 1].block */
404 n--;
405 goto retry;
406 }
407
408 /* The main signature offset is fixed at 0,
409 * the OOB signature offset is fixed at 1
410 */
411 memcpy(bbt->signature + MAIN_SIGNATURE_OFFSET, "BMT", 3);
412 bbt->version = BBMT_VERSION;
413
414 if (write_bmt(block, (unsigned char *)bbt)) {
415 bmt_tbl(bbt)[n].block = 0;
416
417 /* write failed, try the previous block in bmt_tbl[n - 1] */
418 n--;
419 goto retry;
420 }
421
422 /* Return the current index(n) of BMT pool (bmt_tbl[n]) */
423 return n;
424 }
425
426 static u16 find_valid_block_in_pool(struct bbbt *bbt)
427 {
428 int i;
429
430 if (bmtd.bmt_blk_idx == 0)
431 goto error;
432
433 for (i = 0; i < bmtd.bmt_blk_idx; i++) {
434 if (bmt_tbl(bbt)[i].block != 0 && bmt_tbl(bbt)[i].mapped == NO_MAPPED) {
435 bmt_tbl(bbt)[i].mapped = NORMAL_MAPPED;
436 return bmt_tbl(bbt)[i].block;
437 }
438 }
439
440 error:
441 pr_info("nand: FATAL ERR: BMT pool has run out!\n");
442 return 0;
443 }
444
445 /* We met a bad block: mark it as bad and map it to a valid block in the pool.
446 * If it was a write failure, the data also needs to be written to the mapped block.
447 */
448 static bool remap_block_v2(u16 block, u16 mapped_block, int copy_len)
449 {
450 u16 mapped_blk;
451 struct bbbt *bbt;
452
453 bbt = bmtd.bbt;
454 mapped_blk = find_valid_block_in_pool(bbt);
455 if (mapped_blk == 0)
456 return false;
457
458 /* Map new bad block to available block in pool */
459 bbt->bb_tbl[block] = mapped_blk;
460
461 /* Erase new block */
462 bbt_nand_erase(mapped_blk);
463 if (copy_len > 0)
464 bbt_nand_copy(mapped_blk, block, copy_len);
465
466 bmtd.bmt_blk_idx = upload_bmt(bbt, bmtd.bmt_blk_idx);
467
468 return true;
469 }
470
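/* A block may be remapped if no mediatek,bmt-remap-range property is given, or if its address falls inside one of the listed ranges */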
471 static bool
472 mapping_block_in_range(int block)
473 {
474 const __be32 *cur = bmtd.remap_range;
475 u32 addr = block << bmtd.blk_shift;
476 int i;
477
478 if (!cur || !bmtd.remap_range_len)
479 return true;
480
481 for (i = 0; i < bmtd.remap_range_len; i++, cur += 2)
482 if (addr >= be32_to_cpu(cur[0]) && addr < be32_to_cpu(cur[1]))
483 return true;
484
485 return false;
486 }
487
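/* Blocks inside the BMT pool or outside the remap range map to themselves; all others are looked up in bb_tbl[] */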
488 static u16
489 get_mapping_block_index_v2(int block)
490 {
491 if (block >= bmtd.pool_lba)
492 return block;
493
494 if (!mapping_block_in_range(block))
495 return block;
496
497 return bmtd.bbt->bb_tbl[block];
498 }
499
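/* Read wrapper: translate each logical block through the BMT, and remap (with up to 10 retries per block) on read failures or excessive bitflips */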
500 static int
501 mtk_bmt_read(struct mtd_info *mtd, loff_t from,
502 struct mtd_oob_ops *ops)
503 {
504 struct mtd_oob_ops cur_ops = *ops;
505 int retry_count = 0;
506 loff_t cur_from;
507 int ret = 0;
508 int max_bitflips = 0;
510
511 ops->retlen = 0;
512 ops->oobretlen = 0;
513
514 while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) {
515 int cur_ret;
516
517 u32 offset = from & (bmtd.blk_size - 1);
518 u32 block = from >> bmtd.blk_shift;
519 u32 cur_block;
520
521 cur_block = bmtd.ops->get_mapping_block(block);
522 cur_from = ((loff_t)cur_block << bmtd.blk_shift) + offset;
523
524 cur_ops.oobretlen = 0;
525 cur_ops.retlen = 0;
526 cur_ops.len = min_t(u32, mtd->erasesize - offset,
527 ops->len - ops->retlen);
528 cur_ret = bmtd._read_oob(mtd, cur_from, &cur_ops);
529 if (cur_ret < 0)
530 ret = cur_ret;
531 else
532 max_bitflips = max_t(int, max_bitflips, cur_ret);
533 if (cur_ret < 0 && !mtd_is_bitflip(cur_ret)) {
534 bmtd.ops->remap_block(block, cur_block, mtd->erasesize);
535 if (retry_count++ < 10)
536 continue;
537
538 goto out;
539 }
540
541 if (cur_ret >= mtd->bitflip_threshold &&
542 mapping_block_in_range(block))
543 bmtd.ops->remap_block(block, cur_block, mtd->erasesize);
544
545 ops->retlen += cur_ops.retlen;
546 ops->oobretlen += cur_ops.oobretlen;
547
548 cur_ops.ooboffs = 0;
549 cur_ops.datbuf += cur_ops.retlen;
550 cur_ops.oobbuf += cur_ops.oobretlen;
551 cur_ops.ooblen -= cur_ops.oobretlen;
552
553 if (!cur_ops.len)
554 cur_ops.len = mtd->erasesize - offset;
555
556 from += cur_ops.len;
557 retry_count = 0;
558 }
559
560 out:
561 if (ret < 0)
562 return ret;
563
564 return max_bitflips;
565 }
566
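/* Write wrapper: translate each logical block through the BMT and remap on write failures, copying the data written so far */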
567 static int
568 mtk_bmt_write(struct mtd_info *mtd, loff_t to,
569 struct mtd_oob_ops *ops)
570 {
571 struct mtd_oob_ops cur_ops = *ops;
572 int retry_count = 0;
573 loff_t cur_to;
574 int ret;
575
576 ops->retlen = 0;
577 ops->oobretlen = 0;
578
579 while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) {
580 u32 offset = to & (bmtd.blk_size - 1);
581 u32 block = to >> bmtd.blk_shift;
582 u32 cur_block;
583
584 cur_block = bmtd.ops->get_mapping_block(block);
585 cur_to = ((loff_t)cur_block << bmtd.blk_shift) + offset;
586
587 cur_ops.oobretlen = 0;
588 cur_ops.retlen = 0;
589 cur_ops.len = min_t(u32, bmtd.blk_size - offset,
590 ops->len - ops->retlen);
591 ret = bmtd._write_oob(mtd, cur_to, &cur_ops);
592 if (ret < 0) {
593 bmtd.ops->remap_block(block, cur_block, offset);
594 if (retry_count++ < 10)
595 continue;
596
597 return ret;
598 }
599
600 ops->retlen += cur_ops.retlen;
601 ops->oobretlen += cur_ops.oobretlen;
602
603 cur_ops.ooboffs = 0;
604 cur_ops.datbuf += cur_ops.retlen;
605 cur_ops.oobbuf += cur_ops.oobretlen;
606 cur_ops.ooblen -= cur_ops.oobretlen;
607
608 if (!cur_ops.len)
609 cur_ops.len = mtd->erasesize - offset;
610
611 to += cur_ops.len;
612 retry_count = 0;
613 }
614
615 return 0;
616 }
617
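/* Erase wrapper: erase the mapped block for each block in the requested range, remapping blocks that fail to erase */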
618 static int
619 mtk_bmt_mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
620 {
621 struct erase_info mapped_instr = {
622 .len = bmtd.blk_size,
623 };
624 int retry_count = 0;
625 u64 start_addr, end_addr;
626 int ret = 0;
627 u16 orig_block, block;
628
629 start_addr = instr->addr & (~mtd->erasesize_mask);
630 end_addr = instr->addr + instr->len;
631
632 while (start_addr < end_addr) {
633 orig_block = start_addr >> bmtd.blk_shift;
634 block = bmtd.ops->get_mapping_block(orig_block);
635 mapped_instr.addr = (loff_t)block << bmtd.blk_shift;
636 ret = bmtd._erase(mtd, &mapped_instr);
637 if (ret) {
638 bmtd.ops->remap_block(orig_block, block, 0);
639 if (retry_count++ < 10)
640 continue;
641 instr->fail_addr = start_addr;
642 break;
643 }
644 start_addr += mtd->erasesize;
645 retry_count = 0;
646 }
647
648 return ret;
649 }

650 static int
651 mtk_bmt_block_isbad(struct mtd_info *mtd, loff_t ofs)
652 {
653 int retry_count = 0;
654 u16 orig_block = ofs >> bmtd.blk_shift;
655 u16 block;
656 int ret;
657
658 retry:
659 block = bmtd.ops->get_mapping_block(orig_block);
660 ret = bmtd._block_isbad(mtd, (loff_t)block << bmtd.blk_shift);
661 if (ret) {
662 bmtd.ops->remap_block(orig_block, block, bmtd.blk_size);
663 if (retry_count++ < 10)
664 goto retry;
665 }
666 return ret;
667 }
668
669 static int
670 mtk_bmt_block_markbad(struct mtd_info *mtd, loff_t ofs)
671 {
672 u16 orig_block = ofs >> bmtd.blk_shift;
673 u16 block = bmtd.ops->get_mapping_block(orig_block);
674
675 bmtd.ops->remap_block(orig_block, block, bmtd.blk_size);
676
677 return bmtd._block_markbad(mtd, (loff_t)block << bmtd.blk_shift);
678 }
679
680 static void
681 mtk_bmt_replace_ops(struct mtd_info *mtd)
682 {
683 bmtd._read_oob = mtd->_read_oob;
684 bmtd._write_oob = mtd->_write_oob;
685 bmtd._erase = mtd->_erase;
686 bmtd._block_isbad = mtd->_block_isbad;
687 bmtd._block_markbad = mtd->_block_markbad;
688
689 mtd->_read_oob = mtk_bmt_read;
690 mtd->_write_oob = mtk_bmt_write;
691 mtd->_erase = mtk_bmt_mtd_erase;
692 mtd->_block_isbad = mtk_bmt_block_isbad;
693 mtd->_block_markbad = mtk_bmt_block_markbad;
694 }
695
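/* Drop the remapping for 'block' (map it back onto itself) and write the updated BMT to flash */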
696 static void
697 unmap_block_v2(u16 block)
698 {
699 bmtd.bbt->bb_tbl[block] = block;
700 bmtd.bmt_blk_idx = upload_bmt(bmtd.bbt, bmtd.bmt_blk_idx);
701 }
702
703 static int mtk_bmt_debug_mark_good(void *data, u64 val)
704 {
705 bmtd.ops->unmap_block(val >> bmtd.blk_shift);
706
707 return 0;
708 }
709
710 static int mtk_bmt_debug_mark_bad(void *data, u64 val)
711 {
712 u32 block = val >> bmtd.blk_shift;
713 u16 cur_block = bmtd.ops->get_mapping_block(block);
714
715 bmtd.ops->remap_block(block, cur_block, bmtd.blk_size);
716
717 return 0;
718 }
719
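/* Build a bitmap of BMT pool entries that are still referenced by bb_tbl[] */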
720 static unsigned long *
721 mtk_bmt_get_mapping_mask(void)
722 {
723 struct bbmt *bbmt = bmt_tbl(bmtd.bbt);
724 int main_blocks = bmtd.mtd->size >> bmtd.blk_shift;
725 unsigned long *used;
726 int i, k;
727
728 used = kcalloc(BIT_WORD(bmtd.bmt_blk_idx) + 1, sizeof(unsigned long), GFP_KERNEL);
729 if (!used)
730 return NULL;
731
732 for (i = 1; i < main_blocks; i++) {
733 if (bmtd.bbt->bb_tbl[i] == i)
734 continue;
735
736 for (k = 0; k < bmtd.bmt_blk_idx; k++) {
737 if (bmtd.bbt->bb_tbl[i] != bbmt[k].block)
738 continue;
739
740 set_bit(k, used);
741 break;
742 }
743 }
744
745 return used;
746 }
747
748 static int mtk_bmt_debug_v2(void *data, u64 val)
749 {
750 struct bbmt *bbmt = bmt_tbl(bmtd.bbt);
751 struct mtd_info *mtd = bmtd.mtd;
752 unsigned long *used;
753 int main_blocks = mtd->size >> bmtd.blk_shift;
754 int n_remap = 0;
755 int i;
756
757 used = mtk_bmt_get_mapping_mask();
758 if (!used)
759 return -ENOMEM;
760
761 switch (val) {
762 case 0:
763 for (i = 1; i < main_blocks; i++) {
764 if (bmtd.bbt->bb_tbl[i] == i)
765 continue;
766
767 printk("remap [%x->%x]\n", i, bmtd.bbt->bb_tbl[i]);
768 n_remap++;
769 }
770 for (i = 0; i <= bmtd.bmt_blk_idx; i++) {
771 char c;
772
773 switch (bbmt[i].mapped) {
774 case NO_MAPPED:
775 continue;
776 case NORMAL_MAPPED:
777 c = 'm';
778 if (test_bit(i, used))
779 c = 'M';
780 break;
781 case BMT_MAPPED:
782 c = 'B';
783 break;
784 default:
785 c = 'X';
786 break;
787 }
788 printk("[%x:%c] = 0x%x\n", i, c, bbmt[i].block);
789 }
790 break;
791 case 100:
792 for (i = 0; i <= bmtd.bmt_blk_idx; i++) {
793 if (bbmt[i].mapped != NORMAL_MAPPED)
794 continue;
795
796 if (test_bit(i, used))
797 continue;
798
799 n_remap++;
800 bbmt[i].mapped = NO_MAPPED;
801 printk("free block [%d:%x]\n", i, bbmt[i].block);
802 }
803 if (n_remap)
804 bmtd.bmt_blk_idx = upload_bmt(bmtd.bbt, bmtd.bmt_blk_idx);
805 break;
806 }
807
808 kfree(used);
809
810 return 0;
811 }
812
813 static int mtk_bmt_debug(void *data, u64 val)
814 {
815 return bmtd.ops->debug(data, val);
816 }
817
818
819 DEFINE_DEBUGFS_ATTRIBUTE(fops_mark_good, NULL, mtk_bmt_debug_mark_good, "%llu\n");
820 DEFINE_DEBUGFS_ATTRIBUTE(fops_mark_bad, NULL, mtk_bmt_debug_mark_bad, "%llu\n");
821 DEFINE_DEBUGFS_ATTRIBUTE(fops_debug, NULL, mtk_bmt_debug, "%llu\n");
822
823 static void
824 mtk_bmt_add_debugfs(void)
825 {
826 struct dentry *dir;
827
828 dir = bmtd.debugfs_dir = debugfs_create_dir("mtk-bmt", NULL);
829 if (!dir)
830 return;
831
832 debugfs_create_file_unsafe("mark_good", S_IWUSR, dir, NULL, &fops_mark_good);
833 debugfs_create_file_unsafe("mark_bad", S_IWUSR, dir, NULL, &fops_mark_bad);
834 debugfs_create_file_unsafe("debug", S_IWUSR, dir, NULL, &fops_debug);
835 }
836
837 void mtk_bmt_detach(struct mtd_info *mtd)
838 {
839 if (bmtd.mtd != mtd)
840 return;
841
842 if (bmtd.debugfs_dir)
843 debugfs_remove_recursive(bmtd.debugfs_dir);
844 bmtd.debugfs_dir = NULL;
845
846 kfree(nand_bbt_buf);
847 kfree(nand_data_buf);
848
849 mtd->_read_oob = bmtd._read_oob;
850 mtd->_write_oob = bmtd._write_oob;
851 mtd->_erase = bmtd._erase;
852 mtd->_block_isbad = bmtd._block_isbad;
853 mtd->_block_markbad = bmtd._block_markbad;
854 mtd->size = bmtd.total_blks << bmtd.blk_shift;
855
856 memset(&bmtd, 0, sizeof(bmtd));
857 }
858
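/* Parse the v2 device tree properties, reserve the BMT pool at the end of the flash, and load (or create and write) the BMT */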
859 static int mtk_bmt_init_v2(struct device_node *np)
860 {
861 u32 bmt_pool_size, bmt_table_size;
862 u32 bufsz, block;
863 u16 pmt_block;
864
865 if (of_property_read_u32(np, "mediatek,bmt-pool-size",
866 &bmt_pool_size) != 0)
867 bmt_pool_size = 80;
868
869 if (of_property_read_u8(np, "mediatek,bmt-oob-offset",
870 &bmtd.oob_offset) != 0)
871 bmtd.oob_offset = 0;
872
873 if (of_property_read_u32(np, "mediatek,bmt-table-size",
874 &bmt_table_size) != 0)
875 bmt_table_size = 0x2000U;
876
877 bmtd.table_size = bmt_table_size;
878
879 pmt_block = bmtd.total_blks - bmt_pool_size - 2;
880
881 bmtd.mtd->size = pmt_block << bmtd.blk_shift;
882
883 /*
884 * ---------------------------------------
885 * | PMT(2blks) | BMT POOL(totalblks * 2%) |
886 * ---------------------------------------
887 * ^ ^
888 * | |
889 * pmt_block pmt_block + 2blocks(pool_lba)
890 *
891 * ATTENTION:
892 * The blocks ahead of the boundary block are stored in bb_tbl,
893 * and the blocks behind it are stored in bmt_tbl
894 */
895
896 bmtd.pool_lba = (u16)(pmt_block + 2);
897 bmtd.bb_max = bmtd.total_blks * BBPOOL_RATIO / 100;
898
899 bufsz = round_up(sizeof(struct bbbt) +
900 bmt_table_size * sizeof(struct bbmt), bmtd.pg_size);
901 bmtd.bmt_pgs = bufsz >> bmtd.pg_shift;
902
903 nand_bbt_buf = kzalloc(bufsz, GFP_KERNEL);
904 if (!nand_bbt_buf)
905 return -ENOMEM;
906
907 memset(nand_bbt_buf, 0xff, bufsz);
908
909 /* Scanning starts from the first page of the last block
910 * of the whole flash
911 */
912 bmtd.bbt = scan_bmt(bmtd.total_blks - 1);
913 if (!bmtd.bbt) {
914 /* BMT not found */
915 if (bmtd.total_blks > BB_TABLE_MAX + BMT_TABLE_MAX) {
916 pr_info("nand: FATAL: Too many blocks, can not support!\n");
917 return -1;
918 }
919
920 bmtd.bbt = (struct bbbt *)nand_bbt_buf;
921 memset(bmt_tbl(bmtd.bbt), BMT_TBL_DEF_VAL,
922 bmtd.table_size * sizeof(struct bbmt));
923
924 if (scan_bad_blocks(bmtd.bbt))
925 return -1;
926
927 /* The BMT always occupies the last valid block in the pool */
928 bmtd.bmt_blk_idx = upload_bmt(bmtd.bbt, bmtd.bmt_blk_idx);
929 block = bmt_tbl(bmtd.bbt)[bmtd.bmt_blk_idx].block;
930 pr_notice("[BBT] BMT.v2 is written into PBA:0x%x\n", block);
931
932 if (bmtd.bmt_blk_idx == 0)
933 pr_info("nand: Warning: no available block in BMT pool!\n");
934 else if (bmtd.bmt_blk_idx == (u16)-1)
935 return -1;
936 }
937
938 return 0;
939 }
940
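/* Attach BMT handling to an mtd device whose device tree node requests it, hooking its read/write/erase/bad-block operations */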
941 int mtk_bmt_attach(struct mtd_info *mtd)
942 {
943 static const struct mtk_bmt_ops v2_ops = {
944 .sig = "bmt",
945 .sig_len = 3,
946 .init = mtk_bmt_init_v2,
947 .remap_block = remap_block_v2,
948 .unmap_block = unmap_block_v2,
949 .get_mapping_block = get_mapping_block_index_v2,
950 .debug = mtk_bmt_debug_v2,
951 };
952 struct device_node *np;
953 int ret = 0;
954
955 if (bmtd.mtd)
956 return -ENOSPC;
957
958 np = mtd_get_of_node(mtd);
959 if (!np)
960 return 0;
961
962 if (of_property_read_bool(np, "mediatek,bmt-v2"))
963 bmtd.ops = &v2_ops;
964 else
965 return 0;
966
967 bmtd.remap_range = of_get_property(np, "mediatek,bmt-remap-range",
968 &bmtd.remap_range_len);
969 bmtd.remap_range_len /= 8;
970
971 bmtd.mtd = mtd;
972 mtk_bmt_replace_ops(mtd);
973
974 bmtd.blk_size = mtd->erasesize;
975 bmtd.blk_shift = ffs(bmtd.blk_size) - 1;
976 bmtd.pg_size = mtd->writesize;
977 bmtd.pg_shift = ffs(bmtd.pg_size) - 1;
978 bmtd.total_blks = mtd->size >> bmtd.blk_shift;
979
980 nand_data_buf = kzalloc(bmtd.pg_size, GFP_KERNEL);
981 if (!nand_data_buf) {
982 pr_info("nand: FATAL ERR: failed to allocate buffer!\n");
983 ret = -ENOMEM;
984 goto error;
985 }
986
987 memset(nand_data_buf, 0xff, bmtd.pg_size);
988
989 ret = bmtd.ops->init(np);
990 if (ret)
991 goto error;
992
993 mtk_bmt_add_debugfs();
994 return 0;
995
996 error:
997 mtk_bmt_detach(mtd);
998 return ret;
999 }
1000
1001
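/*
 * Typical usage (illustrative sketch, not part of this file): a NAND or
 * SPI-NAND driver patched for BMT support is expected to call
 * mtk_bmt_attach(mtd) before registering the MTD device and
 * mtk_bmt_detach(mtd) on removal, with the flash node providing e.g.:
 *
 *	mediatek,bmt-v2;
 *	mediatek,bmt-pool-size = <80>;
 *	mediatek,bmt-table-size = <0x2000>;
 *	mediatek,bmt-remap-range = <0x0 0x6c0000>;
 *
 * The property names come from the code above; the values shown are the
 * defaults used here, except for the remap range, which is an invented example.
 */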
1002 MODULE_LICENSE("GPL");
1003 MODULE_AUTHOR("Xiangsheng Hou <xiangsheng.hou@mediatek.com>, Felix Fietkau <nbd@nbd.name>");
1004 MODULE_DESCRIPTION("Bad Block mapping management v2 for MediaTek NAND Flash Driver");
1005