target/linux/generic/files/drivers/mtd/nand/mtk_bmt_v2.c
/*
 * Copyright (c) 2017 MediaTek Inc.
 * Author: Xiangsheng Hou <xiangsheng.hou@mediatek.com>
 * Copyright (c) 2020-2022 Felix Fietkau <nbd@nbd.name>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include "mtk_bmt.h"

struct bbbt {
	char signature[3];
	/* This version is used to distinguish between the legacy and the new algorithm */
#define BBMT_VERSION 2
	unsigned char version;
	/* The 2 tables below will be written to SLC */
	u16 bb_tbl[];
};

struct bbmt {
	u16 block;
#define NO_MAPPED 0
#define NORMAL_MAPPED 1
#define BMT_MAPPED 2
	u16 mapped;
};

/* Maximum 8k blocks */
#define BBPOOL_RATIO 2
#define BB_TABLE_MAX bmtd.table_size
#define BMT_TABLE_MAX (BB_TABLE_MAX * BBPOOL_RATIO / 100)
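/* For example, with the default "mediatek,bmt-table-size" of 0x2000 (8192)
 * entries, BMT_TABLE_MAX works out to 8192 * 2 / 100 = 163 pool entries.
 */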
#define BMT_TBL_DEF_VAL 0x0

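/* bmt_tbl[] lives in the same buffer as struct bbbt, immediately after the
 * bb_tbl[] entries; return a pointer to its first entry.
 */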
static inline struct bbmt *bmt_tbl(struct bbbt *bbbt)
{
	return (struct bbmt *)&bbbt->bb_tbl[bmtd.table_size];
}

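/* Scan forward from @block for the next good block, judged by the first FDM
 * (OOB) byte of its first page. Returns 0 if the end of flash is reached or
 * too many bad blocks are encountered.
 */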
static u16 find_valid_block(u16 block)
{
	u8 fdm[4];
	int ret;
	int loop = 0;

retry:
	if (block >= bmtd.total_blks)
		return 0;

	ret = bbt_nand_read(blk_pg(block), bmtd.data_buf, bmtd.pg_size,
			    fdm, sizeof(fdm));
	/* Read the 1st byte of the FDM to judge whether the block
	 * is bad or not
	 */
	if (ret || fdm[0] != 0xff) {
		pr_info("nand: found bad block 0x%x\n", block);
		if (loop >= bmtd.bb_max) {
			pr_info("nand: FATAL ERR: too many bad blocks!!\n");
			return 0;
		}

		loop++;
		block++;
		goto retry;
	}

	return block;
}

/* Find out all bad blocks, and fill in the mapping table */
static int scan_bad_blocks(struct bbbt *bbt)
{
	int i;
	u16 block = 0;

	/* At first-time download, block 0 MUST NOT be a bad block;
	 * this is guaranteed by the vendor
	 */
	bbt->bb_tbl[0] = 0;

	/*
	 * Construct the mapping table of the normal data area (non-PMT/BMTPOOL)
	 * G - Good block; B - Bad block
	 *	     ---------------------------
	 * physical |G|G|B|G|B|B|G|G|G|G|B|G|B|
	 *	     ---------------------------
	 * What bb_tbl[i] looks like:
	 *   physical block(i):
	 *	0 1 2 3 4 5 6 7 8 9 a b c
	 *   mapped block(bb_tbl[i]):
	 *	0 1 3 6 7 8 9 b ......
	 * ATTENTION:
	 *	If a new bad block(n) occurs, search bmt_tbl to find
	 *	an available block(x), and set bb_tbl[n] = x;
	 */
	for (i = 1; i < bmtd.pool_lba; i++) {
		bbt->bb_tbl[i] = find_valid_block(bbt->bb_tbl[i - 1] + 1);
		BBT_LOG("bb_tbl[0x%x] = 0x%x", i, bbt->bb_tbl[i]);
		if (bbt->bb_tbl[i] == 0)
			return -1;
	}

	/* Physical Block start Address of the BMT pool */
	bmtd.pool_pba = bbt->bb_tbl[i - 1] + 1;
	if (bmtd.pool_pba >= bmtd.total_blks - 2) {
		pr_info("nand: FATAL ERR: Too many bad blocks!!\n");
		return -1;
	}

	BBT_LOG("pool_pba=0x%x", bmtd.pool_pba);
	i = 0;
	block = bmtd.pool_pba;
	/*
	 * The bmt table is used for runtime bad block mapping
	 * G - Good block; B - Bad block
	 *	     ---------------------------
	 * physical |G|G|B|G|B|B|G|G|G|G|B|G|B|
	 *	     ---------------------------
	 * block:    0 1 2 3 4 5 6 7 8 9 a b c
	 * What bmt_tbl[i] looks like in the initial state:
	 *   i:
	 *	0 1 2 3 4 5 6 7
	 *   bmt_tbl[i].block:
	 *	0 1 3 6 7 8 9 b
	 *   bmt_tbl[i].mapped:
	 *	N N N N N N N B
	 *	N - Not mapped(Available)
	 *	M - Mapped
	 *	B - BMT
	 * ATTENTION:
	 *	The BMT is always kept in the last valid block of the pool
	 */
	while ((block = find_valid_block(block)) != 0) {
		bmt_tbl(bbt)[i].block = block;
		bmt_tbl(bbt)[i].mapped = NO_MAPPED;
		BBT_LOG("bmt_tbl[%d].block = 0x%x", i, block);
		block++;
		i++;
	}

	if (i < 1) {
		pr_info("nand: FATAL ERR: no space to store BMT!!\n");
		return -1;
	}

	/* i - How many available blocks are in the pool, which is the length of bmt_tbl[]
	 * bmtd.bmt_blk_idx - bmt_tbl[bmtd.bmt_blk_idx].block => the BMT block
	 */
	bmtd.bmt_blk_idx = i - 1;
	bmt_tbl(bbt)[bmtd.bmt_blk_idx].mapped = BMT_MAPPED;

	pr_info("[BBT] %d available blocks in BMT pool\n", i);

	return 0;
}

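/* Check the in-flash BMT signatures ("BMT" in the main data area, "bmt" in
 * the OOB/FDM area) and the table version.
 */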
static bool is_valid_bmt(unsigned char *buf, unsigned char *fdm)
{
	struct bbbt *bbt = (struct bbbt *)buf;
	u8 *sig = (u8 *)bbt->signature + MAIN_SIGNATURE_OFFSET;

	if (memcmp(bbt->signature + MAIN_SIGNATURE_OFFSET, "BMT", 3) == 0 &&
	    memcmp(fdm + OOB_SIGNATURE_OFFSET, "bmt", 3) == 0) {
		if (bbt->version == BBMT_VERSION)
			return true;
	}
	BBT_LOG("[BBT] BMT version mismatch, please upgrade preloader and uboot! sig=%02x%02x%02x, fdm=%02x%02x%02x",
		sig[0], sig[1], sig[2],
		fdm[1], fdm[2], fdm[3]);
	return false;
}

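/* Walk bmt_tbl[] up to the first unused (default) entry and return the index
 * of the entry marked BMT_MAPPED; returns 0 if none is found.
 */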
static u16 get_bmt_index(struct bbmt *bmt)
{
	int i = 0;

	while (bmt[i].block != BMT_TBL_DEF_VAL) {
		if (bmt[i].mapped == BMT_MAPPED)
			return i;
		i++;
	}
	return 0;
}

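/* Read the stored BMT (bmtd.bmt_pgs pages) starting at the first page of @block */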
static int
read_bmt(u16 block, unsigned char *dat, unsigned char *fdm, int fdm_len)
{
	u32 len = bmtd.bmt_pgs << bmtd.pg_shift;

	return bbt_nand_read(blk_pg(block), dat, len, fdm, fdm_len);
}

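/* Search backwards from @block for a valid BMT; gives up once the search
 * drops below the start of the BMT pool (bmtd.pool_lba).
 */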
static struct bbbt *scan_bmt(u16 block)
{
	u8 fdm[4];

	if (block < bmtd.pool_lba)
		return NULL;

	if (read_bmt(block, bmtd.bbt_buf, fdm, sizeof(fdm)))
		return scan_bmt(block - 1);

	if (is_valid_bmt(bmtd.bbt_buf, fdm)) {
		bmtd.bmt_blk_idx = get_bmt_index(bmt_tbl((struct bbbt *)bmtd.bbt_buf));
		if (bmtd.bmt_blk_idx == 0) {
			pr_info("[BBT] FATAL ERR: bmt block index is wrong!\n");
			return NULL;
		}
		pr_info("[BBT] BMT.v2 is found at 0x%x\n", block);
		return (struct bbbt *)bmtd.bbt_buf;
	} else
		return scan_bmt(block - 1);
}

/* Write the Burner Bad Block Table to NAND flash
 * n - write the BMT to bmt_tbl[n]
 */
static u16 upload_bmt(struct bbbt *bbt, int n)
{
	u16 block;

retry:
	if (n < 0 || bmt_tbl(bbt)[n].mapped == NORMAL_MAPPED) {
		pr_info("nand: FATAL ERR: no space to store BMT!\n");
		return (u16)-1;
	}

	block = bmt_tbl(bbt)[n].block;
	BBT_LOG("n = 0x%x, block = 0x%x", n, block);
	if (bbt_nand_erase(block)) {
		bmt_tbl(bbt)[n].block = 0;
		/* erase failed, try the previous block: bmt_tbl[n - 1].block */
		n--;
		goto retry;
	}

	/* The main signature offset is fixed at 0,
	 * the OOB signature offset is fixed at 1
	 */
	memcpy(bbt->signature + MAIN_SIGNATURE_OFFSET, "BMT", 3);
	bbt->version = BBMT_VERSION;

	if (write_bmt(block, (unsigned char *)bbt)) {
		bmt_tbl(bbt)[n].block = 0;

		/* write failed, try the previous block in bmt_tbl[n - 1] */
		n--;
		goto retry;
	}

	/* Return the current index (n) into the BMT pool (bmt_tbl[n]) */
	return n;
}

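/* Pick the first unused (NO_MAPPED) block from the BMT pool and mark it
 * NORMAL_MAPPED; returns 0 if the pool is exhausted.
 */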
static u16 find_valid_block_in_pool(struct bbbt *bbt)
{
	int i;

	if (bmtd.bmt_blk_idx == 0)
		goto error;

	for (i = 0; i < bmtd.bmt_blk_idx; i++) {
		if (bmt_tbl(bbt)[i].block != 0 && bmt_tbl(bbt)[i].mapped == NO_MAPPED) {
			bmt_tbl(bbt)[i].mapped = NORMAL_MAPPED;
			return bmt_tbl(bbt)[i].block;
		}
	}

error:
	pr_info("nand: FATAL ERR: BMT pool has run out!\n");
	return 0;
}

/* We hit a bad block: mark it as bad and map it to a valid block in the pool;
 * if it was a write failure, the existing data needs to be copied to the mapped block
 */
static bool remap_block_v2(u16 block, u16 mapped_block, int copy_len)
{
	u16 mapped_blk;
	struct bbbt *bbt;

	bbt = bmtd.bbt;
	mapped_blk = find_valid_block_in_pool(bbt);
	if (mapped_blk == 0)
		return false;

	/* Map the new bad block to an available block in the pool */
	bbt->bb_tbl[block] = mapped_blk;

	/* Erase the new block */
	bbt_nand_erase(mapped_blk);
	if (copy_len > 0)
		bbt_nand_copy(mapped_blk, block, copy_len);

	bmtd.bmt_blk_idx = upload_bmt(bbt, bmtd.bmt_blk_idx);

	return true;
}

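/* Translate a logical block to its physical block via bb_tbl[]; blocks in the
 * BMT pool or outside the remappable range are returned unchanged.
 */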
static u16 get_mapping_block_index_v2(int block)
{
	int start, end;

	if (block >= bmtd.pool_lba)
		return block;

	if (!mapping_block_in_range(block, &start, &end))
		return block;

	return bmtd.bbt->bb_tbl[block];
}

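/* Drop the remapping for @block (map it back onto itself) and rewrite the BMT */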
static void
unmap_block_v2(u16 block)
{
	bmtd.bbt->bb_tbl[block] = block;
	bmtd.bmt_blk_idx = upload_bmt(bmtd.bbt, bmtd.bmt_blk_idx);
}

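/* Build a bitmap with one bit per BMT pool entry, set for every pool block
 * that is currently referenced by bb_tbl[]; the caller must kfree() it.
 */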
static unsigned long *
mtk_bmt_get_mapping_mask(void)
{
	struct bbmt *bbmt = bmt_tbl(bmtd.bbt);
	int main_blocks = bmtd.mtd->size >> bmtd.blk_shift;
	unsigned long *used;
	int i, k;

	used = kcalloc(BIT_WORD(bmtd.bmt_blk_idx) + 1, sizeof(unsigned long), GFP_KERNEL);
	if (!used)
		return NULL;

	for (i = 1; i < main_blocks; i++) {
		if (bmtd.bbt->bb_tbl[i] == i)
			continue;

		for (k = 0; k < bmtd.bmt_blk_idx; k++) {
			if (bmtd.bbt->bb_tbl[i] != bbmt[k].block)
				continue;

			set_bit(k, used);
			break;
		}
	}

	return used;
}

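/* Debugfs hook: val 0 dumps the current remapping and pool state,
 * val 100 releases pool entries that are no longer referenced by bb_tbl[].
 */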
static int mtk_bmt_debug_v2(void *data, u64 val)
{
	struct bbmt *bbmt = bmt_tbl(bmtd.bbt);
	struct mtd_info *mtd = bmtd.mtd;
	unsigned long *used;
	int main_blocks = mtd->size >> bmtd.blk_shift;
	int n_remap = 0;
	int i;

	used = mtk_bmt_get_mapping_mask();
	if (!used)
		return -ENOMEM;

	switch (val) {
	case 0:
		for (i = 1; i < main_blocks; i++) {
			if (bmtd.bbt->bb_tbl[i] == i)
				continue;

			printk("remap [%x->%x]\n", i, bmtd.bbt->bb_tbl[i]);
			n_remap++;
		}
		for (i = 0; i <= bmtd.bmt_blk_idx; i++) {
			char c;

			switch (bbmt[i].mapped) {
			case NO_MAPPED:
				continue;
			case NORMAL_MAPPED:
				c = 'm';
				if (test_bit(i, used))
					c = 'M';
				break;
			case BMT_MAPPED:
				c = 'B';
				break;
			default:
				c = 'X';
				break;
			}
			printk("[%x:%c] = 0x%x\n", i, c, bbmt[i].block);
		}
		break;
	case 100:
		for (i = 0; i <= bmtd.bmt_blk_idx; i++) {
			if (bbmt[i].mapped != NORMAL_MAPPED)
				continue;

			if (test_bit(i, used))
				continue;

			n_remap++;
			bbmt[i].mapped = NO_MAPPED;
			printk("free block [%d:%x]\n", i, bbmt[i].block);
		}
		if (n_remap)
			bmtd.bmt_blk_idx = upload_bmt(bmtd.bbt, bmtd.bmt_blk_idx);
		break;
	}

	kfree(used);

	return 0;
}

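/* Parse the DT properties, locate an existing BMT on flash or, failing that,
 * scan for bad blocks and write a fresh BMT into the pool.
 */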
static int mtk_bmt_init_v2(struct device_node *np)
{
	u32 bmt_pool_size, bmt_table_size;
	u32 bufsz, block;
	u16 pmt_block;

	if (of_property_read_u32(np, "mediatek,bmt-pool-size",
				 &bmt_pool_size) != 0)
		bmt_pool_size = 80;

	if (of_property_read_u8(np, "mediatek,bmt-oob-offset",
				&bmtd.oob_offset) != 0)
		bmtd.oob_offset = 0;

	if (of_property_read_u32(np, "mediatek,bmt-table-size",
				 &bmt_table_size) != 0)
		bmt_table_size = 0x2000U;

	bmtd.table_size = bmt_table_size;

	pmt_block = bmtd.total_blks - bmt_pool_size - 2;

	bmtd.mtd->size = pmt_block << bmtd.blk_shift;

	/*
	 * -----------------------------------------
	 * | PMT(2blks) | BMT POOL(totalblks * 2%) |
	 * -----------------------------------------
	 * ^            ^
	 * |            |
	 * pmt_block    pmt_block + 2blocks(pool_lba)
	 *
	 * ATTENTION!!!
	 * The blocks ahead of the boundary block are stored in bb_tbl
	 * and the blocks behind it are stored in bmt_tbl
	 */

	bmtd.pool_lba = (u16)(pmt_block + 2);
	bmtd.bb_max = bmtd.total_blks * BBPOOL_RATIO / 100;

	bufsz = round_up(sizeof(struct bbbt) +
			 bmt_table_size * sizeof(struct bbmt), bmtd.pg_size);
	bmtd.bmt_pgs = bufsz >> bmtd.pg_shift;

	bmtd.bbt_buf = kzalloc(bufsz, GFP_KERNEL);
	if (!bmtd.bbt_buf)
		return -ENOMEM;

	memset(bmtd.bbt_buf, 0xff, bufsz);

	/* Scanning starts from the first page of the last block
	 * of the whole flash
	 */
	bmtd.bbt = scan_bmt(bmtd.total_blks - 1);
	if (!bmtd.bbt) {
		/* BMT not found */
		if (bmtd.total_blks > BB_TABLE_MAX + BMT_TABLE_MAX) {
			pr_info("nand: FATAL: Too many blocks, cannot support!\n");
			return -1;
		}

		bmtd.bbt = (struct bbbt *)bmtd.bbt_buf;
		memset(bmt_tbl(bmtd.bbt), BMT_TBL_DEF_VAL,
		       bmtd.table_size * sizeof(struct bbmt));

		if (scan_bad_blocks(bmtd.bbt))
			return -1;

		/* The BMT always lives in the last valid block of the pool */
		bmtd.bmt_blk_idx = upload_bmt(bmtd.bbt, bmtd.bmt_blk_idx);
		block = bmt_tbl(bmtd.bbt)[bmtd.bmt_blk_idx].block;
		pr_notice("[BBT] BMT.v2 is written into PBA:0x%x\n", block);

		if (bmtd.bmt_blk_idx == 0)
			pr_info("nand: Warning: no available block in BMT pool!\n");
		else if (bmtd.bmt_blk_idx == (u16)-1)
			return -1;
	}

	return 0;
}

const struct mtk_bmt_ops mtk_bmt_v2_ops = {
	.sig = "bmt",
	.sig_len = 3,
	.init = mtk_bmt_init_v2,
	.remap_block = remap_block_v2,
	.unmap_block = unmap_block_v2,
	.get_mapping_block = get_mapping_block_index_v2,
	.debug = mtk_bmt_debug_v2,
};