kernel: mtk_bmt: refactor to avoid deep recursion
[openwrt/staging/dedeckeh.git] target/linux/generic/files/drivers/mtd/nand/mtk_bmt_v2.c
/*
 * Copyright (c) 2017 MediaTek Inc.
 * Author: Xiangsheng Hou <xiangsheng.hou@mediatek.com>
 * Copyright (c) 2020-2022 Felix Fietkau <nbd@nbd.name>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include "mtk_bmt.h"

struct bbbt {
	char signature[3];
	/* This version field distinguishes the legacy and the new algorithm */
#define BBMT_VERSION 2
	unsigned char version;
	/* The two tables below are written to SLC */
	u16 bb_tbl[];
};

struct bbmt {
	u16 block;
#define NO_MAPPED 0
#define NORMAL_MAPPED 1
#define BMT_MAPPED 2
	u16 mapped;
};

/* Maximum 8k blocks */
#define BBPOOL_RATIO 2
#define BB_TABLE_MAX bmtd.table_size
#define BMT_TABLE_MAX (BB_TABLE_MAX * BBPOOL_RATIO / 100)
#define BMT_TBL_DEF_VAL 0x0

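/* The bad block table and the BMT table share one buffer: bmt_tbl()
 * returns the struct bbmt array that sits directly after the bb_tbl[]
 * entries inside the same struct bbbt allocation.
 */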
static inline struct bbmt *bmt_tbl(struct bbbt *bbbt)
{
	return (struct bbmt *)&bbbt->bb_tbl[bmtd.table_size];
}

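/* Scan forward from @block and return the first good block, skipping
 * bad blocks as indicated by the first FDM byte (0xff means good).
 * Returns 0 if the end of the device is reached or more than
 * bmtd.bb_max bad blocks have been skipped.
 */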
static u16 find_valid_block(u16 block)
{
	u8 fdm[4];
	int ret;
	int loop = 0;

retry:
	if (block >= bmtd.total_blks)
		return 0;

	ret = bbt_nand_read(blk_pg(block), bmtd.data_buf, bmtd.pg_size,
			    fdm, sizeof(fdm));
	/* Read the first byte of the FDM to judge whether the block
	 * is bad or not
	 */
	if (ret || fdm[0] != 0xff) {
		pr_info("nand: found bad block 0x%x\n", block);
		if (loop >= bmtd.bb_max) {
			pr_info("nand: FATAL ERR: too many bad blocks!!\n");
			return 0;
		}

		loop++;
		block++;
		goto retry;
	}

	return block;
}

/* Find out all bad blocks, and fill in the mapping table */
static int scan_bad_blocks(struct bbbt *bbt)
{
	int i;
	u16 block = 0;

	/* On first-time download, block 0 MUST NOT be a bad block;
	 * this is guaranteed by the vendor
	 */
	bbt->bb_tbl[0] = 0;

	/*
	 * Construct the mapping table of the normal data area (non-PMT/BMTPOOL)
	 * G - Good block; B - Bad block
	 *          ---------------------------
	 * physical |G|G|B|G|B|B|G|G|G|G|B|G|B|
	 *          ---------------------------
	 * What bb_tbl[i] looks like:
	 *   physical block(i):
	 *     0 1 2 3 4 5 6 7 8 9 a b c
	 *   mapped block(bb_tbl[i]):
	 *     0 1 3 6 7 8 9 b ......
	 * ATTENTION:
	 *   If a new bad block (n) occurs, search bmt_tbl to find
	 *   an available block (x), and set bb_tbl[n] = x;
	 */
	for (i = 1; i < bmtd.pool_lba; i++) {
		bbt->bb_tbl[i] = find_valid_block(bbt->bb_tbl[i - 1] + 1);
		BBT_LOG("bb_tbl[0x%x] = 0x%x", i, bbt->bb_tbl[i]);
		if (bbt->bb_tbl[i] == 0)
			return -1;
	}

	/* Physical block start address of the BMT pool */
	bmtd.pool_pba = bbt->bb_tbl[i - 1] + 1;
	if (bmtd.pool_pba >= bmtd.total_blks - 2) {
		pr_info("nand: FATAL ERR: Too many bad blocks!!\n");
		return -1;
	}

	BBT_LOG("pool_pba=0x%x", bmtd.pool_pba);
	i = 0;
	block = bmtd.pool_pba;
	/*
	 * The bmt table is used for runtime bad block mapping
	 * G - Good block; B - Bad block
	 *          ---------------------------
	 * physical |G|G|B|G|B|B|G|G|G|G|B|G|B|
	 *          ---------------------------
	 * block:    0 1 2 3 4 5 6 7 8 9 a b c
	 * What bmt_tbl[i] looks like in the initial state:
	 *   i:
	 *     0 1 2 3 4 5 6 7
	 *   bmt_tbl[i].block:
	 *     0 1 3 6 7 8 9 b
	 *   bmt_tbl[i].mapped:
	 *     N N N N N N N B
	 *     N - Not mapped (available)
	 *     M - Mapped
	 *     B - BMT
	 * ATTENTION:
	 *   The BMT is always kept in the last valid block of the pool
	 */
	while ((block = find_valid_block(block)) != 0) {
		bmt_tbl(bbt)[i].block = block;
		bmt_tbl(bbt)[i].mapped = NO_MAPPED;
		BBT_LOG("bmt_tbl[%d].block = 0x%x", i, block);
		block++;
		i++;
	}

	/* i - number of available blocks in the pool, i.e. the length of bmt_tbl[]
	 * bmtd.bmt_blk_idx - bmt_tbl[bmtd.bmt_blk_idx].block => the BMT block
	 *
	 * Check for an empty pool before indexing: with i == 0 the
	 * subscript i - 1 would wrap around and write out of bounds.
	 */
	if (i < 1) {
		pr_info("nand: FATAL ERR: no space to store BMT!!\n");
		return -1;
	}

	bmtd.bmt_blk_idx = i - 1;
	bmt_tbl(bbt)[bmtd.bmt_blk_idx].mapped = BMT_MAPPED;

	pr_info("[BBT] %d available blocks in BMT pool\n", i);

	return 0;
}

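/* A stored BMT is considered valid when the "BMT" signature in the main
 * data area, the "bmt" signature in the OOB/FDM area and the table
 * version all match.
 */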
static bool is_valid_bmt(unsigned char *buf, unsigned char *fdm)
{
	struct bbbt *bbt = (struct bbbt *)buf;
	u8 *sig = (u8 *)bbt->signature + MAIN_SIGNATURE_OFFSET;

	if (memcmp(bbt->signature + MAIN_SIGNATURE_OFFSET, "BMT", 3) == 0 &&
	    memcmp(fdm + OOB_SIGNATURE_OFFSET, "bmt", 3) == 0) {
		if (bbt->version == BBMT_VERSION)
			return true;
	}
	BBT_LOG("[BBT] BMT version mismatch, please upgrade preloader and uboot! sig=%02x%02x%02x, fdm=%02x%02x%02x",
		sig[0], sig[1], sig[2],
		fdm[1], fdm[2], fdm[3]);
	return false;
}

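/* Walk bmt_tbl up to the first unused (default) entry and return the
 * index of the entry flagged BMT_MAPPED, i.e. the block currently
 * holding the BMT itself. Returns 0 if no such entry is found.
 */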
static u16 get_bmt_index(struct bbmt *bmt)
{
	int i = 0;

	while (bmt[i].block != BMT_TBL_DEF_VAL) {
		if (bmt[i].mapped == BMT_MAPPED)
			return i;
		i++;
	}
	return 0;
}

/* Write the burner bad block table to NAND flash
 * n - write the BMT to bmt_tbl[n]
 */
static u16 upload_bmt(struct bbbt *bbt, int n)
{
	u16 block;

retry:
	if (n < 0 || bmt_tbl(bbt)[n].mapped == NORMAL_MAPPED) {
		pr_info("nand: FATAL ERR: no space to store BMT!\n");
		return (u16)-1;
	}

	block = bmt_tbl(bbt)[n].block;
	BBT_LOG("n = 0x%x, block = 0x%x", n, block);
	if (bbt_nand_erase(block)) {
		bmt_tbl(bbt)[n].block = 0;
		/* erase failed, try the previous block: bmt_tbl[n - 1].block */
		n--;
		goto retry;
	}

	/* The main signature offset is fixed at 0,
	 * the OOB signature offset is fixed at 1
	 */
	memcpy(bbt->signature + MAIN_SIGNATURE_OFFSET, "BMT", 3);
	bbt->version = BBMT_VERSION;

	if (write_bmt(block, (unsigned char *)bbt)) {
		bmt_tbl(bbt)[n].block = 0;

		/* write failed, try the previous block in bmt_tbl[n - 1] */
		n--;
		goto retry;
	}

	/* Return the current index (n) into the BMT pool (bmt_tbl[n]) */
	return n;
}

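/* Grab the first unused entry from the BMT pool, mark it NORMAL_MAPPED
 * and return its physical block number. Returns 0 when the pool is
 * exhausted.
 */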
static u16 find_valid_block_in_pool(struct bbbt *bbt)
{
	int i;

	if (bmtd.bmt_blk_idx == 0)
		goto error;

	for (i = 0; i < bmtd.bmt_blk_idx; i++) {
		if (bmt_tbl(bbt)[i].block != 0 && bmt_tbl(bbt)[i].mapped == NO_MAPPED) {
			bmt_tbl(bbt)[i].mapped = NORMAL_MAPPED;
			return bmt_tbl(bbt)[i].block;
		}
	}

error:
	pr_info("nand: FATAL ERR: the BMT pool has run out!\n");
	return 0;
}

/* We met a bad block: mark it as bad and map it to a valid block in the
 * pool. On a write failure the old data also has to be copied over to
 * the replacement block.
 */
static bool remap_block_v2(u16 block, u16 mapped_block, int copy_len)
{
	u16 new_block;
	struct bbbt *bbt;

	bbt = bmtd.bbt;
	new_block = find_valid_block_in_pool(bbt);
	if (new_block == 0)
		return false;

	/* Map the new bad block to an available block in the pool */
	bbt->bb_tbl[block] = new_block;

	/* Erase the replacement block and preserve the old contents */
	bbt_nand_erase(new_block);
	if (copy_len > 0)
		bbt_nand_copy(new_block, mapped_block, copy_len);

	bmtd.bmt_blk_idx = upload_bmt(bbt, bmtd.bmt_blk_idx);

	return true;
}

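/* Translate a logical block to the physical block it is currently
 * mapped to. Blocks inside the BMT pool and blocks outside the mapped
 * range are returned unchanged.
 */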
static int get_mapping_block_index_v2(int block)
{
	int start, end;

	if (block >= bmtd.pool_lba)
		return block;

	if (!mapping_block_in_range(block, &start, &end))
		return block;

	return bmtd.bbt->bb_tbl[block];
}

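/* Drop the remapping for @block (map it back onto itself) and write
 * the updated table back to flash.
 */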
static void
unmap_block_v2(u16 block)
{
	bmtd.bbt->bb_tbl[block] = block;
	bmtd.bmt_blk_idx = upload_bmt(bmtd.bbt, bmtd.bmt_blk_idx);
}

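/* Build a bitmask over the BMT pool: bit k is set when pool entry k is
 * referenced by some entry in bb_tbl, i.e. the block is actually in use
 * as a remap target. The caller must kfree() the result.
 */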
static unsigned long *
mtk_bmt_get_mapping_mask(void)
{
	struct bbmt *bbmt = bmt_tbl(bmtd.bbt);
	int main_blocks = bmtd.mtd->size >> bmtd.blk_shift;
	unsigned long *used;
	int i, k;

	used = kcalloc(BIT_WORD(bmtd.bmt_blk_idx) + 1, sizeof(unsigned long),
		       GFP_KERNEL);
	if (!used)
		return NULL;

	for (i = 1; i < main_blocks; i++) {
		if (bmtd.bbt->bb_tbl[i] == i)
			continue;

		for (k = 0; k < bmtd.bmt_blk_idx; k++) {
			if (bmtd.bbt->bb_tbl[i] != bbmt[k].block)
				continue;

			set_bit(k, used);
			break;
		}
	}

	return used;
}

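/* debugfs hook: val == 0 dumps the current remap table and the state of
 * every pool entry; val == 100 releases pool entries that are no longer
 * referenced by bb_tbl and writes the table back if anything changed.
 */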
static int mtk_bmt_debug_v2(void *data, u64 val)
{
	struct bbmt *bbmt = bmt_tbl(bmtd.bbt);
	struct mtd_info *mtd = bmtd.mtd;
	unsigned long *used;
	int main_blocks = mtd->size >> bmtd.blk_shift;
	int n_remap = 0;
	int i;

	used = mtk_bmt_get_mapping_mask();
	if (!used)
		return -ENOMEM;

	switch (val) {
	case 0:
		for (i = 1; i < main_blocks; i++) {
			if (bmtd.bbt->bb_tbl[i] == i)
				continue;

			printk("remap [%x->%x]\n", i, bmtd.bbt->bb_tbl[i]);
			n_remap++;
		}
		for (i = 0; i <= bmtd.bmt_blk_idx; i++) {
			char c;

			switch (bbmt[i].mapped) {
			case NO_MAPPED:
				continue;
			case NORMAL_MAPPED:
				c = 'm';
				if (test_bit(i, used))
					c = 'M';
				break;
			case BMT_MAPPED:
				c = 'B';
				break;
			default:
				c = 'X';
				break;
			}
			printk("[%x:%c] = 0x%x\n", i, c, bbmt[i].block);
		}
		break;
	case 100:
		for (i = 0; i <= bmtd.bmt_blk_idx; i++) {
			if (bbmt[i].mapped != NORMAL_MAPPED)
				continue;

			if (test_bit(i, used))
				continue;

			n_remap++;
			bbmt[i].mapped = NO_MAPPED;
			printk("free block [%d:%x]\n", i, bbmt[i].block);
		}
		if (n_remap)
			bmtd.bmt_blk_idx = upload_bmt(bmtd.bbt, bmtd.bmt_blk_idx);
		break;
	}

	kfree(used);

	return 0;
}

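/* Parse the BMT configuration from the device tree, then either locate
 * an existing BMT by scanning backwards from the last block of the
 * flash, or build a fresh one with scan_bad_blocks() and write it out.
 *
 * A minimal sketch of the properties consumed below; the values are
 * illustrative placeholders, only the three property names are taken
 * from the code in this function:
 *
 *	mediatek,bmt-pool-size = <80>;
 *	mediatek,bmt-table-size = <0x2000>;
 *	mediatek,bmt-oob-offset = /bits/ 8 <0>;
 *
 * Each property is optional; the defaults applied below equal the
 * example values shown.
 */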
static int mtk_bmt_init_v2(struct device_node *np)
{
	u32 bmt_pool_size, bmt_table_size;
	u32 bufsz, block;
	u16 pmt_block;

	if (of_property_read_u32(np, "mediatek,bmt-pool-size",
				 &bmt_pool_size) != 0)
		bmt_pool_size = 80;

	if (of_property_read_u8(np, "mediatek,bmt-oob-offset",
				&bmtd.oob_offset) != 0)
		bmtd.oob_offset = 0;

	if (of_property_read_u32(np, "mediatek,bmt-table-size",
				 &bmt_table_size) != 0)
		bmt_table_size = 0x2000U;

	bmtd.table_size = bmt_table_size;

	pmt_block = bmtd.total_blks - bmt_pool_size - 2;

	bmtd.mtd->size = pmt_block << bmtd.blk_shift;

	/*
	 * --------------------------------------------
	 * | PMT(2blks) | BMT POOL(totalblks * 2%)    |
	 * --------------------------------------------
	 * ^            ^
	 * |            |
	 * pmt_block    pmt_block + 2 blocks (pool_lba)
	 *
	 * ATTENTION!!!
	 * The blocks ahead of the boundary block are stored in bb_tbl
	 * and the blocks behind it are stored in bmt_tbl
	 */

	bmtd.pool_lba = (u16)(pmt_block + 2);
	bmtd.bb_max = bmtd.total_blks * BBPOOL_RATIO / 100;

	bufsz = round_up(sizeof(struct bbbt) +
			 bmt_table_size * sizeof(struct bbmt), bmtd.pg_size);
	bmtd.bmt_pgs = bufsz >> bmtd.pg_shift;

	bmtd.bbt_buf = kzalloc(bufsz, GFP_KERNEL);
	if (!bmtd.bbt_buf)
		return -ENOMEM;

	memset(bmtd.bbt_buf, 0xff, bufsz);

	/* Scanning starts from the first page of the last block
	 * of the whole flash; reuse the u32 block declared above
	 * rather than shadowing it with a loop-local variable.
	 */
	bmtd.bbt = NULL;
	for (block = bmtd.total_blks - 1; !bmtd.bbt && block >= bmtd.pool_lba; block--) {
		u8 fdm[4];

		if (bbt_nand_read(blk_pg(block), bmtd.bbt_buf, bufsz, fdm, sizeof(fdm))) {
			/* Read failed, try the previous block */
			continue;
		}

		if (!is_valid_bmt(bmtd.bbt_buf, fdm)) {
			/* No valid BMT found, try the previous block */
			continue;
		}

		bmtd.bmt_blk_idx = get_bmt_index(bmt_tbl((struct bbbt *)bmtd.bbt_buf));
		if (bmtd.bmt_blk_idx == 0) {
			pr_info("[BBT] FATAL ERR: bmt block index is wrong!\n");
			break;
		}

		pr_info("[BBT] BMT.v2 is found at 0x%x\n", block);
		bmtd.bbt = (struct bbbt *)bmtd.bbt_buf;
	}

	if (!bmtd.bbt) {
		/* BMT not found */
		if (bmtd.total_blks > BB_TABLE_MAX + BMT_TABLE_MAX) {
			pr_info("nand: FATAL: Too many blocks, cannot support!\n");
			return -1;
		}

		bmtd.bbt = (struct bbbt *)bmtd.bbt_buf;
		memset(bmt_tbl(bmtd.bbt), BMT_TBL_DEF_VAL,
		       bmtd.table_size * sizeof(struct bbmt));

		if (scan_bad_blocks(bmtd.bbt))
			return -1;

		/* The BMT always goes into the last valid block of the pool.
		 * Bail out on an upload failure before using the returned
		 * index: (u16)-1 would read far out of bounds below.
		 */
		bmtd.bmt_blk_idx = upload_bmt(bmtd.bbt, bmtd.bmt_blk_idx);
		if (bmtd.bmt_blk_idx == (u16)-1)
			return -1;

		block = bmt_tbl(bmtd.bbt)[bmtd.bmt_blk_idx].block;
		pr_notice("[BBT] BMT.v2 is written into PBA:0x%x\n", block);

		if (bmtd.bmt_blk_idx == 0)
			pr_info("nand: Warning: no available block in BMT pool!\n");
	}

	return 0;
}

const struct mtk_bmt_ops mtk_bmt_v2_ops = {
	.sig = "bmt",
	.sig_len = 3,
	.init = mtk_bmt_init_v2,
	.remap_block = remap_block_v2,
	.unmap_block = unmap_block_v2,
	.get_mapping_block = get_mapping_block_index_v2,
	.debug = mtk_bmt_debug_v2,
};