target/linux/generic/files/drivers/mtd/nand/mtk_bmt_bbt.c
/*
 * Copyright (c) 2017 MediaTek Inc.
 * Author: Xiangsheng Hou <xiangsheng.hou@mediatek.com>
 * Copyright (c) 2020-2022 Felix Fietkau <nbd@nbd.name>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include "mtk_bmt.h"

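/*
 * The bad block table lives in bmtd.bbt_buf with two bits per block:
 * a cleared entry marks the block as good, any non-zero entry marks
 * it as bad.
 */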
static bool
bbt_block_is_bad(u16 block)
{
	u8 cur = bmtd.bbt_buf[block / 4];

	return cur & (3 << ((block % 4) * 2));
}

static void
bbt_set_block_state(u16 block, bool bad)
{
	u8 mask = (3 << ((block % 4) * 2));

	if (bad)
		bmtd.bbt_buf[block / 4] |= mask;
	else
		bmtd.bbt_buf[block / 4] &= ~mask;

	bbt_nand_erase(bmtd.bmt_blk_idx);
	write_bmt(bmtd.bmt_blk_idx, bmtd.bbt_buf);
}

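/*
 * Return the physical block backing a logical block: good blocks in
 * the mapping range are handed out in order, skipping entries marked
 * bad in the BBT.
 */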
static u16
get_mapping_block_index_bbt(int block)
{
	int start, end, ofs;
	int bad_blocks = 0;
	int i;

	if (!mapping_block_in_range(block, &start, &end))
		return block;

	start >>= bmtd.blk_shift;
	end >>= bmtd.blk_shift;
	/* skip bad blocks within the mapping range */
	ofs = block - start;
	for (i = start; i < end; i++) {
		if (bbt_block_is_bad(i))
			bad_blocks++;
		else if (ofs)
			ofs--;
		else
			break;
	}

	if (i < end)
		return i;

	/* when overflowing, remap remaining blocks to bad ones */
	for (i = end - 1; bad_blocks > 0; i--) {
		if (!bbt_block_is_bad(i))
			continue;

		bad_blocks--;
		if (bad_blocks <= ofs)
			return i;
	}

	return block;
}

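/*
 * Mark the currently mapped block as bad, look up a replacement block
 * and optionally copy the old contents over to it.
 */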
static bool remap_block_bbt(u16 block, u16 mapped_blk, int copy_len)
{
	int start, end;
	u16 new_blk;

	if (!mapping_block_in_range(block, &start, &end))
		return false;

	bbt_set_block_state(mapped_blk, true);

	new_blk = get_mapping_block_index_bbt(block);
	bbt_nand_erase(new_blk);
	if (copy_len > 0)
		bbt_nand_copy(new_blk, mapped_blk, copy_len);

	return false;
}

static void
unmap_block_bbt(u16 block)
{
	bbt_set_block_state(block, false);
}

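/*
 * Scan the last five blocks of the device for a BBT page: the first
 * OOB byte must not flag the block as bad and the following bytes
 * must carry the "mtknand" signature.
 */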
static int
mtk_bmt_read_bbt(void)
{
	u8 oob_buf[8];
	int i;

	for (i = bmtd.total_blks - 1; i >= bmtd.total_blks - 5; i--) {
		u32 page = i << (bmtd.blk_shift - bmtd.pg_shift);

		if (bbt_nand_read(page, bmtd.bbt_buf, bmtd.pg_size,
				  oob_buf, sizeof(oob_buf))) {
			pr_info("read_bbt: could not read block %d\n", i);
			continue;
		}

		if (oob_buf[0] != 0xff) {
			pr_info("read_bbt: bad block at %d\n", i);
			continue;
		}

		if (memcmp(&oob_buf[1], "mtknand", 7) != 0) {
			pr_info("read_bbt: signature mismatch in block %d\n", i);
			print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1, oob_buf, 8, 1);
			continue;
		}

		pr_info("read_bbt: found bbt at block %d\n", i);
		bmtd.bmt_blk_idx = i;
		return 0;
	}

	return -EIO;
}

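/*
 * Allocate and initialize the in-memory BBT buffer, hide the last
 * four blocks from the exposed mtd size and locate the on-flash
 * table.
 */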
static int
mtk_bmt_init_bbt(struct device_node *np)
{
	int buf_size = round_up(bmtd.total_blks >> 2, bmtd.blk_size);
	int ret;

	bmtd.bbt_buf = kmalloc(buf_size, GFP_KERNEL);
	if (!bmtd.bbt_buf)
		return -ENOMEM;

	memset(bmtd.bbt_buf, 0xff, buf_size);
	bmtd.mtd->size -= 4 * bmtd.mtd->erasesize;

	ret = mtk_bmt_read_bbt();
	if (ret)
		return ret;

	bmtd.bmt_pgs = buf_size / bmtd.pg_size;

	return 0;
}

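/*
 * Debug hook: val 0 dumps the BBT as an ASCII map ('B' = bad,
 * '.' = good), val 100 moves the BBT to the last block of the device
 * and rewrites it there.
 */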
static int mtk_bmt_debug_bbt(void *data, u64 val)
{
	char buf[5];
	int i, k;

	switch (val) {
	case 0:
		for (i = 0; i < bmtd.total_blks; i += 4) {
			u8 cur = bmtd.bbt_buf[i / 4];

			for (k = 0; k < 4; k++, cur >>= 2)
				buf[k] = (cur & 3) ? 'B' : '.';

			buf[4] = 0;
			printk("[%06x] %s\n", i * bmtd.blk_size, buf);
		}
		break;
	case 100:
#if 0
		for (i = bmtd.bmt_blk_idx; i < bmtd.total_blks - 1; i++)
			bbt_nand_erase(bmtd.bmt_blk_idx);
#endif

		bmtd.bmt_blk_idx = bmtd.total_blks - 1;
		bbt_nand_erase(bmtd.bmt_blk_idx);
		write_bmt(bmtd.bmt_blk_idx, bmtd.bbt_buf);
		break;
	default:
		break;
	}
	return 0;
}

const struct mtk_bmt_ops mtk_bmt_bbt_ops = {
	.sig = "mtknand",
	.sig_len = 7,
	.init = mtk_bmt_init_bbt,
	.remap_block = remap_block_bbt,
	.unmap_block = unmap_block_bbt,
	.get_mapping_block = get_mapping_block_index_bbt,
	.debug = mtk_bmt_debug_bbt,
};