/*
 * Copyright (c) 2017 MediaTek Inc.
 * Author: Xiangsheng Hou <xiangsheng.hou@mediatek.com>
 * Copyright (c) 2020-2022 Felix Fietkau <nbd@nbd.name>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/bits.h>
#include "mtk_bmt.h"

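/*
 * Global BMT state. Only a single MTD device can be attached at a time;
 * mtk_bmt_attach() enforces this by failing when bmtd.mtd is already set.
 */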
struct bmt_desc bmtd = {};

/* -------- Nand operations wrapper -------- */
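/*
 * Copy a block page by page, including OOB data, from src_blk to dest_blk.
 * Copying stops early at max_offset, at the first empty page, or on a hard
 * read error.
 */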
int bbt_nand_copy(u16 dest_blk, u16 src_blk, loff_t max_offset)
{
	int pages = bmtd.blk_size >> bmtd.pg_shift;
	loff_t src = (loff_t)src_blk << bmtd.blk_shift;
	loff_t dest = (loff_t)dest_blk << bmtd.blk_shift;
	loff_t offset = 0;
	uint8_t oob[64];
	int i, ret;

	for (i = 0; i < pages; i++) {
		struct mtd_oob_ops rd_ops = {
			.mode = MTD_OPS_PLACE_OOB,
			.oobbuf = oob,
			.ooblen = min_t(int, bmtd.mtd->oobsize / pages, sizeof(oob)),
			.datbuf = bmtd.data_buf,
			.len = bmtd.pg_size,
		};
		struct mtd_oob_ops wr_ops = {
			.mode = MTD_OPS_PLACE_OOB,
			.oobbuf = oob,
			.datbuf = bmtd.data_buf,
			.len = bmtd.pg_size,
		};

		if (offset >= max_offset)
			break;

		ret = bmtd._read_oob(bmtd.mtd, src + offset, &rd_ops);
		if (ret < 0 && !mtd_is_bitflip(ret))
			return ret;

		/* Stop on the first empty page */
		if (!rd_ops.retlen)
			break;

		/* Write back exactly as many OOB bytes as were read; this
		 * must be set before the write call, otherwise no OOB data
		 * is copied at all.
		 */
		wr_ops.ooblen = rd_ops.oobretlen;

		ret = bmtd._write_oob(bmtd.mtd, dest + offset, &wr_ops);
		if (ret < 0)
			return ret;

		offset += rd_ops.retlen;
	}

	return 0;
}

/* -------- Bad Blocks Management -------- */
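/*
 * Check whether a block is covered by the "mediatek,bmt-remap-range"
 * property. If it is (or if no ranges are configured), return the
 * containing range as [*start, *end) block numbers.
 */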
bool mapping_block_in_range(int block, int *start, int *end)
{
	const __be32 *cur = bmtd.remap_range;
	u32 addr = block << bmtd.blk_shift;
	int i;

	if (!cur || !bmtd.remap_range_len) {
		*start = 0;
		*end = bmtd.total_blks;
		return true;
	}

	for (i = 0; i < bmtd.remap_range_len; i++, cur += 2) {
		if (addr < be32_to_cpu(cur[0]) || addr >= be32_to_cpu(cur[1]))
			continue;

		/* Convert the matching range from byte addresses to block
		 * numbers, consistent with the full-device case above.
		 */
		*start = be32_to_cpu(cur[0]) >> bmtd.blk_shift;
		*end = be32_to_cpu(cur[1]) >> bmtd.blk_shift;
		return true;
	}

	return false;
}

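/*
 * Read wrapper installed in place of mtd->_read_oob. Reads are split at
 * block boundaries so that each chunk can be redirected through the bad
 * block mapping; failing blocks are remapped and retried.
 */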
static int
mtk_bmt_read(struct mtd_info *mtd, loff_t from,
	     struct mtd_oob_ops *ops)
{
	struct mtd_oob_ops cur_ops = *ops;
	int retry_count = 0;
	loff_t cur_from;
	int ret = 0;
	int max_bitflips = 0;
	int start, end;

	ops->retlen = 0;
	ops->oobretlen = 0;

	while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) {
		int cur_ret;

		u32 offset = from & (bmtd.blk_size - 1);
		u32 block = from >> bmtd.blk_shift;
		u32 cur_block;

		cur_block = bmtd.ops->get_mapping_block(block);
		cur_from = ((loff_t)cur_block << bmtd.blk_shift) + offset;

		cur_ops.oobretlen = 0;
		cur_ops.retlen = 0;
		cur_ops.len = min_t(u32, mtd->erasesize - offset,
				    ops->len - ops->retlen);
		cur_ret = bmtd._read_oob(mtd, cur_from, &cur_ops);
		if (cur_ret < 0)
			ret = cur_ret;
		else
			max_bitflips = max_t(int, max_bitflips, cur_ret);
		if (cur_ret < 0 && !mtd_is_bitflip(cur_ret)) {
			/* Hard read error: remap the block and retry from
			 * the new mapping, up to 10 times per block.
			 */
			bmtd.ops->remap_block(block, cur_block, mtd->erasesize);
			if (retry_count++ < 10)
				continue;

			goto out;
		}

		/* Preemptively remap blocks with excessive bitflips */
		if (cur_ret >= mtd->bitflip_threshold &&
		    mapping_block_in_range(block, &start, &end))
			bmtd.ops->remap_block(block, cur_block, mtd->erasesize);

		ops->retlen += cur_ops.retlen;
		ops->oobretlen += cur_ops.oobretlen;

		cur_ops.ooboffs = 0;
		cur_ops.datbuf += cur_ops.retlen;
		cur_ops.oobbuf += cur_ops.oobretlen;
		cur_ops.ooblen -= cur_ops.oobretlen;

		/* OOB-only reads still advance by a full block */
		if (!cur_ops.len)
			cur_ops.len = mtd->erasesize - offset;

		from += cur_ops.len;
		retry_count = 0;
	}

out:
	if (ret < 0)
		return ret;

	return max_bitflips;
}

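/*
 * Write wrapper installed in place of mtd->_write_oob. Like the read path,
 * writes are split per block and redirected through the mapping; on a write
 * error the block is remapped and the write retried.
 */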
static int
mtk_bmt_write(struct mtd_info *mtd, loff_t to,
	      struct mtd_oob_ops *ops)
{
	struct mtd_oob_ops cur_ops = *ops;
	int retry_count = 0;
	loff_t cur_to;
	int ret;

	ops->retlen = 0;
	ops->oobretlen = 0;

	while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) {
		u32 offset = to & (bmtd.blk_size - 1);
		u32 block = to >> bmtd.blk_shift;
		u32 cur_block;

		cur_block = bmtd.ops->get_mapping_block(block);
		cur_to = ((loff_t)cur_block << bmtd.blk_shift) + offset;

		cur_ops.oobretlen = 0;
		cur_ops.retlen = 0;
		cur_ops.len = min_t(u32, bmtd.blk_size - offset,
				    ops->len - ops->retlen);
		ret = bmtd._write_oob(mtd, cur_to, &cur_ops);
		if (ret < 0) {
			/* Remap, preserving the data written up to the
			 * failed offset, and retry on the new block.
			 */
			bmtd.ops->remap_block(block, cur_block, offset);
			if (retry_count++ < 10)
				continue;

			return ret;
		}

		ops->retlen += cur_ops.retlen;
		ops->oobretlen += cur_ops.oobretlen;

		cur_ops.ooboffs = 0;
		cur_ops.datbuf += cur_ops.retlen;
		cur_ops.oobbuf += cur_ops.oobretlen;
		cur_ops.ooblen -= cur_ops.oobretlen;

		/* OOB-only writes still advance by a full block */
		if (!cur_ops.len)
			cur_ops.len = mtd->erasesize - offset;

		to += cur_ops.len;
		retry_count = 0;
	}

	return 0;
}

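/*
 * Erase wrapper: erase each block through its current mapping, remapping
 * and retrying on failure.
 */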
static int
mtk_bmt_mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct erase_info mapped_instr = {
		.len = bmtd.blk_size,
	};
	int retry_count = 0;
	u64 start_addr, end_addr;
	int ret = 0;
	u16 orig_block, block;

	start_addr = instr->addr & (~mtd->erasesize_mask);
	end_addr = instr->addr + instr->len;

	while (start_addr < end_addr) {
		orig_block = start_addr >> bmtd.blk_shift;
		block = bmtd.ops->get_mapping_block(orig_block);
		mapped_instr.addr = (loff_t)block << bmtd.blk_shift;
		ret = bmtd._erase(mtd, &mapped_instr);
		if (ret) {
			/* Erase failed: remap and retry the same block */
			bmtd.ops->remap_block(orig_block, block, 0);
			if (retry_count++ < 10)
				continue;
			instr->fail_addr = start_addr;
			break;
		}
		start_addr += mtd->erasesize;
		retry_count = 0;
	}

	return ret;
}
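
/*
 * Bad-block check through the mapping; a block that reports bad is
 * remapped and checked again, up to 10 attempts.
 */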
static int
mtk_bmt_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	int retry_count = 0;
	u16 orig_block = ofs >> bmtd.blk_shift;
	u16 block;
	int ret;

retry:
	block = bmtd.ops->get_mapping_block(orig_block);
	ret = bmtd._block_isbad(mtd, (loff_t)block << bmtd.blk_shift);
	if (ret) {
		bmtd.ops->remap_block(orig_block, block, bmtd.blk_size);
		if (retry_count++ < 10)
			goto retry;
	}
	return ret;
}

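/*
 * Remap the logical block to a fresh physical block, then mark the old
 * physical block bad.
 */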
static int
mtk_bmt_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	u16 orig_block = ofs >> bmtd.blk_shift;
	u16 block = bmtd.ops->get_mapping_block(orig_block);

	bmtd.ops->remap_block(orig_block, block, bmtd.blk_size);

	return bmtd._block_markbad(mtd, (loff_t)block << bmtd.blk_shift);
}

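/* Save the original mtd callbacks and install the BMT wrappers */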
static void
mtk_bmt_replace_ops(struct mtd_info *mtd)
{
	bmtd._read_oob = mtd->_read_oob;
	bmtd._write_oob = mtd->_write_oob;
	bmtd._erase = mtd->_erase;
	bmtd._block_isbad = mtd->_block_isbad;
	bmtd._block_markbad = mtd->_block_markbad;

	mtd->_read_oob = mtk_bmt_read;
	mtd->_write_oob = mtk_bmt_write;
	mtd->_erase = mtk_bmt_mtd_erase;
	mtd->_block_isbad = mtk_bmt_block_isbad;
	mtd->_block_markbad = mtk_bmt_block_markbad;
}

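/*
 * debugfs handlers: writing a flash byte offset to "mark_good" drops the
 * block's mapping, writing to "mark_bad" forces a remap.
 */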
static int mtk_bmt_debug_mark_good(void *data, u64 val)
{
	bmtd.ops->unmap_block(val >> bmtd.blk_shift);

	return 0;
}

static int mtk_bmt_debug_mark_bad(void *data, u64 val)
{
	u32 block = val >> bmtd.blk_shift;
	u16 cur_block = bmtd.ops->get_mapping_block(block);

	bmtd.ops->remap_block(block, cur_block, bmtd.blk_size);

	return 0;
}

static int mtk_bmt_debug(void *data, u64 val)
{
	return bmtd.ops->debug(data, val);
}

DEFINE_DEBUGFS_ATTRIBUTE(fops_mark_good, NULL, mtk_bmt_debug_mark_good, "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(fops_mark_bad, NULL, mtk_bmt_debug_mark_bad, "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(fops_debug, NULL, mtk_bmt_debug, "%llu\n");

static void
mtk_bmt_add_debugfs(void)
{
	struct dentry *dir;

	dir = bmtd.debugfs_dir = debugfs_create_dir("mtk-bmt", NULL);
	if (!dir)
		return;

	debugfs_create_file_unsafe("mark_good", S_IWUSR, dir, NULL, &fops_mark_good);
	debugfs_create_file_unsafe("mark_bad", S_IWUSR, dir, NULL, &fops_mark_bad);
	debugfs_create_file_unsafe("debug", S_IWUSR, dir, NULL, &fops_debug);
}

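/* Undo mtk_bmt_attach(): restore the original callbacks and free state */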
void mtk_bmt_detach(struct mtd_info *mtd)
{
	if (bmtd.mtd != mtd)
		return;

	if (bmtd.debugfs_dir)
		debugfs_remove_recursive(bmtd.debugfs_dir);
	bmtd.debugfs_dir = NULL;

	kfree(bmtd.bbt_buf);
	kfree(bmtd.data_buf);

	mtd->_read_oob = bmtd._read_oob;
	mtd->_write_oob = bmtd._write_oob;
	mtd->_erase = bmtd._erase;
	mtd->_block_isbad = bmtd._block_isbad;
	mtd->_block_markbad = bmtd._block_markbad;
	/* Restore the full device size (attach may have reserved blocks) */
	mtd->size = (loff_t)bmtd.total_blks << bmtd.blk_shift;

	memset(&bmtd, 0, sizeof(bmtd));
}

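/*
 * Attach the BMT layer to an MTD device. The mapping implementation (v2
 * table or plain BBT) is selected via device tree properties; without a
 * matching property the device is left untouched.
 */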
int mtk_bmt_attach(struct mtd_info *mtd)
{
	struct device_node *np;
	int ret = 0;

	if (bmtd.mtd)
		return -ENOSPC;

	np = mtd_get_of_node(mtd);
	if (!np)
		return 0;

	if (of_property_read_bool(np, "mediatek,bmt-v2"))
		bmtd.ops = &mtk_bmt_v2_ops;
	else if (of_property_read_bool(np, "mediatek,bbt"))
		bmtd.ops = &mtk_bmt_bbt_ops;
	else
		return 0;

	bmtd.remap_range = of_get_property(np, "mediatek,bmt-remap-range",
					   &bmtd.remap_range_len);
	/* Each range entry is a pair of be32 values (start, end) */
	bmtd.remap_range_len /= 8;

	bmtd.mtd = mtd;
	mtk_bmt_replace_ops(mtd);

	bmtd.blk_size = mtd->erasesize;
	bmtd.blk_shift = ffs(bmtd.blk_size) - 1;
	bmtd.pg_size = mtd->writesize;
	bmtd.pg_shift = ffs(bmtd.pg_size) - 1;
	bmtd.total_blks = mtd->size >> bmtd.blk_shift;

	bmtd.data_buf = kzalloc(bmtd.pg_size, GFP_KERNEL);
	if (!bmtd.data_buf) {
		pr_err("nand: FATAL ERR: allocate buffer failed!\n");
		ret = -ENOMEM;
		goto error;
	}

	memset(bmtd.data_buf, 0xff, bmtd.pg_size);

	ret = bmtd.ops->init(np);
	if (ret)
		goto error;

	mtk_bmt_add_debugfs();
	return 0;

error:
	mtk_bmt_detach(mtd);
	return ret;
}

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Xiangsheng Hou <xiangsheng.hou@mediatek.com>, Felix Fietkau <nbd@nbd.name>");
MODULE_DESCRIPTION("Bad Block mapping management v2 for MediaTek NAND Flash Driver");