kernel: mtk_bmt: on error, do not attempt to remap out-of-range blocks
[openwrt/openwrt.git] target/linux/generic/files/drivers/mtd/nand/mtk_bmt.c
/*
 * Copyright (c) 2017 MediaTek Inc.
 * Author: Xiangsheng Hou <xiangsheng.hou@mediatek.com>
 * Copyright (c) 2020-2022 Felix Fietkau <nbd@nbd.name>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/bits.h>
#include "mtk_bmt.h"

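/* Global BMT state; only one MTD device can be attached at a time */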
struct bmt_desc bmtd = {};

/* -------- Nand operations wrapper -------- */
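/*
 * Copy a block's contents (page data plus OOB) from src_blk to dest_blk,
 * stopping at max_offset or at the first page read that returns no data.
 */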
int bbt_nand_copy(u16 dest_blk, u16 src_blk, loff_t max_offset)
{
	int pages = bmtd.blk_size >> bmtd.pg_shift;
	loff_t src = (loff_t)src_blk << bmtd.blk_shift;
	loff_t dest = (loff_t)dest_blk << bmtd.blk_shift;
	loff_t offset = 0;
	uint8_t oob[64];
	int i, ret;

	for (i = 0; i < pages; i++) {
		struct mtd_oob_ops rd_ops = {
			.mode = MTD_OPS_PLACE_OOB,
			.oobbuf = oob,
			.ooblen = min_t(int, bmtd.mtd->oobsize / pages, sizeof(oob)),
			.datbuf = bmtd.data_buf,
			.len = bmtd.pg_size,
		};
		struct mtd_oob_ops wr_ops = {
			.mode = MTD_OPS_PLACE_OOB,
			.oobbuf = oob,
			.datbuf = bmtd.data_buf,
			.len = bmtd.pg_size,
		};

		if (offset >= max_offset)
			break;

		ret = bmtd._read_oob(bmtd.mtd, src + offset, &rd_ops);
		if (ret < 0 && !mtd_is_bitflip(ret))
			return ret;

		if (!rd_ops.retlen)
			break;

		/* write back only as much OOB data as was actually read */
		wr_ops.ooblen = rd_ops.oobretlen;
		ret = bmtd._write_oob(bmtd.mtd, dest + offset, &wr_ops);
		if (ret < 0)
			return ret;

		offset += rd_ops.retlen;
	}

	return 0;
}

/* -------- Bad Blocks Management -------- */
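/*
 * Check whether a block is covered by the "mediatek,bmt-remap-range"
 * property. With no ranges configured, every block is remappable;
 * otherwise *start/*end are set to the matching range and false is
 * returned for blocks outside all ranges.
 */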
bool mapping_block_in_range(int block, int *start, int *end)
{
	const __be32 *cur = bmtd.remap_range;
	u32 addr = block << bmtd.blk_shift;
	int i;

	if (!cur || !bmtd.remap_range_len) {
		*start = 0;
		*end = bmtd.total_blks;
		return true;
	}

	for (i = 0; i < bmtd.remap_range_len; i++, cur += 2) {
		if (addr < be32_to_cpu(cur[0]) || addr >= be32_to_cpu(cur[1]))
			continue;

		*start = be32_to_cpu(cur[0]);
		*end = be32_to_cpu(cur[1]);
		return true;
	}

	return false;
}

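/*
 * Remap a block only if it lies within the configured remap range;
 * out-of-range blocks are left alone so the caller reports the error instead.
 */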
static bool
mtk_bmt_remap_block(u32 block, u32 mapped_block, int copy_len)
{
	int start, end;

	if (!mapping_block_in_range(block, &start, &end))
		return false;

	return bmtd.ops->remap_block(block, mapped_block, copy_len);
}

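/*
 * Read wrapper: translate each logical block to its mapped block, retry with
 * a fresh mapping on uncorrectable errors (up to 10 times per block), and
 * preventively remap blocks whose bitflip count reaches the threshold.
 */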
static int
mtk_bmt_read(struct mtd_info *mtd, loff_t from,
	     struct mtd_oob_ops *ops)
{
	struct mtd_oob_ops cur_ops = *ops;
	int retry_count = 0;
	loff_t cur_from;
	int ret = 0;
	int max_bitflips = 0;

	ops->retlen = 0;
	ops->oobretlen = 0;

	while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) {
		int cur_ret;

		u32 offset = from & (bmtd.blk_size - 1);
		u32 block = from >> bmtd.blk_shift;
		int cur_block;

		cur_block = bmtd.ops->get_mapping_block(block);
		if (cur_block < 0)
			return -EIO;

		cur_from = ((loff_t)cur_block << bmtd.blk_shift) + offset;

		cur_ops.oobretlen = 0;
		cur_ops.retlen = 0;
		cur_ops.len = min_t(u32, mtd->erasesize - offset,
				    ops->len - ops->retlen);
		cur_ret = bmtd._read_oob(mtd, cur_from, &cur_ops);
		if (cur_ret < 0)
			ret = cur_ret;
		else
			max_bitflips = max_t(int, max_bitflips, cur_ret);
		if (cur_ret < 0 && !mtd_is_bitflip(cur_ret)) {
			if (mtk_bmt_remap_block(block, cur_block, mtd->erasesize) &&
			    retry_count++ < 10)
				continue;

			goto out;
		}

		if (cur_ret >= mtd->bitflip_threshold)
			mtk_bmt_remap_block(block, cur_block, mtd->erasesize);

		ops->retlen += cur_ops.retlen;
		ops->oobretlen += cur_ops.oobretlen;

		cur_ops.ooboffs = 0;
		cur_ops.datbuf += cur_ops.retlen;
		cur_ops.oobbuf += cur_ops.oobretlen;
		cur_ops.ooblen -= cur_ops.oobretlen;

		if (!cur_ops.len)
			cur_ops.len = mtd->erasesize - offset;

		from += cur_ops.len;
		retry_count = 0;
	}

out:
	if (ret < 0)
		return ret;

	return max_bitflips;
}

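/*
 * Write wrapper: same block translation as the read path; on a write error
 * the block is remapped (copying the data before the failing offset) and
 * the write is retried, up to 10 times per block.
 */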
static int
mtk_bmt_write(struct mtd_info *mtd, loff_t to,
	      struct mtd_oob_ops *ops)
{
	struct mtd_oob_ops cur_ops = *ops;
	int retry_count = 0;
	loff_t cur_to;
	int ret;

	ops->retlen = 0;
	ops->oobretlen = 0;

	while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) {
		u32 offset = to & (bmtd.blk_size - 1);
		u32 block = to >> bmtd.blk_shift;
		int cur_block;

		cur_block = bmtd.ops->get_mapping_block(block);
		if (cur_block < 0)
			return -EIO;

		cur_to = ((loff_t)cur_block << bmtd.blk_shift) + offset;

		cur_ops.oobretlen = 0;
		cur_ops.retlen = 0;
		cur_ops.len = min_t(u32, bmtd.blk_size - offset,
				    ops->len - ops->retlen);
		ret = bmtd._write_oob(mtd, cur_to, &cur_ops);
		if (ret < 0) {
			if (mtk_bmt_remap_block(block, cur_block, offset) &&
			    retry_count++ < 10)
				continue;

			return ret;
		}

		ops->retlen += cur_ops.retlen;
		ops->oobretlen += cur_ops.oobretlen;

		cur_ops.ooboffs = 0;
		cur_ops.datbuf += cur_ops.retlen;
		cur_ops.oobbuf += cur_ops.oobretlen;
		cur_ops.ooblen -= cur_ops.oobretlen;

		if (!cur_ops.len)
			cur_ops.len = mtd->erasesize - offset;

		to += cur_ops.len;
		retry_count = 0;
	}

	return 0;
}

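/*
 * Erase wrapper: erase one mapped block at a time; a failing block is
 * remapped without copying its contents and the erase is retried.
 */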
static int
mtk_bmt_mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct erase_info mapped_instr = {
		.len = bmtd.blk_size,
	};
	int retry_count = 0;
	u64 start_addr, end_addr;
	int ret = 0;
	u16 orig_block;
	int block;

	start_addr = instr->addr & (~mtd->erasesize_mask);
	end_addr = instr->addr + instr->len;

	while (start_addr < end_addr) {
		orig_block = start_addr >> bmtd.blk_shift;
		block = bmtd.ops->get_mapping_block(orig_block);
		if (block < 0)
			return -EIO;
		mapped_instr.addr = (loff_t)block << bmtd.blk_shift;
		ret = bmtd._erase(mtd, &mapped_instr);
		if (ret) {
			if (mtk_bmt_remap_block(orig_block, block, 0) &&
			    retry_count++ < 10)
				continue;
			instr->fail_addr = start_addr;
			break;
		}
		start_addr += mtd->erasesize;
		retry_count = 0;
	}

	return ret;
}

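/*
 * Bad-block check wrapper: if the mapped block reports bad, try to remap it
 * and check the new mapping, retrying up to 10 times.
 */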
static int
mtk_bmt_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	int retry_count = 0;
	u16 orig_block = ofs >> bmtd.blk_shift;
	int block;
	int ret;

retry:
	block = bmtd.ops->get_mapping_block(orig_block);
	if (block < 0)
		return -EIO;

	ret = bmtd._block_isbad(mtd, (loff_t)block << bmtd.blk_shift);
	if (ret) {
		if (mtk_bmt_remap_block(orig_block, block, bmtd.blk_size) &&
		    retry_count++ < 10)
			goto retry;
	}
	return ret;
}

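/*
 * Mark-bad wrapper: remap the contents away from the failing block, then
 * mark the old mapped block bad in the underlying driver.
 */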
static int
mtk_bmt_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	u16 orig_block = ofs >> bmtd.blk_shift;
	int block;

	block = bmtd.ops->get_mapping_block(orig_block);
	if (block < 0)
		return -EIO;

	mtk_bmt_remap_block(orig_block, block, bmtd.blk_size);

	return bmtd._block_markbad(mtd, (loff_t)block << bmtd.blk_shift);
}

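/* Save the original mtd callbacks and install the BMT wrappers in their place */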
static void
mtk_bmt_replace_ops(struct mtd_info *mtd)
{
	bmtd._read_oob = mtd->_read_oob;
	bmtd._write_oob = mtd->_write_oob;
	bmtd._erase = mtd->_erase;
	bmtd._block_isbad = mtd->_block_isbad;
	bmtd._block_markbad = mtd->_block_markbad;

	mtd->_read_oob = mtk_bmt_read;
	mtd->_write_oob = mtk_bmt_write;
	mtd->_erase = mtk_bmt_mtd_erase;
	mtd->_block_isbad = mtk_bmt_block_isbad;
	mtd->_block_markbad = mtk_bmt_block_markbad;
}

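/*
 * debugfs interface: writing a flash offset to mark_good/mark_bad unmaps or
 * remaps the corresponding block; "debug" is forwarded to the active backend.
 */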
static int mtk_bmt_debug_mark_good(void *data, u64 val)
{
	bmtd.ops->unmap_block(val >> bmtd.blk_shift);

	return 0;
}

static int mtk_bmt_debug_mark_bad(void *data, u64 val)
{
	u32 block = val >> bmtd.blk_shift;
	int cur_block;

	cur_block = bmtd.ops->get_mapping_block(block);
	if (cur_block < 0)
		return -EIO;

	mtk_bmt_remap_block(block, cur_block, bmtd.blk_size);

	return 0;
}

static int mtk_bmt_debug(void *data, u64 val)
{
	return bmtd.ops->debug(data, val);
}

DEFINE_DEBUGFS_ATTRIBUTE(fops_mark_good, NULL, mtk_bmt_debug_mark_good, "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(fops_mark_bad, NULL, mtk_bmt_debug_mark_bad, "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(fops_debug, NULL, mtk_bmt_debug, "%llu\n");

static void
mtk_bmt_add_debugfs(void)
{
	struct dentry *dir;

	dir = bmtd.debugfs_dir = debugfs_create_dir("mtk-bmt", NULL);
	if (!dir)
		return;

	debugfs_create_file_unsafe("mark_good", S_IWUSR, dir, NULL, &fops_mark_good);
	debugfs_create_file_unsafe("mark_bad", S_IWUSR, dir, NULL, &fops_mark_bad);
	debugfs_create_file_unsafe("debug", S_IWUSR, dir, NULL, &fops_debug);
}

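/*
 * Detach from the MTD device: remove the debugfs entries, restore the
 * original callbacks and device size, and free the allocated buffers.
 */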
void mtk_bmt_detach(struct mtd_info *mtd)
{
	if (bmtd.mtd != mtd)
		return;

	if (bmtd.debugfs_dir)
		debugfs_remove_recursive(bmtd.debugfs_dir);
	bmtd.debugfs_dir = NULL;

	kfree(bmtd.bbt_buf);
	kfree(bmtd.data_buf);

	mtd->_read_oob = bmtd._read_oob;
	mtd->_write_oob = bmtd._write_oob;
	mtd->_erase = bmtd._erase;
	mtd->_block_isbad = bmtd._block_isbad;
	mtd->_block_markbad = bmtd._block_markbad;
	mtd->size = bmtd.total_blks << bmtd.blk_shift;

	memset(&bmtd, 0, sizeof(bmtd));
}

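/*
 * Attach to an MTD device: select the v2 or BBT backend from the device tree,
 * parse the optional remap range, install the wrapper ops and initialize the
 * backend's block mapping table.
 *
 * An illustrative device tree fragment (node label and values are examples,
 * not taken from this tree) enabling the v2 backend and restricting
 * remapping to the first 32 MiB:
 *
 *	&nand {
 *		mediatek,bmt-v2;
 *		mediatek,bmt-remap-range = <0x0 0x2000000>;
 *	};
 */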
int mtk_bmt_attach(struct mtd_info *mtd)
{
	struct device_node *np;
	int ret = 0;

	if (bmtd.mtd)
		return -ENOSPC;

	np = mtd_get_of_node(mtd);
	if (!np)
		return 0;

	if (of_property_read_bool(np, "mediatek,bmt-v2"))
		bmtd.ops = &mtk_bmt_v2_ops;
	else if (of_property_read_bool(np, "mediatek,bbt"))
		bmtd.ops = &mtk_bmt_bbt_ops;
	else
		return 0;

	bmtd.remap_range = of_get_property(np, "mediatek,bmt-remap-range",
					   &bmtd.remap_range_len);
	bmtd.remap_range_len /= 8;

	bmtd.mtd = mtd;
	mtk_bmt_replace_ops(mtd);

	bmtd.blk_size = mtd->erasesize;
	bmtd.blk_shift = ffs(bmtd.blk_size) - 1;
	bmtd.pg_size = mtd->writesize;
	bmtd.pg_shift = ffs(bmtd.pg_size) - 1;
	bmtd.total_blks = mtd->size >> bmtd.blk_shift;

	bmtd.data_buf = kzalloc(bmtd.pg_size, GFP_KERNEL);
	if (!bmtd.data_buf) {
		pr_err("nand: FATAL ERR: allocate buffer failed!\n");
		ret = -ENOMEM;
		goto error;
	}

	memset(bmtd.data_buf, 0xff, bmtd.pg_size);

	ret = bmtd.ops->init(np);
	if (ret)
		goto error;

	mtk_bmt_add_debugfs();
	return 0;

error:
	mtk_bmt_detach(mtd);
	return ret;
}


MODULE_LICENSE("GPL");
MODULE_AUTHOR("Xiangsheng Hou <xiangsheng.hou@mediatek.com>, Felix Fietkau <nbd@nbd.name>");
MODULE_DESCRIPTION("Bad Block mapping management v2 for MediaTek NAND Flash Driver");