/*
 * Copyright (c) 2017 MediaTek Inc.
 * Author: Xiangsheng Hou <xiangsheng.hou@mediatek.com>
 * Copyright (c) 2020-2022 Felix Fietkau <nbd@nbd.name>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/bits.h>
#include "mtk_bmt.h"

struct bmt_desc bmtd = {};

/* -------- Nand operations wrapper -------- */
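/*
 * Copy a whole eraseblock page by page, including OOB data, using the
 * original (unwrapped) mtd callbacks. Copying stops at max_offset or at
 * the first page where the read returns no data.
 */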
int bbt_nand_copy(u16 dest_blk, u16 src_blk, loff_t max_offset)
{
	int pages = bmtd.blk_size >> bmtd.pg_shift;
	loff_t src = (loff_t)src_blk << bmtd.blk_shift;
	loff_t dest = (loff_t)dest_blk << bmtd.blk_shift;
	loff_t offset = 0;
	uint8_t oob[64];
	int i, ret;

	for (i = 0; i < pages; i++) {
		struct mtd_oob_ops rd_ops = {
			.mode = MTD_OPS_PLACE_OOB,
			.oobbuf = oob,
			.ooblen = min_t(int, bmtd.mtd->oobsize / pages, sizeof(oob)),
			.datbuf = bmtd.data_buf,
			.len = bmtd.pg_size,
		};
		struct mtd_oob_ops wr_ops = {
			.mode = MTD_OPS_PLACE_OOB,
			.oobbuf = oob,
			.datbuf = bmtd.data_buf,
			.len = bmtd.pg_size,
		};

		if (offset >= max_offset)
			break;

		ret = bmtd._read_oob(bmtd.mtd, src + offset, &rd_ops);
		if (ret < 0 && !mtd_is_bitflip(ret))
			return ret;

		if (!rd_ops.retlen)
			break;

		/* Write back exactly as much OOB data as the read returned */
		wr_ops.ooblen = rd_ops.oobretlen;
		ret = bmtd._write_oob(bmtd.mtd, dest + offset, &wr_ops);
		if (ret < 0)
			return ret;

		offset += rd_ops.retlen;
	}

	return 0;
}

/* -------- Bad Blocks Management -------- */
bool mapping_block_in_range(int block, int *start, int *end)
{
	const __be32 *cur = bmtd.remap_range;
	u32 addr = block << bmtd.blk_shift;
	int i;

	if (!cur || !bmtd.remap_range_len) {
		*start = 0;
		*end = bmtd.total_blks;
		return true;
	}

	for (i = 0; i < bmtd.remap_range_len; i++, cur += 2) {
		if (addr < be32_to_cpu(cur[0]) || addr >= be32_to_cpu(cur[1]))
			continue;

		/* return block numbers, matching the no-range default above */
		*start = be32_to_cpu(cur[0]) >> bmtd.blk_shift;
		*end = be32_to_cpu(cur[1]) >> bmtd.blk_shift;
		return true;
	}

	return false;
}
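
/*
 * Read wrapper: translate each logical block through get_mapping_block()
 * before reading. Unrecoverable read errors remap the block and retry (up
 * to 10 times); reads at or above the bitflip threshold also trigger a
 * remap, as long as the block lies within a remappable range.
 */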
static int
mtk_bmt_read(struct mtd_info *mtd, loff_t from,
	     struct mtd_oob_ops *ops)
{
	struct mtd_oob_ops cur_ops = *ops;
	int retry_count = 0;
	loff_t cur_from;
	int ret = 0;
	int max_bitflips = 0;
	int start, end;

	ops->retlen = 0;
	ops->oobretlen = 0;

	while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) {
		int cur_ret;

		u32 offset = from & (bmtd.blk_size - 1);
		u32 block = from >> bmtd.blk_shift;
		int cur_block;

		cur_block = bmtd.ops->get_mapping_block(block);
		if (cur_block < 0)
			return -EIO;

		cur_from = ((loff_t)cur_block << bmtd.blk_shift) + offset;

		cur_ops.oobretlen = 0;
		cur_ops.retlen = 0;
		cur_ops.len = min_t(u32, mtd->erasesize - offset,
				    ops->len - ops->retlen);
		cur_ret = bmtd._read_oob(mtd, cur_from, &cur_ops);
		if (cur_ret < 0)
			ret = cur_ret;
		else
			max_bitflips = max_t(int, max_bitflips, cur_ret);
		if (cur_ret < 0 && !mtd_is_bitflip(cur_ret)) {
			bmtd.ops->remap_block(block, cur_block, mtd->erasesize);
			if (retry_count++ < 10)
				continue;

			goto out;
		}

		if (cur_ret >= mtd->bitflip_threshold &&
		    mapping_block_in_range(block, &start, &end))
			bmtd.ops->remap_block(block, cur_block, mtd->erasesize);

		ops->retlen += cur_ops.retlen;
		ops->oobretlen += cur_ops.oobretlen;

		cur_ops.ooboffs = 0;
		cur_ops.datbuf += cur_ops.retlen;
		cur_ops.oobbuf += cur_ops.oobretlen;
		cur_ops.ooblen -= cur_ops.oobretlen;

		if (!cur_ops.len)
			cur_ops.len = mtd->erasesize - offset;

		from += cur_ops.len;
		retry_count = 0;
	}

out:
	if (ret < 0)
		return ret;

	return max_bitflips;
}
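
/*
 * Write wrapper: same translation as the read path. A failed write remaps
 * the block (the failing offset bounds how much of the old block is worth
 * copying) and retries up to 10 times on the replacement block.
 */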
static int
mtk_bmt_write(struct mtd_info *mtd, loff_t to,
	      struct mtd_oob_ops *ops)
{
	struct mtd_oob_ops cur_ops = *ops;
	int retry_count = 0;
	loff_t cur_to;
	int ret;

	ops->retlen = 0;
	ops->oobretlen = 0;

	while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) {
		u32 offset = to & (bmtd.blk_size - 1);
		u32 block = to >> bmtd.blk_shift;
		int cur_block;

		cur_block = bmtd.ops->get_mapping_block(block);
		if (cur_block < 0)
			return -EIO;

		cur_to = ((loff_t)cur_block << bmtd.blk_shift) + offset;

		cur_ops.oobretlen = 0;
		cur_ops.retlen = 0;
		cur_ops.len = min_t(u32, bmtd.blk_size - offset,
				    ops->len - ops->retlen);
		ret = bmtd._write_oob(mtd, cur_to, &cur_ops);
		if (ret < 0) {
			bmtd.ops->remap_block(block, cur_block, offset);
			if (retry_count++ < 10)
				continue;

			return ret;
		}

		ops->retlen += cur_ops.retlen;
		ops->oobretlen += cur_ops.oobretlen;

		cur_ops.ooboffs = 0;
		cur_ops.datbuf += cur_ops.retlen;
		cur_ops.oobbuf += cur_ops.oobretlen;
		cur_ops.ooblen -= cur_ops.oobretlen;

		if (!cur_ops.len)
			cur_ops.len = mtd->erasesize - offset;

		to += cur_ops.len;
		retry_count = 0;
	}

	return 0;
}
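
/*
 * Erase wrapper: erase the mapped physical block for each logical block in
 * the requested range. A failed erase remaps and retries; after 10 failed
 * attempts the failing address is reported via instr->fail_addr.
 */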
static int
mtk_bmt_mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct erase_info mapped_instr = {
		.len = bmtd.blk_size,
	};
	int retry_count = 0;
	u64 start_addr, end_addr;
	int ret = 0;
	u16 orig_block;
	int block;

	start_addr = instr->addr & (~mtd->erasesize_mask);
	end_addr = instr->addr + instr->len;

	while (start_addr < end_addr) {
		orig_block = start_addr >> bmtd.blk_shift;
		block = bmtd.ops->get_mapping_block(orig_block);
		if (block < 0)
			return -EIO;
		mapped_instr.addr = (loff_t)block << bmtd.blk_shift;
		ret = bmtd._erase(mtd, &mapped_instr);
		if (ret) {
			bmtd.ops->remap_block(orig_block, block, 0);
			if (retry_count++ < 10)
				continue;
			instr->fail_addr = start_addr;
			break;
		}
		start_addr += mtd->erasesize;
		retry_count = 0;
	}

	return ret;
}
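
/*
 * Bad-block wrappers: both operate on the mapped physical block. A block
 * that reports bad is remapped and re-checked; marking a block bad remaps
 * it first, then marks the old physical block.
 */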
static int
mtk_bmt_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	int retry_count = 0;
	u16 orig_block = ofs >> bmtd.blk_shift;
	int block;
	int ret;

retry:
	block = bmtd.ops->get_mapping_block(orig_block);
	if (block < 0)
		return -EIO;

	ret = bmtd._block_isbad(mtd, (loff_t)block << bmtd.blk_shift);
	if (ret) {
		bmtd.ops->remap_block(orig_block, block, bmtd.blk_size);
		if (retry_count++ < 10)
			goto retry;
	}
	return ret;
}

static int
mtk_bmt_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	u16 orig_block = ofs >> bmtd.blk_shift;
	int block;

	block = bmtd.ops->get_mapping_block(orig_block);
	if (block < 0)
		return -EIO;

	bmtd.ops->remap_block(orig_block, block, bmtd.blk_size);

	return bmtd._block_markbad(mtd, (loff_t)block << bmtd.blk_shift);
}
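
/*
 * Save the driver's original mtd callbacks in bmtd and install the BMT
 * wrappers in their place; mtk_bmt_detach() restores the originals.
 */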
static void
mtk_bmt_replace_ops(struct mtd_info *mtd)
{
	bmtd._read_oob = mtd->_read_oob;
	bmtd._write_oob = mtd->_write_oob;
	bmtd._erase = mtd->_erase;
	bmtd._block_isbad = mtd->_block_isbad;
	bmtd._block_markbad = mtd->_block_markbad;

	mtd->_read_oob = mtk_bmt_read;
	mtd->_write_oob = mtk_bmt_write;
	mtd->_erase = mtk_bmt_mtd_erase;
	mtd->_block_isbad = mtk_bmt_block_isbad;
	mtd->_block_markbad = mtk_bmt_block_markbad;
}
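
/* debugfs hooks for manually unmapping or force-remapping a block */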
static int mtk_bmt_debug_mark_good(void *data, u64 val)
{
	bmtd.ops->unmap_block(val >> bmtd.blk_shift);

	return 0;
}

static int mtk_bmt_debug_mark_bad(void *data, u64 val)
{
	u32 block = val >> bmtd.blk_shift;
	int cur_block;

	cur_block = bmtd.ops->get_mapping_block(block);
	if (cur_block < 0)
		return -EIO;

	bmtd.ops->remap_block(block, cur_block, bmtd.blk_size);

	return 0;
}

static int mtk_bmt_debug(void *data, u64 val)
{
	return bmtd.ops->debug(data, val);
}

DEFINE_DEBUGFS_ATTRIBUTE(fops_mark_good, NULL, mtk_bmt_debug_mark_good, "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(fops_mark_bad, NULL, mtk_bmt_debug_mark_bad, "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(fops_debug, NULL, mtk_bmt_debug, "%llu\n");
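
/*
 * The debugfs files take a flash byte offset, written as a decimal number.
 * For example (a hypothetical session, assuming debugfs is mounted at
 * /sys/kernel/debug), force-remapping the block containing offset 131072:
 *
 *	echo 131072 > /sys/kernel/debug/mtk-bmt/mark_bad
 */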
static void
mtk_bmt_add_debugfs(void)
{
	struct dentry *dir;

	dir = bmtd.debugfs_dir = debugfs_create_dir("mtk-bmt", NULL);
	if (!dir)
		return;

	debugfs_create_file_unsafe("mark_good", S_IWUSR, dir, NULL, &fops_mark_good);
	debugfs_create_file_unsafe("mark_bad", S_IWUSR, dir, NULL, &fops_mark_bad);
	debugfs_create_file_unsafe("debug", S_IWUSR, dir, NULL, &fops_debug);
}
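
/*
 * Undo mtk_bmt_attach(): remove the debugfs entries, free the buffers and
 * restore both the original mtd callbacks and the unreserved device size.
 */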
void mtk_bmt_detach(struct mtd_info *mtd)
{
	if (bmtd.mtd != mtd)
		return;

	if (bmtd.debugfs_dir)
		debugfs_remove_recursive(bmtd.debugfs_dir);
	bmtd.debugfs_dir = NULL;

	kfree(bmtd.bbt_buf);
	kfree(bmtd.data_buf);

	mtd->_read_oob = bmtd._read_oob;
	mtd->_write_oob = bmtd._write_oob;
	mtd->_erase = bmtd._erase;
	mtd->_block_isbad = bmtd._block_isbad;
	mtd->_block_markbad = bmtd._block_markbad;
	mtd->size = bmtd.total_blks << bmtd.blk_shift;

	memset(&bmtd, 0, sizeof(bmtd));
}
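
/*
 * Attaching is controlled by properties on the flash node in the device
 * tree. A minimal sketch (node label and addresses are made up for
 * illustration):
 *
 *	&snand {
 *		mediatek,bmt-v2;
 *		mediatek,bmt-remap-range = <0x0 0x6c0000>;
 *	};
 *
 * "mediatek,bmt-v2" (or "mediatek,bbt") selects the mapping implementation;
 * each "mediatek,bmt-remap-range" entry is a <start end> pair of flash byte
 * addresses consumed by mapping_block_in_range() above.
 */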
int mtk_bmt_attach(struct mtd_info *mtd)
{
	struct device_node *np;
	int ret = 0;

	if (bmtd.mtd)
		return -ENOSPC;

	np = mtd_get_of_node(mtd);
	if (!np)
		return 0;

	if (of_property_read_bool(np, "mediatek,bmt-v2"))
		bmtd.ops = &mtk_bmt_v2_ops;
	else if (of_property_read_bool(np, "mediatek,bbt"))
		bmtd.ops = &mtk_bmt_bbt_ops;
	else
		return 0;

	bmtd.remap_range = of_get_property(np, "mediatek,bmt-remap-range",
					   &bmtd.remap_range_len);
	bmtd.remap_range_len /= 8;

	bmtd.mtd = mtd;
	mtk_bmt_replace_ops(mtd);

	bmtd.blk_size = mtd->erasesize;
	bmtd.blk_shift = ffs(bmtd.blk_size) - 1;
	bmtd.pg_size = mtd->writesize;
	bmtd.pg_shift = ffs(bmtd.pg_size) - 1;
	bmtd.total_blks = mtd->size >> bmtd.blk_shift;

	bmtd.data_buf = kzalloc(bmtd.pg_size, GFP_KERNEL);
	if (!bmtd.data_buf) {
		pr_info("nand: FATAL ERR: allocate buffer failed!\n");
		ret = -ENOMEM;
		goto error;
	}

	memset(bmtd.data_buf, 0xff, bmtd.pg_size);

	ret = bmtd.ops->init(np);
	if (ret)
		goto error;

	mtk_bmt_add_debugfs();
	return 0;

error:
	mtk_bmt_detach(mtd);
	return ret;
}

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Xiangsheng Hou <xiangsheng.hou@mediatek.com>, Felix Fietkau <nbd@nbd.name>");
MODULE_DESCRIPTION("Bad Block mapping management v2 for MediaTek NAND Flash Driver");