kernel: add missing config symbols for 5.15
[openwrt/openwrt.git] / target / linux / mediatek / files-5.10 / drivers / mtd / mtk-snand / mtk-snand-mtd.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 2020 MediaTek Inc. All Rights Reserved.
4 *
5 * Author: Weijie Gao <weijie.gao@mediatek.com>
6 */
7
8 #include <linux/kernel.h>
9 #include <linux/module.h>
10 #include <linux/init.h>
11 #include <linux/device.h>
12 #include <linux/mutex.h>
13 #include <linux/clk.h>
14 #include <linux/slab.h>
15 #include <linux/interrupt.h>
16 #include <linux/dma-mapping.h>
17 #include <linux/wait.h>
18 #include <linux/mtd/mtd.h>
19 #include <linux/mtd/partitions.h>
20 #include <linux/of_platform.h>
21
22 #include "mtk-snand.h"
23 #include "mtk-snand-os.h"
24
/* Per-compatible OF match data: selects the SoC flavour of the controller. */
struct mtk_snand_of_id {
	enum mtk_snand_soc soc;
};
28
/* Driver state: one instance per probed SNFI controller and attached chip. */
struct mtk_snand_mtd {
	struct mtk_snand_plat_dev pdev;		/* OS abstraction handle passed to the core driver */

	struct clk *nfi_clk;			/* NFI controller clock ("nfi_clk") */
	struct clk *pad_clk;			/* SPI pad clock ("pad_clk") */
	struct clk *ecc_clk;			/* ECC engine clock ("ecc_clk") */

	void __iomem *nfi_regs;			/* "nfi" register window */
	void __iomem *ecc_regs;			/* "ecc" register window */

	int irq;				/* platform IRQ; negative when unavailable */

	bool quad_spi;				/* "mediatek,quad-spi" DT property */
	enum mtk_snand_soc soc;			/* SoC flavour from the OF match data */

	struct mtd_info mtd;			/* registered MTD device (embedded) */
	struct mtk_snand *snf;			/* core driver handle from mtk_snand_init() */
	struct mtk_snand_chip_info cinfo;	/* chip geometry reported by the core */
	uint8_t *page_cache;			/* bounce buffer: pagesize + sparesize bytes */
	struct mutex lock;			/* serializes all flash operations */
};
50
51 #define mtd_to_msm(mtd) container_of(mtd, struct mtk_snand_mtd, mtd)
52
53 static int mtk_snand_mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
54 {
55 struct mtk_snand_mtd *msm = mtd_to_msm(mtd);
56 u64 start_addr, end_addr;
57 int ret;
58
59 /* Do not allow write past end of device */
60 if ((instr->addr + instr->len) > msm->cinfo.chipsize) {
61 dev_err(msm->pdev.dev,
62 "attempt to erase beyond end of device\n");
63 return -EINVAL;
64 }
65
66 start_addr = instr->addr & (~mtd->erasesize_mask);
67 end_addr = instr->addr + instr->len;
68 if (end_addr & mtd->erasesize_mask) {
69 end_addr = (end_addr + mtd->erasesize_mask) &
70 (~mtd->erasesize_mask);
71 }
72
73 mutex_lock(&msm->lock);
74
75 while (start_addr < end_addr) {
76 if (mtk_snand_block_isbad(msm->snf, start_addr)) {
77 instr->fail_addr = start_addr;
78 ret = -EIO;
79 break;
80 }
81
82 ret = mtk_snand_erase_block(msm->snf, start_addr);
83 if (ret) {
84 instr->fail_addr = start_addr;
85 break;
86 }
87
88 start_addr += mtd->erasesize;
89 }
90
91 mutex_unlock(&msm->lock);
92
93 return ret;
94 }
95
/*
 * mtk_snand_mtd_read_data - read main data and/or OOB starting at @addr
 * @msm: driver instance
 * @addr: byte offset into the chip; may be unaligned within a page
 * @ops: MTD oob operation descriptor (len/ooblen/datbuf/oobbuf/mode)
 *
 * Reads page by page through the driver's bounce buffer (msm->page_cache),
 * copying the requested data/oob slices out to the caller's buffers.
 * Caller must hold msm->lock.
 *
 * Returns the maximum per-page bitflip count, -EBADMSG if any page had an
 * uncorrectable ECC error, or a negative error code on I/O failure.
 */
static int mtk_snand_mtd_read_data(struct mtk_snand_mtd *msm, uint64_t addr,
				   struct mtd_oob_ops *ops)
{
	struct mtd_info *mtd = &msm->mtd;
	size_t len, ooblen, maxooblen, chklen;
	uint32_t col, ooboffs;
	uint8_t *datcache, *oobcache;
	bool ecc_failed = false, raw = ops->mode == MTD_OPS_RAW ? true : false;
	int ret, max_bitflips = 0;

	/* Split the start address into page address + in-page column */
	col = addr & mtd->writesize_mask;
	addr &= ~mtd->writesize_mask;
	maxooblen = mtd_oobavail(mtd, ops);
	ooboffs = ops->ooboffs;
	ooblen = ops->ooblen;
	len = ops->len;

	/* Data lands at the start of the bounce buffer, OOB right after it */
	datcache = len ? msm->page_cache : NULL;
	oobcache = ooblen ? msm->page_cache + mtd->writesize : NULL;

	ops->oobretlen = 0;
	ops->retlen = 0;

	while (len || ooblen) {
		if (ops->mode == MTD_OPS_AUTO_OOB)
			ret = mtk_snand_read_page_auto_oob(msm->snf, addr,
				datcache, oobcache, maxooblen, NULL, raw);
		else
			ret = mtk_snand_read_page(msm->snf, addr, datcache,
				oobcache, raw);

		if (ret < 0 && ret != -EBADMSG)
			return ret;

		if (ret == -EBADMSG) {
			/* Uncorrectable page: record it but keep reading */
			mtd->ecc_stats.failed++;
			ecc_failed = true;
		} else {
			/* Non-negative ret is the bitflip count for this page */
			mtd->ecc_stats.corrected += ret;
			max_bitflips = max_t(int, ret, max_bitflips);
		}

		if (len) {
			/* Move data */
			chklen = mtd->writesize - col;
			if (chklen > len)
				chklen = len;

			memcpy(ops->datbuf + ops->retlen, datcache + col,
			       chklen);
			len -= chklen;
			col = 0; /* (col + chklen) % */
			ops->retlen += chklen;
		}

		if (ooblen) {
			/* Move oob */
			chklen = maxooblen - ooboffs;
			if (chklen > ooblen)
				chklen = ooblen;

			memcpy(ops->oobbuf + ops->oobretlen, oobcache + ooboffs,
			       chklen);
			ooblen -= chklen;
			ooboffs = 0; /* (ooboffs + chklen) % maxooblen; */
			ops->oobretlen += chklen;
		}

		addr += mtd->writesize;
	}

	return ecc_failed ? -EBADMSG : max_bitflips;
}
169
170 static int mtk_snand_mtd_read_oob(struct mtd_info *mtd, loff_t from,
171 struct mtd_oob_ops *ops)
172 {
173 struct mtk_snand_mtd *msm = mtd_to_msm(mtd);
174 uint32_t maxooblen;
175 int ret;
176
177 if (!ops->oobbuf && !ops->datbuf) {
178 if (ops->ooblen || ops->len)
179 return -EINVAL;
180
181 return 0;
182 }
183
184 switch (ops->mode) {
185 case MTD_OPS_PLACE_OOB:
186 case MTD_OPS_AUTO_OOB:
187 case MTD_OPS_RAW:
188 break;
189 default:
190 dev_err(msm->pdev.dev, "unsupported oob mode: %u\n", ops->mode);
191 return -EINVAL;
192 }
193
194 maxooblen = mtd_oobavail(mtd, ops);
195
196 /* Do not allow read past end of device */
197 if (ops->datbuf && (from + ops->len) > msm->cinfo.chipsize) {
198 dev_err(msm->pdev.dev,
199 "attempt to read beyond end of device\n");
200 return -EINVAL;
201 }
202
203 if (unlikely(ops->ooboffs >= maxooblen)) {
204 dev_err(msm->pdev.dev, "attempt to start read outside oob\n");
205 return -EINVAL;
206 }
207
208 if (unlikely(from >= msm->cinfo.chipsize ||
209 ops->ooboffs + ops->ooblen >
210 ((msm->cinfo.chipsize >> mtd->writesize_shift) -
211 (from >> mtd->writesize_shift)) *
212 maxooblen)) {
213 dev_err(msm->pdev.dev,
214 "attempt to read beyond end of device\n");
215 return -EINVAL;
216 }
217
218 mutex_lock(&msm->lock);
219 ret = mtk_snand_mtd_read_data(msm, from, ops);
220 mutex_unlock(&msm->lock);
221
222 return ret;
223 }
224
/*
 * mtk_snand_mtd_write_data - write main data and/or OOB starting at @addr
 * @msm: driver instance
 * @addr: byte offset into the chip; may be unaligned within a page
 * @ops: MTD oob operation descriptor (len/ooblen/datbuf/oobbuf/mode)
 *
 * Writes page by page through the bounce buffer, padding any part of the
 * page/oob not covered by the caller's buffers with 0xff (NAND erased
 * state, so those bytes are left untouched). Caller must hold msm->lock.
 *
 * Returns 0 on success or a negative error code from the page write.
 */
static int mtk_snand_mtd_write_data(struct mtk_snand_mtd *msm, uint64_t addr,
				    struct mtd_oob_ops *ops)
{
	struct mtd_info *mtd = &msm->mtd;
	size_t len, ooblen, maxooblen, chklen, oobwrlen;
	uint32_t col, ooboffs;
	uint8_t *datcache, *oobcache;
	bool raw = ops->mode == MTD_OPS_RAW ? true : false;
	int ret;

	/* Split the start address into page address + in-page column */
	col = addr & mtd->writesize_mask;
	addr &= ~mtd->writesize_mask;
	maxooblen = mtd_oobavail(mtd, ops);
	ooboffs = ops->ooboffs;
	ooblen = ops->ooblen;
	len = ops->len;

	/* Data fills the start of the bounce buffer, OOB right after it */
	datcache = len ? msm->page_cache : NULL;
	oobcache = ooblen ? msm->page_cache + mtd->writesize : NULL;

	ops->oobretlen = 0;
	ops->retlen = 0;

	while (len || ooblen) {
		if (len) {
			/* Move data */
			chklen = mtd->writesize - col;
			if (chklen > len)
				chklen = len;

			/* Pad the uncovered head and tail of the page with 0xff */
			memset(datcache, 0xff, col);
			memcpy(datcache + col, ops->datbuf + ops->retlen,
			       chklen);
			memset(datcache + col + chklen, 0xff,
			       mtd->writesize - col - chklen);
			len -= chklen;
			col = 0; /* (col + chklen) % */
			ops->retlen += chklen;
		}

		oobwrlen = 0;
		if (ooblen) {
			/* Move oob */
			chklen = maxooblen - ooboffs;
			if (chklen > ooblen)
				chklen = ooblen;

			/* Pad the uncovered head and tail of the oob with 0xff */
			memset(oobcache, 0xff, ooboffs);
			memcpy(oobcache + ooboffs,
			       ops->oobbuf + ops->oobretlen, chklen);
			memset(oobcache + ooboffs + chklen, 0xff,
			       mtd->oobsize - ooboffs - chklen);
			oobwrlen = chklen + ooboffs;
			ooblen -= chklen;
			ooboffs = 0; /* (ooboffs + chklen) % maxooblen; */
			ops->oobretlen += chklen;
		}

		if (ops->mode == MTD_OPS_AUTO_OOB)
			ret = mtk_snand_write_page_auto_oob(msm->snf, addr,
				datcache, oobcache, oobwrlen, NULL, raw);
		else
			ret = mtk_snand_write_page(msm->snf, addr, datcache,
				oobcache, raw);

		if (ret)
			return ret;

		addr += mtd->writesize;
	}

	return 0;
}
298
299 static int mtk_snand_mtd_write_oob(struct mtd_info *mtd, loff_t to,
300 struct mtd_oob_ops *ops)
301 {
302 struct mtk_snand_mtd *msm = mtd_to_msm(mtd);
303 uint32_t maxooblen;
304 int ret;
305
306 if (!ops->oobbuf && !ops->datbuf) {
307 if (ops->ooblen || ops->len)
308 return -EINVAL;
309
310 return 0;
311 }
312
313 switch (ops->mode) {
314 case MTD_OPS_PLACE_OOB:
315 case MTD_OPS_AUTO_OOB:
316 case MTD_OPS_RAW:
317 break;
318 default:
319 dev_err(msm->pdev.dev, "unsupported oob mode: %u\n", ops->mode);
320 return -EINVAL;
321 }
322
323 maxooblen = mtd_oobavail(mtd, ops);
324
325 /* Do not allow write past end of device */
326 if (ops->datbuf && (to + ops->len) > msm->cinfo.chipsize) {
327 dev_err(msm->pdev.dev,
328 "attempt to write beyond end of device\n");
329 return -EINVAL;
330 }
331
332 if (unlikely(ops->ooboffs >= maxooblen)) {
333 dev_err(msm->pdev.dev,
334 "attempt to start write outside oob\n");
335 return -EINVAL;
336 }
337
338 if (unlikely(to >= msm->cinfo.chipsize ||
339 ops->ooboffs + ops->ooblen >
340 ((msm->cinfo.chipsize >> mtd->writesize_shift) -
341 (to >> mtd->writesize_shift)) *
342 maxooblen)) {
343 dev_err(msm->pdev.dev,
344 "attempt to write beyond end of device\n");
345 return -EINVAL;
346 }
347
348 mutex_lock(&msm->lock);
349 ret = mtk_snand_mtd_write_data(msm, to, ops);
350 mutex_unlock(&msm->lock);
351
352 return ret;
353 }
354
355 static int mtk_snand_mtd_block_isbad(struct mtd_info *mtd, loff_t offs)
356 {
357 struct mtk_snand_mtd *msm = mtd_to_msm(mtd);
358 int ret;
359
360 mutex_lock(&msm->lock);
361 ret = mtk_snand_block_isbad(msm->snf, offs);
362 mutex_unlock(&msm->lock);
363
364 return ret;
365 }
366
367 static int mtk_snand_mtd_block_markbad(struct mtd_info *mtd, loff_t offs)
368 {
369 struct mtk_snand_mtd *msm = mtd_to_msm(mtd);
370 int ret;
371
372 mutex_lock(&msm->lock);
373 ret = mtk_snand_block_markbad(msm->snf, offs);
374 mutex_unlock(&msm->lock);
375
376 return ret;
377 }
378
379 static int mtk_snand_ooblayout_ecc(struct mtd_info *mtd, int section,
380 struct mtd_oob_region *oobecc)
381 {
382 struct mtk_snand_mtd *msm = mtd_to_msm(mtd);
383
384 if (section)
385 return -ERANGE;
386
387 oobecc->offset = msm->cinfo.fdm_size * msm->cinfo.num_sectors;
388 oobecc->length = mtd->oobsize - oobecc->offset;
389
390 return 0;
391 }
392
393 static int mtk_snand_ooblayout_free(struct mtd_info *mtd, int section,
394 struct mtd_oob_region *oobfree)
395 {
396 struct mtk_snand_mtd *msm = mtd_to_msm(mtd);
397
398 if (section >= msm->cinfo.num_sectors)
399 return -ERANGE;
400
401 oobfree->length = msm->cinfo.fdm_size - 1;
402 oobfree->offset = section * msm->cinfo.fdm_size + 1;
403
404 return 0;
405 }
406
407 static irqreturn_t mtk_snand_irq(int irq, void *id)
408 {
409 struct mtk_snand_mtd *msm = id;
410 int ret;
411
412 ret = mtk_snand_irq_process(msm->snf);
413 if (ret > 0)
414 return IRQ_HANDLED;
415
416 return IRQ_NONE;
417 }
418
419 static int mtk_snand_enable_clk(struct mtk_snand_mtd *msm)
420 {
421 int ret;
422
423 ret = clk_prepare_enable(msm->nfi_clk);
424 if (ret) {
425 dev_err(msm->pdev.dev, "unable to enable nfi clk\n");
426 return ret;
427 }
428
429 ret = clk_prepare_enable(msm->pad_clk);
430 if (ret) {
431 dev_err(msm->pdev.dev, "unable to enable pad clk\n");
432 clk_disable_unprepare(msm->nfi_clk);
433 return ret;
434 }
435
436 ret = clk_prepare_enable(msm->ecc_clk);
437 if (ret) {
438 dev_err(msm->pdev.dev, "unable to enable ecc clk\n");
439 clk_disable_unprepare(msm->nfi_clk);
440 clk_disable_unprepare(msm->pad_clk);
441 return ret;
442 }
443
444 return 0;
445 }
446
/* Disable all clocks enabled by mtk_snand_enable_clk(). */
static void mtk_snand_disable_clk(struct mtk_snand_mtd *msm)
{
	clk_disable_unprepare(msm->nfi_clk);
	clk_disable_unprepare(msm->pad_clk);
	clk_disable_unprepare(msm->ecc_clk);
}
453
/* OOB layout ops: ECC region after the FDM area, free bytes inside it. */
static const struct mtd_ooblayout_ops mtk_snand_ooblayout = {
	.ecc = mtk_snand_ooblayout_ecc,
	.free = mtk_snand_ooblayout_free,
};
458
static struct mtk_snand_of_id mt7622_soc_id = { .soc = SNAND_SOC_MT7622 };
static struct mtk_snand_of_id mt7629_soc_id = { .soc = SNAND_SOC_MT7629 };

/* DT compatibles; .data selects the SoC-specific core configuration. */
static const struct of_device_id mtk_snand_ids[] = {
	{ .compatible = "mediatek,mt7622-snand", .data = &mt7622_soc_id },
	{ .compatible = "mediatek,mt7629-snand", .data = &mt7629_soc_id },
	{ },
};

MODULE_DEVICE_TABLE(of, mtk_snand_ids);
469
470 static int mtk_snand_probe(struct platform_device *pdev)
471 {
472 struct mtk_snand_platdata mtk_snand_pdata = {};
473 struct device_node *np = pdev->dev.of_node;
474 const struct of_device_id *of_soc_id;
475 const struct mtk_snand_of_id *soc_id;
476 struct mtk_snand_mtd *msm;
477 struct mtd_info *mtd;
478 struct resource *r;
479 uint32_t size;
480 int ret;
481
482 of_soc_id = of_match_node(mtk_snand_ids, np);
483 if (!of_soc_id)
484 return -EINVAL;
485
486 soc_id = of_soc_id->data;
487
488 msm = devm_kzalloc(&pdev->dev, sizeof(*msm), GFP_KERNEL);
489 if (!msm)
490 return -ENOMEM;
491
492 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nfi");
493 msm->nfi_regs = devm_ioremap_resource(&pdev->dev, r);
494 if (IS_ERR(msm->nfi_regs)) {
495 ret = PTR_ERR(msm->nfi_regs);
496 goto errout1;
497 }
498
499 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ecc");
500 msm->ecc_regs = devm_ioremap_resource(&pdev->dev, r);
501 if (IS_ERR(msm->ecc_regs)) {
502 ret = PTR_ERR(msm->ecc_regs);
503 goto errout1;
504 }
505
506 msm->pdev.dev = &pdev->dev;
507 msm->quad_spi = of_property_read_bool(np, "mediatek,quad-spi");
508 msm->soc = soc_id->soc;
509
510 msm->nfi_clk = devm_clk_get(msm->pdev.dev, "nfi_clk");
511 if (IS_ERR(msm->nfi_clk)) {
512 ret = PTR_ERR(msm->nfi_clk);
513 dev_err(msm->pdev.dev, "unable to get nfi_clk, err = %d\n",
514 ret);
515 goto errout1;
516 }
517
518 msm->ecc_clk = devm_clk_get(msm->pdev.dev, "ecc_clk");
519 if (IS_ERR(msm->ecc_clk)) {
520 ret = PTR_ERR(msm->ecc_clk);
521 dev_err(msm->pdev.dev, "unable to get ecc_clk, err = %d\n",
522 ret);
523 goto errout1;
524 }
525
526 msm->pad_clk = devm_clk_get(msm->pdev.dev, "pad_clk");
527 if (IS_ERR(msm->pad_clk)) {
528 ret = PTR_ERR(msm->pad_clk);
529 dev_err(msm->pdev.dev, "unable to get pad_clk, err = %d\n",
530 ret);
531 goto errout1;
532 }
533
534 ret = mtk_snand_enable_clk(msm);
535 if (ret)
536 goto errout1;
537
538 /* Probe SPI-NAND Flash */
539 mtk_snand_pdata.soc = msm->soc;
540 mtk_snand_pdata.quad_spi = msm->quad_spi;
541 mtk_snand_pdata.nfi_base = msm->nfi_regs;
542 mtk_snand_pdata.ecc_base = msm->ecc_regs;
543
544 ret = mtk_snand_init(&msm->pdev, &mtk_snand_pdata, &msm->snf);
545 if (ret)
546 goto errout1;
547
548 msm->irq = platform_get_irq(pdev, 0);
549 if (msm->irq >= 0) {
550 ret = devm_request_irq(msm->pdev.dev, msm->irq, mtk_snand_irq,
551 0x0, "mtk-snand", msm);
552 if (ret) {
553 dev_err(msm->pdev.dev, "failed to request snfi irq\n");
554 goto errout2;
555 }
556
557 ret = dma_set_mask(msm->pdev.dev, DMA_BIT_MASK(32));
558 if (ret) {
559 dev_err(msm->pdev.dev, "failed to set dma mask\n");
560 goto errout3;
561 }
562 }
563
564 mtk_snand_get_chip_info(msm->snf, &msm->cinfo);
565
566 size = msm->cinfo.pagesize + msm->cinfo.sparesize;
567 msm->page_cache = devm_kmalloc(msm->pdev.dev, size, GFP_KERNEL);
568 if (!msm->page_cache) {
569 dev_err(msm->pdev.dev, "failed to allocate page cache\n");
570 ret = -ENOMEM;
571 goto errout3;
572 }
573
574 mutex_init(&msm->lock);
575
576 dev_info(msm->pdev.dev,
577 "chip is %s, size %lluMB, page size %u, oob size %u\n",
578 msm->cinfo.model, msm->cinfo.chipsize >> 20,
579 msm->cinfo.pagesize, msm->cinfo.sparesize);
580
581 /* Initialize mtd for SPI-NAND */
582 mtd = &msm->mtd;
583
584 mtd->owner = THIS_MODULE;
585 mtd->dev.parent = &pdev->dev;
586 mtd->type = MTD_NANDFLASH;
587 mtd->flags = MTD_CAP_NANDFLASH;
588
589 mtd_set_of_node(mtd, np);
590
591 mtd->size = msm->cinfo.chipsize;
592 mtd->erasesize = msm->cinfo.blocksize;
593 mtd->writesize = msm->cinfo.pagesize;
594 mtd->writebufsize = mtd->writesize;
595 mtd->oobsize = msm->cinfo.sparesize;
596 mtd->oobavail = msm->cinfo.num_sectors * (msm->cinfo.fdm_size - 1);
597
598 mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
599 mtd->writesize_shift = ffs(mtd->writesize) - 1;
600 mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
601 mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;
602
603 mtd->ooblayout = &mtk_snand_ooblayout;
604
605 mtd->ecc_strength = msm->cinfo.ecc_strength;
606 mtd->bitflip_threshold = (mtd->ecc_strength * 3) / 4;
607 mtd->ecc_step_size = msm->cinfo.sector_size;
608
609 mtd->_erase = mtk_snand_mtd_erase;
610 mtd->_read_oob = mtk_snand_mtd_read_oob;
611 mtd->_write_oob = mtk_snand_mtd_write_oob;
612 mtd->_block_isbad = mtk_snand_mtd_block_isbad;
613 mtd->_block_markbad = mtk_snand_mtd_block_markbad;
614
615 ret = mtd_device_register(mtd, NULL, 0);
616 if (ret) {
617 dev_err(msm->pdev.dev, "failed to register mtd partition\n");
618 goto errout4;
619 }
620
621 platform_set_drvdata(pdev, msm);
622
623 return 0;
624
625 errout4:
626 devm_kfree(msm->pdev.dev, msm->page_cache);
627
628 errout3:
629 if (msm->irq >= 0)
630 devm_free_irq(msm->pdev.dev, msm->irq, msm);
631
632 errout2:
633 mtk_snand_cleanup(msm->snf);
634
635 errout1:
636 devm_kfree(msm->pdev.dev, msm);
637
638 platform_set_drvdata(pdev, NULL);
639
640 return ret;
641 }
642
/*
 * mtk_snand_remove - platform driver remove hook
 *
 * Unregisters the MTD device first (may fail if partitions are still in
 * use, in which case the device stays bound), then tears down the core
 * driver, IRQ, clocks and buffers in reverse probe order.
 */
static int mtk_snand_remove(struct platform_device *pdev)
{
	struct mtk_snand_mtd *msm = platform_get_drvdata(pdev);
	struct mtd_info *mtd = &msm->mtd;
	int ret;

	ret = mtd_device_unregister(mtd);
	if (ret)
		return ret;

	mtk_snand_cleanup(msm->snf);

	if (msm->irq >= 0)
		devm_free_irq(msm->pdev.dev, msm->irq, msm);

	mtk_snand_disable_clk(msm);

	devm_kfree(msm->pdev.dev, msm->page_cache);
	devm_kfree(msm->pdev.dev, msm);

	platform_set_drvdata(pdev, NULL);

	return 0;
}
667
668 static struct platform_driver mtk_snand_driver = {
669 .probe = mtk_snand_probe,
670 .remove = mtk_snand_remove,
671 .driver = {
672 .name = "mtk-snand",
673 .of_match_table = mtk_snand_ids,
674 },
675 };
676
677 module_platform_driver(mtk_snand_driver);
678
679 MODULE_LICENSE("GPL");
680 MODULE_AUTHOR("Weijie Gao <weijie.gao@mediatek.com>");
681 MODULE_DESCRIPTION("MeidaTek SPI-NAND Flash Controller Driver");