// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 MediaTek Inc. All Rights Reserved.
 *
 * Author: Weijie Gao <weijie.gao@mediatek.com>
 */
8 #include <linux/kernel.h>
9 #include <linux/module.h>
10 #include <linux/init.h>
11 #include <linux/device.h>
12 #include <linux/mutex.h>
13 #include <linux/clk.h>
14 #include <linux/slab.h>
15 #include <linux/interrupt.h>
16 #include <linux/dma-mapping.h>
17 #include <linux/wait.h>
18 #include <linux/mtd/mtd.h>
19 #include <linux/mtd/partitions.h>
20 #include <linux/of_platform.h>
22 #include "mtk-snand.h"
23 #include "mtk-snand-os.h"
25 struct mtk_snand_of_id
{
26 enum mtk_snand_soc soc
;
29 struct mtk_snand_mtd
{
30 struct mtk_snand_plat_dev pdev
;
36 void __iomem
*nfi_regs
;
37 void __iomem
*ecc_regs
;
42 enum mtk_snand_soc soc
;
45 struct mtk_snand
*snf
;
46 struct mtk_snand_chip_info cinfo
;
/* Map an mtd_info embedded in struct mtk_snand_mtd back to its container. */
#define mtd_to_msm(mtd) container_of(mtd, struct mtk_snand_mtd, mtd)
53 static int mtk_snand_mtd_erase(struct mtd_info
*mtd
, struct erase_info
*instr
)
55 struct mtk_snand_mtd
*msm
= mtd_to_msm(mtd
);
56 u64 start_addr
, end_addr
;
59 /* Do not allow write past end of device */
60 if ((instr
->addr
+ instr
->len
) > msm
->cinfo
.chipsize
) {
61 dev_err(msm
->pdev
.dev
,
62 "attempt to erase beyond end of device\n");
66 start_addr
= instr
->addr
& (~mtd
->erasesize_mask
);
67 end_addr
= instr
->addr
+ instr
->len
;
68 if (end_addr
& mtd
->erasesize_mask
) {
69 end_addr
= (end_addr
+ mtd
->erasesize_mask
) &
70 (~mtd
->erasesize_mask
);
73 mutex_lock(&msm
->lock
);
75 while (start_addr
< end_addr
) {
76 if (mtk_snand_block_isbad(msm
->snf
, start_addr
)) {
77 instr
->fail_addr
= start_addr
;
82 ret
= mtk_snand_erase_block(msm
->snf
, start_addr
);
84 instr
->fail_addr
= start_addr
;
88 start_addr
+= mtd
->erasesize
;
91 mutex_unlock(&msm
->lock
);
96 static int mtk_snand_mtd_read_data(struct mtk_snand_mtd
*msm
, uint64_t addr
,
97 struct mtd_oob_ops
*ops
)
99 struct mtd_info
*mtd
= &msm
->mtd
;
100 size_t len
, ooblen
, maxooblen
, chklen
;
101 uint32_t col
, ooboffs
;
102 uint8_t *datcache
, *oobcache
;
103 bool ecc_failed
= false, raw
= ops
->mode
== MTD_OPS_RAW
? true : false;
104 int ret
, max_bitflips
= 0;
106 col
= addr
& mtd
->writesize_mask
;
107 addr
&= ~mtd
->writesize_mask
;
108 maxooblen
= mtd_oobavail(mtd
, ops
);
109 ooboffs
= ops
->ooboffs
;
110 ooblen
= ops
->ooblen
;
113 datcache
= len
? msm
->page_cache
: NULL
;
114 oobcache
= ooblen
? msm
->page_cache
+ mtd
->writesize
: NULL
;
119 while (len
|| ooblen
) {
120 if (ops
->mode
== MTD_OPS_AUTO_OOB
)
121 ret
= mtk_snand_read_page_auto_oob(msm
->snf
, addr
,
122 datcache
, oobcache
, maxooblen
, NULL
, raw
);
124 ret
= mtk_snand_read_page(msm
->snf
, addr
, datcache
,
127 if (ret
< 0 && ret
!= -EBADMSG
)
130 if (ret
== -EBADMSG
) {
131 mtd
->ecc_stats
.failed
++;
134 mtd
->ecc_stats
.corrected
+= ret
;
135 max_bitflips
= max_t(int, ret
, max_bitflips
);
140 chklen
= mtd
->writesize
- col
;
144 memcpy(ops
->datbuf
+ ops
->retlen
, datcache
+ col
,
147 col
= 0; /* (col + chklen) % */
148 ops
->retlen
+= chklen
;
153 chklen
= maxooblen
- ooboffs
;
157 memcpy(ops
->oobbuf
+ ops
->oobretlen
, oobcache
+ ooboffs
,
160 ooboffs
= 0; /* (ooboffs + chklen) % maxooblen; */
161 ops
->oobretlen
+= chklen
;
164 addr
+= mtd
->writesize
;
167 return ecc_failed
? -EBADMSG
: max_bitflips
;
170 static int mtk_snand_mtd_read_oob(struct mtd_info
*mtd
, loff_t from
,
171 struct mtd_oob_ops
*ops
)
173 struct mtk_snand_mtd
*msm
= mtd_to_msm(mtd
);
177 if (!ops
->oobbuf
&& !ops
->datbuf
) {
178 if (ops
->ooblen
|| ops
->len
)
185 case MTD_OPS_PLACE_OOB
:
186 case MTD_OPS_AUTO_OOB
:
190 dev_err(msm
->pdev
.dev
, "unsupported oob mode: %u\n", ops
->mode
);
194 maxooblen
= mtd_oobavail(mtd
, ops
);
196 /* Do not allow read past end of device */
197 if (ops
->datbuf
&& (from
+ ops
->len
) > msm
->cinfo
.chipsize
) {
198 dev_err(msm
->pdev
.dev
,
199 "attempt to read beyond end of device\n");
203 if (unlikely(ops
->ooboffs
>= maxooblen
)) {
204 dev_err(msm
->pdev
.dev
, "attempt to start read outside oob\n");
208 if (unlikely(from
>= msm
->cinfo
.chipsize
||
209 ops
->ooboffs
+ ops
->ooblen
>
210 ((msm
->cinfo
.chipsize
>> mtd
->writesize_shift
) -
211 (from
>> mtd
->writesize_shift
)) *
213 dev_err(msm
->pdev
.dev
,
214 "attempt to read beyond end of device\n");
218 mutex_lock(&msm
->lock
);
219 ret
= mtk_snand_mtd_read_data(msm
, from
, ops
);
220 mutex_unlock(&msm
->lock
);
225 static int mtk_snand_mtd_write_data(struct mtk_snand_mtd
*msm
, uint64_t addr
,
226 struct mtd_oob_ops
*ops
)
228 struct mtd_info
*mtd
= &msm
->mtd
;
229 size_t len
, ooblen
, maxooblen
, chklen
, oobwrlen
;
230 uint32_t col
, ooboffs
;
231 uint8_t *datcache
, *oobcache
;
232 bool raw
= ops
->mode
== MTD_OPS_RAW
? true : false;
235 col
= addr
& mtd
->writesize_mask
;
236 addr
&= ~mtd
->writesize_mask
;
237 maxooblen
= mtd_oobavail(mtd
, ops
);
238 ooboffs
= ops
->ooboffs
;
239 ooblen
= ops
->ooblen
;
242 datcache
= len
? msm
->page_cache
: NULL
;
243 oobcache
= ooblen
? msm
->page_cache
+ mtd
->writesize
: NULL
;
248 while (len
|| ooblen
) {
251 chklen
= mtd
->writesize
- col
;
255 memset(datcache
, 0xff, col
);
256 memcpy(datcache
+ col
, ops
->datbuf
+ ops
->retlen
,
258 memset(datcache
+ col
+ chklen
, 0xff,
259 mtd
->writesize
- col
- chklen
);
261 col
= 0; /* (col + chklen) % */
262 ops
->retlen
+= chklen
;
268 chklen
= maxooblen
- ooboffs
;
272 memset(oobcache
, 0xff, ooboffs
);
273 memcpy(oobcache
+ ooboffs
,
274 ops
->oobbuf
+ ops
->oobretlen
, chklen
);
275 memset(oobcache
+ ooboffs
+ chklen
, 0xff,
276 mtd
->oobsize
- ooboffs
- chklen
);
277 oobwrlen
= chklen
+ ooboffs
;
279 ooboffs
= 0; /* (ooboffs + chklen) % maxooblen; */
280 ops
->oobretlen
+= chklen
;
283 if (ops
->mode
== MTD_OPS_AUTO_OOB
)
284 ret
= mtk_snand_write_page_auto_oob(msm
->snf
, addr
,
285 datcache
, oobcache
, oobwrlen
, NULL
, raw
);
287 ret
= mtk_snand_write_page(msm
->snf
, addr
, datcache
,
293 addr
+= mtd
->writesize
;
299 static int mtk_snand_mtd_write_oob(struct mtd_info
*mtd
, loff_t to
,
300 struct mtd_oob_ops
*ops
)
302 struct mtk_snand_mtd
*msm
= mtd_to_msm(mtd
);
306 if (!ops
->oobbuf
&& !ops
->datbuf
) {
307 if (ops
->ooblen
|| ops
->len
)
314 case MTD_OPS_PLACE_OOB
:
315 case MTD_OPS_AUTO_OOB
:
319 dev_err(msm
->pdev
.dev
, "unsupported oob mode: %u\n", ops
->mode
);
323 maxooblen
= mtd_oobavail(mtd
, ops
);
325 /* Do not allow write past end of device */
326 if (ops
->datbuf
&& (to
+ ops
->len
) > msm
->cinfo
.chipsize
) {
327 dev_err(msm
->pdev
.dev
,
328 "attempt to write beyond end of device\n");
332 if (unlikely(ops
->ooboffs
>= maxooblen
)) {
333 dev_err(msm
->pdev
.dev
,
334 "attempt to start write outside oob\n");
338 if (unlikely(to
>= msm
->cinfo
.chipsize
||
339 ops
->ooboffs
+ ops
->ooblen
>
340 ((msm
->cinfo
.chipsize
>> mtd
->writesize_shift
) -
341 (to
>> mtd
->writesize_shift
)) *
343 dev_err(msm
->pdev
.dev
,
344 "attempt to write beyond end of device\n");
348 mutex_lock(&msm
->lock
);
349 ret
= mtk_snand_mtd_write_data(msm
, to
, ops
);
350 mutex_unlock(&msm
->lock
);
355 static int mtk_snand_mtd_block_isbad(struct mtd_info
*mtd
, loff_t offs
)
357 struct mtk_snand_mtd
*msm
= mtd_to_msm(mtd
);
360 mutex_lock(&msm
->lock
);
361 ret
= mtk_snand_block_isbad(msm
->snf
, offs
);
362 mutex_unlock(&msm
->lock
);
367 static int mtk_snand_mtd_block_markbad(struct mtd_info
*mtd
, loff_t offs
)
369 struct mtk_snand_mtd
*msm
= mtd_to_msm(mtd
);
372 mutex_lock(&msm
->lock
);
373 ret
= mtk_snand_block_markbad(msm
->snf
, offs
);
374 mutex_unlock(&msm
->lock
);
379 static int mtk_snand_ooblayout_ecc(struct mtd_info
*mtd
, int section
,
380 struct mtd_oob_region
*oobecc
)
382 struct mtk_snand_mtd
*msm
= mtd_to_msm(mtd
);
387 oobecc
->offset
= msm
->cinfo
.fdm_size
* msm
->cinfo
.num_sectors
;
388 oobecc
->length
= mtd
->oobsize
- oobecc
->offset
;
393 static int mtk_snand_ooblayout_free(struct mtd_info
*mtd
, int section
,
394 struct mtd_oob_region
*oobfree
)
396 struct mtk_snand_mtd
*msm
= mtd_to_msm(mtd
);
398 if (section
>= msm
->cinfo
.num_sectors
)
401 oobfree
->length
= msm
->cinfo
.fdm_size
- 1;
402 oobfree
->offset
= section
* msm
->cinfo
.fdm_size
+ 1;
407 static irqreturn_t
mtk_snand_irq(int irq
, void *id
)
409 struct mtk_snand_mtd
*msm
= id
;
412 ret
= mtk_snand_irq_process(msm
->snf
);
419 static int mtk_snand_enable_clk(struct mtk_snand_mtd
*msm
)
423 ret
= clk_prepare_enable(msm
->nfi_clk
);
425 dev_err(msm
->pdev
.dev
, "unable to enable nfi clk\n");
429 ret
= clk_prepare_enable(msm
->pad_clk
);
431 dev_err(msm
->pdev
.dev
, "unable to enable pad clk\n");
432 clk_disable_unprepare(msm
->nfi_clk
);
436 ret
= clk_prepare_enable(msm
->ecc_clk
);
438 dev_err(msm
->pdev
.dev
, "unable to enable ecc clk\n");
439 clk_disable_unprepare(msm
->nfi_clk
);
440 clk_disable_unprepare(msm
->pad_clk
);
447 static void mtk_snand_disable_clk(struct mtk_snand_mtd
*msm
)
449 clk_disable_unprepare(msm
->nfi_clk
);
450 clk_disable_unprepare(msm
->pad_clk
);
451 clk_disable_unprepare(msm
->ecc_clk
);
454 static const struct mtd_ooblayout_ops mtk_snand_ooblayout
= {
455 .ecc
= mtk_snand_ooblayout_ecc
,
456 .free
= mtk_snand_ooblayout_free
,
459 static struct mtk_snand_of_id mt7622_soc_id
= { .soc
= SNAND_SOC_MT7622
};
460 static struct mtk_snand_of_id mt7629_soc_id
= { .soc
= SNAND_SOC_MT7629
};
462 static const struct of_device_id mtk_snand_ids
[] = {
463 { .compatible
= "mediatek,mt7622-snand", .data
= &mt7622_soc_id
},
464 { .compatible
= "mediatek,mt7629-snand", .data
= &mt7629_soc_id
},
468 MODULE_DEVICE_TABLE(of
, mtk_snand_ids
);
470 static int mtk_snand_probe(struct platform_device
*pdev
)
472 struct mtk_snand_platdata mtk_snand_pdata
= {};
473 struct device_node
*np
= pdev
->dev
.of_node
;
474 const struct of_device_id
*of_soc_id
;
475 const struct mtk_snand_of_id
*soc_id
;
476 struct mtk_snand_mtd
*msm
;
477 struct mtd_info
*mtd
;
482 of_soc_id
= of_match_node(mtk_snand_ids
, np
);
486 soc_id
= of_soc_id
->data
;
488 msm
= devm_kzalloc(&pdev
->dev
, sizeof(*msm
), GFP_KERNEL
);
492 r
= platform_get_resource_byname(pdev
, IORESOURCE_MEM
, "nfi");
493 msm
->nfi_regs
= devm_ioremap_resource(&pdev
->dev
, r
);
494 if (IS_ERR(msm
->nfi_regs
)) {
495 ret
= PTR_ERR(msm
->nfi_regs
);
499 r
= platform_get_resource_byname(pdev
, IORESOURCE_MEM
, "ecc");
500 msm
->ecc_regs
= devm_ioremap_resource(&pdev
->dev
, r
);
501 if (IS_ERR(msm
->ecc_regs
)) {
502 ret
= PTR_ERR(msm
->ecc_regs
);
506 msm
->pdev
.dev
= &pdev
->dev
;
507 msm
->quad_spi
= of_property_read_bool(np
, "mediatek,quad-spi");
508 msm
->soc
= soc_id
->soc
;
510 msm
->nfi_clk
= devm_clk_get(msm
->pdev
.dev
, "nfi_clk");
511 if (IS_ERR(msm
->nfi_clk
)) {
512 ret
= PTR_ERR(msm
->nfi_clk
);
513 dev_err(msm
->pdev
.dev
, "unable to get nfi_clk, err = %d\n",
518 msm
->ecc_clk
= devm_clk_get(msm
->pdev
.dev
, "ecc_clk");
519 if (IS_ERR(msm
->ecc_clk
)) {
520 ret
= PTR_ERR(msm
->ecc_clk
);
521 dev_err(msm
->pdev
.dev
, "unable to get ecc_clk, err = %d\n",
526 msm
->pad_clk
= devm_clk_get(msm
->pdev
.dev
, "pad_clk");
527 if (IS_ERR(msm
->pad_clk
)) {
528 ret
= PTR_ERR(msm
->pad_clk
);
529 dev_err(msm
->pdev
.dev
, "unable to get pad_clk, err = %d\n",
534 ret
= mtk_snand_enable_clk(msm
);
538 /* Probe SPI-NAND Flash */
539 mtk_snand_pdata
.soc
= msm
->soc
;
540 mtk_snand_pdata
.quad_spi
= msm
->quad_spi
;
541 mtk_snand_pdata
.nfi_base
= msm
->nfi_regs
;
542 mtk_snand_pdata
.ecc_base
= msm
->ecc_regs
;
544 ret
= mtk_snand_init(&msm
->pdev
, &mtk_snand_pdata
, &msm
->snf
);
548 msm
->irq
= platform_get_irq(pdev
, 0);
550 ret
= devm_request_irq(msm
->pdev
.dev
, msm
->irq
, mtk_snand_irq
,
551 0x0, "mtk-snand", msm
);
553 dev_err(msm
->pdev
.dev
, "failed to request snfi irq\n");
557 ret
= dma_set_mask(msm
->pdev
.dev
, DMA_BIT_MASK(32));
559 dev_err(msm
->pdev
.dev
, "failed to set dma mask\n");
564 mtk_snand_get_chip_info(msm
->snf
, &msm
->cinfo
);
566 size
= msm
->cinfo
.pagesize
+ msm
->cinfo
.sparesize
;
567 msm
->page_cache
= devm_kmalloc(msm
->pdev
.dev
, size
, GFP_KERNEL
);
568 if (!msm
->page_cache
) {
569 dev_err(msm
->pdev
.dev
, "failed to allocate page cache\n");
574 mutex_init(&msm
->lock
);
576 dev_info(msm
->pdev
.dev
,
577 "chip is %s, size %lluMB, page size %u, oob size %u\n",
578 msm
->cinfo
.model
, msm
->cinfo
.chipsize
>> 20,
579 msm
->cinfo
.pagesize
, msm
->cinfo
.sparesize
);
581 /* Initialize mtd for SPI-NAND */
584 mtd
->owner
= THIS_MODULE
;
585 mtd
->dev
.parent
= &pdev
->dev
;
586 mtd
->type
= MTD_NANDFLASH
;
587 mtd
->flags
= MTD_CAP_NANDFLASH
;
589 mtd_set_of_node(mtd
, np
);
591 mtd
->size
= msm
->cinfo
.chipsize
;
592 mtd
->erasesize
= msm
->cinfo
.blocksize
;
593 mtd
->writesize
= msm
->cinfo
.pagesize
;
594 mtd
->writebufsize
= mtd
->writesize
;
595 mtd
->oobsize
= msm
->cinfo
.sparesize
;
596 mtd
->oobavail
= msm
->cinfo
.num_sectors
* (msm
->cinfo
.fdm_size
- 1);
598 mtd
->erasesize_shift
= ffs(mtd
->erasesize
) - 1;
599 mtd
->writesize_shift
= ffs(mtd
->writesize
) - 1;
600 mtd
->erasesize_mask
= (1 << mtd
->erasesize_shift
) - 1;
601 mtd
->writesize_mask
= (1 << mtd
->writesize_shift
) - 1;
603 mtd
->ooblayout
= &mtk_snand_ooblayout
;
605 mtd
->ecc_strength
= msm
->cinfo
.ecc_strength
;
606 mtd
->bitflip_threshold
= (mtd
->ecc_strength
* 3) / 4;
607 mtd
->ecc_step_size
= msm
->cinfo
.sector_size
;
609 mtd
->_erase
= mtk_snand_mtd_erase
;
610 mtd
->_read_oob
= mtk_snand_mtd_read_oob
;
611 mtd
->_write_oob
= mtk_snand_mtd_write_oob
;
612 mtd
->_block_isbad
= mtk_snand_mtd_block_isbad
;
613 mtd
->_block_markbad
= mtk_snand_mtd_block_markbad
;
615 ret
= mtd_device_register(mtd
, NULL
, 0);
617 dev_err(msm
->pdev
.dev
, "failed to register mtd partition\n");
621 platform_set_drvdata(pdev
, msm
);
626 devm_kfree(msm
->pdev
.dev
, msm
->page_cache
);
630 devm_free_irq(msm
->pdev
.dev
, msm
->irq
, msm
);
633 mtk_snand_cleanup(msm
->snf
);
636 devm_kfree(msm
->pdev
.dev
, msm
);
638 platform_set_drvdata(pdev
, NULL
);
643 static int mtk_snand_remove(struct platform_device
*pdev
)
645 struct mtk_snand_mtd
*msm
= platform_get_drvdata(pdev
);
646 struct mtd_info
*mtd
= &msm
->mtd
;
649 ret
= mtd_device_unregister(mtd
);
653 mtk_snand_cleanup(msm
->snf
);
656 devm_free_irq(msm
->pdev
.dev
, msm
->irq
, msm
);
658 mtk_snand_disable_clk(msm
);
660 devm_kfree(msm
->pdev
.dev
, msm
->page_cache
);
661 devm_kfree(msm
->pdev
.dev
, msm
);
663 platform_set_drvdata(pdev
, NULL
);
668 static struct platform_driver mtk_snand_driver
= {
669 .probe
= mtk_snand_probe
,
670 .remove
= mtk_snand_remove
,
673 .of_match_table
= mtk_snand_ids
,
677 module_platform_driver(mtk_snand_driver
);
679 MODULE_LICENSE("GPL");
680 MODULE_AUTHOR("Weijie Gao <weijie.gao@mediatek.com>");
681 MODULE_DESCRIPTION("MeidaTek SPI-NAND Flash Controller Driver");