// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 MediaTek Inc. All Rights Reserved.
 *
 * Author: Weijie Gao <weijie.gao@mediatek.com>
 */
8 #include <linux/kernel.h>
9 #include <linux/module.h>
10 #include <linux/init.h>
11 #include <linux/device.h>
12 #include <linux/mutex.h>
13 #include <linux/clk.h>
14 #include <linux/slab.h>
15 #include <linux/interrupt.h>
16 #include <linux/dma-mapping.h>
17 #include <linux/wait.h>
18 #include <linux/mtd/mtd.h>
19 #include <linux/mtd/partitions.h>
20 #include <linux/of_platform.h>
22 #include "mtk-snand.h"
23 #include "mtk-snand-os.h"
25 struct mtk_snand_of_id
{
26 enum mtk_snand_soc soc
;
29 struct mtk_snand_mtd
{
30 struct mtk_snand_plat_dev pdev
;
36 void __iomem
*nfi_regs
;
37 void __iomem
*ecc_regs
;
42 enum mtk_snand_soc soc
;
45 struct mtk_snand
*snf
;
46 struct mtk_snand_chip_info cinfo
;
51 #define mtd_to_msm(mtd) container_of(mtd, struct mtk_snand_mtd, mtd)
53 static int mtk_snand_mtd_erase(struct mtd_info
*mtd
, struct erase_info
*instr
)
55 struct mtk_snand_mtd
*msm
= mtd_to_msm(mtd
);
56 u64 start_addr
, end_addr
;
59 /* Do not allow write past end of device */
60 if ((instr
->addr
+ instr
->len
) > mtd
->size
) {
61 dev_err(msm
->pdev
.dev
,
62 "attempt to erase beyond end of device\n");
66 start_addr
= instr
->addr
& (~mtd
->erasesize_mask
);
67 end_addr
= instr
->addr
+ instr
->len
;
68 if (end_addr
& mtd
->erasesize_mask
) {
69 end_addr
= (end_addr
+ mtd
->erasesize_mask
) &
70 (~mtd
->erasesize_mask
);
73 mutex_lock(&msm
->lock
);
75 while (start_addr
< end_addr
) {
76 if (mtk_snand_block_isbad(msm
->snf
, start_addr
)) {
77 instr
->fail_addr
= start_addr
;
82 ret
= mtk_snand_erase_block(msm
->snf
, start_addr
);
84 instr
->fail_addr
= start_addr
;
88 start_addr
+= mtd
->erasesize
;
91 mutex_unlock(&msm
->lock
);
96 static int mtk_snand_mtd_read_data(struct mtk_snand_mtd
*msm
, uint64_t addr
,
97 struct mtd_oob_ops
*ops
)
99 struct mtd_info
*mtd
= &msm
->mtd
;
100 size_t len
, ooblen
, maxooblen
, chklen
;
101 uint32_t col
, ooboffs
;
102 uint8_t *datcache
, *oobcache
;
103 bool ecc_failed
= false, raw
= ops
->mode
== MTD_OPS_RAW
? true : false;
104 int ret
, max_bitflips
= 0;
106 col
= addr
& mtd
->writesize_mask
;
107 addr
&= ~mtd
->writesize_mask
;
108 maxooblen
= mtd_oobavail(mtd
, ops
);
109 ooboffs
= ops
->ooboffs
;
110 ooblen
= ops
->ooblen
;
113 datcache
= len
? msm
->page_cache
: NULL
;
114 oobcache
= ooblen
? msm
->page_cache
+ mtd
->writesize
: NULL
;
119 while (len
|| ooblen
) {
120 if (ops
->mode
== MTD_OPS_AUTO_OOB
)
121 ret
= mtk_snand_read_page_auto_oob(msm
->snf
, addr
,
122 datcache
, oobcache
, maxooblen
, NULL
, raw
);
124 ret
= mtk_snand_read_page(msm
->snf
, addr
, datcache
,
127 if (ret
< 0 && ret
!= -EBADMSG
)
130 if (ret
== -EBADMSG
) {
131 mtd
->ecc_stats
.failed
++;
134 mtd
->ecc_stats
.corrected
+= ret
;
135 max_bitflips
= max_t(int, ret
, max_bitflips
);
140 chklen
= mtd
->writesize
- col
;
144 memcpy(ops
->datbuf
+ ops
->retlen
, datcache
+ col
,
147 col
= 0; /* (col + chklen) % */
148 ops
->retlen
+= chklen
;
153 chklen
= maxooblen
- ooboffs
;
157 memcpy(ops
->oobbuf
+ ops
->oobretlen
, oobcache
+ ooboffs
,
160 ooboffs
= 0; /* (ooboffs + chklen) % maxooblen; */
161 ops
->oobretlen
+= chklen
;
164 addr
+= mtd
->writesize
;
167 return ecc_failed
? -EBADMSG
: max_bitflips
;
170 static int mtk_snand_mtd_read_oob(struct mtd_info
*mtd
, loff_t from
,
171 struct mtd_oob_ops
*ops
)
173 struct mtk_snand_mtd
*msm
= mtd_to_msm(mtd
);
177 if (!ops
->oobbuf
&& !ops
->datbuf
) {
178 if (ops
->ooblen
|| ops
->len
)
185 case MTD_OPS_PLACE_OOB
:
186 case MTD_OPS_AUTO_OOB
:
190 dev_err(msm
->pdev
.dev
, "unsupported oob mode: %u\n", ops
->mode
);
194 maxooblen
= mtd_oobavail(mtd
, ops
);
196 /* Do not allow read past end of device */
197 if (ops
->datbuf
&& (from
+ ops
->len
) > mtd
->size
) {
198 dev_err(msm
->pdev
.dev
,
199 "attempt to read beyond end of device\n");
203 if (unlikely(ops
->ooboffs
>= maxooblen
)) {
204 dev_err(msm
->pdev
.dev
, "attempt to start read outside oob\n");
208 if (unlikely(from
>= mtd
->size
||
209 ops
->ooboffs
+ ops
->ooblen
> ((mtd
->size
>> mtd
->writesize_shift
) -
210 (from
>> mtd
->writesize_shift
)) * maxooblen
)) {
211 dev_err(msm
->pdev
.dev
,
212 "attempt to read beyond end of device\n");
216 mutex_lock(&msm
->lock
);
217 ret
= mtk_snand_mtd_read_data(msm
, from
, ops
);
218 mutex_unlock(&msm
->lock
);
223 static int mtk_snand_mtd_write_data(struct mtk_snand_mtd
*msm
, uint64_t addr
,
224 struct mtd_oob_ops
*ops
)
226 struct mtd_info
*mtd
= &msm
->mtd
;
227 size_t len
, ooblen
, maxooblen
, chklen
, oobwrlen
;
228 uint32_t col
, ooboffs
;
229 uint8_t *datcache
, *oobcache
;
230 bool raw
= ops
->mode
== MTD_OPS_RAW
? true : false;
233 col
= addr
& mtd
->writesize_mask
;
234 addr
&= ~mtd
->writesize_mask
;
235 maxooblen
= mtd_oobavail(mtd
, ops
);
236 ooboffs
= ops
->ooboffs
;
237 ooblen
= ops
->ooblen
;
240 datcache
= len
? msm
->page_cache
: NULL
;
241 oobcache
= ooblen
? msm
->page_cache
+ mtd
->writesize
: NULL
;
246 while (len
|| ooblen
) {
249 chklen
= mtd
->writesize
- col
;
253 memset(datcache
, 0xff, col
);
254 memcpy(datcache
+ col
, ops
->datbuf
+ ops
->retlen
,
256 memset(datcache
+ col
+ chklen
, 0xff,
257 mtd
->writesize
- col
- chklen
);
259 col
= 0; /* (col + chklen) % */
260 ops
->retlen
+= chklen
;
266 chklen
= maxooblen
- ooboffs
;
270 memset(oobcache
, 0xff, ooboffs
);
271 memcpy(oobcache
+ ooboffs
,
272 ops
->oobbuf
+ ops
->oobretlen
, chklen
);
273 memset(oobcache
+ ooboffs
+ chklen
, 0xff,
274 mtd
->oobsize
- ooboffs
- chklen
);
275 oobwrlen
= chklen
+ ooboffs
;
277 ooboffs
= 0; /* (ooboffs + chklen) % maxooblen; */
278 ops
->oobretlen
+= chklen
;
281 if (ops
->mode
== MTD_OPS_AUTO_OOB
)
282 ret
= mtk_snand_write_page_auto_oob(msm
->snf
, addr
,
283 datcache
, oobcache
, oobwrlen
, NULL
, raw
);
285 ret
= mtk_snand_write_page(msm
->snf
, addr
, datcache
,
291 addr
+= mtd
->writesize
;
297 static int mtk_snand_mtd_write_oob(struct mtd_info
*mtd
, loff_t to
,
298 struct mtd_oob_ops
*ops
)
300 struct mtk_snand_mtd
*msm
= mtd_to_msm(mtd
);
304 if (!ops
->oobbuf
&& !ops
->datbuf
) {
305 if (ops
->ooblen
|| ops
->len
)
312 case MTD_OPS_PLACE_OOB
:
313 case MTD_OPS_AUTO_OOB
:
317 dev_err(msm
->pdev
.dev
, "unsupported oob mode: %u\n", ops
->mode
);
321 maxooblen
= mtd_oobavail(mtd
, ops
);
323 /* Do not allow write past end of device */
324 if (ops
->datbuf
&& (to
+ ops
->len
) > mtd
->size
) {
325 dev_err(msm
->pdev
.dev
,
326 "attempt to write beyond end of device\n");
330 if (unlikely(ops
->ooboffs
>= maxooblen
)) {
331 dev_err(msm
->pdev
.dev
,
332 "attempt to start write outside oob\n");
336 if (unlikely(to
>= mtd
->size
||
337 ops
->ooboffs
+ ops
->ooblen
> ((mtd
->size
>> mtd
->writesize_shift
) -
338 (to
>> mtd
->writesize_shift
)) * maxooblen
)) {
339 dev_err(msm
->pdev
.dev
,
340 "attempt to write beyond end of device\n");
344 mutex_lock(&msm
->lock
);
345 ret
= mtk_snand_mtd_write_data(msm
, to
, ops
);
346 mutex_unlock(&msm
->lock
);
351 static int mtk_snand_mtd_block_isbad(struct mtd_info
*mtd
, loff_t offs
)
353 struct mtk_snand_mtd
*msm
= mtd_to_msm(mtd
);
356 mutex_lock(&msm
->lock
);
357 ret
= mtk_snand_block_isbad(msm
->snf
, offs
);
358 mutex_unlock(&msm
->lock
);
363 static int mtk_snand_mtd_block_markbad(struct mtd_info
*mtd
, loff_t offs
)
365 struct mtk_snand_mtd
*msm
= mtd_to_msm(mtd
);
368 mutex_lock(&msm
->lock
);
369 ret
= mtk_snand_block_markbad(msm
->snf
, offs
);
370 mutex_unlock(&msm
->lock
);
375 static int mtk_snand_ooblayout_ecc(struct mtd_info
*mtd
, int section
,
376 struct mtd_oob_region
*oobecc
)
378 struct mtk_snand_mtd
*msm
= mtd_to_msm(mtd
);
383 oobecc
->offset
= msm
->cinfo
.fdm_size
* msm
->cinfo
.num_sectors
;
384 oobecc
->length
= mtd
->oobsize
- oobecc
->offset
;
389 static int mtk_snand_ooblayout_free(struct mtd_info
*mtd
, int section
,
390 struct mtd_oob_region
*oobfree
)
392 struct mtk_snand_mtd
*msm
= mtd_to_msm(mtd
);
394 if (section
>= msm
->cinfo
.num_sectors
)
397 oobfree
->length
= msm
->cinfo
.fdm_size
- 1;
398 oobfree
->offset
= section
* msm
->cinfo
.fdm_size
+ 1;
403 static irqreturn_t
mtk_snand_irq(int irq
, void *id
)
405 struct mtk_snand_mtd
*msm
= id
;
408 ret
= mtk_snand_irq_process(msm
->snf
);
415 static int mtk_snand_enable_clk(struct mtk_snand_mtd
*msm
)
419 ret
= clk_prepare_enable(msm
->nfi_clk
);
421 dev_err(msm
->pdev
.dev
, "unable to enable nfi clk\n");
425 ret
= clk_prepare_enable(msm
->pad_clk
);
427 dev_err(msm
->pdev
.dev
, "unable to enable pad clk\n");
428 clk_disable_unprepare(msm
->nfi_clk
);
432 ret
= clk_prepare_enable(msm
->ecc_clk
);
434 dev_err(msm
->pdev
.dev
, "unable to enable ecc clk\n");
435 clk_disable_unprepare(msm
->nfi_clk
);
436 clk_disable_unprepare(msm
->pad_clk
);
443 static void mtk_snand_disable_clk(struct mtk_snand_mtd
*msm
)
445 clk_disable_unprepare(msm
->nfi_clk
);
446 clk_disable_unprepare(msm
->pad_clk
);
447 clk_disable_unprepare(msm
->ecc_clk
);
450 static const struct mtd_ooblayout_ops mtk_snand_ooblayout
= {
451 .ecc
= mtk_snand_ooblayout_ecc
,
452 .free
= mtk_snand_ooblayout_free
,
455 static struct mtk_snand_of_id mt7622_soc_id
= { .soc
= SNAND_SOC_MT7622
};
456 static struct mtk_snand_of_id mt7629_soc_id
= { .soc
= SNAND_SOC_MT7629
};
458 static const struct of_device_id mtk_snand_ids
[] = {
459 { .compatible
= "mediatek,mt7622-snand", .data
= &mt7622_soc_id
},
460 { .compatible
= "mediatek,mt7629-snand", .data
= &mt7629_soc_id
},
464 MODULE_DEVICE_TABLE(of
, mtk_snand_ids
);
466 static int mtk_snand_probe(struct platform_device
*pdev
)
468 struct mtk_snand_platdata mtk_snand_pdata
= {};
469 struct device_node
*np
= pdev
->dev
.of_node
;
470 const struct of_device_id
*of_soc_id
;
471 const struct mtk_snand_of_id
*soc_id
;
472 struct mtk_snand_mtd
*msm
;
473 struct mtd_info
*mtd
;
478 of_soc_id
= of_match_node(mtk_snand_ids
, np
);
482 soc_id
= of_soc_id
->data
;
484 msm
= devm_kzalloc(&pdev
->dev
, sizeof(*msm
), GFP_KERNEL
);
488 r
= platform_get_resource_byname(pdev
, IORESOURCE_MEM
, "nfi");
489 msm
->nfi_regs
= devm_ioremap_resource(&pdev
->dev
, r
);
490 if (IS_ERR(msm
->nfi_regs
)) {
491 ret
= PTR_ERR(msm
->nfi_regs
);
495 r
= platform_get_resource_byname(pdev
, IORESOURCE_MEM
, "ecc");
496 msm
->ecc_regs
= devm_ioremap_resource(&pdev
->dev
, r
);
497 if (IS_ERR(msm
->ecc_regs
)) {
498 ret
= PTR_ERR(msm
->ecc_regs
);
502 msm
->pdev
.dev
= &pdev
->dev
;
503 msm
->quad_spi
= of_property_read_bool(np
, "mediatek,quad-spi");
504 msm
->soc
= soc_id
->soc
;
506 msm
->nfi_clk
= devm_clk_get(msm
->pdev
.dev
, "nfi_clk");
507 if (IS_ERR(msm
->nfi_clk
)) {
508 ret
= PTR_ERR(msm
->nfi_clk
);
509 dev_err(msm
->pdev
.dev
, "unable to get nfi_clk, err = %d\n",
514 msm
->ecc_clk
= devm_clk_get(msm
->pdev
.dev
, "ecc_clk");
515 if (IS_ERR(msm
->ecc_clk
)) {
516 ret
= PTR_ERR(msm
->ecc_clk
);
517 dev_err(msm
->pdev
.dev
, "unable to get ecc_clk, err = %d\n",
522 msm
->pad_clk
= devm_clk_get(msm
->pdev
.dev
, "pad_clk");
523 if (IS_ERR(msm
->pad_clk
)) {
524 ret
= PTR_ERR(msm
->pad_clk
);
525 dev_err(msm
->pdev
.dev
, "unable to get pad_clk, err = %d\n",
530 ret
= mtk_snand_enable_clk(msm
);
534 /* Probe SPI-NAND Flash */
535 mtk_snand_pdata
.soc
= msm
->soc
;
536 mtk_snand_pdata
.quad_spi
= msm
->quad_spi
;
537 mtk_snand_pdata
.nfi_base
= msm
->nfi_regs
;
538 mtk_snand_pdata
.ecc_base
= msm
->ecc_regs
;
540 ret
= mtk_snand_init(&msm
->pdev
, &mtk_snand_pdata
, &msm
->snf
);
544 msm
->irq
= platform_get_irq(pdev
, 0);
546 ret
= devm_request_irq(msm
->pdev
.dev
, msm
->irq
, mtk_snand_irq
,
547 0x0, "mtk-snand", msm
);
549 dev_err(msm
->pdev
.dev
, "failed to request snfi irq\n");
553 ret
= dma_set_mask(msm
->pdev
.dev
, DMA_BIT_MASK(32));
555 dev_err(msm
->pdev
.dev
, "failed to set dma mask\n");
560 mtk_snand_get_chip_info(msm
->snf
, &msm
->cinfo
);
562 size
= msm
->cinfo
.pagesize
+ msm
->cinfo
.sparesize
;
563 msm
->page_cache
= devm_kmalloc(msm
->pdev
.dev
, size
, GFP_KERNEL
);
564 if (!msm
->page_cache
) {
565 dev_err(msm
->pdev
.dev
, "failed to allocate page cache\n");
570 mutex_init(&msm
->lock
);
572 dev_info(msm
->pdev
.dev
,
573 "chip is %s, size %lluMB, page size %u, oob size %u\n",
574 msm
->cinfo
.model
, msm
->cinfo
.chipsize
>> 20,
575 msm
->cinfo
.pagesize
, msm
->cinfo
.sparesize
);
577 /* Initialize mtd for SPI-NAND */
580 mtd
->owner
= THIS_MODULE
;
581 mtd
->dev
.parent
= &pdev
->dev
;
582 mtd
->type
= MTD_NANDFLASH
;
583 mtd
->flags
= MTD_CAP_NANDFLASH
;
585 mtd_set_of_node(mtd
, np
);
587 mtd
->size
= msm
->cinfo
.chipsize
;
588 mtd
->erasesize
= msm
->cinfo
.blocksize
;
589 mtd
->writesize
= msm
->cinfo
.pagesize
;
590 mtd
->writebufsize
= mtd
->writesize
;
591 mtd
->oobsize
= msm
->cinfo
.sparesize
;
592 mtd
->oobavail
= msm
->cinfo
.num_sectors
* (msm
->cinfo
.fdm_size
- 1);
594 mtd
->erasesize_shift
= ffs(mtd
->erasesize
) - 1;
595 mtd
->writesize_shift
= ffs(mtd
->writesize
) - 1;
596 mtd
->erasesize_mask
= (1 << mtd
->erasesize_shift
) - 1;
597 mtd
->writesize_mask
= (1 << mtd
->writesize_shift
) - 1;
599 mtd
->ooblayout
= &mtk_snand_ooblayout
;
601 mtd
->ecc_strength
= msm
->cinfo
.ecc_strength
;
602 mtd
->bitflip_threshold
= (mtd
->ecc_strength
* 3) / 4;
603 mtd
->ecc_step_size
= msm
->cinfo
.sector_size
;
605 mtd
->_erase
= mtk_snand_mtd_erase
;
606 mtd
->_read_oob
= mtk_snand_mtd_read_oob
;
607 mtd
->_write_oob
= mtk_snand_mtd_write_oob
;
608 mtd
->_block_isbad
= mtk_snand_mtd_block_isbad
;
609 mtd
->_block_markbad
= mtk_snand_mtd_block_markbad
;
611 ret
= mtd_device_register(mtd
, NULL
, 0);
613 dev_err(msm
->pdev
.dev
, "failed to register mtd partition\n");
617 platform_set_drvdata(pdev
, msm
);
622 devm_kfree(msm
->pdev
.dev
, msm
->page_cache
);
626 devm_free_irq(msm
->pdev
.dev
, msm
->irq
, msm
);
629 mtk_snand_cleanup(msm
->snf
);
632 devm_kfree(msm
->pdev
.dev
, msm
);
634 platform_set_drvdata(pdev
, NULL
);
639 static int mtk_snand_remove(struct platform_device
*pdev
)
641 struct mtk_snand_mtd
*msm
= platform_get_drvdata(pdev
);
642 struct mtd_info
*mtd
= &msm
->mtd
;
645 ret
= mtd_device_unregister(mtd
);
649 mtk_snand_cleanup(msm
->snf
);
652 devm_free_irq(msm
->pdev
.dev
, msm
->irq
, msm
);
654 mtk_snand_disable_clk(msm
);
656 devm_kfree(msm
->pdev
.dev
, msm
->page_cache
);
657 devm_kfree(msm
->pdev
.dev
, msm
);
659 platform_set_drvdata(pdev
, NULL
);
664 static struct platform_driver mtk_snand_driver
= {
665 .probe
= mtk_snand_probe
,
666 .remove
= mtk_snand_remove
,
669 .of_match_table
= mtk_snand_ids
,
673 module_platform_driver(mtk_snand_driver
);
675 MODULE_LICENSE("GPL");
676 MODULE_AUTHOR("Weijie Gao <weijie.gao@mediatek.com>");
677 MODULE_DESCRIPTION("MeidaTek SPI-NAND Flash Controller Driver");