1 From 1ecb38eabd90efe93957d0a822a167560c39308a Mon Sep 17 00:00:00 2001
2 From: Xiangsheng Hou <xiangsheng.hou@mediatek.com>
3 Date: Wed, 20 Mar 2019 16:19:51 +0800
4 Subject: [PATCH 6/6] spi: spi-mem: MediaTek: Add SPI NAND Flash interface
5 driver for MediaTek MT7622
7 Change-Id: I3e78406bb9b46b0049d3988a5c71c7069e4f809c
8 Signed-off-by: Xiangsheng Hou <xiangsheng.hou@mediatek.com>
10 drivers/spi/Kconfig | 9 +
11 drivers/spi/Makefile | 1 +
12 drivers/spi/spi-mtk-snfi.c | 1183 ++++++++++++++++++++++++++++++++++++
13 3 files changed, 1193 insertions(+)
14 create mode 100644 drivers/spi/spi-mtk-snfi.c
16 Index: linux-4.19.48/drivers/spi/spi-mtk-snfi.c
17 ===================================================================
19 +++ linux-4.19.48/drivers/spi/spi-mtk-snfi.c
21 +// SPDX-License-Identifier: GPL-2.0
23 + * Driver for the MediaTek SPI NAND interface
25 + * Copyright (C) 2018 MediaTek Inc.
26 + * Authors: Xiangsheng Hou <xiangsheng.hou@mediatek.com>
30 +#include <linux/clk.h>
31 +#include <linux/delay.h>
32 +#include <linux/dma-mapping.h>
33 +#include <linux/interrupt.h>
34 +#include <linux/iopoll.h>
35 +#include <linux/mtd/mtd.h>
36 +#include <linux/mtd/mtk_ecc.h>
37 +#include <linux/mtd/spinand.h>
38 +#include <linux/module.h>
39 +#include <linux/of.h>
40 +#include <linux/of_device.h>
41 +#include <linux/platform_device.h>
42 +#include <linux/spi/spi.h>
43 +#include <linux/spi/spi-mem.h>
45 +/* NAND controller register definition */
47 +#define NFI_CNFG 0x00
48 +#define CNFG_DMA BIT(0)
49 +#define CNFG_READ_EN BIT(1)
50 +#define CNFG_DMA_BURST_EN BIT(2)
51 +#define CNFG_BYTE_RW BIT(6)
52 +#define CNFG_HW_ECC_EN BIT(8)
53 +#define CNFG_AUTO_FMT_EN BIT(9)
54 +#define CNFG_OP_PROGRAM (3UL << 12)
55 +#define CNFG_OP_CUST (6UL << 12)
56 +#define NFI_PAGEFMT 0x04
57 +#define PAGEFMT_512 0
60 +#define PAGEFMT_FDM_SHIFT 8
61 +#define PAGEFMT_FDM_ECC_SHIFT 12
63 +#define CON_FIFO_FLUSH BIT(0)
64 +#define CON_NFI_RST BIT(1)
65 +#define CON_BRD BIT(8)
66 +#define CON_BWR BIT(9)
67 +#define CON_SEC_SHIFT 12
68 +#define NFI_INTR_EN 0x10
69 +#define INTR_AHB_DONE_EN BIT(6)
70 +#define NFI_INTR_STA 0x14
73 +#define STA_EMP_PAGE BIT(12)
74 +#define NAND_FSM_MASK (0x1f << 24)
75 +#define NFI_FSM_MASK (0xf << 16)
76 +#define NFI_ADDRCNTR 0x70
77 +#define CNTR_MASK GENMASK(16, 12)
78 +#define ADDRCNTR_SEC_SHIFT 12
79 +#define ADDRCNTR_SEC(val) \
80 + (((val) & CNTR_MASK) >> ADDRCNTR_SEC_SHIFT)
81 +#define NFI_STRADDR 0x80
82 +#define NFI_BYTELEN 0x84
83 +#define NFI_CSEL 0x90
84 +#define NFI_FDML(x) (0xa0 + (x) * sizeof(u32) * 2)
85 +#define NFI_FDMM(x) (0xa4 + (x) * sizeof(u32) * 2)
86 +#define NFI_MASTER_STA 0x224
87 +#define MASTER_STA_MASK 0x0fff
88 +/* NFI_SPI control */
89 +#define SNFI_MAC_OUTL 0x504
90 +#define SNFI_MAC_INL 0x508
91 +#define SNFI_RD_CTL2 0x510
92 +#define RD_CMD_MASK 0x00ff
93 +#define RD_DUMMY_SHIFT 8
94 +#define SNFI_RD_CTL3 0x514
95 +#define RD_ADDR_MASK 0xffff
96 +#define SNFI_MISC_CTL 0x538
97 +#define RD_MODE_X2 BIT(16)
98 +#define RD_MODE_X4 (2UL << 16)
99 +#define RD_QDUAL_IO (4UL << 16)
100 +#define RD_MODE_MASK (7UL << 16)
101 +#define RD_CUSTOM_EN BIT(6)
102 +#define WR_CUSTOM_EN BIT(7)
103 +#define WR_X4_EN BIT(20)
104 +#define SW_RST BIT(28)
105 +#define SNFI_MISC_CTL2 0x53c
106 +#define WR_LEN_SHIFT 16
107 +#define SNFI_PG_CTL1 0x524
108 +#define WR_LOAD_CMD_SHIFT 8
109 +#define SNFI_PG_CTL2 0x528
110 +#define WR_LOAD_ADDR_MASK 0xffff
111 +#define SNFI_MAC_CTL 0x500
112 +#define MAC_WIP BIT(0)
113 +#define MAC_WIP_READY BIT(1)
114 +#define MAC_TRIG BIT(2)
115 +#define MAC_EN BIT(3)
116 +#define MAC_SIO_SEL BIT(4)
117 +#define SNFI_STA_CTL1 0x550
118 +#define SPI_STATE_IDLE 0xf
119 +#define SNFI_CNFG 0x55c
120 +#define SNFI_MODE_EN BIT(0)
121 +#define SNFI_GPRAM_DATA 0x800
122 +#define SNFI_GPRAM_MAX_LEN 16
124 +/* Dummy commands that trigger the NFI into SPI mode */
125 +#define NAND_CMD_DUMMYREAD 0x00
126 +#define NAND_CMD_DUMMYPROG 0x80
128 +#define MTK_TIMEOUT 500000
129 +#define MTK_RESET_TIMEOUT 1000000
130 +#define MTK_SNFC_MIN_SPARE 16
131 +#define KB(x) ((x) * 1024UL)
134 + * supported spare size of each IP.
135 + * The order should be the same as the spare size bitfield definition of
136 + * the register NFI_PAGEFMT.
138 +static const u8 spare_size_mt7622[] = {
142 +struct mtk_snfi_caps {
143 + const u8 *spare_size;
147 + u8 nand_fdm_ecc_size;
148 + u8 ecc_parity_bits;
149 + u8 pageformat_spare_shift;
153 +struct mtk_snfi_bad_mark_ctl {
154 + void (*bm_swap)(struct spi_mem *mem, u8 *buf, int raw);
159 +struct mtk_snfi_nand_chip {
160 + struct mtk_snfi_bad_mark_ctl bad_mark;
161 + u32 spare_per_sector;
164 +struct mtk_snfi_clk {
165 + struct clk *nfi_clk;
166 + struct clk *spi_clk;
170 + const struct mtk_snfi_caps *caps;
171 + struct mtk_snfi_nand_chip snfi_nand;
172 + struct mtk_snfi_clk clk;
173 + struct mtk_ecc_config ecc_cfg;
174 + struct mtk_ecc *ecc;
175 + struct completion done;
176 + struct device *dev;
178 + void __iomem *regs;
183 +static inline u8 *oob_ptr(struct spi_mem *mem, int i)
185 + struct spinand_device *spinand = spi_mem_get_drvdata(mem);
186 + struct mtk_snfi *snfi = spi_controller_get_devdata(mem->spi->master);
187 + struct mtk_snfi_nand_chip *snfi_nand = &snfi->snfi_nand;
190 + /* Map the sector's FDM data to the free OOB:
191 + * the beginning of the OOB area stores the FDM data of the bad mark sector
194 + if (i < snfi_nand->bad_mark.sec)
195 + poi = spinand->oobbuf + (i + 1) * snfi->caps->nand_fdm_size;
196 + else if (i == snfi_nand->bad_mark.sec)
197 + poi = spinand->oobbuf;
199 + poi = spinand->oobbuf + i * snfi->caps->nand_fdm_size;
204 +static inline int mtk_data_len(struct spi_mem *mem)
206 + struct mtk_snfi *snfi = spi_controller_get_devdata(mem->spi->master);
207 + struct mtk_snfi_nand_chip *snfi_nand = &snfi->snfi_nand;
209 + return snfi->caps->nand_sec_size + snfi_nand->spare_per_sector;
212 +static inline u8 *mtk_oob_ptr(struct spi_mem *mem,
213 + const u8 *p, int i)
215 + struct mtk_snfi *snfi = spi_controller_get_devdata(mem->spi->master);
217 + return (u8 *)p + i * mtk_data_len(mem) + snfi->caps->nand_sec_size;
220 +static void mtk_snfi_bad_mark_swap(struct spi_mem *mem,
223 + struct spinand_device *spinand = spi_mem_get_drvdata(mem);
224 + struct mtk_snfi *snfi = spi_controller_get_devdata(mem->spi->master);
225 + struct mtk_snfi_nand_chip *snfi_nand = &snfi->snfi_nand;
226 + u32 bad_pos = snfi_nand->bad_mark.pos;
229 + bad_pos += snfi_nand->bad_mark.sec * mtk_data_len(mem);
231 + bad_pos += snfi_nand->bad_mark.sec * snfi->caps->nand_sec_size;
233 + swap(spinand->oobbuf[0], buf[bad_pos]);
236 +static void mtk_snfi_set_bad_mark_ctl(struct mtk_snfi_bad_mark_ctl *bm_ctl,
237 + struct spi_mem *mem)
239 + struct spinand_device *spinand = spi_mem_get_drvdata(mem);
240 + struct mtd_info *mtd = spinand_to_mtd(spinand);
242 + bm_ctl->bm_swap = mtk_snfi_bad_mark_swap;
243 + bm_ctl->sec = mtd->writesize / mtk_data_len(mem);
244 + bm_ctl->pos = mtd->writesize % mtk_data_len(mem);
247 +static void mtk_snfi_mac_enable(struct mtk_snfi *snfi)
251 + mac = readl(snfi->regs + SNFI_MAC_CTL);
252 + mac &= ~MAC_SIO_SEL;
255 + writel(mac, snfi->regs + SNFI_MAC_CTL);
258 +static int mtk_snfi_mac_trigger(struct mtk_snfi *snfi)
263 + mac = readl(snfi->regs + SNFI_MAC_CTL);
265 + writel(mac, snfi->regs + SNFI_MAC_CTL);
267 + ret = readl_poll_timeout_atomic(snfi->regs + SNFI_MAC_CTL, reg,
268 + reg & MAC_WIP_READY, 10,
271 + dev_err(snfi->dev, "polling wip ready for read timeout\n");
275 + ret = readl_poll_timeout_atomic(snfi->regs + SNFI_MAC_CTL, reg,
276 + !(reg & MAC_WIP), 10,
279 + dev_err(snfi->dev, "polling flash update timeout\n");
286 +static void mtk_snfi_mac_leave(struct mtk_snfi *snfi)
290 + mac = readl(snfi->regs + SNFI_MAC_CTL);
291 + mac &= ~(MAC_TRIG | MAC_EN | MAC_SIO_SEL);
292 + writel(mac, snfi->regs + SNFI_MAC_CTL);
295 +static int mtk_snfi_mac_op(struct mtk_snfi *snfi)
299 + mtk_snfi_mac_enable(snfi);
301 + ret = mtk_snfi_mac_trigger(snfi);
305 + mtk_snfi_mac_leave(snfi);
310 +static irqreturn_t mtk_snfi_irq(int irq, void *id)
312 + struct mtk_snfi *snfi = id;
315 + sta = readw(snfi->regs + NFI_INTR_STA);
316 + ien = readw(snfi->regs + NFI_INTR_EN);
321 + writew(~sta & ien, snfi->regs + NFI_INTR_EN);
322 + complete(&snfi->done);
324 + return IRQ_HANDLED;
327 +static int mtk_snfi_enable_clk(struct device *dev, struct mtk_snfi_clk *clk)
331 + ret = clk_prepare_enable(clk->nfi_clk);
333 + dev_err(dev, "failed to enable nfi clk\n");
337 + ret = clk_prepare_enable(clk->spi_clk);
339 + dev_err(dev, "failed to enable spi clk\n");
340 + clk_disable_unprepare(clk->nfi_clk);
347 +static void mtk_snfi_disable_clk(struct mtk_snfi_clk *clk)
349 + clk_disable_unprepare(clk->nfi_clk);
350 + clk_disable_unprepare(clk->spi_clk);
353 +static int mtk_snfi_reset(struct mtk_snfi *snfi)
358 + /* SW reset controller */
359 + val = readl(snfi->regs + SNFI_MISC_CTL) | SW_RST;
360 + writel(val, snfi->regs + SNFI_MISC_CTL);
362 + ret = readw_poll_timeout(snfi->regs + SNFI_STA_CTL1, val,
363 + !(val & SPI_STATE_IDLE), 50,
364 + MTK_RESET_TIMEOUT);
366 + dev_warn(snfi->dev, "spi state active in reset [0x%x] = 0x%x\n",
367 + SNFI_STA_CTL1, val);
371 + val = readl(snfi->regs + SNFI_MISC_CTL);
373 + writel(val, snfi->regs + SNFI_MISC_CTL);
375 + /* reset all registers and force the NFI master to terminate */
376 + writew(CON_FIFO_FLUSH | CON_NFI_RST, snfi->regs + NFI_CON);
377 + ret = readw_poll_timeout(snfi->regs + NFI_STA, val,
378 + !(val & (NFI_FSM_MASK | NAND_FSM_MASK)), 50,
379 + MTK_RESET_TIMEOUT);
381 + dev_warn(snfi->dev, "nfi active in reset [0x%x] = 0x%x\n",
389 +static int mtk_snfi_set_spare_per_sector(struct spinand_device *spinand,
390 + const struct mtk_snfi_caps *caps,
393 + struct mtd_info *mtd = spinand_to_mtd(spinand);
394 + const u8 *spare = caps->spare_size;
395 + u32 sectors, i, closest_spare = 0;
397 + sectors = mtd->writesize / caps->nand_sec_size;
398 + *sps = mtd->oobsize / sectors;
400 + if (*sps < MTK_SNFC_MIN_SPARE)
403 + for (i = 0; i < caps->num_spare_size; i++) {
404 + if (*sps >= spare[i] && spare[i] >= spare[closest_spare]) {
406 + if (*sps == spare[i])
411 + *sps = spare[closest_spare];
416 +static void mtk_snfi_read_fdm_data(struct spi_mem *mem,
419 + struct mtk_snfi *snfi = spi_controller_get_devdata(mem->spi->master);
420 + const struct mtk_snfi_caps *caps = snfi->caps;
425 + for (i = 0; i < sectors; i++) {
426 + oobptr = oob_ptr(mem, i);
427 + vall = readl(snfi->regs + NFI_FDML(i));
428 + valm = readl(snfi->regs + NFI_FDMM(i));
430 + for (j = 0; j < caps->nand_fdm_size; j++)
431 + oobptr[j] = (j >= 4 ? valm : vall) >> ((j % 4) * 8);
435 +static void mtk_snfi_write_fdm_data(struct spi_mem *mem,
438 + struct mtk_snfi *snfi = spi_controller_get_devdata(mem->spi->master);
439 + const struct mtk_snfi_caps *caps = snfi->caps;
444 + for (i = 0; i < sectors; i++) {
445 + oobptr = oob_ptr(mem, i);
448 + for (j = 0; j < 8; j++) {
450 + vall |= (j < caps->nand_fdm_size ? oobptr[j] :
453 + valm |= (j < caps->nand_fdm_size ? oobptr[j] :
454 + 0xff) << ((j - 4) * 8);
456 + writel(vall, snfi->regs + NFI_FDML(i));
457 + writel(valm, snfi->regs + NFI_FDMM(i));
461 +static int mtk_snfi_update_ecc_stats(struct spi_mem *mem,
462 + u8 *buf, u32 sectors)
464 + struct spinand_device *spinand = spi_mem_get_drvdata(mem);
465 + struct mtd_info *mtd = spinand_to_mtd(spinand);
466 + struct mtk_snfi *snfi = spi_controller_get_devdata(mem->spi->master);
467 + struct mtk_ecc_stats stats;
470 + rc = readl(snfi->regs + NFI_STA) & STA_EMP_PAGE;
472 + memset(buf, 0xff, sectors * snfi->caps->nand_sec_size);
473 + for (i = 0; i < sectors; i++)
474 + memset(spinand->oobbuf, 0xff,
475 + snfi->caps->nand_fdm_size);
479 + mtk_ecc_get_stats(snfi->ecc, &stats, sectors);
480 + mtd->ecc_stats.corrected += stats.corrected;
481 + mtd->ecc_stats.failed += stats.failed;
486 +static int mtk_snfi_hw_runtime_config(struct spi_mem *mem)
488 + struct spinand_device *spinand = spi_mem_get_drvdata(mem);
489 + struct mtd_info *mtd = spinand_to_mtd(spinand);
490 + struct nand_device *nand = mtd_to_nanddev(mtd);
491 + struct mtk_snfi *snfi = spi_controller_get_devdata(mem->spi->master);
492 + const struct mtk_snfi_caps *caps = snfi->caps;
493 + struct mtk_snfi_nand_chip *snfi_nand = &snfi->snfi_nand;
494 + u32 fmt, spare, i = 0;
497 + ret = mtk_snfi_set_spare_per_sector(spinand, caps, &spare);
501 + /* calculate usable oob bytes for ecc parity data */
502 + snfi_nand->spare_per_sector = spare;
503 + spare -= caps->nand_fdm_size;
505 + nand->memorg.oobsize = snfi_nand->spare_per_sector
506 + * (mtd->writesize / caps->nand_sec_size);
507 + mtd->oobsize = nanddev_per_page_oobsize(nand);
509 + snfi->ecc_cfg.strength = (spare << 3) / caps->ecc_parity_bits;
510 + mtk_ecc_adjust_strength(snfi->ecc, &snfi->ecc_cfg.strength);
512 + switch (mtd->writesize) {
523 + dev_err(snfi->dev, "invalid page len: %d\n", mtd->writesize);
527 + /* Setup PageFormat */
528 + while (caps->spare_size[i] != snfi_nand->spare_per_sector) {
530 + if (i == (caps->num_spare_size - 1)) {
531 + dev_err(snfi->dev, "invalid spare size %d\n",
532 + snfi_nand->spare_per_sector);
537 + fmt |= i << caps->pageformat_spare_shift;
538 + fmt |= caps->nand_fdm_size << PAGEFMT_FDM_SHIFT;
539 + fmt |= caps->nand_fdm_ecc_size << PAGEFMT_FDM_ECC_SHIFT;
540 + writel(fmt, snfi->regs + NFI_PAGEFMT);
542 + snfi->ecc_cfg.len = caps->nand_sec_size + caps->nand_fdm_ecc_size;
544 + mtk_snfi_set_bad_mark_ctl(&snfi_nand->bad_mark, mem);
549 +static int mtk_snfi_read_from_cache(struct spi_mem *mem,
550 + const struct spi_mem_op *op, int oob_on)
552 + struct mtk_snfi *snfi = spi_controller_get_devdata(mem->spi->master);
553 + struct spinand_device *spinand = spi_mem_get_drvdata(mem);
554 + struct mtd_info *mtd = spinand_to_mtd(spinand);
555 + u32 sectors = mtd->writesize / snfi->caps->nand_sec_size;
556 + struct mtk_snfi_nand_chip *snfi_nand = &snfi->snfi_nand;
557 + u32 reg, len, col_addr = 0;
558 + int dummy_cycle, ret;
559 + dma_addr_t dma_addr;
561 + len = sectors * (snfi->caps->nand_sec_size
562 + + snfi_nand->spare_per_sector);
564 + dma_addr = dma_map_single(snfi->dev, snfi->buffer,
565 + len, DMA_FROM_DEVICE);
566 + ret = dma_mapping_error(snfi->dev, dma_addr);
568 + dev_err(snfi->dev, "dma mapping error\n");
572 + /* set Read cache command and dummy cycle */
573 + dummy_cycle = (op->dummy.nbytes << 3) >> (ffs(op->dummy.buswidth) - 1);
574 + reg = ((op->cmd.opcode & RD_CMD_MASK) |
575 + (dummy_cycle << RD_DUMMY_SHIFT));
576 + writel(reg, snfi->regs + SNFI_RD_CTL2);
578 + writel((col_addr & RD_ADDR_MASK), snfi->regs + SNFI_RD_CTL3);
580 + reg = readl(snfi->regs + SNFI_MISC_CTL);
581 + reg |= RD_CUSTOM_EN;
582 + reg &= ~(RD_MODE_MASK | WR_X4_EN);
584 + /* set data and addr buswidth */
585 + if (op->data.buswidth == 4)
587 + else if (op->data.buswidth == 2)
590 + if (op->addr.buswidth == 4 || op->addr.buswidth == 2)
591 + reg |= RD_QDUAL_IO;
592 + writel(reg, snfi->regs + SNFI_MISC_CTL);
594 + writel(len, snfi->regs + SNFI_MISC_CTL2);
595 + writew(sectors << CON_SEC_SHIFT, snfi->regs + NFI_CON);
596 + reg = readw(snfi->regs + NFI_CNFG);
597 + reg |= CNFG_READ_EN | CNFG_DMA_BURST_EN | CNFG_DMA | CNFG_OP_CUST;
600 + reg |= CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN;
601 + writew(reg, snfi->regs + NFI_CNFG);
603 + snfi->ecc_cfg.mode = ECC_NFI_MODE;
604 + snfi->ecc_cfg.sectors = sectors;
605 + snfi->ecc_cfg.op = ECC_DECODE;
606 + ret = mtk_ecc_enable(snfi->ecc, &snfi->ecc_cfg);
608 + dev_err(snfi->dev, "ecc enable failed\n");
609 + /* clear NFI_CNFG */
610 + reg &= ~(CNFG_READ_EN | CNFG_DMA_BURST_EN | CNFG_DMA |
611 + CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN);
612 + writew(reg, snfi->regs + NFI_CNFG);
616 + writew(reg, snfi->regs + NFI_CNFG);
619 + writel(lower_32_bits(dma_addr), snfi->regs + NFI_STRADDR);
620 + readw(snfi->regs + NFI_INTR_STA);
621 + writew(INTR_AHB_DONE_EN, snfi->regs + NFI_INTR_EN);
623 + init_completion(&snfi->done);
625 + /* set dummy command to trigger NFI enter SPI mode */
626 + writew(NAND_CMD_DUMMYREAD, snfi->regs + NFI_CMD);
627 + reg = readl(snfi->regs + NFI_CON) | CON_BRD;
628 + writew(reg, snfi->regs + NFI_CON);
630 + ret = wait_for_completion_timeout(&snfi->done, msecs_to_jiffies(500));
632 + dev_err(snfi->dev, "read ahb done timeout\n");
633 + writew(0, snfi->regs + NFI_INTR_EN);
638 + ret = readl_poll_timeout_atomic(snfi->regs + NFI_BYTELEN, reg,
639 + ADDRCNTR_SEC(reg) >= sectors, 10,
642 + dev_err(snfi->dev, "polling read byte len timeout\n");
646 + ret = mtk_ecc_wait_done(snfi->ecc, ECC_DECODE);
648 + dev_warn(snfi->dev, "wait ecc done timeout\n");
650 + mtk_snfi_update_ecc_stats(mem, snfi->buffer,
652 + mtk_snfi_read_fdm_data(mem, sectors);
660 + mtk_ecc_disable(snfi->ecc);
662 + dma_unmap_single(snfi->dev, dma_addr, len, DMA_FROM_DEVICE);
663 + writel(0, snfi->regs + NFI_CON);
664 + writel(0, snfi->regs + NFI_CNFG);
665 + reg = readl(snfi->regs + SNFI_MISC_CTL);
666 + reg &= ~RD_CUSTOM_EN;
667 + writel(reg, snfi->regs + SNFI_MISC_CTL);
672 +static int mtk_snfi_write_to_cache(struct spi_mem *mem,
673 + const struct spi_mem_op *op,
676 + struct mtk_snfi *snfi = spi_controller_get_devdata(mem->spi->master);
677 + struct spinand_device *spinand = spi_mem_get_drvdata(mem);
678 + struct mtd_info *mtd = spinand_to_mtd(spinand);
679 + u32 sectors = mtd->writesize / snfi->caps->nand_sec_size;
680 + struct mtk_snfi_nand_chip *snfi_nand = &snfi->snfi_nand;
681 + u32 reg, len, col_addr = 0;
682 + dma_addr_t dma_addr;
685 + len = sectors * (snfi->caps->nand_sec_size
686 + + snfi_nand->spare_per_sector);
688 + dma_addr = dma_map_single(snfi->dev, snfi->buffer, len,
690 + ret = dma_mapping_error(snfi->dev, dma_addr);
692 + dev_err(snfi->dev, "dma mapping error\n");
696 + /* set program load cmd and address */
697 + reg = (op->cmd.opcode << WR_LOAD_CMD_SHIFT);
698 + writel(reg, snfi->regs + SNFI_PG_CTL1);
699 + writel(col_addr & WR_LOAD_ADDR_MASK, snfi->regs + SNFI_PG_CTL2);
701 + reg = readl(snfi->regs + SNFI_MISC_CTL);
702 + reg |= WR_CUSTOM_EN;
703 + reg &= ~(RD_MODE_MASK | WR_X4_EN);
705 + if (op->data.buswidth == 4)
707 + writel(reg, snfi->regs + SNFI_MISC_CTL);
709 + writel(len << WR_LEN_SHIFT, snfi->regs + SNFI_MISC_CTL2);
710 + writew(sectors << CON_SEC_SHIFT, snfi->regs + NFI_CON);
712 + reg = readw(snfi->regs + NFI_CNFG);
713 + reg &= ~(CNFG_READ_EN | CNFG_BYTE_RW);
714 + reg |= CNFG_DMA | CNFG_DMA_BURST_EN | CNFG_OP_PROGRAM;
717 + reg |= CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN;
718 + writew(reg, snfi->regs + NFI_CNFG);
720 + snfi->ecc_cfg.mode = ECC_NFI_MODE;
721 + snfi->ecc_cfg.op = ECC_ENCODE;
722 + ret = mtk_ecc_enable(snfi->ecc, &snfi->ecc_cfg);
724 + dev_err(snfi->dev, "ecc enable failed\n");
725 + /* clear NFI_CNFG */
726 + reg &= ~(CNFG_DMA_BURST_EN | CNFG_DMA |
727 + CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN);
728 + writew(reg, snfi->regs + NFI_CNFG);
729 + dma_unmap_single(snfi->dev, dma_addr, len,
733 + /* write OOB into the FDM registers (OOB area in MTK NAND) */
734 + mtk_snfi_write_fdm_data(mem, sectors);
736 + writew(reg, snfi->regs + NFI_CNFG);
738 + writel(lower_32_bits(dma_addr), snfi->regs + NFI_STRADDR);
739 + readw(snfi->regs + NFI_INTR_STA);
740 + writew(INTR_AHB_DONE_EN, snfi->regs + NFI_INTR_EN);
742 + init_completion(&snfi->done);
744 + /* set dummy command to trigger NFI enter SPI mode */
745 + writew(NAND_CMD_DUMMYPROG, snfi->regs + NFI_CMD);
746 + reg = readl(snfi->regs + NFI_CON) | CON_BWR;
747 + writew(reg, snfi->regs + NFI_CON);
749 + ret = wait_for_completion_timeout(&snfi->done, msecs_to_jiffies(500));
751 + dev_err(snfi->dev, "custom program done timeout\n");
752 + writew(0, snfi->regs + NFI_INTR_EN);
757 + ret = readl_poll_timeout_atomic(snfi->regs + NFI_ADDRCNTR, reg,
758 + ADDRCNTR_SEC(reg) >= sectors,
761 + dev_err(snfi->dev, "hwecc write timeout\n");
764 + mtk_ecc_disable(snfi->ecc);
767 + dma_unmap_single(snfi->dev, dma_addr, len, DMA_TO_DEVICE);
768 + writel(0, snfi->regs + NFI_CON);
769 + writel(0, snfi->regs + NFI_CNFG);
770 + reg = readl(snfi->regs + SNFI_MISC_CTL);
771 + reg &= ~WR_CUSTOM_EN;
772 + writel(reg, snfi->regs + SNFI_MISC_CTL);
777 +static int mtk_snfi_read(struct spi_mem *mem,
778 + const struct spi_mem_op *op)
780 + struct spinand_device *spinand = spi_mem_get_drvdata(mem);
781 + struct mtk_snfi *snfi = spi_controller_get_devdata(mem->spi->master);
782 + struct mtd_info *mtd = spinand_to_mtd(spinand);
783 + struct mtk_snfi_nand_chip *snfi_nand = &snfi->snfi_nand;
784 + u32 col_addr = op->addr.val;
785 + int i, ret, sectors, oob_on = false;
787 + if (col_addr == mtd->writesize)
790 + ret = mtk_snfi_read_from_cache(mem, op, oob_on);
792 + dev_warn(snfi->dev, "read from cache fail\n");
796 + sectors = mtd->writesize / snfi->caps->nand_sec_size;
797 + for (i = 0; i < sectors; i++) {
799 + memcpy(oob_ptr(mem, i),
800 + mtk_oob_ptr(mem, snfi->buffer, i),
801 + snfi->caps->nand_fdm_size);
803 + if (i == snfi_nand->bad_mark.sec && snfi->caps->bad_mark_swap)
804 + snfi_nand->bad_mark.bm_swap(mem, snfi->buffer,
809 + memcpy(spinand->databuf, snfi->buffer, mtd->writesize);
814 +static int mtk_snfi_write(struct spi_mem *mem,
815 + const struct spi_mem_op *op)
817 + struct spinand_device *spinand = spi_mem_get_drvdata(mem);
818 + struct mtk_snfi *snfi = spi_controller_get_devdata(mem->spi->master);
819 + struct mtd_info *mtd = spinand_to_mtd(spinand);
820 + struct mtk_snfi_nand_chip *snfi_nand = &snfi->snfi_nand;
821 + u32 ret, i, sectors, col_addr = op->addr.val;
822 + int oob_on = false;
824 + if (col_addr == mtd->writesize)
827 + sectors = mtd->writesize / snfi->caps->nand_sec_size;
828 + memset(snfi->buffer, 0xff, mtd->writesize + mtd->oobsize);
831 + memcpy(snfi->buffer, spinand->databuf, mtd->writesize);
833 + for (i = 0; i < sectors; i++) {
834 + if (i == snfi_nand->bad_mark.sec && snfi->caps->bad_mark_swap)
835 + snfi_nand->bad_mark.bm_swap(mem, snfi->buffer, oob_on);
838 + memcpy(mtk_oob_ptr(mem, snfi->buffer, i),
840 + snfi->caps->nand_fdm_size);
843 + ret = mtk_snfi_write_to_cache(mem, op, oob_on);
845 + dev_warn(snfi->dev, "write to cache fail\n");
850 +static int mtk_snfi_command_exec(struct mtk_snfi *snfi,
851 + const u8 *txbuf, u8 *rxbuf,
852 + const u32 txlen, const u32 rxlen)
854 + u32 tmp, i, j, reg, m;
855 + u8 *p_tmp = (u8 *)(&tmp);
858 + /* Moving tx data to NFI_SPI GPRAM */
859 + for (i = 0, m = 0; i < txlen; ) {
860 + for (j = 0, tmp = 0; i < txlen && j < 4; i++, j++)
861 + p_tmp[j] = txbuf[i];
863 + writel(tmp, snfi->regs + SNFI_GPRAM_DATA + m);
867 + writel(txlen, snfi->regs + SNFI_MAC_OUTL);
868 + writel(rxlen, snfi->regs + SNFI_MAC_INL);
869 + ret = mtk_snfi_mac_op(snfi);
873 + /* For NULL input data, this loop will be skipped */
875 + for (i = 0, m = 0; i < rxlen; ) {
876 + reg = readl(snfi->regs +
877 + SNFI_GPRAM_DATA + m);
878 + for (j = 0; i < rxlen && j < 4; i++, j++, rxbuf++) {
879 + if (m == 0 && i == 0)
881 + *rxbuf = (reg >> (j * 8)) & 0xFF;
890 + * mtk_snfi_exec_op - to process command/data to send to the
891 + * SPI NAND by mtk controller
893 +static int mtk_snfi_exec_op(struct spi_mem *mem,
894 + const struct spi_mem_op *op)
897 + struct mtk_snfi *snfi = spi_controller_get_devdata(mem->spi->master);
898 + struct spinand_device *spinand = spi_mem_get_drvdata(mem);
899 + struct mtd_info *mtd = spinand_to_mtd(spinand);
900 + struct nand_device *nand = mtd_to_nanddev(mtd);
901 + const struct spi_mem_op *read_cache;
902 + const struct spi_mem_op *write_cache;
903 + u32 tmpbufsize, txlen = 0, rxlen = 0;
904 + u8 *txbuf, *rxbuf = NULL, *buf;
907 + ret = mtk_snfi_reset(snfi);
909 + dev_warn(snfi->dev, "reset spi memory controller fail\n");
913 + /* if the bbt is initialized, the framework has already detected the NAND information */
914 + if (nand->bbt.cache) {
915 + read_cache = spinand->op_templates.read_cache;
916 + write_cache = spinand->op_templates.write_cache;
918 + ret = mtk_snfi_hw_runtime_config(mem);
922 + /* For Read/Write with cache, Erase use framework flow */
923 + if (op->cmd.opcode == read_cache->cmd.opcode) {
924 + ret = mtk_snfi_read(mem, op);
926 + dev_warn(snfi->dev, "snfi read fail\n");
928 + } else if (op->cmd.opcode == write_cache->cmd.opcode) {
929 + ret = mtk_snfi_write(mem, op);
931 + dev_warn(snfi->dev, "snfi write fail\n");
936 + tmpbufsize = sizeof(op->cmd.opcode) + op->addr.nbytes +
937 + op->dummy.nbytes + op->data.nbytes;
939 + txbuf = kzalloc(tmpbufsize, GFP_KERNEL);
943 + txbuf[txlen++] = op->cmd.opcode;
945 + if (op->addr.nbytes)
946 + for (i = 0; i < op->addr.nbytes; i++)
947 + txbuf[txlen++] = op->addr.val >>
948 + (8 * (op->addr.nbytes - i - 1));
950 + txlen += op->dummy.nbytes;
952 + if (op->data.dir == SPI_MEM_DATA_OUT)
953 + for (i = 0; i < op->data.nbytes; i++) {
954 + buf = (u8 *)op->data.buf.out;
955 + txbuf[txlen++] = buf[i];
958 + if (op->data.dir == SPI_MEM_DATA_IN) {
959 + rxbuf = (u8 *)op->data.buf.in;
960 + rxlen += op->data.nbytes;
963 + ret = mtk_snfi_command_exec(snfi, txbuf, rxbuf, txlen, rxlen);
969 +static int mtk_snfi_init(struct mtk_snfi *snfi)
973 + /* Reset the state machine and data FIFO */
974 + ret = mtk_snfi_reset(snfi);
976 + dev_warn(snfi->dev, "MTK reset controller fail\n");
980 + snfi->buffer = devm_kzalloc(snfi->dev, 4096 + 256, GFP_KERNEL);
984 + /* Clear interrupt, read clear. */
985 + readw(snfi->regs + NFI_INTR_STA);
986 + writew(0, snfi->regs + NFI_INTR_EN);
988 + writel(0, snfi->regs + NFI_CON);
989 + writel(0, snfi->regs + NFI_CNFG);
991 + /* Change to NFI_SPI mode. */
992 + writel(SNFI_MODE_EN, snfi->regs + SNFI_CNFG);
997 +static int mtk_snfi_check_buswidth(u8 width)
1012 +static bool mtk_snfi_supports_op(struct spi_mem *mem,
1013 + const struct spi_mem_op *op)
1017 + /* For the MTK SPI NAND controller, the cmd buswidth only supports 1 bit */
1018 + if (op->cmd.buswidth != 1)
1021 + if (op->addr.nbytes)
1022 + ret |= mtk_snfi_check_buswidth(op->addr.buswidth);
1024 + if (op->dummy.nbytes)
1025 + ret |= mtk_snfi_check_buswidth(op->dummy.buswidth);
1027 + if (op->data.nbytes)
1028 + ret |= mtk_snfi_check_buswidth(op->data.buswidth);
1036 +static const struct spi_controller_mem_ops mtk_snfi_ops = {
1037 + .supports_op = mtk_snfi_supports_op,
1038 + .exec_op = mtk_snfi_exec_op,
1041 +static const struct mtk_snfi_caps snfi_mt7622 = {
1042 + .spare_size = spare_size_mt7622,
1043 + .num_spare_size = 4,
1044 + .nand_sec_size = 512,
1045 + .nand_fdm_size = 8,
1046 + .nand_fdm_ecc_size = 1,
1047 + .ecc_parity_bits = 13,
1048 + .pageformat_spare_shift = 4,
1049 + .bad_mark_swap = 0,
1052 +static const struct of_device_id mtk_snfi_id_table[] = {
1053 + { .compatible = "mediatek,mt7622-snfi", .data = &snfi_mt7622, },
1054 + { /* sentinel */ }
1057 +static int mtk_snfi_probe(struct platform_device *pdev)
1059 + struct device *dev = &pdev->dev;
1060 + struct device_node *np = dev->of_node;
1061 + struct spi_controller *ctlr;
1062 + struct mtk_snfi *snfi;
1063 + struct resource *res;
1066 + ctlr = spi_alloc_master(&pdev->dev, sizeof(*snfi));
1070 + snfi = spi_controller_get_devdata(ctlr);
1071 + snfi->caps = of_device_get_match_data(dev);
1074 + snfi->ecc = of_mtk_ecc_get(np);
1075 + if (IS_ERR_OR_NULL(snfi->ecc))
1076 + goto err_put_master;
1078 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1079 + snfi->regs = devm_ioremap_resource(dev, res);
1080 + if (IS_ERR(snfi->regs)) {
1081 + ret = PTR_ERR(snfi->regs);
1085 + /* find the clocks */
1086 + snfi->clk.nfi_clk = devm_clk_get(dev, "nfi_clk");
1087 + if (IS_ERR(snfi->clk.nfi_clk)) {
1088 + dev_err(dev, "no nfi clk\n");
1089 + ret = PTR_ERR(snfi->clk.nfi_clk);
1093 + snfi->clk.spi_clk = devm_clk_get(dev, "spi_clk");
1094 + if (IS_ERR(snfi->clk.spi_clk)) {
1095 + dev_err(dev, "no spi clk\n");
1096 + ret = PTR_ERR(snfi->clk.spi_clk);
1100 + ret = mtk_snfi_enable_clk(dev, &snfi->clk);
1104 + /* find the irq */
1105 + irq = platform_get_irq(pdev, 0);
1107 + dev_err(dev, "no snfi irq resource\n");
1112 + ret = devm_request_irq(dev, irq, mtk_snfi_irq, 0, "mtk-snfi", snfi);
1114 + dev_err(dev, "failed to request snfi irq\n");
1118 + ret = dma_set_mask(dev, DMA_BIT_MASK(32));
1120 + dev_err(dev, "failed to set dma mask\n");
1124 + ctlr->dev.of_node = np;
1125 + ctlr->mem_ops = &mtk_snfi_ops;
1127 + platform_set_drvdata(pdev, snfi);
1128 + ret = mtk_snfi_init(snfi);
1130 + dev_err(dev, "failed to init snfi\n");
1134 + ret = devm_spi_register_master(dev, ctlr);
1141 + mtk_snfi_disable_clk(&snfi->clk);
1144 + mtk_ecc_release(snfi->ecc);
1147 + spi_master_put(ctlr);
1149 + dev_err(dev, "MediaTek SPI NAND interface probe failed %d\n", ret);
1153 +static int mtk_snfi_remove(struct platform_device *pdev)
1155 + struct mtk_snfi *snfi = platform_get_drvdata(pdev);
1157 + mtk_snfi_disable_clk(&snfi->clk);
1162 +static int mtk_snfi_suspend(struct platform_device *pdev, pm_message_t state)
1164 + struct mtk_snfi *snfi = platform_get_drvdata(pdev);
1166 + mtk_snfi_disable_clk(&snfi->clk);
1171 +static int mtk_snfi_resume(struct platform_device *pdev)
1173 + struct device *dev = &pdev->dev;
1174 + struct mtk_snfi *snfi = dev_get_drvdata(dev);
1177 + ret = mtk_snfi_enable_clk(dev, &snfi->clk);
1181 + ret = mtk_snfi_init(snfi);
1183 + dev_err(dev, "failed to init snfi controller\n");
1188 +static struct platform_driver mtk_snfi_driver = {
1190 + .name = "mtk-snfi",
1191 + .of_match_table = mtk_snfi_id_table,
1193 + .probe = mtk_snfi_probe,
1194 + .remove = mtk_snfi_remove,
1195 + .suspend = mtk_snfi_suspend,
1196 + .resume = mtk_snfi_resume,
1199 +module_platform_driver(mtk_snfi_driver);
1201 +MODULE_LICENSE("GPL v2");
1202 +MODULE_AUTHOR("Xiangsheng Hou <xiangsheng.hou@mediatek.com>");
1203 +MODULE_DESCRIPTION("Mediatek SPI Memory Interface Driver");
1204 Index: linux-4.19.48/drivers/spi/Kconfig
1205 ===================================================================
1206 --- linux-4.19.48.orig/drivers/spi/Kconfig
1207 +++ linux-4.19.48/drivers/spi/Kconfig
1208 @@ -389,6 +389,15 @@ config SPI_MT65XX
1209 say Y or M here.If you are not sure, say N.
1210 SPI drivers for Mediatek MT65XX and MT81XX series ARM SoCs.
1212 +config SPI_MTK_SNFI
1213 + tristate "MediaTek SPI NAND interface"
1214 + select MTD_SPI_NAND
1216 + This selects the SPI NAND FLASH interface (SNFI),
1217 + which can be found on MediaTek SoCs.
1218 + Say Y or M here. If you are not sure, say N.
1219 + Note: parallel NAND and SPI NAND are alternatives on MediaTek SoCs.
1222 tristate "Nuvoton NUC900 series SPI"
1223 depends on ARCH_W90X900
1224 Index: linux-4.19.48/drivers/spi/Makefile
1225 ===================================================================
1226 --- linux-4.19.48.orig/drivers/spi/Makefile
1227 +++ linux-4.19.48/drivers/spi/Makefile
1228 @@ -57,6 +57,7 @@ obj-$(CONFIG_SPI_MPC512x_PSC) += spi-mp
1229 obj-$(CONFIG_SPI_MPC52xx_PSC) += spi-mpc52xx-psc.o
1230 obj-$(CONFIG_SPI_MPC52xx) += spi-mpc52xx.o
1231 obj-$(CONFIG_SPI_MT65XX) += spi-mt65xx.o
1232 +obj-$(CONFIG_SPI_MTK_SNFI) += spi-mtk-snfi.o
1233 obj-$(CONFIG_SPI_MXS) += spi-mxs.o
1234 obj-$(CONFIG_SPI_NUC900) += spi-nuc900.o
1235 obj-$(CONFIG_SPI_OC_TINY) += spi-oc-tiny.o