1 From e84e2430ee0e483842b4ff013ae8a6e7e2fa2734 Mon Sep 17 00:00:00 2001
2 From: Weijie Gao <weijie.gao@mediatek.com>
3 Date: Wed, 1 Apr 2020 02:07:58 +0800
4 Subject: [PATCH 1/2] mtd: rawnand: add driver support for MT7621 nand
7 This patch adds a NAND flash controller driver for the MediaTek MT7621 SoC.
9 The NAND flash controller is similar to the controllers described in
10 mtk_nand.c, except that the controller from MT7621 doesn't support DMA
11 transmission, and some registers' offset and fields are different.
13 Signed-off-by: Weijie Gao <weijie.gao@mediatek.com>
15 drivers/mtd/nand/raw/Kconfig | 8 +
16 drivers/mtd/nand/raw/Makefile | 1 +
17 drivers/mtd/nand/raw/mt7621_nand.c | 1348 ++++++++++++++++++++++++++++++++++++
18 3 files changed, 1357 insertions(+)
19 create mode 100644 drivers/mtd/nand/raw/mt7621_nand.c
21 --- a/drivers/mtd/nand/raw/Kconfig
22 +++ b/drivers/mtd/nand/raw/Kconfig
23 @@ -391,6 +391,14 @@ config MTD_NAND_QCOM
24 Enables support for NAND flash chips on SoCs containing the EBI2 NAND
25 controller. This controller is found on IPQ806x SoC.
27 +config MTD_NAND_MT7621
28 + tristate "MT7621 NAND controller"
29 + depends on SOC_MT7621 || COMPILE_TEST
30 + depends on HAS_IOMEM
32 + Enables support for NAND controller on MT7621 SoC.
33 + This driver uses PIO mode for data transmission instead of DMA mode.
36 tristate "MTK NAND controller"
37 depends on ARCH_MEDIATEK || COMPILE_TEST
38 --- a/drivers/mtd/nand/raw/Makefile
39 +++ b/drivers/mtd/nand/raw/Makefile
40 @@ -52,6 +52,7 @@ obj-$(CONFIG_MTD_NAND_SUNXI) += sunxi_n
41 obj-$(CONFIG_MTD_NAND_HISI504) += hisi504_nand.o
42 obj-$(CONFIG_MTD_NAND_BRCMNAND) += brcmnand/
43 obj-$(CONFIG_MTD_NAND_QCOM) += qcom_nandc.o
44 +obj-$(CONFIG_MTD_NAND_MT7621) += mt7621_nand.o
45 obj-$(CONFIG_MTD_NAND_MTK) += mtk_ecc.o mtk_nand.o
46 obj-$(CONFIG_MTD_NAND_MXIC) += mxic_nand.o
47 obj-$(CONFIG_MTD_NAND_TEGRA) += tegra_nand.o
49 +++ b/drivers/mtd/nand/raw/mt7621_nand.c
51 +// SPDX-License-Identifier: GPL-2.0
53 + * MediaTek MT7621 NAND Flash Controller driver
55 + * Copyright (C) 2020 MediaTek Inc. All Rights Reserved.
57 + * Author: Weijie Gao <weijie.gao@mediatek.com>
60 +#include <linux/io.h>
61 +#include <linux/clk.h>
62 +#include <linux/init.h>
63 +#include <linux/errno.h>
64 +#include <linux/sizes.h>
65 +#include <linux/iopoll.h>
66 +#include <linux/kernel.h>
67 +#include <linux/module.h>
68 +#include <linux/mtd/mtd.h>
69 +#include <linux/mtd/rawnand.h>
70 +#include <linux/mtd/partitions.h>
71 +#include <linux/platform_device.h>
72 +#include <asm/addrspace.h>
74 +/* NFI core registers */
75 +#define NFI_CNFG 0x000
76 +#define CNFG_OP_MODE_S 12
77 +#define CNFG_OP_MODE_M GENMASK(14, 12)
78 +#define CNFG_OP_CUSTOM 6
79 +#define CNFG_AUTO_FMT_EN BIT(9)
80 +#define CNFG_HW_ECC_EN BIT(8)
81 +#define CNFG_BYTE_RW BIT(6)
82 +#define CNFG_READ_MODE BIT(1)
84 +#define NFI_PAGEFMT 0x004
85 +#define PAGEFMT_FDM_ECC_S 12
86 +#define PAGEFMT_FDM_ECC_M GENMASK(15, 12)
87 +#define PAGEFMT_FDM_S 8
88 +#define PAGEFMT_FDM_M GENMASK(11, 8)
89 +#define PAGEFMT_SPARE_S 4
90 +#define PAGEFMT_SPARE_M GENMASK(5, 4)
91 +#define PAGEFMT_PAGE_S 0
92 +#define PAGEFMT_PAGE_M GENMASK(1, 0)
94 +#define NFI_CON 0x008
95 +#define CON_NFI_SEC_S 12
96 +#define CON_NFI_SEC_M GENMASK(15, 12)
97 +#define CON_NFI_BWR BIT(9)
98 +#define CON_NFI_BRD BIT(8)
99 +#define CON_NFI_RST BIT(1)
100 +#define CON_FIFO_FLUSH BIT(0)
102 +#define NFI_ACCCON 0x00c
103 +#define ACCCON_POECS_S 28
104 +#define ACCCON_POECS_MAX 0x0f
105 +#define ACCCON_POECS_DEF 3
106 +#define ACCCON_PRECS_S 22
107 +#define ACCCON_PRECS_MAX 0x3f
108 +#define ACCCON_PRECS_DEF 3
109 +#define ACCCON_C2R_S 16
110 +#define ACCCON_C2R_MAX 0x3f
111 +#define ACCCON_C2R_DEF 7
112 +#define ACCCON_W2R_S 12
113 +#define ACCCON_W2R_MAX 0x0f
114 +#define ACCCON_W2R_DEF 7
115 +#define ACCCON_WH_S 8
116 +#define ACCCON_WH_MAX 0x0f
117 +#define ACCCON_WH_DEF 15
118 +#define ACCCON_WST_S 4
119 +#define ACCCON_WST_MAX 0x0f
120 +#define ACCCON_WST_DEF 15
121 +#define ACCCON_WST_MIN 3
122 +#define ACCCON_RLT_S 0
123 +#define ACCCON_RLT_MAX 0x0f
124 +#define ACCCON_RLT_DEF 15
125 +#define ACCCON_RLT_MIN 3
127 +#define NFI_CMD 0x020
129 +#define NFI_ADDRNOB 0x030
130 +#define ADDR_ROW_NOB_S 4
131 +#define ADDR_ROW_NOB_M GENMASK(6, 4)
132 +#define ADDR_COL_NOB_S 0
133 +#define ADDR_COL_NOB_M GENMASK(2, 0)
135 +#define NFI_COLADDR 0x034
136 +#define NFI_ROWADDR 0x038
138 +#define NFI_STRDATA 0x040
139 +#define STR_DATA BIT(0)
141 +#define NFI_CNRNB 0x044
142 +#define CB2R_TIME_S 4
143 +#define CB2R_TIME_M GENMASK(7, 4)
144 +#define STR_CNRNB BIT(0)
146 +#define NFI_DATAW 0x050
147 +#define NFI_DATAR 0x054
149 +#define NFI_PIO_DIRDY 0x058
150 +#define PIO_DIRDY BIT(0)
152 +#define NFI_STA 0x060
153 +#define STA_NFI_FSM_S 16
154 +#define STA_NFI_FSM_M GENMASK(19, 16)
155 +#define STA_FSM_CUSTOM_DATA 14
156 +#define STA_BUSY BIT(8)
157 +#define STA_ADDR BIT(1)
158 +#define STA_CMD BIT(0)
160 +#define NFI_ADDRCNTR 0x070
161 +#define SEC_CNTR_S 12
162 +#define SEC_CNTR_M GENMASK(15, 12)
163 +#define SEC_ADDR_S 0
164 +#define SEC_ADDR_M GENMASK(9, 0)
166 +#define NFI_CSEL 0x090
168 +#define CSEL_M GENMASK(1, 0)
170 +#define NFI_FDM0L 0x0a0
171 +#define NFI_FDML(n) (0x0a0 + ((n) << 3))
173 +#define NFI_FDM0M 0x0a4
174 +#define NFI_FDMM(n) (0x0a4 + ((n) << 3))
176 +#define NFI_MASTER_STA 0x210
177 +#define MAS_ADDR GENMASK(11, 9)
178 +#define MAS_RD GENMASK(8, 6)
179 +#define MAS_WR GENMASK(5, 3)
180 +#define MAS_RDDLY GENMASK(2, 0)
182 +/* ECC engine registers */
183 +#define ECC_ENCCON 0x000
184 +#define ENC_EN BIT(0)
186 +#define ECC_ENCCNFG 0x004
187 +#define ENC_CNFG_MSG_S 16
188 +#define ENC_CNFG_MSG_M GENMASK(28, 16)
189 +#define ENC_MODE_S 4
190 +#define ENC_MODE_M GENMASK(5, 4)
191 +#define ENC_MODE_NFI 1
192 +#define ENC_TNUM_S 0
193 +#define ENC_TNUM_M GENMASK(2, 0)
195 +#define ECC_ENCIDLE 0x00c
196 +#define ENC_IDLE BIT(0)
198 +#define ECC_DECCON 0x100
199 +#define DEC_EN BIT(0)
201 +#define ECC_DECCNFG 0x104
202 +#define DEC_EMPTY_EN BIT(31)
204 +#define DEC_CS_M GENMASK(28, 16)
205 +#define DEC_CON_S 12
206 +#define DEC_CON_M GENMASK(13, 12)
207 +#define DEC_CON_EL 2
208 +#define DEC_MODE_S 4
209 +#define DEC_MODE_M GENMASK(5, 4)
210 +#define DEC_MODE_NFI 1
211 +#define DEC_TNUM_S 0
212 +#define DEC_TNUM_M GENMASK(2, 0)
214 +#define ECC_DECIDLE 0x10c
215 +#define DEC_IDLE BIT(1)
217 +#define ECC_DECENUM 0x114
219 +#define ERRNUM_M GENMASK(3, 0)
221 +#define ECC_DECDONE 0x118
222 +#define DEC_DONE7 BIT(7)
223 +#define DEC_DONE6 BIT(6)
224 +#define DEC_DONE5 BIT(5)
225 +#define DEC_DONE4 BIT(4)
226 +#define DEC_DONE3 BIT(3)
227 +#define DEC_DONE2 BIT(2)
228 +#define DEC_DONE1 BIT(1)
229 +#define DEC_DONE0 BIT(0)
231 +#define ECC_DECEL(n) (0x11c + (n) * 4)
232 +#define DEC_EL_ODD_S 16
233 +#define DEC_EL_EVEN_S 0
234 +#define DEC_EL_M 0x1fff
235 +#define DEC_EL_BYTE_POS_S 3
236 +#define DEC_EL_BIT_POS_M GENMASK(3, 0)
238 +#define ECC_FDMADDR 0x13c
240 +/* ENCIDLE and DECIDLE */
241 +#define ECC_IDLE BIT(0)
243 +#define ACCTIMING(tpoecs, tprecs, tc2r, tw2r, twh, twst, trlt) \
244 + ((tpoecs) << ACCCON_POECS_S | (tprecs) << ACCCON_PRECS_S | \
245 + (tc2r) << ACCCON_C2R_S | (tw2r) << ACCCON_W2R_S | \
246 + (twh) << ACCCON_WH_S | (twst) << ACCCON_WST_S | (trlt))
248 +#define MASTER_STA_MASK (MAS_ADDR | MAS_RD | MAS_WR | \
250 +#define NFI_RESET_TIMEOUT 1000000
251 +#define NFI_CORE_TIMEOUT 500000
252 +#define ECC_ENGINE_TIMEOUT 500000
254 +#define ECC_SECTOR_SIZE 512
255 +#define ECC_PARITY_BITS 13
257 +#define NFI_FDM_SIZE 8
259 +#define MT7621_NFC_NAME "mt7621-nand"
262 + struct nand_controller controller;
263 + struct nand_chip nand;
264 + struct clk *nfi_clk;
265 + struct device *dev;
267 + void __iomem *nfi_regs;
268 + void __iomem *ecc_regs;
270 + u32 spare_per_sector;
273 +static const u16 mt7621_nfi_page_size[] = { SZ_512, SZ_2K, SZ_4K };
274 +static const u8 mt7621_nfi_spare_size[] = { 16, 26, 27, 28 };
275 +static const u8 mt7621_ecc_strength[] = { 4, 6, 8, 10, 12 };
277 +static inline u32 nfi_read32(struct mt7621_nfc *nfc, u32 reg)
279 + return readl(nfc->nfi_regs + reg);
282 +static inline void nfi_write32(struct mt7621_nfc *nfc, u32 reg, u32 val)
284 + writel(val, nfc->nfi_regs + reg);
287 +static inline u16 nfi_read16(struct mt7621_nfc *nfc, u32 reg)
289 + return readw(nfc->nfi_regs + reg);
292 +static inline void nfi_write16(struct mt7621_nfc *nfc, u32 reg, u16 val)
294 + writew(val, nfc->nfi_regs + reg);
297 +static inline void ecc_write16(struct mt7621_nfc *nfc, u32 reg, u16 val)
299 + writew(val, nfc->ecc_regs + reg);
302 +static inline u32 ecc_read32(struct mt7621_nfc *nfc, u32 reg)
304 + return readl(nfc->ecc_regs + reg);
307 +static inline void ecc_write32(struct mt7621_nfc *nfc, u32 reg, u32 val)
309 + return writel(val, nfc->ecc_regs + reg);
312 +static inline u8 *oob_fdm_ptr(struct nand_chip *nand, int sect)
314 + return nand->oob_poi + sect * NFI_FDM_SIZE;
317 +static inline u8 *oob_ecc_ptr(struct mt7621_nfc *nfc, int sect)
319 + struct nand_chip *nand = &nfc->nand;
321 + return nand->oob_poi + nand->ecc.steps * NFI_FDM_SIZE +
322 + sect * (nfc->spare_per_sector - NFI_FDM_SIZE);
325 +static inline u8 *page_data_ptr(struct nand_chip *nand, const u8 *buf,
328 + return (u8 *)buf + sect * nand->ecc.size;
331 +static int mt7621_ecc_wait_idle(struct mt7621_nfc *nfc, u32 reg)
333 + struct device *dev = nfc->dev;
337 + ret = readw_poll_timeout_atomic(nfc->ecc_regs + reg, val,
338 + val & ECC_IDLE, 10,
339 + ECC_ENGINE_TIMEOUT);
341 + dev_warn(dev, "ECC engine timed out entering idle mode\n");
348 +static int mt7621_ecc_decoder_wait_done(struct mt7621_nfc *nfc, u32 sect)
350 + struct device *dev = nfc->dev;
354 + ret = readw_poll_timeout_atomic(nfc->ecc_regs + ECC_DECDONE, val,
355 + val & (1 << sect), 10,
356 + ECC_ENGINE_TIMEOUT);
359 + dev_warn(dev, "ECC decoder for sector %d timed out\n",
367 +static void mt7621_ecc_encoder_op(struct mt7621_nfc *nfc, bool enable)
369 + mt7621_ecc_wait_idle(nfc, ECC_ENCIDLE);
370 + ecc_write16(nfc, ECC_ENCCON, enable ? ENC_EN : 0);
373 +static void mt7621_ecc_decoder_op(struct mt7621_nfc *nfc, bool enable)
375 + mt7621_ecc_wait_idle(nfc, ECC_DECIDLE);
376 + ecc_write16(nfc, ECC_DECCON, enable ? DEC_EN : 0);
379 +static int mt7621_ecc_correct_check(struct mt7621_nfc *nfc, u8 *sector_buf,
380 + u8 *fdm_buf, u32 sect)
382 + struct nand_chip *nand = &nfc->nand;
383 + u32 decnum, num_error_bits, fdm_end_bits;
384 + u32 error_locations, error_bit_loc;
385 + u32 error_byte_pos, error_bit_pos;
389 + decnum = ecc_read32(nfc, ECC_DECENUM);
390 + num_error_bits = (decnum >> (sect << ERRNUM_S)) & ERRNUM_M;
391 + fdm_end_bits = (nand->ecc.size + NFI_FDM_SIZE) << 3;
393 + if (!num_error_bits)
396 + if (num_error_bits == ERRNUM_M)
399 + for (i = 0; i < num_error_bits; i++) {
400 + error_locations = ecc_read32(nfc, ECC_DECEL(i / 2));
401 + error_bit_loc = (error_locations >> ((i % 2) * DEC_EL_ODD_S)) &
403 + error_byte_pos = error_bit_loc >> DEC_EL_BYTE_POS_S;
404 + error_bit_pos = error_bit_loc & DEC_EL_BIT_POS_M;
406 + if (error_bit_loc < (nand->ecc.size << 3)) {
408 + sector_buf[error_byte_pos] ^=
409 + (1 << error_bit_pos);
411 + } else if (error_bit_loc < fdm_end_bits) {
413 + fdm_buf[error_byte_pos - nand->ecc.size] ^=
414 + (1 << error_bit_pos);
424 +static int mt7621_nfc_wait_write_completion(struct mt7621_nfc *nfc,
425 + struct nand_chip *nand)
427 + struct device *dev = nfc->dev;
431 + ret = readw_poll_timeout_atomic(nfc->nfi_regs + NFI_ADDRCNTR, val,
432 + ((val & SEC_CNTR_M) >> SEC_CNTR_S) >= nand->ecc.steps, 10,
436 + dev_warn(dev, "NFI core write operation timed out\n");
443 +static void mt7621_nfc_hw_reset(struct mt7621_nfc *nfc)
448 + /* reset all registers and force the NFI master to terminate */
449 + nfi_write16(nfc, NFI_CON, CON_FIFO_FLUSH | CON_NFI_RST);
451 + /* wait for the master to finish the last transaction */
452 + ret = readw_poll_timeout(nfc->nfi_regs + NFI_MASTER_STA, val,
453 + !(val & MASTER_STA_MASK), 50,
454 + NFI_RESET_TIMEOUT);
456 + dev_warn(nfc->dev, "Failed to reset NFI master in %dms\n",
457 + NFI_RESET_TIMEOUT);
460 + /* ensure any status register affected by the NFI master is reset */
461 + nfi_write16(nfc, NFI_CON, CON_FIFO_FLUSH | CON_NFI_RST);
462 + nfi_write16(nfc, NFI_STRDATA, 0);
465 +static inline void mt7621_nfc_hw_init(struct mt7621_nfc *nfc)
470 + * CNRNB: nand ready/busy register
471 + * -------------------------------
472 + * 7:4: timeout register for polling the NAND busy/ready signal
473 + * 0 : poll the status of the busy/ready signal after [7:4]*16 cycles.
475 + nfi_write16(nfc, NFI_CNRNB, CB2R_TIME_M | STR_CNRNB);
477 + mt7621_nfc_hw_reset(nfc);
479 + /* Apply default access timing */
480 + acccon = ACCTIMING(ACCCON_POECS_DEF, ACCCON_PRECS_DEF, ACCCON_C2R_DEF,
481 + ACCCON_W2R_DEF, ACCCON_WH_DEF, ACCCON_WST_DEF,
484 + nfi_write32(nfc, NFI_ACCCON, acccon);
487 +static int mt7621_nfc_send_command(struct mt7621_nfc *nfc, u8 command)
489 + struct device *dev = nfc->dev;
493 + nfi_write32(nfc, NFI_CMD, command);
495 + ret = readl_poll_timeout_atomic(nfc->nfi_regs + NFI_STA, val,
496 + !(val & STA_CMD), 10,
499 + dev_warn(dev, "NFI core timed out entering command mode\n");
506 +static int mt7621_nfc_send_address_byte(struct mt7621_nfc *nfc, int addr)
508 + struct device *dev = nfc->dev;
512 + nfi_write32(nfc, NFI_COLADDR, addr);
513 + nfi_write32(nfc, NFI_ROWADDR, 0);
514 + nfi_write16(nfc, NFI_ADDRNOB, 1);
516 + ret = readl_poll_timeout_atomic(nfc->nfi_regs + NFI_STA, val,
517 + !(val & STA_ADDR), 10,
520 + dev_warn(dev, "NFI core timed out entering address mode\n");
527 +static int mt7621_nfc_send_address(struct mt7621_nfc *nfc, const u8 *addr,
528 + unsigned int naddrs)
533 + ret = mt7621_nfc_send_address_byte(nfc, *addr);
544 +static void mt7621_nfc_wait_pio_ready(struct mt7621_nfc *nfc)
546 + struct device *dev = nfc->dev;
550 + ret = readw_poll_timeout_atomic(nfc->nfi_regs + NFI_PIO_DIRDY, val,
551 + val & PIO_DIRDY, 10,
554 + dev_err(dev, "NFI core PIO mode not ready\n");
557 +static u32 mt7621_nfc_pio_read(struct mt7621_nfc *nfc, bool br)
561 + /* after each byte read, the NFI_STA reg is reset by the hardware */
562 + reg = (nfi_read32(nfc, NFI_STA) & STA_NFI_FSM_M) >> STA_NFI_FSM_S;
563 + if (reg != STA_FSM_CUSTOM_DATA) {
564 + reg = nfi_read16(nfc, NFI_CNFG);
565 + reg |= CNFG_READ_MODE | CNFG_BYTE_RW;
567 + reg &= ~CNFG_BYTE_RW;
568 + nfi_write16(nfc, NFI_CNFG, reg);
571 + * set to max sector to allow the HW to continue reading over
572 + * unaligned accesses
574 + nfi_write16(nfc, NFI_CON, CON_NFI_SEC_M | CON_NFI_BRD);
576 + /* trigger to fetch data */
577 + nfi_write16(nfc, NFI_STRDATA, STR_DATA);
580 + mt7621_nfc_wait_pio_ready(nfc);
582 + return nfi_read32(nfc, NFI_DATAR);
585 +static void mt7621_nfc_read_data(struct mt7621_nfc *nfc, u8 *buf, u32 len)
587 + while (((uintptr_t)buf & 3) && len) {
588 + *buf = mt7621_nfc_pio_read(nfc, true);
594 + *(u32 *)buf = mt7621_nfc_pio_read(nfc, false);
600 + *buf = mt7621_nfc_pio_read(nfc, true);
606 +static void mt7621_nfc_read_data_discard(struct mt7621_nfc *nfc, u32 len)
609 + mt7621_nfc_pio_read(nfc, false);
614 + mt7621_nfc_pio_read(nfc, true);
619 +static void mt7621_nfc_pio_write(struct mt7621_nfc *nfc, u32 val, bool bw)
623 + reg = (nfi_read32(nfc, NFI_STA) & STA_NFI_FSM_M) >> STA_NFI_FSM_S;
624 + if (reg != STA_FSM_CUSTOM_DATA) {
625 + reg = nfi_read16(nfc, NFI_CNFG);
626 + reg &= ~(CNFG_READ_MODE | CNFG_BYTE_RW);
628 + reg |= CNFG_BYTE_RW;
629 + nfi_write16(nfc, NFI_CNFG, reg);
631 + nfi_write16(nfc, NFI_CON, CON_NFI_SEC_M | CON_NFI_BWR);
632 + nfi_write16(nfc, NFI_STRDATA, STR_DATA);
635 + mt7621_nfc_wait_pio_ready(nfc);
636 + nfi_write32(nfc, NFI_DATAW, val);
639 +static void mt7621_nfc_write_data(struct mt7621_nfc *nfc, const u8 *buf,
642 + while (((uintptr_t)buf & 3) && len) {
643 + mt7621_nfc_pio_write(nfc, *buf, true);
649 + mt7621_nfc_pio_write(nfc, *(const u32 *)buf, false);
655 + mt7621_nfc_pio_write(nfc, *buf, true);
661 +static void mt7621_nfc_write_data_empty(struct mt7621_nfc *nfc, u32 len)
664 + mt7621_nfc_pio_write(nfc, 0xffffffff, false);
669 + mt7621_nfc_pio_write(nfc, 0xff, true);
674 +static int mt7621_nfc_dev_ready(struct mt7621_nfc *nfc,
675 + unsigned int timeout_ms)
679 + return readl_poll_timeout_atomic(nfc->nfi_regs + NFI_STA, val,
680 + !(val & STA_BUSY), 10,
681 + timeout_ms * 1000);
684 +static int mt7621_nfc_exec_instr(struct nand_chip *nand,
685 + const struct nand_op_instr *instr)
687 + struct mt7621_nfc *nfc = nand_get_controller_data(nand);
689 + switch (instr->type) {
690 + case NAND_OP_CMD_INSTR:
691 + mt7621_nfc_hw_reset(nfc);
692 + nfi_write16(nfc, NFI_CNFG, CNFG_OP_CUSTOM << CNFG_OP_MODE_S);
693 + return mt7621_nfc_send_command(nfc, instr->ctx.cmd.opcode);
694 + case NAND_OP_ADDR_INSTR:
695 + return mt7621_nfc_send_address(nfc, instr->ctx.addr.addrs,
696 + instr->ctx.addr.naddrs);
697 + case NAND_OP_DATA_IN_INSTR:
698 + mt7621_nfc_read_data(nfc, instr->ctx.data.buf.in,
699 + instr->ctx.data.len);
701 + case NAND_OP_DATA_OUT_INSTR:
702 + mt7621_nfc_write_data(nfc, instr->ctx.data.buf.out,
703 + instr->ctx.data.len);
705 + case NAND_OP_WAITRDY_INSTR:
706 + return mt7621_nfc_dev_ready(nfc,
707 + instr->ctx.waitrdy.timeout_ms);
709 + WARN_ONCE(1, "unsupported NAND instruction type: %d\n",
716 +static int mt7621_nfc_exec_op(struct nand_chip *nand,
717 + const struct nand_operation *op, bool check_only)
719 + struct mt7621_nfc *nfc = nand_get_controller_data(nand);
725 + /* Only CS0 available */
726 + nfi_write16(nfc, NFI_CSEL, 0);
728 + for (i = 0; i < op->ninstrs; i++) {
729 + ret = mt7621_nfc_exec_instr(nand, &op->instrs[i]);
737 +static int mt7621_nfc_setup_data_interface(struct nand_chip *nand, int csline,
738 + const struct nand_data_interface *conf)
740 + struct mt7621_nfc *nfc = nand_get_controller_data(nand);
741 + const struct nand_sdr_timings *timings;
742 + u32 acccon, temp, rate, tpoecs, tprecs, tc2r, tw2r, twh, twst, trlt;
747 + timings = nand_get_sdr_timings(conf);
748 + if (IS_ERR(timings))
751 + rate = clk_get_rate(nfc->nfi_clk);
753 + /* turn clock rate into KHZ */
756 + tpoecs = max(timings->tALH_min, timings->tCLH_min) / 1000;
757 + tpoecs = DIV_ROUND_UP(tpoecs * rate, 1000000);
758 + tpoecs = min_t(u32, tpoecs, ACCCON_POECS_MAX);
760 + tprecs = max(timings->tCLS_min, timings->tALS_min) / 1000;
761 + tprecs = DIV_ROUND_UP(tprecs * rate, 1000000);
762 + tprecs = min_t(u32, tprecs, ACCCON_PRECS_MAX);
764 + /* sdr interface has no tCR which means CE# low to RE# low */
767 + tw2r = timings->tWHR_min / 1000;
768 + tw2r = DIV_ROUND_UP(tw2r * rate, 1000000);
769 + tw2r = DIV_ROUND_UP(tw2r - 1, 2);
770 + tw2r = min_t(u32, tw2r, ACCCON_W2R_MAX);
772 + twh = max(timings->tREH_min, timings->tWH_min) / 1000;
773 + twh = DIV_ROUND_UP(twh * rate, 1000000) - 1;
774 + twh = min_t(u32, twh, ACCCON_WH_MAX);
776 + /* Calculate real WE#/RE# hold time in nanosecond */
777 + temp = (twh + 1) * 1000000 / rate;
778 + /* nanosecond to picosecond */
782 +	 * WE# low level time should be expanded to meet WE# pulse time
783 + * and WE# cycle time at the same time.
785 + if (temp < timings->tWC_min)
786 + twst = timings->tWC_min - temp;
789 + twst = max(timings->tWP_min, twst) / 1000;
790 + twst = DIV_ROUND_UP(twst * rate, 1000000) - 1;
791 + twst = min_t(u32, twst, ACCCON_WST_MAX);
794 +	 * RE# low level time should be expanded to meet RE# pulse time
795 + * and RE# cycle time at the same time.
797 + if (temp < timings->tRC_min)
798 + trlt = timings->tRC_min - temp;
801 + trlt = max(trlt, timings->tRP_min) / 1000;
802 + trlt = DIV_ROUND_UP(trlt * rate, 1000000) - 1;
803 + trlt = min_t(u32, trlt, ACCCON_RLT_MAX);
805 + if (csline == NAND_DATA_IFACE_CHECK_ONLY) {
806 + if (twst < ACCCON_WST_MIN || trlt < ACCCON_RLT_MIN)
810 + acccon = ACCTIMING(tpoecs, tprecs, tc2r, tw2r, twh, twst, trlt);
812 + dev_info(nfc->dev, "Using programmed access timing: %08x\n", acccon);
814 + nfi_write32(nfc, NFI_ACCCON, acccon);
819 +static int mt7621_nfc_calc_ecc_strength(struct mt7621_nfc *nfc,
820 + u32 avail_ecc_bytes)
822 + struct nand_chip *nand = &nfc->nand;
823 + struct mtd_info *mtd = nand_to_mtd(nand);
827 + strength = avail_ecc_bytes * 8 / ECC_PARITY_BITS;
829 + /* Find the closest supported ecc strength */
830 + for (i = ARRAY_SIZE(mt7621_ecc_strength) - 1; i >= 0; i--) {
831 + if (mt7621_ecc_strength[i] <= strength)
835 + if (unlikely(i < 0)) {
836 + dev_err(nfc->dev, "OOB size (%u) is not supported\n",
841 + nand->ecc.strength = mt7621_ecc_strength[i];
843 + DIV_ROUND_UP(nand->ecc.strength * ECC_PARITY_BITS, 8);
845 + dev_info(nfc->dev, "ECC strength adjusted to %u bits\n",
846 + nand->ecc.strength);
851 +static int mt7621_nfc_set_spare_per_sector(struct mt7621_nfc *nfc)
853 + struct nand_chip *nand = &nfc->nand;
854 + struct mtd_info *mtd = nand_to_mtd(nand);
858 + size = nand->ecc.bytes + NFI_FDM_SIZE;
860 + /* Find the closest supported spare size */
861 + for (i = 0; i < ARRAY_SIZE(mt7621_nfi_spare_size); i++) {
862 + if (mt7621_nfi_spare_size[i] >= size)
866 + if (unlikely(i >= ARRAY_SIZE(mt7621_nfi_spare_size))) {
867 + dev_err(nfc->dev, "OOB size (%u) is not supported\n",
872 + nfc->spare_per_sector = mt7621_nfi_spare_size[i];
877 +static int mt7621_nfc_ecc_init(struct mt7621_nfc *nfc)
879 + struct nand_chip *nand = &nfc->nand;
880 + struct mtd_info *mtd = nand_to_mtd(nand);
881 + u32 spare_per_sector, encode_block_size, decode_block_size;
882 + u32 ecc_enccfg, ecc_deccfg;
885 + /* Only hardware ECC mode is supported */
886 + if (nand->ecc.mode != NAND_ECC_HW_SYNDROME) {
887 + dev_err(nfc->dev, "Only hardware ECC mode is supported\n");
891 + nand->ecc.size = ECC_SECTOR_SIZE;
892 + nand->ecc.steps = mtd->writesize / nand->ecc.size;
894 + spare_per_sector = mtd->oobsize / nand->ecc.steps;
896 + ecc_cap = mt7621_nfc_calc_ecc_strength(nfc,
897 + spare_per_sector - NFI_FDM_SIZE);
902 + encode_block_size = (nand->ecc.size + NFI_FDM_SIZE) * 8;
903 + ecc_enccfg = ecc_cap | (ENC_MODE_NFI << ENC_MODE_S) |
904 + (encode_block_size << ENC_CNFG_MSG_S);
906 + /* Sector + FDM + ECC parity bits */
907 + decode_block_size = ((nand->ecc.size + NFI_FDM_SIZE) * 8) +
908 + nand->ecc.strength * ECC_PARITY_BITS;
909 + ecc_deccfg = ecc_cap | (DEC_MODE_NFI << DEC_MODE_S) |
910 + (decode_block_size << DEC_CS_S) |
911 + (DEC_CON_EL << DEC_CON_S) | DEC_EMPTY_EN;
913 + mt7621_ecc_encoder_op(nfc, false);
914 + ecc_write32(nfc, ECC_ENCCNFG, ecc_enccfg);
916 + mt7621_ecc_decoder_op(nfc, false);
917 + ecc_write32(nfc, ECC_DECCNFG, ecc_deccfg);
922 +static int mt7621_nfc_set_page_format(struct mt7621_nfc *nfc)
924 + struct nand_chip *nand = &nfc->nand;
925 + struct mtd_info *mtd = nand_to_mtd(nand);
929 + spare_size = mt7621_nfc_set_spare_per_sector(nfc);
930 + if (spare_size < 0)
933 + for (i = 0; i < ARRAY_SIZE(mt7621_nfi_page_size); i++) {
934 + if (mt7621_nfi_page_size[i] == mtd->writesize)
938 + if (unlikely(i >= ARRAY_SIZE(mt7621_nfi_page_size))) {
939 + dev_err(nfc->dev, "Page size (%u) is not supported\n",
944 + pagefmt = i | (spare_size << PAGEFMT_SPARE_S) |
945 + (NFI_FDM_SIZE << PAGEFMT_FDM_S) |
946 + (NFI_FDM_SIZE << PAGEFMT_FDM_ECC_S);
948 + nfi_write16(nfc, NFI_PAGEFMT, pagefmt);
953 +static int mt7621_nfc_attach_chip(struct nand_chip *nand)
955 + struct mt7621_nfc *nfc = nand_get_controller_data(nand);
958 + if (nand->options & NAND_BUSWIDTH_16) {
959 + dev_err(nfc->dev, "16-bit buswidth is not supported");
963 + ret = mt7621_nfc_ecc_init(nfc);
967 + return mt7621_nfc_set_page_format(nfc);
970 +static const struct nand_controller_ops mt7621_nfc_controller_ops = {
971 + .attach_chip = mt7621_nfc_attach_chip,
972 + .exec_op = mt7621_nfc_exec_op,
973 + .setup_data_interface = mt7621_nfc_setup_data_interface,
976 +static int mt7621_nfc_ooblayout_free(struct mtd_info *mtd, int section,
977 + struct mtd_oob_region *oob_region)
979 + struct nand_chip *nand = mtd_to_nand(mtd);
981 + if (section >= nand->ecc.steps)
984 + oob_region->length = NFI_FDM_SIZE - 1;
985 + oob_region->offset = section * NFI_FDM_SIZE + 1;
990 +static int mt7621_nfc_ooblayout_ecc(struct mtd_info *mtd, int section,
991 + struct mtd_oob_region *oob_region)
993 + struct nand_chip *nand = mtd_to_nand(mtd);
998 + oob_region->offset = NFI_FDM_SIZE * nand->ecc.steps;
999 + oob_region->length = mtd->oobsize - oob_region->offset;
1004 +static const struct mtd_ooblayout_ops mt7621_nfc_ooblayout_ops = {
1005 + .free = mt7621_nfc_ooblayout_free,
1006 + .ecc = mt7621_nfc_ooblayout_ecc,
1009 +static void mt7621_nfc_write_fdm(struct mt7621_nfc *nfc)
1011 + struct nand_chip *nand = &nfc->nand;
1016 + for (i = 0; i < nand->ecc.steps; i++) {
1019 + oobptr = oob_fdm_ptr(nand, i);
1021 + for (j = 0; j < 4; j++)
1022 + vall |= (u32)oobptr[j] << (j * 8);
1024 + for (j = 0; j < 4; j++)
1025 + valm |= (u32)oobptr[j + 4] << ((j - 4) * 8);
1027 + nfi_write32(nfc, NFI_FDML(i), vall);
1028 + nfi_write32(nfc, NFI_FDMM(i), valm);
1032 +static void mt7621_nfc_read_sector_fdm(struct mt7621_nfc *nfc, u32 sect)
1034 + struct nand_chip *nand = &nfc->nand;
1039 + vall = nfi_read32(nfc, NFI_FDML(sect));
1040 + valm = nfi_read32(nfc, NFI_FDMM(sect));
1041 + oobptr = oob_fdm_ptr(nand, sect);
1043 + for (i = 0; i < 4; i++)
1044 + oobptr[i] = (vall >> (i * 8)) & 0xff;
1046 + for (i = 0; i < 4; i++)
1047 + oobptr[i + 4] = (valm >> (i * 8)) & 0xff;
1050 +static int mt7621_nfc_read_page_hwecc(struct nand_chip *nand, uint8_t *buf,
1051 + int oob_required, int page)
1053 + struct mt7621_nfc *nfc = nand_get_controller_data(nand);
1054 + struct mtd_info *mtd = nand_to_mtd(nand);
1058 + nand_read_page_op(nand, page, 0, NULL, 0);
1060 + nfi_write16(nfc, NFI_CNFG, (CNFG_OP_CUSTOM << CNFG_OP_MODE_S) |
1061 + CNFG_READ_MODE | CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN);
1063 + mt7621_ecc_decoder_op(nfc, true);
1065 + nfi_write16(nfc, NFI_CON,
1066 + CON_NFI_BRD | (nand->ecc.steps << CON_NFI_SEC_S));
1068 + for (i = 0; i < nand->ecc.steps; i++) {
1070 + mt7621_nfc_read_data(nfc, page_data_ptr(nand, buf, i),
1073 + mt7621_nfc_read_data_discard(nfc, nand->ecc.size);
1075 + rc = mt7621_ecc_decoder_wait_done(nfc, i);
1077 + mt7621_nfc_read_sector_fdm(nfc, i);
1084 + rc = mt7621_ecc_correct_check(nfc,
1085 + buf ? page_data_ptr(nand, buf, i) : NULL,
1086 + oob_fdm_ptr(nand, i), i);
1089 + dev_warn(nfc->dev,
1090 + "Uncorrectable ECC error at page %d.%d\n",
1092 + bitflips = -EBADMSG;
1093 + mtd->ecc_stats.failed++;
1094 + } else if (bitflips >= 0) {
1096 + mtd->ecc_stats.corrected += rc;
1100 + mt7621_ecc_decoder_op(nfc, false);
1102 + nfi_write16(nfc, NFI_CON, 0);
1107 +static int mt7621_nfc_read_page_raw(struct nand_chip *nand, uint8_t *buf,
1108 + int oob_required, int page)
1110 + struct mt7621_nfc *nfc = nand_get_controller_data(nand);
1113 + nand_read_page_op(nand, page, 0, NULL, 0);
1115 + nfi_write16(nfc, NFI_CNFG, (CNFG_OP_CUSTOM << CNFG_OP_MODE_S) |
1118 + nfi_write16(nfc, NFI_CON,
1119 + CON_NFI_BRD | (nand->ecc.steps << CON_NFI_SEC_S));
1121 + for (i = 0; i < nand->ecc.steps; i++) {
1124 + mt7621_nfc_read_data(nfc, page_data_ptr(nand, buf, i),
1127 + mt7621_nfc_read_data_discard(nfc, nand->ecc.size);
1130 + mt7621_nfc_read_data(nfc, oob_fdm_ptr(nand, i), NFI_FDM_SIZE);
1132 + /* Read ECC parity data */
1133 + mt7621_nfc_read_data(nfc, oob_ecc_ptr(nfc, i),
1134 + nfc->spare_per_sector - NFI_FDM_SIZE);
1137 + nfi_write16(nfc, NFI_CON, 0);
1142 +static int mt7621_nfc_read_oob_hwecc(struct nand_chip *nand, int page)
1144 + return mt7621_nfc_read_page_hwecc(nand, NULL, 1, page);
1147 +static int mt7621_nfc_read_oob_raw(struct nand_chip *nand, int page)
1149 + return mt7621_nfc_read_page_raw(nand, NULL, 1, page);
1152 +static int mt7621_nfc_check_empty_page(struct nand_chip *nand, const u8 *buf)
1154 + struct mtd_info *mtd = nand_to_mtd(nand);
1159 + for (i = 0; i < mtd->writesize; i++)
1160 + if (buf[i] != 0xff)
1164 + for (i = 0; i < nand->ecc.steps; i++) {
1165 + oobptr = oob_fdm_ptr(nand, i);
1166 + for (j = 0; j < NFI_FDM_SIZE; j++)
1167 + if (oobptr[j] != 0xff)
1174 +static int mt7621_nfc_write_page_hwecc(struct nand_chip *nand,
1175 + const uint8_t *buf, int oob_required,
1178 + struct mt7621_nfc *nfc = nand_get_controller_data(nand);
1179 + struct mtd_info *mtd = nand_to_mtd(nand);
1181 + if (mt7621_nfc_check_empty_page(nand, buf)) {
1183 + * MT7621 ECC engine always generates parity code for input
1184 + * pages, even for empty pages. Doing so will write back ECC
1185 + * parity code to the oob region, which means such pages will
1186 + * no longer be empty pages.
1188 + * To avoid this, stop write operation if current page is an
1194 + nand_prog_page_begin_op(nand, page, 0, NULL, 0);
1196 + nfi_write16(nfc, NFI_CNFG, (CNFG_OP_CUSTOM << CNFG_OP_MODE_S) |
1197 + CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN);
1199 + mt7621_ecc_encoder_op(nfc, true);
1201 + mt7621_nfc_write_fdm(nfc);
1203 + nfi_write16(nfc, NFI_CON,
1204 + CON_NFI_BWR | (nand->ecc.steps << CON_NFI_SEC_S));
1207 + mt7621_nfc_write_data(nfc, buf, mtd->writesize);
1209 + mt7621_nfc_write_data_empty(nfc, mtd->writesize);
1211 + mt7621_nfc_wait_write_completion(nfc, nand);
1213 + mt7621_ecc_encoder_op(nfc, false);
1215 + nfi_write16(nfc, NFI_CON, 0);
1217 + return nand_prog_page_end_op(nand);
1220 +static int mt7621_nfc_write_page_raw(struct nand_chip *nand,
1221 + const uint8_t *buf, int oob_required,
1224 + struct mt7621_nfc *nfc = nand_get_controller_data(nand);
1227 + nand_prog_page_begin_op(nand, page, 0, NULL, 0);
1229 + nfi_write16(nfc, NFI_CNFG, (CNFG_OP_CUSTOM << CNFG_OP_MODE_S));
1231 + nfi_write16(nfc, NFI_CON,
1232 + CON_NFI_BWR | (nand->ecc.steps << CON_NFI_SEC_S));
1234 + for (i = 0; i < nand->ecc.steps; i++) {
1237 + mt7621_nfc_write_data(nfc, page_data_ptr(nand, buf, i),
1240 + mt7621_nfc_write_data_empty(nfc, nand->ecc.size);
1243 + mt7621_nfc_write_data(nfc, oob_fdm_ptr(nand, i),
1246 + /* Write dummy ECC parity data */
1247 + mt7621_nfc_write_data_empty(nfc, nfc->spare_per_sector -
1251 + mt7621_nfc_wait_write_completion(nfc, nand);
1253 + nfi_write16(nfc, NFI_CON, 0);
1255 + return nand_prog_page_end_op(nand);
1258 +static int mt7621_nfc_write_oob_hwecc(struct nand_chip *nand, int page)
1260 + return mt7621_nfc_write_page_hwecc(nand, NULL, 1, page);
1263 +static int mt7621_nfc_write_oob_raw(struct nand_chip *nand, int page)
1265 + return mt7621_nfc_write_page_raw(nand, NULL, 1, page);
1268 +static int mt7621_nfc_init_chip(struct mt7621_nfc *nfc)
1270 + struct nand_chip *nand = &nfc->nand;
1271 + struct mtd_info *mtd;
1274 + nand->controller = &nfc->controller;
1275 + nand_set_controller_data(nand, (void *)nfc);
1276 + nand_set_flash_node(nand, nfc->dev->of_node);
1278 + nand->options |= NAND_USE_BOUNCE_BUFFER | NAND_NO_SUBPAGE_WRITE;
1279 + if (!nfc->nfi_clk)
1280 + nand->options |= NAND_KEEP_TIMINGS;
1282 + nand->ecc.mode = NAND_ECC_HW_SYNDROME;
1283 + nand->ecc.read_page = mt7621_nfc_read_page_hwecc;
1284 + nand->ecc.read_page_raw = mt7621_nfc_read_page_raw;
1285 + nand->ecc.write_page = mt7621_nfc_write_page_hwecc;
1286 + nand->ecc.write_page_raw = mt7621_nfc_write_page_raw;
1287 + nand->ecc.read_oob = mt7621_nfc_read_oob_hwecc;
1288 + nand->ecc.read_oob_raw = mt7621_nfc_read_oob_raw;
1289 + nand->ecc.write_oob = mt7621_nfc_write_oob_hwecc;
1290 + nand->ecc.write_oob_raw = mt7621_nfc_write_oob_raw;
1292 + mtd = nand_to_mtd(nand);
1293 + mtd->owner = THIS_MODULE;
1294 + mtd->dev.parent = nfc->dev;
1295 + mtd->name = MT7621_NFC_NAME;
1296 + mtd_set_ooblayout(mtd, &mt7621_nfc_ooblayout_ops);
1298 + mt7621_nfc_hw_init(nfc);
1300 + ret = nand_scan(nand, 1);
1304 + ret = mtd_device_register(mtd, NULL, 0);
1306 + dev_err(nfc->dev, "Failed to register MTD: %d\n", ret);
1307 + nand_release(nand);
1314 +static int mt7621_nfc_probe(struct platform_device *pdev)
1316 + struct device *dev = &pdev->dev;
1317 + struct mt7621_nfc *nfc;
1318 + struct resource *res;
1321 + nfc = devm_kzalloc(dev, sizeof(*nfc), GFP_KERNEL);
1325 + nand_controller_init(&nfc->controller);
1326 + nfc->controller.ops = &mt7621_nfc_controller_ops;
1329 + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nfi");
1330 + nfc->nfi_regs = devm_ioremap_resource(dev, res);
1331 + if (IS_ERR(nfc->nfi_regs)) {
1332 + ret = PTR_ERR(nfc->nfi_regs);
1336 + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ecc");
1337 + nfc->ecc_regs = devm_ioremap_resource(dev, res);
1338 + if (IS_ERR(nfc->ecc_regs)) {
1339 + ret = PTR_ERR(nfc->ecc_regs);
1343 + nfc->nfi_clk = devm_clk_get(dev, "nfi_clk");
1344 + if (IS_ERR(nfc->nfi_clk)) {
1345 + dev_warn(dev, "nfi clk not provided\n");
1346 + nfc->nfi_clk = NULL;
1348 + ret = clk_prepare_enable(nfc->nfi_clk);
1350 + dev_err(dev, "Failed to enable nfi core clock\n");
1355 + platform_set_drvdata(pdev, nfc);
1357 + ret = mt7621_nfc_init_chip(nfc);
1359 + dev_err(dev, "Failed to initialize nand chip\n");
1366 + clk_disable_unprepare(nfc->nfi_clk);
1371 +static int mt7621_nfc_remove(struct platform_device *pdev)
1373 + struct mt7621_nfc *nfc = platform_get_drvdata(pdev);
1375 + nand_release(&nfc->nand);
1376 + clk_disable_unprepare(nfc->nfi_clk);
1381 +/* OF match table; MODULE_DEVICE_TABLE must reference the table's own name */
1381 +static const struct of_device_id mt7621_nfc_id_table[] = {
1382 +	{ .compatible = "mediatek,mt7621-nfc" },
1385 +MODULE_DEVICE_TABLE(of, mt7621_nfc_id_table);
1387 +static struct platform_driver mt7621_nfc_driver = {
1388 + .probe = mt7621_nfc_probe,
1389 + .remove = mt7621_nfc_remove,
1391 + .name = MT7621_NFC_NAME,
1392 + .owner = THIS_MODULE,
1393 + .of_match_table = mt7621_nfc_id_table,
1396 +module_platform_driver(mt7621_nfc_driver);
1398 +MODULE_LICENSE("GPL");
1399 +MODULE_AUTHOR("Weijie Gao <weijie.gao@mediatek.com>");
1400 +MODULE_DESCRIPTION("MediaTek MT7621 NAND Flash Controller driver");