1 From 8d94833f13ccd7e1dfea605cfdf9a8eb53505515 Mon Sep 17 00:00:00 2001
2 From: Weijie Gao <weijie.gao@mediatek.com>
3 Date: Fri, 20 May 2022 11:23:47 +0800
4 Subject: [PATCH 20/25] nand: raw: add support for MediaTek MT7621 SoC
6 This patch adds NAND flash controller driver for MediaTek MT7621 SoC.
7 The NAND flash controller of MT7621 supports only SLC NAND flashes.
8 It supports 4- to 12-bit error correction per 512-byte sector with a
maximum page size of 4KB.
10 Signed-off-by: Weijie Gao <weijie.gao@mediatek.com>
12 drivers/mtd/nand/raw/Kconfig | 17 +-
13 drivers/mtd/nand/raw/Makefile | 2 +
14 drivers/mtd/nand/raw/mt7621_nand.c | 1205 ++++++++++++++++++++++++
15 drivers/mtd/nand/raw/mt7621_nand.h | 29 +
16 drivers/mtd/nand/raw/mt7621_nand_spl.c | 237 +++++
17 5 files changed, 1488 insertions(+), 2 deletions(-)
18 create mode 100644 drivers/mtd/nand/raw/mt7621_nand.c
19 create mode 100644 drivers/mtd/nand/raw/mt7621_nand.h
20 create mode 100644 drivers/mtd/nand/raw/mt7621_nand_spl.c
22 --- a/drivers/mtd/nand/raw/Kconfig
23 +++ b/drivers/mtd/nand/raw/Kconfig
24 @@ -521,12 +521,25 @@ config TEGRA_NAND
26 Enables support for NAND Flash chips on Tegra SoCs platforms.
29 + bool "Support for MediaTek MT7621 NAND flash controller"
30 + depends on SOC_MT7621
31 + select SYS_NAND_SELF_INIT
32 + select SPL_SYS_NAND_SELF_INIT
35 + This enables NAND driver for the NAND flash controller on MediaTek
37 + The controller supports 4~12 bits correction per 512 bytes with a
38 + maximum 4KB page size.
40 comment "Generic NAND options"
42 config SYS_NAND_BLOCK_SIZE
43 hex "NAND chip eraseblock size"
44 depends on ARCH_SUNXI || SPL_NAND_SUPPORT || TPL_NAND_SUPPORT
45 - depends on !NAND_MXS && !NAND_DENALI_DT && !NAND_LPC32XX_MLC && !NAND_FSL_IFC
46 + depends on !NAND_MXS && !NAND_DENALI_DT && !NAND_LPC32XX_MLC && \
47 + !NAND_FSL_IFC && !NAND_MT7621
49 Number of data bytes in one eraseblock for the NAND chip on the
50 board. This is the multiple of NAND_PAGE_SIZE and the number of
51 @@ -551,7 +564,7 @@ config SYS_NAND_PAGE_SIZE
52 depends on ARCH_SUNXI || NAND_OMAP_GPMC || NAND_LPC32XX_SLC || \
53 SPL_NAND_SIMPLE || (NAND_MXC && SPL_NAND_SUPPORT) || \
54 (NAND_ATMEL && SPL_NAND_SUPPORT) || SPL_GENERATE_ATMEL_PMECC_HEADER
55 - depends on !NAND_MXS && !NAND_DENALI_DT && !NAND_LPC32XX_MLC
56 + depends on !NAND_MXS && !NAND_DENALI_DT && !NAND_LPC32XX_MLC && !NAND_MT7621
58 Number of data bytes in one page for the NAND chip on the
59 board, not including the OOB area.
60 --- a/drivers/mtd/nand/raw/Makefile
61 +++ b/drivers/mtd/nand/raw/Makefile
62 @@ -72,6 +72,7 @@ obj-$(CONFIG_NAND_ZYNQ) += zynq_nand.o
63 obj-$(CONFIG_NAND_STM32_FMC2) += stm32_fmc2_nand.o
64 obj-$(CONFIG_CORTINA_NAND) += cortina_nand.o
65 obj-$(CONFIG_ROCKCHIP_NAND) += rockchip_nfc.o
66 +obj-$(CONFIG_NAND_MT7621) += mt7621_nand.o
68 else # minimal SPL drivers
70 @@ -80,5 +81,6 @@ obj-$(CONFIG_NAND_FSL_IFC) += fsl_ifc_sp
71 obj-$(CONFIG_NAND_MXC) += mxc_nand_spl.o
72 obj-$(CONFIG_NAND_MXS) += mxs_nand_spl.o mxs_nand.o
73 obj-$(CONFIG_NAND_SUNXI) += sunxi_nand_spl.o
74 +obj-$(CONFIG_NAND_MT7621) += mt7621_nand_spl.o mt7621_nand.o
78 +++ b/drivers/mtd/nand/raw/mt7621_nand.c
80 +// SPDX-License-Identifier: GPL-2.0
82 + * Copyright (C) 2022 MediaTek Inc. All rights reserved.
84 + * Author: Weijie Gao <weijie.gao@mediatek.com>
90 +#include <asm/addrspace.h>
91 +#include <linux/io.h>
92 +#include <linux/iopoll.h>
93 +#include <linux/sizes.h>
94 +#include <linux/bitops.h>
95 +#include <linux/bitfield.h>
96 +#include "mt7621_nand.h"
98 +/* NFI core registers */
99 +#define NFI_CNFG 0x000
100 +#define CNFG_OP_MODE GENMASK(14, 12)
101 +#define CNFG_OP_CUSTOM 6
102 +#define CNFG_AUTO_FMT_EN BIT(9)
103 +#define CNFG_HW_ECC_EN BIT(8)
104 +#define CNFG_BYTE_RW BIT(6)
105 +#define CNFG_READ_MODE BIT(1)
107 +#define NFI_PAGEFMT 0x004
108 +#define PAGEFMT_FDM_ECC GENMASK(15, 12)
109 +#define PAGEFMT_FDM GENMASK(11, 8)
110 +#define PAGEFMT_SPARE GENMASK(5, 4)
111 +#define PAGEFMT_PAGE GENMASK(1, 0)
113 +#define NFI_CON 0x008
114 +#define CON_NFI_SEC GENMASK(15, 12)
115 +#define CON_NFI_BWR BIT(9)
116 +#define CON_NFI_BRD BIT(8)
117 +#define CON_NFI_RST BIT(1)
118 +#define CON_FIFO_FLUSH BIT(0)
120 +#define NFI_ACCCON 0x00c
121 +#define ACCCON_POECS GENMASK(31, 28)
122 +#define ACCCON_POECS_DEF 3
123 +#define ACCCON_PRECS GENMASK(27, 22)
124 +#define ACCCON_PRECS_DEF 3
125 +#define ACCCON_C2R GENMASK(21, 16)
126 +#define ACCCON_C2R_DEF 7
127 +#define ACCCON_W2R GENMASK(15, 12)
128 +#define ACCCON_W2R_DEF 7
129 +#define ACCCON_WH GENMASK(11, 8)
130 +#define ACCCON_WH_DEF 15
131 +#define ACCCON_WST GENMASK(7, 4)
132 +#define ACCCON_WST_DEF 15
133 +#define ACCCON_WST_MIN 3
134 +#define ACCCON_RLT GENMASK(3, 0)
135 +#define ACCCON_RLT_DEF 15
136 +#define ACCCON_RLT_MIN 3
138 +#define NFI_CMD 0x020
140 +#define NFI_ADDRNOB 0x030
141 +#define ADDR_ROW_NOB GENMASK(6, 4)
142 +#define ADDR_COL_NOB GENMASK(2, 0)
144 +#define NFI_COLADDR 0x034
145 +#define NFI_ROWADDR 0x038
147 +#define NFI_STRDATA 0x040
148 +#define STR_DATA BIT(0)
150 +#define NFI_CNRNB 0x044
151 +#define CB2R_TIME GENMASK(7, 4)
152 +#define STR_CNRNB BIT(0)
154 +#define NFI_DATAW 0x050
155 +#define NFI_DATAR 0x054
157 +#define NFI_PIO_DIRDY 0x058
158 +#define PIO_DIRDY BIT(0)
160 +#define NFI_STA 0x060
161 +#define STA_NFI_FSM GENMASK(19, 16)
162 +#define STA_FSM_CUSTOM_DATA 14
163 +#define STA_BUSY BIT(8)
164 +#define STA_ADDR BIT(1)
165 +#define STA_CMD BIT(0)
167 +#define NFI_ADDRCNTR 0x070
168 +#define SEC_CNTR GENMASK(15, 12)
169 +#define SEC_ADDR GENMASK(9, 0)
171 +#define NFI_CSEL 0x090
172 +#define CSEL GENMASK(1, 0)
174 +#define NFI_FDM0L 0x0a0
175 +#define NFI_FDML(n) (0x0a0 + ((n) << 3))
177 +#define NFI_FDM0M 0x0a4
178 +#define NFI_FDMM(n) (0x0a4 + ((n) << 3))
180 +#define NFI_MASTER_STA 0x210
181 +#define MAS_ADDR GENMASK(11, 9)
182 +#define MAS_RD GENMASK(8, 6)
183 +#define MAS_WR GENMASK(5, 3)
184 +#define MAS_RDDLY GENMASK(2, 0)
186 +/* ECC engine registers */
187 +#define ECC_ENCCON 0x000
188 +#define ENC_EN BIT(0)
190 +#define ECC_ENCCNFG 0x004
191 +#define ENC_CNFG_MSG GENMASK(28, 16)
192 +#define ENC_MODE GENMASK(5, 4)
193 +#define ENC_MODE_NFI 1
194 +#define ENC_TNUM GENMASK(2, 0)
196 +#define ECC_ENCIDLE 0x00c
197 +#define ENC_IDLE BIT(0)
199 +#define ECC_DECCON 0x100
200 +#define DEC_EN BIT(0)
202 +#define ECC_DECCNFG 0x104
203 +#define DEC_EMPTY_EN BIT(31)
204 +#define DEC_CS GENMASK(28, 16)
205 +#define DEC_CON GENMASK(13, 12)
206 +#define DEC_CON_EL 2
207 +#define DEC_MODE GENMASK(5, 4)
208 +#define DEC_MODE_NFI 1
209 +#define DEC_TNUM GENMASK(2, 0)
211 +#define ECC_DECIDLE 0x10c
212 +#define DEC_IDLE BIT(1)
214 +#define ECC_DECENUM 0x114
216 +#define ERRNUM_M GENMASK(3, 0)
218 +#define ECC_DECDONE 0x118
219 +#define DEC_DONE7 BIT(7)
220 +#define DEC_DONE6 BIT(6)
221 +#define DEC_DONE5 BIT(5)
222 +#define DEC_DONE4 BIT(4)
223 +#define DEC_DONE3 BIT(3)
224 +#define DEC_DONE2 BIT(2)
225 +#define DEC_DONE1 BIT(1)
226 +#define DEC_DONE0 BIT(0)
228 +#define ECC_DECEL(n) (0x11c + (n) * 4)
229 +#define DEC_EL_ODD_S 16
230 +#define DEC_EL_M 0x1fff
231 +#define DEC_EL_BYTE_POS_S 3
232 +#define DEC_EL_BIT_POS_M GENMASK(2, 0)
234 +#define ECC_FDMADDR 0x13c
236 +/* ENCIDLE and DECIDLE */
237 +#define ECC_IDLE BIT(0)
239 +#define ACCTIMING(tpoecs, tprecs, tc2r, tw2r, twh, twst, trlt) \
240 + (FIELD_PREP(ACCCON_POECS, tpoecs) | \
241 + FIELD_PREP(ACCCON_PRECS, tprecs) | \
242 + FIELD_PREP(ACCCON_C2R, tc2r) | \
243 + FIELD_PREP(ACCCON_W2R, tw2r) | \
244 + FIELD_PREP(ACCCON_WH, twh) | \
245 + FIELD_PREP(ACCCON_WST, twst) | \
246 + FIELD_PREP(ACCCON_RLT, trlt))
248 +#define MASTER_STA_MASK (MAS_ADDR | MAS_RD | MAS_WR | \
250 +#define NFI_RESET_TIMEOUT 1000000
251 +#define NFI_CORE_TIMEOUT 500000
252 +#define ECC_ENGINE_TIMEOUT 500000
254 +#define ECC_SECTOR_SIZE 512
255 +#define ECC_PARITY_BITS 13
257 +#define NFI_FDM_SIZE 8
260 +#define NFI_BASE 0x1e003000
261 +#define NFI_ECC_BASE 0x1e003800
263 +static struct mt7621_nfc nfc_dev;
265 +static const u16 mt7621_nfi_page_size[] = { SZ_512, SZ_2K, SZ_4K };
266 +static const u8 mt7621_nfi_spare_size[] = { 16, 26, 27, 28 };
267 +static const u8 mt7621_ecc_strength[] = { 4, 6, 8, 10, 12 };
269 +static inline u32 nfi_read32(struct mt7621_nfc *nfc, u32 reg)
271 + return readl(nfc->nfi_regs + reg);
274 +static inline void nfi_write32(struct mt7621_nfc *nfc, u32 reg, u32 val)
276 + writel(val, nfc->nfi_regs + reg);
279 +static inline u16 nfi_read16(struct mt7621_nfc *nfc, u32 reg)
281 + return readw(nfc->nfi_regs + reg);
284 +static inline void nfi_write16(struct mt7621_nfc *nfc, u32 reg, u16 val)
286 + writew(val, nfc->nfi_regs + reg);
289 +static inline void ecc_write16(struct mt7621_nfc *nfc, u32 reg, u16 val)
291 + writew(val, nfc->ecc_regs + reg);
294 +static inline u32 ecc_read32(struct mt7621_nfc *nfc, u32 reg)
296 + return readl(nfc->ecc_regs + reg);
299 +static inline void ecc_write32(struct mt7621_nfc *nfc, u32 reg, u32 val)
301 + return writel(val, nfc->ecc_regs + reg);
304 +static inline u8 *oob_fdm_ptr(struct nand_chip *nand, int sect)
306 + return nand->oob_poi + sect * NFI_FDM_SIZE;
309 +static inline u8 *oob_ecc_ptr(struct mt7621_nfc *nfc, int sect)
311 + struct nand_chip *nand = &nfc->nand;
313 + return nand->oob_poi + nand->ecc.steps * NFI_FDM_SIZE +
314 + sect * (nfc->spare_per_sector - NFI_FDM_SIZE);
317 +static inline u8 *page_data_ptr(struct nand_chip *nand, const u8 *buf,
320 + return (u8 *)buf + sect * nand->ecc.size;
323 +static int mt7621_ecc_wait_idle(struct mt7621_nfc *nfc, u32 reg)
328 + ret = readw_poll_timeout(nfc->ecc_regs + reg, val, val & ECC_IDLE,
329 + ECC_ENGINE_TIMEOUT);
331 + pr_warn("ECC engine timed out entering idle mode\n");
338 +static int mt7621_ecc_decoder_wait_done(struct mt7621_nfc *nfc, u32 sect)
343 + ret = readw_poll_timeout(nfc->ecc_regs + ECC_DECDONE, val,
344 + val & (1 << sect), ECC_ENGINE_TIMEOUT);
346 + pr_warn("ECC decoder for sector %d timed out\n", sect);
353 +static void mt7621_ecc_encoder_op(struct mt7621_nfc *nfc, bool enable)
355 + mt7621_ecc_wait_idle(nfc, ECC_ENCIDLE);
356 + ecc_write16(nfc, ECC_ENCCON, enable ? ENC_EN : 0);
359 +static void mt7621_ecc_decoder_op(struct mt7621_nfc *nfc, bool enable)
361 + mt7621_ecc_wait_idle(nfc, ECC_DECIDLE);
362 + ecc_write16(nfc, ECC_DECCON, enable ? DEC_EN : 0);
365 +static int mt7621_ecc_correct_check(struct mt7621_nfc *nfc, u8 *sector_buf,
366 + u8 *fdm_buf, u32 sect)
368 + struct nand_chip *nand = &nfc->nand;
369 + u32 decnum, num_error_bits, fdm_end_bits;
370 + u32 error_locations, error_bit_loc;
371 + u32 error_byte_pos, error_bit_pos;
375 + decnum = ecc_read32(nfc, ECC_DECENUM);
376 + num_error_bits = (decnum >> (sect << ERRNUM_S)) & ERRNUM_M;
377 + fdm_end_bits = (nand->ecc.size + NFI_FDM_SIZE) << 3;
379 + if (!num_error_bits)
382 + if (num_error_bits == ERRNUM_M)
385 + for (i = 0; i < num_error_bits; i++) {
386 + error_locations = ecc_read32(nfc, ECC_DECEL(i / 2));
387 + error_bit_loc = (error_locations >> ((i % 2) * DEC_EL_ODD_S)) &
389 + error_byte_pos = error_bit_loc >> DEC_EL_BYTE_POS_S;
390 + error_bit_pos = error_bit_loc & DEC_EL_BIT_POS_M;
392 + if (error_bit_loc < (nand->ecc.size << 3)) {
394 + sector_buf[error_byte_pos] ^=
395 + (1 << error_bit_pos);
397 + } else if (error_bit_loc < fdm_end_bits) {
399 + fdm_buf[error_byte_pos - nand->ecc.size] ^=
400 + (1 << error_bit_pos);
410 +static int mt7621_nfc_wait_write_completion(struct mt7621_nfc *nfc,
411 + struct nand_chip *nand)
416 + ret = readw_poll_timeout(nfc->nfi_regs + NFI_ADDRCNTR, val,
417 + FIELD_GET(SEC_CNTR, val) >= nand->ecc.steps,
421 + pr_warn("NFI core write operation timed out\n");
428 +static void mt7621_nfc_hw_reset(struct mt7621_nfc *nfc)
433 + /* reset all registers and force the NFI master to terminate */
434 + nfi_write16(nfc, NFI_CON, CON_FIFO_FLUSH | CON_NFI_RST);
436 + /* wait for the master to finish the last transaction */
437 + ret = readw_poll_timeout(nfc->nfi_regs + NFI_MASTER_STA, val,
438 + !(val & MASTER_STA_MASK), NFI_RESET_TIMEOUT);
440 + pr_warn("Failed to reset NFI master in %dms\n",
441 + NFI_RESET_TIMEOUT);
444 + /* ensure any status register affected by the NFI master is reset */
445 + nfi_write16(nfc, NFI_CON, CON_FIFO_FLUSH | CON_NFI_RST);
446 + nfi_write16(nfc, NFI_STRDATA, 0);
449 +static inline void mt7621_nfc_hw_init(struct mt7621_nfc *nfc)
454 + * CNRNB: nand ready/busy register
455 + * -------------------------------
456 + * 7:4: timeout register for polling the NAND busy/ready signal
457 + * 0 : poll the status of the busy/ready signal after [7:4]*16 cycles.
459 + nfi_write16(nfc, NFI_CNRNB, CB2R_TIME | STR_CNRNB);
461 + mt7621_nfc_hw_reset(nfc);
463 + /* Apply default access timing */
464 + acccon = ACCTIMING(ACCCON_POECS_DEF, ACCCON_PRECS_DEF, ACCCON_C2R_DEF,
465 + ACCCON_W2R_DEF, ACCCON_WH_DEF, ACCCON_WST_DEF,
468 + nfi_write32(nfc, NFI_ACCCON, acccon);
471 +static int mt7621_nfc_send_command(struct mt7621_nfc *nfc, u8 command)
476 + nfi_write32(nfc, NFI_CMD, command);
478 + ret = readl_poll_timeout(nfc->nfi_regs + NFI_STA, val, !(val & STA_CMD),
481 + pr_warn("NFI core timed out entering command mode\n");
488 +static int mt7621_nfc_send_address_byte(struct mt7621_nfc *nfc, int addr)
493 + nfi_write32(nfc, NFI_COLADDR, addr);
494 + nfi_write32(nfc, NFI_ROWADDR, 0);
495 + nfi_write16(nfc, NFI_ADDRNOB, 1);
497 + ret = readl_poll_timeout(nfc->nfi_regs + NFI_STA, val,
498 + !(val & STA_ADDR), NFI_CORE_TIMEOUT);
500 + pr_warn("NFI core timed out entering address mode\n");
507 +static void mt7621_nfc_cmd_ctrl(struct mtd_info *mtd, int dat,
510 + struct mt7621_nfc *nfc = nand_get_controller_data(mtd_to_nand(mtd));
512 + if (ctrl & NAND_ALE) {
513 + mt7621_nfc_send_address_byte(nfc, dat & 0xff);
514 + } else if (ctrl & NAND_CLE) {
515 + mt7621_nfc_hw_reset(nfc);
516 + nfi_write16(nfc, NFI_CNFG,
517 + FIELD_PREP(CNFG_OP_MODE, CNFG_OP_CUSTOM));
518 + mt7621_nfc_send_command(nfc, dat);
522 +static int mt7621_nfc_dev_ready(struct mtd_info *mtd)
524 + struct mt7621_nfc *nfc = nand_get_controller_data(mtd_to_nand(mtd));
526 + if (nfi_read32(nfc, NFI_STA) & STA_BUSY)
532 +static void mt7621_nfc_select_chip(struct mtd_info *mtd, int chipnr)
534 + struct mt7621_nfc *nfc = nand_get_controller_data(mtd_to_nand(mtd));
536 + nfi_write16(nfc, NFI_CSEL, 0);
539 +static void mt7621_nfc_wait_pio_ready(struct mt7621_nfc *nfc)
544 + ret = readw_poll_timeout(nfc->nfi_regs + NFI_PIO_DIRDY, val,
545 + val & PIO_DIRDY, NFI_CORE_TIMEOUT);
547 + pr_err("NFI core PIO mode not ready\n");
550 +static u32 mt7621_nfc_pio_read(struct mt7621_nfc *nfc, bool br)
554 + /* after each byte read, the NFI_STA reg is reset by the hardware */
555 + reg = nfi_read32(nfc, NFI_STA);
556 + fsm = FIELD_GET(STA_NFI_FSM, reg);
558 + if (fsm != STA_FSM_CUSTOM_DATA) {
559 + reg = nfi_read16(nfc, NFI_CNFG);
560 + reg |= CNFG_READ_MODE | CNFG_BYTE_RW;
562 + reg &= ~CNFG_BYTE_RW;
563 + nfi_write16(nfc, NFI_CNFG, reg);
566 + * set to max sector to allow the HW to continue reading over
567 + * unaligned accesses
569 + nfi_write16(nfc, NFI_CON, CON_NFI_SEC | CON_NFI_BRD);
571 + /* trigger to fetch data */
572 + nfi_write16(nfc, NFI_STRDATA, STR_DATA);
575 + mt7621_nfc_wait_pio_ready(nfc);
577 + return nfi_read32(nfc, NFI_DATAR);
580 +static void mt7621_nfc_read_data(struct mt7621_nfc *nfc, u8 *buf, u32 len)
582 + while (((uintptr_t)buf & 3) && len) {
583 + *buf = mt7621_nfc_pio_read(nfc, true);
589 + *(u32 *)buf = mt7621_nfc_pio_read(nfc, false);
595 + *buf = mt7621_nfc_pio_read(nfc, true);
601 +static void mt7621_nfc_read_data_discard(struct mt7621_nfc *nfc, u32 len)
604 + mt7621_nfc_pio_read(nfc, false);
609 + mt7621_nfc_pio_read(nfc, true);
614 +static void mt7621_nfc_pio_write(struct mt7621_nfc *nfc, u32 val, bool bw)
618 + reg = nfi_read32(nfc, NFI_STA);
619 + fsm = FIELD_GET(STA_NFI_FSM, reg);
621 + if (fsm != STA_FSM_CUSTOM_DATA) {
622 + reg = nfi_read16(nfc, NFI_CNFG);
623 + reg &= ~(CNFG_READ_MODE | CNFG_BYTE_RW);
625 + reg |= CNFG_BYTE_RW;
626 + nfi_write16(nfc, NFI_CNFG, reg);
628 + nfi_write16(nfc, NFI_CON, CON_NFI_SEC | CON_NFI_BWR);
629 + nfi_write16(nfc, NFI_STRDATA, STR_DATA);
632 + mt7621_nfc_wait_pio_ready(nfc);
633 + nfi_write32(nfc, NFI_DATAW, val);
636 +static void mt7621_nfc_write_data(struct mt7621_nfc *nfc, const u8 *buf,
639 + while (((uintptr_t)buf & 3) && len) {
640 + mt7621_nfc_pio_write(nfc, *buf, true);
646 + mt7621_nfc_pio_write(nfc, *(const u32 *)buf, false);
652 + mt7621_nfc_pio_write(nfc, *buf, true);
658 +static void mt7621_nfc_write_data_empty(struct mt7621_nfc *nfc, u32 len)
661 + mt7621_nfc_pio_write(nfc, 0xffffffff, false);
666 + mt7621_nfc_pio_write(nfc, 0xff, true);
671 +static void mt7621_nfc_write_byte(struct mtd_info *mtd, u8 byte)
673 + struct mt7621_nfc *nfc = nand_get_controller_data(mtd_to_nand(mtd));
675 + mt7621_nfc_pio_write(nfc, byte, true);
678 +static void mt7621_nfc_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
680 + struct mt7621_nfc *nfc = nand_get_controller_data(mtd_to_nand(mtd));
682 + return mt7621_nfc_write_data(nfc, buf, len);
685 +static u8 mt7621_nfc_read_byte(struct mtd_info *mtd)
687 + struct mt7621_nfc *nfc = nand_get_controller_data(mtd_to_nand(mtd));
689 + return mt7621_nfc_pio_read(nfc, true);
692 +static void mt7621_nfc_read_buf(struct mtd_info *mtd, u8 *buf, int len)
694 + struct mt7621_nfc *nfc = nand_get_controller_data(mtd_to_nand(mtd));
696 + mt7621_nfc_read_data(nfc, buf, len);
699 +static int mt7621_nfc_calc_ecc_strength(struct mt7621_nfc *nfc,
700 + u32 avail_ecc_bytes)
702 + struct nand_chip *nand = &nfc->nand;
703 + struct mtd_info *mtd = nand_to_mtd(nand);
707 + strength = avail_ecc_bytes * 8 / ECC_PARITY_BITS;
709 + /* Find the closest supported ecc strength */
710 + for (i = ARRAY_SIZE(mt7621_ecc_strength) - 1; i >= 0; i--) {
711 + if (mt7621_ecc_strength[i] <= strength)
715 + if (unlikely(i < 0)) {
716 + pr_err("OOB size (%u) is not supported\n", mtd->oobsize);
720 + nand->ecc.strength = mt7621_ecc_strength[i];
721 + nand->ecc.bytes = DIV_ROUND_UP(nand->ecc.strength * ECC_PARITY_BITS, 8);
723 + pr_debug("ECC strength adjusted to %u bits\n", nand->ecc.strength);
728 +static int mt7621_nfc_set_spare_per_sector(struct mt7621_nfc *nfc)
730 + struct nand_chip *nand = &nfc->nand;
731 + struct mtd_info *mtd = nand_to_mtd(nand);
735 + size = nand->ecc.bytes + NFI_FDM_SIZE;
737 + /* Find the closest supported spare size */
738 + for (i = 0; i < ARRAY_SIZE(mt7621_nfi_spare_size); i++) {
739 + if (mt7621_nfi_spare_size[i] >= size)
743 + if (unlikely(i >= ARRAY_SIZE(mt7621_nfi_spare_size))) {
744 + pr_err("OOB size (%u) is not supported\n", mtd->oobsize);
748 + nfc->spare_per_sector = mt7621_nfi_spare_size[i];
753 +static int mt7621_nfc_ecc_init(struct mt7621_nfc *nfc)
755 + struct nand_chip *nand = &nfc->nand;
756 + struct mtd_info *mtd = nand_to_mtd(nand);
757 + u32 avail_ecc_bytes, encode_block_size, decode_block_size;
758 + u32 ecc_enccfg, ecc_deccfg;
761 + nand->ecc.options |= NAND_ECC_CUSTOM_PAGE_ACCESS;
763 + nand->ecc.size = ECC_SECTOR_SIZE;
764 + nand->ecc.steps = mtd->writesize / nand->ecc.size;
766 + avail_ecc_bytes = mtd->oobsize / nand->ecc.steps - NFI_FDM_SIZE;
768 + ecc_cap = mt7621_nfc_calc_ecc_strength(nfc, avail_ecc_bytes);
773 + encode_block_size = (nand->ecc.size + NFI_FDM_SIZE) * 8;
774 + ecc_enccfg = ecc_cap | FIELD_PREP(ENC_MODE, ENC_MODE_NFI) |
775 + FIELD_PREP(ENC_CNFG_MSG, encode_block_size);
777 + /* Sector + FDM + ECC parity bits */
778 + decode_block_size = ((nand->ecc.size + NFI_FDM_SIZE) * 8) +
779 + nand->ecc.strength * ECC_PARITY_BITS;
780 + ecc_deccfg = ecc_cap | FIELD_PREP(DEC_MODE, DEC_MODE_NFI) |
781 + FIELD_PREP(DEC_CS, decode_block_size) |
782 + FIELD_PREP(DEC_CON, DEC_CON_EL) | DEC_EMPTY_EN;
784 + mt7621_ecc_encoder_op(nfc, false);
785 + ecc_write32(nfc, ECC_ENCCNFG, ecc_enccfg);
787 + mt7621_ecc_decoder_op(nfc, false);
788 + ecc_write32(nfc, ECC_DECCNFG, ecc_deccfg);
793 +static int mt7621_nfc_set_page_format(struct mt7621_nfc *nfc)
795 + struct nand_chip *nand = &nfc->nand;
796 + struct mtd_info *mtd = nand_to_mtd(nand);
800 + spare_size = mt7621_nfc_set_spare_per_sector(nfc);
801 + if (spare_size < 0)
804 + for (i = 0; i < ARRAY_SIZE(mt7621_nfi_page_size); i++) {
805 + if (mt7621_nfi_page_size[i] == mtd->writesize)
809 + if (unlikely(i >= ARRAY_SIZE(mt7621_nfi_page_size))) {
810 + pr_err("Page size (%u) is not supported\n", mtd->writesize);
814 + pagefmt = FIELD_PREP(PAGEFMT_PAGE, i) |
815 + FIELD_PREP(PAGEFMT_SPARE, spare_size) |
816 + FIELD_PREP(PAGEFMT_FDM, NFI_FDM_SIZE) |
817 + FIELD_PREP(PAGEFMT_FDM_ECC, NFI_FDM_SIZE);
819 + nfi_write16(nfc, NFI_PAGEFMT, pagefmt);
824 +static int mt7621_nfc_attach_chip(struct nand_chip *nand)
826 + struct mt7621_nfc *nfc = nand_get_controller_data(nand);
829 + if (nand->options & NAND_BUSWIDTH_16) {
830 + pr_err("16-bit buswidth is not supported");
834 + ret = mt7621_nfc_ecc_init(nfc);
838 + return mt7621_nfc_set_page_format(nfc);
841 +static void mt7621_nfc_write_fdm(struct mt7621_nfc *nfc)
843 + struct nand_chip *nand = &nfc->nand;
848 + for (i = 0; i < nand->ecc.steps; i++) {
851 + oobptr = oob_fdm_ptr(nand, i);
853 + for (j = 0; j < 4; j++)
854 + vall |= (u32)oobptr[j] << (j * 8);
856 + for (j = 0; j < 4; j++)
857 + valm |= (u32)oobptr[j + 4] << (j * 8);
859 + nfi_write32(nfc, NFI_FDML(i), vall);
860 + nfi_write32(nfc, NFI_FDMM(i), valm);
864 +static void mt7621_nfc_read_sector_fdm(struct mt7621_nfc *nfc, u32 sect)
866 + struct nand_chip *nand = &nfc->nand;
871 + vall = nfi_read32(nfc, NFI_FDML(sect));
872 + valm = nfi_read32(nfc, NFI_FDMM(sect));
873 + oobptr = oob_fdm_ptr(nand, sect);
875 + for (i = 0; i < 4; i++)
876 + oobptr[i] = (vall >> (i * 8)) & 0xff;
878 + for (i = 0; i < 4; i++)
879 + oobptr[i + 4] = (valm >> (i * 8)) & 0xff;
882 +static int mt7621_nfc_read_page_hwecc(struct mtd_info *mtd,
883 + struct nand_chip *nand, uint8_t *buf,
884 + int oob_required, int page)
886 + struct mt7621_nfc *nfc = nand_get_controller_data(nand);
887 + int bitflips = 0, ret = 0;
890 + nand_read_page_op(nand, page, 0, NULL, 0);
892 + nfi_write16(nfc, NFI_CNFG, FIELD_PREP(CNFG_OP_MODE, CNFG_OP_CUSTOM) |
893 + CNFG_READ_MODE | CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN);
895 + mt7621_ecc_decoder_op(nfc, true);
897 + nfi_write16(nfc, NFI_CON, FIELD_PREP(CON_NFI_SEC, nand->ecc.steps) |
900 + for (i = 0; i < nand->ecc.steps; i++) {
902 + mt7621_nfc_read_data(nfc, page_data_ptr(nand, buf, i),
905 + mt7621_nfc_read_data_discard(nfc, nand->ecc.size);
907 + rc = mt7621_ecc_decoder_wait_done(nfc, i);
909 + mt7621_nfc_read_sector_fdm(nfc, i);
916 + rc = mt7621_ecc_correct_check(nfc,
917 + buf ? page_data_ptr(nand, buf, i) : NULL,
918 + oob_fdm_ptr(nand, i), i);
921 + pr_warn("Uncorrectable ECC error at page %d step %d\n",
923 + bitflips = nand->ecc.strength + 1;
924 + mtd->ecc_stats.failed++;
928 + mtd->ecc_stats.corrected += rc;
932 + mt7621_ecc_decoder_op(nfc, false);
934 + nfi_write16(nfc, NFI_CON, 0);
942 +static int mt7621_nfc_read_page_raw(struct mtd_info *mtd,
943 + struct nand_chip *nand, uint8_t *buf,
944 + int oob_required, int page)
946 + struct mt7621_nfc *nfc = nand_get_controller_data(nand);
949 + nand_read_page_op(nand, page, 0, NULL, 0);
951 + nfi_write16(nfc, NFI_CNFG, FIELD_PREP(CNFG_OP_MODE, CNFG_OP_CUSTOM) |
954 + nfi_write16(nfc, NFI_CON, FIELD_PREP(CON_NFI_SEC, nand->ecc.steps) |
957 + for (i = 0; i < nand->ecc.steps; i++) {
960 + mt7621_nfc_read_data(nfc, page_data_ptr(nand, buf, i),
963 + mt7621_nfc_read_data_discard(nfc, nand->ecc.size);
966 + mt7621_nfc_read_data(nfc, oob_fdm_ptr(nand, i), NFI_FDM_SIZE);
968 + /* Read ECC parity data */
969 + mt7621_nfc_read_data(nfc, oob_ecc_ptr(nfc, i),
970 + nfc->spare_per_sector - NFI_FDM_SIZE);
973 + nfi_write16(nfc, NFI_CON, 0);
978 +static int mt7621_nfc_read_oob_hwecc(struct mtd_info *mtd,
979 + struct nand_chip *nand, int page)
981 + return mt7621_nfc_read_page_hwecc(mtd, nand, NULL, 1, page);
984 +static int mt7621_nfc_read_oob_raw(struct mtd_info *mtd,
985 + struct nand_chip *nand, int page)
987 + return mt7621_nfc_read_page_raw(mtd, nand, NULL, 1, page);
990 +static int mt7621_nfc_check_empty_page(struct nand_chip *nand, const u8 *buf)
992 + struct mtd_info *mtd = nand_to_mtd(nand);
997 + for (i = 0; i < mtd->writesize; i++)
998 + if (buf[i] != 0xff)
1002 + for (i = 0; i < nand->ecc.steps; i++) {
1003 + oobptr = oob_fdm_ptr(nand, i);
1004 + for (j = 0; j < NFI_FDM_SIZE; j++)
1005 + if (oobptr[j] != 0xff)
1012 +static int mt7621_nfc_write_page_hwecc(struct mtd_info *mtd,
1013 + struct nand_chip *nand,
1014 + const u8 *buf, int oob_required,
1017 + struct mt7621_nfc *nfc = nand_get_controller_data(nand);
1019 + if (mt7621_nfc_check_empty_page(nand, buf)) {
1021 + * MT7621 ECC engine always generates parity code for input
1022 + * pages, even for empty pages. Doing so will write back ECC
1023 + * parity code to the oob region, which means such pages will
1024 + * no longer be empty pages.
1026 + * To avoid this, stop write operation if current page is an
1032 + nand_prog_page_begin_op(nand, page, 0, NULL, 0);
1034 + nfi_write16(nfc, NFI_CNFG, FIELD_PREP(CNFG_OP_MODE, CNFG_OP_CUSTOM) |
1035 + CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN);
1037 + mt7621_ecc_encoder_op(nfc, true);
1039 + mt7621_nfc_write_fdm(nfc);
1041 + nfi_write16(nfc, NFI_CON, FIELD_PREP(CON_NFI_SEC, nand->ecc.steps) |
1045 + mt7621_nfc_write_data(nfc, buf, mtd->writesize);
1047 + mt7621_nfc_write_data_empty(nfc, mtd->writesize);
1049 + mt7621_nfc_wait_write_completion(nfc, nand);
1051 + mt7621_ecc_encoder_op(nfc, false);
1053 + nfi_write16(nfc, NFI_CON, 0);
1055 + return nand_prog_page_end_op(nand);
1058 +static int mt7621_nfc_write_page_raw(struct mtd_info *mtd,
1059 + struct nand_chip *nand,
1060 + const u8 *buf, int oob_required,
1063 + struct mt7621_nfc *nfc = nand_get_controller_data(nand);
1066 + nand_prog_page_begin_op(nand, page, 0, NULL, 0);
1068 + nfi_write16(nfc, NFI_CNFG, FIELD_PREP(CNFG_OP_MODE, CNFG_OP_CUSTOM));
1070 + nfi_write16(nfc, NFI_CON, FIELD_PREP(CON_NFI_SEC, nand->ecc.steps) |
1073 + for (i = 0; i < nand->ecc.steps; i++) {
1076 + mt7621_nfc_write_data(nfc, page_data_ptr(nand, buf, i),
1079 + mt7621_nfc_write_data_empty(nfc, nand->ecc.size);
1082 + mt7621_nfc_write_data(nfc, oob_fdm_ptr(nand, i),
1085 + /* Write dummy ECC parity data */
1086 + mt7621_nfc_write_data_empty(nfc, nfc->spare_per_sector -
1090 + mt7621_nfc_wait_write_completion(nfc, nand);
1092 + nfi_write16(nfc, NFI_CON, 0);
1094 + return nand_prog_page_end_op(nand);
1097 +static int mt7621_nfc_write_oob_hwecc(struct mtd_info *mtd,
1098 + struct nand_chip *nand, int page)
1100 + return mt7621_nfc_write_page_hwecc(mtd, nand, NULL, 1, page);
1103 +static int mt7621_nfc_write_oob_raw(struct mtd_info *mtd,
1104 + struct nand_chip *nand, int page)
1106 + return mt7621_nfc_write_page_raw(mtd, nand, NULL, 1, page);
1109 +static int mt7621_nfc_ooblayout_free(struct mtd_info *mtd, int section,
1110 + struct mtd_oob_region *oob_region)
1112 + struct nand_chip *nand = mtd_to_nand(mtd);
1114 + if (section >= nand->ecc.steps)
1117 + oob_region->length = NFI_FDM_SIZE - 1;
1118 + oob_region->offset = section * NFI_FDM_SIZE + 1;
1123 +static int mt7621_nfc_ooblayout_ecc(struct mtd_info *mtd, int section,
1124 + struct mtd_oob_region *oob_region)
1126 + struct nand_chip *nand = mtd_to_nand(mtd);
1131 + oob_region->offset = NFI_FDM_SIZE * nand->ecc.steps;
1132 + oob_region->length = mtd->oobsize - oob_region->offset;
1137 +static const struct mtd_ooblayout_ops mt7621_nfc_ooblayout_ops = {
1138 + .rfree = mt7621_nfc_ooblayout_free,
1139 + .ecc = mt7621_nfc_ooblayout_ecc,
1143 + * This function will override the default one which is not supposed to be
1144 + * used for ECC syndrome based pages.
1146 +static int mt7621_nfc_block_bad(struct mtd_info *mtd, loff_t ofs)
1148 + struct nand_chip *nand = mtd_to_nand(mtd);
1149 + struct mtd_oob_ops ops;
1153 + memset(&ops, 0, sizeof(ops));
1154 + ops.oobbuf = (uint8_t *)&bad;
1155 + ops.ooboffs = nand->badblockpos;
1156 + if (nand->options & NAND_BUSWIDTH_16) {
1157 + ops.ooboffs &= ~0x01;
1162 + ops.mode = MTD_OPS_RAW;
1164 + /* Read from first/last page(s) if necessary */
1165 + if (nand->bbt_options & NAND_BBT_SCANLASTPAGE)
1166 + ofs += mtd->erasesize - mtd->writesize;
1169 + ret = mtd_read_oob(mtd, ofs, &ops);
1173 + if (likely(nand->badblockbits == 8))
1174 + ret = bad != 0xFF;
1176 + ret = hweight8(bad) < nand->badblockbits;
1179 + ofs += mtd->writesize;
1180 + } while (!ret && (nand->bbt_options & NAND_BBT_SCAN2NDPAGE) && i < 2);
1185 +static void mt7621_nfc_init_chip(struct mt7621_nfc *nfc)
1187 + struct nand_chip *nand = &nfc->nand;
1188 + struct mtd_info *mtd;
1191 + nand_set_controller_data(nand, nfc);
1193 + nand->options |= NAND_NO_SUBPAGE_WRITE;
1195 + nand->ecc.mode = NAND_ECC_HW_SYNDROME;
1196 + nand->ecc.read_page = mt7621_nfc_read_page_hwecc;
1197 + nand->ecc.read_page_raw = mt7621_nfc_read_page_raw;
1198 + nand->ecc.write_page = mt7621_nfc_write_page_hwecc;
1199 + nand->ecc.write_page_raw = mt7621_nfc_write_page_raw;
1200 + nand->ecc.read_oob = mt7621_nfc_read_oob_hwecc;
1201 + nand->ecc.read_oob_raw = mt7621_nfc_read_oob_raw;
1202 + nand->ecc.write_oob = mt7621_nfc_write_oob_hwecc;
1203 + nand->ecc.write_oob_raw = mt7621_nfc_write_oob_raw;
1205 + nand->dev_ready = mt7621_nfc_dev_ready;
1206 + nand->select_chip = mt7621_nfc_select_chip;
1207 + nand->write_byte = mt7621_nfc_write_byte;
1208 + nand->write_buf = mt7621_nfc_write_buf;
1209 + nand->read_byte = mt7621_nfc_read_byte;
1210 + nand->read_buf = mt7621_nfc_read_buf;
1211 + nand->cmd_ctrl = mt7621_nfc_cmd_ctrl;
1212 + nand->block_bad = mt7621_nfc_block_bad;
1214 + mtd = nand_to_mtd(nand);
1215 + mtd_set_ooblayout(mtd, &mt7621_nfc_ooblayout_ops);
1217 + /* Reset NFI master */
1218 + mt7621_nfc_hw_init(nfc);
1220 + ret = nand_scan_ident(mtd, 1, NULL);
1224 + mt7621_nfc_attach_chip(nand);
1226 + ret = nand_scan_tail(mtd);
1230 + nand_register(0, mtd);
1233 +static void mt7621_nfc_set_regs(struct mt7621_nfc *nfc)
1235 + nfc->nfi_regs = (void __iomem *)CKSEG1ADDR(NFI_BASE);
1236 + nfc->ecc_regs = (void __iomem *)CKSEG1ADDR(NFI_ECC_BASE);
1239 +void mt7621_nfc_spl_init(struct mt7621_nfc *nfc)
1241 + struct nand_chip *nand = &nfc->nand;
1243 + mt7621_nfc_set_regs(nfc);
1245 + nand_set_controller_data(nand, nfc);
1247 + nand->options |= NAND_NO_SUBPAGE_WRITE;
1249 + nand->ecc.mode = NAND_ECC_HW_SYNDROME;
1250 + nand->ecc.read_page = mt7621_nfc_read_page_hwecc;
1252 + nand->dev_ready = mt7621_nfc_dev_ready;
1253 + nand->select_chip = mt7621_nfc_select_chip;
1254 + nand->read_byte = mt7621_nfc_read_byte;
1255 + nand->read_buf = mt7621_nfc_read_buf;
1256 + nand->cmd_ctrl = mt7621_nfc_cmd_ctrl;
1258 + /* Reset NFI master */
1259 + mt7621_nfc_hw_init(nfc);
1262 +int mt7621_nfc_spl_post_init(struct mt7621_nfc *nfc)
1264 + struct nand_chip *nand = &nfc->nand;
1265 + int nand_maf_id, nand_dev_id;
1266 + struct nand_flash_dev *type;
1268 + type = nand_get_flash_type(&nand->mtd, nand, &nand_maf_id,
1269 + &nand_dev_id, NULL);
1272 + return PTR_ERR(type);
1274 + nand->numchips = 1;
1275 + nand->mtd.size = nand->chipsize;
1277 + return mt7621_nfc_attach_chip(nand);
1280 +void board_nand_init(void)
1282 + mt7621_nfc_set_regs(&nfc_dev);
1283 + mt7621_nfc_init_chip(&nfc_dev);
1286 +++ b/drivers/mtd/nand/raw/mt7621_nand.h
1288 +/* SPDX-License-Identifier: GPL-2.0 */
1290 + * Copyright (C) 2022 MediaTek Inc. All rights reserved.
1292 + * Author: Weijie Gao <weijie.gao@mediatek.com>
1295 +#ifndef _MT7621_NAND_H_
1296 +#define _MT7621_NAND_H_
1298 +#include <linux/types.h>
1299 +#include <linux/mtd/mtd.h>
1300 +#include <linux/compiler.h>
1301 +#include <linux/mtd/rawnand.h>
1303 +struct mt7621_nfc {
1304 + struct nand_chip nand;
1306 + void __iomem *nfi_regs;
1307 + void __iomem *ecc_regs;
1309 + u32 spare_per_sector;
1313 +void mt7621_nfc_spl_init(struct mt7621_nfc *nfc);
1314 +int mt7621_nfc_spl_post_init(struct mt7621_nfc *nfc);
1316 +#endif /* _MT7621_NAND_H_ */
1318 +++ b/drivers/mtd/nand/raw/mt7621_nand_spl.c
1320 +// SPDX-License-Identifier: GPL-2.0
1322 + * Copyright (C) 2022 MediaTek Inc. All rights reserved.
1324 + * Author: Weijie Gao <weijie.gao@mediatek.com>
1328 +#include <malloc.h>
1329 +#include <linux/sizes.h>
1330 +#include <linux/delay.h>
1331 +#include <linux/mtd/rawnand.h>
1332 +#include "mt7621_nand.h"
+/* Single controller instance — the SPL has no driver model */
1334 +static struct mt7621_nfc nfc_dev;
+/* NOTE(review): presumably set non-zero once nand_init() has probed
+ * the chip successfully; the assignment is not visible in this hunk */
1336 +static int nand_valid;
+/*
+ * nand_command_lp() - large-page command dispatcher for the SPL
+ * (trimmed copy of the generic raw-NAND nand_command_lp).
+ *
+ * Issues the command latch cycle, the column/row address cycles, and
+ * the trailing READSTART for NAND_CMD_READ0.  Commands with their own
+ * busy handling (STATUS/READID/SET_FEATURES) return without waiting
+ * for R/B; everything else waits via nand_wait_ready().
+ */
1338 +static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
1339 + int column, int page_addr)
1341 + register struct nand_chip *chip = mtd_to_nand(mtd);
1343 + /* Command latch cycle */
1344 + chip->cmd_ctrl(mtd, command, NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
1346 + if (column != -1 || page_addr != -1) {
1347 + int ctrl = NAND_CTRL_CHANGE | NAND_NCE | NAND_ALE;
1349 + /* Serially input address */
1350 + if (column != -1) {
1351 + chip->cmd_ctrl(mtd, column, ctrl);
1352 + ctrl &= ~NAND_CTRL_CHANGE;
+ /* READID takes only a single column address cycle */
1353 + if (command != NAND_CMD_READID)
1354 + chip->cmd_ctrl(mtd, column >> 8, ctrl);
1356 + if (page_addr != -1) {
1357 + chip->cmd_ctrl(mtd, page_addr, ctrl);
1358 + chip->cmd_ctrl(mtd, page_addr >> 8,
1359 + NAND_NCE | NAND_ALE);
+ /* Third row-address cycle for chips larger than 128 MiB */
1360 + if (chip->options & NAND_ROW_ADDR_3)
1361 + chip->cmd_ctrl(mtd, page_addr >> 16,
1362 + NAND_NCE | NAND_ALE);
1365 + chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
1368 + * Program and erase have their own busy handlers status, sequential
1369 + * in and status need no delay.
1371 + switch (command) {
1372 + case NAND_CMD_STATUS:
1373 + case NAND_CMD_READID:
1374 + case NAND_CMD_SET_FEATURES:
+ /* Large-page READ0 needs the second READSTART command cycle */
1377 + case NAND_CMD_READ0:
1378 + chip->cmd_ctrl(mtd, NAND_CMD_READSTART,
1379 + NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
1380 + chip->cmd_ctrl(mtd, NAND_CMD_NONE,
1381 + NAND_NCE | NAND_CTRL_CHANGE);
1385 + * Apply this short delay always to ensure that we do wait tWB in
1386 + * any case on any machine.
1390 + nand_wait_ready(mtd);
+/*
+ * nfc_read_page_hwecc() - read one full page into @buf with HW ECC.
+ *
+ * Issues READ0 at column 0 of @page, then runs the controller's
+ * ecc.read_page callback (oob_required = 1).  A negative return or a
+ * bitflip count above ecc.strength is treated as a read failure.
+ */
1393 +static int nfc_read_page_hwecc(struct mtd_info *mtd, void *buf,
1394 + unsigned int page)
1396 + struct nand_chip *chip = mtd_to_nand(mtd);
1399 + chip->cmdfunc(mtd, NAND_CMD_READ0, 0x0, page);
1401 + ret = chip->ecc.read_page(mtd, chip, buf, 1, page);
1402 + if (ret < 0 || ret > chip->ecc.strength)
+/*
+ * nfc_read_oob_hwecc() - read up to @len OOB bytes of @page into @buf.
+ *
+ * Runs ecc.read_page with a NULL data buffer so only chip->oob_poi is
+ * filled, then copies at most mtd->oobsize bytes out (len is clamped).
+ */
1408 +static int nfc_read_oob_hwecc(struct mtd_info *mtd, void *buf, u32 len,
1409 + unsigned int page)
1411 + struct nand_chip *chip = mtd_to_nand(mtd);
1414 + chip->cmdfunc(mtd, NAND_CMD_READ0, 0x0, page);
+ /* NULL data buffer: populate oob_poi only */
1416 + ret = chip->ecc.read_page(mtd, chip, NULL, 1, page);
1420 + if (len > mtd->oobsize)
1421 + len = mtd->oobsize;
1423 + memcpy(buf, chip->oob_poi, len);
+/*
+ * nfc_check_bad_block() - factory bad-block check for the block that
+ * contains @page.
+ *
+ * Reads the first OOB byte (the bad-block marker) of the block's first
+ * page — or last page(s) when NAND_BBT_SCANLASTPAGE is set — and up to
+ * two pages when NAND_BBT_SCAN2NDPAGE is set.  Any marker != 0xFF
+ * flags the block as bad.
+ */
1428 +static int nfc_check_bad_block(struct mtd_info *mtd, unsigned int page)
1430 + struct nand_chip *chip = mtd_to_nand(mtd);
1431 + u32 pages_per_block, i = 0;
1435 + pages_per_block = 1 << (mtd->erasesize_shift - mtd->writesize_shift);
1437 + /* Read from first/last page(s) if necessary */
1438 + if (chip->bbt_options & NAND_BBT_SCANLASTPAGE) {
1439 + page += pages_per_block - 1;
+ /* Start one page earlier so two pages can be scanned */
1440 + if (chip->bbt_options & NAND_BBT_SCAN2NDPAGE)
1445 + ret = nfc_read_oob_hwecc(mtd, &bad, 1, page);
+ /* Non-0xFF bad-block marker means the block is bad */
1449 + ret = bad != 0xFF;
1453 + } while (!ret && (chip->bbt_options & NAND_BBT_SCAN2NDPAGE) && i < 2);
+/*
+ * nand_spl_load_image() - read @size bytes at flash offset @offs into
+ * @dest, skipping eraseblocks that nfc_check_bad_block() reports bad.
+ *
+ * The bad-block check runs on the first iteration and whenever @offs
+ * crosses an eraseblock boundary.  Unaligned/partial pages are read
+ * through the static bounce 'buffer' and the wanted slice is copied
+ * out; full pages are read straight into @dest.  (The loop framing and
+ * return paths fall on lines elided from this hunk.)
+ */
1458 +int nand_spl_load_image(uint32_t offs, unsigned int size, void *dest)
1460 + struct mt7621_nfc *nfc = &nfc_dev;
1461 + struct nand_chip *chip = &nfc->nand;
1462 + struct mtd_info *mtd = &chip->mtd;
1463 + u32 addr, col, page, chksz;
1464 + bool check_bad = true;
+ /* Re-check only on the first pass or at an eraseblock boundary */
1470 + if (check_bad || !(offs & mtd->erasesize_mask)) {
1471 + addr = offs & (~mtd->erasesize_mask);
1472 + page = addr >> mtd->writesize_shift;
1473 + if (nfc_check_bad_block(mtd, page)) {
1474 + /* Skip bad block */
+ /* No good block left before the end of the chip */
1475 + if (addr >= mtd->size - mtd->erasesize)
1478 + offs += mtd->erasesize;
1482 + check_bad = false;
1485 + col = offs & mtd->writesize_mask;
1486 + page = offs >> mtd->writesize_shift;
+ /* Bytes to take from this page (may be a partial page) */
1487 + chksz = min(mtd->writesize - col, (uint32_t)size);
1489 + if (unlikely(chksz < mtd->writesize)) {
1490 + /* Not reading a full page */
1491 + if (nfc_read_page_hwecc(mtd, buffer, page))
1494 + memcpy(dest, buffer + col, chksz);
1496 + if (nfc_read_page_hwecc(mtd, dest, page))
+/* Bad-block-table stub for the raw NAND core — the SPL keeps no BBT
+ * (body falls on lines elided from this hunk) */
1508 +int nand_default_bbt(struct mtd_info *mtd)
+/*
+ * nand_size() - chip size in bytes as unsigned long.
+ *
+ * chipsize is 64-bit while the return type is unsigned long, which is
+ * 32-bit on MT7621; sizes above 2 GiB take a fallback path on lines
+ * elided from this hunk — confirm against the full patch.
+ */
1513 +unsigned long nand_size(void)
1518 + /* Unlikely that NAND size > 2GBytes */
1519 + if (nfc_dev.nand.chipsize <= SZ_2G)
1520 + return nfc_dev.nand.chipsize;
+/* Deselect hook required by the SPL NAND interface — body not visible
+ * in this hunk; presumably an empty stub */
1525 +void nand_deselect(void)
1529 +void nand_init(void)
1531 + struct mtd_info *mtd;
1532 + struct nand_chip *chip;
1537 + mt7621_nfc_spl_init(&nfc_dev);
1539 + chip = &nfc_dev.nand;
1541 + chip->cmdfunc = nand_command_lp;
1543 + if (mt7621_nfc_spl_post_init(&nfc_dev))
1546 + mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
1547 + mtd->writesize_shift = ffs(mtd->writesize) - 1;
1548 + mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
1549 + mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;
1551 + buffer = malloc(mtd->writesize);