From 1ecb38eabd90efe93957d0a822a167560c39308a Mon Sep 17 00:00:00 2001
From: Xiangsheng Hou <xiangsheng.hou@mediatek.com>
Date: Wed, 20 Mar 2019 16:19:51 +0800
Subject: [PATCH 6/6] spi: spi-mem: MediaTek: Add SPI NAND Flash interface
 driver for MediaTek MT7622

Change-Id: I3e78406bb9b46b0049d3988a5c71c7069e4f809c
Signed-off-by: Xiangsheng Hou <xiangsheng.hou@mediatek.com>
---
 drivers/spi/Kconfig        |    9 +
 drivers/spi/Makefile       |    1 +
 drivers/spi/spi-mtk-snfi.c | 1183 ++++++++++++++++++++++++++++++++++++
 3 files changed, 1193 insertions(+)
 create mode 100644 drivers/spi/spi-mtk-snfi.c

--- /dev/null
+++ b/drivers/spi/spi-mtk-snfi.c
@@ -0,0 +1,1183 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for the MediaTek SPI NAND interface
+ *
+ * Copyright (C) 2018 MediaTek Inc.
+ * Authors:	Xiangsheng Hou	<xiangsheng.hou@mediatek.com>
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/mtk_ecc.h>
+#include <linux/mtd/spinand.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi-mem.h>
+
+/* NAND controller register definition */
+/* NFI control */
+#define NFI_CNFG		0x00
+#define CNFG_DMA		BIT(0)
+#define CNFG_READ_EN		BIT(1)
+#define CNFG_DMA_BURST_EN	BIT(2)
+#define CNFG_BYTE_RW		BIT(6)
+#define CNFG_HW_ECC_EN		BIT(8)
+#define CNFG_AUTO_FMT_EN	BIT(9)
+#define CNFG_OP_PROGRAM		(3UL << 12)
+#define CNFG_OP_CUST		(6UL << 12)
+#define NFI_PAGEFMT		0x04
+#define PAGEFMT_512		0
+#define PAGEFMT_2K		1
+#define PAGEFMT_4K		2
+#define PAGEFMT_FDM_SHIFT	8
+#define PAGEFMT_FDM_ECC_SHIFT	12
+#define NFI_CON			0x08
+#define CON_FIFO_FLUSH		BIT(0)
+#define CON_NFI_RST		BIT(1)
+#define CON_BRD			BIT(8)
+#define CON_BWR			BIT(9)
+#define CON_SEC_SHIFT		12
+#define NFI_INTR_EN		0x10
+#define INTR_AHB_DONE_EN	BIT(6)
+#define NFI_INTR_STA		0x14
+#define NFI_CMD			0x20
+#define NFI_STA			0x60
+#define STA_EMP_PAGE		BIT(12)
+#define NAND_FSM_MASK		(0x1f << 24)
+#define NFI_FSM_MASK		(0xf << 16)
+#define NFI_ADDRCNTR		0x70
+#define CNTR_MASK		GENMASK(16, 12)
+#define ADDRCNTR_SEC_SHIFT	12
+#define ADDRCNTR_SEC(val) \
+		(((val) & CNTR_MASK) >> ADDRCNTR_SEC_SHIFT)
+#define NFI_STRADDR		0x80
+#define NFI_BYTELEN		0x84
+#define NFI_CSEL		0x90
+#define NFI_FDML(x)		(0xa0 + (x) * sizeof(u32) * 2)
+#define NFI_FDMM(x)		(0xa4 + (x) * sizeof(u32) * 2)
+#define NFI_MASTER_STA		0x224
+#define MASTER_STA_MASK		0x0fff
+/* NFI_SPI control */
+#define SNFI_MAC_OUTL		0x504
+#define SNFI_MAC_INL		0x508
+#define SNFI_RD_CTL2		0x510
+#define RD_CMD_MASK		0x00ff
+#define RD_DUMMY_SHIFT		8
+#define SNFI_RD_CTL3		0x514
+#define RD_ADDR_MASK		0xffff
+#define SNFI_MISC_CTL		0x538
+#define RD_MODE_X2		BIT(16)
+#define RD_MODE_X4		(2UL << 16)
+#define RD_QDUAL_IO		(4UL << 16)
+#define RD_MODE_MASK		(7UL << 16)
+#define RD_CUSTOM_EN		BIT(6)
+#define WR_CUSTOM_EN		BIT(7)
+#define WR_X4_EN		BIT(20)
+#define SW_RST			BIT(28)
+#define SNFI_MISC_CTL2		0x53c
+#define WR_LEN_SHIFT		16
+#define SNFI_PG_CTL1		0x524
+#define WR_LOAD_CMD_SHIFT	8
+#define SNFI_PG_CTL2		0x528
+#define WR_LOAD_ADDR_MASK	0xffff
+#define SNFI_MAC_CTL		0x500
+#define MAC_WIP			BIT(0)
+#define MAC_WIP_READY		BIT(1)
+#define MAC_TRIG		BIT(2)
+#define MAC_EN			BIT(3)
+#define MAC_SIO_SEL		BIT(4)
+#define SNFI_STA_CTL1		0x550
+#define SPI_STATE_IDLE		0xf
+#define SNFI_CNFG		0x55c
+#define SNFI_MODE_EN		BIT(0)
+#define SNFI_GPRAM_DATA		0x800
+#define SNFI_GPRAM_MAX_LEN	16
+
+/* Dummy commands used to trigger the NFI into SPI mode */
+#define NAND_CMD_DUMMYREAD	0x00
+#define NAND_CMD_DUMMYPROG	0x80
+
+#define MTK_TIMEOUT		500000
+#define MTK_RESET_TIMEOUT	1000000
+#define MTK_SNFC_MIN_SPARE	16
+#define KB(x)			((x) * 1024UL)
+
+/*
+ * Supported spare size of each IP.
+ * The order should be the same as the spare-size bitfield definition of
+ * the NFI_PAGEFMT register.
+ */
+static const u8 spare_size_mt7622[] = {
+	16, 26, 27, 28
+};
+
+struct mtk_snfi_caps {
+	const u8 *spare_size;
+	u8 num_spare_size;
+	u32 nand_sec_size;
+	u8 nand_fdm_size;
+	u8 nand_fdm_ecc_size;
+	u8 ecc_parity_bits;
+	u8 pageformat_spare_shift;
+	u8 bad_mark_swap;
+};
+
+struct mtk_snfi_bad_mark_ctl {
+	void (*bm_swap)(struct spi_mem *mem, u8 *buf, int raw);
+	u32 sec;
+	u32 pos;
+};
+
+struct mtk_snfi_nand_chip {
+	struct mtk_snfi_bad_mark_ctl bad_mark;
+	u32 spare_per_sector;
+};
+
+struct mtk_snfi_clk {
+	struct clk *nfi_clk;
+	struct clk *spi_clk;
+};
+
+struct mtk_snfi {
+	const struct mtk_snfi_caps *caps;
+	struct mtk_snfi_nand_chip snfi_nand;
+	struct mtk_snfi_clk clk;
+	struct mtk_ecc_config ecc_cfg;
+	struct mtk_ecc *ecc;
+	struct completion done;
+	struct device *dev;
+
+	void __iomem *regs;
+
+	u8 *buffer;
+};
+
+static inline u8 *oob_ptr(struct spi_mem *mem, int i)
+{
+	struct spinand_device *spinand = spi_mem_get_drvdata(mem);
+	struct mtk_snfi *snfi = spi_controller_get_devdata(mem->spi->master);
+	struct mtk_snfi_nand_chip *snfi_nand = &snfi->snfi_nand;
+	u8 *poi;
+
+	/* Map each sector's FDM data into the free OOB area:
+	 * the beginning of the OOB area stores the FDM data of the
+	 * bad-mark sector.
+	 */
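+	/*
+	 * e.g. with bad_mark.sec == 2 and an 8-byte FDM, oobbuf holds the
+	 * per-sector FDM data in the order [sec2][sec0][sec1][sec3].
+	 */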
+
+	if (i < snfi_nand->bad_mark.sec)
+		poi = spinand->oobbuf + (i + 1) * snfi->caps->nand_fdm_size;
+	else if (i == snfi_nand->bad_mark.sec)
+		poi = spinand->oobbuf;
+	else
+		poi = spinand->oobbuf + i * snfi->caps->nand_fdm_size;
+
+	return poi;
+}
+
+static inline int mtk_data_len(struct spi_mem *mem)
+{
+	struct mtk_snfi *snfi = spi_controller_get_devdata(mem->spi->master);
+	struct mtk_snfi_nand_chip *snfi_nand = &snfi->snfi_nand;
+
+	return snfi->caps->nand_sec_size + snfi_nand->spare_per_sector;
+}
+
+static inline u8 *mtk_oob_ptr(struct spi_mem *mem,
+			      const u8 *p, int i)
+{
+	struct mtk_snfi *snfi = spi_controller_get_devdata(mem->spi->master);
+
+	return (u8 *)p + i * mtk_data_len(mem) + snfi->caps->nand_sec_size;
+}
+
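+/*
+ * The factory bad-block marker sits at a fixed byte offset in the page.
+ * With the controller's interleaved sector+spare layout that offset falls
+ * inside a sector, so the byte there is swapped with the first FDM byte
+ * (presumably so the marker stays in place in both raw and ECC layouts).
+ */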
+static void mtk_snfi_bad_mark_swap(struct spi_mem *mem,
+				   u8 *buf, int raw)
+{
+	struct spinand_device *spinand = spi_mem_get_drvdata(mem);
+	struct mtk_snfi *snfi = spi_controller_get_devdata(mem->spi->master);
+	struct mtk_snfi_nand_chip *snfi_nand = &snfi->snfi_nand;
+	u32 bad_pos = snfi_nand->bad_mark.pos;
+
+	if (raw)
+		bad_pos += snfi_nand->bad_mark.sec * mtk_data_len(mem);
+	else
+		bad_pos += snfi_nand->bad_mark.sec * snfi->caps->nand_sec_size;
+
+	swap(spinand->oobbuf[0], buf[bad_pos]);
+}
+
+static void mtk_snfi_set_bad_mark_ctl(struct mtk_snfi_bad_mark_ctl *bm_ctl,
+				      struct spi_mem *mem)
+{
+	struct spinand_device *spinand = spi_mem_get_drvdata(mem);
+	struct mtd_info *mtd = spinand_to_mtd(spinand);
+
+	bm_ctl->bm_swap = mtk_snfi_bad_mark_swap;
+	bm_ctl->sec = mtd->writesize / mtk_data_len(mem);
+	bm_ctl->pos = mtd->writesize % mtk_data_len(mem);
+}
+
+static void mtk_snfi_mac_enable(struct mtk_snfi *snfi)
+{
+	u32 mac;
+
+	mac = readl(snfi->regs + SNFI_MAC_CTL);
+	mac &= ~MAC_SIO_SEL;
+	mac |= MAC_EN;
+
+	writel(mac, snfi->regs + SNFI_MAC_CTL);
+}
+
+static int mtk_snfi_mac_trigger(struct mtk_snfi *snfi)
+{
+	u32 mac, reg;
+	int ret = 0;
+
+	mac = readl(snfi->regs + SNFI_MAC_CTL);
+	mac |= MAC_TRIG;
+	writel(mac, snfi->regs + SNFI_MAC_CTL);
+
+	ret = readl_poll_timeout_atomic(snfi->regs + SNFI_MAC_CTL, reg,
+					reg & MAC_WIP_READY, 10,
+					MTK_TIMEOUT);
+	if (ret < 0) {
+		dev_err(snfi->dev, "polling wip ready for read timeout\n");
+		return -EIO;
+	}
+
+	ret = readl_poll_timeout_atomic(snfi->regs + SNFI_MAC_CTL, reg,
+					!(reg & MAC_WIP), 10,
+					MTK_TIMEOUT);
+	if (ret < 0) {
+		dev_err(snfi->dev, "polling flash update timeout\n");
+		return -EIO;
+	}
+
+	return ret;
+}
+
+static void mtk_snfi_mac_leave(struct mtk_snfi *snfi)
+{
+	u32 mac;
+
+	mac = readl(snfi->regs + SNFI_MAC_CTL);
+	mac &= ~(MAC_TRIG | MAC_EN | MAC_SIO_SEL);
+	writel(mac, snfi->regs + SNFI_MAC_CTL);
+}
+
+static int mtk_snfi_mac_op(struct mtk_snfi *snfi)
+{
+	int ret = 0;
+
+	mtk_snfi_mac_enable(snfi);
+
+	ret = mtk_snfi_mac_trigger(snfi);
+	if (ret)
+		return ret;
+
+	mtk_snfi_mac_leave(snfi);
+
+	return ret;
+}
+
+static irqreturn_t mtk_snfi_irq(int irq, void *id)
+{
+	struct mtk_snfi *snfi = id;
+	u16 sta, ien;
+
+	sta = readw(snfi->regs + NFI_INTR_STA);
+	ien = readw(snfi->regs + NFI_INTR_EN);
+
+	if (!(sta & ien))
+		return IRQ_NONE;
+
+	writew(~sta & ien, snfi->regs + NFI_INTR_EN);
+	complete(&snfi->done);
+
+	return IRQ_HANDLED;
+}
+
+static int mtk_snfi_enable_clk(struct device *dev, struct mtk_snfi_clk *clk)
+{
+	int ret;
+
+	ret = clk_prepare_enable(clk->nfi_clk);
+	if (ret) {
+		dev_err(dev, "failed to enable nfi clk\n");
+		return ret;
+	}
+
+	ret = clk_prepare_enable(clk->spi_clk);
+	if (ret) {
+		dev_err(dev, "failed to enable spi clk\n");
+		clk_disable_unprepare(clk->nfi_clk);
+		return ret;
+	}
+
+	return 0;
+}
+
+static void mtk_snfi_disable_clk(struct mtk_snfi_clk *clk)
+{
+	clk_disable_unprepare(clk->nfi_clk);
+	clk_disable_unprepare(clk->spi_clk);
+}
+
+static int mtk_snfi_reset(struct mtk_snfi *snfi)
+{
+	u32 val;
+	int ret;
+
+	/* SW reset controller */
+	val = readl(snfi->regs + SNFI_MISC_CTL) | SW_RST;
+	writel(val, snfi->regs + SNFI_MISC_CTL);
+
+	ret = readw_poll_timeout(snfi->regs + SNFI_STA_CTL1, val,
+				 !(val & SPI_STATE_IDLE), 50,
+				 MTK_RESET_TIMEOUT);
+	if (ret) {
+		dev_warn(snfi->dev, "spi state active in reset [0x%x] = 0x%x\n",
+			 SNFI_STA_CTL1, val);
+		return ret;
+	}
+
+	val = readl(snfi->regs + SNFI_MISC_CTL);
+	val &= ~SW_RST;
+	writel(val, snfi->regs + SNFI_MISC_CTL);
+
+	/* reset all registers and force the NFI master to terminate */
+	writew(CON_FIFO_FLUSH | CON_NFI_RST, snfi->regs + NFI_CON);
+	ret = readw_poll_timeout(snfi->regs + NFI_STA, val,
+				 !(val & (NFI_FSM_MASK | NAND_FSM_MASK)), 50,
+				 MTK_RESET_TIMEOUT);
+	if (ret) {
+		dev_warn(snfi->dev, "nfi active in reset [0x%x] = 0x%x\n",
+			 NFI_STA, val);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int mtk_snfi_set_spare_per_sector(struct spinand_device *spinand,
+					 const struct mtk_snfi_caps *caps,
+					 u32 *sps)
+{
+	struct mtd_info *mtd = spinand_to_mtd(spinand);
+	const u8 *spare = caps->spare_size;
+	u32 sectors, i, closest_spare = 0;
+
+	sectors = mtd->writesize / caps->nand_sec_size;
+	*sps = mtd->oobsize / sectors;
+
+	if (*sps < MTK_SNFC_MIN_SPARE)
+		return -EINVAL;
+
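+	/* pick the largest supported spare size that fits the per-sector OOB */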
+	for (i = 0; i < caps->num_spare_size; i++) {
+		if (*sps >= spare[i] && spare[i] >= spare[closest_spare]) {
+			closest_spare = i;
+			if (*sps == spare[i])
+				break;
+		}
+	}
+
+	*sps = spare[closest_spare];
+
+	return 0;
+}
+
+static void mtk_snfi_read_fdm_data(struct spi_mem *mem,
+				   u32 sectors)
+{
+	struct mtk_snfi *snfi = spi_controller_get_devdata(mem->spi->master);
+	const struct mtk_snfi_caps *caps = snfi->caps;
+	u32 vall, valm;
+	int i, j;
+	u8 *oobptr;
+
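+	/* NFI_FDML(i) holds FDM bytes 0-3 of sector i, NFI_FDMM(i) bytes 4-7 */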
+	for (i = 0; i < sectors; i++) {
+		oobptr = oob_ptr(mem, i);
+		vall = readl(snfi->regs + NFI_FDML(i));
+		valm = readl(snfi->regs + NFI_FDMM(i));
+
+		for (j = 0; j < caps->nand_fdm_size; j++)
+			oobptr[j] = (j >= 4 ? valm : vall) >> ((j % 4) * 8);
+	}
+}
+
+static void mtk_snfi_write_fdm_data(struct spi_mem *mem,
+				    u32 sectors)
+{
+	struct mtk_snfi *snfi = spi_controller_get_devdata(mem->spi->master);
+	const struct mtk_snfi_caps *caps = snfi->caps;
+	u32 vall, valm;
+	int i, j;
+	u8 *oobptr;
+
+	for (i = 0; i < sectors; i++) {
+		oobptr = oob_ptr(mem, i);
+		vall = 0;
+		valm = 0;
+		for (j = 0; j < 8; j++) {
+			if (j < 4)
+				vall |= (j < caps->nand_fdm_size ? oobptr[j] :
+					 0xff) << (j * 8);
+			else
+				valm |= (j < caps->nand_fdm_size ? oobptr[j] :
+					 0xff) << ((j - 4) * 8);
+		}
+		writel(vall, snfi->regs + NFI_FDML(i));
+		writel(valm, snfi->regs + NFI_FDMM(i));
+	}
+}
+
+static int mtk_snfi_update_ecc_stats(struct spi_mem *mem,
+				     u8 *buf, u32 sectors)
+{
+	struct spinand_device *spinand = spi_mem_get_drvdata(mem);
+	struct mtd_info *mtd = spinand_to_mtd(spinand);
+	struct mtk_snfi *snfi = spi_controller_get_devdata(mem->spi->master);
+	struct mtk_ecc_stats stats;
+	int rc, i;
+
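+	/* the controller flags an erased (all-0xff) page; report clean 0xff
+	 * data instead of counting bogus ECC errors
+	 */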
+	rc = readl(snfi->regs + NFI_STA) & STA_EMP_PAGE;
+	if (rc) {
+		memset(buf, 0xff, sectors * snfi->caps->nand_sec_size);
+		for (i = 0; i < sectors; i++)
+			memset(spinand->oobbuf, 0xff,
+			       snfi->caps->nand_fdm_size);
+		return 0;
+	}
+
+	mtk_ecc_get_stats(snfi->ecc, &stats, sectors);
+	mtd->ecc_stats.corrected += stats.corrected;
+	mtd->ecc_stats.failed += stats.failed;
+
+	return 0;
+}
+
+static int mtk_snfi_hw_runtime_config(struct spi_mem *mem)
+{
+	struct spinand_device *spinand = spi_mem_get_drvdata(mem);
+	struct mtd_info *mtd = spinand_to_mtd(spinand);
+	struct nand_device *nand = mtd_to_nanddev(mtd);
+	struct mtk_snfi *snfi = spi_controller_get_devdata(mem->spi->master);
+	const struct mtk_snfi_caps *caps = snfi->caps;
+	struct mtk_snfi_nand_chip *snfi_nand = &snfi->snfi_nand;
+	u32 fmt, spare, i = 0;
+	int ret;
+
+	ret = mtk_snfi_set_spare_per_sector(spinand, caps, &spare);
+	if (ret)
+		return ret;
+
+	/* calculate usable oob bytes for ecc parity data */
+	snfi_nand->spare_per_sector = spare;
+	spare -= caps->nand_fdm_size;
+
+	nand->memorg.oobsize = snfi_nand->spare_per_sector
+		* (mtd->writesize / caps->nand_sec_size);
+	mtd->oobsize = nanddev_per_page_oobsize(nand);
+
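+	/* each bit of ECC strength costs ecc_parity_bits parity bits, so the
+	 * achievable strength is (spare bytes * 8) / ecc_parity_bits
+	 */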
+	snfi->ecc_cfg.strength = (spare << 3) / caps->ecc_parity_bits;
+	mtk_ecc_adjust_strength(snfi->ecc, &snfi->ecc_cfg.strength);
+
+	switch (mtd->writesize) {
+	case 512:
+		fmt = PAGEFMT_512;
+		break;
+	case KB(2):
+		fmt = PAGEFMT_2K;
+		break;
+	case KB(4):
+		fmt = PAGEFMT_4K;
+		break;
+	default:
+		dev_err(snfi->dev, "invalid page len: %d\n", mtd->writesize);
+		return -EINVAL;
+	}
+
+	/* Setup PageFormat */
+	while (caps->spare_size[i] != snfi_nand->spare_per_sector) {
+		i++;
+		if (i == caps->num_spare_size) {
+			dev_err(snfi->dev, "invalid spare size %d\n",
+				snfi_nand->spare_per_sector);
+			return -EINVAL;
+		}
+	}
+
+	fmt |= i << caps->pageformat_spare_shift;
+	fmt |= caps->nand_fdm_size << PAGEFMT_FDM_SHIFT;
+	fmt |= caps->nand_fdm_ecc_size << PAGEFMT_FDM_ECC_SHIFT;
+	writel(fmt, snfi->regs + NFI_PAGEFMT);
+
+	snfi->ecc_cfg.len = caps->nand_sec_size + caps->nand_fdm_ecc_size;
+
+	mtk_snfi_set_bad_mark_ctl(&snfi_nand->bad_mark, mem);
+
+	return 0;
+}
+
+static int mtk_snfi_read_from_cache(struct spi_mem *mem,
+				    const struct spi_mem_op *op, int oob_on)
+{
+	struct mtk_snfi *snfi = spi_controller_get_devdata(mem->spi->master);
+	struct spinand_device *spinand = spi_mem_get_drvdata(mem);
+	struct mtd_info *mtd = spinand_to_mtd(spinand);
+	u32 sectors = mtd->writesize / snfi->caps->nand_sec_size;
+	struct mtk_snfi_nand_chip *snfi_nand = &snfi->snfi_nand;
+	u32 reg, len, col_addr = 0;
+	int dummy_cycle, ret;
+	dma_addr_t dma_addr;
+
+	len = sectors * (snfi->caps->nand_sec_size
+	      + snfi_nand->spare_per_sector);
+
+	dma_addr = dma_map_single(snfi->dev, snfi->buffer,
+				  len, DMA_FROM_DEVICE);
+	ret = dma_mapping_error(snfi->dev, dma_addr);
+	if (ret) {
+		dev_err(snfi->dev, "dma mapping error\n");
+		return -EINVAL;
+	}
+
+	/* set Read cache command and dummy cycle */
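+	/* dummy cycles = dummy bits / buswidth; e.g. one dummy byte on a x4
+	 * bus takes two cycles
+	 */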
+	dummy_cycle = (op->dummy.nbytes << 3) >> (ffs(op->dummy.buswidth) - 1);
+	reg = ((op->cmd.opcode & RD_CMD_MASK) |
+	       (dummy_cycle << RD_DUMMY_SHIFT));
+	writel(reg, snfi->regs + SNFI_RD_CTL2);
+
+	writel((col_addr & RD_ADDR_MASK), snfi->regs + SNFI_RD_CTL3);
+
+	reg = readl(snfi->regs + SNFI_MISC_CTL);
+	reg |= RD_CUSTOM_EN;
+	reg &= ~(RD_MODE_MASK | WR_X4_EN);
+
+	/* set data and addr buswidth */
+	if (op->data.buswidth == 4)
+		reg |= RD_MODE_X4;
+	else if (op->data.buswidth == 2)
+		reg |= RD_MODE_X2;
+
+	if (op->addr.buswidth == 4 || op->addr.buswidth == 2)
+		reg |= RD_QDUAL_IO;
+	writel(reg, snfi->regs + SNFI_MISC_CTL);
+
+	writel(len, snfi->regs + SNFI_MISC_CTL2);
+	writew(sectors << CON_SEC_SHIFT, snfi->regs + NFI_CON);
+	reg = readw(snfi->regs + NFI_CNFG);
+	reg |= CNFG_READ_EN | CNFG_DMA_BURST_EN | CNFG_DMA | CNFG_OP_CUST;
+
+	if (!oob_on) {
+		reg |= CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN;
+		writew(reg, snfi->regs + NFI_CNFG);
+
+		snfi->ecc_cfg.mode = ECC_NFI_MODE;
+		snfi->ecc_cfg.sectors = sectors;
+		snfi->ecc_cfg.op = ECC_DECODE;
+		ret = mtk_ecc_enable(snfi->ecc, &snfi->ecc_cfg);
+		if (ret) {
+			dev_err(snfi->dev, "ecc enable failed\n");
+			/* clear NFI_CNFG */
+			reg &= ~(CNFG_READ_EN | CNFG_DMA_BURST_EN | CNFG_DMA |
+				 CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN);
+			writew(reg, snfi->regs + NFI_CNFG);
+			goto out;
+		}
+	} else {
+		writew(reg, snfi->regs + NFI_CNFG);
+	}
+
+	writel(lower_32_bits(dma_addr), snfi->regs + NFI_STRADDR);
+	readw(snfi->regs + NFI_INTR_STA);
+	writew(INTR_AHB_DONE_EN, snfi->regs + NFI_INTR_EN);
+
+	init_completion(&snfi->done);
+
+	/* set a dummy command to make the NFI enter SPI mode */
+	writew(NAND_CMD_DUMMYREAD, snfi->regs + NFI_CMD);
+	reg = readl(snfi->regs + NFI_CON) | CON_BRD;
+	writew(reg, snfi->regs + NFI_CON);
+
+	ret = wait_for_completion_timeout(&snfi->done, msecs_to_jiffies(500));
+	if (!ret) {
+		dev_err(snfi->dev, "read ahb done timeout\n");
+		writew(0, snfi->regs + NFI_INTR_EN);
+		ret = -ETIMEDOUT;
+		goto out;
+	}
+
+	ret = readl_poll_timeout_atomic(snfi->regs + NFI_BYTELEN, reg,
+					ADDRCNTR_SEC(reg) >= sectors, 10,
+					MTK_TIMEOUT);
+	if (ret < 0) {
+		dev_err(snfi->dev, "polling read byte len timeout\n");
+		ret = -EIO;
+	} else {
+		if (!oob_on) {
+			ret = mtk_ecc_wait_done(snfi->ecc, ECC_DECODE);
+			if (ret) {
+				dev_warn(snfi->dev, "wait ecc done timeout\n");
+			} else {
+				mtk_snfi_update_ecc_stats(mem, snfi->buffer,
+							  sectors);
+				mtk_snfi_read_fdm_data(mem, sectors);
+			}
+		}
+	}
+
+	if (oob_on)
+		goto out;
+
+	mtk_ecc_disable(snfi->ecc);
+out:
+	dma_unmap_single(snfi->dev, dma_addr, len, DMA_FROM_DEVICE);
+	writel(0, snfi->regs + NFI_CON);
+	writel(0, snfi->regs + NFI_CNFG);
+	reg = readl(snfi->regs + SNFI_MISC_CTL);
+	reg &= ~RD_CUSTOM_EN;
+	writel(reg, snfi->regs + SNFI_MISC_CTL);
+
+	return ret;
+}
+
+static int mtk_snfi_write_to_cache(struct spi_mem *mem,
+				   const struct spi_mem_op *op,
+				   int oob_on)
+{
+	struct mtk_snfi *snfi = spi_controller_get_devdata(mem->spi->master);
+	struct spinand_device *spinand = spi_mem_get_drvdata(mem);
+	struct mtd_info *mtd = spinand_to_mtd(spinand);
+	u32 sectors = mtd->writesize / snfi->caps->nand_sec_size;
+	struct mtk_snfi_nand_chip *snfi_nand = &snfi->snfi_nand;
+	u32 reg, len, col_addr = 0;
+	dma_addr_t dma_addr;
+	int ret;
+
+	len = sectors * (snfi->caps->nand_sec_size
+	      + snfi_nand->spare_per_sector);
+
+	dma_addr = dma_map_single(snfi->dev, snfi->buffer, len,
+				  DMA_TO_DEVICE);
+	ret = dma_mapping_error(snfi->dev, dma_addr);
+	if (ret) {
+		dev_err(snfi->dev, "dma mapping error\n");
+		return -EINVAL;
+	}
+
+	/* set program load cmd and address */
+	reg = (op->cmd.opcode << WR_LOAD_CMD_SHIFT);
+	writel(reg, snfi->regs + SNFI_PG_CTL1);
+	writel(col_addr & WR_LOAD_ADDR_MASK, snfi->regs + SNFI_PG_CTL2);
+
+	reg = readl(snfi->regs + SNFI_MISC_CTL);
+	reg |= WR_CUSTOM_EN;
+	reg &= ~(RD_MODE_MASK | WR_X4_EN);
+
+	if (op->data.buswidth == 4)
+		reg |= WR_X4_EN;
+	writel(reg, snfi->regs + SNFI_MISC_CTL);
+
+	writel(len << WR_LEN_SHIFT, snfi->regs + SNFI_MISC_CTL2);
+	writew(sectors << CON_SEC_SHIFT, snfi->regs + NFI_CON);
+
+	reg = readw(snfi->regs + NFI_CNFG);
+	reg &= ~(CNFG_READ_EN | CNFG_BYTE_RW);
+	reg |= CNFG_DMA | CNFG_DMA_BURST_EN | CNFG_OP_PROGRAM;
+
+	if (!oob_on) {
+		reg |= CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN;
+		writew(reg, snfi->regs + NFI_CNFG);
+
+		snfi->ecc_cfg.mode = ECC_NFI_MODE;
+		snfi->ecc_cfg.op = ECC_ENCODE;
+		ret = mtk_ecc_enable(snfi->ecc, &snfi->ecc_cfg);
+		if (ret) {
+			dev_err(snfi->dev, "ecc enable failed\n");
+			/* clear NFI_CNFG */
+			reg &= ~(CNFG_DMA_BURST_EN | CNFG_DMA |
+				 CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN);
+			writew(reg, snfi->regs + NFI_CNFG);
+			goto out;
+		}
+		/* write OOB into the FDM registers (OOB area in MTK NAND) */
+		mtk_snfi_write_fdm_data(mem, sectors);
+	} else {
+		writew(reg, snfi->regs + NFI_CNFG);
+	}
+	writel(lower_32_bits(dma_addr), snfi->regs + NFI_STRADDR);
+	readw(snfi->regs + NFI_INTR_STA);
+	writew(INTR_AHB_DONE_EN, snfi->regs + NFI_INTR_EN);
+
+	init_completion(&snfi->done);
+
+	/* set a dummy command to make the NFI enter SPI mode */
+	writew(NAND_CMD_DUMMYPROG, snfi->regs + NFI_CMD);
+	reg = readl(snfi->regs + NFI_CON) | CON_BWR;
+	writew(reg, snfi->regs + NFI_CON);
+
+	ret = wait_for_completion_timeout(&snfi->done, msecs_to_jiffies(500));
+	if (!ret) {
+		dev_err(snfi->dev, "custom program done timeout\n");
+		writew(0, snfi->regs + NFI_INTR_EN);
+		ret = -ETIMEDOUT;
+		goto ecc_disable;
+	}
+
+	ret = readl_poll_timeout_atomic(snfi->regs + NFI_ADDRCNTR, reg,
+					ADDRCNTR_SEC(reg) >= sectors,
+					10, MTK_TIMEOUT);
+	if (ret)
+		dev_err(snfi->dev, "hwecc write timeout\n");
+
+ecc_disable:
+	mtk_ecc_disable(snfi->ecc);
+
+out:
+	dma_unmap_single(snfi->dev, dma_addr, len, DMA_TO_DEVICE);
+	writel(0, snfi->regs + NFI_CON);
+	writel(0, snfi->regs + NFI_CNFG);
+	reg = readl(snfi->regs + SNFI_MISC_CTL);
+	reg &= ~WR_CUSTOM_EN;
+	writel(reg, snfi->regs + SNFI_MISC_CTL);
+
+	return ret;
+}
+
+static int mtk_snfi_read(struct spi_mem *mem,
+			 const struct spi_mem_op *op)
+{
+	struct spinand_device *spinand = spi_mem_get_drvdata(mem);
+	struct mtk_snfi *snfi = spi_controller_get_devdata(mem->spi->master);
+	struct mtd_info *mtd = spinand_to_mtd(spinand);
+	struct mtk_snfi_nand_chip *snfi_nand = &snfi->snfi_nand;
+	u32 col_addr = op->addr.val;
+	int i, ret, sectors, oob_on = false;
+
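+	/* a column address at the start of the OOB area means a raw OOB
+	 * access, so bypass the HW ECC path
+	 */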
+	if (col_addr == mtd->writesize)
+		oob_on = true;
+
+	ret = mtk_snfi_read_from_cache(mem, op, oob_on);
+	if (ret) {
+		dev_warn(snfi->dev, "read from cache fail\n");
+		return ret;
+	}
+
+	sectors = mtd->writesize / snfi->caps->nand_sec_size;
+	for (i = 0; i < sectors; i++) {
+		if (oob_on)
+			memcpy(oob_ptr(mem, i),
+			       mtk_oob_ptr(mem, snfi->buffer, i),
+			       snfi->caps->nand_fdm_size);
+
+		if (i == snfi_nand->bad_mark.sec && snfi->caps->bad_mark_swap)
+			snfi_nand->bad_mark.bm_swap(mem, snfi->buffer,
+						    oob_on);
+	}
+
+	if (!oob_on)
+		memcpy(spinand->databuf, snfi->buffer, mtd->writesize);
+
+	return ret;
+}
+
+static int mtk_snfi_write(struct spi_mem *mem,
+			  const struct spi_mem_op *op)
+{
+	struct spinand_device *spinand = spi_mem_get_drvdata(mem);
+	struct mtk_snfi *snfi = spi_controller_get_devdata(mem->spi->master);
+	struct mtd_info *mtd = spinand_to_mtd(spinand);
+	struct mtk_snfi_nand_chip *snfi_nand = &snfi->snfi_nand;
+	u32 i, sectors, col_addr = op->addr.val;
+	int ret, oob_on = false;
+
+	if (col_addr == mtd->writesize)
+		oob_on = true;
+
+	sectors = mtd->writesize / snfi->caps->nand_sec_size;
+	memset(snfi->buffer, 0xff, mtd->writesize + mtd->oobsize);
+
+	if (!oob_on)
+		memcpy(snfi->buffer, spinand->databuf, mtd->writesize);
+
+	for (i = 0; i < sectors; i++) {
+		if (i == snfi_nand->bad_mark.sec && snfi->caps->bad_mark_swap)
+			snfi_nand->bad_mark.bm_swap(mem, snfi->buffer, oob_on);
+
+		if (oob_on)
+			memcpy(mtk_oob_ptr(mem, snfi->buffer, i),
+			       oob_ptr(mem, i),
+			       snfi->caps->nand_fdm_size);
+	}
+
+	ret = mtk_snfi_write_to_cache(mem, op, oob_on);
+	if (ret)
+		dev_warn(snfi->dev, "write to cache fail\n");
+
+	return ret;
+}
+
+static int mtk_snfi_command_exec(struct mtk_snfi *snfi,
+				 const u8 *txbuf, u8 *rxbuf,
+				 const u32 txlen, const u32 rxlen)
+{
+	u32 tmp, i, j, reg, m;
+	u8 *p_tmp = (u8 *)(&tmp);
+	int ret = 0;
+
+	/* Moving tx data to NFI_SPI GPRAM */
+	for (i = 0, m = 0; i < txlen; ) {
+		for (j = 0, tmp = 0; i < txlen && j < 4; i++, j++)
+			p_tmp[j] = txbuf[i];
+
+		writel(tmp, snfi->regs + SNFI_GPRAM_DATA + m);
+		m += 4;
+	}
+
+	writel(txlen, snfi->regs + SNFI_MAC_OUTL);
+	writel(rxlen, snfi->regs + SNFI_MAC_INL);
+	ret = mtk_snfi_mac_op(snfi);
+	if (ret)
+		return ret;
+
+	/* For NULL input data, this loop will be skipped */
+	if (rxlen)
+		for (i = 0, m = 0; i < rxlen; ) {
+			reg = readl(snfi->regs +
+				    SNFI_GPRAM_DATA + m);
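+			/* the first GPRAM word still holds the tx bytes, so
+			 * the rx data starts at byte offset txlen within it
+			 */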
+			for (j = 0; i < rxlen && j < 4; i++, j++, rxbuf++) {
+				if (m == 0 && i == 0)
+					j = i + txlen;
+				*rxbuf = (reg >> (j * 8)) & 0xFF;
+			}
+			m += 4;
+		}
+
+	return ret;
+}
+
+/*
+ * mtk_snfi_exec_op - send a command/data sequence to the SPI NAND
+ * through the MTK controller
+ */
+static int mtk_snfi_exec_op(struct spi_mem *mem,
+			    const struct spi_mem_op *op)
+{
+	struct mtk_snfi *snfi = spi_controller_get_devdata(mem->spi->master);
+	struct spinand_device *spinand = spi_mem_get_drvdata(mem);
+	struct mtd_info *mtd = spinand_to_mtd(spinand);
+	struct nand_device *nand = mtd_to_nanddev(mtd);
+	const struct spi_mem_op *read_cache;
+	const struct spi_mem_op *write_cache;
+	u32 tmpbufsize, txlen = 0, rxlen = 0;
+	u8 *txbuf, *rxbuf = NULL, *buf;
+	int i, ret = 0;
+
+	ret = mtk_snfi_reset(snfi);
+	if (ret) {
+		dev_warn(snfi->dev, "reset spi memory controller fail\n");
+		return ret;
+	}
+
+	/*
+	 * Once the BBT is initialized, the framework has already detected
+	 * the NAND geometry: page read/write ops then take the DMA + HW ECC
+	 * path below, while all other ops go through the GPRAM MAC mode.
+	 */
+	if (nand->bbt.cache) {
+		read_cache = spinand->op_templates.read_cache;
+		write_cache = spinand->op_templates.write_cache;
+
+		ret = mtk_snfi_hw_runtime_config(mem);
+		if (ret)
+			return ret;
+
+		/* Read/Write use the cache path; Erase uses the framework flow */
+		if (op->cmd.opcode == read_cache->cmd.opcode) {
+			ret = mtk_snfi_read(mem, op);
+			if (ret)
+				dev_warn(snfi->dev, "snfi read fail\n");
+			return ret;
+		} else if (op->cmd.opcode == write_cache->cmd.opcode) {
+			ret = mtk_snfi_write(mem, op);
+			if (ret)
+				dev_warn(snfi->dev, "snfi write fail\n");
+			return ret;
+		}
+	}
+
+	tmpbufsize = sizeof(op->cmd.opcode) + op->addr.nbytes +
+		     op->dummy.nbytes + op->data.nbytes;
+
+	txbuf = kzalloc(tmpbufsize, GFP_KERNEL);
+	if (!txbuf)
+		return -ENOMEM;
+
+	txbuf[txlen++] = op->cmd.opcode;
+
+	if (op->addr.nbytes)
+		for (i = 0; i < op->addr.nbytes; i++)
+			txbuf[txlen++] = op->addr.val >>
+					 (8 * (op->addr.nbytes - i - 1));
+
+	txlen += op->dummy.nbytes;
+
+	if (op->data.dir == SPI_MEM_DATA_OUT)
+		for (i = 0; i < op->data.nbytes; i++) {
+			buf = (u8 *)op->data.buf.out;
+			txbuf[txlen++] = buf[i];
+		}
+
+	if (op->data.dir == SPI_MEM_DATA_IN) {
+		rxbuf = (u8 *)op->data.buf.in;
+		rxlen += op->data.nbytes;
+	}
+
+	ret = mtk_snfi_command_exec(snfi, txbuf, rxbuf, txlen, rxlen);
+	kfree(txbuf);
+
+	return ret;
+}
+
+static int mtk_snfi_init(struct mtk_snfi *snfi)
+{
+	int ret;
+
+	/* Reset the state machine and data FIFO */
+	ret = mtk_snfi_reset(snfi);
+	if (ret) {
+		dev_warn(snfi->dev, "MTK reset controller fail\n");
+		return ret;
+	}
+
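+	/* bounce buffer for DMA, sized for the largest supported page (4KiB)
+	 * plus its spare area
+	 */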
+	snfi->buffer = devm_kzalloc(snfi->dev, 4096 + 256, GFP_KERNEL);
+	if (!snfi->buffer)
+		return -ENOMEM;
+
+	/* Clear pending interrupts; the status register clears on read. */
+	readw(snfi->regs + NFI_INTR_STA);
+	writew(0, snfi->regs + NFI_INTR_EN);
+
+	writel(0, snfi->regs + NFI_CON);
+	writel(0, snfi->regs + NFI_CNFG);
+
+	/* Change to NFI_SPI mode. */
+	writel(SNFI_MODE_EN, snfi->regs + SNFI_CNFG);
+
+	return 0;
+}
+
+static int mtk_snfi_check_buswidth(u8 width)
+{
+	switch (width) {
+	case 1:
+	case 2:
+	case 4:
+		return 0;
+
+	default:
+		break;
+	}
+
+	return -ENOTSUPP;
+}
+
+static bool mtk_snfi_supports_op(struct spi_mem *mem,
+				 const struct spi_mem_op *op)
+{
+	int ret = 0;
+
+	/* The MTK SPI NAND controller only supports 1-bit command buswidth */
+	if (op->cmd.buswidth != 1)
+		ret = -ENOTSUPP;
+
+	if (op->addr.nbytes)
+		ret |= mtk_snfi_check_buswidth(op->addr.buswidth);
+
+	if (op->dummy.nbytes)
+		ret |= mtk_snfi_check_buswidth(op->dummy.buswidth);
+
+	if (op->data.nbytes)
+		ret |= mtk_snfi_check_buswidth(op->data.buswidth);
+
+	if (ret)
+		return false;
+
+	return true;
+}
+
+static const struct spi_controller_mem_ops mtk_snfi_ops = {
+	.supports_op = mtk_snfi_supports_op,
+	.exec_op = mtk_snfi_exec_op,
+};
+
+static const struct mtk_snfi_caps snfi_mt7622 = {
+	.spare_size = spare_size_mt7622,
+	.num_spare_size = 4,
+	.nand_sec_size = 512,
+	.nand_fdm_size = 8,
+	.nand_fdm_ecc_size = 1,
+	.ecc_parity_bits = 13,
+	.pageformat_spare_shift = 4,
+	.bad_mark_swap = 0,
+};
+
+static const struct of_device_id mtk_snfi_id_table[] = {
+	{ .compatible = "mediatek,mt7622-snfi", .data = &snfi_mt7622, },
+	{ /* sentinel */ }
+};
+
+static int mtk_snfi_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *np = dev->of_node;
+	struct spi_controller *ctlr;
+	struct mtk_snfi *snfi;
+	struct resource *res;
+	int ret = 0, irq;
+
+	ctlr = spi_alloc_master(&pdev->dev, sizeof(*snfi));
+	if (!ctlr)
+		return -ENOMEM;
+
+	snfi = spi_controller_get_devdata(ctlr);
+	snfi->caps = of_device_get_match_data(dev);
+	snfi->dev = dev;
+
+	snfi->ecc = of_mtk_ecc_get(np);
+	if (IS_ERR_OR_NULL(snfi->ecc)) {
+		ret = -ENODEV;
+		goto err_put_master;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	snfi->regs = devm_ioremap_resource(dev, res);
+	if (IS_ERR(snfi->regs)) {
+		ret = PTR_ERR(snfi->regs);
+		goto release_ecc;
+	}
+
+	/* find the clocks */
+	snfi->clk.nfi_clk = devm_clk_get(dev, "nfi_clk");
+	if (IS_ERR(snfi->clk.nfi_clk)) {
+		dev_err(dev, "no nfi clk\n");
+		ret = PTR_ERR(snfi->clk.nfi_clk);
+		goto release_ecc;
+	}
+
+	snfi->clk.spi_clk = devm_clk_get(dev, "spi_clk");
+	if (IS_ERR(snfi->clk.spi_clk)) {
+		dev_err(dev, "no spi clk\n");
+		ret = PTR_ERR(snfi->clk.spi_clk);
+		goto release_ecc;
+	}
+
+	ret = mtk_snfi_enable_clk(dev, &snfi->clk);
+	if (ret)
+		goto release_ecc;
+
+	/* find the irq */
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(dev, "no snfi irq resource\n");
+		ret = -EINVAL;
+		goto clk_disable;
+	}
+
+	ret = devm_request_irq(dev, irq, mtk_snfi_irq, 0, "mtk-snfi", snfi);
+	if (ret) {
+		dev_err(dev, "failed to request snfi irq\n");
+		goto clk_disable;
+	}
+
+	ret = dma_set_mask(dev, DMA_BIT_MASK(32));
+	if (ret) {
+		dev_err(dev, "failed to set dma mask\n");
+		goto clk_disable;
+	}
+
+	ctlr->dev.of_node = np;
+	ctlr->mem_ops = &mtk_snfi_ops;
+
+	platform_set_drvdata(pdev, snfi);
+	ret = mtk_snfi_init(snfi);
+	if (ret) {
+		dev_err(dev, "failed to init snfi\n");
+		goto clk_disable;
+	}
+
+	ret = devm_spi_register_master(dev, ctlr);
+	if (ret)
+		goto clk_disable;
+
+	return 0;
+
+clk_disable:
+	mtk_snfi_disable_clk(&snfi->clk);
+
+release_ecc:
+	mtk_ecc_release(snfi->ecc);
+
+err_put_master:
+	spi_master_put(ctlr);
+
+	dev_err(dev, "MediaTek SPI NAND interface probe failed %d\n", ret);
+	return ret;
+}
+
+static int mtk_snfi_remove(struct platform_device *pdev)
+{
+	struct mtk_snfi *snfi = platform_get_drvdata(pdev);
+
+	mtk_snfi_disable_clk(&snfi->clk);
+
+	return 0;
+}
+
+static int mtk_snfi_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	struct mtk_snfi *snfi = platform_get_drvdata(pdev);
+
+	mtk_snfi_disable_clk(&snfi->clk);
+
+	return 0;
+}
+
+static int mtk_snfi_resume(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct mtk_snfi *snfi = dev_get_drvdata(dev);
+	int ret;
+
+	ret = mtk_snfi_enable_clk(dev, &snfi->clk);
+	if (ret)
+		return ret;
+
+	ret = mtk_snfi_init(snfi);
+	if (ret)
+		dev_err(dev, "failed to init snfi controller\n");
+
+	return ret;
+}
+
+static struct platform_driver mtk_snfi_driver = {
+	.driver = {
+		.name	= "mtk-snfi",
+		.of_match_table = mtk_snfi_id_table,
+	},
+	.probe		= mtk_snfi_probe,
+	.remove		= mtk_snfi_remove,
+	.suspend	= mtk_snfi_suspend,
+	.resume		= mtk_snfi_resume,
+};
+
+module_platform_driver(mtk_snfi_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Xiangsheng Hou <xiangsheng.hou@mediatek.com>");
+MODULE_DESCRIPTION("MediaTek SPI Memory Interface Driver");
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -389,6 +389,15 @@ config SPI_MT65XX
 	  say Y or M here.If you are not sure, say N.
 	  SPI drivers for Mediatek MT65XX and MT81XX series ARM SoCs.
 
+config SPI_MTK_SNFI
+	tristate "MediaTek SPI NAND interface"
+	select MTD_SPI_NAND
+	help
+	  This selects the SPI NAND Flash interface (SNFI),
+	  which can be found on MediaTek SoCs.
+	  Say Y or M here. If you are not sure, say N.
+	  Note that parallel NAND and SPI NAND are mutually
+	  exclusive on MediaTek SoCs.
+
 config SPI_NUC900
 	tristate "Nuvoton NUC900 series SPI"
 	depends on ARCH_W90X900
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -57,6 +57,7 @@ obj-$(CONFIG_SPI_MPC512x_PSC)		+= spi-mp
 obj-$(CONFIG_SPI_MPC52xx_PSC)		+= spi-mpc52xx-psc.o
 obj-$(CONFIG_SPI_MPC52xx)		+= spi-mpc52xx.o
 obj-$(CONFIG_SPI_MT65XX)		+= spi-mt65xx.o
+obj-$(CONFIG_SPI_MTK_SNFI)		+= spi-mtk-snfi.o
 obj-$(CONFIG_SPI_MXS)			+= spi-mxs.o
 obj-$(CONFIG_SPI_NUC900)		+= spi-nuc900.o
 obj-$(CONFIG_SPI_OC_TINY)		+= spi-oc-tiny.o