target/linux/mediatek/patches-4.4/0075-mtd-mediatek-driver-for-MTK-Smart-Device-Gen1-NAND.patch
1 From de18239fc971cfc17c53320c66ae64dd5ade032d Mon Sep 17 00:00:00 2001
2 From: Jorge Ramirez-Ortiz <jorge.ramirez-ortiz@linaro.org>
3 Date: Fri, 29 Apr 2016 12:17:22 -0400
4 Subject: [PATCH 075/102] mtd: mediatek: driver for MTK Smart Device Gen1 NAND
5
6 This patch adds support for MediaTek's Smart Device Gen1 (SDG1) NAND Flash
7 Controller (NFC) embedded in the MT2701 SoC, together with its HW ECC engine.
8
9 Signed-off-by: Jorge Ramirez-Ortiz <jorge.ramirez-ortiz@linaro.org>
10 ---
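Note: a condensed, illustrative sketch of how mtk_nand.c drives the new
mtk_ecc API during a hardware-ECC page read. The helper below is not part of
the patch and its name is hypothetical; the real flow lives in
mtk_nfc_read_subpage().

  #include "mtk_ecc.h"

  /*
   * Illustrative only: decode 'sectors' sectors that the NFI has just
   * DMA'd into memory.  'ecc' comes from of_mtk_ecc_get() on the
   * "ecc-engine" phandle; cfg->strength and cfg->dec_len are assumed to
   * have been set up by the runtime configuration.
   */
  static int sketch_hwecc_decode(struct mtk_ecc *ecc,
                                 struct mtk_ecc_config *cfg,
                                 u32 sectors, struct mtk_ecc_stats *stats)
  {
          int ret;

          cfg->ecc_mode = ECC_NFI_MODE;   /* pipelined with the NFI */
          cfg->codec = ECC_DEC;
          cfg->sec_mask = sectors;

          ret = mtk_ecc_enable(ecc, cfg);
          if (ret)
                  return ret;

          /* ... the NFI AHB/DMA read of the sectors runs here ... */

          ret = mtk_ecc_wait_irq_done(ecc, ECC_DEC);
          if (!ret)
                  mtk_ecc_get_stats(ecc, stats, sectors);

          mtk_ecc_disable(ecc, cfg);

          return ret;
  }
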
11 drivers/mtd/nand/Kconfig | 7 +
12 drivers/mtd/nand/Makefile | 1 +
13 drivers/mtd/nand/mtk_ecc.c | 527 ++++++++++++++++
14 drivers/mtd/nand/mtk_ecc.h | 53 ++
15 drivers/mtd/nand/mtk_nand.c | 1432 +++++++++++++++++++++++++++++++++++++++++++
16 5 files changed, 2020 insertions(+)
17 create mode 100644 drivers/mtd/nand/mtk_ecc.c
18 create mode 100644 drivers/mtd/nand/mtk_ecc.h
19 create mode 100644 drivers/mtd/nand/mtk_nand.c
20
21 diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
22 index f05e0e9..3c26e89 100644
23 --- a/drivers/mtd/nand/Kconfig
24 +++ b/drivers/mtd/nand/Kconfig
25 @@ -563,4 +563,11 @@ config MTD_NAND_QCOM
26 Enables support for NAND flash chips on SoCs containing the EBI2 NAND
27 controller. This controller is found on IPQ806x SoC.
28
29 +config MTD_NAND_MTK
30 + tristate "Support for NAND controller on MTK SoCs"
31 + depends on HAS_DMA
32 + help
33 + Enables support for NAND controller on MTK SoCs.
34 + This controller is found on mt27xx, mt81xx, mt65xx SoCs.
35 +
36 endif # MTD_NAND
37 diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
38 index f553353..cafde6f 100644
39 --- a/drivers/mtd/nand/Makefile
40 +++ b/drivers/mtd/nand/Makefile
41 @@ -57,5 +57,6 @@ obj-$(CONFIG_MTD_NAND_SUNXI) += sunxi_nand.o
42 obj-$(CONFIG_MTD_NAND_HISI504) += hisi504_nand.o
43 obj-$(CONFIG_MTD_NAND_BRCMNAND) += brcmnand/
44 obj-$(CONFIG_MTD_NAND_QCOM) += qcom_nandc.o
45 +obj-$(CONFIG_MTD_NAND_MTK) += mtk_nand.o mtk_ecc.o
46
47 nand-objs := nand_base.o nand_bbt.o nand_timings.o
48 diff --git a/drivers/mtd/nand/mtk_ecc.c b/drivers/mtd/nand/mtk_ecc.c
49 new file mode 100644
50 index 0000000..28769f1
51 --- /dev/null
52 +++ b/drivers/mtd/nand/mtk_ecc.c
53 @@ -0,0 +1,527 @@
54 +/*
55 + * MTK ECC controller driver.
56 + * Copyright (C) 2016 MediaTek Inc.
57 + * Authors: Xiaolei Li <xiaolei.li@mediatek.com>
58 + * Jorge Ramirez-Ortiz <jorge.ramirez-ortiz@linaro.org>
59 + *
60 + * This program is free software; you can redistribute it and/or modify
61 + * it under the terms of the GNU General Public License version 2 as
62 + * published by the Free Software Foundation.
63 + *
64 + * This program is distributed in the hope that it will be useful,
65 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
66 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
67 + * GNU General Public License for more details.
68 + */
69 +
70 +#include <linux/platform_device.h>
71 +#include <linux/dma-mapping.h>
72 +#include <linux/interrupt.h>
73 +#include <linux/clk.h>
74 +#include <linux/module.h>
75 +#include <linux/iopoll.h>
76 +#include <linux/of.h>
77 +#include <linux/of_platform.h>
78 +#include <linux/semaphore.h>
79 +
80 +#include "mtk_ecc.h"
81 +
82 +#define ECC_ENCCON (0x00)
83 +#define ENC_EN (1)
84 +#define ENC_DE (0)
85 +#define ECC_ENCCNFG (0x04)
86 +#define ECC_CNFG_4BIT (0)
87 +#define ECC_CNFG_6BIT (1)
88 +#define ECC_CNFG_8BIT (2)
89 +#define ECC_CNFG_10BIT (3)
90 +#define ECC_CNFG_12BIT (4)
91 +#define ECC_CNFG_14BIT (5)
92 +#define ECC_CNFG_16BIT (6)
93 +#define ECC_CNFG_18BIT (7)
94 +#define ECC_CNFG_20BIT (8)
95 +#define ECC_CNFG_22BIT (9)
96 +#define ECC_CNFG_24BIT (0xa)
97 +#define ECC_CNFG_28BIT (0xb)
98 +#define ECC_CNFG_32BIT (0xc)
99 +#define ECC_CNFG_36BIT (0xd)
100 +#define ECC_CNFG_40BIT (0xe)
101 +#define ECC_CNFG_44BIT (0xf)
102 +#define ECC_CNFG_48BIT (0x10)
103 +#define ECC_CNFG_52BIT (0x11)
104 +#define ECC_CNFG_56BIT (0x12)
105 +#define ECC_CNFG_60BIT (0x13)
106 +#define ECC_MODE_SHIFT (5)
107 +#define ECC_MS_SHIFT (16)
108 +#define ECC_ENCDIADDR (0x08)
109 +#define ECC_ENCIDLE (0x0C)
110 +#define ENC_IDLE BIT(0)
111 +#define ECC_ENCPAR(x) (0x10 + (x) * sizeof(u32))
112 +#define ECC_ENCIRQ_EN (0x80)
113 +#define ENC_IRQEN BIT(0)
114 +#define ECC_ENCIRQ_STA (0x84)
115 +#define ECC_DECCON (0x100)
116 +#define DEC_EN (1)
117 +#define DEC_DE (0)
118 +#define ECC_DECCNFG (0x104)
119 +#define DEC_EMPTY_EN BIT(31)
120 +#define DEC_CNFG_CORRECT (0x3 << 12)
121 +#define ECC_DECIDLE (0x10C)
122 +#define DEC_IDLE BIT(0)
123 +#define ECC_DECENUM0 (0x114)
124 +#define ERR_MASK (0x3f)
125 +#define ECC_DECDONE (0x124)
126 +#define ECC_DECIRQ_EN (0x200)
127 +#define DEC_IRQEN BIT(0)
128 +#define ECC_DECIRQ_STA (0x204)
129 +
130 +#define ECC_TIMEOUT (500000)
131 +
132 +#define ECC_IDLE_REG(x) ((x) == ECC_ENC ? ECC_ENCIDLE : ECC_DECIDLE)
133 +#define ECC_IDLE_MASK(x) ((x) == ECC_ENC ? ENC_IDLE : DEC_IDLE)
134 +#define ECC_IRQ_REG(x) ((x) == ECC_ENC ? ECC_ENCIRQ_EN : ECC_DECIRQ_EN)
135 +#define ECC_IRQ_EN(x) ((x) == ECC_ENC ? ENC_IRQEN : DEC_IRQEN)
136 +#define ECC_CTL_REG(x) ((x) == ECC_ENC ? ECC_ENCCON : ECC_DECCON)
137 +#define ECC_CODEC_ENABLE(x) ((x) == ECC_ENC ? ENC_EN : DEC_EN)
138 +#define ECC_CODEC_DISABLE(x) ((x) == ECC_ENC ? ENC_DE : DEC_DE)
139 +
140 +struct mtk_ecc {
141 + struct device *dev;
142 + void __iomem *regs;
143 + struct clk *clk;
144 +
145 + struct completion done;
146 + struct semaphore sem;
147 + u32 sec_mask;
148 +};
149 +
150 +static inline void mtk_ecc_codec_wait_idle(struct mtk_ecc *ecc,
151 + enum mtk_ecc_codec codec)
152 +{
153 + struct device *dev = ecc->dev;
154 + u32 val;
155 + int ret;
156 +
157 + ret = readl_poll_timeout_atomic(ecc->regs + ECC_IDLE_REG(codec), val,
158 + val & ECC_IDLE_MASK(codec),
159 + 10, ECC_TIMEOUT);
160 + if (ret)
161 + dev_warn(dev, "%s NOT idle\n",
162 + codec == ECC_ENC ? "encoder" : "decoder");
163 +}
164 +
165 +static irqreturn_t mtk_ecc_irq(int irq, void *id)
166 +{
167 + struct mtk_ecc *ecc = id;
168 + enum mtk_ecc_codec codec;
169 + u32 dec, enc;
170 +
171 + dec = readw(ecc->regs + ECC_DECIRQ_STA) & DEC_IRQEN;
172 + if (dec) {
173 + codec = ECC_DEC;
174 + dec = readw(ecc->regs + ECC_DECDONE);
175 + if (dec & ecc->sec_mask) {
176 + ecc->sec_mask = 0;
177 + complete(&ecc->done);
178 + } else
179 + return IRQ_HANDLED;
180 + } else {
181 + enc = readl(ecc->regs + ECC_ENCIRQ_STA) & ENC_IRQEN;
182 + if (enc) {
183 + codec = ECC_ENC;
184 + complete(&ecc->done);
185 + } else
186 + return IRQ_NONE;
187 + }
188 +
189 + writel(0, ecc->regs + ECC_IRQ_REG(codec));
190 +
191 + return IRQ_HANDLED;
192 +}
193 +
194 +static void mtk_ecc_config(struct mtk_ecc *ecc, struct mtk_ecc_config *config)
195 +{
196 + u32 ecc_bit = ECC_CNFG_4BIT, dec_sz, enc_sz;
197 + u32 reg;
198 +
199 + switch (config->strength) {
200 + case 4:
201 + ecc_bit = ECC_CNFG_4BIT;
202 + break;
203 + case 6:
204 + ecc_bit = ECC_CNFG_6BIT;
205 + break;
206 + case 8:
207 + ecc_bit = ECC_CNFG_8BIT;
208 + break;
209 + case 10:
210 + ecc_bit = ECC_CNFG_10BIT;
211 + break;
212 + case 12:
213 + ecc_bit = ECC_CNFG_12BIT;
214 + break;
215 + case 14:
216 + ecc_bit = ECC_CNFG_14BIT;
217 + break;
218 + case 16:
219 + ecc_bit = ECC_CNFG_16BIT;
220 + break;
221 + case 18:
222 + ecc_bit = ECC_CNFG_18BIT;
223 + break;
224 + case 20:
225 + ecc_bit = ECC_CNFG_20BIT;
226 + break;
227 + case 22:
228 + ecc_bit = ECC_CNFG_22BIT;
229 + break;
230 + case 24:
231 + ecc_bit = ECC_CNFG_24BIT;
232 + break;
233 + case 28:
234 + ecc_bit = ECC_CNFG_28BIT;
235 + break;
236 + case 32:
237 + ecc_bit = ECC_CNFG_32BIT;
238 + break;
239 + case 36:
240 + ecc_bit = ECC_CNFG_36BIT;
241 + break;
242 + case 40:
243 + ecc_bit = ECC_CNFG_40BIT;
244 + break;
245 + case 44:
246 + ecc_bit = ECC_CNFG_44BIT;
247 + break;
248 + case 48:
249 + ecc_bit = ECC_CNFG_48BIT;
250 + break;
251 + case 52:
252 + ecc_bit = ECC_CNFG_52BIT;
253 + break;
254 + case 56:
255 + ecc_bit = ECC_CNFG_56BIT;
256 + break;
257 + case 60:
258 + ecc_bit = ECC_CNFG_60BIT;
259 + break;
260 + default:
261 + dev_err(ecc->dev, "invalid strength %d\n", config->strength);
262 + }
263 +
264 + if (config->codec == ECC_ENC) {
265 + /* configure ECC encoder (in bits) */
266 + enc_sz = config->enc_len << 3;
267 +
268 + reg = ecc_bit | (config->ecc_mode << ECC_MODE_SHIFT);
269 + reg |= (enc_sz << ECC_MS_SHIFT);
270 + writel(reg, ecc->regs + ECC_ENCCNFG);
271 +
272 + if (config->ecc_mode != ECC_NFI_MODE)
273 + writel(lower_32_bits(config->addr),
274 + ecc->regs + ECC_ENCDIADDR);
275 +
276 + } else {
277 + /* configure ECC decoder (in bits) */
278 + dec_sz = config->dec_len;
279 +
280 + reg = ecc_bit | (config->ecc_mode << ECC_MODE_SHIFT);
281 + reg |= (dec_sz << ECC_MS_SHIFT) | DEC_CNFG_CORRECT;
282 + reg |= DEC_EMPTY_EN;
283 + writel(reg, ecc->regs + ECC_DECCNFG);
284 +
285 + if (config->sec_mask)
286 + ecc->sec_mask = 1 << (config->sec_mask - 1);
287 + }
288 +}
289 +
290 +void mtk_ecc_get_stats(struct mtk_ecc *ecc, struct mtk_ecc_stats *stats,
291 + int sectors)
292 +{
293 + u32 offset, i, err;
294 + u32 bitflips = 0;
295 +
296 + stats->corrected = 0;
297 + stats->failed = 0;
298 +
299 + for (i = 0; i < sectors; i++) {
300 + offset = (i >> 2) << 2;
301 + err = readl(ecc->regs + ECC_DECENUM0 + offset);
302 + err = err >> ((i % 4) * 8);
303 + err &= ERR_MASK;
304 + if (err == ERR_MASK) {
305 + /* uncorrectable errors */
306 + stats->failed++;
307 + continue;
308 + }
309 +
310 + stats->corrected += err;
311 + bitflips = max_t(u32, bitflips, err);
312 + }
313 +
314 + stats->bitflips = bitflips;
315 +}
316 +EXPORT_SYMBOL(mtk_ecc_get_stats);
317 +
318 +void mtk_ecc_release(struct mtk_ecc *ecc)
319 +{
320 + clk_disable_unprepare(ecc->clk);
321 + put_device(ecc->dev);
322 +}
323 +EXPORT_SYMBOL(mtk_ecc_release);
324 +
325 +static struct mtk_ecc *mtk_ecc_get(struct device_node *np)
326 +{
327 + struct platform_device *pdev;
328 + struct mtk_ecc *ecc;
329 +
330 + pdev = of_find_device_by_node(np);
331 + if (!pdev || !platform_get_drvdata(pdev))
332 + return ERR_PTR(-EPROBE_DEFER);
333 +
334 + get_device(&pdev->dev);
335 + ecc = platform_get_drvdata(pdev);
336 + clk_prepare_enable(ecc->clk);
337 + mtk_ecc_hw_init(ecc);
338 +
339 + return ecc;
340 +}
341 +
342 +struct mtk_ecc *of_mtk_ecc_get(struct device_node *of_node)
343 +{
344 + struct mtk_ecc *ecc = NULL;
345 + struct device_node *np;
346 +
347 + np = of_parse_phandle(of_node, "ecc-engine", 0);
348 + if (np) {
349 + ecc = mtk_ecc_get(np);
350 + of_node_put(np);
351 + }
352 +
353 + return ecc;
354 +}
355 +EXPORT_SYMBOL(of_mtk_ecc_get);
356 +
357 +int mtk_ecc_enable(struct mtk_ecc *ecc, struct mtk_ecc_config *config)
358 +{
359 + enum mtk_ecc_codec codec = config->codec;
360 + int ret;
361 +
362 + ret = down_interruptible(&ecc->sem);
363 + if (ret) {
364 + dev_err(ecc->dev, "interrupted when attempting to lock\n");
365 + return ret;
366 + }
367 +
368 + mtk_ecc_codec_wait_idle(ecc, codec);
369 + mtk_ecc_config(ecc, config);
370 + writew(ECC_CODEC_ENABLE(codec), ecc->regs + ECC_CTL_REG(codec));
371 +
372 + init_completion(&ecc->done);
373 + writew(ECC_IRQ_EN(codec), ecc->regs + ECC_IRQ_REG(codec));
374 +
375 + return 0;
376 +}
377 +EXPORT_SYMBOL(mtk_ecc_enable);
378 +
379 +void mtk_ecc_disable(struct mtk_ecc *ecc, struct mtk_ecc_config *config)
380 +{
381 + enum mtk_ecc_codec codec = config->codec;
382 +
383 + mtk_ecc_codec_wait_idle(ecc, codec);
384 + writew(0, ecc->regs + ECC_IRQ_REG(codec));
385 + writew(ECC_CODEC_DISABLE(codec), ecc->regs + ECC_CTL_REG(codec));
386 + up(&ecc->sem);
387 +}
388 +EXPORT_SYMBOL(mtk_ecc_disable);
389 +
390 +int mtk_ecc_wait_irq_done(struct mtk_ecc *ecc, enum mtk_ecc_codec codec)
391 +{
392 + int ret;
393 +
394 + ret = wait_for_completion_timeout(&ecc->done, msecs_to_jiffies(500));
395 + if (!ret) {
396 +		dev_err(ecc->dev, "%s timeout - interrupt did not arrive\n",
397 + (codec == ECC_ENC) ? "encoder" : "decoder");
398 + return -ETIMEDOUT;
399 + }
400 +
401 + return 0;
402 +}
403 +EXPORT_SYMBOL(mtk_ecc_wait_irq_done);
404 +
405 +int mtk_ecc_encode_non_nfi_mode(struct mtk_ecc *ecc,
406 + struct mtk_ecc_config *config, u8 *data, u32 bytes)
407 +{
408 + dma_addr_t addr;
409 + u32 *p, len, i;
410 + int ret = 0;
411 +
412 + addr = dma_map_single(ecc->dev, data, bytes, DMA_TO_DEVICE);
413 + ret = dma_mapping_error(ecc->dev, addr);
414 + if (ret) {
415 + dev_err(ecc->dev, "dma mapping error\n");
416 + return -EINVAL;
417 + }
418 +
419 + config->codec = ECC_ENC;
420 + config->addr = addr;
421 + ret = mtk_ecc_enable(ecc, config);
422 + if (ret) {
423 + dma_unmap_single(ecc->dev, addr, bytes, DMA_TO_DEVICE);
424 + return ret;
425 + }
426 +
427 + ret = mtk_ecc_wait_irq_done(ecc, ECC_ENC);
428 + if (ret)
429 + goto timeout;
430 +
431 + mtk_ecc_codec_wait_idle(ecc, ECC_ENC);
432 +
433 + /* Program ECC bytes to OOB: per sector oob = FDM + ECC + SPARE */
434 + len = (config->strength * ECC_PARITY_BITS + 7) >> 3;
435 + p = (u32 *) (data + bytes);
436 +
437 + /* write the parity bytes generated by the ECC back to the OOB region */
438 + for (i = 0; i < len; i++)
439 + p[i] = readl(ecc->regs + ECC_ENCPAR(i));
440 +timeout:
441 +
442 + dma_unmap_single(ecc->dev, addr, bytes, DMA_TO_DEVICE);
443 + mtk_ecc_disable(ecc, config);
444 +
445 + return ret;
446 +}
447 +EXPORT_SYMBOL(mtk_ecc_encode_non_nfi_mode);
448 +
449 +void mtk_ecc_hw_init(struct mtk_ecc *ecc)
450 +{
451 + mtk_ecc_codec_wait_idle(ecc, ECC_ENC);
452 + writew(ENC_DE, ecc->regs + ECC_ENCCON);
453 +
454 + mtk_ecc_codec_wait_idle(ecc, ECC_DEC);
455 + writel(DEC_DE, ecc->regs + ECC_DECCON);
456 +}
457 +
458 +void mtk_ecc_update_strength(u32 *p)
459 +{
460 + u32 ecc[] = {4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 28, 32, 36,
461 + 40, 44, 48, 52, 56, 60};
462 + int i;
463 +
464 + for (i = 0; i < ARRAY_SIZE(ecc); i++) {
465 + if (*p <= ecc[i]) {
466 + if (!i)
467 + *p = ecc[i];
468 + else if (*p != ecc[i])
469 + *p = ecc[i - 1];
470 + return;
471 + }
472 + }
473 +
474 + *p = ecc[ARRAY_SIZE(ecc) - 1];
475 +}
476 +EXPORT_SYMBOL(mtk_ecc_update_strength);
477 +
478 +static int mtk_ecc_probe(struct platform_device *pdev)
479 +{
480 + struct device *dev = &pdev->dev;
481 + struct mtk_ecc *ecc;
482 + struct resource *res;
483 + int irq, ret;
484 +
485 + ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL);
486 + if (!ecc)
487 + return -ENOMEM;
488 +
489 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
490 + ecc->regs = devm_ioremap_resource(dev, res);
491 + if (IS_ERR(ecc->regs)) {
492 + dev_err(dev, "failed to map regs: %ld\n", PTR_ERR(ecc->regs));
493 + return PTR_ERR(ecc->regs);
494 + }
495 +
496 + ecc->clk = devm_clk_get(dev, NULL);
497 + if (IS_ERR(ecc->clk)) {
498 + dev_err(dev, "failed to get clock: %ld\n", PTR_ERR(ecc->clk));
499 + return PTR_ERR(ecc->clk);
500 + }
501 +
502 + irq = platform_get_irq(pdev, 0);
503 + if (irq < 0) {
504 + dev_err(dev, "failed to get irq\n");
505 + return -EINVAL;
506 + }
507 +
508 + ret = dma_set_mask(dev, DMA_BIT_MASK(32));
509 + if (ret) {
510 + dev_err(dev, "failed to set DMA mask\n");
511 + return ret;
512 + }
513 +
514 + ret = devm_request_irq(dev, irq, mtk_ecc_irq, 0x0, "mtk-ecc", ecc);
515 + if (ret) {
516 + dev_err(dev, "failed to request irq\n");
517 + return -EINVAL;
518 + }
519 +
520 + ecc->dev = dev;
521 + sema_init(&ecc->sem, 1);
522 + platform_set_drvdata(pdev, ecc);
523 + dev_info(dev, "probed\n");
524 +
525 + return 0;
526 +}
527 +
528 +#ifdef CONFIG_PM_SLEEP
529 +static int mtk_ecc_suspend(struct device *dev)
530 +{
531 + struct mtk_ecc *ecc = dev_get_drvdata(dev);
532 +
533 + clk_disable_unprepare(ecc->clk);
534 +
535 + return 0;
536 +}
537 +
538 +static int mtk_ecc_resume(struct device *dev)
539 +{
540 + struct mtk_ecc *ecc = dev_get_drvdata(dev);
541 + int ret;
542 +
543 + ret = clk_prepare_enable(ecc->clk);
544 + if (ret) {
545 + dev_err(dev, "failed to enable clk\n");
546 + return ret;
547 + }
548 +
549 + mtk_ecc_hw_init(ecc);
550 +
551 + return 0;
552 +}
553 +
554 +static SIMPLE_DEV_PM_OPS(mtk_ecc_pm_ops, mtk_ecc_suspend, mtk_ecc_resume);
555 +#endif
556 +
557 +static const struct of_device_id mtk_ecc_dt_match[] = {
558 + { .compatible = "mediatek,mt2701-ecc" },
559 + {},
560 +};
561 +
562 +MODULE_DEVICE_TABLE(of, mtk_ecc_dt_match);
563 +
564 +static struct platform_driver mtk_ecc_driver = {
565 + .probe = mtk_ecc_probe,
566 + .driver = {
567 + .name = "mtk-ecc",
568 + .of_match_table = of_match_ptr(mtk_ecc_dt_match),
569 +#ifdef CONFIG_PM_SLEEP
570 + .pm = &mtk_ecc_pm_ops,
571 +#endif
572 + },
573 +};
574 +
575 +module_platform_driver(mtk_ecc_driver);
576 +
577 +MODULE_AUTHOR("Xiaolei Li <xiaolei.li@mediatek.com>");
578 +MODULE_AUTHOR("Jorge Ramirez-Ortiz <jorge.ramirez-ortiz@linaro.org>");
579 +MODULE_DESCRIPTION("MTK Nand ECC Driver");
580 +MODULE_LICENSE("GPL");
581 diff --git a/drivers/mtd/nand/mtk_ecc.h b/drivers/mtd/nand/mtk_ecc.h
582 new file mode 100644
583 index 0000000..434826f
584 --- /dev/null
585 +++ b/drivers/mtd/nand/mtk_ecc.h
586 @@ -0,0 +1,53 @@
587 +/*
588 + * MTK SDG1 ECC controller
589 + *
590 + * Copyright (c) 2016 Mediatek
591 + * Authors: Xiaolei Li <xiaolei.li@mediatek.com>
592 + * Jorge Ramirez-Ortiz <jorge.ramirez-ortiz@linaro.org>
593 + * This program is free software; you can redistribute it and/or modify it
594 + * under the terms of the GNU General Public License version 2 as published
595 + * by the Free Software Foundation.
596 + */
597 +
598 +#ifndef __DRIVERS_MTD_NAND_MTK_ECC_H__
599 +#define __DRIVERS_MTD_NAND_MTK_ECC_H__
600 +
601 +#include <linux/types.h>
602 +
603 +#define ECC_PARITY_BITS (14)
604 +
605 +enum mtk_ecc_mode {ECC_DMA_MODE = 0, ECC_NFI_MODE = 1};
606 +enum mtk_ecc_codec {ECC_ENC, ECC_DEC};
607 +
608 +struct device_node;
609 +struct mtk_ecc;
610 +
611 +struct mtk_ecc_stats {
612 + u32 corrected;
613 + u32 bitflips;
614 + u32 failed;
615 +};
616 +
617 +struct mtk_ecc_config {
618 + enum mtk_ecc_mode ecc_mode;
619 + enum mtk_ecc_codec codec;
620 + dma_addr_t addr;
621 + u32 sec_mask;
622 + u32 strength;
623 + u32 enc_len;
624 + u32 dec_len;
625 +};
626 +
627 +int mtk_ecc_enable(struct mtk_ecc *, struct mtk_ecc_config *);
628 +void mtk_ecc_disable(struct mtk_ecc *, struct mtk_ecc_config *);
629 +int mtk_ecc_encode_non_nfi_mode(struct mtk_ecc *, struct mtk_ecc_config *,
630 + u8 *, u32);
631 +void mtk_ecc_get_stats(struct mtk_ecc *, struct mtk_ecc_stats *, int);
632 +int mtk_ecc_wait_irq_done(struct mtk_ecc *, enum mtk_ecc_codec);
633 +void mtk_ecc_hw_init(struct mtk_ecc *);
634 +void mtk_ecc_update_strength(u32 *);
635 +
636 +struct mtk_ecc *of_mtk_ecc_get(struct device_node *);
637 +void mtk_ecc_release(struct mtk_ecc *);
638 +
639 +#endif
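
The header above is the complete contract between the ECC engine and the NFI
driver. Below is a minimal illustrative sketch of the stand-alone (DMA,
non-NFI) encode path, mirroring what mtk_nfc_sector_encode() does later in
this patch; the helper name is hypothetical:

  #include "mtk_ecc.h"

  /*
   * Encode one sector plus its FDM bytes; the engine appends the parity
   * right after 'len' bytes in 'buf'.  cfg->strength and cfg->enc_len are
   * assumed to be configured already.
   */
  static int sketch_dma_encode(struct mtk_ecc *ecc,
                               struct mtk_ecc_config *cfg,
                               u8 *buf, u32 len)
  {
          cfg->ecc_mode = ECC_DMA_MODE;
          cfg->codec = ECC_ENC;

          return mtk_ecc_encode_non_nfi_mode(ecc, cfg, buf, len);
  }
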
640 diff --git a/drivers/mtd/nand/mtk_nand.c b/drivers/mtd/nand/mtk_nand.c
641 new file mode 100644
642 index 0000000..907b90c
643 --- /dev/null
644 +++ b/drivers/mtd/nand/mtk_nand.c
645 @@ -0,0 +1,1432 @@
646 +/*
647 + * MTK NAND Flash controller driver.
648 + * Copyright (C) 2016 MediaTek Inc.
649 + * Authors: Xiaolei Li <xiaolei.li@mediatek.com>
650 + * Jorge Ramirez-Ortiz <jorge.ramirez-ortiz@linaro.org>
651 + *
652 + * This program is free software; you can redistribute it and/or modify
653 + * it under the terms of the GNU General Public License version 2 as
654 + * published by the Free Software Foundation.
655 + *
656 + * This program is distributed in the hope that it will be useful,
657 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
658 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
659 + * GNU General Public License for more details.
660 + */
661 +
662 +#include <linux/platform_device.h>
663 +#include <linux/dma-mapping.h>
664 +#include <linux/interrupt.h>
665 +#include <linux/delay.h>
666 +#include <linux/clk.h>
667 +#include <linux/mtd/nand.h>
668 +#include <linux/mtd/mtd.h>
669 +#include <linux/module.h>
670 +#include <linux/iopoll.h>
671 +#include <linux/of.h>
672 +#include "mtk_ecc.h"
673 +
674 +/* NAND controller register definition */
675 +#define NFI_CNFG (0x00)
676 +#define CNFG_AHB BIT(0)
677 +#define CNFG_READ_EN BIT(1)
678 +#define CNFG_DMA_BURST_EN BIT(2)
679 +#define CNFG_BYTE_RW BIT(6)
680 +#define CNFG_HW_ECC_EN BIT(8)
681 +#define CNFG_AUTO_FMT_EN BIT(9)
682 +#define CNFG_OP_CUST (6 << 12)
683 +#define NFI_PAGEFMT (0x04)
684 +#define PAGEFMT_FDM_ECC_SHIFT (12)
685 +#define PAGEFMT_FDM_SHIFT (8)
686 +#define PAGEFMT_SPARE_16 (0)
687 +#define PAGEFMT_SPARE_26 (1)
688 +#define PAGEFMT_SPARE_27 (2)
689 +#define PAGEFMT_SPARE_28 (3)
690 +#define PAGEFMT_SPARE_32 (4)
691 +#define PAGEFMT_SPARE_36 (5)
692 +#define PAGEFMT_SPARE_40 (6)
693 +#define PAGEFMT_SPARE_44 (7)
694 +#define PAGEFMT_SPARE_48 (8)
695 +#define PAGEFMT_SPARE_49 (9)
696 +#define PAGEFMT_SPARE_50 (0xa)
697 +#define PAGEFMT_SPARE_51 (0xb)
698 +#define PAGEFMT_SPARE_52 (0xc)
699 +#define PAGEFMT_SPARE_62 (0xd)
700 +#define PAGEFMT_SPARE_63 (0xe)
701 +#define PAGEFMT_SPARE_64 (0xf)
702 +#define PAGEFMT_SPARE_SHIFT (4)
703 +#define PAGEFMT_SEC_SEL_512 BIT(2)
704 +#define PAGEFMT_512_2K (0)
705 +#define PAGEFMT_2K_4K (1)
706 +#define PAGEFMT_4K_8K (2)
707 +#define PAGEFMT_8K_16K (3)
708 +/* NFI control */
709 +#define NFI_CON (0x08)
710 +#define CON_FIFO_FLUSH BIT(0)
711 +#define CON_NFI_RST BIT(1)
712 +#define CON_BRD BIT(8) /* burst read */
713 +#define CON_BWR BIT(9) /* burst write */
714 +#define CON_SEC_SHIFT (12)
715 +/* Timing control register */
716 +#define NFI_ACCCON (0x0C)
717 +#define NFI_INTR_EN (0x10)
718 +#define INTR_AHB_DONE_EN BIT(6)
719 +#define NFI_INTR_STA (0x14)
720 +#define NFI_CMD (0x20)
721 +#define NFI_ADDRNOB (0x30)
722 +#define NFI_COLADDR (0x34)
723 +#define NFI_ROWADDR (0x38)
724 +#define NFI_STRDATA (0x40)
725 +#define STAR_EN (1)
726 +#define STAR_DE (0)
727 +#define NFI_CNRNB (0x44)
728 +#define NFI_DATAW (0x50)
729 +#define NFI_DATAR (0x54)
730 +#define NFI_PIO_DIRDY (0x58)
731 +#define PIO_DI_RDY (0x01)
732 +#define NFI_STA (0x60)
733 +#define STA_CMD BIT(0)
734 +#define STA_ADDR BIT(1)
735 +#define STA_BUSY BIT(8)
736 +#define STA_EMP_PAGE BIT(12)
737 +#define NFI_FSM_CUSTDATA (0xe << 16)
738 +#define NFI_FSM_MASK (0xf << 16)
739 +#define NFI_ADDRCNTR (0x70)
740 +#define CNTR_MASK GENMASK(16, 12)
741 +#define NFI_STRADDR (0x80)
742 +#define NFI_BYTELEN (0x84)
743 +#define NFI_CSEL (0x90)
744 +#define NFI_FDML(x) (0xA0 + (x) * sizeof(u32) * 2)
745 +#define NFI_FDMM(x) (0xA4 + (x) * sizeof(u32) * 2)
746 +#define NFI_FDM_MAX_SIZE (8)
747 +#define NFI_MASTER_STA (0x224)
748 +#define MASTER_STA_MASK (0x0FFF)
749 +#define NFI_EMPTY_THRESH (0x23C)
750 +
751 +#define MTK_NAME "mtk-nand"
752 +#define KB(x) ((x) * 1024UL)
753 +#define MB(x) (KB(x) * 1024UL)
754 +
755 +#define MTK_TIMEOUT (500000)
756 +#define MTK_RESET_TIMEOUT (1000000)
757 +#define MTK_MAX_SECTOR (16)
758 +#define MTK_NAND_MAX_NSELS (2)
759 +
760 +typedef void (*bad_mark_swap)(struct mtd_info *, uint8_t *buf, int raw);
761 +struct mtk_nfc_bad_mark_ctl {
762 + bad_mark_swap bm_swap;
763 + u32 sec;
764 + u32 pos;
765 +};
766 +
767 +/*
768 + * FDM: region used to store free OOB data
769 + */
770 +struct mtk_nfc_fdm {
771 + u32 reg_size;
772 + u32 ecc_size;
773 +};
774 +
775 +struct mtk_nfc_nand_chip {
776 + struct list_head node;
777 + struct nand_chip nand;
778 +
779 + struct mtk_nfc_bad_mark_ctl bad_mark;
780 + struct mtk_nfc_fdm fdm;
781 + u32 spare_per_sector;
782 +
783 + int nsels;
784 + u8 sels[0];
785 + /* nothing after this field */
786 +};
787 +
788 +struct mtk_nfc_clk {
789 + struct clk *nfi_clk;
790 + struct clk *pad_clk;
791 +};
792 +
793 +struct mtk_nfc {
794 + struct nand_hw_control controller;
795 + struct mtk_ecc_config ecc_cfg;
796 + struct mtk_nfc_clk clk;
797 + struct mtk_ecc *ecc;
798 +
799 + struct device *dev;
800 + void __iomem *regs;
801 +
802 + struct completion done;
803 + struct list_head chips;
804 +
805 + u8 *buffer;
806 +};
807 +
808 +static inline struct mtk_nfc_nand_chip *to_mtk_nand(struct nand_chip *nand)
809 +{
810 + return container_of(nand, struct mtk_nfc_nand_chip, nand);
811 +}
812 +
813 +static inline uint8_t *data_ptr(struct nand_chip *chip, const uint8_t *p, int i)
814 +{
815 + return (uint8_t *) p + i * chip->ecc.size;
816 +}
817 +
818 +static inline uint8_t *oob_ptr(struct nand_chip *chip, int i)
819 +{
820 + struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
821 + uint8_t *poi;
822 +
823 + if (i < mtk_nand->bad_mark.sec)
824 + poi = chip->oob_poi + (i + 1) * mtk_nand->fdm.reg_size;
825 + else if (i == mtk_nand->bad_mark.sec)
826 + poi = chip->oob_poi;
827 + else
828 + poi = chip->oob_poi + i * mtk_nand->fdm.reg_size;
829 +
830 + return poi;
831 +}
832 +
833 +static inline int mtk_data_len(struct nand_chip *chip)
834 +{
835 + struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
836 +
837 + return chip->ecc.size + mtk_nand->spare_per_sector;
838 +}
839 +
840 +static inline uint8_t *mtk_data_ptr(struct nand_chip *chip, int i)
841 +{
842 + struct mtk_nfc *nfc = nand_get_controller_data(chip);
843 +
844 + return nfc->buffer + i * mtk_data_len(chip);
845 +}
846 +
847 +static inline uint8_t *mtk_oob_ptr(struct nand_chip *chip, int i)
848 +{
849 + struct mtk_nfc *nfc = nand_get_controller_data(chip);
850 +
851 + return nfc->buffer + i * mtk_data_len(chip) + chip->ecc.size;
852 +}
853 +
854 +static inline void nfi_writel(struct mtk_nfc *nfc, u32 val, u32 reg)
855 +{
856 + writel(val, nfc->regs + reg);
857 +}
858 +
859 +static inline void nfi_writew(struct mtk_nfc *nfc, u16 val, u32 reg)
860 +{
861 + writew(val, nfc->regs + reg);
862 +}
863 +
864 +static inline void nfi_writeb(struct mtk_nfc *nfc, u8 val, u32 reg)
865 +{
866 + writeb(val, nfc->regs + reg);
867 +}
868 +
869 +static inline u32 nfi_readl(struct mtk_nfc *nfc, u32 reg)
870 +{
871 + return readl_relaxed(nfc->regs + reg);
872 +}
873 +
874 +static inline u16 nfi_readw(struct mtk_nfc *nfc, u32 reg)
875 +{
876 + return readw_relaxed(nfc->regs + reg);
877 +}
878 +
879 +static inline u8 nfi_readb(struct mtk_nfc *nfc, u32 reg)
880 +{
881 + return readb_relaxed(nfc->regs + reg);
882 +}
883 +
884 +static void mtk_nfc_hw_reset(struct mtk_nfc *nfc)
885 +{
886 + struct device *dev = nfc->dev;
887 + u32 val;
888 + int ret;
889 +
890 + /* reset all registers and force the NFI master to terminate */
891 + nfi_writel(nfc, CON_FIFO_FLUSH | CON_NFI_RST, NFI_CON);
892 +
893 + /* wait for the master to finish the last transaction */
894 + ret = readl_poll_timeout(nfc->regs + NFI_MASTER_STA, val,
895 + !(val & MASTER_STA_MASK), 50, MTK_RESET_TIMEOUT);
896 + if (ret)
897 + dev_warn(dev, "master active in reset [0x%x] = 0x%x\n",
898 + NFI_MASTER_STA, val);
899 +
900 + /* ensure any status register affected by the NFI master is reset */
901 + nfi_writel(nfc, CON_FIFO_FLUSH | CON_NFI_RST, NFI_CON);
902 + nfi_writew(nfc, STAR_DE, NFI_STRDATA);
903 +}
904 +
905 +static int mtk_nfc_send_command(struct mtk_nfc *nfc, u8 command)
906 +{
907 + struct device *dev = nfc->dev;
908 + u32 val;
909 + int ret;
910 +
911 + nfi_writel(nfc, command, NFI_CMD);
912 +
913 + ret = readl_poll_timeout_atomic(nfc->regs + NFI_STA, val,
914 + !(val & STA_CMD), 10, MTK_TIMEOUT);
915 + if (ret) {
916 + dev_warn(dev, "nfi core timed out entering command mode\n");
917 + return -EIO;
918 + }
919 +
920 + return 0;
921 +}
922 +
923 +static int mtk_nfc_send_address(struct mtk_nfc *nfc, int addr)
924 +{
925 + struct device *dev = nfc->dev;
926 + u32 val;
927 + int ret;
928 +
929 + nfi_writel(nfc, addr, NFI_COLADDR);
930 + nfi_writel(nfc, 0, NFI_ROWADDR);
931 + nfi_writew(nfc, 1, NFI_ADDRNOB);
932 +
933 + ret = readl_poll_timeout_atomic(nfc->regs + NFI_STA, val,
934 + !(val & STA_ADDR), 10, MTK_TIMEOUT);
935 + if (ret) {
936 + dev_warn(dev, "nfi core timed out entering address mode\n");
937 + return -EIO;
938 + }
939 +
940 + return 0;
941 +}
942 +
943 +static int mtk_nfc_hw_runtime_config(struct mtd_info *mtd)
944 +{
945 + struct nand_chip *chip = mtd_to_nand(mtd);
946 + struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
947 + struct mtk_nfc *nfc = nand_get_controller_data(chip);
948 + u32 fmt, spare;
949 +
950 + if (!mtd->writesize)
951 + return 0;
952 +
953 + spare = mtk_nand->spare_per_sector;
954 +
955 + switch (mtd->writesize) {
956 + case 512:
957 + fmt = PAGEFMT_512_2K | PAGEFMT_SEC_SEL_512;
958 + break;
959 + case KB(2):
960 + if (chip->ecc.size == 512)
961 + fmt = PAGEFMT_2K_4K | PAGEFMT_SEC_SEL_512;
962 + else
963 + fmt = PAGEFMT_512_2K;
964 + break;
965 + case KB(4):
966 + if (chip->ecc.size == 512)
967 + fmt = PAGEFMT_4K_8K | PAGEFMT_SEC_SEL_512;
968 + else
969 + fmt = PAGEFMT_2K_4K;
970 + break;
971 + case KB(8):
972 + if (chip->ecc.size == 512)
973 + fmt = PAGEFMT_8K_16K | PAGEFMT_SEC_SEL_512;
974 + else
975 + fmt = PAGEFMT_4K_8K;
976 + break;
977 + case KB(16):
978 + fmt = PAGEFMT_8K_16K;
979 + break;
980 + default:
981 + dev_err(nfc->dev, "invalid page len: %d\n", mtd->writesize);
982 + return -EINVAL;
983 + }
984 +
985 +	/* the hardware doubles this value for 1024 byte sectors, so halve it */
986 + if (chip->ecc.size == 1024)
987 + spare >>= 1;
988 +
989 + switch (spare) {
990 + case 16:
991 + fmt |= (PAGEFMT_SPARE_16 << PAGEFMT_SPARE_SHIFT);
992 + break;
993 + case 26:
994 + fmt |= (PAGEFMT_SPARE_26 << PAGEFMT_SPARE_SHIFT);
995 + break;
996 + case 27:
997 + fmt |= (PAGEFMT_SPARE_27 << PAGEFMT_SPARE_SHIFT);
998 + break;
999 + case 28:
1000 + fmt |= (PAGEFMT_SPARE_28 << PAGEFMT_SPARE_SHIFT);
1001 + break;
1002 + case 32:
1003 + fmt |= (PAGEFMT_SPARE_32 << PAGEFMT_SPARE_SHIFT);
1004 + break;
1005 + case 36:
1006 + fmt |= (PAGEFMT_SPARE_36 << PAGEFMT_SPARE_SHIFT);
1007 + break;
1008 + case 40:
1009 + fmt |= (PAGEFMT_SPARE_40 << PAGEFMT_SPARE_SHIFT);
1010 + break;
1011 + case 44:
1012 + fmt |= (PAGEFMT_SPARE_44 << PAGEFMT_SPARE_SHIFT);
1013 + break;
1014 + case 48:
1015 + fmt |= (PAGEFMT_SPARE_48 << PAGEFMT_SPARE_SHIFT);
1016 + break;
1017 + case 49:
1018 + fmt |= (PAGEFMT_SPARE_49 << PAGEFMT_SPARE_SHIFT);
1019 + break;
1020 + case 50:
1021 + fmt |= (PAGEFMT_SPARE_50 << PAGEFMT_SPARE_SHIFT);
1022 + break;
1023 + case 51:
1024 + fmt |= (PAGEFMT_SPARE_51 << PAGEFMT_SPARE_SHIFT);
1025 + break;
1026 + case 52:
1027 + fmt |= (PAGEFMT_SPARE_52 << PAGEFMT_SPARE_SHIFT);
1028 + break;
1029 + case 62:
1030 + fmt |= (PAGEFMT_SPARE_62 << PAGEFMT_SPARE_SHIFT);
1031 + break;
1032 + case 63:
1033 + fmt |= (PAGEFMT_SPARE_63 << PAGEFMT_SPARE_SHIFT);
1034 + break;
1035 + case 64:
1036 + fmt |= (PAGEFMT_SPARE_64 << PAGEFMT_SPARE_SHIFT);
1037 + break;
1038 + default:
1039 + dev_err(nfc->dev, "invalid spare per sector %d\n", spare);
1040 + return -EINVAL;
1041 + }
1042 +
1043 + fmt |= mtk_nand->fdm.reg_size << PAGEFMT_FDM_SHIFT;
1044 + fmt |= mtk_nand->fdm.ecc_size << PAGEFMT_FDM_ECC_SHIFT;
1045 + nfi_writew(nfc, fmt, NFI_PAGEFMT);
1046 +
1047 + nfc->ecc_cfg.strength = chip->ecc.strength;
1048 + nfc->ecc_cfg.enc_len = chip->ecc.size + mtk_nand->fdm.ecc_size;
1049 + nfc->ecc_cfg.dec_len = (nfc->ecc_cfg.enc_len << 3)
1050 + + chip->ecc.strength * ECC_PARITY_BITS;
1051 +
1052 + return 0;
1053 +}
1054 +
1055 +static void mtk_nfc_select_chip(struct mtd_info *mtd, int chip)
1056 +{
1057 + struct nand_chip *nand = mtd_to_nand(mtd);
1058 + struct mtk_nfc *nfc = nand_get_controller_data(nand);
1059 + struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(nand);
1060 +
1061 + if (chip < 0)
1062 + return;
1063 +
1064 + mtk_nfc_hw_runtime_config(mtd);
1065 +
1066 + nfi_writel(nfc, mtk_nand->sels[chip], NFI_CSEL);
1067 +}
1068 +
1069 +static int mtk_nfc_dev_ready(struct mtd_info *mtd)
1070 +{
1071 + struct mtk_nfc *nfc = nand_get_controller_data(mtd_to_nand(mtd));
1072 +
1073 + if (nfi_readl(nfc, NFI_STA) & STA_BUSY)
1074 + return 0;
1075 +
1076 + return 1;
1077 +}
1078 +
1079 +static void mtk_nfc_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl)
1080 +{
1081 + struct mtk_nfc *nfc = nand_get_controller_data(mtd_to_nand(mtd));
1082 +
1083 + if (ctrl & NAND_ALE)
1084 + mtk_nfc_send_address(nfc, dat);
1085 + else if (ctrl & NAND_CLE) {
1086 + mtk_nfc_hw_reset(nfc);
1087 +
1088 + nfi_writew(nfc, CNFG_OP_CUST, NFI_CNFG);
1089 + mtk_nfc_send_command(nfc, dat);
1090 + }
1091 +}
1092 +
1093 +static inline void mtk_nfc_wait_ioready(struct mtk_nfc *nfc)
1094 +{
1095 + int rc;
1096 + u8 val;
1097 +
1098 + rc = readb_poll_timeout_atomic(nfc->regs + NFI_PIO_DIRDY, val,
1099 + val & PIO_DI_RDY, 10, MTK_TIMEOUT);
1100 + if (rc < 0)
1101 + dev_err(nfc->dev, "data not ready\n");
1102 +}
1103 +
1104 +static inline uint8_t mtk_nfc_read_byte(struct mtd_info *mtd)
1105 +{
1106 + struct nand_chip *chip = mtd_to_nand(mtd);
1107 + struct mtk_nfc *nfc = nand_get_controller_data(chip);
1108 + u32 reg;
1109 +
1110 + reg = nfi_readl(nfc, NFI_STA) & NFI_FSM_MASK;
1111 + if (reg != NFI_FSM_CUSTDATA) {
1112 + reg = nfi_readw(nfc, NFI_CNFG);
1113 + reg |= CNFG_BYTE_RW | CNFG_READ_EN;
1114 + nfi_writew(nfc, reg, NFI_CNFG);
1115 +
1116 + reg = (MTK_MAX_SECTOR << CON_SEC_SHIFT) | CON_BRD;
1117 + nfi_writel(nfc, reg, NFI_CON);
1118 +
1119 + /* trigger to fetch data */
1120 + nfi_writew(nfc, STAR_EN, NFI_STRDATA);
1121 + }
1122 +
1123 + mtk_nfc_wait_ioready(nfc);
1124 +
1125 + return nfi_readb(nfc, NFI_DATAR);
1126 +}
1127 +
1128 +static void mtk_nfc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
1129 +{
1130 + int i;
1131 +
1132 + for (i = 0; i < len; i++)
1133 + buf[i] = mtk_nfc_read_byte(mtd);
1134 +}
1135 +
1136 +static void mtk_nfc_write_byte(struct mtd_info *mtd, uint8_t byte)
1137 +{
1138 + struct mtk_nfc *nfc = nand_get_controller_data(mtd_to_nand(mtd));
1139 + u32 reg;
1140 +
1141 + reg = nfi_readl(nfc, NFI_STA) & NFI_FSM_MASK;
1142 +
1143 + if (reg != NFI_FSM_CUSTDATA) {
1144 + reg = nfi_readw(nfc, NFI_CNFG) | CNFG_BYTE_RW;
1145 + nfi_writew(nfc, reg, NFI_CNFG);
1146 +
1147 + reg = MTK_MAX_SECTOR << CON_SEC_SHIFT | CON_BWR;
1148 + nfi_writel(nfc, reg, NFI_CON);
1149 +
1150 + nfi_writew(nfc, STAR_EN, NFI_STRDATA);
1151 + }
1152 +
1153 + mtk_nfc_wait_ioready(nfc);
1154 + nfi_writeb(nfc, byte, NFI_DATAW);
1155 +}
1156 +
1157 +static void mtk_nfc_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
1158 +{
1159 + int i;
1160 +
1161 + for (i = 0; i < len; i++)
1162 + mtk_nfc_write_byte(mtd, buf[i]);
1163 +}
1164 +
1165 +static int mtk_nfc_sector_encode(struct nand_chip *chip, u8 *data)
1166 +{
1167 + struct mtk_nfc *nfc = nand_get_controller_data(chip);
1168 + struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
1169 + int size = chip->ecc.size + mtk_nand->fdm.reg_size;
1170 +
1171 + nfc->ecc_cfg.ecc_mode = ECC_DMA_MODE;
1172 + nfc->ecc_cfg.codec = ECC_ENC;
1173 + return mtk_ecc_encode_non_nfi_mode(nfc->ecc, &nfc->ecc_cfg, data, size);
1174 +}
1175 +
1176 +static void mtk_nfc_no_bad_mark_swap(struct mtd_info *a, uint8_t *b, int c)
1177 +{
1178 + /* nope */
1179 +}
1180 +
1181 +static void mtk_nfc_bad_mark_swap(struct mtd_info *mtd, uint8_t *buf, int raw)
1182 +{
1183 + struct nand_chip *chip = mtd_to_nand(mtd);
1184 + struct mtk_nfc_nand_chip *nand = to_mtk_nand(chip);
1185 + u32 bad_pos = nand->bad_mark.pos;
1186 +
1187 + if (raw)
1188 + bad_pos += nand->bad_mark.sec * mtk_data_len(chip);
1189 + else
1190 + bad_pos += nand->bad_mark.sec * chip->ecc.size;
1191 +
1192 + swap(chip->oob_poi[0], buf[bad_pos]);
1193 +}
1194 +
1195 +static int mtk_nfc_format_subpage(struct mtd_info *mtd, uint32_t offset,
1196 + uint32_t len, const uint8_t *buf)
1197 +{
1198 + struct nand_chip *chip = mtd_to_nand(mtd);
1199 + struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
1200 + struct mtk_nfc *nfc = nand_get_controller_data(chip);
1201 + struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
1202 + u32 start, end;
1203 + int i, ret;
1204 +
1205 + start = offset / chip->ecc.size;
1206 + end = DIV_ROUND_UP(offset + len, chip->ecc.size);
1207 +
1208 + memset(nfc->buffer, 0xff, mtd->writesize + mtd->oobsize);
1209 + for (i = 0; i < chip->ecc.steps; i++) {
1210 +
1211 + memcpy(mtk_data_ptr(chip, i), data_ptr(chip, buf, i),
1212 + chip->ecc.size);
1213 +
1214 + if (start > i || i >= end)
1215 + continue;
1216 +
1217 + if (i == mtk_nand->bad_mark.sec)
1218 + mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, 1);
1219 +
1220 + memcpy(mtk_oob_ptr(chip, i), oob_ptr(chip, i), fdm->reg_size);
1221 +
1222 +		/* compute this sector's ECC parity and append it after the FDM data */
1223 + ret = mtk_nfc_sector_encode(chip, mtk_data_ptr(chip, i));
1224 + if (ret < 0)
1225 + return ret;
1226 + }
1227 +
1228 + return 0;
1229 +}
1230 +
1231 +static void mtk_nfc_format_page(struct mtd_info *mtd, const uint8_t *buf)
1232 +{
1233 + struct nand_chip *chip = mtd_to_nand(mtd);
1234 + struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
1235 + struct mtk_nfc *nfc = nand_get_controller_data(chip);
1236 + struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
1237 + u32 i;
1238 +
1239 + memset(nfc->buffer, 0xff, mtd->writesize + mtd->oobsize);
1240 + for (i = 0; i < chip->ecc.steps; i++) {
1241 + if (buf)
1242 + memcpy(mtk_data_ptr(chip, i), data_ptr(chip, buf, i),
1243 + chip->ecc.size);
1244 +
1245 + if (i == mtk_nand->bad_mark.sec)
1246 + mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, 1);
1247 +
1248 + memcpy(mtk_oob_ptr(chip, i), oob_ptr(chip, i), fdm->reg_size);
1249 + }
1250 +}
1251 +
1252 +static inline void mtk_nfc_read_fdm(struct nand_chip *chip, u32 start,
1253 + u32 sectors)
1254 +{
1255 + struct mtk_nfc *nfc = nand_get_controller_data(chip);
1256 + u32 *p;
1257 + int i;
1258 +
1259 + for (i = 0; i < sectors; i++) {
1260 + p = (u32 *) oob_ptr(chip, start + i);
1261 + p[0] = nfi_readl(nfc, NFI_FDML(i));
1262 + p[1] = nfi_readl(nfc, NFI_FDMM(i));
1263 + }
1264 +}
1265 +
1266 +static inline void mtk_nfc_write_fdm(struct nand_chip *chip)
1267 +{
1268 + struct mtk_nfc *nfc = nand_get_controller_data(chip);
1269 + u32 *p;
1270 + int i;
1271 +
1272 + for (i = 0; i < chip->ecc.steps ; i++) {
1273 + p = (u32 *) oob_ptr(chip, i);
1274 + nfi_writel(nfc, p[0], NFI_FDML(i));
1275 + nfi_writel(nfc, p[1], NFI_FDMM(i));
1276 + }
1277 +}
1278 +
1279 +static int mtk_nfc_do_write_page(struct mtd_info *mtd, struct nand_chip *chip,
1280 + const uint8_t *buf, int page, int len)
1281 +{
1282 +
1283 + struct mtk_nfc *nfc = nand_get_controller_data(chip);
1284 + struct device *dev = nfc->dev;
1285 + dma_addr_t addr;
1286 + u32 reg;
1287 + int ret;
1288 +
1289 + addr = dma_map_single(dev, (void *) buf, len, DMA_TO_DEVICE);
1290 + ret = dma_mapping_error(nfc->dev, addr);
1291 + if (ret) {
1292 + dev_err(nfc->dev, "dma mapping error\n");
1293 + return -EINVAL;
1294 + }
1295 +
1296 + reg = nfi_readw(nfc, NFI_CNFG) | CNFG_AHB | CNFG_DMA_BURST_EN;
1297 + nfi_writew(nfc, reg, NFI_CNFG);
1298 +
1299 + nfi_writel(nfc, chip->ecc.steps << CON_SEC_SHIFT, NFI_CON);
1300 + nfi_writel(nfc, lower_32_bits(addr), NFI_STRADDR);
1301 + nfi_writew(nfc, INTR_AHB_DONE_EN, NFI_INTR_EN);
1302 +
1303 + init_completion(&nfc->done);
1304 +
1305 + reg = nfi_readl(nfc, NFI_CON) | CON_BWR;
1306 + nfi_writel(nfc, reg, NFI_CON);
1307 + nfi_writew(nfc, STAR_EN, NFI_STRDATA);
1308 +
1309 + ret = wait_for_completion_timeout(&nfc->done, msecs_to_jiffies(500));
1310 + if (!ret) {
1311 + dev_err(dev, "program ahb done timeout\n");
1312 + nfi_writew(nfc, 0, NFI_INTR_EN);
1313 + ret = -ETIMEDOUT;
1314 + goto timeout;
1315 + }
1316 +
1317 + ret = readl_poll_timeout_atomic(nfc->regs + NFI_ADDRCNTR, reg,
1318 + (reg & CNTR_MASK) >= chip->ecc.steps, 10, MTK_TIMEOUT);
1319 + if (ret)
1320 + dev_err(dev, "hwecc write timeout\n");
1321 +
1322 +timeout:
1323 +
1324 + dma_unmap_single(nfc->dev, addr, len, DMA_TO_DEVICE);
1325 + nfi_writel(nfc, 0, NFI_CON);
1326 +
1327 + return ret;
1328 +}
1329 +
1330 +static int mtk_nfc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
1331 + const uint8_t *buf, int page, int raw)
1332 +{
1333 + struct mtk_nfc *nfc = nand_get_controller_data(chip);
1334 + struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
1335 + size_t len;
1336 + const u8 *bufpoi;
1337 + u32 reg;
1338 + int ret;
1339 +
1340 + if (!raw) {
1341 + /* OOB => FDM: from register, ECC: from HW */
1342 + reg = nfi_readw(nfc, NFI_CNFG) | CNFG_AUTO_FMT_EN;
1343 + nfi_writew(nfc, reg | CNFG_HW_ECC_EN, NFI_CNFG);
1344 +
1345 + nfc->ecc_cfg.codec = ECC_ENC;
1346 + nfc->ecc_cfg.ecc_mode = ECC_NFI_MODE;
1347 + ret = mtk_ecc_enable(nfc->ecc, &nfc->ecc_cfg);
1348 + if (ret) {
1349 + /* clear NFI config */
1350 + reg = nfi_readw(nfc, NFI_CNFG);
1351 + reg &= ~(CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN);
1352 + nfi_writew(nfc, reg, NFI_CNFG);
1353 +
1354 + return ret;
1355 + }
1356 +
1357 + memcpy(nfc->buffer, buf, mtd->writesize);
1358 + mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, raw);
1359 + bufpoi = nfc->buffer;
1360 +
1361 + /* write OOB into the FDM registers (OOB area in MTK NAND) */
1362 + mtk_nfc_write_fdm(chip);
1363 + } else
1364 + bufpoi = buf;
1365 +
1366 + len = mtd->writesize + (raw ? mtd->oobsize : 0);
1367 + ret = mtk_nfc_do_write_page(mtd, chip, bufpoi, page, len);
1368 +
1369 + if (!raw)
1370 + mtk_ecc_disable(nfc->ecc, &nfc->ecc_cfg);
1371 +
1372 + return ret;
1373 +}
1374 +
1375 +static int mtk_nfc_write_page_hwecc(struct mtd_info *mtd,
1376 + struct nand_chip *chip, const uint8_t *buf, int oob_on, int page)
1377 +{
1378 + return mtk_nfc_write_page(mtd, chip, buf, page, 0);
1379 +}
1380 +
1381 +static int mtk_nfc_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1382 + const uint8_t *buf, int oob_on, int pg)
1383 +{
1384 + struct mtk_nfc *nfc = nand_get_controller_data(chip);
1385 +
1386 + mtk_nfc_format_page(mtd, buf);
1387 + return mtk_nfc_write_page(mtd, chip, nfc->buffer, pg, 1);
1388 +}
1389 +
1390 +static int mtk_nfc_write_subpage_hwecc(struct mtd_info *mtd,
1391 + struct nand_chip *chip, uint32_t offset, uint32_t data_len,
1392 + const uint8_t *buf, int oob_on, int page)
1393 +{
1394 + struct mtk_nfc *nfc = nand_get_controller_data(chip);
1395 + int ret;
1396 +
1397 + ret = mtk_nfc_format_subpage(mtd, offset, data_len, buf);
1398 + if (ret < 0)
1399 + return ret;
1400 +
1401 +	/* use the data in the private buffer (now with FDM and ECC parity) */
1402 + return mtk_nfc_write_page(mtd, chip, nfc->buffer, page, 1);
1403 +}
1404 +
1405 +static int mtk_nfc_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
1406 + int page)
1407 +{
1408 + int ret;
1409 +
1410 + chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
1411 +
1412 + ret = mtk_nfc_write_page_raw(mtd, chip, NULL, 1, page);
1413 + if (ret < 0)
1414 + return -EIO;
1415 +
1416 + chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
1417 + ret = chip->waitfunc(mtd, chip);
1418 +
1419 + return ret & NAND_STATUS_FAIL ? -EIO : 0;
1420 +}
1421 +
1422 +static int mtk_nfc_update_ecc_stats(struct mtd_info *mtd, u8 *buf, u32 sectors)
1423 +{
1424 + struct nand_chip *chip = mtd_to_nand(mtd);
1425 + struct mtk_nfc *nfc = nand_get_controller_data(chip);
1426 + struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
1427 + struct mtk_ecc_stats stats;
1428 + int rc, i;
1429 +
1430 + rc = nfi_readl(nfc, NFI_STA) & STA_EMP_PAGE;
1431 + if (rc) {
1432 + memset(buf, 0xff, sectors * chip->ecc.size);
1433 + for (i = 0; i < sectors; i++)
1434 + memset(oob_ptr(chip, i), 0xff, mtk_nand->fdm.reg_size);
1435 + return 0;
1436 + }
1437 +
1438 + mtk_ecc_get_stats(nfc->ecc, &stats, sectors);
1439 + mtd->ecc_stats.corrected += stats.corrected;
1440 + mtd->ecc_stats.failed += stats.failed;
1441 +
1442 + return stats.bitflips;
1443 +}
1444 +
1445 +static int mtk_nfc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
1446 + uint32_t data_offs, uint32_t readlen, uint8_t *bufpoi,
1447 + int page, int raw)
1448 +{
1449 + struct mtk_nfc *nfc = nand_get_controller_data(chip);
1450 + struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
1451 + u32 spare = mtk_nand->spare_per_sector;
1452 + u32 column, sectors, start, end, reg;
1453 + dma_addr_t addr;
1454 + int bitflips;
1455 + size_t len;
1456 + u8 *buf;
1457 + int rc;
1458 +
1459 + start = data_offs / chip->ecc.size;
1460 + end = DIV_ROUND_UP(data_offs + readlen, chip->ecc.size);
1461 +
1462 + sectors = end - start;
1463 + column = start * (chip->ecc.size + spare);
1464 +
1465 + len = sectors * chip->ecc.size + (raw ? sectors * spare : 0);
1466 + buf = bufpoi + start * chip->ecc.size;
1467 +
1468 + if (column != 0)
1469 + chip->cmdfunc(mtd, NAND_CMD_RNDOUT, column, -1);
1470 +
1471 + addr = dma_map_single(nfc->dev, buf, len, DMA_FROM_DEVICE);
1472 + rc = dma_mapping_error(nfc->dev, addr);
1473 + if (rc) {
1474 + dev_err(nfc->dev, "dma mapping error\n");
1475 +
1476 + return -EINVAL;
1477 + }
1478 +
1479 + reg = nfi_readw(nfc, NFI_CNFG);
1480 + reg |= CNFG_READ_EN | CNFG_DMA_BURST_EN | CNFG_AHB;
1481 + if (!raw) {
1482 + reg |= CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN;
1483 + nfi_writew(nfc, reg, NFI_CNFG);
1484 +
1485 + nfc->ecc_cfg.ecc_mode = ECC_NFI_MODE;
1486 + nfc->ecc_cfg.sec_mask = sectors;
1487 + nfc->ecc_cfg.codec = ECC_DEC;
1488 + rc = mtk_ecc_enable(nfc->ecc, &nfc->ecc_cfg);
1489 + if (rc) {
1490 + dev_err(nfc->dev, "ecc enable\n");
1491 + /* clear NFI_CNFG */
1492 + reg &= ~(CNFG_DMA_BURST_EN | CNFG_AHB | CNFG_READ_EN |
1493 + CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN);
1494 + nfi_writew(nfc, reg, NFI_CNFG);
1495 + dma_unmap_single(nfc->dev, addr, len, DMA_FROM_DEVICE);
1496 +
1497 + return rc;
1498 + }
1499 + } else
1500 + nfi_writew(nfc, reg, NFI_CNFG);
1501 +
1502 + nfi_writel(nfc, sectors << CON_SEC_SHIFT, NFI_CON);
1503 + nfi_writew(nfc, INTR_AHB_DONE_EN, NFI_INTR_EN);
1504 + nfi_writel(nfc, lower_32_bits(addr), NFI_STRADDR);
1505 +
1506 + init_completion(&nfc->done);
1507 + reg = nfi_readl(nfc, NFI_CON) | CON_BRD;
1508 + nfi_writel(nfc, reg, NFI_CON);
1509 + nfi_writew(nfc, STAR_EN, NFI_STRDATA);
1510 +
1511 + rc = wait_for_completion_timeout(&nfc->done, msecs_to_jiffies(500));
1512 + if (!rc)
1513 + dev_warn(nfc->dev, "read ahb/dma done timeout\n");
1514 +
1515 + rc = readl_poll_timeout_atomic(nfc->regs + NFI_BYTELEN, reg,
1516 + (reg & CNTR_MASK) >= sectors, 10, MTK_TIMEOUT);
1517 + if (rc < 0) {
1518 + dev_err(nfc->dev, "subpage done timeout\n");
1519 + bitflips = -EIO;
1520 + } else {
1521 + bitflips = 0;
1522 + if (!raw) {
1523 + rc = mtk_ecc_wait_irq_done(nfc->ecc, ECC_DEC);
1524 + bitflips = rc < 0 ? -ETIMEDOUT :
1525 + mtk_nfc_update_ecc_stats(mtd, buf, sectors);
1526 + mtk_nfc_read_fdm(chip, start, sectors);
1527 + }
1528 + }
1529 +
1530 + dma_unmap_single(nfc->dev, addr, len, DMA_FROM_DEVICE);
1531 +
1532 + if (raw)
1533 + goto done;
1534 +
1535 + mtk_ecc_disable(nfc->ecc, &nfc->ecc_cfg);
1536 +
1537 + if (clamp(mtk_nand->bad_mark.sec, start, end) == mtk_nand->bad_mark.sec)
1538 + mtk_nand->bad_mark.bm_swap(mtd, bufpoi, raw);
1539 +done:
1540 + nfi_writel(nfc, 0, NFI_CON);
1541 +
1542 + return bitflips;
1543 +}
1544 +
1545 +static int mtk_nfc_read_subpage_hwecc(struct mtd_info *mtd,
1546 + struct nand_chip *chip, uint32_t off, uint32_t len, uint8_t *p, int pg)
1547 +{
1548 + return mtk_nfc_read_subpage(mtd, chip, off, len, p, pg, 0);
1549 +}
1550 +
1551 +static int mtk_nfc_read_page_hwecc(struct mtd_info *mtd,
1552 + struct nand_chip *chip, uint8_t *p, int oob_on, int pg)
1553 +{
1554 + return mtk_nfc_read_subpage(mtd, chip, 0, mtd->writesize, p, pg, 0);
1555 +}
1556 +
1557 +static int mtk_nfc_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1558 + uint8_t *buf, int oob_on, int page)
1559 +{
1560 + struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
1561 + struct mtk_nfc *nfc = nand_get_controller_data(chip);
1562 + struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
1563 + int i, ret;
1564 +
1565 + memset(nfc->buffer, 0xff, mtd->writesize + mtd->oobsize);
1566 + ret = mtk_nfc_read_subpage(mtd, chip, 0, mtd->writesize, nfc->buffer,
1567 + page, 1);
1568 + if (ret < 0)
1569 + return ret;
1570 +
1571 + for (i = 0; i < chip->ecc.steps; i++) {
1572 + memcpy(oob_ptr(chip, i), mtk_oob_ptr(chip, i), fdm->reg_size);
1573 + if (i == mtk_nand->bad_mark.sec)
1574 + mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, 1);
1575 +
1576 + if (buf)
1577 + memcpy(data_ptr(chip, buf, i), mtk_data_ptr(chip, i),
1578 + chip->ecc.size);
1579 + }
1580 +
1581 + return ret;
1582 +}
1583 +
1584 +static int mtk_nfc_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
1585 + int page)
1586 +{
1587 + chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
1588 +
1589 + return mtk_nfc_read_page_raw(mtd, chip, NULL, 1, page);
1590 +}
1591 +
1592 +static inline void mtk_nfc_hw_init(struct mtk_nfc *nfc)
1593 +{
1594 + nfi_writel(nfc, 0x10804211, NFI_ACCCON);
1595 + nfi_writew(nfc, 0xf1, NFI_CNRNB);
1596 + nfi_writew(nfc, PAGEFMT_8K_16K, NFI_PAGEFMT);
1597 +
1598 + mtk_nfc_hw_reset(nfc);
1599 +
1600 + nfi_readl(nfc, NFI_INTR_STA);
1601 + nfi_writel(nfc, 0, NFI_INTR_EN);
1602 +}
1603 +
1604 +static irqreturn_t mtk_nfc_irq(int irq, void *id)
1605 +{
1606 + struct mtk_nfc *nfc = id;
1607 + u16 sta, ien;
1608 +
1609 + sta = nfi_readw(nfc, NFI_INTR_STA);
1610 + ien = nfi_readw(nfc, NFI_INTR_EN);
1611 +
1612 + if (!(sta & ien))
1613 + return IRQ_NONE;
1614 +
1615 + nfi_writew(nfc, ~sta & ien, NFI_INTR_EN);
1616 + complete(&nfc->done);
1617 +
1618 + return IRQ_HANDLED;
1619 +}
1620 +
1621 +static int mtk_nfc_enable_clk(struct device *dev, struct mtk_nfc_clk *clk)
1622 +{
1623 + int ret;
1624 +
1625 + ret = clk_prepare_enable(clk->nfi_clk);
1626 + if (ret) {
1627 + dev_err(dev, "failed to enable nfi clk\n");
1628 + return ret;
1629 + }
1630 +
1631 + ret = clk_prepare_enable(clk->pad_clk);
1632 + if (ret) {
1633 + dev_err(dev, "failed to enable pad clk\n");
1634 + clk_disable_unprepare(clk->nfi_clk);
1635 + return ret;
1636 + }
1637 +
1638 + return 0;
1639 +}
1640 +
1641 +static void mtk_nfc_disable_clk(struct mtk_nfc_clk *clk)
1642 +{
1643 + clk_disable_unprepare(clk->nfi_clk);
1644 + clk_disable_unprepare(clk->pad_clk);
1645 +}
1646 +
1647 +static int mtk_nfc_ooblayout_free(struct mtd_info *mtd, int section,
1648 + struct mtd_oob_region *oob_region)
1649 +{
1650 + struct nand_chip *chip = mtd_to_nand(mtd);
1651 + struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
1652 + struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
1653 + u32 eccsteps;
1654 +
1655 + eccsteps = mtd->writesize / chip->ecc.size;
1656 +
1657 + if (section >= eccsteps)
1658 + return -ERANGE;
1659 +
1660 + oob_region->length = fdm->reg_size - fdm->ecc_size;
1661 + oob_region->offset = section * fdm->reg_size + fdm->ecc_size;
1662 +
1663 + return 0;
1664 +}
1665 +
1666 +static int mtk_nfc_ooblayout_ecc(struct mtd_info *mtd, int section,
1667 + struct mtd_oob_region *oob_region)
1668 +{
1669 + struct nand_chip *chip = mtd_to_nand(mtd);
1670 + struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
1671 + u32 eccsteps;
1672 +
1673 + if (section)
1674 + return -ERANGE;
1675 +
1676 + eccsteps = mtd->writesize / chip->ecc.size;
1677 + oob_region->offset = mtk_nand->fdm.reg_size * eccsteps;
1678 + oob_region->length = mtd->oobsize - oob_region->offset;
1679 +
1680 + return 0;
1681 +}
1682 +
1683 +static const struct mtd_ooblayout_ops mtk_nfc_ooblayout_ops = {
1684 + .free = mtk_nfc_ooblayout_free,
1685 + .ecc = mtk_nfc_ooblayout_ecc,
1686 +};
1687 +
1688 +static void mtk_nfc_set_fdm(struct mtk_nfc_fdm *fdm, struct mtd_info *mtd)
1689 +{
1690 + struct nand_chip *nand = mtd_to_nand(mtd);
1691 + struct mtk_nfc_nand_chip *chip = to_mtk_nand(nand);
1692 + u32 ecc_bytes;
1693 +
1694 + ecc_bytes = DIV_ROUND_UP(nand->ecc.strength * ECC_PARITY_BITS, 8);
1695 +
1696 + fdm->reg_size = chip->spare_per_sector - ecc_bytes;
1697 + if (fdm->reg_size > NFI_FDM_MAX_SIZE)
1698 + fdm->reg_size = NFI_FDM_MAX_SIZE;
1699 +
1700 + /* bad block mark storage */
1701 + fdm->ecc_size = 1;
1702 +}
1703 +
1704 +static void mtk_nfc_set_bad_mark_ctl(struct mtk_nfc_bad_mark_ctl *bm_ctl,
1705 + struct mtd_info *mtd)
1706 +{
1707 + struct nand_chip *nand = mtd_to_nand(mtd);
1708 +
1709 + if (mtd->writesize == 512)
1710 + bm_ctl->bm_swap = mtk_nfc_no_bad_mark_swap;
1711 + else {
1712 + bm_ctl->bm_swap = mtk_nfc_bad_mark_swap;
1713 + bm_ctl->sec = mtd->writesize / mtk_data_len(nand);
1714 + bm_ctl->pos = mtd->writesize % mtk_data_len(nand);
1715 + }
1716 +}
1717 +
1718 +static void mtk_nfc_set_spare_per_sector(u32 *sps, struct mtd_info *mtd)
1719 +{
1720 + struct nand_chip *nand = mtd_to_nand(mtd);
1721 + u32 spare[] = {16, 26, 27, 28, 32, 36, 40, 44,
1722 + 48, 49, 50, 51, 52, 62, 63, 64};
1723 + u32 eccsteps, i;
1724 +
1725 + eccsteps = mtd->writesize / nand->ecc.size;
1726 + *sps = mtd->oobsize / eccsteps;
1727 +
1728 + if (nand->ecc.size == 1024)
1729 + *sps >>= 1;
1730 +
1731 + for (i = 0; i < ARRAY_SIZE(spare); i++) {
1732 + if (*sps <= spare[i]) {
1733 + if (!i)
1734 + *sps = spare[i];
1735 + else if (*sps != spare[i])
1736 + *sps = spare[i - 1];
1737 + break;
1738 + }
1739 + }
1740 +
1741 + if (i >= ARRAY_SIZE(spare))
1742 + *sps = spare[ARRAY_SIZE(spare) - 1];
1743 +
1744 + if (nand->ecc.size == 1024)
1745 + *sps <<= 1;
1746 +}
1747 +
1748 +static int mtk_nfc_ecc_init(struct device *dev, struct mtd_info *mtd)
1749 +{
1750 + struct nand_chip *nand = mtd_to_nand(mtd);
1751 + u32 spare;
1752 +
1753 + /* support only ecc hw mode */
1754 + if (nand->ecc.mode != NAND_ECC_HW) {
1755 + dev_err(dev, "ecc.mode not supported\n");
1756 + return -EINVAL;
1757 + }
1758 +
1759 + /* if optional DT settings are not present */
1760 + if (!nand->ecc.size || !nand->ecc.strength) {
1761 +
1762 + /* controller only supports sizes 512 and 1024 */
1763 + nand->ecc.size = (mtd->writesize > 512) ? 1024 : 512;
1764 +
1765 + /* get controller valid values */
1766 + mtk_nfc_set_spare_per_sector(&spare, mtd);
1767 + spare = spare - NFI_FDM_MAX_SIZE;
1768 + nand->ecc.strength = (spare << 3) / ECC_PARITY_BITS;
1769 + }
1770 +
1771 + mtk_ecc_update_strength(&nand->ecc.strength);
1772 +
1773 + dev_info(dev, "eccsize %d eccstrength %d\n",
1774 + nand->ecc.size, nand->ecc.strength);
1775 +
1776 + return 0;
1777 +}
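+
+/*
+ * Worked example of the defaults above (illustrative numbers): a chip with a
+ * 2048 byte page and 64 byte OOB, and no ecc size/strength in DT, gets
+ * ecc.size = 1024 and, via mtk_nfc_set_spare_per_sector(), 32 spare bytes per
+ * sector.  That leaves 32 - NFI_FDM_MAX_SIZE = 24 bytes for parity, so the
+ * computed strength is (24 * 8) / ECC_PARITY_BITS = 13, which
+ * mtk_ecc_update_strength() rounds down to the supported value 12.
+ */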
1778 +
1779 +static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc,
1780 + struct device_node *np)
1781 +{
1782 + struct mtk_nfc_nand_chip *chip;
1783 + struct nand_chip *nand;
1784 + struct mtd_info *mtd;
1785 + int nsels, len;
1786 + u32 tmp;
1787 + int ret;
1788 + int i;
1789 +
1790 + if (!of_get_property(np, "reg", &nsels))
1791 + return -ENODEV;
1792 +
1793 + nsels /= sizeof(u32);
1794 + if (!nsels || nsels > MTK_NAND_MAX_NSELS) {
1795 + dev_err(dev, "invalid reg property size %d\n", nsels);
1796 + return -EINVAL;
1797 + }
1798 +
1799 + chip = devm_kzalloc(dev,
1800 + sizeof(*chip) + nsels * sizeof(u8), GFP_KERNEL);
1801 + if (!chip)
1802 + return -ENOMEM;
1803 +
1804 + chip->nsels = nsels;
1805 + for (i = 0; i < nsels; i++) {
1806 + ret = of_property_read_u32_index(np, "reg", i, &tmp);
1807 + if (ret) {
1808 + dev_err(dev, "reg property failure : %d\n", ret);
1809 + return ret;
1810 + }
1811 + chip->sels[i] = tmp;
1812 + }
1813 +
1814 + nand = &chip->nand;
1815 + nand->controller = &nfc->controller;
1816 +
1817 + nand_set_flash_node(nand, np);
1818 + nand_set_controller_data(nand, nfc);
1819 +
1820 + nand->options |= NAND_USE_BOUNCE_BUFFER | NAND_SUBPAGE_READ;
1821 + nand->dev_ready = mtk_nfc_dev_ready;
1822 + nand->select_chip = mtk_nfc_select_chip;
1823 + nand->write_byte = mtk_nfc_write_byte;
1824 + nand->write_buf = mtk_nfc_write_buf;
1825 + nand->read_byte = mtk_nfc_read_byte;
1826 + nand->read_buf = mtk_nfc_read_buf;
1827 + nand->cmd_ctrl = mtk_nfc_cmd_ctrl;
1828 +
1829 + /* set default mode in case dt entry is missing */
1830 + nand->ecc.mode = NAND_ECC_HW;
1831 +
1832 + nand->ecc.write_subpage = mtk_nfc_write_subpage_hwecc;
1833 + nand->ecc.write_page_raw = mtk_nfc_write_page_raw;
1834 + nand->ecc.write_page = mtk_nfc_write_page_hwecc;
1835 + nand->ecc.write_oob_raw = mtk_nfc_write_oob_std;
1836 + nand->ecc.write_oob = mtk_nfc_write_oob_std;
1837 +
1838 + nand->ecc.read_subpage = mtk_nfc_read_subpage_hwecc;
1839 + nand->ecc.read_page_raw = mtk_nfc_read_page_raw;
1840 + nand->ecc.read_page = mtk_nfc_read_page_hwecc;
1841 + nand->ecc.read_oob_raw = mtk_nfc_read_oob_std;
1842 + nand->ecc.read_oob = mtk_nfc_read_oob_std;
1843 +
1844 + mtd = nand_to_mtd(nand);
1845 + mtd->owner = THIS_MODULE;
1846 + mtd->dev.parent = dev;
1847 + mtd->name = MTK_NAME;
1848 + mtd_set_ooblayout(mtd, &mtk_nfc_ooblayout_ops);
1849 +
1850 + mtk_nfc_hw_init(nfc);
1851 +
1852 + ret = nand_scan_ident(mtd, nsels, NULL);
1853 + if (ret)
1854 + return -ENODEV;
1855 +
1856 +	/* store the BBT magic in the page area, because the OOB is not protected */
1857 + if (nand->bbt_options & NAND_BBT_USE_FLASH)
1858 + nand->bbt_options |= NAND_BBT_NO_OOB;
1859 +
1860 + ret = mtk_nfc_ecc_init(dev, mtd);
1861 + if (ret)
1862 + return -EINVAL;
1863 +
1864 + mtk_nfc_set_spare_per_sector(&chip->spare_per_sector, mtd);
1865 + mtk_nfc_set_fdm(&chip->fdm, mtd);
1866 + mtk_nfc_set_bad_mark_ctl(&chip->bad_mark, mtd);
1867 +
1868 + len = mtd->writesize + mtd->oobsize;
1869 + nfc->buffer = devm_kzalloc(dev, len, GFP_KERNEL);
1870 + if (!nfc->buffer)
1871 + return -ENOMEM;
1872 +
1873 + ret = nand_scan_tail(mtd);
1874 + if (ret)
1875 + return -ENODEV;
1876 +
1877 + ret = mtd_device_parse_register(mtd, NULL, NULL, NULL, 0);
1878 + if (ret) {
1879 + dev_err(dev, "mtd parse partition error\n");
1880 + nand_release(mtd);
1881 + return ret;
1882 + }
1883 +
1884 + list_add_tail(&chip->node, &nfc->chips);
1885 +
1886 + return 0;
1887 +}
1888 +
1889 +static int mtk_nfc_nand_chips_init(struct device *dev, struct mtk_nfc *nfc)
1890 +{
1891 + struct device_node *np = dev->of_node;
1892 + struct device_node *nand_np;
1893 + int ret;
1894 +
1895 + for_each_child_of_node(np, nand_np) {
1896 + ret = mtk_nfc_nand_chip_init(dev, nfc, nand_np);
1897 + if (ret) {
1898 + of_node_put(nand_np);
1899 + return ret;
1900 + }
1901 + }
1902 +
1903 + return 0;
1904 +}
1905 +
1906 +static int mtk_nfc_probe(struct platform_device *pdev)
1907 +{
1908 + struct device *dev = &pdev->dev;
1909 + struct device_node *np = dev->of_node;
1910 + struct mtk_nfc *nfc;
1911 + struct resource *res;
1912 + int ret, irq;
1913 +
1914 + nfc = devm_kzalloc(dev, sizeof(*nfc), GFP_KERNEL);
1915 + if (!nfc)
1916 + return -ENOMEM;
1917 +
1918 + spin_lock_init(&nfc->controller.lock);
1919 + init_waitqueue_head(&nfc->controller.wq);
1920 + INIT_LIST_HEAD(&nfc->chips);
1921 +
1922 +	/* defer probing until the ECC engine driver is ready */
1923 + nfc->ecc = of_mtk_ecc_get(np);
1924 + if (IS_ERR(nfc->ecc))
1925 + return PTR_ERR(nfc->ecc);
1926 + else if (!nfc->ecc)
1927 + return -ENODEV;
1928 +
1929 + nfc->dev = dev;
1930 +
1931 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1932 + nfc->regs = devm_ioremap_resource(dev, res);
1933 + if (IS_ERR(nfc->regs)) {
1934 + ret = PTR_ERR(nfc->regs);
1935 + dev_err(dev, "no nfi base\n");
1936 + goto release_ecc;
1937 + }
1938 +
1939 + nfc->clk.nfi_clk = devm_clk_get(dev, "nfi_clk");
1940 + if (IS_ERR(nfc->clk.nfi_clk)) {
1941 + dev_err(dev, "no clk\n");
1942 + ret = PTR_ERR(nfc->clk.nfi_clk);
1943 + goto release_ecc;
1944 + }
1945 +
1946 + nfc->clk.pad_clk = devm_clk_get(dev, "pad_clk");
1947 + if (IS_ERR(nfc->clk.pad_clk)) {
1948 + dev_err(dev, "no pad clk\n");
1949 + ret = PTR_ERR(nfc->clk.pad_clk);
1950 + goto release_ecc;
1951 + }
1952 +
1953 + ret = mtk_nfc_enable_clk(dev, &nfc->clk);
1954 + if (ret)
1955 + goto release_ecc;
1956 +
1957 + irq = platform_get_irq(pdev, 0);
1958 + if (irq < 0) {
1959 + dev_err(dev, "no nfi irq resource\n");
1960 + ret = -EINVAL;
1961 + goto clk_disable;
1962 + }
1963 +
1964 + ret = devm_request_irq(dev, irq, mtk_nfc_irq, 0x0, "mtk-nand", nfc);
1965 + if (ret) {
1966 + dev_err(dev, "failed to request nfi irq\n");
1967 + goto clk_disable;
1968 + }
1969 +
1970 + ret = dma_set_mask(dev, DMA_BIT_MASK(32));
1971 + if (ret) {
1972 + dev_err(dev, "failed to set dma mask\n");
1973 + goto clk_disable;
1974 + }
1975 +
1976 + platform_set_drvdata(pdev, nfc);
1977 +
1978 + ret = mtk_nfc_nand_chips_init(dev, nfc);
1979 + if (ret) {
1980 + dev_err(dev, "failed to init nand chips\n");
1981 + goto clk_disable;
1982 + }
1983 +
1984 + return 0;
1985 +
1986 +clk_disable:
1987 + mtk_nfc_disable_clk(&nfc->clk);
1988 +
1989 +release_ecc:
1990 + mtk_ecc_release(nfc->ecc);
1991 +
1992 + return ret;
1993 +}
1994 +
1995 +static int mtk_nfc_remove(struct platform_device *pdev)
1996 +{
1997 + struct mtk_nfc *nfc = platform_get_drvdata(pdev);
1998 + struct mtk_nfc_nand_chip *chip;
1999 +
2000 + while (!list_empty(&nfc->chips)) {
2001 + chip = list_first_entry(&nfc->chips, struct mtk_nfc_nand_chip,
2002 + node);
2003 + nand_release(nand_to_mtd(&chip->nand));
2004 + list_del(&chip->node);
2005 + }
2006 +
2007 + mtk_ecc_release(nfc->ecc);
2008 + mtk_nfc_disable_clk(&nfc->clk);
2009 +
2010 + return 0;
2011 +}
2012 +
2013 +#ifdef CONFIG_PM_SLEEP
2014 +static int mtk_nfc_suspend(struct device *dev)
2015 +{
2016 + struct mtk_nfc *nfc = dev_get_drvdata(dev);
2017 +
2018 + mtk_nfc_disable_clk(&nfc->clk);
2019 +
2020 + return 0;
2021 +}
2022 +
2023 +static int mtk_nfc_resume(struct device *dev)
2024 +{
2025 + struct mtk_nfc *nfc = dev_get_drvdata(dev);
2026 + struct mtk_nfc_nand_chip *chip;
2027 + struct nand_chip *nand;
2028 + struct mtd_info *mtd;
2029 + int ret;
2030 + u32 i;
2031 +
2032 + udelay(200);
2033 +
2034 + ret = mtk_nfc_enable_clk(dev, &nfc->clk);
2035 + if (ret)
2036 + return ret;
2037 +
2038 + mtk_nfc_hw_init(nfc);
2039 +
2040 + list_for_each_entry(chip, &nfc->chips, node) {
2041 + nand = &chip->nand;
2042 + mtd = nand_to_mtd(nand);
2043 + for (i = 0; i < chip->nsels; i++) {
2044 + nand->select_chip(mtd, i);
2045 + nand->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
2046 + }
2047 + }
2048 +
2049 + return 0;
2050 +}
2051 +static SIMPLE_DEV_PM_OPS(mtk_nfc_pm_ops, mtk_nfc_suspend, mtk_nfc_resume);
2052 +#endif
2053 +
2054 +static const struct of_device_id mtk_nfc_id_table[] = {
2055 + { .compatible = "mediatek,mt2701-nfc" },
2056 + {}
2057 +};
2058 +MODULE_DEVICE_TABLE(of, mtk_nfc_id_table);
2059 +
2060 +static struct platform_driver mtk_nfc_driver = {
2061 + .probe = mtk_nfc_probe,
2062 + .remove = mtk_nfc_remove,
2063 + .driver = {
2064 + .name = MTK_NAME,
2065 + .of_match_table = mtk_nfc_id_table,
2066 +#ifdef CONFIG_PM_SLEEP
2067 + .pm = &mtk_nfc_pm_ops,
2068 +#endif
2069 + },
2070 +};
2071 +
2072 +module_platform_driver(mtk_nfc_driver);
2073 +
2074 +MODULE_LICENSE("GPL");
2075 +MODULE_AUTHOR("Xiaolei Li <xiaolei.li@mediatek.com>");
2076 +MODULE_AUTHOR("Jorge Ramirez-Ortiz <jorge.ramirez-ortiz@linaro.org>");
2077 +MODULE_DESCRIPTION("MTK Nand Flash Controller Driver");
2078 --
2079 1.7.10.4
2080