5a9fb51f037238b2281c48bc4db93e5055958976
[openwrt/staging/wigyori.git] / target / linux / ipq806x / patches / 0150-mtd-nand-Add-Qualcomm-NAND-controller.patch
1 From d2981ca1343b837fc574c4e46806d041b258720d Mon Sep 17 00:00:00 2001
2 From: Andy Gross <agross@codeaurora.org>
3 Date: Mon, 16 Jun 2014 17:13:22 -0500
4 Subject: [PATCH 150/182] mtd: nand: Add Qualcomm NAND controller
5
6 This patch adds the Qualcomm NAND controller and required infrastructure.
7
8 Signed-off-by: Andy Gross <agross@codeaurora.org>
9 ---
10 drivers/mtd/nand/Kconfig | 18 +
11 drivers/mtd/nand/Makefile | 2 +
12 drivers/mtd/nand/qcom_adm_dma.c | 797 +++++
13 drivers/mtd/nand/qcom_adm_dma.h | 268 ++
14 drivers/mtd/nand/qcom_nand.c | 7455 +++++++++++++++++++++++++++++++++++++++
15 drivers/mtd/nand/qcom_nand.h | 196 +
16 6 files changed, 8736 insertions(+)
17 create mode 100644 drivers/mtd/nand/qcom_adm_dma.c
18 create mode 100644 drivers/mtd/nand/qcom_adm_dma.h
19 create mode 100644 drivers/mtd/nand/qcom_nand.c
20 create mode 100644 drivers/mtd/nand/qcom_nand.h
21
22 diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
23 index 90ff447..6e3842f 100644
24 --- a/drivers/mtd/nand/Kconfig
25 +++ b/drivers/mtd/nand/Kconfig
26 @@ -510,4 +510,22 @@ config MTD_NAND_XWAY
27 Enables support for NAND Flash chips on Lantiq XWAY SoCs. NAND is attached
28 to the External Bus Unit (EBU).
29
30 +config MTD_QCOM_DMA
31 +	tristate "QCOM NAND DMA Support"
32 + depends on ARCH_QCOM && MTD_QCOM_NAND
33 + default n
34 + help
35 + DMA support for QCOM NAND
36 +
37 +config MTD_QCOM_NAND
38 + tristate "QCOM NAND Device Support"
39 + depends on MTD && ARCH_QCOM
40 + select CRC16
41 + select BITREVERSE
42 + select MTD_NAND_IDS
43 + select MTD_QCOM_DMA
44 + default y
45 + help
46 + Support for some NAND chips connected to the QCOM NAND controller.
47 +
48 endif # MTD_NAND
49 diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
50 index 542b568..6ef3c02 100644
51 --- a/drivers/mtd/nand/Makefile
52 +++ b/drivers/mtd/nand/Makefile
53 @@ -49,5 +49,7 @@ obj-$(CONFIG_MTD_NAND_JZ4740) += jz4740_nand.o
54 obj-$(CONFIG_MTD_NAND_GPMI_NAND) += gpmi-nand/
55 obj-$(CONFIG_MTD_NAND_XWAY) += xway_nand.o
56 obj-$(CONFIG_MTD_NAND_BCM47XXNFLASH) += bcm47xxnflash/
57 +obj-$(CONFIG_MTD_QCOM_NAND) += qcom_nand.o
58 +obj-$(CONFIG_MTD_QCOM_DMA) += qcom_adm_dma.o
59
60 nand-objs := nand_base.o nand_bbt.o
61 diff --git a/drivers/mtd/nand/qcom_adm_dma.c b/drivers/mtd/nand/qcom_adm_dma.c
62 new file mode 100644
63 index 0000000..46d8473
64 --- /dev/null
65 +++ b/drivers/mtd/nand/qcom_adm_dma.c
66 @@ -0,0 +1,797 @@
67 +/* Copyright (c) 2012 The Linux Foundation. All rights reserved. */
68 +/* linux/arch/arm/mach-msm/dma.c
69 + *
70 + * Copyright (C) 2007 Google, Inc.
71 + * Copyright (c) 2008-2010, 2012 The Linux Foundation. All rights reserved.
72 + *
73 + * This software is licensed under the terms of the GNU General Public
74 + * License version 2, as published by the Free Software Foundation, and
75 + * may be copied, distributed, and modified under those terms.
76 + *
77 + * This program is distributed in the hope that it will be useful,
78 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
79 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
80 + * GNU General Public License for more details.
81 + *
82 + */
83 +
84 +#include <linux/clk.h>
85 +#include <linux/err.h>
86 +#include <linux/io.h>
87 +#include <linux/interrupt.h>
88 +#include <linux/module.h>
89 +#include <linux/platform_device.h>
90 +#include <linux/spinlock.h>
91 +#include <linux/pm_runtime.h>
92 +#include <linux/reset.h>
93 +#include <linux/reset-controller.h>
94 +#include "qcom_adm_dma.h"
95 +
96 +#define MODULE_NAME "msm_dmov"
97 +
98 +#define MSM_DMOV_CHANNEL_COUNT 16
99 +#define MSM_DMOV_CRCI_COUNT 16
100 +
101 +enum {
102 + CLK_DIS,
103 + CLK_TO_BE_DIS,
104 + CLK_EN
105 +};
106 +
107 +struct msm_dmov_ci_conf {
108 + int start;
109 + int end;
110 + int burst;
111 +};
112 +
113 +struct msm_dmov_crci_conf {
114 + int sd;
115 + int blk_size;
116 +};
117 +
118 +struct msm_dmov_chan_conf {
119 + int sd;
120 + int block;
121 + int priority;
122 +};
123 +
124 +struct msm_dmov_conf {
125 + void *base;
126 + struct msm_dmov_crci_conf *crci_conf;
127 + struct msm_dmov_chan_conf *chan_conf;
128 + int channel_active;
129 + int sd;
130 + size_t sd_size;
131 + struct list_head staged_commands[MSM_DMOV_CHANNEL_COUNT];
132 + struct list_head ready_commands[MSM_DMOV_CHANNEL_COUNT];
133 + struct list_head active_commands[MSM_DMOV_CHANNEL_COUNT];
134 + struct mutex lock;
135 + spinlock_t list_lock;
136 + unsigned int irq;
137 + struct clk *clk;
138 + struct clk *pclk;
139 + struct clk *ebiclk;
140 + unsigned int clk_ctl;
141 + struct delayed_work work;
142 + struct workqueue_struct *cmd_wq;
143 +
144 + struct reset_control *adm_reset;
145 + struct reset_control *pbus_reset;
146 + struct reset_control *c0_reset;
147 + struct reset_control *c1_reset;
148 + struct reset_control *c2_reset;
149 +
150 +};
151 +
152 +static void msm_dmov_clock_work(struct work_struct *);
153 +
154 +#define DMOV_CRCI_DEFAULT_CONF { .sd = 0, .blk_size = 0 }
155 +#define DMOV_CRCI_CONF(secd, blk) { .sd = secd, .blk_size = blk }
156 +
157 +static struct msm_dmov_crci_conf adm_crci_conf[] = {
158 + DMOV_CRCI_DEFAULT_CONF,
159 + DMOV_CRCI_DEFAULT_CONF,
160 + DMOV_CRCI_DEFAULT_CONF,
161 + DMOV_CRCI_DEFAULT_CONF,
162 + DMOV_CRCI_DEFAULT_CONF,
163 + DMOV_CRCI_DEFAULT_CONF,
164 + DMOV_CRCI_DEFAULT_CONF,
165 + DMOV_CRCI_DEFAULT_CONF,
166 + DMOV_CRCI_DEFAULT_CONF,
167 + DMOV_CRCI_DEFAULT_CONF,
168 + DMOV_CRCI_CONF(0, 1),
169 + DMOV_CRCI_DEFAULT_CONF,
170 + DMOV_CRCI_DEFAULT_CONF,
171 + DMOV_CRCI_DEFAULT_CONF,
172 + DMOV_CRCI_DEFAULT_CONF,
173 + DMOV_CRCI_DEFAULT_CONF,
174 +};
175 +
176 +#define DMOV_CHANNEL_DEFAULT_CONF { .sd = 0, .block = 0, .priority = 1 }
177 +
178 +static struct msm_dmov_chan_conf adm_chan_conf[] = {
179 + DMOV_CHANNEL_DEFAULT_CONF,
180 + DMOV_CHANNEL_DEFAULT_CONF,
181 + DMOV_CHANNEL_DEFAULT_CONF,
182 + DMOV_CHANNEL_DEFAULT_CONF,
183 + DMOV_CHANNEL_DEFAULT_CONF,
184 + DMOV_CHANNEL_DEFAULT_CONF,
185 + DMOV_CHANNEL_DEFAULT_CONF,
186 + DMOV_CHANNEL_DEFAULT_CONF,
187 + DMOV_CHANNEL_DEFAULT_CONF,
188 + DMOV_CHANNEL_DEFAULT_CONF,
189 + DMOV_CHANNEL_DEFAULT_CONF,
190 + DMOV_CHANNEL_DEFAULT_CONF,
191 + DMOV_CHANNEL_DEFAULT_CONF,
192 + DMOV_CHANNEL_DEFAULT_CONF,
193 + DMOV_CHANNEL_DEFAULT_CONF,
194 + DMOV_CHANNEL_DEFAULT_CONF,
195 +};
196 +
197 +#define DMOV_IRQ_TO_ADM(irq) 0
198 +
199 +static struct msm_dmov_conf dmov_conf[] = {
200 + {
201 + .crci_conf = adm_crci_conf,
202 + .chan_conf = adm_chan_conf,
203 + .lock = __MUTEX_INITIALIZER(dmov_conf[0].lock),
204 + .list_lock = __SPIN_LOCK_UNLOCKED(dmov_list_lock),
205 + .clk_ctl = CLK_EN,
206 + .work = __DELAYED_WORK_INITIALIZER(dmov_conf[0].work,
207 + msm_dmov_clock_work,0),
208 + }
209 +};
210 +
211 +#define MSM_DMOV_ID_COUNT (MSM_DMOV_CHANNEL_COUNT * ARRAY_SIZE(dmov_conf))
212 +#define DMOV_REG(name, adm) ((name) + (dmov_conf[adm].base) +\
213 + (dmov_conf[adm].sd * dmov_conf[adm].sd_size))
214 +#define DMOV_ID_TO_ADM(id) ((id) / MSM_DMOV_CHANNEL_COUNT)
215 +#define DMOV_ID_TO_CHAN(id) ((id) % MSM_DMOV_CHANNEL_COUNT)
216 +#define DMOV_CHAN_ADM_TO_ID(ch, adm) ((ch) + (adm) * MSM_DMOV_CHANNEL_COUNT)
217 +
218 +enum {
219 + MSM_DMOV_PRINT_ERRORS = 1,
220 + MSM_DMOV_PRINT_IO = 2,
221 + MSM_DMOV_PRINT_FLOW = 4
222 +};
223 +
224 +unsigned int msm_dmov_print_mask = MSM_DMOV_PRINT_ERRORS;
225 +
226 +#define MSM_DMOV_DPRINTF(mask, format, args...) \
227 + do { \
228 + if ((mask) & msm_dmov_print_mask) \
229 + printk(KERN_ERR format, args); \
230 + } while (0)
231 +#define PRINT_ERROR(format, args...) \
232 + MSM_DMOV_DPRINTF(MSM_DMOV_PRINT_ERRORS, format, args);
233 +#define PRINT_IO(format, args...) \
234 + MSM_DMOV_DPRINTF(MSM_DMOV_PRINT_IO, format, args);
235 +#define PRINT_FLOW(format, args...) \
236 + MSM_DMOV_DPRINTF(MSM_DMOV_PRINT_FLOW, format, args);
237 +
238 +static int msm_dmov_clk_on(int adm)
239 +{
240 + int ret;
241 +
242 +return 0;
243 + ret = clk_prepare_enable(dmov_conf[adm].clk);
244 + if (ret)
245 + return ret;
246 + if (dmov_conf[adm].pclk) {
247 + ret = clk_prepare_enable(dmov_conf[adm].pclk);
248 + if (ret) {
249 + clk_disable_unprepare(dmov_conf[adm].clk);
250 + return ret;
251 + }
252 + }
253 + if (dmov_conf[adm].ebiclk) {
254 + ret = clk_prepare_enable(dmov_conf[adm].ebiclk);
255 + if (ret) {
256 + if (dmov_conf[adm].pclk)
257 + clk_disable_unprepare(dmov_conf[adm].pclk);
258 + clk_disable_unprepare(dmov_conf[adm].clk);
259 + }
260 + }
261 + return ret;
262 +}
263 +
264 +static void msm_dmov_clk_off(int adm)
265 +{
266 +#if 0
267 + if (dmov_conf[adm].ebiclk)
268 + clk_disable_unprepare(dmov_conf[adm].ebiclk);
269 + if (dmov_conf[adm].pclk)
270 + clk_disable_unprepare(dmov_conf[adm].pclk);
271 + clk_disable_unprepare(dmov_conf[adm].clk);
272 +#endif
273 +}
274 +
275 +static void msm_dmov_clock_work(struct work_struct *work)
276 +{
277 + struct msm_dmov_conf *conf =
278 + container_of(to_delayed_work(work), struct msm_dmov_conf, work);
279 + int adm = DMOV_IRQ_TO_ADM(conf->irq);
280 + mutex_lock(&conf->lock);
281 + if (conf->clk_ctl == CLK_TO_BE_DIS) {
282 + BUG_ON(conf->channel_active);
283 + msm_dmov_clk_off(adm);
284 + conf->clk_ctl = CLK_DIS;
285 + }
286 + mutex_unlock(&conf->lock);
287 +}
288 +
289 +enum {
290 + NOFLUSH = 0,
291 + GRACEFUL,
292 + NONGRACEFUL,
293 +};
294 +
295 +/* Caller must hold the list lock */
296 +static struct msm_dmov_cmd *start_ready_cmd(unsigned ch, int adm)
297 +{
298 + struct msm_dmov_cmd *cmd;
299 +
300 + if (list_empty(&dmov_conf[adm].ready_commands[ch])) {
301 + return NULL;
302 + }
303 +
304 + cmd = list_entry(dmov_conf[adm].ready_commands[ch].next, typeof(*cmd),
305 + list);
306 + list_del(&cmd->list);
307 + if (cmd->exec_func)
308 + cmd->exec_func(cmd);
309 + list_add_tail(&cmd->list, &dmov_conf[adm].active_commands[ch]);
310 + if (!dmov_conf[adm].channel_active) {
311 + enable_irq(dmov_conf[adm].irq);
312 + }
313 + dmov_conf[adm].channel_active |= BIT(ch);
314 + PRINT_IO("msm dmov enqueue command, %x, ch %d\n", cmd->cmdptr, ch);
315 + writel_relaxed(cmd->cmdptr, DMOV_REG(DMOV_CMD_PTR(ch), adm));
316 +
317 + return cmd;
318 +}
319 +
320 +static void msm_dmov_enqueue_cmd_ext_work(struct work_struct *work)
321 +{
322 + struct msm_dmov_cmd *cmd =
323 + container_of(work, struct msm_dmov_cmd, work);
324 + unsigned id = cmd->id;
325 + unsigned status;
326 + unsigned long flags;
327 + int adm = DMOV_ID_TO_ADM(id);
328 + int ch = DMOV_ID_TO_CHAN(id);
329 +
330 + mutex_lock(&dmov_conf[adm].lock);
331 + if (dmov_conf[adm].clk_ctl == CLK_DIS) {
332 + status = msm_dmov_clk_on(adm);
333 + if (status != 0)
334 + goto error;
335 + }
336 + dmov_conf[adm].clk_ctl = CLK_EN;
337 +
338 + spin_lock_irqsave(&dmov_conf[adm].list_lock, flags);
339 +
340 + cmd = list_entry(dmov_conf[adm].staged_commands[ch].next, typeof(*cmd),
341 + list);
342 + list_del(&cmd->list);
343 + list_add_tail(&cmd->list, &dmov_conf[adm].ready_commands[ch]);
344 + status = readl_relaxed(DMOV_REG(DMOV_STATUS(ch), adm));
345 + if (status & DMOV_STATUS_CMD_PTR_RDY) {
346 + PRINT_IO("msm_dmov_enqueue_cmd(%d), start command, status %x\n",
347 + id, status);
348 + cmd = start_ready_cmd(ch, adm);
349 + /*
350 + * We added something to the ready list, and still hold the
351 + * list lock. Thus, no need to check for cmd == NULL
352 + */
353 + if (cmd->toflush) {
354 + int flush = (cmd->toflush == GRACEFUL) ? 1 << 31 : 0;
355 + writel_relaxed(flush, DMOV_REG(DMOV_FLUSH0(ch), adm));
356 + }
357 + } else {
358 + cmd->toflush = 0;
359 + if (list_empty(&dmov_conf[adm].active_commands[ch]) &&
360 + !list_empty(&dmov_conf[adm].ready_commands[ch]))
361 + PRINT_ERROR("msm_dmov_enqueue_cmd_ext(%d), stalled, "
362 + "status %x\n", id, status);
363 + PRINT_IO("msm_dmov_enqueue_cmd(%d), enqueue command, status "
364 + "%x\n", id, status);
365 + }
366 + if (!dmov_conf[adm].channel_active) {
367 + dmov_conf[adm].clk_ctl = CLK_TO_BE_DIS;
368 + schedule_delayed_work(&dmov_conf[adm].work, (HZ/10));
369 + }
370 + spin_unlock_irqrestore(&dmov_conf[adm].list_lock, flags);
371 +error:
372 + mutex_unlock(&dmov_conf[adm].lock);
373 +}
374 +
375 +static void __msm_dmov_enqueue_cmd_ext(unsigned id, struct msm_dmov_cmd *cmd)
376 +{
377 + int adm = DMOV_ID_TO_ADM(id);
378 + int ch = DMOV_ID_TO_CHAN(id);
379 + unsigned long flags;
380 + cmd->id = id;
381 + cmd->toflush = 0;
382 +
383 + spin_lock_irqsave(&dmov_conf[adm].list_lock, flags);
384 + list_add_tail(&cmd->list, &dmov_conf[adm].staged_commands[ch]);
385 + spin_unlock_irqrestore(&dmov_conf[adm].list_lock, flags);
386 +
387 + queue_work(dmov_conf[adm].cmd_wq, &cmd->work);
388 +}
389 +
390 +void msm_dmov_enqueue_cmd_ext(unsigned id, struct msm_dmov_cmd *cmd)
391 +{
392 + INIT_WORK(&cmd->work, msm_dmov_enqueue_cmd_ext_work);
393 + __msm_dmov_enqueue_cmd_ext(id, cmd);
394 +}
395 +EXPORT_SYMBOL(msm_dmov_enqueue_cmd_ext);
396 +
397 +void msm_dmov_enqueue_cmd(unsigned id, struct msm_dmov_cmd *cmd)
398 +{
399 + /* Disable callback function (for backwards compatibility) */
400 + cmd->exec_func = NULL;
401 + INIT_WORK(&cmd->work, msm_dmov_enqueue_cmd_ext_work);
402 + __msm_dmov_enqueue_cmd_ext(id, cmd);
403 +}
404 +EXPORT_SYMBOL(msm_dmov_enqueue_cmd);
405 +
406 +void msm_dmov_flush(unsigned int id, int graceful)
407 +{
408 + unsigned long irq_flags;
409 + int ch = DMOV_ID_TO_CHAN(id);
410 + int adm = DMOV_ID_TO_ADM(id);
411 + int flush = graceful ? DMOV_FLUSH_TYPE : 0;
412 + struct msm_dmov_cmd *cmd;
413 +
414 + spin_lock_irqsave(&dmov_conf[adm].list_lock, irq_flags);
415 + /* XXX not checking if flush cmd sent already */
416 + if (!list_empty(&dmov_conf[adm].active_commands[ch])) {
417 + PRINT_IO("msm_dmov_flush(%d), send flush cmd\n", id);
418 + writel_relaxed(flush, DMOV_REG(DMOV_FLUSH0(ch), adm));
419 + }
420 + list_for_each_entry(cmd, &dmov_conf[adm].staged_commands[ch], list)
421 + cmd->toflush = graceful ? GRACEFUL : NONGRACEFUL;
422 + /* spin_unlock_irqrestore has the necessary barrier */
423 + spin_unlock_irqrestore(&dmov_conf[adm].list_lock, irq_flags);
424 +}
425 +EXPORT_SYMBOL(msm_dmov_flush);
426 +
427 +struct msm_dmov_exec_cmdptr_cmd {
428 + struct msm_dmov_cmd dmov_cmd;
429 + struct completion complete;
430 + unsigned id;
431 + unsigned int result;
432 + struct msm_dmov_errdata err;
433 +};
434 +
435 +static void
436 +dmov_exec_cmdptr_complete_func(struct msm_dmov_cmd *_cmd,
437 + unsigned int result,
438 + struct msm_dmov_errdata *err)
439 +{
440 + struct msm_dmov_exec_cmdptr_cmd *cmd = container_of(_cmd, struct msm_dmov_exec_cmdptr_cmd, dmov_cmd);
441 + cmd->result = result;
442 + if (result != 0x80000002 && err)
443 + memcpy(&cmd->err, err, sizeof(struct msm_dmov_errdata));
444 +
445 + complete(&cmd->complete);
446 +}
447 +
448 +int msm_dmov_exec_cmd(unsigned id, unsigned int cmdptr)
449 +{
450 + struct msm_dmov_exec_cmdptr_cmd cmd;
451 +
452 + PRINT_FLOW("dmov_exec_cmdptr(%d, %x)\n", id, cmdptr);
453 +
454 + cmd.dmov_cmd.cmdptr = cmdptr;
455 + cmd.dmov_cmd.complete_func = dmov_exec_cmdptr_complete_func;
456 + cmd.dmov_cmd.exec_func = NULL;
457 + cmd.id = id;
458 + cmd.result = 0;
459 + INIT_WORK_ONSTACK(&cmd.dmov_cmd.work, msm_dmov_enqueue_cmd_ext_work);
460 + init_completion(&cmd.complete);
461 +
462 + __msm_dmov_enqueue_cmd_ext(id, &cmd.dmov_cmd);
463 + wait_for_completion_timeout(&cmd.complete, msecs_to_jiffies(1000));
464 +
465 + if (cmd.result != 0x80000002) {
466 + PRINT_ERROR("dmov_exec_cmdptr(%d): ERROR, result: %x\n", id, cmd.result);
467 + PRINT_ERROR("dmov_exec_cmdptr(%d): flush: %x %x %x %x\n",
468 + id, cmd.err.flush[0], cmd.err.flush[1], cmd.err.flush[2], cmd.err.flush[3]);
469 + return -EIO;
470 + }
471 + PRINT_FLOW("dmov_exec_cmdptr(%d, %x) done\n", id, cmdptr);
472 + return 0;
473 +}
474 +EXPORT_SYMBOL(msm_dmov_exec_cmd);
475 +
476 +static void fill_errdata(struct msm_dmov_errdata *errdata, int ch, int adm)
477 +{
478 + errdata->flush[0] = readl_relaxed(DMOV_REG(DMOV_FLUSH0(ch), adm));
479 + errdata->flush[1] = readl_relaxed(DMOV_REG(DMOV_FLUSH1(ch), adm));
480 + errdata->flush[2] = 0;
481 + errdata->flush[3] = readl_relaxed(DMOV_REG(DMOV_FLUSH3(ch), adm));
482 + errdata->flush[4] = readl_relaxed(DMOV_REG(DMOV_FLUSH4(ch), adm));
483 + errdata->flush[5] = readl_relaxed(DMOV_REG(DMOV_FLUSH5(ch), adm));
484 +}
485 +
486 +static irqreturn_t msm_dmov_isr(int irq, void *dev_id)
487 +{
488 + unsigned int int_status;
489 + unsigned int mask;
490 + unsigned int id;
491 + unsigned int ch;
492 + unsigned long irq_flags;
493 + unsigned int ch_status;
494 + unsigned int ch_result;
495 + unsigned int valid = 0;
496 + struct msm_dmov_cmd *cmd;
497 + int adm = DMOV_IRQ_TO_ADM(irq);
498 +
499 + mutex_lock(&dmov_conf[adm].lock);
500 + /* read and clear isr */
501 + int_status = readl_relaxed(DMOV_REG(DMOV_ISR, adm));
502 + PRINT_FLOW("msm_datamover_irq_handler: DMOV_ISR %x\n", int_status);
503 +
504 + spin_lock_irqsave(&dmov_conf[adm].list_lock, irq_flags);
505 + while (int_status) {
506 + mask = int_status & -int_status;
507 + ch = fls(mask) - 1;
508 + id = DMOV_CHAN_ADM_TO_ID(ch, adm);
509 + PRINT_FLOW("msm_datamover_irq_handler %08x %08x id %d\n", int_status, mask, id);
510 + int_status &= ~mask;
511 + ch_status = readl_relaxed(DMOV_REG(DMOV_STATUS(ch), adm));
512 + if (!(ch_status & DMOV_STATUS_RSLT_VALID)) {
513 + PRINT_FLOW("msm_datamover_irq_handler id %d, "
514 + "result not valid %x\n", id, ch_status);
515 + continue;
516 + }
517 + do {
518 + valid = 1;
519 + ch_result = readl_relaxed(DMOV_REG(DMOV_RSLT(ch), adm));
520 + if (list_empty(&dmov_conf[adm].active_commands[ch])) {
521 + PRINT_ERROR("msm_datamover_irq_handler id %d, got result "
522 + "with no active command, status %x, result %x\n",
523 + id, ch_status, ch_result);
524 + cmd = NULL;
525 + } else {
526 + cmd = list_entry(dmov_conf[adm].
527 + active_commands[ch].next, typeof(*cmd),
528 + list);
529 + }
530 + PRINT_FLOW("msm_datamover_irq_handler id %d, status %x, result %x\n", id, ch_status, ch_result);
531 + if (ch_result & DMOV_RSLT_DONE) {
532 + PRINT_FLOW("msm_datamover_irq_handler id %d, status %x\n",
533 + id, ch_status);
534 + PRINT_IO("msm_datamover_irq_handler id %d, got result "
535 + "for %p, result %x\n", id, cmd, ch_result);
536 + if (cmd) {
537 + list_del(&cmd->list);
538 + cmd->complete_func(cmd, ch_result, NULL);
539 + }
540 + }
541 + if (ch_result & DMOV_RSLT_FLUSH) {
542 + struct msm_dmov_errdata errdata;
543 +
544 + fill_errdata(&errdata, ch, adm);
545 + PRINT_FLOW("msm_datamover_irq_handler id %d, status %x\n", id, ch_status);
546 + PRINT_FLOW("msm_datamover_irq_handler id %d, flush, result %x, flush0 %x\n", id, ch_result, errdata.flush[0]);
547 + if (cmd) {
548 + list_del(&cmd->list);
549 + cmd->complete_func(cmd, ch_result, &errdata);
550 + }
551 + }
552 + if (ch_result & DMOV_RSLT_ERROR) {
553 + struct msm_dmov_errdata errdata;
554 +
555 + fill_errdata(&errdata, ch, adm);
556 +
557 + PRINT_ERROR("msm_datamover_irq_handler id %d, status %x\n", id, ch_status);
558 + PRINT_ERROR("msm_datamover_irq_handler id %d, error, result %x, flush0 %x\n", id, ch_result, errdata.flush[0]);
559 + if (cmd) {
560 + list_del(&cmd->list);
561 + cmd->complete_func(cmd, ch_result, &errdata);
562 + }
563 + /* this does not seem to work, once we get an error */
564 + /* the datamover will no longer accept commands */
565 + writel_relaxed(0, DMOV_REG(DMOV_FLUSH0(ch),
566 + adm));
567 + }
568 + rmb();
569 + ch_status = readl_relaxed(DMOV_REG(DMOV_STATUS(ch),
570 + adm));
571 + PRINT_FLOW("msm_datamover_irq_handler id %d, status %x\n", id, ch_status);
572 + if (ch_status & DMOV_STATUS_CMD_PTR_RDY)
573 + start_ready_cmd(ch, adm);
574 + } while (ch_status & DMOV_STATUS_RSLT_VALID);
575 + if (list_empty(&dmov_conf[adm].active_commands[ch]) &&
576 + list_empty(&dmov_conf[adm].ready_commands[ch]))
577 + dmov_conf[adm].channel_active &= ~(1U << ch);
578 + PRINT_FLOW("msm_datamover_irq_handler id %d, status %x\n", id, ch_status);
579 + }
580 + spin_unlock_irqrestore(&dmov_conf[adm].list_lock, irq_flags);
581 +
582 + if (!dmov_conf[adm].channel_active && valid) {
583 + disable_irq_nosync(dmov_conf[adm].irq);
584 + dmov_conf[adm].clk_ctl = CLK_TO_BE_DIS;
585 + schedule_delayed_work(&dmov_conf[adm].work, (HZ/10));
586 + }
587 +
588 + mutex_unlock(&dmov_conf[adm].lock);
589 +
590 + return valid ? IRQ_HANDLED : IRQ_NONE;
591 +}
592 +
593 +static int msm_dmov_suspend_late(struct device *dev)
594 +{
595 + struct platform_device *pdev = to_platform_device(dev);
596 + int adm = (pdev->id >= 0) ? pdev->id : 0;
597 + mutex_lock(&dmov_conf[adm].lock);
598 + if (dmov_conf[adm].clk_ctl == CLK_TO_BE_DIS) {
599 + BUG_ON(dmov_conf[adm].channel_active);
600 + msm_dmov_clk_off(adm);
601 + dmov_conf[adm].clk_ctl = CLK_DIS;
602 + }
603 + mutex_unlock(&dmov_conf[adm].lock);
604 + return 0;
605 +}
606 +
607 +static int msm_dmov_runtime_suspend(struct device *dev)
608 +{
609 + dev_dbg(dev, "pm_runtime: suspending...\n");
610 + return 0;
611 +}
612 +
613 +static int msm_dmov_runtime_resume(struct device *dev)
614 +{
615 + dev_dbg(dev, "pm_runtime: resuming...\n");
616 + return 0;
617 +}
618 +
619 +static int msm_dmov_runtime_idle(struct device *dev)
620 +{
621 + dev_dbg(dev, "pm_runtime: idling...\n");
622 + return 0;
623 +}
624 +
625 +static struct dev_pm_ops msm_dmov_dev_pm_ops = {
626 + .runtime_suspend = msm_dmov_runtime_suspend,
627 + .runtime_resume = msm_dmov_runtime_resume,
628 + .runtime_idle = msm_dmov_runtime_idle,
629 + .suspend = msm_dmov_suspend_late,
630 +};
631 +
632 +static int msm_dmov_init_clocks(struct platform_device *pdev)
633 +{
634 + int adm = (pdev->id >= 0) ? pdev->id : 0;
635 + int ret;
636 +
637 + dmov_conf[adm].clk = devm_clk_get(&pdev->dev, "core_clk");
638 + if (IS_ERR(dmov_conf[adm].clk)) {
639 + printk(KERN_ERR "%s: Error getting adm_clk\n", __func__);
640 + dmov_conf[adm].clk = NULL;
641 + return -ENOENT;
642 + }
643 +
644 + dmov_conf[adm].pclk = devm_clk_get(&pdev->dev, "iface_clk");
645 + if (IS_ERR(dmov_conf[adm].pclk)) {
646 + dmov_conf[adm].pclk = NULL;
647 + /* pclk not present on all SoCs, don't bail on failure */
648 + }
649 +
650 + dmov_conf[adm].ebiclk = devm_clk_get(&pdev->dev, "mem_clk");
651 + if (IS_ERR(dmov_conf[adm].ebiclk)) {
652 + dmov_conf[adm].ebiclk = NULL;
653 + /* ebiclk not present on all SoCs, don't bail on failure */
654 + } else {
655 + ret = clk_set_rate(dmov_conf[adm].ebiclk, 27000000);
656 + if (ret)
657 + return -ENOENT;
658 + }
659 +
660 + return 0;
661 +}
662 +
663 +static void config_datamover(int adm)
664 +{
665 + int i;
666 +
667 + /* Reset the ADM */
668 + reset_control_assert(dmov_conf[adm].adm_reset);
669 + reset_control_assert(dmov_conf[adm].c0_reset);
670 + reset_control_assert(dmov_conf[adm].c1_reset);
671 + reset_control_assert(dmov_conf[adm].c2_reset);
672 +
673 + reset_control_deassert(dmov_conf[adm].c2_reset);
674 + reset_control_deassert(dmov_conf[adm].c1_reset);
675 + reset_control_deassert(dmov_conf[adm].c0_reset);
676 + reset_control_deassert(dmov_conf[adm].adm_reset);
677 +
678 + for (i = 0; i < MSM_DMOV_CHANNEL_COUNT; i++) {
679 + struct msm_dmov_chan_conf *chan_conf =
680 + dmov_conf[adm].chan_conf;
681 + unsigned conf;
682 + /* Only configure scorpion channels */
683 + if (chan_conf[i].sd <= 1) {
684 + conf = readl_relaxed(DMOV_REG(DMOV_CONF(i), adm));
685 + conf |= DMOV_CONF_MPU_DISABLE |
686 + DMOV_CONF_PERM_MPU_CONF |
687 + DMOV_CONF_FLUSH_RSLT_EN |
688 + DMOV_CONF_FORCE_RSLT_EN |
689 + DMOV_CONF_IRQ_EN |
690 + DMOV_CONF_PRIORITY(chan_conf[i].priority);
691 +
692 + conf &= ~DMOV_CONF_SD(7);
693 + conf |= DMOV_CONF_SD(chan_conf[i].sd);
694 + writel_relaxed(conf, DMOV_REG(DMOV_CONF(i), adm));
695 + }
696 + }
697 +
698 + for (i = 0; i < MSM_DMOV_CRCI_COUNT; i++) {
699 + writel_relaxed(DMOV_CRCI_CTL_RST,
700 + DMOV_REG(DMOV_CRCI_CTL(i), adm));
701 + }
702 +
703 + /* NAND CRCI Enable */
704 + writel_relaxed(0, DMOV_REG(DMOV_CRCI_CTL(DMOV_NAND_CRCI_DATA), adm));
705 + writel_relaxed(0, DMOV_REG(DMOV_CRCI_CTL(DMOV_NAND_CRCI_CMD), adm));
706 +
707 + /* GSBI5 CRCI Enable */
708 + writel_relaxed(0, DMOV_REG(DMOV_CRCI_CTL(DMOV_SPI_GSBI5_RX_CRCI), adm));
709 + writel_relaxed(0, DMOV_REG(DMOV_CRCI_CTL(DMOV_SPI_GSBI5_TX_CRCI), adm));
710 +
711 + writel_relaxed(DMOV_CI_CONF_RANGE_START(0x40) | /* EBI1 */
712 + DMOV_CI_CONF_RANGE_END(0xb0) |
713 + DMOV_CI_CONF_MAX_BURST(0x8),
714 + DMOV_REG(DMOV_CI_CONF(0), adm));
715 +
716 + writel_relaxed(DMOV_CI_CONF_RANGE_START(0x2a) | /* IMEM */
717 + DMOV_CI_CONF_RANGE_END(0x2c) |
718 + DMOV_CI_CONF_MAX_BURST(0x8),
719 + DMOV_REG(DMOV_CI_CONF(1), adm));
720 +
721 + writel_relaxed(DMOV_CI_CONF_RANGE_START(0x12) | /* CPSS/SPS */
722 + DMOV_CI_CONF_RANGE_END(0x28) |
723 + DMOV_CI_CONF_MAX_BURST(0x8),
724 + DMOV_REG(DMOV_CI_CONF(2), adm));
725 +
726 + writel_relaxed(DMOV_HI_GP_CTL_CORE_CLK_LP_EN | /* will disable LP */
727 + DMOV_HI_GP_CTL_LP_CNT(0xf),
728 + DMOV_REG(DMOV_HI_GP_CTL, adm));
729 +
730 +}
731 +
732 +static int msm_dmov_probe(struct platform_device *pdev)
733 +{
734 +
735 + int adm = (pdev->id >= 0) ? pdev->id : 0;
736 + int i;
737 + int ret;
738 + struct resource *irqres =
739 + platform_get_resource(pdev, IORESOURCE_IRQ, 0);
740 + struct resource *mres =
741 + platform_get_resource(pdev, IORESOURCE_MEM, 0);
742 +
743 + dmov_conf[adm].sd=0;
744 + dmov_conf[adm].sd_size=0x800;
745 +
746 + dmov_conf[adm].irq = irqres->start;
747 +
748 + dmov_conf[adm].base = devm_ioremap_resource(&pdev->dev, mres);
749 + if (!dmov_conf[adm].base)
750 + return -ENOMEM;
751 +
752 + dmov_conf[adm].cmd_wq = alloc_ordered_workqueue("dmov%d_wq", 0, adm);
753 + if (!dmov_conf[adm].cmd_wq) {
754 + PRINT_ERROR("Couldn't allocate ADM%d workqueue.\n", adm);
755 + return -ENOMEM;
756 + }
757 +
758 + /* get resets */
759 + dmov_conf[adm].adm_reset = devm_reset_control_get(&pdev->dev, "adm");
760 + if (IS_ERR(dmov_conf[adm].adm_reset)) {
761 + dev_err(&pdev->dev, "failed to get adm reset\n");
762 + ret = PTR_ERR(dmov_conf[adm].adm_reset);
763 + goto out_wq;
764 + }
765 +
766 + dmov_conf[adm].pbus_reset = devm_reset_control_get(&pdev->dev, "pbus");
767 + if (IS_ERR(dmov_conf[adm].pbus_reset)) {
768 + dev_err(&pdev->dev, "failed to get pbus reset\n");
769 + ret = PTR_ERR(dmov_conf[adm].pbus_reset);
770 + goto out_wq;
771 + }
772 +
773 + dmov_conf[adm].c0_reset = devm_reset_control_get(&pdev->dev, "c0");
774 + if (IS_ERR(dmov_conf[adm].c0_reset)) {
775 + dev_err(&pdev->dev, "failed to get c0 reset\n");
776 + ret = PTR_ERR(dmov_conf[adm].c0_reset);
777 + goto out_wq;
778 + }
779 +
780 + dmov_conf[adm].c1_reset = devm_reset_control_get(&pdev->dev, "c1");
781 + if (IS_ERR(dmov_conf[adm].c1_reset)) {
782 + dev_err(&pdev->dev, "failed to get c1 reset\n");
783 + ret = PTR_ERR(dmov_conf[adm].c1_reset);
784 + goto out_wq;
785 + }
786 +
787 + dmov_conf[adm].c2_reset = devm_reset_control_get(&pdev->dev, "c2");
788 + if (IS_ERR(dmov_conf[adm].c2_reset)) {
789 + dev_err(&pdev->dev, "failed to get c2 reset\n");
790 + ret = PTR_ERR(dmov_conf[adm].c2_reset);
791 + goto out_wq;
792 + }
793 +
794 + ret = devm_request_threaded_irq(&pdev->dev, dmov_conf[adm].irq, NULL,
795 + msm_dmov_isr, IRQF_ONESHOT, "msmdatamover", NULL);
796 +
797 + if (ret) {
798 + PRINT_ERROR("Requesting ADM%d irq %d failed\n", adm,
799 + dmov_conf[adm].irq);
800 + goto out_wq;
801 + }
802 +
803 + disable_irq(dmov_conf[adm].irq);
804 + ret = msm_dmov_init_clocks(pdev);
805 + if (ret) {
806 + PRINT_ERROR("Requesting ADM%d clocks failed\n", adm);
807 + goto out_wq;
808 + }
809 + clk_prepare_enable(dmov_conf[adm].clk);
810 + clk_prepare_enable(dmov_conf[adm].pclk);
811 +
812 +// ret = msm_dmov_clk_on(adm);
813 +// if (ret) {
814 +// PRINT_ERROR("Enabling ADM%d clocks failed\n", adm);
815 +// goto out_wq;
816 +// }
817 +
818 + config_datamover(adm);
819 + for (i = 0; i < MSM_DMOV_CHANNEL_COUNT; i++) {
820 + INIT_LIST_HEAD(&dmov_conf[adm].staged_commands[i]);
821 + INIT_LIST_HEAD(&dmov_conf[adm].ready_commands[i]);
822 + INIT_LIST_HEAD(&dmov_conf[adm].active_commands[i]);
823 +
824 + writel_relaxed(DMOV_RSLT_CONF_IRQ_EN
825 + | DMOV_RSLT_CONF_FORCE_FLUSH_RSLT,
826 + DMOV_REG(DMOV_RSLT_CONF(i), adm));
827 + }
828 + wmb();
829 +// msm_dmov_clk_off(adm);
830 + return ret;
831 +out_wq:
832 + destroy_workqueue(dmov_conf[adm].cmd_wq);
833 + return ret;
834 +}
835 +
836 +#ifdef CONFIG_OF
837 +static const struct of_device_id adm_of_match[] = {
838 + { .compatible = "qcom,adm", },
839 + {},
840 +};
841 +MODULE_DEVICE_TABLE(of, adm_of_match);
842 +#endif
843 +
844 +static struct platform_driver msm_dmov_driver = {
845 + .probe = msm_dmov_probe,
846 + .driver = {
847 + .name = MODULE_NAME,
848 + .owner = THIS_MODULE,
849 + .of_match_table = adm_of_match,
850 + .pm = &msm_dmov_dev_pm_ops,
851 + },
852 +};
853 +
854 +/* static int __init */
855 +static int __init msm_init_datamover(void)
856 +{
857 + int ret;
858 + ret = platform_driver_register(&msm_dmov_driver);
859 + if (ret)
860 + return ret;
861 + return 0;
862 +}
863 +arch_initcall(msm_init_datamover);
864 diff --git a/drivers/mtd/nand/qcom_adm_dma.h b/drivers/mtd/nand/qcom_adm_dma.h
865 new file mode 100644
866 index 0000000..1014d57
867 --- /dev/null
868 +++ b/drivers/mtd/nand/qcom_adm_dma.h
869 @@ -0,0 +1,268 @@
870 +/* * Copyright (c) 2012 The Linux Foundation. All rights reserved.* */
871 +/* linux/include/asm-arm/arch-msm/dma.h
872 + *
873 + * Copyright (C) 2007 Google, Inc.
874 + * Copyright (c) 2008-2012, The Linux Foundation. All rights reserved.
875 + *
876 + * This software is licensed under the terms of the GNU General Public
877 + * License version 2, as published by the Free Software Foundation, and
878 + * may be copied, distributed, and modified under those terms.
879 + *
880 + * This program is distributed in the hope that it will be useful,
881 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
882 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
883 + * GNU General Public License for more details.
884 + *
885 + */
886 +
887 +#ifndef __ASM_ARCH_MSM_DMA_H
888 +#define __ASM_ARCH_MSM_DMA_H
889 +#include <linux/list.h>
890 +
891 +struct msm_dmov_errdata {
892 + uint32_t flush[6];
893 +};
894 +
895 +struct msm_dmov_cmd {
896 + struct list_head list;
897 + unsigned int cmdptr;
898 + void (*complete_func)(struct msm_dmov_cmd *cmd,
899 + unsigned int result,
900 + struct msm_dmov_errdata *err);
901 + void (*exec_func)(struct msm_dmov_cmd *cmd);
902 + struct work_struct work;
903 + unsigned id; /* For internal use */
904 + void *user; /* Pointer for caller's reference */
905 + u8 toflush;
906 +};
907 +
908 +struct msm_dmov_pdata {
909 + int sd;
910 + size_t sd_size;
911 +};
912 +
913 +void msm_dmov_enqueue_cmd(unsigned id, struct msm_dmov_cmd *cmd);
914 +void msm_dmov_enqueue_cmd_ext(unsigned id, struct msm_dmov_cmd *cmd);
915 +void msm_dmov_flush(unsigned int id, int graceful);
916 +int msm_dmov_exec_cmd(unsigned id, unsigned int cmdptr);
917 +
918 +#define DMOV_CRCIS_PER_CONF 10
919 +
920 +#define DMOV_ADDR(off, ch) ((off) + ((ch) << 2))
921 +
922 +#define DMOV_CMD_PTR(ch) DMOV_ADDR(0x000, ch)
923 +#define DMOV_CMD_LIST (0 << 29) /* does not work */
924 +#define DMOV_CMD_PTR_LIST (1 << 29) /* works */
925 +#define DMOV_CMD_INPUT_CFG (2 << 29) /* untested */
926 +#define DMOV_CMD_OUTPUT_CFG (3 << 29) /* untested */
927 +#define DMOV_CMD_ADDR(addr) ((addr) >> 3)
928 +
929 +#define DMOV_RSLT(ch) DMOV_ADDR(0x040, ch)
930 +#define DMOV_RSLT_VALID (1 << 31) /* 0 == host has emptied result fifo */
931 +#define DMOV_RSLT_ERROR (1 << 3)
932 +#define DMOV_RSLT_FLUSH (1 << 2)
933 +#define DMOV_RSLT_DONE (1 << 1) /* top pointer done */
934 +#define DMOV_RSLT_USER (1 << 0) /* command with FR force result */
935 +
936 +#define DMOV_FLUSH0(ch) DMOV_ADDR(0x080, ch)
937 +#define DMOV_FLUSH1(ch) DMOV_ADDR(0x0C0, ch)
938 +#define DMOV_FLUSH2(ch) DMOV_ADDR(0x100, ch)
939 +#define DMOV_FLUSH3(ch) DMOV_ADDR(0x140, ch)
940 +#define DMOV_FLUSH4(ch) DMOV_ADDR(0x180, ch)
941 +#define DMOV_FLUSH5(ch) DMOV_ADDR(0x1C0, ch)
942 +#define DMOV_FLUSH_TYPE (1 << 31)
943 +
944 +#define DMOV_STATUS(ch) DMOV_ADDR(0x200, ch)
945 +#define DMOV_STATUS_RSLT_COUNT(n) (((n) >> 29))
946 +#define DMOV_STATUS_CMD_COUNT(n) (((n) >> 27) & 3)
947 +#define DMOV_STATUS_RSLT_VALID (1 << 1)
948 +#define DMOV_STATUS_CMD_PTR_RDY (1 << 0)
949 +
950 +#define DMOV_CONF(ch) DMOV_ADDR(0x240, ch)
951 +#define DMOV_CONF_SD(sd) (((sd & 4) << 11) | ((sd & 3) << 4))
952 +#define DMOV_CONF_OTHER_CH_BLK_MASK(m) ((m << 0x10) & 0xffff0000)
953 +#define DMOV_CONF_SHADOW_EN (1 << 12)
954 +#define DMOV_CONF_MPU_DISABLE (1 << 11)
955 +#define DMOV_CONF_PERM_MPU_CONF (1 << 9)
956 +#define DMOV_CONF_FLUSH_RSLT_EN (1 << 8)
957 +#define DMOV_CONF_IRQ_EN (1 << 6)
958 +#define DMOV_CONF_FORCE_RSLT_EN (1 << 7)
959 +#define DMOV_CONF_PRIORITY(n) (n << 0)
960 +
961 +#define DMOV_DBG_ERR(ci) DMOV_ADDR(0x280, ci)
962 +
963 +#define DMOV_RSLT_CONF(ch) DMOV_ADDR(0x300, ch)
964 +#define DMOV_RSLT_CONF_FORCE_TOP_PTR_RSLT (1 << 2)
965 +#define DMOV_RSLT_CONF_FORCE_FLUSH_RSLT (1 << 1)
966 +#define DMOV_RSLT_CONF_IRQ_EN (1 << 0)
967 +
968 +#define DMOV_ISR DMOV_ADDR(0x380, 0)
969 +
970 +#define DMOV_CI_CONF(ci) DMOV_ADDR(0x390, ci)
971 +#define DMOV_CI_CONF_RANGE_END(n) ((n) << 24)
972 +#define DMOV_CI_CONF_RANGE_START(n) ((n) << 16)
973 +#define DMOV_CI_CONF_MAX_BURST(n) ((n) << 0)
974 +
975 +#define DMOV_CI_DBG_ERR(ci) DMOV_ADDR(0x3B0, ci)
976 +
977 +#define DMOV_CRCI_CONF0 DMOV_ADDR(0x3D0, 0)
978 +#define DMOV_CRCI_CONF0_CRCI9_SD (2 << 0x1b)
979 +
980 +#define DMOV_CRCI_CONF1 DMOV_ADDR(0x3D4, 0)
981 +#define DMOV_CRCI_CONF0_SD(crci, sd) (sd << (crci*3))
982 +#define DMOV_CRCI_CONF1_SD(crci, sd) (sd << ((crci-DMOV_CRCIS_PER_CONF)*3))
983 +
984 +#define DMOV_HI_GP_CTL DMOV_ADDR(0x3D8, 0)
985 +#define DMOV_HI_GP_CTL_CORE_CLK_LP_EN (1 << 12)
986 +#define DMOV_HI_GP_CTL_LP_CNT(x) (((x) & 0xf) << 8)
987 +#define DMOV_HI_GP_CTL_CI3_CLK_LP_EN (1 << 7)
988 +#define DMOV_HI_GP_CTL_CI2_CLK_LP_EN (1 << 6)
989 +#define DMOV_HI_GP_CTL_CI1_CLK_LP_EN (1 << 5)
990 +#define DMOV_HI_GP_CTL_CI0_CLK_LP_EN (1 << 4)
991 +
992 +#define DMOV_CRCI_CTL(crci) DMOV_ADDR(0x400, crci)
993 +#define DMOV_CRCI_CTL_BLK_SZ(n) ((n) << 0)
994 +#define DMOV_CRCI_CTL_RST (1 << 17)
995 +#define DMOV_CRCI_MUX (1 << 18)
996 +
997 +/* channel assignments */
998 +
999 +/*
1000 + * Format of CRCI numbers: crci number + (muxsel << 4)
1001 + */
1002 +
1003 +#define DMOV_GP_CHAN 9
1004 +
1005 +#define DMOV_CE_IN_CHAN 0
1006 +#define DMOV_CE_IN_CRCI 2
1007 +
1008 +#define DMOV_CE_OUT_CHAN 1
1009 +#define DMOV_CE_OUT_CRCI 3
1010 +
1011 +#define DMOV_TSIF_CHAN 2
1012 +#define DMOV_TSIF_CRCI 11
1013 +
1014 +#define DMOV_HSUART_GSBI6_TX_CHAN 7
1015 +#define DMOV_HSUART_GSBI6_TX_CRCI 6
1016 +
1017 +#define DMOV_HSUART_GSBI6_RX_CHAN 8
1018 +#define DMOV_HSUART_GSBI6_RX_CRCI 11
1019 +
1020 +#define DMOV_HSUART_GSBI8_TX_CHAN 7
1021 +#define DMOV_HSUART_GSBI8_TX_CRCI 10
1022 +
1023 +#define DMOV_HSUART_GSBI8_RX_CHAN 8
1024 +#define DMOV_HSUART_GSBI8_RX_CRCI 9
1025 +
1026 +#define DMOV_HSUART_GSBI9_TX_CHAN 4
1027 +#define DMOV_HSUART_GSBI9_TX_CRCI 13
1028 +
1029 +#define DMOV_HSUART_GSBI9_RX_CHAN 3
1030 +#define DMOV_HSUART_GSBI9_RX_CRCI 12
1031 +
1032 +#define DMOV_NAND_CHAN 3
1033 +#define DMOV_NAND_CRCI_CMD 15
1034 +#define DMOV_NAND_CRCI_DATA 3
1035 +
1036 +#define DMOV_SPI_GSBI5_RX_CRCI 9
1037 +#define DMOV_SPI_GSBI5_TX_CRCI 10
1038 +#define DMOV_SPI_GSBI5_RX_CHAN 6
1039 +#define DMOV_SPI_GSBI5_TX_CHAN 5
1040 +
1041 +/* channels for APQ8064 */
1042 +#define DMOV8064_CE_IN_CHAN 0
1043 +#define DMOV8064_CE_IN_CRCI 14
1044 +
1045 +#define DMOV8064_CE_OUT_CHAN 1
1046 +#define DMOV8064_CE_OUT_CRCI 15
1047 +
1048 +#define DMOV8064_TSIF_CHAN 2
1049 +#define DMOV8064_TSIF_CRCI 1
1050 +
1051 +/* channels for APQ8064 SGLTE */
1052 +#define DMOV_APQ8064_HSUART_GSBI4_TX_CHAN 11
1053 +#define DMOV_APQ8064_HSUART_GSBI4_TX_CRCI 8
1054 +
1055 +#define DMOV_APQ8064_HSUART_GSBI4_RX_CHAN 10
1056 +#define DMOV_APQ8064_HSUART_GSBI4_RX_CRCI 7
1057 +
1058 +/* channels for MPQ8064 */
1059 +#define DMOV_MPQ8064_HSUART_GSBI6_TX_CHAN 7
1060 +#define DMOV_MPQ8064_HSUART_GSBI6_TX_CRCI 6
1061 +
1062 +#define DMOV_MPQ8064_HSUART_GSBI6_RX_CHAN 6
1063 +#define DMOV_MPQ8064_HSUART_GSBI6_RX_CRCI 11
1064 +
1065 +#define DMOV_IPQ806X_HSUART_GSBI6_TX_CHAN DMOV_MPQ8064_HSUART_GSBI6_TX_CHAN
1066 +#define DMOV_IPQ806X_HSUART_GSBI6_TX_CRCI DMOV_MPQ8064_HSUART_GSBI6_TX_CRCI
1067 +
1068 +#define DMOV_IPQ806X_HSUART_GSBI6_RX_CHAN DMOV_MPQ8064_HSUART_GSBI6_RX_CHAN
1069 +#define DMOV_IPQ806X_HSUART_GSBI6_RX_CRCI DMOV_MPQ8064_HSUART_GSBI6_RX_CRCI
1070 +
1071 +/* no client rate control ifc (eg, ram) */
1072 +#define DMOV_NONE_CRCI 0
1073 +
1074 +
1075 +/* If the CMD_PTR register has CMD_PTR_LIST selected, the data mover
1076 + * is going to walk a list of 32bit pointers as described below. Each
1077 + * pointer points to an *array* of dmov_s, etc. structs. The last pointer
1078 + * in the list is marked with CMD_PTR_LP. The last struct in each array
1079 + * is marked with CMD_LC (see below).
1080 + */
1081 +#define CMD_PTR_ADDR(addr) ((addr) >> 3)
1082 +#define CMD_PTR_LP (1 << 31) /* last pointer */
1083 +#define CMD_PTR_PT (3 << 29) /* ? */
1084 +
1085 +/* Single Item Mode */
1086 +typedef struct {
1087 + unsigned cmd;
1088 + unsigned src;
1089 + unsigned dst;
1090 + unsigned len;
1091 +} dmov_s;
1092 +
1093 +/* Scatter/Gather Mode */
1094 +typedef struct {
1095 + unsigned cmd;
1096 + unsigned src_dscr;
1097 + unsigned dst_dscr;
1098 + unsigned _reserved;
1099 +} dmov_sg;
1100 +
1101 +/* Box mode */
1102 +typedef struct {
1103 + uint32_t cmd;
1104 + uint32_t src_row_addr;
1105 + uint32_t dst_row_addr;
1106 + uint32_t src_dst_len;
1107 + uint32_t num_rows;
1108 + uint32_t row_offset;
1109 +} dmov_box;
1110 +
1111 +/* bits for the cmd field of the above structures */
1112 +
1113 +#define CMD_LC (1 << 31) /* last command */
1114 +#define CMD_FR (1 << 22) /* force result -- does not work? */
1115 +#define CMD_OCU (1 << 21) /* other channel unblock */
1116 +#define CMD_OCB (1 << 20) /* other channel block */
1117 +#define CMD_TCB (1 << 19) /* ? */
1118 +#define CMD_DAH (1 << 18) /* destination address hold -- does not work?*/
1119 +#define CMD_SAH (1 << 17) /* source address hold -- does not work? */
1120 +
1121 +#define CMD_MODE_SINGLE (0 << 0) /* dmov_s structure used */
1122 +#define CMD_MODE_SG (1 << 0) /* untested */
1123 +#define CMD_MODE_IND_SG (2 << 0) /* untested */
1124 +#define CMD_MODE_BOX (3 << 0) /* untested */
1125 +
1126 +#define CMD_DST_SWAP_BYTES (1 << 14) /* exchange each byte n with byte n+1 */
1127 +#define CMD_DST_SWAP_SHORTS (1 << 15) /* exchange each short n with short n+1 */
1128 +#define CMD_DST_SWAP_WORDS (1 << 16) /* exchange each word n with word n+1 */
1129 +
1130 +#define CMD_SRC_SWAP_BYTES (1 << 11) /* exchange each byte n with byte n+1 */
1131 +#define CMD_SRC_SWAP_SHORTS (1 << 12) /* exchange each short n with short n+1 */
1132 +#define CMD_SRC_SWAP_WORDS (1 << 13) /* exchange each word n with word n+1 */
1133 +
1134 +#define CMD_DST_CRCI(n) (((n) & 15) << 7)
1135 +#define CMD_SRC_CRCI(n) (((n) & 15) << 3)
1136 +
1137 +#endif
1138 diff --git a/drivers/mtd/nand/qcom_nand.c b/drivers/mtd/nand/qcom_nand.c
1139 new file mode 100644
1140 index 0000000..9314132
1141 --- /dev/null
1142 +++ b/drivers/mtd/nand/qcom_nand.c
1143 @@ -0,0 +1,7455 @@
1144 +/*
1145 + * Copyright (C) 2007 Google, Inc.
1146 + * Copyright (c) 2008-2012, The Linux Foundation. All rights reserved.
1147 + *
1148 + * This software is licensed under the terms of the GNU General Public
1149 + * License version 2, as published by the Free Software Foundation, and
1150 + * may be copied, distributed, and modified under those terms.
1151 + *
1152 + * This program is distributed in the hope that it will be useful,
1153 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
1154 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1155 + * GNU General Public License for more details.
1156 + *
1157 + */
1158 +
1159 +#include <linux/slab.h>
1160 +#include <linux/kernel.h>
1161 +#include <linux/module.h>
1162 +#include <linux/mtd/mtd.h>
1163 +#include <linux/mtd/nand.h>
1164 +#include <linux/mtd/partitions.h>
1165 +#include <linux/platform_device.h>
1166 +#include <linux/sched.h>
1167 +#include <linux/dma-mapping.h>
1168 +#include <linux/io.h>
1169 +#include <linux/crc16.h>
1170 +#include <linux/bitrev.h>
1171 +#include <linux/clk.h>
1172 +
1173 +#include <asm/dma.h>
1174 +#include <asm/mach/flash.h>
1175 +
1176 +#include "qcom_adm_dma.h"
1177 +
1178 +#include "qcom_nand.h"
1179 +unsigned long msm_nand_phys = 0;
1180 +unsigned long msm_nandc01_phys = 0;
1181 +unsigned long msm_nandc10_phys = 0;
1182 +unsigned long msm_nandc11_phys = 0;
1183 +unsigned long ebi2_register_base = 0;
1184 +static uint32_t dual_nand_ctlr_present;
1185 +static uint32_t interleave_enable;
1186 +static uint32_t enable_bch_ecc;
1187 +static uint32_t boot_layout;
1188 +
1189 +
1190 +#define MSM_NAND_DMA_BUFFER_SIZE SZ_8K
1191 +#define MSM_NAND_DMA_BUFFER_SLOTS \
1192 + (MSM_NAND_DMA_BUFFER_SIZE / (sizeof(((atomic_t *)0)->counter) * 8))
1193 +
1194 +#define MSM_NAND_CFG0_RAW_ONFI_IDENTIFIER 0x88000800
1195 +#define MSM_NAND_CFG0_RAW_ONFI_PARAM_INFO 0x88040000
1196 +#define MSM_NAND_CFG1_RAW_ONFI_IDENTIFIER 0x0005045d
1197 +#define MSM_NAND_CFG1_RAW_ONFI_PARAM_INFO 0x0005045d
1198 +
1199 +#define ONFI_IDENTIFIER_LENGTH 0x0004
1200 +#define ONFI_PARAM_INFO_LENGTH 0x0200
1201 +#define ONFI_PARAM_PAGE_LENGTH 0x0100
1202 +
1203 +#define ONFI_PARAMETER_PAGE_SIGNATURE 0x49464E4F
1204 +
1205 +#define FLASH_READ_ONFI_IDENTIFIER_COMMAND 0x90
1206 +#define FLASH_READ_ONFI_IDENTIFIER_ADDRESS 0x20
1207 +#define FLASH_READ_ONFI_PARAMETERS_COMMAND 0xEC
1208 +#define FLASH_READ_ONFI_PARAMETERS_ADDRESS 0x00
1209 +
1210 +#define UD_SIZE_BYTES_MASK (0x3FF << 9)
1211 +#define SPARE_SIZE_BYTES_MASK (0xF << 23)
1212 +#define ECC_NUM_DATA_BYTES_MASK (0x3FF << 16)
1213 +
1214 +#define VERBOSE 0
1215 +
1216 +struct msm_nand_chip {
1217 + struct device *dev;
1218 + wait_queue_head_t wait_queue;
1219 + atomic_t dma_buffer_busy;
1220 + unsigned dma_channel;
1221 + uint8_t *dma_buffer;
1222 + dma_addr_t dma_addr;
1223 + unsigned CFG0, CFG1, CFG0_RAW, CFG1_RAW;
1224 + uint32_t ecc_buf_cfg;
1225 + uint32_t ecc_bch_cfg;
1226 + uint32_t ecc_parity_bytes;
1227 + unsigned cw_size;
1228 + unsigned int uncorrectable_bit_mask;
1229 + unsigned int num_err_mask;
1230 +};
1231 +
1232 +#define CFG1_WIDE_FLASH (1U << 1)
1233 +
1234 +/* TODO: move datamover code out */
1235 +
1236 +#define SRC_CRCI_NAND_CMD CMD_SRC_CRCI(DMOV_NAND_CRCI_CMD)
1237 +#define DST_CRCI_NAND_CMD CMD_DST_CRCI(DMOV_NAND_CRCI_CMD)
1238 +#define SRC_CRCI_NAND_DATA CMD_SRC_CRCI(DMOV_NAND_CRCI_DATA)
1239 +#define DST_CRCI_NAND_DATA CMD_DST_CRCI(DMOV_NAND_CRCI_DATA)
1240 +
1241 +#define msm_virt_to_dma(chip, vaddr) \
1242 + ((chip)->dma_addr + \
1243 + ((uint8_t *)(vaddr) - (chip)->dma_buffer))
1244 +
1245 +/**
1246 + * msm_nand_oob_64 - oob info for 2KB page
1247 + */
1248 +static struct nand_ecclayout msm_nand_oob_64 = {
1249 + .eccbytes = 40,
1250 + .eccpos = {
1251 + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
1252 + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
1253 + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
1254 + 46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
1255 + },
1256 + .oobavail = 16,
1257 + .oobfree = {
1258 + {30, 16},
1259 + }
1260 +};
1261 +
1262 +/**
1263 + * msm_nand_oob_128 - oob info for 4KB page
1264 + */
1265 +static struct nand_ecclayout msm_nand_oob_128 = {
1266 + .eccbytes = 80,
1267 + .eccpos = {
1268 + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
1269 + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
1270 + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
1271 + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
1272 + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
1273 + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
1274 + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
1275 + 102, 103, 104, 105, 106, 107, 108, 109, 110, 111,
1276 + },
1277 + .oobavail = 32,
1278 + .oobfree = {
1279 + {70, 32},
1280 + }
1281 +};
1282 +
1283 +/**
1284 + * msm_nand_oob_224 - oob info for 4KB page 8Bit interface
1285 + */
1286 +static struct nand_ecclayout msm_nand_oob_224_x8 = {
1287 + .eccbytes = 104,
1288 + .eccpos = {
1289 + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
1290 + 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
1291 + 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38,
1292 + 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51,
1293 + 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64,
1294 + 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77,
1295 + 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90,
1296 + 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135,
1297 + },
1298 + .oobavail = 32,
1299 + .oobfree = {
1300 + {91, 32},
1301 + }
1302 +};
1303 +
1304 +/**
1305 + * msm_nand_oob_224 - oob info for 4KB page 16Bit interface
1306 + */
1307 +static struct nand_ecclayout msm_nand_oob_224_x16 = {
1308 + .eccbytes = 112,
1309 + .eccpos = {
1310 + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
1311 + 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27,
1312 + 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
1313 + 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
1314 + 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
1315 + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83,
1316 + 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97,
1317 + 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143,
1318 + },
1319 + .oobavail = 32,
1320 + .oobfree = {
1321 + {98, 32},
1322 + }
1323 +};
1324 +
1325 +/**
1326 + * msm_nand_oob_256 - oob info for 8KB page
1327 + */
1328 +static struct nand_ecclayout msm_nand_oob_256 = {
1329 + .eccbytes = 160,
1330 + .eccpos = {
1331 + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
1332 + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
1333 + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
1334 + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
1335 + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
1336 + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
1337 + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
1338 + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
1339 + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
1340 + 90, 91, 92, 93, 94, 96, 97, 98 , 99, 100,
1341 + 101, 102, 103, 104, 105, 106, 107, 108, 109, 110,
1342 + 111, 112, 113, 114, 115, 116, 117, 118, 119, 120,
1343 + 121, 122, 123, 124, 125, 126, 127, 128, 129, 130,
1344 + 131, 132, 133, 134, 135, 136, 137, 138, 139, 140,
1345 + 141, 142, 143, 144, 145, 146, 147, 148, 149, 150,
1346 + 215, 216, 217, 218, 219, 220, 221, 222, 223, 224,
1347 + },
1348 + .oobavail = 64,
1349 + .oobfree = {
1350 + {151, 64},
1351 + }
1352 +};
1353 +
1354 +/**
1355 + * msm_onenand_oob_64 - oob info for large (2KB) page
1356 + */
1357 +static struct nand_ecclayout msm_onenand_oob_64 = {
1358 + .eccbytes = 20,
1359 + .eccpos = {
1360 + 8, 9, 10, 11, 12,
1361 + 24, 25, 26, 27, 28,
1362 + 40, 41, 42, 43, 44,
1363 + 56, 57, 58, 59, 60,
1364 + },
1365 + .oobavail = 20,
1366 + .oobfree = {
1367 + {2, 3}, {14, 2}, {18, 3}, {30, 2},
1368 + {34, 3}, {46, 2}, {50, 3}, {62, 2}
1369 + }
1370 +};
1371 +
1372 +static void *msm_nand_get_dma_buffer(struct msm_nand_chip *chip, size_t size)
1373 +{
1374 + unsigned int bitmask, free_bitmask, old_bitmask;
1375 + unsigned int need_mask, current_need_mask;
1376 + int free_index;
1377 +
1378 + need_mask = (1UL << DIV_ROUND_UP(size, MSM_NAND_DMA_BUFFER_SLOTS)) - 1;
1379 + bitmask = atomic_read(&chip->dma_buffer_busy);
1380 + free_bitmask = ~bitmask;
1381 + while (free_bitmask) {
1382 + free_index = __ffs(free_bitmask);
1383 + current_need_mask = need_mask << free_index;
1384 +
1385 + if (size + free_index * MSM_NAND_DMA_BUFFER_SLOTS >=
1386 + MSM_NAND_DMA_BUFFER_SIZE)
1387 + return NULL;
1388 +
1389 + if ((bitmask & current_need_mask) == 0) {
1390 + old_bitmask =
1391 + atomic_cmpxchg(&chip->dma_buffer_busy,
1392 + bitmask,
1393 + bitmask | current_need_mask);
1394 + if (old_bitmask == bitmask)
1395 + return chip->dma_buffer +
1396 + free_index * MSM_NAND_DMA_BUFFER_SLOTS;
1397 + free_bitmask = 0; /* force return */
1398 + }
1399 + /* current free range was too small, clear all free bits */
1400 + /* below the top busy bit within current_need_mask */
1401 + free_bitmask &=
1402 + ~(~0U >> (32 - fls(bitmask & current_need_mask)));
1403 + }
1404 +
1405 + return NULL;
1406 +}
1407 +
1408 +static void msm_nand_release_dma_buffer(struct msm_nand_chip *chip,
1409 + void *buffer, size_t size)
1410 +{
1411 + int index;
1412 + unsigned int used_mask;
1413 +
1414 + used_mask = (1UL << DIV_ROUND_UP(size, MSM_NAND_DMA_BUFFER_SLOTS)) - 1;
1415 + index = ((uint8_t *)buffer - chip->dma_buffer) /
1416 + MSM_NAND_DMA_BUFFER_SLOTS;
1417 + atomic_sub(used_mask << index, &chip->dma_buffer_busy);
1418 +
1419 + wake_up(&chip->wait_queue);
1420 +}
1421 +
1422 +
1423 +unsigned flash_rd_reg(struct msm_nand_chip *chip, unsigned addr)
1424 +{
1425 + struct {
1426 + dmov_s cmd;
1427 + unsigned cmdptr;
1428 + unsigned data;
1429 + } *dma_buffer;
1430 + unsigned rv;
1431 +
1432 + wait_event(chip->wait_queue,
1433 + (dma_buffer = msm_nand_get_dma_buffer(
1434 + chip, sizeof(*dma_buffer))));
1435 +
1436 + dma_buffer->cmd.cmd = CMD_LC | CMD_OCB | CMD_OCU;
1437 + dma_buffer->cmd.src = addr;
1438 + dma_buffer->cmd.dst = msm_virt_to_dma(chip, &dma_buffer->data);
1439 + dma_buffer->cmd.len = 4;
1440 +
1441 + dma_buffer->cmdptr =
1442 + (msm_virt_to_dma(chip, &dma_buffer->cmd) >> 3) | CMD_PTR_LP;
1443 + dma_buffer->data = 0xeeeeeeee;
1444 +
1445 + mb();
1446 + msm_dmov_exec_cmd(
1447 + chip->dma_channel, DMOV_CMD_PTR_LIST |
1448 + DMOV_CMD_ADDR(msm_virt_to_dma(chip, &dma_buffer->cmdptr)));
1449 + mb();
1450 +
1451 + rv = dma_buffer->data;
1452 +
1453 + msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
1454 +
1455 + return rv;
1456 +}
1457 +
1458 +void flash_wr_reg(struct msm_nand_chip *chip, unsigned addr, unsigned val)
1459 +{
1460 + struct {
1461 + dmov_s cmd;
1462 + unsigned cmdptr;
1463 + unsigned data;
1464 + } *dma_buffer;
1465 +
1466 + wait_event(chip->wait_queue,
1467 + (dma_buffer = msm_nand_get_dma_buffer(
1468 + chip, sizeof(*dma_buffer))));
1469 +
1470 + dma_buffer->cmd.cmd = CMD_LC | CMD_OCB | CMD_OCU;
1471 + dma_buffer->cmd.src = msm_virt_to_dma(chip, &dma_buffer->data);
1472 + dma_buffer->cmd.dst = addr;
1473 + dma_buffer->cmd.len = 4;
1474 +
1475 + dma_buffer->cmdptr =
1476 + (msm_virt_to_dma(chip, &dma_buffer->cmd) >> 3) | CMD_PTR_LP;
1477 + dma_buffer->data = val;
1478 +
1479 + mb();
1480 + msm_dmov_exec_cmd(
1481 + chip->dma_channel, DMOV_CMD_PTR_LIST |
1482 + DMOV_CMD_ADDR(msm_virt_to_dma(chip, &dma_buffer->cmdptr)));
1483 + mb();
1484 +
1485 + msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
1486 +}
1487 +
1488 +/*
1489 + * Allocates a bounce buffer, and stores the buffer address in
1490 + * variable pointed to by bounce_buf. bounce_buf should point to a
1491 + * stack variable, to avoid SMP issues.
1492 + */
1493 +static int msm_nand_alloc_bounce(void *addr, size_t size,
1494 + enum dma_data_direction dir,
1495 + uint8_t **bounce_buf)
1496 +{
1497 + if (bounce_buf == NULL) {
1498 + printk(KERN_ERR "not allocating bounce buffer\n");
1499 + return -EINVAL;
1500 + }
1501 +
1502 + *bounce_buf = kmalloc(size, GFP_KERNEL | GFP_NOFS | GFP_DMA);
1503 + if (*bounce_buf == NULL) {
1504 + printk(KERN_ERR "error alloc bounce buffer %zu\n", size);
1505 + return -ENOMEM;
1506 + }
1507 +
1508 + if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
1509 + memcpy(*bounce_buf, addr, size);
1510 +
1511 + return 0;
1512 +}
1513 +
1514 +/*
1515 + * Maps the user buffer for DMA. If the buffer is vmalloced and the
1516 + * buffer crosses a page boundary, then we kmalloc a bounce buffer and
1517 + * copy the data into it. The bounce buffer is stored in the variable
1518 + * pointed to by bounce_buf, for freeing up later on. The bounce_buf
1519 + * should point to a stack variable, to avoid SMP issues.
1520 + */
1521 +static dma_addr_t
1522 +msm_nand_dma_map(struct device *dev, void *addr, size_t size,
1523 + enum dma_data_direction dir, uint8_t **bounce_buf)
1524 +{
1525 + int ret;
1526 + struct page *page;
1527 + unsigned long offset = (unsigned long)addr & ~PAGE_MASK;
1528 +
1529 + if (virt_addr_valid(addr)) {
1530 + page = virt_to_page(addr);
1531 + } else {
1532 + if (size + offset > PAGE_SIZE) {
1533 + ret = msm_nand_alloc_bounce(addr, size, dir, bounce_buf);
1534 + if (ret < 0)
1535 + return DMA_ERROR_CODE;
1536 +
1537 + offset = (unsigned long)*bounce_buf & ~PAGE_MASK;
1538 + page = virt_to_page(*bounce_buf);
1539 + } else {
1540 + page = vmalloc_to_page(addr);
1541 + }
1542 + }
1543 +
1544 + return dma_map_page(dev, page, offset, size, dir);
1545 +}
1546 +
1547 +static void msm_nand_dma_unmap(struct device *dev, dma_addr_t addr, size_t size,
1548 + enum dma_data_direction dir,
1549 + void *orig_buf, void *bounce_buf)
1550 +{
1551 + dma_unmap_page(dev, addr, size, dir);
1552 +
1553 + if (bounce_buf != NULL) {
1554 + if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
1555 + memcpy(orig_buf, bounce_buf, size);
1556 +
1557 + kfree(bounce_buf);
1558 + }
1559 +}
1560 +
1561 +uint32_t flash_read_id(struct msm_nand_chip *chip)
1562 +{
1563 + struct {
1564 + dmov_s cmd[9];
1565 + unsigned cmdptr;
1566 + unsigned data[7];
1567 + } *dma_buffer;
1568 + uint32_t rv;
1569 + dmov_s *cmd;
1570 +
1571 + wait_event(chip->wait_queue, (dma_buffer = msm_nand_get_dma_buffer
1572 + (chip, sizeof(*dma_buffer))));
1573 +
1574 + dma_buffer->data[0] = 0 | 4;
1575 + dma_buffer->data[1] = MSM_NAND_CMD_FETCH_ID;
1576 + dma_buffer->data[2] = 1;
1577 + dma_buffer->data[3] = 0xeeeeeeee;
1578 + dma_buffer->data[4] = 0xeeeeeeee;
1579 + dma_buffer->data[5] = flash_rd_reg(chip, MSM_NAND_SFLASHC_BURST_CFG);
1580 + dma_buffer->data[6] = 0x00000000;
1581 + BUILD_BUG_ON(6 != ARRAY_SIZE(dma_buffer->data) - 1);
1582 +
1583 + cmd = dma_buffer->cmd;
1584 +
1585 + cmd->cmd = 0 | CMD_OCB;
1586 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data[6]);
1587 + cmd->dst = MSM_NAND_SFLASHC_BURST_CFG;
1588 + cmd->len = 4;
1589 + cmd++;
1590 +
1591 + cmd->cmd = 0;
1592 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data[6]);
1593 + cmd->dst = MSM_NAND_ADDR0;
1594 + cmd->len = 4;
1595 + cmd++;
1596 +
1597 + cmd->cmd = 0;
1598 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data[6]);
1599 + cmd->dst = MSM_NAND_ADDR1;
1600 + cmd->len = 4;
1601 + cmd++;
1602 +
1603 + cmd->cmd = 0;
1604 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data[0]);
1605 + cmd->dst = MSM_NAND_FLASH_CHIP_SELECT;
1606 + cmd->len = 4;
1607 + cmd++;
1608 +
1609 + cmd->cmd = DST_CRCI_NAND_CMD;
1610 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data[1]);
1611 + cmd->dst = MSM_NAND_FLASH_CMD;
1612 + cmd->len = 4;
1613 + cmd++;
1614 +
1615 + cmd->cmd = 0;
1616 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data[2]);
1617 + cmd->dst = MSM_NAND_EXEC_CMD;
1618 + cmd->len = 4;
1619 + cmd++;
1620 +
1621 + cmd->cmd = SRC_CRCI_NAND_DATA;
1622 + cmd->src = MSM_NAND_FLASH_STATUS;
1623 + cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data[3]);
1624 + cmd->len = 4;
1625 + cmd++;
1626 +
1627 + cmd->cmd = 0;
1628 + cmd->src = MSM_NAND_READ_ID;
1629 + cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data[4]);
1630 + cmd->len = 4;
1631 + cmd++;
1632 +
1633 + cmd->cmd = CMD_OCU | CMD_LC;
1634 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data[5]);
1635 + cmd->dst = MSM_NAND_SFLASHC_BURST_CFG;
1636 + cmd->len = 4;
1637 + cmd++;
1638 +
1639 + BUILD_BUG_ON(8 != ARRAY_SIZE(dma_buffer->cmd) - 1);
1640 +
1641 + dma_buffer->cmdptr = (msm_virt_to_dma(chip, dma_buffer->cmd) >> 3
1642 + ) | CMD_PTR_LP;
1643 +
1644 + mb();
1645 + msm_dmov_exec_cmd(chip->dma_channel, DMOV_CMD_PTR_LIST |
1646 + DMOV_CMD_ADDR(msm_virt_to_dma(chip, &dma_buffer->cmdptr)));
1647 + mb();
1648 +
1649 + pr_info("status: %x\n", dma_buffer->data[3]);
1650 + pr_info("nandid: %x maker %02x device %02x\n",
1651 + dma_buffer->data[4], dma_buffer->data[4] & 0xff,
1652 + (dma_buffer->data[4] >> 8) & 0xff);
1653 + rv = dma_buffer->data[4];
1654 + msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
1655 + return rv;
1656 +}
1657 +
1658 +struct flash_identification {
1659 + uint32_t flash_id;
1660 + uint32_t density;
1661 + uint32_t widebus;
1662 + uint32_t pagesize;
1663 + uint32_t blksize;
1664 + uint32_t oobsize;
1665 + uint32_t ecc_correctability;
1666 +} supported_flash;
1667 +
1668 +uint16_t flash_onfi_crc_check(uint8_t *buffer, uint16_t count)
1669 +{
1670 + int i;
1671 + uint16_t result;
1672 +
1673 + for (i = 0; i < count; i++)
1674 + buffer[i] = bitrev8(buffer[i]);
1675 +
1676 + result = bitrev16(crc16(bitrev16(0x4f4e), buffer, count));
1677 +
1678 + for (i = 0; i < count; i++)
1679 + buffer[i] = bitrev8(buffer[i]);
1680 +
1681 + return result;
1682 +}
1683 +
1684 +static void flash_reset(struct msm_nand_chip *chip)
1685 +{
1686 + struct {
1687 + dmov_s cmd[6];
1688 + unsigned cmdptr;
1689 + struct {
1690 + uint32_t cmd;
1691 + uint32_t exec;
1692 + uint32_t flash_status;
1693 + uint32_t sflash_bcfg_orig;
1694 + uint32_t sflash_bcfg_mod;
1695 + uint32_t chip_select;
1696 + } data;
1697 + } *dma_buffer;
1698 + dmov_s *cmd;
1699 + dma_addr_t dma_cmd;
1700 + dma_addr_t dma_cmdptr;
1701 +
1702 + wait_event(chip->wait_queue, (dma_buffer = msm_nand_get_dma_buffer
1703 + (chip, sizeof(*dma_buffer))));
1704 +
1705 + dma_buffer->data.sflash_bcfg_orig
1706 + = flash_rd_reg(chip, MSM_NAND_SFLASHC_BURST_CFG);
1707 + dma_buffer->data.sflash_bcfg_mod = 0x00000000;
1708 + dma_buffer->data.chip_select = 4;
1709 + dma_buffer->data.cmd = MSM_NAND_CMD_RESET;
1710 + dma_buffer->data.exec = 1;
1711 + dma_buffer->data.flash_status = 0xeeeeeeee;
1712 +
1713 + cmd = dma_buffer->cmd;
1714 +
1715 + /* Put the Nand ctlr in Async mode and disable SFlash ctlr */
1716 + cmd->cmd = 0;
1717 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sflash_bcfg_mod);
1718 + cmd->dst = MSM_NAND_SFLASHC_BURST_CFG;
1719 + cmd->len = 4;
1720 + cmd++;
1721 +
1722 + cmd->cmd = 0;
1723 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.chip_select);
1724 + cmd->dst = MSM_NAND_FLASH_CHIP_SELECT;
1725 + cmd->len = 4;
1726 + cmd++;
1727 +
1728 + /* Block on cmd ready, & write Reset command */
1729 + cmd->cmd = DST_CRCI_NAND_CMD;
1730 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cmd);
1731 + cmd->dst = MSM_NAND_FLASH_CMD;
1732 + cmd->len = 4;
1733 + cmd++;
1734 +
1735 + cmd->cmd = 0;
1736 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.exec);
1737 + cmd->dst = MSM_NAND_EXEC_CMD;
1738 + cmd->len = 4;
1739 + cmd++;
1740 +
1741 + cmd->cmd = SRC_CRCI_NAND_DATA;
1742 + cmd->src = MSM_NAND_FLASH_STATUS;
1743 + cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.flash_status);
1744 + cmd->len = 4;
1745 + cmd++;
1746 +
1747 + /* Restore the SFLASH_BURST_CONFIG register */
1748 + cmd->cmd = 0;
1749 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sflash_bcfg_orig);
1750 + cmd->dst = MSM_NAND_SFLASHC_BURST_CFG;
1751 + cmd->len = 4;
1752 + cmd++;
1753 +
1754 + BUILD_BUG_ON(6 != ARRAY_SIZE(dma_buffer->cmd));
1755 +
1756 + dma_buffer->cmd[0].cmd |= CMD_OCB;
1757 + cmd[-1].cmd |= CMD_OCU | CMD_LC;
1758 +
1759 + dma_cmd = msm_virt_to_dma(chip, dma_buffer->cmd);
1760 + dma_buffer->cmdptr = (dma_cmd >> 3) | CMD_PTR_LP;
1761 +
1762 + mb();
1763 + dma_cmdptr = msm_virt_to_dma(chip, &dma_buffer->cmdptr);
1764 + msm_dmov_exec_cmd(chip->dma_channel,
1765 + DMOV_CMD_PTR_LIST | DMOV_CMD_ADDR(dma_cmdptr));
1766 + mb();
1767 +
1768 + msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
1769 +}
1770 +
1771 +uint32_t flash_onfi_probe(struct msm_nand_chip *chip)
1772 +{
1773 +
1774 +
1775 + struct onfi_param_page {
1776 + uint32_t parameter_page_signature;
1777 + uint16_t revision_number;
1778 + uint16_t features_supported;
1779 + uint16_t optional_commands_supported;
1780 + uint8_t reserved0[22];
1781 + uint8_t device_manufacturer[12];
1782 + uint8_t device_model[20];
1783 + uint8_t jedec_manufacturer_id;
1784 + uint16_t date_code;
1785 + uint8_t reserved1[13];
1786 + uint32_t number_of_data_bytes_per_page;
1787 + uint16_t number_of_spare_bytes_per_page;
1788 + uint32_t number_of_data_bytes_per_partial_page;
1789 + uint16_t number_of_spare_bytes_per_partial_page;
1790 + uint32_t number_of_pages_per_block;
1791 + uint32_t number_of_blocks_per_logical_unit;
1792 + uint8_t number_of_logical_units;
1793 + uint8_t number_of_address_cycles;
1794 + uint8_t number_of_bits_per_cell;
1795 + uint16_t maximum_bad_blocks_per_logical_unit;
1796 + uint16_t block_endurance;
1797 + uint8_t guaranteed_valid_begin_blocks;
1798 + uint16_t guaranteed_valid_begin_blocks_endurance;
1799 + uint8_t number_of_programs_per_page;
1800 + uint8_t partial_program_attributes;
1801 + uint8_t number_of_bits_ecc_correctability;
1802 + uint8_t number_of_interleaved_address_bits;
1803 + uint8_t interleaved_operation_attributes;
1804 + uint8_t reserved2[13];
1805 + uint8_t io_pin_capacitance;
1806 + uint16_t timing_mode_support;
1807 + uint16_t program_cache_timing_mode_support;
1808 + uint16_t maximum_page_programming_time;
1809 + uint16_t maximum_block_erase_time;
1810 + uint16_t maximum_page_read_time;
1811 + uint16_t maximum_change_column_setup_time;
1812 + uint8_t reserved3[23];
1813 + uint16_t vendor_specific_revision_number;
1814 + uint8_t vendor_specific[88];
1815 + uint16_t integrity_crc;
1816 +
1817 + } __attribute__((__packed__));
1818 +
1819 + struct onfi_param_page *onfi_param_page_ptr;
1820 + uint8_t *onfi_identifier_buf = NULL;
1821 + uint8_t *onfi_param_info_buf = NULL;
1822 +
1823 + struct {
1824 + dmov_s cmd[12];
1825 + unsigned cmdptr;
1826 + struct {
1827 + uint32_t cmd;
1828 + uint32_t addr0;
1829 + uint32_t addr1;
1830 + uint32_t cfg0;
1831 + uint32_t cfg1;
1832 + uint32_t exec;
1833 + uint32_t flash_status;
1834 + uint32_t devcmd1_orig;
1835 + uint32_t devcmdvld_orig;
1836 + uint32_t devcmd1_mod;
1837 + uint32_t devcmdvld_mod;
1838 + uint32_t sflash_bcfg_orig;
1839 + uint32_t sflash_bcfg_mod;
1840 + uint32_t chip_select;
1841 + } data;
1842 + } *dma_buffer;
1843 + dmov_s *cmd;
1844 +
1845 + unsigned page_address = 0;
1846 + int err = 0;
1847 + dma_addr_t dma_addr_param_info = 0;
1848 + dma_addr_t dma_addr_identifier = 0;
1849 + unsigned cmd_set_count = 2;
1850 + unsigned crc_chk_count = 0;
1851 +
1852 + /*if (msm_nand_data.nr_parts) {
1853 + page_address = ((msm_nand_data.parts[0]).offset << 6);
1854 + } else {
1855 + pr_err("flash_onfi_probe: "
1856 + "No partition info available\n");
1857 + err = -EIO;
1858 + return err;
1859 + }*/
1860 +
1861 + wait_event(chip->wait_queue, (onfi_identifier_buf =
1862 + msm_nand_get_dma_buffer(chip, ONFI_IDENTIFIER_LENGTH)));
1863 + dma_addr_identifier = msm_virt_to_dma(chip, onfi_identifier_buf);
1864 +
1865 + wait_event(chip->wait_queue, (onfi_param_info_buf =
1866 + msm_nand_get_dma_buffer(chip, ONFI_PARAM_INFO_LENGTH)));
1867 + dma_addr_param_info = msm_virt_to_dma(chip, onfi_param_info_buf);
1868 +
1869 + wait_event(chip->wait_queue, (dma_buffer = msm_nand_get_dma_buffer
1870 + (chip, sizeof(*dma_buffer))));
1871 +
1872 + dma_buffer->data.sflash_bcfg_orig = flash_rd_reg
1873 + (chip, MSM_NAND_SFLASHC_BURST_CFG);
1874 + dma_buffer->data.devcmd1_orig = flash_rd_reg(chip, MSM_NAND_DEV_CMD1);
1875 + dma_buffer->data.devcmdvld_orig = flash_rd_reg(chip,
1876 + MSM_NAND_DEV_CMD_VLD);
1877 + dma_buffer->data.chip_select = 4;
1878 +
1879 + while (cmd_set_count-- > 0) {
1880 + cmd = dma_buffer->cmd;
1881 +
1882 + dma_buffer->data.devcmd1_mod = (dma_buffer->data.devcmd1_orig &
1883 + 0xFFFFFF00) | (cmd_set_count
1884 + ? FLASH_READ_ONFI_IDENTIFIER_COMMAND
1885 + : FLASH_READ_ONFI_PARAMETERS_COMMAND);
1886 + dma_buffer->data.cmd = MSM_NAND_CMD_PAGE_READ;
1887 + dma_buffer->data.addr0 = (page_address << 16) | (cmd_set_count
1888 + ? FLASH_READ_ONFI_IDENTIFIER_ADDRESS
1889 + : FLASH_READ_ONFI_PARAMETERS_ADDRESS);
1890 + dma_buffer->data.addr1 = (page_address >> 16) & 0xFF;
1891 + dma_buffer->data.cfg0 = (cmd_set_count
1892 + ? MSM_NAND_CFG0_RAW_ONFI_IDENTIFIER
1893 + : MSM_NAND_CFG0_RAW_ONFI_PARAM_INFO);
1894 + dma_buffer->data.cfg1 = (cmd_set_count
1895 + ? MSM_NAND_CFG1_RAW_ONFI_IDENTIFIER
1896 + : MSM_NAND_CFG1_RAW_ONFI_PARAM_INFO);
1897 + dma_buffer->data.sflash_bcfg_mod = 0x00000000;
1898 + dma_buffer->data.devcmdvld_mod = (dma_buffer->
1899 + data.devcmdvld_orig & 0xFFFFFFFE);
1900 + dma_buffer->data.exec = 1;
1901 + dma_buffer->data.flash_status = 0xeeeeeeee;
1902 +
1903 + /* Put the Nand ctlr in Async mode and disable SFlash ctlr */
1904 + cmd->cmd = 0;
1905 + cmd->src = msm_virt_to_dma(chip,
1906 + &dma_buffer->data.sflash_bcfg_mod);
1907 + cmd->dst = MSM_NAND_SFLASHC_BURST_CFG;
1908 + cmd->len = 4;
1909 + cmd++;
1910 +
1911 + cmd->cmd = 0;
1912 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.chip_select);
1913 + cmd->dst = MSM_NAND_FLASH_CHIP_SELECT;
1914 + cmd->len = 4;
1915 + cmd++;
1916 +
1917 + /* Block on cmd ready, & write CMD,ADDR0,ADDR1,CHIPSEL regs */
1918 + cmd->cmd = DST_CRCI_NAND_CMD;
1919 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cmd);
1920 + cmd->dst = MSM_NAND_FLASH_CMD;
1921 + cmd->len = 12;
1922 + cmd++;
1923 +
1924 + /* Configure the CFG0 and CFG1 registers */
1925 + cmd->cmd = 0;
1926 + cmd->src = msm_virt_to_dma(chip,
1927 + &dma_buffer->data.cfg0);
1928 + cmd->dst = MSM_NAND_DEV0_CFG0;
1929 + cmd->len = 8;
1930 + cmd++;
1931 +
1932 + /* Configure the DEV_CMD_VLD register */
1933 + cmd->cmd = 0;
1934 + cmd->src = msm_virt_to_dma(chip,
1935 + &dma_buffer->data.devcmdvld_mod);
1936 + cmd->dst = MSM_NAND_DEV_CMD_VLD;
1937 + cmd->len = 4;
1938 + cmd++;
1939 +
1940 + /* Configure the DEV_CMD1 register */
1941 + cmd->cmd = 0;
1942 + cmd->src = msm_virt_to_dma(chip,
1943 + &dma_buffer->data.devcmd1_mod);
1944 + cmd->dst = MSM_NAND_DEV_CMD1;
1945 + cmd->len = 4;
1946 + cmd++;
1947 +
1948 + /* Kick the execute command */
1949 + cmd->cmd = 0;
1950 + cmd->src = msm_virt_to_dma(chip,
1951 + &dma_buffer->data.exec);
1952 + cmd->dst = MSM_NAND_EXEC_CMD;
1953 + cmd->len = 4;
1954 + cmd++;
1955 +
1956 + /* Block on data ready, and read the two status registers */
1957 + cmd->cmd = SRC_CRCI_NAND_DATA;
1958 + cmd->src = MSM_NAND_FLASH_STATUS;
1959 + cmd->dst = msm_virt_to_dma(chip,
1960 + &dma_buffer->data.flash_status);
1961 + cmd->len = 4;
1962 + cmd++;
1963 +
1964 + /* Read data block - valid only if status says success */
1965 + cmd->cmd = 0;
1966 + cmd->src = MSM_NAND_FLASH_BUFFER;
1967 + cmd->dst = (cmd_set_count ? dma_addr_identifier :
1968 + dma_addr_param_info);
1969 + cmd->len = (cmd_set_count ? ONFI_IDENTIFIER_LENGTH :
1970 + ONFI_PARAM_INFO_LENGTH);
1971 + cmd++;
1972 +
1973 + /* Restore the DEV_CMD1 register */
1974 + cmd->cmd = 0 ;
1975 + cmd->src = msm_virt_to_dma(chip,
1976 + &dma_buffer->data.devcmd1_orig);
1977 + cmd->dst = MSM_NAND_DEV_CMD1;
1978 + cmd->len = 4;
1979 + cmd++;
1980 +
1981 + /* Restore the DEV_CMD_VLD register */
1982 + cmd->cmd = 0;
1983 + cmd->src = msm_virt_to_dma(chip,
1984 + &dma_buffer->data.devcmdvld_orig);
1985 + cmd->dst = MSM_NAND_DEV_CMD_VLD;
1986 + cmd->len = 4;
1987 + cmd++;
1988 +
1989 + /* Restore the SFLASH_BURST_CONFIG register */
1990 + cmd->cmd = 0;
1991 + cmd->src = msm_virt_to_dma(chip,
1992 + &dma_buffer->data.sflash_bcfg_orig);
1993 + cmd->dst = MSM_NAND_SFLASHC_BURST_CFG;
1994 + cmd->len = 4;
1995 + cmd++;
1996 +
1997 + BUILD_BUG_ON(12 != ARRAY_SIZE(dma_buffer->cmd));
1998 + BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
1999 + dma_buffer->cmd[0].cmd |= CMD_OCB;
2000 + cmd[-1].cmd |= CMD_OCU | CMD_LC;
2001 +
2002 + dma_buffer->cmdptr = (msm_virt_to_dma(chip, dma_buffer->cmd)
2003 + >> 3) | CMD_PTR_LP;
2004 +
2005 + mb();
2006 + msm_dmov_exec_cmd(chip->dma_channel,
2007 + DMOV_CMD_PTR_LIST | DMOV_CMD_ADDR(msm_virt_to_dma(chip,
2008 + &dma_buffer->cmdptr)));
2009 + mb();
2010 +
2011 + /* Check for errors, protection violations etc */
2012 + if (dma_buffer->data.flash_status & 0x110) {
2013 + pr_info("MPU/OP error (0x%x) during "
2014 + "ONFI probe\n",
2015 + dma_buffer->data.flash_status);
2016 + err = -EIO;
2017 + break;
2018 + }
2019 +
2020 + if (cmd_set_count) {
2021 + onfi_param_page_ptr = (struct onfi_param_page *)
2022 + (&(onfi_identifier_buf[0]));
2023 + if (onfi_param_page_ptr->parameter_page_signature !=
2024 + ONFI_PARAMETER_PAGE_SIGNATURE) {
2025 + pr_info("ONFI probe : Found a non"
2026 + "ONFI Compliant device \n");
2027 + err = -EIO;
2028 + break;
2029 + }
2030 + } else {
2031 + for (crc_chk_count = 0; crc_chk_count <
2032 + ONFI_PARAM_INFO_LENGTH
2033 + / ONFI_PARAM_PAGE_LENGTH;
2034 + crc_chk_count++) {
2035 + onfi_param_page_ptr =
2036 + (struct onfi_param_page *)
2037 + (&(onfi_param_info_buf
2038 + [ONFI_PARAM_PAGE_LENGTH *
2039 + crc_chk_count]));
2040 + if (flash_onfi_crc_check(
2041 + (uint8_t *)onfi_param_page_ptr,
2042 + ONFI_PARAM_PAGE_LENGTH - 2) ==
2043 + onfi_param_page_ptr->integrity_crc) {
2044 + break;
2045 + }
2046 + }
2047 + if (crc_chk_count >= ONFI_PARAM_INFO_LENGTH
2048 + / ONFI_PARAM_PAGE_LENGTH) {
2049 + pr_info("ONFI probe : CRC Check "
2050 + "failed on ONFI Parameter "
2051 + "data \n");
2052 + err = -EIO;
2053 + break;
2054 + } else {
2055 + supported_flash.flash_id =
2056 + flash_read_id(chip);
2057 + supported_flash.widebus =
2058 + onfi_param_page_ptr->
2059 + features_supported & 0x01;
2060 + supported_flash.pagesize =
2061 + onfi_param_page_ptr->
2062 + number_of_data_bytes_per_page;
2063 + supported_flash.blksize =
2064 + onfi_param_page_ptr->
2065 + number_of_pages_per_block *
2066 + supported_flash.pagesize;
2067 + supported_flash.oobsize =
2068 + onfi_param_page_ptr->
2069 + number_of_spare_bytes_per_page;
2070 + supported_flash.density =
2071 + onfi_param_page_ptr->
2072 + number_of_blocks_per_logical_unit
2073 + * supported_flash.blksize;
2074 + supported_flash.ecc_correctability =
2075 + onfi_param_page_ptr->
2076 + number_of_bits_ecc_correctability;
2077 +
2078 + pr_info("ONFI probe : Found an ONFI "
2079 + "compliant device %s\n",
2080 + onfi_param_page_ptr->device_model);
2081 +
2082 + /* Temporary hack for MT29F4G08ABC device.
2083 + * Since the device is not properly adhering
2084 + * to ONFi specification it is reporting
2085 + * as 16 bit device though it is 8 bit device!!!
2086 + */
2087 + if (!strncmp(onfi_param_page_ptr->device_model,
2088 + "MT29F4G08ABC", 12))
2089 + supported_flash.widebus = 0;
2090 + }
2091 + }
2092 + }
2093 +
2094 + msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
2095 + msm_nand_release_dma_buffer(chip, onfi_param_info_buf,
2096 + ONFI_PARAM_INFO_LENGTH);
2097 + msm_nand_release_dma_buffer(chip, onfi_identifier_buf,
2098 + ONFI_IDENTIFIER_LENGTH);
2099 +
2100 + return err;
2101 +}
2102 +
2103 +static int msm_nand_read_oob(struct mtd_info *mtd, loff_t from,
2104 + struct mtd_oob_ops *ops)
2105 +{
2106 + struct msm_nand_chip *chip = mtd->priv;
2107 +
2108 + struct {
2109 + dmov_s cmd[8 * 5 + 2];
2110 + unsigned cmdptr;
2111 + struct {
2112 + uint32_t cmd;
2113 + uint32_t addr0;
2114 + uint32_t addr1;
2115 + uint32_t chipsel;
2116 + uint32_t cfg0;
2117 + uint32_t cfg1;
2118 + uint32_t eccbchcfg;
2119 + uint32_t exec;
2120 + uint32_t ecccfg;
2121 + struct {
2122 + uint32_t flash_status;
2123 + uint32_t buffer_status;
2124 + } result[8];
2125 + } data;
2126 + } *dma_buffer;
2127 + dmov_s *cmd;
2128 + unsigned n;
2129 + unsigned page = 0;
2130 + uint32_t oob_len;
2131 + uint32_t sectordatasize;
2132 + uint32_t sectoroobsize;
2133 + int err, pageerr, rawerr;
2134 + dma_addr_t data_dma_addr = 0;
2135 + dma_addr_t oob_dma_addr = 0;
2136 + dma_addr_t data_dma_addr_curr = 0;
2137 + dma_addr_t oob_dma_addr_curr = 0;
2138 + uint8_t *dat_bounce_buf = NULL;
2139 + uint8_t *oob_bounce_buf = NULL;
2140 + uint32_t oob_col = 0;
2141 + unsigned page_count;
2142 + unsigned pages_read = 0;
2143 + unsigned start_sector = 0;
2144 + uint32_t ecc_errors;
2145 + uint32_t total_ecc_errors = 0;
2146 + unsigned cwperpage;
2147 +#if VERBOSE
2148 + pr_info("================================================="
2149 + "================\n");
2150 + pr_info("%s:\nfrom 0x%llx mode %d\ndatbuf 0x%p datlen 0x%x"
2151 + "\noobbuf 0x%p ooblen 0x%x\n",
2152 + __func__, from, ops->mode, ops->datbuf, ops->len,
2153 + ops->oobbuf, ops->ooblen);
2154 +#endif
2155 +
2156 + if (mtd->writesize == 2048)
2157 + page = from >> 11;
2158 +
2159 + if (mtd->writesize == 4096)
2160 + page = from >> 12;
2161 +
2162 + oob_len = ops->ooblen;
2163 + cwperpage = (mtd->writesize >> 9);
2164 +
2165 + if (from & (mtd->writesize - 1)) {
2166 + pr_err("%s: unsupported from, 0x%llx\n",
2167 + __func__, from);
2168 + return -EINVAL;
2169 + }
2170 + if (ops->mode != MTD_OPS_RAW) {
2171 + if (ops->datbuf != NULL && (ops->len % mtd->writesize) != 0) {
2172 + /* when ops->datbuf is NULL, ops->len can be ooblen */
2173 + pr_err("%s: unsupported ops->len, %d\n",
2174 + __func__, ops->len);
2175 + return -EINVAL;
2176 + }
2177 + } else {
2178 + if (ops->datbuf != NULL &&
2179 + (ops->len % (mtd->writesize + mtd->oobsize)) != 0) {
2180 + pr_err("%s: unsupported ops->len,"
2181 + " %d for MTD_OPS_RAW\n", __func__, ops->len);
2182 + return -EINVAL;
2183 + }
2184 + }
2185 +
2186 + if (ops->mode != MTD_OPS_RAW && ops->ooblen != 0 && ops->ooboffs != 0) {
2187 + pr_err("%s: unsupported ops->ooboffs, %d\n",
2188 + __func__, ops->ooboffs);
2189 + return -EINVAL;
2190 + }
2191 +
2192 + if (ops->oobbuf && !ops->datbuf && ops->mode == MTD_OPS_AUTO_OOB)
2193 + start_sector = cwperpage - 1;
2194 +
2195 + if (ops->oobbuf && !ops->datbuf) {
2196 + page_count = ops->ooblen / ((ops->mode == MTD_OPS_AUTO_OOB) ?
2197 + mtd->oobavail : mtd->oobsize);
2198 + if ((page_count == 0) && (ops->ooblen))
2199 + page_count = 1;
2200 + } else if (ops->mode != MTD_OPS_RAW)
2201 + page_count = ops->len / mtd->writesize;
2202 + else
2203 + page_count = ops->len / (mtd->writesize + mtd->oobsize);
2204 +
2205 + if (ops->datbuf) {
2206 + data_dma_addr_curr = data_dma_addr =
2207 + msm_nand_dma_map(chip->dev, ops->datbuf, ops->len,
2208 + DMA_FROM_DEVICE, &dat_bounce_buf);
2209 + if (dma_mapping_error(chip->dev, data_dma_addr)) {
2210 + pr_err("msm_nand_read_oob: failed to get dma addr "
2211 + "for %p\n", ops->datbuf);
2212 + return -EIO;
2213 + }
2214 + }
2215 + if (ops->oobbuf) {
2216 + memset(ops->oobbuf, 0xff, ops->ooblen);
2217 + oob_dma_addr_curr = oob_dma_addr =
2218 + msm_nand_dma_map(chip->dev, ops->oobbuf,
2219 + ops->ooblen, DMA_BIDIRECTIONAL,
2220 + &oob_bounce_buf);
2221 + if (dma_mapping_error(chip->dev, oob_dma_addr)) {
2222 + pr_err("msm_nand_read_oob: failed to get dma addr "
2223 + "for %p\n", ops->oobbuf);
2224 + err = -EIO;
2225 + goto err_dma_map_oobbuf_failed;
2226 + }
2227 + }
2228 +
2229 + wait_event(chip->wait_queue,
2230 + (dma_buffer = msm_nand_get_dma_buffer(
2231 + chip, sizeof(*dma_buffer))));
2232 +
2233 + oob_col = start_sector * chip->cw_size;
2234 + if (chip->CFG1 & CFG1_WIDE_FLASH)
2235 + oob_col >>= 1;
2236 +
2237 + err = 0;
2238 + while (page_count-- > 0) {
2239 + cmd = dma_buffer->cmd;
2240 +
2241 + /* CMD / ADDR0 / ADDR1 / CHIPSEL program values */
2242 + if (ops->mode != MTD_OPS_RAW) {
2243 + dma_buffer->data.cmd = MSM_NAND_CMD_PAGE_READ_ECC;
2244 + dma_buffer->data.cfg0 =
2245 + (chip->CFG0 & ~(7U << 6))
2246 + | (((cwperpage-1) - start_sector) << 6);
2247 + dma_buffer->data.cfg1 = chip->CFG1;
2248 + if (enable_bch_ecc)
2249 + dma_buffer->data.eccbchcfg = chip->ecc_bch_cfg;
2250 + } else {
2251 + dma_buffer->data.cmd = MSM_NAND_CMD_PAGE_READ;
2252 + dma_buffer->data.cfg0 = (chip->CFG0_RAW
2253 + & ~(7U << 6)) | ((cwperpage-1) << 6);
2254 + dma_buffer->data.cfg1 = chip->CFG1_RAW |
2255 + (chip->CFG1 & CFG1_WIDE_FLASH);
2256 + }
2257 +
2258 + dma_buffer->data.addr0 = (page << 16) | oob_col;
2259 + dma_buffer->data.addr1 = (page >> 16) & 0xff;
2260 + /* chipsel_0 + enable DM interface */
2261 + dma_buffer->data.chipsel = 0 | 4;
2262 +
2263 +
2264 + /* GO bit for the EXEC register */
2265 + dma_buffer->data.exec = 1;
2266 +
2267 +
2268 + BUILD_BUG_ON(8 != ARRAY_SIZE(dma_buffer->data.result));
2269 +
2270 + for (n = start_sector; n < cwperpage; n++) {
2271 + /* flash + buffer status return words */
2272 + dma_buffer->data.result[n].flash_status = 0xeeeeeeee;
2273 + dma_buffer->data.result[n].buffer_status = 0xeeeeeeee;
2274 +
2275 + /* block on cmd ready, then
2276 + * write CMD / ADDR0 / ADDR1 / CHIPSEL
2277 + * regs in a burst
2278 + */
2279 + cmd->cmd = DST_CRCI_NAND_CMD;
2280 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cmd);
2281 + cmd->dst = MSM_NAND_FLASH_CMD;
2282 + if (n == start_sector)
2283 + cmd->len = 16;
2284 + else
2285 + cmd->len = 4;
2286 + cmd++;
2287 +
2288 + if (n == start_sector) {
2289 + cmd->cmd = 0;
2290 + cmd->src = msm_virt_to_dma(chip,
2291 + &dma_buffer->data.cfg0);
2292 + cmd->dst = MSM_NAND_DEV0_CFG0;
2293 + if (enable_bch_ecc)
2294 + cmd->len = 12;
2295 + else
2296 + cmd->len = 8;
2297 + cmd++;
2298 +
2299 + dma_buffer->data.ecccfg = chip->ecc_buf_cfg;
2300 + cmd->cmd = 0;
2301 + cmd->src = msm_virt_to_dma(chip,
2302 + &dma_buffer->data.ecccfg);
2303 + cmd->dst = MSM_NAND_EBI2_ECC_BUF_CFG;
2304 + cmd->len = 4;
2305 + cmd++;
2306 + }
2307 +
2308 + /* kick the execute register */
2309 + cmd->cmd = 0;
2310 + cmd->src =
2311 + msm_virt_to_dma(chip, &dma_buffer->data.exec);
2312 + cmd->dst = MSM_NAND_EXEC_CMD;
2313 + cmd->len = 4;
2314 + cmd++;
2315 +
2316 + /* block on data ready, then
2317 + * read the status register
2318 + */
2319 + cmd->cmd = SRC_CRCI_NAND_DATA;
2320 + cmd->src = MSM_NAND_FLASH_STATUS;
2321 + cmd->dst = msm_virt_to_dma(chip,
2322 + &dma_buffer->data.result[n]);
2323 + /* MSM_NAND_FLASH_STATUS + MSM_NAND_BUFFER_STATUS */
2324 + cmd->len = 8;
2325 + cmd++;
2326 +
2327 + /* read data block
2328 + * (only valid if status says success)
2329 + */
2330 + if (ops->datbuf) {
2331 + if (ops->mode != MTD_OPS_RAW) {
2332 + if (!boot_layout)
2333 + sectordatasize = (n < (cwperpage - 1))
2334 + ? 516 : (512 - ((cwperpage - 1) << 2));
2335 + else
2336 + sectordatasize = 512;
2337 + } else {
2338 + sectordatasize = chip->cw_size;
2339 + }
2340 +
2341 + cmd->cmd = 0;
2342 + cmd->src = MSM_NAND_FLASH_BUFFER;
2343 + cmd->dst = data_dma_addr_curr;
2344 + data_dma_addr_curr += sectordatasize;
2345 + cmd->len = sectordatasize;
2346 + cmd++;
2347 + }
2348 +
2349 + if (ops->oobbuf && (n == (cwperpage - 1)
2350 + || ops->mode != MTD_OPS_AUTO_OOB)) {
2351 + cmd->cmd = 0;
2352 + if (n == (cwperpage - 1)) {
2353 + cmd->src = MSM_NAND_FLASH_BUFFER +
2354 + (512 - ((cwperpage - 1) << 2));
2355 + sectoroobsize = (cwperpage << 2);
2356 + if (ops->mode != MTD_OPS_AUTO_OOB)
2357 + sectoroobsize +=
2358 + chip->ecc_parity_bytes;
2359 + } else {
2360 + cmd->src = MSM_NAND_FLASH_BUFFER + 516;
2361 + sectoroobsize = chip->ecc_parity_bytes;
2362 + }
2363 +
2364 + cmd->dst = oob_dma_addr_curr;
2365 + if (sectoroobsize < oob_len)
2366 + cmd->len = sectoroobsize;
2367 + else
2368 + cmd->len = oob_len;
2369 + oob_dma_addr_curr += cmd->len;
2370 + oob_len -= cmd->len;
2371 + if (cmd->len > 0)
2372 + cmd++;
2373 + }
2374 + }
2375 +
2376 + BUILD_BUG_ON(8 * 5 + 2 != ARRAY_SIZE(dma_buffer->cmd));
2377 + BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
2378 + dma_buffer->cmd[0].cmd |= CMD_OCB;
2379 + cmd[-1].cmd |= CMD_OCU | CMD_LC;
2380 +
2381 + dma_buffer->cmdptr =
2382 + (msm_virt_to_dma(chip, dma_buffer->cmd) >> 3)
2383 + | CMD_PTR_LP;
2384 +
2385 + mb();
2386 + msm_dmov_exec_cmd(chip->dma_channel,
2387 + DMOV_CMD_PTR_LIST | DMOV_CMD_ADDR(msm_virt_to_dma(chip,
2388 + &dma_buffer->cmdptr)));
2389 + mb();
2390 +
2391 + /* if any of the writes failed (0x10), or there
2392 + * was a protection violation (0x100), we lose
2393 + */
2394 + pageerr = rawerr = 0;
2395 + for (n = start_sector; n < cwperpage; n++) {
2396 + if (dma_buffer->data.result[n].flash_status & 0x110) {
2397 + rawerr = -EIO;
2398 + break;
2399 + }
2400 + }
2401 + if (rawerr) {
2402 + if (ops->datbuf && ops->mode != MTD_OPS_RAW) {
2403 + uint8_t *datbuf = ops->datbuf +
2404 + pages_read * mtd->writesize;
2405 +
2406 + dma_sync_single_for_cpu(chip->dev,
2407 + data_dma_addr_curr-mtd->writesize,
2408 + mtd->writesize, DMA_BIDIRECTIONAL);
2409 +
2410 + for (n = 0; n < mtd->writesize; n++) {
2411 + /* empty blocks read 0x54 at
2412 + * these offsets
2413 + */
2414 + if ((n % 516 == 3 || n % 516 == 175)
2415 + && datbuf[n] == 0x54)
2416 + datbuf[n] = 0xff;
2417 + if (datbuf[n] != 0xff) {
2418 + pageerr = rawerr;
2419 + break;
2420 + }
2421 + }
2422 +
2423 + dma_sync_single_for_device(chip->dev,
2424 + data_dma_addr_curr-mtd->writesize,
2425 + mtd->writesize, DMA_BIDIRECTIONAL);
2426 +
2427 + }
2428 + if (ops->oobbuf) {
2429 + dma_sync_single_for_cpu(chip->dev,
2430 + oob_dma_addr_curr - (ops->ooblen - oob_len),
2431 + ops->ooblen - oob_len, DMA_BIDIRECTIONAL);
2432 +
2433 + for (n = 0; n < ops->ooblen; n++) {
2434 + if (ops->oobbuf[n] != 0xff) {
2435 + pageerr = rawerr;
2436 + break;
2437 + }
2438 + }
2439 +
2440 + dma_sync_single_for_device(chip->dev,
2441 + oob_dma_addr_curr - (ops->ooblen - oob_len),
2442 + ops->ooblen - oob_len, DMA_BIDIRECTIONAL);
2443 + }
2444 + }
2445 + if (pageerr) {
2446 + for (n = start_sector; n < cwperpage; n++) {
2447 + if (dma_buffer->data.result[n].buffer_status &
2448 + chip->uncorrectable_bit_mask) {
2449 + /* not thread safe */
2450 + mtd->ecc_stats.failed++;
2451 + pageerr = -EBADMSG;
2452 + break;
2453 + }
2454 + }
2455 + }
2456 +		if (!rawerr) { /* check for correctable errors */
2457 + for (n = start_sector; n < cwperpage; n++) {
2458 + ecc_errors =
2459 + (dma_buffer->data.result[n].buffer_status
2460 + & chip->num_err_mask);
2461 + if (ecc_errors) {
2462 + total_ecc_errors += ecc_errors;
2463 + /* not thread safe */
2464 + mtd->ecc_stats.corrected += ecc_errors;
2465 + if (ecc_errors > 1)
2466 + pageerr = -EUCLEAN;
2467 + }
2468 + }
2469 + }
2470 + if (pageerr && (pageerr != -EUCLEAN || err == 0))
2471 + err = pageerr;
2472 +
2473 +#if VERBOSE
2474 + if (rawerr && !pageerr) {
2475 + pr_err("msm_nand_read_oob %llx %x %x empty page\n",
2476 + (loff_t)page * mtd->writesize, ops->len,
2477 + ops->ooblen);
2478 + } else {
2479 + for (n = start_sector; n < cwperpage; n++)
2480 + pr_info("flash_status[%d] = %x,\
2481 + buffr_status[%d] = %x\n",
2482 + n, dma_buffer->data.result[n].flash_status,
2483 + n, dma_buffer->data.result[n].buffer_status);
2484 + }
2485 +#endif
2486 + if (err && err != -EUCLEAN && err != -EBADMSG)
2487 + break;
2488 + pages_read++;
2489 + page++;
2490 + }
2491 + msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
2492 +
2493 + if (ops->oobbuf) {
2494 + msm_nand_dma_unmap(chip->dev, oob_dma_addr,
2495 + ops->ooblen, DMA_FROM_DEVICE,
2496 + ops->oobbuf, oob_bounce_buf);
2497 + }
2498 +err_dma_map_oobbuf_failed:
2499 + if (ops->datbuf) {
2500 + msm_nand_dma_unmap(chip->dev, data_dma_addr,
2501 + ops->len, DMA_BIDIRECTIONAL,
2502 + ops->datbuf, dat_bounce_buf);
2503 + }
2504 +
2505 + if (ops->mode != MTD_OPS_RAW)
2506 + ops->retlen = mtd->writesize * pages_read;
2507 + else
2508 + ops->retlen = (mtd->writesize + mtd->oobsize) *
2509 + pages_read;
2510 + ops->oobretlen = ops->ooblen - oob_len;
2511 + if (err)
2512 + pr_err("msm_nand_read_oob %llx %x %x failed %d, corrected %d\n",
2513 + from, ops->datbuf ? ops->len : 0, ops->ooblen, err,
2514 + total_ecc_errors);
2515 +#if VERBOSE
2516 + pr_info("\n%s: ret %d, retlen %d oobretlen %d\n",
2517 + __func__, err, ops->retlen, ops->oobretlen);
2518 +
2519 + pr_info("==================================================="
2520 + "==============\n");
2521 +#endif
2522 + return err;
2523 +}
2524 +
2525 +static int msm_nand_read_oob_dualnandc(struct mtd_info *mtd, loff_t from,
2526 + struct mtd_oob_ops *ops)
2527 +{
2528 + struct msm_nand_chip *chip = mtd->priv;
2529 +
2530 + struct {
2531 + dmov_s cmd[16 * 6 + 20];
2532 + unsigned cmdptr;
2533 + struct {
2534 + uint32_t cmd;
2535 + uint32_t nandc01_addr0;
2536 + uint32_t nandc10_addr0;
2537 + uint32_t nandc11_addr1;
2538 + uint32_t chipsel_cs0;
2539 + uint32_t chipsel_cs1;
2540 + uint32_t cfg0;
2541 + uint32_t cfg1;
2542 + uint32_t eccbchcfg;
2543 + uint32_t exec;
2544 + uint32_t ecccfg;
2545 + uint32_t ebi2_chip_select_cfg0;
2546 + uint32_t adm_mux_data_ack_req_nc01;
2547 + uint32_t adm_mux_cmd_ack_req_nc01;
2548 + uint32_t adm_mux_data_ack_req_nc10;
2549 + uint32_t adm_mux_cmd_ack_req_nc10;
2550 + uint32_t adm_default_mux;
2551 + uint32_t default_ebi2_chip_select_cfg0;
2552 + uint32_t nc10_flash_dev_cmd_vld;
2553 + uint32_t nc10_flash_dev_cmd1;
2554 + uint32_t nc10_flash_dev_cmd_vld_default;
2555 + uint32_t nc10_flash_dev_cmd1_default;
2556 + struct {
2557 + uint32_t flash_status;
2558 + uint32_t buffer_status;
2559 + } result[16];
2560 + } data;
2561 + } *dma_buffer;
2562 + dmov_s *cmd;
2563 + unsigned n;
2564 + unsigned page = 0;
2565 + uint32_t oob_len;
2566 + uint32_t sectordatasize;
2567 + uint32_t sectoroobsize;
2568 + int err, pageerr, rawerr;
2569 + dma_addr_t data_dma_addr = 0;
2570 + dma_addr_t oob_dma_addr = 0;
2571 + dma_addr_t data_dma_addr_curr = 0;
2572 + dma_addr_t oob_dma_addr_curr = 0;
2573 + uint32_t oob_col = 0;
2574 + unsigned page_count;
2575 + unsigned pages_read = 0;
2576 + unsigned start_sector = 0;
2577 + uint32_t ecc_errors;
2578 + uint32_t total_ecc_errors = 0;
2579 + unsigned cwperpage;
2580 + unsigned cw_offset = chip->cw_size;
2581 +#if VERBOSE
2582 + pr_info("================================================="
2583 + "============\n");
2584 + pr_info("%s:\nfrom 0x%llx mode %d\ndatbuf 0x%p datlen 0x%x"
2585 + "\noobbuf 0x%p ooblen 0x%x\n\n",
2586 + __func__, from, ops->mode, ops->datbuf,
2587 + ops->len, ops->oobbuf, ops->ooblen);
2588 +#endif
2589 +
2590 + if (mtd->writesize == 2048)
2591 + page = from >> 11;
2592 +
2593 + if (mtd->writesize == 4096)
2594 + page = from >> 12;
2595 +
2596 + if (interleave_enable)
2597 + page = (from >> 1) >> 12;
2598 +
2599 + oob_len = ops->ooblen;
2600 + cwperpage = (mtd->writesize >> 9);
2601 +
2602 + if (from & (mtd->writesize - 1)) {
2603 + pr_err("%s: unsupported from, 0x%llx\n",
2604 + __func__, from);
2605 + return -EINVAL;
2606 + }
2607 + if (ops->mode != MTD_OPS_RAW) {
2608 + if (ops->datbuf != NULL && (ops->len % mtd->writesize) != 0) {
2609 + pr_err("%s: unsupported ops->len, %d\n",
2610 + __func__, ops->len);
2611 + return -EINVAL;
2612 + }
2613 + } else {
2614 + if (ops->datbuf != NULL &&
2615 + (ops->len % (mtd->writesize + mtd->oobsize)) != 0) {
2616 + pr_err("%s: unsupported ops->len,"
2617 + " %d for MTD_OPS_RAW\n", __func__, ops->len);
2618 + return -EINVAL;
2619 + }
2620 + }
2621 +
2622 + if (ops->mode != MTD_OPS_RAW && ops->ooblen != 0 && ops->ooboffs != 0) {
2623 + pr_err("%s: unsupported ops->ooboffs, %d\n",
2624 + __func__, ops->ooboffs);
2625 + return -EINVAL;
2626 + }
2627 +
2628 + if (ops->oobbuf && !ops->datbuf && ops->mode == MTD_OPS_AUTO_OOB)
2629 + start_sector = cwperpage - 1;
2630 +
2631 + if (ops->oobbuf && !ops->datbuf) {
2632 + page_count = ops->ooblen / ((ops->mode == MTD_OPS_AUTO_OOB) ?
2633 + mtd->oobavail : mtd->oobsize);
2634 + if ((page_count == 0) && (ops->ooblen))
2635 + page_count = 1;
2636 + } else if (ops->mode != MTD_OPS_RAW)
2637 + page_count = ops->len / mtd->writesize;
2638 + else
2639 + page_count = ops->len / (mtd->writesize + mtd->oobsize);
2640 +
2641 + if (ops->datbuf) {
2642 + data_dma_addr_curr = data_dma_addr =
2643 + msm_nand_dma_map(chip->dev, ops->datbuf, ops->len,
2644 + DMA_FROM_DEVICE, NULL);
2645 + if (dma_mapping_error(chip->dev, data_dma_addr)) {
2646 + pr_err("msm_nand_read_oob_dualnandc: "
2647 + "failed to get dma addr for %p\n",
2648 + ops->datbuf);
2649 + return -EIO;
2650 + }
2651 + }
2652 + if (ops->oobbuf) {
2653 + memset(ops->oobbuf, 0xff, ops->ooblen);
2654 + oob_dma_addr_curr = oob_dma_addr =
2655 + msm_nand_dma_map(chip->dev, ops->oobbuf,
2656 + ops->ooblen, DMA_BIDIRECTIONAL, NULL);
2657 + if (dma_mapping_error(chip->dev, oob_dma_addr)) {
2658 + pr_err("msm_nand_read_oob_dualnandc: "
2659 + "failed to get dma addr for %p\n",
2660 + ops->oobbuf);
2661 + err = -EIO;
2662 + goto err_dma_map_oobbuf_failed;
2663 + }
2664 + }
2665 +
2666 + wait_event(chip->wait_queue,
2667 + (dma_buffer = msm_nand_get_dma_buffer(
2668 + chip, sizeof(*dma_buffer))));
2669 +
2670 + oob_col = start_sector * chip->cw_size;
2671 + if (chip->CFG1 & CFG1_WIDE_FLASH) {
2672 + oob_col >>= 1;
2673 + cw_offset >>= 1;
2674 + }
2675 +
2676 + err = 0;
2677 + while (page_count-- > 0) {
2678 + cmd = dma_buffer->cmd;
2679 +
2680 + if (ops->mode != MTD_OPS_RAW) {
2681 + dma_buffer->data.cmd = MSM_NAND_CMD_PAGE_READ_ECC;
2682 + if (start_sector == (cwperpage - 1)) {
2683 + dma_buffer->data.cfg0 = (chip->CFG0 &
2684 + ~(7U << 6));
2685 + } else {
2686 + dma_buffer->data.cfg0 = (chip->CFG0 &
2687 + ~(7U << 6))
2688 + | (((cwperpage >> 1)-1) << 6);
2689 + }
2690 + dma_buffer->data.cfg1 = chip->CFG1;
2691 + if (enable_bch_ecc)
2692 + dma_buffer->data.eccbchcfg = chip->ecc_bch_cfg;
2693 + } else {
2694 + dma_buffer->data.cmd = MSM_NAND_CMD_PAGE_READ;
2695 + dma_buffer->data.cfg0 = ((chip->CFG0_RAW &
2696 + ~(7U << 6)) | ((((cwperpage >> 1)-1) << 6)));
2697 + dma_buffer->data.cfg1 = chip->CFG1_RAW |
2698 + (chip->CFG1 & CFG1_WIDE_FLASH);
2699 + }
2700 +
2701 + if (!interleave_enable) {
2702 + if (start_sector == (cwperpage - 1)) {
2703 + dma_buffer->data.nandc10_addr0 =
2704 + (page << 16) | oob_col;
2705 + dma_buffer->data.nc10_flash_dev_cmd_vld = 0xD;
2706 + dma_buffer->data.nc10_flash_dev_cmd1 =
2707 + 0xF00F3000;
2708 + } else {
2709 + dma_buffer->data.nandc01_addr0 = page << 16;
2710 + /* NC10 ADDR0 points to the next code word */
2711 + dma_buffer->data.nandc10_addr0 = (page << 16) |
2712 + cw_offset;
2713 + dma_buffer->data.nc10_flash_dev_cmd_vld = 0x1D;
2714 + dma_buffer->data.nc10_flash_dev_cmd1 =
2715 + 0xF00FE005;
2716 + }
2717 + } else {
2718 + dma_buffer->data.nandc01_addr0 =
2719 + dma_buffer->data.nandc10_addr0 =
2720 + (page << 16) | oob_col;
2721 + }
2722 + /* ADDR1 */
2723 + dma_buffer->data.nandc11_addr1 = (page >> 16) & 0xff;
2724 +
2725 + dma_buffer->data.adm_mux_data_ack_req_nc01 = 0x00000A3C;
2726 + dma_buffer->data.adm_mux_cmd_ack_req_nc01 = 0x0000053C;
2727 + dma_buffer->data.adm_mux_data_ack_req_nc10 = 0x00000F28;
2728 + dma_buffer->data.adm_mux_cmd_ack_req_nc10 = 0x00000F14;
2729 + dma_buffer->data.adm_default_mux = 0x00000FC0;
2730 + dma_buffer->data.nc10_flash_dev_cmd_vld_default = 0x1D;
2731 + dma_buffer->data.nc10_flash_dev_cmd1_default = 0xF00F3000;
2732 +
2733 + dma_buffer->data.ebi2_chip_select_cfg0 = 0x00000805;
2734 + dma_buffer->data.default_ebi2_chip_select_cfg0 = 0x00000801;
2735 +
2736 + /* chipsel_0 + enable DM interface */
2737 + dma_buffer->data.chipsel_cs0 = (1<<4) | 4;
2738 + /* chipsel_1 + enable DM interface */
2739 + dma_buffer->data.chipsel_cs1 = (1<<4) | 5;
2740 +
2741 + /* GO bit for the EXEC register */
2742 + dma_buffer->data.exec = 1;
2743 +
2744 + BUILD_BUG_ON(16 != ARRAY_SIZE(dma_buffer->data.result));
2745 +
2746 + for (n = start_sector; n < cwperpage; n++) {
2747 + /* flash + buffer status return words */
2748 + dma_buffer->data.result[n].flash_status = 0xeeeeeeee;
2749 + dma_buffer->data.result[n].buffer_status = 0xeeeeeeee;
2750 +
2751 + if (n == start_sector) {
2752 + if (!interleave_enable) {
2753 + cmd->cmd = 0;
2754 + cmd->src = msm_virt_to_dma(chip,
2755 + &dma_buffer->
2756 + data.nc10_flash_dev_cmd_vld);
2757 + cmd->dst = NC10(MSM_NAND_DEV_CMD_VLD);
2758 + cmd->len = 4;
2759 + cmd++;
2760 +
2761 + cmd->cmd = 0;
2762 + cmd->src = msm_virt_to_dma(chip,
2763 + &dma_buffer->data.nc10_flash_dev_cmd1);
2764 + cmd->dst = NC10(MSM_NAND_DEV_CMD1);
2765 + cmd->len = 4;
2766 + cmd++;
2767 +
2768 + /* NC01, NC10 --> ADDR1 */
2769 + cmd->cmd = 0;
2770 + cmd->src = msm_virt_to_dma(chip,
2771 + &dma_buffer->data.nandc11_addr1);
2772 + cmd->dst = NC11(MSM_NAND_ADDR1);
2773 + cmd->len = 8;
2774 + cmd++;
2775 +
2776 + cmd->cmd = 0;
2777 + cmd->src = msm_virt_to_dma(chip,
2778 + &dma_buffer->data.cfg0);
2779 + cmd->dst = NC11(MSM_NAND_DEV0_CFG0);
2780 + if (enable_bch_ecc)
2781 + cmd->len = 12;
2782 + else
2783 + cmd->len = 8;
2784 + cmd++;
2785 + } else {
2786 + /* enable CS0 & CS1 */
2787 + cmd->cmd = 0;
2788 + cmd->src = msm_virt_to_dma(chip,
2789 + &dma_buffer->
2790 + data.ebi2_chip_select_cfg0);
2791 + cmd->dst = EBI2_CHIP_SELECT_CFG0;
2792 + cmd->len = 4;
2793 + cmd++;
2794 +
2795 + /* NC01, NC10 --> ADDR1 */
2796 + cmd->cmd = 0;
2797 + cmd->src = msm_virt_to_dma(chip,
2798 + &dma_buffer->data.nandc11_addr1);
2799 + cmd->dst = NC11(MSM_NAND_ADDR1);
2800 + cmd->len = 4;
2801 + cmd++;
2802 +
2803 + /* Enable CS0 for NC01 */
2804 + cmd->cmd = 0;
2805 + cmd->src = msm_virt_to_dma(chip,
2806 + &dma_buffer->data.chipsel_cs0);
2807 + cmd->dst =
2808 + NC01(MSM_NAND_FLASH_CHIP_SELECT);
2809 + cmd->len = 4;
2810 + cmd++;
2811 +
2812 + /* Enable CS1 for NC10 */
2813 + cmd->cmd = 0;
2814 + cmd->src = msm_virt_to_dma(chip,
2815 + &dma_buffer->data.chipsel_cs1);
2816 + cmd->dst =
2817 + NC10(MSM_NAND_FLASH_CHIP_SELECT);
2818 + cmd->len = 4;
2819 + cmd++;
2820 +
2821 + /* config DEV0_CFG0 & CFG1 for CS0 */
2822 + cmd->cmd = 0;
2823 + cmd->src = msm_virt_to_dma(chip,
2824 + &dma_buffer->data.cfg0);
2825 + cmd->dst = NC01(MSM_NAND_DEV0_CFG0);
2826 + cmd->len = 8;
2827 + cmd++;
2828 +
2829 + /* config DEV1_CFG0 & CFG1 for CS1 */
2830 + cmd->cmd = 0;
2831 + cmd->src = msm_virt_to_dma(chip,
2832 + &dma_buffer->data.cfg0);
2833 + cmd->dst = NC10(MSM_NAND_DEV1_CFG0);
2834 + cmd->len = 8;
2835 + cmd++;
2836 + }
2837 +
2838 + dma_buffer->data.ecccfg = chip->ecc_buf_cfg;
2839 + cmd->cmd = 0;
2840 + cmd->src = msm_virt_to_dma(chip,
2841 + &dma_buffer->data.ecccfg);
2842 + cmd->dst = NC11(MSM_NAND_EBI2_ECC_BUF_CFG);
2843 + cmd->len = 4;
2844 + cmd++;
2845 +
2846 + /* if 'only' the last code word */
2847 + if (n == cwperpage - 1) {
2848 + /* MASK CMD ACK/REQ --> NC01 (0x53C)*/
2849 + cmd->cmd = 0;
2850 + cmd->src = msm_virt_to_dma(chip,
2851 + &dma_buffer->
2852 + data.adm_mux_cmd_ack_req_nc01);
2853 + cmd->dst = EBI2_NAND_ADM_MUX;
2854 + cmd->len = 4;
2855 + cmd++;
2856 +
2857 + /* CMD */
2858 + cmd->cmd = DST_CRCI_NAND_CMD;
2859 + cmd->src = msm_virt_to_dma(chip,
2860 + &dma_buffer->data.cmd);
2861 + cmd->dst = NC10(MSM_NAND_FLASH_CMD);
2862 + cmd->len = 4;
2863 + cmd++;
2864 +
2865 + /* NC10 --> ADDR0 ( 0x0 ) */
2866 + cmd->cmd = 0;
2867 + cmd->src = msm_virt_to_dma(chip,
2868 + &dma_buffer->data.nandc10_addr0);
2869 + cmd->dst = NC10(MSM_NAND_ADDR0);
2870 + cmd->len = 4;
2871 + cmd++;
2872 +
2873 + /* kick the execute reg for NC10 */
2874 + cmd->cmd = 0;
2875 + cmd->src = msm_virt_to_dma(chip,
2876 + &dma_buffer->data.exec);
2877 + cmd->dst = NC10(MSM_NAND_EXEC_CMD);
2878 + cmd->len = 4;
2879 + cmd++;
2880 +
2881 + /* MASK DATA ACK/REQ --> NC01 (0xA3C)*/
2882 + cmd->cmd = 0;
2883 + cmd->src = msm_virt_to_dma(chip,
2884 + &dma_buffer->
2885 + data.adm_mux_data_ack_req_nc01);
2886 + cmd->dst = EBI2_NAND_ADM_MUX;
2887 + cmd->len = 4;
2888 + cmd++;
2889 +
2890 + /* block on data ready from NC10, then
2891 + * read the status register
2892 + */
2893 + cmd->cmd = SRC_CRCI_NAND_DATA;
2894 + cmd->src = NC10(MSM_NAND_FLASH_STATUS);
2895 + cmd->dst = msm_virt_to_dma(chip,
2896 + &dma_buffer->data.result[n]);
2897 + /* MSM_NAND_FLASH_STATUS +
2898 + * MSM_NAND_BUFFER_STATUS
2899 + */
2900 + cmd->len = 8;
2901 + cmd++;
2902 + } else {
2903 + /* NC01 --> ADDR0 */
2904 + cmd->cmd = 0;
2905 + cmd->src = msm_virt_to_dma(chip,
2906 + &dma_buffer->data.nandc01_addr0);
2907 + cmd->dst = NC01(MSM_NAND_ADDR0);
2908 + cmd->len = 4;
2909 + cmd++;
2910 +
2911 +				/* NC10 --> ADDR0 */
2912 + cmd->cmd = 0;
2913 + cmd->src = msm_virt_to_dma(chip,
2914 + &dma_buffer->data.nandc10_addr0);
2915 + cmd->dst = NC10(MSM_NAND_ADDR0);
2916 + cmd->len = 4;
2917 + cmd++;
2918 +
2919 + /* MASK CMD ACK/REQ --> NC10 (0xF14)*/
2920 + cmd->cmd = 0;
2921 + cmd->src = msm_virt_to_dma(chip,
2922 + &dma_buffer->
2923 + data.adm_mux_cmd_ack_req_nc10);
2924 + cmd->dst = EBI2_NAND_ADM_MUX;
2925 + cmd->len = 4;
2926 + cmd++;
2927 +
2928 + /* CMD */
2929 + cmd->cmd = DST_CRCI_NAND_CMD;
2930 + cmd->src = msm_virt_to_dma(chip,
2931 + &dma_buffer->data.cmd);
2932 + cmd->dst = NC01(MSM_NAND_FLASH_CMD);
2933 + cmd->len = 4;
2934 + cmd++;
2935 +
2936 + /* kick the execute register for NC01*/
2937 + cmd->cmd = 0;
2938 + cmd->src = msm_virt_to_dma(chip,
2939 + &dma_buffer->data.exec);
2940 + cmd->dst = NC01(MSM_NAND_EXEC_CMD);
2941 + cmd->len = 4;
2942 + cmd++;
2943 + }
2944 + }
2945 +
2946 + /* read data block
2947 + * (only valid if status says success)
2948 + */
2949 + if (ops->datbuf || (ops->oobbuf &&
2950 + ops->mode != MTD_OPS_AUTO_OOB)) {
2951 + if (ops->mode != MTD_OPS_RAW)
2952 + sectordatasize = (n < (cwperpage - 1))
2953 + ? 516 : (512 - ((cwperpage - 1) << 2));
2954 + else
2955 + sectordatasize = chip->cw_size;
2956 +
2957 + if (n % 2 == 0) {
2958 + /* MASK DATA ACK/REQ --> NC10 (0xF28)*/
2959 + cmd->cmd = 0;
2960 + cmd->src = msm_virt_to_dma(chip,
2961 + &dma_buffer->
2962 + data.adm_mux_data_ack_req_nc10);
2963 + cmd->dst = EBI2_NAND_ADM_MUX;
2964 + cmd->len = 4;
2965 + cmd++;
2966 +
2967 + /* block on data ready from NC01, then
2968 + * read the status register
2969 + */
2970 + cmd->cmd = SRC_CRCI_NAND_DATA;
2971 + cmd->src = NC01(MSM_NAND_FLASH_STATUS);
2972 + cmd->dst = msm_virt_to_dma(chip,
2973 + &dma_buffer->data.result[n]);
2974 + /* MSM_NAND_FLASH_STATUS +
2975 + * MSM_NAND_BUFFER_STATUS
2976 + */
2977 + cmd->len = 8;
2978 + cmd++;
2979 +
2980 + /* MASK CMD ACK/REQ --> NC01 (0x53C)*/
2981 + cmd->cmd = 0;
2982 + cmd->src = msm_virt_to_dma(chip,
2983 + &dma_buffer->
2984 + data.adm_mux_cmd_ack_req_nc01);
2985 + cmd->dst = EBI2_NAND_ADM_MUX;
2986 + cmd->len = 4;
2987 + cmd++;
2988 +
2989 + /* CMD */
2990 + cmd->cmd = DST_CRCI_NAND_CMD;
2991 + cmd->src = msm_virt_to_dma(chip,
2992 + &dma_buffer->data.cmd);
2993 + cmd->dst = NC10(MSM_NAND_FLASH_CMD);
2994 + cmd->len = 4;
2995 + cmd++;
2996 +
2997 + /* kick the execute register for NC10 */
2998 + cmd->cmd = 0;
2999 + cmd->src = msm_virt_to_dma(chip,
3000 + &dma_buffer->data.exec);
3001 + cmd->dst = NC10(MSM_NAND_EXEC_CMD);
3002 + cmd->len = 4;
3003 + cmd++;
3004 +
3005 + /* Read only when there is data
3006 + * buffer
3007 + */
3008 + if (ops->datbuf) {
3009 + cmd->cmd = 0;
3010 + cmd->src =
3011 + NC01(MSM_NAND_FLASH_BUFFER);
3012 + cmd->dst = data_dma_addr_curr;
3013 + data_dma_addr_curr +=
3014 + sectordatasize;
3015 + cmd->len = sectordatasize;
3016 + cmd++;
3017 + }
3018 + } else {
3019 + /* MASK DATA ACK/REQ -->
3020 + * NC01 (0xA3C)
3021 + */
3022 + cmd->cmd = 0;
3023 + cmd->src = msm_virt_to_dma(chip,
3024 + &dma_buffer->
3025 + data.adm_mux_data_ack_req_nc01);
3026 + cmd->dst = EBI2_NAND_ADM_MUX;
3027 + cmd->len = 4;
3028 + cmd++;
3029 +
3030 + /* block on data ready from NC10
3031 + * then read the status register
3032 + */
3033 + cmd->cmd = SRC_CRCI_NAND_DATA;
3034 + cmd->src =
3035 + NC10(MSM_NAND_FLASH_STATUS);
3036 + cmd->dst = msm_virt_to_dma(chip,
3037 + &dma_buffer->data.result[n]);
3038 + /* MSM_NAND_FLASH_STATUS +
3039 + * MSM_NAND_BUFFER_STATUS
3040 + */
3041 + cmd->len = 8;
3042 + cmd++;
3043 + if (n != cwperpage - 1) {
3044 + /* MASK CMD ACK/REQ -->
3045 + * NC10 (0xF14)
3046 + */
3047 + cmd->cmd = 0;
3048 + cmd->src =
3049 + msm_virt_to_dma(chip,
3050 + &dma_buffer->
3051 + data.adm_mux_cmd_ack_req_nc10);
3052 + cmd->dst = EBI2_NAND_ADM_MUX;
3053 + cmd->len = 4;
3054 + cmd++;
3055 +
3056 + /* CMD */
3057 + cmd->cmd = DST_CRCI_NAND_CMD;
3058 + cmd->src = msm_virt_to_dma(chip,
3059 + &dma_buffer->data.cmd);
3060 + cmd->dst =
3061 + NC01(MSM_NAND_FLASH_CMD);
3062 + cmd->len = 4;
3063 + cmd++;
3064 +
3065 + /* EXEC */
3066 + cmd->cmd = 0;
3067 + cmd->src = msm_virt_to_dma(chip,
3068 + &dma_buffer->data.exec);
3069 + cmd->dst =
3070 + NC01(MSM_NAND_EXEC_CMD);
3071 + cmd->len = 4;
3072 + cmd++;
3073 + }
3074 +
3075 + /* Read only when there is data
3076 + * buffer
3077 + */
3078 + if (ops->datbuf) {
3079 + cmd->cmd = 0;
3080 + cmd->src =
3081 + NC10(MSM_NAND_FLASH_BUFFER);
3082 + cmd->dst = data_dma_addr_curr;
3083 + data_dma_addr_curr +=
3084 + sectordatasize;
3085 + cmd->len = sectordatasize;
3086 + cmd++;
3087 + }
3088 + }
3089 + }
3090 +
3091 + if (ops->oobbuf && (n == (cwperpage - 1)
3092 + || ops->mode != MTD_OPS_AUTO_OOB)) {
3093 + cmd->cmd = 0;
3094 + if (n == (cwperpage - 1)) {
3095 + /* Use NC10 for reading the
3096 + * last codeword!!!
3097 + */
3098 + cmd->src = NC10(MSM_NAND_FLASH_BUFFER) +
3099 + (512 - ((cwperpage - 1) << 2));
3100 + sectoroobsize = (cwperpage << 2);
3101 + if (ops->mode != MTD_OPS_AUTO_OOB)
3102 + sectoroobsize +=
3103 + chip->ecc_parity_bytes;
3104 + } else {
3105 + if (n % 2 == 0)
3106 + cmd->src =
3107 + NC01(MSM_NAND_FLASH_BUFFER)
3108 + + 516;
3109 + else
3110 + cmd->src =
3111 + NC10(MSM_NAND_FLASH_BUFFER)
3112 + + 516;
3113 + sectoroobsize = chip->ecc_parity_bytes;
3114 + }
3115 + cmd->dst = oob_dma_addr_curr;
3116 + if (sectoroobsize < oob_len)
3117 + cmd->len = sectoroobsize;
3118 + else
3119 + cmd->len = oob_len;
3120 + oob_dma_addr_curr += cmd->len;
3121 + oob_len -= cmd->len;
3122 + if (cmd->len > 0)
3123 + cmd++;
3124 + }
3125 + }
3126 + /* ADM --> Default mux state (0xFC0) */
3127 + cmd->cmd = 0;
3128 + cmd->src = msm_virt_to_dma(chip,
3129 + &dma_buffer->data.adm_default_mux);
3130 + cmd->dst = EBI2_NAND_ADM_MUX;
3131 + cmd->len = 4;
3132 + cmd++;
3133 +
3134 + if (!interleave_enable) {
3135 + cmd->cmd = 0;
3136 + cmd->src = msm_virt_to_dma(chip,
3137 + &dma_buffer->data.nc10_flash_dev_cmd_vld_default);
3138 + cmd->dst = NC10(MSM_NAND_DEV_CMD_VLD);
3139 + cmd->len = 4;
3140 + cmd++;
3141 +
3142 + cmd->cmd = 0;
3143 + cmd->src = msm_virt_to_dma(chip,
3144 + &dma_buffer->data.nc10_flash_dev_cmd1_default);
3145 + cmd->dst = NC10(MSM_NAND_DEV_CMD1);
3146 + cmd->len = 4;
3147 + cmd++;
3148 + } else {
3149 + /* disable CS1 */
3150 + cmd->cmd = 0;
3151 + cmd->src = msm_virt_to_dma(chip,
3152 + &dma_buffer->data.default_ebi2_chip_select_cfg0);
3153 + cmd->dst = EBI2_CHIP_SELECT_CFG0;
3154 + cmd->len = 4;
3155 + cmd++;
3156 + }
3157 +
3158 + BUILD_BUG_ON(16 * 6 + 20 != ARRAY_SIZE(dma_buffer->cmd));
3159 + BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
3160 + dma_buffer->cmd[0].cmd |= CMD_OCB;
3161 + cmd[-1].cmd |= CMD_OCU | CMD_LC;
3162 +
3163 + dma_buffer->cmdptr =
3164 + (msm_virt_to_dma(chip, dma_buffer->cmd) >> 3)
3165 + | CMD_PTR_LP;
3166 +
3167 + mb();
3168 + msm_dmov_exec_cmd(chip->dma_channel,
3169 + DMOV_CMD_PTR_LIST | DMOV_CMD_ADDR(msm_virt_to_dma(chip,
3170 + &dma_buffer->cmdptr)));
3171 + mb();
3172 +
3173 + /* if any of the writes failed (0x10), or there
3174 + * was a protection violation (0x100), we lose
3175 + */
3176 + pageerr = rawerr = 0;
3177 + for (n = start_sector; n < cwperpage; n++) {
3178 + if (dma_buffer->data.result[n].flash_status & 0x110) {
3179 + rawerr = -EIO;
3180 + break;
3181 + }
3182 + }
3183 + if (rawerr) {
3184 + if (ops->datbuf && ops->mode != MTD_OPS_RAW) {
3185 + uint8_t *datbuf = ops->datbuf +
3186 + pages_read * mtd->writesize;
3187 +
3188 + dma_sync_single_for_cpu(chip->dev,
3189 + data_dma_addr_curr-mtd->writesize,
3190 + mtd->writesize, DMA_BIDIRECTIONAL);
3191 +
3192 + for (n = 0; n < mtd->writesize; n++) {
3193 + /* empty blocks read 0x54 at
3194 + * these offsets
3195 + */
3196 + if ((n % 516 == 3 || n % 516 == 175)
3197 + && datbuf[n] == 0x54)
3198 + datbuf[n] = 0xff;
3199 + if (datbuf[n] != 0xff) {
3200 + pageerr = rawerr;
3201 + break;
3202 + }
3203 + }
3204 +
3205 + dma_sync_single_for_device(chip->dev,
3206 + data_dma_addr_curr-mtd->writesize,
3207 + mtd->writesize, DMA_BIDIRECTIONAL);
3208 +
3209 + }
3210 + if (ops->oobbuf) {
3211 + dma_sync_single_for_cpu(chip->dev,
3212 + oob_dma_addr_curr - (ops->ooblen - oob_len),
3213 + ops->ooblen - oob_len, DMA_BIDIRECTIONAL);
3214 +
3215 + for (n = 0; n < ops->ooblen; n++) {
3216 + if (ops->oobbuf[n] != 0xff) {
3217 + pageerr = rawerr;
3218 + break;
3219 + }
3220 + }
3221 +
3222 + dma_sync_single_for_device(chip->dev,
3223 + oob_dma_addr_curr - (ops->ooblen - oob_len),
3224 + ops->ooblen - oob_len, DMA_BIDIRECTIONAL);
3225 + }
3226 + }
3227 + if (pageerr) {
3228 + for (n = start_sector; n < cwperpage; n++) {
3229 + if (dma_buffer->data.result[n].buffer_status
3230 + & chip->uncorrectable_bit_mask) {
3231 + /* not thread safe */
3232 + mtd->ecc_stats.failed++;
3233 + pageerr = -EBADMSG;
3234 + break;
3235 + }
3236 + }
3237 + }
 3238 +	if (!rawerr) { /* check for correctable errors */
3239 + for (n = start_sector; n < cwperpage; n++) {
3240 + ecc_errors = dma_buffer->data.
3241 + result[n].buffer_status
3242 + & chip->num_err_mask;
3243 + if (ecc_errors) {
3244 + total_ecc_errors += ecc_errors;
3245 + /* not thread safe */
3246 + mtd->ecc_stats.corrected += ecc_errors;
3247 + if (ecc_errors > 1)
3248 + pageerr = -EUCLEAN;
3249 + }
3250 + }
3251 + }
3252 + if (pageerr && (pageerr != -EUCLEAN || err == 0))
3253 + err = pageerr;
3254 +
3255 +#if VERBOSE
3256 + if (rawerr && !pageerr) {
3257 + pr_err("msm_nand_read_oob_dualnandc "
3258 + "%llx %x %x empty page\n",
3259 + (loff_t)page * mtd->writesize, ops->len,
3260 + ops->ooblen);
3261 + } else {
3262 + for (n = start_sector; n < cwperpage; n++) {
3263 + if (n%2) {
3264 + pr_info("NC10: flash_status[%d] = %x, "
3265 + "buffr_status[%d] = %x\n",
3266 + n, dma_buffer->
3267 + data.result[n].flash_status,
3268 + n, dma_buffer->
3269 + data.result[n].buffer_status);
3270 + } else {
3271 + pr_info("NC01: flash_status[%d] = %x, "
3272 + "buffr_status[%d] = %x\n",
3273 + n, dma_buffer->
3274 + data.result[n].flash_status,
3275 + n, dma_buffer->
3276 + data.result[n].buffer_status);
3277 + }
3278 + }
3279 + }
3280 +#endif
3281 + if (err && err != -EUCLEAN && err != -EBADMSG)
3282 + break;
3283 + pages_read++;
3284 + page++;
3285 + }
3286 +
3287 + msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
3288 +
3289 + if (ops->oobbuf) {
3290 + dma_unmap_page(chip->dev, oob_dma_addr,
3291 + ops->ooblen, DMA_FROM_DEVICE);
3292 + }
3293 +err_dma_map_oobbuf_failed:
3294 + if (ops->datbuf) {
3295 + dma_unmap_page(chip->dev, data_dma_addr,
3296 + ops->len, DMA_BIDIRECTIONAL);
3297 + }
3298 +
3299 + if (ops->mode != MTD_OPS_RAW)
3300 + ops->retlen = mtd->writesize * pages_read;
3301 + else
3302 + ops->retlen = (mtd->writesize + mtd->oobsize) *
3303 + pages_read;
3304 + ops->oobretlen = ops->ooblen - oob_len;
3305 + if (err)
3306 + pr_err("msm_nand_read_oob_dualnandc "
3307 + "%llx %x %x failed %d, corrected %d\n",
3308 + from, ops->datbuf ? ops->len : 0, ops->ooblen, err,
3309 + total_ecc_errors);
3310 +#if VERBOSE
3311 + pr_info("\n%s: ret %d, retlen %d oobretlen %d\n",
3312 + __func__, err, ops->retlen, ops->oobretlen);
3313 +
3314 + pr_info("==================================================="
3315 + "==========\n");
3316 +#endif
3317 + return err;
3318 +}
3319 +
3320 +static int
3321 +msm_nand_read(struct mtd_info *mtd, loff_t from, size_t len,
3322 + size_t *retlen, u_char *buf)
3323 +{
3324 + int ret;
3325 + struct mtd_ecc_stats stats;
3326 + struct mtd_oob_ops ops;
3327 + int (*read_oob)(struct mtd_info *, loff_t, struct mtd_oob_ops *);
3328 +
3329 + if (!dual_nand_ctlr_present)
3330 + read_oob = msm_nand_read_oob;
3331 + else
3332 + read_oob = msm_nand_read_oob_dualnandc;
3333 +
3334 + ops.mode = MTD_OPS_PLACE_OOB;
3335 + ops.retlen = 0;
3336 + ops.ooblen = 0;
3337 + ops.oobbuf = NULL;
3338 + ret = 0;
3339 + *retlen = 0;
3340 + stats = mtd->ecc_stats;
3341 +
3342 + if ((from & (mtd->writesize - 1)) == 0 && len == mtd->writesize) {
3343 + /* reading a page on page boundary */
3344 + ops.len = len;
3345 + ops.datbuf = buf;
3346 + ret = read_oob(mtd, from, &ops);
3347 + *retlen = ops.retlen;
3348 + } else if (len > 0) {
3349 + /* reading any size on any offset. partial page is supported */
3350 + u8 *bounce_buf;
3351 + loff_t aligned_from;
3352 + loff_t offset;
3353 + size_t actual_len;
3354 +
3355 + bounce_buf = kmalloc(mtd->writesize, GFP_KERNEL);
3356 + if (!bounce_buf) {
3357 + pr_err("%s: could not allocate memory\n", __func__);
3358 + ret = -ENOMEM;
3359 + goto out;
3360 + }
3361 +
3362 + ops.len = mtd->writesize;
3363 + offset = from & (mtd->writesize - 1);
3364 + aligned_from = from - offset;
3365 +
3366 + for (;;) {
3367 + int no_copy;
3368 +
3369 + actual_len = mtd->writesize - offset;
3370 + if (actual_len > len)
3371 + actual_len = len;
3372 +
3373 + no_copy = (offset == 0 && actual_len == mtd->writesize);
3374 + ops.datbuf = (no_copy) ? buf : bounce_buf;
3375 +
3376 + /*
3377 + * MTD API requires that all the pages are to
3378 + * be read even if uncorrectable or
3379 + * correctable ECC errors occur.
3380 + */
3381 + ret = read_oob(mtd, aligned_from, &ops);
3382 + if (ret == -EBADMSG || ret == -EUCLEAN)
3383 + ret = 0;
3384 +
3385 + if (ret < 0)
3386 + break;
3387 +
3388 + if (!no_copy)
3389 + memcpy(buf, bounce_buf + offset, actual_len);
3390 +
3391 + len -= actual_len;
3392 + *retlen += actual_len;
3393 + if (len == 0)
3394 + break;
3395 +
3396 + buf += actual_len;
3397 + offset = 0;
3398 + aligned_from += mtd->writesize;
3399 + }
3400 +
3401 + kfree(bounce_buf);
3402 + }
3403 +
3404 +out:
3405 + if (ret)
3406 + return ret;
3407 +
3408 + if (mtd->ecc_stats.failed - stats.failed)
3409 + return -EBADMSG;
3410 +
3411 + if (mtd->ecc_stats.corrected - stats.corrected)
3412 + return -EUCLEAN;
3413 +
3414 + return 0;
3415 +}
3416 +
3417 +static int
3418 +msm_nand_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
3419 +{
3420 + struct msm_nand_chip *chip = mtd->priv;
3421 + struct {
3422 + dmov_s cmd[8 * 7 + 2];
3423 + unsigned cmdptr;
3424 + struct {
3425 + uint32_t cmd;
3426 + uint32_t addr0;
3427 + uint32_t addr1;
3428 + uint32_t chipsel;
3429 + uint32_t cfg0;
3430 + uint32_t cfg1;
3431 + uint32_t eccbchcfg;
3432 + uint32_t exec;
3433 + uint32_t ecccfg;
3434 + uint32_t clrfstatus;
3435 + uint32_t clrrstatus;
3436 + uint32_t flash_status[8];
3437 + } data;
3438 + } *dma_buffer;
3439 + dmov_s *cmd;
3440 + unsigned n;
3441 + unsigned page = 0;
3442 + uint32_t oob_len;
3443 + uint32_t sectordatawritesize;
3444 + int err = 0;
3445 + dma_addr_t data_dma_addr = 0;
3446 + dma_addr_t oob_dma_addr = 0;
3447 + dma_addr_t data_dma_addr_curr = 0;
3448 + dma_addr_t oob_dma_addr_curr = 0;
3449 + uint8_t *dat_bounce_buf = NULL;
3450 + uint8_t *oob_bounce_buf = NULL;
3451 + unsigned page_count;
3452 + unsigned pages_written = 0;
3453 + unsigned cwperpage;
3454 +#if VERBOSE
3455 + pr_info("================================================="
3456 + "================\n");
3457 + pr_info("%s:\nto 0x%llx mode %d\ndatbuf 0x%p datlen 0x%x"
3458 + "\noobbuf 0x%p ooblen 0x%x\n",
3459 + __func__, to, ops->mode, ops->datbuf, ops->len,
3460 + ops->oobbuf, ops->ooblen);
3461 +#endif
3462 +
3463 + if (mtd->writesize == 2048)
3464 + page = to >> 11;
3465 +
3466 + if (mtd->writesize == 4096)
3467 + page = to >> 12;
3468 +
3469 + oob_len = ops->ooblen;
3470 + cwperpage = (mtd->writesize >> 9);
3471 +
3472 + if (to & (mtd->writesize - 1)) {
3473 + pr_err("%s: unsupported to, 0x%llx\n", __func__, to);
3474 + return -EINVAL;
3475 + }
3476 +
3477 + if (ops->mode != MTD_OPS_RAW) {
3478 + if (ops->ooblen != 0 && ops->mode != MTD_OPS_AUTO_OOB) {
3479 + pr_err("%s: unsupported ops->mode,%d\n",
3480 + __func__, ops->mode);
3481 + return -EINVAL;
3482 + }
3483 + if ((ops->len % mtd->writesize) != 0) {
3484 + pr_err("%s: unsupported ops->len, %d\n",
3485 + __func__, ops->len);
3486 + return -EINVAL;
3487 + }
3488 + } else {
3489 + if ((ops->len % (mtd->writesize + mtd->oobsize)) != 0) {
3490 + pr_err("%s: unsupported ops->len, "
3491 + "%d for MTD_OPS_RAW mode\n",
3492 + __func__, ops->len);
3493 + return -EINVAL;
3494 + }
3495 + }
3496 +
3497 + if (ops->datbuf == NULL) {
3498 + pr_err("%s: unsupported ops->datbuf == NULL\n", __func__);
3499 + return -EINVAL;
3500 + }
3501 + if (ops->mode != MTD_OPS_RAW && ops->ooblen != 0 && ops->ooboffs != 0) {
3502 + pr_err("%s: unsupported ops->ooboffs, %d\n",
3503 + __func__, ops->ooboffs);
3504 + return -EINVAL;
3505 + }
3506 +
3507 + if (ops->datbuf) {
3508 + data_dma_addr_curr = data_dma_addr =
3509 + msm_nand_dma_map(chip->dev, ops->datbuf,
3510 + ops->len, DMA_TO_DEVICE,
3511 + &dat_bounce_buf);
3512 + if (dma_mapping_error(chip->dev, data_dma_addr)) {
3513 + pr_err("msm_nand_write_oob: failed to get dma addr "
3514 + "for %p\n", ops->datbuf);
3515 + return -EIO;
3516 + }
3517 + }
3518 + if (ops->oobbuf) {
3519 + oob_dma_addr_curr = oob_dma_addr =
3520 + msm_nand_dma_map(chip->dev, ops->oobbuf,
3521 + ops->ooblen, DMA_TO_DEVICE,
3522 + &oob_bounce_buf);
3523 + if (dma_mapping_error(chip->dev, oob_dma_addr)) {
3524 + pr_err("msm_nand_write_oob: failed to get dma addr "
3525 + "for %p\n", ops->oobbuf);
3526 + err = -EIO;
3527 + goto err_dma_map_oobbuf_failed;
3528 + }
3529 + }
3530 + if (ops->mode != MTD_OPS_RAW)
3531 + page_count = ops->len / mtd->writesize;
3532 + else
3533 + page_count = ops->len / (mtd->writesize + mtd->oobsize);
3534 +
3535 + wait_event(chip->wait_queue, (dma_buffer =
3536 + msm_nand_get_dma_buffer(chip, sizeof(*dma_buffer))));
3537 +
3538 + while (page_count-- > 0) {
3539 + cmd = dma_buffer->cmd;
3540 +
3541 + if (ops->mode != MTD_OPS_RAW) {
3542 + dma_buffer->data.cfg0 = chip->CFG0;
3543 + dma_buffer->data.cfg1 = chip->CFG1;
3544 + if (enable_bch_ecc)
3545 + dma_buffer->data.eccbchcfg = chip->ecc_bch_cfg;
3546 + } else {
3547 + dma_buffer->data.cfg0 = (chip->CFG0_RAW &
3548 + ~(7U << 6)) | ((cwperpage-1) << 6);
3549 + dma_buffer->data.cfg1 = chip->CFG1_RAW |
3550 + (chip->CFG1 & CFG1_WIDE_FLASH);
3551 + }
3552 +
3553 + /* CMD / ADDR0 / ADDR1 / CHIPSEL program values */
3554 + dma_buffer->data.cmd = MSM_NAND_CMD_PRG_PAGE;
3555 + dma_buffer->data.addr0 = page << 16;
3556 + dma_buffer->data.addr1 = (page >> 16) & 0xff;
3557 + /* chipsel_0 + enable DM interface */
3558 + dma_buffer->data.chipsel = 0 | 4;
3559 +
3560 +
3561 + /* GO bit for the EXEC register */
3562 + dma_buffer->data.exec = 1;
3563 + dma_buffer->data.clrfstatus = 0x00000020;
3564 + dma_buffer->data.clrrstatus = 0x000000C0;
3565 +
3566 + BUILD_BUG_ON(8 != ARRAY_SIZE(dma_buffer->data.flash_status));
3567 +
3568 + for (n = 0; n < cwperpage ; n++) {
3569 + /* status return words */
3570 + dma_buffer->data.flash_status[n] = 0xeeeeeeee;
3571 + /* block on cmd ready, then
3572 + * write CMD / ADDR0 / ADDR1 / CHIPSEL regs in a burst
3573 + */
3574 + cmd->cmd = DST_CRCI_NAND_CMD;
3575 + cmd->src =
3576 + msm_virt_to_dma(chip, &dma_buffer->data.cmd);
3577 + cmd->dst = MSM_NAND_FLASH_CMD;
3578 + if (n == 0)
3579 + cmd->len = 16;
3580 + else
3581 + cmd->len = 4;
3582 + cmd++;
3583 +
3584 + if (n == 0) {
3585 + cmd->cmd = 0;
3586 + cmd->src = msm_virt_to_dma(chip,
3587 + &dma_buffer->data.cfg0);
3588 + cmd->dst = MSM_NAND_DEV0_CFG0;
3589 + if (enable_bch_ecc)
3590 + cmd->len = 12;
3591 + else
3592 + cmd->len = 8;
3593 + cmd++;
3594 +
3595 + dma_buffer->data.ecccfg = chip->ecc_buf_cfg;
3596 + cmd->cmd = 0;
3597 + cmd->src = msm_virt_to_dma(chip,
3598 + &dma_buffer->data.ecccfg);
3599 + cmd->dst = MSM_NAND_EBI2_ECC_BUF_CFG;
3600 + cmd->len = 4;
3601 + cmd++;
3602 + }
3603 +
3604 + /* write data block */
3605 + if (ops->mode != MTD_OPS_RAW) {
3606 + if (!boot_layout)
3607 + sectordatawritesize = (n < (cwperpage - 1)) ?
3608 + 516 : (512 - ((cwperpage - 1) << 2));
3609 + else
3610 + sectordatawritesize = 512;
3611 + } else {
3612 + sectordatawritesize = chip->cw_size;
3613 + }
3614 +
3615 + cmd->cmd = 0;
3616 + cmd->src = data_dma_addr_curr;
3617 + data_dma_addr_curr += sectordatawritesize;
3618 + cmd->dst = MSM_NAND_FLASH_BUFFER;
3619 + cmd->len = sectordatawritesize;
3620 + cmd++;
3621 +
3622 + if (ops->oobbuf) {
3623 + if (n == (cwperpage - 1)) {
3624 + cmd->cmd = 0;
3625 + cmd->src = oob_dma_addr_curr;
3626 + cmd->dst = MSM_NAND_FLASH_BUFFER +
3627 + (512 - ((cwperpage - 1) << 2));
3628 + if ((cwperpage << 2) < oob_len)
3629 + cmd->len = (cwperpage << 2);
3630 + else
3631 + cmd->len = oob_len;
3632 + oob_dma_addr_curr += cmd->len;
3633 + oob_len -= cmd->len;
3634 + if (cmd->len > 0)
3635 + cmd++;
3636 + }
3637 + if (ops->mode != MTD_OPS_AUTO_OOB) {
3638 + /* skip ecc bytes in oobbuf */
3639 + if (oob_len < chip->ecc_parity_bytes) {
3640 + oob_dma_addr_curr +=
3641 + chip->ecc_parity_bytes;
3642 + oob_len -=
3643 + chip->ecc_parity_bytes;
3644 + } else {
3645 + oob_dma_addr_curr += oob_len;
3646 + oob_len = 0;
3647 + }
3648 + }
3649 + }
3650 +
3651 + /* kick the execute register */
3652 + cmd->cmd = 0;
3653 + cmd->src =
3654 + msm_virt_to_dma(chip, &dma_buffer->data.exec);
3655 + cmd->dst = MSM_NAND_EXEC_CMD;
3656 + cmd->len = 4;
3657 + cmd++;
3658 +
3659 + /* block on data ready, then
3660 + * read the status register
3661 + */
3662 + cmd->cmd = SRC_CRCI_NAND_DATA;
3663 + cmd->src = MSM_NAND_FLASH_STATUS;
3664 + cmd->dst = msm_virt_to_dma(chip,
3665 + &dma_buffer->data.flash_status[n]);
3666 + cmd->len = 4;
3667 + cmd++;
3668 +
3669 + cmd->cmd = 0;
3670 + cmd->src = msm_virt_to_dma(chip,
3671 + &dma_buffer->data.clrfstatus);
3672 + cmd->dst = MSM_NAND_FLASH_STATUS;
3673 + cmd->len = 4;
3674 + cmd++;
3675 +
3676 + cmd->cmd = 0;
3677 + cmd->src = msm_virt_to_dma(chip,
3678 + &dma_buffer->data.clrrstatus);
3679 + cmd->dst = MSM_NAND_READ_STATUS;
3680 + cmd->len = 4;
3681 + cmd++;
3682 +
3683 + }
3684 +
3685 + dma_buffer->cmd[0].cmd |= CMD_OCB;
3686 + cmd[-1].cmd |= CMD_OCU | CMD_LC;
3687 + BUILD_BUG_ON(8 * 7 + 2 != ARRAY_SIZE(dma_buffer->cmd));
3688 + BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
3689 + dma_buffer->cmdptr =
3690 + (msm_virt_to_dma(chip, dma_buffer->cmd) >> 3) |
3691 + CMD_PTR_LP;
3692 +
3693 + mb();
3694 + msm_dmov_exec_cmd(chip->dma_channel,
3695 + DMOV_CMD_PTR_LIST | DMOV_CMD_ADDR(
3696 + msm_virt_to_dma(chip, &dma_buffer->cmdptr)));
3697 + mb();
3698 +
3699 + /* if any of the writes failed (0x10), or there was a
3700 + * protection violation (0x100), or the program success
3701 + * bit (0x80) is unset, we lose
3702 + */
3703 + err = 0;
3704 + for (n = 0; n < cwperpage; n++) {
3705 + if (dma_buffer->data.flash_status[n] & 0x110) {
3706 + err = -EIO;
3707 + break;
3708 + }
3709 + if (!(dma_buffer->data.flash_status[n] & 0x80)) {
3710 + err = -EIO;
3711 + break;
3712 + }
3713 + }
3714 +
3715 +#if VERBOSE
3716 + for (n = 0; n < cwperpage; n++)
3717 + pr_info("write pg %d: flash_status[%d] = %x\n", page,
3718 + n, dma_buffer->data.flash_status[n]);
3719 +
3720 +#endif
3721 + if (err)
3722 + break;
3723 + pages_written++;
3724 + page++;
3725 + }
3726 + if (ops->mode != MTD_OPS_RAW)
3727 + ops->retlen = mtd->writesize * pages_written;
3728 + else
3729 + ops->retlen = (mtd->writesize + mtd->oobsize) * pages_written;
3730 +
3731 + ops->oobretlen = ops->ooblen - oob_len;
3732 +
3733 + msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
3734 +
3735 + if (ops->oobbuf) {
3736 + msm_nand_dma_unmap(chip->dev, oob_dma_addr,
3737 + ops->ooblen, DMA_TO_DEVICE,
3738 + ops->oobbuf, oob_bounce_buf);
3739 + }
3740 +err_dma_map_oobbuf_failed:
3741 + if (ops->datbuf) {
3742 + msm_nand_dma_unmap(chip->dev, data_dma_addr, ops->len,
3743 + DMA_TO_DEVICE, ops->datbuf,
3744 + dat_bounce_buf);
3745 + }
3746 + if (err)
3747 + pr_err("msm_nand_write_oob %llx %x %x failed %d\n",
3748 + to, ops->len, ops->ooblen, err);
3749 +
3750 +#if VERBOSE
3751 + pr_info("\n%s: ret %d, retlen %d oobretlen %d\n",
3752 + __func__, err, ops->retlen, ops->oobretlen);
3753 +
3754 + pr_info("==================================================="
3755 + "==============\n");
3756 +#endif
3757 + return err;
3758 +}
3759 +
3760 +static int
3761 +msm_nand_write_oob_dualnandc(struct mtd_info *mtd, loff_t to,
3762 + struct mtd_oob_ops *ops)
3763 +{
3764 + struct msm_nand_chip *chip = mtd->priv;
3765 + struct {
3766 + dmov_s cmd[16 * 6 + 18];
3767 + unsigned cmdptr;
3768 + struct {
3769 + uint32_t cmd;
3770 + uint32_t nandc01_addr0;
3771 + uint32_t nandc10_addr0;
3772 + uint32_t nandc11_addr1;
3773 + uint32_t chipsel_cs0;
3774 + uint32_t chipsel_cs1;
3775 + uint32_t cfg0;
3776 + uint32_t cfg1;
3777 + uint32_t eccbchcfg;
3778 + uint32_t exec;
3779 + uint32_t ecccfg;
3780 + uint32_t cfg0_nc01;
3781 + uint32_t ebi2_chip_select_cfg0;
3782 + uint32_t adm_mux_data_ack_req_nc01;
3783 + uint32_t adm_mux_cmd_ack_req_nc01;
3784 + uint32_t adm_mux_data_ack_req_nc10;
3785 + uint32_t adm_mux_cmd_ack_req_nc10;
3786 + uint32_t adm_default_mux;
3787 + uint32_t default_ebi2_chip_select_cfg0;
3788 + uint32_t nc01_flash_dev_cmd_vld;
3789 + uint32_t nc10_flash_dev_cmd0;
3790 + uint32_t nc01_flash_dev_cmd_vld_default;
3791 + uint32_t nc10_flash_dev_cmd0_default;
3792 + uint32_t flash_status[16];
3793 + uint32_t clrfstatus;
3794 + uint32_t clrrstatus;
3795 + } data;
3796 + } *dma_buffer;
3797 + dmov_s *cmd;
3798 + unsigned n;
3799 + unsigned page = 0;
3800 + uint32_t oob_len;
3801 + uint32_t sectordatawritesize;
3802 + int err = 0;
3803 + dma_addr_t data_dma_addr = 0;
3804 + dma_addr_t oob_dma_addr = 0;
3805 + dma_addr_t data_dma_addr_curr = 0;
3806 + dma_addr_t oob_dma_addr_curr = 0;
3807 + unsigned page_count;
3808 + unsigned pages_written = 0;
3809 + unsigned cwperpage;
3810 + unsigned cw_offset = chip->cw_size;
3811 +#if VERBOSE
3812 + pr_info("================================================="
3813 + "============\n");
3814 + pr_info("%s:\nto 0x%llx mode %d\ndatbuf 0x%p datlen 0x%x"
3815 + "\noobbuf 0x%p ooblen 0x%x\n\n",
3816 + __func__, to, ops->mode, ops->datbuf, ops->len,
3817 + ops->oobbuf, ops->ooblen);
3818 +#endif
3819 +
3820 + if (mtd->writesize == 2048)
3821 + page = to >> 11;
3822 +
3823 + if (mtd->writesize == 4096)
3824 + page = to >> 12;
3825 +
3826 + if (interleave_enable)
3827 + page = (to >> 1) >> 12;
3828 +
3829 + oob_len = ops->ooblen;
3830 + cwperpage = (mtd->writesize >> 9);
3831 +
3832 + if (to & (mtd->writesize - 1)) {
3833 + pr_err("%s: unsupported to, 0x%llx\n", __func__, to);
3834 + return -EINVAL;
3835 + }
3836 +
3837 + if (ops->mode != MTD_OPS_RAW) {
3838 + if (ops->ooblen != 0 && ops->mode != MTD_OPS_AUTO_OOB) {
3839 + pr_err("%s: unsupported ops->mode,%d\n",
3840 + __func__, ops->mode);
3841 + return -EINVAL;
3842 + }
3843 + if ((ops->len % mtd->writesize) != 0) {
3844 + pr_err("%s: unsupported ops->len, %d\n",
3845 + __func__, ops->len);
3846 + return -EINVAL;
3847 + }
3848 + } else {
3849 + if ((ops->len % (mtd->writesize + mtd->oobsize)) != 0) {
3850 + pr_err("%s: unsupported ops->len, "
3851 + "%d for MTD_OPS_RAW mode\n",
3852 + __func__, ops->len);
3853 + return -EINVAL;
3854 + }
3855 + }
3856 +
3857 + if (ops->datbuf == NULL) {
3858 + pr_err("%s: unsupported ops->datbuf == NULL\n", __func__);
3859 + return -EINVAL;
3860 + }
3861 +
3862 + if (ops->mode != MTD_OPS_RAW && ops->ooblen != 0 && ops->ooboffs != 0) {
3863 + pr_err("%s: unsupported ops->ooboffs, %d\n",
3864 + __func__, ops->ooboffs);
3865 + return -EINVAL;
3866 + }
3867 +
3868 + if (ops->datbuf) {
3869 + data_dma_addr_curr = data_dma_addr =
3870 + msm_nand_dma_map(chip->dev, ops->datbuf,
3871 + ops->len, DMA_TO_DEVICE, NULL);
3872 + if (dma_mapping_error(chip->dev, data_dma_addr)) {
3873 + pr_err("msm_nand_write_oob_dualnandc:"
3874 + "failed to get dma addr "
3875 + "for %p\n", ops->datbuf);
3876 + return -EIO;
3877 + }
3878 + }
3879 + if (ops->oobbuf) {
3880 + oob_dma_addr_curr = oob_dma_addr =
3881 + msm_nand_dma_map(chip->dev, ops->oobbuf,
3882 + ops->ooblen, DMA_TO_DEVICE, NULL);
3883 + if (dma_mapping_error(chip->dev, oob_dma_addr)) {
3884 + pr_err("msm_nand_write_oob_dualnandc:"
3885 + "failed to get dma addr "
3886 + "for %p\n", ops->oobbuf);
3887 + err = -EIO;
3888 + goto err_dma_map_oobbuf_failed;
3889 + }
3890 + }
3891 + if (ops->mode != MTD_OPS_RAW)
3892 + page_count = ops->len / mtd->writesize;
3893 + else
3894 + page_count = ops->len / (mtd->writesize + mtd->oobsize);
3895 +
3896 + wait_event(chip->wait_queue, (dma_buffer =
3897 + msm_nand_get_dma_buffer(chip, sizeof(*dma_buffer))));
3898 +
3899 + if (chip->CFG1 & CFG1_WIDE_FLASH)
3900 + cw_offset >>= 1;
3901 +
3902 + dma_buffer->data.ebi2_chip_select_cfg0 = 0x00000805;
3903 + dma_buffer->data.adm_mux_data_ack_req_nc01 = 0x00000A3C;
3904 + dma_buffer->data.adm_mux_cmd_ack_req_nc01 = 0x0000053C;
3905 + dma_buffer->data.adm_mux_data_ack_req_nc10 = 0x00000F28;
3906 + dma_buffer->data.adm_mux_cmd_ack_req_nc10 = 0x00000F14;
3907 + dma_buffer->data.adm_default_mux = 0x00000FC0;
3908 + dma_buffer->data.default_ebi2_chip_select_cfg0 = 0x00000801;
3909 + dma_buffer->data.nc01_flash_dev_cmd_vld = 0x9;
3910 + dma_buffer->data.nc10_flash_dev_cmd0 = 0x1085D060;
3911 + dma_buffer->data.nc01_flash_dev_cmd_vld_default = 0x1D;
3912 + dma_buffer->data.nc10_flash_dev_cmd0_default = 0x1080D060;
3913 + dma_buffer->data.clrfstatus = 0x00000020;
3914 + dma_buffer->data.clrrstatus = 0x000000C0;
3915 +
3916 + while (page_count-- > 0) {
3917 + cmd = dma_buffer->cmd;
3918 +
3919 + if (ops->mode != MTD_OPS_RAW) {
3920 + dma_buffer->data.cfg0 = ((chip->CFG0 & ~(7U << 6))
3921 + & ~(1 << 4)) | ((((cwperpage >> 1)-1)) << 6);
3922 + dma_buffer->data.cfg1 = chip->CFG1;
3923 + if (enable_bch_ecc)
3924 + dma_buffer->data.eccbchcfg = chip->ecc_bch_cfg;
3925 + } else {
3926 + dma_buffer->data.cfg0 = ((chip->CFG0_RAW &
3927 + ~(7U << 6)) & ~(1 << 4)) | (((cwperpage >> 1)-1) << 6);
3928 + dma_buffer->data.cfg1 = chip->CFG1_RAW |
3929 + (chip->CFG1 & CFG1_WIDE_FLASH);
3930 + }
3931 +
3932 + /* Disables the automatic issuing of the read
3933 + * status command for first NAND controller.
3934 + */
3935 + if (!interleave_enable)
3936 + dma_buffer->data.cfg0_nc01 = dma_buffer->data.cfg0
3937 + | (1 << 4);
3938 + else
3939 + dma_buffer->data.cfg0 |= (1 << 4);
3940 +
3941 + dma_buffer->data.cmd = MSM_NAND_CMD_PRG_PAGE;
3942 + dma_buffer->data.chipsel_cs0 = (1<<4) | 4;
3943 + dma_buffer->data.chipsel_cs1 = (1<<4) | 5;
3944 +
3945 + /* GO bit for the EXEC register */
3946 + dma_buffer->data.exec = 1;
3947 +
3948 + if (!interleave_enable) {
3949 + dma_buffer->data.nandc01_addr0 = (page << 16) | 0x0;
3950 + /* NC10 ADDR0 points to the next code word */
3951 + dma_buffer->data.nandc10_addr0 =
3952 + (page << 16) | cw_offset;
3953 + } else {
3954 + dma_buffer->data.nandc01_addr0 =
3955 + dma_buffer->data.nandc10_addr0 = (page << 16) | 0x0;
3956 + }
3957 + /* ADDR1 */
3958 + dma_buffer->data.nandc11_addr1 = (page >> 16) & 0xff;
3959 +
3960 + BUILD_BUG_ON(16 != ARRAY_SIZE(dma_buffer->data.flash_status));
3961 +
3962 + for (n = 0; n < cwperpage; n++) {
3963 + /* status return words */
3964 + dma_buffer->data.flash_status[n] = 0xeeeeeeee;
3965 +
3966 + if (n == 0) {
3967 + if (!interleave_enable) {
3968 + cmd->cmd = 0;
3969 + cmd->src = msm_virt_to_dma(chip,
3970 + &dma_buffer->
3971 + data.nc01_flash_dev_cmd_vld);
3972 + cmd->dst = NC01(MSM_NAND_DEV_CMD_VLD);
3973 + cmd->len = 4;
3974 + cmd++;
3975 +
3976 + cmd->cmd = 0;
3977 + cmd->src = msm_virt_to_dma(chip,
3978 + &dma_buffer->data.nc10_flash_dev_cmd0);
3979 + cmd->dst = NC10(MSM_NAND_DEV_CMD0);
3980 + cmd->len = 4;
3981 + cmd++;
3982 +
3983 + /* common settings for both NC01 & NC10
3984 + * NC01, NC10 --> ADDR1 / CHIPSEL
3985 + */
3986 + cmd->cmd = 0;
3987 + cmd->src = msm_virt_to_dma(chip,
3988 + &dma_buffer->data.nandc11_addr1);
3989 + cmd->dst = NC11(MSM_NAND_ADDR1);
3990 + cmd->len = 8;
3991 + cmd++;
3992 +
3993 + /* Disables the automatic issue of the
3994 + * read status command after the write
3995 + * operation.
3996 + */
3997 + cmd->cmd = 0;
3998 + cmd->src = msm_virt_to_dma(chip,
3999 + &dma_buffer->data.cfg0_nc01);
4000 + cmd->dst = NC01(MSM_NAND_DEV0_CFG0);
4001 + cmd->len = 4;
4002 + cmd++;
4003 +
4004 + cmd->cmd = 0;
4005 + cmd->src = msm_virt_to_dma(chip,
4006 + &dma_buffer->data.cfg0);
4007 + cmd->dst = NC10(MSM_NAND_DEV0_CFG0);
4008 + cmd->len = 4;
4009 + cmd++;
4010 +
4011 + cmd->cmd = 0;
4012 + cmd->src = msm_virt_to_dma(chip,
4013 + &dma_buffer->data.cfg1);
4014 + cmd->dst = NC11(MSM_NAND_DEV0_CFG1);
4015 + if (enable_bch_ecc)
4016 + cmd->len = 8;
4017 + else
4018 + cmd->len = 4;
4019 + cmd++;
4020 + } else {
4021 + /* enable CS1 */
4022 + cmd->cmd = 0;
4023 + cmd->src = msm_virt_to_dma(chip,
4024 + &dma_buffer->
4025 + data.ebi2_chip_select_cfg0);
4026 + cmd->dst = EBI2_CHIP_SELECT_CFG0;
4027 + cmd->len = 4;
4028 + cmd++;
4029 +
4030 + /* NC11 --> ADDR1 */
4031 + cmd->cmd = 0;
4032 + cmd->src = msm_virt_to_dma(chip,
4033 + &dma_buffer->data.nandc11_addr1);
4034 + cmd->dst = NC11(MSM_NAND_ADDR1);
4035 + cmd->len = 4;
4036 + cmd++;
4037 +
4038 + /* Enable CS0 for NC01 */
4039 + cmd->cmd = 0;
4040 + cmd->src = msm_virt_to_dma(chip,
4041 + &dma_buffer->data.chipsel_cs0);
4042 + cmd->dst =
4043 + NC01(MSM_NAND_FLASH_CHIP_SELECT);
4044 + cmd->len = 4;
4045 + cmd++;
4046 +
4047 + /* Enable CS1 for NC10 */
4048 + cmd->cmd = 0;
4049 + cmd->src = msm_virt_to_dma(chip,
4050 + &dma_buffer->data.chipsel_cs1);
4051 + cmd->dst =
4052 + NC10(MSM_NAND_FLASH_CHIP_SELECT);
4053 + cmd->len = 4;
4054 + cmd++;
4055 +
4056 + /* config DEV0_CFG0 & CFG1 for CS0 */
4057 + cmd->cmd = 0;
4058 + cmd->src = msm_virt_to_dma(chip,
4059 + &dma_buffer->data.cfg0);
4060 + cmd->dst = NC01(MSM_NAND_DEV0_CFG0);
4061 + cmd->len = 8;
4062 + cmd++;
4063 +
4064 + /* config DEV1_CFG0 & CFG1 for CS1 */
4065 + cmd->cmd = 0;
4066 + cmd->src = msm_virt_to_dma(chip,
4067 + &dma_buffer->data.cfg0);
4068 + cmd->dst = NC10(MSM_NAND_DEV1_CFG0);
4069 + cmd->len = 8;
4070 + cmd++;
4071 + }
4072 +
4073 + dma_buffer->data.ecccfg = chip->ecc_buf_cfg;
4074 + cmd->cmd = 0;
4075 + cmd->src = msm_virt_to_dma(chip,
4076 + &dma_buffer->data.ecccfg);
4077 + cmd->dst = NC11(MSM_NAND_EBI2_ECC_BUF_CFG);
4078 + cmd->len = 4;
4079 + cmd++;
4080 +
4081 + /* NC01 --> ADDR0 */
4082 + cmd->cmd = 0;
4083 + cmd->src = msm_virt_to_dma(chip,
4084 + &dma_buffer->data.nandc01_addr0);
4085 + cmd->dst = NC01(MSM_NAND_ADDR0);
4086 + cmd->len = 4;
4087 + cmd++;
4088 +
4089 + /* NC10 --> ADDR0 */
4090 + cmd->cmd = 0;
4091 + cmd->src = msm_virt_to_dma(chip,
4092 + &dma_buffer->data.nandc10_addr0);
4093 + cmd->dst = NC10(MSM_NAND_ADDR0);
4094 + cmd->len = 4;
4095 + cmd++;
4096 + }
4097 +
4098 + if (n % 2 == 0) {
4099 + /* MASK CMD ACK/REQ --> NC10 (0xF14)*/
4100 + cmd->cmd = 0;
4101 + cmd->src = msm_virt_to_dma(chip,
4102 + &dma_buffer->data.adm_mux_cmd_ack_req_nc10);
4103 + cmd->dst = EBI2_NAND_ADM_MUX;
4104 + cmd->len = 4;
4105 + cmd++;
4106 +
4107 + /* CMD */
4108 + cmd->cmd = DST_CRCI_NAND_CMD;
4109 + cmd->src = msm_virt_to_dma(chip,
4110 + &dma_buffer->data.cmd);
4111 + cmd->dst = NC01(MSM_NAND_FLASH_CMD);
4112 + cmd->len = 4;
4113 + cmd++;
4114 + } else {
4115 + /* MASK CMD ACK/REQ --> NC01 (0x53C)*/
4116 + cmd->cmd = 0;
4117 + cmd->src = msm_virt_to_dma(chip,
4118 + &dma_buffer->data.adm_mux_cmd_ack_req_nc01);
4119 + cmd->dst = EBI2_NAND_ADM_MUX;
4120 + cmd->len = 4;
4121 + cmd++;
4122 +
4123 + /* CMD */
4124 + cmd->cmd = DST_CRCI_NAND_CMD;
4125 + cmd->src = msm_virt_to_dma(chip,
4126 + &dma_buffer->data.cmd);
4127 + cmd->dst = NC10(MSM_NAND_FLASH_CMD);
4128 + cmd->len = 4;
4129 + cmd++;
4130 + }
4131 +
4132 + if (ops->mode != MTD_OPS_RAW)
4133 + sectordatawritesize = (n < (cwperpage - 1)) ?
4134 + 516 : (512 - ((cwperpage - 1) << 2));
4135 + else
4136 + sectordatawritesize = chip->cw_size;
4137 +
4138 + cmd->cmd = 0;
4139 + cmd->src = data_dma_addr_curr;
4140 + data_dma_addr_curr += sectordatawritesize;
4141 +
4142 + if (n % 2 == 0)
4143 + cmd->dst = NC01(MSM_NAND_FLASH_BUFFER);
4144 + else
4145 + cmd->dst = NC10(MSM_NAND_FLASH_BUFFER);
4146 + cmd->len = sectordatawritesize;
4147 + cmd++;
4148 +
4149 + if (ops->oobbuf) {
4150 + if (n == (cwperpage - 1)) {
4151 + cmd->cmd = 0;
4152 + cmd->src = oob_dma_addr_curr;
4153 + cmd->dst = NC10(MSM_NAND_FLASH_BUFFER) +
4154 + (512 - ((cwperpage - 1) << 2));
4155 + if ((cwperpage << 2) < oob_len)
4156 + cmd->len = (cwperpage << 2);
4157 + else
4158 + cmd->len = oob_len;
4159 + oob_dma_addr_curr += cmd->len;
4160 + oob_len -= cmd->len;
4161 + if (cmd->len > 0)
4162 + cmd++;
4163 + }
4164 + if (ops->mode != MTD_OPS_AUTO_OOB) {
4165 + /* skip ecc bytes in oobbuf */
4166 + if (oob_len < chip->ecc_parity_bytes) {
4167 + oob_dma_addr_curr +=
4168 + chip->ecc_parity_bytes;
4169 + oob_len -=
4170 + chip->ecc_parity_bytes;
4171 + } else {
4172 + oob_dma_addr_curr += oob_len;
4173 + oob_len = 0;
4174 + }
4175 + }
4176 + }
4177 +
4178 + if (n % 2 == 0) {
4179 + if (n != 0) {
4180 + /* MASK DATA ACK/REQ --> NC01 (0xA3C)*/
4181 + cmd->cmd = 0;
4182 + cmd->src = msm_virt_to_dma(chip,
4183 + &dma_buffer->
4184 + data.adm_mux_data_ack_req_nc01);
4185 + cmd->dst = EBI2_NAND_ADM_MUX;
4186 + cmd->len = 4;
4187 + cmd++;
4188 +
4189 + /* block on data ready from NC10, then
4190 + * read the status register
4191 + */
4192 + cmd->cmd = SRC_CRCI_NAND_DATA;
4193 + cmd->src = NC10(MSM_NAND_FLASH_STATUS);
4194 + cmd->dst = msm_virt_to_dma(chip,
4195 + &dma_buffer->data.flash_status[n-1]);
4196 + cmd->len = 4;
4197 + cmd++;
4198 + }
4199 + /* kick the NC01 execute register */
4200 + cmd->cmd = 0;
4201 + cmd->src = msm_virt_to_dma(chip,
4202 + &dma_buffer->data.exec);
4203 + cmd->dst = NC01(MSM_NAND_EXEC_CMD);
4204 + cmd->len = 4;
4205 + cmd++;
4206 + } else {
4207 + /* MASK DATA ACK/REQ --> NC10 (0xF28)*/
4208 + cmd->cmd = 0;
4209 + cmd->src = msm_virt_to_dma(chip,
4210 + &dma_buffer->data.adm_mux_data_ack_req_nc10);
4211 + cmd->dst = EBI2_NAND_ADM_MUX;
4212 + cmd->len = 4;
4213 + cmd++;
4214 +
4215 + /* block on data ready from NC01, then
4216 + * read the status register
4217 + */
4218 + cmd->cmd = SRC_CRCI_NAND_DATA;
4219 + cmd->src = NC01(MSM_NAND_FLASH_STATUS);
4220 + cmd->dst = msm_virt_to_dma(chip,
4221 + &dma_buffer->data.flash_status[n-1]);
4222 + cmd->len = 4;
4223 + cmd++;
4224 +
4225 + /* kick the execute register */
4226 + cmd->cmd = 0;
4227 + cmd->src =
4228 + msm_virt_to_dma(chip, &dma_buffer->data.exec);
4229 + cmd->dst = NC10(MSM_NAND_EXEC_CMD);
4230 + cmd->len = 4;
4231 + cmd++;
4232 + }
4233 + }
4234 +
4235 + /* MASK DATA ACK/REQ --> NC01 (0xA3C)*/
4236 + cmd->cmd = 0;
4237 + cmd->src = msm_virt_to_dma(chip,
4238 + &dma_buffer->data.adm_mux_data_ack_req_nc01);
4239 + cmd->dst = EBI2_NAND_ADM_MUX;
4240 + cmd->len = 4;
4241 + cmd++;
4242 +
4243 + /* we should process outstanding request */
4244 + /* block on data ready, then
4245 + * read the status register
4246 + */
4247 + cmd->cmd = SRC_CRCI_NAND_DATA;
4248 + cmd->src = NC10(MSM_NAND_FLASH_STATUS);
4249 + cmd->dst = msm_virt_to_dma(chip,
4250 + &dma_buffer->data.flash_status[n-1]);
4251 + cmd->len = 4;
4252 + cmd++;
4253 +
4254 + cmd->cmd = 0;
4255 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.clrfstatus);
4256 + cmd->dst = NC11(MSM_NAND_FLASH_STATUS);
4257 + cmd->len = 4;
4258 + cmd++;
4259 +
4260 + cmd->cmd = 0;
4261 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.clrrstatus);
4262 + cmd->dst = NC11(MSM_NAND_READ_STATUS);
4263 + cmd->len = 4;
4264 + cmd++;
4265 +
4266 + /* MASK DATA ACK/REQ --> NC01 (0xFC0)*/
4267 + cmd->cmd = 0;
4268 + cmd->src = msm_virt_to_dma(chip,
4269 + &dma_buffer->data.adm_default_mux);
4270 + cmd->dst = EBI2_NAND_ADM_MUX;
4271 + cmd->len = 4;
4272 + cmd++;
4273 +
4274 + if (!interleave_enable) {
4275 +		/* setting back to default values */
4276 + cmd->cmd = 0;
4277 + cmd->src = msm_virt_to_dma(chip,
4278 + &dma_buffer->data.nc01_flash_dev_cmd_vld_default);
4279 + cmd->dst = NC01(MSM_NAND_DEV_CMD_VLD);
4280 + cmd->len = 4;
4281 + cmd++;
4282 +
4283 + cmd->cmd = 0;
4284 + cmd->src = msm_virt_to_dma(chip,
4285 + &dma_buffer->data.nc10_flash_dev_cmd0_default);
4286 + cmd->dst = NC10(MSM_NAND_DEV_CMD0);
4287 + cmd->len = 4;
4288 + cmd++;
4289 + } else {
4290 + /* disable CS1 */
4291 + cmd->cmd = 0;
4292 + cmd->src = msm_virt_to_dma(chip,
4293 + &dma_buffer->data.default_ebi2_chip_select_cfg0);
4294 + cmd->dst = EBI2_CHIP_SELECT_CFG0;
4295 + cmd->len = 4;
4296 + cmd++;
4297 + }
4298 +
4299 + dma_buffer->cmd[0].cmd |= CMD_OCB;
4300 + cmd[-1].cmd |= CMD_OCU | CMD_LC;
4301 + BUILD_BUG_ON(16 * 6 + 18 != ARRAY_SIZE(dma_buffer->cmd));
4302 + BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
4303 + dma_buffer->cmdptr =
4304 + ((msm_virt_to_dma(chip, dma_buffer->cmd) >> 3) | CMD_PTR_LP);
4305 +
4306 + mb();
4307 + msm_dmov_exec_cmd(chip->dma_channel,
4308 + DMOV_CMD_PTR_LIST | DMOV_CMD_ADDR(
4309 + msm_virt_to_dma(chip, &dma_buffer->cmdptr)));
4310 + mb();
4311 +
4312 + /* if any of the writes failed (0x10), or there was a
4313 + * protection violation (0x100), or the program success
4314 + * bit (0x80) is unset, we lose
4315 + */
4316 + err = 0;
4317 + for (n = 0; n < cwperpage; n++) {
4318 + if (dma_buffer->data.flash_status[n] & 0x110) {
4319 + err = -EIO;
4320 + break;
4321 + }
4322 + if (!(dma_buffer->data.flash_status[n] & 0x80)) {
4323 + err = -EIO;
4324 + break;
4325 + }
4326 + }
4327 + /* check for flash status busy for the last codeword */
4328 + if (!interleave_enable)
4329 + if (!(dma_buffer->data.flash_status[cwperpage - 1]
4330 + & 0x20)) {
4331 + err = -EIO;
4332 + break;
4333 + }
4334 +#if VERBOSE
4335 + for (n = 0; n < cwperpage; n++) {
4336 + if (n%2) {
4337 + pr_info("NC10: write pg %d: flash_status[%d] = %x\n",
4338 + page, n, dma_buffer->data.flash_status[n]);
4339 + } else {
4340 + pr_info("NC01: write pg %d: flash_status[%d] = %x\n",
4341 + page, n, dma_buffer->data.flash_status[n]);
4342 + }
4343 + }
4344 +#endif
4345 + if (err)
4346 + break;
4347 + pages_written++;
4348 + page++;
4349 + }
4350 + if (ops->mode != MTD_OPS_RAW)
4351 + ops->retlen = mtd->writesize * pages_written;
4352 + else
4353 + ops->retlen = (mtd->writesize + mtd->oobsize) * pages_written;
4354 +
4355 + ops->oobretlen = ops->ooblen - oob_len;
4356 +
4357 + msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
4358 +
4359 + if (ops->oobbuf)
4360 + dma_unmap_page(chip->dev, oob_dma_addr,
4361 + ops->ooblen, DMA_TO_DEVICE);
4362 +err_dma_map_oobbuf_failed:
4363 + if (ops->datbuf)
4364 + dma_unmap_page(chip->dev, data_dma_addr, ops->len,
4365 + DMA_TO_DEVICE);
4366 + if (err)
4367 + pr_err("msm_nand_write_oob_dualnandc %llx %x %x failed %d\n",
4368 + to, ops->len, ops->ooblen, err);
4369 +
4370 +#if VERBOSE
4371 + pr_info("\n%s: ret %d, retlen %d oobretlen %d\n",
4372 + __func__, err, ops->retlen, ops->oobretlen);
4373 +
4374 + pr_info("==================================================="
4375 + "==========\n");
4376 +#endif
4377 + return err;
4378 +}
4379 +
4380 +static int msm_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
4381 + size_t *retlen, const u_char *buf)
4382 +{
4383 + int ret;
4384 + struct mtd_oob_ops ops;
4385 + int (*write_oob)(struct mtd_info *, loff_t, struct mtd_oob_ops *);
4386 +
4387 + if (!dual_nand_ctlr_present)
4388 + write_oob = msm_nand_write_oob;
4389 + else
4390 + write_oob = msm_nand_write_oob_dualnandc;
4391 +
4392 + ops.mode = MTD_OPS_PLACE_OOB;
4393 + ops.retlen = 0;
4394 + ops.ooblen = 0;
4395 + ops.oobbuf = NULL;
4396 + ret = 0;
4397 + *retlen = 0;
4398 +
4399 + if (!virt_addr_valid(buf) &&
4400 + ((to | len) & (mtd->writesize - 1)) == 0 &&
4401 + ((unsigned long) buf & ~PAGE_MASK) + len > PAGE_SIZE) {
4402 + /*
4403 + * Handle writing of large size write buffer in vmalloc
4404 + * address space that does not fit in an MMU page.
4405 + * The destination address must be on page boundary,
4406 + * and the size must be multiple of NAND page size.
4407 + * Writing partial page is not supported.
4408 + */
4409 + ops.len = mtd->writesize;
4410 +
4411 + for (;;) {
4412 + ops.datbuf = (uint8_t *) buf;
4413 +
4414 + ret = write_oob(mtd, to, &ops);
4415 + if (ret < 0)
4416 + break;
4417 +
4418 + len -= mtd->writesize;
4419 + *retlen += mtd->writesize;
4420 + if (len == 0)
4421 + break;
4422 +
4423 + buf += mtd->writesize;
4424 + to += mtd->writesize;
4425 + }
4426 + } else {
4427 + ops.len = len;
4428 + ops.datbuf = (uint8_t *) buf;
4429 + ret = write_oob(mtd, to, &ops);
4430 + *retlen = ops.retlen;
4431 + }
4432 +
4433 + return ret;
4434 +}
4435 +
4436 +static int
4437 +msm_nand_erase(struct mtd_info *mtd, struct erase_info *instr)
4438 +{
4439 + int err;
4440 + struct msm_nand_chip *chip = mtd->priv;
4441 + struct {
4442 + dmov_s cmd[6];
4443 + unsigned cmdptr;
4444 + struct {
4445 + uint32_t cmd;
4446 + uint32_t addr0;
4447 + uint32_t addr1;
4448 + uint32_t chipsel;
4449 + uint32_t cfg0;
4450 + uint32_t cfg1;
4451 + uint32_t exec;
4452 + uint32_t flash_status;
4453 + uint32_t clrfstatus;
4454 + uint32_t clrrstatus;
4455 + } data;
4456 + } *dma_buffer;
4457 + dmov_s *cmd;
4458 + unsigned page = 0;
4459 +
4460 + if (mtd->writesize == 2048)
4461 + page = instr->addr >> 11;
4462 +
4463 + if (mtd->writesize == 4096)
4464 + page = instr->addr >> 12;
4465 +
4466 + if (instr->addr & (mtd->erasesize - 1)) {
4467 + pr_err("%s: unsupported erase address, 0x%llx\n",
4468 + __func__, instr->addr);
4469 + return -EINVAL;
4470 + }
4471 + if (instr->len != mtd->erasesize) {
4472 + pr_err("%s: unsupported erase len, %lld\n",
4473 + __func__, instr->len);
4474 + return -EINVAL;
4475 + }
4476 +
4477 + wait_event(chip->wait_queue,
4478 + (dma_buffer = msm_nand_get_dma_buffer(
4479 + chip, sizeof(*dma_buffer))));
4480 +
4481 + cmd = dma_buffer->cmd;
4482 +
4483 + dma_buffer->data.cmd = MSM_NAND_CMD_BLOCK_ERASE;
4484 + dma_buffer->data.addr0 = page;
4485 + dma_buffer->data.addr1 = 0;
4486 + dma_buffer->data.chipsel = 0 | 4;
4487 + dma_buffer->data.exec = 1;
4488 + dma_buffer->data.flash_status = 0xeeeeeeee;
4489 + dma_buffer->data.cfg0 = chip->CFG0 & (~(7 << 6)); /* CW_PER_PAGE = 0 */
4490 + dma_buffer->data.cfg1 = chip->CFG1;
4491 + dma_buffer->data.clrfstatus = 0x00000020;
4492 + dma_buffer->data.clrrstatus = 0x000000C0;
4493 +
4494 + cmd->cmd = DST_CRCI_NAND_CMD | CMD_OCB;
4495 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cmd);
4496 + cmd->dst = MSM_NAND_FLASH_CMD;
4497 + cmd->len = 16;
4498 + cmd++;
4499 +
4500 + cmd->cmd = 0;
4501 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cfg0);
4502 + cmd->dst = MSM_NAND_DEV0_CFG0;
4503 + cmd->len = 8;
4504 + cmd++;
4505 +
4506 + cmd->cmd = 0;
4507 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.exec);
4508 + cmd->dst = MSM_NAND_EXEC_CMD;
4509 + cmd->len = 4;
4510 + cmd++;
4511 +
4512 + cmd->cmd = SRC_CRCI_NAND_DATA;
4513 + cmd->src = MSM_NAND_FLASH_STATUS;
4514 + cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.flash_status);
4515 + cmd->len = 4;
4516 + cmd++;
4517 +
4518 + cmd->cmd = 0;
4519 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.clrfstatus);
4520 + cmd->dst = MSM_NAND_FLASH_STATUS;
4521 + cmd->len = 4;
4522 + cmd++;
4523 +
4524 + cmd->cmd = CMD_OCU | CMD_LC;
4525 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.clrrstatus);
4526 + cmd->dst = MSM_NAND_READ_STATUS;
4527 + cmd->len = 4;
4528 + cmd++;
4529 +
4530 + BUILD_BUG_ON(5 != ARRAY_SIZE(dma_buffer->cmd) - 1);
4531 + BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
4532 + dma_buffer->cmdptr =
4533 + (msm_virt_to_dma(chip, dma_buffer->cmd) >> 3) | CMD_PTR_LP;
4534 +
4535 + mb();
4536 + msm_dmov_exec_cmd(
4537 + chip->dma_channel, DMOV_CMD_PTR_LIST |
4538 + DMOV_CMD_ADDR(msm_virt_to_dma(chip, &dma_buffer->cmdptr)));
4539 + mb();
4540 +
4541 + /* we fail if there was an operation error, a mpu error, or the
4542 + * erase success bit was not set.
4543 + */
4544 +
4545 + if (dma_buffer->data.flash_status & 0x110 ||
4546 + !(dma_buffer->data.flash_status & 0x80))
4547 + err = -EIO;
4548 + else
4549 + err = 0;
4550 +
4551 + msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
4552 + if (err) {
4553 + pr_err("%s: erase failed, 0x%llx\n", __func__, instr->addr);
4554 + instr->fail_addr = instr->addr;
4555 + instr->state = MTD_ERASE_FAILED;
4556 + } else {
4557 + instr->state = MTD_ERASE_DONE;
4558 + instr->fail_addr = 0xffffffff;
4559 + mtd_erase_callback(instr);
4560 + }
4561 + return err;
4562 +}
4563 +
4564 +static int
4565 +msm_nand_erase_dualnandc(struct mtd_info *mtd, struct erase_info *instr)
4566 +{
4567 + int err;
4568 + struct msm_nand_chip *chip = mtd->priv;
4569 + struct {
4570 + dmov_s cmd[18];
4571 + unsigned cmdptr;
4572 + struct {
4573 + uint32_t cmd;
4574 + uint32_t addr0;
4575 + uint32_t addr1;
4576 + uint32_t chipsel_cs0;
4577 + uint32_t chipsel_cs1;
4578 + uint32_t cfg0;
4579 + uint32_t cfg1;
4580 + uint32_t exec;
4581 + uint32_t ecccfg;
4582 + uint32_t ebi2_chip_select_cfg0;
4583 + uint32_t adm_mux_data_ack_req_nc01;
4584 + uint32_t adm_mux_cmd_ack_req_nc01;
4585 + uint32_t adm_mux_data_ack_req_nc10;
4586 + uint32_t adm_mux_cmd_ack_req_nc10;
4587 + uint32_t adm_default_mux;
4588 + uint32_t default_ebi2_chip_select_cfg0;
4589 + uint32_t nc01_flash_dev_cmd0;
4590 + uint32_t nc01_flash_dev_cmd0_default;
4591 + uint32_t flash_status[2];
4592 + uint32_t clrfstatus;
4593 + uint32_t clrrstatus;
4594 + } data;
4595 + } *dma_buffer;
4596 + dmov_s *cmd;
4597 + unsigned page = 0;
4598 +
4599 + if (mtd->writesize == 2048)
4600 + page = instr->addr >> 11;
4601 +
4602 + if (mtd->writesize == 4096)
4603 + page = instr->addr >> 12;
4604 +
4605 + if (mtd->writesize == 8192)
4606 + page = (instr->addr >> 1) >> 12;
4607 +
4608 + if (instr->addr & (mtd->erasesize - 1)) {
4609 + pr_err("%s: unsupported erase address, 0x%llx\n",
4610 + __func__, instr->addr);
4611 + return -EINVAL;
4612 + }
4613 + if (instr->len != mtd->erasesize) {
4614 + pr_err("%s: unsupported erase len, %lld\n",
4615 + __func__, instr->len);
4616 + return -EINVAL;
4617 + }
4618 +
4619 + wait_event(chip->wait_queue,
4620 + (dma_buffer = msm_nand_get_dma_buffer(
4621 + chip, sizeof(*dma_buffer))));
4622 +
4623 + cmd = dma_buffer->cmd;
4624 +
4625 + dma_buffer->data.cmd = MSM_NAND_CMD_BLOCK_ERASE;
4626 + dma_buffer->data.addr0 = page;
4627 + dma_buffer->data.addr1 = 0;
4628 + dma_buffer->data.chipsel_cs0 = (1<<4) | 4;
4629 + dma_buffer->data.chipsel_cs1 = (1<<4) | 5;
4630 + dma_buffer->data.exec = 1;
4631 + dma_buffer->data.flash_status[0] = 0xeeeeeeee;
4632 + dma_buffer->data.flash_status[1] = 0xeeeeeeee;
4633 + dma_buffer->data.cfg0 = chip->CFG0 & (~(7 << 6)); /* CW_PER_PAGE = 0 */
4634 + dma_buffer->data.cfg1 = chip->CFG1;
4635 + dma_buffer->data.clrfstatus = 0x00000020;
4636 + dma_buffer->data.clrrstatus = 0x000000C0;
4637 +
4638 + dma_buffer->data.ebi2_chip_select_cfg0 = 0x00000805;
4639 + dma_buffer->data.adm_mux_data_ack_req_nc01 = 0x00000A3C;
4640 + dma_buffer->data.adm_mux_cmd_ack_req_nc01 = 0x0000053C;
4641 + dma_buffer->data.adm_mux_data_ack_req_nc10 = 0x00000F28;
4642 + dma_buffer->data.adm_mux_cmd_ack_req_nc10 = 0x00000F14;
4643 + dma_buffer->data.adm_default_mux = 0x00000FC0;
4644 + dma_buffer->data.default_ebi2_chip_select_cfg0 = 0x00000801;
4645 +
4646 + /* enable CS1 */
4647 + cmd->cmd = 0 | CMD_OCB;
4648 + cmd->src = msm_virt_to_dma(chip,
4649 + &dma_buffer->data.ebi2_chip_select_cfg0);
4650 + cmd->dst = EBI2_CHIP_SELECT_CFG0;
4651 + cmd->len = 4;
4652 + cmd++;
4653 +
4654 + /* erase CS0 block now !!! */
4655 + /* 0xF14 */
4656 + cmd->cmd = 0;
4657 + cmd->src = msm_virt_to_dma(chip,
4658 + &dma_buffer->data.adm_mux_cmd_ack_req_nc10);
4659 + cmd->dst = EBI2_NAND_ADM_MUX;
4660 + cmd->len = 4;
4661 + cmd++;
4662 +
4663 + cmd->cmd = DST_CRCI_NAND_CMD;
4664 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cmd);
4665 + cmd->dst = NC01(MSM_NAND_FLASH_CMD);
4666 + cmd->len = 16;
4667 + cmd++;
4668 +
4669 + cmd->cmd = 0;
4670 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cfg0);
4671 + cmd->dst = NC01(MSM_NAND_DEV0_CFG0);
4672 + cmd->len = 8;
4673 + cmd++;
4674 +
4675 + cmd->cmd = 0;
4676 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.exec);
4677 + cmd->dst = NC01(MSM_NAND_EXEC_CMD);
4678 + cmd->len = 4;
4679 + cmd++;
4680 +
4681 + /* 0xF28 */
4682 + cmd->cmd = 0;
4683 + cmd->src = msm_virt_to_dma(chip,
4684 + &dma_buffer->data.adm_mux_data_ack_req_nc10);
4685 + cmd->dst = EBI2_NAND_ADM_MUX;
4686 + cmd->len = 4;
4687 + cmd++;
4688 +
4689 + cmd->cmd = SRC_CRCI_NAND_DATA;
4690 + cmd->src = NC01(MSM_NAND_FLASH_STATUS);
4691 + cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.flash_status[0]);
4692 + cmd->len = 4;
4693 + cmd++;
4694 +
4695 + /* erase CS1 block now !!! */
4696 + /* 0x53C */
4697 + cmd->cmd = 0;
4698 + cmd->src = msm_virt_to_dma(chip,
4699 + &dma_buffer->data.adm_mux_cmd_ack_req_nc01);
4700 + cmd->dst = EBI2_NAND_ADM_MUX;
4701 + cmd->len = 4;
4702 + cmd++;
4703 +
4704 + cmd->cmd = DST_CRCI_NAND_CMD;
4705 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cmd);
4706 + cmd->dst = NC10(MSM_NAND_FLASH_CMD);
4707 + cmd->len = 12;
4708 + cmd++;
4709 +
4710 + cmd->cmd = 0;
4711 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.chipsel_cs1);
4712 + cmd->dst = NC10(MSM_NAND_FLASH_CHIP_SELECT);
4713 + cmd->len = 4;
4714 + cmd++;
4715 +
4716 + cmd->cmd = 0;
4717 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cfg0);
4718 + cmd->dst = NC10(MSM_NAND_DEV1_CFG0);
4719 + cmd->len = 8;
4720 +	cmd++;
4721 + cmd->cmd = 0;
4722 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.exec);
4723 + cmd->dst = NC10(MSM_NAND_EXEC_CMD);
4724 + cmd->len = 4;
4725 + cmd++;
4726 +
4727 + /* 0xA3C */
4728 + cmd->cmd = 0;
4729 + cmd->src = msm_virt_to_dma(chip,
4730 + &dma_buffer->data.adm_mux_data_ack_req_nc01);
4731 + cmd->dst = EBI2_NAND_ADM_MUX;
4732 + cmd->len = 4;
4733 + cmd++;
4734 +
4735 + cmd->cmd = SRC_CRCI_NAND_DATA;
4736 + cmd->src = NC10(MSM_NAND_FLASH_STATUS);
4737 + cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.flash_status[1]);
4738 + cmd->len = 4;
4739 + cmd++;
4740 +
4741 + cmd->cmd = 0;
4742 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.clrfstatus);
4743 + cmd->dst = NC11(MSM_NAND_FLASH_STATUS);
4744 + cmd->len = 4;
4745 + cmd++;
4746 +
4747 + cmd->cmd = 0;
4748 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.clrrstatus);
4749 + cmd->dst = NC11(MSM_NAND_READ_STATUS);
4750 + cmd->len = 4;
4751 + cmd++;
4752 +
4753 + cmd->cmd = 0;
4754 + cmd->src = msm_virt_to_dma(chip,
4755 + &dma_buffer->data.adm_default_mux);
4756 + cmd->dst = EBI2_NAND_ADM_MUX;
4757 + cmd->len = 4;
4758 + cmd++;
4759 +
4760 + /* disable CS1 */
4761 + cmd->cmd = CMD_OCU | CMD_LC;
4762 + cmd->src = msm_virt_to_dma(chip,
4763 + &dma_buffer->data.default_ebi2_chip_select_cfg0);
4764 + cmd->dst = EBI2_CHIP_SELECT_CFG0;
4765 + cmd->len = 4;
4766 + cmd++;
4767 +
4768 + BUILD_BUG_ON(17 != ARRAY_SIZE(dma_buffer->cmd) - 1);
4769 + BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
4770 +
4771 + dma_buffer->cmdptr =
4772 + (msm_virt_to_dma(chip, dma_buffer->cmd) >> 3) | CMD_PTR_LP;
4773 +
4774 + mb();
4775 + msm_dmov_exec_cmd(
4776 + chip->dma_channel, DMOV_CMD_PTR_LIST |
4777 + DMOV_CMD_ADDR(msm_virt_to_dma(chip, &dma_buffer->cmdptr)));
4778 + mb();
4779 +
4780 + /* we fail if there was an operation error, a mpu error, or the
4781 + * erase success bit was not set.
4782 + */
4783 +
4784 + if (dma_buffer->data.flash_status[0] & 0x110 ||
4785 + !(dma_buffer->data.flash_status[0] & 0x80) ||
4786 + dma_buffer->data.flash_status[1] & 0x110 ||
4787 + !(dma_buffer->data.flash_status[1] & 0x80))
4788 + err = -EIO;
4789 + else
4790 + err = 0;
4791 +
4792 + msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
4793 + if (err) {
4794 + pr_err("%s: erase failed, 0x%llx\n", __func__, instr->addr);
4795 + instr->fail_addr = instr->addr;
4796 + instr->state = MTD_ERASE_FAILED;
4797 + } else {
4798 + instr->state = MTD_ERASE_DONE;
4799 + instr->fail_addr = 0xffffffff;
4800 + mtd_erase_callback(instr);
4801 + }
4802 + return err;
4803 +}
4804 +
4805 +static int
4806 +msm_nand_block_isbad(struct mtd_info *mtd, loff_t ofs)
4807 +{
4808 + struct msm_nand_chip *chip = mtd->priv;
4809 + int ret;
4810 + struct {
4811 + dmov_s cmd[5];
4812 + unsigned cmdptr;
4813 + struct {
4814 + uint32_t cmd;
4815 + uint32_t addr0;
4816 + uint32_t addr1;
4817 + uint32_t chipsel;
4818 + uint32_t cfg0;
4819 + uint32_t cfg1;
4820 + uint32_t eccbchcfg;
4821 + uint32_t exec;
4822 + uint32_t ecccfg;
4823 + struct {
4824 + uint32_t flash_status;
4825 + uint32_t buffer_status;
4826 + } result;
4827 + } data;
4828 + } *dma_buffer;
4829 + dmov_s *cmd;
4830 + uint8_t *buf;
4831 + unsigned page = 0;
4832 + unsigned cwperpage;
4833 +
4834 + if (mtd->writesize == 2048)
4835 + page = ofs >> 11;
4836 +
4837 + if (mtd->writesize == 4096)
4838 + page = ofs >> 12;
4839 +
4840 + cwperpage = (mtd->writesize >> 9);
4841 +
4842 + /* Check for invalid offset */
4843 + if (ofs > mtd->size)
4844 + return -EINVAL;
4845 + if (ofs & (mtd->erasesize - 1)) {
4846 + pr_err("%s: unsupported block address, 0x%x\n",
4847 + __func__, (uint32_t)ofs);
4848 + return -EINVAL;
4849 + }
4850 +
4851 + wait_event(chip->wait_queue,
4852 + (dma_buffer = msm_nand_get_dma_buffer(chip ,
4853 + sizeof(*dma_buffer) + 4)));
4854 + buf = (uint8_t *)dma_buffer + sizeof(*dma_buffer);
4855 +
4856 + /* Read 4 bytes starting from the bad block marker location
4857 + * in the last code word of the page
4858 + */
4859 +
4860 + cmd = dma_buffer->cmd;
4861 +
4862 + dma_buffer->data.cmd = MSM_NAND_CMD_PAGE_READ;
4863 + dma_buffer->data.cfg0 = chip->CFG0_RAW & ~(7U << 6);
4864 + dma_buffer->data.cfg1 = chip->CFG1_RAW |
4865 + (chip->CFG1 & CFG1_WIDE_FLASH);
4866 + if (enable_bch_ecc)
4867 + dma_buffer->data.eccbchcfg = chip->ecc_bch_cfg;
4868 +
4869 + if (chip->CFG1 & CFG1_WIDE_FLASH)
4870 + dma_buffer->data.addr0 = (page << 16) |
4871 + ((chip->cw_size * (cwperpage-1)) >> 1);
4872 + else
4873 + dma_buffer->data.addr0 = (page << 16) |
4874 + (chip->cw_size * (cwperpage-1));
4875 +
4876 + dma_buffer->data.addr1 = (page >> 16) & 0xff;
4877 + dma_buffer->data.chipsel = 0 | 4;
4878 +
4879 + dma_buffer->data.exec = 1;
4880 +
4881 + dma_buffer->data.result.flash_status = 0xeeeeeeee;
4882 + dma_buffer->data.result.buffer_status = 0xeeeeeeee;
4883 +
4884 + cmd->cmd = DST_CRCI_NAND_CMD;
4885 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cmd);
4886 + cmd->dst = MSM_NAND_FLASH_CMD;
4887 + cmd->len = 16;
4888 + cmd++;
4889 +
4890 + cmd->cmd = 0;
4891 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cfg0);
4892 + cmd->dst = MSM_NAND_DEV0_CFG0;
4893 + if (enable_bch_ecc)
4894 + cmd->len = 12;
4895 + else
4896 + cmd->len = 8;
4897 + cmd++;
4898 +
4899 + cmd->cmd = 0;
4900 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.exec);
4901 + cmd->dst = MSM_NAND_EXEC_CMD;
4902 + cmd->len = 4;
4903 + cmd++;
4904 +
4905 + cmd->cmd = SRC_CRCI_NAND_DATA;
4906 + cmd->src = MSM_NAND_FLASH_STATUS;
4907 + cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.result);
4908 + cmd->len = 8;
4909 + cmd++;
4910 +
4911 + cmd->cmd = 0;
4912 + cmd->src = MSM_NAND_FLASH_BUFFER +
4913 + (mtd->writesize - (chip->cw_size * (cwperpage-1)));
4914 + cmd->dst = msm_virt_to_dma(chip, buf);
4915 + cmd->len = 4;
4916 + cmd++;
4917 +
4918 + BUILD_BUG_ON(5 != ARRAY_SIZE(dma_buffer->cmd));
4919 + BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
4920 + dma_buffer->cmd[0].cmd |= CMD_OCB;
4921 + cmd[-1].cmd |= CMD_OCU | CMD_LC;
4922 +
4923 + dma_buffer->cmdptr = (msm_virt_to_dma(chip,
4924 + dma_buffer->cmd) >> 3) | CMD_PTR_LP;
4925 +
4926 + mb();
4927 + msm_dmov_exec_cmd(chip->dma_channel, DMOV_CMD_PTR_LIST |
4928 + DMOV_CMD_ADDR(msm_virt_to_dma(chip, &dma_buffer->cmdptr)));
4929 + mb();
4930 +
4931 + ret = 0;
4932 + if (dma_buffer->data.result.flash_status & 0x110)
4933 + ret = -EIO;
4934 +
4935 + if (!ret) {
4936 + /* Check for bad block marker byte */
4937 + if (chip->CFG1 & CFG1_WIDE_FLASH) {
4938 + if (buf[0] != 0xFF || buf[1] != 0xFF)
4939 + ret = 1;
4940 + } else {
4941 + if (buf[0] != 0xFF)
4942 + ret = 1;
4943 + }
4944 + }
4945 +
4946 + msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer) + 4);
4947 + return ret;
4948 +}
4949 +
4950 +static int
4951 +msm_nand_block_isbad_dualnandc(struct mtd_info *mtd, loff_t ofs)
4952 +{
4953 + struct msm_nand_chip *chip = mtd->priv;
4954 + int ret;
4955 + struct {
4956 + dmov_s cmd[18];
4957 + unsigned cmdptr;
4958 + struct {
4959 + uint32_t cmd;
4960 + uint32_t addr0;
4961 + uint32_t addr1;
4962 + uint32_t chipsel_cs0;
4963 + uint32_t chipsel_cs1;
4964 + uint32_t cfg0;
4965 + uint32_t cfg1;
4966 + uint32_t exec;
4967 + uint32_t ecccfg;
4968 + uint32_t ebi2_chip_select_cfg0;
4969 + uint32_t adm_mux_data_ack_req_nc01;
4970 + uint32_t adm_mux_cmd_ack_req_nc01;
4971 + uint32_t adm_mux_data_ack_req_nc10;
4972 + uint32_t adm_mux_cmd_ack_req_nc10;
4973 + uint32_t adm_default_mux;
4974 + uint32_t default_ebi2_chip_select_cfg0;
4975 + struct {
4976 + uint32_t flash_status;
4977 + uint32_t buffer_status;
4978 + } result[2];
4979 + } data;
4980 + } *dma_buffer;
4981 + dmov_s *cmd;
4982 + uint8_t *buf01;
4983 + uint8_t *buf10;
4984 + unsigned page = 0;
4985 + unsigned cwperpage;
4986 +
4987 + if (mtd->writesize == 2048)
4988 + page = ofs >> 11;
4989 +
4990 + if (mtd->writesize == 4096)
4991 + page = ofs >> 12;
4992 +
4993 + if (mtd->writesize == 8192)
4994 + page = (ofs >> 1) >> 12;
4995 +
4996 + cwperpage = ((mtd->writesize >> 1) >> 9);
4997 +
4998 + /* Check for invalid offset */
4999 + if (ofs > mtd->size)
5000 + return -EINVAL;
5001 + if (ofs & (mtd->erasesize - 1)) {
5002 + pr_err("%s: unsupported block address, 0x%x\n",
5003 + __func__, (uint32_t)ofs);
5004 + return -EINVAL;
5005 + }
5006 +
5007 + wait_event(chip->wait_queue,
5008 + (dma_buffer = msm_nand_get_dma_buffer(chip ,
5009 + sizeof(*dma_buffer) + 8)));
5010 + buf01 = (uint8_t *)dma_buffer + sizeof(*dma_buffer);
5011 + buf10 = buf01 + 4;
5012 +
5013 + /* Read 4 bytes starting from the bad block marker location
5014 + * in the last code word of the page
5015 + */
5016 + cmd = dma_buffer->cmd;
5017 +
5018 + dma_buffer->data.cmd = MSM_NAND_CMD_PAGE_READ;
5019 + dma_buffer->data.cfg0 = chip->CFG0_RAW & ~(7U << 6);
5020 + dma_buffer->data.cfg1 = chip->CFG1_RAW |
5021 + (chip->CFG1 & CFG1_WIDE_FLASH);
5022 +
5023 + if (chip->CFG1 & CFG1_WIDE_FLASH)
5024 + dma_buffer->data.addr0 = (page << 16) |
5025 + ((528*(cwperpage-1)) >> 1);
5026 + else
5027 + dma_buffer->data.addr0 = (page << 16) |
5028 + (528*(cwperpage-1));
5029 +
5030 + dma_buffer->data.addr1 = (page >> 16) & 0xff;
5031 + dma_buffer->data.chipsel_cs0 = (1<<4) | 4;
5032 + dma_buffer->data.chipsel_cs1 = (1<<4) | 5;
5033 +
5034 + dma_buffer->data.exec = 1;
5035 +
5036 + dma_buffer->data.result[0].flash_status = 0xeeeeeeee;
5037 + dma_buffer->data.result[0].buffer_status = 0xeeeeeeee;
5038 + dma_buffer->data.result[1].flash_status = 0xeeeeeeee;
5039 + dma_buffer->data.result[1].buffer_status = 0xeeeeeeee;
5040 +
5041 + dma_buffer->data.ebi2_chip_select_cfg0 = 0x00000805;
5042 + dma_buffer->data.adm_mux_data_ack_req_nc01 = 0x00000A3C;
5043 + dma_buffer->data.adm_mux_cmd_ack_req_nc01 = 0x0000053C;
5044 + dma_buffer->data.adm_mux_data_ack_req_nc10 = 0x00000F28;
5045 + dma_buffer->data.adm_mux_cmd_ack_req_nc10 = 0x00000F14;
5046 + dma_buffer->data.adm_default_mux = 0x00000FC0;
5047 + dma_buffer->data.default_ebi2_chip_select_cfg0 = 0x00000801;
5048 +
5049 + /* Reading last code word from NC01 */
5050 + /* enable CS1 */
5051 + cmd->cmd = 0;
5052 + cmd->src = msm_virt_to_dma(chip,
5053 + &dma_buffer->data.ebi2_chip_select_cfg0);
5054 + cmd->dst = EBI2_CHIP_SELECT_CFG0;
5055 + cmd->len = 4;
5056 + cmd++;
5057 +
5058 + /* 0xF14 */
5059 + cmd->cmd = 0;
5060 + cmd->src = msm_virt_to_dma(chip,
5061 + &dma_buffer->data.adm_mux_cmd_ack_req_nc10);
5062 + cmd->dst = EBI2_NAND_ADM_MUX;
5063 + cmd->len = 4;
5064 + cmd++;
5065 +
5066 + cmd->cmd = DST_CRCI_NAND_CMD;
5067 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cmd);
5068 + cmd->dst = NC01(MSM_NAND_FLASH_CMD);
5069 + cmd->len = 16;
5070 + cmd++;
5071 +
5072 + cmd->cmd = 0;
5073 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cfg0);
5074 + cmd->dst = NC01(MSM_NAND_DEV0_CFG0);
5075 + cmd->len = 8;
5076 + cmd++;
5077 +
5078 + cmd->cmd = 0;
5079 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.exec);
5080 + cmd->dst = NC01(MSM_NAND_EXEC_CMD);
5081 + cmd->len = 4;
5082 + cmd++;
5083 +
5084 + /* 0xF28 */
5085 + cmd->cmd = 0;
5086 + cmd->src = msm_virt_to_dma(chip,
5087 + &dma_buffer->data.adm_mux_data_ack_req_nc10);
5088 + cmd->dst = EBI2_NAND_ADM_MUX;
5089 + cmd->len = 4;
5090 + cmd++;
5091 +
5092 + cmd->cmd = SRC_CRCI_NAND_DATA;
5093 + cmd->src = NC01(MSM_NAND_FLASH_STATUS);
5094 + cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.result[0]);
5095 + cmd->len = 8;
5096 + cmd++;
5097 +
5098 + cmd->cmd = 0;
5099 + cmd->src = NC01(MSM_NAND_FLASH_BUFFER) + ((mtd->writesize >> 1) -
5100 + (528*(cwperpage-1)));
5101 + cmd->dst = msm_virt_to_dma(chip, buf01);
5102 + cmd->len = 4;
5103 + cmd++;
5104 +
5105 + /* Reading last code word from NC10 */
5106 + /* 0x53C */
5107 + cmd->cmd = 0;
5108 + cmd->src = msm_virt_to_dma(chip,
5109 + &dma_buffer->data.adm_mux_cmd_ack_req_nc01);
5110 + cmd->dst = EBI2_NAND_ADM_MUX;
5111 + cmd->len = 4;
5112 + cmd++;
5113 +
5114 + cmd->cmd = DST_CRCI_NAND_CMD;
5115 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cmd);
5116 + cmd->dst = NC10(MSM_NAND_FLASH_CMD);
5117 + cmd->len = 12;
5118 + cmd++;
5119 +
5120 + cmd->cmd = 0;
5121 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.chipsel_cs1);
5122 + cmd->dst = NC10(MSM_NAND_FLASH_CHIP_SELECT);
5123 + cmd->len = 4;
5124 + cmd++;
5125 +
5126 + cmd->cmd = 0;
5127 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cfg0);
5128 + cmd->dst = NC10(MSM_NAND_DEV1_CFG0);
5129 + cmd->len = 8;
5130 + cmd++;
5131 +
5132 + cmd->cmd = 0;
5133 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.exec);
5134 + cmd->dst = NC10(MSM_NAND_EXEC_CMD);
5135 + cmd->len = 4;
5136 + cmd++;
5137 +
5138 +	/* 0xA3C */
5139 + cmd->cmd = 0;
5140 + cmd->src = msm_virt_to_dma(chip,
5141 + &dma_buffer->data.adm_mux_data_ack_req_nc01);
5142 + cmd->dst = EBI2_NAND_ADM_MUX;
5143 + cmd->len = 4;
5144 + cmd++;
5145 +
5146 + cmd->cmd = SRC_CRCI_NAND_DATA;
5147 + cmd->src = NC10(MSM_NAND_FLASH_STATUS);
5148 + cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.result[1]);
5149 + cmd->len = 8;
5150 + cmd++;
5151 +
5152 + cmd->cmd = 0;
5153 + cmd->src = NC10(MSM_NAND_FLASH_BUFFER) + ((mtd->writesize >> 1) -
5154 + (528*(cwperpage-1)));
5155 + cmd->dst = msm_virt_to_dma(chip, buf10);
5156 + cmd->len = 4;
5157 + cmd++;
5158 +
5159 +	/* 0xFC0 */
5160 + cmd->cmd = 0;
5161 + cmd->src = msm_virt_to_dma(chip,
5162 + &dma_buffer->data.adm_default_mux);
5163 + cmd->dst = EBI2_NAND_ADM_MUX;
5164 + cmd->len = 4;
5165 + cmd++;
5166 +
5167 +	/* disable CS1 */
5168 + cmd->cmd = 0;
5169 + cmd->src = msm_virt_to_dma(chip,
5170 + &dma_buffer->data.ebi2_chip_select_cfg0);
5171 + cmd->dst = EBI2_CHIP_SELECT_CFG0;
5172 + cmd->len = 4;
5173 + cmd++;
5174 +
5175 + BUILD_BUG_ON(18 != ARRAY_SIZE(dma_buffer->cmd));
5176 + BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
5177 + dma_buffer->cmd[0].cmd |= CMD_OCB;
5178 + cmd[-1].cmd |= CMD_OCU | CMD_LC;
5179 +
5180 + dma_buffer->cmdptr = (msm_virt_to_dma(chip,
5181 + dma_buffer->cmd) >> 3) | CMD_PTR_LP;
5182 +
5183 + mb();
5184 + msm_dmov_exec_cmd(chip->dma_channel, DMOV_CMD_PTR_LIST |
5185 + DMOV_CMD_ADDR(msm_virt_to_dma(chip, &dma_buffer->cmdptr)));
5186 + mb();
5187 +
5188 + ret = 0;
5189 + if ((dma_buffer->data.result[0].flash_status & 0x110) ||
5190 + (dma_buffer->data.result[1].flash_status & 0x110))
5191 + ret = -EIO;
5192 +
5193 + if (!ret) {
5194 + /* Check for bad block marker byte for NC01 & NC10 */
5195 + if (chip->CFG1 & CFG1_WIDE_FLASH) {
5196 + if ((buf01[0] != 0xFF || buf01[1] != 0xFF) ||
5197 + (buf10[0] != 0xFF || buf10[1] != 0xFF))
5198 + ret = 1;
5199 + } else {
5200 + if (buf01[0] != 0xFF || buf10[0] != 0xFF)
5201 + ret = 1;
5202 + }
5203 + }
5204 +
5205 + msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer) + 8);
5206 + return ret;
5207 +}
5208 +
5209 +static int
5210 +msm_nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
5211 +{
5212 + struct mtd_oob_ops ops;
5213 + int ret;
5214 + uint8_t *buf;
5215 +
5216 + /* Check for invalid offset */
5217 + if (ofs > mtd->size)
5218 + return -EINVAL;
5219 + if (ofs & (mtd->erasesize - 1)) {
5220 + pr_err("%s: unsupported block address, 0x%x\n",
5221 + __func__, (uint32_t)ofs);
5222 + return -EINVAL;
5223 + }
5224 +
5225 +	/*
5226 +	 * Write all 0s to the first page.
5227 +	 * This will set the BB marker to 0.
5228 +	 */
5229 + buf = page_address(ZERO_PAGE());
5230 +
5231 + ops.mode = MTD_OPS_RAW;
5232 + ops.len = mtd->writesize + mtd->oobsize;
5233 + ops.retlen = 0;
5234 + ops.ooblen = 0;
5235 + ops.datbuf = buf;
5236 + ops.oobbuf = NULL;
5237 + if (!interleave_enable)
5238 + ret = msm_nand_write_oob(mtd, ofs, &ops);
5239 + else
5240 + ret = msm_nand_write_oob_dualnandc(mtd, ofs, &ops);
5241 +
5242 + return ret;
5243 +}
5244 +
5245 +/**
5246 + * msm_nand_suspend - [MTD Interface] Suspend the msm_nand flash
5247 + * @param mtd MTD device structure
5248 + */
5249 +static int msm_nand_suspend(struct mtd_info *mtd)
5250 +{
5251 + return 0;
5252 +}
5253 +
5254 +/**
5255 + * msm_nand_resume - [MTD Interface] Resume the msm_nand flash
5256 + * @param mtd MTD device structure
5257 + */
5258 +static void msm_nand_resume(struct mtd_info *mtd)
5259 +{
5260 +}
5261 +
5262 +struct onenand_information {
5263 + uint16_t manufacturer_id;
5264 + uint16_t device_id;
5265 + uint16_t version_id;
5266 + uint16_t data_buf_size;
5267 + uint16_t boot_buf_size;
5268 + uint16_t num_of_buffers;
5269 + uint16_t technology;
5270 +};
5271 +
5272 +static struct onenand_information onenand_info;
5273 +static uint32_t nand_sfcmd_mode;
5274 +
5275 +uint32_t flash_onenand_probe(struct msm_nand_chip *chip)
5276 +{
5277 + struct {
5278 + dmov_s cmd[7];
5279 + unsigned cmdptr;
5280 + struct {
5281 + uint32_t bcfg;
5282 + uint32_t cmd;
5283 + uint32_t exec;
5284 + uint32_t status;
5285 + uint32_t addr0;
5286 + uint32_t addr1;
5287 + uint32_t addr2;
5288 + uint32_t addr3;
5289 + uint32_t addr4;
5290 + uint32_t addr5;
5291 + uint32_t addr6;
5292 + uint32_t data0;
5293 + uint32_t data1;
5294 + uint32_t data2;
5295 + uint32_t data3;
5296 + uint32_t data4;
5297 + uint32_t data5;
5298 + uint32_t data6;
5299 + } data;
5300 + } *dma_buffer;
5301 + dmov_s *cmd;
5302 +
5303 + int err = 0;
5304 + uint32_t initialsflashcmd = 0;
5305 +
5306 + initialsflashcmd = flash_rd_reg(chip, MSM_NAND_SFLASHC_CMD);
5307 +
5308 + if ((initialsflashcmd & 0x10) == 0x10)
5309 + nand_sfcmd_mode = MSM_NAND_SFCMD_ASYNC;
5310 + else
5311 + nand_sfcmd_mode = MSM_NAND_SFCMD_BURST;
5312 +
5313 + printk(KERN_INFO "SFLASHC Async Mode bit: %x \n", nand_sfcmd_mode);
5314 +
5315 + wait_event(chip->wait_queue, (dma_buffer = msm_nand_get_dma_buffer
5316 + (chip, sizeof(*dma_buffer))));
5317 +
5318 + cmd = dma_buffer->cmd;
5319 +
5320 + dma_buffer->data.bcfg = SFLASH_BCFG |
5321 + (nand_sfcmd_mode ? 0 : (1 << 24));
5322 + dma_buffer->data.cmd = SFLASH_PREPCMD(7, 0, 0,
5323 + MSM_NAND_SFCMD_DATXS,
5324 + nand_sfcmd_mode,
5325 + MSM_NAND_SFCMD_REGRD);
5326 + dma_buffer->data.exec = 1;
5327 + dma_buffer->data.status = CLEAN_DATA_32;
5328 + dma_buffer->data.addr0 = (ONENAND_DEVICE_ID << 16) |
5329 + (ONENAND_MANUFACTURER_ID);
5330 + dma_buffer->data.addr1 = (ONENAND_DATA_BUFFER_SIZE << 16) |
5331 + (ONENAND_VERSION_ID);
5332 + dma_buffer->data.addr2 = (ONENAND_AMOUNT_OF_BUFFERS << 16) |
5333 + (ONENAND_BOOT_BUFFER_SIZE);
5334 + dma_buffer->data.addr3 = (CLEAN_DATA_16 << 16) |
5335 + (ONENAND_TECHNOLOGY << 0);
5336 + dma_buffer->data.data0 = CLEAN_DATA_32;
5337 + dma_buffer->data.data1 = CLEAN_DATA_32;
5338 + dma_buffer->data.data2 = CLEAN_DATA_32;
5339 + dma_buffer->data.data3 = CLEAN_DATA_32;
5340 +
5341 + /* Enable and configure the SFlash controller */
5342 + cmd->cmd = 0;
5343 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.bcfg);
5344 + cmd->dst = MSM_NAND_SFLASHC_BURST_CFG;
5345 + cmd->len = 4;
5346 + cmd++;
5347 +
5348 + /* Block on cmd ready and write CMD register */
5349 + cmd->cmd = DST_CRCI_NAND_CMD;
5350 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cmd);
5351 + cmd->dst = MSM_NAND_SFLASHC_CMD;
5352 + cmd->len = 4;
5353 + cmd++;
5354 +
5355 + /* Configure the ADDR0 and ADDR1 registers */
5356 + cmd->cmd = 0;
5357 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr0);
5358 + cmd->dst = MSM_NAND_ADDR0;
5359 + cmd->len = 8;
5360 + cmd++;
5361 +
5362 + /* Configure the ADDR2 and ADDR3 registers */
5363 + cmd->cmd = 0;
5364 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr2);
5365 + cmd->dst = MSM_NAND_ADDR2;
5366 + cmd->len = 8;
5367 + cmd++;
5368 +
5369 + /* Kick the execute command */
5370 + cmd->cmd = 0;
5371 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.exec);
5372 + cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
5373 + cmd->len = 4;
5374 + cmd++;
5375 +
5376 + /* Block on data ready, and read the two status registers */
5377 + cmd->cmd = SRC_CRCI_NAND_DATA;
5378 + cmd->src = MSM_NAND_SFLASHC_STATUS;
5379 + cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.status);
5380 + cmd->len = 4;
5381 + cmd++;
5382 +
5383 + /* Read data registers - valid only if status says success */
5384 + cmd->cmd = 0;
5385 + cmd->src = MSM_NAND_GENP_REG0;
5386 + cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.data0);
5387 + cmd->len = 16;
5388 + cmd++;
5389 +
5390 + BUILD_BUG_ON(7 != ARRAY_SIZE(dma_buffer->cmd));
5391 + BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
5392 + dma_buffer->cmd[0].cmd |= CMD_OCB;
5393 + cmd[-1].cmd |= CMD_OCU | CMD_LC;
5394 +
5395 + dma_buffer->cmdptr = (msm_virt_to_dma(chip, dma_buffer->cmd)
5396 + >> 3) | CMD_PTR_LP;
5397 +
5398 + mb();
5399 + msm_dmov_exec_cmd(chip->dma_channel, DMOV_CMD_PTR_LIST
5400 + | DMOV_CMD_ADDR(msm_virt_to_dma(chip,
5401 + &dma_buffer->cmdptr)));
5402 + mb();
5403 +
5404 + /* Check for errors, protection violations etc */
5405 + if (dma_buffer->data.status & 0x110) {
5406 + pr_info("%s: MPU/OP error"
5407 + "(0x%x) during Onenand probe\n",
5408 + __func__, dma_buffer->data.status);
5409 + err = -EIO;
5410 + } else {
5411 +
5412 + onenand_info.manufacturer_id =
5413 + (dma_buffer->data.data0 >> 0) & 0x0000FFFF;
5414 + onenand_info.device_id =
5415 + (dma_buffer->data.data0 >> 16) & 0x0000FFFF;
5416 + onenand_info.version_id =
5417 + (dma_buffer->data.data1 >> 0) & 0x0000FFFF;
5418 + onenand_info.data_buf_size =
5419 + (dma_buffer->data.data1 >> 16) & 0x0000FFFF;
5420 + onenand_info.boot_buf_size =
5421 + (dma_buffer->data.data2 >> 0) & 0x0000FFFF;
5422 + onenand_info.num_of_buffers =
5423 + (dma_buffer->data.data2 >> 16) & 0x0000FFFF;
5424 + onenand_info.technology =
5425 + (dma_buffer->data.data3 >> 0) & 0x0000FFFF;
5426 +
5427 +
5428 + pr_info("======================================="
5429 + "==========================\n");
5430 +
5431 + pr_info("%s: manufacturer_id = 0x%x\n"
5432 + , __func__, onenand_info.manufacturer_id);
5433 + pr_info("%s: device_id = 0x%x\n"
5434 + , __func__, onenand_info.device_id);
5435 + pr_info("%s: version_id = 0x%x\n"
5436 + , __func__, onenand_info.version_id);
5437 + pr_info("%s: data_buf_size = 0x%x\n"
5438 + , __func__, onenand_info.data_buf_size);
5439 + pr_info("%s: boot_buf_size = 0x%x\n"
5440 + , __func__, onenand_info.boot_buf_size);
5441 + pr_info("%s: num_of_buffers = 0x%x\n"
5442 + , __func__, onenand_info.num_of_buffers);
5443 + pr_info("%s: technology = 0x%x\n"
5444 + , __func__, onenand_info.technology);
5445 +
5446 + pr_info("======================================="
5447 + "==========================\n");
5448 +
5449 + if ((onenand_info.manufacturer_id != 0x00EC)
5450 + || ((onenand_info.device_id & 0x0040) != 0x0040)
5451 + || (onenand_info.data_buf_size != 0x0800)
5452 + || (onenand_info.boot_buf_size != 0x0200)
5453 + || (onenand_info.num_of_buffers != 0x0201)
5454 + || (onenand_info.technology != 0)) {
5455 +
5456 + pr_info("%s: Detected an unsupported device\n"
5457 + , __func__);
5458 + err = -EIO;
5459 + }
5460 + }
5461 +
5462 + msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
5463 +
5464 + return err;
5465 +}
5466 +
5467 +int msm_onenand_read_oob(struct mtd_info *mtd,
5468 + loff_t from, struct mtd_oob_ops *ops)
5469 +{
5470 + struct msm_nand_chip *chip = mtd->priv;
5471 +
5472 + struct {
5473 + dmov_s cmd[53];
5474 + unsigned cmdptr;
5475 + struct {
5476 + uint32_t sfbcfg;
5477 + uint32_t sfcmd[9];
5478 + uint32_t sfexec;
5479 + uint32_t sfstat[9];
5480 + uint32_t addr0;
5481 + uint32_t addr1;
5482 + uint32_t addr2;
5483 + uint32_t addr3;
5484 + uint32_t addr4;
5485 + uint32_t addr5;
5486 + uint32_t addr6;
5487 + uint32_t data0;
5488 + uint32_t data1;
5489 + uint32_t data2;
5490 + uint32_t data3;
5491 + uint32_t data4;
5492 + uint32_t data5;
5493 + uint32_t data6;
5494 + uint32_t macro[5];
5495 + } data;
5496 + } *dma_buffer;
5497 + dmov_s *cmd;
5498 +
5499 + int err = 0;
5500 + int i;
5501 + dma_addr_t data_dma_addr = 0;
5502 + dma_addr_t oob_dma_addr = 0;
5503 + dma_addr_t data_dma_addr_curr = 0;
5504 + dma_addr_t oob_dma_addr_curr = 0;
5505 +
5506 + loff_t from_curr = 0;
5507 + unsigned page_count;
5508 + unsigned pages_read = 0;
5509 +
5510 + uint16_t onenand_startaddr1;
5511 + uint16_t onenand_startaddr8;
5512 + uint16_t onenand_startaddr2;
5513 + uint16_t onenand_startbuffer;
5514 + uint16_t onenand_sysconfig1;
5515 + uint16_t controller_status;
5516 + uint16_t interrupt_status;
5517 + uint16_t ecc_status;
5518 +#if VERBOSE
5519 + pr_info("================================================="
5520 + "================\n");
5521 + pr_info("%s: from 0x%llx mode %d \ndatbuf 0x%p datlen 0x%x"
5522 + "\noobbuf 0x%p ooblen 0x%x\n",
5523 + __func__, from, ops->mode, ops->datbuf, ops->len,
5524 + ops->oobbuf, ops->ooblen);
5525 +#endif
5526 + if (!mtd) {
5527 + pr_err("%s: invalid mtd pointer, 0x%x\n", __func__,
5528 + (uint32_t)mtd);
5529 + return -EINVAL;
5530 + }
5531 + if (from & (mtd->writesize - 1)) {
5532 + pr_err("%s: unsupported from, 0x%llx\n", __func__,
5533 + from);
5534 + return -EINVAL;
5535 + }
5536 +
5537 + if ((ops->mode != MTD_OPS_PLACE_OOB) && (ops->mode != MTD_OPS_AUTO_OOB) &&
5538 + (ops->mode != MTD_OPS_RAW)) {
5539 + pr_err("%s: unsupported ops->mode, %d\n", __func__,
5540 + ops->mode);
5541 + return -EINVAL;
5542 + }
5543 +
5544 + if (((ops->datbuf == NULL) || (ops->len == 0)) &&
5545 + ((ops->oobbuf == NULL) || (ops->ooblen == 0))) {
5546 + pr_err("%s: incorrect ops fields - nothing to do\n",
5547 + __func__);
5548 + return -EINVAL;
5549 + }
5550 +
5551 + if ((ops->datbuf != NULL) && (ops->len == 0)) {
5552 + pr_err("%s: data buffer passed but length 0\n",
5553 + __func__);
5554 + return -EINVAL;
5555 + }
5556 +
5557 + if ((ops->oobbuf != NULL) && (ops->ooblen == 0)) {
5558 + pr_err("%s: oob buffer passed but length 0\n",
5559 + __func__);
5560 + return -EINVAL;
5561 + }
5562 +
5563 + if (ops->mode != MTD_OPS_RAW) {
5564 + if (ops->datbuf != NULL && (ops->len % mtd->writesize) != 0) {
5565 + /* when ops->datbuf is NULL, ops->len can be ooblen */
5566 + pr_err("%s: unsupported ops->len, %d\n", __func__,
5567 + ops->len);
5568 + return -EINVAL;
5569 + }
5570 + } else {
5571 + if (ops->datbuf != NULL &&
5572 + (ops->len % (mtd->writesize + mtd->oobsize)) != 0) {
5573 + pr_err("%s: unsupported ops->len,"
5574 + " %d for MTD_OPS_RAW\n", __func__, ops->len);
5575 + return -EINVAL;
5576 + }
5577 + }
5578 +
5579 + if ((ops->mode == MTD_OPS_RAW) && (ops->oobbuf)) {
5580 + pr_err("%s: unsupported operation, oobbuf pointer "
5581 + "passed in for RAW mode, %x\n", __func__,
5582 + (uint32_t)ops->oobbuf);
5583 + return -EINVAL;
5584 + }
5585 +
5586 + if (ops->oobbuf && !ops->datbuf) {
5587 + page_count = ops->ooblen / ((ops->mode == MTD_OPS_AUTO_OOB) ?
5588 + mtd->oobavail : mtd->oobsize);
5589 + if ((page_count == 0) && (ops->ooblen))
5590 + page_count = 1;
5591 + } else if (ops->mode != MTD_OPS_RAW)
5592 + page_count = ops->len / mtd->writesize;
5593 + else
5594 + page_count = ops->len / (mtd->writesize + mtd->oobsize);
5595 +
5596 + if ((ops->mode == MTD_OPS_PLACE_OOB) && (ops->oobbuf != NULL)) {
5597 + if (page_count * mtd->oobsize > ops->ooblen) {
5598 + pr_err("%s: unsupported ops->ooblen for "
5599 + "PLACE, %d\n", __func__, ops->ooblen);
5600 + return -EINVAL;
5601 + }
5602 + }
5603 +
5604 + if ((ops->mode == MTD_OPS_PLACE_OOB) && (ops->ooblen != 0) &&
5605 + (ops->ooboffs != 0)) {
5606 + pr_err("%s: unsupported ops->ooboffs, %d\n", __func__,
5607 + ops->ooboffs);
5608 + return -EINVAL;
5609 + }
5610 +
5611 + if (ops->datbuf) {
5612 + memset(ops->datbuf, 0x55, ops->len);
5613 + data_dma_addr_curr = data_dma_addr = msm_nand_dma_map(chip->dev,
5614 + ops->datbuf, ops->len, DMA_FROM_DEVICE, NULL);
5615 + if (dma_mapping_error(chip->dev, data_dma_addr)) {
5616 + pr_err("%s: failed to get dma addr for %p\n",
5617 + __func__, ops->datbuf);
5618 + return -EIO;
5619 + }
5620 + }
5621 + if (ops->oobbuf) {
5622 + memset(ops->oobbuf, 0x55, ops->ooblen);
5623 + oob_dma_addr_curr = oob_dma_addr = msm_nand_dma_map(chip->dev,
5624 + ops->oobbuf, ops->ooblen, DMA_FROM_DEVICE, NULL);
5625 + if (dma_mapping_error(chip->dev, oob_dma_addr)) {
5626 + pr_err("%s: failed to get dma addr for %p\n",
5627 + __func__, ops->oobbuf);
5628 + err = -EIO;
5629 + goto err_dma_map_oobbuf_failed;
5630 + }
5631 + }
5632 +
5633 + wait_event(chip->wait_queue, (dma_buffer = msm_nand_get_dma_buffer
5634 + (chip, sizeof(*dma_buffer))));
5635 +
5636 + from_curr = from;
5637 +
5638 + while (page_count-- > 0) {
5639 +
5640 + cmd = dma_buffer->cmd;
5641 +
5642 + if ((onenand_info.device_id & ONENAND_DEVICE_IS_DDP)
5643 + && (from_curr >= (mtd->size>>1))) { /* DDP Device */
5644 + onenand_startaddr1 = DEVICE_FLASHCORE_1 |
5645 + (((uint32_t)(from_curr-(mtd->size>>1))
5646 + / mtd->erasesize));
5647 + onenand_startaddr2 = DEVICE_BUFFERRAM_1;
5648 + } else {
5649 + onenand_startaddr1 = DEVICE_FLASHCORE_0 |
5650 + ((uint32_t)from_curr / mtd->erasesize) ;
5651 + onenand_startaddr2 = DEVICE_BUFFERRAM_0;
5652 + }
5653 +
5654 + onenand_startaddr8 = (((uint32_t)from_curr &
5655 + (mtd->erasesize - 1)) / mtd->writesize) << 2;
5656 + onenand_startbuffer = DATARAM0_0 << 8;
5657 + onenand_sysconfig1 = (ops->mode == MTD_OPS_RAW) ?
5658 + ONENAND_SYSCFG1_ECCDIS(nand_sfcmd_mode) :
5659 + ONENAND_SYSCFG1_ECCENA(nand_sfcmd_mode);
5660 +
5661 + dma_buffer->data.sfbcfg = SFLASH_BCFG |
5662 + (nand_sfcmd_mode ? 0 : (1 << 24));
5663 + dma_buffer->data.sfcmd[0] = SFLASH_PREPCMD(7, 0, 0,
5664 + MSM_NAND_SFCMD_CMDXS,
5665 + nand_sfcmd_mode,
5666 + MSM_NAND_SFCMD_REGWR);
5667 + dma_buffer->data.sfcmd[1] = SFLASH_PREPCMD(0, 0, 32,
5668 + MSM_NAND_SFCMD_CMDXS,
5669 + nand_sfcmd_mode,
5670 + MSM_NAND_SFCMD_INTHI);
5671 + dma_buffer->data.sfcmd[2] = SFLASH_PREPCMD(3, 7, 0,
5672 + MSM_NAND_SFCMD_DATXS,
5673 + nand_sfcmd_mode,
5674 + MSM_NAND_SFCMD_REGRD);
5675 + dma_buffer->data.sfcmd[3] = SFLASH_PREPCMD(256, 0, 0,
5676 + MSM_NAND_SFCMD_DATXS,
5677 + nand_sfcmd_mode,
5678 + MSM_NAND_SFCMD_DATRD);
5679 + dma_buffer->data.sfcmd[4] = SFLASH_PREPCMD(256, 0, 0,
5680 + MSM_NAND_SFCMD_DATXS,
5681 + nand_sfcmd_mode,
5682 + MSM_NAND_SFCMD_DATRD);
5683 + dma_buffer->data.sfcmd[5] = SFLASH_PREPCMD(256, 0, 0,
5684 + MSM_NAND_SFCMD_DATXS,
5685 + nand_sfcmd_mode,
5686 + MSM_NAND_SFCMD_DATRD);
5687 + dma_buffer->data.sfcmd[6] = SFLASH_PREPCMD(256, 0, 0,
5688 + MSM_NAND_SFCMD_DATXS,
5689 + nand_sfcmd_mode,
5690 + MSM_NAND_SFCMD_DATRD);
5691 + dma_buffer->data.sfcmd[7] = SFLASH_PREPCMD(32, 0, 0,
5692 + MSM_NAND_SFCMD_DATXS,
5693 + nand_sfcmd_mode,
5694 + MSM_NAND_SFCMD_DATRD);
5695 + dma_buffer->data.sfcmd[8] = SFLASH_PREPCMD(4, 10, 0,
5696 + MSM_NAND_SFCMD_CMDXS,
5697 + nand_sfcmd_mode,
5698 + MSM_NAND_SFCMD_REGWR);
5699 + dma_buffer->data.sfexec = 1;
5700 + dma_buffer->data.sfstat[0] = CLEAN_DATA_32;
5701 + dma_buffer->data.sfstat[1] = CLEAN_DATA_32;
5702 + dma_buffer->data.sfstat[2] = CLEAN_DATA_32;
5703 + dma_buffer->data.sfstat[3] = CLEAN_DATA_32;
5704 + dma_buffer->data.sfstat[4] = CLEAN_DATA_32;
5705 + dma_buffer->data.sfstat[5] = CLEAN_DATA_32;
5706 + dma_buffer->data.sfstat[6] = CLEAN_DATA_32;
5707 + dma_buffer->data.sfstat[7] = CLEAN_DATA_32;
5708 + dma_buffer->data.sfstat[8] = CLEAN_DATA_32;
5709 + dma_buffer->data.addr0 = (ONENAND_INTERRUPT_STATUS << 16) |
5710 + (ONENAND_SYSTEM_CONFIG_1);
5711 + dma_buffer->data.addr1 = (ONENAND_START_ADDRESS_8 << 16) |
5712 + (ONENAND_START_ADDRESS_1);
5713 + dma_buffer->data.addr2 = (ONENAND_START_BUFFER << 16) |
5714 + (ONENAND_START_ADDRESS_2);
5715 + dma_buffer->data.addr3 = (ONENAND_ECC_STATUS << 16) |
5716 + (ONENAND_COMMAND);
5717 + dma_buffer->data.addr4 = (ONENAND_CONTROLLER_STATUS << 16) |
5718 + (ONENAND_INTERRUPT_STATUS);
5719 + dma_buffer->data.addr5 = (ONENAND_INTERRUPT_STATUS << 16) |
5720 + (ONENAND_SYSTEM_CONFIG_1);
5721 + dma_buffer->data.addr6 = (ONENAND_START_ADDRESS_3 << 16) |
5722 + (ONENAND_START_ADDRESS_1);
5723 + dma_buffer->data.data0 = (ONENAND_CLRINTR << 16) |
5724 + (onenand_sysconfig1);
5725 + dma_buffer->data.data1 = (onenand_startaddr8 << 16) |
5726 + (onenand_startaddr1);
5727 + dma_buffer->data.data2 = (onenand_startbuffer << 16) |
5728 + (onenand_startaddr2);
5729 + dma_buffer->data.data3 = (CLEAN_DATA_16 << 16) |
5730 + (ONENAND_CMDLOADSPARE);
5731 + dma_buffer->data.data4 = (CLEAN_DATA_16 << 16) |
5732 + (CLEAN_DATA_16);
5733 + dma_buffer->data.data5 = (ONENAND_CLRINTR << 16) |
5734 + (ONENAND_SYSCFG1_ECCENA(nand_sfcmd_mode));
5735 + dma_buffer->data.data6 = (ONENAND_STARTADDR3_RES << 16) |
5736 + (ONENAND_STARTADDR1_RES);
5737 + dma_buffer->data.macro[0] = 0x0200;
5738 + dma_buffer->data.macro[1] = 0x0300;
5739 + dma_buffer->data.macro[2] = 0x0400;
5740 + dma_buffer->data.macro[3] = 0x0500;
5741 + dma_buffer->data.macro[4] = 0x8010;
5742 +
5743 + /*************************************************************/
5744 + /* Write necessary address registers in the onenand device */
5745 + /*************************************************************/
5746 +
5747 + /* Enable and configure the SFlash controller */
5748 + cmd->cmd = 0;
5749 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfbcfg);
5750 + cmd->dst = MSM_NAND_SFLASHC_BURST_CFG;
5751 + cmd->len = 4;
5752 + cmd++;
5753 +
5754 + /* Block on cmd ready and write CMD register */
5755 + cmd->cmd = DST_CRCI_NAND_CMD;
5756 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[0]);
5757 + cmd->dst = MSM_NAND_SFLASHC_CMD;
5758 + cmd->len = 4;
5759 + cmd++;
5760 +
5761 + /* Write the ADDR0 and ADDR1 registers */
5762 + cmd->cmd = 0;
5763 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr0);
5764 + cmd->dst = MSM_NAND_ADDR0;
5765 + cmd->len = 8;
5766 + cmd++;
5767 +
5768 + /* Write the ADDR2 ADDR3 ADDR4 ADDR5 registers */
5769 + cmd->cmd = 0;
5770 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr2);
5771 + cmd->dst = MSM_NAND_ADDR2;
5772 + cmd->len = 16;
5773 + cmd++;
5774 +
5775 + /* Write the ADDR6 registers */
5776 + cmd->cmd = 0;
5777 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr6);
5778 + cmd->dst = MSM_NAND_ADDR6;
5779 + cmd->len = 4;
5780 + cmd++;
5781 +
5782 + /* Write the GENP0, GENP1, GENP2, GENP3 registers */
5783 + cmd->cmd = 0;
5784 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.data0);
5785 + cmd->dst = MSM_NAND_GENP_REG0;
5786 + cmd->len = 16;
5787 + cmd++;
5788 +
5789 + /* Write the FLASH_DEV_CMD4,5,6 registers */
5790 + cmd->cmd = 0;
5791 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.data4);
5792 + cmd->dst = MSM_NAND_DEV_CMD4;
5793 + cmd->len = 12;
5794 + cmd++;
5795 +
5796 + /* Kick the execute command */
5797 + cmd->cmd = 0;
5798 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
5799 + cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
5800 + cmd->len = 4;
5801 + cmd++;
5802 +
5803 + /* Block on data ready, and read the status register */
5804 + cmd->cmd = SRC_CRCI_NAND_DATA;
5805 + cmd->src = MSM_NAND_SFLASHC_STATUS;
5806 + cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[0]);
5807 + cmd->len = 4;
5808 + cmd++;
5809 +
5810 + /*************************************************************/
5811 + /* Wait for the interrupt from the Onenand device controller */
5812 + /*************************************************************/
5813 +
5814 + /* Block on cmd ready and write CMD register */
5815 + cmd->cmd = DST_CRCI_NAND_CMD;
5816 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[1]);
5817 + cmd->dst = MSM_NAND_SFLASHC_CMD;
5818 + cmd->len = 4;
5819 + cmd++;
5820 +
5821 + /* Kick the execute command */
5822 + cmd->cmd = 0;
5823 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
5824 + cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
5825 + cmd->len = 4;
5826 + cmd++;
5827 +
5828 + /* Block on data ready, and read the status register */
5829 + cmd->cmd = SRC_CRCI_NAND_DATA;
5830 + cmd->src = MSM_NAND_SFLASHC_STATUS;
5831 + cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[1]);
5832 + cmd->len = 4;
5833 + cmd++;
5834 +
5835 + /*************************************************************/
5836 + /* Read necessary status registers from the onenand device */
5837 + /*************************************************************/
5838 +
5839 + /* Block on cmd ready and write CMD register */
5840 + cmd->cmd = DST_CRCI_NAND_CMD;
5841 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[2]);
5842 + cmd->dst = MSM_NAND_SFLASHC_CMD;
5843 + cmd->len = 4;
5844 + cmd++;
5845 +
5846 + /* Kick the execute command */
5847 + cmd->cmd = 0;
5848 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
5849 + cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
5850 + cmd->len = 4;
5851 + cmd++;
5852 +
5853 + /* Block on data ready, and read the status register */
5854 + cmd->cmd = SRC_CRCI_NAND_DATA;
5855 + cmd->src = MSM_NAND_SFLASHC_STATUS;
5856 + cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[2]);
5857 + cmd->len = 4;
5858 + cmd++;
5859 +
5860 + /* Read the GENP3 register */
5861 + cmd->cmd = 0;
5862 + cmd->src = MSM_NAND_GENP_REG3;
5863 + cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.data3);
5864 + cmd->len = 4;
5865 + cmd++;
5866 +
5867 + /* Read the DEVCMD4 register */
5868 + cmd->cmd = 0;
5869 + cmd->src = MSM_NAND_DEV_CMD4;
5870 + cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.data4);
5871 + cmd->len = 4;
5872 + cmd++;
5873 +
5874 + /*************************************************************/
5875 + /* Read the data ram area from the onenand buffer ram */
5876 + /*************************************************************/
5877 +
5878 + if (ops->datbuf) {
5879 +
5880 + dma_buffer->data.data3 = (CLEAN_DATA_16 << 16) |
5881 + (ONENAND_CMDLOAD);
5882 +
5883 + for (i = 0; i < 4; i++) {
5884 +
5885 + /* Block on cmd ready and write CMD register */
5886 + cmd->cmd = DST_CRCI_NAND_CMD;
5887 + cmd->src = msm_virt_to_dma(chip,
5888 + &dma_buffer->data.sfcmd[3+i]);
5889 + cmd->dst = MSM_NAND_SFLASHC_CMD;
5890 + cmd->len = 4;
5891 + cmd++;
5892 +
5893 + /* Write the MACRO1 register */
5894 + cmd->cmd = 0;
5895 + cmd->src = msm_virt_to_dma(chip,
5896 + &dma_buffer->data.macro[i]);
5897 + cmd->dst = MSM_NAND_MACRO1_REG;
5898 + cmd->len = 4;
5899 + cmd++;
5900 +
5901 + /* Kick the execute command */
5902 + cmd->cmd = 0;
5903 + cmd->src = msm_virt_to_dma(chip,
5904 + &dma_buffer->data.sfexec);
5905 + cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
5906 + cmd->len = 4;
5907 + cmd++;
5908 +
5909 + /* Block on data rdy, & read status register */
5910 + cmd->cmd = SRC_CRCI_NAND_DATA;
5911 + cmd->src = MSM_NAND_SFLASHC_STATUS;
5912 + cmd->dst = msm_virt_to_dma(chip,
5913 + &dma_buffer->data.sfstat[3+i]);
5914 + cmd->len = 4;
5915 + cmd++;
5916 +
5917 + /* Transfer nand ctlr buf contents to usr buf */
5918 + cmd->cmd = 0;
5919 + cmd->src = MSM_NAND_FLASH_BUFFER;
5920 + cmd->dst = data_dma_addr_curr;
5921 + cmd->len = 512;
5922 + data_dma_addr_curr += 512;
5923 + cmd++;
5924 + }
5925 + }
5926 +
5927 + if ((ops->oobbuf) || (ops->mode == MTD_OPS_RAW)) {
5928 +
5929 + /* Block on cmd ready and write CMD register */
5930 + cmd->cmd = DST_CRCI_NAND_CMD;
5931 + cmd->src = msm_virt_to_dma(chip,
5932 + &dma_buffer->data.sfcmd[7]);
5933 + cmd->dst = MSM_NAND_SFLASHC_CMD;
5934 + cmd->len = 4;
5935 + cmd++;
5936 +
5937 + /* Write the MACRO1 register */
5938 + cmd->cmd = 0;
5939 + cmd->src = msm_virt_to_dma(chip,
5940 + &dma_buffer->data.macro[4]);
5941 + cmd->dst = MSM_NAND_MACRO1_REG;
5942 + cmd->len = 4;
5943 + cmd++;
5944 +
5945 + /* Kick the execute command */
5946 + cmd->cmd = 0;
5947 + cmd->src = msm_virt_to_dma(chip,
5948 + &dma_buffer->data.sfexec);
5949 + cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
5950 + cmd->len = 4;
5951 + cmd++;
5952 +
5953 + /* Block on data ready, and read status register */
5954 + cmd->cmd = SRC_CRCI_NAND_DATA;
5955 + cmd->src = MSM_NAND_SFLASHC_STATUS;
5956 + cmd->dst = msm_virt_to_dma(chip,
5957 + &dma_buffer->data.sfstat[7]);
5958 + cmd->len = 4;
5959 + cmd++;
5960 +
5961 + /* Transfer nand ctlr buffer contents into usr buf */
5962 + if (ops->mode == MTD_OPS_AUTO_OOB) {
5963 + for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES; i++) {
5964 + cmd->cmd = 0;
5965 + cmd->src = MSM_NAND_FLASH_BUFFER +
5966 + mtd->ecclayout->oobfree[i].offset;
5967 + cmd->dst = oob_dma_addr_curr;
5968 + cmd->len =
5969 + mtd->ecclayout->oobfree[i].length;
5970 + oob_dma_addr_curr +=
5971 + mtd->ecclayout->oobfree[i].length;
5972 + cmd++;
5973 + }
5974 + }
5975 + if (ops->mode == MTD_OPS_PLACE_OOB) {
5976 + cmd->cmd = 0;
5977 + cmd->src = MSM_NAND_FLASH_BUFFER;
5978 + cmd->dst = oob_dma_addr_curr;
5979 + cmd->len = mtd->oobsize;
5980 + oob_dma_addr_curr += mtd->oobsize;
5981 + cmd++;
5982 + }
5983 + if (ops->mode == MTD_OPS_RAW) {
5984 + cmd->cmd = 0;
5985 + cmd->src = MSM_NAND_FLASH_BUFFER;
5986 + cmd->dst = data_dma_addr_curr;
5987 + cmd->len = mtd->oobsize;
5988 + data_dma_addr_curr += mtd->oobsize;
5989 + cmd++;
5990 + }
5991 + }
5992 +
5993 + /*************************************************************/
5994 + /* Restore the necessary registers to proper values */
5995 + /*************************************************************/
5996 +
5997 + /* Block on cmd ready and write CMD register */
5998 + cmd->cmd = DST_CRCI_NAND_CMD;
5999 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[8]);
6000 + cmd->dst = MSM_NAND_SFLASHC_CMD;
6001 + cmd->len = 4;
6002 + cmd++;
6003 +
6004 + /* Kick the execute command */
6005 + cmd->cmd = 0;
6006 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
6007 + cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
6008 + cmd->len = 4;
6009 + cmd++;
6010 +
6011 + /* Block on data ready, and read the status register */
6012 + cmd->cmd = SRC_CRCI_NAND_DATA;
6013 + cmd->src = MSM_NAND_SFLASHC_STATUS;
6014 + cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[8]);
6015 + cmd->len = 4;
6016 + cmd++;
6017 +
6018 +
6019 + BUILD_BUG_ON(53 != ARRAY_SIZE(dma_buffer->cmd));
6020 + BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
6021 + dma_buffer->cmd[0].cmd |= CMD_OCB;
6022 + cmd[-1].cmd |= CMD_OCU | CMD_LC;
6023 +
6024 + dma_buffer->cmdptr = (msm_virt_to_dma(chip, dma_buffer->cmd)
6025 + >> 3) | CMD_PTR_LP;
6026 +
6027 + mb();
6028 + msm_dmov_exec_cmd(chip->dma_channel,
6029 + DMOV_CMD_PTR_LIST | DMOV_CMD_ADDR(msm_virt_to_dma(chip,
6030 + &dma_buffer->cmdptr)));
6031 + mb();
6032 +
6033 + ecc_status = (dma_buffer->data.data3 >> 16) &
6034 + 0x0000FFFF;
6035 + interrupt_status = (dma_buffer->data.data4 >> 0) &
6036 + 0x0000FFFF;
6037 + controller_status = (dma_buffer->data.data4 >> 16) &
6038 + 0x0000FFFF;
6039 +
6040 +#if VERBOSE
6041 + pr_info("\n%s: sflash status %x %x %x %x %x %x %x"
6042 + "%x %x\n", __func__,
6043 + dma_buffer->data.sfstat[0],
6044 + dma_buffer->data.sfstat[1],
6045 + dma_buffer->data.sfstat[2],
6046 + dma_buffer->data.sfstat[3],
6047 + dma_buffer->data.sfstat[4],
6048 + dma_buffer->data.sfstat[5],
6049 + dma_buffer->data.sfstat[6],
6050 + dma_buffer->data.sfstat[7],
6051 + dma_buffer->data.sfstat[8]);
6052 +
6053 + pr_info("%s: controller_status = %x\n", __func__,
6054 + controller_status);
6055 + pr_info("%s: interrupt_status = %x\n", __func__,
6056 + interrupt_status);
6057 + pr_info("%s: ecc_status = %x\n", __func__,
6058 + ecc_status);
6059 +#endif
6060 + /* Check for errors, protection violations etc */
6061 + if ((controller_status != 0)
6062 + || (dma_buffer->data.sfstat[0] & 0x110)
6063 + || (dma_buffer->data.sfstat[1] & 0x110)
6064 + || (dma_buffer->data.sfstat[2] & 0x110)
6065 + || (dma_buffer->data.sfstat[8] & 0x110)
6066 + || ((dma_buffer->data.sfstat[3] & 0x110) &&
6067 + (ops->datbuf))
6068 + || ((dma_buffer->data.sfstat[4] & 0x110) &&
6069 + (ops->datbuf))
6070 + || ((dma_buffer->data.sfstat[5] & 0x110) &&
6071 + (ops->datbuf))
6072 + || ((dma_buffer->data.sfstat[6] & 0x110) &&
6073 + (ops->datbuf))
6074 + || ((dma_buffer->data.sfstat[7] & 0x110) &&
6075 + ((ops->oobbuf)
6076 + || (ops->mode == MTD_OPS_RAW)))) {
6077 + pr_info("%s: ECC/MPU/OP error\n", __func__);
6078 + err = -EIO;
6079 + }
6080 +
6081 + if (err)
6082 + break;
6083 + pages_read++;
6084 + from_curr += mtd->writesize;
6085 + }
6086 +
6087 + msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
6088 +
6089 + if (ops->oobbuf) {
6090 + dma_unmap_page(chip->dev, oob_dma_addr, ops->ooblen,
6091 + DMA_FROM_DEVICE);
6092 + }
6093 +err_dma_map_oobbuf_failed:
6094 + if (ops->datbuf) {
6095 + dma_unmap_page(chip->dev, data_dma_addr, ops->len,
6096 + DMA_FROM_DEVICE);
6097 + }
6098 +
6099 + if (err) {
6100 + pr_err("%s: %llx %x %x failed\n", __func__, from_curr,
6101 + ops->datbuf ? ops->len : 0, ops->ooblen);
6102 + } else {
6103 + ops->retlen = ops->oobretlen = 0;
6104 + if (ops->datbuf != NULL) {
6105 + if (ops->mode != MTD_OPS_RAW)
6106 + ops->retlen = mtd->writesize * pages_read;
6107 + else
6108 + ops->retlen = (mtd->writesize + mtd->oobsize)
6109 + * pages_read;
6110 + }
6111 + if (ops->oobbuf != NULL) {
6112 + if (ops->mode == MTD_OPS_AUTO_OOB)
6113 + ops->oobretlen = mtd->oobavail * pages_read;
6114 + else
6115 + ops->oobretlen = mtd->oobsize * pages_read;
6116 + }
6117 + }
6118 +
6119 +#if VERBOSE
6120 + pr_info("\n%s: ret %d, retlen %d oobretlen %d\n",
6121 + __func__, err, ops->retlen, ops->oobretlen);
6122 +
6123 + pr_info("==================================================="
6124 + "==============\n");
6125 +#endif
6126 + return err;
6127 +}
6128 +
6129 +int msm_onenand_read(struct mtd_info *mtd, loff_t from, size_t len,
6130 + size_t *retlen, u_char *buf)
6131 +{
6132 + int ret;
6133 + struct mtd_oob_ops ops;
6134 +
6135 + ops.mode = MTD_OPS_PLACE_OOB;
6136 + ops.datbuf = buf;
6137 + ops.len = len;
6138 + ops.retlen = 0;
6139 + ops.oobbuf = NULL;
6140 + ops.ooblen = 0;
6141 + ops.oobretlen = 0;
6142 + ret = msm_onenand_read_oob(mtd, from, &ops);
6143 + *retlen = ops.retlen;
6144 +
6145 + return ret;
6146 +}
6147 +
6148 +static int msm_onenand_write_oob(struct mtd_info *mtd, loff_t to,
6149 + struct mtd_oob_ops *ops)
6150 +{
6151 + struct msm_nand_chip *chip = mtd->priv;
6152 +
6153 + struct {
6154 + dmov_s cmd[53];
6155 + unsigned cmdptr;
6156 + struct {
6157 + uint32_t sfbcfg;
6158 + uint32_t sfcmd[10];
6159 + uint32_t sfexec;
6160 + uint32_t sfstat[10];
6161 + uint32_t addr0;
6162 + uint32_t addr1;
6163 + uint32_t addr2;
6164 + uint32_t addr3;
6165 + uint32_t addr4;
6166 + uint32_t addr5;
6167 + uint32_t addr6;
6168 + uint32_t data0;
6169 + uint32_t data1;
6170 + uint32_t data2;
6171 + uint32_t data3;
6172 + uint32_t data4;
6173 + uint32_t data5;
6174 + uint32_t data6;
6175 + uint32_t macro[5];
6176 + } data;
6177 + } *dma_buffer;
6178 + dmov_s *cmd;
6179 +
6180 + int err = 0;
6181 + int i, j, k;
6182 + dma_addr_t data_dma_addr = 0;
6183 + dma_addr_t oob_dma_addr = 0;
6184 + dma_addr_t init_dma_addr = 0;
6185 + dma_addr_t data_dma_addr_curr = 0;
6186 + dma_addr_t oob_dma_addr_curr = 0;
6187 + uint8_t *init_spare_bytes;
6188 +
6189 + loff_t to_curr = 0;
6190 + unsigned page_count;
6191 + unsigned pages_written = 0;
6192 +
6193 + uint16_t onenand_startaddr1;
6194 + uint16_t onenand_startaddr8;
6195 + uint16_t onenand_startaddr2;
6196 + uint16_t onenand_startbuffer;
6197 + uint16_t onenand_sysconfig1;
6198 +
6199 + uint16_t controller_status;
6200 + uint16_t interrupt_status;
6201 + uint16_t ecc_status;
6202 +
6203 +#if VERBOSE
6204 + pr_info("================================================="
6205 + "================\n");
6206 + pr_info("%s: to 0x%llx mode %d \ndatbuf 0x%p datlen 0x%x"
6207 + "\noobbuf 0x%p ooblen 0x%x\n",
6208 + __func__, to, ops->mode, ops->datbuf, ops->len,
6209 + ops->oobbuf, ops->ooblen);
6210 +#endif
6211 + if (!mtd) {
6212 + pr_err("%s: invalid mtd pointer, 0x%x\n", __func__,
6213 + (uint32_t)mtd);
6214 + return -EINVAL;
6215 + }
6216 + if (to & (mtd->writesize - 1)) {
6217 + pr_err("%s: unsupported to, 0x%llx\n", __func__, to);
6218 + return -EINVAL;
6219 + }
6220 +
6221 + if ((ops->mode != MTD_OPS_PLACE_OOB) && (ops->mode != MTD_OPS_AUTO_OOB) &&
6222 + (ops->mode != MTD_OPS_RAW)) {
6223 + pr_err("%s: unsupported ops->mode, %d\n", __func__,
6224 + ops->mode);
6225 + return -EINVAL;
6226 + }
6227 +
6228 + if (((ops->datbuf == NULL) || (ops->len == 0)) &&
6229 + ((ops->oobbuf == NULL) || (ops->ooblen == 0))) {
6230 + pr_err("%s: incorrect ops fields - nothing to do\n",
6231 + __func__);
6232 + return -EINVAL;
6233 + }
6234 +
6235 + if ((ops->datbuf != NULL) && (ops->len == 0)) {
6236 + pr_err("%s: data buffer passed but length 0\n",
6237 + __func__);
6238 + return -EINVAL;
6239 + }
6240 +
6241 + if ((ops->oobbuf != NULL) && (ops->ooblen == 0)) {
6242 + pr_err("%s: oob buffer passed but length 0\n",
6243 + __func__);
6244 + return -EINVAL;
6245 + }
6246 +
6247 + if (ops->mode != MTD_OPS_RAW) {
6248 + if (ops->datbuf != NULL && (ops->len % mtd->writesize) != 0) {
6249 + /* when ops->datbuf is NULL, ops->len can be ooblen */
6250 + pr_err("%s: unsupported ops->len, %d\n", __func__,
6251 + ops->len);
6252 + return -EINVAL;
6253 + }
6254 + } else {
6255 + if (ops->datbuf != NULL &&
6256 + (ops->len % (mtd->writesize + mtd->oobsize)) != 0) {
6257 + pr_err("%s: unsupported ops->len,"
6258 + " %d for MTD_OPS_RAW\n", __func__, ops->len);
6259 + return -EINVAL;
6260 + }
6261 + }
6262 +
6263 + if ((ops->mode == MTD_OPS_RAW) && (ops->oobbuf)) {
6264 + pr_err("%s: unsupported operation, oobbuf pointer "
6265 + "passed in for RAW mode, %x\n", __func__,
6266 + (uint32_t)ops->oobbuf);
6267 + return -EINVAL;
6268 + }
6269 +
6270 + if (ops->oobbuf && !ops->datbuf) {
6271 + page_count = ops->ooblen / ((ops->mode == MTD_OPS_AUTO_OOB) ?
6272 + mtd->oobavail : mtd->oobsize);
6273 + if ((page_count == 0) && (ops->ooblen))
6274 + page_count = 1;
6275 + } else if (ops->mode != MTD_OPS_RAW)
6276 + page_count = ops->len / mtd->writesize;
6277 + else
6278 + page_count = ops->len / (mtd->writesize + mtd->oobsize);
6279 +
6280 + if ((ops->mode == MTD_OPS_AUTO_OOB) && (ops->oobbuf != NULL)) {
6281 + if (page_count > 1) {
6282 + pr_err("%s: unsupported ops->ooblen for"
6283 + "AUTO, %d\n", __func__, ops->ooblen);
6284 + return -EINVAL;
6285 + }
6286 + }
6287 +
6288 + if ((ops->mode == MTD_OPS_PLACE_OOB) && (ops->oobbuf != NULL)) {
6289 + if (page_count * mtd->oobsize > ops->ooblen) {
6290 + pr_err("%s: unsupported ops->ooblen for"
6291 + "PLACE, %d\n", __func__, ops->ooblen);
6292 + return -EINVAL;
6293 + }
6294 + }
6295 +
6296 + if ((ops->mode == MTD_OPS_PLACE_OOB) && (ops->ooblen != 0) &&
6297 + (ops->ooboffs != 0)) {
6298 + pr_err("%s: unsupported ops->ooboffs, %d\n",
6299 + __func__, ops->ooboffs);
6300 + return -EINVAL;
6301 + }
6302 +
6303 + init_spare_bytes = kmalloc(64, GFP_KERNEL);
6304 + if (!init_spare_bytes) {
6305 + pr_err("%s: failed to alloc init_spare_bytes buffer\n",
6306 + __func__);
6307 + return -ENOMEM;
6308 + }
6309 + for (i = 0; i < 64; i++)
6310 + init_spare_bytes[i] = 0xFF;
6311 +
6312 + if ((ops->oobbuf) && (ops->mode == MTD_OPS_AUTO_OOB)) {
6313 + for (i = 0, k = 0; i < MTD_MAX_OOBFREE_ENTRIES; i++)
6314 + for (j = 0; j < mtd->ecclayout->oobfree[i].length;
6315 + j++) {
6316 + init_spare_bytes[j +
6317 + mtd->ecclayout->oobfree[i].offset]
6318 + = (ops->oobbuf)[k];
6319 + k++;
6320 + }
6321 + }
6322 +
6323 + if (ops->datbuf) {
6324 + data_dma_addr_curr = data_dma_addr = msm_nand_dma_map(chip->dev,
6325 + ops->datbuf, ops->len, DMA_TO_DEVICE, NULL);
6326 + if (dma_mapping_error(chip->dev, data_dma_addr)) {
6327 + pr_err("%s: failed to get dma addr for %p\n",
6328 + __func__, ops->datbuf);
6329 + return -EIO;
6330 + }
6331 + }
6332 + if (ops->oobbuf) {
6333 + oob_dma_addr_curr = oob_dma_addr = msm_nand_dma_map(chip->dev,
6334 + ops->oobbuf, ops->ooblen, DMA_TO_DEVICE, NULL);
6335 + if (dma_mapping_error(chip->dev, oob_dma_addr)) {
6336 + pr_err("%s: failed to get dma addr for %p\n",
6337 + __func__, ops->oobbuf);
6338 + err = -EIO;
6339 + goto err_dma_map_oobbuf_failed;
6340 + }
6341 + }
6342 +
6343 + init_dma_addr = msm_nand_dma_map(chip->dev, init_spare_bytes, 64,
6344 + DMA_TO_DEVICE, NULL);
6345 + if (dma_mapping_error(chip->dev, init_dma_addr)) {
6346 + pr_err("%s: failed to get dma addr for %p\n",
6347 + __func__, init_spare_bytes);
6348 + err = -EIO;
6349 + goto err_dma_map_initbuf_failed;
6350 + }
6351 +
6352 +
6353 + wait_event(chip->wait_queue, (dma_buffer = msm_nand_get_dma_buffer
6354 + (chip, sizeof(*dma_buffer))));
6355 +
6356 + to_curr = to;
6357 +
6358 + while (page_count-- > 0) {
6359 + cmd = dma_buffer->cmd;
6360 +
6361 + if ((onenand_info.device_id & ONENAND_DEVICE_IS_DDP)
6362 + && (to_curr >= (mtd->size>>1))) { /* DDP Device */
6363 + onenand_startaddr1 = DEVICE_FLASHCORE_1 |
6364 + (((uint32_t)(to_curr-(mtd->size>>1))
6365 + / mtd->erasesize));
6366 + onenand_startaddr2 = DEVICE_BUFFERRAM_1;
6367 + } else {
6368 + onenand_startaddr1 = DEVICE_FLASHCORE_0 |
6369 + ((uint32_t)to_curr / mtd->erasesize) ;
6370 + onenand_startaddr2 = DEVICE_BUFFERRAM_0;
6371 + }
6372 +
6373 + onenand_startaddr8 = (((uint32_t)to_curr &
6374 + (mtd->erasesize - 1)) / mtd->writesize) << 2;
6375 + onenand_startbuffer = DATARAM0_0 << 8;
6376 + onenand_sysconfig1 = (ops->mode == MTD_OPS_RAW) ?
6377 + ONENAND_SYSCFG1_ECCDIS(nand_sfcmd_mode) :
6378 + ONENAND_SYSCFG1_ECCENA(nand_sfcmd_mode);
6379 +
6380 + dma_buffer->data.sfbcfg = SFLASH_BCFG |
6381 + (nand_sfcmd_mode ? 0 : (1 << 24));
6382 + dma_buffer->data.sfcmd[0] = SFLASH_PREPCMD(6, 0, 0,
6383 + MSM_NAND_SFCMD_CMDXS,
6384 + nand_sfcmd_mode,
6385 + MSM_NAND_SFCMD_REGWR);
6386 + dma_buffer->data.sfcmd[1] = SFLASH_PREPCMD(256, 0, 0,
6387 + MSM_NAND_SFCMD_CMDXS,
6388 + nand_sfcmd_mode,
6389 + MSM_NAND_SFCMD_DATWR);
6390 + dma_buffer->data.sfcmd[2] = SFLASH_PREPCMD(256, 0, 0,
6391 + MSM_NAND_SFCMD_CMDXS,
6392 + nand_sfcmd_mode,
6393 + MSM_NAND_SFCMD_DATWR);
6394 + dma_buffer->data.sfcmd[3] = SFLASH_PREPCMD(256, 0, 0,
6395 + MSM_NAND_SFCMD_CMDXS,
6396 + nand_sfcmd_mode,
6397 + MSM_NAND_SFCMD_DATWR);
6398 + dma_buffer->data.sfcmd[4] = SFLASH_PREPCMD(256, 0, 0,
6399 + MSM_NAND_SFCMD_CMDXS,
6400 + nand_sfcmd_mode,
6401 + MSM_NAND_SFCMD_DATWR);
6402 + dma_buffer->data.sfcmd[5] = SFLASH_PREPCMD(32, 0, 0,
6403 + MSM_NAND_SFCMD_CMDXS,
6404 + nand_sfcmd_mode,
6405 + MSM_NAND_SFCMD_DATWR);
6406 + dma_buffer->data.sfcmd[6] = SFLASH_PREPCMD(1, 6, 0,
6407 + MSM_NAND_SFCMD_CMDXS,
6408 + nand_sfcmd_mode,
6409 + MSM_NAND_SFCMD_REGWR);
6410 + dma_buffer->data.sfcmd[7] = SFLASH_PREPCMD(0, 0, 32,
6411 + MSM_NAND_SFCMD_CMDXS,
6412 + nand_sfcmd_mode,
6413 + MSM_NAND_SFCMD_INTHI);
6414 + dma_buffer->data.sfcmd[8] = SFLASH_PREPCMD(3, 7, 0,
6415 + MSM_NAND_SFCMD_DATXS,
6416 + nand_sfcmd_mode,
6417 + MSM_NAND_SFCMD_REGRD);
6418 + dma_buffer->data.sfcmd[9] = SFLASH_PREPCMD(4, 10, 0,
6419 + MSM_NAND_SFCMD_CMDXS,
6420 + nand_sfcmd_mode,
6421 + MSM_NAND_SFCMD_REGWR);
6422 + dma_buffer->data.sfexec = 1;
6423 + dma_buffer->data.sfstat[0] = CLEAN_DATA_32;
6424 + dma_buffer->data.sfstat[1] = CLEAN_DATA_32;
6425 + dma_buffer->data.sfstat[2] = CLEAN_DATA_32;
6426 + dma_buffer->data.sfstat[3] = CLEAN_DATA_32;
6427 + dma_buffer->data.sfstat[4] = CLEAN_DATA_32;
6428 + dma_buffer->data.sfstat[5] = CLEAN_DATA_32;
6429 + dma_buffer->data.sfstat[6] = CLEAN_DATA_32;
6430 + dma_buffer->data.sfstat[7] = CLEAN_DATA_32;
6431 + dma_buffer->data.sfstat[8] = CLEAN_DATA_32;
6432 + dma_buffer->data.sfstat[9] = CLEAN_DATA_32;
6433 + dma_buffer->data.addr0 = (ONENAND_INTERRUPT_STATUS << 16) |
6434 + (ONENAND_SYSTEM_CONFIG_1);
6435 + dma_buffer->data.addr1 = (ONENAND_START_ADDRESS_8 << 16) |
6436 + (ONENAND_START_ADDRESS_1);
6437 + dma_buffer->data.addr2 = (ONENAND_START_BUFFER << 16) |
6438 + (ONENAND_START_ADDRESS_2);
6439 + dma_buffer->data.addr3 = (ONENAND_ECC_STATUS << 16) |
6440 + (ONENAND_COMMAND);
6441 + dma_buffer->data.addr4 = (ONENAND_CONTROLLER_STATUS << 16) |
6442 + (ONENAND_INTERRUPT_STATUS);
6443 + dma_buffer->data.addr5 = (ONENAND_INTERRUPT_STATUS << 16) |
6444 + (ONENAND_SYSTEM_CONFIG_1);
6445 + dma_buffer->data.addr6 = (ONENAND_START_ADDRESS_3 << 16) |
6446 + (ONENAND_START_ADDRESS_1);
6447 + dma_buffer->data.data0 = (ONENAND_CLRINTR << 16) |
6448 + (onenand_sysconfig1);
6449 + dma_buffer->data.data1 = (onenand_startaddr8 << 16) |
6450 + (onenand_startaddr1);
6451 + dma_buffer->data.data2 = (onenand_startbuffer << 16) |
6452 + (onenand_startaddr2);
6453 + dma_buffer->data.data3 = (CLEAN_DATA_16 << 16) |
6454 + (ONENAND_CMDPROGSPARE);
6455 + dma_buffer->data.data4 = (CLEAN_DATA_16 << 16) |
6456 + (CLEAN_DATA_16);
6457 + dma_buffer->data.data5 = (ONENAND_CLRINTR << 16) |
6458 + (ONENAND_SYSCFG1_ECCENA(nand_sfcmd_mode));
6459 + dma_buffer->data.data6 = (ONENAND_STARTADDR3_RES << 16) |
6460 + (ONENAND_STARTADDR1_RES);
6461 + dma_buffer->data.macro[0] = 0x0200;
6462 + dma_buffer->data.macro[1] = 0x0300;
6463 + dma_buffer->data.macro[2] = 0x0400;
6464 + dma_buffer->data.macro[3] = 0x0500;
6465 + dma_buffer->data.macro[4] = 0x8010;
6466 +
6467 +
6468 + /*************************************************************/
6469 + /* Write necessary address registers in the onenand device */
6470 + /*************************************************************/
6471 +
6472 + /* Enable and configure the SFlash controller */
6473 + cmd->cmd = 0;
6474 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfbcfg);
6475 + cmd->dst = MSM_NAND_SFLASHC_BURST_CFG;
6476 + cmd->len = 4;
6477 + cmd++;
6478 +
6479 + /* Block on cmd ready and write CMD register */
6480 + cmd->cmd = DST_CRCI_NAND_CMD;
6481 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[0]);
6482 + cmd->dst = MSM_NAND_SFLASHC_CMD;
6483 + cmd->len = 4;
6484 + cmd++;
6485 +
6486 + /* Write the ADDR0 and ADDR1 registers */
6487 + cmd->cmd = 0;
6488 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr0);
6489 + cmd->dst = MSM_NAND_ADDR0;
6490 + cmd->len = 8;
6491 + cmd++;
6492 +
6493 + /* Write the ADDR2 ADDR3 ADDR4 ADDR5 registers */
6494 + cmd->cmd = 0;
6495 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr2);
6496 + cmd->dst = MSM_NAND_ADDR2;
6497 + cmd->len = 16;
6498 + cmd++;
6499 +
6500 + /* Write the ADDR6 registers */
6501 + cmd->cmd = 0;
6502 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr6);
6503 + cmd->dst = MSM_NAND_ADDR6;
6504 + cmd->len = 4;
6505 + cmd++;
6506 +
6507 + /* Write the GENP0, GENP1, GENP2, GENP3 registers */
6508 + cmd->cmd = 0;
6509 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.data0);
6510 + cmd->dst = MSM_NAND_GENP_REG0;
6511 + cmd->len = 16;
6512 + cmd++;
6513 +
6514 + /* Write the FLASH_DEV_CMD4,5,6 registers */
6515 + cmd->cmd = 0;
6516 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.data4);
6517 + cmd->dst = MSM_NAND_DEV_CMD4;
6518 + cmd->len = 12;
6519 + cmd++;
6520 +
6521 + /* Kick the execute command */
6522 + cmd->cmd = 0;
6523 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
6524 + cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
6525 + cmd->len = 4;
6526 + cmd++;
6527 +
6528 + /* Block on data ready, and read the status register */
6529 + cmd->cmd = SRC_CRCI_NAND_DATA;
6530 + cmd->src = MSM_NAND_SFLASHC_STATUS;
6531 + cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[0]);
6532 + cmd->len = 4;
6533 + cmd++;
6534 +
6535 + /*************************************************************/
6536 + /* Write the data ram area in the onenand buffer ram */
6537 + /*************************************************************/
6538 +
6539 + if (ops->datbuf) {
6540 + dma_buffer->data.data3 = (CLEAN_DATA_16 << 16) |
6541 + (ONENAND_CMDPROG);
6542 +
6543 + for (i = 0; i < 4; i++) {
6544 +
6545 + /* Block on cmd ready and write CMD register */
6546 + cmd->cmd = DST_CRCI_NAND_CMD;
6547 + cmd->src = msm_virt_to_dma(chip,
6548 + &dma_buffer->data.sfcmd[1+i]);
6549 + cmd->dst = MSM_NAND_SFLASHC_CMD;
6550 + cmd->len = 4;
6551 + cmd++;
6552 +
6553 + /* Trnsfr usr buf contents to nand ctlr buf */
6554 + cmd->cmd = 0;
6555 + cmd->src = data_dma_addr_curr;
6556 + cmd->dst = MSM_NAND_FLASH_BUFFER;
6557 + cmd->len = 512;
6558 + data_dma_addr_curr += 512;
6559 + cmd++;
6560 +
6561 + /* Write the MACRO1 register */
6562 + cmd->cmd = 0;
6563 + cmd->src = msm_virt_to_dma(chip,
6564 + &dma_buffer->data.macro[i]);
6565 + cmd->dst = MSM_NAND_MACRO1_REG;
6566 + cmd->len = 4;
6567 + cmd++;
6568 +
6569 + /* Kick the execute command */
6570 + cmd->cmd = 0;
6571 + cmd->src = msm_virt_to_dma(chip,
6572 + &dma_buffer->data.sfexec);
6573 + cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
6574 + cmd->len = 4;
6575 + cmd++;
6576 +
6577 + /* Block on data rdy, & read status register */
6578 + cmd->cmd = SRC_CRCI_NAND_DATA;
6579 + cmd->src = MSM_NAND_SFLASHC_STATUS;
6580 + cmd->dst = msm_virt_to_dma(chip,
6581 + &dma_buffer->data.sfstat[1+i]);
6582 + cmd->len = 4;
6583 + cmd++;
6584 +
6585 + }
6586 + }
6587 +
6588 + /* Block on cmd ready and write CMD register */
6589 + cmd->cmd = DST_CRCI_NAND_CMD;
6590 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[5]);
6591 + cmd->dst = MSM_NAND_SFLASHC_CMD;
6592 + cmd->len = 4;
6593 + cmd++;
6594 +
6595 + if ((ops->oobbuf) || (ops->mode == MTD_OPS_RAW)) {
6596 +
6597 + /* Transfer user buf contents into nand ctlr buffer */
6598 + if (ops->mode == MTD_OPS_AUTO_OOB) {
6599 + cmd->cmd = 0;
6600 + cmd->src = init_dma_addr;
6601 + cmd->dst = MSM_NAND_FLASH_BUFFER;
6602 + cmd->len = mtd->oobsize;
6603 + cmd++;
6604 + }
6605 + if (ops->mode == MTD_OPS_PLACE_OOB) {
6606 + cmd->cmd = 0;
6607 + cmd->src = oob_dma_addr_curr;
6608 + cmd->dst = MSM_NAND_FLASH_BUFFER;
6609 + cmd->len = mtd->oobsize;
6610 + oob_dma_addr_curr += mtd->oobsize;
6611 + cmd++;
6612 + }
6613 + if (ops->mode == MTD_OPS_RAW) {
6614 + cmd->cmd = 0;
6615 + cmd->src = data_dma_addr_curr;
6616 + cmd->dst = MSM_NAND_FLASH_BUFFER;
6617 + cmd->len = mtd->oobsize;
6618 + data_dma_addr_curr += mtd->oobsize;
6619 + cmd++;
6620 + }
6621 + } else {
6622 + cmd->cmd = 0;
6623 + cmd->src = init_dma_addr;
6624 + cmd->dst = MSM_NAND_FLASH_BUFFER;
6625 + cmd->len = mtd->oobsize;
6626 + cmd++;
6627 + }
6628 +
6629 + /* Write the MACRO1 register */
6630 + cmd->cmd = 0;
6631 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.macro[4]);
6632 + cmd->dst = MSM_NAND_MACRO1_REG;
6633 + cmd->len = 4;
6634 + cmd++;
6635 +
6636 + /* Kick the execute command */
6637 + cmd->cmd = 0;
6638 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
6639 + cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
6640 + cmd->len = 4;
6641 + cmd++;
6642 +
6643 + /* Block on data ready, and read the status register */
6644 + cmd->cmd = SRC_CRCI_NAND_DATA;
6645 + cmd->src = MSM_NAND_SFLASHC_STATUS;
6646 + cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[5]);
6647 + cmd->len = 4;
6648 + cmd++;
6649 +
6650 + /*********************************************************/
6651 + /* Issuing write command */
6652 + /*********************************************************/
6653 +
6654 + /* Block on cmd ready and write CMD register */
6655 + cmd->cmd = DST_CRCI_NAND_CMD;
6656 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[6]);
6657 + cmd->dst = MSM_NAND_SFLASHC_CMD;
6658 + cmd->len = 4;
6659 + cmd++;
6660 +
6661 + /* Kick the execute command */
6662 + cmd->cmd = 0;
6663 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
6664 + cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
6665 + cmd->len = 4;
6666 + cmd++;
6667 +
6668 + /* Block on data ready, and read the status register */
6669 + cmd->cmd = SRC_CRCI_NAND_DATA;
6670 + cmd->src = MSM_NAND_SFLASHC_STATUS;
6671 + cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[6]);
6672 + cmd->len = 4;
6673 + cmd++;
6674 +
6675 + /*************************************************************/
6676 + /* Wait for the interrupt from the Onenand device controller */
6677 + /*************************************************************/
6678 +
6679 + /* Block on cmd ready and write CMD register */
6680 + cmd->cmd = DST_CRCI_NAND_CMD;
6681 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[7]);
6682 + cmd->dst = MSM_NAND_SFLASHC_CMD;
6683 + cmd->len = 4;
6684 + cmd++;
6685 +
6686 + /* Kick the execute command */
6687 + cmd->cmd = 0;
6688 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
6689 + cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
6690 + cmd->len = 4;
6691 + cmd++;
6692 +
6693 + /* Block on data ready, and read the status register */
6694 + cmd->cmd = SRC_CRCI_NAND_DATA;
6695 + cmd->src = MSM_NAND_SFLASHC_STATUS;
6696 + cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[7]);
6697 + cmd->len = 4;
6698 + cmd++;
6699 +
6700 + /*************************************************************/
6701 + /* Read necessary status registers from the onenand device */
6702 + /*************************************************************/
6703 +
6704 + /* Block on cmd ready and write CMD register */
6705 + cmd->cmd = DST_CRCI_NAND_CMD;
6706 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[8]);
6707 + cmd->dst = MSM_NAND_SFLASHC_CMD;
6708 + cmd->len = 4;
6709 + cmd++;
6710 +
6711 + /* Kick the execute command */
6712 + cmd->cmd = 0;
6713 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
6714 + cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
6715 + cmd->len = 4;
6716 + cmd++;
6717 +
6718 + /* Block on data ready, and read the status register */
6719 + cmd->cmd = SRC_CRCI_NAND_DATA;
6720 + cmd->src = MSM_NAND_SFLASHC_STATUS;
6721 + cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[8]);
6722 + cmd->len = 4;
6723 + cmd++;
6724 +
6725 + /* Read the GENP3 register */
6726 + cmd->cmd = 0;
6727 + cmd->src = MSM_NAND_GENP_REG3;
6728 + cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.data3);
6729 + cmd->len = 4;
6730 + cmd++;
6731 +
6732 + /* Read the DEVCMD4 register */
6733 + cmd->cmd = 0;
6734 + cmd->src = MSM_NAND_DEV_CMD4;
6735 + cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.data4);
6736 + cmd->len = 4;
6737 + cmd++;
6738 +
6739 + /*************************************************************/
6740 + /* Restore the necessary registers to proper values */
6741 + /*************************************************************/
6742 +
6743 + /* Block on cmd ready and write CMD register */
6744 + cmd->cmd = DST_CRCI_NAND_CMD;
6745 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[9]);
6746 + cmd->dst = MSM_NAND_SFLASHC_CMD;
6747 + cmd->len = 4;
6748 + cmd++;
6749 +
6750 + /* Kick the execute command */
6751 + cmd->cmd = 0;
6752 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
6753 + cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
6754 + cmd->len = 4;
6755 + cmd++;
6756 +
6757 + /* Block on data ready, and read the status register */
6758 + cmd->cmd = SRC_CRCI_NAND_DATA;
6759 + cmd->src = MSM_NAND_SFLASHC_STATUS;
6760 + cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[9]);
6761 + cmd->len = 4;
6762 + cmd++;
6763 +
6764 +
6765 + BUILD_BUG_ON(53 != ARRAY_SIZE(dma_buffer->cmd));
6766 + BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
6767 + dma_buffer->cmd[0].cmd |= CMD_OCB;
6768 + cmd[-1].cmd |= CMD_OCU | CMD_LC;
6769 +
6770 + dma_buffer->cmdptr = (msm_virt_to_dma(chip, dma_buffer->cmd)
6771 + >> 3) | CMD_PTR_LP;
6772 +
6773 + mb();
6774 + msm_dmov_exec_cmd(chip->dma_channel,
6775 + DMOV_CMD_PTR_LIST | DMOV_CMD_ADDR(msm_virt_to_dma(chip,
6776 + &dma_buffer->cmdptr)));
6777 + mb();
6778 +
6779 + ecc_status = (dma_buffer->data.data3 >> 16) & 0x0000FFFF;
6780 + interrupt_status = (dma_buffer->data.data4 >> 0)&0x0000FFFF;
6781 + controller_status = (dma_buffer->data.data4 >> 16)&0x0000FFFF;
6782 +
6783 +#if VERBOSE
6784 + pr_info("\n%s: sflash status %x %x %x %x %x %x %x"
6785 + " %x %x %x\n", __func__,
6786 + dma_buffer->data.sfstat[0],
6787 + dma_buffer->data.sfstat[1],
6788 + dma_buffer->data.sfstat[2],
6789 + dma_buffer->data.sfstat[3],
6790 + dma_buffer->data.sfstat[4],
6791 + dma_buffer->data.sfstat[5],
6792 + dma_buffer->data.sfstat[6],
6793 + dma_buffer->data.sfstat[7],
6794 + dma_buffer->data.sfstat[8],
6795 + dma_buffer->data.sfstat[9]);
6796 +
6797 + pr_info("%s: controller_status = %x\n", __func__,
6798 + controller_status);
6799 + pr_info("%s: interrupt_status = %x\n", __func__,
6800 + interrupt_status);
6801 + pr_info("%s: ecc_status = %x\n", __func__,
6802 + ecc_status);
6803 +#endif
6804 + /* Check for errors, protection violations etc */
6805 + if ((controller_status != 0)
6806 + || (dma_buffer->data.sfstat[0] & 0x110)
6807 + || (dma_buffer->data.sfstat[6] & 0x110)
6808 + || (dma_buffer->data.sfstat[7] & 0x110)
6809 + || (dma_buffer->data.sfstat[8] & 0x110)
6810 + || (dma_buffer->data.sfstat[9] & 0x110)
6811 + || ((dma_buffer->data.sfstat[1] & 0x110) &&
6812 + (ops->datbuf))
6813 + || ((dma_buffer->data.sfstat[2] & 0x110) &&
6814 + (ops->datbuf))
6815 + || ((dma_buffer->data.sfstat[3] & 0x110) &&
6816 + (ops->datbuf))
6817 + || ((dma_buffer->data.sfstat[4] & 0x110) &&
6818 + (ops->datbuf))
6819 + || ((dma_buffer->data.sfstat[5] & 0x110) &&
6820 + ((ops->oobbuf)
6821 + || (ops->mode == MTD_OPS_RAW)))) {
6822 + pr_info("%s: ECC/MPU/OP error\n", __func__);
6823 + err = -EIO;
6824 + }
6825 +
6826 + if (err)
6827 + break;
6828 + pages_written++;
6829 + to_curr += mtd->writesize;
6830 + }
6831 +
6832 + msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
6833 +
6834 + dma_unmap_page(chip->dev, init_dma_addr, 64, DMA_TO_DEVICE);
6835 +
6836 +err_dma_map_initbuf_failed:
6837 + if (ops->oobbuf) {
6838 + dma_unmap_page(chip->dev, oob_dma_addr, ops->ooblen,
6839 + DMA_TO_DEVICE);
6840 + }
6841 +err_dma_map_oobbuf_failed:
6842 + if (ops->datbuf) {
6843 + dma_unmap_page(chip->dev, data_dma_addr, ops->len,
6844 + DMA_TO_DEVICE);
6845 + }
6846 +
6847 + if (err) {
6848 + pr_err("%s: %llx %x %x failed\n", __func__, to_curr,
6849 + ops->datbuf ? ops->len : 0, ops->ooblen);
6850 + } else {
6851 + ops->retlen = ops->oobretlen = 0;
6852 + if (ops->datbuf != NULL) {
6853 + if (ops->mode != MTD_OPS_RAW)
6854 + ops->retlen = mtd->writesize * pages_written;
6855 + else
6856 + ops->retlen = (mtd->writesize + mtd->oobsize)
6857 + * pages_written;
6858 + }
6859 + if (ops->oobbuf != NULL) {
6860 + if (ops->mode == MTD_OPS_AUTO_OOB)
6861 + ops->oobretlen = mtd->oobavail * pages_written;
6862 + else
6863 + ops->oobretlen = mtd->oobsize * pages_written;
6864 + }
6865 + }
6866 +
6867 +#if VERBOSE
6868 + pr_info("\n%s: ret %d, retlen %d oobretlen %d\n",
6869 + __func__, err, ops->retlen, ops->oobretlen);
6870 +
6871 + pr_info("================================================="
6872 + "================\n");
6873 +#endif
6874 + kfree(init_spare_bytes);
6875 + return err;
6876 +}
6877 +
6878 +static int msm_onenand_write(struct mtd_info *mtd, loff_t to, size_t len,
6879 + size_t *retlen, const u_char *buf)
6880 +{
6881 + int ret;
6882 + struct mtd_oob_ops ops;
6883 +
6884 + ops.mode = MTD_OPS_PLACE_OOB;
6885 + ops.datbuf = (uint8_t *)buf;
6886 + ops.len = len;
6887 + ops.retlen = 0;
6888 + ops.oobbuf = NULL;
6889 + ops.ooblen = 0;
6890 + ops.oobretlen = 0;
6891 + ret = msm_onenand_write_oob(mtd, to, &ops);
6892 + *retlen = ops.retlen;
6893 +
6894 + return ret;
6895 +}
6896 +
6897 +static int msm_onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
6898 +{
6899 + struct msm_nand_chip *chip = mtd->priv;
6900 +
6901 + struct {
6902 + dmov_s cmd[20];
6903 + unsigned cmdptr;
6904 + struct {
6905 + uint32_t sfbcfg;
6906 + uint32_t sfcmd[4];
6907 + uint32_t sfexec;
6908 + uint32_t sfstat[4];
6909 + uint32_t addr0;
6910 + uint32_t addr1;
6911 + uint32_t addr2;
6912 + uint32_t addr3;
6913 + uint32_t addr4;
6914 + uint32_t addr5;
6915 + uint32_t addr6;
6916 + uint32_t data0;
6917 + uint32_t data1;
6918 + uint32_t data2;
6919 + uint32_t data3;
6920 + uint32_t data4;
6921 + uint32_t data5;
6922 + uint32_t data6;
6923 + } data;
6924 + } *dma_buffer;
6925 + dmov_s *cmd;
6926 +
6927 + int err = 0;
6928 +
6929 + uint16_t onenand_startaddr1;
6930 + uint16_t onenand_startaddr8;
6931 + uint16_t onenand_startaddr2;
6932 + uint16_t onenand_startbuffer;
6933 +
6934 + uint16_t controller_status;
6935 + uint16_t interrupt_status;
6936 + uint16_t ecc_status;
6937 +
6938 + uint64_t temp;
6939 +
6940 +#if VERBOSE
6941 + pr_info("================================================="
6942 + "================\n");
6943 + pr_info("%s: addr 0x%llx len 0x%llx\n",
6944 + __func__, instr->addr, instr->len);
6945 +#endif
6946 + if (instr->addr & (mtd->erasesize - 1)) {
6947 + pr_err("%s: Unsupported erase address, 0x%llx\n",
6948 + __func__, instr->addr);
6949 + return -EINVAL;
6950 + }
6951 + if (instr->len != mtd->erasesize) {
6952 + pr_err("%s: Unsupported erase len, %lld\n",
6953 + __func__, instr->len);
6954 + return -EINVAL;
6955 + }
6956 +
6957 + wait_event(chip->wait_queue, (dma_buffer = msm_nand_get_dma_buffer
6958 + (chip, sizeof(*dma_buffer))));
6959 +
6960 + cmd = dma_buffer->cmd;
6961 +
6962 + temp = instr->addr;
6963 +
6964 + if ((onenand_info.device_id & ONENAND_DEVICE_IS_DDP)
6965 + && (temp >= (mtd->size>>1))) { /* DDP Device */
6966 + onenand_startaddr1 = DEVICE_FLASHCORE_1 |
6967 + (((uint32_t)(temp-(mtd->size>>1))
6968 + / mtd->erasesize));
6969 + onenand_startaddr2 = DEVICE_BUFFERRAM_1;
6970 + } else {
6971 + onenand_startaddr1 = DEVICE_FLASHCORE_0 |
6972 + ((uint32_t)temp / mtd->erasesize) ;
6973 + onenand_startaddr2 = DEVICE_BUFFERRAM_0;
6974 + }
6975 +
6976 + onenand_startaddr8 = 0x0000;
6977 + onenand_startbuffer = DATARAM0_0 << 8;
6978 +
6979 + dma_buffer->data.sfbcfg = SFLASH_BCFG |
6980 + (nand_sfcmd_mode ? 0 : (1 << 24));
6981 + dma_buffer->data.sfcmd[0] = SFLASH_PREPCMD(7, 0, 0,
6982 + MSM_NAND_SFCMD_CMDXS,
6983 + nand_sfcmd_mode,
6984 + MSM_NAND_SFCMD_REGWR);
6985 + dma_buffer->data.sfcmd[1] = SFLASH_PREPCMD(0, 0, 32,
6986 + MSM_NAND_SFCMD_CMDXS,
6987 + nand_sfcmd_mode,
6988 + MSM_NAND_SFCMD_INTHI);
6989 + dma_buffer->data.sfcmd[2] = SFLASH_PREPCMD(3, 7, 0,
6990 + MSM_NAND_SFCMD_DATXS,
6991 + nand_sfcmd_mode,
6992 + MSM_NAND_SFCMD_REGRD);
6993 + dma_buffer->data.sfcmd[3] = SFLASH_PREPCMD(4, 10, 0,
6994 + MSM_NAND_SFCMD_CMDXS,
6995 + nand_sfcmd_mode,
6996 + MSM_NAND_SFCMD_REGWR);
6997 + dma_buffer->data.sfexec = 1;
6998 + dma_buffer->data.sfstat[0] = CLEAN_DATA_32;
6999 + dma_buffer->data.sfstat[1] = CLEAN_DATA_32;
7000 + dma_buffer->data.sfstat[2] = CLEAN_DATA_32;
7001 + dma_buffer->data.sfstat[3] = CLEAN_DATA_32;
7002 + dma_buffer->data.addr0 = (ONENAND_INTERRUPT_STATUS << 16) |
7003 + (ONENAND_SYSTEM_CONFIG_1);
7004 + dma_buffer->data.addr1 = (ONENAND_START_ADDRESS_8 << 16) |
7005 + (ONENAND_START_ADDRESS_1);
7006 + dma_buffer->data.addr2 = (ONENAND_START_BUFFER << 16) |
7007 + (ONENAND_START_ADDRESS_2);
7008 + dma_buffer->data.addr3 = (ONENAND_ECC_STATUS << 16) |
7009 + (ONENAND_COMMAND);
7010 + dma_buffer->data.addr4 = (ONENAND_CONTROLLER_STATUS << 16) |
7011 + (ONENAND_INTERRUPT_STATUS);
7012 + dma_buffer->data.addr5 = (ONENAND_INTERRUPT_STATUS << 16) |
7013 + (ONENAND_SYSTEM_CONFIG_1);
7014 + dma_buffer->data.addr6 = (ONENAND_START_ADDRESS_3 << 16) |
7015 + (ONENAND_START_ADDRESS_1);
7016 + dma_buffer->data.data0 = (ONENAND_CLRINTR << 16) |
7017 + (ONENAND_SYSCFG1_ECCENA(nand_sfcmd_mode));
7018 + dma_buffer->data.data1 = (onenand_startaddr8 << 16) |
7019 + (onenand_startaddr1);
7020 + dma_buffer->data.data2 = (onenand_startbuffer << 16) |
7021 + (onenand_startaddr2);
7022 + dma_buffer->data.data3 = (CLEAN_DATA_16 << 16) |
7023 + (ONENAND_CMDERAS);
7024 + dma_buffer->data.data4 = (CLEAN_DATA_16 << 16) |
7025 + (CLEAN_DATA_16);
7026 + dma_buffer->data.data5 = (ONENAND_CLRINTR << 16) |
7027 + (ONENAND_SYSCFG1_ECCENA(nand_sfcmd_mode));
7028 + dma_buffer->data.data6 = (ONENAND_STARTADDR3_RES << 16) |
7029 + (ONENAND_STARTADDR1_RES);
7030 +
7031 + /***************************************************************/
7032 + /* Write the necessary address registers in the onenand device */
7033 + /***************************************************************/
7034 +
7035 + /* Enable and configure the SFlash controller */
7036 + cmd->cmd = 0;
7037 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfbcfg);
7038 + cmd->dst = MSM_NAND_SFLASHC_BURST_CFG;
7039 + cmd->len = 4;
7040 + cmd++;
7041 +
7042 + /* Block on cmd ready and write CMD register */
7043 + cmd->cmd = DST_CRCI_NAND_CMD;
7044 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[0]);
7045 + cmd->dst = MSM_NAND_SFLASHC_CMD;
7046 + cmd->len = 4;
7047 + cmd++;
7048 +
7049 + /* Write the ADDR0 and ADDR1 registers */
7050 + cmd->cmd = 0;
7051 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr0);
7052 + cmd->dst = MSM_NAND_ADDR0;
7053 + cmd->len = 8;
7054 + cmd++;
7055 +
7056 + /* Write the ADDR2 ADDR3 ADDR4 ADDR5 registers */
7057 + cmd->cmd = 0;
7058 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr2);
7059 + cmd->dst = MSM_NAND_ADDR2;
7060 + cmd->len = 16;
7061 + cmd++;
7062 +
7063 + /* Write the ADDR6 registers */
7064 + cmd->cmd = 0;
7065 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr6);
7066 + cmd->dst = MSM_NAND_ADDR6;
7067 + cmd->len = 4;
7068 + cmd++;
7069 +
7070 + /* Write the GENP0, GENP1, GENP2, GENP3, GENP4 registers */
7071 + cmd->cmd = 0;
7072 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.data0);
7073 + cmd->dst = MSM_NAND_GENP_REG0;
7074 + cmd->len = 16;
7075 + cmd++;
7076 +
7077 + /* Write the FLASH_DEV_CMD4,5,6 registers */
7078 + cmd->cmd = 0;
7079 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.data4);
7080 + cmd->dst = MSM_NAND_DEV_CMD4;
7081 + cmd->len = 12;
7082 + cmd++;
7083 +
7084 + /* Kick the execute command */
7085 + cmd->cmd = 0;
7086 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
7087 + cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
7088 + cmd->len = 4;
7089 + cmd++;
7090 +
7091 + /* Block on data ready, and read the status register */
7092 + cmd->cmd = SRC_CRCI_NAND_DATA;
7093 + cmd->src = MSM_NAND_SFLASHC_STATUS;
7094 + cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[0]);
7095 + cmd->len = 4;
7096 + cmd++;
7097 +
7098 + /***************************************************************/
7099 + /* Wait for the interrupt from the Onenand device controller */
7100 + /***************************************************************/
7101 +
7102 + /* Block on cmd ready and write CMD register */
7103 + cmd->cmd = DST_CRCI_NAND_CMD;
7104 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[1]);
7105 + cmd->dst = MSM_NAND_SFLASHC_CMD;
7106 + cmd->len = 4;
7107 + cmd++;
7108 +
7109 + /* Kick the execute command */
7110 + cmd->cmd = 0;
7111 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
7112 + cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
7113 + cmd->len = 4;
7114 + cmd++;
7115 +
7116 + /* Block on data ready, and read the status register */
7117 + cmd->cmd = SRC_CRCI_NAND_DATA;
7118 + cmd->src = MSM_NAND_SFLASHC_STATUS;
7119 + cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[1]);
7120 + cmd->len = 4;
7121 + cmd++;
7122 +
7123 + /***************************************************************/
7124 + /* Read the necessary status registers from the onenand device */
7125 + /***************************************************************/
7126 +
7127 + /* Block on cmd ready and write CMD register */
7128 + cmd->cmd = DST_CRCI_NAND_CMD;
7129 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[2]);
7130 + cmd->dst = MSM_NAND_SFLASHC_CMD;
7131 + cmd->len = 4;
7132 + cmd++;
7133 +
7134 + /* Kick the execute command */
7135 + cmd->cmd = 0;
7136 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
7137 + cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
7138 + cmd->len = 4;
7139 + cmd++;
7140 +
7141 + /* Block on data ready, and read the status register */
7142 + cmd->cmd = SRC_CRCI_NAND_DATA;
7143 + cmd->src = MSM_NAND_SFLASHC_STATUS;
7144 + cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[2]);
7145 + cmd->len = 4;
7146 + cmd++;
7147 +
7148 + /* Read the GENP3 register */
7149 + cmd->cmd = 0;
7150 + cmd->src = MSM_NAND_GENP_REG3;
7151 + cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.data3);
7152 + cmd->len = 4;
7153 + cmd++;
7154 +
7155 + /* Read the DEVCMD4 register */
7156 + cmd->cmd = 0;
7157 + cmd->src = MSM_NAND_DEV_CMD4;
7158 + cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.data4);
7159 + cmd->len = 4;
7160 + cmd++;
7161 +
7162 + /***************************************************************/
7163 + /* Restore the necessary registers to proper values */
7164 + /***************************************************************/
7165 +
7166 + /* Block on cmd ready and write CMD register */
7167 + cmd->cmd = DST_CRCI_NAND_CMD;
7168 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[3]);
7169 + cmd->dst = MSM_NAND_SFLASHC_CMD;
7170 + cmd->len = 4;
7171 + cmd++;
7172 +
7173 + /* Kick the execute command */
7174 + cmd->cmd = 0;
7175 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
7176 + cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
7177 + cmd->len = 4;
7178 + cmd++;
7179 +
7180 + /* Block on data ready, and read the status register */
7181 + cmd->cmd = SRC_CRCI_NAND_DATA;
7182 + cmd->src = MSM_NAND_SFLASHC_STATUS;
7183 + cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[3]);
7184 + cmd->len = 4;
7185 + cmd++;
7186 +
7187 +
7188 + BUILD_BUG_ON(20 != ARRAY_SIZE(dma_buffer->cmd));
7189 + BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
7190 + dma_buffer->cmd[0].cmd |= CMD_OCB;
7191 + cmd[-1].cmd |= CMD_OCU | CMD_LC;
7192 +
7193 + dma_buffer->cmdptr = (msm_virt_to_dma(chip, dma_buffer->cmd)
7194 + >> 3) | CMD_PTR_LP;
7195 +
7196 + mb();
7197 + msm_dmov_exec_cmd(chip->dma_channel, DMOV_CMD_PTR_LIST
7198 + | DMOV_CMD_ADDR(msm_virt_to_dma(chip,
7199 + &dma_buffer->cmdptr)));
7200 + mb();
7201 +
7202 + ecc_status = (dma_buffer->data.data3 >> 16) & 0x0000FFFF;
7203 + interrupt_status = (dma_buffer->data.data4 >> 0) & 0x0000FFFF;
7204 + controller_status = (dma_buffer->data.data4 >> 16) & 0x0000FFFF;
7205 +
7206 +#if VERBOSE
7207 + pr_info("\n%s: sflash status %x %x %x %x\n", __func__,
7208 + dma_buffer->data.sfstat[0],
7209 + dma_buffer->data.sfstat[1],
7210 + dma_buffer->data.sfstat[2],
7211 + dma_buffer->data.sfstat[3]);
7212 +
7213 + pr_info("%s: controller_status = %x\n", __func__,
7214 + controller_status);
7215 + pr_info("%s: interrupt_status = %x\n", __func__,
7216 + interrupt_status);
7217 + pr_info("%s: ecc_status = %x\n", __func__,
7218 + ecc_status);
7219 +#endif
7220 + /* Check for errors, protection violations etc */
7221 + if ((controller_status != 0)
7222 + || (dma_buffer->data.sfstat[0] & 0x110)
7223 + || (dma_buffer->data.sfstat[1] & 0x110)
7224 + || (dma_buffer->data.sfstat[2] & 0x110)
7225 + || (dma_buffer->data.sfstat[3] & 0x110)) {
7226 + pr_err("%s: ECC/MPU/OP error\n", __func__);
7227 + err = -EIO;
7228 + }
7229 +
7230 + msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
7231 +
7232 + if (err) {
7233 + pr_err("%s: Erase failed, 0x%llx\n", __func__,
7234 + instr->addr);
7235 + instr->fail_addr = instr->addr;
7236 + instr->state = MTD_ERASE_FAILED;
7237 + } else {
7238 + instr->state = MTD_ERASE_DONE;
7239 + instr->fail_addr = 0xffffffff;
7240 + mtd_erase_callback(instr);
7241 + }
7242 +
7243 +#if VERBOSE
7244 + pr_info("\n%s: ret %d\n", __func__, err);
7245 + pr_info("===================================================="
7246 + "=============\n");
7247 +#endif
7248 + return err;
7249 +}
7250 +
7251 +static int msm_onenand_block_isbad(struct mtd_info *mtd, loff_t ofs)
7252 +{
7253 + struct mtd_oob_ops ops;
7254 + int rval, i;
7255 + int ret = 0;
7256 + uint8_t *buffer;
7257 + uint8_t *oobptr;
7258 +
7259 + if ((ofs > mtd->size) || (ofs & (mtd->erasesize - 1))) {
7260 + pr_err("%s: unsupported block address, 0x%x\n",
7261 + __func__, (uint32_t)ofs);
7262 + return -EINVAL;
7263 + }
7264 +
7265 + buffer = kmalloc(2112, GFP_KERNEL|GFP_DMA);
7266 + if (buffer == 0) {
7267 + pr_err("%s: Could not kmalloc for buffer\n",
7268 + __func__);
7269 + return -ENOMEM;
7270 + }
7271 +
7272 + memset(buffer, 0x00, 2112);
7273 + oobptr = &(buffer[2048]);
7274 +
7275 + ops.mode = MTD_OPS_RAW;
7276 + ops.len = 2112;
7277 + ops.retlen = 0;
7278 + ops.ooblen = 0;
7279 + ops.oobretlen = 0;
7280 + ops.ooboffs = 0;
7281 + ops.datbuf = buffer;
7282 + ops.oobbuf = NULL;
7283 +
7284 + for (i = 0; i < 2; i++) {
7285 + ofs = ofs + i*mtd->writesize;
7286 + rval = msm_onenand_read_oob(mtd, ofs, &ops);
7287 + if (rval) {
7288 + pr_err("%s: Error in reading bad blk info\n",
7289 + __func__);
7290 + ret = rval;
7291 + break;
7292 + }
7293 + if ((oobptr[0] != 0xFF) || (oobptr[1] != 0xFF) ||
7294 + (oobptr[16] != 0xFF) || (oobptr[17] != 0xFF) ||
7295 + (oobptr[32] != 0xFF) || (oobptr[33] != 0xFF) ||
7296 + (oobptr[48] != 0xFF) || (oobptr[49] != 0xFF)
7297 + ) {
7298 + ret = 1;
7299 + break;
7300 + }
7301 + }
7302 +
7303 + kfree(buffer);
7304 +
7305 +#if VERBOSE
7306 + if (ret == 1)
7307 + pr_info("%s : Block containing 0x%x is bad\n",
7308 + __func__, (unsigned int)ofs);
7309 +#endif
7310 + return ret;
7311 +}
7312 +
7313 +static int msm_onenand_block_markbad(struct mtd_info *mtd, loff_t ofs)
7314 +{
7315 + struct mtd_oob_ops ops;
7316 + int rval, i;
7317 + int ret = 0;
7318 + uint8_t *buffer;
7319 +
7320 + if ((ofs > mtd->size) || (ofs & (mtd->erasesize - 1))) {
7321 + pr_err("%s: unsupported block address, 0x%x\n",
7322 + __func__, (uint32_t)ofs);
7323 + return -EINVAL;
7324 + }
7325 +
7326 + buffer = page_address(ZERO_PAGE());
7327 +
7328 + ops.mode = MTD_OPS_RAW;
7329 + ops.len = 2112;
7330 + ops.retlen = 0;
7331 + ops.ooblen = 0;
7332 + ops.oobretlen = 0;
7333 + ops.ooboffs = 0;
7334 + ops.datbuf = buffer;
7335 + ops.oobbuf = NULL;
7336 +
7337 + for (i = 0; i < 2; i++) {
7338 + ofs = ofs + i*mtd->writesize;
7339 + rval = msm_onenand_write_oob(mtd, ofs, &ops);
7340 + if (rval) {
7341 + pr_err("%s: Error in writing bad blk info\n",
7342 + __func__);
7343 + ret = rval;
7344 + break;
7345 + }
7346 + }
7347 +
7348 + return ret;
7349 +}
7350 +
7351 +static int msm_onenand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
7352 +{
7353 + struct msm_nand_chip *chip = mtd->priv;
7354 +
7355 + struct {
7356 + dmov_s cmd[20];
7357 + unsigned cmdptr;
7358 + struct {
7359 + uint32_t sfbcfg;
7360 + uint32_t sfcmd[4];
7361 + uint32_t sfexec;
7362 + uint32_t sfstat[4];
7363 + uint32_t addr0;
7364 + uint32_t addr1;
7365 + uint32_t addr2;
7366 + uint32_t addr3;
7367 + uint32_t addr4;
7368 + uint32_t addr5;
7369 + uint32_t addr6;
7370 + uint32_t data0;
7371 + uint32_t data1;
7372 + uint32_t data2;
7373 + uint32_t data3;
7374 + uint32_t data4;
7375 + uint32_t data5;
7376 + uint32_t data6;
7377 + } data;
7378 + } *dma_buffer;
7379 + dmov_s *cmd;
7380 +
7381 + int err = 0;
7382 +
7383 + uint16_t onenand_startaddr1;
7384 + uint16_t onenand_startaddr8;
7385 + uint16_t onenand_startaddr2;
7386 + uint16_t onenand_startblock;
7387 +
7388 + uint16_t controller_status;
7389 + uint16_t interrupt_status;
7390 + uint16_t write_prot_status;
7391 +
7392 + uint64_t start_ofs;
7393 +
7394 +#if VERBOSE
7395 + pr_info("===================================================="
7396 + "=============\n");
7397 + pr_info("%s: ofs 0x%llx len %lld\n", __func__, ofs, len);
7398 +#endif
7399 + /* 'ofs' & 'len' should align to block size */
7400 + if (ofs&(mtd->erasesize - 1)) {
7401 + pr_err("%s: Unsupported ofs address, 0x%llx\n",
7402 + __func__, ofs);
7403 + return -EINVAL;
7404 + }
7405 +
7406 + if (len&(mtd->erasesize - 1)) {
7407 + pr_err("%s: Unsupported len, %lld\n",
7408 + __func__, len);
7409 + return -EINVAL;
7410 + }
7411 +
7412 + if (ofs+len > mtd->size) {
7413 + pr_err("%s: Maximum chip size exceeded\n", __func__);
7414 + return -EINVAL;
7415 + }
7416 +
7417 + wait_event(chip->wait_queue, (dma_buffer = msm_nand_get_dma_buffer
7418 + (chip, sizeof(*dma_buffer))));
7419 +
7420 + for (start_ofs = ofs; ofs < start_ofs+len; ofs = ofs+mtd->erasesize) {
7421 +#if VERBOSE
7422 + pr_info("%s: ofs 0x%llx len %lld\n", __func__, ofs, len);
7423 +#endif
7424 +
7425 + cmd = dma_buffer->cmd;
7426 + if ((onenand_info.device_id & ONENAND_DEVICE_IS_DDP)
7427 + && (ofs >= (mtd->size>>1))) { /* DDP Device */
7428 + onenand_startaddr1 = DEVICE_FLASHCORE_1 |
7429 + (((uint32_t)(ofs - (mtd->size>>1))
7430 + / mtd->erasesize));
7431 + onenand_startaddr2 = DEVICE_BUFFERRAM_1;
7432 + onenand_startblock = ((uint32_t)(ofs - (mtd->size>>1))
7433 + / mtd->erasesize);
7434 + } else {
7435 + onenand_startaddr1 = DEVICE_FLASHCORE_0 |
7436 + ((uint32_t)ofs / mtd->erasesize) ;
7437 + onenand_startaddr2 = DEVICE_BUFFERRAM_0;
7438 + onenand_startblock = ((uint32_t)ofs
7439 + / mtd->erasesize);
7440 + }
7441 +
7442 + onenand_startaddr8 = 0x0000;
7443 + dma_buffer->data.sfbcfg = SFLASH_BCFG |
7444 + (nand_sfcmd_mode ? 0 : (1 << 24));
7445 + dma_buffer->data.sfcmd[0] = SFLASH_PREPCMD(7, 0, 0,
7446 + MSM_NAND_SFCMD_CMDXS,
7447 + nand_sfcmd_mode,
7448 + MSM_NAND_SFCMD_REGWR);
7449 + dma_buffer->data.sfcmd[1] = SFLASH_PREPCMD(0, 0, 32,
7450 + MSM_NAND_SFCMD_CMDXS,
7451 + nand_sfcmd_mode,
7452 + MSM_NAND_SFCMD_INTHI);
7453 + dma_buffer->data.sfcmd[2] = SFLASH_PREPCMD(3, 7, 0,
7454 + MSM_NAND_SFCMD_DATXS,
7455 + nand_sfcmd_mode,
7456 + MSM_NAND_SFCMD_REGRD);
7457 + dma_buffer->data.sfcmd[3] = SFLASH_PREPCMD(4, 10, 0,
7458 + MSM_NAND_SFCMD_CMDXS,
7459 + nand_sfcmd_mode,
7460 + MSM_NAND_SFCMD_REGWR);
7461 + dma_buffer->data.sfexec = 1;
7462 + dma_buffer->data.sfstat[0] = CLEAN_DATA_32;
7463 + dma_buffer->data.sfstat[1] = CLEAN_DATA_32;
7464 + dma_buffer->data.sfstat[2] = CLEAN_DATA_32;
7465 + dma_buffer->data.sfstat[3] = CLEAN_DATA_32;
7466 + dma_buffer->data.addr0 = (ONENAND_INTERRUPT_STATUS << 16) |
7467 + (ONENAND_SYSTEM_CONFIG_1);
7468 + dma_buffer->data.addr1 = (ONENAND_START_ADDRESS_8 << 16) |
7469 + (ONENAND_START_ADDRESS_1);
7470 + dma_buffer->data.addr2 = (ONENAND_START_BLOCK_ADDRESS << 16) |
7471 + (ONENAND_START_ADDRESS_2);
7472 + dma_buffer->data.addr3 = (ONENAND_WRITE_PROT_STATUS << 16) |
7473 + (ONENAND_COMMAND);
7474 + dma_buffer->data.addr4 = (ONENAND_CONTROLLER_STATUS << 16) |
7475 + (ONENAND_INTERRUPT_STATUS);
7476 + dma_buffer->data.addr5 = (ONENAND_INTERRUPT_STATUS << 16) |
7477 + (ONENAND_SYSTEM_CONFIG_1);
7478 + dma_buffer->data.addr6 = (ONENAND_START_ADDRESS_3 << 16) |
7479 + (ONENAND_START_ADDRESS_1);
7480 + dma_buffer->data.data0 = (ONENAND_CLRINTR << 16) |
7481 + (ONENAND_SYSCFG1_ECCENA(nand_sfcmd_mode));
7482 + dma_buffer->data.data1 = (onenand_startaddr8 << 16) |
7483 + (onenand_startaddr1);
7484 + dma_buffer->data.data2 = (onenand_startblock << 16) |
7485 + (onenand_startaddr2);
7486 + dma_buffer->data.data3 = (CLEAN_DATA_16 << 16) |
7487 + (ONENAND_CMD_UNLOCK);
7488 + dma_buffer->data.data4 = (CLEAN_DATA_16 << 16) |
7489 + (CLEAN_DATA_16);
7490 + dma_buffer->data.data5 = (ONENAND_CLRINTR << 16) |
7491 + (ONENAND_SYSCFG1_ECCENA(nand_sfcmd_mode));
7492 + dma_buffer->data.data6 = (ONENAND_STARTADDR3_RES << 16) |
7493 + (ONENAND_STARTADDR1_RES);
7494 +
7495 + /*************************************************************/
7496 + /* Write the necessary address reg in the onenand device */
7497 + /*************************************************************/
7498 +
7499 + /* Enable and configure the SFlash controller */
7500 + cmd->cmd = 0;
7501 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfbcfg);
7502 + cmd->dst = MSM_NAND_SFLASHC_BURST_CFG;
7503 + cmd->len = 4;
7504 + cmd++;
7505 +
7506 + /* Block on cmd ready and write CMD register */
7507 + cmd->cmd = DST_CRCI_NAND_CMD;
7508 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[0]);
7509 + cmd->dst = MSM_NAND_SFLASHC_CMD;
7510 + cmd->len = 4;
7511 + cmd++;
7512 +
7513 + /* Write the ADDR0 and ADDR1 registers */
7514 + cmd->cmd = 0;
7515 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr0);
7516 + cmd->dst = MSM_NAND_ADDR0;
7517 + cmd->len = 8;
7518 + cmd++;
7519 +
7520 + /* Write the ADDR2 ADDR3 ADDR4 ADDR5 registers */
7521 + cmd->cmd = 0;
7522 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr2);
7523 + cmd->dst = MSM_NAND_ADDR2;
7524 + cmd->len = 16;
7525 + cmd++;
7526 +
7527 + /* Write the ADDR6 registers */
7528 + cmd->cmd = 0;
7529 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr6);
7530 + cmd->dst = MSM_NAND_ADDR6;
7531 + cmd->len = 4;
7532 + cmd++;
7533 +
7534 + /* Write the GENP0, GENP1, GENP2, GENP3, GENP4 registers */
7535 + cmd->cmd = 0;
7536 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.data0);
7537 + cmd->dst = MSM_NAND_GENP_REG0;
7538 + cmd->len = 16;
7539 + cmd++;
7540 +
7541 + /* Write the FLASH_DEV_CMD4,5,6 registers */
7542 + cmd->cmd = 0;
7543 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.data4);
7544 + cmd->dst = MSM_NAND_DEV_CMD4;
7545 + cmd->len = 12;
7546 + cmd++;
7547 +
7548 + /* Kick the execute command */
7549 + cmd->cmd = 0;
7550 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
7551 + cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
7552 + cmd->len = 4;
7553 + cmd++;
7554 +
7555 + /* Block on data ready, and read the status register */
7556 + cmd->cmd = SRC_CRCI_NAND_DATA;
7557 + cmd->src = MSM_NAND_SFLASHC_STATUS;
7558 + cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[0]);
7559 + cmd->len = 4;
7560 + cmd++;
7561 +
7562 + /*************************************************************/
7563 + /* Wait for the interrupt from the Onenand device controller */
7564 + /*************************************************************/
7565 +
7566 + /* Block on cmd ready and write CMD register */
7567 + cmd->cmd = DST_CRCI_NAND_CMD;
7568 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[1]);
7569 + cmd->dst = MSM_NAND_SFLASHC_CMD;
7570 + cmd->len = 4;
7571 + cmd++;
7572 +
7573 + /* Kick the execute command */
7574 + cmd->cmd = 0;
7575 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
7576 + cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
7577 + cmd->len = 4;
7578 + cmd++;
7579 +
7580 + /* Block on data ready, and read the status register */
7581 + cmd->cmd = SRC_CRCI_NAND_DATA;
7582 + cmd->src = MSM_NAND_SFLASHC_STATUS;
7583 + cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[1]);
7584 + cmd->len = 4;
7585 + cmd++;
7586 +
7587 + /*********************************************************/
7588 + /* Read the necessary status reg from the onenand device */
7589 + /*********************************************************/
7590 +
7591 + /* Block on cmd ready and write CMD register */
7592 + cmd->cmd = DST_CRCI_NAND_CMD;
7593 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[2]);
7594 + cmd->dst = MSM_NAND_SFLASHC_CMD;
7595 + cmd->len = 4;
7596 + cmd++;
7597 +
7598 + /* Kick the execute command */
7599 + cmd->cmd = 0;
7600 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
7601 + cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
7602 + cmd->len = 4;
7603 + cmd++;
7604 +
7605 + /* Block on data ready, and read the status register */
7606 + cmd->cmd = SRC_CRCI_NAND_DATA;
7607 + cmd->src = MSM_NAND_SFLASHC_STATUS;
7608 + cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[2]);
7609 + cmd->len = 4;
7610 + cmd++;
7611 +
7612 + /* Read the GENP3 register */
7613 + cmd->cmd = 0;
7614 + cmd->src = MSM_NAND_GENP_REG3;
7615 + cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.data3);
7616 + cmd->len = 4;
7617 + cmd++;
7618 +
7619 + /* Read the DEVCMD4 register */
7620 + cmd->cmd = 0;
7621 + cmd->src = MSM_NAND_DEV_CMD4;
7622 + cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.data4);
7623 + cmd->len = 4;
7624 + cmd++;
7625 +
7626 + /************************************************************/
7627 + /* Restore the necessary registers to proper values */
7628 + /************************************************************/
7629 +
7630 + /* Block on cmd ready and write CMD register */
7631 + cmd->cmd = DST_CRCI_NAND_CMD;
7632 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[3]);
7633 + cmd->dst = MSM_NAND_SFLASHC_CMD;
7634 + cmd->len = 4;
7635 + cmd++;
7636 +
7637 + /* Kick the execute command */
7638 + cmd->cmd = 0;
7639 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
7640 + cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
7641 + cmd->len = 4;
7642 + cmd++;
7643 +
7644 + /* Block on data ready, and read the status register */
7645 + cmd->cmd = SRC_CRCI_NAND_DATA;
7646 + cmd->src = MSM_NAND_SFLASHC_STATUS;
7647 + cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[3]);
7648 + cmd->len = 4;
7649 + cmd++;
7650 +
7651 +
7652 + BUILD_BUG_ON(20 != ARRAY_SIZE(dma_buffer->cmd));
7653 + BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
7654 + dma_buffer->cmd[0].cmd |= CMD_OCB;
7655 + cmd[-1].cmd |= CMD_OCU | CMD_LC;
7656 +
7657 + dma_buffer->cmdptr = (msm_virt_to_dma(chip, dma_buffer->cmd)
7658 + >> 3) | CMD_PTR_LP;
7659 +
7660 + mb();
7661 + msm_dmov_exec_cmd(chip->dma_channel,
7662 + DMOV_CMD_PTR_LIST | DMOV_CMD_ADDR(msm_virt_to_dma(chip,
7663 + &dma_buffer->cmdptr)));
7664 + mb();
7665 +
7666 + write_prot_status = (dma_buffer->data.data3 >> 16) & 0x0000FFFF;
7667 + interrupt_status = (dma_buffer->data.data4 >> 0) & 0x0000FFFF;
7668 + controller_status = (dma_buffer->data.data4 >> 16) & 0x0000FFFF;
7669 +
7670 +#if VERBOSE
7671 + pr_info("\n%s: sflash status %x %x %x %x\n", __func__,
7672 + dma_buffer->data.sfstat[0],
7673 + dma_buffer->data.sfstat[1],
7674 + dma_buffer->data.sfstat[2],
7675 + dma_buffer->data.sfstat[3]);
7676 +
7677 + pr_info("%s: controller_status = %x\n", __func__,
7678 + controller_status);
7679 + pr_info("%s: interrupt_status = %x\n", __func__,
7680 + interrupt_status);
7681 + pr_info("%s: write_prot_status = %x\n", __func__,
7682 + write_prot_status);
7683 +#endif
7684 + /* Check for errors, protection violations etc */
7685 + if ((controller_status != 0)
7686 + || (dma_buffer->data.sfstat[0] & 0x110)
7687 + || (dma_buffer->data.sfstat[1] & 0x110)
7688 + || (dma_buffer->data.sfstat[2] & 0x110)
7689 + || (dma_buffer->data.sfstat[3] & 0x110)) {
7690 + pr_err("%s: ECC/MPU/OP error\n", __func__);
7691 + err = -EIO;
7692 + }
7693 +
7694 + if (!(write_prot_status & ONENAND_WP_US)) {
7695 +			pr_err("%s: Unexpected status ofs = 0x%llx, "
7696 + "wp_status = %x\n",
7697 + __func__, ofs, write_prot_status);
7698 + err = -EIO;
7699 + }
7700 +
7701 + if (err)
7702 + break;
7703 + }
7704 +
7705 + msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
7706 +
7707 +#if VERBOSE
7708 + pr_info("\n%s: ret %d\n", __func__, err);
7709 + pr_info("===================================================="
7710 + "=============\n");
7711 +#endif
7712 + return err;
7713 +}
7714 +
7715 +static int msm_onenand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
7716 +{
7717 + struct msm_nand_chip *chip = mtd->priv;
7718 +
7719 + struct {
7720 + dmov_s cmd[20];
7721 + unsigned cmdptr;
7722 + struct {
7723 + uint32_t sfbcfg;
7724 + uint32_t sfcmd[4];
7725 + uint32_t sfexec;
7726 + uint32_t sfstat[4];
7727 + uint32_t addr0;
7728 + uint32_t addr1;
7729 + uint32_t addr2;
7730 + uint32_t addr3;
7731 + uint32_t addr4;
7732 + uint32_t addr5;
7733 + uint32_t addr6;
7734 + uint32_t data0;
7735 + uint32_t data1;
7736 + uint32_t data2;
7737 + uint32_t data3;
7738 + uint32_t data4;
7739 + uint32_t data5;
7740 + uint32_t data6;
7741 + } data;
7742 + } *dma_buffer;
7743 + dmov_s *cmd;
7744 +
7745 + int err = 0;
7746 +
7747 + uint16_t onenand_startaddr1;
7748 + uint16_t onenand_startaddr8;
7749 + uint16_t onenand_startaddr2;
7750 + uint16_t onenand_startblock;
7751 +
7752 + uint16_t controller_status;
7753 + uint16_t interrupt_status;
7754 + uint16_t write_prot_status;
7755 +
7756 + uint64_t start_ofs;
7757 +
7758 +#if VERBOSE
7759 + pr_info("===================================================="
7760 + "=============\n");
7761 + pr_info("%s: ofs 0x%llx len %lld\n", __func__, ofs, len);
7762 +#endif
7763 + /* 'ofs' & 'len' should align to block size */
7764 + if (ofs&(mtd->erasesize - 1)) {
7765 + pr_err("%s: Unsupported ofs address, 0x%llx\n",
7766 + __func__, ofs);
7767 + return -EINVAL;
7768 + }
7769 +
7770 + if (len&(mtd->erasesize - 1)) {
7771 + pr_err("%s: Unsupported len, %lld\n",
7772 + __func__, len);
7773 + return -EINVAL;
7774 + }
7775 +
7776 + if (ofs+len > mtd->size) {
7777 + pr_err("%s: Maximum chip size exceeded\n", __func__);
7778 + return -EINVAL;
7779 + }
7780 +
7781 + wait_event(chip->wait_queue, (dma_buffer = msm_nand_get_dma_buffer
7782 + (chip, sizeof(*dma_buffer))));
7783 +
7784 + for (start_ofs = ofs; ofs < start_ofs+len; ofs = ofs+mtd->erasesize) {
7785 +#if VERBOSE
7786 + pr_info("%s: ofs 0x%llx len %lld\n", __func__, ofs, len);
7787 +#endif
7788 +
7789 + cmd = dma_buffer->cmd;
7790 + if ((onenand_info.device_id & ONENAND_DEVICE_IS_DDP)
7791 + && (ofs >= (mtd->size>>1))) { /* DDP Device */
7792 + onenand_startaddr1 = DEVICE_FLASHCORE_1 |
7793 + (((uint32_t)(ofs - (mtd->size>>1))
7794 + / mtd->erasesize));
7795 + onenand_startaddr2 = DEVICE_BUFFERRAM_1;
7796 + onenand_startblock = ((uint32_t)(ofs - (mtd->size>>1))
7797 + / mtd->erasesize);
7798 + } else {
7799 + onenand_startaddr1 = DEVICE_FLASHCORE_0 |
7800 + ((uint32_t)ofs / mtd->erasesize) ;
7801 + onenand_startaddr2 = DEVICE_BUFFERRAM_0;
7802 + onenand_startblock = ((uint32_t)ofs
7803 + / mtd->erasesize);
7804 + }
7805 +
7806 + onenand_startaddr8 = 0x0000;
7807 + dma_buffer->data.sfbcfg = SFLASH_BCFG |
7808 + (nand_sfcmd_mode ? 0 : (1 << 24));
7809 + dma_buffer->data.sfcmd[0] = SFLASH_PREPCMD(7, 0, 0,
7810 + MSM_NAND_SFCMD_CMDXS,
7811 + nand_sfcmd_mode,
7812 + MSM_NAND_SFCMD_REGWR);
7813 + dma_buffer->data.sfcmd[1] = SFLASH_PREPCMD(0, 0, 32,
7814 + MSM_NAND_SFCMD_CMDXS,
7815 + nand_sfcmd_mode,
7816 + MSM_NAND_SFCMD_INTHI);
7817 + dma_buffer->data.sfcmd[2] = SFLASH_PREPCMD(3, 7, 0,
7818 + MSM_NAND_SFCMD_DATXS,
7819 + nand_sfcmd_mode,
7820 + MSM_NAND_SFCMD_REGRD);
7821 + dma_buffer->data.sfcmd[3] = SFLASH_PREPCMD(4, 10, 0,
7822 + MSM_NAND_SFCMD_CMDXS,
7823 + nand_sfcmd_mode,
7824 + MSM_NAND_SFCMD_REGWR);
7825 + dma_buffer->data.sfexec = 1;
7826 + dma_buffer->data.sfstat[0] = CLEAN_DATA_32;
7827 + dma_buffer->data.sfstat[1] = CLEAN_DATA_32;
7828 + dma_buffer->data.sfstat[2] = CLEAN_DATA_32;
7829 + dma_buffer->data.sfstat[3] = CLEAN_DATA_32;
7830 + dma_buffer->data.addr0 = (ONENAND_INTERRUPT_STATUS << 16) |
7831 + (ONENAND_SYSTEM_CONFIG_1);
7832 + dma_buffer->data.addr1 = (ONENAND_START_ADDRESS_8 << 16) |
7833 + (ONENAND_START_ADDRESS_1);
7834 + dma_buffer->data.addr2 = (ONENAND_START_BLOCK_ADDRESS << 16) |
7835 + (ONENAND_START_ADDRESS_2);
7836 + dma_buffer->data.addr3 = (ONENAND_WRITE_PROT_STATUS << 16) |
7837 + (ONENAND_COMMAND);
7838 + dma_buffer->data.addr4 = (ONENAND_CONTROLLER_STATUS << 16) |
7839 + (ONENAND_INTERRUPT_STATUS);
7840 + dma_buffer->data.addr5 = (ONENAND_INTERRUPT_STATUS << 16) |
7841 + (ONENAND_SYSTEM_CONFIG_1);
7842 + dma_buffer->data.addr6 = (ONENAND_START_ADDRESS_3 << 16) |
7843 + (ONENAND_START_ADDRESS_1);
7844 + dma_buffer->data.data0 = (ONENAND_CLRINTR << 16) |
7845 + (ONENAND_SYSCFG1_ECCENA(nand_sfcmd_mode));
7846 + dma_buffer->data.data1 = (onenand_startaddr8 << 16) |
7847 + (onenand_startaddr1);
7848 + dma_buffer->data.data2 = (onenand_startblock << 16) |
7849 + (onenand_startaddr2);
7850 + dma_buffer->data.data3 = (CLEAN_DATA_16 << 16) |
7851 + (ONENAND_CMD_LOCK);
7852 + dma_buffer->data.data4 = (CLEAN_DATA_16 << 16) |
7853 + (CLEAN_DATA_16);
7854 + dma_buffer->data.data5 = (ONENAND_CLRINTR << 16) |
7855 + (ONENAND_SYSCFG1_ECCENA(nand_sfcmd_mode));
7856 + dma_buffer->data.data6 = (ONENAND_STARTADDR3_RES << 16) |
7857 + (ONENAND_STARTADDR1_RES);
7858 +
7859 + /*************************************************************/
7860 + /* Write the necessary address reg in the onenand device */
7861 + /*************************************************************/
7862 +
7863 + /* Enable and configure the SFlash controller */
7864 + cmd->cmd = 0;
7865 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfbcfg);
7866 + cmd->dst = MSM_NAND_SFLASHC_BURST_CFG;
7867 + cmd->len = 4;
7868 + cmd++;
7869 +
7870 + /* Block on cmd ready and write CMD register */
7871 + cmd->cmd = DST_CRCI_NAND_CMD;
7872 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[0]);
7873 + cmd->dst = MSM_NAND_SFLASHC_CMD;
7874 + cmd->len = 4;
7875 + cmd++;
7876 +
7877 + /* Write the ADDR0 and ADDR1 registers */
7878 + cmd->cmd = 0;
7879 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr0);
7880 + cmd->dst = MSM_NAND_ADDR0;
7881 + cmd->len = 8;
7882 + cmd++;
7883 +
7884 + /* Write the ADDR2 ADDR3 ADDR4 ADDR5 registers */
7885 + cmd->cmd = 0;
7886 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr2);
7887 + cmd->dst = MSM_NAND_ADDR2;
7888 + cmd->len = 16;
7889 + cmd++;
7890 +
7891 + /* Write the ADDR6 registers */
7892 + cmd->cmd = 0;
7893 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr6);
7894 + cmd->dst = MSM_NAND_ADDR6;
7895 + cmd->len = 4;
7896 + cmd++;
7897 +
7898 + /* Write the GENP0, GENP1, GENP2, GENP3, GENP4 registers */
7899 + cmd->cmd = 0;
7900 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.data0);
7901 + cmd->dst = MSM_NAND_GENP_REG0;
7902 + cmd->len = 16;
7903 + cmd++;
7904 +
7905 + /* Write the FLASH_DEV_CMD4,5,6 registers */
7906 + cmd->cmd = 0;
7907 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.data4);
7908 + cmd->dst = MSM_NAND_DEV_CMD4;
7909 + cmd->len = 12;
7910 + cmd++;
7911 +
7912 + /* Kick the execute command */
7913 + cmd->cmd = 0;
7914 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
7915 + cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
7916 + cmd->len = 4;
7917 + cmd++;
7918 +
7919 + /* Block on data ready, and read the status register */
7920 + cmd->cmd = SRC_CRCI_NAND_DATA;
7921 + cmd->src = MSM_NAND_SFLASHC_STATUS;
7922 + cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[0]);
7923 + cmd->len = 4;
7924 + cmd++;
7925 +
7926 + /*************************************************************/
7927 + /* Wait for the interrupt from the Onenand device controller */
7928 + /*************************************************************/
7929 +
7930 + /* Block on cmd ready and write CMD register */
7931 + cmd->cmd = DST_CRCI_NAND_CMD;
7932 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[1]);
7933 + cmd->dst = MSM_NAND_SFLASHC_CMD;
7934 + cmd->len = 4;
7935 + cmd++;
7936 +
7937 + /* Kick the execute command */
7938 + cmd->cmd = 0;
7939 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
7940 + cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
7941 + cmd->len = 4;
7942 + cmd++;
7943 +
7944 + /* Block on data ready, and read the status register */
7945 + cmd->cmd = SRC_CRCI_NAND_DATA;
7946 + cmd->src = MSM_NAND_SFLASHC_STATUS;
7947 + cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[1]);
7948 + cmd->len = 4;
7949 + cmd++;
7950 +
7951 + /*********************************************************/
7952 + /* Read the necessary status reg from the onenand device */
7953 + /*********************************************************/
7954 +
7955 + /* Block on cmd ready and write CMD register */
7956 + cmd->cmd = DST_CRCI_NAND_CMD;
7957 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[2]);
7958 + cmd->dst = MSM_NAND_SFLASHC_CMD;
7959 + cmd->len = 4;
7960 + cmd++;
7961 +
7962 + /* Kick the execute command */
7963 + cmd->cmd = 0;
7964 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
7965 + cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
7966 + cmd->len = 4;
7967 + cmd++;
7968 +
7969 + /* Block on data ready, and read the status register */
7970 + cmd->cmd = SRC_CRCI_NAND_DATA;
7971 + cmd->src = MSM_NAND_SFLASHC_STATUS;
7972 + cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[2]);
7973 + cmd->len = 4;
7974 + cmd++;
7975 +
7976 + /* Read the GENP3 register */
7977 + cmd->cmd = 0;
7978 + cmd->src = MSM_NAND_GENP_REG3;
7979 + cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.data3);
7980 + cmd->len = 4;
7981 + cmd++;
7982 +
7983 + /* Read the DEVCMD4 register */
7984 + cmd->cmd = 0;
7985 + cmd->src = MSM_NAND_DEV_CMD4;
7986 + cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.data4);
7987 + cmd->len = 4;
7988 + cmd++;
7989 +
7990 + /************************************************************/
7991 + /* Restore the necessary registers to proper values */
7992 + /************************************************************/
7993 +
7994 + /* Block on cmd ready and write CMD register */
7995 + cmd->cmd = DST_CRCI_NAND_CMD;
7996 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[3]);
7997 + cmd->dst = MSM_NAND_SFLASHC_CMD;
7998 + cmd->len = 4;
7999 + cmd++;
8000 +
8001 + /* Kick the execute command */
8002 + cmd->cmd = 0;
8003 + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
8004 + cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
8005 + cmd->len = 4;
8006 + cmd++;
8007 +
8008 + /* Block on data ready, and read the status register */
8009 + cmd->cmd = SRC_CRCI_NAND_DATA;
8010 + cmd->src = MSM_NAND_SFLASHC_STATUS;
8011 + cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[3]);
8012 + cmd->len = 4;
8013 + cmd++;
8014 +
8015 +
8016 + BUILD_BUG_ON(20 != ARRAY_SIZE(dma_buffer->cmd));
8017 + BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
8018 + dma_buffer->cmd[0].cmd |= CMD_OCB;
8019 + cmd[-1].cmd |= CMD_OCU | CMD_LC;
8020 +
8021 + dma_buffer->cmdptr = (msm_virt_to_dma(chip, dma_buffer->cmd)
8022 + >> 3) | CMD_PTR_LP;
8023 +
8024 + mb();
8025 + msm_dmov_exec_cmd(chip->dma_channel,
8026 + DMOV_CMD_PTR_LIST | DMOV_CMD_ADDR(msm_virt_to_dma(chip,
8027 + &dma_buffer->cmdptr)));
8028 + mb();
8029 +
8030 + write_prot_status = (dma_buffer->data.data3 >> 16) & 0x0000FFFF;
8031 + interrupt_status = (dma_buffer->data.data4 >> 0) & 0x0000FFFF;
8032 + controller_status = (dma_buffer->data.data4 >> 16) & 0x0000FFFF;
8033 +
8034 +#if VERBOSE
8035 + pr_info("\n%s: sflash status %x %x %x %x\n", __func__,
8036 + dma_buffer->data.sfstat[0],
8037 + dma_buffer->data.sfstat[1],
8038 + dma_buffer->data.sfstat[2],
8039 + dma_buffer->data.sfstat[3]);
8040 +
8041 + pr_info("%s: controller_status = %x\n", __func__,
8042 + controller_status);
8043 + pr_info("%s: interrupt_status = %x\n", __func__,
8044 + interrupt_status);
8045 + pr_info("%s: write_prot_status = %x\n", __func__,
8046 + write_prot_status);
8047 +#endif
8048 + /* Check for errors, protection violations etc */
8049 + if ((controller_status != 0)
8050 + || (dma_buffer->data.sfstat[0] & 0x110)
8051 + || (dma_buffer->data.sfstat[1] & 0x110)
8052 + || (dma_buffer->data.sfstat[2] & 0x110)
8053 + || (dma_buffer->data.sfstat[3] & 0x110)) {
8054 + pr_err("%s: ECC/MPU/OP error\n", __func__);
8055 + err = -EIO;
8056 + }
8057 +
8058 + if (!(write_prot_status & ONENAND_WP_LS)) {
8059 + pr_err("%s: Unexpected status ofs = 0x%llx,"
8060 + "wp_status = %x\n",
8061 + __func__, ofs, write_prot_status);
8062 + err = -EIO;
8063 + }
8064 +
8065 + if (err)
8066 + break;
8067 + }
8068 +
8069 + msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
8070 +
8071 +#if VERBOSE
8072 + pr_info("\n%s: ret %d\n", __func__, err);
8073 + pr_info("===================================================="
8074 + "=============\n");
8075 +#endif
8076 + return err;
8077 +}
8078 +
8079 +static int msm_onenand_suspend(struct mtd_info *mtd)
8080 +{
8081 + return 0;
8082 +}
8083 +
8084 +static void msm_onenand_resume(struct mtd_info *mtd)
8085 +{
8086 +}
8087 +
8088 +int msm_onenand_scan(struct mtd_info *mtd, int maxchips)
8089 +{
8090 + struct msm_nand_chip *chip = mtd->priv;
8091 +
8092 + /* Probe and check whether onenand device is present */
8093 + if (flash_onenand_probe(chip))
8094 + return -ENODEV;
8095 +
8096 + mtd->size = 0x1000000 << ((onenand_info.device_id & 0xF0) >> 4);
8097 + mtd->writesize = onenand_info.data_buf_size;
8098 + mtd->oobsize = mtd->writesize >> 5;
8099 + mtd->erasesize = mtd->writesize << 6;
8100 + mtd->oobavail = msm_onenand_oob_64.oobavail;
8101 + mtd->ecclayout = &msm_onenand_oob_64;
8102 +
8103 + mtd->type = MTD_NANDFLASH;
8104 + mtd->flags = MTD_CAP_NANDFLASH;
8105 + mtd->_erase = msm_onenand_erase;
8106 + mtd->_point = NULL;
8107 + mtd->_unpoint = NULL;
8108 + mtd->_read = msm_onenand_read;
8109 + mtd->_write = msm_onenand_write;
8110 + mtd->_read_oob = msm_onenand_read_oob;
8111 + mtd->_write_oob = msm_onenand_write_oob;
8112 + mtd->_lock = msm_onenand_lock;
8113 + mtd->_unlock = msm_onenand_unlock;
8114 + mtd->_suspend = msm_onenand_suspend;
8115 + mtd->_resume = msm_onenand_resume;
8116 + mtd->_block_isbad = msm_onenand_block_isbad;
8117 + mtd->_block_markbad = msm_onenand_block_markbad;
8118 + mtd->owner = THIS_MODULE;
8119 +
8120 + pr_info("Found a supported onenand device\n");
8121 +
8122 + return 0;
8123 +}
8124 +
8125 +static const unsigned int bch_sup_cntrl[] = {
8126 + 0x307, /* MSM7x2xA */
8127 + 0x4030, /* MDM 9x15 */
8128 +};
8129 +
8130 +static inline bool msm_nand_has_bch_ecc_engine(unsigned int hw_id)
8131 +{
8132 + int i;
8133 +
8134 + for (i = 0; i < ARRAY_SIZE(bch_sup_cntrl); i++) {
8135 + if (hw_id == bch_sup_cntrl[i])
8136 + return true;
8137 + }
8138 +
8139 + return false;
8140 +}
8141 +
8142 +/**
8143 + * msm_nand_scan - [msm_nand Interface] Scan for the msm_nand device
8144 + * @param mtd MTD device structure
8145 + * @param maxchips Number of chips to scan for
8146 + *
8147 + * This fills out all the not initialized function pointers
8148 + * with the defaults.
8149 + * The flash ID is read and the mtd/chip structures are
8150 + * filled with the appropriate values.
8151 + */
8152 +int msm_nand_scan(struct mtd_info *mtd, int maxchips)
8153 +{
8154 + struct msm_nand_chip *chip = mtd->priv;
8155 + uint32_t flash_id = 0, i, mtd_writesize;
8156 + uint8_t dev_found = 0;
8157 + uint8_t wide_bus;
8158 + uint32_t manid;
8159 + uint32_t devid;
8160 + uint32_t devcfg;
8161 + struct nand_flash_dev *flashdev = NULL;
8162 + struct nand_manufacturers *flashman = NULL;
8163 + unsigned int hw_id;
8164 +
8165 + /*
8166 + * Some Spansion parts, like the S34MS04G2, requires that the
8167 + * NAND Flash be reset before issuing an ONFI probe.
8168 + */
8169 + flash_reset(chip);
8170 +
8171 + /* Probe the Flash device for ONFI compliance */
8172 + if (!flash_onfi_probe(chip)) {
8173 + dev_found = 1;
8174 + } else {
8175 + /* Read the Flash ID from the Nand Flash Device */
8176 + flash_id = flash_read_id(chip);
8177 + manid = flash_id & 0xFF;
8178 + devid = (flash_id >> 8) & 0xFF;
8179 + devcfg = (flash_id >> 24) & 0xFF;
8180 +
8181 + for (i = 0; !flashman && nand_manuf_ids[i].id; ++i)
8182 + if (nand_manuf_ids[i].id == manid)
8183 + flashman = &nand_manuf_ids[i];
8184 + for (i = 0; !flashdev && nand_flash_ids[i].id; ++i)
8185 + if (nand_flash_ids[i].id == devid)
8186 + flashdev = &nand_flash_ids[i];
8187 + if (!flashdev || !flashman) {
8188 + pr_err("ERROR: unknown nand device manuf=%x devid=%x\n",
8189 + manid, devid);
8190 + return -ENOENT;
8191 + } else
8192 + dev_found = 1;
8193 +
8194 + if (!flashdev->pagesize) {
8195 + supported_flash.flash_id = flash_id;
8196 + supported_flash.density = flashdev->chipsize << 20;
8197 + supported_flash.widebus = devcfg & (1 << 6) ? 1 : 0;
8198 + supported_flash.pagesize = 1024 << (devcfg & 0x3);
8199 + supported_flash.blksize = (64 * 1024) <<
8200 + ((devcfg >> 4) & 0x3);
8201 + supported_flash.oobsize = (8 << ((devcfg >> 2) & 0x3)) *
8202 + (supported_flash.pagesize >> 9);
8203 +
8204 + if ((supported_flash.oobsize > 64) &&
8205 + (supported_flash.pagesize == 2048)) {
8206 + pr_info("msm_nand: Found a 2K page device with"
8207 + " %d oobsize - changing oobsize to 64 "
8208 + "bytes.\n", supported_flash.oobsize);
8209 + supported_flash.oobsize = 64;
8210 + }
8211 + } else {
8212 + supported_flash.flash_id = flash_id;
8213 + supported_flash.density = flashdev->chipsize << 20;
8214 + supported_flash.widebus = flashdev->options &
8215 + NAND_BUSWIDTH_16 ? 1 : 0;
8216 + supported_flash.pagesize = flashdev->pagesize;
8217 + supported_flash.blksize = flashdev->erasesize;
8218 + supported_flash.oobsize = flashdev->pagesize >> 5;
8219 + }
8220 + }
8221 +
8222 + if (dev_found) {
8223 + (!interleave_enable) ? (i = 1) : (i = 2);
8224 + wide_bus = supported_flash.widebus;
8225 + mtd->size = supported_flash.density * i;
8226 + mtd->writesize = supported_flash.pagesize * i;
8227 + mtd->oobsize = supported_flash.oobsize * i;
8228 + mtd->erasesize = supported_flash.blksize * i;
8229 + mtd->writebufsize = mtd->writesize;
8230 +
8231 + if (!interleave_enable)
8232 + mtd_writesize = mtd->writesize;
8233 + else
8234 + mtd_writesize = mtd->writesize >> 1;
8235 +
8236 + /* Check whether controller and NAND device support 8bit ECC*/
8237 + hw_id = flash_rd_reg(chip, MSM_NAND_HW_INFO);
8238 + if (msm_nand_has_bch_ecc_engine(hw_id)
8239 + && (supported_flash.ecc_correctability >= 8)) {
8240 + pr_info("Found supported NAND device for %dbit ECC\n",
8241 + supported_flash.ecc_correctability);
8242 + enable_bch_ecc = 1;
8243 + } else {
8244 + pr_info("Found a supported NAND device\n");
8245 + }
8246 + pr_info("NAND Controller ID : 0x%x\n", hw_id);
8247 + pr_info("NAND Device ID : 0x%x\n", supported_flash.flash_id);
8248 + pr_info("Buswidth : %d Bits\n", (wide_bus) ? 16 : 8);
8249 + pr_info("Density : %lld MByte\n", (mtd->size>>20));
8250 + pr_info("Pagesize : %d Bytes\n", mtd->writesize);
8251 + pr_info("Erasesize: %d Bytes\n", mtd->erasesize);
8252 + pr_info("Oobsize : %d Bytes\n", mtd->oobsize);
8253 + } else {
8254 + pr_err("Unsupported Nand,Id: 0x%x \n", flash_id);
8255 + return -ENODEV;
8256 + }
8257 +
8258 + /* Size of each codeword is 532Bytes incase of 8bit BCH ECC*/
8259 + chip->cw_size = enable_bch_ecc ? 532 : 528;
8260 + chip->CFG0 = (((mtd_writesize >> 9)-1) << 6) /* 4/8 cw/pg for 2/4k */
8261 + | (516 << 9) /* 516 user data bytes */
8262 + | (10 << 19) /* 10 parity bytes */
8263 + | (5 << 27) /* 5 address cycles */
8264 + | (0 << 30) /* Do not read status before data */
8265 + | (1 << 31) /* Send read cmd */
8266 + /* 0 spare bytes for 16 bit nand or 1/2 spare bytes for 8 bit */
8267 + | (wide_bus ? 0 << 23 : (enable_bch_ecc ? 2 << 23 : 1 << 23));
8268 +
8269 + chip->CFG1 = (0 << 0) /* Enable ecc */
8270 + | (7 << 2) /* 8 recovery cycles */
8271 + | (0 << 5) /* Allow CS deassertion */
8272 + /* Bad block marker location */
8273 + | ((mtd_writesize - (chip->cw_size * (
8274 + (mtd_writesize >> 9) - 1)) + 1) << 6)
8275 + | (0 << 16) /* Bad block in user data area */
8276 + | (2 << 17) /* 6 cycle tWB/tRB */
8277 + | ((wide_bus) ? CFG1_WIDE_FLASH : 0); /* Wide flash bit */
8278 +
8279 + chip->ecc_buf_cfg = 0x203;
8280 + chip->CFG0_RAW = 0xA80420C0;
8281 + chip->CFG1_RAW = 0x5045D;
8282 +
8283 + if (enable_bch_ecc) {
8284 + chip->CFG1 |= (1 << 27); /* Enable BCH engine */
8285 + chip->ecc_bch_cfg = (0 << 0) /* Enable ECC*/
8286 + | (0 << 1) /* Enable/Disable SW reset of ECC engine */
8287 + | (1 << 4) /* 8bit ecc*/
8288 + | ((wide_bus) ? (14 << 8) : (13 << 8))/*parity bytes*/
8289 + | (516 << 16) /* 516 user data bytes */
8290 + | (1 << 30); /* Turn on ECC engine clocks always */
8291 + chip->CFG0_RAW = 0xA80428C0; /* CW size is increased to 532B */
8292 + }
8293 +
8294 + /*
8295 + * For 4bit RS ECC (default ECC), parity bytes = 10 (for x8 and x16 I/O)
8296 + * For 8bit BCH ECC, parity bytes = 13 (x8) or 14 (x16 I/O).
8297 + */
8298 + chip->ecc_parity_bytes = enable_bch_ecc ? (wide_bus ? 14 : 13) : 10;
8299 +
8300 + pr_info("CFG0 Init : 0x%08x\n", chip->CFG0);
8301 + pr_info("CFG1 Init : 0x%08x\n", chip->CFG1);
8302 + pr_info("ECCBUFCFG : 0x%08x\n", chip->ecc_buf_cfg);
8303 +
8304 + if (mtd->oobsize == 64) {
8305 + mtd->oobavail = msm_nand_oob_64.oobavail;
8306 + mtd->ecclayout = &msm_nand_oob_64;
8307 + } else if (mtd->oobsize == 128) {
8308 + mtd->oobavail = msm_nand_oob_128.oobavail;
8309 + mtd->ecclayout = &msm_nand_oob_128;
8310 + } else if (mtd->oobsize == 224) {
8311 + mtd->oobavail = wide_bus ? msm_nand_oob_224_x16.oobavail :
8312 + msm_nand_oob_224_x8.oobavail;
8313 + mtd->ecclayout = wide_bus ? &msm_nand_oob_224_x16 :
8314 + &msm_nand_oob_224_x8;
8315 + } else if (mtd->oobsize == 256) {
8316 + mtd->oobavail = msm_nand_oob_256.oobavail;
8317 + mtd->ecclayout = &msm_nand_oob_256;
8318 + } else {
8319 + pr_err("Unsupported Nand, oobsize: 0x%x \n",
8320 + mtd->oobsize);
8321 + return -ENODEV;
8322 + }
8323 +
8324 + /* Fill in remaining MTD driver data */
8325 + mtd->type = MTD_NANDFLASH;
8326 + mtd->flags = MTD_CAP_NANDFLASH;
8327 + /* mtd->ecctype = MTD_ECC_SW; */
8328 + mtd->_erase = msm_nand_erase;
8329 + mtd->_block_isbad = msm_nand_block_isbad;
8330 + mtd->_block_markbad = msm_nand_block_markbad;
8331 + mtd->_point = NULL;
8332 + mtd->_unpoint = NULL;
8333 + mtd->_read = msm_nand_read;
8334 + mtd->_write = msm_nand_write;
8335 + mtd->_read_oob = msm_nand_read_oob;
8336 + mtd->_write_oob = msm_nand_write_oob;
8337 + if (dual_nand_ctlr_present) {
8338 + mtd->_read_oob = msm_nand_read_oob_dualnandc;
8339 + mtd->_write_oob = msm_nand_write_oob_dualnandc;
8340 + if (interleave_enable) {
8341 + mtd->_erase = msm_nand_erase_dualnandc;
8342 + mtd->_block_isbad = msm_nand_block_isbad_dualnandc;
8343 + }
8344 + }
8345 +
8346 + /* mtd->sync = msm_nand_sync; */
8347 + mtd->_lock = NULL;
8348 + /* mtd->_unlock = msm_nand_unlock; */
8349 + mtd->_suspend = msm_nand_suspend;
8350 + mtd->_resume = msm_nand_resume;
8351 + mtd->owner = THIS_MODULE;
8352 +
8353 + /* Unlock whole block */
8354 + /* msm_nand_unlock_all(mtd); */
8355 +
8356 + /* return this->scan_bbt(mtd); */
8357 + return 0;
8358 +}
8359 +EXPORT_SYMBOL_GPL(msm_nand_scan);
8360 +
8361 +/**
8362 + * msm_nand_release - [msm_nand Interface] Free resources held by the msm_nand device
8363 + * @param mtd MTD device structure
8364 + */
8365 +void msm_nand_release(struct mtd_info *mtd)
8366 +{
8367 + /* struct msm_nand_chip *this = mtd->priv; */
8368 +
8369 + /* Deregister the device */
8370 + mtd_device_unregister(mtd);
8371 +}
8372 +EXPORT_SYMBOL_GPL(msm_nand_release);
8373 +
8374 +struct msm_nand_info {
8375 + struct mtd_info mtd;
8376 + struct mtd_partition *parts;
8377 + struct msm_nand_chip msm_nand;
8378 +};
8379 +
8380 +/* duplicating the NC01 XFR contents to NC10 */
8381 +static int msm_nand_nc10_xfr_settings(struct mtd_info *mtd)
8382 +{
8383 + struct msm_nand_chip *chip = mtd->priv;
8384 +
8385 + struct {
8386 + dmov_s cmd[2];
8387 + unsigned cmdptr;
8388 + } *dma_buffer;
8389 + dmov_s *cmd;
8390 +
8391 + wait_event(chip->wait_queue,
8392 + (dma_buffer = msm_nand_get_dma_buffer(
8393 + chip, sizeof(*dma_buffer))));
8394 +
8395 + cmd = dma_buffer->cmd;
8396 +
8397 + /* Copying XFR register contents from NC01 --> NC10 */
8398 + cmd->cmd = 0;
8399 + cmd->src = NC01(MSM_NAND_XFR_STEP1);
8400 + cmd->dst = NC10(MSM_NAND_XFR_STEP1);
8401 + cmd->len = 28;
8402 + cmd++;
8403 +
8404 + BUILD_BUG_ON(2 != ARRAY_SIZE(dma_buffer->cmd));
8405 + BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
8406 + dma_buffer->cmd[0].cmd |= CMD_OCB;
8407 + cmd[-1].cmd |= CMD_OCU | CMD_LC;
8408 + dma_buffer->cmdptr = (msm_virt_to_dma(chip, dma_buffer->cmd) >> 3)
8409 + | CMD_PTR_LP;
8410 +
8411 + mb();
8412 + msm_dmov_exec_cmd(chip->dma_channel, DMOV_CMD_PTR_LIST
8413 + | DMOV_CMD_ADDR(msm_virt_to_dma(chip,
8414 + &dma_buffer->cmdptr)));
8415 + mb();
8416 + msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
8417 + return 0;
8418 +}
8419 +
8420 +static ssize_t boot_layout_show(struct device *dev,
8421 + struct device_attribute *attr,
8422 + char *buf)
8423 +{
8424 + return sprintf(buf, "%d\n", boot_layout);
8425 +}
8426 +
8427 +static ssize_t boot_layout_store(struct device *dev,
8428 + struct device_attribute *attr,
8429 + const char *buf, size_t n)
8430 +{
8431 + struct msm_nand_info *info = dev_get_drvdata(dev);
8432 + struct msm_nand_chip *chip = info->mtd.priv;
8433 + unsigned int ud_size;
8434 + unsigned int spare_size;
8435 + unsigned int ecc_num_data_bytes;
8436 +
8437 + sscanf(buf, "%d", &boot_layout);
8438 +
8439 + ud_size = boot_layout? 512: 516;
8440 + spare_size = boot_layout? (chip->cw_size -
8441 + (chip->ecc_parity_bytes+ 1+ ud_size)):
8442 + (enable_bch_ecc ? 2 : 1);
8443 + ecc_num_data_bytes = boot_layout? 512: 516;
8444 +
8445 + chip->CFG0 = (chip->CFG0 & ~SPARE_SIZE_BYTES_MASK);
8446 + chip->CFG0 |= (spare_size << 23);
8447 +
8448 + chip->CFG0 = (chip->CFG0 & ~UD_SIZE_BYTES_MASK);
8449 + chip->CFG0 |= (ud_size << 9);
8450 +
8451 + chip->ecc_buf_cfg = (chip->ecc_buf_cfg & ~ECC_NUM_DATA_BYTES_MASK)
8452 + | (ecc_num_data_bytes << 16);
8453 +
8454 + return n;
8455 +}
8456 +
8457 +static const DEVICE_ATTR(boot_layout, 0644, boot_layout_show, boot_layout_store);
8458 +
8459 +static int msm_nand_probe(struct platform_device *pdev)
8460 +
8461 +{
8462 + struct msm_nand_info *info;
8463 + struct resource *res;
8464 + int err;
8465 + struct mtd_part_parser_data ppdata = {};
8466 +
8467 +
8468 + res = platform_get_resource(pdev,
8469 + IORESOURCE_MEM, 0);
8470 + if (!res || !res->start) {
8471 + pr_err("%s: msm_nand_phys resource invalid/absent\n",
8472 + __func__);
8473 + return -ENODEV;
8474 + }
8475 + msm_nand_phys = res->start;
8476 +
8477 + info = devm_kzalloc(&pdev->dev, sizeof(struct msm_nand_info), GFP_KERNEL);
8478 + if (!info) {
8479 + pr_err("%s: No memory for msm_nand_info\n", __func__);
8480 + return -ENOMEM;
8481 + }
8482 +
8483 + info->msm_nand.dev = &pdev->dev;
8484 +
8485 + init_waitqueue_head(&info->msm_nand.wait_queue);
8486 +
8487 + info->msm_nand.dma_channel = 3;
8488 + pr_info("%s: dmac 0x%x\n", __func__, info->msm_nand.dma_channel);
8489 +
8490 + /* this currently fails if dev is passed in */
8491 + info->msm_nand.dma_buffer =
8492 + dma_alloc_coherent(/*dev*/ NULL, MSM_NAND_DMA_BUFFER_SIZE,
8493 + &info->msm_nand.dma_addr, GFP_KERNEL);
8494 + if (info->msm_nand.dma_buffer == NULL) {
8495 + pr_err("%s: No memory for msm_nand.dma_buffer\n", __func__);
8496 + err = -ENOMEM;
8497 + goto out_free_info;
8498 + }
8499 +
8500 + pr_info("%s: allocated dma buffer at %p, dma_addr %x\n",
8501 + __func__, info->msm_nand.dma_buffer, info->msm_nand.dma_addr);
8502 +
8503 + /* Let default be VERSION_1 for backward compatibility */
8504 + info->msm_nand.uncorrectable_bit_mask = BIT(8);
8505 + info->msm_nand.num_err_mask = 0x1F;
8506 +
8507 + info->mtd.name = dev_name(&pdev->dev);
8508 + info->mtd.priv = &info->msm_nand;
8509 + info->mtd.owner = THIS_MODULE;
8510 +
8511 + /* config ebi2_cfg register only for ping pong mode!!! */
8512 + if (!interleave_enable && dual_nand_ctlr_present)
8513 + flash_wr_reg(&info->msm_nand, EBI2_CFG_REG, 0x4010080);
8514 +
8515 + if (dual_nand_ctlr_present)
8516 + msm_nand_nc10_xfr_settings(&info->mtd);
8517 +
8518 + if (msm_nand_scan(&info->mtd, 1))
8519 + if (msm_onenand_scan(&info->mtd, 1)) {
8520 + pr_err("%s: No nand device found\n", __func__);
8521 + err = -ENXIO;
8522 + goto out_free_dma_buffer;
8523 + }
8524 +
8525 + flash_wr_reg(&info->msm_nand, MSM_NAND_DEV_CMD_VLD,
8526 + DEV_CMD_VLD_SEQ_READ_START_VLD |
8527 + DEV_CMD_VLD_ERASE_START_VLD |
8528 + DEV_CMD_VLD_WRITE_START_VLD |
8529 + DEV_CMD_VLD_READ_START_VLD);
8530 +
8531 + ppdata.of_node = pdev->dev.of_node;
8532 + err = mtd_device_parse_register(&info->mtd, NULL, &ppdata, NULL, 0);
8533 +
8534 + if (err < 0) {
8535 + pr_err("%s: mtd_device_parse_register failed with err=%d\n",
8536 + __func__, err);
8537 + goto out_free_dma_buffer;
8538 + }
8539 +
8540 + err = sysfs_create_file(&pdev->dev.kobj, &dev_attr_boot_layout.attr);
8541 + if (err)
8542 + goto out_free_dma_buffer;
8543 +
8544 + dev_set_drvdata(&pdev->dev, info);
8545 +
8546 + return 0;
8547 +
8548 +out_free_dma_buffer:
8549 + dma_free_coherent(NULL, MSM_NAND_DMA_BUFFER_SIZE,
8550 + info->msm_nand.dma_buffer,
8551 + info->msm_nand.dma_addr);
8552 +out_free_info:
8553 + return err;
8554 +}
8555 +
8556 +static int msm_nand_remove(struct platform_device *pdev)
8557 +{
8558 + struct msm_nand_info *info = dev_get_drvdata(&pdev->dev);
8559 +
8560 + dev_set_drvdata(&pdev->dev, NULL);
8561 +
8562 + if (info) {
8563 + msm_nand_release(&info->mtd);
8564 + dma_free_coherent(NULL, MSM_NAND_DMA_BUFFER_SIZE,
8565 + info->msm_nand.dma_buffer,
8566 + info->msm_nand.dma_addr);
8567 + }
8568 +
8569 + sysfs_remove_file(&pdev->dev.kobj, &dev_attr_boot_layout.attr);
8570 +
8571 + return 0;
8572 +}
8573 +
8574 +
8575 +#ifdef CONFIG_OF
8576 +static const struct of_device_id msm_nand_of_match[] = {
8577 + { .compatible = "qcom,qcom_nand", },
8578 + {},
8579 +};
8580 +MODULE_DEVICE_TABLE(of, msm_nand_of_match);
8581 +#endif
8582 +
8583 +
8584 +static struct platform_driver msm_nand_driver = {
8585 + .probe = msm_nand_probe,
8586 + .remove = msm_nand_remove,
8587 + .driver = {
8588 + .name = "qcom_nand",
8589 + .owner = THIS_MODULE,
8590 + .of_match_table = msm_nand_of_match,
8591 + }
8592 +};
8593 +
8594 +
8595 +module_platform_driver(msm_nand_driver);
8596 +
8597 +MODULE_LICENSE("GPL");
8598 +MODULE_DESCRIPTION("msm_nand flash driver code");
8599 diff --git a/drivers/mtd/nand/qcom_nand.h b/drivers/mtd/nand/qcom_nand.h
8600 new file mode 100644
8601 index 0000000..468186c
8602 --- /dev/null
8603 +++ b/drivers/mtd/nand/qcom_nand.h
8604 @@ -0,0 +1,196 @@
8605 +/* drivers/mtd/devices/msm_nand.h
8606 + *
8607 + * Copyright (c) 2008-2011, The Linux Foundation. All rights reserved.
8608 + * Copyright (C) 2007 Google, Inc.
8609 + *
8610 + * This software is licensed under the terms of the GNU General Public
8611 + * License version 2, as published by the Free Software Foundation, and
8612 + * may be copied, distributed, and modified under those terms.
8613 + *
8614 + * This program is distributed in the hope that it will be useful,
8615 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
8616 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
8617 + * GNU General Public License for more details.
8618 + *
8619 + */
8620 +
8621 +#ifndef __DRIVERS_MTD_DEVICES_MSM_NAND_H
8622 +#define __DRIVERS_MTD_DEVICES_MSM_NAND_H
8623 +
8624 +extern unsigned long msm_nand_phys;
8625 +extern unsigned long msm_nandc01_phys;
8626 +extern unsigned long msm_nandc10_phys;
8627 +extern unsigned long msm_nandc11_phys;
8628 +extern unsigned long ebi2_register_base;
8629 +
8630 +#define NC01(X) ((X) + msm_nandc01_phys - msm_nand_phys)
8631 +#define NC10(X) ((X) + msm_nandc10_phys - msm_nand_phys)
8632 +#define NC11(X) ((X) + msm_nandc11_phys - msm_nand_phys)
8633 +
8634 +#define MSM_NAND_REG(off) (msm_nand_phys + (off))
8635 +
8636 +#define MSM_NAND_FLASH_CMD MSM_NAND_REG(0x0000)
8637 +#define MSM_NAND_ADDR0 MSM_NAND_REG(0x0004)
8638 +#define MSM_NAND_ADDR1 MSM_NAND_REG(0x0008)
8639 +#define MSM_NAND_FLASH_CHIP_SELECT MSM_NAND_REG(0x000C)
8640 +#define MSM_NAND_EXEC_CMD MSM_NAND_REG(0x0010)
8641 +#define MSM_NAND_FLASH_STATUS MSM_NAND_REG(0x0014)
8642 +#define MSM_NAND_BUFFER_STATUS MSM_NAND_REG(0x0018)
8643 +#define MSM_NAND_SFLASHC_STATUS MSM_NAND_REG(0x001C)
8644 +#define MSM_NAND_DEV0_CFG0 MSM_NAND_REG(0x0020)
8645 +#define MSM_NAND_DEV0_CFG1 MSM_NAND_REG(0x0024)
8646 +#define MSM_NAND_DEV0_ECC_CFG MSM_NAND_REG(0x0028)
8647 +#define MSM_NAND_DEV1_ECC_CFG MSM_NAND_REG(0x002C)
8648 +#define MSM_NAND_DEV1_CFG0 MSM_NAND_REG(0x0030)
8649 +#define MSM_NAND_DEV1_CFG1 MSM_NAND_REG(0x0034)
8650 +#define MSM_NAND_SFLASHC_CMD MSM_NAND_REG(0x0038)
8651 +#define MSM_NAND_SFLASHC_EXEC_CMD MSM_NAND_REG(0x003C)
8652 +#define MSM_NAND_READ_ID MSM_NAND_REG(0x0040)
8653 +#define MSM_NAND_READ_STATUS MSM_NAND_REG(0x0044)
8654 +#define MSM_NAND_CONFIG_DATA MSM_NAND_REG(0x0050)
8655 +#define MSM_NAND_CONFIG MSM_NAND_REG(0x0054)
8656 +#define MSM_NAND_CONFIG_MODE MSM_NAND_REG(0x0058)
8657 +#define MSM_NAND_CONFIG_STATUS MSM_NAND_REG(0x0060)
8658 +#define MSM_NAND_MACRO1_REG MSM_NAND_REG(0x0064)
8659 +#define MSM_NAND_XFR_STEP1 MSM_NAND_REG(0x0070)
8660 +#define MSM_NAND_XFR_STEP2 MSM_NAND_REG(0x0074)
8661 +#define MSM_NAND_XFR_STEP3 MSM_NAND_REG(0x0078)
8662 +#define MSM_NAND_XFR_STEP4 MSM_NAND_REG(0x007C)
8663 +#define MSM_NAND_XFR_STEP5 MSM_NAND_REG(0x0080)
8664 +#define MSM_NAND_XFR_STEP6 MSM_NAND_REG(0x0084)
8665 +#define MSM_NAND_XFR_STEP7 MSM_NAND_REG(0x0088)
8666 +#define MSM_NAND_GENP_REG0 MSM_NAND_REG(0x0090)
8667 +#define MSM_NAND_GENP_REG1 MSM_NAND_REG(0x0094)
8668 +#define MSM_NAND_GENP_REG2 MSM_NAND_REG(0x0098)
8669 +#define MSM_NAND_GENP_REG3 MSM_NAND_REG(0x009C)
8670 +#define MSM_NAND_DEV_CMD0 MSM_NAND_REG(0x00A0)
8671 +#define MSM_NAND_DEV_CMD1 MSM_NAND_REG(0x00A4)
8672 +#define MSM_NAND_DEV_CMD2 MSM_NAND_REG(0x00A8)
8673 +#define MSM_NAND_DEV_CMD_VLD MSM_NAND_REG(0x00AC)
8674 +#define DEV_CMD_VLD_SEQ_READ_START_VLD 0x10
8675 +#define DEV_CMD_VLD_ERASE_START_VLD 0x8
8676 +#define DEV_CMD_VLD_WRITE_START_VLD 0x4
8677 +#define DEV_CMD_VLD_READ_STOP_VLD 0x2
8678 +#define DEV_CMD_VLD_READ_START_VLD 0x1
8679 +
8680 +#define MSM_NAND_EBI2_MISR_SIG_REG MSM_NAND_REG(0x00B0)
8681 +#define MSM_NAND_ADDR2 MSM_NAND_REG(0x00C0)
8682 +#define MSM_NAND_ADDR3 MSM_NAND_REG(0x00C4)
8683 +#define MSM_NAND_ADDR4 MSM_NAND_REG(0x00C8)
8684 +#define MSM_NAND_ADDR5 MSM_NAND_REG(0x00CC)
8685 +#define MSM_NAND_DEV_CMD3 MSM_NAND_REG(0x00D0)
8686 +#define MSM_NAND_DEV_CMD4 MSM_NAND_REG(0x00D4)
8687 +#define MSM_NAND_DEV_CMD5 MSM_NAND_REG(0x00D8)
8688 +#define MSM_NAND_DEV_CMD6 MSM_NAND_REG(0x00DC)
8689 +#define MSM_NAND_SFLASHC_BURST_CFG MSM_NAND_REG(0x00E0)
8690 +#define MSM_NAND_ADDR6 MSM_NAND_REG(0x00E4)
8691 +#define MSM_NAND_EBI2_ECC_BUF_CFG MSM_NAND_REG(0x00F0)
8692 +#define MSM_NAND_HW_INFO MSM_NAND_REG(0x00FC)
8693 +#define MSM_NAND_FLASH_BUFFER MSM_NAND_REG(0x0100)
8694 +
8695 +/* device commands */
8696 +
8697 +#define MSM_NAND_CMD_SOFT_RESET 0x01
8698 +#define MSM_NAND_CMD_PAGE_READ 0x32
8699 +#define MSM_NAND_CMD_PAGE_READ_ECC 0x33
8700 +#define MSM_NAND_CMD_PAGE_READ_ALL 0x34
8701 +#define MSM_NAND_CMD_SEQ_PAGE_READ 0x15
8702 +#define MSM_NAND_CMD_PRG_PAGE 0x36
8703 +#define MSM_NAND_CMD_PRG_PAGE_ECC 0x37
8704 +#define MSM_NAND_CMD_PRG_PAGE_ALL 0x39
8705 +#define MSM_NAND_CMD_BLOCK_ERASE 0x3A
8706 +#define MSM_NAND_CMD_FETCH_ID 0x0B
8707 +#define MSM_NAND_CMD_STATUS 0x0C
8708 +#define MSM_NAND_CMD_RESET 0x0D
8709 +
8710 +/* Sflash Commands */
8711 +
8712 +#define MSM_NAND_SFCMD_DATXS 0x0
8713 +#define MSM_NAND_SFCMD_CMDXS 0x1
8714 +#define MSM_NAND_SFCMD_BURST 0x0
8715 +#define MSM_NAND_SFCMD_ASYNC 0x1
8716 +#define MSM_NAND_SFCMD_ABORT 0x1
8717 +#define MSM_NAND_SFCMD_REGRD 0x2
8718 +#define MSM_NAND_SFCMD_REGWR 0x3
8719 +#define MSM_NAND_SFCMD_INTLO 0x4
8720 +#define MSM_NAND_SFCMD_INTHI 0x5
8721 +#define MSM_NAND_SFCMD_DATRD 0x6
8722 +#define MSM_NAND_SFCMD_DATWR 0x7
8723 +
8724 +#define SFLASH_PREPCMD(numxfr, offval, delval, trnstp, mode, opcode) \
8725 + ((numxfr<<20)|(offval<<12)|(delval<<6)|(trnstp<<5)|(mode<<4)|opcode)
8726 +
8727 +#define SFLASH_BCFG 0x20100327
8728 +
8729 +/* Onenand addresses */
8730 +
8731 +#define ONENAND_MANUFACTURER_ID 0xF000
8732 +#define ONENAND_DEVICE_ID 0xF001
8733 +#define ONENAND_VERSION_ID 0xF002
8734 +#define ONENAND_DATA_BUFFER_SIZE 0xF003
8735 +#define ONENAND_BOOT_BUFFER_SIZE 0xF004
8736 +#define ONENAND_AMOUNT_OF_BUFFERS 0xF005
8737 +#define ONENAND_TECHNOLOGY 0xF006
8738 +#define ONENAND_START_ADDRESS_1 0xF100
8739 +#define ONENAND_START_ADDRESS_2 0xF101
8740 +#define ONENAND_START_ADDRESS_3 0xF102
8741 +#define ONENAND_START_ADDRESS_4 0xF103
8742 +#define ONENAND_START_ADDRESS_5 0xF104
8743 +#define ONENAND_START_ADDRESS_6 0xF105
8744 +#define ONENAND_START_ADDRESS_7 0xF106
8745 +#define ONENAND_START_ADDRESS_8 0xF107
8746 +#define ONENAND_START_BUFFER 0xF200
8747 +#define ONENAND_COMMAND 0xF220
8748 +#define ONENAND_SYSTEM_CONFIG_1 0xF221
8749 +#define ONENAND_SYSTEM_CONFIG_2 0xF222
8750 +#define ONENAND_CONTROLLER_STATUS 0xF240
8751 +#define ONENAND_INTERRUPT_STATUS 0xF241
8752 +#define ONENAND_START_BLOCK_ADDRESS 0xF24C
8753 +#define ONENAND_WRITE_PROT_STATUS 0xF24E
8754 +#define ONENAND_ECC_STATUS 0xFF00
8755 +#define ONENAND_ECC_ERRPOS_MAIN0 0xFF01
8756 +#define ONENAND_ECC_ERRPOS_SPARE0 0xFF02
8757 +#define ONENAND_ECC_ERRPOS_MAIN1 0xFF03
8758 +#define ONENAND_ECC_ERRPOS_SPARE1 0xFF04
8759 +#define ONENAND_ECC_ERRPOS_MAIN2 0xFF05
8760 +#define ONENAND_ECC_ERRPOS_SPARE2 0xFF06
8761 +#define ONENAND_ECC_ERRPOS_MAIN3 0xFF07
8762 +#define ONENAND_ECC_ERRPOS_SPARE3 0xFF08
8763 +
8764 +/* Onenand commands */
8765 +#define ONENAND_WP_US (1 << 2)
8766 +#define ONENAND_WP_LS (1 << 1)
8767 +
8768 +#define ONENAND_CMDLOAD 0x0000
8769 +#define ONENAND_CMDLOADSPARE 0x0013
8770 +#define ONENAND_CMDPROG 0x0080
8771 +#define ONENAND_CMDPROGSPARE 0x001A
8772 +#define ONENAND_CMDERAS 0x0094
8773 +#define ONENAND_CMD_UNLOCK 0x0023
8774 +#define ONENAND_CMD_LOCK 0x002A
8775 +
8776 +#define ONENAND_SYSCFG1_ECCENA(mode) (0x40E0 | (mode ? 0 : 0x8002))
8777 +#define ONENAND_SYSCFG1_ECCDIS(mode) (0x41E0 | (mode ? 0 : 0x8002))
8778 +
8779 +#define ONENAND_CLRINTR 0x0000
8780 +#define ONENAND_STARTADDR1_RES 0x07FF
8781 +#define ONENAND_STARTADDR3_RES 0x07FF
8782 +
8783 +#define DATARAM0_0 0x8
8784 +#define DEVICE_FLASHCORE_0 (0 << 15)
8785 +#define DEVICE_FLASHCORE_1 (1 << 15)
8786 +#define DEVICE_BUFFERRAM_0 (0 << 15)
8787 +#define DEVICE_BUFFERRAM_1 (1 << 15)
8788 +#define ONENAND_DEVICE_IS_DDP (1 << 3)
8789 +
8790 +#define CLEAN_DATA_16 0xFFFF
8791 +#define CLEAN_DATA_32 0xFFFFFFFF
8792 +
8793 +#define EBI2_REG(off) (ebi2_register_base + (off))
8794 +#define EBI2_CHIP_SELECT_CFG0 EBI2_REG(0x0000)
8795 +#define EBI2_CFG_REG EBI2_REG(0x0004)
8796 +#define EBI2_NAND_ADM_MUX EBI2_REG(0x005C)
8797 +
8798 +extern struct flash_platform_data msm_nand_data;
8799 +
8800 +#endif
8801 --
8802 1.7.10.4
8803