ipq806x: make patches apply again
[openwrt/openwrt.git] target/linux/ipq806x/patches-4.9/861-qcom-mtd-nand-Added-bam-transaction-and-support-addi.patch
From 645c7805f2602569263d7ac78050b2c9e91e3377 Mon Sep 17 00:00:00 2001
From: Ram Chandra Jangir <rjangir@codeaurora.org>
Date: Thu, 20 Apr 2017 10:23:00 +0530
Subject: [PATCH] qcom: mtd: nand: Added bam transaction and support
 additional CSRs

This patch adds the following for NAND BAM DMA support:
 - BAM transaction, which will be used for any NAND request.
   It contains the array of command elements and the command and
   data sgls. This transaction will be reset before every
   request.
 - Allocation function for the NAND BAM transaction, which will be
   called only once, at probe time.
 - Reset function for the NAND BAM transaction, which will be called
   before any new NAND request.
 - Support for additional CSRs:
   NAND_READ_LOCATION - page offset for reading in BAM DMA mode
   NAND_ERASED_CW_DETECT_CFG - status for erased code words
   NAND_BUFFER_STATUS - status for ECC

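As a reading aid, a minimal sketch of the intended call flow (these are
the functions added or touched in the diff below; error handling
omitted):

    /* once, at probe time */
    nandc->bam_txn = alloc_bam_transaction(nandc);

    /* before every NAND request, rewind the ce/sgl indices */
    clear_bam_transaction(nandc);

    /* queue register/data descriptors, then issue all BAM channels */
    submit_descs(nandc);
    free_descs(nandc);
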
Signed-off-by: Abhishek Sahu <absahu@codeaurora.org>
Signed-off-by: Ram Chandra Jangir <rjangir@codeaurora.org>
---
 drivers/mtd/nand/qcom_nandc.c    | 631 +++++++++++++++++++++++++++++++++++----
 include/linux/dma/qcom_bam_dma.h | 149 +++++++++
 2 files changed, 721 insertions(+), 59 deletions(-)
 create mode 100644 include/linux/dma/qcom_bam_dma.h

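A note on the NAND_READ_LOCATION_n encoding that recurs throughout the
hunks below: per the READ_LOCATION_* shift macros added in this patch,
each register packs a byte offset into the codeword buffer (bits 0-15),
a transfer size in bytes (bits 16-30) and a last-location flag (bit 31),
e.g.:

    nandc_set_reg(nandc, NAND_READ_LOCATION_0,
                  (0 << READ_LOCATION_OFFSET) |
                  (host->cw_data << READ_LOCATION_SIZE) |
                  (1 << READ_LOCATION_LAST));
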
--- a/drivers/mtd/nand/qcom_nandc.c
+++ b/drivers/mtd/nand/qcom_nandc.c
@@ -22,6 +22,7 @@
 #include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/delay.h>
+#include <linux/dma/qcom_bam_dma.h>

 /* NANDc reg offsets */
 #define NAND_FLASH_CMD 0x00
@@ -53,6 +54,8 @@
 #define NAND_VERSION 0xf08
 #define NAND_READ_LOCATION_0 0xf20
 #define NAND_READ_LOCATION_1 0xf24
+#define NAND_READ_LOCATION_2 0xf28
+#define NAND_READ_LOCATION_3 0xf2c

 /* dummy register offsets, used by write_reg_dma */
 #define NAND_DEV_CMD1_RESTORE 0xdead
@@ -135,6 +138,11 @@
 #define ERASED_PAGE (PAGE_ALL_ERASED | PAGE_ERASED)
 #define ERASED_CW (CODEWORD_ALL_ERASED | CODEWORD_ERASED)

+/* NAND_READ_LOCATION_n bits */
+#define READ_LOCATION_OFFSET 0
+#define READ_LOCATION_SIZE 16
+#define READ_LOCATION_LAST 31
+
 /* Version Mask */
 #define NAND_VERSION_MAJOR_MASK 0xf0000000
 #define NAND_VERSION_MAJOR_SHIFT 28
@@ -156,6 +164,9 @@
 #define NAND_DEV_CMD_VLD_VAL (READ_START_VLD | WRITE_START_VLD | \
 ERASE_START_VLD | SEQ_READ_START_VLD)

+/* NAND_CTRL bits */
+#define BAM_MODE_EN BIT(0)
+
 /*
 * the NAND controller performs reads/writes with ECC in 516 byte chunks.
 * the driver calls the chunks 'step' or 'codeword' interchangeably
@@ -177,12 +188,77 @@
 #define ECC_BCH_4BIT BIT(2)
 #define ECC_BCH_8BIT BIT(3)

+/* Flags used for BAM DMA desc preparation */
+/* Don't set the EOT in current tx sgl */
+#define DMA_DESC_FLAG_NO_EOT (0x0001)
+/* Set the NWD flag in current sgl */
+#define DMA_DESC_FLAG_BAM_NWD (0x0002)
+/* Close current sgl and start writing in another sgl */
+#define DMA_DESC_FLAG_BAM_NEXT_SGL (0x0004)
+/*
+ * The erased codeword status is used twice in a single transfer, so this
+ * flag determines the current value of the erased codeword status register
+ */
+#define DMA_DESC_ERASED_CW_SET (0x0008)
+
+/* Returns the dma address for reg read buffer */
+#define REG_BUF_DMA_ADDR(chip, vaddr) \
+ ((chip)->reg_read_buf_phys + \
+ ((uint8_t *)(vaddr) - (uint8_t *)(chip)->reg_read_buf))
+
+/* Returns the nand register physical address */
+#define NAND_REG_PHYS_ADDRESS(chip, addr) \
+ ((chip)->base_dma + (addr))
+
+/* command element array size in bam transaction */
+#define BAM_CMD_ELEMENT_SIZE (256)
+/* command sgl size in bam transaction */
+#define BAM_CMD_SGL_SIZE (256)
+/* data sgl size in bam transaction */
+#define BAM_DATA_SGL_SIZE (128)
+
+/*
+ * This data type corresponds to the BAM transaction which will be used for any
+ * nand request.
+ * @bam_ce - the array of bam command elements
+ * @cmd_sgl - sgl for nand bam command pipe
+ * @tx_sgl - sgl for nand bam consumer pipe
+ * @rx_sgl - sgl for nand bam producer pipe
+ * @bam_ce_index - the index in bam_ce which is available for next sgl request
+ * @pre_bam_ce_index - the index in bam_ce which marks the start of the ce
+ *                     for the current sgl. It will be used for the size
+ *                     calculation for the current sgl
+ * @cmd_sgl_cnt - number of entries in the command sgl.
+ * @tx_sgl_cnt - number of entries in the tx sgl.
+ * @rx_sgl_cnt - number of entries in the rx sgl.
+ */
+struct bam_transaction {
+ struct bam_cmd_element bam_ce[BAM_CMD_ELEMENT_SIZE];
+ struct qcom_bam_sgl cmd_sgl[BAM_CMD_SGL_SIZE];
+ struct qcom_bam_sgl tx_sgl[BAM_DATA_SGL_SIZE];
+ struct qcom_bam_sgl rx_sgl[BAM_DATA_SGL_SIZE];
+ uint32_t bam_ce_index;
+ uint32_t pre_bam_ce_index;
+ uint32_t cmd_sgl_cnt;
+ uint32_t tx_sgl_cnt;
+ uint32_t rx_sgl_cnt;
+};
+
+/**
+ * This data type corresponds to the nand dma descriptor
+ * @list - list for desc_info
+ * @dir - DMA transfer direction
+ * @sgl - sgl which will be used for single sgl dma descriptor
+ * @dma_desc - low level dma engine descriptor
+ * @bam_desc_data - used for bam desc mappings
+ */
 struct desc_info {
 struct list_head node;

 enum dma_data_direction dir;
 struct scatterlist sgl;
 struct dma_async_tx_descriptor *dma_desc;
+ struct qcom_bam_custom_data bam_desc_data;
 };

 /*
@@ -210,6 +286,13 @@ struct nandc_regs {
 __le32 orig_vld;

 __le32 ecc_buf_cfg;
+ __le32 read_location0;
+ __le32 read_location1;
+ __le32 read_location2;
+ __le32 read_location3;
+
+ __le32 erased_cw_detect_cfg_clr;
+ __le32 erased_cw_detect_cfg_set;
 };

 /*
@@ -225,6 +308,7 @@ struct nandc_regs {
 * @aon_clk: another controller clock
 *
 * @chan: dma channel
+ * @bam_txn: contains the bam transaction address
 * @cmd_crci: ADM DMA CRCI for command flow control
 * @data_crci: ADM DMA CRCI for data flow control
 * @desc_list: DMA descriptor list (list of desc_infos)
@@ -250,6 +334,7 @@ struct nandc_regs {
 struct qcom_nand_controller {
 struct nand_hw_control controller;
 struct list_head host_list;
+ struct bam_transaction *bam_txn;

 struct device *dev;

@@ -350,6 +435,45 @@ struct qcom_nand_driver_data {
 bool dma_bam_enabled;
 };

+/* Allocates and initializes the BAM transaction */
+struct bam_transaction *alloc_bam_transaction(
+ struct qcom_nand_controller *nandc)
+{
+ struct bam_transaction *bam_txn;
+
+ bam_txn = kzalloc(sizeof(*bam_txn), GFP_KERNEL);
+
+ if (!bam_txn)
+ return NULL;
+
+ bam_txn->bam_ce_index = 0;
+ bam_txn->pre_bam_ce_index = 0;
+ bam_txn->cmd_sgl_cnt = 0;
+ bam_txn->tx_sgl_cnt = 0;
+ bam_txn->rx_sgl_cnt = 0;
+
+ qcom_bam_sg_init_table(bam_txn->cmd_sgl, BAM_CMD_SGL_SIZE);
+ qcom_bam_sg_init_table(bam_txn->tx_sgl, BAM_DATA_SGL_SIZE);
+ qcom_bam_sg_init_table(bam_txn->rx_sgl, BAM_DATA_SGL_SIZE);
+
+ return bam_txn;
+}
+
+/* Clears the BAM transaction index */
+void clear_bam_transaction(struct qcom_nand_controller *nandc)
+{
+ struct bam_transaction *bam_txn = nandc->bam_txn;
+
+ if (!nandc->dma_bam_enabled)
+ return;
+
+ bam_txn->bam_ce_index = 0;
+ bam_txn->pre_bam_ce_index = 0;
+ bam_txn->cmd_sgl_cnt = 0;
+ bam_txn->tx_sgl_cnt = 0;
+ bam_txn->rx_sgl_cnt = 0;
+}
+
 static inline struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip)
 {
 return container_of(chip, struct qcom_nand_host, chip);
@@ -406,6 +530,16 @@ static __le32 *offset_to_nandc_reg(struc
 return &regs->orig_vld;
 case NAND_EBI2_ECC_BUF_CFG:
 return &regs->ecc_buf_cfg;
+ case NAND_BUFFER_STATUS:
+ return &regs->clrreadstatus;
+ case NAND_READ_LOCATION_0:
+ return &regs->read_location0;
+ case NAND_READ_LOCATION_1:
+ return &regs->read_location1;
+ case NAND_READ_LOCATION_2:
+ return &regs->read_location2;
+ case NAND_READ_LOCATION_3:
+ return &regs->read_location3;
 default:
 return NULL;
 }
@@ -447,7 +581,7 @@ static void update_rw_regs(struct qcom_n
 {
 struct nand_chip *chip = &host->chip;
 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
- u32 cmd, cfg0, cfg1, ecc_bch_cfg;
+ u32 cmd, cfg0, cfg1, ecc_bch_cfg, read_location0;

 if (read) {
 if (host->use_ecc)
@@ -464,12 +598,20 @@ static void update_rw_regs(struct qcom_n

 cfg1 = host->cfg1;
 ecc_bch_cfg = host->ecc_bch_cfg;
+ if (read)
+ read_location0 = (0 << READ_LOCATION_OFFSET) |
+ (host->cw_data << READ_LOCATION_SIZE) |
+ (1 << READ_LOCATION_LAST);
 } else {
 cfg0 = (host->cfg0_raw & ~(7U << CW_PER_PAGE)) |
 (num_cw - 1) << CW_PER_PAGE;

 cfg1 = host->cfg1_raw;
 ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
+ if (read)
+ read_location0 = (0 << READ_LOCATION_OFFSET) |
+ (host->cw_size << READ_LOCATION_SIZE) |
+ (1 << READ_LOCATION_LAST);
 }

 nandc_set_reg(nandc, NAND_FLASH_CMD, cmd);
@@ -480,8 +622,104 @@ static void update_rw_regs(struct qcom_n
 nandc_set_reg(nandc, NAND_FLASH_STATUS, host->clrflashstatus);
 nandc_set_reg(nandc, NAND_READ_STATUS, host->clrreadstatus);
 nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
+
+ if (read)
+ nandc_set_reg(nandc, NAND_READ_LOCATION_0, read_location0);
+}
+
+/*
+ * Prepares the command descriptor for BAM DMA, which will be used for NAND
+ * register reads and writes. The command descriptor requires the command
+ * to be formed as command elements, so this function takes a command
+ * element from the bam transaction ce array and fills it with the
+ * required data. A single SGL can contain multiple command elements, so
+ * DMA_DESC_FLAG_BAM_NEXT_SGL is used to start a separate SGL
+ * after the current command element.
+ */
+static int prep_dma_desc_command(struct qcom_nand_controller *nandc, bool read,
+ int reg_off, const void *vaddr,
+ int size, unsigned int flags)
+{
+ int bam_ce_size;
+ int i;
+ struct bam_cmd_element *bam_ce_buffer;
+ struct bam_transaction *bam_txn = nandc->bam_txn;
+
+ bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_index];
+
+ /* fill the command desc */
+ for (i = 0; i < size; i++) {
+ if (read) {
+ qcom_prep_bam_ce(&bam_ce_buffer[i],
+ NAND_REG_PHYS_ADDRESS(nandc, reg_off + 4 * i),
+ BAM_READ_COMMAND,
+ REG_BUF_DMA_ADDR(nandc,
+ (unsigned int *)vaddr + i));
+ } else {
+ qcom_prep_bam_ce(&bam_ce_buffer[i],
+ NAND_REG_PHYS_ADDRESS(nandc, reg_off + 4 * i),
+ BAM_WRITE_COMMAND,
+ *((unsigned int *)vaddr + i));
+ }
+ }
+
+ /* use the separate sgl after this command */
+ if (flags & DMA_DESC_FLAG_BAM_NEXT_SGL) {
+ bam_ce_buffer = &bam_txn->bam_ce[bam_txn->pre_bam_ce_index];
+ bam_txn->bam_ce_index += size;
+ bam_ce_size = (bam_txn->bam_ce_index -
+ bam_txn->pre_bam_ce_index) *
+ sizeof(struct bam_cmd_element);
+ sg_set_buf(&bam_txn->cmd_sgl[bam_txn->cmd_sgl_cnt].sgl,
+ bam_ce_buffer,
+ bam_ce_size);
+ if (flags & DMA_DESC_FLAG_BAM_NWD)
+ bam_txn->cmd_sgl[bam_txn->cmd_sgl_cnt].dma_flags =
+ DESC_FLAG_NWD | DESC_FLAG_CMD;
+ else
+ bam_txn->cmd_sgl[bam_txn->cmd_sgl_cnt].dma_flags =
+ DESC_FLAG_CMD;
+
+ bam_txn->cmd_sgl_cnt++;
+ bam_txn->pre_bam_ce_index = bam_txn->bam_ce_index;
+ } else {
+ bam_txn->bam_ce_index += size;
+ }
+
+ return 0;
 }

+/*
+ * Prepares the data descriptor for BAM DMA which will be used for NAND
+ * data read and write.
+ */
+static int prep_dma_desc_data_bam(struct qcom_nand_controller *nandc, bool read,
+ int reg_off, const void *vaddr,
+ int size, unsigned int flags)
+{
+ struct bam_transaction *bam_txn = nandc->bam_txn;
+
+ if (read) {
+ sg_set_buf(&bam_txn->rx_sgl[bam_txn->rx_sgl_cnt].sgl,
+ vaddr, size);
+ bam_txn->rx_sgl[bam_txn->rx_sgl_cnt].dma_flags = 0;
+ bam_txn->rx_sgl_cnt++;
+ } else {
+ sg_set_buf(&bam_txn->tx_sgl[bam_txn->tx_sgl_cnt].sgl,
+ vaddr, size);
+ if (flags & DMA_DESC_FLAG_NO_EOT)
+ bam_txn->tx_sgl[bam_txn->tx_sgl_cnt].dma_flags = 0;
+ else
+ bam_txn->tx_sgl[bam_txn->tx_sgl_cnt].dma_flags =
+ DESC_FLAG_EOT;
+
+ bam_txn->tx_sgl_cnt++;
+ }
+
+ return 0;
+}
+
+/* Prepares the dma descriptor for the adm dma engine */
 static int prep_dma_desc(struct qcom_nand_controller *nandc, bool read,
 int reg_off, const void *vaddr, int size,
 bool flow_control)
@@ -560,7 +798,7 @@ err:
 * @num_regs: number of registers to read
 */
 static int read_reg_dma(struct qcom_nand_controller *nandc, int first,
- int num_regs)
+ int num_regs, unsigned int flags)
 {
 bool flow_control = false;
 void *vaddr;
@@ -569,10 +807,18 @@ static int read_reg_dma(struct qcom_nand
 if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
 flow_control = true;

- size = num_regs * sizeof(u32);
 vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
 nandc->reg_read_pos += num_regs;

+ if (nandc->dma_bam_enabled) {
+ size = num_regs;
+
+ return prep_dma_desc_command(nandc, true, first, vaddr, size,
+ flags);
+ }
+
+ size = num_regs * sizeof(u32);
+
 return prep_dma_desc(nandc, true, first, vaddr, size, flow_control);
 }

@@ -584,7 +830,7 @@ static int read_reg_dma(struct qcom_nand
 * @num_regs: number of registers to write
 */
 static int write_reg_dma(struct qcom_nand_controller *nandc, int first,
- int num_regs)
+ int num_regs, unsigned int flags)
 {
 bool flow_control = false;
 struct nandc_regs *regs = nandc->regs;
@@ -596,12 +842,29 @@ static int write_reg_dma(struct qcom_nan
 if (first == NAND_FLASH_CMD)
 flow_control = true;

+ if (first == NAND_ERASED_CW_DETECT_CFG) {
+ if (flags & DMA_DESC_ERASED_CW_SET)
+ vaddr = &regs->erased_cw_detect_cfg_set;
+ else
+ vaddr = &regs->erased_cw_detect_cfg_clr;
+ }
+
+ if (first == NAND_EXEC_CMD)
+ flags |= DMA_DESC_FLAG_BAM_NWD;
+
 if (first == NAND_DEV_CMD1_RESTORE)
 first = NAND_DEV_CMD1;

 if (first == NAND_DEV_CMD_VLD_RESTORE)
 first = NAND_DEV_CMD_VLD;

+ if (nandc->dma_bam_enabled) {
+ size = num_regs;
+
+ return prep_dma_desc_command(nandc, false, first, vaddr, size,
+ flags);
+ }
+
 size = num_regs * sizeof(u32);

 return prep_dma_desc(nandc, false, first, vaddr, size, flow_control);
@@ -616,8 +879,12 @@ static int write_reg_dma(struct qcom_nan
 * @size: DMA transaction size in bytes
 */
 static int read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
- const u8 *vaddr, int size)
+ const u8 *vaddr, int size, unsigned int flags)
 {
+ if (nandc->dma_bam_enabled)
+ return prep_dma_desc_data_bam(nandc, true, reg_off, vaddr, size,
+ flags);
+
 return prep_dma_desc(nandc, true, reg_off, vaddr, size, false);
 }

@@ -630,8 +897,12 @@ static int read_data_dma(struct qcom_nan
 * @size: DMA transaction size in bytes
 */
 static int write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
- const u8 *vaddr, int size)
+ const u8 *vaddr, int size, unsigned int flags)
 {
+ if (nandc->dma_bam_enabled)
+ return prep_dma_desc_data_bam(nandc, false, reg_off, vaddr,
+ size, flags);
+
 return prep_dma_desc(nandc, false, reg_off, vaddr, size, false);
 }

@@ -641,14 +912,57 @@ static int write_data_dma(struct qcom_na
 */
 static void config_cw_read(struct qcom_nand_controller *nandc)
 {
- write_reg_dma(nandc, NAND_FLASH_CMD, 3);
- write_reg_dma(nandc, NAND_DEV0_CFG0, 3);
- write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1);

- write_reg_dma(nandc, NAND_EXEC_CMD, 1);
+ write_reg_dma(nandc, NAND_FLASH_CMD, 3, 0);
+ write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
+ write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1, 0);
+
+ write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1, 0);
+ write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1,
+ DMA_DESC_ERASED_CW_SET);
+ if (nandc->dma_bam_enabled)
+ write_reg_dma(nandc, NAND_READ_LOCATION_0, 1,
+ DMA_DESC_FLAG_BAM_NEXT_SGL);

- read_reg_dma(nandc, NAND_FLASH_STATUS, 2);
- read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1);
+
+ write_reg_dma(nandc, NAND_EXEC_CMD, 1, DMA_DESC_FLAG_BAM_NWD |
+ DMA_DESC_FLAG_BAM_NEXT_SGL);
+
+ read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0);
+ read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1,
+ DMA_DESC_FLAG_BAM_NEXT_SGL);
+}
+
+/*
+ * Helper to prepare the DMA descriptors for configuring registers
+ * before reading a NAND page with BAM.
+ */
+static void config_bam_page_read(struct qcom_nand_controller *nandc)
+{
+ write_reg_dma(nandc, NAND_FLASH_CMD, 3, 0);
+ write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
+ write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1, 0);
+ write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1, 0);
+ write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1,
+ DMA_DESC_ERASED_CW_SET |
+ DMA_DESC_FLAG_BAM_NEXT_SGL);
+}
+
+/*
+ * Helper to prepare the DMA descriptors for configuring registers
+ * before reading each codeword in a NAND page with BAM.
+ */
+static void config_bam_cw_read(struct qcom_nand_controller *nandc)
+{
+ if (nandc->dma_bam_enabled)
+ write_reg_dma(nandc, NAND_READ_LOCATION_0, 4, 0);
+
+ write_reg_dma(nandc, NAND_FLASH_CMD, 1, DMA_DESC_FLAG_BAM_NEXT_SGL);
+ write_reg_dma(nandc, NAND_EXEC_CMD, 1, DMA_DESC_FLAG_BAM_NEXT_SGL);
+
+ read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0);
+ read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1,
+ DMA_DESC_FLAG_BAM_NEXT_SGL);
 }

 /*
@@ -657,19 +971,20 @@ static void config_cw_read(struct qcom_n
 */
 static void config_cw_write_pre(struct qcom_nand_controller *nandc)
 {
- write_reg_dma(nandc, NAND_FLASH_CMD, 3);
- write_reg_dma(nandc, NAND_DEV0_CFG0, 3);
- write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1);
+ write_reg_dma(nandc, NAND_FLASH_CMD, 3, 0);
+ write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
+ write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1,
+ DMA_DESC_FLAG_BAM_NEXT_SGL);
 }

 static void config_cw_write_post(struct qcom_nand_controller *nandc)
 {
- write_reg_dma(nandc, NAND_EXEC_CMD, 1);
+ write_reg_dma(nandc, NAND_EXEC_CMD, 1, DMA_DESC_FLAG_BAM_NEXT_SGL);

- read_reg_dma(nandc, NAND_FLASH_STATUS, 1);
+ read_reg_dma(nandc, NAND_FLASH_STATUS, 1, DMA_DESC_FLAG_BAM_NEXT_SGL);

- write_reg_dma(nandc, NAND_FLASH_STATUS, 1);
- write_reg_dma(nandc, NAND_READ_STATUS, 1);
+ write_reg_dma(nandc, NAND_FLASH_STATUS, 1, 0);
+ write_reg_dma(nandc, NAND_READ_STATUS, 1, DMA_DESC_FLAG_BAM_NEXT_SGL);
 }

 /*
@@ -683,6 +998,8 @@ static int nandc_param(struct qcom_nand_
 struct nand_chip *chip = &host->chip;
 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

+ clear_bam_transaction(nandc);
+
 /*
 * NAND_CMD_PARAM is called before we know much about the FLASH chip
 * in use. we configure the controller to perform a raw read of 512
@@ -715,9 +1032,13 @@ static int nandc_param(struct qcom_nand_

 nandc_set_reg(nandc, NAND_DEV_CMD1_RESTORE, nandc->cmd1);
 nandc_set_reg(nandc, NAND_DEV_CMD_VLD_RESTORE, nandc->vld);
+ nandc_set_reg(nandc, NAND_READ_LOCATION_0,
+ (0 << READ_LOCATION_OFFSET) |
+ (512 << READ_LOCATION_SIZE) |
+ (1 << READ_LOCATION_LAST));

- write_reg_dma(nandc, NAND_DEV_CMD_VLD, 1);
- write_reg_dma(nandc, NAND_DEV_CMD1, 1);
+ write_reg_dma(nandc, NAND_DEV_CMD_VLD, 1, 0);
+ write_reg_dma(nandc, NAND_DEV_CMD1, 1, DMA_DESC_FLAG_BAM_NEXT_SGL);

 nandc->buf_count = 512;
 memset(nandc->data_buffer, 0xff, nandc->buf_count);
@@ -725,11 +1046,12 @@ static int nandc_param(struct qcom_nand_
 config_cw_read(nandc);

 read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
- nandc->buf_count);
+ nandc->buf_count, 0);

 /* restore CMD1 and VLD regs */
- write_reg_dma(nandc, NAND_DEV_CMD1_RESTORE, 1);
- write_reg_dma(nandc, NAND_DEV_CMD_VLD_RESTORE, 1);
+ write_reg_dma(nandc, NAND_DEV_CMD1_RESTORE, 1, 0);
+ write_reg_dma(nandc, NAND_DEV_CMD_VLD_RESTORE, 1,
+ DMA_DESC_FLAG_BAM_NEXT_SGL);

 return 0;
 }
@@ -740,6 +1062,8 @@ static int erase_block(struct qcom_nand_
 struct nand_chip *chip = &host->chip;
 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

+ clear_bam_transaction(nandc);
+
 nandc_set_reg(nandc, NAND_FLASH_CMD,
 BLOCK_ERASE | PAGE_ACC | LAST_PAGE);
 nandc_set_reg(nandc, NAND_ADDR0, page_addr);
@@ -751,14 +1075,15 @@ static int erase_block(struct qcom_nand_
 nandc_set_reg(nandc, NAND_FLASH_STATUS, host->clrflashstatus);
 nandc_set_reg(nandc, NAND_READ_STATUS, host->clrreadstatus);

- write_reg_dma(nandc, NAND_FLASH_CMD, 3);
- write_reg_dma(nandc, NAND_DEV0_CFG0, 2);
- write_reg_dma(nandc, NAND_EXEC_CMD, 1);

- read_reg_dma(nandc, NAND_FLASH_STATUS, 1);
+ write_reg_dma(nandc, NAND_FLASH_CMD, 3, DMA_DESC_FLAG_BAM_NEXT_SGL);
+ write_reg_dma(nandc, NAND_DEV0_CFG0, 2, DMA_DESC_FLAG_BAM_NEXT_SGL);
+ write_reg_dma(nandc, NAND_EXEC_CMD, 1, DMA_DESC_FLAG_BAM_NEXT_SGL);
+
+ read_reg_dma(nandc, NAND_FLASH_STATUS, 1, DMA_DESC_FLAG_BAM_NEXT_SGL);

- write_reg_dma(nandc, NAND_FLASH_STATUS, 1);
- write_reg_dma(nandc, NAND_READ_STATUS, 1);
+ write_reg_dma(nandc, NAND_FLASH_STATUS, 1, 0);
+ write_reg_dma(nandc, NAND_READ_STATUS, 1, DMA_DESC_FLAG_BAM_NEXT_SGL);

 return 0;
 }
@@ -772,16 +1097,19 @@ static int read_id(struct qcom_nand_host
 if (column == -1)
 return 0;

+ clear_bam_transaction(nandc);
+
 nandc_set_reg(nandc, NAND_FLASH_CMD, FETCH_ID);
 nandc_set_reg(nandc, NAND_ADDR0, column);
 nandc_set_reg(nandc, NAND_ADDR1, 0);
- nandc_set_reg(nandc, NAND_FLASH_CHIP_SELECT, DM_EN);
+ nandc_set_reg(nandc, NAND_FLASH_CHIP_SELECT,
+ nandc->dma_bam_enabled ? 0 : DM_EN);
 nandc_set_reg(nandc, NAND_EXEC_CMD, 1);

- write_reg_dma(nandc, NAND_FLASH_CMD, 4);
- write_reg_dma(nandc, NAND_EXEC_CMD, 1);
+ write_reg_dma(nandc, NAND_FLASH_CMD, 4, DMA_DESC_FLAG_BAM_NEXT_SGL);
+ write_reg_dma(nandc, NAND_EXEC_CMD, 1, DMA_DESC_FLAG_BAM_NEXT_SGL);

- read_reg_dma(nandc, NAND_READ_ID, 1);
+ read_reg_dma(nandc, NAND_READ_ID, 1, DMA_DESC_FLAG_BAM_NEXT_SGL);

 return 0;
 }
@@ -792,28 +1120,108 @@ static int reset(struct qcom_nand_host *
 struct nand_chip *chip = &host->chip;
 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

+ clear_bam_transaction(nandc);
+
 nandc_set_reg(nandc, NAND_FLASH_CMD, RESET_DEVICE);
 nandc_set_reg(nandc, NAND_EXEC_CMD, 1);

- write_reg_dma(nandc, NAND_FLASH_CMD, 1);
- write_reg_dma(nandc, NAND_EXEC_CMD, 1);
+ write_reg_dma(nandc, NAND_FLASH_CMD, 1, DMA_DESC_FLAG_BAM_NEXT_SGL);
+ write_reg_dma(nandc, NAND_EXEC_CMD, 1, DMA_DESC_FLAG_BAM_NEXT_SGL);

- read_reg_dma(nandc, NAND_FLASH_STATUS, 1);
+ read_reg_dma(nandc, NAND_FLASH_STATUS, 1, DMA_DESC_FLAG_BAM_NEXT_SGL);

 return 0;
 }

+static int prepare_bam_async_desc(struct qcom_nand_controller *nandc,
+ struct dma_chan *chan,
+ struct qcom_bam_sgl *bam_sgl,
+ int sgl_cnt,
+ enum dma_transfer_direction direction)
+{
+ struct desc_info *desc;
+ struct dma_async_tx_descriptor *dma_desc;
+
+ if (!qcom_bam_map_sg(nandc->dev, bam_sgl, sgl_cnt, direction)) {
+ dev_err(nandc->dev, "failure in mapping sgl\n");
+ return -ENOMEM;
+ }
+
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc) {
+ qcom_bam_unmap_sg(nandc->dev, bam_sgl, sgl_cnt, direction);
+ return -ENOMEM;
+ }
+
+
+ desc->bam_desc_data.dir = direction;
+ desc->bam_desc_data.sgl_cnt = sgl_cnt;
+ desc->bam_desc_data.bam_sgl = bam_sgl;
+
+ dma_desc = dmaengine_prep_dma_custom_mapping(chan,
+ &desc->bam_desc_data,
+ 0);
+
+ if (!dma_desc) {
+ dev_err(nandc->dev, "failure in cmd prep desc\n");
+ qcom_bam_unmap_sg(nandc->dev, bam_sgl, sgl_cnt, direction);
+ kfree(desc);
+ return -EINVAL;
+ }
+
+ desc->dma_desc = dma_desc;
+
+ list_add_tail(&desc->node, &nandc->desc_list);
+
+ return 0;
+
+}
+
 /* helpers to submit/free our list of dma descriptors */
 static int submit_descs(struct qcom_nand_controller *nandc)
 {
 struct desc_info *desc;
 dma_cookie_t cookie = 0;
+ struct bam_transaction *bam_txn = nandc->bam_txn;
+ int r;
+
+ if (nandc->dma_bam_enabled) {
+ if (bam_txn->rx_sgl_cnt) {
+ r = prepare_bam_async_desc(nandc, nandc->rx_chan,
+ bam_txn->rx_sgl, bam_txn->rx_sgl_cnt,
+ DMA_DEV_TO_MEM);
+ if (r)
+ return r;
+ }
+
+ if (bam_txn->tx_sgl_cnt) {
+ r = prepare_bam_async_desc(nandc, nandc->tx_chan,
+ bam_txn->tx_sgl, bam_txn->tx_sgl_cnt,
+ DMA_MEM_TO_DEV);
+ if (r)
+ return r;
+ }
+
+ r = prepare_bam_async_desc(nandc, nandc->cmd_chan,
+ bam_txn->cmd_sgl, bam_txn->cmd_sgl_cnt,
+ DMA_MEM_TO_DEV);
+ if (r)
+ return r;
+ }

 list_for_each_entry(desc, &nandc->desc_list, node)
 cookie = dmaengine_submit(desc->dma_desc);

- if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
- return -ETIMEDOUT;
+ if (nandc->dma_bam_enabled) {
+ dma_async_issue_pending(nandc->tx_chan);
+ dma_async_issue_pending(nandc->rx_chan);
+
+ if (dma_sync_wait(nandc->cmd_chan, cookie) != DMA_COMPLETE)
+ return -ETIMEDOUT;
+ } else {
+ if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
+ return -ETIMEDOUT;
+ }

 return 0;
 }
@@ -824,7 +1232,16 @@ static void free_descs(struct qcom_nand_

 list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
 list_del(&desc->node);
- dma_unmap_sg(nandc->dev, &desc->sgl, 1, desc->dir);
+
+ if (nandc->dma_bam_enabled)
+ qcom_bam_unmap_sg(nandc->dev,
+ desc->bam_desc_data.bam_sgl,
+ desc->bam_desc_data.sgl_cnt,
+ desc->bam_desc_data.dir);
+ else
+ dma_unmap_sg(nandc->dev, &desc->sgl, 1,
+ desc->dir);
+
 kfree(desc);
 }
 }
@@ -1135,6 +1552,9 @@ static int read_page_ecc(struct qcom_nan
 struct nand_ecc_ctrl *ecc = &chip->ecc;
 int i, ret;

+ if (nandc->dma_bam_enabled)
+ config_bam_page_read(nandc);
+
 /* queue cmd descs for each codeword */
 for (i = 0; i < ecc->steps; i++) {
 int data_size, oob_size;
@@ -1148,11 +1568,36 @@ static int read_page_ecc(struct qcom_nan
 oob_size = host->ecc_bytes_hw + host->spare_bytes;
 }

- config_cw_read(nandc);
+ if (nandc->dma_bam_enabled) {
+ if (data_buf && oob_buf) {
+ nandc_set_reg(nandc, NAND_READ_LOCATION_0,
+ (0 << READ_LOCATION_OFFSET) |
+ (data_size << READ_LOCATION_SIZE) |
+ (0 << READ_LOCATION_LAST));
+ nandc_set_reg(nandc, NAND_READ_LOCATION_1,
+ (data_size << READ_LOCATION_OFFSET) |
+ (oob_size << READ_LOCATION_SIZE) |
+ (1 << READ_LOCATION_LAST));
+ } else if (data_buf) {
+ nandc_set_reg(nandc, NAND_READ_LOCATION_0,
+ (0 << READ_LOCATION_OFFSET) |
+ (data_size << READ_LOCATION_SIZE) |
+ (1 << READ_LOCATION_LAST));
+ } else {
+ nandc_set_reg(nandc, NAND_READ_LOCATION_0,
+ (data_size << READ_LOCATION_OFFSET) |
+ (oob_size << READ_LOCATION_SIZE) |
+ (1 << READ_LOCATION_LAST));
+ }
+
+ config_bam_cw_read(nandc);
+ } else {
+ config_cw_read(nandc);
+ }

 if (data_buf)
 read_data_dma(nandc, FLASH_BUF_ACC, data_buf,
- data_size);
+ data_size, 0);

 /*
 * when ecc is enabled, the controller doesn't read the real
@@ -1168,7 +1613,7 @@ static int read_page_ecc(struct qcom_nan
 *oob_buf++ = 0xff;

 read_data_dma(nandc, FLASH_BUF_ACC + data_size,
- oob_buf, oob_size);
+ oob_buf, oob_size, 0);
 }

 if (data_buf)
@@ -1207,10 +1652,14 @@ static int copy_last_cw(struct qcom_nand

 set_address(host, host->cw_size * (ecc->steps - 1), page);
 update_rw_regs(host, 1, true);
+ nandc_set_reg(nandc, NAND_READ_LOCATION_0,
+ (0 << READ_LOCATION_OFFSET) |
+ (size << READ_LOCATION_SIZE) |
+ (1 << READ_LOCATION_LAST));

 config_cw_read(nandc);

- read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size);
+ read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size, 0);

 ret = submit_descs(nandc);
 if (ret)
@@ -1233,6 +1682,7 @@ static int qcom_nandc_read_page(struct m
 data_buf = buf;
 oob_buf = oob_required ? chip->oob_poi : NULL;

+ clear_bam_transaction(nandc);
 ret = read_page_ecc(host, data_buf, oob_buf);
 if (ret) {
 dev_err(nandc->dev, "failure to read page\n");
@@ -1252,13 +1702,19 @@ static int qcom_nandc_read_page_raw(stru
 u8 *data_buf, *oob_buf;
 struct nand_ecc_ctrl *ecc = &chip->ecc;
 int i, ret;
+ int read_location;

 data_buf = buf;
 oob_buf = chip->oob_poi;

 host->use_ecc = false;
+
+ clear_bam_transaction(nandc);
 update_rw_regs(host, ecc->steps, true);

+ if (nandc->dma_bam_enabled)
+ config_bam_page_read(nandc);
+
 for (i = 0; i < ecc->steps; i++) {
 int data_size1, data_size2, oob_size1, oob_size2;
 int reg_off = FLASH_BUF_ACC;
@@ -1276,21 +1732,49 @@ static int qcom_nandc_read_page_raw(stru
 oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
 }

- config_cw_read(nandc);
+ if (nandc->dma_bam_enabled) {
+ read_location = 0;
+ nandc_set_reg(nandc, NAND_READ_LOCATION_0,
+ (read_location << READ_LOCATION_OFFSET) |
+ (data_size1 << READ_LOCATION_SIZE) |
+ (0 << READ_LOCATION_LAST));
+ read_location += data_size1;
+
+ nandc_set_reg(nandc, NAND_READ_LOCATION_1,
+ (read_location << READ_LOCATION_OFFSET) |
+ (oob_size1 << READ_LOCATION_SIZE) |
+ (0 << READ_LOCATION_LAST));
+ read_location += oob_size1;
+
+ nandc_set_reg(nandc, NAND_READ_LOCATION_2,
+ (read_location << READ_LOCATION_OFFSET) |
+ (data_size2 << READ_LOCATION_SIZE) |
+ (0 << READ_LOCATION_LAST));
+ read_location += data_size2;
+
+ nandc_set_reg(nandc, NAND_READ_LOCATION_3,
+ (read_location << READ_LOCATION_OFFSET) |
+ (oob_size2 << READ_LOCATION_SIZE) |
+ (1 << READ_LOCATION_LAST));
+
+ config_bam_cw_read(nandc);
+ } else {
+ config_cw_read(nandc);
+ }

- read_data_dma(nandc, reg_off, data_buf, data_size1);
+ read_data_dma(nandc, reg_off, data_buf, data_size1, 0);
 reg_off += data_size1;
 data_buf += data_size1;

- read_data_dma(nandc, reg_off, oob_buf, oob_size1);
+ read_data_dma(nandc, reg_off, oob_buf, oob_size1, 0);
 reg_off += oob_size1;
 oob_buf += oob_size1;

- read_data_dma(nandc, reg_off, data_buf, data_size2);
+ read_data_dma(nandc, reg_off, data_buf, data_size2, 0);
 reg_off += data_size2;
 data_buf += data_size2;

- read_data_dma(nandc, reg_off, oob_buf, oob_size2);
+ read_data_dma(nandc, reg_off, oob_buf, oob_size2, 0);
 oob_buf += oob_size2;
 }

@@ -1313,6 +1797,7 @@ static int qcom_nandc_read_oob(struct mt
 int ret;

 clear_read_regs(nandc);
+ clear_bam_transaction(nandc);

 host->use_ecc = true;
 set_address(host, 0, page);
@@ -1336,6 +1821,7 @@ static int qcom_nandc_write_page(struct
 int i, ret;

 clear_read_regs(nandc);
+ clear_bam_transaction(nandc);

 data_buf = (u8 *)buf;
 oob_buf = chip->oob_poi;
@@ -1357,7 +1843,8 @@ static int qcom_nandc_write_page(struct

 config_cw_write_pre(nandc);

- write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size);
+ write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size,
+ i == (ecc->steps - 1) ? DMA_DESC_FLAG_NO_EOT : 0);

 /*
 * when ECC is enabled, we don't really need to write anything
@@ -1370,7 +1857,7 @@ static int qcom_nandc_write_page(struct
 oob_buf += host->bbm_size;

 write_data_dma(nandc, FLASH_BUF_ACC + data_size,
- oob_buf, oob_size);
+ oob_buf, oob_size, 0);
 }

 config_cw_write_post(nandc);
@@ -1400,6 +1887,7 @@ static int qcom_nandc_write_page_raw(str
 int i, ret;

 clear_read_regs(nandc);
+ clear_bam_transaction(nandc);

 data_buf = (u8 *)buf;
 oob_buf = chip->oob_poi;
@@ -1426,19 +1914,22 @@ static int qcom_nandc_write_page_raw(str

 config_cw_write_pre(nandc);

- write_data_dma(nandc, reg_off, data_buf, data_size1);
+ write_data_dma(nandc, reg_off, data_buf, data_size1,
+ DMA_DESC_FLAG_NO_EOT);
 reg_off += data_size1;
 data_buf += data_size1;

- write_data_dma(nandc, reg_off, oob_buf, oob_size1);
+ write_data_dma(nandc, reg_off, oob_buf, oob_size1,
+ DMA_DESC_FLAG_NO_EOT);
 reg_off += oob_size1;
 oob_buf += oob_size1;

- write_data_dma(nandc, reg_off, data_buf, data_size2);
+ write_data_dma(nandc, reg_off, data_buf, data_size2,
+ DMA_DESC_FLAG_NO_EOT);
 reg_off += data_size2;
 data_buf += data_size2;

- write_data_dma(nandc, reg_off, oob_buf, oob_size2);
+ write_data_dma(nandc, reg_off, oob_buf, oob_size2, 0);
 oob_buf += oob_size2;

 config_cw_write_post(nandc);
@@ -1474,6 +1965,7 @@ static int qcom_nandc_write_oob(struct m

 host->use_ecc = true;

+ clear_bam_transaction(nandc);
 ret = copy_last_cw(host, page);
 if (ret)
 return ret;
@@ -1493,7 +1985,7 @@ static int qcom_nandc_write_oob(struct m

 config_cw_write_pre(nandc);
 write_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
- data_size + oob_size);
+ data_size + oob_size, 0);
 config_cw_write_post(nandc);

 ret = submit_descs(nandc);
@@ -1531,6 +2023,7 @@ static int qcom_nandc_block_bad(struct m
 */
 host->use_ecc = false;

+ clear_bam_transaction(nandc);
 ret = copy_last_cw(host, page);
 if (ret)
 goto err;
@@ -1561,6 +2054,7 @@ static int qcom_nandc_block_markbad(stru
 int page, ret, status = 0;

 clear_read_regs(nandc);
+ clear_bam_transaction(nandc);

 /*
 * to mark the BBM as bad, we flash the entire last codeword with 0s.
@@ -1577,7 +2071,8 @@ static int qcom_nandc_block_markbad(stru
 update_rw_regs(host, 1, false);

 config_cw_write_pre(nandc);
- write_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, host->cw_size);
+ write_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
+ host->cw_size, 0);
 config_cw_write_post(nandc);

 ret = submit_descs(nandc);
@@ -1937,6 +2432,8 @@ static int qcom_nand_host_setup(struct q

 host->clrflashstatus = FS_READY_BSY_N;
 host->clrreadstatus = 0xc0;
+ nandc->regs->erased_cw_detect_cfg_clr = CLR_ERASED_PAGE_DET;
+ nandc->regs->erased_cw_detect_cfg_set = SET_ERASED_PAGE_DET;

 dev_dbg(nandc->dev,
 "cfg0 %x cfg1 %x ecc_buf_cfg %x ecc_bch cfg %x cw_size %d cw_data %d strength %d parity_bytes %d steps %d\n",
@@ -2015,6 +2512,12 @@ static int qcom_nandc_alloc(struct qcom_
 dev_err(nandc->dev, "failed to request cmd channel\n");
 return -ENODEV;
 }
+
+ nandc->bam_txn = alloc_bam_transaction(nandc);
+ if (!nandc->bam_txn) {
+ dev_err(nandc->dev, "failed to allocate bam transaction\n");
+ return -ENOMEM;
+ }
 }

 INIT_LIST_HEAD(&nandc->desc_list);
@@ -2050,6 +2553,9 @@ static void qcom_nandc_unalloc(struct qc
 devm_kfree(nandc->dev, nandc->reg_read_buf);
 }

+ if (nandc->bam_txn)
+ devm_kfree(nandc->dev, nandc->bam_txn);
+
 if (nandc->regs)
 devm_kfree(nandc->dev, nandc->regs);

@@ -2060,12 +2566,19 @@ static void qcom_nandc_unalloc(struct qc
 /* one time setup of a few nand controller registers */
 static int qcom_nandc_setup(struct qcom_nand_controller *nandc)
 {
+ u32 nand_ctrl;
+
 /* kill onenand */
 nandc_write(nandc, SFLASHC_BURST_CFG, 0);
 nandc_write(nandc, NAND_DEV_CMD_VLD, NAND_DEV_CMD_VLD_VAL);

- /* enable ADM DMA */
- nandc_write(nandc, NAND_FLASH_CHIP_SELECT, DM_EN);
+ /* enable ADM or BAM DMA */
+ if (!nandc->dma_bam_enabled) {
+ nandc_write(nandc, NAND_FLASH_CHIP_SELECT, DM_EN);
+ } else {
+ nand_ctrl = nandc_read(nandc, NAND_CTRL);
+ nandc_write(nandc, NAND_CTRL, nand_ctrl | BAM_MODE_EN);
+ }

 /* save the original values of these registers */
 nandc->cmd1 = nandc_read(nandc, NAND_DEV_CMD1);
--- /dev/null
+++ b/include/linux/dma/qcom_bam_dma.h
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _QCOM_BAM_DMA_H
+#define _QCOM_BAM_DMA_H
+
+#include <linux/dma-mapping.h>
+
+#define DESC_FLAG_INT BIT(15)
+#define DESC_FLAG_EOT BIT(14)
+#define DESC_FLAG_EOB BIT(13)
+#define DESC_FLAG_NWD BIT(12)
+#define DESC_FLAG_CMD BIT(11)
+
+/*
+ * QCOM BAM DMA SGL struct
+ *
+ * @sgl: DMA SGL
+ * @dma_flags: BAM DMA flags
+ */
+struct qcom_bam_sgl {
+ struct scatterlist sgl;
+ unsigned int dma_flags;
+};
+
+/*
+ * This data type corresponds to the native Command Element
+ * supported by BAM DMA Engine.
+ *
+ * @addr - register address.
+ * @command - command type.
+ * @data - for write command: content to be written into peripheral register.
+ * for read command: dest addr to write peripheral register value to.
+ * @mask - register mask.
+ * @reserved - for future usage.
+ *
+ */
+struct bam_cmd_element {
+ __le32 addr:24;
+ __le32 command:8;
+ __le32 data;
+ __le32 mask;
+ __le32 reserved;
+};
+
+/*
+ * This enum indicates the command type in a command element
+ */
+enum bam_command_type {
+ BAM_WRITE_COMMAND = 0,
+ BAM_READ_COMMAND,
+};
+
+/*
+ * qcom_bam_sg_init_table - Init QCOM BAM SGL
+ * @bam_sgl: bam sgl
+ * @nents: number of entries in bam sgl
+ *
+ * This function performs the initialization for each SGL in BAM SGL
+ * with generic SGL API.
+ */
+static inline void qcom_bam_sg_init_table(struct qcom_bam_sgl *bam_sgl,
+ unsigned int nents)
+{
+ int i;
+
+ for (i = 0; i < nents; i++)
+ sg_init_table(&bam_sgl[i].sgl, 1);
+}
+
+/*
+ * qcom_bam_unmap_sg - Unmap QCOM BAM SGL
+ * @dev: device for which unmapping needs to be done
+ * @bam_sgl: bam sgl
+ * @nents: number of entries in bam sgl
+ * @dir: dma transfer direction
+ *
+ * This function performs the DMA unmapping for each SGL in BAM SGL
+ * with generic SGL API.
+ */
+static inline void qcom_bam_unmap_sg(struct device *dev,
+ struct qcom_bam_sgl *bam_sgl, int nents, enum dma_data_direction dir)
+{
+ int i;
+
+ for (i = 0; i < nents; i++)
+ dma_unmap_sg(dev, &bam_sgl[i].sgl, 1, dir);
+}
+
+/*
+ * qcom_bam_map_sg - Map QCOM BAM SGL
+ * @dev: device for which mapping needs to be done
+ * @bam_sgl: bam sgl
+ * @nents: number of entries in bam sgl
+ * @dir: dma transfer direction
+ *
+ * This function performs the DMA mapping for each SGL in BAM SGL
+ * with generic SGL API.
+ *
+ * returns 0 on error and > 0 on success
+ */
+static inline int qcom_bam_map_sg(struct device *dev,
+ struct qcom_bam_sgl *bam_sgl, int nents, enum dma_data_direction dir)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < nents; i++) {
+ ret = dma_map_sg(dev, &bam_sgl[i].sgl, 1, dir);
+ if (!ret)
+ break;
+ }
+
+ /* unmap the mapped sgl from previous loop in case of error */
+ if (!ret)
+ qcom_bam_unmap_sg(dev, bam_sgl, i, dir);
+
+ return ret;
+}
+
+/*
+ * qcom_prep_bam_ce - Wrapper function to prepare a single BAM command element
+ * with the data that is passed to this function.
+ * @bam_ce: bam command element
+ * @addr: target address
+ * @command: command in bam_command_type
+ * @data: actual data for write and dest addr for read
+ */
+static inline void qcom_prep_bam_ce(struct bam_cmd_element *bam_ce,
+ uint32_t addr, uint32_t command, uint32_t data)
+{
+ bam_ce->addr = cpu_to_le32(addr);
+ bam_ce->command = cpu_to_le32(command);
+ bam_ce->data = cpu_to_le32(data);
+ bam_ce->mask = 0xFFFFFFFF;
+}
+#endif