1 From 645c7805f2602569263d7ac78050b2c9e91e3377 Mon Sep 17 00:00:00 2001
2 From: Ram Chandra Jangir <rjangir@codeaurora.org>
3 Date: Thu, 20 Apr 2017 10:23:00 +0530
4 Subject: [PATCH] qcom: mtd: nand: Added bam transaction and support
5 additional CSRs
6
7 This patch adds the following for NAND BAM DMA support:
8 - BAM transaction, which will be used for every NAND request.
9 It contains the array of command elements and the command and
10 data SGLs. This transaction will be reset before every
11 request.
12 - Allocation function for NAND BAM transaction which will be
13 called only once at probe time.
14 - Reset function for NAND BAM transaction which will be called
15 before any new NAND request.
16 - Add support for additional CSRs.
17 NAND_READ_LOCATION - read location (offset/size) within a codeword for BAM DMA mode
18 NAND_ERASED_CW_DETECT_CFG - configuration for erased codeword detection
19 NAND_BUFFER_STATUS - ECC status
20
21 Signed-off-by: Abhishek Sahu <absahu@codeaurora.org>
22 Signed-off-by: Ram Chandra Jangir <rjangir@codeaurora.org>
23 ---
24 drivers/mtd/nand/qcom_nandc.c | 631 +++++++++++++++++++++++++++++++++++----
25 include/linux/dma/qcom_bam_dma.h | 149 +++++++++
26 2 files changed, 721 insertions(+), 59 deletions(-)
27 create mode 100644 include/linux/dma/qcom_bam_dma.h
28
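Note for reviewers: a minimal sketch of the intended BAM transaction lifecycle,
using the helpers added by this patch (names are taken from the patch; error
handling and the descriptor queueing for an actual request are elided):

	/* once, at probe time (qcom_nandc_alloc) */
	nandc->bam_txn = alloc_bam_transaction(nandc);
	if (!nandc->bam_txn)
		return -ENOMEM;

	/* before every NAND request */
	clear_bam_transaction(nandc);
	/* ... queue command/data descriptors for the request ... */
	ret = submit_descs(nandc);
	free_descs(nandc);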
29 diff --git a/drivers/mtd/nand/qcom_nandc.c b/drivers/mtd/nand/qcom_nandc.c
30 index 76a0ffc..9d941e3 100644
31 --- a/drivers/mtd/nand/qcom_nandc.c
32 +++ b/drivers/mtd/nand/qcom_nandc.c
33 @@ -22,6 +22,7 @@
34 #include <linux/of.h>
35 #include <linux/of_device.h>
36 #include <linux/delay.h>
37 +#include <linux/dma/qcom_bam_dma.h>
38
39 /* NANDc reg offsets */
40 #define NAND_FLASH_CMD 0x00
41 @@ -53,6 +54,8 @@
42 #define NAND_VERSION 0xf08
43 #define NAND_READ_LOCATION_0 0xf20
44 #define NAND_READ_LOCATION_1 0xf24
45 +#define NAND_READ_LOCATION_2 0xf28
46 +#define NAND_READ_LOCATION_3 0xf2c
47
48 /* dummy register offsets, used by write_reg_dma */
49 #define NAND_DEV_CMD1_RESTORE 0xdead
50 @@ -131,6 +134,11 @@
51 #define ERASED_PAGE (PAGE_ALL_ERASED | PAGE_ERASED)
52 #define ERASED_CW (CODEWORD_ALL_ERASED | CODEWORD_ERASED)
53
54 +/* NAND_READ_LOCATION_n bits */
55 +#define READ_LOCATION_OFFSET 0
56 +#define READ_LOCATION_SIZE 16
57 +#define READ_LOCATION_LAST 31
58 +
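+/*
+ * Example encoding (illustrative sketch only, based on the macros above):
+ * a read of one 516-byte ECC chunk at offset 0, marked as the last
+ * location in the codeword, would be programmed as:
+ *
+ *	(0 << READ_LOCATION_OFFSET) |
+ *	(516 << READ_LOCATION_SIZE) |
+ *	(1 << READ_LOCATION_LAST)
+ */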
59 /* Version Mask */
60 #define NAND_VERSION_MAJOR_MASK 0xf0000000
61 #define NAND_VERSION_MAJOR_SHIFT 28
62 @@ -148,6 +156,9 @@
63 #define FETCH_ID 0xb
64 #define RESET_DEVICE 0xd
65
66 +/* NAND_CTRL bits */
67 +#define BAM_MODE_EN BIT(0)
68 +
69 /*
70 * the NAND controller performs reads/writes with ECC in 516 byte chunks.
71 * the driver calls the chunks 'step' or 'codeword' interchangeably
72 @@ -169,12 +180,77 @@
73 #define ECC_BCH_4BIT BIT(2)
74 #define ECC_BCH_8BIT BIT(3)
75
76 +/* Flags used for BAM DMA desc preparation */
77 +/* Don't set the EOT in current tx sgl */
78 +#define DMA_DESC_FLAG_NO_EOT (0x0001)
79 +/* Set the NWD flag in current sgl */
80 +#define DMA_DESC_FLAG_BAM_NWD (0x0002)
81 +/* Close current sgl and start writing in another sgl */
82 +#define DMA_DESC_FLAG_BAM_NEXT_SGL (0x0004)
83 +/*
84 + * The erased codeword detect config register is written twice in a single
85 + * transfer, so this flag selects which value (set or clear) is written
86 + */
87 +#define DMA_DESC_ERASED_CW_SET (0x0008)
88 +
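+/*
+ * How these flags combine in practice (illustrative sketch; the exact
+ * sequences appear in config_cw_read() and friends below): the EXEC_CMD
+ * write gets NWD plus NEXT_SGL so the command sgl is closed with a
+ * notify-when-done barrier, and the final status read gets NEXT_SGL to
+ * close the read sgl:
+ *
+ *	write_reg_dma(nandc, NAND_EXEC_CMD, 1, DMA_DESC_FLAG_BAM_NWD |
+ *		      DMA_DESC_FLAG_BAM_NEXT_SGL);
+ *	read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1,
+ *		     DMA_DESC_FLAG_BAM_NEXT_SGL);
+ */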
89 +/* Returns the dma address for reg read buffer */
90 +#define REG_BUF_DMA_ADDR(chip, vaddr) \
91 + ((chip)->reg_read_buf_phys + \
92 + ((uint8_t *)(vaddr) - (uint8_t *)(chip)->reg_read_buf))
93 +
94 +/* Returns the nand register physical address */
95 +#define NAND_REG_PHYS_ADDRESS(chip, addr) \
96 + ((chip)->base_dma + (addr))
97 +
98 +/* command element array size in bam transaction */
99 +#define BAM_CMD_ELEMENT_SIZE (256)
100 +/* command sgl size in bam transaction */
101 +#define BAM_CMD_SGL_SIZE (256)
102 +/* data sgl size in bam transaction */
103 +#define BAM_DATA_SGL_SIZE (128)
104 +
105 +/*
106 + * This data type corresponds to the BAM transaction which will be used for
107 + * every NAND request.
108 + * @bam_ce - the array of bam command elements
109 + * @cmd_sgl - sgl for nand bam command pipe
110 + * @tx_sgl - sgl for nand bam consumer pipe
111 + * @rx_sgl - sgl for nand bam producer pipe
112 + * @bam_ce_index - the index in bam_ce which is available for the next request
113 + * @pre_bam_ce_index - the index in bam_ce marking the first command element
114 + *                     of the current sgl; used for size calculation of
115 + *                     the current sgl
116 + * @cmd_sgl_cnt - number of entries in the command sgl
117 + * @tx_sgl_cnt - number of entries in the tx sgl
118 + * @rx_sgl_cnt - number of entries in the rx sgl
119 + */
120 +struct bam_transaction {
121 + struct bam_cmd_element bam_ce[BAM_CMD_ELEMENT_SIZE];
122 + struct qcom_bam_sgl cmd_sgl[BAM_CMD_SGL_SIZE];
123 + struct qcom_bam_sgl tx_sgl[BAM_DATA_SGL_SIZE];
124 + struct qcom_bam_sgl rx_sgl[BAM_DATA_SGL_SIZE];
125 + uint32_t bam_ce_index;
126 + uint32_t pre_bam_ce_index;
127 + uint32_t cmd_sgl_cnt;
128 + uint32_t tx_sgl_cnt;
129 + uint32_t rx_sgl_cnt;
130 +};
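+/*
+ * Worked example of the index bookkeeping above (illustrative numbers):
+ * queueing three command elements and then closing the sgl with
+ * DMA_DESC_FLAG_BAM_NEXT_SGL advances bam_ce_index from 0 to 3, sets the
+ * sgl length to (3 - 0) * sizeof(struct bam_cmd_element), bumps
+ * cmd_sgl_cnt to 1 and moves pre_bam_ce_index up to 3 so the next sgl
+ * starts from a clean base.
+ */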
131 +
132 +/*
133 + * This data type corresponds to the NAND DMA descriptor
134 + * @node - list node for the controller's desc_list
135 + * @dir - DMA transfer direction
136 + * @sgl - sgl which will be used for a single sgl dma descriptor
137 + * @dma_desc - low level dma engine descriptor
138 + * @bam_desc_data - used for bam desc mappings
139 + */
140 struct desc_info {
141 struct list_head node;
142
143 enum dma_data_direction dir;
144 struct scatterlist sgl;
145 struct dma_async_tx_descriptor *dma_desc;
146 + struct qcom_bam_custom_data bam_desc_data;
147 };
148
149 /*
150 @@ -202,6 +278,13 @@ struct nandc_regs {
151 __le32 orig_vld;
152
153 __le32 ecc_buf_cfg;
154 + __le32 read_location0;
155 + __le32 read_location1;
156 + __le32 read_location2;
157 + __le32 read_location3;
158 +
159 + __le32 erased_cw_detect_cfg_clr;
160 + __le32 erased_cw_detect_cfg_set;
161 };
162
163 /*
164 @@ -217,6 +300,7 @@ struct nandc_regs {
165 * @aon_clk: another controller clock
166 *
167 * @chan: dma channel
168 + * @bam_txn: contains the bam transaction address
169 * @cmd_crci: ADM DMA CRCI for command flow control
170 * @data_crci: ADM DMA CRCI for data flow control
171 * @desc_list: DMA descriptor list (list of desc_infos)
172 @@ -242,6 +326,7 @@ struct nandc_regs {
173 struct qcom_nand_controller {
174 struct nand_hw_control controller;
175 struct list_head host_list;
176 + struct bam_transaction *bam_txn;
177
178 struct device *dev;
179
180 @@ -342,6 +427,45 @@ struct qcom_nand_driver_data {
181 bool dma_bam_enabled;
182 };
183
184 +/* Allocates and initializes the BAM transaction */
185 +static struct bam_transaction *alloc_bam_transaction(
186 + struct qcom_nand_controller *nandc)
187 +{
188 + struct bam_transaction *bam_txn;
189 +
190 + bam_txn = kzalloc(sizeof(*bam_txn), GFP_KERNEL);
191 +
192 + if (!bam_txn)
193 + return NULL;
194 +
195 + bam_txn->bam_ce_index = 0;
196 + bam_txn->pre_bam_ce_index = 0;
197 + bam_txn->cmd_sgl_cnt = 0;
198 + bam_txn->tx_sgl_cnt = 0;
199 + bam_txn->rx_sgl_cnt = 0;
200 +
201 + qcom_bam_sg_init_table(bam_txn->cmd_sgl, BAM_CMD_SGL_SIZE);
202 + qcom_bam_sg_init_table(bam_txn->tx_sgl, BAM_DATA_SGL_SIZE);
203 + qcom_bam_sg_init_table(bam_txn->rx_sgl, BAM_DATA_SGL_SIZE);
204 +
205 + return bam_txn;
206 +}
207 +
208 +/* Resets the BAM transaction indices and counts */
209 +static void clear_bam_transaction(struct qcom_nand_controller *nandc)
210 +{
211 + struct bam_transaction *bam_txn = nandc->bam_txn;
212 +
213 + if (!nandc->dma_bam_enabled)
214 + return;
215 +
216 + bam_txn->bam_ce_index = 0;
217 + bam_txn->pre_bam_ce_index = 0;
218 + bam_txn->cmd_sgl_cnt = 0;
219 + bam_txn->tx_sgl_cnt = 0;
220 + bam_txn->rx_sgl_cnt = 0;
221 +}
222 +
223 static inline struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip)
224 {
225 return container_of(chip, struct qcom_nand_host, chip);
226 @@ -398,6 +522,16 @@ static __le32 *offset_to_nandc_reg(struct nandc_regs *regs, int offset)
227 return &regs->orig_vld;
228 case NAND_EBI2_ECC_BUF_CFG:
229 return &regs->ecc_buf_cfg;
230 + case NAND_BUFFER_STATUS:
231 + return &regs->clrreadstatus;
232 + case NAND_READ_LOCATION_0:
233 + return &regs->read_location0;
234 + case NAND_READ_LOCATION_1:
235 + return &regs->read_location1;
236 + case NAND_READ_LOCATION_2:
237 + return &regs->read_location2;
238 + case NAND_READ_LOCATION_3:
239 + return &regs->read_location3;
240 default:
241 return NULL;
242 }
243 @@ -439,7 +573,7 @@ static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read)
244 {
245 struct nand_chip *chip = &host->chip;
246 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
247 - u32 cmd, cfg0, cfg1, ecc_bch_cfg;
248 + u32 cmd, cfg0, cfg1, ecc_bch_cfg, read_location0;
249
250 if (read) {
251 if (host->use_ecc)
252 @@ -456,12 +590,20 @@ static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read)
253
254 cfg1 = host->cfg1;
255 ecc_bch_cfg = host->ecc_bch_cfg;
256 + if (read)
257 + read_location0 = (0 << READ_LOCATION_OFFSET) |
258 + (host->cw_data << READ_LOCATION_SIZE) |
259 + (1 << READ_LOCATION_LAST);
260 } else {
261 cfg0 = (host->cfg0_raw & ~(7U << CW_PER_PAGE)) |
262 (num_cw - 1) << CW_PER_PAGE;
263
264 cfg1 = host->cfg1_raw;
265 ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
266 + if (read)
267 + read_location0 = (0 << READ_LOCATION_OFFSET) |
268 + (host->cw_size << READ_LOCATION_SIZE) |
269 + (1 << READ_LOCATION_LAST);
270 }
271
272 nandc_set_reg(nandc, NAND_FLASH_CMD, cmd);
273 @@ -472,8 +614,104 @@ static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read)
274 nandc_set_reg(nandc, NAND_FLASH_STATUS, host->clrflashstatus);
275 nandc_set_reg(nandc, NAND_READ_STATUS, host->clrreadstatus);
276 nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
277 +
278 + if (read)
279 + nandc_set_reg(nandc, NAND_READ_LOCATION_0, read_location0);
280 }
281
282 +/*
283 + * Prepares the command descriptor for BAM DMA which will be used for NAND
284 + * register reads and writes. The command descriptor requires the commands
285 + * to be formed as command elements, so this function takes command
286 + * elements from the bam transaction ce array and fills them with the
287 + * required data. A single SGL can contain multiple command elements, so
288 + * DMA_DESC_FLAG_BAM_NEXT_SGL is used to start a separate SGL after the
289 + * current command element.
290 + */
291 +static int prep_dma_desc_command(struct qcom_nand_controller *nandc, bool read,
292 + int reg_off, const void *vaddr,
293 + int size, unsigned int flags)
294 +{
295 + int bam_ce_size;
296 + int i;
297 + struct bam_cmd_element *bam_ce_buffer;
298 + struct bam_transaction *bam_txn = nandc->bam_txn;
299 +
300 + bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_index];
301 +
302 + /* fill the command desc */
303 + for (i = 0; i < size; i++) {
304 + if (read) {
305 + qcom_prep_bam_ce(&bam_ce_buffer[i],
306 + NAND_REG_PHYS_ADDRESS(nandc, reg_off + 4 * i),
307 + BAM_READ_COMMAND,
308 + REG_BUF_DMA_ADDR(nandc,
309 + (unsigned int *)vaddr + i));
310 + } else {
311 + qcom_prep_bam_ce(&bam_ce_buffer[i],
312 + NAND_REG_PHYS_ADDRESS(nandc, reg_off + 4 * i),
313 + BAM_WRITE_COMMAND,
314 + *((unsigned int *)vaddr + i));
315 + }
316 + }
317 +
318 + /* use the separate sgl after this command */
319 + if (flags & DMA_DESC_FLAG_BAM_NEXT_SGL) {
320 + bam_ce_buffer = &bam_txn->bam_ce[bam_txn->pre_bam_ce_index];
321 + bam_txn->bam_ce_index += size;
322 + bam_ce_size = (bam_txn->bam_ce_index -
323 + bam_txn->pre_bam_ce_index) *
324 + sizeof(struct bam_cmd_element);
325 + sg_set_buf(&bam_txn->cmd_sgl[bam_txn->cmd_sgl_cnt].sgl,
326 + bam_ce_buffer,
327 + bam_ce_size);
328 + if (flags & DMA_DESC_FLAG_BAM_NWD)
329 + bam_txn->cmd_sgl[bam_txn->cmd_sgl_cnt].dma_flags =
330 + DESC_FLAG_NWD | DESC_FLAG_CMD;
331 + else
332 + bam_txn->cmd_sgl[bam_txn->cmd_sgl_cnt].dma_flags =
333 + DESC_FLAG_CMD;
334 +
335 + bam_txn->cmd_sgl_cnt++;
336 + bam_txn->pre_bam_ce_index = bam_txn->bam_ce_index;
337 + } else {
338 + bam_txn->bam_ce_index += size;
339 + }
340 +
341 + return 0;
342 +}
343 +
344 +/*
345 + * Prepares the data descriptor for BAM DMA which will be used for NAND
346 + * data read and write.
347 + */
348 +static int prep_dma_desc_data_bam(struct qcom_nand_controller *nandc, bool read,
349 + int reg_off, const void *vaddr,
350 + int size, unsigned int flags)
351 +{
352 + struct bam_transaction *bam_txn = nandc->bam_txn;
353 +
354 + if (read) {
355 + sg_set_buf(&bam_txn->rx_sgl[bam_txn->rx_sgl_cnt].sgl,
356 + vaddr, size);
357 + bam_txn->rx_sgl[bam_txn->rx_sgl_cnt].dma_flags = 0;
358 + bam_txn->rx_sgl_cnt++;
359 + } else {
360 + sg_set_buf(&bam_txn->tx_sgl[bam_txn->tx_sgl_cnt].sgl,
361 + vaddr, size);
362 + if (flags & DMA_DESC_FLAG_NO_EOT)
363 + bam_txn->tx_sgl[bam_txn->tx_sgl_cnt].dma_flags = 0;
364 + else
365 + bam_txn->tx_sgl[bam_txn->tx_sgl_cnt].dma_flags =
366 + DESC_FLAG_EOT;
367 +
368 + bam_txn->tx_sgl_cnt++;
369 + }
370 +
371 + return 0;
372 +}
373 +
374 +/* Prepares the DMA descriptor for the ADM DMA engine */
375 static int prep_dma_desc(struct qcom_nand_controller *nandc, bool read,
376 int reg_off, const void *vaddr, int size,
377 bool flow_control)
378 @@ -552,7 +790,7 @@ static int prep_dma_desc(struct qcom_nand_controller *nandc, bool read,
379 * @num_regs: number of registers to read
380 */
381 static int read_reg_dma(struct qcom_nand_controller *nandc, int first,
382 - int num_regs)
383 + int num_regs, unsigned int flags)
384 {
385 bool flow_control = false;
386 void *vaddr;
387 @@ -561,10 +799,18 @@ static int read_reg_dma(struct qcom_nand_controller *nandc, int first,
388 if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
389 flow_control = true;
390
391 - size = num_regs * sizeof(u32);
392 vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
393 nandc->reg_read_pos += num_regs;
394
395 + if (nandc->dma_bam_enabled) {
396 + size = num_regs;
397 +
398 + return prep_dma_desc_command(nandc, true, first, vaddr, size,
399 + flags);
400 + }
401 +
402 + size = num_regs * sizeof(u32);
403 +
404 return prep_dma_desc(nandc, true, first, vaddr, size, flow_control);
405 }
406
407 @@ -576,7 +822,7 @@ static int read_reg_dma(struct qcom_nand_controller *nandc, int first,
408 * @num_regs: number of registers to write
409 */
410 static int write_reg_dma(struct qcom_nand_controller *nandc, int first,
411 - int num_regs)
412 + int num_regs, unsigned int flags)
413 {
414 bool flow_control = false;
415 struct nandc_regs *regs = nandc->regs;
416 @@ -588,12 +834,29 @@ static int write_reg_dma(struct qcom_nand_controller *nandc, int first,
417 if (first == NAND_FLASH_CMD)
418 flow_control = true;
419
420 + if (first == NAND_ERASED_CW_DETECT_CFG) {
421 + if (flags & DMA_DESC_ERASED_CW_SET)
422 + vaddr = &regs->erased_cw_detect_cfg_set;
423 + else
424 + vaddr = &regs->erased_cw_detect_cfg_clr;
425 + }
426 +
427 + if (first == NAND_EXEC_CMD)
428 + flags |= DMA_DESC_FLAG_BAM_NWD;
429 +
430 if (first == NAND_DEV_CMD1_RESTORE)
431 first = NAND_DEV_CMD1;
432
433 if (first == NAND_DEV_CMD_VLD_RESTORE)
434 first = NAND_DEV_CMD_VLD;
435
436 + if (nandc->dma_bam_enabled) {
437 + size = num_regs;
438 +
439 + return prep_dma_desc_command(nandc, false, first, vaddr, size,
440 + flags);
441 + }
442 +
443 size = num_regs * sizeof(u32);
444
445 return prep_dma_desc(nandc, false, first, vaddr, size, flow_control);
446 @@ -608,8 +871,12 @@ static int write_reg_dma(struct qcom_nand_controller *nandc, int first,
447 * @size: DMA transaction size in bytes
448 */
449 static int read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
450 - const u8 *vaddr, int size)
451 + const u8 *vaddr, int size, unsigned int flags)
452 {
453 + if (nandc->dma_bam_enabled)
454 + return prep_dma_desc_data_bam(nandc, true, reg_off, vaddr, size,
455 + flags);
456 +
457 return prep_dma_desc(nandc, true, reg_off, vaddr, size, false);
458 }
459
460 @@ -622,8 +889,12 @@ static int read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
461 * @size: DMA transaction size in bytes
462 */
463 static int write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
464 - const u8 *vaddr, int size)
465 + const u8 *vaddr, int size, unsigned int flags)
466 {
467 + if (nandc->dma_bam_enabled)
468 + return prep_dma_desc_data_bam(nandc, false, reg_off, vaddr,
469 + size, flags);
470 +
471 return prep_dma_desc(nandc, false, reg_off, vaddr, size, false);
472 }
473
474 @@ -633,14 +904,57 @@ static int write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
475 */
476 static void config_cw_read(struct qcom_nand_controller *nandc)
477 {
478 - write_reg_dma(nandc, NAND_FLASH_CMD, 3);
479 - write_reg_dma(nandc, NAND_DEV0_CFG0, 3);
480 - write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1);
481
482 - write_reg_dma(nandc, NAND_EXEC_CMD, 1);
483 + write_reg_dma(nandc, NAND_FLASH_CMD, 3, 0);
484 + write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
485 + write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1, 0);
486 +
487 + write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1, 0);
488 + write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1,
489 + DMA_DESC_ERASED_CW_SET);
490 + if (nandc->dma_bam_enabled)
491 + write_reg_dma(nandc, NAND_READ_LOCATION_0, 1,
492 + DMA_DESC_FLAG_BAM_NEXT_SGL);
493 +
494
495 - read_reg_dma(nandc, NAND_FLASH_STATUS, 2);
496 - read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1);
497 + write_reg_dma(nandc, NAND_EXEC_CMD, 1, DMA_DESC_FLAG_BAM_NWD |
498 + DMA_DESC_FLAG_BAM_NEXT_SGL);
499 +
500 + read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0);
501 + read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1,
502 + DMA_DESC_FLAG_BAM_NEXT_SGL);
503 +}
504 +
505 +/*
506 + * Helper to prepare DMA descriptors for configuring the registers
507 + * needed before reading a NAND page with BAM.
508 + */
509 +static void config_bam_page_read(struct qcom_nand_controller *nandc)
510 +{
511 + write_reg_dma(nandc, NAND_FLASH_CMD, 3, 0);
512 + write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
513 + write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1, 0);
514 + write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1, 0);
515 + write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1,
516 + DMA_DESC_ERASED_CW_SET |
517 + DMA_DESC_FLAG_BAM_NEXT_SGL);
518 +}
519 +
520 +/*
521 + * Helper to prepare DMA descriptors for configuring the registers
522 + * needed before reading each codeword in a NAND page with BAM.
523 + */
524 +static void config_bam_cw_read(struct qcom_nand_controller *nandc)
525 +{
526 + if (nandc->dma_bam_enabled)
527 + write_reg_dma(nandc, NAND_READ_LOCATION_0, 4, 0);
528 +
529 + write_reg_dma(nandc, NAND_FLASH_CMD, 1, DMA_DESC_FLAG_BAM_NEXT_SGL);
530 + write_reg_dma(nandc, NAND_EXEC_CMD, 1, DMA_DESC_FLAG_BAM_NEXT_SGL);
531 +
532 + read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0);
533 + read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1,
534 + DMA_DESC_FLAG_BAM_NEXT_SGL);
535 }
536
537 /*
538 @@ -649,19 +963,20 @@ static void config_cw_read(struct qcom_nand_controller *nandc)
539 */
540 static void config_cw_write_pre(struct qcom_nand_controller *nandc)
541 {
542 - write_reg_dma(nandc, NAND_FLASH_CMD, 3);
543 - write_reg_dma(nandc, NAND_DEV0_CFG0, 3);
544 - write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1);
545 + write_reg_dma(nandc, NAND_FLASH_CMD, 3, 0);
546 + write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
547 + write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1,
548 + DMA_DESC_FLAG_BAM_NEXT_SGL);
549 }
550
551 static void config_cw_write_post(struct qcom_nand_controller *nandc)
552 {
553 - write_reg_dma(nandc, NAND_EXEC_CMD, 1);
554 + write_reg_dma(nandc, NAND_EXEC_CMD, 1, DMA_DESC_FLAG_BAM_NEXT_SGL);
555
556 - read_reg_dma(nandc, NAND_FLASH_STATUS, 1);
557 + read_reg_dma(nandc, NAND_FLASH_STATUS, 1, DMA_DESC_FLAG_BAM_NEXT_SGL);
558
559 - write_reg_dma(nandc, NAND_FLASH_STATUS, 1);
560 - write_reg_dma(nandc, NAND_READ_STATUS, 1);
561 + write_reg_dma(nandc, NAND_FLASH_STATUS, 1, 0);
562 + write_reg_dma(nandc, NAND_READ_STATUS, 1, DMA_DESC_FLAG_BAM_NEXT_SGL);
563 }
564
565 /*
566 @@ -675,6 +990,8 @@ static int nandc_param(struct qcom_nand_host *host)
567 struct nand_chip *chip = &host->chip;
568 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
569
570 + clear_bam_transaction(nandc);
571 +
572 /*
573 * NAND_CMD_PARAM is called before we know much about the FLASH chip
574 * in use. we configure the controller to perform a raw read of 512
575 @@ -708,9 +1025,13 @@ static int nandc_param(struct qcom_nand_host *host)
576
577 nandc_set_reg(nandc, NAND_DEV_CMD1_RESTORE, nandc->cmd1);
578 nandc_set_reg(nandc, NAND_DEV_CMD_VLD_RESTORE, nandc->vld);
579 + nandc_set_reg(nandc, NAND_READ_LOCATION_0,
580 + (0 << READ_LOCATION_OFFSET) |
581 + (512 << READ_LOCATION_SIZE) |
582 + (1 << READ_LOCATION_LAST));
583
584 - write_reg_dma(nandc, NAND_DEV_CMD_VLD, 1);
585 - write_reg_dma(nandc, NAND_DEV_CMD1, 1);
586 + write_reg_dma(nandc, NAND_DEV_CMD_VLD, 1, 0);
587 + write_reg_dma(nandc, NAND_DEV_CMD1, 1, DMA_DESC_FLAG_BAM_NEXT_SGL);
588
589 nandc->buf_count = 512;
590 memset(nandc->data_buffer, 0xff, nandc->buf_count);
591 @@ -718,11 +1039,12 @@ static int nandc_param(struct qcom_nand_host *host)
592 config_cw_read(nandc);
593
594 read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
595 - nandc->buf_count);
596 + nandc->buf_count, 0);
597
598 /* restore CMD1 and VLD regs */
599 - write_reg_dma(nandc, NAND_DEV_CMD1_RESTORE, 1);
600 - write_reg_dma(nandc, NAND_DEV_CMD_VLD_RESTORE, 1);
601 + write_reg_dma(nandc, NAND_DEV_CMD1_RESTORE, 1, 0);
602 + write_reg_dma(nandc, NAND_DEV_CMD_VLD_RESTORE, 1,
603 + DMA_DESC_FLAG_BAM_NEXT_SGL);
604
605 return 0;
606 }
607 @@ -733,6 +1055,8 @@ static int erase_block(struct qcom_nand_host *host, int page_addr)
608 struct nand_chip *chip = &host->chip;
609 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
610
611 + clear_bam_transaction(nandc);
612 +
613 nandc_set_reg(nandc, NAND_FLASH_CMD,
614 BLOCK_ERASE | PAGE_ACC | LAST_PAGE);
615 nandc_set_reg(nandc, NAND_ADDR0, page_addr);
616 @@ -744,14 +1068,15 @@ static int erase_block(struct qcom_nand_host *host, int page_addr)
617 nandc_set_reg(nandc, NAND_FLASH_STATUS, host->clrflashstatus);
618 nandc_set_reg(nandc, NAND_READ_STATUS, host->clrreadstatus);
619
620 - write_reg_dma(nandc, NAND_FLASH_CMD, 3);
621 - write_reg_dma(nandc, NAND_DEV0_CFG0, 2);
622 - write_reg_dma(nandc, NAND_EXEC_CMD, 1);
623
624 - read_reg_dma(nandc, NAND_FLASH_STATUS, 1);
625 + write_reg_dma(nandc, NAND_FLASH_CMD, 3, DMA_DESC_FLAG_BAM_NEXT_SGL);
626 + write_reg_dma(nandc, NAND_DEV0_CFG0, 2, DMA_DESC_FLAG_BAM_NEXT_SGL);
627 + write_reg_dma(nandc, NAND_EXEC_CMD, 1, DMA_DESC_FLAG_BAM_NEXT_SGL);
628
629 - write_reg_dma(nandc, NAND_FLASH_STATUS, 1);
630 - write_reg_dma(nandc, NAND_READ_STATUS, 1);
631 + read_reg_dma(nandc, NAND_FLASH_STATUS, 1, DMA_DESC_FLAG_BAM_NEXT_SGL);
632 +
633 + write_reg_dma(nandc, NAND_FLASH_STATUS, 1, 0);
634 + write_reg_dma(nandc, NAND_READ_STATUS, 1, DMA_DESC_FLAG_BAM_NEXT_SGL);
635
636 return 0;
637 }
638 @@ -765,16 +1090,19 @@ static int read_id(struct qcom_nand_host *host, int column)
639 if (column == -1)
640 return 0;
641
642 + clear_bam_transaction(nandc);
643 +
644 nandc_set_reg(nandc, NAND_FLASH_CMD, FETCH_ID);
645 nandc_set_reg(nandc, NAND_ADDR0, column);
646 nandc_set_reg(nandc, NAND_ADDR1, 0);
647 - nandc_set_reg(nandc, NAND_FLASH_CHIP_SELECT, DM_EN);
648 + nandc_set_reg(nandc, NAND_FLASH_CHIP_SELECT,
649 + nandc->dma_bam_enabled ? 0 : DM_EN);
650 nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
651
652 - write_reg_dma(nandc, NAND_FLASH_CMD, 4);
653 - write_reg_dma(nandc, NAND_EXEC_CMD, 1);
654 + write_reg_dma(nandc, NAND_FLASH_CMD, 4, DMA_DESC_FLAG_BAM_NEXT_SGL);
655 + write_reg_dma(nandc, NAND_EXEC_CMD, 1, DMA_DESC_FLAG_BAM_NEXT_SGL);
656
657 - read_reg_dma(nandc, NAND_READ_ID, 1);
658 + read_reg_dma(nandc, NAND_READ_ID, 1, DMA_DESC_FLAG_BAM_NEXT_SGL);
659
660 return 0;
661 }
662 @@ -785,15 +1113,61 @@ static int reset(struct qcom_nand_host *host)
663 struct nand_chip *chip = &host->chip;
664 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
665
666 + clear_bam_transaction(nandc);
667 +
668 nandc_set_reg(nandc, NAND_FLASH_CMD, RESET_DEVICE);
669 nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
670
671 - write_reg_dma(nandc, NAND_FLASH_CMD, 1);
672 - write_reg_dma(nandc, NAND_EXEC_CMD, 1);
673 + write_reg_dma(nandc, NAND_FLASH_CMD, 1, DMA_DESC_FLAG_BAM_NEXT_SGL);
674 + write_reg_dma(nandc, NAND_EXEC_CMD, 1, DMA_DESC_FLAG_BAM_NEXT_SGL);
675 +
676 + read_reg_dma(nandc, NAND_FLASH_STATUS, 1, DMA_DESC_FLAG_BAM_NEXT_SGL);
677 +
678 + return 0;
679 +}
680 +
681 +static int prepare_bam_async_desc(struct qcom_nand_controller *nandc,
682 + struct dma_chan *chan,
683 + struct qcom_bam_sgl *bam_sgl,
684 + int sgl_cnt,
685 + enum dma_transfer_direction direction)
686 +{
687 + struct desc_info *desc;
688 + struct dma_async_tx_descriptor *dma_desc;
689 +
690 + if (!qcom_bam_map_sg(nandc->dev, bam_sgl, sgl_cnt, direction)) {
691 + dev_err(nandc->dev, "failure in mapping sgl\n");
692 + return -ENOMEM;
693 + }
694 +
695 + desc = kzalloc(sizeof(*desc), GFP_KERNEL);
696 + if (!desc) {
697 + qcom_bam_unmap_sg(nandc->dev, bam_sgl, sgl_cnt, direction);
698 + return -ENOMEM;
699 + }
700 +
701 +
702 + desc->bam_desc_data.dir = direction;
703 + desc->bam_desc_data.sgl_cnt = sgl_cnt;
704 + desc->bam_desc_data.bam_sgl = bam_sgl;
705 +
706 + dma_desc = dmaengine_prep_dma_custom_mapping(chan,
707 + &desc->bam_desc_data,
708 + 0);
709 +
710 + if (!dma_desc) {
711 + dev_err(nandc->dev, "failure in cmd prep desc\n");
712 + qcom_bam_unmap_sg(nandc->dev, bam_sgl, sgl_cnt, direction);
713 + kfree(desc);
714 + return -EINVAL;
715 + }
716 +
717 + desc->dma_desc = dma_desc;
718
719 - read_reg_dma(nandc, NAND_FLASH_STATUS, 1);
720 + list_add_tail(&desc->node, &nandc->desc_list);
721
722 return 0;
723 +
724 }
725
726 /* helpers to submit/free our list of dma descriptors */
727 @@ -801,12 +1175,46 @@ static int submit_descs(struct qcom_nand_controller *nandc)
728 {
729 struct desc_info *desc;
730 dma_cookie_t cookie = 0;
731 + struct bam_transaction *bam_txn = nandc->bam_txn;
732 + int r;
733 +
734 + if (nandc->dma_bam_enabled) {
735 + if (bam_txn->rx_sgl_cnt) {
736 + r = prepare_bam_async_desc(nandc, nandc->rx_chan,
737 + bam_txn->rx_sgl, bam_txn->rx_sgl_cnt,
738 + DMA_DEV_TO_MEM);
739 + if (r)
740 + return r;
741 + }
742 +
743 + if (bam_txn->tx_sgl_cnt) {
744 + r = prepare_bam_async_desc(nandc, nandc->tx_chan,
745 + bam_txn->tx_sgl, bam_txn->tx_sgl_cnt,
746 + DMA_MEM_TO_DEV);
747 + if (r)
748 + return r;
749 + }
750 +
751 + r = prepare_bam_async_desc(nandc, nandc->cmd_chan,
752 + bam_txn->cmd_sgl, bam_txn->cmd_sgl_cnt,
753 + DMA_MEM_TO_DEV);
754 + if (r)
755 + return r;
756 + }
757
758 list_for_each_entry(desc, &nandc->desc_list, node)
759 cookie = dmaengine_submit(desc->dma_desc);
760
761 - if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
762 - return -ETIMEDOUT;
763 + if (nandc->dma_bam_enabled) {
764 + dma_async_issue_pending(nandc->tx_chan);
765 + dma_async_issue_pending(nandc->rx_chan);
766 +
767 + if (dma_sync_wait(nandc->cmd_chan, cookie) != DMA_COMPLETE)
768 + return -ETIMEDOUT;
769 + } else {
770 + if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
771 + return -ETIMEDOUT;
772 + }
773
774 return 0;
775 }
776 @@ -817,7 +1225,16 @@ static void free_descs(struct qcom_nand_controller *nandc)
777
778 list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
779 list_del(&desc->node);
780 - dma_unmap_sg(nandc->dev, &desc->sgl, 1, desc->dir);
781 +
782 + if (nandc->dma_bam_enabled)
783 + qcom_bam_unmap_sg(nandc->dev,
784 + desc->bam_desc_data.bam_sgl,
785 + desc->bam_desc_data.sgl_cnt,
786 + desc->bam_desc_data.dir);
787 + else
788 + dma_unmap_sg(nandc->dev, &desc->sgl, 1,
789 + desc->dir);
790 +
791 kfree(desc);
792 }
793 }
794 @@ -1128,6 +1545,9 @@ static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
795 struct nand_ecc_ctrl *ecc = &chip->ecc;
796 int i, ret;
797
798 + if (nandc->dma_bam_enabled)
799 + config_bam_page_read(nandc);
800 +
801 /* queue cmd descs for each codeword */
802 for (i = 0; i < ecc->steps; i++) {
803 int data_size, oob_size;
804 @@ -1141,11 +1561,36 @@ static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
805 oob_size = host->ecc_bytes_hw + host->spare_bytes;
806 }
807
808 - config_cw_read(nandc);
809 + if (nandc->dma_bam_enabled) {
810 + if (data_buf && oob_buf) {
811 + nandc_set_reg(nandc, NAND_READ_LOCATION_0,
812 + (0 << READ_LOCATION_OFFSET) |
813 + (data_size << READ_LOCATION_SIZE) |
814 + (0 << READ_LOCATION_LAST));
815 + nandc_set_reg(nandc, NAND_READ_LOCATION_1,
816 + (data_size << READ_LOCATION_OFFSET) |
817 + (oob_size << READ_LOCATION_SIZE) |
818 + (1 << READ_LOCATION_LAST));
819 + } else if (data_buf) {
820 + nandc_set_reg(nandc, NAND_READ_LOCATION_0,
821 + (0 << READ_LOCATION_OFFSET) |
822 + (data_size << READ_LOCATION_SIZE) |
823 + (1 << READ_LOCATION_LAST));
824 + } else {
825 + nandc_set_reg(nandc, NAND_READ_LOCATION_0,
826 + (data_size << READ_LOCATION_OFFSET) |
827 + (oob_size << READ_LOCATION_SIZE) |
828 + (1 << READ_LOCATION_LAST));
829 + }
830 +
831 + config_bam_cw_read(nandc);
832 + } else {
833 + config_cw_read(nandc);
834 + }
835
836 if (data_buf)
837 read_data_dma(nandc, FLASH_BUF_ACC, data_buf,
838 - data_size);
839 + data_size, 0);
840
841 /*
842 * when ecc is enabled, the controller doesn't read the real
843 @@ -1161,7 +1606,7 @@ static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
844 *oob_buf++ = 0xff;
845
846 read_data_dma(nandc, FLASH_BUF_ACC + data_size,
847 - oob_buf, oob_size);
848 + oob_buf, oob_size, 0);
849 }
850
851 if (data_buf)
852 @@ -1200,10 +1645,14 @@ static int copy_last_cw(struct qcom_nand_host *host, int page)
853
854 set_address(host, host->cw_size * (ecc->steps - 1), page);
855 update_rw_regs(host, 1, true);
856 + nandc_set_reg(nandc, NAND_READ_LOCATION_0,
857 + (0 << READ_LOCATION_OFFSET) |
858 + (size << READ_LOCATION_SIZE) |
859 + (1 << READ_LOCATION_LAST));
860
861 config_cw_read(nandc);
862
863 - read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size);
864 + read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size, 0);
865
866 ret = submit_descs(nandc);
867 if (ret)
868 @@ -1226,6 +1675,7 @@ static int qcom_nandc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
869 data_buf = buf;
870 oob_buf = oob_required ? chip->oob_poi : NULL;
871
872 + clear_bam_transaction(nandc);
873 ret = read_page_ecc(host, data_buf, oob_buf);
874 if (ret) {
875 dev_err(nandc->dev, "failure to read page\n");
876 @@ -1245,13 +1695,19 @@ static int qcom_nandc_read_page_raw(struct mtd_info *mtd,
877 u8 *data_buf, *oob_buf;
878 struct nand_ecc_ctrl *ecc = &chip->ecc;
879 int i, ret;
880 + int read_location;
881
882 data_buf = buf;
883 oob_buf = chip->oob_poi;
884
885 host->use_ecc = false;
886 +
887 + clear_bam_transaction(nandc);
888 update_rw_regs(host, ecc->steps, true);
889
890 + if (nandc->dma_bam_enabled)
891 + config_bam_page_read(nandc);
892 +
893 for (i = 0; i < ecc->steps; i++) {
894 int data_size1, data_size2, oob_size1, oob_size2;
895 int reg_off = FLASH_BUF_ACC;
896 @@ -1269,21 +1725,49 @@ static int qcom_nandc_read_page_raw(struct mtd_info *mtd,
897 oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
898 }
899
900 - config_cw_read(nandc);
901 + if (nandc->dma_bam_enabled) {
902 + read_location = 0;
903 + nandc_set_reg(nandc, NAND_READ_LOCATION_0,
904 + (read_location << READ_LOCATION_OFFSET) |
905 + (data_size1 << READ_LOCATION_SIZE) |
906 + (0 << READ_LOCATION_LAST));
907 + read_location += data_size1;
908 +
909 + nandc_set_reg(nandc, NAND_READ_LOCATION_1,
910 + (read_location << READ_LOCATION_OFFSET) |
911 + (oob_size1 << READ_LOCATION_SIZE) |
912 + (0 << READ_LOCATION_LAST));
913 + read_location += oob_size1;
914 +
915 + nandc_set_reg(nandc, NAND_READ_LOCATION_2,
916 + (read_location << READ_LOCATION_OFFSET) |
917 + (data_size2 << READ_LOCATION_SIZE) |
918 + (0 << READ_LOCATION_LAST));
919 + read_location += data_size2;
920 +
921 + nandc_set_reg(nandc, NAND_READ_LOCATION_3,
922 + (read_location << READ_LOCATION_OFFSET) |
923 + (oob_size2 << READ_LOCATION_SIZE) |
924 + (1 << READ_LOCATION_LAST));
925 +
926 + config_bam_cw_read(nandc);
927 + } else {
928 + config_cw_read(nandc);
929 + }
930
931 - read_data_dma(nandc, reg_off, data_buf, data_size1);
932 + read_data_dma(nandc, reg_off, data_buf, data_size1, 0);
933 reg_off += data_size1;
934 data_buf += data_size1;
935
936 - read_data_dma(nandc, reg_off, oob_buf, oob_size1);
937 + read_data_dma(nandc, reg_off, oob_buf, oob_size1, 0);
938 reg_off += oob_size1;
939 oob_buf += oob_size1;
940
941 - read_data_dma(nandc, reg_off, data_buf, data_size2);
942 + read_data_dma(nandc, reg_off, data_buf, data_size2, 0);
943 reg_off += data_size2;
944 data_buf += data_size2;
945
946 - read_data_dma(nandc, reg_off, oob_buf, oob_size2);
947 + read_data_dma(nandc, reg_off, oob_buf, oob_size2, 0);
948 oob_buf += oob_size2;
949 }
950
951 @@ -1306,6 +1790,7 @@ static int qcom_nandc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
952 int ret;
953
954 clear_read_regs(nandc);
955 + clear_bam_transaction(nandc);
956
957 host->use_ecc = true;
958 set_address(host, 0, page);
959 @@ -1329,6 +1814,7 @@ static int qcom_nandc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
960 int i, ret;
961
962 clear_read_regs(nandc);
963 + clear_bam_transaction(nandc);
964
965 data_buf = (u8 *)buf;
966 oob_buf = chip->oob_poi;
967 @@ -1350,7 +1836,8 @@ static int qcom_nandc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
968
969 config_cw_write_pre(nandc);
970
971 - write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size);
972 + write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size,
973 + i == (ecc->steps - 1) ? DMA_DESC_FLAG_NO_EOT : 0);
974
975 /*
976 * when ECC is enabled, we don't really need to write anything
977 @@ -1363,7 +1850,7 @@ static int qcom_nandc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
978 oob_buf += host->bbm_size;
979
980 write_data_dma(nandc, FLASH_BUF_ACC + data_size,
981 - oob_buf, oob_size);
982 + oob_buf, oob_size, 0);
983 }
984
985 config_cw_write_post(nandc);
986 @@ -1393,6 +1880,7 @@ static int qcom_nandc_write_page_raw(struct mtd_info *mtd,
987 int i, ret;
988
989 clear_read_regs(nandc);
990 + clear_bam_transaction(nandc);
991
992 data_buf = (u8 *)buf;
993 oob_buf = chip->oob_poi;
994 @@ -1419,19 +1907,22 @@ static int qcom_nandc_write_page_raw(struct mtd_info *mtd,
995
996 config_cw_write_pre(nandc);
997
998 - write_data_dma(nandc, reg_off, data_buf, data_size1);
999 + write_data_dma(nandc, reg_off, data_buf, data_size1,
1000 + DMA_DESC_FLAG_NO_EOT);
1001 reg_off += data_size1;
1002 data_buf += data_size1;
1003
1004 - write_data_dma(nandc, reg_off, oob_buf, oob_size1);
1005 + write_data_dma(nandc, reg_off, oob_buf, oob_size1,
1006 + DMA_DESC_FLAG_NO_EOT);
1007 reg_off += oob_size1;
1008 oob_buf += oob_size1;
1009
1010 - write_data_dma(nandc, reg_off, data_buf, data_size2);
1011 + write_data_dma(nandc, reg_off, data_buf, data_size2,
1012 + DMA_DESC_FLAG_NO_EOT);
1013 reg_off += data_size2;
1014 data_buf += data_size2;
1015
1016 - write_data_dma(nandc, reg_off, oob_buf, oob_size2);
1017 + write_data_dma(nandc, reg_off, oob_buf, oob_size2, 0);
1018 oob_buf += oob_size2;
1019
1020 config_cw_write_post(nandc);
1021 @@ -1467,6 +1958,7 @@ static int qcom_nandc_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
1022
1023 host->use_ecc = true;
1024
1025 + clear_bam_transaction(nandc);
1026 ret = copy_last_cw(host, page);
1027 if (ret)
1028 return ret;
1029 @@ -1486,7 +1978,7 @@ static int qcom_nandc_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
1030
1031 config_cw_write_pre(nandc);
1032 write_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
1033 - data_size + oob_size);
1034 + data_size + oob_size, 0);
1035 config_cw_write_post(nandc);
1036
1037 ret = submit_descs(nandc);
1038 @@ -1524,6 +2016,7 @@ static int qcom_nandc_block_bad(struct mtd_info *mtd, loff_t ofs)
1039 */
1040 host->use_ecc = false;
1041
1042 + clear_bam_transaction(nandc);
1043 ret = copy_last_cw(host, page);
1044 if (ret)
1045 goto err;
1046 @@ -1554,6 +2047,7 @@ static int qcom_nandc_block_markbad(struct mtd_info *mtd, loff_t ofs)
1047 int page, ret, status = 0;
1048
1049 clear_read_regs(nandc);
1050 + clear_bam_transaction(nandc);
1051
1052 /*
1053 * to mark the BBM as bad, we flash the entire last codeword with 0s.
1054 @@ -1570,7 +2064,8 @@ static int qcom_nandc_block_markbad(struct mtd_info *mtd, loff_t ofs)
1055 update_rw_regs(host, 1, false);
1056
1057 config_cw_write_pre(nandc);
1058 - write_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, host->cw_size);
1059 + write_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
1060 + host->cw_size, 0);
1061 config_cw_write_post(nandc);
1062
1063 ret = submit_descs(nandc);
1064 @@ -1930,6 +2425,8 @@ static int qcom_nand_host_setup(struct qcom_nand_host *host)
1065
1066 host->clrflashstatus = FS_READY_BSY_N;
1067 host->clrreadstatus = 0xc0;
1068 + nandc->regs->erased_cw_detect_cfg_clr = CLR_ERASED_PAGE_DET;
1069 + nandc->regs->erased_cw_detect_cfg_set = SET_ERASED_PAGE_DET;
1070
1071 dev_dbg(nandc->dev,
1072 "cfg0 %x cfg1 %x ecc_buf_cfg %x ecc_bch cfg %x cw_size %d cw_data %d strength %d parity_bytes %d steps %d\n",
1073 @@ -2008,6 +2505,12 @@ static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
1074 dev_err(nandc->dev, "failed to request cmd channel\n");
1075 return -ENODEV;
1076 }
1077 +
1078 + nandc->bam_txn = alloc_bam_transaction(nandc);
1079 + if (!nandc->bam_txn) {
1080 + dev_err(nandc->dev, "failed to allocate bam transaction\n");
1081 + return -ENOMEM;
1082 + }
1083 }
1084
1085 INIT_LIST_HEAD(&nandc->desc_list);
1086 @@ -2043,6 +2546,9 @@ static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
1087 devm_kfree(nandc->dev, nandc->reg_read_buf);
1088 }
1089
1090 + if (nandc->bam_txn)
1091 + devm_kfree(nandc->dev, nandc->bam_txn);
1092 +
1093 if (nandc->regs)
1094 devm_kfree(nandc->dev, nandc->regs);
1095
1096 @@ -2053,11 +2559,18 @@ static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
1097 /* one time setup of a few nand controller registers */
1098 static int qcom_nandc_setup(struct qcom_nand_controller *nandc)
1099 {
1100 + u32 nand_ctrl;
1101 +
1102 /* kill onenand */
1103 nandc_write(nandc, SFLASHC_BURST_CFG, 0);
1104
1105 - /* enable ADM DMA */
1106 - nandc_write(nandc, NAND_FLASH_CHIP_SELECT, DM_EN);
1107 + /* enable ADM or BAM DMA */
1108 + if (!nandc->dma_bam_enabled) {
1109 + nandc_write(nandc, NAND_FLASH_CHIP_SELECT, DM_EN);
1110 + } else {
1111 + nand_ctrl = nandc_read(nandc, NAND_CTRL);
1112 + nandc_write(nandc, NAND_CTRL, nand_ctrl | BAM_MODE_EN);
1113 + }
1114
1115 /* save the original values of these registers */
1116 nandc->cmd1 = nandc_read(nandc, NAND_DEV_CMD1);
1117 diff --git a/include/linux/dma/qcom_bam_dma.h b/include/linux/dma/qcom_bam_dma.h
1118 new file mode 100644
1119 index 0000000..7e87a85
1120 --- /dev/null
1121 +++ b/include/linux/dma/qcom_bam_dma.h
1122 @@ -0,0 +1,149 @@
1123 +/*
1124 + * Copyright (c) 2017, The Linux Foundation. All rights reserved.
1125 + *
1126 + * Permission to use, copy, modify, and/or distribute this software for any
1127 + * purpose with or without fee is hereby granted, provided that the above
1128 + * copyright notice and this permission notice appear in all copies.
1129 + *
1130 + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
1131 + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
1132 + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
1133 + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
1134 + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
1135 + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
1136 + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
1137 + */
1138 +
1139 +#ifndef _QCOM_BAM_DMA_H
1140 +#define _QCOM_BAM_DMA_H
1141 +
1142 +#include <linux/dma-mapping.h>
1143 +
1144 +#define DESC_FLAG_INT BIT(15)
1145 +#define DESC_FLAG_EOT BIT(14)
1146 +#define DESC_FLAG_EOB BIT(13)
1147 +#define DESC_FLAG_NWD BIT(12)
1148 +#define DESC_FLAG_CMD BIT(11)
1149 +
1150 +/*
1151 + * QCOM BAM DMA SGL struct
1152 + *
1153 + * @sgl: DMA SGL
1154 + * @dma_flags: BAM DMA flags
1155 + */
1156 +struct qcom_bam_sgl {
1157 + struct scatterlist sgl;
1158 + unsigned int dma_flags;
1159 +};
1160 +
1161 +/*
1162 + * This data type corresponds to the native Command Element
1163 + * supported by the BAM DMA engine.
1164 + *
1165 + * @addr - register address.
1166 + * @command - command type.
1167 + * @data - for write command: content to be written into peripheral register.
1168 + * for read command: dest addr to write peripheral register value to.
1169 + * @mask - register mask.
1170 + * @reserved - for future usage.
1171 + *
1172 + */
1173 +struct bam_cmd_element {
1174 + __le32 addr:24;
1175 + __le32 command:8;
1176 + __le32 data;
1177 + __le32 mask;
1178 + __le32 reserved;
1179 +};
1180 +
1181 +/*
1182 + * This enum indicates the command type in a command element
1183 + */
1184 +enum bam_command_type {
1185 + BAM_WRITE_COMMAND = 0,
1186 + BAM_READ_COMMAND,
1187 +};
1188 +
1189 +/*
1190 + * qcom_bam_sg_init_table - Init QCOM BAM SGL
1191 + * @bam_sgl: bam sgl
1192 + * @nents: number of entries in bam sgl
1193 + *
1194 + * This function performs the initialization for each SGL in BAM SGL
1195 + * with generic SGL API.
1196 + */
1197 +static inline void qcom_bam_sg_init_table(struct qcom_bam_sgl *bam_sgl,
1198 + unsigned int nents)
1199 +{
1200 + int i;
1201 +
1202 + for (i = 0; i < nents; i++)
1203 + sg_init_table(&bam_sgl[i].sgl, 1);
1204 +}
1205 +
1206 +/*
1207 + * qcom_bam_unmap_sg - Unmap QCOM BAM SGL
1208 + * @dev: device for which unmapping needs to be done
1209 + * @bam_sgl: bam sgl
1210 + * @nents: number of entries in bam sgl
1211 + * @dir: dma transfer direction
1212 + *
1213 + * This function performs the DMA unmapping for each SGL in BAM SGL
1214 + * with generic SGL API.
1215 + */
1216 +static inline void qcom_bam_unmap_sg(struct device *dev,
1217 + struct qcom_bam_sgl *bam_sgl, int nents, enum dma_data_direction dir)
1218 +{
1219 + int i;
1220 +
1221 + for (i = 0; i < nents; i++)
1222 + dma_unmap_sg(dev, &bam_sgl[i].sgl, 1, dir);
1223 +}
1224 +
1225 +/*
1226 + * qcom_bam_map_sg - Map QCOM BAM SGL
1227 + * @dev: device for which mapping needs to be done
1228 + * @bam_sgl: bam sgl
1229 + * @nents: number of entries in bam sgl
1230 + * @dir: dma transfer direction
1231 + *
1232 + * This function performs the DMA mapping for each SGL in BAM SGL
1233 + * with generic SGL API.
1234 + *
1235 + * returns 0 on error and > 0 on success
1236 + */
1237 +static inline int qcom_bam_map_sg(struct device *dev,
1238 + struct qcom_bam_sgl *bam_sgl, int nents, enum dma_data_direction dir)
1239 +{
1240 + int i, ret = 0;
1241 +
1242 + for (i = 0; i < nents; i++) {
1243 + ret = dma_map_sg(dev, &bam_sgl[i].sgl, 1, dir);
1244 + if (!ret)
1245 + break;
1246 + }
1247 +
1248 + /* unmap the mapped sgl from previous loop in case of error */
1249 + if (!ret)
1250 + qcom_bam_unmap_sg(dev, bam_sgl, i, dir);
1251 +
1252 + return ret;
1253 +}
1254 +
1255 +/*
1256 + * qcom_prep_bam_ce - Wrapper function to prepare a single BAM command element
1257 + * with the data that is passed to this function.
1258 + * @bam_ce: bam command element
1259 + * @addr: target address
1260 + * @command: command in bam_command_type
1261 + * @data: actual data for write and dest addr for read
1262 + */
1263 +static inline void qcom_prep_bam_ce(struct bam_cmd_element *bam_ce,
1264 + uint32_t addr, uint32_t command, uint32_t data)
1265 +{
1266 + bam_ce->addr = cpu_to_le32(addr);
1267 + bam_ce->command = cpu_to_le32(command);
1268 + bam_ce->data = cpu_to_le32(data);
1269 + bam_ce->mask = 0xFFFFFFFF;
1270 +}
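+
+/*
+ * Illustrative usage (editor's sketch; 'dev', 'reg_phys_addr' and 'value'
+ * are assumed to be caller-provided): prepare one register-write command
+ * element and map it as a command sgl, mirroring how the NAND driver uses
+ * these helpers:
+ *
+ *	struct bam_cmd_element ce;
+ *	struct qcom_bam_sgl cmd_sgl;
+ *
+ *	qcom_prep_bam_ce(&ce, reg_phys_addr, BAM_WRITE_COMMAND, value);
+ *	qcom_bam_sg_init_table(&cmd_sgl, 1);
+ *	sg_set_buf(&cmd_sgl.sgl, &ce, sizeof(ce));
+ *	cmd_sgl.dma_flags = DESC_FLAG_CMD;
+ *	if (!qcom_bam_map_sg(dev, &cmd_sgl, 1, DMA_TO_DEVICE))
+ *		return -ENOMEM;
+ */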
1271 +#endif
1272 --
1273 2.7.2