1 From 645c7805f2602569263d7ac78050b2c9e91e3377 Mon Sep 17 00:00:00 2001
2 From: Ram Chandra Jangir <rjangir@codeaurora.org>
3 Date: Thu, 20 Apr 2017 10:23:00 +0530
4 Subject: [PATCH] qcom: mtd: nand: Add BAM transaction and support
7 This patch adds the following for NAND BAM DMA support
8 - Bam transaction which will be used for any NAND request.
9 It contains the array of command elements, command and
10 data sgl. This transaction will be reset before every
12 - Allocation function for NAND BAM transaction which will be
13 called only once at probe time.
14 - Reset function for NAND BAM transaction which will be called
15 before any new NAND request.
16 - Add support for additional CSRs.
17 NAND_READ_LOCATION - page offset for reading in BAM DMA mode
18 NAND_ERASED_CW_DETECT_CFG - status for erased code words
19 NAND_BUFFER_STATUS - status for ECC
21 Signed-off-by: Abhishek Sahu <absahu@codeaurora.org>
22 Signed-off-by: Ram Chandra Jangir <rjangir@codeaurora.org>
24 drivers/mtd/nand/qcom_nandc.c | 631 +++++++++++++++++++++++++++++++++++----
25 include/linux/dma/qcom_bam_dma.h | 149 +++++++++
26 2 files changed, 721 insertions(+), 59 deletions(-)
27 create mode 100644 include/linux/dma/qcom_bam_dma.h
29 --- a/drivers/mtd/nand/qcom_nandc.c
30 +++ b/drivers/mtd/nand/qcom_nandc.c
33 #include <linux/of_device.h>
34 #include <linux/delay.h>
35 +#include <linux/dma/qcom_bam_dma.h>
37 /* NANDc reg offsets */
38 #define NAND_FLASH_CMD 0x00
40 #define NAND_VERSION 0xf08
41 #define NAND_READ_LOCATION_0 0xf20
42 #define NAND_READ_LOCATION_1 0xf24
43 +#define NAND_READ_LOCATION_2 0xf28
44 +#define NAND_READ_LOCATION_3 0xf2c
46 /* dummy register offsets, used by write_reg_dma */
47 #define NAND_DEV_CMD1_RESTORE 0xdead
49 #define ERASED_PAGE (PAGE_ALL_ERASED | PAGE_ERASED)
50 #define ERASED_CW (CODEWORD_ALL_ERASED | CODEWORD_ERASED)
52 +/* NAND_READ_LOCATION_n bits */
53 +#define READ_LOCATION_OFFSET 0
54 +#define READ_LOCATION_SIZE 16
55 +#define READ_LOCATION_LAST 31
58 #define NAND_VERSION_MAJOR_MASK 0xf0000000
59 #define NAND_VERSION_MAJOR_SHIFT 28
61 #define NAND_DEV_CMD_VLD_VAL (READ_START_VLD | WRITE_START_VLD | \
62 ERASE_START_VLD | SEQ_READ_START_VLD)
65 +#define BAM_MODE_EN BIT(0)
68 * the NAND controller performs reads/writes with ECC in 516 byte chunks.
69 * the driver calls the chunks 'step' or 'codeword' interchangeably
71 #define ECC_BCH_4BIT BIT(2)
72 #define ECC_BCH_8BIT BIT(3)
74 +/* Flags used for BAM DMA desc preparation */
75 +/* Don't set the EOT in current tx sgl */
76 +#define DMA_DESC_FLAG_NO_EOT (0x0001)
77 +/* Set the NWD flag in current sgl */
78 +#define DMA_DESC_FLAG_BAM_NWD (0x0002)
79 +/* Close current sgl and start writing in another sgl */
80 +#define DMA_DESC_FLAG_BAM_NEXT_SGL (0x0004)
82 + * Erased codeword status is being used two times in single transfer so this
83 + * flag will determine the current value of erased codeword status register
85 +#define DMA_DESC_ERASED_CW_SET (0x0008)
87 +/* Returns the dma address for reg read buffer */
88 +#define REG_BUF_DMA_ADDR(chip, vaddr) \
89 + ((chip)->reg_read_buf_phys + \
90 + ((uint8_t *)(vaddr) - (uint8_t *)(chip)->reg_read_buf))
92 +/* Returns the nand register physical address */
93 +#define NAND_REG_PHYS_ADDRESS(chip, addr) \
94 + ((chip)->base_dma + (addr))
96 +/* command element array size in bam transaction */
97 +#define BAM_CMD_ELEMENT_SIZE (256)
98 +/* command sgl size in bam transaction */
99 +#define BAM_CMD_SGL_SIZE (256)
100 +/* data sgl size in bam transaction */
101 +#define BAM_DATA_SGL_SIZE (128)
104 + * This data type corresponds to the BAM transaction which will be used for any
106 + * @bam_ce - the array of bam command elements
107 + * @cmd_sgl - sgl for nand bam command pipe
108 + * @tx_sgl - sgl for nand bam consumer pipe
109 + * @rx_sgl - sgl for nand bam producer pipe
110 + * @bam_ce_index - the index in bam_ce which is available for next sgl request
111 + * @pre_bam_ce_index - the index in bam_ce which marks the start position ce
112 + * for current sgl. It will be used for size calculation
114 + * @cmd_sgl_cnt - no of entries in command sgl.
115 + * @tx_sgl_cnt - no of entries in tx sgl.
116 + * @rx_sgl_cnt - no of entries in rx sgl.
118 +struct bam_transaction {
119 + struct bam_cmd_element bam_ce[BAM_CMD_ELEMENT_SIZE];
120 + struct qcom_bam_sgl cmd_sgl[BAM_CMD_SGL_SIZE];
121 + struct qcom_bam_sgl tx_sgl[BAM_DATA_SGL_SIZE];
122 + struct qcom_bam_sgl rx_sgl[BAM_DATA_SGL_SIZE];
123 + uint32_t bam_ce_index;
124 + uint32_t pre_bam_ce_index;
125 + uint32_t cmd_sgl_cnt;
126 + uint32_t tx_sgl_cnt;
127 + uint32_t rx_sgl_cnt;
131 + * This data type corresponds to the nand dma descriptor
132 + * @list - list for desc_info
133 + * @dir - DMA transfer direction
134 + * @sgl - sgl which will be used for single sgl dma descriptor
135 + * @dma_desc - low level dma engine descriptor
136 + * @bam_desc_data - used for bam desc mappings
139 struct list_head node;
141 enum dma_data_direction dir;
142 struct scatterlist sgl;
143 struct dma_async_tx_descriptor *dma_desc;
144 + struct qcom_bam_custom_data bam_desc_data;
148 @@ -210,6 +286,13 @@ struct nandc_regs {
152 + __le32 read_location0;
153 + __le32 read_location1;
154 + __le32 read_location2;
155 + __le32 read_location3;
157 + __le32 erased_cw_detect_cfg_clr;
158 + __le32 erased_cw_detect_cfg_set;
162 @@ -225,6 +308,7 @@ struct nandc_regs {
163 * @aon_clk: another controller clock
166 + * @bam_txn: contains the bam transaction address
167 * @cmd_crci: ADM DMA CRCI for command flow control
168 * @data_crci: ADM DMA CRCI for data flow control
169 * @desc_list: DMA descriptor list (list of desc_infos)
170 @@ -250,6 +334,7 @@ struct nandc_regs {
171 struct qcom_nand_controller {
172 struct nand_hw_control controller;
173 struct list_head host_list;
174 + struct bam_transaction *bam_txn;
178 @@ -350,6 +435,45 @@ struct qcom_nand_driver_data {
179 bool dma_bam_enabled;
182 +/* Allocates and Initializes the BAM transaction */
183 +struct bam_transaction *alloc_bam_transaction(
184 + struct qcom_nand_controller *nandc)
186 + struct bam_transaction *bam_txn;
188 + bam_txn = kzalloc(sizeof(*bam_txn), GFP_KERNEL);
193 + bam_txn->bam_ce_index = 0;
194 + bam_txn->pre_bam_ce_index = 0;
195 + bam_txn->cmd_sgl_cnt = 0;
196 + bam_txn->tx_sgl_cnt = 0;
197 + bam_txn->rx_sgl_cnt = 0;
199 + qcom_bam_sg_init_table(bam_txn->cmd_sgl, BAM_CMD_SGL_SIZE);
200 + qcom_bam_sg_init_table(bam_txn->tx_sgl, BAM_DATA_SGL_SIZE);
201 + qcom_bam_sg_init_table(bam_txn->rx_sgl, BAM_DATA_SGL_SIZE);
206 +/* Clears the BAM transaction index */
207 +void clear_bam_transaction(struct qcom_nand_controller *nandc)
209 + struct bam_transaction *bam_txn = nandc->bam_txn;
211 + if (!nandc->dma_bam_enabled)
214 + bam_txn->bam_ce_index = 0;
215 + bam_txn->pre_bam_ce_index = 0;
216 + bam_txn->cmd_sgl_cnt = 0;
217 + bam_txn->tx_sgl_cnt = 0;
218 + bam_txn->rx_sgl_cnt = 0;
221 static inline struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip)
223 return container_of(chip, struct qcom_nand_host, chip);
224 @@ -406,6 +530,16 @@ static __le32 *offset_to_nandc_reg(struc
225 return ®s->orig_vld;
226 case NAND_EBI2_ECC_BUF_CFG:
227 return ®s->ecc_buf_cfg;
228 + case NAND_BUFFER_STATUS:
229 + return ®s->clrreadstatus;
230 + case NAND_READ_LOCATION_0:
231 + return ®s->read_location0;
232 + case NAND_READ_LOCATION_1:
233 + return ®s->read_location1;
234 + case NAND_READ_LOCATION_2:
235 + return ®s->read_location2;
236 + case NAND_READ_LOCATION_3:
237 + return ®s->read_location3;
241 @@ -447,7 +581,7 @@ static void update_rw_regs(struct qcom_n
243 struct nand_chip *chip = &host->chip;
244 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
245 - u32 cmd, cfg0, cfg1, ecc_bch_cfg;
246 + u32 cmd, cfg0, cfg1, ecc_bch_cfg, read_location0;
250 @@ -464,12 +598,20 @@ static void update_rw_regs(struct qcom_n
253 ecc_bch_cfg = host->ecc_bch_cfg;
255 + read_location0 = (0 << READ_LOCATION_OFFSET) |
256 + (host->cw_data << READ_LOCATION_SIZE) |
257 + (1 << READ_LOCATION_LAST);
259 cfg0 = (host->cfg0_raw & ~(7U << CW_PER_PAGE)) |
260 (num_cw - 1) << CW_PER_PAGE;
262 cfg1 = host->cfg1_raw;
263 ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
265 + read_location0 = (0 << READ_LOCATION_OFFSET) |
266 + (host->cw_size << READ_LOCATION_SIZE) |
267 + (1 << READ_LOCATION_LAST);
270 nandc_set_reg(nandc, NAND_FLASH_CMD, cmd);
271 @@ -480,8 +622,104 @@ static void update_rw_regs(struct qcom_n
272 nandc_set_reg(nandc, NAND_FLASH_STATUS, host->clrflashstatus);
273 nandc_set_reg(nandc, NAND_READ_STATUS, host->clrreadstatus);
274 nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
277 + nandc_set_reg(nandc, NAND_READ_LOCATION_0, read_location0);
281 + * Prepares the command descriptor for BAM DMA which will be used for NAND
282 + * register read and write. The command descriptor requires the command
283 + * to be formed in command element type so this function uses the command
284 + * element from bam transaction ce array and fills the same with required
285 + * data. A single SGL can contain multiple command elements so
286 + * DMA_DESC_FLAG_BAM_NEXT_SGL will be used for starting the separate SGL
287 + * after the current command element.
289 +static int prep_dma_desc_command(struct qcom_nand_controller *nandc, bool read,
290 + int reg_off, const void *vaddr,
291 + int size, unsigned int flags)
295 + struct bam_cmd_element *bam_ce_buffer;
296 + struct bam_transaction *bam_txn = nandc->bam_txn;
298 + bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_index];
300 + /* fill the command desc */
301 + for (i = 0; i < size; i++) {
303 + qcom_prep_bam_ce(&bam_ce_buffer[i],
304 + NAND_REG_PHYS_ADDRESS(nandc, reg_off + 4 * i),
306 + REG_BUF_DMA_ADDR(nandc,
307 + (unsigned int *)vaddr + i));
309 + qcom_prep_bam_ce(&bam_ce_buffer[i],
310 + NAND_REG_PHYS_ADDRESS(nandc, reg_off + 4 * i),
312 + *((unsigned int *)vaddr + i));
316 + /* use the separate sgl after this command */
317 + if (flags & DMA_DESC_FLAG_BAM_NEXT_SGL) {
318 + bam_ce_buffer = &bam_txn->bam_ce[bam_txn->pre_bam_ce_index];
319 + bam_txn->bam_ce_index += size;
320 + bam_ce_size = (bam_txn->bam_ce_index -
321 + bam_txn->pre_bam_ce_index) *
322 + sizeof(struct bam_cmd_element);
323 + sg_set_buf(&bam_txn->cmd_sgl[bam_txn->cmd_sgl_cnt].sgl,
326 + if (flags & DMA_DESC_FLAG_BAM_NWD)
327 + bam_txn->cmd_sgl[bam_txn->cmd_sgl_cnt].dma_flags =
328 + DESC_FLAG_NWD | DESC_FLAG_CMD;
330 + bam_txn->cmd_sgl[bam_txn->cmd_sgl_cnt].dma_flags =
333 + bam_txn->cmd_sgl_cnt++;
334 + bam_txn->pre_bam_ce_index = bam_txn->bam_ce_index;
336 + bam_txn->bam_ce_index += size;
343 + * Prepares the data descriptor for BAM DMA which will be used for NAND
344 + * data read and write.
346 +static int prep_dma_desc_data_bam(struct qcom_nand_controller *nandc, bool read,
347 + int reg_off, const void *vaddr,
348 + int size, unsigned int flags)
350 + struct bam_transaction *bam_txn = nandc->bam_txn;
353 + sg_set_buf(&bam_txn->rx_sgl[bam_txn->rx_sgl_cnt].sgl,
355 + bam_txn->rx_sgl[bam_txn->rx_sgl_cnt].dma_flags = 0;
356 + bam_txn->rx_sgl_cnt++;
358 + sg_set_buf(&bam_txn->tx_sgl[bam_txn->tx_sgl_cnt].sgl,
360 + if (flags & DMA_DESC_FLAG_NO_EOT)
361 + bam_txn->tx_sgl[bam_txn->tx_sgl_cnt].dma_flags = 0;
363 + bam_txn->tx_sgl[bam_txn->tx_sgl_cnt].dma_flags =
366 + bam_txn->tx_sgl_cnt++;
372 +/* Prepares the dma descriptor for adm dma engine */
373 static int prep_dma_desc(struct qcom_nand_controller *nandc, bool read,
374 int reg_off, const void *vaddr, int size,
376 @@ -560,7 +798,7 @@ err:
377 * @num_regs: number of registers to read
379 static int read_reg_dma(struct qcom_nand_controller *nandc, int first,
381 + int num_regs, unsigned int flags)
383 bool flow_control = false;
385 @@ -569,10 +807,18 @@ static int read_reg_dma(struct qcom_nand
386 if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
389 - size = num_regs * sizeof(u32);
390 vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
391 nandc->reg_read_pos += num_regs;
393 + if (nandc->dma_bam_enabled) {
396 + return prep_dma_desc_command(nandc, true, first, vaddr, size,
400 + size = num_regs * sizeof(u32);
402 return prep_dma_desc(nandc, true, first, vaddr, size, flow_control);
405 @@ -584,7 +830,7 @@ static int read_reg_dma(struct qcom_nand
406 * @num_regs: number of registers to write
408 static int write_reg_dma(struct qcom_nand_controller *nandc, int first,
410 + int num_regs, unsigned int flags)
412 bool flow_control = false;
413 struct nandc_regs *regs = nandc->regs;
414 @@ -596,12 +842,29 @@ static int write_reg_dma(struct qcom_nan
415 if (first == NAND_FLASH_CMD)
418 + if (first == NAND_ERASED_CW_DETECT_CFG) {
419 + if (flags & DMA_DESC_ERASED_CW_SET)
420 + vaddr = ®s->erased_cw_detect_cfg_set;
422 + vaddr = ®s->erased_cw_detect_cfg_clr;
425 + if (first == NAND_EXEC_CMD)
426 + flags |= DMA_DESC_FLAG_BAM_NWD;
428 if (first == NAND_DEV_CMD1_RESTORE)
429 first = NAND_DEV_CMD1;
431 if (first == NAND_DEV_CMD_VLD_RESTORE)
432 first = NAND_DEV_CMD_VLD;
434 + if (nandc->dma_bam_enabled) {
437 + return prep_dma_desc_command(nandc, false, first, vaddr, size,
441 size = num_regs * sizeof(u32);
443 return prep_dma_desc(nandc, false, first, vaddr, size, flow_control);
444 @@ -616,8 +879,12 @@ static int write_reg_dma(struct qcom_nan
445 * @size: DMA transaction size in bytes
447 static int read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
448 - const u8 *vaddr, int size)
449 + const u8 *vaddr, int size, unsigned int flags)
451 + if (nandc->dma_bam_enabled)
452 + return prep_dma_desc_data_bam(nandc, true, reg_off, vaddr, size,
455 return prep_dma_desc(nandc, true, reg_off, vaddr, size, false);
458 @@ -630,8 +897,12 @@ static int read_data_dma(struct qcom_nan
459 * @size: DMA transaction size in bytes
461 static int write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
462 - const u8 *vaddr, int size)
463 + const u8 *vaddr, int size, unsigned int flags)
465 + if (nandc->dma_bam_enabled)
466 + return prep_dma_desc_data_bam(nandc, false, reg_off, vaddr,
469 return prep_dma_desc(nandc, false, reg_off, vaddr, size, false);
472 @@ -641,14 +912,57 @@ static int write_data_dma(struct qcom_na
474 static void config_cw_read(struct qcom_nand_controller *nandc)
476 - write_reg_dma(nandc, NAND_FLASH_CMD, 3);
477 - write_reg_dma(nandc, NAND_DEV0_CFG0, 3);
478 - write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1);
480 - write_reg_dma(nandc, NAND_EXEC_CMD, 1);
481 + write_reg_dma(nandc, NAND_FLASH_CMD, 3, 0);
482 + write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
483 + write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1, 0);
485 + write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1, 0);
486 + write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1,
487 + DMA_DESC_ERASED_CW_SET);
488 + if (nandc->dma_bam_enabled)
489 + write_reg_dma(nandc, NAND_READ_LOCATION_0, 1,
490 + DMA_DESC_FLAG_BAM_NEXT_SGL);
492 - read_reg_dma(nandc, NAND_FLASH_STATUS, 2);
493 - read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1);
495 + write_reg_dma(nandc, NAND_EXEC_CMD, 1, DMA_DESC_FLAG_BAM_NWD |
496 + DMA_DESC_FLAG_BAM_NEXT_SGL);
498 + read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0);
499 + read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1,
500 + DMA_DESC_FLAG_BAM_NEXT_SGL);
504 + * Helpers to prepare DMA descriptors for configuring registers
505 + * before reading a NAND page with BAM.
507 +static void config_bam_page_read(struct qcom_nand_controller *nandc)
509 + write_reg_dma(nandc, NAND_FLASH_CMD, 3, 0);
510 + write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
511 + write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1, 0);
512 + write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1, 0);
513 + write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1,
514 + DMA_DESC_ERASED_CW_SET |
515 + DMA_DESC_FLAG_BAM_NEXT_SGL);
519 + * Helpers to prepare DMA descriptors for configuring registers
520 + * before reading each codeword in NAND page with BAM.
522 +static void config_bam_cw_read(struct qcom_nand_controller *nandc)
524 + if (nandc->dma_bam_enabled)
525 + write_reg_dma(nandc, NAND_READ_LOCATION_0, 4, 0);
527 + write_reg_dma(nandc, NAND_FLASH_CMD, 1, DMA_DESC_FLAG_BAM_NEXT_SGL);
528 + write_reg_dma(nandc, NAND_EXEC_CMD, 1, DMA_DESC_FLAG_BAM_NEXT_SGL);
530 + read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0);
531 + read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1,
532 + DMA_DESC_FLAG_BAM_NEXT_SGL);
536 @@ -657,19 +971,20 @@ static void config_cw_read(struct qcom_n
538 static void config_cw_write_pre(struct qcom_nand_controller *nandc)
540 - write_reg_dma(nandc, NAND_FLASH_CMD, 3);
541 - write_reg_dma(nandc, NAND_DEV0_CFG0, 3);
542 - write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1);
543 + write_reg_dma(nandc, NAND_FLASH_CMD, 3, 0);
544 + write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
545 + write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1,
546 + DMA_DESC_FLAG_BAM_NEXT_SGL);
549 static void config_cw_write_post(struct qcom_nand_controller *nandc)
551 - write_reg_dma(nandc, NAND_EXEC_CMD, 1);
552 + write_reg_dma(nandc, NAND_EXEC_CMD, 1, DMA_DESC_FLAG_BAM_NEXT_SGL);
554 - read_reg_dma(nandc, NAND_FLASH_STATUS, 1);
555 + read_reg_dma(nandc, NAND_FLASH_STATUS, 1, DMA_DESC_FLAG_BAM_NEXT_SGL);
557 - write_reg_dma(nandc, NAND_FLASH_STATUS, 1);
558 - write_reg_dma(nandc, NAND_READ_STATUS, 1);
559 + write_reg_dma(nandc, NAND_FLASH_STATUS, 1, 0);
560 + write_reg_dma(nandc, NAND_READ_STATUS, 1, DMA_DESC_FLAG_BAM_NEXT_SGL);
564 @@ -683,6 +998,8 @@ static int nandc_param(struct qcom_nand_
565 struct nand_chip *chip = &host->chip;
566 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
568 + clear_bam_transaction(nandc);
571 * NAND_CMD_PARAM is called before we know much about the FLASH chip
572 * in use. we configure the controller to perform a raw read of 512
573 @@ -715,9 +1032,13 @@ static int nandc_param(struct qcom_nand_
575 nandc_set_reg(nandc, NAND_DEV_CMD1_RESTORE, nandc->cmd1);
576 nandc_set_reg(nandc, NAND_DEV_CMD_VLD_RESTORE, nandc->vld);
577 + nandc_set_reg(nandc, NAND_READ_LOCATION_0,
578 + (0 << READ_LOCATION_OFFSET) |
579 + (512 << READ_LOCATION_SIZE) |
580 + (1 << READ_LOCATION_LAST));
582 - write_reg_dma(nandc, NAND_DEV_CMD_VLD, 1);
583 - write_reg_dma(nandc, NAND_DEV_CMD1, 1);
584 + write_reg_dma(nandc, NAND_DEV_CMD_VLD, 1, 0);
585 + write_reg_dma(nandc, NAND_DEV_CMD1, 1, DMA_DESC_FLAG_BAM_NEXT_SGL);
587 nandc->buf_count = 512;
588 memset(nandc->data_buffer, 0xff, nandc->buf_count);
589 @@ -725,11 +1046,12 @@ static int nandc_param(struct qcom_nand_
590 config_cw_read(nandc);
592 read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
594 + nandc->buf_count, 0);
596 /* restore CMD1 and VLD regs */
597 - write_reg_dma(nandc, NAND_DEV_CMD1_RESTORE, 1);
598 - write_reg_dma(nandc, NAND_DEV_CMD_VLD_RESTORE, 1);
599 + write_reg_dma(nandc, NAND_DEV_CMD1_RESTORE, 1, 0);
600 + write_reg_dma(nandc, NAND_DEV_CMD_VLD_RESTORE, 1,
601 + DMA_DESC_FLAG_BAM_NEXT_SGL);
605 @@ -740,6 +1062,8 @@ static int erase_block(struct qcom_nand_
606 struct nand_chip *chip = &host->chip;
607 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
609 + clear_bam_transaction(nandc);
611 nandc_set_reg(nandc, NAND_FLASH_CMD,
612 BLOCK_ERASE | PAGE_ACC | LAST_PAGE);
613 nandc_set_reg(nandc, NAND_ADDR0, page_addr);
614 @@ -751,14 +1075,15 @@ static int erase_block(struct qcom_nand_
615 nandc_set_reg(nandc, NAND_FLASH_STATUS, host->clrflashstatus);
616 nandc_set_reg(nandc, NAND_READ_STATUS, host->clrreadstatus);
618 - write_reg_dma(nandc, NAND_FLASH_CMD, 3);
619 - write_reg_dma(nandc, NAND_DEV0_CFG0, 2);
620 - write_reg_dma(nandc, NAND_EXEC_CMD, 1);
622 - read_reg_dma(nandc, NAND_FLASH_STATUS, 1);
623 + write_reg_dma(nandc, NAND_FLASH_CMD, 3, DMA_DESC_FLAG_BAM_NEXT_SGL);
624 + write_reg_dma(nandc, NAND_DEV0_CFG0, 2, DMA_DESC_FLAG_BAM_NEXT_SGL);
625 + write_reg_dma(nandc, NAND_EXEC_CMD, 1, DMA_DESC_FLAG_BAM_NEXT_SGL);
627 + read_reg_dma(nandc, NAND_FLASH_STATUS, 1, DMA_DESC_FLAG_BAM_NEXT_SGL);
629 - write_reg_dma(nandc, NAND_FLASH_STATUS, 1);
630 - write_reg_dma(nandc, NAND_READ_STATUS, 1);
631 + write_reg_dma(nandc, NAND_FLASH_STATUS, 1, 0);
632 + write_reg_dma(nandc, NAND_READ_STATUS, 1, DMA_DESC_FLAG_BAM_NEXT_SGL);
636 @@ -772,16 +1097,19 @@ static int read_id(struct qcom_nand_host
640 + clear_bam_transaction(nandc);
642 nandc_set_reg(nandc, NAND_FLASH_CMD, FETCH_ID);
643 nandc_set_reg(nandc, NAND_ADDR0, column);
644 nandc_set_reg(nandc, NAND_ADDR1, 0);
645 - nandc_set_reg(nandc, NAND_FLASH_CHIP_SELECT, DM_EN);
646 + nandc_set_reg(nandc, NAND_FLASH_CHIP_SELECT,
647 + nandc->dma_bam_enabled ? 0 : DM_EN);
648 nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
650 - write_reg_dma(nandc, NAND_FLASH_CMD, 4);
651 - write_reg_dma(nandc, NAND_EXEC_CMD, 1);
652 + write_reg_dma(nandc, NAND_FLASH_CMD, 4, DMA_DESC_FLAG_BAM_NEXT_SGL);
653 + write_reg_dma(nandc, NAND_EXEC_CMD, 1, DMA_DESC_FLAG_BAM_NEXT_SGL);
655 - read_reg_dma(nandc, NAND_READ_ID, 1);
656 + read_reg_dma(nandc, NAND_READ_ID, 1, DMA_DESC_FLAG_BAM_NEXT_SGL);
660 @@ -792,28 +1120,108 @@ static int reset(struct qcom_nand_host *
661 struct nand_chip *chip = &host->chip;
662 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
664 + clear_bam_transaction(nandc);
666 nandc_set_reg(nandc, NAND_FLASH_CMD, RESET_DEVICE);
667 nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
669 - write_reg_dma(nandc, NAND_FLASH_CMD, 1);
670 - write_reg_dma(nandc, NAND_EXEC_CMD, 1);
671 + write_reg_dma(nandc, NAND_FLASH_CMD, 1, DMA_DESC_FLAG_BAM_NEXT_SGL);
672 + write_reg_dma(nandc, NAND_EXEC_CMD, 1, DMA_DESC_FLAG_BAM_NEXT_SGL);
674 - read_reg_dma(nandc, NAND_FLASH_STATUS, 1);
675 + read_reg_dma(nandc, NAND_FLASH_STATUS, 1, DMA_DESC_FLAG_BAM_NEXT_SGL);
680 +static int prepare_bam_async_desc(struct qcom_nand_controller *nandc,
681 + struct dma_chan *chan,
682 + struct qcom_bam_sgl *bam_sgl,
684 + enum dma_transfer_direction direction)
686 + struct desc_info *desc;
687 + struct dma_async_tx_descriptor *dma_desc;
689 + if (!qcom_bam_map_sg(nandc->dev, bam_sgl, sgl_cnt, direction)) {
690 + dev_err(nandc->dev, "failure in mapping sgl\n");
694 + desc = kzalloc(sizeof(*desc), GFP_KERNEL);
696 + qcom_bam_unmap_sg(nandc->dev, bam_sgl, sgl_cnt, direction);
701 + desc->bam_desc_data.dir = direction;
702 + desc->bam_desc_data.sgl_cnt = sgl_cnt;
703 + desc->bam_desc_data.bam_sgl = bam_sgl;
705 + dma_desc = dmaengine_prep_dma_custom_mapping(chan,
706 + &desc->bam_desc_data,
710 + dev_err(nandc->dev, "failure in cmd prep desc\n");
711 + qcom_bam_unmap_sg(nandc->dev, bam_sgl, sgl_cnt, direction);
716 + desc->dma_desc = dma_desc;
718 + list_add_tail(&desc->node, &nandc->desc_list);
724 /* helpers to submit/free our list of dma descriptors */
725 static int submit_descs(struct qcom_nand_controller *nandc)
727 struct desc_info *desc;
728 dma_cookie_t cookie = 0;
729 + struct bam_transaction *bam_txn = nandc->bam_txn;
732 + if (nandc->dma_bam_enabled) {
733 + if (bam_txn->rx_sgl_cnt) {
734 + r = prepare_bam_async_desc(nandc, nandc->rx_chan,
735 + bam_txn->rx_sgl, bam_txn->rx_sgl_cnt,
741 + if (bam_txn->tx_sgl_cnt) {
742 + r = prepare_bam_async_desc(nandc, nandc->tx_chan,
743 + bam_txn->tx_sgl, bam_txn->tx_sgl_cnt,
749 + r = prepare_bam_async_desc(nandc, nandc->cmd_chan,
750 + bam_txn->cmd_sgl, bam_txn->cmd_sgl_cnt,
756 list_for_each_entry(desc, &nandc->desc_list, node)
757 cookie = dmaengine_submit(desc->dma_desc);
759 - if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
761 + if (nandc->dma_bam_enabled) {
762 + dma_async_issue_pending(nandc->tx_chan);
763 + dma_async_issue_pending(nandc->rx_chan);
765 + if (dma_sync_wait(nandc->cmd_chan, cookie) != DMA_COMPLETE)
768 + if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
774 @@ -824,7 +1232,16 @@ static void free_descs(struct qcom_nand_
776 list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
777 list_del(&desc->node);
778 - dma_unmap_sg(nandc->dev, &desc->sgl, 1, desc->dir);
780 + if (nandc->dma_bam_enabled)
781 + qcom_bam_unmap_sg(nandc->dev,
782 + desc->bam_desc_data.bam_sgl,
783 + desc->bam_desc_data.sgl_cnt,
784 + desc->bam_desc_data.dir);
786 + dma_unmap_sg(nandc->dev, &desc->sgl, 1,
792 @@ -1135,6 +1552,9 @@ static int read_page_ecc(struct qcom_nan
793 struct nand_ecc_ctrl *ecc = &chip->ecc;
796 + if (nandc->dma_bam_enabled)
797 + config_bam_page_read(nandc);
799 /* queue cmd descs for each codeword */
800 for (i = 0; i < ecc->steps; i++) {
801 int data_size, oob_size;
802 @@ -1148,11 +1568,36 @@ static int read_page_ecc(struct qcom_nan
803 oob_size = host->ecc_bytes_hw + host->spare_bytes;
806 - config_cw_read(nandc);
807 + if (nandc->dma_bam_enabled) {
808 + if (data_buf && oob_buf) {
809 + nandc_set_reg(nandc, NAND_READ_LOCATION_0,
810 + (0 << READ_LOCATION_OFFSET) |
811 + (data_size << READ_LOCATION_SIZE) |
812 + (0 << READ_LOCATION_LAST));
813 + nandc_set_reg(nandc, NAND_READ_LOCATION_1,
814 + (data_size << READ_LOCATION_OFFSET) |
815 + (oob_size << READ_LOCATION_SIZE) |
816 + (1 << READ_LOCATION_LAST));
817 + } else if (data_buf) {
818 + nandc_set_reg(nandc, NAND_READ_LOCATION_0,
819 + (0 << READ_LOCATION_OFFSET) |
820 + (data_size << READ_LOCATION_SIZE) |
821 + (1 << READ_LOCATION_LAST));
823 + nandc_set_reg(nandc, NAND_READ_LOCATION_0,
824 + (data_size << READ_LOCATION_OFFSET) |
825 + (oob_size << READ_LOCATION_SIZE) |
826 + (1 << READ_LOCATION_LAST));
829 + config_bam_cw_read(nandc);
831 + config_cw_read(nandc);
835 read_data_dma(nandc, FLASH_BUF_ACC, data_buf,
840 * when ecc is enabled, the controller doesn't read the real
841 @@ -1168,7 +1613,7 @@ static int read_page_ecc(struct qcom_nan
844 read_data_dma(nandc, FLASH_BUF_ACC + data_size,
845 - oob_buf, oob_size);
846 + oob_buf, oob_size, 0);
850 @@ -1207,10 +1652,14 @@ static int copy_last_cw(struct qcom_nand
852 set_address(host, host->cw_size * (ecc->steps - 1), page);
853 update_rw_regs(host, 1, true);
854 + nandc_set_reg(nandc, NAND_READ_LOCATION_0,
855 + (0 << READ_LOCATION_OFFSET) |
856 + (size << READ_LOCATION_SIZE) |
857 + (1 << READ_LOCATION_LAST));
859 config_cw_read(nandc);
861 - read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size);
862 + read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size, 0);
864 ret = submit_descs(nandc);
866 @@ -1233,6 +1682,7 @@ static int qcom_nandc_read_page(struct m
868 oob_buf = oob_required ? chip->oob_poi : NULL;
870 + clear_bam_transaction(nandc);
871 ret = read_page_ecc(host, data_buf, oob_buf);
873 dev_err(nandc->dev, "failure to read page\n");
874 @@ -1252,13 +1702,19 @@ static int qcom_nandc_read_page_raw(stru
875 u8 *data_buf, *oob_buf;
876 struct nand_ecc_ctrl *ecc = &chip->ecc;
881 oob_buf = chip->oob_poi;
883 host->use_ecc = false;
885 + clear_bam_transaction(nandc);
886 update_rw_regs(host, ecc->steps, true);
888 + if (nandc->dma_bam_enabled)
889 + config_bam_page_read(nandc);
891 for (i = 0; i < ecc->steps; i++) {
892 int data_size1, data_size2, oob_size1, oob_size2;
893 int reg_off = FLASH_BUF_ACC;
894 @@ -1276,21 +1732,49 @@ static int qcom_nandc_read_page_raw(stru
895 oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
898 - config_cw_read(nandc);
899 + if (nandc->dma_bam_enabled) {
901 + nandc_set_reg(nandc, NAND_READ_LOCATION_0,
902 + (read_location << READ_LOCATION_OFFSET) |
903 + (data_size1 << READ_LOCATION_SIZE) |
904 + (0 << READ_LOCATION_LAST));
905 + read_location += data_size1;
907 + nandc_set_reg(nandc, NAND_READ_LOCATION_1,
908 + (read_location << READ_LOCATION_OFFSET) |
909 + (oob_size1 << READ_LOCATION_SIZE) |
910 + (0 << READ_LOCATION_LAST));
911 + read_location += oob_size1;
913 + nandc_set_reg(nandc, NAND_READ_LOCATION_2,
914 + (read_location << READ_LOCATION_OFFSET) |
915 + (data_size2 << READ_LOCATION_SIZE) |
916 + (0 << READ_LOCATION_LAST));
917 + read_location += data_size2;
919 + nandc_set_reg(nandc, NAND_READ_LOCATION_3,
920 + (read_location << READ_LOCATION_OFFSET) |
921 + (oob_size2 << READ_LOCATION_SIZE) |
922 + (1 << READ_LOCATION_LAST));
924 + config_bam_cw_read(nandc);
926 + config_cw_read(nandc);
929 - read_data_dma(nandc, reg_off, data_buf, data_size1);
930 + read_data_dma(nandc, reg_off, data_buf, data_size1, 0);
931 reg_off += data_size1;
932 data_buf += data_size1;
934 - read_data_dma(nandc, reg_off, oob_buf, oob_size1);
935 + read_data_dma(nandc, reg_off, oob_buf, oob_size1, 0);
936 reg_off += oob_size1;
937 oob_buf += oob_size1;
939 - read_data_dma(nandc, reg_off, data_buf, data_size2);
940 + read_data_dma(nandc, reg_off, data_buf, data_size2, 0);
941 reg_off += data_size2;
942 data_buf += data_size2;
944 - read_data_dma(nandc, reg_off, oob_buf, oob_size2);
945 + read_data_dma(nandc, reg_off, oob_buf, oob_size2, 0);
946 oob_buf += oob_size2;
949 @@ -1313,6 +1797,7 @@ static int qcom_nandc_read_oob(struct mt
952 clear_read_regs(nandc);
953 + clear_bam_transaction(nandc);
955 host->use_ecc = true;
956 set_address(host, 0, page);
957 @@ -1336,6 +1821,7 @@ static int qcom_nandc_write_page(struct
960 clear_read_regs(nandc);
961 + clear_bam_transaction(nandc);
963 data_buf = (u8 *)buf;
964 oob_buf = chip->oob_poi;
965 @@ -1357,7 +1843,8 @@ static int qcom_nandc_write_page(struct
967 config_cw_write_pre(nandc);
969 - write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size);
970 + write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size,
971 + i == (ecc->steps - 1) ? DMA_DESC_FLAG_NO_EOT : 0);
974 * when ECC is enabled, we don't really need to write anything
975 @@ -1370,7 +1857,7 @@ static int qcom_nandc_write_page(struct
976 oob_buf += host->bbm_size;
978 write_data_dma(nandc, FLASH_BUF_ACC + data_size,
979 - oob_buf, oob_size);
980 + oob_buf, oob_size, 0);
983 config_cw_write_post(nandc);
984 @@ -1400,6 +1887,7 @@ static int qcom_nandc_write_page_raw(str
987 clear_read_regs(nandc);
988 + clear_bam_transaction(nandc);
990 data_buf = (u8 *)buf;
991 oob_buf = chip->oob_poi;
992 @@ -1426,19 +1914,22 @@ static int qcom_nandc_write_page_raw(str
994 config_cw_write_pre(nandc);
996 - write_data_dma(nandc, reg_off, data_buf, data_size1);
997 + write_data_dma(nandc, reg_off, data_buf, data_size1,
998 + DMA_DESC_FLAG_NO_EOT);
999 reg_off += data_size1;
1000 data_buf += data_size1;
1002 - write_data_dma(nandc, reg_off, oob_buf, oob_size1);
1003 + write_data_dma(nandc, reg_off, oob_buf, oob_size1,
1004 + DMA_DESC_FLAG_NO_EOT);
1005 reg_off += oob_size1;
1006 oob_buf += oob_size1;
1008 - write_data_dma(nandc, reg_off, data_buf, data_size2);
1009 + write_data_dma(nandc, reg_off, data_buf, data_size2,
1010 + DMA_DESC_FLAG_NO_EOT);
1011 reg_off += data_size2;
1012 data_buf += data_size2;
1014 - write_data_dma(nandc, reg_off, oob_buf, oob_size2);
1015 + write_data_dma(nandc, reg_off, oob_buf, oob_size2, 0);
1016 oob_buf += oob_size2;
1018 config_cw_write_post(nandc);
1019 @@ -1474,6 +1965,7 @@ static int qcom_nandc_write_oob(struct m
1021 host->use_ecc = true;
1023 + clear_bam_transaction(nandc);
1024 ret = copy_last_cw(host, page);
1027 @@ -1493,7 +1985,7 @@ static int qcom_nandc_write_oob(struct m
1029 config_cw_write_pre(nandc);
1030 write_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
1031 - data_size + oob_size);
1032 + data_size + oob_size, 0);
1033 config_cw_write_post(nandc);
1035 ret = submit_descs(nandc);
1036 @@ -1531,6 +2023,7 @@ static int qcom_nandc_block_bad(struct m
1038 host->use_ecc = false;
1040 + clear_bam_transaction(nandc);
1041 ret = copy_last_cw(host, page);
1044 @@ -1561,6 +2054,7 @@ static int qcom_nandc_block_markbad(stru
1045 int page, ret, status = 0;
1047 clear_read_regs(nandc);
1048 + clear_bam_transaction(nandc);
1051 * to mark the BBM as bad, we flash the entire last codeword with 0s.
1052 @@ -1577,7 +2071,8 @@ static int qcom_nandc_block_markbad(stru
1053 update_rw_regs(host, 1, false);
1055 config_cw_write_pre(nandc);
1056 - write_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, host->cw_size);
1057 + write_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
1058 + host->cw_size, 0);
1059 config_cw_write_post(nandc);
1061 ret = submit_descs(nandc);
1062 @@ -1937,6 +2432,8 @@ static int qcom_nand_host_setup(struct q
1064 host->clrflashstatus = FS_READY_BSY_N;
1065 host->clrreadstatus = 0xc0;
1066 + nandc->regs->erased_cw_detect_cfg_clr = CLR_ERASED_PAGE_DET;
1067 + nandc->regs->erased_cw_detect_cfg_set = SET_ERASED_PAGE_DET;
1070 "cfg0 %x cfg1 %x ecc_buf_cfg %x ecc_bch cfg %x cw_size %d cw_data %d strength %d parity_bytes %d steps %d\n",
1071 @@ -2015,6 +2512,12 @@ static int qcom_nandc_alloc(struct qcom_
1072 dev_err(nandc->dev, "failed to request cmd channel\n");
1076 + nandc->bam_txn = alloc_bam_transaction(nandc);
1077 + if (!nandc->bam_txn) {
1078 + dev_err(nandc->dev, "failed to allocate bam transaction\n");
1083 INIT_LIST_HEAD(&nandc->desc_list);
1084 @@ -2050,6 +2553,9 @@ static void qcom_nandc_unalloc(struct qc
1085 devm_kfree(nandc->dev, nandc->reg_read_buf);
1088 + if (nandc->bam_txn)
1089 + devm_kfree(nandc->dev, nandc->bam_txn);
1092 devm_kfree(nandc->dev, nandc->regs);
1094 @@ -2060,12 +2566,19 @@ static void qcom_nandc_unalloc(struct qc
1095 /* one time setup of a few nand controller registers */
1096 static int qcom_nandc_setup(struct qcom_nand_controller *nandc)
1101 nandc_write(nandc, SFLASHC_BURST_CFG, 0);
1102 nandc_write(nandc, NAND_DEV_CMD_VLD, NAND_DEV_CMD_VLD_VAL);
1104 - /* enable ADM DMA */
1105 - nandc_write(nandc, NAND_FLASH_CHIP_SELECT, DM_EN);
1106 + /* enable ADM or BAM DMA */
1107 + if (!nandc->dma_bam_enabled) {
1108 + nandc_write(nandc, NAND_FLASH_CHIP_SELECT, DM_EN);
1110 + nand_ctrl = nandc_read(nandc, NAND_CTRL);
1111 + nandc_write(nandc, NAND_CTRL, nand_ctrl | BAM_MODE_EN);
1114 /* save the original values of these registers */
1115 nandc->cmd1 = nandc_read(nandc, NAND_DEV_CMD1);
1117 +++ b/include/linux/dma/qcom_bam_dma.h
1120 + * Copyright (c) 2017, The Linux Foundation. All rights reserved.
1122 + * Permission to use, copy, modify, and/or distribute this software for any
1123 + * purpose with or without fee is hereby granted, provided that the above
1124 + * copyright notice and this permission notice appear in all copies.
1126 + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
1127 + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
1128 + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
1129 + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
1130 + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
1131 + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
1132 + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
1135 +#ifndef _QCOM_BAM_DMA_H
1136 +#define _QCOM_BAM_DMA_H
1138 +#include <linux/dma-mapping.h>
1140 +#define DESC_FLAG_INT BIT(15)
1141 +#define DESC_FLAG_EOT BIT(14)
1142 +#define DESC_FLAG_EOB BIT(13)
1143 +#define DESC_FLAG_NWD BIT(12)
1144 +#define DESC_FLAG_CMD BIT(11)
1147 + * QCOM BAM DMA SGL struct
1150 + * @dma_flags: BAM DMA flags
1152 +struct qcom_bam_sgl {
1153 + struct scatterlist sgl;
1154 + unsigned int dma_flags;
1158 + * This data type corresponds to the native Command Element
1159 + * supported by BAM DMA Engine.
1161 + * @addr - register address.
1162 + * @command - command type.
1163 + * @data - for write command: content to be written into peripheral register.
1164 + * for read command: dest addr to write peripheral register value to.
1165 + * @mask - register mask.
1166 + * @reserved - for future usage.
1169 +struct bam_cmd_element {
1178 + * This enum indicates the command type in a command element
1180 +enum bam_command_type {
1181 + BAM_WRITE_COMMAND = 0,
1186 + * qcom_bam_sg_init_table - Init QCOM BAM SGL
1187 + * @bam_sgl: bam sgl
1188 + * @nents: number of entries in bam sgl
1190 + * This function performs the initialization for each SGL in BAM SGL
1191 + * with generic SGL API.
1193 +static inline void qcom_bam_sg_init_table(struct qcom_bam_sgl *bam_sgl,
1194 + unsigned int nents)
1198 + for (i = 0; i < nents; i++)
1199 + sg_init_table(&bam_sgl[i].sgl, 1);
1203 + * qcom_bam_unmap_sg - Unmap QCOM BAM SGL
1204 + * @dev: device for which unmapping needs to be done
1205 + * @bam_sgl: bam sgl
1206 + * @nents: number of entries in bam sgl
1207 + * @dir: dma transfer direction
1209 + * This function performs the DMA unmapping for each SGL in BAM SGL
1210 + * with generic SGL API.
1212 +static inline void qcom_bam_unmap_sg(struct device *dev,
1213 + struct qcom_bam_sgl *bam_sgl, int nents, enum dma_data_direction dir)
1217 + for (i = 0; i < nents; i++)
1218 + dma_unmap_sg(dev, &bam_sgl[i].sgl, 1, dir);
1222 + * qcom_bam_map_sg - Map QCOM BAM SGL
1223 + * @dev: device for which mapping needs to be done
1224 + * @bam_sgl: bam sgl
1225 + * @nents: number of entries in bam sgl
1226 + * @dir: dma transfer direction
1228 + * This function performs the DMA mapping for each SGL in BAM SGL
1229 + * with generic SGL API.
1231 + * Return: 0 on error and > 0 on success
1233 +static inline int qcom_bam_map_sg(struct device *dev,
1234 + struct qcom_bam_sgl *bam_sgl, int nents, enum dma_data_direction dir)
1238 + for (i = 0; i < nents; i++) {
1239 + ret = dma_map_sg(dev, &bam_sgl[i].sgl, 1, dir);
1244 + /* on error, unmap the sgls mapped in the previous iterations */
1246 + qcom_bam_unmap_sg(dev, bam_sgl, i, dir);
1252 + * qcom_prep_bam_ce - populate a single BAM command element
1253 + * with the supplied register address, command type and data.
1254 + * @bam_ce: bam command element
1255 + * @addr: target address
1256 + * @command: command in bam_command_type
1257 + * @data: actual data for write and dest addr for read
1259 +static inline void qcom_prep_bam_ce(struct bam_cmd_element *bam_ce,
1260 + uint32_t addr, uint32_t command, uint32_t data)
1262 + bam_ce->addr = cpu_to_le32(addr);
1263 + bam_ce->command = cpu_to_le32(command);
1264 + bam_ce->data = cpu_to_le32(data);
1265 + bam_ce->mask = 0xFFFFFFFF;