From 35307c0bbf030f9c489db85253862d250e56b102 Mon Sep 17 00:00:00 2001
From: Ram Chandra Jangir
Date: Thu, 25 May 2017 05:01:02 +0530
Subject: [PATCH] ipq806x: Add support for custom data mapping in bam_dma dmaengine

This change adds a new function to support preparing DMA
descriptors for custom data.

Signed-off-by: Ram Chandra Jangir
---
 ...qcom-bam_dma-Add-custom-data-mapping.patch | 217 ++++++++++++++++++
 1 file changed, 217 insertions(+)
 create mode 100644 target/linux/ipq806x/patches-4.9/862-dmaengine-qcom-bam_dma-Add-custom-data-mapping.patch

diff --git a/target/linux/ipq806x/patches-4.9/862-dmaengine-qcom-bam_dma-Add-custom-data-mapping.patch b/target/linux/ipq806x/patches-4.9/862-dmaengine-qcom-bam_dma-Add-custom-data-mapping.patch
new file mode 100644
index 0000000000..3b26120947
--- /dev/null
+++ b/target/linux/ipq806x/patches-4.9/862-dmaengine-qcom-bam_dma-Add-custom-data-mapping.patch
@@ -0,0 +1,217 @@
+From 5a7ccdf845d64b385affdcffaf2defbe9848be15 Mon Sep 17 00:00:00 2001
+From: Ram Chandra Jangir
+Date: Thu, 20 Apr 2017 10:39:00 +0530
+Subject: [PATCH] dmaengine: qcom: bam_dma: Add custom data mapping
+
+Add a new function to support preparing DMA descriptors
+for custom data.
+
+Signed-off-by: Abhishek Sahu
+Signed-off-by: Ram Chandra Jangir
+---
+ drivers/dma/qcom/bam_dma.c       | 97 +++++++++++++++++++++++++++++++++++++---
+ include/linux/dma/qcom_bam_dma.h | 14 ++++++
+ include/linux/dmaengine.h        | 14 ++++++
+ 3 files changed, 119 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
+index 03c4eb3..bde8d70 100644
+--- a/drivers/dma/qcom/bam_dma.c
++++ b/drivers/dma/qcom/bam_dma.c
+@@ -49,6 +49,7 @@
+ #include <linux/clk.h>
+ #include <linux/dmaengine.h>
+ #include <linux/pm_runtime.h>
++#include <linux/dma/qcom_bam_dma.h>
+ 
+ #include "../dmaengine.h"
+ #include "../virt-dma.h"
+@@ -61,11 +62,6 @@ struct bam_desc_hw {
+ 
+ #define BAM_DMA_AUTOSUSPEND_DELAY 100
+ 
+-#define DESC_FLAG_INT BIT(15)
+-#define DESC_FLAG_EOT BIT(14)
+-#define DESC_FLAG_EOB BIT(13)
+-#define DESC_FLAG_NWD BIT(12)
+-
+ struct bam_async_desc {
+ 	struct virt_dma_desc vd;
+ 
+@@ -670,6 +666,93 @@ static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan,
+ }
+ 
+ /**
++ * bam_prep_dma_custom_mapping - Prep DMA descriptor from custom data
++ *
++ * @chan: dma channel
++ * @data: custom data
++ * @flags: DMA flags
++ */
++static struct dma_async_tx_descriptor *bam_prep_dma_custom_mapping(
++	struct dma_chan *chan,
++	void *data, unsigned long flags)
++{
++	struct bam_chan *bchan = to_bam_chan(chan);
++	struct bam_device *bdev = bchan->bdev;
++	struct bam_async_desc *async_desc;
++	struct qcom_bam_custom_data *desc_data = data;
++	u32 i;
++	struct bam_desc_hw *desc;
++	unsigned int num_alloc = 0;
++
++
++	if (!is_slave_direction(desc_data->dir)) {
++		dev_err(bdev->dev, "invalid dma direction\n");
++		return NULL;
++	}
++
++	/* calculate number of required entries */
++	for (i = 0; i < desc_data->sgl_cnt; i++)
++		num_alloc += DIV_ROUND_UP(
++			sg_dma_len(&desc_data->bam_sgl[i].sgl), BAM_FIFO_SIZE);
++
++	/* allocate enough room to accommodate the number of entries */
++	async_desc = kzalloc(sizeof(*async_desc) +
++			(num_alloc * sizeof(struct bam_desc_hw)), GFP_NOWAIT);
++
++	if (!async_desc)
++		goto err_out;
++
++	if (flags & DMA_PREP_FENCE)
++		async_desc->flags |= DESC_FLAG_NWD;
++
++	if (flags & DMA_PREP_INTERRUPT)
++		async_desc->flags |= DESC_FLAG_EOT;
++	else
++		async_desc->flags |= DESC_FLAG_INT;
++
++	async_desc->num_desc = num_alloc;
++	async_desc->curr_desc = async_desc->desc;
++	async_desc->dir = desc_data->dir;
++
++	/* fill in temporary descriptors */
++	desc = async_desc->desc;
++	for (i = 0; i < desc_data->sgl_cnt; i++) {
++		unsigned int remainder;
++		unsigned int curr_offset = 0;
++
++		remainder = sg_dma_len(&desc_data->bam_sgl[i].sgl);
++
++		do {
++			desc->addr = cpu_to_le32(
++				sg_dma_address(&desc_data->bam_sgl[i].sgl) +
++				curr_offset);
++
++			if (desc_data->bam_sgl[i].dma_flags)
++				desc->flags |= cpu_to_le16(
++					desc_data->bam_sgl[i].dma_flags);
++
++			if (remainder > BAM_FIFO_SIZE) {
++				desc->size = cpu_to_le16(BAM_FIFO_SIZE);
++				remainder -= BAM_FIFO_SIZE;
++				curr_offset += BAM_FIFO_SIZE;
++			} else {
++				desc->size = cpu_to_le16(remainder);
++				remainder = 0;
++			}
++
++			async_desc->length += desc->size;
++			desc++;
++		} while (remainder > 0);
++	}
++
++	return vchan_tx_prep(&bchan->vc, &async_desc->vd, flags);
++
++err_out:
++	kfree(async_desc);
++	return NULL;
++}
++
++/**
+  * bam_dma_terminate_all - terminate all transactions on a channel
+  * @bchan: bam dma channel
+  *
+@@ -960,7 +1043,7 @@ static void bam_start_dma(struct bam_chan *bchan)
+ 
+ 	/* set any special flags on the last descriptor */
+ 	if (async_desc->num_desc == async_desc->xfer_len)
+-		desc[async_desc->xfer_len - 1].flags =
++		desc[async_desc->xfer_len - 1].flags |=
+ 			cpu_to_le16(async_desc->flags);
+ 	else
+ 		desc[async_desc->xfer_len - 1].flags |=
+@@ -1237,6 +1320,8 @@ static int bam_dma_probe(struct platform_device *pdev)
+ 	bdev->common.device_alloc_chan_resources = bam_alloc_chan;
+ 	bdev->common.device_free_chan_resources = bam_free_chan;
+ 	bdev->common.device_prep_slave_sg = bam_prep_slave_sg;
++	bdev->common.device_prep_dma_custom_mapping =
++		bam_prep_dma_custom_mapping;
+ 	bdev->common.device_config = bam_slave_config;
+ 	bdev->common.device_pause = bam_pause;
+ 	bdev->common.device_resume = bam_resume;
+diff --git a/include/linux/dma/qcom_bam_dma.h b/include/linux/dma/qcom_bam_dma.h
+index 7e87a85..7113c77 100644
+--- a/include/linux/dma/qcom_bam_dma.h
++++ b/include/linux/dma/qcom_bam_dma.h
+@@ -65,6 +65,19 @@ enum bam_command_type {
+ };
+ 
+ /*
++ * QCOM BAM DMA custom data
++ *
++ * @sgl_cnt: number of sgl in bam_sgl
++ * @dir: DMA data transfer direction
++ * @bam_sgl: BAM SGL pointer
++ */
++struct qcom_bam_custom_data {
++	u32 sgl_cnt;
++	enum dma_transfer_direction dir;
++	struct qcom_bam_sgl *bam_sgl;
++};
++
++/*
+  * qcom_bam_sg_init_table - Init QCOM BAM SGL
+  * @bam_sgl: bam sgl
+  * @nents: number of entries in bam sgl
+diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
+index cc535a4..627c125 100644
+--- a/include/linux/dmaengine.h
++++ b/include/linux/dmaengine.h
+@@ -692,6 +692,8 @@ struct dma_filter {
+  *	be called after period_len bytes have been transferred.
+  * @device_prep_interleaved_dma: Transfer expression in a generic way.
+  * @device_prep_dma_imm_data: DMA's 8 byte immediate data to the dst address
++ * @device_prep_dma_custom_mapping: prepares a dma operation from dma driver
++ *	specific custom data
+  * @device_config: Pushes a new configuration to a channel, return 0 or an error
+  *	code
+  * @device_pause: Pauses any transfer happening on a channel. Returns
+@@ -783,6 +785,9 @@ struct dma_device {
+ 	struct dma_async_tx_descriptor *(*device_prep_dma_imm_data)(
+ 		struct dma_chan *chan, dma_addr_t dst, u64 data,
+ 		unsigned long flags);
++	struct dma_async_tx_descriptor *(*device_prep_dma_custom_mapping)(
++		struct dma_chan *chan, void *data,
++		unsigned long flags);
+ 
+ 	int (*device_config)(struct dma_chan *chan,
+ 			     struct dma_slave_config *config);
+@@ -899,6 +904,15 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_sg(
+ 		src_sg, src_nents, flags);
+ }
+ 
++static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_custom_mapping(
++	struct dma_chan *chan,
++	void *data,
++	unsigned long flags)
++{
++	return chan->device->device_prep_dma_custom_mapping(chan, data,
++			flags);
++}
++
+ /**
+  * dmaengine_terminate_all() - Terminate all active DMA transfers
+  * @chan: The channel for which to terminate the transfers
+-- 
+2.7.2
-- 
2.30.2
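
For reference, a minimal client-side sketch of the new hook. This is not
part of the patch: the helper name example_bam_custom_tx() is invented for
illustration, and it assumes the qcom_bam_sgl array has already been set up
(e.g. via qcom_bam_sg_init_table()) and DMA-mapped by the caller, since the
prep callback reads sg_dma_address()/sg_dma_len() directly.

#include <linux/dmaengine.h>
#include <linux/dma/qcom_bam_dma.h>

/* Hypothetical helper: submit a pre-mapped BAM scatterlist through the
 * custom-mapping prep hook added by this patch.
 */
static int example_bam_custom_tx(struct dma_chan *chan,
				 struct qcom_bam_sgl *bam_sgl, u32 sgl_cnt)
{
	struct qcom_bam_custom_data data;
	struct dma_async_tx_descriptor *desc;

	/* describe the custom transfer for bam_prep_dma_custom_mapping() */
	data.dir = DMA_MEM_TO_DEV;	/* must pass is_slave_direction() */
	data.sgl_cnt = sgl_cnt;		/* entries in bam_sgl */
	data.bam_sgl = bam_sgl;		/* already DMA-mapped by the caller */

	/* NULL means an invalid direction or descriptor allocation failure */
	desc = dmaengine_prep_dma_custom_mapping(chan, &data,
						 DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;
}

Note how the prep flags map onto BAM descriptor flags in the callback:
DMA_PREP_INTERRUPT selects DESC_FLAG_EOT (otherwise DESC_FLAG_INT is used)
and DMA_PREP_FENCE adds DESC_FLAG_NWD, so the flags passed here control how
the last hardware descriptor signals completion.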