3b2612094779d94afde25657b639cd7b58d4c6f1
[openwrt/staging/mkresin.git] / target / linux / ipq806x / patches-4.9 / 862-dmaengine-qcom-bam_dma-Add-custom-data-mapping.patch
1 From 5a7ccdf845d64b385affdcffaf2defbe9848be15 Mon Sep 17 00:00:00 2001
2 From: Ram Chandra Jangir <rjangir@codeaurora.org>
3 Date: Thu, 20 Apr 2017 10:39:00 +0530
4 Subject: [PATCH] dmaengine: qcom: bam_dma: Add custom data mapping
5
6 Add a new function to support preparing DMA descriptors
7 from custom data.
8
9 Signed-off-by: Abhishek Sahu <absahu@codeaurora.org>
10 Signed-off-by: Ram Chandra Jangir <rjangir@codeaurora.org>
11 ---
12 drivers/dma/qcom/bam_dma.c | 97 +++++++++++++++++++++++++++++++++++++---
13 include/linux/dma/qcom_bam_dma.h | 14 ++++++
14 include/linux/dmaengine.h | 14 ++++++
15 3 files changed, 119 insertions(+), 6 deletions(-)
16
17 diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
18 index 03c4eb3..bde8d70 100644
19 --- a/drivers/dma/qcom/bam_dma.c
20 +++ b/drivers/dma/qcom/bam_dma.c
21 @@ -49,6 +49,7 @@
22 #include <linux/clk.h>
23 #include <linux/dmaengine.h>
24 #include <linux/pm_runtime.h>
25 +#include <linux/dma/qcom_bam_dma.h>
26
27 #include "../dmaengine.h"
28 #include "../virt-dma.h"
29 @@ -61,11 +62,6 @@ struct bam_desc_hw {
30
31 #define BAM_DMA_AUTOSUSPEND_DELAY 100
32
33 -#define DESC_FLAG_INT BIT(15)
34 -#define DESC_FLAG_EOT BIT(14)
35 -#define DESC_FLAG_EOB BIT(13)
36 -#define DESC_FLAG_NWD BIT(12)
37 -
38 struct bam_async_desc {
39 struct virt_dma_desc vd;
40
41 @@ -670,6 +666,93 @@ static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan,
42 }
43
44 /**
45 + * bam_prep_dma_custom_mapping - Prep DMA descriptor from custom data
46 + *
47 + * @chan: dma channel
48 + * @data: custom data
49 + * @flags: DMA flags
50 + */
51 +static struct dma_async_tx_descriptor *bam_prep_dma_custom_mapping(
52 + struct dma_chan *chan,
53 + void *data, unsigned long flags)
54 +{
55 + struct bam_chan *bchan = to_bam_chan(chan);
56 + struct bam_device *bdev = bchan->bdev;
57 + struct bam_async_desc *async_desc;
58 + struct qcom_bam_custom_data *desc_data = data;
59 + u32 i;
60 + struct bam_desc_hw *desc;
61 + unsigned int num_alloc = 0;
62 +
63 +
64 + if (!is_slave_direction(desc_data->dir)) {
65 + dev_err(bdev->dev, "invalid dma direction\n");
66 + return NULL;
67 + }
68 +
69 + /* calculate number of required entries */
70 + for (i = 0; i < desc_data->sgl_cnt; i++)
71 + num_alloc += DIV_ROUND_UP(
72 + sg_dma_len(&desc_data->bam_sgl[i].sgl), BAM_FIFO_SIZE);
73 +
74 + /* allocate enough room to accommodate the number of entries */
75 + async_desc = kzalloc(sizeof(*async_desc) +
76 + (num_alloc * sizeof(struct bam_desc_hw)), GFP_NOWAIT);
77 +
78 + if (!async_desc)
79 + goto err_out;
80 +
81 + if (flags & DMA_PREP_FENCE)
82 + async_desc->flags |= DESC_FLAG_NWD;
83 +
84 + if (flags & DMA_PREP_INTERRUPT)
85 + async_desc->flags |= DESC_FLAG_EOT;
86 + else
87 + async_desc->flags |= DESC_FLAG_INT;
88 +
89 + async_desc->num_desc = num_alloc;
90 + async_desc->curr_desc = async_desc->desc;
91 + async_desc->dir = desc_data->dir;
92 +
93 + /* fill in temporary descriptors */
94 + desc = async_desc->desc;
95 + for (i = 0; i < desc_data->sgl_cnt; i++) {
96 + unsigned int remainder;
97 + unsigned int curr_offset = 0;
98 +
99 + remainder = sg_dma_len(&desc_data->bam_sgl[i].sgl);
100 +
101 + do {
102 + desc->addr = cpu_to_le32(
103 + sg_dma_address(&desc_data->bam_sgl[i].sgl) +
104 + curr_offset);
105 +
106 + if (desc_data->bam_sgl[i].dma_flags)
107 + desc->flags |= cpu_to_le16(
108 + desc_data->bam_sgl[i].dma_flags);
109 +
110 + if (remainder > BAM_FIFO_SIZE) {
111 + desc->size = cpu_to_le16(BAM_FIFO_SIZE);
112 + remainder -= BAM_FIFO_SIZE;
113 + curr_offset += BAM_FIFO_SIZE;
114 + } else {
115 + desc->size = cpu_to_le16(remainder);
116 + remainder = 0;
117 + }
118 +
119 + async_desc->length += desc->size;
120 + desc++;
121 + } while (remainder > 0);
122 + }
123 +
124 + return vchan_tx_prep(&bchan->vc, &async_desc->vd, flags);
125 +
126 +err_out:
127 + kfree(async_desc);
128 + return NULL;
129 +}
130 +
131 +/**
132 * bam_dma_terminate_all - terminate all transactions on a channel
133 * @bchan: bam dma channel
134 *
135 @@ -960,7 +1043,7 @@ static void bam_start_dma(struct bam_chan *bchan)
136
137 /* set any special flags on the last descriptor */
138 if (async_desc->num_desc == async_desc->xfer_len)
139 - desc[async_desc->xfer_len - 1].flags =
140 + desc[async_desc->xfer_len - 1].flags |=
141 cpu_to_le16(async_desc->flags);
142 else
143 desc[async_desc->xfer_len - 1].flags |=
144 @@ -1237,6 +1320,8 @@ static int bam_dma_probe(struct platform_device *pdev)
145 bdev->common.device_alloc_chan_resources = bam_alloc_chan;
146 bdev->common.device_free_chan_resources = bam_free_chan;
147 bdev->common.device_prep_slave_sg = bam_prep_slave_sg;
148 + bdev->common.device_prep_dma_custom_mapping =
149 + bam_prep_dma_custom_mapping;
150 bdev->common.device_config = bam_slave_config;
151 bdev->common.device_pause = bam_pause;
152 bdev->common.device_resume = bam_resume;
153 diff --git a/include/linux/dma/qcom_bam_dma.h b/include/linux/dma/qcom_bam_dma.h
154 index 7e87a85..7113c77 100644
155 --- a/include/linux/dma/qcom_bam_dma.h
156 +++ b/include/linux/dma/qcom_bam_dma.h
157 @@ -65,6 +65,19 @@ enum bam_command_type {
158 };
159
160 /*
161 + * QCOM BAM DMA custom data
162 + *
163 + * @sgl_cnt: number of sgl in bam_sgl
164 + * @dir: DMA data transfer direction
165 + * @bam_sgl: BAM SGL pointer
166 + */
167 +struct qcom_bam_custom_data {
168 + u32 sgl_cnt;
169 + enum dma_transfer_direction dir;
170 + struct qcom_bam_sgl *bam_sgl;
171 +};
172 +
173 +/*
174 * qcom_bam_sg_init_table - Init QCOM BAM SGL
175 * @bam_sgl: bam sgl
176 * @nents: number of entries in bam sgl
177 diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
178 index cc535a4..627c125 100644
179 --- a/include/linux/dmaengine.h
180 +++ b/include/linux/dmaengine.h
181 @@ -692,6 +692,8 @@ struct dma_filter {
182 * be called after period_len bytes have been transferred.
183 * @device_prep_interleaved_dma: Transfer expression in a generic way.
184 * @device_prep_dma_imm_data: DMA's 8 byte immediate data to the dst address
185 + * @device_prep_dma_custom_mapping: prepares a dma operation from dma driver
186 + * specific custom data
187 * @device_config: Pushes a new configuration to a channel, return 0 or an error
188 * code
189 * @device_pause: Pauses any transfer happening on a channel. Returns
190 @@ -783,6 +785,9 @@ struct dma_device {
191 struct dma_async_tx_descriptor *(*device_prep_dma_imm_data)(
192 struct dma_chan *chan, dma_addr_t dst, u64 data,
193 unsigned long flags);
194 + struct dma_async_tx_descriptor *(*device_prep_dma_custom_mapping)(
195 + struct dma_chan *chan, void *data,
196 + unsigned long flags);
197
198 int (*device_config)(struct dma_chan *chan,
199 struct dma_slave_config *config);
200 @@ -899,6 +904,15 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_sg(
201 src_sg, src_nents, flags);
202 }
203
204 +static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_custom_mapping(
205 + struct dma_chan *chan,
206 + void *data,
207 + unsigned long flags)
208 +{
209 + return chan->device->device_prep_dma_custom_mapping(chan, data,
210 + flags);
211 +}
212 +
213 /**
214 * dmaengine_terminate_all() - Terminate all active DMA transfers
215 * @chan: The channel for which to terminate the transfers
216 --
217 2.7.2