From 03d574236ca07cd6ffec88a8124426e5e42722e1 Mon Sep 17 00:00:00 2001
From: Dave Stevenson <dave.stevenson@raspberrypi.org>
Date: Mon, 1 Jul 2019 11:57:25 +0100
Subject: [PATCH 687/725] staging: vcsm-cma: Rework to use dma APIs, not CMA

Due to a misunderstanding of the DMA mapping APIs, I made
the wrong decision on how to implement this.

Rework to use dma_alloc_coherent instead of the CMA
API. This also allows it to be built as a module easily.

Signed-off-by: Dave Stevenson <dave.stevenson@raspberrypi.org>
---
 .../staging/vc04_services/vc-sm-cma/Kconfig | 4 +-
 .../staging/vc04_services/vc-sm-cma/Makefile | 2 +-
 .../staging/vc04_services/vc-sm-cma/vc_sm.c | 291 ++++++++++--------
 .../staging/vc04_services/vc-sm-cma/vc_sm.h | 13 +-
 .../vc04_services/vc-sm-cma/vc_sm_cma.c | 98 ------
 .../vc04_services/vc-sm-cma/vc_sm_cma.h | 39 ---
 6 files changed, 168 insertions(+), 279 deletions(-)
 delete mode 100644 drivers/staging/vc04_services/vc-sm-cma/vc_sm_cma.c
 delete mode 100644 drivers/staging/vc04_services/vc-sm-cma/vc_sm_cma.h

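For context, the allocation pattern the patch moves to is the standard
coherent-DMA one: dma_alloc_coherent() hands back a kernel cookie plus the
bus address given to the VPU, and dma_get_sgtable() derives the sg_table
that the dma-buf exporter copies per attachment. Below is a minimal sketch
of that path (illustrative only, kept in this ignored commentary area; the
demo_* names are not from the driver):

#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

struct demo_buf {
	void *cookie;		/* kernel virtual address from the DMA API */
	dma_addr_t dma_addr;	/* bus address passed to the VPU */
	size_t size;
	struct sg_table *sgt;	/* scatterlist view for dma-buf export */
};

static int demo_alloc(struct device *dev, struct demo_buf *buf, size_t size)
{
	int ret;

	buf->size = PAGE_ALIGN(size);
	buf->cookie = dma_alloc_coherent(dev, buf->size, &buf->dma_addr,
					 GFP_KERNEL);
	if (!buf->cookie)
		return -ENOMEM;

	buf->sgt = kmalloc(sizeof(*buf->sgt), GFP_KERNEL);
	if (!buf->sgt) {
		ret = -ENOMEM;
		goto free_dma;
	}

	/* Build an sg_table describing the coherent allocation. */
	ret = dma_get_sgtable(dev, buf->sgt, buf->cookie, buf->dma_addr,
			      buf->size);
	if (ret < 0)
		goto free_sgt;

	return 0;

free_sgt:
	kfree(buf->sgt);
free_dma:
	dma_free_coherent(dev, buf->size, buf->cookie, buf->dma_addr);
	return ret;
}

static void demo_free(struct device *dev, struct demo_buf *buf)
{
	sg_free_table(buf->sgt);
	kfree(buf->sgt);
	dma_free_coherent(dev, buf->size, buf->cookie, buf->dma_addr);
}

Freeing reverses the steps (sg_free_table()/kfree() on the table, then
dma_free_coherent() with the same cookie and bus address), which matches
what vc_sm_release_resource() does in the diff below.
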
--- a/drivers/staging/vc04_services/vc-sm-cma/Kconfig
+++ b/drivers/staging/vc04_services/vc-sm-cma/Kconfig
@@ -1,6 +1,6 @@
config BCM_VC_SM_CMA
- bool "VideoCore Shared Memory (CMA) driver"
- depends on BCM2835_VCHIQ && DMA_CMA
+ tristate "VideoCore Shared Memory (CMA) driver"
+ depends on BCM2835_VCHIQ
select RBTREE
select DMA_SHARED_BUFFER
help
--- a/drivers/staging/vc04_services/vc-sm-cma/Makefile
+++ b/drivers/staging/vc04_services/vc-sm-cma/Makefile
@@ -3,6 +3,6 @@ ccflags-y += -Idrivers/staging/vc04_serv
ccflags-y += -D__VCCOREVER__=0

vc-sm-cma-$(CONFIG_BCM_VC_SM_CMA) := \
- vc_sm.o vc_sm_cma_vchi.o vc_sm_cma.o
+ vc_sm.o vc_sm_cma_vchi.o

obj-$(CONFIG_BCM_VC_SM_CMA) += vc-sm-cma.o
--- a/drivers/staging/vc04_services/vc-sm-cma/vc_sm.c
+++ b/drivers/staging/vc04_services/vc-sm-cma/vc_sm.c
@@ -6,8 +6,8 @@
* Dave Stevenson <dave.stevenson@raspberrypi.org>
*
* Based on vmcs_sm driver from Broadcom Corporation for some API,
- * and taking some code for CMA/dmabuf handling from the Android Ion
- * driver (Google/Linaro).
+ * and taking some code for buffer allocation and dmabuf handling from
+ * videobuf2.
*
*
* This driver has 3 main uses:
@@ -52,7 +52,6 @@
#include "vc_sm_cma_vchi.h"

#include "vc_sm.h"
-#include "vc_sm_cma.h"
#include "vc_sm_knl.h"
#include <linux/broadcom/vc_sm_cma_ioctl.h>

@@ -89,7 +88,6 @@ struct sm_state_t {
struct miscdevice misc_dev;

struct sm_instance *sm_handle; /* Handle for videocore service. */
- struct cma *cma_heap;

spinlock_t kernelid_map_lock; /* Spinlock protecting kernelid_map */
struct idr kernelid_map;
@@ -110,8 +108,9 @@ struct sm_state_t {

struct vc_sm_dma_buf_attachment {
struct device *dev;
- struct sg_table *table;
+ struct sg_table sg_table;
struct list_head list;
+ enum dma_data_direction dma_dir;
};

/* ---- Private Variables ----------------------------------------------- */
@@ -202,9 +201,10 @@ static int vc_sm_cma_global_state_show(s
resource->import.attach);
seq_printf(s, " SGT %p\n",
resource->import.sgt);
+ } else {
+ seq_printf(s, " SGT %p\n",
+ resource->alloc.sg_table);
}
- seq_printf(s, " SG_TABLE %p\n",
- resource->sg_table);
seq_printf(s, " DMA_ADDR %pad\n",
&resource->dma_addr);
seq_printf(s, " VC_HANDLE %08x\n",
@@ -296,8 +296,9 @@ static void vc_sm_vpu_free(struct vc_sm_
*/
static void vc_sm_release_resource(struct vc_sm_buffer *buffer)
{
- pr_debug("[%s]: buffer %p (name %s, size %zu)\n",
- __func__, buffer, buffer->name, buffer->size);
+ pr_debug("[%s]: buffer %p (name %s, size %zu), imported %u\n",
+ __func__, buffer, buffer->name, buffer->size,
+ buffer->imported);

if (buffer->vc_handle) {
/* We've sent the unmap request but not had the response. */
@@ -313,8 +314,6 @@ static void vc_sm_release_resource(struc

/* Release the allocation (whether imported dmabuf or CMA allocation) */
if (buffer->imported) {
- pr_debug("%s: Release imported dmabuf %p\n", __func__,
- buffer->import.dma_buf);
if (buffer->import.dma_buf)
dma_buf_put(buffer->import.dma_buf);
else
@@ -322,16 +321,8 @@ static void vc_sm_release_resource(struc
__func__, buffer);
buffer->import.dma_buf = NULL;
} else {
- if (buffer->sg_table) {
- /* Our own allocation that we need to dma_unmap_sg */
- dma_unmap_sg(&sm_state->pdev->dev,
- buffer->sg_table->sgl,
- buffer->sg_table->nents,
- DMA_BIDIRECTIONAL);
- }
- pr_debug("%s: Release our allocation\n", __func__);
- vc_sm_cma_buffer_free(&buffer->alloc);
- pr_debug("%s: Release our allocation - done\n", __func__);
+ dma_free_coherent(&sm_state->pdev->dev, buffer->size,
+ buffer->cookie, buffer->dma_addr);
}


@@ -371,38 +362,6 @@ static struct vc_sm_privdata_t *vc_sm_cm
return file_data;
}

-static struct sg_table *dup_sg_table(struct sg_table *table)
-{
- struct sg_table *new_table;
- int ret, i;
- struct scatterlist *sg, *new_sg;
-
- new_table = kzalloc(sizeof(*new_table), GFP_KERNEL);
- if (!new_table)
- return ERR_PTR(-ENOMEM);
-
- ret = sg_alloc_table(new_table, table->nents, GFP_KERNEL);
- if (ret) {
- kfree(new_table);
- return ERR_PTR(ret);
- }
-
- new_sg = new_table->sgl;
- for_each_sg(table->sgl, sg, table->nents, i) {
- memcpy(new_sg, sg, sizeof(*sg));
- sg->dma_address = 0;
- new_sg = sg_next(new_sg);
- }
-
- return new_table;
-}
-
-static void free_duped_table(struct sg_table *table)
-{
- sg_free_table(table);
- kfree(table);
-}
-
/* Dma buf operations for use with our own allocations */

static int vc_sm_dma_buf_attach(struct dma_buf *dmabuf,
@@ -410,28 +369,45 @@ static int vc_sm_dma_buf_attach(struct d

{
struct vc_sm_dma_buf_attachment *a;
- struct sg_table *table;
+ struct sg_table *sgt;
struct vc_sm_buffer *buf = dmabuf->priv;
+ struct scatterlist *rd, *wr;
+ int ret, i;

a = kzalloc(sizeof(*a), GFP_KERNEL);
if (!a)
return -ENOMEM;

- table = dup_sg_table(buf->sg_table);
- if (IS_ERR(table)) {
+ pr_debug("%s dmabuf %p attachment %p\n", __func__, dmabuf, attachment);
+
+ mutex_lock(&buf->lock);
+
+ INIT_LIST_HEAD(&a->list);
+
+ sgt = &a->sg_table;
+
+ /* Copy the buf->base_sgt scatter list to the attachment, as we can't
+ * map the same scatter list to multiple attachments at the same time.
+ */
+ ret = sg_alloc_table(sgt, buf->alloc.sg_table->orig_nents, GFP_KERNEL);
+ if (ret) {
kfree(a);
- return PTR_ERR(table);
+ return -ENOMEM;
}

- a->table = table;
- INIT_LIST_HEAD(&a->list);
+ rd = buf->alloc.sg_table->sgl;
+ wr = sgt->sgl;
+ for (i = 0; i < sgt->orig_nents; ++i) {
+ sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
+ rd = sg_next(rd);
+ wr = sg_next(wr);
+ }

+ a->dma_dir = DMA_NONE;
attachment->priv = a;

- mutex_lock(&buf->lock);
list_add(&a->list, &buf->attachments);
mutex_unlock(&buf->lock);
- pr_debug("%s dmabuf %p attachment %p\n", __func__, dmabuf, attachment);

return 0;
}
@@ -441,9 +417,20 @@ static void vc_sm_dma_buf_detach(struct
{
struct vc_sm_dma_buf_attachment *a = attachment->priv;
struct vc_sm_buffer *buf = dmabuf->priv;
+ struct sg_table *sgt;

pr_debug("%s dmabuf %p attachment %p\n", __func__, dmabuf, attachment);
- free_duped_table(a->table);
+ if (!a)
+ return;
+
+ sgt = &a->sg_table;
+
+ /* release the scatterlist cache */
+ if (a->dma_dir != DMA_NONE)
+ dma_unmap_sg(attachment->dev, sgt->sgl, sgt->orig_nents,
+ a->dma_dir);
+ sg_free_table(sgt);
+
mutex_lock(&buf->lock);
list_del(&a->list);
mutex_unlock(&buf->lock);
@@ -455,13 +442,38 @@ static struct sg_table *vc_sm_map_dma_bu
enum dma_data_direction direction)
{
struct vc_sm_dma_buf_attachment *a = attachment->priv;
+ /* stealing dmabuf mutex to serialize map/unmap operations */
+ struct mutex *lock = &attachment->dmabuf->lock;
struct sg_table *table;

- table = a->table;
+ mutex_lock(lock);
+ pr_debug("%s attachment %p\n", __func__, attachment);
+ table = &a->sg_table;
+
+ /* return previously mapped sg table */
+ if (a->dma_dir == direction) {
+ mutex_unlock(lock);
+ return table;
+ }
+
+ /* release any previous cache */
+ if (a->dma_dir != DMA_NONE) {
+ dma_unmap_sg(attachment->dev, table->sgl, table->orig_nents,
+ a->dma_dir);
+ a->dma_dir = DMA_NONE;
+ }
+
+ /* mapping to the client with new direction */
+ table->nents = dma_map_sg(attachment->dev, table->sgl,
+ table->orig_nents, direction);
+ if (!table->nents) {
+ pr_err("failed to map scatterlist\n");
+ mutex_unlock(lock);
+ return ERR_PTR(-EIO);
+ }

- if (!dma_map_sg(attachment->dev, table->sgl, table->nents,
- direction))
- return ERR_PTR(-ENOMEM);
+ a->dma_dir = direction;
+ mutex_unlock(lock);

pr_debug("%s attachment %p\n", __func__, attachment);
return table;
@@ -478,41 +490,26 @@ static void vc_sm_unmap_dma_buf(struct d
static int vc_sm_dmabuf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
struct vc_sm_buffer *buf = dmabuf->priv;
- struct sg_table *table = buf->sg_table;
- unsigned long addr = vma->vm_start;
- unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
- struct scatterlist *sg;
- int i;
- int ret = 0;
+ int ret;

pr_debug("%s dmabuf %p, buf %p, vm_start %08lX\n", __func__, dmabuf,
- buf, addr);
+ buf, vma->vm_start);

mutex_lock(&buf->lock);

/* now map it to userspace */
- for_each_sg(table->sgl, sg, table->nents, i) {
- struct page *page = sg_page(sg);
- unsigned long remainder = vma->vm_end - addr;
- unsigned long len = sg->length;
+ vma->vm_pgoff = 0;

- if (offset >= sg->length) {
- offset -= sg->length;
- continue;
- } else if (offset) {
- page += offset / PAGE_SIZE;
- len = sg->length - offset;
- offset = 0;
- }
- len = min(len, remainder);
- ret = remap_pfn_range(vma, addr, page_to_pfn(page), len,
- vma->vm_page_prot);
- if (ret)
- break;
- addr += len;
- if (addr >= vma->vm_end)
- break;
+ ret = dma_mmap_coherent(&sm_state->pdev->dev, vma, buf->cookie,
+ buf->dma_addr, buf->size);
+
+ if (ret) {
+ pr_err("Remapping memory failed, error: %d\n", ret);
+ return ret;
}
+
+ vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+
mutex_unlock(&buf->lock);

if (ret)
@@ -570,8 +567,8 @@ static int vc_sm_dma_buf_begin_cpu_acces
mutex_lock(&buf->lock);

list_for_each_entry(a, &buf->attachments, list) {
- dma_sync_sg_for_cpu(a->dev, a->table->sgl, a->table->nents,
- direction);
+ dma_sync_sg_for_cpu(a->dev, a->sg_table.sgl,
+ a->sg_table.nents, direction);
}
mutex_unlock(&buf->lock);

@@ -593,8 +590,8 @@ static int vc_sm_dma_buf_end_cpu_access(
mutex_lock(&buf->lock);

list_for_each_entry(a, &buf->attachments, list) {
- dma_sync_sg_for_device(a->dev, a->table->sgl, a->table->nents,
- direction);
+ dma_sync_sg_for_device(a->dev, a->sg_table.sgl,
+ a->sg_table.nents, direction);
}
mutex_unlock(&buf->lock);

@@ -625,7 +622,9 @@ static const struct dma_buf_ops dma_buf_
.map = vc_sm_dma_buf_kmap,
.unmap = vc_sm_dma_buf_kunmap,
};
+
/* Dma_buf operations for chaining through to an imported dma_buf */
+
static
int vc_sm_import_dma_buf_attach(struct dma_buf *dmabuf,
struct dma_buf_attachment *attachment)
@@ -819,7 +818,7 @@ vc_sm_cma_import_dmabuf_internal(struct

import.type = VC_SM_ALLOC_NON_CACHED;
dma_addr = sg_dma_address(sgt->sgl);
- import.addr = (uint32_t)dma_addr;
+ import.addr = (u32)dma_addr;
if ((import.addr & 0xC0000000) != 0xC0000000) {
pr_err("%s: Expecting an uncached alias for dma_addr %pad\n",
__func__, &dma_addr);
@@ -911,11 +910,12 @@ error:
return ret;
}

-static int vc_sm_cma_vpu_alloc(u32 size, uint32_t align, const char *name,
+static int vc_sm_cma_vpu_alloc(u32 size, u32 align, const char *name,
u32 mem_handle, struct vc_sm_buffer **ret_buffer)
{
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
struct vc_sm_buffer *buffer = NULL;
+ struct sg_table *sgt;
int aligned_size;
int ret = 0;

@@ -938,23 +938,34 @@ static int vc_sm_cma_vpu_alloc(u32 size,
*/
mutex_lock(&buffer->lock);

- if (vc_sm_cma_buffer_allocate(sm_state->cma_heap, &buffer->alloc,
- aligned_size)) {
- pr_err("[%s]: cma alloc of %d bytes failed\n",
+ buffer->cookie = dma_alloc_coherent(&sm_state->pdev->dev,
+ aligned_size, &buffer->dma_addr,
+ GFP_KERNEL);
+ if (!buffer->cookie) {
+ pr_err("[%s]: dma_alloc_coherent alloc of %d bytes failed\n",
__func__, aligned_size);
ret = -ENOMEM;
goto error;
}
- buffer->sg_table = buffer->alloc.sg_table;

- pr_debug("[%s]: cma alloc of %d bytes success\n",
+ pr_debug("[%s]: alloc of %d bytes success\n",
__func__, aligned_size);

- if (dma_map_sg(&sm_state->pdev->dev, buffer->sg_table->sgl,
- buffer->sg_table->nents, DMA_BIDIRECTIONAL) <= 0) {
- pr_err("[%s]: dma_map_sg failed\n", __func__);
+ sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
+ if (!sgt) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ ret = dma_get_sgtable(&sm_state->pdev->dev, sgt, buffer->cookie,
+ buffer->dma_addr, buffer->size);
+ if (ret < 0) {
+ pr_err("failed to get scatterlist from DMA API\n");
+ kfree(sgt);
+ ret = -ENOMEM;
goto error;
}
+ buffer->alloc.sg_table = sgt;

INIT_LIST_HEAD(&buffer->attachments);

@@ -971,10 +982,10 @@ static int vc_sm_cma_vpu_alloc(u32 size,
ret = PTR_ERR(buffer->dma_buf);
goto error;
}
- buffer->dma_addr = (uint32_t)sg_dma_address(buffer->sg_table->sgl);
+ buffer->dma_addr = (u32)sg_dma_address(buffer->alloc.sg_table->sgl);
if ((buffer->dma_addr & 0xC0000000) != 0xC0000000) {
- pr_err("%s: Expecting an uncached alias for dma_addr %pad\n",
- __func__, &buffer->dma_addr);
+ pr_warn_once("%s: Expecting an uncached alias for dma_addr %pad\n",
+ __func__, &buffer->dma_addr);
buffer->dma_addr |= 0xC0000000;
}
buffer->private = sm_state->vpu_allocs;
@@ -1145,6 +1156,7 @@ int vc_sm_cma_ioctl_alloc(struct vc_sm_p
struct vc_sm_import import = { 0 };
struct vc_sm_import_result result = { 0 };
struct dma_buf *dmabuf = NULL;
+ struct sg_table *sgt;
int aligned_size;
int ret = 0;
int status;
@@ -1162,18 +1174,13 @@ int vc_sm_cma_ioctl_alloc(struct vc_sm_p
goto error;
}

- if (vc_sm_cma_buffer_allocate(sm_state->cma_heap, &buffer->alloc,
- aligned_size)) {
- pr_err("[%s]: cma alloc of %d bytes failed\n",
+ buffer->cookie = dma_alloc_coherent(&sm_state->pdev->dev,
+ aligned_size,
+ &buffer->dma_addr,
+ GFP_KERNEL);
+ if (!buffer->cookie) {
+ pr_err("[%s]: dma_alloc_coherent alloc of %d bytes failed\n",
__func__, aligned_size);
- kfree(buffer);
- return -ENOMEM;
- }
- buffer->sg_table = buffer->alloc.sg_table;
-
- if (dma_map_sg(&sm_state->pdev->dev, buffer->sg_table->sgl,
- buffer->sg_table->nents, DMA_BIDIRECTIONAL) <= 0) {
- pr_err("[%s]: dma_map_sg failed\n", __func__);
ret = -ENOMEM;
goto error;
}
@@ -1204,7 +1211,7 @@ int vc_sm_cma_ioctl_alloc(struct vc_sm_p
}
buffer->dma_buf = dmabuf;

- import.addr = (uint32_t)sg_dma_address(buffer->sg_table->sgl);
+ import.addr = buffer->dma_addr;
import.size = aligned_size;
import.kernel_id = get_kernel_id(buffer);

@@ -1229,10 +1236,25 @@ int vc_sm_cma_ioctl_alloc(struct vc_sm_p
buffer->private = private;
buffer->vc_handle = result.res_handle;
buffer->size = import.size;
- buffer->dma_addr = import.addr;
buffer->vpu_state = VPU_MAPPED;
buffer->kernel_id = import.kernel_id;
- //buffer->res_cached = ioparam->cached;
+
+ sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
+ if (!sgt) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ ret = dma_get_sgtable(&sm_state->pdev->dev, sgt, buffer->cookie,
+ buffer->dma_addr, buffer->size);
+ if (ret < 0) {
+ /* FIXME: error handling */
+ pr_err("failed to get scatterlist from DMA API\n");
+ kfree(sgt);
+ ret = -ENOMEM;
+ goto error;
+ }
+ buffer->alloc.sg_table = sgt;

fd = dma_buf_fd(dmabuf, O_CLOEXEC);
if (fd < 0)
@@ -1250,11 +1272,19 @@ int vc_sm_cma_ioctl_alloc(struct vc_sm_p
return 0;

error:
- if (buffer) {
- pr_err("[%s]: something failed - cleanup. ret %d\n", __func__,
- ret);
+ pr_err("[%s]: something failed - cleanup. ret %d\n", __func__, ret);

+ if (dmabuf) {
+ /* dmabuf has been exported, therefore allow dmabuf cleanup to
+ * deal with this
+ */
dma_buf_put(dmabuf);
+ } else {
+ /* No dmabuf, therefore just free the buffer here */
+ if (buffer->cookie)
+ dma_free_coherent(&sm_state->pdev->dev, buffer->size,
+ buffer->cookie, buffer->dma_addr);
+ kfree(buffer);
}
return ret;
}
@@ -1527,13 +1557,6 @@ static void vc_sm_connected_init(void)

pr_info("[%s]: start\n", __func__);

- vc_sm_cma_add_heaps(&sm_state->cma_heap);
- if (!sm_state->cma_heap) {
- pr_err("[%s]: failed to initialise CMA heap\n",
- __func__);
- return;
- }
-
/*
* Initialize and create a VCHI connection for the shared memory service
* running on videocore.
--- a/drivers/staging/vc04_services/vc-sm-cma/vc_sm.h
+++ b/drivers/staging/vc04_services/vc-sm-cma/vc_sm.h
@@ -21,8 +21,6 @@
#include <linux/types.h>
#include <linux/miscdevice.h>

-#include "vc_sm_cma.h"
-
#define VC_SM_MAX_NAME_LEN 32

enum vc_sm_vpu_mapping_state {
@@ -31,6 +29,12 @@ enum vc_sm_vpu_mapping_state {
VPU_UNMAPPING
};

+struct vc_sm_alloc_data {
+ unsigned long num_pages;
+ void *priv_virt;
+ struct sg_table *sg_table;
+};
+
struct vc_sm_imported {
struct dma_buf *dma_buf;
struct dma_buf_attachment *attach;
@@ -56,8 +60,6 @@ struct vc_sm_buffer {
int in_use:1; /* Kernel is still using this resource */
int imported:1; /* Imported dmabuf */

- struct sg_table *sg_table;
-
enum vc_sm_vpu_mapping_state vpu_state;
u32 vc_handle; /* VideoCore handle for this buffer */
int vpu_allocated; /*
@@ -69,11 +71,12 @@ struct vc_sm_buffer {
/* DMABUF related fields */
struct dma_buf *dma_buf;
dma_addr_t dma_addr;
+ void *cookie;

struct vc_sm_privdata_t *private;

union {
- struct vc_sm_cma_alloc_data alloc;
+ struct vc_sm_alloc_data alloc;
struct vc_sm_imported import;
};
};
--- a/drivers/staging/vc04_services/vc-sm-cma/vc_sm_cma.c
+++ /dev/null
@@ -1,98 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * VideoCore Shared Memory CMA allocator
- *
- * Copyright: 2018, Raspberry Pi (Trading) Ltd
- *
- * Based on the Android ION allocator
- * Copyright (C) Linaro 2012
- * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
- *
- */
-
-#include <linux/slab.h>
-#include <linux/errno.h>
-#include <linux/err.h>
-#include <linux/cma.h>
-#include <linux/scatterlist.h>
-
-#include "vc_sm_cma.h"
-
-/* CMA heap operations functions */
-int vc_sm_cma_buffer_allocate(struct cma *cma_heap,
- struct vc_sm_cma_alloc_data *buffer,
- unsigned long len)
-{
- /* len should already be page aligned */
- unsigned long num_pages = len / PAGE_SIZE;
- struct sg_table *table;
- struct page *pages;
- int ret;
-
- pages = cma_alloc(cma_heap, num_pages, 0, GFP_KERNEL);
- if (!pages)
- return -ENOMEM;
-
- table = kmalloc(sizeof(*table), GFP_KERNEL);
- if (!table)
- goto err;
-
- ret = sg_alloc_table(table, 1, GFP_KERNEL);
- if (ret)
- goto free_mem;
-
- sg_set_page(table->sgl, pages, len, 0);
-
- buffer->priv_virt = pages;
- buffer->sg_table = table;
- buffer->cma_heap = cma_heap;
- buffer->num_pages = num_pages;
- return 0;
-
-free_mem:
- kfree(table);
-err:
- cma_release(cma_heap, pages, num_pages);
- return -ENOMEM;
-}
-
-void vc_sm_cma_buffer_free(struct vc_sm_cma_alloc_data *buffer)
-{
- struct cma *cma_heap = buffer->cma_heap;
- struct page *pages = buffer->priv_virt;
-
- /* release memory */
- if (cma_heap)
- cma_release(cma_heap, pages, buffer->num_pages);
-
- /* release sg table */
- if (buffer->sg_table) {
- sg_free_table(buffer->sg_table);
- kfree(buffer->sg_table);
- buffer->sg_table = NULL;
- }
-}
-
-int __vc_sm_cma_add_heaps(struct cma *cma, void *priv)
-{
- struct cma **heap = (struct cma **)priv;
- const char *name = cma_get_name(cma);
-
- if (!(*heap)) {
- phys_addr_t phys_addr = cma_get_base(cma);
-
- pr_debug("%s: Adding cma heap %s (start %pap, size %lu) for use by vcsm\n",
- __func__, name, &phys_addr, cma_get_size(cma));
- *heap = cma;
- } else {
- pr_err("%s: Ignoring heap %s as already set\n",
- __func__, name);
- }
-
- return 0;
-}
-
-void vc_sm_cma_add_heaps(struct cma **cma_heap)
-{
- cma_for_each_area(__vc_sm_cma_add_heaps, cma_heap);
-}
--- a/drivers/staging/vc04_services/vc-sm-cma/vc_sm_cma.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-/*
- * VideoCore Shared Memory CMA allocator
- *
- * Copyright: 2018, Raspberry Pi (Trading) Ltd
- *
- * Based on the Android ION allocator
- * Copyright (C) Linaro 2012
- * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-#ifndef VC_SM_CMA_H
-#define VC_SM_CMA_H
-
-struct vc_sm_cma_alloc_data {
- struct cma *cma_heap;
- unsigned long num_pages;
- void *priv_virt;
- struct sg_table *sg_table;
-};
-
-int vc_sm_cma_buffer_allocate(struct cma *cma_heap,
- struct vc_sm_cma_alloc_data *buffer,
- unsigned long len);
-void vc_sm_cma_buffer_free(struct vc_sm_cma_alloc_data *buffer);
-
-void vc_sm_cma_add_heaps(struct cma **cma_heap);
-
-#endif