1 From 2994fdc0a9d48be68d6e403bc8ddadecfc8d8796 Mon Sep 17 00:00:00 2001
2 From: Dave Stevenson <dave.stevenson@raspberrypi.org>
3 Date: Tue, 25 Sep 2018 10:27:11 +0100
4 Subject: [PATCH 272/806] staging: vc04_services: Add new vc-sm-cma driver
6 This new driver allows contiguous memory blocks to be imported
7 into the VideoCore VPU memory map, and manages the lifetime of
8 those objects, only releasing the source dmabuf once the VPU has
9 confirmed it has finished with it.
11 Signed-off-by: Dave Stevenson <dave.stevenson@raspberrypi.org>
13 drivers/staging/vc04_services/Kconfig | 1 +
14 drivers/staging/vc04_services/Makefile | 1 +
15 .../staging/vc04_services/vc-sm-cma/Kconfig | 10 +
16 .../staging/vc04_services/vc-sm-cma/Makefile | 8 +
17 drivers/staging/vc04_services/vc-sm-cma/TODO | 2 +
18 .../staging/vc04_services/vc-sm-cma/vc_sm.c | 838 ++++++++++++++++++
19 .../staging/vc04_services/vc-sm-cma/vc_sm.h | 59 ++
20 .../vc04_services/vc-sm-cma/vc_sm_cma_vchi.c | 498 +++++++++++
21 .../vc04_services/vc-sm-cma/vc_sm_cma_vchi.h | 59 ++
22 .../vc04_services/vc-sm-cma/vc_sm_defs.h | 298 +++++++
23 .../vc04_services/vc-sm-cma/vc_sm_knl.h | 28 +
24 11 files changed, 1802 insertions(+)
25 create mode 100644 drivers/staging/vc04_services/vc-sm-cma/Kconfig
26 create mode 100644 drivers/staging/vc04_services/vc-sm-cma/Makefile
27 create mode 100644 drivers/staging/vc04_services/vc-sm-cma/TODO
28 create mode 100644 drivers/staging/vc04_services/vc-sm-cma/vc_sm.c
29 create mode 100644 drivers/staging/vc04_services/vc-sm-cma/vc_sm.h
30 create mode 100644 drivers/staging/vc04_services/vc-sm-cma/vc_sm_cma_vchi.c
31 create mode 100644 drivers/staging/vc04_services/vc-sm-cma/vc_sm_cma_vchi.h
32 create mode 100644 drivers/staging/vc04_services/vc-sm-cma/vc_sm_defs.h
33 create mode 100644 drivers/staging/vc04_services/vc-sm-cma/vc_sm_knl.h
35 --- a/drivers/staging/vc04_services/Kconfig
36 +++ b/drivers/staging/vc04_services/Kconfig
37 @@ -22,6 +22,7 @@ source "drivers/staging/vc04_services/bc
39 source "drivers/staging/vc04_services/bcm2835-camera/Kconfig"
40 source "drivers/staging/vc04_services/vchiq-mmal/Kconfig"
41 +source "drivers/staging/vc04_services/vc-sm-cma/Kconfig"
45 --- a/drivers/staging/vc04_services/Makefile
46 +++ b/drivers/staging/vc04_services/Makefile
47 @@ -13,6 +13,7 @@ vchiq-objs := \
48 obj-$(CONFIG_SND_BCM2835) += bcm2835-audio/
49 obj-$(CONFIG_VIDEO_BCM2835) += bcm2835-camera/
50 obj-$(CONFIG_BCM2835_VCHIQ_MMAL) += vchiq-mmal/
51 +obj-$(CONFIG_BCM_VC_SM_CMA) += vc-sm-cma/
53 ccflags-y += -Idrivers/staging/vc04_services -D__VCCOREVER__=0x04000000
56 +++ b/drivers/staging/vc04_services/vc-sm-cma/Kconfig
59 + tristate "VideoCore Shared Memory (CMA) driver"
60 + depends on BCM2835_VCHIQ
62 + select DMA_SHARED_BUFFER
64 + Say Y here to enable the shared memory interface that
65 + supports sharing dmabufs with VideoCore.
66 + This operates over the VCHIQ interface to a service
67 + running on VideoCore.
69 +++ b/drivers/staging/vc04_services/vc-sm-cma/Makefile
71 +ccflags-y += -Idrivers/staging/vc04_services -Idrivers/staging/vc04_services/interface/vchi -Idrivers/staging/vc04_services/interface/vchiq_arm
72 +# -I"drivers/staging/android/ion/" -I"$(srctree)/fs/"
73 +ccflags-y += -D__VCCOREVER__=0
75 +vc-sm-cma-$(CONFIG_BCM_VC_SM_CMA) := \
76 + vc_sm.o vc_sm_cma_vchi.o
78 +obj-$(CONFIG_BCM_VC_SM_CMA) += vc-sm-cma.o
80 +++ b/drivers/staging/vc04_services/vc-sm-cma/TODO
82 +1) Convert to a platform driver.
85 +++ b/drivers/staging/vc04_services/vc-sm-cma/vc_sm.c
87 +// SPDX-License-Identifier: GPL-2.0
89 + * VideoCore Shared Memory driver using CMA.
91 + * Copyright: 2018, Raspberry Pi (Trading) Ltd
92 + * Dave Stevenson <dave.stevenson@raspberrypi.org>
94 + * Based on vmcs_sm driver from Broadcom Corporation for some API,
95 + * and taking some code for CMA/dmabuf handling from the Android Ion
96 + * driver (Google/Linaro).
98 + * This is a cut-down version to only support import of dma_bufs from
99 + * other kernel drivers. A more complete implementation of the old
100 + * vmcs_sm functionality can follow later.
104 +/* ---- Include Files ----------------------------------------------------- */
105 +#include <linux/cdev.h>
106 +#include <linux/device.h>
107 +#include <linux/debugfs.h>
108 +#include <linux/dma-mapping.h>
109 +#include <linux/dma-buf.h>
110 +#include <linux/errno.h>
111 +#include <linux/fs.h>
112 +#include <linux/kernel.h>
113 +#include <linux/list.h>
114 +#include <linux/miscdevice.h>
115 +#include <linux/module.h>
116 +#include <linux/mm.h>
117 +#include <linux/of_device.h>
118 +#include <linux/platform_device.h>
119 +#include <linux/proc_fs.h>
120 +#include <linux/slab.h>
121 +#include <linux/seq_file.h>
122 +#include <linux/syscalls.h>
123 +#include <linux/types.h>
125 +#include "vchiq_connected.h"
126 +#include "vc_sm_cma_vchi.h"
129 +#include "vc_sm_knl.h"
131 +/* ---- Private Constants and Types --------------------------------------- */
133 +#define DEVICE_NAME "vcsm-cma"
134 +#define DEVICE_MINOR 0
136 +#define VC_SM_RESOURCE_NAME_DEFAULT "sm-host-resource"
138 +#define VC_SM_DIR_ROOT_NAME "vcsm-cma"
139 +#define VC_SM_STATE "state"
141 +/* Private file data associated with each opened device. */
142 +struct vc_sm_privdata_t {
143 + pid_t pid; /* PID of creator. */
145 + int restart_sys; /* Tracks restart on interrupt. */
146 + enum vc_sm_msg_type int_action; /* Interrupted action. */
147 + u32 int_trans_id; /* Interrupted transaction. */
150 +typedef int (*VC_SM_SHOW) (struct seq_file *s, void *v);
152 + VC_SM_SHOW show; /* Debug fs function hookup. */
153 + struct dentry *dir_entry; /* Debug fs directory entry. */
154 + void *priv_data; /* Private data */
157 +/* Global state information. */
159 + struct platform_device *pdev;
161 + struct miscdevice dev;
162 + struct sm_instance *sm_handle; /* Handle for videocore service. */
164 + struct mutex map_lock; /* Global map lock. */
165 + struct list_head buffer_list; /* List of buffers. */
167 + struct vc_sm_privdata_t *data_knl; /* Kernel internal data tracking. */
168 + struct dentry *dir_root; /* Debug fs entries root. */
169 + struct sm_pde_t dir_state; /* Debug fs entries state sub-tree. */
171 + bool require_released_callback; /* VPU will send a released msg when it
172 + * has finished with a resource.
174 + u32 int_trans_id; /* Interrupted transaction. */
177 +/* ---- Private Variables ----------------------------------------------- */
179 +static struct sm_state_t *sm_state;
180 +static int sm_inited;
182 +/* ---- Private Function Prototypes -------------------------------------- */
184 +/* ---- Private Functions ------------------------------------------------ */
186 +static int vc_sm_cma_seq_file_show(struct seq_file *s, void *v)
188 + struct sm_pde_t *sm_pde;
190 + sm_pde = (struct sm_pde_t *)(s->private);
192 + if (sm_pde && sm_pde->show)
193 + sm_pde->show(s, v);
198 +static int vc_sm_cma_single_open(struct inode *inode, struct file *file)
200 + return single_open(file, vc_sm_cma_seq_file_show, inode->i_private);
203 +static const struct file_operations vc_sm_cma_debug_fs_fops = {
204 + .open = vc_sm_cma_single_open,
206 + .llseek = seq_lseek,
207 + .release = single_release,
210 +static int vc_sm_cma_global_state_show(struct seq_file *s, void *v)
212 + struct vc_sm_buffer *resource = NULL;
213 + int resource_count = 0;
218 + seq_printf(s, "\nVC-ServiceHandle 0x%x\n",
219 + (unsigned int)sm_state->sm_handle);
221 + /* Log all applicable mapping(s). */
223 + mutex_lock(&sm_state->map_lock);
224 + seq_puts(s, "\nResources\n");
225 + if (!list_empty(&sm_state->buffer_list)) {
226 + list_for_each_entry(resource, &sm_state->buffer_list,
227 + global_buffer_list) {
230 + seq_printf(s, "\nResource %p\n",
232 + seq_printf(s, " NAME %s\n",
234 + seq_printf(s, " SIZE %d\n",
236 + seq_printf(s, " DMABUF %p\n",
237 + resource->dma_buf);
238 + seq_printf(s, " ATTACH %p\n",
240 + seq_printf(s, " SG_TABLE %p\n",
241 + resource->sg_table);
242 + seq_printf(s, " SGT %p\n",
244 + seq_printf(s, " DMA_ADDR %pad\n",
245 + &resource->dma_addr);
246 + seq_printf(s, " VC_HANDLE %08x\n",
247 + resource->vc_handle);
248 + seq_printf(s, " VC_MAPPING %d\n",
249 + resource->vpu_state);
252 + seq_printf(s, "\n\nTotal resource count: %d\n\n", resource_count);
254 + mutex_unlock(&sm_state->map_lock);
260 + * Adds a buffer to the private data list which tracks all the allocated
263 +static void vc_sm_add_resource(struct vc_sm_privdata_t *privdata,
264 + struct vc_sm_buffer *buffer)
266 + mutex_lock(&sm_state->map_lock);
267 + list_add(&buffer->global_buffer_list, &sm_state->buffer_list);
268 + mutex_unlock(&sm_state->map_lock);
270 + pr_debug("[%s]: added buffer %p (name %s, size %d)\n",
271 + __func__, buffer, buffer->name, buffer->size);
275 + * Release an allocation.
276 + * All refcounting is done via the dma buf object.
278 +static void vc_sm_release_resource(struct vc_sm_buffer *buffer, int force)
280 + mutex_lock(&sm_state->map_lock);
281 + mutex_lock(&buffer->lock);
283 + pr_debug("[%s]: buffer %p (name %s, size %d)\n",
284 + __func__, buffer, buffer->name, buffer->size);
286 + if (buffer->vc_handle && buffer->vpu_state == VPU_MAPPED) {
287 + struct vc_sm_free_t free = { buffer->vc_handle, 0 };
288 + int status = vc_sm_cma_vchi_free(sm_state->sm_handle, &free,
289 + &sm_state->int_trans_id);
290 + if (status != 0 && status != -EINTR) {
291 + pr_err("[%s]: failed to free memory on videocore (status: %u, trans_id: %u)\n",
292 + __func__, status, sm_state->int_trans_id);
295 + if (sm_state->require_released_callback) {
296 + /* Need to wait for the VPU to confirm the free */
298 + /* Retain a reference on this until the VPU has
301 + buffer->vpu_state = VPU_UNMAPPING;
304 + buffer->vpu_state = VPU_NOT_MAPPED;
305 + buffer->vc_handle = 0;
307 + if (buffer->vc_handle) {
308 + /* We've sent the unmap request but not had the response. */
309 + pr_err("[%s]: Waiting for VPU unmap response on %p\n",
313 + if (buffer->in_use) {
314 + /* Don't release dmabuf here - we await the release */
315 + pr_err("[%s]: buffer %p is still in use\n",
320 + /* Handle cleaning up imported dmabufs */
322 + dma_buf_unmap_attachment(buffer->attach, buffer->sgt,
323 + DMA_BIDIRECTIONAL);
324 + buffer->sgt = NULL;
326 + if (buffer->attach) {
327 + dma_buf_detach(buffer->dma_buf, buffer->attach);
328 + buffer->attach = NULL;
331 + /* Release the dma_buf (whether ours or imported) */
332 + if (buffer->import_dma_buf) {
333 + dma_buf_put(buffer->import_dma_buf);
334 + buffer->import_dma_buf = NULL;
335 + buffer->dma_buf = NULL;
336 + } else if (buffer->dma_buf) {
337 + dma_buf_put(buffer->dma_buf);
338 + buffer->dma_buf = NULL;
341 + if (buffer->sg_table && !buffer->import_dma_buf) {
342 + /* Our own allocation that we need to dma_unmap_sg */
343 + dma_unmap_sg(&sm_state->pdev->dev, buffer->sg_table->sgl,
344 + buffer->sg_table->nents, DMA_BIDIRECTIONAL);
347 + /* Free the local resource. Start by removing it from the list */
348 + buffer->private = NULL;
349 + list_del(&buffer->global_buffer_list);
351 + mutex_unlock(&buffer->lock);
352 + mutex_unlock(&sm_state->map_lock);
354 + mutex_destroy(&buffer->lock);
360 + mutex_unlock(&buffer->lock);
361 + mutex_unlock(&sm_state->map_lock);
364 +/* Create support for private data tracking. */
365 +static struct vc_sm_privdata_t *vc_sm_cma_create_priv_data(pid_t id)
367 + char alloc_name[32];
368 + struct vc_sm_privdata_t *file_data = NULL;
370 + /* Allocate private structure. */
371 + file_data = kzalloc(sizeof(*file_data), GFP_KERNEL);
376 + snprintf(alloc_name, sizeof(alloc_name), "%d", id);
378 + file_data->pid = id;
383 +/* Dma_buf operations for chaining through to an imported dma_buf */
385 +int vc_sm_import_dma_buf_attach(struct dma_buf *dmabuf,
386 + struct dma_buf_attachment *attachment)
388 + struct vc_sm_buffer *res = dmabuf->priv;
390 + if (!res->import_dma_buf)
392 + return res->import_dma_buf->ops->attach(res->import_dma_buf,
397 +void vc_sm_import_dma_buf_detatch(struct dma_buf *dmabuf,
398 + struct dma_buf_attachment *attachment)
400 + struct vc_sm_buffer *res = dmabuf->priv;
402 + if (!res->import_dma_buf)
404 + res->import_dma_buf->ops->detach(res->import_dma_buf, attachment);
408 +struct sg_table *vc_sm_import_map_dma_buf(struct dma_buf_attachment *attachment,
409 + enum dma_data_direction direction)
411 + struct vc_sm_buffer *res = attachment->dmabuf->priv;
413 + if (!res->import_dma_buf)
415 + return res->import_dma_buf->ops->map_dma_buf(attachment, direction);
419 +void vc_sm_import_unmap_dma_buf(struct dma_buf_attachment *attachment,
420 + struct sg_table *table,
421 + enum dma_data_direction direction)
423 + struct vc_sm_buffer *res = attachment->dmabuf->priv;
425 + if (!res->import_dma_buf)
427 + res->import_dma_buf->ops->unmap_dma_buf(attachment, table, direction);
431 +int vc_sm_import_dmabuf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
433 + struct vc_sm_buffer *res = dmabuf->priv;
435 + pr_debug("%s: mmap dma_buf %p, res %p, imported db %p\n", __func__,
436 + dmabuf, res, res->import_dma_buf);
437 + if (!res->import_dma_buf) {
438 + pr_err("%s: mmap dma_buf %p- not an imported buffer\n",
442 + return res->import_dma_buf->ops->mmap(res->import_dma_buf, vma);
446 +void vc_sm_import_dma_buf_release(struct dma_buf *dmabuf)
448 + struct vc_sm_buffer *res = dmabuf->priv;
450 + pr_debug("%s: Relasing dma_buf %p\n", __func__, dmabuf);
451 + if (!res->import_dma_buf)
456 + vc_sm_release_resource(res, 0);
460 +void *vc_sm_import_dma_buf_kmap(struct dma_buf *dmabuf,
461 + unsigned long offset)
463 + struct vc_sm_buffer *res = dmabuf->priv;
465 + if (!res->import_dma_buf)
467 + return res->import_dma_buf->ops->map(res->import_dma_buf,
472 +void vc_sm_import_dma_buf_kunmap(struct dma_buf *dmabuf,
473 + unsigned long offset, void *ptr)
475 + struct vc_sm_buffer *res = dmabuf->priv;
477 + if (!res->import_dma_buf)
479 + res->import_dma_buf->ops->unmap(res->import_dma_buf,
484 +int vc_sm_import_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
485 + enum dma_data_direction direction)
487 + struct vc_sm_buffer *res = dmabuf->priv;
489 + if (!res->import_dma_buf)
491 + return res->import_dma_buf->ops->begin_cpu_access(res->import_dma_buf,
496 +int vc_sm_import_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
497 + enum dma_data_direction direction)
499 + struct vc_sm_buffer *res = dmabuf->priv;
501 + if (!res->import_dma_buf)
503 + return res->import_dma_buf->ops->end_cpu_access(res->import_dma_buf,
507 +static const struct dma_buf_ops dma_buf_import_ops = {
508 + .map_dma_buf = vc_sm_import_map_dma_buf,
509 + .unmap_dma_buf = vc_sm_import_unmap_dma_buf,
510 + .mmap = vc_sm_import_dmabuf_mmap,
511 + .release = vc_sm_import_dma_buf_release,
512 + .attach = vc_sm_import_dma_buf_attach,
513 + .detach = vc_sm_import_dma_buf_detatch,
514 + .begin_cpu_access = vc_sm_import_dma_buf_begin_cpu_access,
515 + .end_cpu_access = vc_sm_import_dma_buf_end_cpu_access,
516 + .map = vc_sm_import_dma_buf_kmap,
517 + .unmap = vc_sm_import_dma_buf_kunmap,
520 +/* Import a dma_buf to be shared with VC. */
522 +vc_sm_cma_import_dmabuf_internal(struct vc_sm_privdata_t *private,
523 + struct dma_buf *dma_buf,
524 + struct dma_buf **imported_buf)
526 + DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
527 + struct vc_sm_buffer *buffer = NULL;
528 + struct vc_sm_import import = { };
529 + struct vc_sm_import_result result = { };
530 + struct dma_buf_attachment *attach = NULL;
531 + struct sg_table *sgt = NULL;
535 + /* Setup our allocation parameters */
536 + pr_debug("%s: importing dma_buf %p\n", __func__, dma_buf);
538 + get_dma_buf(dma_buf);
541 + attach = dma_buf_attach(dma_buf, &sm_state->pdev->dev);
542 + if (IS_ERR(attach)) {
543 + ret = PTR_ERR(attach);
547 + sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
549 + ret = PTR_ERR(sgt);
553 + /* Verify that the address block is contiguous */
554 + if (sgt->nents != 1) {
559 + /* Allocate local buffer to track this allocation. */
560 + buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
566 + import.type = VC_SM_ALLOC_NON_CACHED;
567 + import.addr = (uint32_t)sg_dma_address(sgt->sgl);
568 + if ((import.addr & 0xC0000000) != 0xC0000000) {
569 + pr_err("%s: Expecting an uncached alias for dma_addr %08x\n",
570 + __func__, import.addr);
571 + import.addr |= 0xC0000000;
573 + import.size = sg_dma_len(sgt->sgl);
574 + import.allocator = current->tgid;
575 + import.kernel_id = (uint32_t)buffer; //FIXME: 64 bit support needed.
577 + memcpy(import.name, VC_SM_RESOURCE_NAME_DEFAULT,
578 + sizeof(VC_SM_RESOURCE_NAME_DEFAULT));
580 + pr_debug("[%s]: attempt to import \"%s\" data - type %u, addr %p, size %u\n",
581 + __func__, import.name, import.type, (void *)import.addr,
584 + /* Allocate the videocore buffer. */
585 + status = vc_sm_cma_vchi_import(sm_state->sm_handle, &import, &result,
586 + &sm_state->int_trans_id);
587 + if (status == -EINTR) {
588 + pr_debug("[%s]: requesting import memory action restart (trans_id: %u)\n",
589 + __func__, sm_state->int_trans_id);
590 + ret = -ERESTARTSYS;
591 + private->restart_sys = -EINTR;
592 + private->int_action = VC_SM_MSG_TYPE_IMPORT;
594 + } else if (status || !result.res_handle) {
595 + pr_debug("[%s]: failed to import memory on videocore (status: %u, trans_id: %u)\n",
596 + __func__, status, sm_state->int_trans_id);
601 + mutex_init(&buffer->lock);
602 + INIT_LIST_HEAD(&buffer->attachments);
603 + memcpy(buffer->name, import.name,
604 + min(sizeof(buffer->name), sizeof(import.name) - 1));
606 + /* Keep track of the buffer we created. */
607 + buffer->private = private;
608 + buffer->vc_handle = result.res_handle;
609 + buffer->size = import.size;
610 + buffer->vpu_state = VPU_MAPPED;
612 + buffer->import_dma_buf = dma_buf;
614 + buffer->attach = attach;
616 + buffer->dma_addr = sg_dma_address(sgt->sgl);
617 + buffer->in_use = 1;
620 + * We're done - we need to export a new dmabuf chaining through most
621 + * functions, but enabling us to release our own internal references
624 + exp_info.ops = &dma_buf_import_ops;
625 + exp_info.size = import.size;
626 + exp_info.flags = O_RDWR;
627 + exp_info.priv = buffer;
629 + buffer->dma_buf = dma_buf_export(&exp_info);
630 + if (IS_ERR(buffer->dma_buf)) {
631 + ret = PTR_ERR(buffer->dma_buf);
635 + vc_sm_add_resource(private, buffer);
637 + *imported_buf = buffer->dma_buf;
642 + if (result.res_handle) {
643 + struct vc_sm_free_t free = { result.res_handle, 0 };
645 + vc_sm_cma_vchi_free(sm_state->sm_handle, &free,
646 + &sm_state->int_trans_id);
650 + dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
652 + dma_buf_detach(dma_buf, attach);
653 + dma_buf_put(dma_buf);
657 +/* FIXME: Pass a function pointer to this into vc_vchi_sm.c */
659 +vc_sm_vpu_event(struct sm_instance *instance, struct vc_sm_result_t *reply,
662 + switch (reply->trans_id & ~0x80000000) {
663 + case VC_SM_MSG_TYPE_CLIENT_VERSION:
665 + /* Acknowledge that the firmware supports the version command */
666 + pr_debug("%s: firmware acked version msg. Require release cb\n",
668 + sm_state->require_released_callback = true;
671 + case VC_SM_MSG_TYPE_RELEASED:
673 + struct vc_sm_released *release = (struct vc_sm_released *)reply;
674 + struct vc_sm_buffer *buffer =
675 + (struct vc_sm_buffer *)release->kernel_id;
678 + * FIXME: Need to check buffer is still valid and allocated
679 + * before continuing
681 + pr_debug("%s: Released addr %08x, size %u, id %08x, mem_handle %08x\n",
682 + __func__, release->addr, release->size,
683 + release->kernel_id, release->vc_handle);
684 + mutex_lock(&buffer->lock);
685 + buffer->vc_handle = 0;
686 + buffer->vpu_state = VPU_NOT_MAPPED;
687 + mutex_unlock(&buffer->lock);
689 + vc_sm_release_resource(buffer, 0);
693 + pr_err("%s: Unknown vpu cmd %x\n", __func__, reply->trans_id);
698 +/* Videocore connected. */
699 +static void vc_sm_connected_init(void)
702 + VCHI_INSTANCE_T vchi_instance;
703 + struct vc_sm_version version;
704 + struct vc_sm_result_t version_result;
706 + pr_info("[%s]: start\n", __func__);
709 + * Initialize and create a VCHI connection for the shared memory service
710 + * running on videocore.
712 + ret = vchi_initialise(&vchi_instance);
714 + pr_err("[%s]: failed to initialise VCHI instance (ret=%d)\n",
721 + ret = vchi_connect(vchi_instance);
723 + pr_err("[%s]: failed to connect VCHI instance (ret=%d)\n",
730 + /* Initialize an instance of the shared memory service. */
731 + sm_state->sm_handle = vc_sm_cma_vchi_init(vchi_instance, 1,
733 + if (!sm_state->sm_handle) {
734 + pr_err("[%s]: failed to initialize shared memory service\n",
741 + /* Create a debug fs directory entry (root). */
742 + sm_state->dir_root = debugfs_create_dir(VC_SM_DIR_ROOT_NAME, NULL);
743 + if (!sm_state->dir_root) {
744 + pr_err("[%s]: failed to create \'%s\' directory entry\n",
745 + __func__, VC_SM_DIR_ROOT_NAME);
748 + goto err_stop_sm_service;
751 + sm_state->dir_state.show = &vc_sm_cma_global_state_show;
752 + sm_state->dir_state.dir_entry =
753 + debugfs_create_file(VC_SM_STATE, 0444, sm_state->dir_root,
754 + &sm_state->dir_state,
755 + &vc_sm_cma_debug_fs_fops);
757 + INIT_LIST_HEAD(&sm_state->buffer_list);
759 + sm_state->data_knl = vc_sm_cma_create_priv_data(0);
760 + if (!sm_state->data_knl) {
761 + pr_err("[%s]: failed to create kernel private data tracker\n",
763 + goto err_remove_shared_memory;
766 + version.version = 1;
767 + ret = vc_sm_cma_vchi_client_version(sm_state->sm_handle, &version,
769 + &sm_state->int_trans_id);
771 + pr_err("[%s]: Failed to send version request %d\n", __func__,
777 + pr_info("[%s]: installed successfully\n", __func__);
780 +err_remove_shared_memory:
781 + debugfs_remove_recursive(sm_state->dir_root);
782 +err_stop_sm_service:
783 + vc_sm_cma_vchi_stop(&sm_state->sm_handle);
786 + pr_info("[%s]: failed, ret %d\n", __func__, ret);
789 +/* Driver loading. */
790 +static int bcm2835_vc_sm_cma_probe(struct platform_device *pdev)
792 + struct device *dev = &pdev->dev;
795 + pr_info("%s: Videocore shared memory driver\n", __func__);
797 + sm_state = kzalloc(sizeof(*sm_state), GFP_KERNEL);
800 + sm_state->pdev = pdev;
801 + mutex_init(&sm_state->map_lock);
803 + dev->coherent_dma_mask = DMA_BIT_MASK(32);
804 + dev->dma_mask = &dev->coherent_dma_mask;
805 + err = of_dma_configure(dev, NULL, true);
807 + dev_err(dev, "Unable to setup DMA: %d\n", err);
811 + vchiq_add_connected_callback(vc_sm_connected_init);
815 +/* Driver unloading. */
816 +static int bcm2835_vc_sm_cma_remove(struct platform_device *pdev)
818 + pr_debug("[%s]: start\n", __func__);
820 + /* Remove shared memory device. */
821 + misc_deregister(&sm_state->dev);
823 + /* Remove all proc entries. */
824 + //debugfs_remove_recursive(sm_state->dir_root);
826 + /* Stop the videocore shared memory service. */
827 + vc_sm_cma_vchi_stop(&sm_state->sm_handle);
829 + /* Free the memory for the state structure. */
830 + mutex_destroy(&sm_state->map_lock);
834 + pr_debug("[%s]: end\n", __func__);
838 +/* Get an internal resource handle mapped from the external one. */
839 +int vc_sm_cma_int_handle(int handle)
841 + struct dma_buf *dma_buf = (struct dma_buf *)handle;
842 + struct vc_sm_buffer *res;
844 + /* Validate we can work with this device. */
845 + if (!sm_state || !handle) {
846 + pr_err("[%s]: invalid input\n", __func__);
850 + res = (struct vc_sm_buffer *)dma_buf->priv;
851 + return res->vc_handle;
853 +EXPORT_SYMBOL_GPL(vc_sm_cma_int_handle);
855 +/* Free a previously allocated shared memory handle and block. */
856 +int vc_sm_cma_free(int handle)
858 + struct dma_buf *dma_buf = (struct dma_buf *)handle;
860 + /* Validate we can work with this device. */
861 + if (!sm_state || !handle) {
862 + pr_err("[%s]: invalid input\n", __func__);
866 + pr_debug("%s: handle %08x/dmabuf %p\n", __func__, handle, dma_buf);
868 + dma_buf_put(dma_buf);
872 +EXPORT_SYMBOL_GPL(vc_sm_cma_free);
874 +/* Import a dmabuf to be shared with VC. */
875 +int vc_sm_cma_import_dmabuf(struct dma_buf *src_dmabuf, int *handle)
877 + struct dma_buf *new_dma_buf;
878 + struct vc_sm_buffer *res;
881 + /* Validate we can work with this device. */
882 + if (!sm_state || !src_dmabuf || !handle) {
883 + pr_err("[%s]: invalid input\n", __func__);
887 + ret = vc_sm_cma_import_dmabuf_internal(sm_state->data_knl, src_dmabuf,
891 + pr_debug("%s: imported to ptr %p\n", __func__, new_dma_buf);
892 + res = (struct vc_sm_buffer *)new_dma_buf->priv;
894 + /* Assign valid handle at this time.*/
895 + *handle = (int)new_dma_buf;
898 + * succeeded in importing the dma_buf, but then
899 + * failed to look it up again. How?
900 + * Release the fd again.
902 + pr_err("%s: imported vc_sm_cma_get_buffer failed %d\n",
908 +EXPORT_SYMBOL_GPL(vc_sm_cma_import_dmabuf);
910 +static struct platform_driver bcm2835_vcsm_cma_driver = {
911 + .probe = bcm2835_vc_sm_cma_probe,
912 + .remove = bcm2835_vc_sm_cma_remove,
914 + .name = DEVICE_NAME,
915 + .owner = THIS_MODULE,
919 +module_platform_driver(bcm2835_vcsm_cma_driver);
921 +MODULE_AUTHOR("Dave Stevenson");
922 +MODULE_DESCRIPTION("VideoCore CMA Shared Memory Driver");
923 +MODULE_LICENSE("GPL v2");
924 +MODULE_ALIAS("platform:vcsm-cma");
926 +++ b/drivers/staging/vc04_services/vc-sm-cma/vc_sm.h
928 +/* SPDX-License-Identifier: GPL-2.0 */
931 + * VideoCore Shared Memory driver using CMA.
933 + * Copyright: 2018, Raspberry Pi (Trading) Ltd
940 +#include <linux/device.h>
941 +#include <linux/dma-direction.h>
942 +#include <linux/kref.h>
943 +#include <linux/mm_types.h>
944 +#include <linux/mutex.h>
945 +#include <linux/rbtree.h>
946 +#include <linux/sched.h>
947 +#include <linux/shrinker.h>
948 +#include <linux/types.h>
949 +#include <linux/miscdevice.h>
951 +#define VC_SM_MAX_NAME_LEN 32
953 +enum vc_sm_vpu_mapping_state {
959 +struct vc_sm_buffer {
960 + struct list_head global_buffer_list; /* Global list of buffers. */
964 + /* Lock over all the following state for this buffer */
966 + struct sg_table *sg_table;
967 + struct list_head attachments;
969 + char name[VC_SM_MAX_NAME_LEN];
971 + int in_use:1; /* Kernel is still using this resource */
973 + enum vc_sm_vpu_mapping_state vpu_state;
974 + u32 vc_handle; /* VideoCore handle for this buffer */
976 + /* DMABUF related fields */
977 + struct dma_buf *import_dma_buf;
978 + struct dma_buf *dma_buf;
979 + struct dma_buf_attachment *attach;
980 + struct sg_table *sgt;
981 + dma_addr_t dma_addr;
983 + struct vc_sm_privdata_t *private;
988 +++ b/drivers/staging/vc04_services/vc-sm-cma/vc_sm_cma_vchi.c
990 +// SPDX-License-Identifier: GPL-2.0
992 + * VideoCore Shared Memory CMA allocator
994 + * Copyright: 2018, Raspberry Pi (Trading) Ltd
995 + * Copyright 2011-2012 Broadcom Corporation. All rights reserved.
997 + * Based on vmcs_sm driver from Broadcom Corporation.
1001 +/* ---- Include Files ----------------------------------------------------- */
1002 +#include <linux/completion.h>
1003 +#include <linux/kernel.h>
1004 +#include <linux/kthread.h>
1005 +#include <linux/list.h>
1006 +#include <linux/mutex.h>
1007 +#include <linux/semaphore.h>
1008 +#include <linux/slab.h>
1009 +#include <linux/types.h>
1011 +#include "vc_sm_cma_vchi.h"
1013 +#define VC_SM_VER 1
1014 +#define VC_SM_MIN_VER 0
1016 +/* ---- Private Constants and Types -------------------------------------- */
1018 +/* Command blocks come from a pool */
1019 +#define SM_MAX_NUM_CMD_RSP_BLKS 32
1021 +struct sm_cmd_rsp_blk {
1022 + struct list_head head; /* To create lists */
1023 + /* To be signaled when the response is there */
1024 + struct completion cmplt;
1029 + u8 msg[VC_SM_MAX_MSG_LEN];
1037 +struct sm_instance {
1038 + u32 num_connections;
1039 + VCHI_SERVICE_HANDLE_T vchi_handle[VCHI_MAX_NUM_CONNECTIONS];
1040 + struct task_struct *io_thread;
1041 + struct completion io_cmplt;
1043 + vpu_event_cb vpu_event;
1045 + /* Mutex over the following lists */
1046 + struct mutex lock;
1048 + struct list_head cmd_list;
1049 + struct list_head rsp_list;
1050 + struct list_head dead_list;
1052 + struct sm_cmd_rsp_blk free_blk[SM_MAX_NUM_CMD_RSP_BLKS];
1054 + /* Mutex over the free_list */
1055 + struct mutex free_lock;
1056 + struct list_head free_list;
1058 + struct semaphore free_sema;
1062 +/* ---- Private Variables ------------------------------------------------ */
1064 +/* ---- Private Function Prototypes -------------------------------------- */
1066 +/* ---- Private Functions ------------------------------------------------ */
1068 +bcm2835_vchi_msg_queue(VCHI_SERVICE_HANDLE_T handle,
1070 + unsigned int size)
1072 + return vchi_queue_kernel_message(handle,
1078 +sm_cmd_rsp_blk *vc_vchi_cmd_create(struct sm_instance *instance,
1079 + enum vc_sm_msg_type id, void *msg,
1080 + u32 size, int wait)
1082 + struct sm_cmd_rsp_blk *blk;
1083 + struct vc_sm_msg_hdr_t *hdr;
1085 + if (down_interruptible(&instance->free_sema)) {
1086 + blk = kmalloc(sizeof(*blk), GFP_KERNEL);
1091 + init_completion(&blk->cmplt);
1093 + mutex_lock(&instance->free_lock);
1095 + list_first_entry(&instance->free_list,
1096 + struct sm_cmd_rsp_blk, head);
1097 + list_del(&blk->head);
1098 + mutex_unlock(&instance->free_lock);
1103 + blk->length = sizeof(*hdr) + size;
1105 + hdr = (struct vc_sm_msg_hdr_t *)blk->msg;
1107 + mutex_lock(&instance->lock);
1108 + instance->trans_id++;
1110 + * Retain the top bit for identifying asynchronous events, or VPU cmds.
1112 + instance->trans_id &= ~0x80000000;
1113 + hdr->trans_id = instance->trans_id;
1114 + blk->id = instance->trans_id;
1115 + mutex_unlock(&instance->lock);
1118 + memcpy(hdr->body, msg, size);
1124 +vc_vchi_cmd_delete(struct sm_instance *instance, struct sm_cmd_rsp_blk *blk)
1131 + mutex_lock(&instance->free_lock);
1132 + list_add(&blk->head, &instance->free_list);
1133 + mutex_unlock(&instance->free_lock);
1134 + up(&instance->free_sema);
1137 +static void vc_sm_cma_vchi_rx_ack(struct sm_instance *instance,
1138 + struct sm_cmd_rsp_blk *cmd,
1139 + struct vc_sm_result_t *reply,
1142 + mutex_lock(&instance->lock);
1143 + list_for_each_entry(cmd,
1144 + &instance->rsp_list,
1146 + if (cmd->id == reply->trans_id)
1149 + mutex_unlock(&instance->lock);
1151 + if (&cmd->head == &instance->rsp_list) {
1152 + //pr_debug("%s: received response %u, throw away...",
1153 + pr_err("%s: received response %u, throw away...",
1156 + } else if (reply_len > sizeof(cmd->msg)) {
1157 + pr_err("%s: reply too big (%u) %u, throw away...",
1158 + __func__, reply_len,
1161 + memcpy(cmd->msg, reply,
1163 + complete(&cmd->cmplt);
1167 +static int vc_sm_cma_vchi_videocore_io(void *arg)
1169 + struct sm_instance *instance = arg;
1170 + struct sm_cmd_rsp_blk *cmd = NULL, *cmd_tmp;
1171 + struct vc_sm_result_t *reply;
1178 + vchi_service_release(instance->vchi_handle[0]);
1180 + if (!wait_for_completion_interruptible(&instance->io_cmplt)) {
1181 + vchi_service_use(instance->vchi_handle[0]);
1186 + * Get new command and move it to response list
1188 + mutex_lock(&instance->lock);
1189 + if (list_empty(&instance->cmd_list)) {
1190 + /* no more commands to process */
1191 + mutex_unlock(&instance->lock);
1195 + list_first_entry(&instance->cmd_list,
1196 + struct sm_cmd_rsp_blk,
1198 + list_move(&cmd->head, &instance->rsp_list);
1200 + mutex_unlock(&instance->lock);
1202 + /* Send the command */
1203 + status = bcm2835_vchi_msg_queue(
1204 + instance->vchi_handle[0],
1205 + cmd->msg, cmd->length);
1207 + pr_err("%s: failed to queue message (%d)",
1208 + __func__, status);
1211 + /* If no reply is needed then we're done */
1213 + mutex_lock(&instance->lock);
1214 + list_del(&cmd->head);
1215 + mutex_unlock(&instance->lock);
1216 + vc_vchi_cmd_delete(instance, cmd);
1221 + complete(&cmd->cmplt);
1227 + while (!vchi_msg_peek(instance->vchi_handle[0],
1228 + (void **)&reply, &reply_len,
1229 + VCHI_FLAGS_NONE)) {
1230 + if (reply->trans_id & 0x80000000) {
1231 + /* Async event or cmd from the VPU */
1232 + if (instance->vpu_event)
1233 + instance->vpu_event(
1237 + vc_sm_cma_vchi_rx_ack(instance, cmd,
1238 + reply, reply_len);
1241 + vchi_msg_remove(instance->vchi_handle[0]);
1244 + /* Go through the dead list and free them */
1245 + mutex_lock(&instance->lock);
1246 + list_for_each_entry_safe(cmd, cmd_tmp,
1247 + &instance->dead_list, head) {
1248 + list_del(&cmd->head);
1249 + vc_vchi_cmd_delete(instance, cmd);
1251 + mutex_unlock(&instance->lock);
1258 +static void vc_sm_cma_vchi_callback(void *param,
1259 + const VCHI_CALLBACK_REASON_T reason,
1262 + struct sm_instance *instance = param;
1267 + case VCHI_CALLBACK_MSG_AVAILABLE:
1268 + complete(&instance->io_cmplt);
1271 + case VCHI_CALLBACK_SERVICE_CLOSED:
1272 + pr_info("%s: service CLOSED!!", __func__);
1278 +struct sm_instance *vc_sm_cma_vchi_init(VCHI_INSTANCE_T vchi_instance,
1279 + unsigned int num_connections,
1280 + vpu_event_cb vpu_event)
1283 + struct sm_instance *instance;
1286 + pr_debug("%s: start", __func__);
1288 + if (num_connections > VCHI_MAX_NUM_CONNECTIONS) {
1289 + pr_err("%s: unsupported number of connections %u (max=%u)",
1290 + __func__, num_connections, VCHI_MAX_NUM_CONNECTIONS);
1294 + /* Allocate memory for this instance */
1295 + instance = kzalloc(sizeof(*instance), GFP_KERNEL);
1297 + /* Misc initialisations */
1298 + mutex_init(&instance->lock);
1299 + init_completion(&instance->io_cmplt);
1300 + INIT_LIST_HEAD(&instance->cmd_list);
1301 + INIT_LIST_HEAD(&instance->rsp_list);
1302 + INIT_LIST_HEAD(&instance->dead_list);
1303 + INIT_LIST_HEAD(&instance->free_list);
1304 + sema_init(&instance->free_sema, SM_MAX_NUM_CMD_RSP_BLKS);
1305 + mutex_init(&instance->free_lock);
1306 + for (i = 0; i < SM_MAX_NUM_CMD_RSP_BLKS; i++) {
1307 + init_completion(&instance->free_blk[i].cmplt);
1308 + list_add(&instance->free_blk[i].head, &instance->free_list);
1311 + /* Open the VCHI service connections */
1312 + instance->num_connections = num_connections;
1313 + for (i = 0; i < num_connections; i++) {
1314 + SERVICE_CREATION_T params = {
1315 + .version = VCHI_VERSION_EX(VC_SM_VER, VC_SM_MIN_VER),
1316 + .service_id = VC_SM_SERVER_NAME,
1317 + .callback = vc_sm_cma_vchi_callback,
1318 + .callback_param = instance,
1321 + status = vchi_service_open(vchi_instance,
1322 + ¶ms, &instance->vchi_handle[i]);
1324 + pr_err("%s: failed to open VCHI service (%d)",
1325 + __func__, status);
1327 + goto err_close_services;
1331 +	/* Create the thread which takes care of all io to/from videocore. */
1332 + instance->io_thread = kthread_create(&vc_sm_cma_vchi_videocore_io,
1333 + (void *)instance, "SMIO");
1334 + if (!instance->io_thread) {
1335 + pr_err("%s: failed to create SMIO thread", __func__);
1337 + goto err_close_services;
1339 + instance->vpu_event = vpu_event;
1340 + set_user_nice(instance->io_thread, -10);
1341 + wake_up_process(instance->io_thread);
1343 + pr_debug("%s: success - instance 0x%x", __func__,
1344 + (unsigned int)instance);
1347 +err_close_services:
1348 + for (i = 0; i < instance->num_connections; i++) {
1349 + if (instance->vchi_handle[i])
1350 + vchi_service_close(instance->vchi_handle[i]);
1354 + pr_debug("%s: FAILED", __func__);
1358 +int vc_sm_cma_vchi_stop(struct sm_instance **handle)
1360 + struct sm_instance *instance;
1364 + pr_err("%s: invalid pointer to handle %p", __func__, handle);
1369 + pr_err("%s: invalid handle %p", __func__, *handle);
1373 + instance = *handle;
1375 + /* Close all VCHI service connections */
1376 + for (i = 0; i < instance->num_connections; i++) {
1379 + vchi_service_use(instance->vchi_handle[i]);
1381 + success = vchi_service_close(instance->vchi_handle[i]);
1393 +static int vc_sm_cma_vchi_send_msg(struct sm_instance *handle,
1394 + enum vc_sm_msg_type msg_id, void *msg,
1395 + u32 msg_size, void *result, u32 result_size,
1396 + u32 *cur_trans_id, u8 wait_reply)
1399 + struct sm_instance *instance = handle;
1400 + struct sm_cmd_rsp_blk *cmd_blk;
1403 + pr_err("%s: invalid handle", __func__);
1407 + pr_err("%s: invalid msg pointer", __func__);
1412 + vc_vchi_cmd_create(instance, msg_id, msg, msg_size, wait_reply);
1414 + pr_err("[%s]: failed to allocate global tracking resource",
1420 + *cur_trans_id = cmd_blk->id;
1422 + mutex_lock(&instance->lock);
1423 + list_add_tail(&cmd_blk->head, &instance->cmd_list);
1424 + mutex_unlock(&instance->lock);
1425 + complete(&instance->io_cmplt);
1431 + /* Wait for the response */
1432 + if (wait_for_completion_interruptible(&cmd_blk->cmplt)) {
1433 + mutex_lock(&instance->lock);
1434 + if (!cmd_blk->sent) {
1435 + list_del(&cmd_blk->head);
1436 + mutex_unlock(&instance->lock);
1437 + vc_vchi_cmd_delete(instance, cmd_blk);
1441 + list_move(&cmd_blk->head, &instance->dead_list);
1442 + mutex_unlock(&instance->lock);
1443 + complete(&instance->io_cmplt);
1444 + return -EINTR; /* We're done */
1447 + if (result && result_size) {
1448 + memcpy(result, cmd_blk->msg, result_size);
1450 + struct vc_sm_result_t *res =
1451 + (struct vc_sm_result_t *)cmd_blk->msg;
1452 + status = (res->success == 0) ? 0 : -ENXIO;
1455 + mutex_lock(&instance->lock);
1456 + list_del(&cmd_blk->head);
1457 + mutex_unlock(&instance->lock);
1458 + vc_vchi_cmd_delete(instance, cmd_blk);
1462 +int vc_sm_cma_vchi_free(struct sm_instance *handle, struct vc_sm_free_t *msg,
1463 + u32 *cur_trans_id)
1465 + return vc_sm_cma_vchi_send_msg(handle, VC_SM_MSG_TYPE_FREE,
1466 + msg, sizeof(*msg), 0, 0, cur_trans_id, 0);
1469 +int vc_sm_cma_vchi_import(struct sm_instance *handle, struct vc_sm_import *msg,
1470 + struct vc_sm_import_result *result, u32 *cur_trans_id)
1472 + return vc_sm_cma_vchi_send_msg(handle, VC_SM_MSG_TYPE_IMPORT,
1473 + msg, sizeof(*msg), result, sizeof(*result),
1477 +int vc_sm_cma_vchi_client_version(struct sm_instance *handle,
1478 + struct vc_sm_version *msg,
1479 + struct vc_sm_result_t *result,
1480 + u32 *cur_trans_id)
1482 + return vc_sm_cma_vchi_send_msg(handle, VC_SM_MSG_TYPE_CLIENT_VERSION,
1483 + //msg, sizeof(*msg), result, sizeof(*result),
1484 + //cur_trans_id, 1);
1485 + msg, sizeof(*msg), NULL, 0,
1489 +++ b/drivers/staging/vc04_services/vc-sm-cma/vc_sm_cma_vchi.h
1491 +/* SPDX-License-Identifier: GPL-2.0 */
1494 + * VideoCore Shared Memory CMA allocator
1496 + * Copyright: 2018, Raspberry Pi (Trading) Ltd
1497 + * Copyright 2011-2012 Broadcom Corporation. All rights reserved.
1499 + * Based on vmcs_sm driver from Broadcom Corporation.
1503 +#ifndef __VC_SM_CMA_VCHI_H__INCLUDED__
1504 +#define __VC_SM_CMA_VCHI_H__INCLUDED__
1506 +#include "interface/vchi/vchi.h"
1508 +#include "vc_sm_defs.h"
1511 + * Forward declare.
1513 +struct sm_instance;
1515 +typedef void (*vpu_event_cb)(struct sm_instance *instance,
1516 + struct vc_sm_result_t *reply, int reply_len);
1519 + * Initialize the shared memory service and open the VCHI connection used to talk to it.
1521 +struct sm_instance *vc_sm_cma_vchi_init(VCHI_INSTANCE_T vchi_instance,
1522 + unsigned int num_connections,
1523 + vpu_event_cb vpu_event);
1526 + * Terminates the shared memory service.
1528 +int vc_sm_cma_vchi_stop(struct sm_instance **handle);
1531 + * Ask the shared memory service to free up some memory that was previously
1532 + * allocated by the vc_sm_cma_vchi_alloc function call.
1534 +int vc_sm_cma_vchi_free(struct sm_instance *handle, struct vc_sm_free_t *msg,
1535 + u32 *cur_trans_id);
1538 + * Import a contiguous block of memory and wrap it in a GPU MEM_HANDLE_T.
1540 +int vc_sm_cma_vchi_import(struct sm_instance *handle, struct vc_sm_import *msg,
1541 + struct vc_sm_import_result *result,
1542 + u32 *cur_trans_id);
1544 +int vc_sm_cma_vchi_client_version(struct sm_instance *handle,
1545 + struct vc_sm_version *msg,
1546 + struct vc_sm_result_t *result,
1547 + u32 *cur_trans_id);
1549 +#endif /* __VC_SM_CMA_VCHI_H__INCLUDED__ */
1551 +++ b/drivers/staging/vc04_services/vc-sm-cma/vc_sm_defs.h
1553 +/* SPDX-License-Identifier: GPL-2.0 */
1556 + * VideoCore Shared Memory CMA allocator
1558 + * Copyright: 2018, Raspberry Pi (Trading) Ltd
1560 + * Based on vc_sm_defs.h from the vmcs_sm driver Copyright Broadcom Corporation.
1561 + * All IPC messages are copied across to this file, even if the vc-sm-cma
1562 + * driver is not currently using them.
1564 + ****************************************************************************
1567 +#ifndef __VC_SM_DEFS_H__INCLUDED__
1568 +#define __VC_SM_DEFS_H__INCLUDED__
1570 +/* FourCC code used for VCHI connection */
1571 +#define VC_SM_SERVER_NAME MAKE_FOURCC("SMEM")
1573 +/* Maximum message length */
1574 +#define VC_SM_MAX_MSG_LEN (sizeof(union vc_sm_msg_union_t) + \
1575 + sizeof(struct vc_sm_msg_hdr_t))
1576 +#define VC_SM_MAX_RSP_LEN (sizeof(union vc_sm_msg_union_t))
1578 +/* Resource name maximum size */
1579 +#define VC_SM_RESOURCE_NAME 32
1582 + * Version to be reported to the VPU
1583 + * VPU assumes 0 (aka 1) which does not require the released callback, nor
1584 + * expect the client to handle VC_MEM_REQUESTS.
1585 + * Version 2 requires the released callback, and must support VC_MEM_REQUESTS.
1587 +#define VC_SM_PROTOCOL_VERSION 2
1589 +enum vc_sm_msg_type {
1590 + /* Message types supported for HOST->VC direction */
1592 + /* Allocate shared memory block */
1593 + VC_SM_MSG_TYPE_ALLOC,
1594 + /* Lock allocated shared memory block */
1595 + VC_SM_MSG_TYPE_LOCK,
1596 + /* Unlock allocated shared memory block */
1597 + VC_SM_MSG_TYPE_UNLOCK,
1598 + /* Unlock allocated shared memory block, do not answer command */
1599 + VC_SM_MSG_TYPE_UNLOCK_NOANS,
1600 + /* Free shared memory block */
1601 + VC_SM_MSG_TYPE_FREE,
1602 + /* Resize a shared memory block */
1603 + VC_SM_MSG_TYPE_RESIZE,
1604 + /* Walk the allocated shared memory block(s) */
1605 + VC_SM_MSG_TYPE_WALK_ALLOC,
1607 + /* A previously applied action will need to be reverted */
1608 + VC_SM_MSG_TYPE_ACTION_CLEAN,
1611 + * Import a physical address and wrap into a MEM_HANDLE_T.
1612 + * Release with VC_SM_MSG_TYPE_FREE.
1614 + VC_SM_MSG_TYPE_IMPORT,
1616 +	 * Tells VC the protocol version supported by this client.
1617 + * 2 supports the async/cmd messages from the VPU for final release
1618 + * of memory, and for VC allocations.
1620 + VC_SM_MSG_TYPE_CLIENT_VERSION,
1621 + /* Response to VC request for memory */
1622 + VC_SM_MSG_TYPE_VC_MEM_REQUEST_REPLY,
1625 + * Asynchronous/cmd messages supported for VC->HOST direction.
1626 + * Signalled by setting the top bit in vc_sm_result_t trans_id.
1630 + * VC has finished with an imported memory allocation.
1631 + * Release any Linux reference counts on the underlying block.
1633 + VC_SM_MSG_TYPE_RELEASED,
1634 + /* VC request for memory */
1635 + VC_SM_MSG_TYPE_VC_MEM_REQUEST,
1637 + VC_SM_MSG_TYPE_MAX
1640 +/* Type of memory to be allocated */
1641 +enum vc_sm_alloc_type_t {
1642 + VC_SM_ALLOC_CACHED,
1643 + VC_SM_ALLOC_NON_CACHED,
1646 +/* Message header for all messages in HOST->VC direction */
1647 +struct vc_sm_msg_hdr_t {
1654 +/* Request to allocate memory (HOST->VC) */
1655 +struct vc_sm_alloc_t {
1656 + /* type of memory to allocate */
1657 + enum vc_sm_alloc_type_t type;
1658 + /* byte amount of data to allocate per unit */
1660 + /* number of unit to allocate */
1662 + /* alignment to be applied on allocation */
1664 + /* identity of who allocated this block */
1666 + /* resource name (for easier tracking on vc side) */
1667 + char name[VC_SM_RESOURCE_NAME];
1671 +/* Result of a requested memory allocation (VC->HOST) */
1672 +struct vc_sm_alloc_result_t {
1673 + /* Transaction identifier */
1676 + /* Resource handle */
1678 + /* Pointer to resource buffer */
1680 + /* Resource base size (bytes) */
1681 + u32 res_base_size;
1682 + /* Resource number */
1687 +/* Request to free a previously allocated memory (HOST->VC) */
1688 +struct vc_sm_free_t {
1689 + /* Resource handle (returned from alloc) */
1691 + /* Resource buffer (returned from alloc) */
1696 +/* Request to lock a previously allocated memory (HOST->VC) */
1697 +struct vc_sm_lock_unlock_t {
1698 + /* Resource handle (returned from alloc) */
1700 + /* Resource buffer (returned from alloc) */
1705 +/* Request to resize a previously allocated memory (HOST->VC) */
1706 +struct vc_sm_resize_t {
1707 + /* Resource handle (returned from alloc) */
1709 + /* Resource buffer (returned from alloc) */
1711 + /* Resource *new* size requested (bytes) */
1716 +/* Result of a requested memory lock (VC->HOST) */
1717 +struct vc_sm_lock_result_t {
1718 + /* Transaction identifier */
1721 + /* Resource handle */
1723 + /* Pointer to resource buffer */
1726 + * Pointer to former resource buffer if the memory
1733 +/* Generic result for a request (VC->HOST) */
1734 +struct vc_sm_result_t {
1735 + /* Transaction identifier */
1742 +/* Request to revert a previously applied action (HOST->VC) */
1743 +struct vc_sm_action_clean_t {
1744 + /* Action of interest */
1745 + enum vc_sm_msg_type res_action;
1746 + /* Transaction identifier for the action of interest */
1747 + u32 action_trans_id;
1751 +/* Request to remove all data associated with a given allocator (HOST->VC) */
1752 +struct vc_sm_free_all_t {
1753 + /* Allocator identifier */
1757 +/* Request to import memory (HOST->VC) */
1758 +struct vc_sm_import {
1759 + /* type of memory to allocate */
1760 + enum vc_sm_alloc_type_t type;
1761 + /* pointer to the VC (ie physical) address of the allocated memory */
1763 + /* size of buffer */
1765 + /* opaque handle returned in RELEASED messages */
1767 + /* Allocator identifier */
1769 + /* resource name (for easier tracking on vc side) */
1770 + char name[VC_SM_RESOURCE_NAME];
1773 +/* Result of a requested memory import (VC->HOST) */
1774 +struct vc_sm_import_result {
1775 + /* Transaction identifier */
1778 + /* Resource handle */
1782 +/* Notification that VC has finished with an allocation (VC->HOST) */
1783 +struct vc_sm_released {
1784 + /* cmd type / trans_id */
1787 + /* pointer to the VC (ie physical) address of the allocated memory */
1789 + /* size of buffer */
1791 + /* opaque handle returned in RELEASED messages */
1797 + * Client informing VC as to the protocol version it supports.
1798 + * >=2 requires the released callback, and supports VC asking for memory.
1799 + * Failure means that the firmware doesn't support this call, and therefore the
1800 + * client should either fail, or NOT rely on getting the released callback.
1802 +struct vc_sm_version {
1806 +/* Request FROM VideoCore for some memory */
1807 +struct vc_sm_vc_mem_request {
1811 + /* trans_id (from VPU) */
1813 + /* size of buffer */
1815 + /* alignment of buffer */
1817 + /* resource name (for easier tracking) */
1818 + char name[VC_SM_RESOURCE_NAME];
1821 +/* Response from the kernel to provide the VPU with some memory */
1822 +struct vc_sm_vc_mem_request_result {
1823 + /* Transaction identifier for the VPU */
1825 + /* pointer to the physical address of the allocated memory */
1827 + /* opaque handle returned in RELEASED messages */
1831 +/* Union of ALL messages */
1832 +union vc_sm_msg_union_t {
1833 + struct vc_sm_alloc_t alloc;
1834 + struct vc_sm_alloc_result_t alloc_result;
1835 + struct vc_sm_free_t free;
1836 + struct vc_sm_lock_unlock_t lock_unlock;
1837 + struct vc_sm_action_clean_t action_clean;
1838 + struct vc_sm_resize_t resize;
1839 + struct vc_sm_lock_result_t lock_result;
1840 + struct vc_sm_result_t result;
1841 + struct vc_sm_free_all_t free_all;
1842 + struct vc_sm_import import;
1843 + struct vc_sm_import_result import_result;
1844 + struct vc_sm_version version;
1845 + struct vc_sm_released released;
1846 + struct vc_sm_vc_mem_request vc_request;
1847 + struct vc_sm_vc_mem_request_result vc_request_result;
1850 +#endif /* __VC_SM_DEFS_H__INCLUDED__ */
1852 +++ b/drivers/staging/vc04_services/vc-sm-cma/vc_sm_knl.h
1854 +/* SPDX-License-Identifier: GPL-2.0 */
1857 + * VideoCore Shared Memory CMA allocator
1859 + * Copyright: 2018, Raspberry Pi (Trading) Ltd
1861 + * Based on vc_sm_defs.h from the vmcs_sm driver Copyright Broadcom Corporation.
1865 +#ifndef __VC_SM_KNL_H__INCLUDED__
1866 +#define __VC_SM_KNL_H__INCLUDED__
1868 +#if !defined(__KERNEL__)
1869 +#error "This interface is for kernel use only..."
1872 +/* Free a previously allocated or imported shared memory handle and block. */
1873 +int vc_sm_cma_free(int handle);
1875 +/* Get an internal resource handle mapped from the external one. */
1876 +int vc_sm_cma_int_handle(int handle);
1878 +/* Import a block of memory into the GPU space. */
1879 +int vc_sm_cma_import_dmabuf(struct dma_buf *dmabuf, int *handle);
1881 +#endif /* __VC_SM_KNL_H__INCLUDED__ */