1 From c7fd44aded246efddc9cd7b68f56127226906c47 Mon Sep 17 00:00:00 2001
2 From: Tim Gover <tgover@broadcom.com>
3 Date: Tue, 22 Jul 2014 15:41:04 +0100
4 Subject: [PATCH] vcsm: VideoCore shared memory service for BCM2835
6 Content-Type: text/plain; charset=UTF-8
7 Content-Transfer-Encoding: 8bit
9 Add experimental support for the VideoCore shared memory service.
10 This allows user processes to allocate memory from VideoCore's
11 GPU relocatable heap and mmap the buffers. Additionally, the memory
12 handles can be passed to other VideoCore services such as MMAL, OpenMax
16 * This driver was originally released for BCM28155 which has a different
17 cache architecture to BCM2835. Consequently, in this release only
18 uncached mappings are supported. However, there's no fundamental
19 reason why cached mappings cannot be supported on BCM2835
20 * More refactoring is required to remove the typedefs.
21 * Re-enable some of the commented-out debug-fs statistics which were
22 disabled when migrating code from proc-fs.
23 * There's a lot of code to support sharing of VCSM in order to support
24 Android. This could probably be done more cleanly or perhaps just
27 Signed-off-by: Tim Gover <timgover@gmail.com>
29 config: Disable VC_SM for now to fix hang with cutdown kernel
31 vcsm: Use boolean as it cannot be built as module
33 On building the bcm_vc_sm as a module we get the following error:
35 v7_dma_flush_range and do_munmap are undefined in vc-sm.ko.
37 Fix by making it not an option to build as module
39 vcsm: Add ioctl for custom cache flushing
41 vc-sm: Move headers out of arch directory
43 Signed-off-by: Noralf Trønnes <noralf@tronnes.org>
45 vcsm: Treat EBUSY as success rather than SIGBUS
47 Currently if two cores access the same page concurrently one will return VM_FAULT_NOPAGE
48 and the other VM_FAULT_SIGBUS crashing the user code.
50 Also report when mapping fails.
52 Signed-off-by: popcornmix <popcornmix@gmail.com>
54 vcsm: Provide new ioctl to clean/invalidate a 2D block
56 vcsm: Convert to loading via device tree.
58 Signed-off-by: Dave Stevenson <dave.stevenson@raspberrypi.org>
60 VCSM: New option to import a DMABUF for VPU use
62 Takes a dmabuf, and then calls over to the VPU to wrap
63 it into a suitable handle.
65 Signed-off-by: Dave Stevenson <dave.stevenson@raspberrypi.org>
67 vcsm: fix multi-platform build
69 vcsm: add macros for cache functions
71 vcsm: use dma APIs for cache functions
73 * Will handle multi-platform builds
75 vcsm: Fix up macros to avoid breaking numbers used by existing apps
77 vcsm: Define cache operation constants in user header
79 Without this change, users have to use raw values (1, 2, 3) to specify
82 Signed-off-by: Sugizaki Yukimasa <i.can.speak.c.and.basic@gmail.com>
84 vcsm: Support for finding user/vc handle in memory pool
86 vmcs_sm_{usr,vc}_handle_from_pid_and_address() were failing to find
87 handle if specified user pointer is not exactly the one that the memory
88 locking call returned even if the pointer is in range of map/resource.
89 So fixed the functions to match the range.
91 Signed-off-by: Sugizaki Yukimasa <i.can.speak.c.and.basic@gmail.com>
93 vcsm: Unify cache manipulating functions
95 Signed-off-by: Sugizaki Yukimasa <i.can.speak.c.and.basic@gmail.com>
97 vcsm: Fix obscure conditions
99 Signed-off-by: Sugizaki Yukimasa <i.can.speak.c.and.basic@gmail.com>
101 vcsm: Fix memory leaking on clean_invalid2 ioctl handler
103 Signed-off-by: Sugizaki Yukimasa <i.can.speak.c.and.basic@gmail.com>
105 vcsm: Describe the use of cache operation constants
107 Signed-off-by: Sugizaki Yukimasa <i.can.speak.c.and.basic@gmail.com>
109 vcsm: Fix obscure conditions again
111 Signed-off-by: Sugizaki Yukimasa <i.can.speak.c.and.basic@gmail.com>
113 vcsm: Add no-op cache operation constant
115 Signed-off-by: Sugizaki Yukimasa <i.can.speak.c.and.basic@gmail.com>
117 vcsm: Revert to do page-table-walk-based cache manipulating on some ioctl calls
119 On FLUSH, INVALID, CLEAN_INVALID ioctl calls, cache operations based on
120 page table walk were used in case that the buffer of the cache is not
121 pinned. So reverted to do page-table-based cache manipulating.
123 Signed-off-by: Sugizaki Yukimasa <i.can.speak.c.and.basic@gmail.com>
125 vcsm: Define cache operation constants in user header
127 Without this change, users have to use raw values (1, 2, 3) to specify
130 Signed-off-by: Sugizaki Yukimasa <i.can.speak.c.and.basic@gmail.com>
132 vcsm: Updates for changed vchiq interface
134 vcsm: Fix a NULL dereference in the import_dmabuf error path
136 resource was dereferenced even though it was NULL.
138 Signed-off-by: Dave Stevenson <dave.stevenson@raspberrypi.org>
140 vcsm: Use struct service_creation
142 vcsm: Fix makefile include on out-of-tree builds
144 The vc_sm module tries to include the 'fs' directory from the
145 $(srctree). $(srctree) is already provided by the build system, and
146 causes the include path to be duplicated.
148 With -Werror this fails to compile.
150 Remove the unnecessary variable.
152 Signed-off-by: Kieran Bingham <kieran.bingham@ideasonboard.com>
154 vcsm: Remove set but unused variable
156 The 'success' variable is set by the call to vchi_service_close() but never checked.
157 Remove it, keeping the call in place.
159 Signed-off-by: Kieran Bingham <kieran.bingham@ideasonboard.com>
161 vcsm: Reduce scope of local functions
167 vc_sm_ioctl_alloc_share
168 vc_sm_ioctl_import_dmabuf
170 Are declared without a prototype. They are not used outside of this
171 module, thus - convert them to static functions.
173 Signed-off-by: Kieran Bingham <kieran.bingham@ideasonboard.com>
175 vc_sm: Support building in a non-src folder
177 If we build the kernel with "-O=$non-src-folder", this driver will
178 introduce a build error because of the header's location.
180 Signed-off-by: Hui Wang <hui.wang@canonical.com>
182 drivers/char/Kconfig | 2 +
183 drivers/char/Makefile | 1 +
184 drivers/char/broadcom/Kconfig | 10 +
185 drivers/char/broadcom/Makefile | 1 +
186 drivers/char/broadcom/vc_sm/Makefile | 9 +
187 drivers/char/broadcom/vc_sm/vc_sm_defs.h | 237 ++
188 drivers/char/broadcom/vc_sm/vc_sm_knl.h | 53 +
189 drivers/char/broadcom/vc_sm/vc_vchi_sm.c | 500 +++
190 drivers/char/broadcom/vc_sm/vc_vchi_sm.h | 100 +
191 drivers/char/broadcom/vc_sm/vmcs_sm.c | 3526 ++++++++++++++++++++++
192 include/linux/broadcom/vmcs_sm_ioctl.h | 294 ++
193 11 files changed, 4733 insertions(+)
194 create mode 100644 drivers/char/broadcom/vc_sm/Makefile
195 create mode 100644 drivers/char/broadcom/vc_sm/vc_sm_defs.h
196 create mode 100644 drivers/char/broadcom/vc_sm/vc_sm_knl.h
197 create mode 100644 drivers/char/broadcom/vc_sm/vc_vchi_sm.c
198 create mode 100644 drivers/char/broadcom/vc_sm/vc_vchi_sm.h
199 create mode 100644 drivers/char/broadcom/vc_sm/vmcs_sm.c
200 create mode 100644 include/linux/broadcom/vmcs_sm_ioctl.h
202 --- a/drivers/char/Kconfig
203 +++ b/drivers/char/Kconfig
206 menu "Character devices"
208 +source "drivers/char/broadcom/Kconfig"
210 source "drivers/tty/Kconfig"
213 --- a/drivers/char/Makefile
214 +++ b/drivers/char/Makefile
215 @@ -52,3 +52,4 @@ js-rtc-y = rtc.o
216 obj-$(CONFIG_XILLYBUS) += xillybus/
217 obj-$(CONFIG_POWERNV_OP_PANEL) += powernv-op-panel.o
218 obj-$(CONFIG_ADI) += adi.o
219 +obj-$(CONFIG_BRCM_CHAR_DRIVERS) += broadcom/
220 --- a/drivers/char/broadcom/Kconfig
221 +++ b/drivers/char/broadcom/Kconfig
222 @@ -16,3 +16,13 @@ config BCM2708_VCMEM
223 Helper for videocore memory access and total size allocation.
228 + bool "VMCS Shared Memory"
229 + depends on BCM2835_VCHIQ
230 + select BCM2708_VCMEM
231 + select DMA_SHARED_BUFFER
234 + Support for the VC shared memory on the Broadcom reference
235 + design. Uses the VCHIQ stack.
236 --- a/drivers/char/broadcom/Makefile
237 +++ b/drivers/char/broadcom/Makefile
239 obj-$(CONFIG_BCM2708_VCMEM) += vc_mem.o
240 +obj-$(CONFIG_BCM_VC_SM) += vc_sm/
242 +++ b/drivers/char/broadcom/vc_sm/Makefile
244 +ccflags-$(CONFIG_BCM_VC_SM) += -Werror -Wall -Wstrict-prototypes -Wno-trigraphs -O2
245 +ccflags-$(CONFIG_BCM_VC_SM) += -I$(srctree)/"drivers/staging/vc04_services" -I$(srctree)/"drivers/staging/vc04_services/interface/vchi" -I$(srctree)/"drivers/staging/vc04_services/interface/vchiq_arm" -I$(srctree)/"fs"
246 +ccflags-$(CONFIG_BCM_VC_SM) += -DOS_ASSERT_FAILURE -D__STDC_VERSION=199901L -D__STDC_VERSION__=199901L -D__VCCOREVER__=0 -D__KERNEL__ -D__linux__
248 +obj-$(CONFIG_BCM_VC_SM) := vc-sm.o
254 +++ b/drivers/char/broadcom/vc_sm/vc_sm_defs.h
257 + ****************************************************************************
258 + * Copyright 2011 Broadcom Corporation. All rights reserved.
260 + * Unless you and Broadcom execute a separate written software license
261 + * agreement governing use of this software, this software is licensed to you
262 + * under the terms of the GNU General Public License version 2, available at
263 + * http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
265 + * Notwithstanding the above, under no circumstances may you combine this
266 + * software in any way with any other Broadcom software provided under a
267 + * license other than the GPL, without Broadcom's express prior written
269 + ****************************************************************************
272 +#ifndef __VC_SM_DEFS_H__INCLUDED__
273 +#define __VC_SM_DEFS_H__INCLUDED__
275 +/* FourCC code used for VCHI connection */
276 +#define VC_SM_SERVER_NAME MAKE_FOURCC("SMEM")
278 +/* Maximum message length */
279 +#define VC_SM_MAX_MSG_LEN (sizeof(union vc_sm_msg_union_t) + \
280 + sizeof(struct vc_sm_msg_hdr_t))
281 +#define VC_SM_MAX_RSP_LEN (sizeof(union vc_sm_msg_union_t))
283 +/* Resource name maximum size */
284 +#define VC_SM_RESOURCE_NAME 32
286 +enum vc_sm_msg_type {
287 + /* Message types supported for HOST->VC direction */
289 + /* Allocate shared memory block */
290 + VC_SM_MSG_TYPE_ALLOC,
291 + /* Lock allocated shared memory block */
292 + VC_SM_MSG_TYPE_LOCK,
293 + /* Unlock allocated shared memory block */
294 + VC_SM_MSG_TYPE_UNLOCK,
295 + /* Unlock allocated shared memory block, do not answer command */
296 + VC_SM_MSG_TYPE_UNLOCK_NOANS,
297 + /* Free shared memory block */
298 + VC_SM_MSG_TYPE_FREE,
299 + /* Resize a shared memory block */
300 + VC_SM_MSG_TYPE_RESIZE,
301 + /* Walk the allocated shared memory block(s) */
302 + VC_SM_MSG_TYPE_WALK_ALLOC,
304 + /* A previously applied action will need to be reverted */
305 + VC_SM_MSG_TYPE_ACTION_CLEAN,
308 + * Import a physical address and wrap into a MEM_HANDLE_T.
309 + * Release with VC_SM_MSG_TYPE_FREE.
311 + VC_SM_MSG_TYPE_IMPORT,
313 + /* Message types supported for VC->HOST direction */
316 + * VC has finished with an imported memory allocation.
317 + * Release any Linux reference counts on the underlying block.
319 + VC_SM_MSG_TYPE_RELEASED,
324 +/* Type of memory to be allocated */
325 +enum vc_sm_alloc_type_t {
326 + VC_SM_ALLOC_CACHED,
327 + VC_SM_ALLOC_NON_CACHED,
330 +/* Message header for all messages in HOST->VC direction */
331 +struct vc_sm_msg_hdr_t {
338 +/* Request to allocate memory (HOST->VC) */
339 +struct vc_sm_alloc_t {
340 + /* type of memory to allocate */
341 + enum vc_sm_alloc_type_t type;
342 + /* byte amount of data to allocate per unit */
343 + uint32_t base_unit;
344 + /* number of unit to allocate */
346 +	/* alignment to be applied on allocation */
347 + uint32_t alignement;
348 + /* identity of who allocated this block */
349 + uint32_t allocator;
350 + /* resource name (for easier tracking on vc side) */
351 + char name[VC_SM_RESOURCE_NAME];
355 +/* Result of a requested memory allocation (VC->HOST) */
356 +struct vc_sm_alloc_result_t {
357 + /* Transaction identifier */
360 + /* Resource handle */
361 + uint32_t res_handle;
362 + /* Pointer to resource buffer */
364 + /* Resource base size (bytes) */
365 + uint32_t res_base_size;
366 + /* Resource number */
371 +/* Request to free a previously allocated memory (HOST->VC) */
372 +struct vc_sm_free_t {
373 + /* Resource handle (returned from alloc) */
374 + uint32_t res_handle;
375 + /* Resource buffer (returned from alloc) */
380 +/* Request to lock a previously allocated memory (HOST->VC) */
381 +struct vc_sm_lock_unlock_t {
382 + /* Resource handle (returned from alloc) */
383 + uint32_t res_handle;
384 + /* Resource buffer (returned from alloc) */
389 +/* Request to resize a previously allocated memory (HOST->VC) */
390 +struct vc_sm_resize_t {
391 + /* Resource handle (returned from alloc) */
392 + uint32_t res_handle;
393 + /* Resource buffer (returned from alloc) */
395 + /* Resource *new* size requested (bytes) */
396 + uint32_t res_new_size;
400 +/* Result of a requested memory lock (VC->HOST) */
401 +struct vc_sm_lock_result_t {
402 + /* Transaction identifier */
405 + /* Resource handle */
406 + uint32_t res_handle;
407 + /* Pointer to resource buffer */
410 + * Pointer to former resource buffer if the memory
413 + uint32_t res_old_mem;
417 +/* Generic result for a request (VC->HOST) */
418 +struct vc_sm_result_t {
419 + /* Transaction identifier */
426 +/* Request to revert a previously applied action (HOST->VC) */
427 +struct vc_sm_action_clean_t {
428 + /* Action of interest */
429 + enum vc_sm_msg_type res_action;
430 + /* Transaction identifier for the action of interest */
431 + uint32_t action_trans_id;
435 +/* Request to remove all data associated with a given allocator (HOST->VC) */
436 +struct vc_sm_free_all_t {
437 + /* Allocator identifier */
438 + uint32_t allocator;
441 +/* Request to import memory (HOST->VC) */
442 +struct vc_sm_import {
443 + /* type of memory to allocate */
444 + enum vc_sm_alloc_type_t type;
445 + /* pointer to the VC (ie physical) address of the allocated memory */
447 + /* size of buffer */
449 + /* opaque handle returned in RELEASED messages */
451 + /* Allocator identifier */
452 + uint32_t allocator;
453 + /* resource name (for easier tracking on vc side) */
454 + char name[VC_SM_RESOURCE_NAME];
457 +/* Result of a requested memory import (VC->HOST) */
458 +struct vc_sm_import_result {
459 + /* Transaction identifier */
462 + /* Resource handle */
463 + uint32_t res_handle;
466 +/* Notification that VC has finished with an allocation (VC->HOST) */
467 +struct vc_sm_released {
468 + /* pointer to the VC (ie physical) address of the allocated memory */
470 + /* size of buffer */
472 + /* opaque handle returned in RELEASED messages */
476 +/* Union of ALL messages */
477 +union vc_sm_msg_union_t {
478 + struct vc_sm_alloc_t alloc;
479 + struct vc_sm_alloc_result_t alloc_result;
480 + struct vc_sm_free_t free;
481 + struct vc_sm_lock_unlock_t lock_unlock;
482 + struct vc_sm_action_clean_t action_clean;
483 + struct vc_sm_resize_t resize;
484 + struct vc_sm_lock_result_t lock_result;
485 + struct vc_sm_result_t result;
486 + struct vc_sm_free_all_t free_all;
487 + struct vc_sm_import import;
488 + struct vc_sm_import_result import_result;
489 + struct vc_sm_released released;
492 +#endif /* __VC_SM_DEFS_H__INCLUDED__ */
494 +++ b/drivers/char/broadcom/vc_sm/vc_sm_knl.h
497 + ****************************************************************************
498 + * Copyright 2011 Broadcom Corporation. All rights reserved.
500 + * Unless you and Broadcom execute a separate written software license
501 + * agreement governing use of this software, this software is licensed to you
502 + * under the terms of the GNU General Public License version 2, available at
503 + * http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
505 + * Notwithstanding the above, under no circumstances may you combine this
506 + * software in any way with any other Broadcom software provided under a
507 + * license other than the GPL, without Broadcom's express prior written
509 + ****************************************************************************
512 +#ifndef __VC_SM_KNL_H__INCLUDED__
513 +#define __VC_SM_KNL_H__INCLUDED__
515 +#if !defined(__KERNEL__)
516 +#error "This interface is for kernel use only..."
519 +/* Type of memory to be locked (ie mapped) */
520 +enum vc_sm_lock_cache_mode {
522 + VC_SM_LOCK_NON_CACHED,
525 +/* Allocate a shared memory handle and block. */
526 +int vc_sm_alloc(struct vc_sm_alloc_t *alloc, int *handle);
528 +/* Free a previously allocated shared memory handle and block. */
529 +int vc_sm_free(int handle);
531 +/* Lock a memory handle for use by kernel. */
532 +int vc_sm_lock(int handle, enum vc_sm_lock_cache_mode mode,
533 + unsigned long *data);
535 +/* Unlock a memory handle in use by kernel. */
536 +int vc_sm_unlock(int handle, int flush, int no_vc_unlock);
538 +/* Get an internal resource handle mapped from the external one. */
539 +int vc_sm_int_handle(int handle);
541 +/* Map a shared memory region for use by kernel. */
542 +int vc_sm_map(int handle, unsigned int sm_addr,
543 + enum vc_sm_lock_cache_mode mode, unsigned long *data);
545 +/* Import a block of memory into the GPU space. */
546 +int vc_sm_import_dmabuf(struct dma_buf *dmabuf, int *handle);
548 +#endif /* __VC_SM_KNL_H__INCLUDED__ */
550 +++ b/drivers/char/broadcom/vc_sm/vc_vchi_sm.c
553 + ****************************************************************************
554 + * Copyright 2011-2012 Broadcom Corporation. All rights reserved.
556 + * Unless you and Broadcom execute a separate written software license
557 + * agreement governing use of this software, this software is licensed to you
558 + * under the terms of the GNU General Public License version 2, available at
559 + * http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
561 + * Notwithstanding the above, under no circumstances may you combine this
562 + * software in any way with any other Broadcom software provided under a
563 + * license other than the GPL, without Broadcom's express prior written
565 + ****************************************************************************
568 +/* ---- Include Files ----------------------------------------------------- */
569 +#include <linux/types.h>
570 +#include <linux/kernel.h>
571 +#include <linux/list.h>
572 +#include <linux/semaphore.h>
573 +#include <linux/mutex.h>
574 +#include <linux/slab.h>
575 +#include <linux/kthread.h>
577 +#include "vc_vchi_sm.h"
580 +#define VC_SM_MIN_VER 0
582 +/* ---- Private Constants and Types -------------------------------------- */
584 +/* Command blocks come from a pool */
585 +#define SM_MAX_NUM_CMD_RSP_BLKS 32
587 +struct sm_cmd_rsp_blk {
588 + struct list_head head; /* To create lists */
589 + struct semaphore sema; /* To be signaled when the response is there */
594 + uint8_t msg[VC_SM_MAX_MSG_LEN];
602 +struct sm_instance {
603 + uint32_t num_connections;
604 + VCHI_SERVICE_HANDLE_T vchi_handle[VCHI_MAX_NUM_CONNECTIONS];
605 + struct task_struct *io_thread;
606 + struct semaphore io_sema;
611 + struct list_head cmd_list;
612 + struct list_head rsp_list;
613 + struct list_head dead_list;
615 + struct sm_cmd_rsp_blk free_blk[SM_MAX_NUM_CMD_RSP_BLKS];
616 + struct list_head free_list;
617 + struct mutex free_lock;
618 + struct semaphore free_sema;
622 +/* ---- Private Variables ------------------------------------------------ */
624 +/* ---- Private Function Prototypes -------------------------------------- */
626 +/* ---- Private Functions ------------------------------------------------ */
628 +bcm2835_vchi_msg_queue(VCHI_SERVICE_HANDLE_T handle,
632 + return vchi_queue_kernel_message(handle,
638 +sm_cmd_rsp_blk *vc_vchi_cmd_create(struct sm_instance *instance,
639 + enum vc_sm_msg_type id, void *msg,
640 + uint32_t size, int wait)
642 + struct sm_cmd_rsp_blk *blk;
643 + struct vc_sm_msg_hdr_t *hdr;
645 + if (down_interruptible(&instance->free_sema)) {
646 + blk = kmalloc(sizeof(*blk), GFP_KERNEL);
651 + sema_init(&blk->sema, 0);
653 + mutex_lock(&instance->free_lock);
655 + list_first_entry(&instance->free_list,
656 + struct sm_cmd_rsp_blk, head);
657 + list_del(&blk->head);
658 + mutex_unlock(&instance->free_lock);
663 + blk->length = sizeof(*hdr) + size;
665 + hdr = (struct vc_sm_msg_hdr_t *) blk->msg;
667 + mutex_lock(&instance->lock);
668 + hdr->trans_id = blk->id = ++instance->trans_id;
669 + mutex_unlock(&instance->lock);
672 + memcpy(hdr->body, msg, size);
678 +vc_vchi_cmd_delete(struct sm_instance *instance, struct sm_cmd_rsp_blk *blk)
685 + mutex_lock(&instance->free_lock);
686 + list_add(&blk->head, &instance->free_list);
687 + mutex_unlock(&instance->free_lock);
688 + up(&instance->free_sema);
691 +static int vc_vchi_sm_videocore_io(void *arg)
693 + struct sm_instance *instance = arg;
694 + struct sm_cmd_rsp_blk *cmd = NULL, *cmd_tmp;
695 + struct vc_sm_result_t *reply;
696 + uint32_t reply_len;
702 + vchi_service_release(instance->vchi_handle[0]);
704 + if (!down_interruptible(&instance->io_sema)) {
705 + vchi_service_use(instance->vchi_handle[0]);
710 + * Get new command and move it to response list
712 + mutex_lock(&instance->lock);
713 + if (list_empty(&instance->cmd_list)) {
714 + /* no more commands to process */
715 + mutex_unlock(&instance->lock);
719 + list_first_entry(&instance->cmd_list,
720 + struct sm_cmd_rsp_blk,
722 + list_move(&cmd->head, &instance->rsp_list);
724 + mutex_unlock(&instance->lock);
726 + /* Send the command */
727 + status = bcm2835_vchi_msg_queue(
728 + instance->vchi_handle[0],
729 + cmd->msg, cmd->length);
731 + pr_err("%s: failed to queue message (%d)",
735 + /* If no reply is needed then we're done */
737 + mutex_lock(&instance->lock);
738 + list_del(&cmd->head);
739 + mutex_unlock(&instance->lock);
740 + vc_vchi_cmd_delete(instance, cmd);
751 + while (!vchi_msg_peek
752 + (instance->vchi_handle[0], (void **)&reply,
753 + &reply_len, VCHI_FLAGS_NONE)) {
754 + mutex_lock(&instance->lock);
755 + list_for_each_entry(cmd, &instance->rsp_list,
757 + if (cmd->id == reply->trans_id)
760 + mutex_unlock(&instance->lock);
762 + if (&cmd->head == &instance->rsp_list) {
763 + pr_debug("%s: received response %u, throw away...",
764 + __func__, reply->trans_id);
765 + } else if (reply_len > sizeof(cmd->msg)) {
766 + pr_err("%s: reply too big (%u) %u, throw away...",
767 + __func__, reply_len,
770 + memcpy(cmd->msg, reply, reply_len);
774 + vchi_msg_remove(instance->vchi_handle[0]);
777 + /* Go through the dead list and free them */
778 + mutex_lock(&instance->lock);
779 + list_for_each_entry_safe(cmd, cmd_tmp,
780 + &instance->dead_list, head) {
781 + list_del(&cmd->head);
782 + vc_vchi_cmd_delete(instance, cmd);
784 + mutex_unlock(&instance->lock);
791 +static void vc_sm_vchi_callback(void *param,
792 + const VCHI_CALLBACK_REASON_T reason,
795 + struct sm_instance *instance = param;
800 + case VCHI_CALLBACK_MSG_AVAILABLE:
801 + up(&instance->io_sema);
804 + case VCHI_CALLBACK_SERVICE_CLOSED:
805 + pr_info("%s: service CLOSED!!", __func__);
811 +struct sm_instance *vc_vchi_sm_init(VCHI_INSTANCE_T vchi_instance)
814 + struct sm_instance *instance;
816 + int num_connections = 1;
818 + pr_debug("%s: start", __func__);
820 + /* Allocate memory for this instance */
821 + instance = kzalloc(sizeof(*instance), GFP_KERNEL);
823 + /* Misc initialisations */
824 + mutex_init(&instance->lock);
825 + sema_init(&instance->io_sema, 0);
826 + INIT_LIST_HEAD(&instance->cmd_list);
827 + INIT_LIST_HEAD(&instance->rsp_list);
828 + INIT_LIST_HEAD(&instance->dead_list);
829 + INIT_LIST_HEAD(&instance->free_list);
830 + sema_init(&instance->free_sema, SM_MAX_NUM_CMD_RSP_BLKS);
831 + mutex_init(&instance->free_lock);
832 + for (i = 0; i < SM_MAX_NUM_CMD_RSP_BLKS; i++) {
833 + sema_init(&instance->free_blk[i].sema, 0);
834 + list_add(&instance->free_blk[i].head, &instance->free_list);
837 + /* Open the VCHI service connections */
838 + instance->num_connections = num_connections;
839 + for (i = 0; i < num_connections; i++) {
840 + struct service_creation params = {
841 + .version = VCHI_VERSION_EX(VC_SM_VER, VC_SM_MIN_VER),
842 + .service_id = VC_SM_SERVER_NAME,
843 + .callback = vc_sm_vchi_callback,
844 + .callback_param = instance,
847 + status = vchi_service_open(vchi_instance,
848 + ¶ms, &instance->vchi_handle[i]);
850 + pr_err("%s: failed to open VCHI service (%d)",
853 + goto err_close_services;
857 +	/* Create the thread which takes care of all io to/from videocore. */
858 + instance->io_thread = kthread_create(&vc_vchi_sm_videocore_io,
859 + (void *)instance, "SMIO");
860 + if (instance->io_thread == NULL) {
861 + pr_err("%s: failed to create SMIO thread", __func__);
863 + goto err_close_services;
865 + set_user_nice(instance->io_thread, -10);
866 + wake_up_process(instance->io_thread);
868 + pr_debug("%s: success - instance 0x%x", __func__,
869 + (unsigned int)instance);
873 + for (i = 0; i < instance->num_connections; i++) {
874 + if (instance->vchi_handle[i] != NULL)
875 + vchi_service_close(instance->vchi_handle[i]);
878 + pr_debug("%s: FAILED", __func__);
882 +int vc_vchi_sm_stop(struct sm_instance **handle)
884 + struct sm_instance *instance;
887 + if (handle == NULL) {
888 + pr_err("%s: invalid pointer to handle %p", __func__, handle);
892 + if (*handle == NULL) {
893 + pr_err("%s: invalid handle %p", __func__, *handle);
897 + instance = *handle;
899 + /* Close all VCHI service connections */
900 + for (i = 0; i < instance->num_connections; i++) {
901 + vchi_service_use(instance->vchi_handle[i]);
903 + vchi_service_close(instance->vchi_handle[i]);
915 +static int vc_vchi_sm_send_msg(struct sm_instance *handle,
916 + enum vc_sm_msg_type msg_id,
917 + void *msg, uint32_t msg_size,
918 + void *result, uint32_t result_size,
919 + uint32_t *cur_trans_id, uint8_t wait_reply)
922 + struct sm_instance *instance = handle;
923 + struct sm_cmd_rsp_blk *cmd_blk;
925 + if (handle == NULL) {
926 + pr_err("%s: invalid handle", __func__);
930 + pr_err("%s: invalid msg pointer", __func__);
935 + vc_vchi_cmd_create(instance, msg_id, msg, msg_size, wait_reply);
936 + if (cmd_blk == NULL) {
937 + pr_err("[%s]: failed to allocate global tracking resource",
942 + if (cur_trans_id != NULL)
943 + *cur_trans_id = cmd_blk->id;
945 + mutex_lock(&instance->lock);
946 + list_add_tail(&cmd_blk->head, &instance->cmd_list);
947 + mutex_unlock(&instance->lock);
948 + up(&instance->io_sema);
954 + /* Wait for the response */
955 + if (down_interruptible(&cmd_blk->sema)) {
956 + mutex_lock(&instance->lock);
957 + if (!cmd_blk->sent) {
958 + list_del(&cmd_blk->head);
959 + mutex_unlock(&instance->lock);
960 + vc_vchi_cmd_delete(instance, cmd_blk);
963 + mutex_unlock(&instance->lock);
965 + mutex_lock(&instance->lock);
966 + list_move(&cmd_blk->head, &instance->dead_list);
967 + mutex_unlock(&instance->lock);
968 + up(&instance->io_sema);
969 + return -EINTR; /* We're done */
972 + if (result && result_size) {
973 + memcpy(result, cmd_blk->msg, result_size);
975 + struct vc_sm_result_t *res =
976 + (struct vc_sm_result_t *) cmd_blk->msg;
977 + status = (res->success == 0) ? 0 : -ENXIO;
980 + mutex_lock(&instance->lock);
981 + list_del(&cmd_blk->head);
982 + mutex_unlock(&instance->lock);
983 + vc_vchi_cmd_delete(instance, cmd_blk);
987 +int vc_vchi_sm_alloc(struct sm_instance *handle, struct vc_sm_alloc_t *msg,
988 + struct vc_sm_alloc_result_t *result,
989 + uint32_t *cur_trans_id)
991 + return vc_vchi_sm_send_msg(handle, VC_SM_MSG_TYPE_ALLOC,
992 + msg, sizeof(*msg), result, sizeof(*result),
996 +int vc_vchi_sm_free(struct sm_instance *handle,
997 + struct vc_sm_free_t *msg, uint32_t *cur_trans_id)
999 + return vc_vchi_sm_send_msg(handle, VC_SM_MSG_TYPE_FREE,
1000 + msg, sizeof(*msg), 0, 0, cur_trans_id, 0);
1003 +int vc_vchi_sm_lock(struct sm_instance *handle,
1004 + struct vc_sm_lock_unlock_t *msg,
1005 + struct vc_sm_lock_result_t *result,
1006 + uint32_t *cur_trans_id)
1008 + return vc_vchi_sm_send_msg(handle, VC_SM_MSG_TYPE_LOCK,
1009 + msg, sizeof(*msg), result, sizeof(*result),
1013 +int vc_vchi_sm_unlock(struct sm_instance *handle,
1014 + struct vc_sm_lock_unlock_t *msg,
1015 + uint32_t *cur_trans_id, uint8_t wait_reply)
1017 + return vc_vchi_sm_send_msg(handle, wait_reply ?
1018 + VC_SM_MSG_TYPE_UNLOCK :
1019 + VC_SM_MSG_TYPE_UNLOCK_NOANS, msg,
1020 + sizeof(*msg), 0, 0, cur_trans_id,
1024 +int vc_vchi_sm_resize(struct sm_instance *handle, struct vc_sm_resize_t *msg,
1025 + uint32_t *cur_trans_id)
1027 + return vc_vchi_sm_send_msg(handle, VC_SM_MSG_TYPE_RESIZE,
1028 + msg, sizeof(*msg), 0, 0, cur_trans_id, 1);
1031 +int vc_vchi_sm_walk_alloc(struct sm_instance *handle)
1033 + return vc_vchi_sm_send_msg(handle, VC_SM_MSG_TYPE_WALK_ALLOC,
1034 + 0, 0, 0, 0, 0, 0);
1037 +int vc_vchi_sm_clean_up(struct sm_instance *handle,
1038 + struct vc_sm_action_clean_t *msg)
1040 + return vc_vchi_sm_send_msg(handle, VC_SM_MSG_TYPE_ACTION_CLEAN,
1041 + msg, sizeof(*msg), 0, 0, 0, 0);
1044 +int vc_vchi_sm_import(struct sm_instance *handle, struct vc_sm_import *msg,
1045 + struct vc_sm_import_result *result,
1046 + uint32_t *cur_trans_id)
1048 + return vc_vchi_sm_send_msg(handle, VC_SM_MSG_TYPE_IMPORT,
1049 + msg, sizeof(*msg), result, sizeof(*result),
1053 +++ b/drivers/char/broadcom/vc_sm/vc_vchi_sm.h
1056 + ****************************************************************************
1057 + * Copyright 2011 Broadcom Corporation. All rights reserved.
1059 + * Unless you and Broadcom execute a separate written software license
1060 + * agreement governing use of this software, this software is licensed to you
1061 + * under the terms of the GNU General Public License version 2, available at
1062 + * http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
1064 + * Notwithstanding the above, under no circumstances may you combine this
1065 + * software in any way with any other Broadcom software provided under a
1066 + * license other than the GPL, without Broadcom's express prior written
1068 + ****************************************************************************
1071 +#ifndef __VC_VCHI_SM_H__INCLUDED__
1072 +#define __VC_VCHI_SM_H__INCLUDED__
1074 +#include "interface/vchi/vchi.h"
1076 +#include "vc_sm_defs.h"
1079 + * Forward declare.
1081 +struct sm_instance;
1084 + * Initialize the shared memory service, opens up vchi connection to talk to it.
1086 +struct sm_instance *vc_vchi_sm_init(VCHI_INSTANCE_T vchi_instance);
1089 + * Terminates the shared memory service.
1091 +int vc_vchi_sm_stop(struct sm_instance **handle);
1094 + * Ask the shared memory service to allocate some memory on videocore and
1095 + * return the result of this allocation (which upon success will be a pointer
1096 + * to some memory in videocore space).
1098 +int vc_vchi_sm_alloc(struct sm_instance *handle, struct vc_sm_alloc_t *alloc,
1099 + struct vc_sm_alloc_result_t *alloc_result,
1100 + uint32_t *trans_id);
1103 + * Ask the shared memory service to free up some memory that was previously
1104 + * allocated by the vc_vchi_sm_alloc function call.
1106 +int vc_vchi_sm_free(struct sm_instance *handle,
1107 + struct vc_sm_free_t *free, uint32_t *trans_id);
1110 + * Ask the shared memory service to lock up some memory that was previously
1111 + * allocated by the vc_vchi_sm_alloc function call.
1113 +int vc_vchi_sm_lock(struct sm_instance *handle,
1114 + struct vc_sm_lock_unlock_t *lock_unlock,
1115 + struct vc_sm_lock_result_t *lock_result,
1116 + uint32_t *trans_id);
1119 + * Ask the shared memory service to unlock some memory that was previously
1120 + * allocated by the vc_vchi_sm_alloc function call.
1122 +int vc_vchi_sm_unlock(struct sm_instance *handle,
1123 + struct vc_sm_lock_unlock_t *lock_unlock,
1124 + uint32_t *trans_id, uint8_t wait_reply);
1127 + * Ask the shared memory service to resize some memory that was previously
1128 + * allocated by the vc_vchi_sm_alloc function call.
1130 +int vc_vchi_sm_resize(struct sm_instance *handle,
1131 + struct vc_sm_resize_t *resize, uint32_t *trans_id);
1134 + * Walk the allocated resources on the videocore side, the allocation will
1135 + * show up in the log. This is purely for debug/information and takes no
1136 + * specific actions.
1138 +int vc_vchi_sm_walk_alloc(struct sm_instance *handle);
1141 + * Clean up following a previously interrupted action which left the system
1142 + * in a bad state of some sort.
1144 +int vc_vchi_sm_clean_up(struct sm_instance *handle,
1145 + struct vc_sm_action_clean_t *action_clean);
1148 + * Import a contiguous block of memory and wrap it in a GPU MEM_HANDLE_T.
1150 +int vc_vchi_sm_import(struct sm_instance *handle, struct vc_sm_import *msg,
1151 + struct vc_sm_import_result *result,
1152 + uint32_t *cur_trans_id);
1154 +#endif /* __VC_VCHI_SM_H__INCLUDED__ */
1156 +++ b/drivers/char/broadcom/vc_sm/vmcs_sm.c
1159 + ****************************************************************************
1160 + * Copyright 2011-2012 Broadcom Corporation. All rights reserved.
1162 + * Unless you and Broadcom execute a separate written software license
1163 + * agreement governing use of this software, this software is licensed to you
1164 + * under the terms of the GNU General Public License version 2, available at
1165 + * http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
1167 + * Notwithstanding the above, under no circumstances may you combine this
1168 + * software in any way with any other Broadcom software provided under a
1169 + * license other than the GPL, without Broadcom's express prior written
1171 + ****************************************************************************
1174 +/* ---- Include Files ----------------------------------------------------- */
1176 +#include <linux/cdev.h>
1177 +#include <linux/broadcom/vc_mem.h>
1178 +#include <linux/device.h>
1179 +#include <linux/debugfs.h>
1180 +#include <linux/dma-mapping.h>
1181 +#include <linux/dma-buf.h>
1182 +#include <linux/errno.h>
1183 +#include <linux/fs.h>
1184 +#include <linux/hugetlb.h>
1185 +#include <linux/ioctl.h>
1186 +#include <linux/kernel.h>
1187 +#include <linux/list.h>
1188 +#include <linux/module.h>
1189 +#include <linux/mm.h>
1190 +#include <linux/of.h>
1191 +#include <linux/platform_device.h>
1192 +#include <linux/pfn.h>
1193 +#include <linux/proc_fs.h>
1194 +#include <linux/pagemap.h>
1195 +#include <linux/semaphore.h>
1196 +#include <linux/slab.h>
1197 +#include <linux/seq_file.h>
1198 +#include <linux/types.h>
1199 +#include <asm/cacheflush.h>
1201 +#include "vchiq_connected.h"
1202 +#include "vc_vchi_sm.h"
1204 +#include <linux/broadcom/vmcs_sm_ioctl.h>
1205 +#include "vc_sm_knl.h"
1207 +/* ---- Private Constants and Types --------------------------------------- */
1209 +#define DEVICE_NAME "vcsm"
1210 +#define DRIVER_NAME "bcm2835-vcsm"
1211 +#define DEVICE_MINOR 0
1213 +#define VC_SM_DIR_ROOT_NAME "vc-smem"
1214 +#define VC_SM_DIR_ALLOC_NAME "alloc"
1215 +#define VC_SM_STATE "state"
1216 +#define VC_SM_STATS "statistics"
1217 +#define VC_SM_RESOURCES "resources"
1218 +#define VC_SM_DEBUG "debug"
1219 +#define VC_SM_WRITE_BUF_SIZE 128
1221 +/* Statistics tracked per resource and globally. */
1249 +static const char *const sm_stats_human_read[] = {
1256 + "Cache Invalidate",
1260 +typedef int (*VC_SM_SHOW) (struct seq_file *s, void *v);
1262 + VC_SM_SHOW show; /* Debug fs function hookup. */
1263 + struct dentry *dir_entry; /* Debug fs directory entry. */
1264 + void *priv_data; /* Private data */
1268 +/* Single resource allocation tracked for all devices. */
1270 + struct list_head map_list; /* Linked list of maps. */
1272 + struct sm_resource_t *resource; /* Pointer to the resource. */
1274 + pid_t res_pid; /* PID owning that resource. */
1275 + unsigned int res_vc_hdl; /* Resource handle (videocore). */
1276 + unsigned int res_usr_hdl; /* Resource handle (user). */
1278 + unsigned long res_addr; /* Mapped virtual address. */
1279 + struct vm_area_struct *vma; /* VM area for this mapping. */
1280 + unsigned int ref_count; /* Reference count to this vma. */
1282 + /* Used to link maps associated with a resource. */
1283 + struct list_head resource_map_list;
1286 +/* Single resource allocation tracked for each opened device. */
1287 +struct sm_resource_t {
1288 + struct list_head resource_list; /* List of resources. */
1289 + struct list_head global_resource_list; /* Global list of resources. */
1291 + pid_t pid; /* PID owning that resource. */
1292 + uint32_t res_guid; /* Unique identifier. */
1293 + uint32_t lock_count; /* Lock count for this resource. */
1294 + uint32_t ref_count; /* Ref count for this resource. */
1296 + uint32_t res_handle; /* Resource allocation handle. */
1297 + void *res_base_mem; /* Resource base memory address. */
1298 + uint32_t res_size; /* Resource size allocated. */
1299 + enum vmcs_sm_cache_e res_cached; /* Resource cache type. */
1300 + struct sm_resource_t *res_shared; /* Shared resource */
1302 + enum sm_stats_t res_stats[END_ALL]; /* Resource statistics. */
1304 + uint8_t map_count; /* Counter of mappings for this resource. */
1305 + struct list_head map_list; /* Maps associated with a resource. */
1307 + /* DMABUF related fields */
1308 + struct dma_buf *dma_buf;
1309 + struct dma_buf_attachment *attach;
1310 + struct sg_table *sgt;
1311 + dma_addr_t dma_addr;
1313 + struct sm_priv_data_t *private;
1314 + bool map; /* whether to map pages up front */
1317 +/* Private file data associated with each opened device. */
1318 +struct sm_priv_data_t {
1319 + struct list_head resource_list; /* List of resources. */
1321 + pid_t pid; /* PID of creator. */
1323 + struct dentry *dir_pid; /* Debug fs entries root. */
1324 + struct sm_pde_t dir_stats; /* Debug fs entries statistics sub-tree. */
1325 + struct sm_pde_t dir_res; /* Debug fs resource sub-tree. */
1327 + int restart_sys; /* Tracks restart on interrupt. */
1328 + enum vc_sm_msg_type int_action; /* Interrupted action. */
1329 + uint32_t int_trans_id; /* Interrupted transaction. */
1333 +/* Global state information. */
1334 +struct sm_state_t {
1335 + struct platform_device *pdev;
1336 + struct sm_instance *sm_handle; /* Handle for videocore service. */
1337 + struct dentry *dir_root; /* Debug fs entries root. */
1338 + struct dentry *dir_alloc; /* Debug fs entries allocations. */
1339 + struct sm_pde_t dir_stats; /* Debug fs entries statistics sub-tree. */
1340 + struct sm_pde_t dir_state; /* Debug fs entries state sub-tree. */
1341 + struct dentry *debug; /* Debug fs entries debug. */
1343 + struct mutex map_lock; /* Global map lock. */
1344 + struct list_head map_list; /* List of maps. */
1345 + struct list_head resource_list; /* List of resources. */
1347 + enum sm_stats_t deceased[END_ALL]; /* Natural termination stats. */
1348 + enum sm_stats_t terminated[END_ALL]; /* Forced termination stats. */
1349 + uint32_t res_deceased_cnt; /* Natural termination counter. */
1350 + uint32_t res_terminated_cnt; /* Forced termination counter. */
1352 + struct cdev sm_cdev; /* Device. */
1353 + dev_t sm_devid; /* Device identifier. */
1354 + struct class *sm_class; /* Class. */
1355 + struct device *sm_dev; /* Device. */
1357 + struct sm_priv_data_t *data_knl; /* Kernel internal data tracking. */
1359 + struct mutex lock; /* Global lock. */
1360 + uint32_t guid; /* GUID (next) tracker. */
1364 +/* ---- Private Variables ----------------------------------------------- */
1366 +static struct sm_state_t *sm_state;
1367 +static int sm_inited;
1370 +static const char *const sm_cache_map_vector[] = {
1378 +/* ---- Private Function Prototypes -------------------------------------- */
1380 +/* ---- Private Functions ------------------------------------------------ */
1382 +static inline unsigned int vcaddr_to_pfn(unsigned long vc_addr)
1384 + unsigned long pfn = vc_addr & 0x3FFFFFFF;
1386 + pfn += mm_vc_mem_phys_addr;
1387 + pfn >>= PAGE_SHIFT;
1392 + * Carries over to the state statistics the statistics once owned by a deceased
1395 +static void vc_sm_resource_deceased(struct sm_resource_t *p_res, int terminated)
1397 + if (sm_state != NULL) {
1398 + if (p_res != NULL) {
1402 + sm_state->res_terminated_cnt++;
1404 + sm_state->res_deceased_cnt++;
1406 + for (ix = 0; ix < END_ALL; ix++) {
1408 + sm_state->terminated[ix] +=
1409 + p_res->res_stats[ix];
1411 + sm_state->deceased[ix] +=
1412 + p_res->res_stats[ix];
1419 + * Fetch a videocore handle corresponding to a mapping of the pid+address
1420 + * returns 0 (ie NULL) if no such handle exists in the global map.
1422 +static unsigned int vmcs_sm_vc_handle_from_pid_and_address(unsigned int pid,
1423 + unsigned int addr)
1425 + struct sm_mmap *map = NULL;
1426 + unsigned int handle = 0;
1428 + if (!sm_state || addr == 0)
1431 + mutex_lock(&(sm_state->map_lock));
1433 + /* Lookup the resource. */
1434 + if (!list_empty(&sm_state->map_list)) {
1435 + list_for_each_entry(map, &sm_state->map_list, map_list) {
1436 + if (map->res_pid != pid)
1438 + if (addr < map->res_addr ||
1439 + addr >= (map->res_addr + map->resource->res_size))
1442 + pr_debug("[%s]: global map %p (pid %u, addr %lx) -> vc-hdl %x (usr-hdl %x)\n",
1443 + __func__, map, map->res_pid, map->res_addr,
1444 + map->res_vc_hdl, map->res_usr_hdl);
1446 + handle = map->res_vc_hdl;
1451 + mutex_unlock(&(sm_state->map_lock));
1455 + * Use a debug log here as it may be a valid situation that we query
1456 + * for something that is not mapped, we do not want a kernel log each
1459 + * There are other error logs that would pop up accordingly if someone
1460 + * subsequently tries to use something invalid after being told not to
1463 + if (handle == 0) {
1464 + pr_debug("[%s]: not a valid map (pid %u, addr %x)\n",
1465 + __func__, pid, addr);
1472 + * Fetch a user handle corresponding to a mapping of the pid+address
1473 + * returns 0 (ie NULL) if no such handle exists in the global map.
1475 +static unsigned int vmcs_sm_usr_handle_from_pid_and_address(unsigned int pid,
1476 + unsigned int addr)
1478 + struct sm_mmap *map = NULL;
1479 + unsigned int handle = 0;
1481 + if (!sm_state || addr == 0)
1484 + mutex_lock(&(sm_state->map_lock));
1486 + /* Lookup the resource. */
1487 + if (!list_empty(&sm_state->map_list)) {
1488 + list_for_each_entry(map, &sm_state->map_list, map_list) {
1489 + if (map->res_pid != pid)
1491 + if (addr < map->res_addr ||
1492 + addr >= (map->res_addr + map->resource->res_size))
1495 + pr_debug("[%s]: global map %p (pid %u, addr %lx) -> usr-hdl %x (vc-hdl %x)\n",
1496 + __func__, map, map->res_pid, map->res_addr,
1497 + map->res_usr_hdl, map->res_vc_hdl);
1499 + handle = map->res_usr_hdl;
1504 + mutex_unlock(&(sm_state->map_lock));
1508 + * Use a debug log here as it may be a valid situation that we query
1509 + * for something that is not mapped yet.
1511 + * There are other error logs that would pop up accordingly if someone
1512 + * subsequently tries to use something invalid after being told not to
1516 + pr_debug("[%s]: not a valid map (pid %u, addr %x)\n",
1517 + __func__, pid, addr);
1522 +#if defined(DO_NOT_USE)
1524 + * Fetch an address corresponding to a mapping of the pid+handle
1525 + * returns 0 (ie NULL) if no such address exists in the global map.
1527 +static unsigned int vmcs_sm_usr_address_from_pid_and_vc_handle(unsigned int pid,
1530 + struct sm_mmap *map = NULL;
1531 + unsigned int addr = 0;
1533 + if (sm_state == NULL || hdl == 0)
1536 + mutex_lock(&(sm_state->map_lock));
1538 + /* Lookup the resource. */
1539 + if (!list_empty(&sm_state->map_list)) {
1540 + list_for_each_entry(map, &sm_state->map_list, map_list) {
1541 + if (map->res_pid != pid || map->res_vc_hdl != hdl)
1544 + pr_debug("[%s]: global map %p (pid %u, vc-hdl %x, usr-hdl %x) -> addr %lx\n",
1545 + __func__, map, map->res_pid, map->res_vc_hdl,
1546 + map->res_usr_hdl, map->res_addr);
1548 + addr = map->res_addr;
1553 + mutex_unlock(&(sm_state->map_lock));
1557 + * Use a debug log here as it may be a valid situation that we query
1558 + * for something that is not mapped, we do not want a kernel log each
1561 + * There are other error logs that would pop up accordingly if someone
1562 + * subsequently tries to use something invalid after being told not to
1566 + pr_debug("[%s]: not a valid map (pid %u, hdl %x)\n",
1567 + __func__, pid, hdl);
1574 + * Fetch an address corresponding to a mapping of the pid+handle
1575 + * returns 0 (ie NULL) if no such address exists in the global map.
1577 +static unsigned int vmcs_sm_usr_address_from_pid_and_usr_handle(unsigned int
1582 + struct sm_mmap *map = NULL;
1583 + unsigned int addr = 0;
1585 + if (sm_state == NULL || hdl == 0)
1588 + mutex_lock(&(sm_state->map_lock));
1590 + /* Lookup the resource. */
1591 + if (!list_empty(&sm_state->map_list)) {
1592 + list_for_each_entry(map, &sm_state->map_list, map_list) {
1593 + if (map->res_pid != pid || map->res_usr_hdl != hdl)
1596 + pr_debug("[%s]: global map %p (pid %u, vc-hdl %x, usr-hdl %x) -> addr %lx\n",
1597 + __func__, map, map->res_pid, map->res_vc_hdl,
1598 + map->res_usr_hdl, map->res_addr);
1600 + addr = map->res_addr;
1605 + mutex_unlock(&(sm_state->map_lock));
1609 + * Use a debug log here as it may be a valid situation that we query
1610 + * for something that is not mapped, we do not want a kernel log each
1613 + * There are other error logs that would pop up accordingly if someone
1614 + * subsequently tries to use something invalid after being told not to
1618 + pr_debug("[%s]: not a valid map (pid %u, hdl %x)\n", __func__,
1624 +/* Adds a resource mapping to the global data list. */
1625 +static void vmcs_sm_add_map(struct sm_state_t *state,
1626 + struct sm_resource_t *resource, struct sm_mmap *map)
1628 + mutex_lock(&(state->map_lock));
1630 + /* Add to the global list of mappings */
1631 + list_add(&map->map_list, &state->map_list);
1633 + /* Add to the list of mappings for this resource */
1634 + list_add(&map->resource_map_list, &resource->map_list);
1635 + resource->map_count++;
1637 + mutex_unlock(&(state->map_lock));
1639 + pr_debug("[%s]: added map %p (pid %u, vc-hdl %x, usr-hdl %x, addr %lx)\n",
1640 + __func__, map, map->res_pid, map->res_vc_hdl,
1641 + map->res_usr_hdl, map->res_addr);
1644 +/* Removes a resource mapping from the global data list. */
1645 +static void vmcs_sm_remove_map(struct sm_state_t *state,
1646 + struct sm_resource_t *resource,
1647 + struct sm_mmap *map)
1649 + mutex_lock(&(state->map_lock));
1651 + /* Remove from the global list of mappings */
1652 + list_del(&map->map_list);
1654 + /* Remove from the list of mapping for this resource */
1655 + list_del(&map->resource_map_list);
1656 + if (resource->map_count > 0)
1657 + resource->map_count--;
1659 + mutex_unlock(&(state->map_lock));
1661 + pr_debug("[%s]: removed map %p (pid %d, vc-hdl %x, usr-hdl %x, addr %lx)\n",
1662 + __func__, map, map->res_pid, map->res_vc_hdl, map->res_usr_hdl,
1668 +/* Read callback for the global state proc entry. */
1669 +static int vc_sm_global_state_show(struct seq_file *s, void *v)
1671 + struct sm_mmap *map = NULL;
1672 + struct sm_resource_t *resource = NULL;
1673 + int map_count = 0;
1674 + int resource_count = 0;
1676 + if (sm_state == NULL)
1679 + seq_printf(s, "\nVC-ServiceHandle 0x%x\n",
1680 + (unsigned int)sm_state->sm_handle);
1682 + /* Log all applicable mapping(s). */
1684 + mutex_lock(&(sm_state->map_lock));
1685 + seq_puts(s, "\nResources\n");
1686 + if (!list_empty(&sm_state->resource_list)) {
1687 + list_for_each_entry(resource, &sm_state->resource_list,
1688 + global_resource_list) {
1691 + seq_printf(s, "\nResource %p\n",
1693 + seq_printf(s, " PID %u\n",
1695 + seq_printf(s, " RES_GUID 0x%x\n",
1696 + resource->res_guid);
1697 + seq_printf(s, " LOCK_COUNT %u\n",
1698 + resource->lock_count);
1699 + seq_printf(s, " REF_COUNT %u\n",
1700 + resource->ref_count);
1701 + seq_printf(s, " res_handle 0x%X\n",
1702 + resource->res_handle);
1703 + seq_printf(s, " res_base_mem %p\n",
1704 + resource->res_base_mem);
1705 + seq_printf(s, " SIZE %d\n",
1706 + resource->res_size);
1707 + seq_printf(s, " DMABUF %p\n",
1708 + resource->dma_buf);
1709 + seq_printf(s, " ATTACH %p\n",
1710 + resource->attach);
1711 + seq_printf(s, " SGT %p\n",
1713 + seq_printf(s, " DMA_ADDR %pad\n",
1714 + &resource->dma_addr);
1717 + seq_printf(s, "\n\nTotal resource count: %d\n\n", resource_count);
1719 + seq_puts(s, "\nMappings\n");
1720 + if (!list_empty(&sm_state->map_list)) {
1721 + list_for_each_entry(map, &sm_state->map_list, map_list) {
1724 + seq_printf(s, "\nMapping 0x%x\n",
1725 + (unsigned int)map);
1726 + seq_printf(s, " TGID %u\n",
1728 + seq_printf(s, " VC-HDL 0x%x\n",
1730 + seq_printf(s, " USR-HDL 0x%x\n",
1731 + map->res_usr_hdl);
1732 + seq_printf(s, " USR-ADDR 0x%lx\n",
1734 + seq_printf(s, " SIZE %d\n",
1735 + map->resource->res_size);
1739 + mutex_unlock(&(sm_state->map_lock));
1740 + seq_printf(s, "\n\nTotal map count: %d\n\n", map_count);
1745 +static int vc_sm_global_statistics_show(struct seq_file *s, void *v)
1749 + /* Global state tracked statistics. */
1750 + if (sm_state != NULL) {
1751 + seq_puts(s, "\nDeceased Resources Statistics\n");
1753 + seq_printf(s, "\nNatural Cause (%u occurences)\n",
1754 + sm_state->res_deceased_cnt);
1755 + for (ix = 0; ix < END_ATTEMPT; ix++) {
1756 + if (sm_state->deceased[ix] > 0) {
1757 + seq_printf(s, " %u\t%s\n",
1758 + sm_state->deceased[ix],
1759 + sm_stats_human_read[ix]);
1762 + seq_puts(s, "\n");
1763 + for (ix = 0; ix < END_ATTEMPT; ix++) {
1764 + if (sm_state->deceased[ix + END_ATTEMPT] > 0) {
1765 + seq_printf(s, " %u\tFAILED %s\n",
1766 + sm_state->deceased[ix + END_ATTEMPT],
1767 + sm_stats_human_read[ix]);
1771 + seq_printf(s, "\nForcefull (%u occurences)\n",
1772 + sm_state->res_terminated_cnt);
1773 + for (ix = 0; ix < END_ATTEMPT; ix++) {
1774 + if (sm_state->terminated[ix] > 0) {
1775 + seq_printf(s, " %u\t%s\n",
1776 + sm_state->terminated[ix],
1777 + sm_stats_human_read[ix]);
1780 + seq_puts(s, "\n");
1781 + for (ix = 0; ix < END_ATTEMPT; ix++) {
1782 + if (sm_state->terminated[ix + END_ATTEMPT] > 0) {
1783 + seq_printf(s, " %u\tFAILED %s\n",
1784 + sm_state->terminated[ix +
1786 + sm_stats_human_read[ix]);
1795 +/* Read callback for the statistics proc entry. */
1796 +static int vc_sm_statistics_show(struct seq_file *s, void *v)
1799 + struct sm_priv_data_t *file_data;
1800 + struct sm_resource_t *resource;
1801 + int res_count = 0;
1802 + struct sm_pde_t *p_pde;
1804 + p_pde = (struct sm_pde_t *)(s->private);
1805 + file_data = (struct sm_priv_data_t *)(p_pde->priv_data);
1807 + if (file_data == NULL)
1810 + /* Per process statistics. */
1812 + seq_printf(s, "\nStatistics for TGID %d\n", file_data->pid);
1814 + mutex_lock(&(sm_state->map_lock));
1816 + if (!list_empty(&file_data->resource_list)) {
1817 + list_for_each_entry(resource, &file_data->resource_list,
1821 + seq_printf(s, "\nGUID: 0x%x\n\n",
1822 + resource->res_guid);
1823 + for (ix = 0; ix < END_ATTEMPT; ix++) {
1824 + if (resource->res_stats[ix] > 0) {
1827 + resource->res_stats[ix],
1828 + sm_stats_human_read[ix]);
1831 + seq_puts(s, "\n");
1832 + for (ix = 0; ix < END_ATTEMPT; ix++) {
1833 + if (resource->res_stats[ix + END_ATTEMPT] > 0) {
1835 + " %u\tFAILED %s\n",
1836 + resource->res_stats[
1837 + ix + END_ATTEMPT],
1838 + sm_stats_human_read[ix]);
1844 + mutex_unlock(&(sm_state->map_lock));
1846 + seq_printf(s, "\nResources Count %d\n", res_count);
1853 +/* Read callback for the allocation proc entry. */
1854 +static int vc_sm_alloc_show(struct seq_file *s, void *v)
1856 + struct sm_priv_data_t *file_data;
1857 + struct sm_resource_t *resource;
1858 + int alloc_count = 0;
1859 + struct sm_pde_t *p_pde;
1861 + p_pde = (struct sm_pde_t *)(s->private);
1862 + file_data = (struct sm_priv_data_t *)(p_pde->priv_data);
1867 + /* Per process statistics. */
1868 + seq_printf(s, "\nAllocation for TGID %d\n", file_data->pid);
1870 + mutex_lock(&(sm_state->map_lock));
1872 + if (!list_empty(&file_data->resource_list)) {
1873 + list_for_each_entry(resource, &file_data->resource_list,
1877 + seq_printf(s, "\nGUID: 0x%x\n",
1878 + resource->res_guid);
1879 + seq_printf(s, "Lock Count: %u\n",
1880 + resource->lock_count);
1881 + seq_printf(s, "Mapped: %s\n",
1882 + (resource->map_count ? "yes" : "no"));
1883 + seq_printf(s, "VC-handle: 0x%x\n",
1884 + resource->res_handle);
1885 + seq_printf(s, "VC-address: 0x%p\n",
1886 + resource->res_base_mem);
1887 + seq_printf(s, "VC-size (bytes): %u\n",
1888 + resource->res_size);
1889 + seq_printf(s, "Cache: %s\n",
1890 + sm_cache_map_vector[resource->res_cached]);
1894 + mutex_unlock(&(sm_state->map_lock));
1896 + seq_printf(s, "\n\nTotal allocation count: %d\n\n", alloc_count);
1902 +static int vc_sm_seq_file_show(struct seq_file *s, void *v)
1904 + struct sm_pde_t *sm_pde;
1906 + sm_pde = (struct sm_pde_t *)(s->private);
1908 + if (sm_pde && sm_pde->show)
1909 + sm_pde->show(s, v);
1914 +static int vc_sm_single_open(struct inode *inode, struct file *file)
1916 + return single_open(file, vc_sm_seq_file_show, inode->i_private);
1919 +static const struct file_operations vc_sm_debug_fs_fops = {
1920 + .open = vc_sm_single_open,
1922 + .llseek = seq_lseek,
1923 + .release = single_release,
1927 + * Adds a resource to the private data list which tracks all the allocated
1930 +static void vmcs_sm_add_resource(struct sm_priv_data_t *privdata,
1931 + struct sm_resource_t *resource)
1933 + mutex_lock(&(sm_state->map_lock));
1934 + list_add(&resource->resource_list, &privdata->resource_list);
1935 + list_add(&resource->global_resource_list, &sm_state->resource_list);
1936 + mutex_unlock(&(sm_state->map_lock));
1938 + pr_debug("[%s]: added resource %p (base addr %p, hdl %x, size %u, cache %u)\n",
1939 + __func__, resource, resource->res_base_mem,
1940 + resource->res_handle, resource->res_size, resource->res_cached);
1944 + * Locates a resource and acquire a reference on it.
1945 + * The resource won't be deleted while there is a reference on it.
1947 +static struct sm_resource_t *vmcs_sm_acquire_resource(struct sm_priv_data_t
1949 + unsigned int res_guid)
1951 + struct sm_resource_t *resource, *ret = NULL;
1953 + mutex_lock(&(sm_state->map_lock));
1955 + list_for_each_entry(resource, &private->resource_list, resource_list) {
1956 + if (resource->res_guid != res_guid)
1959 + pr_debug("[%s]: located resource %p (guid: %x, base addr %p, hdl %x, size %u, cache %u)\n",
1960 + __func__, resource, resource->res_guid,
1961 + resource->res_base_mem, resource->res_handle,
1962 + resource->res_size, resource->res_cached);
1963 + resource->ref_count++;
1968 + mutex_unlock(&(sm_state->map_lock));
1974 + * Locates a resource and acquire a reference on it.
1975 + * The resource won't be deleted while there is a reference on it.
1977 +static struct sm_resource_t *vmcs_sm_acquire_first_resource(
1978 + struct sm_priv_data_t *private)
1980 + struct sm_resource_t *resource, *ret = NULL;
1982 + mutex_lock(&(sm_state->map_lock));
1984 + list_for_each_entry(resource, &private->resource_list, resource_list) {
1985 + pr_debug("[%s]: located resource %p (guid: %x, base addr %p, hdl %x, size %u, cache %u)\n",
1986 + __func__, resource, resource->res_guid,
1987 + resource->res_base_mem, resource->res_handle,
1988 + resource->res_size, resource->res_cached);
1989 + resource->ref_count++;
1994 + mutex_unlock(&(sm_state->map_lock));
2000 + * Locates a resource and acquire a reference on it.
2001 + * The resource won't be deleted while there is a reference on it.
2003 +static struct sm_resource_t *vmcs_sm_acquire_global_resource(unsigned int
2006 + struct sm_resource_t *resource, *ret = NULL;
2008 + mutex_lock(&(sm_state->map_lock));
2010 + list_for_each_entry(resource, &sm_state->resource_list,
2011 + global_resource_list) {
2012 + if (resource->res_guid != res_guid)
2015 + pr_debug("[%s]: located resource %p (guid: %x, base addr %p, hdl %x, size %u, cache %u)\n",
2016 + __func__, resource, resource->res_guid,
2017 + resource->res_base_mem, resource->res_handle,
2018 + resource->res_size, resource->res_cached);
2019 + resource->ref_count++;
2024 + mutex_unlock(&(sm_state->map_lock));
2030 + * Release a previously acquired resource.
2031 + * The resource will be deleted when its refcount reaches 0.
2033 +static void vmcs_sm_release_resource(struct sm_resource_t *resource, int force)
2035 + struct sm_priv_data_t *private = resource->private;
2036 + struct sm_mmap *map, *map_tmp;
2037 + struct sm_resource_t *res_tmp;
2040 + mutex_lock(&(sm_state->map_lock));
2042 + if (--resource->ref_count) {
2044 + pr_err("[%s]: resource %p in use\n", __func__, resource);
2046 + mutex_unlock(&(sm_state->map_lock));
2050 + /* Time to free the resource. Start by removing it from the list */
2051 + list_del(&resource->resource_list);
2052 + list_del(&resource->global_resource_list);
2055 + * Walk the global resource list, find out if the resource is used
2056 + * somewhere else. In which case we don't want to delete it.
2058 + list_for_each_entry(res_tmp, &sm_state->resource_list,
2059 + global_resource_list) {
2060 + if (res_tmp->res_handle == resource->res_handle) {
2061 + resource->res_handle = 0;
2066 + mutex_unlock(&(sm_state->map_lock));
2068 + pr_debug("[%s]: freeing data - guid %x, hdl %x, base address %p\n",
2069 + __func__, resource->res_guid, resource->res_handle,
2070 + resource->res_base_mem);
2071 + resource->res_stats[FREE]++;
2073 + /* Make sure the resource we're removing is unmapped first */
2074 + if (resource->map_count && !list_empty(&resource->map_list)) {
2075 + down_write(¤t->mm->mmap_sem);
2076 + list_for_each_entry_safe(map, map_tmp, &resource->map_list,
2077 + resource_map_list) {
2079 + do_munmap(current->mm, map->res_addr,
2080 + resource->res_size, NULL);
2082 + pr_err("[%s]: could not unmap resource %p\n",
2083 + __func__, resource);
2086 + up_write(¤t->mm->mmap_sem);
2089 + /* Free up the videocore allocated resource. */
2090 + if (resource->res_handle) {
2091 + struct vc_sm_free_t free = {
2092 + resource->res_handle, (uint32_t)resource->res_base_mem
2094 + int status = vc_vchi_sm_free(sm_state->sm_handle, &free,
2095 + &private->int_trans_id);
2096 + if (status != 0 && status != -EINTR) {
2097 + pr_err("[%s]: failed to free memory on videocore (status: %u, trans_id: %u)\n",
2098 + __func__, status, private->int_trans_id);
2099 + resource->res_stats[FREE_FAIL]++;
2104 + if (resource->sgt)
2105 + dma_buf_unmap_attachment(resource->attach, resource->sgt,
2106 + DMA_BIDIRECTIONAL);
2107 + if (resource->attach)
2108 + dma_buf_detach(resource->dma_buf, resource->attach);
2109 + if (resource->dma_buf)
2110 + dma_buf_put(resource->dma_buf);
2112 + /* Free up the shared resource. */
2113 + if (resource->res_shared)
2114 + vmcs_sm_release_resource(resource->res_shared, 0);
2116 + /* Free up the local resource tracking this allocation. */
2117 + vc_sm_resource_deceased(resource, force);
2122 + * Dump the map table for the driver. If process is -1, dumps the whole table,
2123 + * if process is a valid pid (non -1) dump only the entries associated with the
2124 + * pid of interest.
2126 +static void vmcs_sm_host_walk_map_per_pid(int pid)
2128 + struct sm_mmap *map = NULL;
2130 + /* Make sure the device was started properly. */
2131 + if (sm_state == NULL) {
2132 + pr_err("[%s]: invalid device\n", __func__);
2136 + mutex_lock(&(sm_state->map_lock));
2138 + /* Log all applicable mapping(s). */
2139 + if (!list_empty(&sm_state->map_list)) {
2140 + list_for_each_entry(map, &sm_state->map_list, map_list) {
2141 + if (pid == -1 || map->res_pid == pid) {
2142 + pr_info("[%s]: tgid: %u - vc-hdl: %x, usr-hdl: %x, usr-addr: %lx\n",
2143 + __func__, map->res_pid, map->res_vc_hdl,
2144 + map->res_usr_hdl, map->res_addr);
2149 + mutex_unlock(&(sm_state->map_lock));
2153 + * Dump the allocation table from host side point of view. This only dumps the
2154 + * data allocated for this process/device referenced by the file_data.
2156 +static void vmcs_sm_host_walk_alloc(struct sm_priv_data_t *file_data)
2158 + struct sm_resource_t *resource = NULL;
2160 + /* Make sure the device was started properly. */
2161 + if ((sm_state == NULL) || (file_data == NULL)) {
2162 + pr_err("[%s]: invalid device\n", __func__);
2166 + mutex_lock(&(sm_state->map_lock));
2168 + if (!list_empty(&file_data->resource_list)) {
2169 + list_for_each_entry(resource, &file_data->resource_list,
2171 + pr_info("[%s]: guid: %x - hdl: %x, vc-mem: %p, size: %u, cache: %u\n",
2172 + __func__, resource->res_guid, resource->res_handle,
2173 + resource->res_base_mem, resource->res_size,
2174 + resource->res_cached);
2178 + mutex_unlock(&(sm_state->map_lock));
2181 +/* Create support for private data tracking. */
2182 +static struct sm_priv_data_t *vc_sm_create_priv_data(pid_t id)
2184 + char alloc_name[32];
2185 + struct sm_priv_data_t *file_data = NULL;
2187 + /* Allocate private structure. */
2188 + file_data = kzalloc(sizeof(*file_data), GFP_KERNEL);
2191 + pr_err("[%s]: cannot allocate file data\n", __func__);
2195 + snprintf(alloc_name, sizeof(alloc_name), "%d", id);
2197 + INIT_LIST_HEAD(&file_data->resource_list);
2198 + file_data->pid = id;
2199 + file_data->dir_pid = debugfs_create_dir(alloc_name,
2200 + sm_state->dir_alloc);
2202 + /* TODO: fix this to support querying statistics per pid */
2204 + if (IS_ERR_OR_NULL(file_data->dir_pid)) {
2205 + file_data->dir_pid = NULL;
2207 + struct dentry *dir_entry;
2209 + dir_entry = debugfs_create_file(VC_SM_RESOURCES, 0444,
2210 + file_data->dir_pid, file_data,
2211 + vc_sm_debug_fs_fops);
2213 + file_data->dir_res.dir_entry = dir_entry;
2214 + file_data->dir_res.priv_data = file_data;
2215 + file_data->dir_res.show = &vc_sm_alloc_show;
2217 + dir_entry = debugfs_create_file(VC_SM_STATS, 0444,
2218 + file_data->dir_pid, file_data,
2219 + vc_sm_debug_fs_fops);
2221 + file_data->dir_res.dir_entry = dir_entry;
2222 + file_data->dir_res.priv_data = file_data;
2223 + file_data->dir_res.show = &vc_sm_statistics_show;
2225 + pr_debug("[%s]: private data allocated %p\n", __func__, file_data);
2233 + * Open the device. Creates a private state to help track all allocation
2234 + * associated with this device.
2236 +static int vc_sm_open(struct inode *inode, struct file *file)
2240 + /* Make sure the device was started properly. */
2242 + pr_err("[%s]: invalid device\n", __func__);
2247 + file->private_data = vc_sm_create_priv_data(current->tgid);
2248 + if (file->private_data == NULL) {
2249 + pr_err("[%s]: failed to create data tracker\n", __func__);
2260 + * Close the device. Free up all resources still associated with this device
2263 +static int vc_sm_release(struct inode *inode, struct file *file)
2265 + struct sm_priv_data_t *file_data =
2266 + (struct sm_priv_data_t *)file->private_data;
2267 + struct sm_resource_t *resource;
2270 + /* Make sure the device was started properly. */
2271 + if (sm_state == NULL || file_data == NULL) {
2272 + pr_err("[%s]: invalid device\n", __func__);
2277 + pr_debug("[%s]: using private data %p\n", __func__, file_data);
2279 + if (file_data->restart_sys == -EINTR) {
2280 + struct vc_sm_action_clean_t action_clean;
2282 + pr_debug("[%s]: releasing following EINTR on %u (trans_id: %u) (likely due to signal)...\n",
2283 + __func__, file_data->int_action,
2284 + file_data->int_trans_id);
2286 + action_clean.res_action = file_data->int_action;
2287 + action_clean.action_trans_id = file_data->int_trans_id;
2289 + vc_vchi_sm_clean_up(sm_state->sm_handle, &action_clean);
2292 + while ((resource = vmcs_sm_acquire_first_resource(file_data)) != NULL) {
2293 + vmcs_sm_release_resource(resource, 0);
2294 + vmcs_sm_release_resource(resource, 1);
2297 + /* Remove the corresponding proc entry. */
2298 + debugfs_remove_recursive(file_data->dir_pid);
2300 + /* Terminate the private data. */
2307 +static void vcsm_vma_open(struct vm_area_struct *vma)
2309 + struct sm_mmap *map = (struct sm_mmap *)vma->vm_private_data;
2311 + pr_debug("[%s]: virt %lx-%lx, pid %i, pfn %i\n",
2312 + __func__, vma->vm_start, vma->vm_end, (int)current->tgid,
2313 + (int)vma->vm_pgoff);
2318 +static void vcsm_vma_close(struct vm_area_struct *vma)
2320 + struct sm_mmap *map = (struct sm_mmap *)vma->vm_private_data;
2322 + pr_debug("[%s]: virt %lx-%lx, pid %i, pfn %i\n",
2323 + __func__, vma->vm_start, vma->vm_end, (int)current->tgid,
2324 + (int)vma->vm_pgoff);
2328 + /* Remove from the map table. */
2329 + if (map->ref_count == 0)
2330 + vmcs_sm_remove_map(sm_state, map->resource, map);
2333 +static vm_fault_t vcsm_vma_fault(struct vm_fault *vmf)
2335 + struct sm_mmap *map = (struct sm_mmap *)vmf->vma->vm_private_data;
2336 + struct sm_resource_t *resource = map->resource;
2337 + pgoff_t page_offset;
2338 + unsigned long pfn;
2341 + /* Lock the resource if necessary. */
2342 + if (!resource->lock_count) {
2343 + struct vc_sm_lock_unlock_t lock_unlock;
2344 + struct vc_sm_lock_result_t lock_result;
2347 + lock_unlock.res_handle = resource->res_handle;
2348 + lock_unlock.res_mem = (uint32_t)resource->res_base_mem;
2350 + pr_debug("[%s]: attempt to lock data - hdl %x, base address %p\n",
2351 + __func__, lock_unlock.res_handle,
2352 + (void *)lock_unlock.res_mem);
2354 + /* Lock the videocore allocated resource. */
2355 + status = vc_vchi_sm_lock(sm_state->sm_handle,
2356 + &lock_unlock, &lock_result, 0);
2357 + if (status || !lock_result.res_mem) {
2358 + pr_err("[%s]: failed to lock memory on videocore (status: %u)\n",
2359 + __func__, status);
2360 + resource->res_stats[LOCK_FAIL]++;
2361 + return VM_FAULT_SIGBUS;
2364 + pfn = vcaddr_to_pfn((unsigned long)resource->res_base_mem);
2365 + outer_inv_range(__pfn_to_phys(pfn),
2366 + __pfn_to_phys(pfn) + resource->res_size);
2368 + resource->res_stats[LOCK]++;
2369 + resource->lock_count++;
2371 + /* Keep track of the new base memory. */
2372 + if (lock_result.res_mem &&
2373 + lock_result.res_old_mem &&
2374 + (lock_result.res_mem != lock_result.res_old_mem)) {
2375 + resource->res_base_mem = (void *)lock_result.res_mem;
2379 + /* We don't use vmf->pgoff since that has the fake offset */
2380 + page_offset = ((unsigned long)vmf->address - vmf->vma->vm_start);
2381 + pfn = (uint32_t)resource->res_base_mem & 0x3FFFFFFF;
2382 + pfn += mm_vc_mem_phys_addr;
2383 + pfn += page_offset;
2384 + pfn >>= PAGE_SHIFT;
2386 + /* Finally, remap it */
2387 + ret = vmf_insert_pfn(vmf->vma, (unsigned long)vmf->address, pfn);
2388 + if (ret != VM_FAULT_NOPAGE)
2389 + pr_err("[%s]: failed to map page pfn:%lx virt:%lx ret:%d\n", __func__,
2390 + pfn, (unsigned long)vmf->address, ret);
2394 +static const struct vm_operations_struct vcsm_vm_ops = {
2395 + .open = vcsm_vma_open,
2396 + .close = vcsm_vma_close,
2397 + .fault = vcsm_vma_fault,
2400 +/* Converts VCSM_CACHE_OP_* to an operating function. */
2401 +static void (*cache_op_to_func(const unsigned cache_op))
2402 + (const void*, const void*)
2404 + switch (cache_op) {
2405 + case VCSM_CACHE_OP_NOP:
2408 + case VCSM_CACHE_OP_INV:
2409 + return dmac_inv_range;
2411 + case VCSM_CACHE_OP_CLEAN:
2412 + return dmac_clean_range;
2414 + case VCSM_CACHE_OP_FLUSH:
2415 + return dmac_flush_range;
2418 + pr_err("[%s]: Invalid cache_op: 0x%08x\n", __func__, cache_op);
2424 + * Clean/invalid/flush cache of which buffer is already pinned (i.e. accessed).
2426 +static int clean_invalid_contiguous_mem_2d(const void __user *addr,
2427 + const size_t block_count, const size_t block_size, const size_t stride,
2428 + const unsigned cache_op)
2431 + void (*op_fn)(const void*, const void*);
2433 + if (!block_size) {
2434 + pr_err("[%s]: size cannot be 0\n", __func__);
2438 + op_fn = cache_op_to_func(cache_op);
2439 + if (op_fn == NULL)
2442 + for (i = 0; i < block_count; i ++, addr += stride)
2443 + op_fn(addr, addr + block_size);
2448 +/* Clean/invalid/flush cache of which buffer may be non-pinned. */
2449 +/* The caller must lock current->mm->mmap_sem for read. */
2450 +static int clean_invalid_mem_walk(unsigned long addr, const size_t size,
2451 + const unsigned cache_op)
2457 + unsigned long pgd_next, pud_next, pmd_next;
2458 + const unsigned long end = ALIGN(addr + size, PAGE_SIZE);
2459 + void (*op_fn)(const void*, const void*);
2461 + addr &= PAGE_MASK;
2466 + op_fn = cache_op_to_func(cache_op);
2467 + if (op_fn == NULL)
2471 + pgd = pgd_offset(current->mm, addr);
2473 + pgd_next = pgd_addr_end(addr, end);
2475 + if (pgd_none(*pgd) || pgd_bad(*pgd))
2479 + pud = pud_offset(pgd, addr);
2481 + pud_next = pud_addr_end(addr, pgd_next);
2482 + if (pud_none(*pud) || pud_bad(*pud))
2486 + pmd = pmd_offset(pud, addr);
2488 + pmd_next = pmd_addr_end(addr, pud_next);
2489 + if (pmd_none(*pmd) || pmd_bad(*pmd))
2493 + pte = pte_offset_map(pmd, addr);
2495 + if (pte_none(*pte) || !pte_present(*pte))
2498 + op_fn((const void __user*) addr,
2499 + (const void __user*) (addr + PAGE_SIZE));
2500 + } while (pte++, addr += PAGE_SIZE, addr != pmd_next);
2503 + } while (pmd++, addr = pmd_next, addr != pud_next);
2505 + } while (pud++, addr = pud_next, addr != pgd_next);
2507 + } while (pgd++, addr = pgd_next, addr != end);
2512 +/* Clean/invalid/flush cache of buffer in resource */
2513 +static int clean_invalid_resource_walk(const void __user *addr,
2514 + const size_t size, const unsigned cache_op, const int usr_hdl,
2515 + struct sm_resource_t *resource)
2518 + enum sm_stats_t stat_attempt, stat_failure;
2519 + void __user *res_addr;
2521 + if (resource == NULL) {
2522 + pr_err("[%s]: resource is NULL\n", __func__);
2525 + if (resource->res_cached != VMCS_SM_CACHE_HOST &&
2526 + resource->res_cached != VMCS_SM_CACHE_BOTH)
2529 + switch (cache_op) {
2530 + case VCSM_CACHE_OP_NOP:
2532 + case VCSM_CACHE_OP_INV:
2533 + stat_attempt = INVALID;
2534 + stat_failure = INVALID_FAIL;
2536 + case VCSM_CACHE_OP_CLEAN:
2537 + /* Like the original VMCS_SM_CMD_CLEAN_INVALID ioctl handler does. */
2538 + stat_attempt = FLUSH;
2539 + stat_failure = FLUSH_FAIL;
2541 + case VCSM_CACHE_OP_FLUSH:
2542 + stat_attempt = FLUSH;
2543 + stat_failure = FLUSH_FAIL;
2546 + pr_err("[%s]: Invalid cache_op: 0x%08x\n", __func__, cache_op);
2549 + resource->res_stats[stat_attempt]++;
2551 + if (size > resource->res_size) {
2552 + pr_err("[%s]: size (0x%08zu) is larger than res_size (0x%08zu)\n",
2553 + __func__, size, resource->res_size);
2556 + res_addr = (void __user*) vmcs_sm_usr_address_from_pid_and_usr_handle(
2557 + current->tgid, usr_hdl);
2558 + if (res_addr == NULL) {
2559 + pr_err("[%s]: Failed to get user address "
2560 + "from pid (%d) and user handle (%d)\n", __func__, current->tgid,
2561 + resource->res_handle);
2564 + if (!(res_addr <= addr && addr + size <= res_addr + resource->res_size)) {
2565 + pr_err("[%s]: Addr (0x%p-0x%p) out of range (0x%p-0x%p)\n",
2566 + __func__, addr, addr + size, res_addr,
2567 + res_addr + resource->res_size);
2571 +	down_read(&current->mm->mmap_sem);
2572 + err = clean_invalid_mem_walk((unsigned long) addr, size, cache_op);
2573 +	up_read(&current->mm->mmap_sem);
2576 + resource->res_stats[stat_failure]++;
2581 +/* Map an allocated data into something that the user space. */
2582 +static int vc_sm_mmap(struct file *file, struct vm_area_struct *vma)
2585 + struct sm_priv_data_t *file_data =
2586 + (struct sm_priv_data_t *)file->private_data;
2587 + struct sm_resource_t *resource = NULL;
2588 + struct sm_mmap *map = NULL;
2590 + /* Make sure the device was started properly. */
2591 + if ((sm_state == NULL) || (file_data == NULL)) {
2592 + pr_err("[%s]: invalid device\n", __func__);
2596 + pr_debug("[%s]: private data %p, guid %x\n", __func__, file_data,
2597 + ((unsigned int)vma->vm_pgoff << PAGE_SHIFT));
2600 + * We lookup to make sure that the data we are being asked to mmap is
2601 + * something that we allocated.
2603 + * We use the offset information as the key to tell us which resource
2606 + resource = vmcs_sm_acquire_resource(file_data,
2607 + ((unsigned int)vma->vm_pgoff <<
2609 + if (resource == NULL) {
2610 + pr_err("[%s]: failed to locate resource for guid %x\n", __func__,
2611 + ((unsigned int)vma->vm_pgoff << PAGE_SHIFT));
2615 + pr_debug("[%s]: guid %x, tgid %u, %u, %u\n",
2616 + __func__, resource->res_guid, current->tgid, resource->pid,
2619 + /* Check permissions. */
2620 + if (resource->pid && (resource->pid != current->tgid)) {
2621 + pr_err("[%s]: current tgid %u != %u owner\n",
2622 + __func__, current->tgid, resource->pid);
2627 + /* Verify that what we are asked to mmap is proper. */
2628 + if (resource->res_size != (unsigned int)(vma->vm_end - vma->vm_start)) {
2629 + pr_err("[%s]: size inconsistency (resource: %u - mmap: %u)\n",
2631 + resource->res_size,
2632 + (unsigned int)(vma->vm_end - vma->vm_start));
2639 + * Keep track of the tuple in the global resource list such that one
2640 + * can do a mapping lookup for address/memory handle.
2642 + map = kzalloc(sizeof(*map), GFP_KERNEL);
2643 + if (map == NULL) {
2644 + pr_err("[%s]: failed to allocate global tracking resource\n",
2650 + map->res_pid = current->tgid;
2651 + map->res_vc_hdl = resource->res_handle;
2652 + map->res_usr_hdl = resource->res_guid;
2653 + map->res_addr = (unsigned long)vma->vm_start;
2654 + map->resource = resource;
2656 + vmcs_sm_add_map(sm_state, resource, map);
2659 + * We are not actually mapping the pages, we just provide a fault
2660 + * handler to allow pages to be mapped when accessed
2663 + VM_IO | VM_PFNMAP | VM_DONTCOPY | VM_DONTEXPAND;
2664 + vma->vm_ops = &vcsm_vm_ops;
2665 + vma->vm_private_data = map;
2667 + /* vm_pgoff is the first PFN of the mapped memory */
2668 + vma->vm_pgoff = (unsigned long)resource->res_base_mem & 0x3FFFFFFF;
2669 + vma->vm_pgoff += mm_vc_mem_phys_addr;
2670 + vma->vm_pgoff >>= PAGE_SHIFT;
2672 + if ((resource->res_cached == VMCS_SM_CACHE_NONE) ||
2673 + (resource->res_cached == VMCS_SM_CACHE_VC)) {
2674 + /* Allocated non host cached memory, honour it. */
2675 + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
2678 + pr_debug("[%s]: resource %p (guid %x) - cnt %u, base address %p, handle %x, size %u (%u), cache %u\n",
2680 + resource, resource->res_guid, resource->lock_count,
2681 + resource->res_base_mem, resource->res_handle,
2682 + resource->res_size, (unsigned int)(vma->vm_end - vma->vm_start),
2683 + resource->res_cached);
2685 + pr_debug("[%s]: resource %p (base address %p, handle %x) - map-count %d, usr-addr %x\n",
2686 + __func__, resource, resource->res_base_mem,
2687 + resource->res_handle, resource->map_count,
2688 + (unsigned int)vma->vm_start);
2690 + vcsm_vma_open(vma);
2691 + resource->res_stats[MAP]++;
2692 + vmcs_sm_release_resource(resource, 0);
2694 + if (resource->map) {
2695 + /* We don't use vmf->pgoff since that has the fake offset */
2696 + unsigned long addr;
2698 + for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) {
2699 + /* Finally, remap it */
2700 + unsigned long pfn = (unsigned long)resource->res_base_mem & 0x3FFFFFFF;
2702 + pfn += mm_vc_mem_phys_addr;
2703 + pfn += addr - vma->vm_start;
2704 + pfn >>= PAGE_SHIFT;
2705 + ret = vmf_insert_pfn(vma, addr, pfn);
2712 + resource->res_stats[MAP_FAIL]++;
2713 + vmcs_sm_release_resource(resource, 0);
2717 +/* Allocate a shared memory handle and block. */
2718 +static int vc_sm_ioctl_alloc(struct sm_priv_data_t *private,
2719 + struct vmcs_sm_ioctl_alloc *ioparam)
2723 + struct sm_resource_t *resource;
2724 + struct vc_sm_alloc_t alloc = { 0 };
2725 + struct vc_sm_alloc_result_t result = { 0 };
2726 + enum vmcs_sm_cache_e cached = ioparam->cached;
2729 + /* flag to requst buffer is mapped up front, rather than lazily */
2730 + if (cached & 0x80) {
2735 + /* Setup our allocation parameters */
2736 + alloc.type = ((cached == VMCS_SM_CACHE_VC)
2738 + VMCS_SM_CACHE_BOTH)) ? VC_SM_ALLOC_CACHED :
2739 + VC_SM_ALLOC_NON_CACHED;
2740 + alloc.base_unit = ioparam->size;
2741 + alloc.num_unit = ioparam->num;
2742 + alloc.allocator = current->tgid;
2743 + /* Align to kernel page size */
2744 + alloc.alignement = 4096;
2745 + /* Align the size to the kernel page size */
2747 + (alloc.base_unit + alloc.alignement - 1) & ~(alloc.alignement - 1);
2748 + if (*ioparam->name) {
2749 + memcpy(alloc.name, ioparam->name, sizeof(alloc.name) - 1);
2751 + memcpy(alloc.name, VMCS_SM_RESOURCE_NAME_DEFAULT,
2752 + sizeof(VMCS_SM_RESOURCE_NAME_DEFAULT));
2755 + pr_debug("[%s]: attempt to allocate \"%s\" data - type %u, base %u (%u), num %u, alignement %u\n",
2756 + __func__, alloc.name, alloc.type, ioparam->size,
2757 + alloc.base_unit, alloc.num_unit, alloc.alignement);
2759 + /* Allocate local resource to track this allocation. */
2760 + resource = kzalloc(sizeof(*resource), GFP_KERNEL);
2765 + INIT_LIST_HEAD(&resource->map_list);
2766 + resource->ref_count++;
2767 + resource->pid = current->tgid;
2769 + /* Allocate the videocore resource. */
2770 + status = vc_vchi_sm_alloc(sm_state->sm_handle, &alloc, &result,
2771 + &private->int_trans_id);
2772 + if (status == -EINTR) {
2773 + pr_debug("[%s]: requesting allocate memory action restart (trans_id: %u)\n",
2774 + __func__, private->int_trans_id);
2775 + ret = -ERESTARTSYS;
2776 + private->restart_sys = -EINTR;
2777 + private->int_action = VC_SM_MSG_TYPE_ALLOC;
2779 + } else if (status != 0 || !result.res_mem) {
2780 + pr_err("[%s]: failed to allocate memory on videocore (status: %u, trans_id: %u)\n",
2781 + __func__, status, private->int_trans_id);
2783 + resource->res_stats[ALLOC_FAIL]++;
2787 + /* Keep track of the resource we created. */
2788 + resource->private = private;
2789 + resource->res_handle = result.res_handle;
2790 + resource->res_base_mem = (void *)result.res_mem;
2791 + resource->res_size = alloc.base_unit * alloc.num_unit;
2792 + resource->res_cached = cached;
2793 + resource->map = map;
2796 + * Kernel/user GUID. This global identifier is used for mmap'ing the
2797 + * allocated region from user space, it is passed as the mmap'ing
2798 + * offset, we use it to 'hide' the videocore handle/address.
2800 + mutex_lock(&sm_state->lock);
2801 + resource->res_guid = ++sm_state->guid;
2802 + mutex_unlock(&sm_state->lock);
2803 + resource->res_guid <<= PAGE_SHIFT;
2805 + vmcs_sm_add_resource(private, resource);
2807 + pr_debug("[%s]: allocated data - guid %x, hdl %x, base address %p, size %d, cache %d\n",
2808 + __func__, resource->res_guid, resource->res_handle,
2809 + resource->res_base_mem, resource->res_size,
2810 + resource->res_cached);
2813 + resource->res_stats[ALLOC]++;
2814 + ioparam->handle = resource->res_guid;
2818 + pr_err("[%s]: failed to allocate \"%s\" data (%i) - type %u, base %u (%u), num %u, alignment %u\n",
2819 + __func__, alloc.name, ret, alloc.type, ioparam->size,
2820 + alloc.base_unit, alloc.num_unit, alloc.alignement);
2821 + if (resource != NULL) {
2822 + vc_sm_resource_deceased(resource, 1);
2828 +/* Share an allocate memory handle and block.*/
2829 +static int vc_sm_ioctl_alloc_share(struct sm_priv_data_t *private,
2830 + struct vmcs_sm_ioctl_alloc_share *ioparam)
2832 + struct sm_resource_t *resource, *shared_resource;
2835 + pr_debug("[%s]: attempt to share resource %u\n", __func__,
2838 + shared_resource = vmcs_sm_acquire_global_resource(ioparam->handle);
2839 + if (shared_resource == NULL) {
2844 + /* Allocate local resource to track this allocation. */
2845 + resource = kzalloc(sizeof(*resource), GFP_KERNEL);
2846 + if (resource == NULL) {
2847 + pr_err("[%s]: failed to allocate local tracking resource\n",
2852 + INIT_LIST_HEAD(&resource->map_list);
2853 + resource->ref_count++;
2854 + resource->pid = current->tgid;
2856 + /* Keep track of the resource we created. */
2857 + resource->private = private;
2858 + resource->res_handle = shared_resource->res_handle;
2859 + resource->res_base_mem = shared_resource->res_base_mem;
2860 + resource->res_size = shared_resource->res_size;
2861 + resource->res_cached = shared_resource->res_cached;
2862 + resource->res_shared = shared_resource;
2864 + mutex_lock(&sm_state->lock);
2865 + resource->res_guid = ++sm_state->guid;
2866 + mutex_unlock(&sm_state->lock);
2867 + resource->res_guid <<= PAGE_SHIFT;
2869 + vmcs_sm_add_resource(private, resource);
2871 + pr_debug("[%s]: allocated data - guid %x, hdl %x, base address %p, size %d, cache %d\n",
2872 + __func__, resource->res_guid, resource->res_handle,
2873 + resource->res_base_mem, resource->res_size,
2874 + resource->res_cached);
2877 + resource->res_stats[ALLOC]++;
2878 + ioparam->handle = resource->res_guid;
2879 + ioparam->size = resource->res_size;
2883 + pr_err("[%s]: failed to share %u\n", __func__, ioparam->handle);
2884 + if (shared_resource != NULL)
2885 + vmcs_sm_release_resource(shared_resource, 0);
2890 +/* Free a previously allocated shared memory handle and block.*/
2891 +static int vc_sm_ioctl_free(struct sm_priv_data_t *private,
2892 + struct vmcs_sm_ioctl_free *ioparam)
2894 + struct sm_resource_t *resource =
2895 + vmcs_sm_acquire_resource(private, ioparam->handle);
2897 + if (resource == NULL) {
2898 + pr_err("[%s]: resource for guid %u does not exist\n", __func__,
2903 + /* Check permissions. */
2904 + if (resource->pid && (resource->pid != current->tgid)) {
2905 + pr_err("[%s]: current tgid %u != %u owner\n",
2906 + __func__, current->tgid, resource->pid);
2907 + vmcs_sm_release_resource(resource, 0);
2911 + vmcs_sm_release_resource(resource, 0);
2912 + vmcs_sm_release_resource(resource, 0);
2916 +/* Resize a previously allocated shared memory handle and block. */
2917 +static int vc_sm_ioctl_resize(struct sm_priv_data_t *private,
2918 + struct vmcs_sm_ioctl_resize *ioparam)
2922 + struct vc_sm_resize_t resize;
2923 + struct sm_resource_t *resource;
2925 + /* Locate resource from GUID. */
2926 + resource = vmcs_sm_acquire_resource(private, ioparam->handle);
2928 + pr_err("[%s]: failed resource - guid %x\n",
2929 + __func__, ioparam->handle);
2935 + * If the resource is locked, its reference count will be not NULL,
2936 + * in which case we will not be allowed to resize it anyways, so
2937 + * reject the attempt here.
2939 + if (resource->lock_count != 0) {
2940 + pr_err("[%s]: cannot resize - guid %x, ref-cnt %d\n",
2941 + __func__, ioparam->handle, resource->lock_count);
2946 + /* Check permissions. */
2947 + if (resource->pid && (resource->pid != current->tgid)) {
2948 + pr_err("[%s]: current tgid %u != %u owner\n", __func__,
2949 + current->tgid, resource->pid);
2954 + if (resource->map_count != 0) {
2955 + pr_err("[%s]: cannot resize - guid %x, ref-cnt %d\n",
2956 + __func__, ioparam->handle, resource->map_count);
2961 + resize.res_handle = resource->res_handle;
2962 + resize.res_mem = (uint32_t)resource->res_base_mem;
2963 + resize.res_new_size = ioparam->new_size;
2965 + pr_debug("[%s]: attempt to resize data - guid %x, hdl %x, base address %p\n",
2966 + __func__, ioparam->handle, resize.res_handle,
2967 + (void *)resize.res_mem);
2969 + /* Resize the videocore allocated resource. */
2970 + status = vc_vchi_sm_resize(sm_state->sm_handle, &resize,
2971 + &private->int_trans_id);
2972 + if (status == -EINTR) {
2973 + pr_debug("[%s]: requesting resize memory action restart (trans_id: %u)\n",
2974 + __func__, private->int_trans_id);
2975 + ret = -ERESTARTSYS;
2976 + private->restart_sys = -EINTR;
2977 + private->int_action = VC_SM_MSG_TYPE_RESIZE;
2979 + } else if (status) {
2980 + pr_err("[%s]: failed to resize memory on videocore (status: %u, trans_id: %u)\n",
2981 + __func__, status, private->int_trans_id);
2986 + pr_debug("[%s]: success to resize data - hdl %x, size %d -> %d\n",
2987 + __func__, resize.res_handle, resource->res_size,
2988 + resize.res_new_size);
2990 + /* Successfully resized, save the information and inform the user. */
2991 + ioparam->old_size = resource->res_size;
2992 + resource->res_size = resize.res_new_size;
2996 + vmcs_sm_release_resource(resource, 0);
3001 +/* Lock a previously allocated shared memory handle and block. */
3002 +static int vc_sm_ioctl_lock(struct sm_priv_data_t *private,
3003 + struct vmcs_sm_ioctl_lock_unlock *ioparam,
3004 + int change_cache, enum vmcs_sm_cache_e cache_type,
3005 + unsigned int vc_addr)
3008 + struct vc_sm_lock_unlock_t lock;
3009 + struct vc_sm_lock_result_t result;
3010 + struct sm_resource_t *resource;
3012 + struct sm_mmap *map, *map_tmp;
3013 + unsigned long phys_addr;
3017 + /* Locate resource from GUID. */
3018 + resource = vmcs_sm_acquire_resource(private, ioparam->handle);
3019 + if (resource == NULL) {
3024 + /* Check permissions. */
3025 + if (resource->pid && (resource->pid != current->tgid)) {
3026 + pr_err("[%s]: current tgid %u != %u owner\n", __func__,
3027 + current->tgid, resource->pid);
3032 + lock.res_handle = resource->res_handle;
3033 + lock.res_mem = (uint32_t)resource->res_base_mem;
3035 + /* Take the lock and get the address to be mapped. */
3036 + if (vc_addr == 0) {
3037 + pr_debug("[%s]: attempt to lock data - guid %x, hdl %x, base address %p\n",
3038 + __func__, ioparam->handle, lock.res_handle,
3039 + (void *)lock.res_mem);
3041 + /* Lock the videocore allocated resource. */
3042 + status = vc_vchi_sm_lock(sm_state->sm_handle, &lock, &result,
3043 + &private->int_trans_id);
3044 + if (status == -EINTR) {
3045 + pr_debug("[%s]: requesting lock memory action restart (trans_id: %u)\n",
3046 + __func__, private->int_trans_id);
3047 + ret = -ERESTARTSYS;
3048 + private->restart_sys = -EINTR;
3049 + private->int_action = VC_SM_MSG_TYPE_LOCK;
3051 + } else if (status ||
3052 + (!status && !(void *)result.res_mem)) {
3053 + pr_err("[%s]: failed to lock memory on videocore (status: %u, trans_id: %u)\n",
3054 + __func__, status, private->int_trans_id);
3056 + resource->res_stats[LOCK_FAIL]++;
3060 + pr_debug("[%s]: succeed to lock data - hdl %x, base address %p (%p), ref-cnt %d\n",
3061 + __func__, lock.res_handle, (void *)result.res_mem,
3062 + (void *)lock.res_mem, resource->lock_count);
3064 + /* Lock assumed taken already, address to be mapped is known. */
3066 + resource->res_base_mem = (void *)vc_addr;
3068 + resource->res_stats[LOCK]++;
3069 + resource->lock_count++;
3071 + /* Keep track of the new base memory allocation if it has changed. */
3072 + if ((vc_addr == 0) &&
3073 + ((void *)result.res_mem) &&
3074 + ((void *)result.res_old_mem) &&
3075 + (result.res_mem != result.res_old_mem)) {
3076 + resource->res_base_mem = (void *)result.res_mem;
3078 + /* Kernel allocated resources. */
3079 + if (resource->pid == 0) {
3080 + if (!list_empty(&resource->map_list)) {
3081 + list_for_each_entry_safe(map, map_tmp,
3082 + &resource->map_list,
3083 + resource_map_list) {
3084 + if (map->res_addr) {
3085 + iounmap((void *)map->res_addr);
3086 + map->res_addr = 0;
3088 + vmcs_sm_remove_map(sm_state,
3099 + resource->res_cached = cache_type;
3101 + if (resource->map_count) {
3103 + vmcs_sm_usr_address_from_pid_and_usr_handle(
3104 + current->tgid, ioparam->handle);
3106 + pr_debug("[%s] map_count %d private->pid %d current->tgid %d hnd %x addr %u\n",
3107 + __func__, resource->map_count, private->pid,
3108 + current->tgid, ioparam->handle, ioparam->addr);
3110 + /* Kernel allocated resources. */
3111 + if (resource->pid == 0) {
3112 + pr_debug("[%s]: attempt mapping kernel resource - guid %x, hdl %x\n",
3113 + __func__, ioparam->handle, lock.res_handle);
3115 + ioparam->addr = 0;
3117 + map = kzalloc(sizeof(*map), GFP_KERNEL);
3118 + if (map == NULL) {
3119 + pr_err("[%s]: failed allocating tracker\n",
3124 + phys_addr = (uint32_t)resource->res_base_mem &
3126 + phys_addr += mm_vc_mem_phys_addr;
3127 + if (resource->res_cached
3128 + == VMCS_SM_CACHE_HOST) {
3129 + ioparam->addr = (unsigned long)
3130 + /* TODO - make cached work */
3131 + ioremap_nocache(phys_addr,
3132 + resource->res_size);
3134 + pr_debug("[%s]: mapping kernel - guid %x, hdl %x - cached mapping %u\n",
3135 + __func__, ioparam->handle,
3136 + lock.res_handle, ioparam->addr);
3138 + ioparam->addr = (unsigned long)
3139 + ioremap_nocache(phys_addr,
3140 + resource->res_size);
3142 + pr_debug("[%s]: mapping kernel- guid %x, hdl %x - non cached mapping %u\n",
3143 + __func__, ioparam->handle,
3144 + lock.res_handle, ioparam->addr);
3148 + map->res_vc_hdl = resource->res_handle;
3149 + map->res_usr_hdl = resource->res_guid;
3150 + map->res_addr = ioparam->addr;
3151 + map->resource = resource;
3154 + vmcs_sm_add_map(sm_state, resource, map);
3157 + ioparam->addr = 0;
3162 + vmcs_sm_release_resource(resource, 0);
3167 +/* Unlock a previously allocated shared memory handle and block.*/
3168 +static int vc_sm_ioctl_unlock(struct sm_priv_data_t *private,
3169 + struct vmcs_sm_ioctl_lock_unlock *ioparam,
3170 + int flush, int wait_reply, int no_vc_unlock)
3173 + struct vc_sm_lock_unlock_t unlock;
3174 + struct sm_mmap *map, *map_tmp;
3175 + struct sm_resource_t *resource;
3180 + /* Locate resource from GUID. */
3181 + resource = vmcs_sm_acquire_resource(private, ioparam->handle);
3182 + if (resource == NULL) {
3187 + /* Check permissions. */
3188 + if (resource->pid && (resource->pid != current->tgid)) {
3189 + pr_err("[%s]: current tgid %u != %u owner\n",
3190 + __func__, current->tgid, resource->pid);
3195 + unlock.res_handle = resource->res_handle;
3196 + unlock.res_mem = (uint32_t)resource->res_base_mem;
3198 + pr_debug("[%s]: attempt to unlock data - guid %x, hdl %x, base address %p\n",
3199 + __func__, ioparam->handle, unlock.res_handle,
3200 + (void *)unlock.res_mem);
3202 + /* User space allocated resources. */
3203 + if (resource->pid) {
3204 + /* Flush if requested */
3205 + if (resource->res_cached && flush) {
3206 + dma_addr_t phys_addr = 0;
3208 + resource->res_stats[FLUSH]++;
3211 + (dma_addr_t)((uint32_t)resource->res_base_mem &
3213 + phys_addr += (dma_addr_t)mm_vc_mem_phys_addr;
3215 + /* L1 cache flush */
3216 +			down_read(&current->mm->mmap_sem);
3217 + list_for_each_entry(map, &resource->map_list,
3218 + resource_map_list) {
3220 + const unsigned long start = map->vma->vm_start;
3221 + const unsigned long end = map->vma->vm_end;
3223 + ret = clean_invalid_mem_walk(start, end - start,
3224 + VCSM_CACHE_OP_FLUSH);
3229 +			up_read(&current->mm->mmap_sem);
3231 + /* L2 cache flush */
3232 + outer_clean_range(phys_addr,
3234 + (size_t) resource->res_size);
3237 + /* We need to zap all the vmas associated with this resource */
3238 + if (resource->lock_count == 1) {
3239 +			down_read(&current->mm->mmap_sem);
3240 + list_for_each_entry(map, &resource->map_list,
3241 + resource_map_list) {
3243 + zap_vma_ptes(map->vma,
3244 + map->vma->vm_start,
3245 + map->vma->vm_end -
3246 + map->vma->vm_start);
3249 +			up_read(&current->mm->mmap_sem);
3252 + /* Kernel allocated resources. */
3254 + /* Global + Taken in this context */
3255 + if (resource->ref_count == 2) {
3256 + if (!list_empty(&resource->map_list)) {
3257 + list_for_each_entry_safe(map, map_tmp,
3258 + &resource->map_list,
3259 + resource_map_list) {
3260 + if (map->res_addr) {
3262 + (resource->res_cached ==
3263 + VMCS_SM_CACHE_HOST)) {
3266 + phys_addr = (uint32_t)
3267 + resource->res_base_mem & 0x3FFFFFFF;
3269 + mm_vc_mem_phys_addr;
3271 + /* L1 cache flush */
3272 + dmac_flush_range((const
3275 + map->res_addr, (const void *)
3276 + (map->res_addr + resource->res_size));
3278 + /* L2 cache flush */
3283 + resource->res_size);
3286 + iounmap((void *)map->res_addr);
3287 + map->res_addr = 0;
3289 + vmcs_sm_remove_map(sm_state,
3299 + if (resource->lock_count) {
3300 + /* Bypass the videocore unlock. */
3303 + /* Unlock the videocore allocated resource. */
3306 + vc_vchi_sm_unlock(sm_state->sm_handle, &unlock,
3307 + &private->int_trans_id,
3309 + if (status == -EINTR) {
3310 + pr_debug("[%s]: requesting unlock memory action restart (trans_id: %u)\n",
3311 + __func__, private->int_trans_id);
3313 + ret = -ERESTARTSYS;
3314 + resource->res_stats[UNLOCK]--;
3315 + private->restart_sys = -EINTR;
3316 + private->int_action = VC_SM_MSG_TYPE_UNLOCK;
3318 + } else if (status != 0) {
3319 + pr_err("[%s]: failed to unlock vc mem (status: %u, trans_id: %u)\n",
3320 + __func__, status, private->int_trans_id);
3323 + resource->res_stats[UNLOCK_FAIL]++;
3328 + resource->res_stats[UNLOCK]++;
3329 + resource->lock_count--;
3332 + pr_debug("[%s]: success to unlock data - hdl %x, base address %p, ref-cnt %d\n",
3333 + __func__, unlock.res_handle, (void *)unlock.res_mem,
3334 + resource->lock_count);
3338 + vmcs_sm_release_resource(resource, 0);
3343 +/* Import a contiguous block of memory to be shared with VC. */
3344 +static int vc_sm_ioctl_import_dmabuf(struct sm_priv_data_t *private,
3345 + struct vmcs_sm_ioctl_import_dmabuf *ioparam,
3346 + struct dma_buf *src_dma_buf)
3350 + struct sm_resource_t *resource = NULL;
3351 + struct vc_sm_import import = { 0 };
3352 + struct vc_sm_import_result result = { 0 };
3353 + struct dma_buf *dma_buf;
3354 + struct dma_buf_attachment *attach = NULL;
3355 + struct sg_table *sgt = NULL;
3357 + /* Setup our allocation parameters */
3358 + if (src_dma_buf) {
3359 + get_dma_buf(src_dma_buf);
3360 + dma_buf = src_dma_buf;
3362 + dma_buf = dma_buf_get(ioparam->dmabuf_fd);
3364 + if (IS_ERR(dma_buf))
3365 + return PTR_ERR(dma_buf);
3367 + attach = dma_buf_attach(dma_buf, &sm_state->pdev->dev);
3368 + if (IS_ERR(attach)) {
3369 + ret = PTR_ERR(attach);
3373 + sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
3374 + if (IS_ERR(sgt)) {
3375 + ret = PTR_ERR(sgt);
3379 + /* Verify that the address block is contiguous */
3380 + if (sgt->nents != 1) {
3385 + import.type = ((ioparam->cached == VMCS_SM_CACHE_VC) ||
3386 + (ioparam->cached == VMCS_SM_CACHE_BOTH)) ?
3387 + VC_SM_ALLOC_CACHED : VC_SM_ALLOC_NON_CACHED;
3388 + import.addr = (uint32_t)sg_dma_address(sgt->sgl);
3389 + import.size = sg_dma_len(sgt->sgl);
3390 + import.allocator = current->tgid;
3392 + if (*ioparam->name)
3393 + memcpy(import.name, ioparam->name, sizeof(import.name) - 1);
3395 + memcpy(import.name, VMCS_SM_RESOURCE_NAME_DEFAULT,
3396 + sizeof(VMCS_SM_RESOURCE_NAME_DEFAULT));
3398 + pr_debug("[%s]: attempt to import \"%s\" data - type %u, addr %p, size %u\n",
3399 + __func__, import.name, import.type,
3400 + (void *)import.addr, import.size);
3402 + /* Allocate local resource to track this allocation. */
3403 + resource = kzalloc(sizeof(*resource), GFP_KERNEL);
3408 + INIT_LIST_HEAD(&resource->map_list);
3409 + resource->ref_count++;
3410 + resource->pid = current->tgid;
3412 + /* Allocate the videocore resource. */
3413 + status = vc_vchi_sm_import(sm_state->sm_handle, &import, &result,
3414 + &private->int_trans_id);
3415 + if (status == -EINTR) {
3416 + pr_debug("[%s]: requesting import memory action restart (trans_id: %u)\n",
3417 + __func__, private->int_trans_id);
3418 + ret = -ERESTARTSYS;
3419 + private->restart_sys = -EINTR;
3420 + private->int_action = VC_SM_MSG_TYPE_IMPORT;
3422 + } else if (status || !result.res_handle) {
3423 + pr_debug("[%s]: failed to import memory on videocore (status: %u, trans_id: %u)\n",
3424 + __func__, status, private->int_trans_id);
3426 + resource->res_stats[ALLOC_FAIL]++;
3430 + /* Keep track of the resource we created. */
3431 + resource->private = private;
3432 + resource->res_handle = result.res_handle;
3433 + resource->res_size = import.size;
3434 + resource->res_cached = ioparam->cached;
3436 + resource->dma_buf = dma_buf;
3437 + resource->attach = attach;
3438 + resource->sgt = sgt;
3439 + resource->dma_addr = sg_dma_address(sgt->sgl);
3442 + * Kernel/user GUID. This global identifier is used for mmap'ing the
3443 + * allocated region from user space, it is passed as the mmap'ing
3444 + * offset, we use it to 'hide' the videocore handle/address.
3446 + mutex_lock(&sm_state->lock);
3447 + resource->res_guid = ++sm_state->guid;
3448 + mutex_unlock(&sm_state->lock);
3449 + resource->res_guid <<= PAGE_SHIFT;
3451 + vmcs_sm_add_resource(private, resource);
3454 + resource->res_stats[IMPORT]++;
3455 + ioparam->handle = resource->res_guid;
3460 + resource->res_stats[IMPORT_FAIL]++;
3461 + vc_sm_resource_deceased(resource, 1);
3465 + dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
3467 + dma_buf_detach(dma_buf, attach);
3468 + dma_buf_put(dma_buf);
3472 +/* Handle control from host. */
3473 +static long vc_sm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
3476 + unsigned int cmdnr = _IOC_NR(cmd);
3477 + struct sm_priv_data_t *file_data =
3478 + (struct sm_priv_data_t *)file->private_data;
3479 + struct sm_resource_t *resource = NULL;
3481 + /* Validate we can work with this device. */
3482 + if ((sm_state == NULL) || (file_data == NULL)) {
3483 + pr_err("[%s]: invalid device\n", __func__);
3488 + pr_debug("[%s]: cmd %x tgid %u, owner %u\n", __func__, cmdnr,
3489 + current->tgid, file_data->pid);
3491 + /* Action is a re-post of a previously interrupted action? */
3492 + if (file_data->restart_sys == -EINTR) {
3493 + struct vc_sm_action_clean_t action_clean;
3495 + pr_debug("[%s]: clean up of action %u (trans_id: %u) following EINTR\n",
3496 + __func__, file_data->int_action,
3497 + file_data->int_trans_id);
3499 + action_clean.res_action = file_data->int_action;
3500 + action_clean.action_trans_id = file_data->int_trans_id;
3502 + vc_vchi_sm_clean_up(sm_state->sm_handle, &action_clean);
3504 + file_data->restart_sys = 0;
3507 + /* Now process the command. */
3509 + /* New memory allocation.
3511 + case VMCS_SM_CMD_ALLOC:
3513 + struct vmcs_sm_ioctl_alloc ioparam;
3515 + /* Get the parameter data. */
3516 + if (copy_from_user
3517 + (&ioparam, (void *)arg, sizeof(ioparam)) != 0) {
3518 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
3524 + ret = vc_sm_ioctl_alloc(file_data, &ioparam);
3526 + (copy_to_user((void *)arg,
3527 + &ioparam, sizeof(ioparam)) != 0)) {
3528 + struct vmcs_sm_ioctl_free freeparam = {
3531 + pr_err("[%s]: failed to copy-to-user for cmd %x\n",
3533 + vc_sm_ioctl_free(file_data, &freeparam);
3542 + /* Share existing memory allocation. */
3543 + case VMCS_SM_CMD_ALLOC_SHARE:
3545 + struct vmcs_sm_ioctl_alloc_share ioparam;
3547 + /* Get the parameter data. */
3548 + if (copy_from_user
3549 + (&ioparam, (void *)arg, sizeof(ioparam)) != 0) {
3550 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
3556 + ret = vc_sm_ioctl_alloc_share(file_data, &ioparam);
3558 + /* Copy result back to user. */
3560 + && copy_to_user((void *)arg, &ioparam,
3561 + sizeof(ioparam)) != 0) {
3562 + struct vmcs_sm_ioctl_free freeparam = {
3565 + pr_err("[%s]: failed to copy-to-user for cmd %x\n",
3567 + vc_sm_ioctl_free(file_data, &freeparam);
3576 + case VMCS_SM_CMD_IMPORT_DMABUF:
3578 + struct vmcs_sm_ioctl_import_dmabuf ioparam;
3580 + /* Get the parameter data. */
3581 + if (copy_from_user
3582 + (&ioparam, (void *)arg, sizeof(ioparam)) != 0) {
3583 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
3589 + ret = vc_sm_ioctl_import_dmabuf(file_data, &ioparam,
3592 + (copy_to_user((void *)arg,
3593 + &ioparam, sizeof(ioparam)) != 0)) {
3594 + struct vmcs_sm_ioctl_free freeparam = {
3597 + pr_err("[%s]: failed to copy-to-user for cmd %x\n",
3599 + vc_sm_ioctl_free(file_data, &freeparam);
3608 + /* Lock (attempt to) *and* register a cache behavior change. */
3609 + case VMCS_SM_CMD_LOCK_CACHE:
3611 + struct vmcs_sm_ioctl_lock_cache ioparam;
3612 + struct vmcs_sm_ioctl_lock_unlock lock;
3614 + /* Get parameter data. */
3615 + if (copy_from_user
3616 + (&ioparam, (void *)arg, sizeof(ioparam)) != 0) {
3617 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
3623 + lock.handle = ioparam.handle;
3625 + vc_sm_ioctl_lock(file_data, &lock, 1,
3626 + ioparam.cached, 0);
3633 + /* Lock (attempt to) existing memory allocation. */
3634 + case VMCS_SM_CMD_LOCK:
3636 + struct vmcs_sm_ioctl_lock_unlock ioparam;
3638 + /* Get parameter data. */
3639 + if (copy_from_user
3640 + (&ioparam, (void *)arg, sizeof(ioparam)) != 0) {
3641 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
3647 + ret = vc_sm_ioctl_lock(file_data, &ioparam, 0, 0, 0);
3649 + /* Copy result back to user. */
3650 + if (copy_to_user((void *)arg, &ioparam, sizeof(ioparam))
3652 + pr_err("[%s]: failed to copy-to-user for cmd %x\n",
3662 + /* Unlock (attempt to) existing memory allocation. */
3663 + case VMCS_SM_CMD_UNLOCK:
3665 + struct vmcs_sm_ioctl_lock_unlock ioparam;
3667 + /* Get parameter data. */
3668 + if (copy_from_user
3669 + (&ioparam, (void *)arg, sizeof(ioparam)) != 0) {
3670 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
3676 + ret = vc_sm_ioctl_unlock(file_data, &ioparam, 0, 1, 0);
3683 + /* Resize (attempt to) existing memory allocation. */
3684 + case VMCS_SM_CMD_RESIZE:
3686 + struct vmcs_sm_ioctl_resize ioparam;
3688 + /* Get parameter data. */
3689 + if (copy_from_user
3690 + (&ioparam, (void *)arg, sizeof(ioparam)) != 0) {
3691 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
3697 + ret = vc_sm_ioctl_resize(file_data, &ioparam);
3699 + /* Copy result back to user. */
3700 + if (copy_to_user((void *)arg, &ioparam, sizeof(ioparam))
3702 + pr_err("[%s]: failed to copy-to-user for cmd %x\n",
3710 + /* Terminate existing memory allocation.
3712 + case VMCS_SM_CMD_FREE:
3714 + struct vmcs_sm_ioctl_free ioparam;
3716 + /* Get parameter data.
3718 + if (copy_from_user
3719 + (&ioparam, (void *)arg, sizeof(ioparam)) != 0) {
3720 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
3726 + ret = vc_sm_ioctl_free(file_data, &ioparam);
3734 + /* Walk allocation on videocore, information shows up in the
3737 + case VMCS_SM_CMD_VC_WALK_ALLOC:
3739 + pr_debug("[%s]: invoking walk alloc\n", __func__);
3741 + if (vc_vchi_sm_walk_alloc(sm_state->sm_handle) != 0)
3742 + pr_err("[%s]: failed to walk-alloc on videocore\n",
3750 + /* Walk mapping table on host, information shows up in the
3753 + case VMCS_SM_CMD_HOST_WALK_MAP:
3755 + /* Use pid of -1 to request a walk of the whole map. */
3756 + vmcs_sm_host_walk_map_per_pid(-1);
3763 + /* Walk allocation per process on host. */
3764 + case VMCS_SM_CMD_HOST_WALK_PID_ALLOC:
3766 + struct vmcs_sm_ioctl_walk ioparam;
3768 + /* Get parameter data. */
3769 + if (copy_from_user(&ioparam,
3770 + (void *)arg, sizeof(ioparam)) != 0) {
3771 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
3777 + vmcs_sm_host_walk_alloc(file_data);
3784 + /* Walk mapping table per process on host. */
3785 + case VMCS_SM_CMD_HOST_WALK_PID_MAP:
3787 + struct vmcs_sm_ioctl_walk ioparam;
3789 + /* Get parameter data. */
3790 + if (copy_from_user(&ioparam,
3791 + (void *)arg, sizeof(ioparam)) != 0) {
3792 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
3798 + vmcs_sm_host_walk_map_per_pid(ioparam.pid);
3805 + /* Gets the size of the memory associated with a user handle. */
3806 + case VMCS_SM_CMD_SIZE_USR_HANDLE:
3808 + struct vmcs_sm_ioctl_size ioparam;
3810 + /* Get parameter data. */
3811 + if (copy_from_user(&ioparam,
3812 + (void *)arg, sizeof(ioparam)) != 0) {
3813 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
3819 + /* Locate resource from GUID. */
3821 + vmcs_sm_acquire_resource(file_data, ioparam.handle);
3822 + if (resource != NULL) {
3823 + ioparam.size = resource->res_size;
3824 + vmcs_sm_release_resource(resource, 0);
3829 + if (copy_to_user((void *)arg,
3830 + &ioparam, sizeof(ioparam)) != 0) {
3831 + pr_err("[%s]: failed to copy-to-user for cmd %x\n",
3841 + /* Verify we are dealing with a valid resource. */
3842 + case VMCS_SM_CMD_CHK_USR_HANDLE:
3844 + struct vmcs_sm_ioctl_chk ioparam;
3846 + /* Get parameter data. */
3847 + if (copy_from_user(&ioparam,
3848 + (void *)arg, sizeof(ioparam)) != 0) {
3849 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
3856 + /* Locate resource from GUID. */
3858 + vmcs_sm_acquire_resource(file_data, ioparam.handle);
3859 + if (resource == NULL)
3862 + * If the resource is cacheable, return additional
3863 + * information that may be needed to flush the cache.
3865 + else if ((resource->res_cached == VMCS_SM_CACHE_HOST) ||
3866 + (resource->res_cached == VMCS_SM_CACHE_BOTH)) {
3868 + vmcs_sm_usr_address_from_pid_and_usr_handle
3869 + (current->tgid, ioparam.handle);
3870 + ioparam.size = resource->res_size;
3871 + ioparam.cache = resource->res_cached;
3875 + ioparam.cache = resource->res_cached;
3879 + vmcs_sm_release_resource(resource, 0);
3881 + if (copy_to_user((void *)arg,
3882 + &ioparam, sizeof(ioparam)) != 0) {
3883 + pr_err("[%s]: failed to copy-to-user for cmd %x\n",
3894 + * Maps a user handle given the process and the virtual address.
3896 + case VMCS_SM_CMD_MAPPED_USR_HANDLE:
3898 + struct vmcs_sm_ioctl_map ioparam;
3900 + /* Get parameter data. */
3901 + if (copy_from_user(&ioparam,
3902 + (void *)arg, sizeof(ioparam)) != 0) {
3903 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
3911 + vmcs_sm_usr_handle_from_pid_and_address(
3912 + ioparam.pid, ioparam.addr);
3915 + vmcs_sm_acquire_resource(file_data, ioparam.handle);
3916 + if ((resource != NULL)
3917 + && ((resource->res_cached == VMCS_SM_CACHE_HOST)
3918 + || (resource->res_cached ==
3919 + VMCS_SM_CACHE_BOTH))) {
3920 + ioparam.size = resource->res_size;
3926 + vmcs_sm_release_resource(resource, 0);
3928 + if (copy_to_user((void *)arg,
3929 + &ioparam, sizeof(ioparam)) != 0) {
3930 + pr_err("[%s]: failed to copy-to-user for cmd %x\n",
3941 + * Maps a videocore handle given process and virtual address.
3943 + case VMCS_SM_CMD_MAPPED_VC_HDL_FROM_ADDR:
3945 + struct vmcs_sm_ioctl_map ioparam;
3947 + /* Get parameter data. */
3948 + if (copy_from_user(&ioparam,
3949 + (void *)arg, sizeof(ioparam)) != 0) {
3950 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
3956 + ioparam.handle = vmcs_sm_vc_handle_from_pid_and_address(
3957 + ioparam.pid, ioparam.addr);
3959 + if (copy_to_user((void *)arg,
3960 + &ioparam, sizeof(ioparam)) != 0) {
3961 + pr_err("[%s]: failed to copy-to-user for cmd %x\n",
3972 + /* Maps a videocore handle given process and user handle. */
3973 + case VMCS_SM_CMD_MAPPED_VC_HDL_FROM_HDL:
3975 + struct vmcs_sm_ioctl_map ioparam;
3977 + /* Get parameter data. */
3978 + if (copy_from_user(&ioparam,
3979 + (void *)arg, sizeof(ioparam)) != 0) {
3980 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
3986 + /* Locate resource from GUID. */
3988 + vmcs_sm_acquire_resource(file_data, ioparam.handle);
3989 + if (resource != NULL) {
3990 + ioparam.handle = resource->res_handle;
3991 + vmcs_sm_release_resource(resource, 0);
3993 + ioparam.handle = 0;
3996 + if (copy_to_user((void *)arg,
3997 + &ioparam, sizeof(ioparam)) != 0) {
3998 + pr_err("[%s]: failed to copy-to-user for cmd %x\n",
4010 + * Maps a videocore address given process and videocore handle.
4012 + case VMCS_SM_CMD_MAPPED_VC_ADDR_FROM_HDL:
4014 + struct vmcs_sm_ioctl_map ioparam;
4016 + /* Get parameter data. */
4017 + if (copy_from_user(&ioparam,
4018 + (void *)arg, sizeof(ioparam)) != 0) {
4019 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
4026 + /* Locate resource from GUID. */
4028 + vmcs_sm_acquire_resource(file_data, ioparam.handle);
4029 + if (resource != NULL) {
4031 + (unsigned int)resource->res_base_mem;
4032 + vmcs_sm_release_resource(resource, 0);
4037 + if (copy_to_user((void *)arg,
4038 + &ioparam, sizeof(ioparam)) != 0) {
4039 + pr_err("[%s]: failed to copy-to-user for cmd %x\n",
4049 + /* Maps a user address given process and vc handle. */
4050 + case VMCS_SM_CMD_MAPPED_USR_ADDRESS:
4052 + struct vmcs_sm_ioctl_map ioparam;
4054 + /* Get parameter data. */
4055 + if (copy_from_user(&ioparam,
4056 + (void *)arg, sizeof(ioparam)) != 0) {
4057 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
4064 + * Return the address information from the mapping,
4065 + * 0 (ie NULL) if it cannot locate the actual mapping.
4068 + vmcs_sm_usr_address_from_pid_and_usr_handle
4069 + (ioparam.pid, ioparam.handle);
4071 + if (copy_to_user((void *)arg,
4072 + &ioparam, sizeof(ioparam)) != 0) {
4073 + pr_err("[%s]: failed to copy-to-user for cmd %x\n",
4083 + /* Flush the cache for a given mapping. */
4084 + case VMCS_SM_CMD_FLUSH:
4086 + struct vmcs_sm_ioctl_cache ioparam;
4088 + /* Get parameter data. */
4089 + if (copy_from_user(&ioparam,
4090 + (void *)arg, sizeof(ioparam)) != 0) {
4091 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
4097 + /* Locate resource from GUID. */
4099 + vmcs_sm_acquire_resource(file_data, ioparam.handle);
4100 + if (resource == NULL) {
4105 + ret = clean_invalid_resource_walk((void __user*) ioparam.addr,
4106 + ioparam.size, VCSM_CACHE_OP_FLUSH, ioparam.handle,
4108 + vmcs_sm_release_resource(resource, 0);
4114 + /* Invalidate the cache for a given mapping. */
4115 + case VMCS_SM_CMD_INVALID:
4117 + struct vmcs_sm_ioctl_cache ioparam;
4119 + /* Get parameter data. */
4120 + if (copy_from_user(&ioparam,
4121 + (void *)arg, sizeof(ioparam)) != 0) {
4122 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
4128 + /* Locate resource from GUID. */
4130 + vmcs_sm_acquire_resource(file_data, ioparam.handle);
4131 + if (resource == NULL) {
4136 + ret = clean_invalid_resource_walk((void __user*) ioparam.addr,
4137 + ioparam.size, VCSM_CACHE_OP_INV, ioparam.handle, resource);
4138 + vmcs_sm_release_resource(resource, 0);
4144 + /* Flush/Invalidate the cache for a given mapping. */
4145 + case VMCS_SM_CMD_CLEAN_INVALID:
4148 + struct vmcs_sm_ioctl_clean_invalid ioparam;
4150 + /* Get parameter data. */
4151 + if (copy_from_user(&ioparam,
4152 + (void *)arg, sizeof(ioparam)) != 0) {
4153 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
4158 + for (i = 0; i < sizeof(ioparam.s) / sizeof(*ioparam.s); i++) {
4159 + if (ioparam.s[i].cmd == VCSM_CACHE_OP_NOP)
4162 + /* Locate resource from GUID. */
4164 + vmcs_sm_acquire_resource(file_data, ioparam.s[i].handle);
4165 + if (resource == NULL) {
4170 + ret = clean_invalid_resource_walk(
4171 + (void __user*) ioparam.s[i].addr, ioparam.s[i].size,
4172 + ioparam.s[i].cmd, ioparam.s[i].handle, resource);
4173 + vmcs_sm_release_resource(resource, 0);
4180 + * Flush/Invalidate the cache for a given mapping.
4181 + * Blocks must be pinned (i.e. accessed) before this call.
4183 + case VMCS_SM_CMD_CLEAN_INVALID2:
4186 + struct vmcs_sm_ioctl_clean_invalid2 ioparam;
4187 + struct vmcs_sm_ioctl_clean_invalid_block *block = NULL;
4189 + /* Get parameter data. */
4190 + if (copy_from_user(&ioparam,
4191 + (void *)arg, sizeof(ioparam)) != 0) {
4192 + pr_err("[%s]: failed to copy-from-user header for cmd %x\n",
4197 + block = kmalloc(ioparam.op_count *
4198 + sizeof(struct vmcs_sm_ioctl_clean_invalid_block),
4204 + if (copy_from_user(block,
4205 + (void *)(arg + sizeof(ioparam)), ioparam.op_count * sizeof(struct vmcs_sm_ioctl_clean_invalid_block)) != 0) {
4206 + pr_err("[%s]: failed to copy-from-user payload for cmd %x\n",
4212 + for (i = 0; i < ioparam.op_count; i++) {
4213 + const struct vmcs_sm_ioctl_clean_invalid_block * const op = block + i;
4215 + if (op->invalidate_mode == VCSM_CACHE_OP_NOP)
4218 + ret = clean_invalid_contiguous_mem_2d(
4219 + (void __user*) op->start_address, op->block_count,
4220 + op->block_size, op->inter_block_stride,
4221 + op->invalidate_mode);
4241 +/* Device operations that we manage in this driver. */
4242 +static const struct file_operations vmcs_sm_ops = {
4243 + .owner = THIS_MODULE,
4244 + .unlocked_ioctl = vc_sm_ioctl,
4245 + .open = vc_sm_open,
4246 + .release = vc_sm_release,
4247 + .mmap = vc_sm_mmap,
4250 +/* Creation of device. */
4251 +static int vc_sm_create_sharedmemory(void)
4255 + if (sm_state == NULL) {
4260 + /* Create a device class for creating dev nodes. */
4261 + sm_state->sm_class = class_create(THIS_MODULE, "vc-sm");
4262 + if (IS_ERR(sm_state->sm_class)) {
4263 + pr_err("[%s]: unable to create device class\n", __func__);
4264 + ret = PTR_ERR(sm_state->sm_class);
4268 + /* Create a character driver. */
4269 + ret = alloc_chrdev_region(&sm_state->sm_devid,
4270 + DEVICE_MINOR, 1, DEVICE_NAME);
4272 + pr_err("[%s]: unable to allocate device number\n", __func__);
4273 + goto out_dev_class_destroy;
4276 + cdev_init(&sm_state->sm_cdev, &vmcs_sm_ops);
4277 + ret = cdev_add(&sm_state->sm_cdev, sm_state->sm_devid, 1);
4279 + pr_err("[%s]: unable to register device\n", __func__);
4280 + goto out_chrdev_unreg;
4283 + /* Create a device node. */
4284 + sm_state->sm_dev = device_create(sm_state->sm_class,
4286 + MKDEV(MAJOR(sm_state->sm_devid),
4287 + DEVICE_MINOR), NULL,
4289 + if (IS_ERR(sm_state->sm_dev)) {
4290 + pr_err("[%s]: unable to create device node\n", __func__);
4291 + ret = PTR_ERR(sm_state->sm_dev);
4292 + goto out_chrdev_del;
4298 + cdev_del(&sm_state->sm_cdev);
4300 + unregister_chrdev_region(sm_state->sm_devid, 1);
4301 +out_dev_class_destroy:
4302 + class_destroy(sm_state->sm_class);
4303 + sm_state->sm_class = NULL;
4308 +/* Termination of the device. */
4309 +static int vc_sm_remove_sharedmemory(void)
4313 + if (sm_state == NULL) {
4314 + /* Nothing to do. */
4319 + /* Remove the sharedmemory character driver. */
4320 + cdev_del(&sm_state->sm_cdev);
4322 + /* Unregister region. */
4323 + unregister_chrdev_region(sm_state->sm_devid, 1);
4332 +/* Videocore connected. */
4333 +static void vc_sm_connected_init(void)
4336 + VCHI_INSTANCE_T vchi_instance;
4338 + pr_info("[%s]: start\n", __func__);
4341 + * Initialize and create a VCHI connection for the shared memory service
4342 + * running on videocore.
4344 + ret = vchi_initialise(&vchi_instance);
4346 + pr_err("[%s]: failed to initialise VCHI instance (ret=%d)\n",
4350 + goto err_free_mem;
4353 + ret = vchi_connect(vchi_instance);
4355 + pr_err("[%s]: failed to connect VCHI instance (ret=%d)\n",
4359 + goto err_free_mem;
4362 + /* Initialize an instance of the shared memory service. */
4363 + sm_state->sm_handle =
4364 + vc_vchi_sm_init(vchi_instance);
4365 + if (sm_state->sm_handle == NULL) {
4366 + pr_err("[%s]: failed to initialize shared memory service\n",
4370 + goto err_free_mem;
4373 + /* Create a debug fs directory entry (root). */
4374 + sm_state->dir_root = debugfs_create_dir(VC_SM_DIR_ROOT_NAME, NULL);
4375 + if (!sm_state->dir_root) {
4376 + pr_err("[%s]: failed to create \'%s\' directory entry\n",
4377 + __func__, VC_SM_DIR_ROOT_NAME);
4380 + goto err_stop_sm_service;
4383 + sm_state->dir_state.show = &vc_sm_global_state_show;
4384 + sm_state->dir_state.dir_entry = debugfs_create_file(VC_SM_STATE,
4385 + 0444, sm_state->dir_root, &sm_state->dir_state,
4386 + &vc_sm_debug_fs_fops);
4388 + sm_state->dir_stats.show = &vc_sm_global_statistics_show;
4389 + sm_state->dir_stats.dir_entry = debugfs_create_file(VC_SM_STATS,
4390 + 0444, sm_state->dir_root, &sm_state->dir_stats,
4391 + &vc_sm_debug_fs_fops);
4393 + /* Create the proc entry children. */
4394 + sm_state->dir_alloc = debugfs_create_dir(VC_SM_DIR_ALLOC_NAME,
4395 + sm_state->dir_root);
4397 + /* Create a shared memory device. */
4398 + ret = vc_sm_create_sharedmemory();
4400 + pr_err("[%s]: failed to create shared memory device\n",
4402 + goto err_remove_debugfs;
4405 + INIT_LIST_HEAD(&sm_state->map_list);
4406 + INIT_LIST_HEAD(&sm_state->resource_list);
4408 + sm_state->data_knl = vc_sm_create_priv_data(0);
4409 + if (sm_state->data_knl == NULL) {
4410 + pr_err("[%s]: failed to create kernel private data tracker\n",
4412 + goto err_remove_shared_memory;
4419 +err_remove_shared_memory:
4420 + vc_sm_remove_sharedmemory();
4421 +err_remove_debugfs:
4422 + debugfs_remove_recursive(sm_state->dir_root);
4423 +err_stop_sm_service:
4424 + vc_vchi_sm_stop(&sm_state->sm_handle);
4428 + pr_info("[%s]: end - returning %d\n", __func__, ret);
4431 +/* Driver loading. */
4432 +static int bcm2835_vcsm_probe(struct platform_device *pdev)
4434 + pr_info("vc-sm: Videocore shared memory driver\n");
4436 + sm_state = kzalloc(sizeof(*sm_state), GFP_KERNEL);
4439 + sm_state->pdev = pdev;
4440 + mutex_init(&sm_state->lock);
4441 + mutex_init(&sm_state->map_lock);
4443 + vchiq_add_connected_callback(vc_sm_connected_init);
4447 +/* Driver unloading. */
4448 +static int bcm2835_vcsm_remove(struct platform_device *pdev)
4450 + pr_debug("[%s]: start\n", __func__);
4452 + /* Remove shared memory device. */
4453 + vc_sm_remove_sharedmemory();
4455 + /* Remove all proc entries. */
4456 + debugfs_remove_recursive(sm_state->dir_root);
4458 + /* Stop the videocore shared memory service. */
4459 + vc_vchi_sm_stop(&sm_state->sm_handle);
4461 + /* Free the memory for the state structure. */
4462 + mutex_destroy(&(sm_state->map_lock));
4466 + pr_debug("[%s]: end\n", __func__);
4470 +#if defined(__KERNEL__)
4471 +/* Allocate a shared memory handle and block. */
4472 +int vc_sm_alloc(struct vc_sm_alloc_t *alloc, int *handle)
4474 + struct vmcs_sm_ioctl_alloc ioparam = { 0 };
4476 + struct sm_resource_t *resource;
4478 + /* Validate we can work with this device. */
4479 + if (sm_state == NULL || alloc == NULL || handle == NULL) {
4480 + pr_err("[%s]: invalid input\n", __func__);
4484 + ioparam.size = alloc->base_unit;
4485 + ioparam.num = alloc->num_unit;
4487 + alloc->type == VC_SM_ALLOC_CACHED ? VMCS_SM_CACHE_VC : 0;
4489 + ret = vc_sm_ioctl_alloc(sm_state->data_knl, &ioparam);
4493 + vmcs_sm_acquire_resource(sm_state->data_knl,
4496 + resource->pid = 0;
4497 + vmcs_sm_release_resource(resource, 0);
4499 + /* Assign valid handle at this time. */
4500 + *handle = ioparam.handle;
4508 +EXPORT_SYMBOL_GPL(vc_sm_alloc);
4510 +/* Get an internal resource handle mapped from the external one. */
4511 +int vc_sm_int_handle(int handle)
4513 + struct sm_resource_t *resource;
4516 + /* Validate we can work with this device. */
4517 + if (sm_state == NULL || handle == 0) {
4518 + pr_err("[%s]: invalid input\n", __func__);
4522 + /* Locate resource from GUID. */
4523 + resource = vmcs_sm_acquire_resource(sm_state->data_knl, handle);
4525 + ret = resource->res_handle;
4526 + vmcs_sm_release_resource(resource, 0);
4531 +EXPORT_SYMBOL_GPL(vc_sm_int_handle);
4533 +/* Free a previously allocated shared memory handle and block. */
4534 +int vc_sm_free(int handle)
4536 + struct vmcs_sm_ioctl_free ioparam = { handle };
4538 + /* Validate we can work with this device. */
4539 + if (sm_state == NULL || handle == 0) {
4540 + pr_err("[%s]: invalid input\n", __func__);
4544 + return vc_sm_ioctl_free(sm_state->data_knl, &ioparam);
4546 +EXPORT_SYMBOL_GPL(vc_sm_free);
4548 +/* Lock a memory handle for use by kernel. */
4549 +int vc_sm_lock(int handle, enum vc_sm_lock_cache_mode mode,
4550 + unsigned long *data)
4552 + struct vmcs_sm_ioctl_lock_unlock ioparam;
4555 + /* Validate we can work with this device. */
4556 + if (sm_state == NULL || handle == 0 || data == NULL) {
4557 + pr_err("[%s]: invalid input\n", __func__);
4563 + ioparam.handle = handle;
4564 + ret = vc_sm_ioctl_lock(sm_state->data_knl,
4568 + VC_SM_LOCK_CACHED) ? VMCS_SM_CACHE_HOST :
4569 + VMCS_SM_CACHE_NONE), 0);
4571 + *data = ioparam.addr;
4574 +EXPORT_SYMBOL_GPL(vc_sm_lock);
4576 +/* Unlock a memory handle in use by kernel. */
4577 +int vc_sm_unlock(int handle, int flush, int no_vc_unlock)
4579 + struct vmcs_sm_ioctl_lock_unlock ioparam;
4581 + /* Validate we can work with this device. */
4582 + if (sm_state == NULL || handle == 0) {
4583 + pr_err("[%s]: invalid input\n", __func__);
4587 + ioparam.handle = handle;
4588 + return vc_sm_ioctl_unlock(sm_state->data_knl,
4589 + &ioparam, flush, 0, no_vc_unlock);
4591 +EXPORT_SYMBOL_GPL(vc_sm_unlock);
4593 +/* Map a shared memory region for use by kernel. */
4594 +int vc_sm_map(int handle, unsigned int sm_addr,
4595 + enum vc_sm_lock_cache_mode mode, unsigned long *data)
4597 + struct vmcs_sm_ioctl_lock_unlock ioparam;
4600 + /* Validate we can work with this device. */
4601 + if (sm_state == NULL || handle == 0 || data == NULL || sm_addr == 0) {
4602 + pr_err("[%s]: invalid input\n", __func__);
4608 + ioparam.handle = handle;
4609 + ret = vc_sm_ioctl_lock(sm_state->data_knl,
4613 + VC_SM_LOCK_CACHED) ? VMCS_SM_CACHE_HOST :
4614 + VMCS_SM_CACHE_NONE), sm_addr);
4616 + *data = ioparam.addr;
4619 +EXPORT_SYMBOL_GPL(vc_sm_map);
4621 +/* Import a dmabuf to be shared with VC. */
4622 +int vc_sm_import_dmabuf(struct dma_buf *dmabuf, int *handle)
4624 + struct vmcs_sm_ioctl_import_dmabuf ioparam = { 0 };
4626 + struct sm_resource_t *resource;
4628 + /* Validate we can work with this device. */
4629 + if (!sm_state || !dmabuf || !handle) {
4630 + pr_err("[%s]: invalid input\n", __func__);
4634 + ioparam.cached = 0;
4635 + strcpy(ioparam.name, "KRNL DMABUF");
4637 + ret = vc_sm_ioctl_import_dmabuf(sm_state->data_knl, &ioparam, dmabuf);
4640 + resource = vmcs_sm_acquire_resource(sm_state->data_knl,
4643 + resource->pid = 0;
4644 + vmcs_sm_release_resource(resource, 0);
4646 + /* Assign valid handle at this time. */
4647 + *handle = ioparam.handle;
4655 +EXPORT_SYMBOL_GPL(vc_sm_import_dmabuf);
4659 + * Register the driver with device tree
4662 +static const struct of_device_id bcm2835_vcsm_of_match[] = {
4663 + {.compatible = "raspberrypi,bcm2835-vcsm",},
4664 + { /* sentinel */ },
4667 +MODULE_DEVICE_TABLE(of, bcm2835_vcsm_of_match);
4669 +static struct platform_driver bcm2835_vcsm_driver = {
4670 + .probe = bcm2835_vcsm_probe,
4671 + .remove = bcm2835_vcsm_remove,
4673 + .name = DRIVER_NAME,
4674 + .owner = THIS_MODULE,
4675 + .of_match_table = bcm2835_vcsm_of_match,
4679 +module_platform_driver(bcm2835_vcsm_driver);
4681 +MODULE_AUTHOR("Broadcom");
4682 +MODULE_DESCRIPTION("VideoCore SharedMemory Driver");
4683 +MODULE_LICENSE("GPL v2");
4685 +++ b/include/linux/broadcom/vmcs_sm_ioctl.h
4687 +/*****************************************************************************
4688 +* Copyright 2011 Broadcom Corporation. All rights reserved.
4690 +* Unless you and Broadcom execute a separate written software license
4691 +* agreement governing use of this software, this software is licensed to you
4692 +* under the terms of the GNU General Public License version 2, available at
4693 +* http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
4695 +* Notwithstanding the above, under no circumstances may you combine this
4696 +* software in any way with any other Broadcom software provided under a
4697 +* license other than the GPL, without Broadcom's express prior written
4700 +*****************************************************************************/
4702 +#if !defined(__VMCS_SM_IOCTL_H__INCLUDED__)
4703 +#define __VMCS_SM_IOCTL_H__INCLUDED__
4705 +/* ---- Include Files ---------------------------------------------------- */
4707 +#if defined(__KERNEL__)
4708 +#include <linux/types.h> /* Needed for standard types */
4710 +#include <stdint.h>
4713 +#include <linux/ioctl.h>
4715 +/* ---- Constants and Types ---------------------------------------------- */
4717 +#define VMCS_SM_RESOURCE_NAME 32
4718 +#define VMCS_SM_RESOURCE_NAME_DEFAULT "sm-host-resource"
4720 +/* Type define used to create unique IOCTL number */
4721 +#define VMCS_SM_MAGIC_TYPE 'I'
4723 +/* IOCTL commands */
4724 +enum vmcs_sm_cmd_e {
4725 + VMCS_SM_CMD_ALLOC = 0x5A, /* Start at 0x5A arbitrarily */
4726 + VMCS_SM_CMD_ALLOC_SHARE,
4728 + VMCS_SM_CMD_LOCK_CACHE,
4729 + VMCS_SM_CMD_UNLOCK,
4730 + VMCS_SM_CMD_RESIZE,
4731 + VMCS_SM_CMD_UNMAP,
4733 + VMCS_SM_CMD_FLUSH,
4734 + VMCS_SM_CMD_INVALID,
4736 + VMCS_SM_CMD_SIZE_USR_HANDLE,
4737 + VMCS_SM_CMD_CHK_USR_HANDLE,
4739 + VMCS_SM_CMD_MAPPED_USR_HANDLE,
4740 + VMCS_SM_CMD_MAPPED_USR_ADDRESS,
4741 + VMCS_SM_CMD_MAPPED_VC_HDL_FROM_ADDR,
4742 + VMCS_SM_CMD_MAPPED_VC_HDL_FROM_HDL,
4743 + VMCS_SM_CMD_MAPPED_VC_ADDR_FROM_HDL,
4745 + VMCS_SM_CMD_VC_WALK_ALLOC,
4746 + VMCS_SM_CMD_HOST_WALK_MAP,
4747 + VMCS_SM_CMD_HOST_WALK_PID_ALLOC,
4748 + VMCS_SM_CMD_HOST_WALK_PID_MAP,
4750 + VMCS_SM_CMD_CLEAN_INVALID,
4751 + VMCS_SM_CMD_CLEAN_INVALID2,
4753 + VMCS_SM_CMD_IMPORT_DMABUF,
4755 + VMCS_SM_CMD_LAST /* Do not delete */
4758 +/* Cache type supported, conveniently matches the user space definition in
4761 +enum vmcs_sm_cache_e {
4762 + VMCS_SM_CACHE_NONE,
4763 + VMCS_SM_CACHE_HOST,
4765 + VMCS_SM_CACHE_BOTH,
4768 +/* Cache functions */
4769 +#define VCSM_CACHE_OP_INV 0x01
4770 +#define VCSM_CACHE_OP_CLEAN 0x02
4771 +#define VCSM_CACHE_OP_FLUSH 0x03
4773 +/* IOCTL Data structures */
4774 +struct vmcs_sm_ioctl_alloc {
4775 + /* user -> kernel */
4776 + unsigned int size;
4778 + enum vmcs_sm_cache_e cached;
4779 + char name[VMCS_SM_RESOURCE_NAME];
4781 + /* kernel -> user */
4782 + unsigned int handle;
4783 + /* unsigned int base_addr; */
4786 +struct vmcs_sm_ioctl_alloc_share {
4787 + /* user -> kernel */
4788 + unsigned int handle;
4789 + unsigned int size;
4792 +struct vmcs_sm_ioctl_free {
4793 + /* user -> kernel */
4794 + unsigned int handle;
4795 + /* unsigned int base_addr; */
4798 +struct vmcs_sm_ioctl_lock_unlock {
4799 + /* user -> kernel */
4800 + unsigned int handle;
4802 + /* kernel -> user */
4803 + unsigned int addr;
4806 +struct vmcs_sm_ioctl_lock_cache {
4807 + /* user -> kernel */
4808 + unsigned int handle;
4809 + enum vmcs_sm_cache_e cached;
4812 +struct vmcs_sm_ioctl_resize {
4813 + /* user -> kernel */
4814 + unsigned int handle;
4815 + unsigned int new_size;
4817 + /* kernel -> user */
4818 + unsigned int old_size;
4821 +struct vmcs_sm_ioctl_map {
4822 + /* user -> kernel */
4823 + /* and kernel -> user */
4825 + unsigned int handle;
4826 + unsigned int addr;
4828 + /* kernel -> user */
4829 + unsigned int size;
4832 +struct vmcs_sm_ioctl_walk {
4833 + /* user -> kernel */
4837 +struct vmcs_sm_ioctl_chk {
4838 + /* user -> kernel */
4839 + unsigned int handle;
4841 + /* kernel -> user */
4842 + unsigned int addr;
4843 + unsigned int size;
4844 + enum vmcs_sm_cache_e cache;
4847 +struct vmcs_sm_ioctl_size {
4848 + /* user -> kernel */
4849 + unsigned int handle;
4851 + /* kernel -> user */
4852 + unsigned int size;
4855 +struct vmcs_sm_ioctl_cache {
4856 + /* user -> kernel */
4857 + unsigned int handle;
4858 + unsigned int addr;
4859 + unsigned int size;
4863 + * Cache functions to be set to struct vmcs_sm_ioctl_clean_invalid cmd and
4864 + * vmcs_sm_ioctl_clean_invalid2 invalidate_mode.
4866 +#define VCSM_CACHE_OP_NOP 0x00
4867 +#define VCSM_CACHE_OP_INV 0x01
4868 +#define VCSM_CACHE_OP_CLEAN 0x02
4869 +#define VCSM_CACHE_OP_FLUSH 0x03
4871 +struct vmcs_sm_ioctl_clean_invalid {
4872 + /* user -> kernel */
4875 + unsigned int handle;
4876 + unsigned int addr;
4877 + unsigned int size;
4881 +struct vmcs_sm_ioctl_clean_invalid2 {
4884 + struct vmcs_sm_ioctl_clean_invalid_block {
4885 + uint16_t invalidate_mode;
4886 + uint16_t block_count;
4887 + void * start_address;
4888 + uint32_t block_size;
4889 + uint32_t inter_block_stride;
4893 +struct vmcs_sm_ioctl_import_dmabuf {
4894 + /* user -> kernel */
4896 + enum vmcs_sm_cache_e cached;
4897 + char name[VMCS_SM_RESOURCE_NAME];
4899 + /* kernel -> user */
4900 + unsigned int handle;
4903 +/* IOCTL numbers */
4904 +#define VMCS_SM_IOCTL_MEM_ALLOC\
4905 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_ALLOC,\
4906 + struct vmcs_sm_ioctl_alloc)
4907 +#define VMCS_SM_IOCTL_MEM_ALLOC_SHARE\
4908 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_ALLOC_SHARE,\
4909 + struct vmcs_sm_ioctl_alloc_share)
4910 +#define VMCS_SM_IOCTL_MEM_LOCK\
4911 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_LOCK,\
4912 + struct vmcs_sm_ioctl_lock_unlock)
4913 +#define VMCS_SM_IOCTL_MEM_LOCK_CACHE\
4914 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_LOCK_CACHE,\
4915 + struct vmcs_sm_ioctl_lock_cache)
4916 +#define VMCS_SM_IOCTL_MEM_UNLOCK\
4917 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_UNLOCK,\
4918 + struct vmcs_sm_ioctl_lock_unlock)
4919 +#define VMCS_SM_IOCTL_MEM_RESIZE\
4920 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_RESIZE,\
4921 + struct vmcs_sm_ioctl_resize)
4922 +#define VMCS_SM_IOCTL_MEM_FREE\
4923 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_FREE,\
4924 + struct vmcs_sm_ioctl_free)
4925 +#define VMCS_SM_IOCTL_MEM_FLUSH\
4926 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_FLUSH,\
4927 + struct vmcs_sm_ioctl_cache)
4928 +#define VMCS_SM_IOCTL_MEM_INVALID\
4929 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_INVALID,\
4930 + struct vmcs_sm_ioctl_cache)
4931 +#define VMCS_SM_IOCTL_MEM_CLEAN_INVALID\
4932 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_CLEAN_INVALID,\
4933 + struct vmcs_sm_ioctl_clean_invalid)
4934 +#define VMCS_SM_IOCTL_MEM_CLEAN_INVALID2\
4935 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_CLEAN_INVALID2,\
4936 + struct vmcs_sm_ioctl_clean_invalid2)
4938 +#define VMCS_SM_IOCTL_SIZE_USR_HDL\
4939 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_SIZE_USR_HANDLE,\
4940 + struct vmcs_sm_ioctl_size)
4941 +#define VMCS_SM_IOCTL_CHK_USR_HDL\
4942 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_CHK_USR_HANDLE,\
4943 + struct vmcs_sm_ioctl_chk)
4945 +#define VMCS_SM_IOCTL_MAP_USR_HDL\
4946 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_MAPPED_USR_HANDLE,\
4947 + struct vmcs_sm_ioctl_map)
4948 +#define VMCS_SM_IOCTL_MAP_USR_ADDRESS\
4949 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_MAPPED_USR_ADDRESS,\
4950 + struct vmcs_sm_ioctl_map)
4951 +#define VMCS_SM_IOCTL_MAP_VC_HDL_FR_ADDR\
4952 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_MAPPED_VC_HDL_FROM_ADDR,\
4953 + struct vmcs_sm_ioctl_map)
4954 +#define VMCS_SM_IOCTL_MAP_VC_HDL_FR_HDL\
4955 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_MAPPED_VC_HDL_FROM_HDL,\
4956 + struct vmcs_sm_ioctl_map)
4957 +#define VMCS_SM_IOCTL_MAP_VC_ADDR_FR_HDL\
4958 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_MAPPED_VC_ADDR_FROM_HDL,\
4959 + struct vmcs_sm_ioctl_map)
4961 +#define VMCS_SM_IOCTL_VC_WALK_ALLOC\
4962 + _IO(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_VC_WALK_ALLOC)
4963 +#define VMCS_SM_IOCTL_HOST_WALK_MAP\
4964 + _IO(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_HOST_WALK_MAP)
4965 +#define VMCS_SM_IOCTL_HOST_WALK_PID_ALLOC\
4966 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_HOST_WALK_PID_ALLOC,\
4967 + struct vmcs_sm_ioctl_walk)
4968 +#define VMCS_SM_IOCTL_HOST_WALK_PID_MAP\
4969 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_HOST_WALK_PID_MAP,\
4970 + struct vmcs_sm_ioctl_walk)
4972 +#define VMCS_SM_IOCTL_MEM_IMPORT_DMABUF\
4973 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_IMPORT_DMABUF,\
4974 + struct vmcs_sm_ioctl_import_dmabuf)
4976 +/* ---- Variable Externs ------------------------------------------------- */
4978 +/* ---- Function Prototypes ---------------------------------------------- */
4980 +#endif /* __VMCS_SM_IOCTL_H__INCLUDED__ */