1 From 6f27d1904c3f46e00388b1603ceed359387349d2 Mon Sep 17 00:00:00 2001
2 From: Tim Gover <tgover@broadcom.com>
3 Date: Tue, 22 Jul 2014 15:41:04 +0100
4 Subject: [PATCH 047/806] vcsm: VideoCore shared memory service for BCM2835
6 Content-Type: text/plain; charset=UTF-8
7 Content-Transfer-Encoding: 8bit
9 Add experimental support for the VideoCore shared memory service.
10 This allows user processes to allocate memory from VideoCore's
11 GPU relocatable heap and mmap the buffers. Additionally, the memory
12 handles can be passed to other VideoCore services such as MMAL, OpenMax
16 * This driver was originally released for BCM28155 which has a different
17 cache architecture to BCM2835. Consequently, in this release only
18 uncached mappings are supported. However, there's no fundamental
19 reason why cached mappings cannot be supported on BCM2835
20 * More refactoring is required to remove the typedefs.
21 * Re-enable some of the commented-out debug-fs statistics which were
22 disabled when migrating code from proc-fs.
23 * There's a lot of code to support sharing of VCSM in order to support
24 Android. This could probably be done more cleanly or perhaps just
27 Signed-off-by: Tim Gover <timgover@gmail.com>
29 config: Disable VC_SM for now to fix hang with cutdown kernel
31 vcsm: Use boolean as it cannot be built as module
33 On building the bcm_vc_sm as a module we get the following error:
35 v7_dma_flush_range and do_munmap are undefined in vc-sm.ko.
37 Fix by making it not an option to build as module
39 vcsm: Add ioctl for custom cache flushing
41 vc-sm: Move headers out of arch directory
43 Signed-off-by: Noralf Trønnes <noralf@tronnes.org>
45 vcsm: Treat EBUSY as success rather than SIGBUS
47 Currently if two cores access the same page concurrently one will return VM_FAULT_NOPAGE
48 and the other VM_FAULT_SIGBUS, crashing the user code.
50 Also report when mapping fails.
52 Signed-off-by: popcornmix <popcornmix@gmail.com>
54 vcsm: Provide new ioctl to clean/invalidate a 2D block
56 vcsm: Convert to loading via device tree.
58 Signed-off-by: Dave Stevenson <dave.stevenson@raspberrypi.org>
60 VCSM: New option to import a DMABUF for VPU use
62 Takes a dmabuf, and then calls over to the VPU to wrap
63 it into a suitable handle.
65 Signed-off-by: Dave Stevenson <dave.stevenson@raspberrypi.org>
67 vcsm: fix multi-platform build
69 vcsm: add macros for cache functions
71 vcsm: use dma APIs for cache functions
73 * Will handle multi-platform builds
75 vcsm: Fix up macros to avoid breaking numbers used by existing apps
77 vcsm: Define cache operation constants in user header
79 Without this change, users have to use raw values (1, 2, 3) to specify
82 Signed-off-by: Sugizaki Yukimasa <i.can.speak.c.and.basic@gmail.com>
84 vcsm: Support for finding user/vc handle in memory pool
86 vmcs_sm_{usr,vc}_handle_from_pid_and_address() were failing to find
87 handle if specified user pointer is not exactly the one that the memory
88 locking call returned even if the pointer is in range of map/resource.
89 So fixed the functions to match the range.
91 Signed-off-by: Sugizaki Yukimasa <i.can.speak.c.and.basic@gmail.com>
93 vcsm: Unify cache manipulating functions
95 Signed-off-by: Sugizaki Yukimasa <i.can.speak.c.and.basic@gmail.com>
97 vcsm: Fix obscure conditions
99 Signed-off-by: Sugizaki Yukimasa <i.can.speak.c.and.basic@gmail.com>
101 vcsm: Fix memory leaking on clean_invalid2 ioctl handler
103 Signed-off-by: Sugizaki Yukimasa <i.can.speak.c.and.basic@gmail.com>
105 vcsm: Describe the use of cache operation constants
107 Signed-off-by: Sugizaki Yukimasa <i.can.speak.c.and.basic@gmail.com>
109 vcsm: Fix obscure conditions again
111 Signed-off-by: Sugizaki Yukimasa <i.can.speak.c.and.basic@gmail.com>
113 vcsm: Add no-op cache operation constant
115 Signed-off-by: Sugizaki Yukimasa <i.can.speak.c.and.basic@gmail.com>
117 vcsm: Revert to do page-table-walk-based cache manipulating on some ioctl calls
119 On FLUSH, INVALID, CLEAN_INVALID ioctl calls, cache operations based on
120 page table walk were used in case that the buffer of the cache is not
121 pinned. So reverted to do page-table-based cache manipulating.
123 Signed-off-by: Sugizaki Yukimasa <i.can.speak.c.and.basic@gmail.com>
125 vcsm: Define cache operation constants in user header
127 Without this change, users have to use raw values (1, 2, 3) to specify
130 Signed-off-by: Sugizaki Yukimasa <i.can.speak.c.and.basic@gmail.com>
132 drivers/char/Kconfig | 2 +
133 drivers/char/Makefile | 1 +
134 drivers/char/broadcom/Kconfig | 10 +
135 drivers/char/broadcom/Makefile | 1 +
136 drivers/char/broadcom/vc_sm/Makefile | 9 +
137 drivers/char/broadcom/vc_sm/vc_sm_defs.h | 237 ++
138 drivers/char/broadcom/vc_sm/vc_sm_knl.h | 53 +
139 drivers/char/broadcom/vc_sm/vc_vchi_sm.c | 516 ++++
140 drivers/char/broadcom/vc_sm/vc_vchi_sm.h | 102 +
141 drivers/char/broadcom/vc_sm/vmcs_sm.c | 3543 ++++++++++++++++++++++
142 include/linux/broadcom/vmcs_sm_ioctl.h | 294 ++
143 11 files changed, 4768 insertions(+)
144 create mode 100644 drivers/char/broadcom/vc_sm/Makefile
145 create mode 100644 drivers/char/broadcom/vc_sm/vc_sm_defs.h
146 create mode 100644 drivers/char/broadcom/vc_sm/vc_sm_knl.h
147 create mode 100644 drivers/char/broadcom/vc_sm/vc_vchi_sm.c
148 create mode 100644 drivers/char/broadcom/vc_sm/vc_vchi_sm.h
149 create mode 100644 drivers/char/broadcom/vc_sm/vmcs_sm.c
150 create mode 100644 include/linux/broadcom/vmcs_sm_ioctl.h
152 --- a/drivers/char/Kconfig
153 +++ b/drivers/char/Kconfig
156 menu "Character devices"
158 +source "drivers/char/broadcom/Kconfig"
160 source "drivers/tty/Kconfig"
163 --- a/drivers/char/Makefile
164 +++ b/drivers/char/Makefile
165 @@ -58,3 +58,4 @@ js-rtc-y = rtc.o
166 obj-$(CONFIG_XILLYBUS) += xillybus/
167 obj-$(CONFIG_POWERNV_OP_PANEL) += powernv-op-panel.o
168 obj-$(CONFIG_ADI) += adi.o
169 +obj-$(CONFIG_BRCM_CHAR_DRIVERS) += broadcom/
170 --- a/drivers/char/broadcom/Kconfig
171 +++ b/drivers/char/broadcom/Kconfig
172 @@ -16,3 +16,13 @@ config BCM2708_VCMEM
173 Helper for videocore memory access and total size allocation.
178 + bool "VMCS Shared Memory"
179 + depends on BCM2835_VCHIQ
180 + select BCM2708_VCMEM
181 + select DMA_SHARED_BUFFER
184 + Support for the VC shared memory on the Broadcom reference
185 + design. Uses the VCHIQ stack.
186 --- a/drivers/char/broadcom/Makefile
187 +++ b/drivers/char/broadcom/Makefile
189 obj-$(CONFIG_BCM2708_VCMEM) += vc_mem.o
190 +obj-$(CONFIG_BCM_VC_SM) += vc_sm/
192 +++ b/drivers/char/broadcom/vc_sm/Makefile
194 +ccflags-$(CONFIG_BCM_VC_SM) += -Werror -Wall -Wstrict-prototypes -Wno-trigraphs -O2
195 +ccflags-$(CONFIG_BCM_VC_SM) += -I"drivers/staging/vc04_services" -I"drivers/staging/vc04_services/interface/vchi" -I"drivers/staging/vc04_services/interface/vchiq_arm" -I"$(srctree)/fs/"
196 +ccflags-$(CONFIG_BCM_VC_SM) += -DOS_ASSERT_FAILURE -D__STDC_VERSION=199901L -D__STDC_VERSION__=199901L -D__VCCOREVER__=0 -D__KERNEL__ -D__linux__
198 +obj-$(CONFIG_BCM_VC_SM) := vc-sm.o
204 +++ b/drivers/char/broadcom/vc_sm/vc_sm_defs.h
207 + ****************************************************************************
208 + * Copyright 2011 Broadcom Corporation. All rights reserved.
210 + * Unless you and Broadcom execute a separate written software license
211 + * agreement governing use of this software, this software is licensed to you
212 + * under the terms of the GNU General Public License version 2, available at
213 + * http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
215 + * Notwithstanding the above, under no circumstances may you combine this
216 + * software in any way with any other Broadcom software provided under a
217 + * license other than the GPL, without Broadcom's express prior written
219 + ****************************************************************************
222 +#ifndef __VC_SM_DEFS_H__INCLUDED__
223 +#define __VC_SM_DEFS_H__INCLUDED__
225 +/* FourCC code used for VCHI connection */
226 +#define VC_SM_SERVER_NAME MAKE_FOURCC("SMEM")
228 +/* Maximum message length */
229 +#define VC_SM_MAX_MSG_LEN (sizeof(union vc_sm_msg_union_t) + \
230 + sizeof(struct vc_sm_msg_hdr_t))
231 +#define VC_SM_MAX_RSP_LEN (sizeof(union vc_sm_msg_union_t))
233 +/* Resource name maximum size */
234 +#define VC_SM_RESOURCE_NAME 32
236 +enum vc_sm_msg_type {
237 + /* Message types supported for HOST->VC direction */
239 + /* Allocate shared memory block */
240 + VC_SM_MSG_TYPE_ALLOC,
241 + /* Lock allocated shared memory block */
242 + VC_SM_MSG_TYPE_LOCK,
243 + /* Unlock allocated shared memory block */
244 + VC_SM_MSG_TYPE_UNLOCK,
245 + /* Unlock allocated shared memory block, do not answer command */
246 + VC_SM_MSG_TYPE_UNLOCK_NOANS,
247 + /* Free shared memory block */
248 + VC_SM_MSG_TYPE_FREE,
249 + /* Resize a shared memory block */
250 + VC_SM_MSG_TYPE_RESIZE,
251 + /* Walk the allocated shared memory block(s) */
252 + VC_SM_MSG_TYPE_WALK_ALLOC,
254 + /* A previously applied action will need to be reverted */
255 + VC_SM_MSG_TYPE_ACTION_CLEAN,
258 + * Import a physical address and wrap into a MEM_HANDLE_T.
259 + * Release with VC_SM_MSG_TYPE_FREE.
261 + VC_SM_MSG_TYPE_IMPORT,
263 + /* Message types supported for VC->HOST direction */
266 + * VC has finished with an imported memory allocation.
267 + * Release any Linux reference counts on the underlying block.
269 + VC_SM_MSG_TYPE_RELEASED,
274 +/* Type of memory to be allocated */
275 +enum vc_sm_alloc_type_t {
276 + VC_SM_ALLOC_CACHED,
277 + VC_SM_ALLOC_NON_CACHED,
280 +/* Message header for all messages in HOST->VC direction */
281 +struct vc_sm_msg_hdr_t {
288 +/* Request to allocate memory (HOST->VC) */
289 +struct vc_sm_alloc_t {
290 + /* type of memory to allocate */
291 + enum vc_sm_alloc_type_t type;
292 + /* byte amount of data to allocate per unit */
293 + uint32_t base_unit;
294 + /* number of unit to allocate */
296 + /* alignement to be applied on allocation */
297 + uint32_t alignement;
298 + /* identity of who allocated this block */
299 + uint32_t allocator;
300 + /* resource name (for easier tracking on vc side) */
301 + char name[VC_SM_RESOURCE_NAME];
305 +/* Result of a requested memory allocation (VC->HOST) */
306 +struct vc_sm_alloc_result_t {
307 + /* Transaction identifier */
310 + /* Resource handle */
311 + uint32_t res_handle;
312 + /* Pointer to resource buffer */
314 + /* Resource base size (bytes) */
315 + uint32_t res_base_size;
316 + /* Resource number */
321 +/* Request to free a previously allocated memory (HOST->VC) */
322 +struct vc_sm_free_t {
323 + /* Resource handle (returned from alloc) */
324 + uint32_t res_handle;
325 + /* Resource buffer (returned from alloc) */
330 +/* Request to lock a previously allocated memory (HOST->VC) */
331 +struct vc_sm_lock_unlock_t {
332 + /* Resource handle (returned from alloc) */
333 + uint32_t res_handle;
334 + /* Resource buffer (returned from alloc) */
339 +/* Request to resize a previously allocated memory (HOST->VC) */
340 +struct vc_sm_resize_t {
341 + /* Resource handle (returned from alloc) */
342 + uint32_t res_handle;
343 + /* Resource buffer (returned from alloc) */
345 + /* Resource *new* size requested (bytes) */
346 + uint32_t res_new_size;
350 +/* Result of a requested memory lock (VC->HOST) */
351 +struct vc_sm_lock_result_t {
352 + /* Transaction identifier */
355 + /* Resource handle */
356 + uint32_t res_handle;
357 + /* Pointer to resource buffer */
360 + * Pointer to former resource buffer if the memory
363 + uint32_t res_old_mem;
367 +/* Generic result for a request (VC->HOST) */
368 +struct vc_sm_result_t {
369 + /* Transaction identifier */
376 +/* Request to revert a previously applied action (HOST->VC) */
377 +struct vc_sm_action_clean_t {
378 + /* Action of interest */
379 + enum vc_sm_msg_type res_action;
380 + /* Transaction identifier for the action of interest */
381 + uint32_t action_trans_id;
385 +/* Request to remove all data associated with a given allocator (HOST->VC) */
386 +struct vc_sm_free_all_t {
387 + /* Allocator identifier */
388 + uint32_t allocator;
391 +/* Request to import memory (HOST->VC) */
392 +struct vc_sm_import {
393 + /* type of memory to allocate */
394 + enum vc_sm_alloc_type_t type;
395 + /* pointer to the VC (ie physical) address of the allocated memory */
397 + /* size of buffer */
399 + /* opaque handle returned in RELEASED messages */
401 + /* Allocator identifier */
402 + uint32_t allocator;
403 + /* resource name (for easier tracking on vc side) */
404 + char name[VC_SM_RESOURCE_NAME];
407 +/* Result of a requested memory import (VC->HOST) */
408 +struct vc_sm_import_result {
409 + /* Transaction identifier */
412 + /* Resource handle */
413 + uint32_t res_handle;
416 +/* Notification that VC has finished with an allocation (VC->HOST) */
417 +struct vc_sm_released {
418 + /* pointer to the VC (ie physical) address of the allocated memory */
420 + /* size of buffer */
422 + /* opaque handle returned in RELEASED messages */
426 +/* Union of ALL messages */
427 +union vc_sm_msg_union_t {
428 + struct vc_sm_alloc_t alloc;
429 + struct vc_sm_alloc_result_t alloc_result;
430 + struct vc_sm_free_t free;
431 + struct vc_sm_lock_unlock_t lock_unlock;
432 + struct vc_sm_action_clean_t action_clean;
433 + struct vc_sm_resize_t resize;
434 + struct vc_sm_lock_result_t lock_result;
435 + struct vc_sm_result_t result;
436 + struct vc_sm_free_all_t free_all;
437 + struct vc_sm_import import;
438 + struct vc_sm_import_result import_result;
439 + struct vc_sm_released released;
442 +#endif /* __VC_SM_DEFS_H__INCLUDED__ */
444 +++ b/drivers/char/broadcom/vc_sm/vc_sm_knl.h
447 + ****************************************************************************
448 + * Copyright 2011 Broadcom Corporation. All rights reserved.
450 + * Unless you and Broadcom execute a separate written software license
451 + * agreement governing use of this software, this software is licensed to you
452 + * under the terms of the GNU General Public License version 2, available at
453 + * http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
455 + * Notwithstanding the above, under no circumstances may you combine this
456 + * software in any way with any other Broadcom software provided under a
457 + * license other than the GPL, without Broadcom's express prior written
459 + ****************************************************************************
462 +#ifndef __VC_SM_KNL_H__INCLUDED__
463 +#define __VC_SM_KNL_H__INCLUDED__
465 +#if !defined(__KERNEL__)
466 +#error "This interface is for kernel use only..."
469 +/* Type of memory to be locked (ie mapped) */
470 +enum vc_sm_lock_cache_mode {
472 + VC_SM_LOCK_NON_CACHED,
475 +/* Allocate a shared memory handle and block. */
476 +int vc_sm_alloc(struct vc_sm_alloc_t *alloc, int *handle);
478 +/* Free a previously allocated shared memory handle and block. */
479 +int vc_sm_free(int handle);
481 +/* Lock a memory handle for use by kernel. */
482 +int vc_sm_lock(int handle, enum vc_sm_lock_cache_mode mode,
483 + unsigned long *data);
485 +/* Unlock a memory handle in use by kernel. */
486 +int vc_sm_unlock(int handle, int flush, int no_vc_unlock);
488 +/* Get an internal resource handle mapped from the external one. */
489 +int vc_sm_int_handle(int handle);
491 +/* Map a shared memory region for use by kernel. */
492 +int vc_sm_map(int handle, unsigned int sm_addr,
493 + enum vc_sm_lock_cache_mode mode, unsigned long *data);
495 +/* Import a block of memory into the GPU space. */
496 +int vc_sm_import_dmabuf(struct dma_buf *dmabuf, int *handle);
498 +#endif /* __VC_SM_KNL_H__INCLUDED__ */
500 +++ b/drivers/char/broadcom/vc_sm/vc_vchi_sm.c
503 + ****************************************************************************
504 + * Copyright 2011-2012 Broadcom Corporation. All rights reserved.
506 + * Unless you and Broadcom execute a separate written software license
507 + * agreement governing use of this software, this software is licensed to you
508 + * under the terms of the GNU General Public License version 2, available at
509 + * http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
511 + * Notwithstanding the above, under no circumstances may you combine this
512 + * software in any way with any other Broadcom software provided under a
513 + * license other than the GPL, without Broadcom's express prior written
515 + ****************************************************************************
518 +/* ---- Include Files ----------------------------------------------------- */
519 +#include <linux/types.h>
520 +#include <linux/kernel.h>
521 +#include <linux/list.h>
522 +#include <linux/semaphore.h>
523 +#include <linux/mutex.h>
524 +#include <linux/slab.h>
525 +#include <linux/kthread.h>
527 +#include "vc_vchi_sm.h"
530 +#define VC_SM_MIN_VER 0
532 +/* ---- Private Constants and Types -------------------------------------- */
534 +/* Command blocks come from a pool */
535 +#define SM_MAX_NUM_CMD_RSP_BLKS 32
537 +struct sm_cmd_rsp_blk {
538 + struct list_head head; /* To create lists */
539 + struct semaphore sema; /* To be signaled when the response is there */
544 + uint8_t msg[VC_SM_MAX_MSG_LEN];
552 +struct sm_instance {
553 + uint32_t num_connections;
554 + VCHI_SERVICE_HANDLE_T vchi_handle[VCHI_MAX_NUM_CONNECTIONS];
555 + struct task_struct *io_thread;
556 + struct semaphore io_sema;
561 + struct list_head cmd_list;
562 + struct list_head rsp_list;
563 + struct list_head dead_list;
565 + struct sm_cmd_rsp_blk free_blk[SM_MAX_NUM_CMD_RSP_BLKS];
566 + struct list_head free_list;
567 + struct mutex free_lock;
568 + struct semaphore free_sema;
572 +/* ---- Private Variables ------------------------------------------------ */
574 +/* ---- Private Function Prototypes -------------------------------------- */
576 +/* ---- Private Functions ------------------------------------------------ */
578 +bcm2835_vchi_msg_queue(VCHI_SERVICE_HANDLE_T handle,
582 + return vchi_queue_kernel_message(handle,
588 +sm_cmd_rsp_blk *vc_vchi_cmd_create(struct sm_instance *instance,
589 + enum vc_sm_msg_type id, void *msg,
590 + uint32_t size, int wait)
592 + struct sm_cmd_rsp_blk *blk;
593 + struct vc_sm_msg_hdr_t *hdr;
595 + if (down_interruptible(&instance->free_sema)) {
596 + blk = kmalloc(sizeof(*blk), GFP_KERNEL);
601 + sema_init(&blk->sema, 0);
603 + mutex_lock(&instance->free_lock);
605 + list_first_entry(&instance->free_list,
606 + struct sm_cmd_rsp_blk, head);
607 + list_del(&blk->head);
608 + mutex_unlock(&instance->free_lock);
613 + blk->length = sizeof(*hdr) + size;
615 + hdr = (struct vc_sm_msg_hdr_t *) blk->msg;
617 + mutex_lock(&instance->lock);
618 + hdr->trans_id = blk->id = ++instance->trans_id;
619 + mutex_unlock(&instance->lock);
622 + memcpy(hdr->body, msg, size);
628 +vc_vchi_cmd_delete(struct sm_instance *instance, struct sm_cmd_rsp_blk *blk)
635 + mutex_lock(&instance->free_lock);
636 + list_add(&blk->head, &instance->free_list);
637 + mutex_unlock(&instance->free_lock);
638 + up(&instance->free_sema);
641 +static int vc_vchi_sm_videocore_io(void *arg)
643 + struct sm_instance *instance = arg;
644 + struct sm_cmd_rsp_blk *cmd = NULL, *cmd_tmp;
645 + struct vc_sm_result_t *reply;
646 + uint32_t reply_len;
652 + vchi_service_release(instance->vchi_handle[0]);
654 + if (!down_interruptible(&instance->io_sema)) {
655 + vchi_service_use(instance->vchi_handle[0]);
660 + * Get new command and move it to response list
662 + mutex_lock(&instance->lock);
663 + if (list_empty(&instance->cmd_list)) {
664 + /* no more commands to process */
665 + mutex_unlock(&instance->lock);
669 + list_first_entry(&instance->cmd_list,
670 + struct sm_cmd_rsp_blk,
672 + list_move(&cmd->head, &instance->rsp_list);
674 + mutex_unlock(&instance->lock);
676 + /* Send the command */
677 + status = bcm2835_vchi_msg_queue(
678 + instance->vchi_handle[0],
679 + cmd->msg, cmd->length);
681 + pr_err("%s: failed to queue message (%d)",
685 + /* If no reply is needed then we're done */
687 + mutex_lock(&instance->lock);
688 + list_del(&cmd->head);
689 + mutex_unlock(&instance->lock);
690 + vc_vchi_cmd_delete(instance, cmd);
701 + while (!vchi_msg_peek
702 + (instance->vchi_handle[0], (void **)&reply,
703 + &reply_len, VCHI_FLAGS_NONE)) {
704 + mutex_lock(&instance->lock);
705 + list_for_each_entry(cmd, &instance->rsp_list,
707 + if (cmd->id == reply->trans_id)
710 + mutex_unlock(&instance->lock);
712 + if (&cmd->head == &instance->rsp_list) {
713 + pr_debug("%s: received response %u, throw away...",
714 + __func__, reply->trans_id);
715 + } else if (reply_len > sizeof(cmd->msg)) {
716 + pr_err("%s: reply too big (%u) %u, throw away...",
717 + __func__, reply_len,
720 + memcpy(cmd->msg, reply, reply_len);
724 + vchi_msg_remove(instance->vchi_handle[0]);
727 + /* Go through the dead list and free them */
728 + mutex_lock(&instance->lock);
729 + list_for_each_entry_safe(cmd, cmd_tmp,
730 + &instance->dead_list, head) {
731 + list_del(&cmd->head);
732 + vc_vchi_cmd_delete(instance, cmd);
734 + mutex_unlock(&instance->lock);
741 +static void vc_sm_vchi_callback(void *param,
742 + const VCHI_CALLBACK_REASON_T reason,
745 + struct sm_instance *instance = param;
750 + case VCHI_CALLBACK_MSG_AVAILABLE:
751 + up(&instance->io_sema);
754 + case VCHI_CALLBACK_SERVICE_CLOSED:
755 + pr_info("%s: service CLOSED!!", __func__);
761 +struct sm_instance *vc_vchi_sm_init(VCHI_INSTANCE_T vchi_instance,
762 + VCHI_CONNECTION_T **vchi_connections,
763 + uint32_t num_connections)
766 + struct sm_instance *instance;
769 + pr_debug("%s: start", __func__);
771 + if (num_connections > VCHI_MAX_NUM_CONNECTIONS) {
772 + pr_err("%s: unsupported number of connections %u (max=%u)",
773 + __func__, num_connections, VCHI_MAX_NUM_CONNECTIONS);
777 + /* Allocate memory for this instance */
778 + instance = kzalloc(sizeof(*instance), GFP_KERNEL);
780 + /* Misc initialisations */
781 + mutex_init(&instance->lock);
782 + sema_init(&instance->io_sema, 0);
783 + INIT_LIST_HEAD(&instance->cmd_list);
784 + INIT_LIST_HEAD(&instance->rsp_list);
785 + INIT_LIST_HEAD(&instance->dead_list);
786 + INIT_LIST_HEAD(&instance->free_list);
787 + sema_init(&instance->free_sema, SM_MAX_NUM_CMD_RSP_BLKS);
788 + mutex_init(&instance->free_lock);
789 + for (i = 0; i < SM_MAX_NUM_CMD_RSP_BLKS; i++) {
790 + sema_init(&instance->free_blk[i].sema, 0);
791 + list_add(&instance->free_blk[i].head, &instance->free_list);
794 + /* Open the VCHI service connections */
795 + instance->num_connections = num_connections;
796 + for (i = 0; i < num_connections; i++) {
797 + SERVICE_CREATION_T params = {
798 + VCHI_VERSION_EX(VC_SM_VER, VC_SM_MIN_VER),
800 + vchi_connections[i],
803 + vc_sm_vchi_callback,
810 + status = vchi_service_open(vchi_instance,
811 + ¶ms, &instance->vchi_handle[i]);
813 + pr_err("%s: failed to open VCHI service (%d)",
816 + goto err_close_services;
820 + /* Create the thread which takes care of all io to/from videoocore. */
821 + instance->io_thread = kthread_create(&vc_vchi_sm_videocore_io,
822 + (void *)instance, "SMIO");
823 + if (instance->io_thread == NULL) {
824 + pr_err("%s: failed to create SMIO thread", __func__);
826 + goto err_close_services;
828 + set_user_nice(instance->io_thread, -10);
829 + wake_up_process(instance->io_thread);
831 + pr_debug("%s: success - instance 0x%x", __func__,
832 + (unsigned int)instance);
836 + for (i = 0; i < instance->num_connections; i++) {
837 + if (instance->vchi_handle[i] != NULL)
838 + vchi_service_close(instance->vchi_handle[i]);
842 + pr_debug("%s: FAILED", __func__);
846 +int vc_vchi_sm_stop(struct sm_instance **handle)
848 + struct sm_instance *instance;
851 + if (handle == NULL) {
852 + pr_err("%s: invalid pointer to handle %p", __func__, handle);
856 + if (*handle == NULL) {
857 + pr_err("%s: invalid handle %p", __func__, *handle);
861 + instance = *handle;
863 + /* Close all VCHI service connections */
864 + for (i = 0; i < instance->num_connections; i++) {
867 + vchi_service_use(instance->vchi_handle[i]);
869 + success = vchi_service_close(instance->vchi_handle[i]);
881 +int vc_vchi_sm_send_msg(struct sm_instance *handle,
882 + enum vc_sm_msg_type msg_id,
883 + void *msg, uint32_t msg_size,
884 + void *result, uint32_t result_size,
885 + uint32_t *cur_trans_id, uint8_t wait_reply)
888 + struct sm_instance *instance = handle;
889 + struct sm_cmd_rsp_blk *cmd_blk;
891 + if (handle == NULL) {
892 + pr_err("%s: invalid handle", __func__);
896 + pr_err("%s: invalid msg pointer", __func__);
901 + vc_vchi_cmd_create(instance, msg_id, msg, msg_size, wait_reply);
902 + if (cmd_blk == NULL) {
903 + pr_err("[%s]: failed to allocate global tracking resource",
908 + if (cur_trans_id != NULL)
909 + *cur_trans_id = cmd_blk->id;
911 + mutex_lock(&instance->lock);
912 + list_add_tail(&cmd_blk->head, &instance->cmd_list);
913 + mutex_unlock(&instance->lock);
914 + up(&instance->io_sema);
920 + /* Wait for the response */
921 + if (down_interruptible(&cmd_blk->sema)) {
922 + mutex_lock(&instance->lock);
923 + if (!cmd_blk->sent) {
924 + list_del(&cmd_blk->head);
925 + mutex_unlock(&instance->lock);
926 + vc_vchi_cmd_delete(instance, cmd_blk);
929 + mutex_unlock(&instance->lock);
931 + mutex_lock(&instance->lock);
932 + list_move(&cmd_blk->head, &instance->dead_list);
933 + mutex_unlock(&instance->lock);
934 + up(&instance->io_sema);
935 + return -EINTR; /* We're done */
938 + if (result && result_size) {
939 + memcpy(result, cmd_blk->msg, result_size);
941 + struct vc_sm_result_t *res =
942 + (struct vc_sm_result_t *) cmd_blk->msg;
943 + status = (res->success == 0) ? 0 : -ENXIO;
946 + mutex_lock(&instance->lock);
947 + list_del(&cmd_blk->head);
948 + mutex_unlock(&instance->lock);
949 + vc_vchi_cmd_delete(instance, cmd_blk);
953 +int vc_vchi_sm_alloc(struct sm_instance *handle, struct vc_sm_alloc_t *msg,
954 + struct vc_sm_alloc_result_t *result,
955 + uint32_t *cur_trans_id)
957 + return vc_vchi_sm_send_msg(handle, VC_SM_MSG_TYPE_ALLOC,
958 + msg, sizeof(*msg), result, sizeof(*result),
962 +int vc_vchi_sm_free(struct sm_instance *handle,
963 + struct vc_sm_free_t *msg, uint32_t *cur_trans_id)
965 + return vc_vchi_sm_send_msg(handle, VC_SM_MSG_TYPE_FREE,
966 + msg, sizeof(*msg), 0, 0, cur_trans_id, 0);
969 +int vc_vchi_sm_lock(struct sm_instance *handle,
970 + struct vc_sm_lock_unlock_t *msg,
971 + struct vc_sm_lock_result_t *result,
972 + uint32_t *cur_trans_id)
974 + return vc_vchi_sm_send_msg(handle, VC_SM_MSG_TYPE_LOCK,
975 + msg, sizeof(*msg), result, sizeof(*result),
979 +int vc_vchi_sm_unlock(struct sm_instance *handle,
980 + struct vc_sm_lock_unlock_t *msg,
981 + uint32_t *cur_trans_id, uint8_t wait_reply)
983 + return vc_vchi_sm_send_msg(handle, wait_reply ?
984 + VC_SM_MSG_TYPE_UNLOCK :
985 + VC_SM_MSG_TYPE_UNLOCK_NOANS, msg,
986 + sizeof(*msg), 0, 0, cur_trans_id,
990 +int vc_vchi_sm_resize(struct sm_instance *handle, struct vc_sm_resize_t *msg,
991 + uint32_t *cur_trans_id)
993 + return vc_vchi_sm_send_msg(handle, VC_SM_MSG_TYPE_RESIZE,
994 + msg, sizeof(*msg), 0, 0, cur_trans_id, 1);
997 +int vc_vchi_sm_walk_alloc(struct sm_instance *handle)
999 + return vc_vchi_sm_send_msg(handle, VC_SM_MSG_TYPE_WALK_ALLOC,
1000 + 0, 0, 0, 0, 0, 0);
1003 +int vc_vchi_sm_clean_up(struct sm_instance *handle,
1004 + struct vc_sm_action_clean_t *msg)
1006 + return vc_vchi_sm_send_msg(handle, VC_SM_MSG_TYPE_ACTION_CLEAN,
1007 + msg, sizeof(*msg), 0, 0, 0, 0);
1010 +int vc_vchi_sm_import(struct sm_instance *handle, struct vc_sm_import *msg,
1011 + struct vc_sm_import_result *result,
1012 + uint32_t *cur_trans_id)
1014 + return vc_vchi_sm_send_msg(handle, VC_SM_MSG_TYPE_IMPORT,
1015 + msg, sizeof(*msg), result, sizeof(*result),
1019 +++ b/drivers/char/broadcom/vc_sm/vc_vchi_sm.h
1022 + ****************************************************************************
1023 + * Copyright 2011 Broadcom Corporation. All rights reserved.
1025 + * Unless you and Broadcom execute a separate written software license
1026 + * agreement governing use of this software, this software is licensed to you
1027 + * under the terms of the GNU General Public License version 2, available at
1028 + * http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
1030 + * Notwithstanding the above, under no circumstances may you combine this
1031 + * software in any way with any other Broadcom software provided under a
1032 + * license other than the GPL, without Broadcom's express prior written
1034 + ****************************************************************************
1037 +#ifndef __VC_VCHI_SM_H__INCLUDED__
1038 +#define __VC_VCHI_SM_H__INCLUDED__
1040 +#include "interface/vchi/vchi.h"
1042 +#include "vc_sm_defs.h"
1045 + * Forward declare.
1047 +struct sm_instance;
1050 + * Initialize the shared memory service, opens up vchi connection to talk to it.
1052 +struct sm_instance *vc_vchi_sm_init(VCHI_INSTANCE_T vchi_instance,
1053 + VCHI_CONNECTION_T **vchi_connections,
1054 + uint32_t num_connections);
1057 + * Terminates the shared memory service.
1059 +int vc_vchi_sm_stop(struct sm_instance **handle);
1062 + * Ask the shared memory service to allocate some memory on videocre and
1063 + * return the result of this allocation (which upon success will be a pointer
1064 + * to some memory in videocore space).
1066 +int vc_vchi_sm_alloc(struct sm_instance *handle, struct vc_sm_alloc_t *alloc,
1067 + struct vc_sm_alloc_result_t *alloc_result,
1068 + uint32_t *trans_id);
1071 + * Ask the shared memory service to free up some memory that was previously
1072 + * allocated by the vc_vchi_sm_alloc function call.
1074 +int vc_vchi_sm_free(struct sm_instance *handle,
1075 + struct vc_sm_free_t *free, uint32_t *trans_id);
1078 + * Ask the shared memory service to lock up some memory that was previously
1079 + * allocated by the vc_vchi_sm_alloc function call.
1081 +int vc_vchi_sm_lock(struct sm_instance *handle,
1082 + struct vc_sm_lock_unlock_t *lock_unlock,
1083 + struct vc_sm_lock_result_t *lock_result,
1084 + uint32_t *trans_id);
1087 + * Ask the shared memory service to unlock some memory that was previously
1088 + * allocated by the vc_vchi_sm_alloc function call.
1090 +int vc_vchi_sm_unlock(struct sm_instance *handle,
1091 + struct vc_sm_lock_unlock_t *lock_unlock,
1092 + uint32_t *trans_id, uint8_t wait_reply);
1095 + * Ask the shared memory service to resize some memory that was previously
1096 + * allocated by the vc_vchi_sm_alloc function call.
1098 +int vc_vchi_sm_resize(struct sm_instance *handle,
1099 + struct vc_sm_resize_t *resize, uint32_t *trans_id);
1102 + * Walk the allocated resources on the videocore side, the allocation will
1103 + * show up in the log. This is purely for debug/information and takes no
1104 + * specific actions.
1106 +int vc_vchi_sm_walk_alloc(struct sm_instance *handle);
1109 + * Clean up following a previously interrupted action which left the system
1110 + * in a bad state of some sort.
1112 +int vc_vchi_sm_clean_up(struct sm_instance *handle,
1113 + struct vc_sm_action_clean_t *action_clean);
1116 + * Import a contiguous block of memory and wrap it in a GPU MEM_HANDLE_T.
1118 +int vc_vchi_sm_import(struct sm_instance *handle, struct vc_sm_import *msg,
1119 + struct vc_sm_import_result *result,
1120 + uint32_t *cur_trans_id);
1122 +#endif /* __VC_VCHI_SM_H__INCLUDED__ */
1124 +++ b/drivers/char/broadcom/vc_sm/vmcs_sm.c
1127 + ****************************************************************************
1128 + * Copyright 2011-2012 Broadcom Corporation. All rights reserved.
1130 + * Unless you and Broadcom execute a separate written software license
1131 + * agreement governing use of this software, this software is licensed to you
1132 + * under the terms of the GNU General Public License version 2, available at
1133 + * http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
1135 + * Notwithstanding the above, under no circumstances may you combine this
1136 + * software in any way with any other Broadcom software provided under a
1137 + * license other than the GPL, without Broadcom's express prior written
1139 + ****************************************************************************
1142 +/* ---- Include Files ----------------------------------------------------- */
1144 +#include <linux/cdev.h>
1145 +#include <linux/broadcom/vc_mem.h>
1146 +#include <linux/device.h>
1147 +#include <linux/debugfs.h>
1148 +#include <linux/dma-mapping.h>
1149 +#include <linux/dma-buf.h>
1150 +#include <linux/errno.h>
1151 +#include <linux/fs.h>
1152 +#include <linux/hugetlb.h>
1153 +#include <linux/ioctl.h>
1154 +#include <linux/kernel.h>
1155 +#include <linux/list.h>
1156 +#include <linux/module.h>
1157 +#include <linux/mm.h>
1158 +#include <linux/of.h>
1159 +#include <linux/platform_device.h>
1160 +#include <linux/pfn.h>
1161 +#include <linux/proc_fs.h>
1162 +#include <linux/pagemap.h>
1163 +#include <linux/semaphore.h>
1164 +#include <linux/slab.h>
1165 +#include <linux/seq_file.h>
1166 +#include <linux/types.h>
1167 +#include <asm/cacheflush.h>
1169 +#include "vchiq_connected.h"
1170 +#include "vc_vchi_sm.h"
1172 +#include <linux/broadcom/vmcs_sm_ioctl.h>
1173 +#include "vc_sm_knl.h"
1175 +/* ---- Private Constants and Types --------------------------------------- */
1177 +#define DEVICE_NAME "vcsm"
1178 +#define DRIVER_NAME "bcm2835-vcsm"
1179 +#define DEVICE_MINOR 0
1181 +#define VC_SM_DIR_ROOT_NAME "vc-smem"
1182 +#define VC_SM_DIR_ALLOC_NAME "alloc"
1183 +#define VC_SM_STATE "state"
1184 +#define VC_SM_STATS "statistics"
1185 +#define VC_SM_RESOURCES "resources"
1186 +#define VC_SM_DEBUG "debug"
1187 +#define VC_SM_WRITE_BUF_SIZE 128
1189 +/* Statistics tracked per resource and globally. */
1217 +static const char *const sm_stats_human_read[] = {
1224 + "Cache Invalidate",
1228 +typedef int (*VC_SM_SHOW) (struct seq_file *s, void *v);
1230 + VC_SM_SHOW show; /* Debug fs function hookup. */
1231 + struct dentry *dir_entry; /* Debug fs directory entry. */
1232 + void *priv_data; /* Private data */
1236 +/* Single resource allocation tracked for all devices. */
1238 + struct list_head map_list; /* Linked list of maps. */
1240 + struct sm_resource_t *resource; /* Pointer to the resource. */
1242 + pid_t res_pid; /* PID owning that resource. */
1243 + unsigned int res_vc_hdl; /* Resource handle (videocore). */
1244 + unsigned int res_usr_hdl; /* Resource handle (user). */
1246 + unsigned long res_addr; /* Mapped virtual address. */
1247 + struct vm_area_struct *vma; /* VM area for this mapping. */
1248 + unsigned int ref_count; /* Reference count to this vma. */
1250 + /* Used to link maps associated with a resource. */
1251 + struct list_head resource_map_list;
1254 +/* Single resource allocation tracked for each opened device. */
1255 +struct sm_resource_t {
1256 + struct list_head resource_list; /* List of resources. */
1257 + struct list_head global_resource_list; /* Global list of resources. */
1259 + pid_t pid; /* PID owning that resource. */
1260 + uint32_t res_guid; /* Unique identifier. */
1261 + uint32_t lock_count; /* Lock count for this resource. */
1262 + uint32_t ref_count; /* Ref count for this resource. */
1264 + uint32_t res_handle; /* Resource allocation handle. */
1265 + void *res_base_mem; /* Resource base memory address. */
1266 + uint32_t res_size; /* Resource size allocated. */
1267 + enum vmcs_sm_cache_e res_cached; /* Resource cache type. */
1268 + struct sm_resource_t *res_shared; /* Shared resource */
1270 + enum sm_stats_t res_stats[END_ALL]; /* Resource statistics. */
1272 + uint8_t map_count; /* Counter of mappings for this resource. */
1273 + struct list_head map_list; /* Maps associated with a resource. */
1275 + /* DMABUF related fields */
1276 + struct dma_buf *dma_buf;
1277 + struct dma_buf_attachment *attach;
1278 + struct sg_table *sgt;
1279 + dma_addr_t dma_addr;
1281 + struct sm_priv_data_t *private;
1282 + bool map; /* whether to map pages up front */
1285 +/* Private file data associated with each opened device. */
1286 +struct sm_priv_data_t {
1287 + struct list_head resource_list; /* List of resources. */
1289 + pid_t pid; /* PID of creator. */
1291 + struct dentry *dir_pid; /* Debug fs entries root. */
1292 + struct sm_pde_t dir_stats; /* Debug fs entries statistics sub-tree. */
1293 + struct sm_pde_t dir_res; /* Debug fs resource sub-tree. */
1295 + int restart_sys; /* Tracks restart on interrupt. */
1296 + enum vc_sm_msg_type int_action; /* Interrupted action. */
1297 + uint32_t int_trans_id; /* Interrupted transaction. */
1301 +/* Global state information. */
1302 +struct sm_state_t {
1303 + struct platform_device *pdev;
1304 + struct sm_instance *sm_handle; /* Handle for videocore service. */
1305 + struct dentry *dir_root; /* Debug fs entries root. */
1306 + struct dentry *dir_alloc; /* Debug fs entries allocations. */
1307 + struct sm_pde_t dir_stats; /* Debug fs entries statistics sub-tree. */
1308 + struct sm_pde_t dir_state; /* Debug fs entries state sub-tree. */
1309 + struct dentry *debug; /* Debug fs entries debug. */
1311 + struct mutex map_lock; /* Global map lock. */
1312 + struct list_head map_list; /* List of maps. */
1313 + struct list_head resource_list; /* List of resources. */
1315 + enum sm_stats_t deceased[END_ALL]; /* Natural termination stats. */
1316 + enum sm_stats_t terminated[END_ALL]; /* Forced termination stats. */
1317 + uint32_t res_deceased_cnt; /* Natural termination counter. */
1318 + uint32_t res_terminated_cnt; /* Forced termination counter. */
1320 + struct cdev sm_cdev; /* Device. */
1321 + dev_t sm_devid; /* Device identifier. */
1322 + struct class *sm_class; /* Class. */
1323 + struct device *sm_dev; /* Device. */
1325 + struct sm_priv_data_t *data_knl; /* Kernel internal data tracking. */
1327 + struct mutex lock; /* Global lock. */
1328 + uint32_t guid; /* GUID (next) tracker. */
1332 +/* ---- Private Variables ----------------------------------------------- */
1334 +static struct sm_state_t *sm_state;
1335 +static int sm_inited;
1338 +static const char *const sm_cache_map_vector[] = {
1346 +/* ---- Private Function Prototypes -------------------------------------- */
1348 +/* ---- Private Functions ------------------------------------------------ */
1350 +static inline unsigned int vcaddr_to_pfn(unsigned long vc_addr)
1352 + unsigned long pfn = vc_addr & 0x3FFFFFFF;
1354 + pfn += mm_vc_mem_phys_addr;
1355 + pfn >>= PAGE_SHIFT;
1360 + * Carries over to the state statistics the statistics once owned by a deceased
1363 +static void vc_sm_resource_deceased(struct sm_resource_t *p_res, int terminated)
1365 + if (sm_state != NULL) {
1366 + if (p_res != NULL) {
1370 + sm_state->res_terminated_cnt++;
1372 + sm_state->res_deceased_cnt++;
1374 + for (ix = 0; ix < END_ALL; ix++) {
1376 + sm_state->terminated[ix] +=
1377 + p_res->res_stats[ix];
1379 + sm_state->deceased[ix] +=
1380 + p_res->res_stats[ix];
1387 + * Fetch a videocore handle corresponding to a mapping of the pid+address
1388 + * returns 0 (ie NULL) if no such handle exists in the global map.
1390 +static unsigned int vmcs_sm_vc_handle_from_pid_and_address(unsigned int pid,
1391 + unsigned int addr)
1393 + struct sm_mmap *map = NULL;
1394 + unsigned int handle = 0;
1396 + if (!sm_state || addr == 0)
1399 + mutex_lock(&(sm_state->map_lock));
1401 + /* Lookup the resource. */
1402 + if (!list_empty(&sm_state->map_list)) {
1403 + list_for_each_entry(map, &sm_state->map_list, map_list) {
1404 + if (map->res_pid != pid)
1406 + if (addr < map->res_addr ||
1407 + addr >= (map->res_addr + map->resource->res_size))
1410 + pr_debug("[%s]: global map %p (pid %u, addr %lx) -> vc-hdl %x (usr-hdl %x)\n",
1411 + __func__, map, map->res_pid, map->res_addr,
1412 + map->res_vc_hdl, map->res_usr_hdl);
1414 + handle = map->res_vc_hdl;
1419 + mutex_unlock(&(sm_state->map_lock));
1423 + * Use a debug log here as it may be a valid situation that we query
1424 + * for something that is not mapped; we do not want a kernel log each
1427 + * There are other error logs that would pop up accordingly if someone
1428 + * subsequently tries to use something invalid after being told not to
1431 + if (handle == 0) {
1432 + pr_debug("[%s]: not a valid map (pid %u, addr %x)\n",
1433 + __func__, pid, addr);
1440 + * Fetch a user handle corresponding to a mapping of the pid+address
1441 + * returns 0 (ie NULL) if no such handle exists in the global map.
1443 +static unsigned int vmcs_sm_usr_handle_from_pid_and_address(unsigned int pid,
1444 + unsigned int addr)
1446 + struct sm_mmap *map = NULL;
1447 + unsigned int handle = 0;
1449 + if (!sm_state || addr == 0)
1452 + mutex_lock(&(sm_state->map_lock));
1454 + /* Lookup the resource. */
1455 + if (!list_empty(&sm_state->map_list)) {
1456 + list_for_each_entry(map, &sm_state->map_list, map_list) {
1457 + if (map->res_pid != pid)
1459 + if (addr < map->res_addr ||
1460 + addr >= (map->res_addr + map->resource->res_size))
1463 + pr_debug("[%s]: global map %p (pid %u, addr %lx) -> usr-hdl %x (vc-hdl %x)\n",
1464 + __func__, map, map->res_pid, map->res_addr,
1465 + map->res_usr_hdl, map->res_vc_hdl);
1467 + handle = map->res_usr_hdl;
1472 + mutex_unlock(&(sm_state->map_lock));
1476 + * Use a debug log here as it may be a valid situation that we query
1477 + * for something that is not mapped yet.
1479 + * There are other error logs that would pop up accordingly if someone
1480 + * subsequently tries to use something invalid after being told not to
1484 + pr_debug("[%s]: not a valid map (pid %u, addr %x)\n",
1485 + __func__, pid, addr);
1490 +#if defined(DO_NOT_USE)
1492 + * Fetch an address corresponding to a mapping of the pid+handle
1493 + * returns 0 (ie NULL) if no such address exists in the global map.
1495 +static unsigned int vmcs_sm_usr_address_from_pid_and_vc_handle(unsigned int pid,
1498 + struct sm_mmap *map = NULL;
1499 + unsigned int addr = 0;
1501 + if (sm_state == NULL || hdl == 0)
1504 + mutex_lock(&(sm_state->map_lock));
1506 + /* Lookup the resource. */
1507 + if (!list_empty(&sm_state->map_list)) {
1508 + list_for_each_entry(map, &sm_state->map_list, map_list) {
1509 + if (map->res_pid != pid || map->res_vc_hdl != hdl)
1512 + pr_debug("[%s]: global map %p (pid %u, vc-hdl %x, usr-hdl %x) -> addr %lx\n",
1513 + __func__, map, map->res_pid, map->res_vc_hdl,
1514 + map->res_usr_hdl, map->res_addr);
1516 + addr = map->res_addr;
1521 + mutex_unlock(&(sm_state->map_lock));
1525 + * Use a debug log here as it may be a valid situation that we query
1526 + * for something that is not mapped; we do not want a kernel log each
1529 + * There are other error logs that would pop up accordingly if someone
1530 + * subsequently tries to use something invalid after being told not to
1534 + pr_debug("[%s]: not a valid map (pid %u, hdl %x)\n",
1535 + __func__, pid, hdl);
1542 + * Fetch an address corresponding to a mapping of the pid+handle
1543 + * returns 0 (ie NULL) if no such address exists in the global map.
1545 +static unsigned int vmcs_sm_usr_address_from_pid_and_usr_handle(unsigned int
1550 + struct sm_mmap *map = NULL;
1551 + unsigned int addr = 0;
1553 + if (sm_state == NULL || hdl == 0)
1556 + mutex_lock(&(sm_state->map_lock));
1558 + /* Lookup the resource. */
1559 + if (!list_empty(&sm_state->map_list)) {
1560 + list_for_each_entry(map, &sm_state->map_list, map_list) {
1561 + if (map->res_pid != pid || map->res_usr_hdl != hdl)
1564 + pr_debug("[%s]: global map %p (pid %u, vc-hdl %x, usr-hdl %x) -> addr %lx\n",
1565 + __func__, map, map->res_pid, map->res_vc_hdl,
1566 + map->res_usr_hdl, map->res_addr);
1568 + addr = map->res_addr;
1573 + mutex_unlock(&(sm_state->map_lock));
1577 + * Use a debug log here as it may be a valid situation that we query
1578 + * for something that is not mapped; we do not want a kernel log each
1581 + * There are other error logs that would pop up accordingly if someone
1582 + * subsequently tries to use something invalid after being told not to
1586 + pr_debug("[%s]: not a valid map (pid %u, hdl %x)\n", __func__,
1592 +/* Adds a resource mapping to the global data list. */
1593 +static void vmcs_sm_add_map(struct sm_state_t *state,
1594 + struct sm_resource_t *resource, struct sm_mmap *map)
1596 + mutex_lock(&(state->map_lock));
1598 + /* Add to the global list of mappings */
1599 + list_add(&map->map_list, &state->map_list);
1601 + /* Add to the list of mappings for this resource */
1602 + list_add(&map->resource_map_list, &resource->map_list);
1603 + resource->map_count++;
1605 + mutex_unlock(&(state->map_lock));
1607 + pr_debug("[%s]: added map %p (pid %u, vc-hdl %x, usr-hdl %x, addr %lx)\n",
1608 + __func__, map, map->res_pid, map->res_vc_hdl,
1609 + map->res_usr_hdl, map->res_addr);
1612 +/* Removes a resource mapping from the global data list. */
1613 +static void vmcs_sm_remove_map(struct sm_state_t *state,
1614 + struct sm_resource_t *resource,
1615 + struct sm_mmap *map)
1617 + mutex_lock(&(state->map_lock));
1619 + /* Remove from the global list of mappings */
1620 + list_del(&map->map_list);
1622 + /* Remove from the list of mapping for this resource */
1623 + list_del(&map->resource_map_list);
1624 + if (resource->map_count > 0)
1625 + resource->map_count--;
1627 + mutex_unlock(&(state->map_lock));
1629 + pr_debug("[%s]: removed map %p (pid %d, vc-hdl %x, usr-hdl %x, addr %lx)\n",
1630 + __func__, map, map->res_pid, map->res_vc_hdl, map->res_usr_hdl,
1636 +/* Read callback for the global state proc entry. */
1637 +static int vc_sm_global_state_show(struct seq_file *s, void *v)
1639 + struct sm_mmap *map = NULL;
1640 + struct sm_resource_t *resource = NULL;
1641 + int map_count = 0;
1642 + int resource_count = 0;
1644 + if (sm_state == NULL)
1647 + seq_printf(s, "\nVC-ServiceHandle 0x%x\n",
1648 + (unsigned int)sm_state->sm_handle);
1650 + /* Log all applicable mapping(s). */
1652 + mutex_lock(&(sm_state->map_lock));
1653 + seq_puts(s, "\nResources\n");
1654 + if (!list_empty(&sm_state->resource_list)) {
1655 + list_for_each_entry(resource, &sm_state->resource_list,
1656 + global_resource_list) {
1659 + seq_printf(s, "\nResource %p\n",
1661 + seq_printf(s, " PID %u\n",
1663 + seq_printf(s, " RES_GUID 0x%x\n",
1664 + resource->res_guid);
1665 + seq_printf(s, " LOCK_COUNT %u\n",
1666 + resource->lock_count);
1667 + seq_printf(s, " REF_COUNT %u\n",
1668 + resource->ref_count);
1669 + seq_printf(s, " res_handle 0x%X\n",
1670 + resource->res_handle);
1671 + seq_printf(s, " res_base_mem %p\n",
1672 + resource->res_base_mem);
1673 + seq_printf(s, " SIZE %d\n",
1674 + resource->res_size);
1675 + seq_printf(s, " DMABUF %p\n",
1676 + resource->dma_buf);
1677 + seq_printf(s, " ATTACH %p\n",
1678 + resource->attach);
1679 + seq_printf(s, " SGT %p\n",
1681 + seq_printf(s, " DMA_ADDR %pad\n",
1682 + &resource->dma_addr);
1685 + seq_printf(s, "\n\nTotal resource count: %d\n\n", resource_count);
1687 + seq_puts(s, "\nMappings\n");
1688 + if (!list_empty(&sm_state->map_list)) {
1689 + list_for_each_entry(map, &sm_state->map_list, map_list) {
1692 + seq_printf(s, "\nMapping 0x%x\n",
1693 + (unsigned int)map);
1694 + seq_printf(s, " TGID %u\n",
1696 + seq_printf(s, " VC-HDL 0x%x\n",
1698 + seq_printf(s, " USR-HDL 0x%x\n",
1699 + map->res_usr_hdl);
1700 + seq_printf(s, " USR-ADDR 0x%lx\n",
1702 + seq_printf(s, " SIZE %d\n",
1703 + map->resource->res_size);
1707 + mutex_unlock(&(sm_state->map_lock));
1708 + seq_printf(s, "\n\nTotal map count: %d\n\n", map_count);
1713 +static int vc_sm_global_statistics_show(struct seq_file *s, void *v)
1717 + /* Global state tracked statistics. */
1718 + if (sm_state != NULL) {
1719 + seq_puts(s, "\nDeceased Resources Statistics\n");
1721 + seq_printf(s, "\nNatural Cause (%u occurences)\n",
1722 + sm_state->res_deceased_cnt);
1723 + for (ix = 0; ix < END_ATTEMPT; ix++) {
1724 + if (sm_state->deceased[ix] > 0) {
1725 + seq_printf(s, " %u\t%s\n",
1726 + sm_state->deceased[ix],
1727 + sm_stats_human_read[ix]);
1730 + seq_puts(s, "\n");
1731 + for (ix = 0; ix < END_ATTEMPT; ix++) {
1732 + if (sm_state->deceased[ix + END_ATTEMPT] > 0) {
1733 + seq_printf(s, " %u\tFAILED %s\n",
1734 + sm_state->deceased[ix + END_ATTEMPT],
1735 + sm_stats_human_read[ix]);
1739 + seq_printf(s, "\nForcefull (%u occurences)\n",
1740 + sm_state->res_terminated_cnt);
1741 + for (ix = 0; ix < END_ATTEMPT; ix++) {
1742 + if (sm_state->terminated[ix] > 0) {
1743 + seq_printf(s, " %u\t%s\n",
1744 + sm_state->terminated[ix],
1745 + sm_stats_human_read[ix]);
1748 + seq_puts(s, "\n");
1749 + for (ix = 0; ix < END_ATTEMPT; ix++) {
1750 + if (sm_state->terminated[ix + END_ATTEMPT] > 0) {
1751 + seq_printf(s, " %u\tFAILED %s\n",
1752 + sm_state->terminated[ix +
1754 + sm_stats_human_read[ix]);
1763 +/* Read callback for the statistics proc entry. */
1764 +static int vc_sm_statistics_show(struct seq_file *s, void *v)
1767 + struct sm_priv_data_t *file_data;
1768 + struct sm_resource_t *resource;
1769 + int res_count = 0;
1770 + struct sm_pde_t *p_pde;
1772 + p_pde = (struct sm_pde_t *)(s->private);
1773 + file_data = (struct sm_priv_data_t *)(p_pde->priv_data);
1775 + if (file_data == NULL)
1778 + /* Per process statistics. */
1780 + seq_printf(s, "\nStatistics for TGID %d\n", file_data->pid);
1782 + mutex_lock(&(sm_state->map_lock));
1784 + if (!list_empty(&file_data->resource_list)) {
1785 + list_for_each_entry(resource, &file_data->resource_list,
1789 + seq_printf(s, "\nGUID: 0x%x\n\n",
1790 + resource->res_guid);
1791 + for (ix = 0; ix < END_ATTEMPT; ix++) {
1792 + if (resource->res_stats[ix] > 0) {
1795 + resource->res_stats[ix],
1796 + sm_stats_human_read[ix]);
1799 + seq_puts(s, "\n");
1800 + for (ix = 0; ix < END_ATTEMPT; ix++) {
1801 + if (resource->res_stats[ix + END_ATTEMPT] > 0) {
1803 + " %u\tFAILED %s\n",
1804 + resource->res_stats[
1805 + ix + END_ATTEMPT],
1806 + sm_stats_human_read[ix]);
1812 + mutex_unlock(&(sm_state->map_lock));
1814 + seq_printf(s, "\nResources Count %d\n", res_count);
1821 +/* Read callback for the allocation proc entry. */
1822 +static int vc_sm_alloc_show(struct seq_file *s, void *v)
1824 + struct sm_priv_data_t *file_data;
1825 + struct sm_resource_t *resource;
1826 + int alloc_count = 0;
1827 + struct sm_pde_t *p_pde;
1829 + p_pde = (struct sm_pde_t *)(s->private);
1830 + file_data = (struct sm_priv_data_t *)(p_pde->priv_data);
1835 + /* Per process statistics. */
1836 + seq_printf(s, "\nAllocation for TGID %d\n", file_data->pid);
1838 + mutex_lock(&(sm_state->map_lock));
1840 + if (!list_empty(&file_data->resource_list)) {
1841 + list_for_each_entry(resource, &file_data->resource_list,
1845 + seq_printf(s, "\nGUID: 0x%x\n",
1846 + resource->res_guid);
1847 + seq_printf(s, "Lock Count: %u\n",
1848 + resource->lock_count);
1849 + seq_printf(s, "Mapped: %s\n",
1850 + (resource->map_count ? "yes" : "no"));
1851 + seq_printf(s, "VC-handle: 0x%x\n",
1852 + resource->res_handle);
1853 + seq_printf(s, "VC-address: 0x%p\n",
1854 + resource->res_base_mem);
1855 + seq_printf(s, "VC-size (bytes): %u\n",
1856 + resource->res_size);
1857 + seq_printf(s, "Cache: %s\n",
1858 + sm_cache_map_vector[resource->res_cached]);
1862 + mutex_unlock(&(sm_state->map_lock));
1864 + seq_printf(s, "\n\nTotal allocation count: %d\n\n", alloc_count);
1870 +static int vc_sm_seq_file_show(struct seq_file *s, void *v)
1872 + struct sm_pde_t *sm_pde;
1874 + sm_pde = (struct sm_pde_t *)(s->private);
1876 + if (sm_pde && sm_pde->show)
1877 + sm_pde->show(s, v);
1882 +static int vc_sm_single_open(struct inode *inode, struct file *file)
1884 + return single_open(file, vc_sm_seq_file_show, inode->i_private);
1887 +static const struct file_operations vc_sm_debug_fs_fops = {
1888 + .open = vc_sm_single_open,
1890 + .llseek = seq_lseek,
1891 + .release = single_release,
1895 + * Adds a resource to the private data list which tracks all the allocated
1898 +static void vmcs_sm_add_resource(struct sm_priv_data_t *privdata,
1899 + struct sm_resource_t *resource)
1901 + mutex_lock(&(sm_state->map_lock));
1902 + list_add(&resource->resource_list, &privdata->resource_list);
1903 + list_add(&resource->global_resource_list, &sm_state->resource_list);
1904 + mutex_unlock(&(sm_state->map_lock));
1906 + pr_debug("[%s]: added resource %p (base addr %p, hdl %x, size %u, cache %u)\n",
1907 + __func__, resource, resource->res_base_mem,
1908 + resource->res_handle, resource->res_size, resource->res_cached);
1912 + * Locates a resource and acquires a reference on it.
1913 + * The resource won't be deleted while there is a reference on it.
1915 +static struct sm_resource_t *vmcs_sm_acquire_resource(struct sm_priv_data_t
1917 + unsigned int res_guid)
1919 + struct sm_resource_t *resource, *ret = NULL;
1921 + mutex_lock(&(sm_state->map_lock));
1923 + list_for_each_entry(resource, &private->resource_list, resource_list) {
1924 + if (resource->res_guid != res_guid)
1927 + pr_debug("[%s]: located resource %p (guid: %x, base addr %p, hdl %x, size %u, cache %u)\n",
1928 + __func__, resource, resource->res_guid,
1929 + resource->res_base_mem, resource->res_handle,
1930 + resource->res_size, resource->res_cached);
1931 + resource->ref_count++;
1936 + mutex_unlock(&(sm_state->map_lock));
1942 + * Locates a resource and acquires a reference on it.
1943 + * The resource won't be deleted while there is a reference on it.
1945 +static struct sm_resource_t *vmcs_sm_acquire_first_resource(
1946 + struct sm_priv_data_t *private)
1948 + struct sm_resource_t *resource, *ret = NULL;
1950 + mutex_lock(&(sm_state->map_lock));
1952 + list_for_each_entry(resource, &private->resource_list, resource_list) {
1953 + pr_debug("[%s]: located resource %p (guid: %x, base addr %p, hdl %x, size %u, cache %u)\n",
1954 + __func__, resource, resource->res_guid,
1955 + resource->res_base_mem, resource->res_handle,
1956 + resource->res_size, resource->res_cached);
1957 + resource->ref_count++;
1962 + mutex_unlock(&(sm_state->map_lock));
1968 + * Locates a resource and acquires a reference on it.
1969 + * The resource won't be deleted while there is a reference on it.
1971 +static struct sm_resource_t *vmcs_sm_acquire_global_resource(unsigned int
1974 + struct sm_resource_t *resource, *ret = NULL;
1976 + mutex_lock(&(sm_state->map_lock));
1978 + list_for_each_entry(resource, &sm_state->resource_list,
1979 + global_resource_list) {
1980 + if (resource->res_guid != res_guid)
1983 + pr_debug("[%s]: located resource %p (guid: %x, base addr %p, hdl %x, size %u, cache %u)\n",
1984 + __func__, resource, resource->res_guid,
1985 + resource->res_base_mem, resource->res_handle,
1986 + resource->res_size, resource->res_cached);
1987 + resource->ref_count++;
1992 + mutex_unlock(&(sm_state->map_lock));
1998 + * Release a previously acquired resource.
1999 + * The resource will be deleted when its refcount reaches 0.
2001 +static void vmcs_sm_release_resource(struct sm_resource_t *resource, int force)
2003 + struct sm_priv_data_t *private = resource->private;
2004 + struct sm_mmap *map, *map_tmp;
2005 + struct sm_resource_t *res_tmp;
2008 + mutex_lock(&(sm_state->map_lock));
2010 + if (--resource->ref_count) {
2012 + pr_err("[%s]: resource %p in use\n", __func__, resource);
2014 + mutex_unlock(&(sm_state->map_lock));
2018 + /* Time to free the resource. Start by removing it from the list */
2019 + list_del(&resource->resource_list);
2020 + list_del(&resource->global_resource_list);
2023 + * Walk the global resource list, find out if the resource is used
2024 + * somewhere else. In which case we don't want to delete it.
2026 + list_for_each_entry(res_tmp, &sm_state->resource_list,
2027 + global_resource_list) {
2028 + if (res_tmp->res_handle == resource->res_handle) {
2029 + resource->res_handle = 0;
2034 + mutex_unlock(&(sm_state->map_lock));
2036 + pr_debug("[%s]: freeing data - guid %x, hdl %x, base address %p\n",
2037 + __func__, resource->res_guid, resource->res_handle,
2038 + resource->res_base_mem);
2039 + resource->res_stats[FREE]++;
2041 + /* Make sure the resource we're removing is unmapped first */
2042 + if (resource->map_count && !list_empty(&resource->map_list)) {
2043 + down_write(¤t->mm->mmap_sem);
2044 + list_for_each_entry_safe(map, map_tmp, &resource->map_list,
2045 + resource_map_list) {
2047 + do_munmap(current->mm, map->res_addr,
2048 + resource->res_size, NULL);
2050 + pr_err("[%s]: could not unmap resource %p\n",
2051 + __func__, resource);
2054 + up_write(¤t->mm->mmap_sem);
2057 + /* Free up the videocore allocated resource. */
2058 + if (resource->res_handle) {
2059 + struct vc_sm_free_t free = {
2060 + resource->res_handle, (uint32_t)resource->res_base_mem
2062 + int status = vc_vchi_sm_free(sm_state->sm_handle, &free,
2063 + &private->int_trans_id);
2064 + if (status != 0 && status != -EINTR) {
2065 + pr_err("[%s]: failed to free memory on videocore (status: %u, trans_id: %u)\n",
2066 + __func__, status, private->int_trans_id);
2067 + resource->res_stats[FREE_FAIL]++;
2072 + if (resource->sgt)
2073 + dma_buf_unmap_attachment(resource->attach, resource->sgt,
2074 + DMA_BIDIRECTIONAL);
2075 + if (resource->attach)
2076 + dma_buf_detach(resource->dma_buf, resource->attach);
2077 + if (resource->dma_buf)
2078 + dma_buf_put(resource->dma_buf);
2080 + /* Free up the shared resource. */
2081 + if (resource->res_shared)
2082 + vmcs_sm_release_resource(resource->res_shared, 0);
2084 + /* Free up the local resource tracking this allocation. */
2085 + vc_sm_resource_deceased(resource, force);
2090 + * Dump the map table for the driver. If process is -1, dumps the whole table;
2091 + * if process is a valid pid (non -1) dump only the entries associated with the
2092 + * pid of interest.
2094 +static void vmcs_sm_host_walk_map_per_pid(int pid)
2096 + struct sm_mmap *map = NULL;
2098 + /* Make sure the device was started properly. */
2099 + if (sm_state == NULL) {
2100 + pr_err("[%s]: invalid device\n", __func__);
2104 + mutex_lock(&(sm_state->map_lock));
2106 + /* Log all applicable mapping(s). */
2107 + if (!list_empty(&sm_state->map_list)) {
2108 + list_for_each_entry(map, &sm_state->map_list, map_list) {
2109 + if (pid == -1 || map->res_pid == pid) {
2110 + pr_info("[%s]: tgid: %u - vc-hdl: %x, usr-hdl: %x, usr-addr: %lx\n",
2111 + __func__, map->res_pid, map->res_vc_hdl,
2112 + map->res_usr_hdl, map->res_addr);
2117 + mutex_unlock(&(sm_state->map_lock));
2121 + * Dump the allocation table from host side point of view. This only dumps the
2122 + * data allocated for this process/device referenced by the file_data.
2124 +static void vmcs_sm_host_walk_alloc(struct sm_priv_data_t *file_data)
2126 + struct sm_resource_t *resource = NULL;
2128 + /* Make sure the device was started properly. */
2129 + if ((sm_state == NULL) || (file_data == NULL)) {
2130 + pr_err("[%s]: invalid device\n", __func__);
2134 + mutex_lock(&(sm_state->map_lock));
2136 + if (!list_empty(&file_data->resource_list)) {
2137 + list_for_each_entry(resource, &file_data->resource_list,
2139 + pr_info("[%s]: guid: %x - hdl: %x, vc-mem: %p, size: %u, cache: %u\n",
2140 + __func__, resource->res_guid, resource->res_handle,
2141 + resource->res_base_mem, resource->res_size,
2142 + resource->res_cached);
2146 + mutex_unlock(&(sm_state->map_lock));
2149 +/* Create support for private data tracking. */
2150 +static struct sm_priv_data_t *vc_sm_create_priv_data(pid_t id)
2152 + char alloc_name[32];
2153 + struct sm_priv_data_t *file_data = NULL;
2155 + /* Allocate private structure. */
2156 + file_data = kzalloc(sizeof(*file_data), GFP_KERNEL);
2159 + pr_err("[%s]: cannot allocate file data\n", __func__);
2163 + snprintf(alloc_name, sizeof(alloc_name), "%d", id);
2165 + INIT_LIST_HEAD(&file_data->resource_list);
2166 + file_data->pid = id;
2167 + file_data->dir_pid = debugfs_create_dir(alloc_name,
2168 + sm_state->dir_alloc);
2170 + /* TODO: fix this to support querying statistics per pid */
2172 + if (IS_ERR_OR_NULL(file_data->dir_pid)) {
2173 + file_data->dir_pid = NULL;
2175 + struct dentry *dir_entry;
2177 + dir_entry = debugfs_create_file(VC_SM_RESOURCES, 0444,
2178 + file_data->dir_pid, file_data,
2179 + vc_sm_debug_fs_fops);
2181 + file_data->dir_res.dir_entry = dir_entry;
2182 + file_data->dir_res.priv_data = file_data;
2183 + file_data->dir_res.show = &vc_sm_alloc_show;
2185 + dir_entry = debugfs_create_file(VC_SM_STATS, 0444,
2186 + file_data->dir_pid, file_data,
2187 + vc_sm_debug_fs_fops);
2189 + file_data->dir_res.dir_entry = dir_entry;
2190 + file_data->dir_res.priv_data = file_data;
2191 + file_data->dir_res.show = &vc_sm_statistics_show;
2193 + pr_debug("[%s]: private data allocated %p\n", __func__, file_data);
2201 + * Open the device. Creates a private state to help track all allocation
2202 + * associated with this device.
2204 +static int vc_sm_open(struct inode *inode, struct file *file)
2208 + /* Make sure the device was started properly. */
2210 + pr_err("[%s]: invalid device\n", __func__);
2215 + file->private_data = vc_sm_create_priv_data(current->tgid);
2216 + if (file->private_data == NULL) {
2217 + pr_err("[%s]: failed to create data tracker\n", __func__);
2228 + * Close the device. Free up all resources still associated with this device
2231 +static int vc_sm_release(struct inode *inode, struct file *file)
2233 + struct sm_priv_data_t *file_data =
2234 + (struct sm_priv_data_t *)file->private_data;
2235 + struct sm_resource_t *resource;
2238 + /* Make sure the device was started properly. */
2239 + if (sm_state == NULL || file_data == NULL) {
2240 + pr_err("[%s]: invalid device\n", __func__);
2245 + pr_debug("[%s]: using private data %p\n", __func__, file_data);
2247 + if (file_data->restart_sys == -EINTR) {
2248 + struct vc_sm_action_clean_t action_clean;
2250 + pr_debug("[%s]: releasing following EINTR on %u (trans_id: %u) (likely due to signal)...\n",
2251 + __func__, file_data->int_action,
2252 + file_data->int_trans_id);
2254 + action_clean.res_action = file_data->int_action;
2255 + action_clean.action_trans_id = file_data->int_trans_id;
2257 + vc_vchi_sm_clean_up(sm_state->sm_handle, &action_clean);
2260 + while ((resource = vmcs_sm_acquire_first_resource(file_data)) != NULL) {
2261 + vmcs_sm_release_resource(resource, 0);
2262 + vmcs_sm_release_resource(resource, 1);
2265 + /* Remove the corresponding proc entry. */
2266 + debugfs_remove_recursive(file_data->dir_pid);
2268 + /* Terminate the private data. */
2275 +static void vcsm_vma_open(struct vm_area_struct *vma)
2277 + struct sm_mmap *map = (struct sm_mmap *)vma->vm_private_data;
2279 + pr_debug("[%s]: virt %lx-%lx, pid %i, pfn %i\n",
2280 + __func__, vma->vm_start, vma->vm_end, (int)current->tgid,
2281 + (int)vma->vm_pgoff);
2286 +static void vcsm_vma_close(struct vm_area_struct *vma)
2288 + struct sm_mmap *map = (struct sm_mmap *)vma->vm_private_data;
2290 + pr_debug("[%s]: virt %lx-%lx, pid %i, pfn %i\n",
2291 + __func__, vma->vm_start, vma->vm_end, (int)current->tgid,
2292 + (int)vma->vm_pgoff);
2296 + /* Remove from the map table. */
2297 + if (map->ref_count == 0)
2298 + vmcs_sm_remove_map(sm_state, map->resource, map);
2301 +static int vcsm_vma_fault(struct vm_fault *vmf)
2303 + struct sm_mmap *map = (struct sm_mmap *)vmf->vma->vm_private_data;
2304 + struct sm_resource_t *resource = map->resource;
2305 + pgoff_t page_offset;
2306 + unsigned long pfn;
2309 + /* Lock the resource if necessary. */
2310 + if (!resource->lock_count) {
2311 + struct vc_sm_lock_unlock_t lock_unlock;
2312 + struct vc_sm_lock_result_t lock_result;
2315 + lock_unlock.res_handle = resource->res_handle;
2316 + lock_unlock.res_mem = (uint32_t)resource->res_base_mem;
2318 + pr_debug("[%s]: attempt to lock data - hdl %x, base address %p\n",
2319 + __func__, lock_unlock.res_handle,
2320 + (void *)lock_unlock.res_mem);
2322 + /* Lock the videocore allocated resource. */
2323 + status = vc_vchi_sm_lock(sm_state->sm_handle,
2324 + &lock_unlock, &lock_result, 0);
2325 + if (status || !lock_result.res_mem) {
2326 + pr_err("[%s]: failed to lock memory on videocore (status: %u)\n",
2327 + __func__, status);
2328 + resource->res_stats[LOCK_FAIL]++;
2329 + return VM_FAULT_SIGBUS;
2332 + pfn = vcaddr_to_pfn((unsigned long)resource->res_base_mem);
2333 + outer_inv_range(__pfn_to_phys(pfn),
2334 + __pfn_to_phys(pfn) + resource->res_size);
2336 + resource->res_stats[LOCK]++;
2337 + resource->lock_count++;
2339 + /* Keep track of the new base memory. */
2340 + if (lock_result.res_mem &&
2341 + lock_result.res_old_mem &&
2342 + (lock_result.res_mem != lock_result.res_old_mem)) {
2343 + resource->res_base_mem = (void *)lock_result.res_mem;
2347 + /* We don't use vmf->pgoff since that has the fake offset */
2348 + page_offset = ((unsigned long)vmf->address - vmf->vma->vm_start);
2349 + pfn = (uint32_t)resource->res_base_mem & 0x3FFFFFFF;
2350 + pfn += mm_vc_mem_phys_addr;
2351 + pfn += page_offset;
2352 + pfn >>= PAGE_SHIFT;
2354 + /* Finally, remap it */
2355 + ret = vm_insert_pfn(vmf->vma, (unsigned long)vmf->address, pfn);
2359 + case -ERESTARTSYS:
2361 + * EBUSY is ok: this just means that another thread
2362 + * already did the job.
2365 + return VM_FAULT_NOPAGE;
2368 + pr_err("[%s]: failed to map page pfn:%lx virt:%lx ret:%d\n", __func__,
2369 + pfn, (unsigned long)vmf->address, ret);
2370 + return VM_FAULT_OOM;
2372 + pr_err("[%s]: failed to map page pfn:%lx virt:%lx ret:%d\n", __func__,
2373 + pfn, (unsigned long)vmf->address, ret);
2374 + return VM_FAULT_SIGBUS;
2378 +static const struct vm_operations_struct vcsm_vm_ops = {
2379 + .open = vcsm_vma_open,
2380 + .close = vcsm_vma_close,
2381 + .fault = vcsm_vma_fault,
2384 +/* Converts VCSM_CACHE_OP_* to an operating function. */
2385 +static void (*cache_op_to_func(const unsigned cache_op))
2386 + (const void*, const void*)
2388 + switch (cache_op) {
2389 + case VCSM_CACHE_OP_NOP:
2392 + case VCSM_CACHE_OP_INV:
2393 + return dmac_inv_range;
2395 + case VCSM_CACHE_OP_CLEAN:
2396 + return dmac_clean_range;
2398 + case VCSM_CACHE_OP_FLUSH:
2399 + return dmac_flush_range;
2402 + pr_err("[%s]: Invalid cache_op: 0x%08x\n", __func__, cache_op);
2408 + * Clean/invalid/flush cache of which buffer is already pinned (i.e. accessed).
2410 +static int clean_invalid_contiguous_mem_2d(const void __user *addr,
2411 + const size_t block_count, const size_t block_size, const size_t stride,
2412 + const unsigned cache_op)
2415 + void (*op_fn)(const void*, const void*);
2417 + if (!block_size) {
2418 + pr_err("[%s]: size cannot be 0\n", __func__);
2422 + op_fn = cache_op_to_func(cache_op);
2423 + if (op_fn == NULL)
2426 + for (i = 0; i < block_count; i ++, addr += stride)
2427 + op_fn(addr, addr + block_size);
2432 +/* Clean/invalid/flush cache of which buffer may be non-pinned. */
2433 +/* The caller must lock current->mm->mmap_sem for read. */
2434 +static int clean_invalid_mem_walk(unsigned long addr, const size_t size,
2435 + const unsigned cache_op)
2441 + unsigned long pgd_next, pud_next, pmd_next;
2442 + const unsigned long end = ALIGN(addr + size, PAGE_SIZE);
2443 + void (*op_fn)(const void*, const void*);
2445 + addr &= PAGE_MASK;
2450 + op_fn = cache_op_to_func(cache_op);
2451 + if (op_fn == NULL)
2455 + pgd = pgd_offset(current->mm, addr);
2457 + pgd_next = pgd_addr_end(addr, end);
2459 + if (pgd_none(*pgd) || pgd_bad(*pgd))
2463 + pud = pud_offset(pgd, addr);
2465 + pud_next = pud_addr_end(addr, pgd_next);
2466 + if (pud_none(*pud) || pud_bad(*pud))
2470 + pmd = pmd_offset(pud, addr);
2472 + pmd_next = pmd_addr_end(addr, pud_next);
2473 + if (pmd_none(*pmd) || pmd_bad(*pmd))
2477 + pte = pte_offset_map(pmd, addr);
2479 + if (pte_none(*pte) || !pte_present(*pte))
2482 + op_fn((const void __user*) addr,
2483 + (const void __user*) (addr + PAGE_SIZE));
2484 + } while (pte++, addr += PAGE_SIZE, addr != pmd_next);
2487 + } while (pmd++, addr = pmd_next, addr != pud_next);
2489 + } while (pud++, addr = pud_next, addr != pgd_next);
2491 + } while (pgd++, addr = pgd_next, addr != end);
2496 +/* Clean/invalid/flush cache of buffer in resource */
2497 +static int clean_invalid_resource_walk(const void __user *addr,
2498 + const size_t size, const unsigned cache_op, const int usr_hdl,
2499 + struct sm_resource_t *resource)
2502 + enum sm_stats_t stat_attempt, stat_failure;
2503 + void __user *res_addr;
2505 + if (resource == NULL) {
2506 + pr_err("[%s]: resource is NULL\n", __func__);
2509 + if (resource->res_cached != VMCS_SM_CACHE_HOST &&
2510 + resource->res_cached != VMCS_SM_CACHE_BOTH)
2513 + switch (cache_op) {
2514 + case VCSM_CACHE_OP_NOP:
2516 + case VCSM_CACHE_OP_INV:
2517 + stat_attempt = INVALID;
2518 + stat_failure = INVALID_FAIL;
2520 + case VCSM_CACHE_OP_CLEAN:
2521 + /* Like the original VMCS_SM_CMD_CLEAN_INVALID ioctl handler does. */
2522 + stat_attempt = FLUSH;
2523 + stat_failure = FLUSH_FAIL;
2525 + case VCSM_CACHE_OP_FLUSH:
2526 + stat_attempt = FLUSH;
2527 + stat_failure = FLUSH_FAIL;
2530 + pr_err("[%s]: Invalid cache_op: 0x%08x\n", __func__, cache_op);
2533 + resource->res_stats[stat_attempt]++;
2535 + if (size > resource->res_size) {
2536 + pr_err("[%s]: size (0x%08zu) is larger than res_size (0x%08zu)\n",
2537 + __func__, size, resource->res_size);
2540 + res_addr = (void __user*) vmcs_sm_usr_address_from_pid_and_usr_handle(
2541 + current->tgid, usr_hdl);
2542 + if (res_addr == NULL) {
2543 + pr_err("[%s]: Failed to get user address "
2544 + "from pid (%d) and user handle (%d)\n", __func__, current->tgid,
2545 + resource->res_handle);
2548 + if (!(res_addr <= addr && addr + size <= res_addr + resource->res_size)) {
2549 + pr_err("[%s]: Addr (0x%p-0x%p) out of range (0x%p-0x%p)\n",
2550 + __func__, addr, addr + size, res_addr,
2551 + res_addr + resource->res_size);
2555 +	down_read(&current->mm->mmap_sem);
2556 + err = clean_invalid_mem_walk((unsigned long) addr, size, cache_op);
2557 +	up_read(&current->mm->mmap_sem);
2560 + resource->res_stats[stat_failure]++;
2565 +/* Map an allocated data into something that the user space. */
2566 +static int vc_sm_mmap(struct file *file, struct vm_area_struct *vma)
2569 + struct sm_priv_data_t *file_data =
2570 + (struct sm_priv_data_t *)file->private_data;
2571 + struct sm_resource_t *resource = NULL;
2572 + struct sm_mmap *map = NULL;
2574 + /* Make sure the device was started properly. */
2575 + if ((sm_state == NULL) || (file_data == NULL)) {
2576 + pr_err("[%s]: invalid device\n", __func__);
2580 + pr_debug("[%s]: private data %p, guid %x\n", __func__, file_data,
2581 + ((unsigned int)vma->vm_pgoff << PAGE_SHIFT));
2584 + * We lookup to make sure that the data we are being asked to mmap is
2585 + * something that we allocated.
2587 + * We use the offset information as the key to tell us which resource
2590 + resource = vmcs_sm_acquire_resource(file_data,
2591 + ((unsigned int)vma->vm_pgoff <<
2593 + if (resource == NULL) {
2594 + pr_err("[%s]: failed to locate resource for guid %x\n", __func__,
2595 + ((unsigned int)vma->vm_pgoff << PAGE_SHIFT));
2599 + pr_debug("[%s]: guid %x, tgid %u, %u, %u\n",
2600 + __func__, resource->res_guid, current->tgid, resource->pid,
2603 + /* Check permissions. */
2604 + if (resource->pid && (resource->pid != current->tgid)) {
2605 + pr_err("[%s]: current tgid %u != %u owner\n",
2606 + __func__, current->tgid, resource->pid);
2611 + /* Verify that what we are asked to mmap is proper. */
2612 + if (resource->res_size != (unsigned int)(vma->vm_end - vma->vm_start)) {
2613 + pr_err("[%s]: size inconsistency (resource: %u - mmap: %u)\n",
2615 + resource->res_size,
2616 + (unsigned int)(vma->vm_end - vma->vm_start));
2623 + * Keep track of the tuple in the global resource list such that one
2624 + * can do a mapping lookup for address/memory handle.
2626 + map = kzalloc(sizeof(*map), GFP_KERNEL);
2627 + if (map == NULL) {
2628 + pr_err("[%s]: failed to allocate global tracking resource\n",
2634 + map->res_pid = current->tgid;
2635 + map->res_vc_hdl = resource->res_handle;
2636 + map->res_usr_hdl = resource->res_guid;
2637 + map->res_addr = (unsigned long)vma->vm_start;
2638 + map->resource = resource;
2640 + vmcs_sm_add_map(sm_state, resource, map);
2643 + * We are not actually mapping the pages, we just provide a fault
2644 + * handler to allow pages to be mapped when accessed
2647 + VM_IO | VM_PFNMAP | VM_DONTCOPY | VM_DONTEXPAND;
2648 + vma->vm_ops = &vcsm_vm_ops;
2649 + vma->vm_private_data = map;
2651 + /* vm_pgoff is the first PFN of the mapped memory */
2652 + vma->vm_pgoff = (unsigned long)resource->res_base_mem & 0x3FFFFFFF;
2653 + vma->vm_pgoff += mm_vc_mem_phys_addr;
2654 + vma->vm_pgoff >>= PAGE_SHIFT;
2656 + if ((resource->res_cached == VMCS_SM_CACHE_NONE) ||
2657 + (resource->res_cached == VMCS_SM_CACHE_VC)) {
2658 + /* Allocated non host cached memory, honour it. */
2659 + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
2662 + pr_debug("[%s]: resource %p (guid %x) - cnt %u, base address %p, handle %x, size %u (%u), cache %u\n",
2664 + resource, resource->res_guid, resource->lock_count,
2665 + resource->res_base_mem, resource->res_handle,
2666 + resource->res_size, (unsigned int)(vma->vm_end - vma->vm_start),
2667 + resource->res_cached);
2669 + pr_debug("[%s]: resource %p (base address %p, handle %x) - map-count %d, usr-addr %x\n",
2670 + __func__, resource, resource->res_base_mem,
2671 + resource->res_handle, resource->map_count,
2672 + (unsigned int)vma->vm_start);
2674 + vcsm_vma_open(vma);
2675 + resource->res_stats[MAP]++;
2676 + vmcs_sm_release_resource(resource, 0);
2678 + if (resource->map) {
2679 + /* We don't use vmf->pgoff since that has the fake offset */
2680 + unsigned long addr;
2682 + for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) {
2683 + /* Finally, remap it */
2684 + unsigned long pfn = (unsigned long)resource->res_base_mem & 0x3FFFFFFF;
2686 + pfn += mm_vc_mem_phys_addr;
2687 + pfn += addr - vma->vm_start;
2688 + pfn >>= PAGE_SHIFT;
2689 + ret = vm_insert_pfn(vma, addr, pfn);
2696 + resource->res_stats[MAP_FAIL]++;
2697 + vmcs_sm_release_resource(resource, 0);
2701 +/* Allocate a shared memory handle and block. */
2702 +int vc_sm_ioctl_alloc(struct sm_priv_data_t *private,
2703 + struct vmcs_sm_ioctl_alloc *ioparam)
2707 + struct sm_resource_t *resource;
2708 + struct vc_sm_alloc_t alloc = { 0 };
2709 + struct vc_sm_alloc_result_t result = { 0 };
2710 + enum vmcs_sm_cache_e cached = ioparam->cached;
2713 + /* flag to requst buffer is mapped up front, rather than lazily */
2714 + if (cached & 0x80) {
2719 + /* Setup our allocation parameters */
2720 + alloc.type = ((cached == VMCS_SM_CACHE_VC)
2722 + VMCS_SM_CACHE_BOTH)) ? VC_SM_ALLOC_CACHED :
2723 + VC_SM_ALLOC_NON_CACHED;
2724 + alloc.base_unit = ioparam->size;
2725 + alloc.num_unit = ioparam->num;
2726 + alloc.allocator = current->tgid;
2727 + /* Align to kernel page size */
2728 + alloc.alignement = 4096;
2729 + /* Align the size to the kernel page size */
2731 + (alloc.base_unit + alloc.alignement - 1) & ~(alloc.alignement - 1);
2732 + if (*ioparam->name) {
2733 + memcpy(alloc.name, ioparam->name, sizeof(alloc.name) - 1);
2735 + memcpy(alloc.name, VMCS_SM_RESOURCE_NAME_DEFAULT,
2736 + sizeof(VMCS_SM_RESOURCE_NAME_DEFAULT));
2739 + pr_debug("[%s]: attempt to allocate \"%s\" data - type %u, base %u (%u), num %u, alignement %u\n",
2740 + __func__, alloc.name, alloc.type, ioparam->size,
2741 + alloc.base_unit, alloc.num_unit, alloc.alignement);
2743 + /* Allocate local resource to track this allocation. */
2744 + resource = kzalloc(sizeof(*resource), GFP_KERNEL);
2749 + INIT_LIST_HEAD(&resource->map_list);
2750 + resource->ref_count++;
2751 + resource->pid = current->tgid;
2753 + /* Allocate the videocore resource. */
2754 + status = vc_vchi_sm_alloc(sm_state->sm_handle, &alloc, &result,
2755 + &private->int_trans_id);
2756 + if (status == -EINTR) {
2757 + pr_debug("[%s]: requesting allocate memory action restart (trans_id: %u)\n",
2758 + __func__, private->int_trans_id);
2759 + ret = -ERESTARTSYS;
2760 + private->restart_sys = -EINTR;
2761 + private->int_action = VC_SM_MSG_TYPE_ALLOC;
2763 + } else if (status != 0 || !result.res_mem) {
2764 + pr_err("[%s]: failed to allocate memory on videocore (status: %u, trans_id: %u)\n",
2765 + __func__, status, private->int_trans_id);
2767 + resource->res_stats[ALLOC_FAIL]++;
2771 + /* Keep track of the resource we created. */
2772 + resource->private = private;
2773 + resource->res_handle = result.res_handle;
2774 + resource->res_base_mem = (void *)result.res_mem;
2775 + resource->res_size = alloc.base_unit * alloc.num_unit;
2776 + resource->res_cached = cached;
2777 + resource->map = map;
2780 + * Kernel/user GUID. This global identifier is used for mmap'ing the
2781 + * allocated region from user space, it is passed as the mmap'ing
2782 + * offset, we use it to 'hide' the videocore handle/address.
2784 + mutex_lock(&sm_state->lock);
2785 + resource->res_guid = ++sm_state->guid;
2786 + mutex_unlock(&sm_state->lock);
2787 + resource->res_guid <<= PAGE_SHIFT;
2789 + vmcs_sm_add_resource(private, resource);
2791 + pr_debug("[%s]: allocated data - guid %x, hdl %x, base address %p, size %d, cache %d\n",
2792 + __func__, resource->res_guid, resource->res_handle,
2793 + resource->res_base_mem, resource->res_size,
2794 + resource->res_cached);
2797 + resource->res_stats[ALLOC]++;
2798 + ioparam->handle = resource->res_guid;
2802 + pr_err("[%s]: failed to allocate \"%s\" data (%i) - type %u, base %u (%u), num %u, alignment %u\n",
2803 + __func__, alloc.name, ret, alloc.type, ioparam->size,
2804 + alloc.base_unit, alloc.num_unit, alloc.alignement);
2805 + if (resource != NULL) {
2806 + vc_sm_resource_deceased(resource, 1);
2812 +/* Share an allocate memory handle and block.*/
2813 +int vc_sm_ioctl_alloc_share(struct sm_priv_data_t *private,
2814 + struct vmcs_sm_ioctl_alloc_share *ioparam)
2816 + struct sm_resource_t *resource, *shared_resource;
2819 + pr_debug("[%s]: attempt to share resource %u\n", __func__,
2822 + shared_resource = vmcs_sm_acquire_global_resource(ioparam->handle);
2823 + if (shared_resource == NULL) {
2828 + /* Allocate local resource to track this allocation. */
2829 + resource = kzalloc(sizeof(*resource), GFP_KERNEL);
2830 + if (resource == NULL) {
2831 + pr_err("[%s]: failed to allocate local tracking resource\n",
2836 + INIT_LIST_HEAD(&resource->map_list);
2837 + resource->ref_count++;
2838 + resource->pid = current->tgid;
2840 + /* Keep track of the resource we created. */
2841 + resource->private = private;
2842 + resource->res_handle = shared_resource->res_handle;
2843 + resource->res_base_mem = shared_resource->res_base_mem;
2844 + resource->res_size = shared_resource->res_size;
2845 + resource->res_cached = shared_resource->res_cached;
2846 + resource->res_shared = shared_resource;
2848 + mutex_lock(&sm_state->lock);
2849 + resource->res_guid = ++sm_state->guid;
2850 + mutex_unlock(&sm_state->lock);
2851 + resource->res_guid <<= PAGE_SHIFT;
2853 + vmcs_sm_add_resource(private, resource);
2855 + pr_debug("[%s]: allocated data - guid %x, hdl %x, base address %p, size %d, cache %d\n",
2856 + __func__, resource->res_guid, resource->res_handle,
2857 + resource->res_base_mem, resource->res_size,
2858 + resource->res_cached);
2861 + resource->res_stats[ALLOC]++;
2862 + ioparam->handle = resource->res_guid;
2863 + ioparam->size = resource->res_size;
2867 + pr_err("[%s]: failed to share %u\n", __func__, ioparam->handle);
2868 + if (shared_resource != NULL)
2869 + vmcs_sm_release_resource(shared_resource, 0);
2874 +/* Free a previously allocated shared memory handle and block.*/
2875 +static int vc_sm_ioctl_free(struct sm_priv_data_t *private,
2876 + struct vmcs_sm_ioctl_free *ioparam)
2878 + struct sm_resource_t *resource =
2879 + vmcs_sm_acquire_resource(private, ioparam->handle);
2881 + if (resource == NULL) {
2882 + pr_err("[%s]: resource for guid %u does not exist\n", __func__,
2887 + /* Check permissions. */
2888 + if (resource->pid && (resource->pid != current->tgid)) {
2889 + pr_err("[%s]: current tgid %u != %u owner\n",
2890 + __func__, current->tgid, resource->pid);
2891 + vmcs_sm_release_resource(resource, 0);
2895 + vmcs_sm_release_resource(resource, 0);
2896 + vmcs_sm_release_resource(resource, 0);
2900 +/* Resize a previously allocated shared memory handle and block. */
2901 +static int vc_sm_ioctl_resize(struct sm_priv_data_t *private,
2902 + struct vmcs_sm_ioctl_resize *ioparam)
2906 + struct vc_sm_resize_t resize;
2907 + struct sm_resource_t *resource;
2909 + /* Locate resource from GUID. */
2910 + resource = vmcs_sm_acquire_resource(private, ioparam->handle);
2912 + pr_err("[%s]: failed resource - guid %x\n",
2913 + __func__, ioparam->handle);
2919 + * If the resource is locked, its reference count will be not NULL,
2920 + * in which case we will not be allowed to resize it anyways, so
2921 + * reject the attempt here.
2923 + if (resource->lock_count != 0) {
2924 + pr_err("[%s]: cannot resize - guid %x, ref-cnt %d\n",
2925 + __func__, ioparam->handle, resource->lock_count);
2930 + /* Check permissions. */
2931 + if (resource->pid && (resource->pid != current->tgid)) {
2932 + pr_err("[%s]: current tgid %u != %u owner\n", __func__,
2933 + current->tgid, resource->pid);
2938 + if (resource->map_count != 0) {
2939 + pr_err("[%s]: cannot resize - guid %x, ref-cnt %d\n",
2940 + __func__, ioparam->handle, resource->map_count);
2945 + resize.res_handle = resource->res_handle;
2946 + resize.res_mem = (uint32_t)resource->res_base_mem;
2947 + resize.res_new_size = ioparam->new_size;
2949 + pr_debug("[%s]: attempt to resize data - guid %x, hdl %x, base address %p\n",
2950 + __func__, ioparam->handle, resize.res_handle,
2951 + (void *)resize.res_mem);
2953 + /* Resize the videocore allocated resource. */
2954 + status = vc_vchi_sm_resize(sm_state->sm_handle, &resize,
2955 + &private->int_trans_id);
2956 + if (status == -EINTR) {
2957 + pr_debug("[%s]: requesting resize memory action restart (trans_id: %u)\n",
2958 + __func__, private->int_trans_id);
2959 + ret = -ERESTARTSYS;
2960 + private->restart_sys = -EINTR;
2961 + private->int_action = VC_SM_MSG_TYPE_RESIZE;
2963 + } else if (status) {
2964 + pr_err("[%s]: failed to resize memory on videocore (status: %u, trans_id: %u)\n",
2965 + __func__, status, private->int_trans_id);
2970 + pr_debug("[%s]: success to resize data - hdl %x, size %d -> %d\n",
2971 + __func__, resize.res_handle, resource->res_size,
2972 + resize.res_new_size);
2974 + /* Successfully resized, save the information and inform the user. */
2975 + ioparam->old_size = resource->res_size;
2976 + resource->res_size = resize.res_new_size;
2980 + vmcs_sm_release_resource(resource, 0);
2985 +/* Lock a previously allocated shared memory handle and block. */
2986 +static int vc_sm_ioctl_lock(struct sm_priv_data_t *private,
2987 + struct vmcs_sm_ioctl_lock_unlock *ioparam,
2988 + int change_cache, enum vmcs_sm_cache_e cache_type,
2989 + unsigned int vc_addr)
2992 + struct vc_sm_lock_unlock_t lock;
2993 + struct vc_sm_lock_result_t result;
2994 + struct sm_resource_t *resource;
2996 + struct sm_mmap *map, *map_tmp;
2997 + unsigned long phys_addr;
3001 + /* Locate resource from GUID. */
3002 + resource = vmcs_sm_acquire_resource(private, ioparam->handle);
3003 + if (resource == NULL) {
3008 + /* Check permissions. */
3009 + if (resource->pid && (resource->pid != current->tgid)) {
3010 + pr_err("[%s]: current tgid %u != %u owner\n", __func__,
3011 + current->tgid, resource->pid);
3016 + lock.res_handle = resource->res_handle;
3017 + lock.res_mem = (uint32_t)resource->res_base_mem;
3019 + /* Take the lock and get the address to be mapped. */
3020 + if (vc_addr == 0) {
3021 + pr_debug("[%s]: attempt to lock data - guid %x, hdl %x, base address %p\n",
3022 + __func__, ioparam->handle, lock.res_handle,
3023 + (void *)lock.res_mem);
3025 + /* Lock the videocore allocated resource. */
3026 + status = vc_vchi_sm_lock(sm_state->sm_handle, &lock, &result,
3027 + &private->int_trans_id);
3028 + if (status == -EINTR) {
3029 + pr_debug("[%s]: requesting lock memory action restart (trans_id: %u)\n",
3030 + __func__, private->int_trans_id);
3031 + ret = -ERESTARTSYS;
3032 + private->restart_sys = -EINTR;
3033 + private->int_action = VC_SM_MSG_TYPE_LOCK;
3035 + } else if (status ||
3036 + (!status && !(void *)result.res_mem)) {
3037 + pr_err("[%s]: failed to lock memory on videocore (status: %u, trans_id: %u)\n",
3038 + __func__, status, private->int_trans_id);
3040 + resource->res_stats[LOCK_FAIL]++;
3044 + pr_debug("[%s]: succeed to lock data - hdl %x, base address %p (%p), ref-cnt %d\n",
3045 + __func__, lock.res_handle, (void *)result.res_mem,
3046 + (void *)lock.res_mem, resource->lock_count);
3048 + /* Lock assumed taken already, address to be mapped is known. */
3050 + resource->res_base_mem = (void *)vc_addr;
3052 + resource->res_stats[LOCK]++;
3053 + resource->lock_count++;
3055 + /* Keep track of the new base memory allocation if it has changed. */
3056 + if ((vc_addr == 0) &&
3057 + ((void *)result.res_mem) &&
3058 + ((void *)result.res_old_mem) &&
3059 + (result.res_mem != result.res_old_mem)) {
3060 + resource->res_base_mem = (void *)result.res_mem;
3062 + /* Kernel allocated resources. */
3063 + if (resource->pid == 0) {
3064 + if (!list_empty(&resource->map_list)) {
3065 + list_for_each_entry_safe(map, map_tmp,
3066 + &resource->map_list,
3067 + resource_map_list) {
3068 + if (map->res_addr) {
3069 + iounmap((void *)map->res_addr);
3070 + map->res_addr = 0;
3072 + vmcs_sm_remove_map(sm_state,
3083 + resource->res_cached = cache_type;
3085 + if (resource->map_count) {
3087 + vmcs_sm_usr_address_from_pid_and_usr_handle(
3088 + current->tgid, ioparam->handle);
3090 + pr_debug("[%s] map_count %d private->pid %d current->tgid %d hnd %x addr %u\n",
3091 + __func__, resource->map_count, private->pid,
3092 + current->tgid, ioparam->handle, ioparam->addr);
3094 + /* Kernel allocated resources. */
3095 + if (resource->pid == 0) {
3096 + pr_debug("[%s]: attempt mapping kernel resource - guid %x, hdl %x\n",
3097 + __func__, ioparam->handle, lock.res_handle);
3099 + ioparam->addr = 0;
3101 + map = kzalloc(sizeof(*map), GFP_KERNEL);
3102 + if (map == NULL) {
3103 + pr_err("[%s]: failed allocating tracker\n",
3108 + phys_addr = (uint32_t)resource->res_base_mem &
3110 + phys_addr += mm_vc_mem_phys_addr;
3111 + if (resource->res_cached
3112 + == VMCS_SM_CACHE_HOST) {
3113 + ioparam->addr = (unsigned long)
3114 + /* TODO - make cached work */
3115 + ioremap_nocache(phys_addr,
3116 + resource->res_size);
3118 + pr_debug("[%s]: mapping kernel - guid %x, hdl %x - cached mapping %u\n",
3119 + __func__, ioparam->handle,
3120 + lock.res_handle, ioparam->addr);
3122 + ioparam->addr = (unsigned long)
3123 + ioremap_nocache(phys_addr,
3124 + resource->res_size);
3126 + pr_debug("[%s]: mapping kernel- guid %x, hdl %x - non cached mapping %u\n",
3127 + __func__, ioparam->handle,
3128 + lock.res_handle, ioparam->addr);
3132 + map->res_vc_hdl = resource->res_handle;
3133 + map->res_usr_hdl = resource->res_guid;
3134 + map->res_addr = ioparam->addr;
3135 + map->resource = resource;
3138 + vmcs_sm_add_map(sm_state, resource, map);
3141 + ioparam->addr = 0;
3146 + vmcs_sm_release_resource(resource, 0);
3151 +/* Unlock a previously allocated shared memory handle and block.*/
3152 +static int vc_sm_ioctl_unlock(struct sm_priv_data_t *private,
3153 + struct vmcs_sm_ioctl_lock_unlock *ioparam,
3154 + int flush, int wait_reply, int no_vc_unlock)
3157 + struct vc_sm_lock_unlock_t unlock;
3158 + struct sm_mmap *map, *map_tmp;
3159 + struct sm_resource_t *resource;
3164 + /* Locate resource from GUID. */
3165 + resource = vmcs_sm_acquire_resource(private, ioparam->handle);
3166 + if (resource == NULL) {
3171 + /* Check permissions. */
3172 + if (resource->pid && (resource->pid != current->tgid)) {
3173 + pr_err("[%s]: current tgid %u != %u owner\n",
3174 + __func__, current->tgid, resource->pid);
3179 + unlock.res_handle = resource->res_handle;
3180 + unlock.res_mem = (uint32_t)resource->res_base_mem;
3182 + pr_debug("[%s]: attempt to unlock data - guid %x, hdl %x, base address %p\n",
3183 + __func__, ioparam->handle, unlock.res_handle,
3184 + (void *)unlock.res_mem);
3186 + /* User space allocated resources. */
3187 + if (resource->pid) {
3188 + /* Flush if requested */
3189 + if (resource->res_cached && flush) {
3190 + dma_addr_t phys_addr = 0;
3192 + resource->res_stats[FLUSH]++;
3195 + (dma_addr_t)((uint32_t)resource->res_base_mem &
3197 + phys_addr += (dma_addr_t)mm_vc_mem_phys_addr;
3199 + /* L1 cache flush */
3200 +			down_read(&current->mm->mmap_sem);
3201 + list_for_each_entry(map, &resource->map_list,
3202 + resource_map_list) {
3204 + const unsigned long start = map->vma->vm_start;
3205 + const unsigned long end = map->vma->vm_end;
3207 + ret = clean_invalid_mem_walk(start, end - start,
3208 + VCSM_CACHE_OP_FLUSH);
3213 +			up_read(&current->mm->mmap_sem);
3215 + /* L2 cache flush */
3216 + outer_clean_range(phys_addr,
3218 + (size_t) resource->res_size);
3221 + /* We need to zap all the vmas associated with this resource */
3222 + if (resource->lock_count == 1) {
3223 +			down_read(&current->mm->mmap_sem);
3224 + list_for_each_entry(map, &resource->map_list,
3225 + resource_map_list) {
3227 + zap_vma_ptes(map->vma,
3228 + map->vma->vm_start,
3229 + map->vma->vm_end -
3230 + map->vma->vm_start);
3233 +			up_read(&current->mm->mmap_sem);
3236 + /* Kernel allocated resources. */
3238 + /* Global + Taken in this context */
3239 + if (resource->ref_count == 2) {
3240 + if (!list_empty(&resource->map_list)) {
3241 + list_for_each_entry_safe(map, map_tmp,
3242 + &resource->map_list,
3243 + resource_map_list) {
3244 + if (map->res_addr) {
3246 + (resource->res_cached ==
3247 + VMCS_SM_CACHE_HOST)) {
3250 + phys_addr = (uint32_t)
3251 + resource->res_base_mem & 0x3FFFFFFF;
3253 + mm_vc_mem_phys_addr;
3255 + /* L1 cache flush */
3256 + dmac_flush_range((const
3259 + map->res_addr, (const void *)
3260 + (map->res_addr + resource->res_size));
3262 + /* L2 cache flush */
3267 + resource->res_size);
3270 + iounmap((void *)map->res_addr);
3271 + map->res_addr = 0;
3273 + vmcs_sm_remove_map(sm_state,
3283 + if (resource->lock_count) {
3284 + /* Bypass the videocore unlock. */
3287 + /* Unlock the videocore allocated resource. */
3290 + vc_vchi_sm_unlock(sm_state->sm_handle, &unlock,
3291 + &private->int_trans_id,
3293 + if (status == -EINTR) {
3294 + pr_debug("[%s]: requesting unlock memory action restart (trans_id: %u)\n",
3295 + __func__, private->int_trans_id);
3297 + ret = -ERESTARTSYS;
3298 + resource->res_stats[UNLOCK]--;
3299 + private->restart_sys = -EINTR;
3300 + private->int_action = VC_SM_MSG_TYPE_UNLOCK;
3302 + } else if (status != 0) {
3303 + pr_err("[%s]: failed to unlock vc mem (status: %u, trans_id: %u)\n",
3304 + __func__, status, private->int_trans_id);
3307 + resource->res_stats[UNLOCK_FAIL]++;
3312 + resource->res_stats[UNLOCK]++;
3313 + resource->lock_count--;
3316 + pr_debug("[%s]: success to unlock data - hdl %x, base address %p, ref-cnt %d\n",
3317 + __func__, unlock.res_handle, (void *)unlock.res_mem,
3318 + resource->lock_count);
3322 + vmcs_sm_release_resource(resource, 0);
3327 +/* Import a contiguous block of memory to be shared with VC. */
3328 +int vc_sm_ioctl_import_dmabuf(struct sm_priv_data_t *private,
3329 + struct vmcs_sm_ioctl_import_dmabuf *ioparam,
3330 + struct dma_buf *src_dma_buf)
3334 + struct sm_resource_t *resource = NULL;
3335 + struct vc_sm_import import = { 0 };
3336 + struct vc_sm_import_result result = { 0 };
3337 + struct dma_buf *dma_buf;
3338 + struct dma_buf_attachment *attach = NULL;
3339 + struct sg_table *sgt = NULL;
3341 + /* Setup our allocation parameters */
3342 + if (src_dma_buf) {
3343 + get_dma_buf(src_dma_buf);
3344 + dma_buf = src_dma_buf;
3346 + dma_buf = dma_buf_get(ioparam->dmabuf_fd);
3348 + if (IS_ERR(dma_buf))
3349 + return PTR_ERR(dma_buf);
3351 + attach = dma_buf_attach(dma_buf, &sm_state->pdev->dev);
3352 + if (IS_ERR(attach)) {
3353 + ret = PTR_ERR(attach);
3357 + sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
3358 + if (IS_ERR(sgt)) {
3359 + ret = PTR_ERR(sgt);
3363 + /* Verify that the address block is contiguous */
3364 + if (sgt->nents != 1) {
3369 + import.type = ((ioparam->cached == VMCS_SM_CACHE_VC) ||
3370 + (ioparam->cached == VMCS_SM_CACHE_BOTH)) ?
3371 + VC_SM_ALLOC_CACHED : VC_SM_ALLOC_NON_CACHED;
3372 + import.addr = (uint32_t)sg_dma_address(sgt->sgl);
3373 + import.size = sg_dma_len(sgt->sgl);
3374 + import.allocator = current->tgid;
3376 + if (*ioparam->name)
3377 + memcpy(import.name, ioparam->name, sizeof(import.name) - 1);
3379 + memcpy(import.name, VMCS_SM_RESOURCE_NAME_DEFAULT,
3380 + sizeof(VMCS_SM_RESOURCE_NAME_DEFAULT));
3382 + pr_debug("[%s]: attempt to import \"%s\" data - type %u, addr %p, size %u\n",
3383 + __func__, import.name, import.type,
3384 + (void *)import.addr, import.size);
3386 + /* Allocate local resource to track this allocation. */
3387 + resource = kzalloc(sizeof(*resource), GFP_KERNEL);
3392 + INIT_LIST_HEAD(&resource->map_list);
3393 + resource->ref_count++;
3394 + resource->pid = current->tgid;
3396 + /* Allocate the videocore resource. */
3397 + status = vc_vchi_sm_import(sm_state->sm_handle, &import, &result,
3398 + &private->int_trans_id);
3399 + if (status == -EINTR) {
3400 + pr_debug("[%s]: requesting import memory action restart (trans_id: %u)\n",
3401 + __func__, private->int_trans_id);
3402 + ret = -ERESTARTSYS;
3403 + private->restart_sys = -EINTR;
3404 + private->int_action = VC_SM_MSG_TYPE_IMPORT;
3406 + } else if (status || !result.res_handle) {
3407 + pr_debug("[%s]: failed to import memory on videocore (status: %u, trans_id: %u)\n",
3408 + __func__, status, private->int_trans_id);
3410 + resource->res_stats[ALLOC_FAIL]++;
3414 + /* Keep track of the resource we created. */
3415 + resource->private = private;
3416 + resource->res_handle = result.res_handle;
3417 + resource->res_size = import.size;
3418 + resource->res_cached = ioparam->cached;
3420 + resource->dma_buf = dma_buf;
3421 + resource->attach = attach;
3422 + resource->sgt = sgt;
3423 + resource->dma_addr = sg_dma_address(sgt->sgl);
3426 + * Kernel/user GUID. This global identifier is used for mmap'ing the
3427 + * allocated region from user space, it is passed as the mmap'ing
3428 + * offset, we use it to 'hide' the videocore handle/address.
3430 + mutex_lock(&sm_state->lock);
3431 + resource->res_guid = ++sm_state->guid;
3432 + mutex_unlock(&sm_state->lock);
3433 + resource->res_guid <<= PAGE_SHIFT;
3435 + vmcs_sm_add_resource(private, resource);
3438 + resource->res_stats[IMPORT]++;
3439 + ioparam->handle = resource->res_guid;
3443 + resource->res_stats[IMPORT_FAIL]++;
3445 + vc_sm_resource_deceased(resource, 1);
3449 + dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
3451 + dma_buf_detach(dma_buf, attach);
3452 + dma_buf_put(dma_buf);
3456 +/* Handle control from host. */
3457 +static long vc_sm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
3460 + unsigned int cmdnr = _IOC_NR(cmd);
3461 + struct sm_priv_data_t *file_data =
3462 + (struct sm_priv_data_t *)file->private_data;
3463 + struct sm_resource_t *resource = NULL;
3465 + /* Validate we can work with this device. */
3466 + if ((sm_state == NULL) || (file_data == NULL)) {
3467 + pr_err("[%s]: invalid device\n", __func__);
3472 + pr_debug("[%s]: cmd %x tgid %u, owner %u\n", __func__, cmdnr,
3473 + current->tgid, file_data->pid);
3475 + /* Action is a re-post of a previously interrupted action? */
3476 + if (file_data->restart_sys == -EINTR) {
3477 + struct vc_sm_action_clean_t action_clean;
3479 + pr_debug("[%s]: clean up of action %u (trans_id: %u) following EINTR\n",
3480 + __func__, file_data->int_action,
3481 + file_data->int_trans_id);
3483 + action_clean.res_action = file_data->int_action;
3484 + action_clean.action_trans_id = file_data->int_trans_id;
3486 + vc_vchi_sm_clean_up(sm_state->sm_handle, &action_clean);
3488 + file_data->restart_sys = 0;
3491 + /* Now process the command. */
3493 + /* New memory allocation.
3495 + case VMCS_SM_CMD_ALLOC:
3497 + struct vmcs_sm_ioctl_alloc ioparam;
3499 + /* Get the parameter data. */
3500 + if (copy_from_user
3501 + (&ioparam, (void *)arg, sizeof(ioparam)) != 0) {
3502 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
3508 + ret = vc_sm_ioctl_alloc(file_data, &ioparam);
3510 + (copy_to_user((void *)arg,
3511 + &ioparam, sizeof(ioparam)) != 0)) {
3512 + struct vmcs_sm_ioctl_free freeparam = {
3515 + pr_err("[%s]: failed to copy-to-user for cmd %x\n",
3517 + vc_sm_ioctl_free(file_data, &freeparam);
3526 + /* Share existing memory allocation. */
3527 + case VMCS_SM_CMD_ALLOC_SHARE:
3529 + struct vmcs_sm_ioctl_alloc_share ioparam;
3531 + /* Get the parameter data. */
3532 + if (copy_from_user
3533 + (&ioparam, (void *)arg, sizeof(ioparam)) != 0) {
3534 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
3540 + ret = vc_sm_ioctl_alloc_share(file_data, &ioparam);
3542 + /* Copy result back to user. */
3544 + && copy_to_user((void *)arg, &ioparam,
3545 + sizeof(ioparam)) != 0) {
3546 + struct vmcs_sm_ioctl_free freeparam = {
3549 + pr_err("[%s]: failed to copy-to-user for cmd %x\n",
3551 + vc_sm_ioctl_free(file_data, &freeparam);
3560 + case VMCS_SM_CMD_IMPORT_DMABUF:
3562 + struct vmcs_sm_ioctl_import_dmabuf ioparam;
3564 + /* Get the parameter data. */
3565 + if (copy_from_user
3566 + (&ioparam, (void *)arg, sizeof(ioparam)) != 0) {
3567 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
3573 + ret = vc_sm_ioctl_import_dmabuf(file_data, &ioparam,
3576 + (copy_to_user((void *)arg,
3577 + &ioparam, sizeof(ioparam)) != 0)) {
3578 + struct vmcs_sm_ioctl_free freeparam = {
3581 + pr_err("[%s]: failed to copy-to-user for cmd %x\n",
3583 + vc_sm_ioctl_free(file_data, &freeparam);
3592 + /* Lock (attempt to) *and* register a cache behavior change. */
3593 + case VMCS_SM_CMD_LOCK_CACHE:
3595 + struct vmcs_sm_ioctl_lock_cache ioparam;
3596 + struct vmcs_sm_ioctl_lock_unlock lock;
3598 + /* Get parameter data. */
3599 + if (copy_from_user
3600 + (&ioparam, (void *)arg, sizeof(ioparam)) != 0) {
3601 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
3607 + lock.handle = ioparam.handle;
3609 + vc_sm_ioctl_lock(file_data, &lock, 1,
3610 + ioparam.cached, 0);
3617 + /* Lock (attempt to) existing memory allocation. */
3618 + case VMCS_SM_CMD_LOCK:
3620 + struct vmcs_sm_ioctl_lock_unlock ioparam;
3622 + /* Get parameter data. */
3623 + if (copy_from_user
3624 + (&ioparam, (void *)arg, sizeof(ioparam)) != 0) {
3625 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
3631 + ret = vc_sm_ioctl_lock(file_data, &ioparam, 0, 0, 0);
3633 + /* Copy result back to user. */
3634 + if (copy_to_user((void *)arg, &ioparam, sizeof(ioparam))
3636 + pr_err("[%s]: failed to copy-to-user for cmd %x\n",
3646 + /* Unlock (attempt to) existing memory allocation. */
3647 + case VMCS_SM_CMD_UNLOCK:
3649 + struct vmcs_sm_ioctl_lock_unlock ioparam;
3651 + /* Get parameter data. */
3652 + if (copy_from_user
3653 + (&ioparam, (void *)arg, sizeof(ioparam)) != 0) {
3654 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
3660 + ret = vc_sm_ioctl_unlock(file_data, &ioparam, 0, 1, 0);
3667 + /* Resize (attempt to) existing memory allocation. */
3668 + case VMCS_SM_CMD_RESIZE:
3670 + struct vmcs_sm_ioctl_resize ioparam;
3672 + /* Get parameter data. */
3673 + if (copy_from_user
3674 + (&ioparam, (void *)arg, sizeof(ioparam)) != 0) {
3675 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
3681 + ret = vc_sm_ioctl_resize(file_data, &ioparam);
3683 + /* Copy result back to user. */
3684 + if (copy_to_user((void *)arg, &ioparam, sizeof(ioparam))
3686 + pr_err("[%s]: failed to copy-to-user for cmd %x\n",
3694 + /* Terminate existing memory allocation.
3696 + case VMCS_SM_CMD_FREE:
3698 + struct vmcs_sm_ioctl_free ioparam;
3700 + /* Get parameter data.
3702 + if (copy_from_user
3703 + (&ioparam, (void *)arg, sizeof(ioparam)) != 0) {
3704 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
3710 + ret = vc_sm_ioctl_free(file_data, &ioparam);
3718 + /* Walk allocation on videocore, information shows up in the
3721 + case VMCS_SM_CMD_VC_WALK_ALLOC:
3723 + pr_debug("[%s]: invoking walk alloc\n", __func__);
3725 + if (vc_vchi_sm_walk_alloc(sm_state->sm_handle) != 0)
3726 + pr_err("[%s]: failed to walk-alloc on videocore\n",
3734 + /* Walk mapping table on host, information shows up in the
3737 + case VMCS_SM_CMD_HOST_WALK_MAP:
3739 + /* Use pid of -1 to tell to walk the whole map. */
3740 + vmcs_sm_host_walk_map_per_pid(-1);
3747 + /* Walk allocation per process on host. */
3748 + case VMCS_SM_CMD_HOST_WALK_PID_ALLOC:
3750 + struct vmcs_sm_ioctl_walk ioparam;
3752 + /* Get parameter data. */
3753 + if (copy_from_user(&ioparam,
3754 + (void *)arg, sizeof(ioparam)) != 0) {
3755 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
3761 + vmcs_sm_host_walk_alloc(file_data);
3768 + /* Walk mapping table per process on host. */
3769 + case VMCS_SM_CMD_HOST_WALK_PID_MAP:
3771 + struct vmcs_sm_ioctl_walk ioparam;
3773 + /* Get parameter data. */
3774 + if (copy_from_user(&ioparam,
3775 + (void *)arg, sizeof(ioparam)) != 0) {
3776 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
3782 + vmcs_sm_host_walk_map_per_pid(ioparam.pid);
3789 + /* Gets the size of the memory associated with a user handle. */
3790 + case VMCS_SM_CMD_SIZE_USR_HANDLE:
3792 + struct vmcs_sm_ioctl_size ioparam;
3794 + /* Get parameter data. */
3795 + if (copy_from_user(&ioparam,
3796 + (void *)arg, sizeof(ioparam)) != 0) {
3797 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
3803 + /* Locate resource from GUID. */
3805 + vmcs_sm_acquire_resource(file_data, ioparam.handle);
3806 + if (resource != NULL) {
3807 + ioparam.size = resource->res_size;
3808 + vmcs_sm_release_resource(resource, 0);
3813 + if (copy_to_user((void *)arg,
3814 + &ioparam, sizeof(ioparam)) != 0) {
3815 + pr_err("[%s]: failed to copy-to-user for cmd %x\n",
3825 + /* Verify we are dealing with a valid resource. */
3826 + case VMCS_SM_CMD_CHK_USR_HANDLE:
3828 + struct vmcs_sm_ioctl_chk ioparam;
3830 + /* Get parameter data. */
3831 + if (copy_from_user(&ioparam,
3832 + (void *)arg, sizeof(ioparam)) != 0) {
3833 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
3840 + /* Locate resource from GUID. */
3842 + vmcs_sm_acquire_resource(file_data, ioparam.handle);
3843 + if (resource == NULL)
3846 + * If the resource is cacheable, return additional
3847 + * information that may be needed to flush the cache.
3849 + else if ((resource->res_cached == VMCS_SM_CACHE_HOST) ||
3850 + (resource->res_cached == VMCS_SM_CACHE_BOTH)) {
3852 + vmcs_sm_usr_address_from_pid_and_usr_handle
3853 + (current->tgid, ioparam.handle);
3854 + ioparam.size = resource->res_size;
3855 + ioparam.cache = resource->res_cached;
3859 + ioparam.cache = resource->res_cached;
3863 + vmcs_sm_release_resource(resource, 0);
3865 + if (copy_to_user((void *)arg,
3866 + &ioparam, sizeof(ioparam)) != 0) {
3867 + pr_err("[%s]: failed to copy-to-user for cmd %x\n",
3878 + * Maps a user handle given the process and the virtual address.
3880 + case VMCS_SM_CMD_MAPPED_USR_HANDLE:
3882 + struct vmcs_sm_ioctl_map ioparam;
3884 + /* Get parameter data. */
3885 + if (copy_from_user(&ioparam,
3886 + (void *)arg, sizeof(ioparam)) != 0) {
3887 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
3895 + vmcs_sm_usr_handle_from_pid_and_address(
3896 + ioparam.pid, ioparam.addr);
3899 + vmcs_sm_acquire_resource(file_data, ioparam.handle);
3900 + if ((resource != NULL)
3901 + && ((resource->res_cached == VMCS_SM_CACHE_HOST)
3902 + || (resource->res_cached ==
3903 + VMCS_SM_CACHE_BOTH))) {
3904 + ioparam.size = resource->res_size;
3910 + vmcs_sm_release_resource(resource, 0);
3912 + if (copy_to_user((void *)arg,
3913 + &ioparam, sizeof(ioparam)) != 0) {
3914 + pr_err("[%s]: failed to copy-to-user for cmd %x\n",
3925 + * Maps a videocore handle given process and virtual address.
3927 + case VMCS_SM_CMD_MAPPED_VC_HDL_FROM_ADDR:
3929 + struct vmcs_sm_ioctl_map ioparam;
3931 + /* Get parameter data. */
3932 + if (copy_from_user(&ioparam,
3933 + (void *)arg, sizeof(ioparam)) != 0) {
3934 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
3940 + ioparam.handle = vmcs_sm_vc_handle_from_pid_and_address(
3941 + ioparam.pid, ioparam.addr);
3943 + if (copy_to_user((void *)arg,
3944 + &ioparam, sizeof(ioparam)) != 0) {
3945 + pr_err("[%s]: failed to copy-to-user for cmd %x\n",
3956 + /* Maps a videocore handle given process and user handle. */
3957 + case VMCS_SM_CMD_MAPPED_VC_HDL_FROM_HDL:
3959 + struct vmcs_sm_ioctl_map ioparam;
3961 + /* Get parameter data. */
3962 + if (copy_from_user(&ioparam,
3963 + (void *)arg, sizeof(ioparam)) != 0) {
3964 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
3970 + /* Locate resource from GUID. */
3972 + vmcs_sm_acquire_resource(file_data, ioparam.handle);
3973 + if (resource != NULL) {
3974 + ioparam.handle = resource->res_handle;
3975 + vmcs_sm_release_resource(resource, 0);
3977 + ioparam.handle = 0;
3980 + if (copy_to_user((void *)arg,
3981 + &ioparam, sizeof(ioparam)) != 0) {
3982 + pr_err("[%s]: failed to copy-to-user for cmd %x\n",
3994 + * Maps a videocore address given process and videocore handle.
3996 + case VMCS_SM_CMD_MAPPED_VC_ADDR_FROM_HDL:
3998 + struct vmcs_sm_ioctl_map ioparam;
4000 + /* Get parameter data. */
4001 + if (copy_from_user(&ioparam,
4002 + (void *)arg, sizeof(ioparam)) != 0) {
4003 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
4010 + /* Locate resource from GUID. */
4012 + vmcs_sm_acquire_resource(file_data, ioparam.handle);
4013 + if (resource != NULL) {
4015 + (unsigned int)resource->res_base_mem;
4016 + vmcs_sm_release_resource(resource, 0);
4021 + if (copy_to_user((void *)arg,
4022 + &ioparam, sizeof(ioparam)) != 0) {
4023 + pr_err("[%s]: failed to copy-to-user for cmd %x\n",
4033 + /* Maps a user address given process and vc handle. */
4034 + case VMCS_SM_CMD_MAPPED_USR_ADDRESS:
4036 + struct vmcs_sm_ioctl_map ioparam;
4038 + /* Get parameter data. */
4039 + if (copy_from_user(&ioparam,
4040 + (void *)arg, sizeof(ioparam)) != 0) {
4041 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
4048 + * Return the address information from the mapping,
4049 + * 0 (ie NULL) if it cannot locate the actual mapping.
4052 + vmcs_sm_usr_address_from_pid_and_usr_handle
4053 + (ioparam.pid, ioparam.handle);
4055 + if (copy_to_user((void *)arg,
4056 + &ioparam, sizeof(ioparam)) != 0) {
4057 + pr_err("[%s]: failed to copy-to-user for cmd %x\n",
4067 + /* Flush the cache for a given mapping. */
4068 + case VMCS_SM_CMD_FLUSH:
4070 + struct vmcs_sm_ioctl_cache ioparam;
4072 + /* Get parameter data. */
4073 + if (copy_from_user(&ioparam,
4074 + (void *)arg, sizeof(ioparam)) != 0) {
4075 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
4081 + /* Locate resource from GUID. */
4083 + vmcs_sm_acquire_resource(file_data, ioparam.handle);
4084 + if (resource == NULL) {
4089 + ret = clean_invalid_resource_walk((void __user*) ioparam.addr,
4090 + ioparam.size, VCSM_CACHE_OP_FLUSH, ioparam.handle,
4092 + vmcs_sm_release_resource(resource, 0);
4098 + /* Invalidate the cache for a given mapping. */
4099 + case VMCS_SM_CMD_INVALID:
4101 + struct vmcs_sm_ioctl_cache ioparam;
4103 + /* Get parameter data. */
4104 + if (copy_from_user(&ioparam,
4105 + (void *)arg, sizeof(ioparam)) != 0) {
4106 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
4112 + /* Locate resource from GUID. */
4114 + vmcs_sm_acquire_resource(file_data, ioparam.handle);
4115 + if (resource == NULL) {
4120 + ret = clean_invalid_resource_walk((void __user*) ioparam.addr,
4121 + ioparam.size, VCSM_CACHE_OP_INV, ioparam.handle, resource);
4122 + vmcs_sm_release_resource(resource, 0);
4128 + /* Flush/Invalidate the cache for a given mapping. */
4129 + case VMCS_SM_CMD_CLEAN_INVALID:
4132 + struct vmcs_sm_ioctl_clean_invalid ioparam;
4134 + /* Get parameter data. */
4135 + if (copy_from_user(&ioparam,
4136 + (void *)arg, sizeof(ioparam)) != 0) {
4137 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
4142 + for (i = 0; i < sizeof(ioparam.s) / sizeof(*ioparam.s); i++) {
4143 + if (ioparam.s[i].cmd == VCSM_CACHE_OP_NOP)
4146 + /* Locate resource from GUID. */
4148 + vmcs_sm_acquire_resource(file_data, ioparam.s[i].handle);
4149 + if (resource == NULL) {
4154 + ret = clean_invalid_resource_walk(
4155 + (void __user*) ioparam.s[i].addr, ioparam.s[i].size,
4156 + ioparam.s[i].cmd, ioparam.s[i].handle, resource);
4157 + vmcs_sm_release_resource(resource, 0);
4164 + * Flush/Invalidate the cache for a given mapping.
4165 + * Blocks must be pinned (i.e. accessed) before this call.
4167 + case VMCS_SM_CMD_CLEAN_INVALID2:
4170 + struct vmcs_sm_ioctl_clean_invalid2 ioparam;
4171 + struct vmcs_sm_ioctl_clean_invalid_block *block = NULL;
4173 + /* Get parameter data. */
4174 + if (copy_from_user(&ioparam,
4175 + (void *)arg, sizeof(ioparam)) != 0) {
4176 + pr_err("[%s]: failed to copy-from-user header for cmd %x\n",
4181 + block = kmalloc(ioparam.op_count *
4182 + sizeof(struct vmcs_sm_ioctl_clean_invalid_block),
4188 + if (copy_from_user(block,
4189 + (void *)(arg + sizeof(ioparam)), ioparam.op_count * sizeof(struct vmcs_sm_ioctl_clean_invalid_block)) != 0) {
4190 + pr_err("[%s]: failed to copy-from-user payload for cmd %x\n",
4196 + for (i = 0; i < ioparam.op_count; i++) {
4197 + const struct vmcs_sm_ioctl_clean_invalid_block * const op = block + i;
4199 + if (op->invalidate_mode == VCSM_CACHE_OP_NOP)
4202 + ret = clean_invalid_contiguous_mem_2d(
4203 + (void __user*) op->start_address, op->block_count,
4204 + op->block_size, op->inter_block_stride,
4205 + op->invalidate_mode);
4225 +/* Device operations that we manage in this driver. */
4226 +static const struct file_operations vmcs_sm_ops = {
4227 + .owner = THIS_MODULE,
4228 + .unlocked_ioctl = vc_sm_ioctl,
4229 + .open = vc_sm_open,
4230 + .release = vc_sm_release,
4231 + .mmap = vc_sm_mmap,
4234 +/* Creation of device. */
4235 +static int vc_sm_create_sharedmemory(void)
4239 + if (sm_state == NULL) {
4244 + /* Create a device class for creating dev nodes. */
4245 + sm_state->sm_class = class_create(THIS_MODULE, "vc-sm");
4246 + if (IS_ERR(sm_state->sm_class)) {
4247 + pr_err("[%s]: unable to create device class\n", __func__);
4248 + ret = PTR_ERR(sm_state->sm_class);
4252 + /* Create a character driver. */
4253 + ret = alloc_chrdev_region(&sm_state->sm_devid,
4254 + DEVICE_MINOR, 1, DEVICE_NAME);
4256 + pr_err("[%s]: unable to allocate device number\n", __func__);
4257 + goto out_dev_class_destroy;
4260 + cdev_init(&sm_state->sm_cdev, &vmcs_sm_ops);
4261 + ret = cdev_add(&sm_state->sm_cdev, sm_state->sm_devid, 1);
4263 + pr_err("[%s]: unable to register device\n", __func__);
4264 + goto out_chrdev_unreg;
4267 + /* Create a device node. */
4268 + sm_state->sm_dev = device_create(sm_state->sm_class,
4270 + MKDEV(MAJOR(sm_state->sm_devid),
4271 + DEVICE_MINOR), NULL,
4273 + if (IS_ERR(sm_state->sm_dev)) {
4274 + pr_err("[%s]: unable to create device node\n", __func__);
4275 + ret = PTR_ERR(sm_state->sm_dev);
4276 + goto out_chrdev_del;
4282 + cdev_del(&sm_state->sm_cdev);
4284 + unregister_chrdev_region(sm_state->sm_devid, 1);
4285 +out_dev_class_destroy:
4286 + class_destroy(sm_state->sm_class);
4287 + sm_state->sm_class = NULL;
4292 +/* Termination of the device. */
4293 +static int vc_sm_remove_sharedmemory(void)
4297 + if (sm_state == NULL) {
4298 + /* Nothing to do. */
4303 + /* Remove the sharedmemory character driver. */
4304 + cdev_del(&sm_state->sm_cdev);
4306 + /* Unregister region. */
4307 + unregister_chrdev_region(sm_state->sm_devid, 1);
4316 +/* Videocore connected. */
4317 +static void vc_sm_connected_init(void)
4320 + VCHI_INSTANCE_T vchi_instance;
4321 + VCHI_CONNECTION_T *vchi_connection = NULL;
4323 + pr_info("[%s]: start\n", __func__);
4326 + * Initialize and create a VCHI connection for the shared memory service
4327 + * running on videocore.
4329 + ret = vchi_initialise(&vchi_instance);
4331 + pr_err("[%s]: failed to initialise VCHI instance (ret=%d)\n",
4335 + goto err_free_mem;
4338 + ret = vchi_connect(NULL, 0, vchi_instance);
4340 + pr_err("[%s]: failed to connect VCHI instance (ret=%d)\n",
4344 + goto err_free_mem;
4347 + /* Initialize an instance of the shared memory service. */
4348 + sm_state->sm_handle =
4349 + vc_vchi_sm_init(vchi_instance, &vchi_connection, 1);
4350 + if (sm_state->sm_handle == NULL) {
4351 + pr_err("[%s]: failed to initialize shared memory service\n",
4355 + goto err_free_mem;
4358 + /* Create a debug fs directory entry (root). */
4359 + sm_state->dir_root = debugfs_create_dir(VC_SM_DIR_ROOT_NAME, NULL);
4360 + if (!sm_state->dir_root) {
4361 + pr_err("[%s]: failed to create \'%s\' directory entry\n",
4362 + __func__, VC_SM_DIR_ROOT_NAME);
4365 + goto err_stop_sm_service;
4368 + sm_state->dir_state.show = &vc_sm_global_state_show;
4369 + sm_state->dir_state.dir_entry = debugfs_create_file(VC_SM_STATE,
4370 + 0444, sm_state->dir_root, &sm_state->dir_state,
4371 + &vc_sm_debug_fs_fops);
4373 + sm_state->dir_stats.show = &vc_sm_global_statistics_show;
4374 + sm_state->dir_stats.dir_entry = debugfs_create_file(VC_SM_STATS,
4375 + 0444, sm_state->dir_root, &sm_state->dir_stats,
4376 + &vc_sm_debug_fs_fops);
4378 + /* Create the proc entry children. */
4379 + sm_state->dir_alloc = debugfs_create_dir(VC_SM_DIR_ALLOC_NAME,
4380 + sm_state->dir_root);
4382 + /* Create a shared memory device. */
4383 + ret = vc_sm_create_sharedmemory();
4385 + pr_err("[%s]: failed to create shared memory device\n",
4387 + goto err_remove_debugfs;
4390 + INIT_LIST_HEAD(&sm_state->map_list);
4391 + INIT_LIST_HEAD(&sm_state->resource_list);
4393 + sm_state->data_knl = vc_sm_create_priv_data(0);
4394 + if (sm_state->data_knl == NULL) {
4395 + pr_err("[%s]: failed to create kernel private data tracker\n",
4397 + goto err_remove_shared_memory;
4404 +err_remove_shared_memory:
4405 + vc_sm_remove_sharedmemory();
4406 +err_remove_debugfs:
4407 + debugfs_remove_recursive(sm_state->dir_root);
4408 +err_stop_sm_service:
4409 + vc_vchi_sm_stop(&sm_state->sm_handle);
4413 + pr_info("[%s]: end - returning %d\n", __func__, ret);
4416 +/* Driver loading. */
4417 +static int bcm2835_vcsm_probe(struct platform_device *pdev)
4419 + pr_info("vc-sm: Videocore shared memory driver\n");
4421 + sm_state = kzalloc(sizeof(*sm_state), GFP_KERNEL);
4424 + sm_state->pdev = pdev;
4425 + mutex_init(&sm_state->lock);
4426 + mutex_init(&sm_state->map_lock);
4428 + vchiq_add_connected_callback(vc_sm_connected_init);
4432 +/* Driver unloading. */
4433 +static int bcm2835_vcsm_remove(struct platform_device *pdev)
4435 + pr_debug("[%s]: start\n", __func__);
4437 + /* Remove shared memory device. */
4438 + vc_sm_remove_sharedmemory();
4440 + /* Remove all proc entries. */
4441 + debugfs_remove_recursive(sm_state->dir_root);
4443 + /* Stop the videocore shared memory service. */
4444 + vc_vchi_sm_stop(&sm_state->sm_handle);
4446 + /* Free the memory for the state structure. */
4447 + mutex_destroy(&(sm_state->map_lock));
4451 + pr_debug("[%s]: end\n", __func__);
4455 +#if defined(__KERNEL__)
4456 +/* Allocate a shared memory handle and block. */
4457 +int vc_sm_alloc(struct vc_sm_alloc_t *alloc, int *handle)
4459 + struct vmcs_sm_ioctl_alloc ioparam = { 0 };
4461 + struct sm_resource_t *resource;
4463 + /* Validate we can work with this device. */
4464 + if (sm_state == NULL || alloc == NULL || handle == NULL) {
4465 + pr_err("[%s]: invalid input\n", __func__);
4469 + ioparam.size = alloc->base_unit;
4470 + ioparam.num = alloc->num_unit;
4472 + alloc->type == VC_SM_ALLOC_CACHED ? VMCS_SM_CACHE_VC : 0;
4474 + ret = vc_sm_ioctl_alloc(sm_state->data_knl, &ioparam);
4478 + vmcs_sm_acquire_resource(sm_state->data_knl,
4481 + resource->pid = 0;
4482 + vmcs_sm_release_resource(resource, 0);
4484 + /* Assign valid handle at this time. */
4485 + *handle = ioparam.handle;
4493 +EXPORT_SYMBOL_GPL(vc_sm_alloc);
4495 +/* Get an internal resource handle mapped from the external one. */
4496 +int vc_sm_int_handle(int handle)
4498 + struct sm_resource_t *resource;
4501 + /* Validate we can work with this device. */
4502 + if (sm_state == NULL || handle == 0) {
4503 + pr_err("[%s]: invalid input\n", __func__);
4507 + /* Locate resource from GUID. */
4508 + resource = vmcs_sm_acquire_resource(sm_state->data_knl, handle);
4510 + ret = resource->res_handle;
4511 + vmcs_sm_release_resource(resource, 0);
4516 +EXPORT_SYMBOL_GPL(vc_sm_int_handle);
4518 +/* Free a previously allocated shared memory handle and block. */
4519 +int vc_sm_free(int handle)
4521 + struct vmcs_sm_ioctl_free ioparam = { handle };
4523 + /* Validate we can work with this device. */
4524 + if (sm_state == NULL || handle == 0) {
4525 + pr_err("[%s]: invalid input\n", __func__);
4529 + return vc_sm_ioctl_free(sm_state->data_knl, &ioparam);
4531 +EXPORT_SYMBOL_GPL(vc_sm_free);
4533 +/* Lock a memory handle for use by kernel. */
4534 +int vc_sm_lock(int handle, enum vc_sm_lock_cache_mode mode,
4535 + unsigned long *data)
4537 + struct vmcs_sm_ioctl_lock_unlock ioparam;
4540 + /* Validate we can work with this device. */
4541 + if (sm_state == NULL || handle == 0 || data == NULL) {
4542 + pr_err("[%s]: invalid input\n", __func__);
4548 + ioparam.handle = handle;
4549 + ret = vc_sm_ioctl_lock(sm_state->data_knl,
4553 + VC_SM_LOCK_CACHED) ? VMCS_SM_CACHE_HOST :
4554 + VMCS_SM_CACHE_NONE), 0);
4556 + *data = ioparam.addr;
4559 +EXPORT_SYMBOL_GPL(vc_sm_lock);
4561 +/* Unlock a memory handle in use by kernel. */
4562 +int vc_sm_unlock(int handle, int flush, int no_vc_unlock)
4564 + struct vmcs_sm_ioctl_lock_unlock ioparam;
4566 + /* Validate we can work with this device. */
4567 + if (sm_state == NULL || handle == 0) {
4568 + pr_err("[%s]: invalid input\n", __func__);
4572 + ioparam.handle = handle;
4573 + return vc_sm_ioctl_unlock(sm_state->data_knl,
4574 + &ioparam, flush, 0, no_vc_unlock);
4576 +EXPORT_SYMBOL_GPL(vc_sm_unlock);
4578 +/* Map a shared memory region for use by kernel. */
4579 +int vc_sm_map(int handle, unsigned int sm_addr,
4580 + enum vc_sm_lock_cache_mode mode, unsigned long *data)
4582 + struct vmcs_sm_ioctl_lock_unlock ioparam;
4585 + /* Validate we can work with this device. */
4586 + if (sm_state == NULL || handle == 0 || data == NULL || sm_addr == 0) {
4587 + pr_err("[%s]: invalid input\n", __func__);
4593 + ioparam.handle = handle;
4594 + ret = vc_sm_ioctl_lock(sm_state->data_knl,
4598 + VC_SM_LOCK_CACHED) ? VMCS_SM_CACHE_HOST :
4599 + VMCS_SM_CACHE_NONE), sm_addr);
4601 + *data = ioparam.addr;
4604 +EXPORT_SYMBOL_GPL(vc_sm_map);
4606 +/* Import a dmabuf to be shared with VC. */
4607 +int vc_sm_import_dmabuf(struct dma_buf *dmabuf, int *handle)
4609 + struct vmcs_sm_ioctl_import_dmabuf ioparam = { 0 };
4611 + struct sm_resource_t *resource;
4613 + /* Validate we can work with this device. */
4614 + if (!sm_state || !dmabuf || !handle) {
4615 + pr_err("[%s]: invalid input\n", __func__);
4619 + ioparam.cached = 0;
4620 + strcpy(ioparam.name, "KRNL DMABUF");
4622 + ret = vc_sm_ioctl_import_dmabuf(sm_state->data_knl, &ioparam, dmabuf);
4625 + resource = vmcs_sm_acquire_resource(sm_state->data_knl,
4628 + resource->pid = 0;
4629 + vmcs_sm_release_resource(resource, 0);
4631 + /* Assign valid handle at this time.*/
4632 + *handle = ioparam.handle;
4640 +EXPORT_SYMBOL_GPL(vc_sm_import_dmabuf);
4644 + * Register the driver with device tree
4647 +static const struct of_device_id bcm2835_vcsm_of_match[] = {
4648 + {.compatible = "raspberrypi,bcm2835-vcsm",},
4649 + { /* sentinel */ },
4652 +MODULE_DEVICE_TABLE(of, bcm2835_vcsm_of_match);
4654 +static struct platform_driver bcm2835_vcsm_driver = {
4655 + .probe = bcm2835_vcsm_probe,
4656 + .remove = bcm2835_vcsm_remove,
4658 + .name = DRIVER_NAME,
4659 + .owner = THIS_MODULE,
4660 + .of_match_table = bcm2835_vcsm_of_match,
4664 +module_platform_driver(bcm2835_vcsm_driver);
4666 +MODULE_AUTHOR("Broadcom");
4667 +MODULE_DESCRIPTION("VideoCore SharedMemory Driver");
4668 +MODULE_LICENSE("GPL v2");
4670 +++ b/include/linux/broadcom/vmcs_sm_ioctl.h
4672 +/*****************************************************************************
4673 +* Copyright 2011 Broadcom Corporation. All rights reserved.
4675 +* Unless you and Broadcom execute a separate written software license
4676 +* agreement governing use of this software, this software is licensed to you
4677 +* under the terms of the GNU General Public License version 2, available at
4678 +* http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
4680 +* Notwithstanding the above, under no circumstances may you combine this
4681 +* software in any way with any other Broadcom software provided under a
4682 +* license other than the GPL, without Broadcom's express prior written
4685 +*****************************************************************************/
4687 +#if !defined(__VMCS_SM_IOCTL_H__INCLUDED__)
4688 +#define __VMCS_SM_IOCTL_H__INCLUDED__
4690 +/* ---- Include Files ---------------------------------------------------- */
4692 +#if defined(__KERNEL__)
4693 +#include <linux/types.h> /* Needed for standard types */
4695 +#include <stdint.h>
4698 +#include <linux/ioctl.h>
4700 +/* ---- Constants and Types ---------------------------------------------- */
4702 +#define VMCS_SM_RESOURCE_NAME 32
4703 +#define VMCS_SM_RESOURCE_NAME_DEFAULT "sm-host-resource"
4705 +/* Type define used to create unique IOCTL number */
4706 +#define VMCS_SM_MAGIC_TYPE 'I'
4708 +/* IOCTL commands */
4709 +enum vmcs_sm_cmd_e {
4710 + VMCS_SM_CMD_ALLOC = 0x5A, /* Start at 0x5A arbitrarily */
4711 + VMCS_SM_CMD_ALLOC_SHARE,
4713 + VMCS_SM_CMD_LOCK_CACHE,
4714 + VMCS_SM_CMD_UNLOCK,
4715 + VMCS_SM_CMD_RESIZE,
4716 + VMCS_SM_CMD_UNMAP,
4718 + VMCS_SM_CMD_FLUSH,
4719 + VMCS_SM_CMD_INVALID,
4721 + VMCS_SM_CMD_SIZE_USR_HANDLE,
4722 + VMCS_SM_CMD_CHK_USR_HANDLE,
4724 + VMCS_SM_CMD_MAPPED_USR_HANDLE,
4725 + VMCS_SM_CMD_MAPPED_USR_ADDRESS,
4726 + VMCS_SM_CMD_MAPPED_VC_HDL_FROM_ADDR,
4727 + VMCS_SM_CMD_MAPPED_VC_HDL_FROM_HDL,
4728 + VMCS_SM_CMD_MAPPED_VC_ADDR_FROM_HDL,
4730 + VMCS_SM_CMD_VC_WALK_ALLOC,
4731 + VMCS_SM_CMD_HOST_WALK_MAP,
4732 + VMCS_SM_CMD_HOST_WALK_PID_ALLOC,
4733 + VMCS_SM_CMD_HOST_WALK_PID_MAP,
4735 + VMCS_SM_CMD_CLEAN_INVALID,
4736 + VMCS_SM_CMD_CLEAN_INVALID2,
4738 + VMCS_SM_CMD_IMPORT_DMABUF,
4740 + VMCS_SM_CMD_LAST /* Do not delete */
4743 +/* Cache type supported, conveniently matches the user space definition in
4746 +enum vmcs_sm_cache_e {
4747 + VMCS_SM_CACHE_NONE,
4748 + VMCS_SM_CACHE_HOST,
4750 + VMCS_SM_CACHE_BOTH,
4753 +/* Cache functions -- NOTE(review): these VCSM_CACHE_OP_* values are re-defined below next to VCSM_CACHE_OP_NOP; keep a single definition to avoid macro-redefinition warnings */
4754 +#define VCSM_CACHE_OP_INV 0x01
4755 +#define VCSM_CACHE_OP_CLEAN 0x02
4756 +#define VCSM_CACHE_OP_FLUSH 0x03
4758 +/* IOCTL Data structures */
4759 +struct vmcs_sm_ioctl_alloc {
4760 + /* user -> kernel */
4761 + unsigned int size;
4763 + enum vmcs_sm_cache_e cached;
4764 + char name[VMCS_SM_RESOURCE_NAME];
4766 + /* kernel -> user */
4767 + unsigned int handle;
4768 + /* unsigned int base_addr; */
4771 +struct vmcs_sm_ioctl_alloc_share {
4772 + /* user -> kernel */
4773 + unsigned int handle;
4774 + unsigned int size;
4777 +struct vmcs_sm_ioctl_free {
4778 + /* user -> kernel */
4779 + unsigned int handle;
4780 + /* unsigned int base_addr; */
4783 +struct vmcs_sm_ioctl_lock_unlock {
4784 + /* user -> kernel */
4785 + unsigned int handle;
4787 + /* kernel -> user */
4788 + unsigned int addr;
4791 +struct vmcs_sm_ioctl_lock_cache {
4792 + /* user -> kernel */
4793 + unsigned int handle;
4794 + enum vmcs_sm_cache_e cached;
4797 +struct vmcs_sm_ioctl_resize {
4798 + /* user -> kernel */
4799 + unsigned int handle;
4800 + unsigned int new_size;
4802 + /* kernel -> user */
4803 + unsigned int old_size;
4806 +struct vmcs_sm_ioctl_map {
4807 + /* user -> kernel */
4808 + /* and kernel -> user */
4810 + unsigned int handle;
4811 + unsigned int addr;
4813 + /* kernel -> user */
4814 + unsigned int size;
4817 +struct vmcs_sm_ioctl_walk {
4818 + /* user -> kernel */
4822 +struct vmcs_sm_ioctl_chk {
4823 + /* user -> kernel */
4824 + unsigned int handle;
4826 + /* kernel -> user */
4827 + unsigned int addr;
4828 + unsigned int size;
4829 + enum vmcs_sm_cache_e cache;
4832 +struct vmcs_sm_ioctl_size {
4833 + /* user -> kernel */
4834 + unsigned int handle;
4836 + /* kernel -> user */
4837 + unsigned int size;
4840 +struct vmcs_sm_ioctl_cache {
4841 + /* user -> kernel */
4842 + unsigned int handle;
4843 + unsigned int addr;
4844 + unsigned int size;
4848 + * Cache functions to be set to struct vmcs_sm_ioctl_clean_invalid cmd and
4849 + * vmcs_sm_ioctl_clean_invalid2 invalidate_mode.
4851 +#define VCSM_CACHE_OP_NOP 0x00
4852 +#define VCSM_CACHE_OP_INV 0x01
4853 +#define VCSM_CACHE_OP_CLEAN 0x02
4854 +#define VCSM_CACHE_OP_FLUSH 0x03
4856 +struct vmcs_sm_ioctl_clean_invalid {
4857 + /* user -> kernel */
4860 + unsigned int handle;
4861 + unsigned int addr;
4862 + unsigned int size;
4866 +struct vmcs_sm_ioctl_clean_invalid2 {
4869 + struct vmcs_sm_ioctl_clean_invalid_block {
4870 + uint16_t invalidate_mode;
4871 + uint16_t block_count;
4872 + void * start_address;
4873 + uint32_t block_size;
4874 + uint32_t inter_block_stride;
4878 +struct vmcs_sm_ioctl_import_dmabuf {
4879 + /* user -> kernel */
4881 + enum vmcs_sm_cache_e cached;
4882 + char name[VMCS_SM_RESOURCE_NAME];
4884 + /* kernel -> user */
4885 + unsigned int handle;
4888 +/* IOCTL numbers */
4889 +#define VMCS_SM_IOCTL_MEM_ALLOC\
4890 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_ALLOC,\
4891 + struct vmcs_sm_ioctl_alloc)
4892 +#define VMCS_SM_IOCTL_MEM_ALLOC_SHARE\
4893 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_ALLOC_SHARE,\
4894 + struct vmcs_sm_ioctl_alloc_share)
4895 +#define VMCS_SM_IOCTL_MEM_LOCK\
4896 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_LOCK,\
4897 + struct vmcs_sm_ioctl_lock_unlock)
4898 +#define VMCS_SM_IOCTL_MEM_LOCK_CACHE\
4899 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_LOCK_CACHE,\
4900 + struct vmcs_sm_ioctl_lock_cache)
4901 +#define VMCS_SM_IOCTL_MEM_UNLOCK\
4902 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_UNLOCK,\
4903 + struct vmcs_sm_ioctl_lock_unlock)
4904 +#define VMCS_SM_IOCTL_MEM_RESIZE\
4905 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_RESIZE,\
4906 + struct vmcs_sm_ioctl_resize)
4907 +#define VMCS_SM_IOCTL_MEM_FREE\
4908 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_FREE,\
4909 + struct vmcs_sm_ioctl_free)
4910 +#define VMCS_SM_IOCTL_MEM_FLUSH\
4911 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_FLUSH,\
4912 + struct vmcs_sm_ioctl_cache)
4913 +#define VMCS_SM_IOCTL_MEM_INVALID\
4914 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_INVALID,\
4915 + struct vmcs_sm_ioctl_cache)
4916 +#define VMCS_SM_IOCTL_MEM_CLEAN_INVALID\
4917 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_CLEAN_INVALID,\
4918 + struct vmcs_sm_ioctl_clean_invalid)
4919 +#define VMCS_SM_IOCTL_MEM_CLEAN_INVALID2\
4920 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_CLEAN_INVALID2,\
4921 + struct vmcs_sm_ioctl_clean_invalid2)
4923 +#define VMCS_SM_IOCTL_SIZE_USR_HDL\
4924 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_SIZE_USR_HANDLE,\
4925 + struct vmcs_sm_ioctl_size)
4926 +#define VMCS_SM_IOCTL_CHK_USR_HDL\
4927 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_CHK_USR_HANDLE,\
4928 + struct vmcs_sm_ioctl_chk)
4930 +#define VMCS_SM_IOCTL_MAP_USR_HDL\
4931 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_MAPPED_USR_HANDLE,\
4932 + struct vmcs_sm_ioctl_map)
4933 +#define VMCS_SM_IOCTL_MAP_USR_ADDRESS\
4934 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_MAPPED_USR_ADDRESS,\
4935 + struct vmcs_sm_ioctl_map)
4936 +#define VMCS_SM_IOCTL_MAP_VC_HDL_FR_ADDR\
4937 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_MAPPED_VC_HDL_FROM_ADDR,\
4938 + struct vmcs_sm_ioctl_map)
4939 +#define VMCS_SM_IOCTL_MAP_VC_HDL_FR_HDL\
4940 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_MAPPED_VC_HDL_FROM_HDL,\
4941 + struct vmcs_sm_ioctl_map)
4942 +#define VMCS_SM_IOCTL_MAP_VC_ADDR_FR_HDL\
4943 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_MAPPED_VC_ADDR_FROM_HDL,\
4944 + struct vmcs_sm_ioctl_map)
4946 +#define VMCS_SM_IOCTL_VC_WALK_ALLOC\
4947 + _IO(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_VC_WALK_ALLOC)
4948 +#define VMCS_SM_IOCTL_HOST_WALK_MAP\
4949 + _IO(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_HOST_WALK_MAP)
4950 +#define VMCS_SM_IOCTL_HOST_WALK_PID_ALLOC\
4951 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_HOST_WALK_PID_ALLOC,\
4952 + struct vmcs_sm_ioctl_walk)
4953 +#define VMCS_SM_IOCTL_HOST_WALK_PID_MAP\
4954 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_HOST_WALK_PID_MAP,\
4955 + struct vmcs_sm_ioctl_walk)
4957 +#define VMCS_SM_IOCTL_MEM_IMPORT_DMABUF\
4958 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_IMPORT_DMABUF,\
4959 + struct vmcs_sm_ioctl_import_dmabuf)
4961 +/* ---- Variable Externs ------------------------------------------------- */
4963 +/* ---- Function Prototypes ---------------------------------------------- */
4965 +#endif /* __VMCS_SM_IOCTL_H__INCLUDED__ */