1 From f5a5cd2721763f3759fc49d08f1b1ca0b039fcf2 Mon Sep 17 00:00:00 2001
2 From: Tim Gover <tgover@broadcom.com>
3 Date: Tue, 22 Jul 2014 15:41:04 +0100
4 Subject: [PATCH 038/381] vcsm: VideoCore shared memory service for BCM2835
6 Content-Type: text/plain; charset=UTF-8
7 Content-Transfer-Encoding: 8bit
9 Add experimental support for the VideoCore shared memory service.
10 This allows user processes to allocate memory from VideoCore's
11 GPU relocatable heap and mmap the buffers. Additionally, the memory
12 handles can be passed to other VideoCore services such as MMAL, OpenMax
16 * This driver was originally released for BCM28155 which has a different
17 cache architecture to BCM2835. Consequently, in this release only
18 uncached mappings are supported. However, there's no fundamental
19 reason why cached mappings cannot be supported on BCM2835
20 * More refactoring is required to remove the typedefs.
21 * Re-enable some of the commented-out debug-fs statistics which were
22 disabled when migrating code from proc-fs.
23 * There's a lot of code to support sharing of VCSM in order to support
24 Android. This could probably be done more cleanly or perhaps just
27 Signed-off-by: Tim Gover <timgover@gmail.com>
29 config: Disable VC_SM for now to fix hang with cutdown kernel
31 vcsm: Use boolean as it cannot be built as module
33 On building the bcm_vc_sm as a module we get the following error:
35 v7_dma_flush_range and do_munmap are undefined in vc-sm.ko.
37 Fix by making it not an option to build as module
39 vcsm: Add ioctl for custom cache flushing
41 vc-sm: Move headers out of arch directory
43 Signed-off-by: Noralf Trønnes <noralf@tronnes.org>
45 drivers/char/broadcom/Kconfig | 9 +
46 drivers/char/broadcom/Makefile | 1 +
47 drivers/char/broadcom/vc_sm/Makefile | 20 +
48 drivers/char/broadcom/vc_sm/vc_sm_defs.h | 181 ++
49 drivers/char/broadcom/vc_sm/vc_sm_knl.h | 55 +
50 drivers/char/broadcom/vc_sm/vc_vchi_sm.c | 492 +++++
51 drivers/char/broadcom/vc_sm/vc_vchi_sm.h | 82 +
52 drivers/char/broadcom/vc_sm/vmcs_sm.c | 3211 ++++++++++++++++++++++++++++++
53 include/linux/broadcom/vmcs_sm_ioctl.h | 248 +++
54 9 files changed, 4299 insertions(+)
55 create mode 100644 drivers/char/broadcom/vc_sm/Makefile
56 create mode 100644 drivers/char/broadcom/vc_sm/vc_sm_defs.h
57 create mode 100644 drivers/char/broadcom/vc_sm/vc_sm_knl.h
58 create mode 100644 drivers/char/broadcom/vc_sm/vc_vchi_sm.c
59 create mode 100644 drivers/char/broadcom/vc_sm/vc_vchi_sm.h
60 create mode 100644 drivers/char/broadcom/vc_sm/vmcs_sm.c
61 create mode 100644 include/linux/broadcom/vmcs_sm_ioctl.h
63 --- a/drivers/char/broadcom/Kconfig
64 +++ b/drivers/char/broadcom/Kconfig
65 @@ -23,3 +23,12 @@ config BCM2708_VCMEM
66 Helper for videocore memory access and total size allocation.
71 + bool "VMCS Shared Memory"
72 + depends on BCM2708_VCHIQ
73 + select BCM2708_VCMEM
76 + Support for the VC shared memory on the Broadcom reference
77 + design. Uses the VCHIQ stack.
78 --- a/drivers/char/broadcom/Makefile
79 +++ b/drivers/char/broadcom/Makefile
81 obj-$(CONFIG_BCM_VC_CMA) += vc_cma/
82 obj-$(CONFIG_BCM2708_VCMEM) += vc_mem.o
83 +obj-$(CONFIG_BCM_VC_SM) += vc_sm/
85 +++ b/drivers/char/broadcom/vc_sm/Makefile
87 +EXTRA_CFLAGS += -Wall -Wstrict-prototypes -Wno-trigraphs -O2
89 +EXTRA_CFLAGS += -I"drivers/misc/vc04_services"
90 +EXTRA_CFLAGS += -I"drivers/misc/vc04_services/interface/vchi"
91 +EXTRA_CFLAGS += -I"drivers/misc/vc04_services/interface/vchiq_arm"
92 +EXTRA_CFLAGS += -I"$(srctree)/fs/"
94 +EXTRA_CFLAGS += -DOS_ASSERT_FAILURE
95 +EXTRA_CFLAGS += -D__STDC_VERSION=199901L
96 +EXTRA_CFLAGS += -D__STDC_VERSION__=199901L
97 +EXTRA_CFLAGS += -D__VCCOREVER__=0
98 +EXTRA_CFLAGS += -D__KERNEL__
99 +EXTRA_CFLAGS += -D__linux__
100 +EXTRA_CFLAGS += -Werror
102 +obj-$(CONFIG_BCM_VC_SM) := vc-sm.o
108 +++ b/drivers/char/broadcom/vc_sm/vc_sm_defs.h
110 +/*****************************************************************************
111 +* Copyright 2011 Broadcom Corporation. All rights reserved.
113 +* Unless you and Broadcom execute a separate written software license
114 +* agreement governing use of this software, this software is licensed to you
115 +* under the terms of the GNU General Public License version 2, available at
116 +* http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
118 +* Notwithstanding the above, under no circumstances may you combine this
119 +* software in any way with any other Broadcom software provided under a
120 +* license other than the GPL, without Broadcom's express prior written
122 +*****************************************************************************/
124 +#ifndef __VC_SM_DEFS_H__INCLUDED__
125 +#define __VC_SM_DEFS_H__INCLUDED__
127 +/* FourCC code used for VCHI connection */
128 +#define VC_SM_SERVER_NAME MAKE_FOURCC("SMEM")
130 +/* Maximum message length */
131 +#define VC_SM_MAX_MSG_LEN (sizeof(VC_SM_MSG_UNION_T) + \
132 + sizeof(VC_SM_MSG_HDR_T))
133 +#define VC_SM_MAX_RSP_LEN (sizeof(VC_SM_MSG_UNION_T))
135 +/* Resource name maximum size */
136 +#define VC_SM_RESOURCE_NAME 32
138 +/* All message types supported for HOST->VC direction */
140 + /* Allocate shared memory block */
141 + VC_SM_MSG_TYPE_ALLOC,
142 + /* Lock allocated shared memory block */
143 + VC_SM_MSG_TYPE_LOCK,
144 + /* Unlock allocated shared memory block */
145 + VC_SM_MSG_TYPE_UNLOCK,
146 + /* Unlock allocated shared memory block, do not answer command */
147 + VC_SM_MSG_TYPE_UNLOCK_NOANS,
148 + /* Free shared memory block */
149 + VC_SM_MSG_TYPE_FREE,
150 + /* Resize a shared memory block */
151 + VC_SM_MSG_TYPE_RESIZE,
152 + /* Walk the allocated shared memory block(s) */
153 + VC_SM_MSG_TYPE_WALK_ALLOC,
155 + /* A previously applied action will need to be reverted */
156 + VC_SM_MSG_TYPE_ACTION_CLEAN,
160 +/* Type of memory to be allocated */
162 + VC_SM_ALLOC_CACHED,
163 + VC_SM_ALLOC_NON_CACHED,
165 +} VC_SM_ALLOC_TYPE_T;
167 +/* Message header for all messages in HOST->VC direction */
175 +/* Request to allocate memory (HOST->VC) */
177 + /* type of memory to allocate */
178 + VC_SM_ALLOC_TYPE_T type;
179 + /* byte amount of data to allocate per unit */
180 + uint32_t base_unit;
181 + /* number of unit to allocate */
183 + /* alignment to be applied on allocation */
184 + uint32_t alignement;
185 + /* identity of who allocated this block */
186 + uint32_t allocator;
187 + /* resource name (for easier tracking on vc side) */
188 + char name[VC_SM_RESOURCE_NAME];
192 +/* Result of a requested memory allocation (VC->HOST) */
194 + /* Transaction identifier */
197 + /* Resource handle */
198 + uint32_t res_handle;
199 + /* Pointer to resource buffer */
201 + /* Resource base size (bytes) */
202 + uint32_t res_base_size;
203 + /* Resource number */
206 +} VC_SM_ALLOC_RESULT_T;
208 +/* Request to free a previously allocated memory (HOST->VC) */
210 + /* Resource handle (returned from alloc) */
211 + uint32_t res_handle;
212 + /* Resource buffer (returned from alloc) */
217 +/* Request to lock a previously allocated memory (HOST->VC) */
219 + /* Resource handle (returned from alloc) */
220 + uint32_t res_handle;
221 + /* Resource buffer (returned from alloc) */
224 +} VC_SM_LOCK_UNLOCK_T;
226 +/* Request to resize a previously allocated memory (HOST->VC) */
228 + /* Resource handle (returned from alloc) */
229 + uint32_t res_handle;
230 + /* Resource buffer (returned from alloc) */
232 + /* Resource *new* size requested (bytes) */
233 + uint32_t res_new_size;
237 +/* Result of a requested memory lock (VC->HOST) */
239 + /* Transaction identifier */
242 + /* Resource handle */
243 + uint32_t res_handle;
244 + /* Pointer to resource buffer */
246 + /* Pointer to former resource buffer if the memory
247 + * was reallocated */
250 +} VC_SM_LOCK_RESULT_T;
252 +/* Generic result for a request (VC->HOST) */
254 + /* Transaction identifier */
261 +/* Request to revert a previously applied action (HOST->VC) */
263 + /* Action of interest */
264 + VC_SM_MSG_TYPE res_action;
265 + /* Transaction identifier for the action of interest */
266 + uint32_t action_trans_id;
268 +} VC_SM_ACTION_CLEAN_T;
270 +/* Request to remove all data associated with a given allocator (HOST->VC) */
272 + /* Allocator identifier */
273 + uint32_t allocator;
277 +/* Union of ALL messages */
279 + VC_SM_ALLOC_T alloc;
280 + VC_SM_ALLOC_RESULT_T alloc_result;
282 + VC_SM_ACTION_CLEAN_T action_clean;
283 + VC_SM_RESIZE_T resize;
284 + VC_SM_LOCK_RESULT_T lock_result;
285 + VC_SM_RESULT_T result;
286 + VC_SM_FREE_ALL_T free_all;
288 +} VC_SM_MSG_UNION_T;
290 +#endif /* __VC_SM_DEFS_H__INCLUDED__ */
292 +++ b/drivers/char/broadcom/vc_sm/vc_sm_knl.h
294 +/*****************************************************************************
295 +* Copyright 2011 Broadcom Corporation. All rights reserved.
297 +* Unless you and Broadcom execute a separate written software license
298 +* agreement governing use of this software, this software is licensed to you
299 +* under the terms of the GNU General Public License version 2, available at
300 +* http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
302 +* Notwithstanding the above, under no circumstances may you combine this
303 +* software in any way with any other Broadcom software provided under a
304 +* license other than the GPL, without Broadcom's express prior written
306 +*****************************************************************************/
308 +#ifndef __VC_SM_KNL_H__INCLUDED__
309 +#define __VC_SM_KNL_H__INCLUDED__
311 +#if !defined(__KERNEL__)
312 +#error "This interface is for kernel use only..."
315 +/* Type of memory to be locked (ie mapped) */
318 + VC_SM_LOCK_NON_CACHED,
320 +} VC_SM_LOCK_CACHE_MODE_T;
322 +/* Allocate a shared memory handle and block.
324 +int vc_sm_alloc(VC_SM_ALLOC_T *alloc, int *handle);
326 +/* Free a previously allocated shared memory handle and block.
328 +int vc_sm_free(int handle);
330 +/* Lock a memory handle for use by kernel.
332 +int vc_sm_lock(int handle, VC_SM_LOCK_CACHE_MODE_T mode,
333 + long unsigned int *data);
335 +/* Unlock a memory handle in use by kernel.
337 +int vc_sm_unlock(int handle, int flush, int no_vc_unlock);
339 +/* Get an internal resource handle mapped from the external one.
341 +int vc_sm_int_handle(int handle);
343 +/* Map a shared memory region for use by kernel.
345 +int vc_sm_map(int handle, unsigned int sm_addr, VC_SM_LOCK_CACHE_MODE_T mode,
346 + long unsigned int *data);
348 +#endif /* __VC_SM_KNL_H__INCLUDED__ */
350 +++ b/drivers/char/broadcom/vc_sm/vc_vchi_sm.c
352 +/*****************************************************************************
353 +* Copyright 2011-2012 Broadcom Corporation. All rights reserved.
355 +* Unless you and Broadcom execute a separate written software license
356 +* agreement governing use of this software, this software is licensed to you
357 +* under the terms of the GNU General Public License version 2, available at
358 +* http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
360 +* Notwithstanding the above, under no circumstances may you combine this
361 +* software in any way with any other Broadcom software provided under a
362 +* license other than the GPL, without Broadcom's express prior written
364 +*****************************************************************************/
366 +/* ---- Include Files ----------------------------------------------------- */
367 +#include <linux/types.h>
368 +#include <linux/kernel.h>
369 +#include <linux/list.h>
370 +#include <linux/semaphore.h>
371 +#include <linux/mutex.h>
372 +#include <linux/slab.h>
373 +#include <linux/kthread.h>
375 +#include "vc_vchi_sm.h"
378 +#define VC_SM_MIN_VER 0
380 +/* ---- Private Constants and Types -------------------------------------- */
382 +/* Command blocks come from a pool */
383 +#define SM_MAX_NUM_CMD_RSP_BLKS 32
385 +struct sm_cmd_rsp_blk {
386 + struct list_head head; /* To create lists */
387 + struct semaphore sema; /* To be signaled when the response is there */
392 + uint8_t msg[VC_SM_MAX_MSG_LEN];
400 +struct sm_instance {
401 + uint32_t num_connections;
402 + VCHI_SERVICE_HANDLE_T vchi_handle[VCHI_MAX_NUM_CONNECTIONS];
403 + struct task_struct *io_thread;
404 + struct semaphore io_sema;
409 + struct list_head cmd_list;
410 + struct list_head rsp_list;
411 + struct list_head dead_list;
413 + struct sm_cmd_rsp_blk free_blk[SM_MAX_NUM_CMD_RSP_BLKS];
414 + struct list_head free_list;
415 + struct mutex free_lock;
416 + struct semaphore free_sema;
420 +/* ---- Private Variables ------------------------------------------------ */
422 +/* ---- Private Function Prototypes -------------------------------------- */
424 +/* ---- Private Functions ------------------------------------------------ */
426 +sm_cmd_rsp_blk *vc_vchi_cmd_create(struct sm_instance *instance,
427 + VC_SM_MSG_TYPE id, void *msg,
428 + uint32_t size, int wait)
430 + struct sm_cmd_rsp_blk *blk;
431 + VC_SM_MSG_HDR_T *hdr;
433 + if (down_interruptible(&instance->free_sema)) {
434 + blk = kmalloc(sizeof(*blk), GFP_KERNEL);
439 + sema_init(&blk->sema, 0);
441 + mutex_lock(&instance->free_lock);
443 + list_first_entry(&instance->free_list,
444 + struct sm_cmd_rsp_blk, head);
445 + list_del(&blk->head);
446 + mutex_unlock(&instance->free_lock);
451 + blk->length = sizeof(*hdr) + size;
453 + hdr = (VC_SM_MSG_HDR_T *) blk->msg;
455 + mutex_lock(&instance->lock);
456 + hdr->trans_id = blk->id = ++instance->trans_id;
457 + mutex_unlock(&instance->lock);
460 + memcpy(hdr->body, msg, size);
466 +vc_vchi_cmd_delete(struct sm_instance *instance, struct sm_cmd_rsp_blk *blk)
473 + mutex_lock(&instance->free_lock);
474 + list_add(&blk->head, &instance->free_list);
475 + mutex_unlock(&instance->free_lock);
476 + up(&instance->free_sema);
479 +static int vc_vchi_sm_videocore_io(void *arg)
481 + struct sm_instance *instance = arg;
482 + struct sm_cmd_rsp_blk *cmd = NULL, *cmd_tmp;
483 + VC_SM_RESULT_T *reply;
484 + uint32_t reply_len;
490 + vchi_service_release(instance->vchi_handle[0]);
492 + if (!down_interruptible(&instance->io_sema)) {
493 + vchi_service_use(instance->vchi_handle[0]);
497 + unsigned int flags;
499 + * Get new command and move it to response list
501 + mutex_lock(&instance->lock);
502 + if (list_empty(&instance->cmd_list)) {
503 + /* no more commands to process */
504 + mutex_unlock(&instance->lock);
508 + list_first_entry(&instance->cmd_list,
509 + struct sm_cmd_rsp_blk,
511 + list_move(&cmd->head, &instance->rsp_list);
513 + mutex_unlock(&instance->lock);
515 + /* Send the command */
516 + flags = VCHI_FLAGS_BLOCK_UNTIL_QUEUED;
517 + status = vchi_msg_queue(
518 + instance->vchi_handle[0],
519 + cmd->msg, cmd->length,
522 + pr_err("%s: failed to queue message (%d)",
526 + /* If no reply is needed then we're done */
528 + mutex_lock(&instance->lock);
529 + list_del(&cmd->head);
530 + mutex_unlock(&instance->lock);
531 + vc_vchi_cmd_delete(instance, cmd);
542 + while (!vchi_msg_peek
543 + (instance->vchi_handle[0], (void **)&reply,
544 + &reply_len, VCHI_FLAGS_NONE)) {
545 + mutex_lock(&instance->lock);
546 + list_for_each_entry(cmd, &instance->rsp_list,
548 + if (cmd->id == reply->trans_id)
551 + mutex_unlock(&instance->lock);
553 + if (&cmd->head == &instance->rsp_list) {
554 + pr_debug("%s: received response %u, throw away...",
555 + __func__, reply->trans_id);
556 + } else if (reply_len > sizeof(cmd->msg)) {
557 + pr_err("%s: reply too big (%u) %u, throw away...",
558 + __func__, reply_len,
561 + memcpy(cmd->msg, reply, reply_len);
565 + vchi_msg_remove(instance->vchi_handle[0]);
568 + /* Go through the dead list and free them */
569 + mutex_lock(&instance->lock);
570 + list_for_each_entry_safe(cmd, cmd_tmp,
571 + &instance->dead_list, head) {
572 + list_del(&cmd->head);
573 + vc_vchi_cmd_delete(instance, cmd);
575 + mutex_unlock(&instance->lock);
582 +static void vc_sm_vchi_callback(void *param,
583 + const VCHI_CALLBACK_REASON_T reason,
586 + struct sm_instance *instance = param;
591 + case VCHI_CALLBACK_MSG_AVAILABLE:
592 + up(&instance->io_sema);
595 + case VCHI_CALLBACK_SERVICE_CLOSED:
596 + pr_info("%s: service CLOSED!!", __func__);
602 +VC_VCHI_SM_HANDLE_T vc_vchi_sm_init(VCHI_INSTANCE_T vchi_instance,
603 + VCHI_CONNECTION_T **vchi_connections,
604 + uint32_t num_connections)
607 + struct sm_instance *instance;
610 + pr_debug("%s: start", __func__);
612 + if (num_connections > VCHI_MAX_NUM_CONNECTIONS) {
613 + pr_err("%s: unsupported number of connections %u (max=%u)",
614 + __func__, num_connections, VCHI_MAX_NUM_CONNECTIONS);
618 + /* Allocate memory for this instance */
619 + instance = kzalloc(sizeof(*instance), GFP_KERNEL);
621 + /* Misc initialisations */
622 + mutex_init(&instance->lock);
623 + sema_init(&instance->io_sema, 0);
624 + INIT_LIST_HEAD(&instance->cmd_list);
625 + INIT_LIST_HEAD(&instance->rsp_list);
626 + INIT_LIST_HEAD(&instance->dead_list);
627 + INIT_LIST_HEAD(&instance->free_list);
628 + sema_init(&instance->free_sema, SM_MAX_NUM_CMD_RSP_BLKS);
629 + mutex_init(&instance->free_lock);
630 + for (i = 0; i < SM_MAX_NUM_CMD_RSP_BLKS; i++) {
631 + sema_init(&instance->free_blk[i].sema, 0);
632 + list_add(&instance->free_blk[i].head, &instance->free_list);
635 + /* Open the VCHI service connections */
636 + instance->num_connections = num_connections;
637 + for (i = 0; i < num_connections; i++) {
638 + SERVICE_CREATION_T params = {
639 + VCHI_VERSION_EX(VC_SM_VER, VC_SM_MIN_VER),
641 + vchi_connections[i],
644 + vc_sm_vchi_callback,
651 + status = vchi_service_open(vchi_instance,
652 + ¶ms, &instance->vchi_handle[i]);
654 + pr_err("%s: failed to open VCHI service (%d)",
657 + goto err_close_services;
661 + /* Create the thread which takes care of all io to/from videocore. */
662 + instance->io_thread = kthread_create(&vc_vchi_sm_videocore_io,
663 + (void *)instance, "SMIO");
664 + if (instance->io_thread == NULL) {
665 + pr_err("%s: failed to create SMIO thread", __func__);
667 + goto err_close_services;
669 + set_user_nice(instance->io_thread, -10);
670 + wake_up_process(instance->io_thread);
672 + pr_debug("%s: success - instance 0x%x", __func__, (unsigned)instance);
676 + for (i = 0; i < instance->num_connections; i++) {
677 + if (instance->vchi_handle[i] != NULL)
678 + vchi_service_close(instance->vchi_handle[i]);
682 + pr_debug("%s: FAILED", __func__);
686 +int vc_vchi_sm_stop(VC_VCHI_SM_HANDLE_T *handle)
688 + struct sm_instance *instance;
691 + if (handle == NULL) {
692 + pr_err("%s: invalid pointer to handle %p", __func__, handle);
696 + if (*handle == NULL) {
697 + pr_err("%s: invalid handle %p", __func__, *handle);
701 + instance = *handle;
703 + /* Close all VCHI service connections */
704 + for (i = 0; i < instance->num_connections; i++) {
706 + vchi_service_use(instance->vchi_handle[i]);
708 + success = vchi_service_close(instance->vchi_handle[i]);
720 +int vc_vchi_sm_send_msg(VC_VCHI_SM_HANDLE_T handle,
721 + VC_SM_MSG_TYPE msg_id,
722 + void *msg, uint32_t msg_size,
723 + void *result, uint32_t result_size,
724 + uint32_t *cur_trans_id, uint8_t wait_reply)
727 + struct sm_instance *instance = handle;
728 + struct sm_cmd_rsp_blk *cmd_blk;
730 + if (handle == NULL) {
731 + pr_err("%s: invalid handle", __func__);
735 + pr_err("%s: invalid msg pointer", __func__);
740 + vc_vchi_cmd_create(instance, msg_id, msg, msg_size, wait_reply);
741 + if (cmd_blk == NULL) {
742 + pr_err("[%s]: failed to allocate global tracking resource",
747 + if (cur_trans_id != NULL)
748 + *cur_trans_id = cmd_blk->id;
750 + mutex_lock(&instance->lock);
751 + list_add_tail(&cmd_blk->head, &instance->cmd_list);
752 + mutex_unlock(&instance->lock);
753 + up(&instance->io_sema);
759 + /* Wait for the response */
760 + if (down_interruptible(&cmd_blk->sema)) {
761 + mutex_lock(&instance->lock);
762 + if (!cmd_blk->sent) {
763 + list_del(&cmd_blk->head);
764 + mutex_unlock(&instance->lock);
765 + vc_vchi_cmd_delete(instance, cmd_blk);
768 + mutex_unlock(&instance->lock);
770 + mutex_lock(&instance->lock);
771 + list_move(&cmd_blk->head, &instance->dead_list);
772 + mutex_unlock(&instance->lock);
773 + up(&instance->io_sema);
774 + return -EINTR; /* We're done */
777 + if (result && result_size) {
778 + memcpy(result, cmd_blk->msg, result_size);
780 + VC_SM_RESULT_T *res = (VC_SM_RESULT_T *) cmd_blk->msg;
781 + status = (res->success == 0) ? 0 : -ENXIO;
784 + mutex_lock(&instance->lock);
785 + list_del(&cmd_blk->head);
786 + mutex_unlock(&instance->lock);
787 + vc_vchi_cmd_delete(instance, cmd_blk);
791 +int vc_vchi_sm_alloc(VC_VCHI_SM_HANDLE_T handle, VC_SM_ALLOC_T *msg,
792 + VC_SM_ALLOC_RESULT_T *result, uint32_t *cur_trans_id)
794 + return vc_vchi_sm_send_msg(handle, VC_SM_MSG_TYPE_ALLOC,
795 + msg, sizeof(*msg), result, sizeof(*result),
799 +int vc_vchi_sm_free(VC_VCHI_SM_HANDLE_T handle,
800 + VC_SM_FREE_T *msg, uint32_t *cur_trans_id)
802 + return vc_vchi_sm_send_msg(handle, VC_SM_MSG_TYPE_FREE,
803 + msg, sizeof(*msg), 0, 0, cur_trans_id, 0);
806 +int vc_vchi_sm_lock(VC_VCHI_SM_HANDLE_T handle,
807 + VC_SM_LOCK_UNLOCK_T *msg,
808 + VC_SM_LOCK_RESULT_T *result, uint32_t *cur_trans_id)
810 + return vc_vchi_sm_send_msg(handle, VC_SM_MSG_TYPE_LOCK,
811 + msg, sizeof(*msg), result, sizeof(*result),
815 +int vc_vchi_sm_unlock(VC_VCHI_SM_HANDLE_T handle,
816 + VC_SM_LOCK_UNLOCK_T *msg,
817 + uint32_t *cur_trans_id, uint8_t wait_reply)
819 + return vc_vchi_sm_send_msg(handle, wait_reply ?
820 + VC_SM_MSG_TYPE_UNLOCK :
821 + VC_SM_MSG_TYPE_UNLOCK_NOANS, msg,
822 + sizeof(*msg), 0, 0, cur_trans_id,
826 +int vc_vchi_sm_resize(VC_VCHI_SM_HANDLE_T handle, VC_SM_RESIZE_T *msg,
827 + uint32_t *cur_trans_id)
829 + return vc_vchi_sm_send_msg(handle, VC_SM_MSG_TYPE_RESIZE,
830 + msg, sizeof(*msg), 0, 0, cur_trans_id, 1);
833 +int vc_vchi_sm_walk_alloc(VC_VCHI_SM_HANDLE_T handle)
835 + return vc_vchi_sm_send_msg(handle, VC_SM_MSG_TYPE_WALK_ALLOC,
839 +int vc_vchi_sm_clean_up(VC_VCHI_SM_HANDLE_T handle, VC_SM_ACTION_CLEAN_T *msg)
841 + return vc_vchi_sm_send_msg(handle, VC_SM_MSG_TYPE_ACTION_CLEAN,
842 + msg, sizeof(*msg), 0, 0, 0, 0);
845 +++ b/drivers/char/broadcom/vc_sm/vc_vchi_sm.h
847 +/*****************************************************************************
848 +* Copyright 2011 Broadcom Corporation. All rights reserved.
850 +* Unless you and Broadcom execute a separate written software license
851 +* agreement governing use of this software, this software is licensed to you
852 +* under the terms of the GNU General Public License version 2, available at
853 +* http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
855 +* Notwithstanding the above, under no circumstances may you combine this
856 +* software in any way with any other Broadcom software provided under a
857 +* license other than the GPL, without Broadcom's express prior written
859 +*****************************************************************************/
861 +#ifndef __VC_VCHI_SM_H__INCLUDED__
862 +#define __VC_VCHI_SM_H__INCLUDED__
864 +#include "interface/vchi/vchi.h"
866 +#include "vc_sm_defs.h"
870 +typedef struct sm_instance *VC_VCHI_SM_HANDLE_T;
872 +/* Initialize the shared memory service, opens up vchi connection to talk to it.
874 +VC_VCHI_SM_HANDLE_T vc_vchi_sm_init(VCHI_INSTANCE_T vchi_instance,
875 + VCHI_CONNECTION_T **vchi_connections,
876 + uint32_t num_connections);
878 +/* Terminates the shared memory service.
880 +int vc_vchi_sm_stop(VC_VCHI_SM_HANDLE_T *handle);
882 +/* Ask the shared memory service to allocate some memory on videocore and
883 +** return the result of this allocation (which upon success will be a pointer
884 +** to some memory in videocore space).
886 +int vc_vchi_sm_alloc(VC_VCHI_SM_HANDLE_T handle,
887 + VC_SM_ALLOC_T *alloc,
888 + VC_SM_ALLOC_RESULT_T *alloc_result, uint32_t *trans_id);
890 +/* Ask the shared memory service to free up some memory that was previously
891 +** allocated by the vc_vchi_sm_alloc function call.
893 +int vc_vchi_sm_free(VC_VCHI_SM_HANDLE_T handle,
894 + VC_SM_FREE_T *free, uint32_t *trans_id);
896 +/* Ask the shared memory service to lock up some memory that was previously
897 +** allocated by the vc_vchi_sm_alloc function call.
899 +int vc_vchi_sm_lock(VC_VCHI_SM_HANDLE_T handle,
900 + VC_SM_LOCK_UNLOCK_T *lock_unlock,
901 + VC_SM_LOCK_RESULT_T *lock_result, uint32_t *trans_id);
903 +/* Ask the shared memory service to unlock some memory that was previously
904 +** allocated by the vc_vchi_sm_alloc function call.
906 +int vc_vchi_sm_unlock(VC_VCHI_SM_HANDLE_T handle,
907 + VC_SM_LOCK_UNLOCK_T *lock_unlock,
908 + uint32_t *trans_id, uint8_t wait_reply);
910 +/* Ask the shared memory service to resize some memory that was previously
911 +** allocated by the vc_vchi_sm_alloc function call.
913 +int vc_vchi_sm_resize(VC_VCHI_SM_HANDLE_T handle,
914 + VC_SM_RESIZE_T *resize, uint32_t *trans_id);
916 +/* Walk the allocated resources on the videocore side, the allocation will
917 +** show up in the log. This is purely for debug/information and takes no
918 +** specific actions.
920 +int vc_vchi_sm_walk_alloc(VC_VCHI_SM_HANDLE_T handle);
922 +/* Clean up following a previously interrupted action which left the system
923 +** in a bad state of some sort.
925 +int vc_vchi_sm_clean_up(VC_VCHI_SM_HANDLE_T handle,
926 + VC_SM_ACTION_CLEAN_T *action_clean);
928 +#endif /* __VC_VCHI_SM_H__INCLUDED__ */
930 +++ b/drivers/char/broadcom/vc_sm/vmcs_sm.c
932 +/*****************************************************************************
933 +* Copyright 2011-2012 Broadcom Corporation. All rights reserved.
935 +* Unless you and Broadcom execute a separate written software license
936 +* agreement governing use of this software, this software is licensed to you
937 +* under the terms of the GNU General Public License version 2, available at
938 +* http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
940 +* Notwithstanding the above, under no circumstances may you combine this
941 +* software in any way with any other Broadcom software provided under a
942 +* license other than the GPL, without Broadcom's express prior written
944 +*****************************************************************************/
946 +/* ---- Include Files ----------------------------------------------------- */
948 +#include <linux/cdev.h>
949 +#include <linux/broadcom/vc_mem.h>
950 +#include <linux/device.h>
951 +#include <linux/debugfs.h>
952 +#include <linux/dma-mapping.h>
953 +#include <linux/errno.h>
954 +#include <linux/fs.h>
955 +#include <linux/hugetlb.h>
956 +#include <linux/ioctl.h>
957 +#include <linux/kernel.h>
958 +#include <linux/list.h>
959 +#include <linux/module.h>
960 +#include <linux/mm.h>
961 +#include <linux/pfn.h>
962 +#include <linux/proc_fs.h>
963 +#include <linux/pagemap.h>
964 +#include <linux/semaphore.h>
965 +#include <linux/slab.h>
966 +#include <linux/seq_file.h>
967 +#include <linux/types.h>
968 +#include <asm/cacheflush.h>
970 +#include "vchiq_connected.h"
971 +#include "vc_vchi_sm.h"
973 +#include <linux/broadcom/vmcs_sm_ioctl.h>
974 +#include "vc_sm_knl.h"
976 +/* ---- Private Constants and Types --------------------------------------- */
978 +#define DEVICE_NAME "vcsm"
979 +#define DEVICE_MINOR 0
981 +#define VC_SM_DIR_ROOT_NAME "vc-smem"
982 +#define VC_SM_DIR_ALLOC_NAME "alloc"
983 +#define VC_SM_STATE "state"
984 +#define VC_SM_STATS "statistics"
985 +#define VC_SM_RESOURCES "resources"
986 +#define VC_SM_DEBUG "debug"
987 +#define VC_SM_WRITE_BUF_SIZE 128
989 +/* Statistics tracked per resource and globally.
1016 +static const char *const sm_stats_human_read[] = {
1023 + "Cache Invalidate",
1026 +typedef int (*VC_SM_SHOW) (struct seq_file *s, void *v);
1028 + VC_SM_SHOW show; /* Debug fs function hookup. */
1029 + struct dentry *dir_entry; /* Debug fs directory entry. */
1030 + void *priv_data; /* Private data */
1034 +/* Single resource allocation tracked for all devices.
1037 + struct list_head map_list; /* Linked list of maps. */
1039 + struct SM_RESOURCE_T *resource; /* Pointer to the resource. */
1041 + pid_t res_pid; /* PID owning that resource. */
1042 + unsigned int res_vc_hdl; /* Resource handle (videocore). */
1043 + unsigned int res_usr_hdl; /* Resource handle (user). */
1045 + long unsigned int res_addr; /* Mapped virtual address. */
1046 + struct vm_area_struct *vma; /* VM area for this mapping. */
1047 + unsigned int ref_count; /* Reference count to this vma. */
1049 + /* Used to link maps associated with a resource. */
1050 + struct list_head resource_map_list;
1053 +/* Single resource allocation tracked for each opened device.
1055 +struct SM_RESOURCE_T {
1056 + struct list_head resource_list; /* List of resources. */
1057 + struct list_head global_resource_list; /* Global list of resources. */
1059 + pid_t pid; /* PID owning that resource. */
1060 + uint32_t res_guid; /* Unique identifier. */
1061 + uint32_t lock_count; /* Lock count for this resource. */
1062 + uint32_t ref_count; /* Ref count for this resource. */
1064 + uint32_t res_handle; /* Resource allocation handle. */
1065 + void *res_base_mem; /* Resource base memory address. */
1066 + uint32_t res_size; /* Resource size allocated. */
1067 + enum vmcs_sm_cache_e res_cached; /* Resource cache type. */
1068 + struct SM_RESOURCE_T *res_shared; /* Shared resource */
1070 + enum SM_STATS_T res_stats[END_ALL]; /* Resource statistics. */
1072 + uint8_t map_count; /* Counter of mappings for this resource. */
1073 + struct list_head map_list; /* Maps associated with a resource. */
1075 + struct SM_PRIV_DATA_T *private;
1078 +/* Private file data associated with each opened device.
1080 +struct SM_PRIV_DATA_T {
1081 + struct list_head resource_list; /* List of resources. */
1083 + pid_t pid; /* PID of creator. */
1085 + struct dentry *dir_pid; /* Debug fs entries root. */
1086 + struct SM_PDE_T dir_stats; /* Debug fs entries statistics sub-tree. */
1087 + struct SM_PDE_T dir_res; /* Debug fs resource sub-tree. */
1089 + int restart_sys; /* Tracks restart on interrupt. */
1090 + VC_SM_MSG_TYPE int_action; /* Interrupted action. */
1091 + uint32_t int_trans_id; /* Interrupted transaction. */
1095 +/* Global state information.
1097 +struct SM_STATE_T {
1098 + VC_VCHI_SM_HANDLE_T sm_handle; /* Handle for videocore service. */
1099 + struct dentry *dir_root; /* Debug fs entries root. */
1100 + struct dentry *dir_alloc; /* Debug fs entries allocations. */
1101 + struct SM_PDE_T dir_stats; /* Debug fs entries statistics sub-tree. */
1102 + struct SM_PDE_T dir_state; /* Debug fs entries state sub-tree. */
1103 + struct dentry *debug; /* Debug fs entries debug. */
1105 + struct mutex map_lock; /* Global map lock. */
1106 + struct list_head map_list; /* List of maps. */
1107 + struct list_head resource_list; /* List of resources. */
1109 + enum SM_STATS_T deceased[END_ALL]; /* Natural termination stats. */
1110 + enum SM_STATS_T terminated[END_ALL]; /* Forced termination stats. */
1111 + uint32_t res_deceased_cnt; /* Natural termination counter. */
1112 + uint32_t res_terminated_cnt; /* Forced termination counter. */
1114 + struct cdev sm_cdev; /* Device. */
1115 + dev_t sm_devid; /* Device identifier. */
1116 + struct class *sm_class; /* Class. */
1117 + struct device *sm_dev; /* Device. */
1119 + struct SM_PRIV_DATA_T *data_knl; /* Kernel internal data tracking. */
1121 + struct mutex lock; /* Global lock. */
1122 + uint32_t guid; /* GUID (next) tracker. */
1126 +/* ---- Private Variables ----------------------------------------------- */
1128 +static struct SM_STATE_T *sm_state;
1129 +static int sm_inited;
1131 +static const char *const sm_cache_map_vector[] = {
1138 +/* ---- Private Function Prototypes -------------------------------------- */
1140 +/* ---- Private Functions ------------------------------------------------ */
1142 +static inline unsigned vcaddr_to_pfn(unsigned long vc_addr)
1144 + unsigned long pfn = vc_addr & 0x3FFFFFFF;
1145 + pfn += mm_vc_mem_phys_addr;
1146 + pfn >>= PAGE_SHIFT;
1150 +/* Carries over to the state statistics the statistics once owned by a deceased
/* resource: each per-resource counter is folded into either the global
 * 'terminated' (forced) or 'deceased' (natural) tally, and the matching
 * occurrence counter is bumped.  Safe no-op when state/resource is NULL. */
1153 +static void vc_sm_resource_deceased(struct SM_RESOURCE_T *p_res, int terminated)
1155 + if (sm_state != NULL) {
1156 + if (p_res != NULL) {
1160 + sm_state->res_terminated_cnt++;
1162 + sm_state->res_deceased_cnt++;
1164 + for (ix = 0; ix < END_ALL; ix++) {
/* The terminated/deceased branch selection is elided in this excerpt. */
1166 + sm_state->terminated[ix] +=
1167 + p_res->res_stats[ix];
1169 + sm_state->deceased[ix] +=
1170 + p_res->res_stats[ix];
1176 +/* Fetch a videocore handle corresponding to a mapping of the pid+address
1177 +** returns 0 (ie NULL) if no such handle exists in the global map.
** Linear scan of the global map list under map_lock; O(n) in mappings. */
1179 +static unsigned int vmcs_sm_vc_handle_from_pid_and_address(unsigned int pid,
1180 + unsigned int addr)
1182 + struct sm_mmap *map = NULL;
1183 + unsigned int handle = 0;
1185 + if (!sm_state || addr == 0)
1188 + mutex_lock(&(sm_state->map_lock));
1190 + /* Lookup the resource.
1192 + if (!list_empty(&sm_state->map_list)) {
1193 + list_for_each_entry(map, &sm_state->map_list, map_list) {
1194 + if (map->res_pid != pid || map->res_addr != addr)
1197 + pr_debug("[%s]: global map %p (pid %u, addr %lx) -> vc-hdl %x (usr-hdl %x)\n",
1198 + __func__, map, map->res_pid, map->res_addr,
1199 + map->res_vc_hdl, map->res_usr_hdl);
1201 + handle = map->res_vc_hdl;
1206 + mutex_unlock(&(sm_state->map_lock));
1209 + /* Use a debug log here as it may be a valid situation that we query
1210 + ** for something that is not mapped, we do not want a kernel log each
1213 + ** There are other error log that would pop up accordingly if someone
1214 + ** subsequently tries to use something invalid after being told not to
1217 + if (handle == 0) {
1218 + pr_debug("[%s]: not a valid map (pid %u, addr %x)\n",
1219 + __func__, pid, addr);
1225 +/* Fetch a user handle corresponding to a mapping of the pid+address
1226 +** returns 0 (ie NULL) if no such handle exists in the global map.
** Mirror of the vc-handle lookup above, returning res_usr_hdl instead. */
1228 +static unsigned int vmcs_sm_usr_handle_from_pid_and_address(unsigned int pid,
1229 + unsigned int addr)
1231 + struct sm_mmap *map = NULL;
1232 + unsigned int handle = 0;
1234 + if (!sm_state || addr == 0)
1237 + mutex_lock(&(sm_state->map_lock));
1239 + /* Lookup the resource.
1241 + if (!list_empty(&sm_state->map_list)) {
1242 + list_for_each_entry(map, &sm_state->map_list, map_list) {
1243 + if (map->res_pid != pid || map->res_addr != addr)
1246 + pr_debug("[%s]: global map %p (pid %u, addr %lx) -> usr-hdl %x (vc-hdl %x)\n",
1247 + __func__, map, map->res_pid, map->res_addr,
1248 + map->res_usr_hdl, map->res_vc_hdl);
1250 + handle = map->res_usr_hdl;
1255 + mutex_unlock(&(sm_state->map_lock));
1258 + /* Use a debug log here as it may be a valid situation that we query
1259 + * for something that is not mapped yet.
1261 + * There are other error log that would pop up accordingly if someone
1262 + * subsequently tries to use something invalid after being told not to
1266 + pr_debug("[%s]: not a valid map (pid %u, addr %x)\n",
1267 + __func__, pid, addr);
1272 +#if defined(DO_NOT_USE)
/* Compiled out: kept for reference only (DO_NOT_USE is never defined). */
1273 +/* Fetch an address corresponding to a mapping of the pid+handle
1274 +** returns 0 (ie NULL) if no such address exists in the global map.
1276 +static unsigned int vmcs_sm_usr_address_from_pid_and_vc_handle(unsigned int pid,
1279 + struct sm_mmap *map = NULL;
1280 + unsigned int addr = 0;
1282 + if (sm_state == NULL || hdl == 0)
1285 + mutex_lock(&(sm_state->map_lock));
1287 + /* Lookup the resource.
1289 + if (!list_empty(&sm_state->map_list)) {
1290 + list_for_each_entry(map, &sm_state->map_list, map_list) {
1291 + if (map->res_pid != pid || map->res_vc_hdl != hdl)
1294 + pr_debug("[%s]: global map %p (pid %u, vc-hdl %x, usr-hdl %x) -> addr %lx\n",
1295 + __func__, map, map->res_pid, map->res_vc_hdl,
1296 + map->res_usr_hdl, map->res_addr);
1298 + addr = map->res_addr;
1303 + mutex_unlock(&(sm_state->map_lock));
1306 + /* Use a debug log here as it may be a valid situation that we query
1307 + ** for something that is not mapped, we do not want a kernel log each
1310 + ** There are other error log that would pop up accordingly if someone
1311 + ** subsequently tries to use something invalid after being told not to
1315 + pr_debug("[%s]: not a valid map (pid %u, hdl %x)\n",
1316 + __func__, pid, hdl);
1322 +/* Fetch an address corresponding to a mapping of the pid+handle
1323 +** returns 0 (ie NULL) if no such address exists in the global map.
** Same scan as above but keyed on the user handle (res_usr_hdl). */
1325 +static unsigned int vmcs_sm_usr_address_from_pid_and_usr_handle(unsigned int
1330 + struct sm_mmap *map = NULL;
1331 + unsigned int addr = 0;
1333 + if (sm_state == NULL || hdl == 0)
1336 + mutex_lock(&(sm_state->map_lock));
1338 + /* Lookup the resource.
1340 + if (!list_empty(&sm_state->map_list)) {
1341 + list_for_each_entry(map, &sm_state->map_list, map_list) {
1342 + if (map->res_pid != pid || map->res_usr_hdl != hdl)
1345 + pr_debug("[%s]: global map %p (pid %u, vc-hdl %x, usr-hdl %x) -> addr %lx\n",
1346 + __func__, map, map->res_pid, map->res_vc_hdl,
1347 + map->res_usr_hdl, map->res_addr);
1349 + addr = map->res_addr;
1354 + mutex_unlock(&(sm_state->map_lock));
1357 + /* Use a debug log here as it may be a valid situation that we query
1358 + * for something that is not mapped, we do not want a kernel log each
1361 + * There are other error log that would pop up accordingly if someone
1362 + * subsequently tries to use something invalid after being told not to
1366 + pr_debug("[%s]: not a valid map (pid %u, hdl %x)\n", __func__,
1372 +/* Adds a resource mapping to the global data list.
/* The map is linked into BOTH the global map list and the owning resource's
 * per-resource map list, and the resource's map_count is bumped -- all under
 * map_lock so lookups never see a half-linked entry. */
1374 +static void vmcs_sm_add_map(struct SM_STATE_T *state,
1375 + struct SM_RESOURCE_T *resource, struct sm_mmap *map)
1377 + mutex_lock(&(state->map_lock));
1379 + /* Add to the global list of mappings
1381 + list_add(&map->map_list, &state->map_list);
1383 + /* Add to the list of mappings for this resource
1385 + list_add(&map->resource_map_list, &resource->map_list);
1386 + resource->map_count++;
1388 + mutex_unlock(&(state->map_lock));
1390 + pr_debug("[%s]: added map %p (pid %u, vc-hdl %x, usr-hdl %x, addr %lx)\n",
1391 + __func__, map, map->res_pid, map->res_vc_hdl,
1392 + map->res_usr_hdl, map->res_addr);
1395 +/* Removes a resource mapping from the global data list.
/* Inverse of vmcs_sm_add_map: unlinks from both lists and decrements
 * map_count (guarded against underflow), all under map_lock. */
1397 +static void vmcs_sm_remove_map(struct SM_STATE_T *state,
1398 + struct SM_RESOURCE_T *resource,
1399 + struct sm_mmap *map)
1401 + mutex_lock(&(state->map_lock));
1403 + /* Remove from the global list of mappings
1405 + list_del(&map->map_list);
1407 + /* Remove from the list of mapping for this resource
1409 + list_del(&map->resource_map_list);
1410 + if (resource->map_count > 0)
1411 + resource->map_count--;
1413 + mutex_unlock(&(state->map_lock));
1415 + pr_debug("[%s]: removed map %p (pid %d, vc-hdl %x, usr-hdl %x, addr %lx)\n",
1416 + __func__, map, map->res_pid, map->res_vc_hdl, map->res_usr_hdl,
1422 +/* Read callback for the global state proc entry.
/* debugfs/seq_file show: dumps the VCHI service handle and every entry of
 * the global map list, then a total count.  Returns 0 (elided). */
1424 +static int vc_sm_global_state_show(struct seq_file *s, void *v)
1426 + struct sm_mmap *map = NULL;
1427 + int map_count = 0;
1429 + if (sm_state == NULL)
1432 + seq_printf(s, "\nVC-ServiceHandle 0x%x\n",
1433 + (unsigned int)sm_state->sm_handle);
1435 + /* Log all applicable mapping(s).
1438 + mutex_lock(&(sm_state->map_lock));
1440 + if (!list_empty(&sm_state->map_list)) {
1441 + list_for_each_entry(map, &sm_state->map_list, map_list) {
1444 + seq_printf(s, "\nMapping 0x%x\n",
1445 + (unsigned int)map);
1446 + seq_printf(s, " TGID %u\n",
1448 + seq_printf(s, " VC-HDL 0x%x\n",
1450 + seq_printf(s, " USR-HDL 0x%x\n",
1451 + map->res_usr_hdl),
1452 + seq_printf(s, " USR-ADDR 0x%lx\n",
1457 + mutex_unlock(&(sm_state->map_lock));
1458 + seq_printf(s, "\n\nTotal map count: %d\n\n", map_count);
/* debugfs show callback: prints the driver-wide termination statistics,
 * split into natural ("Deceased") and forced ("Forcefull") causes, each with
 * a success table and a FAILED table offset by END_ATTEMPT. */
1463 +static int vc_sm_global_statistics_show(struct seq_file *s, void *v)
1467 + /* Global state tracked statistics.
1469 + if (sm_state != NULL) {
1470 + seq_puts(s, "\nDeceased Resources Statistics\n");
1472 + seq_printf(s, "\nNatural Cause (%u occurences)\n",
1473 + sm_state->res_deceased_cnt);
1474 + for (ix = 0; ix < END_ATTEMPT; ix++) {
1475 + if (sm_state->deceased[ix] > 0) {
1476 + seq_printf(s, " %u\t%s\n",
1477 + sm_state->deceased[ix],
1478 + sm_stats_human_read[ix]);
1481 + seq_puts(s, "\n");
1482 + for (ix = 0; ix < END_ATTEMPT; ix++) {
1483 + if (sm_state->deceased[ix + END_ATTEMPT] > 0) {
1484 + seq_printf(s, " %u\tFAILED %s\n",
1485 + sm_state->deceased[ix + END_ATTEMPT],
1486 + sm_stats_human_read[ix]);
1490 + seq_printf(s, "\nForcefull (%u occurences)\n",
1491 + sm_state->res_terminated_cnt);
1492 + for (ix = 0; ix < END_ATTEMPT; ix++) {
1493 + if (sm_state->terminated[ix] > 0) {
1494 + seq_printf(s, " %u\t%s\n",
1495 + sm_state->terminated[ix],
1496 + sm_stats_human_read[ix]);
1499 + seq_puts(s, "\n");
1500 + for (ix = 0; ix < END_ATTEMPT; ix++) {
1501 + if (sm_state->terminated[ix + END_ATTEMPT] > 0) {
1502 + seq_printf(s, " %u\tFAILED %s\n",
1503 + sm_state->terminated[ix +
1505 + sm_stats_human_read[ix]);
1514 +/* Read callback for the statistics proc entry.
/* Per-process debugfs show: s->private is a SM_PDE_T whose priv_data points
 * at the owning SM_PRIV_DATA_T; dumps each resource's stat counters. */
1516 +static int vc_sm_statistics_show(struct seq_file *s, void *v)
1519 + struct SM_PRIV_DATA_T *file_data;
1520 + struct SM_RESOURCE_T *resource;
1521 + int res_count = 0;
1522 + struct SM_PDE_T *p_pde;
1524 + p_pde = (struct SM_PDE_T *)(s->private);
1525 + file_data = (struct SM_PRIV_DATA_T *)(p_pde->priv_data);
1527 + if (file_data == NULL)
1530 + /* Per process statistics.
1533 + seq_printf(s, "\nStatistics for TGID %d\n", file_data->pid);
/* NOTE(review): map_lock guards the resource lists too, not just maps. */
1535 + mutex_lock(&(sm_state->map_lock));
1537 + if (!list_empty(&file_data->resource_list)) {
1538 + list_for_each_entry(resource, &file_data->resource_list,
1542 + seq_printf(s, "\nGUID: 0x%x\n\n",
1543 + resource->res_guid);
1544 + for (ix = 0; ix < END_ATTEMPT; ix++) {
1545 + if (resource->res_stats[ix] > 0) {
1548 + resource->res_stats[ix],
1549 + sm_stats_human_read[ix]);
1552 + seq_puts(s, "\n");
1553 + for (ix = 0; ix < END_ATTEMPT; ix++) {
1554 + if (resource->res_stats[ix + END_ATTEMPT] > 0) {
1556 + " %u\tFAILED %s\n",
1557 + resource->res_stats[
1558 + ix + END_ATTEMPT],
1559 + sm_stats_human_read[ix]);
1565 + mutex_unlock(&(sm_state->map_lock));
1567 + seq_printf(s, "\nResources Count %d\n", res_count);
1574 +/* Read callback for the allocation proc entry. */
/* Per-process debugfs show: lists every resource still tracked for this
 * file descriptor (guid, lock count, mapped state, VC handle/address,
 * size and cache policy), plus a total. */
1575 +static int vc_sm_alloc_show(struct seq_file *s, void *v)
1577 + struct SM_PRIV_DATA_T *file_data;
1578 + struct SM_RESOURCE_T *resource;
1579 + int alloc_count = 0;
1580 + struct SM_PDE_T *p_pde;
1582 + p_pde = (struct SM_PDE_T *)(s->private);
1583 + file_data = (struct SM_PRIV_DATA_T *)(p_pde->priv_data);
1588 + /* Per process statistics. */
1589 + seq_printf(s, "\nAllocation for TGID %d\n", file_data->pid);
1591 + mutex_lock(&(sm_state->map_lock));
1593 + if (!list_empty(&file_data->resource_list)) {
1594 + list_for_each_entry(resource, &file_data->resource_list,
1598 + seq_printf(s, "\nGUID: 0x%x\n",
1599 + resource->res_guid);
1600 + seq_printf(s, "Lock Count: %u\n",
1601 + resource->lock_count);
1602 + seq_printf(s, "Mapped: %s\n",
1603 + (resource->map_count ? "yes" : "no"));
1604 + seq_printf(s, "VC-handle: 0x%x\n",
1605 + resource->res_handle);
1606 + seq_printf(s, "VC-address: 0x%p\n",
1607 + resource->res_base_mem);
1608 + seq_printf(s, "VC-size (bytes): %u\n",
1609 + resource->res_size);
1610 + seq_printf(s, "Cache: %s\n",
1611 + sm_cache_map_vector[resource->res_cached]);
1615 + mutex_unlock(&(sm_state->map_lock));
1617 + seq_printf(s, "\n\nTotal allocation count: %d\n\n", alloc_count);
/* Generic seq_file show: dispatch to the per-entry 'show' callback stored
 * in the SM_PDE_T passed through s->private. */
1623 +static int vc_sm_seq_file_show(struct seq_file *s, void *v)
1625 + struct SM_PDE_T *sm_pde;
1627 + sm_pde = (struct SM_PDE_T *)(s->private);
1629 + if (sm_pde && sm_pde->show)
1630 + sm_pde->show(s, v);
/* debugfs open: single_open() routes i_private (a SM_PDE_T) into
 * s->private for vc_sm_seq_file_show above. */
1635 +static int vc_sm_single_open(struct inode *inode, struct file *file)
1637 + return single_open(file, vc_sm_seq_file_show, inode->i_private);
/* Shared file_operations for every debugfs entry; read op elided here. */
1640 +static const struct file_operations vc_sm_debug_fs_fops = {
1641 + .open = vc_sm_single_open,
1643 + .llseek = seq_lseek,
1644 + .release = single_release,
1647 +/* Adds a resource to the private data list which tracks all the allocated
/* Links the resource into the owner's per-fd list AND the driver-wide
 * resource list, under map_lock. */
1650 +static void vmcs_sm_add_resource(struct SM_PRIV_DATA_T *privdata,
1651 + struct SM_RESOURCE_T *resource)
1653 + mutex_lock(&(sm_state->map_lock));
1654 + list_add(&resource->resource_list, &privdata->resource_list);
1655 + list_add(&resource->global_resource_list, &sm_state->resource_list);
1656 + mutex_unlock(&(sm_state->map_lock));
1658 + pr_debug("[%s]: added resource %p (base addr %p, hdl %x, size %u, cache %u)\n",
1659 + __func__, resource, resource->res_base_mem,
1660 + resource->res_handle, resource->res_size, resource->res_cached);
1663 +/* Locates a resource and acquire a reference on it.
1664 +** The resource won't be deleted while there is a reference on it.
** Searches this fd's resource list by GUID under map_lock; bumps ref_count
** on a hit (break/return handling elided in excerpt). */
1666 +static struct SM_RESOURCE_T *vmcs_sm_acquire_resource(struct SM_PRIV_DATA_T
1668 + unsigned int res_guid)
1670 + struct SM_RESOURCE_T *resource, *ret = NULL;
1672 + mutex_lock(&(sm_state->map_lock));
1674 + list_for_each_entry(resource, &private->resource_list, resource_list) {
1675 + if (resource->res_guid != res_guid)
1678 + pr_debug("[%s]: located resource %p (guid: %x, base addr %p, hdl %x, size %u, cache %u)\n",
1679 + __func__, resource, resource->res_guid,
1680 + resource->res_base_mem, resource->res_handle,
1681 + resource->res_size, resource->res_cached);
1682 + resource->ref_count++;
1687 + mutex_unlock(&(sm_state->map_lock));
1692 +/* Locates a resource and acquire a reference on it.
1693 +** The resource won't be deleted while there is a reference on it.
** Variant used at release time: grabs (and references) the FIRST resource
** still on the fd's list, or NULL when the list is empty. */
1695 +static struct SM_RESOURCE_T *vmcs_sm_acquire_first_resource(
1696 + struct SM_PRIV_DATA_T *private)
1698 + struct SM_RESOURCE_T *resource, *ret = NULL;
1700 + mutex_lock(&(sm_state->map_lock));
1702 + list_for_each_entry(resource, &private->resource_list, resource_list) {
1703 + pr_debug("[%s]: located resource %p (guid: %x, base addr %p, hdl %x, size %u, cache %u)\n",
1704 + __func__, resource, resource->res_guid,
1705 + resource->res_base_mem, resource->res_handle,
1706 + resource->res_size, resource->res_cached);
1707 + resource->ref_count++;
1712 + mutex_unlock(&(sm_state->map_lock));
1717 +/* Locates a resource and acquire a reference on it.
1718 +** The resource won't be deleted while there is a reference on it.
** Cross-process variant: searches the DRIVER-WIDE resource list by GUID,
** used when sharing an allocation between clients. */
1720 +static struct SM_RESOURCE_T *vmcs_sm_acquire_global_resource(unsigned int
1723 + struct SM_RESOURCE_T *resource, *ret = NULL;
1725 + mutex_lock(&(sm_state->map_lock));
1727 + list_for_each_entry(resource, &sm_state->resource_list,
1728 + global_resource_list) {
1729 + if (resource->res_guid != res_guid)
1732 + pr_debug("[%s]: located resource %p (guid: %x, base addr %p, hdl %x, size %u, cache %u)\n",
1733 + __func__, resource, resource->res_guid,
1734 + resource->res_base_mem, resource->res_handle,
1735 + resource->res_size, resource->res_cached);
1736 + resource->ref_count++;
1741 + mutex_unlock(&(sm_state->map_lock));
1746 +/* Release a previously acquired resource.
1747 +** The resource will be deleted when its refcount reaches 0.
** Teardown order: unlink from both lists, unmap any user mappings, free the
** VideoCore allocation (unless another resource still shares the handle),
** release a shared parent, then fold stats via vc_sm_resource_deceased.
** FIX(review): "&current" had been mis-encoded as the HTML entity residue
** "currency-sign + t" in down_write/up_write; restored to &current. */
1749 +static void vmcs_sm_release_resource(struct SM_RESOURCE_T *resource, int force)
1751 + struct SM_PRIV_DATA_T *private = resource->private;
1752 + struct sm_mmap *map, *map_tmp;
1753 + struct SM_RESOURCE_T *res_tmp;
1756 + mutex_lock(&(sm_state->map_lock));
1758 + if (--resource->ref_count) {
1760 + pr_err("[%s]: resource %p in use\n", __func__, resource);
1762 + mutex_unlock(&(sm_state->map_lock));
1766 + /* Time to free the resource. Start by removing it from the list */
1767 + list_del(&resource->resource_list);
1768 + list_del(&resource->global_resource_list);
1770 + /* Walk the global resource list, find out if the resource is used
1771 + * somewhere else. In which case we don't want to delete it.
1773 + list_for_each_entry(res_tmp, &sm_state->resource_list,
1774 + global_resource_list) {
1775 + if (res_tmp->res_handle == resource->res_handle) {
/* Shared elsewhere: zero our copy so the VC free below is skipped. */
1776 + resource->res_handle = 0;
1781 + mutex_unlock(&(sm_state->map_lock));
1783 + pr_debug("[%s]: freeing data - guid %x, hdl %x, base address %p\n",
1784 + __func__, resource->res_guid, resource->res_handle,
1785 + resource->res_base_mem);
1786 + resource->res_stats[FREE]++;
1788 + /* Make sure the resource we're removing is unmapped first */
1789 + if (resource->map_count && !list_empty(&resource->map_list)) {
1790 + down_write(&current->mm->mmap_sem);
1791 + list_for_each_entry_safe(map, map_tmp, &resource->map_list,
1792 + resource_map_list) {
1794 + do_munmap(current->mm, map->res_addr,
1795 + resource->res_size);
1797 + pr_err("[%s]: could not unmap resource %p\n",
1798 + __func__, resource);
1801 + up_write(&current->mm->mmap_sem);
1804 + /* Free up the videocore allocated resource.
1806 + if (resource->res_handle) {
1807 + VC_SM_FREE_T free = {
1808 + resource->res_handle, resource->res_base_mem
1810 + int status = vc_vchi_sm_free(sm_state->sm_handle, &free,
1811 + &private->int_trans_id);
1812 + if (status != 0 && status != -EINTR) {
1813 + pr_err("[%s]: failed to free memory on videocore (status: %u, trans_id: %u)\n",
1814 + __func__, status, private->int_trans_id);
1815 + resource->res_stats[FREE_FAIL]++;
1820 + /* Free up the shared resource.
1822 + if (resource->res_shared)
1823 + vmcs_sm_release_resource(resource->res_shared, 0);
1825 + /* Free up the local resource tracking this allocation.
1827 + vc_sm_resource_deceased(resource, force);
1831 +/* Dump the map table for the driver. If process is -1, dumps the whole table,
1832 +** if process is a valid pid (non -1) dump only the entries associated with the
1833 +** pid of interest.
** Diagnostic helper: pr_info() every matching map under map_lock. */
1835 +static void vmcs_sm_host_walk_map_per_pid(int pid)
1837 + struct sm_mmap *map = NULL;
1839 + /* Make sure the device was started properly.
1841 + if (sm_state == NULL) {
1842 + pr_err("[%s]: invalid device\n", __func__);
1846 + mutex_lock(&(sm_state->map_lock));
1848 + /* Log all applicable mapping(s).
1850 + if (!list_empty(&sm_state->map_list)) {
1851 + list_for_each_entry(map, &sm_state->map_list, map_list) {
1852 + if (pid == -1 || map->res_pid == pid) {
1853 + pr_info("[%s]: tgid: %u - vc-hdl: %x, usr-hdl: %x, usr-addr: %lx\n",
1854 + __func__, map->res_pid, map->res_vc_hdl,
1855 + map->res_usr_hdl, map->res_addr);
1860 + mutex_unlock(&(sm_state->map_lock));
1865 +/* Dump the allocation table from host side point of view. This only dumps the
1866 +** data allocated for this process/device referenced by the file_data.
** Diagnostic helper: pr_info() each resource on the fd's list. */
1868 +static void vmcs_sm_host_walk_alloc(struct SM_PRIV_DATA_T *file_data)
1870 + struct SM_RESOURCE_T *resource = NULL;
1872 + /* Make sure the device was started properly.
1874 + if ((sm_state == NULL) || (file_data == NULL)) {
1875 + pr_err("[%s]: invalid device\n", __func__);
1879 + mutex_lock(&(sm_state->map_lock));
1881 + if (!list_empty(&file_data->resource_list)) {
1882 + list_for_each_entry(resource, &file_data->resource_list,
1884 + pr_info("[%s]: guid: %x - hdl: %x, vc-mem: %p, size: %u, cache: %u\n",
1885 + __func__, resource->res_guid, resource->res_handle,
1886 + resource->res_base_mem, resource->res_size,
1887 + resource->res_cached);
1891 + mutex_unlock(&(sm_state->map_lock));
1896 +/* Create support for private data tracking.
/* Allocates the per-open SM_PRIV_DATA_T, initialises its resource list, and
 * creates a per-pid debugfs directory holding the resources and statistics
 * entries.  Returns NULL on allocation failure. */
1898 +static struct SM_PRIV_DATA_T *vc_sm_create_priv_data(pid_t id)
1900 + char alloc_name[32];
1901 + struct SM_PRIV_DATA_T *file_data = NULL;
1903 + /* Allocate private structure. */
1904 + file_data = kzalloc(sizeof(*file_data), GFP_KERNEL);
1907 + pr_err("[%s]: cannot allocate file data\n", __func__);
1911 + snprintf(alloc_name, sizeof(alloc_name), "%d", id);
1913 + INIT_LIST_HEAD(&file_data->resource_list);
1914 + file_data->pid = id;
1915 + file_data->dir_pid = debugfs_create_dir(alloc_name,
1916 + sm_state->dir_alloc);
1918 + /* TODO: fix this to support querying statistics per pid */
1920 + if (IS_ERR_OR_NULL(file_data->dir_pid)) {
1921 + file_data->dir_pid = NULL;
1923 + struct dentry *dir_entry;
1925 + dir_entry = debugfs_create_file(VC_SM_RESOURCES, S_IRUGO,
1926 + file_data->dir_pid, file_data,
1927 + vc_sm_debug_fs_fops);
1929 + file_data->dir_res.dir_entry = dir_entry;
1930 + file_data->dir_res.priv_data = file_data;
1931 + file_data->dir_res.show = &vc_sm_alloc_show;
1933 + dir_entry = debugfs_create_file(VC_SM_STATS, S_IRUGO,
1934 + file_data->dir_pid, file_data,
1935 + vc_sm_debug_fs_fops);
/* NOTE(review): the statistics entry below overwrites dir_res, clobbering
 * the resources entry set just above -- presumably it should populate a
 * separate SM_PDE_T member; verify against the full source. */
1937 + file_data->dir_res.dir_entry = dir_entry;
1938 + file_data->dir_res.priv_data = file_data;
1939 + file_data->dir_res.show = &vc_sm_statistics_show;
1941 + pr_debug("[%s]: private data allocated %p\n", __func__, file_data);
1948 +/* Open the device. Creates a private state to help track all allocation
1949 +** associated with this device.
** file->private_data carries the per-open tracker for all later ioctls. */
1951 +static int vc_sm_open(struct inode *inode, struct file *file)
1955 + /* Make sure the device was started properly.
1958 + pr_err("[%s]: invalid device\n", __func__);
1963 + file->private_data = vc_sm_create_priv_data(current->tgid);
1964 + if (file->private_data == NULL) {
1965 + pr_err("[%s]: failed to create data tracker\n", __func__);
1975 +/* Close the device. Free up all resources still associated with this device
/* If a prior ioctl was interrupted by a signal (-EINTR), first tell the
 * VideoCore side to clean up the pending transaction, then force-release
 * every remaining resource before tearing down debugfs and private data. */
1978 +static int vc_sm_release(struct inode *inode, struct file *file)
1980 + struct SM_PRIV_DATA_T *file_data =
1981 + (struct SM_PRIV_DATA_T *)file->private_data;
1982 + struct SM_RESOURCE_T *resource;
1985 + /* Make sure the device was started properly.
1987 + if (sm_state == NULL || file_data == NULL) {
1988 + pr_err("[%s]: invalid device\n", __func__);
1993 + pr_debug("[%s]: using private data %p\n", __func__, file_data);
1995 + if (file_data->restart_sys == -EINTR) {
1996 + VC_SM_ACTION_CLEAN_T action_clean;
1998 + pr_debug("[%s]: releasing following EINTR on %u (trans_id: %u) (likely due to signal)...\n",
1999 + __func__, file_data->int_action,
2000 + file_data->int_trans_id);
2002 + action_clean.res_action = file_data->int_action;
2003 + action_clean.action_trans_id = file_data->int_trans_id;
2005 + vc_vchi_sm_clean_up(sm_state->sm_handle, &action_clean);
/* First call drops the ref taken by acquire_first; the second (force=1)
 * drives the refcount to zero so the resource is actually destroyed. */
2008 + while ((resource = vmcs_sm_acquire_first_resource(file_data)) != NULL) {
2009 + vmcs_sm_release_resource(resource, 0);
2010 + vmcs_sm_release_resource(resource, 1);
2013 + /* Remove the corresponding proc entry. */
2014 + debugfs_remove_recursive(file_data->dir_pid);
2016 + /* Terminate the private data.
/* VMA open hook: logs the mapping; the ref_count bump on the backing
 * sm_mmap is elided from this excerpt. */
2024 +static void vcsm_vma_open(struct vm_area_struct *vma)
2026 + struct sm_mmap *map = (struct sm_mmap *)vma->vm_private_data;
2028 + pr_debug("[%s]: virt %lx-%lx, pid %i, pfn %i\n",
2029 + __func__, vma->vm_start, vma->vm_end, (int)current->tgid,
2030 + (int)vma->vm_pgoff);
/* VMA close hook: once the last reference is gone (decrement elided in this
 * excerpt), remove the sm_mmap from the global map table. */
2035 +static void vcsm_vma_close(struct vm_area_struct *vma)
2037 + struct sm_mmap *map = (struct sm_mmap *)vma->vm_private_data;
2039 + pr_debug("[%s]: virt %lx-%lx, pid %i, pfn %i\n",
2040 + __func__, vma->vm_start, vma->vm_end, (int)current->tgid,
2041 + (int)vma->vm_pgoff);
2045 + /* Remove from the map table.
2047 + if (map->ref_count == 0)
2048 + vmcs_sm_remove_map(sm_state, map->resource, map);
/* Page-fault handler: lazily locks the VideoCore resource on first access,
 * invalidates the outer cache for the region, then inserts the faulting PFN.
 * Pages are never pre-mapped by vc_sm_mmap; everything goes through here. */
2051 +static int vcsm_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2053 + struct sm_mmap *map = (struct sm_mmap *)vma->vm_private_data;
2054 + struct SM_RESOURCE_T *resource = map->resource;
2055 + pgoff_t page_offset;
2056 + unsigned long pfn;
2059 + /* Lock the resource if necessary.
2061 + if (!resource->lock_count) {
2062 + VC_SM_LOCK_UNLOCK_T lock_unlock;
2063 + VC_SM_LOCK_RESULT_T lock_result;
2066 + lock_unlock.res_handle = resource->res_handle;
2067 + lock_unlock.res_mem = resource->res_base_mem;
2069 + pr_debug("[%s]: attempt to lock data - hdl %x, base address %p\n",
2070 + __func__, lock_unlock.res_handle, lock_unlock.res_mem);
2072 + /* Lock the videocore allocated resource.
2074 + status = vc_vchi_sm_lock(sm_state->sm_handle,
2075 + &lock_unlock, &lock_result, 0);
/* NOTE(review): the "(status == 0) &&" term is redundant -- the whole
 * condition is equivalent to (status != 0 || lock_result.res_mem == NULL). */
2076 + if ((status != 0) ||
2077 + ((status == 0) && (lock_result.res_mem == NULL))) {
2078 + pr_err("[%s]: failed to lock memory on videocore (status: %u)\n",
2079 + __func__, status);
2080 + resource->res_stats[LOCK_FAIL]++;
2081 + return VM_FAULT_SIGBUS;
2084 + pfn = vcaddr_to_pfn((unsigned long)resource->res_base_mem);
2085 + outer_inv_range(__pfn_to_phys(pfn),
2086 + __pfn_to_phys(pfn) + resource->res_size);
2088 + resource->res_stats[LOCK]++;
2089 + resource->lock_count++;
2091 + /* Keep track of the new base memory.
2093 + if ((lock_result.res_mem != NULL) &&
2094 + (lock_result.res_old_mem != NULL) &&
2095 + (lock_result.res_mem != lock_result.res_old_mem)) {
2096 + resource->res_base_mem = lock_result.res_mem;
2100 + /* We don't use vmf->pgoff since that has the fake offset */
2101 + page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start);
2102 + pfn = (uint32_t)resource->res_base_mem & 0x3FFFFFFF;
2103 + pfn += mm_vc_mem_phys_addr;
2104 + pfn += page_offset;
2105 + pfn >>= PAGE_SHIFT;
2107 + /* Finally, remap it */
2108 + ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
2112 + case -ERESTARTSYS:
2113 + return VM_FAULT_NOPAGE;
2116 + return VM_FAULT_OOM;
2118 + return VM_FAULT_SIGBUS;
/* VMA operations for vcsm mappings; faulting drives all page population. */
2122 +static struct vm_operations_struct vcsm_vm_ops = {
2123 + .open = vcsm_vma_open,
2124 + .close = vcsm_vma_close,
2125 + .fault = vcsm_vma_fault,
2128 +/* Walks a VMA and clean each valid page from the cache */
/* Manual pgd->pud->pmd->pte walk over [addr, end) in current->mm, calling
 * dmac_flush_range (clean+invalidate) on every present page.  Holes and bad
 * table entries are skipped at each level.
 * NOTE(review): a matching pte_unmap for pte_offset_map is not visible in
 * this excerpt -- confirm in the full source. */
2129 +static void vcsm_vma_cache_clean_page_range(unsigned long addr,
2130 + unsigned long end)
2136 + unsigned long pgd_next, pud_next, pmd_next;
2142 + pgd = pgd_offset(current->mm, addr);
2144 + pgd_next = pgd_addr_end(addr, end);
2146 + if (pgd_none(*pgd) || pgd_bad(*pgd))
2150 + pud = pud_offset(pgd, addr);
2152 + pud_next = pud_addr_end(addr, pgd_next);
2153 + if (pud_none(*pud) || pud_bad(*pud))
2157 + pmd = pmd_offset(pud, addr);
2159 + pmd_next = pmd_addr_end(addr, pud_next);
2160 + if (pmd_none(*pmd) || pmd_bad(*pmd))
2164 + pte = pte_offset_map(pmd, addr);
2166 + if (pte_none(*pte)
2167 + || !pte_present(*pte))
2170 + /* Clean + invalidate */
2171 + dmac_flush_range((const void *) addr,
2173 + (addr + PAGE_SIZE));
2175 + } while (pte++, addr +=
2176 + PAGE_SIZE, addr != pmd_next);
2179 + } while (pmd++, addr = pmd_next, addr != pud_next);
2181 + } while (pud++, addr = pud_next, addr != pgd_next);
2182 + } while (pgd++, addr = pgd_next, addr != end);
2185 +/* Map an allocated data into something that the user space.
/* mmap entry point.  The mmap offset (vm_pgoff << PAGE_SHIFT) is the
 * resource GUID, not a file offset: it selects which allocation to map.
 * Validates ownership (tgid) and exact size, registers a tracking sm_mmap,
 * and installs vcsm_vm_ops -- no pages are mapped here; vcsm_vma_fault
 * populates them on demand. */
2187 +static int vc_sm_mmap(struct file *file, struct vm_area_struct *vma)
2190 + struct SM_PRIV_DATA_T *file_data =
2191 + (struct SM_PRIV_DATA_T *)file->private_data;
2192 + struct SM_RESOURCE_T *resource = NULL;
2193 + struct sm_mmap *map = NULL;
2195 + /* Make sure the device was started properly.
2197 + if ((sm_state == NULL) || (file_data == NULL)) {
2198 + pr_err("[%s]: invalid device\n", __func__);
2202 + pr_debug("[%s]: private data %p, guid %x\n", __func__, file_data,
2203 + ((unsigned int)vma->vm_pgoff << PAGE_SHIFT));
2205 + /* We lookup to make sure that the data we are being asked to mmap is
2206 + ** something that we allocated.
2208 + ** We use the offset information as the key to tell us which resource
2209 + ** we are mapping.
2211 + resource = vmcs_sm_acquire_resource(file_data,
2212 + ((unsigned int)vma->vm_pgoff <<
2214 + if (resource == NULL) {
2215 + pr_err("[%s]: failed to locate resource for guid %x\n", __func__,
2216 + ((unsigned int)vma->vm_pgoff << PAGE_SHIFT));
2220 + pr_debug("[%s]: guid %x, tgid %u, %u, %u\n",
2221 + __func__, resource->res_guid, current->tgid, resource->pid,
2224 + /* Check permissions.
2226 + if (resource->pid && (resource->pid != current->tgid)) {
2227 + pr_err("[%s]: current tgid %u != %u owner\n",
2228 + __func__, current->tgid, resource->pid);
2233 + /* Verify that what we are asked to mmap is proper.
2235 + if (resource->res_size != (unsigned int)(vma->vm_end - vma->vm_start)) {
2236 + pr_err("[%s]: size inconsistency (resource: %u - mmap: %u)\n",
2238 + resource->res_size,
2239 + (unsigned int)(vma->vm_end - vma->vm_start));
2245 + /* Keep track of the tuple in the global resource list such that one
2246 + * can do a mapping lookup for address/memory handle.
2248 + map = kzalloc(sizeof(*map), GFP_KERNEL);
2249 + if (map == NULL) {
2250 + pr_err("[%s]: failed to allocate global tracking resource\n",
2256 + map->res_pid = current->tgid;
2257 + map->res_vc_hdl = resource->res_handle;
2258 + map->res_usr_hdl = resource->res_guid;
2259 + map->res_addr = (long unsigned int)vma->vm_start;
2260 + map->resource = resource;
2262 + vmcs_sm_add_map(sm_state, resource, map);
2264 + /* We are not actually mapping the pages, we just provide a fault
2265 + ** handler to allow pages to be mapped when accessed
2268 + VM_IO | VM_PFNMAP | VM_DONTCOPY | VM_DONTEXPAND;
2269 + vma->vm_ops = &vcsm_vm_ops;
2270 + vma->vm_private_data = map;
2272 + /* vm_pgoff is the first PFN of the mapped memory */
2273 + vma->vm_pgoff = (unsigned long)resource->res_base_mem & 0x3FFFFFFF;
2274 + vma->vm_pgoff += mm_vc_mem_phys_addr;
2275 + vma->vm_pgoff >>= PAGE_SHIFT;
2277 + if ((resource->res_cached == VMCS_SM_CACHE_NONE) ||
2278 + (resource->res_cached == VMCS_SM_CACHE_VC)) {
2279 + /* Allocated non host cached memory, honour it.
2281 + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
2284 + pr_debug("[%s]: resource %p (guid %x) - cnt %u, base address %p, handle %x, size %u (%u), cache %u\n",
2286 + resource, resource->res_guid, resource->lock_count,
2287 + resource->res_base_mem, resource->res_handle,
2288 + resource->res_size, (unsigned int)(vma->vm_end - vma->vm_start),
2289 + resource->res_cached);
2291 + pr_debug("[%s]: resource %p (base address %p, handle %x) - map-count %d, usr-addr %x\n",
2292 + __func__, resource, resource->res_base_mem,
2293 + resource->res_handle, resource->map_count,
2294 + (unsigned int)vma->vm_start);
2296 + vcsm_vma_open(vma);
2297 + resource->res_stats[MAP]++;
2298 + vmcs_sm_release_resource(resource, 0);
/* Error path: count the failure and drop the acquire reference. */
2302 + resource->res_stats[MAP_FAIL]++;
2303 + vmcs_sm_release_resource(resource, 0);
2307 +/* Allocate a shared memory handle and block.
/* ioctl backend: builds a VC_SM_ALLOC_T from user parameters (page-aligned,
 * cached on VC side only for VC/BOTH policies), allocates on VideoCore over
 * VCHI, then records the result in a new SM_RESOURCE_T and hands the caller
 * a page-shifted GUID via ioparam->handle.  -EINTR from VCHI becomes
 * -ERESTARTSYS with state saved so vc_sm_release can clean up.
 * NOTE: "alignement" is the (misspelled) field name of the shared
 * VC_SM_ALLOC_T interface and must stay as-is. */
2309 +int vc_sm_ioctl_alloc(struct SM_PRIV_DATA_T *private,
2310 + struct vmcs_sm_ioctl_alloc *ioparam)
2314 + struct SM_RESOURCE_T *resource;
2315 + VC_SM_ALLOC_T alloc = { 0 };
2316 + VC_SM_ALLOC_RESULT_T result = { 0 };
2318 + /* Setup our allocation parameters */
2319 + alloc.type = ((ioparam->cached == VMCS_SM_CACHE_VC)
2320 + || (ioparam->cached ==
2321 + VMCS_SM_CACHE_BOTH)) ? VC_SM_ALLOC_CACHED :
2322 + VC_SM_ALLOC_NON_CACHED;
2323 + alloc.base_unit = ioparam->size;
2324 + alloc.num_unit = ioparam->num;
2325 + alloc.allocator = current->tgid;
2326 + /* Align to kernel page size */
2327 + alloc.alignement = 4096;
2328 + /* Align the size to the kernel page size */
2330 + (alloc.base_unit + alloc.alignement - 1) & ~(alloc.alignement - 1);
2331 + if (*ioparam->name) {
2332 + memcpy(alloc.name, ioparam->name, sizeof(alloc.name) - 1);
2334 + memcpy(alloc.name, VMCS_SM_RESOURCE_NAME_DEFAULT,
2335 + sizeof(VMCS_SM_RESOURCE_NAME_DEFAULT));
2338 + pr_debug("[%s]: attempt to allocate \"%s\" data - type %u, base %u (%u), num %u, alignement %u\n",
2339 + __func__, alloc.name, alloc.type, ioparam->size,
2340 + alloc.base_unit, alloc.num_unit, alloc.alignement);
2342 + /* Allocate local resource to track this allocation.
2344 + resource = kzalloc(sizeof(*resource), GFP_KERNEL);
2349 + INIT_LIST_HEAD(&resource->map_list);
2350 + resource->ref_count++;
2351 + resource->pid = current->tgid;
2353 + /* Allocate the videocore resource.
2355 + status = vc_vchi_sm_alloc(sm_state->sm_handle, &alloc, &result,
2356 + &private->int_trans_id);
2357 + if (status == -EINTR) {
2358 + pr_debug("[%s]: requesting allocate memory action restart (trans_id: %u)\n",
2359 + __func__, private->int_trans_id);
2360 + ret = -ERESTARTSYS;
2361 + private->restart_sys = -EINTR;
2362 + private->int_action = VC_SM_MSG_TYPE_ALLOC;
2364 + } else if (status != 0 || (status == 0 && result.res_mem == NULL)) {
2365 + pr_err("[%s]: failed to allocate memory on videocore (status: %u, trans_id: %u)\n",
2366 + __func__, status, private->int_trans_id);
2368 + resource->res_stats[ALLOC_FAIL]++;
2372 + /* Keep track of the resource we created.
2374 + resource->private = private;
2375 + resource->res_handle = result.res_handle;
2376 + resource->res_base_mem = result.res_mem;
2377 + resource->res_size = alloc.base_unit * alloc.num_unit;
2378 + resource->res_cached = ioparam->cached;
2380 + /* Kernel/user GUID. This global identifier is used for mmap'ing the
2381 + * allocated region from user space, it is passed as the mmap'ing
2382 + * offset, we use it to 'hide' the videocore handle/address.
2384 + mutex_lock(&sm_state->lock);
2385 + resource->res_guid = ++sm_state->guid;
2386 + mutex_unlock(&sm_state->lock);
2387 + resource->res_guid <<= PAGE_SHIFT;
2389 + vmcs_sm_add_resource(private, resource);
2391 + pr_debug("[%s]: allocated data - guid %x, hdl %x, base address %p, size %d, cache %d\n",
2392 + __func__, resource->res_guid, resource->res_handle,
2393 + resource->res_base_mem, resource->res_size,
2394 + resource->res_cached);
2397 + resource->res_stats[ALLOC]++;
2398 + ioparam->handle = resource->res_guid;
/* Error path: log and retire the partially-created resource. */
2402 + pr_err("[%s]: failed to allocate \"%s\" data (%i) - type %u, base %u (%u), num %u, alignment %u\n",
2403 + __func__, alloc.name, ret, alloc.type, ioparam->size,
2404 + alloc.base_unit, alloc.num_unit, alloc.alignement);
2405 + if (resource != NULL) {
2406 + vc_sm_resource_deceased(resource, 1);
2412 +/* Share an allocate memory handle and block.
2414 +int vc_sm_ioctl_alloc_share(struct SM_PRIV_DATA_T *private,
2415 + struct vmcs_sm_ioctl_alloc_share *ioparam)
2417 + struct SM_RESOURCE_T *resource, *shared_resource;
2420 + pr_debug("[%s]: attempt to share resource %u\n", __func__,
2423 + shared_resource = vmcs_sm_acquire_global_resource(ioparam->handle);
2424 + if (shared_resource == NULL) {
2429 + /* Allocate local resource to track this allocation.
2431 + resource = kzalloc(sizeof(*resource), GFP_KERNEL);
2432 + if (resource == NULL) {
2433 + pr_err("[%s]: failed to allocate local tracking resource\n",
2438 + INIT_LIST_HEAD(&resource->map_list);
2439 + resource->ref_count++;
2440 + resource->pid = current->tgid;
2442 + /* Keep track of the resource we created.
2444 + resource->private = private;
2445 + resource->res_handle = shared_resource->res_handle;
2446 + resource->res_base_mem = shared_resource->res_base_mem;
2447 + resource->res_size = shared_resource->res_size;
2448 + resource->res_cached = shared_resource->res_cached;
2449 + resource->res_shared = shared_resource;
2451 + mutex_lock(&sm_state->lock);
2452 + resource->res_guid = ++sm_state->guid;
2453 + mutex_unlock(&sm_state->lock);
2454 + resource->res_guid <<= PAGE_SHIFT;
2456 + vmcs_sm_add_resource(private, resource);
2458 + pr_debug("[%s]: allocated data - guid %x, hdl %x, base address %p, size %d, cache %d\n",
2459 + __func__, resource->res_guid, resource->res_handle,
2460 + resource->res_base_mem, resource->res_size,
2461 + resource->res_cached);
2464 + resource->res_stats[ALLOC]++;
2465 + ioparam->handle = resource->res_guid;
2466 + ioparam->size = resource->res_size;
2470 + pr_err("[%s]: failed to share %u\n", __func__, ioparam->handle);
2471 + if (shared_resource != NULL)
2472 + vmcs_sm_release_resource(shared_resource, 0);
2477 +/* Free a previously allocated shared memory handle and block.
2479 +static int vc_sm_ioctl_free(struct SM_PRIV_DATA_T *private,
2480 + struct vmcs_sm_ioctl_free *ioparam)
2482 + struct SM_RESOURCE_T *resource =
2483 + vmcs_sm_acquire_resource(private, ioparam->handle);
2485 + if (resource == NULL) {
2486 + pr_err("[%s]: resource for guid %u does not exist\n", __func__,
2491 + /* Check permissions.
2493 + if (resource->pid && (resource->pid != current->tgid)) {
2494 + pr_err("[%s]: current tgid %u != %u owner\n",
2495 + __func__, current->tgid, resource->pid);
2496 + vmcs_sm_release_resource(resource, 0);
2500 + vmcs_sm_release_resource(resource, 0);
2501 + vmcs_sm_release_resource(resource, 0);
2505 +/* Resize a previously allocated shared memory handle and block.
2507 +static int vc_sm_ioctl_resize(struct SM_PRIV_DATA_T *private,
2508 + struct vmcs_sm_ioctl_resize *ioparam)
2512 + VC_SM_RESIZE_T resize;
2513 + struct SM_RESOURCE_T *resource;
2515 + /* Locate resource from GUID.
2517 + resource = vmcs_sm_acquire_resource(private, ioparam->handle);
2519 + pr_err("[%s]: failed resource - guid %x\n",
2520 + __func__, ioparam->handle);
2525 + /* If the resource is locked, its reference count will be not NULL,
2526 + ** in which case we will not be allowed to resize it anyways, so
2527 + ** reject the attempt here.
2529 + if (resource->lock_count != 0) {
2530 + pr_err("[%s]: cannot resize - guid %x, ref-cnt %d\n",
2531 + __func__, ioparam->handle, resource->lock_count);
2536 + /* Check permissions.
2538 + if (resource->pid && (resource->pid != current->tgid)) {
2539 + pr_err("[%s]: current tgid %u != %u owner\n", __func__,
2540 + current->tgid, resource->pid);
2545 + if (resource->map_count != 0) {
2546 + pr_err("[%s]: cannot resize - guid %x, ref-cnt %d\n",
2547 + __func__, ioparam->handle, resource->map_count);
2552 + resize.res_handle = resource->res_handle;
2553 + resize.res_mem = resource->res_base_mem;
2554 + resize.res_new_size = ioparam->new_size;
2556 + pr_debug("[%s]: attempt to resize data - guid %x, hdl %x, base address %p\n",
2557 + __func__, ioparam->handle, resize.res_handle, resize.res_mem);
2559 + /* Resize the videocore allocated resource.
2561 + status = vc_vchi_sm_resize(sm_state->sm_handle, &resize,
2562 + &private->int_trans_id);
2563 + if (status == -EINTR) {
2564 + pr_debug("[%s]: requesting resize memory action restart (trans_id: %u)\n",
2565 + __func__, private->int_trans_id);
2566 + ret = -ERESTARTSYS;
2567 + private->restart_sys = -EINTR;
2568 + private->int_action = VC_SM_MSG_TYPE_RESIZE;
2570 + } else if (status != 0) {
2571 + pr_err("[%s]: failed to resize memory on videocore (status: %u, trans_id: %u)\n",
2572 + __func__, status, private->int_trans_id);
2577 + pr_debug("[%s]: success to resize data - hdl %x, size %d -> %d\n",
2578 + __func__, resize.res_handle, resource->res_size,
2579 + resize.res_new_size);
2581 + /* Successfully resized, save the information and inform the user.
2583 + ioparam->old_size = resource->res_size;
2584 + resource->res_size = resize.res_new_size;
2588 + vmcs_sm_release_resource(resource, 0);
2593 +/* Lock a previously allocated shared memory handle and block.
2595 +static int vc_sm_ioctl_lock(struct SM_PRIV_DATA_T *private,
2596 + struct vmcs_sm_ioctl_lock_unlock *ioparam,
2597 + int change_cache, enum vmcs_sm_cache_e cache_type,
2598 + unsigned int vc_addr)
2601 + VC_SM_LOCK_UNLOCK_T lock;
2602 + VC_SM_LOCK_RESULT_T result;
2603 + struct SM_RESOURCE_T *resource;
2605 + struct sm_mmap *map, *map_tmp;
2606 + long unsigned int phys_addr;
2610 + /* Locate resource from GUID.
2612 + resource = vmcs_sm_acquire_resource(private, ioparam->handle);
2613 + if (resource == NULL) {
2618 + /* Check permissions.
2620 + if (resource->pid && (resource->pid != current->tgid)) {
2621 + pr_err("[%s]: current tgid %u != %u owner\n", __func__,
2622 + current->tgid, resource->pid);
2627 + lock.res_handle = resource->res_handle;
2628 + lock.res_mem = resource->res_base_mem;
2630 + /* Take the lock and get the address to be mapped.
2632 + if (vc_addr == 0) {
2633 + pr_debug("[%s]: attempt to lock data - guid %x, hdl %x, base address %p\n",
2634 + __func__, ioparam->handle, lock.res_handle,
2637 + /* Lock the videocore allocated resource.
2639 + status = vc_vchi_sm_lock(sm_state->sm_handle, &lock, &result,
2640 + &private->int_trans_id);
2641 + if (status == -EINTR) {
2642 + pr_debug("[%s]: requesting lock memory action restart (trans_id: %u)\n",
2643 + __func__, private->int_trans_id);
2644 + ret = -ERESTARTSYS;
2645 + private->restart_sys = -EINTR;
2646 + private->int_action = VC_SM_MSG_TYPE_LOCK;
2648 + } else if (status != 0 ||
2649 + (status == 0 && result.res_mem == NULL)) {
2650 + pr_err("[%s]: failed to lock memory on videocore (status: %u, trans_id: %u)\n",
2651 + __func__, status, private->int_trans_id);
2653 + resource->res_stats[LOCK_FAIL]++;
2657 + pr_debug("[%s]: succeed to lock data - hdl %x, base address %p (%p), ref-cnt %d\n",
2658 + __func__, lock.res_handle, result.res_mem,
2659 + lock.res_mem, resource->lock_count);
2661 + /* Lock assumed taken already, address to be mapped is known.
2664 + resource->res_base_mem = (void *)vc_addr;
2666 + resource->res_stats[LOCK]++;
2667 + resource->lock_count++;
2669 + /* Keep track of the new base memory allocation if it has changed.
2671 + if ((vc_addr == 0) &&
2672 + (result.res_mem != NULL) &&
2673 + (result.res_old_mem != NULL) &&
2674 + (result.res_mem != result.res_old_mem)) {
2675 + resource->res_base_mem = result.res_mem;
2677 + /* Kernel allocated resources.
2679 + if (resource->pid == 0) {
2680 + if (!list_empty(&resource->map_list)) {
2681 + list_for_each_entry_safe(map, map_tmp,
2682 + &resource->map_list,
2683 + resource_map_list) {
2684 + if (map->res_addr) {
2685 + iounmap((void *)map->res_addr);
2686 + map->res_addr = 0;
2688 + vmcs_sm_remove_map(sm_state,
2699 + resource->res_cached = cache_type;
2701 + if (resource->map_count) {
2703 + vmcs_sm_usr_address_from_pid_and_usr_handle(
2704 + current->tgid, ioparam->handle);
2706 + pr_debug("[%s] map_count %d private->pid %d current->tgid %d hnd %x addr %u\n",
2707 + __func__, resource->map_count, private->pid,
2708 + current->tgid, ioparam->handle, ioparam->addr);
2710 + /* Kernel allocated resources.
2712 + if (resource->pid == 0) {
2713 + pr_debug("[%s]: attempt mapping kernel resource - guid %x, hdl %x\n",
2714 + __func__, ioparam->handle, lock.res_handle);
2716 + ioparam->addr = 0;
2718 + map = kzalloc(sizeof(*map), GFP_KERNEL);
2719 + if (map == NULL) {
2720 + pr_err("[%s]: failed allocating tracker\n",
2725 + phys_addr = (uint32_t)resource->res_base_mem &
2727 + phys_addr += mm_vc_mem_phys_addr;
2728 + if (resource->res_cached
2729 + == VMCS_SM_CACHE_HOST) {
2730 + ioparam->addr = (long unsigned int)
2731 + /* TODO - make cached work */
2732 + ioremap_nocache(phys_addr,
2733 + resource->res_size);
2735 + pr_debug("[%s]: mapping kernel - guid %x, hdl %x - cached mapping %u\n",
2736 + __func__, ioparam->handle,
2737 + lock.res_handle, ioparam->addr);
2739 + ioparam->addr = (long unsigned int)
2740 + ioremap_nocache(phys_addr,
2741 + resource->res_size);
2743 + pr_debug("[%s]: mapping kernel- guid %x, hdl %x - non cached mapping %u\n",
2744 + __func__, ioparam->handle,
2745 + lock.res_handle, ioparam->addr);
2749 + map->res_vc_hdl = resource->res_handle;
2750 + map->res_usr_hdl = resource->res_guid;
2751 + map->res_addr = ioparam->addr;
2752 + map->resource = resource;
2755 + vmcs_sm_add_map(sm_state, resource, map);
2758 + ioparam->addr = 0;
2763 + vmcs_sm_release_resource(resource, 0);
2768 +/* Unlock a previously allocated shared memory handle and block.
2770 +static int vc_sm_ioctl_unlock(struct SM_PRIV_DATA_T *private,
2771 + struct vmcs_sm_ioctl_lock_unlock *ioparam,
2772 + int flush, int wait_reply, int no_vc_unlock)
2775 + VC_SM_LOCK_UNLOCK_T unlock;
2776 + struct sm_mmap *map, *map_tmp;
2777 + struct SM_RESOURCE_T *resource;
2782 + /* Locate resource from GUID.
2784 + resource = vmcs_sm_acquire_resource(private, ioparam->handle);
2785 + if (resource == NULL) {
2790 + /* Check permissions.
2792 + if (resource->pid && (resource->pid != current->tgid)) {
2793 + pr_err("[%s]: current tgid %u != %u owner\n",
2794 + __func__, current->tgid, resource->pid);
2799 + unlock.res_handle = resource->res_handle;
2800 + unlock.res_mem = resource->res_base_mem;
2802 + pr_debug("[%s]: attempt to unlock data - guid %x, hdl %x, base address %p\n",
2803 + __func__, ioparam->handle, unlock.res_handle, unlock.res_mem);
2805 + /* User space allocated resources.
2807 + if (resource->pid) {
2808 + /* Flush if requested */
2809 + if (resource->res_cached && flush) {
2810 + dma_addr_t phys_addr = 0;
2811 + resource->res_stats[FLUSH]++;
2814 + (dma_addr_t)((uint32_t)resource->res_base_mem &
2816 + phys_addr += (dma_addr_t)mm_vc_mem_phys_addr;
2818 + /* L1 cache flush */
2819 + down_read(&current->mm->mmap_sem);
2820 + list_for_each_entry(map, &resource->map_list,
2821 + resource_map_list) {
2823 + unsigned long start;
2824 + unsigned long end;
2825 + start = map->vma->vm_start;
2826 + end = map->vma->vm_end;
2828 + vcsm_vma_cache_clean_page_range(
2832 + up_read(&current->mm->mmap_sem);
2834 + /* L2 cache flush */
2835 + outer_clean_range(phys_addr,
2837 + (size_t) resource->res_size);
2840 + /* We need to zap all the vmas associated with this resource */
2841 + if (resource->lock_count == 1) {
2842 + down_read(&current->mm->mmap_sem);
2843 + list_for_each_entry(map, &resource->map_list,
2844 + resource_map_list) {
2846 + zap_vma_ptes(map->vma,
2847 + map->vma->vm_start,
2848 + map->vma->vm_end -
2849 + map->vma->vm_start);
2852 + up_read(&current->mm->mmap_sem);
2855 + /* Kernel allocated resources. */
2857 + /* Global + Taken in this context */
2858 + if (resource->ref_count == 2) {
2859 + if (!list_empty(&resource->map_list)) {
2860 + list_for_each_entry_safe(map, map_tmp,
2861 + &resource->map_list,
2862 + resource_map_list) {
2863 + if (map->res_addr) {
2865 + (resource->res_cached ==
2866 + VMCS_SM_CACHE_HOST)) {
2869 + phys_addr = (uint32_t)
2870 + resource->res_base_mem & 0x3FFFFFFF;
2872 + mm_vc_mem_phys_addr;
2874 + /* L1 cache flush */
2875 + dmac_flush_range((const
2878 + map->res_addr, (const void *)
2879 + (map->res_addr + resource->res_size));
2881 + /* L2 cache flush */
2886 + resource->res_size);
2889 + iounmap((void *)map->res_addr);
2890 + map->res_addr = 0;
2892 + vmcs_sm_remove_map(sm_state,
2902 + if (resource->lock_count) {
2903 + /* Bypass the videocore unlock.
2907 + /* Unlock the videocore allocated resource.
2911 + vc_vchi_sm_unlock(sm_state->sm_handle, &unlock,
2912 + &private->int_trans_id,
2914 + if (status == -EINTR) {
2915 + pr_debug("[%s]: requesting unlock memory action restart (trans_id: %u)\n",
2916 + __func__, private->int_trans_id);
2918 + ret = -ERESTARTSYS;
2919 + resource->res_stats[UNLOCK]--;
2920 + private->restart_sys = -EINTR;
2921 + private->int_action = VC_SM_MSG_TYPE_UNLOCK;
2923 + } else if (status != 0) {
2924 + pr_err("[%s]: failed to unlock vc mem (status: %u, trans_id: %u)\n",
2925 + __func__, status, private->int_trans_id);
2928 + resource->res_stats[UNLOCK_FAIL]++;
2933 + resource->res_stats[UNLOCK]++;
2934 + resource->lock_count--;
2937 + pr_debug("[%s]: success to unlock data - hdl %x, base address %p, ref-cnt %d\n",
2938 + __func__, unlock.res_handle, unlock.res_mem,
2939 + resource->lock_count);
2943 + vmcs_sm_release_resource(resource, 0);
2948 +/* Handle control from host. */
2949 +static long vc_sm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2952 + unsigned int cmdnr = _IOC_NR(cmd);
2953 + struct SM_PRIV_DATA_T *file_data =
2954 + (struct SM_PRIV_DATA_T *)file->private_data;
2955 + struct SM_RESOURCE_T *resource = NULL;
2957 + /* Validate we can work with this device. */
2958 + if ((sm_state == NULL) || (file_data == NULL)) {
2959 + pr_err("[%s]: invalid device\n", __func__);
2964 + pr_debug("[%s]: cmd %x tgid %u, owner %u\n", __func__, cmdnr,
2965 + current->tgid, file_data->pid);
2967 + /* Action is a re-post of a previously interrupted action? */
2968 + if (file_data->restart_sys == -EINTR) {
2969 + VC_SM_ACTION_CLEAN_T action_clean;
2971 + pr_debug("[%s]: clean up of action %u (trans_id: %u) following EINTR\n",
2972 + __func__, file_data->int_action,
2973 + file_data->int_trans_id);
2975 + action_clean.res_action = file_data->int_action;
2976 + action_clean.action_trans_id = file_data->int_trans_id;
2978 + vc_vchi_sm_clean_up(sm_state->sm_handle, &action_clean);
2980 + file_data->restart_sys = 0;
2983 + /* Now process the command.
2986 + /* New memory allocation.
2988 + case VMCS_SM_CMD_ALLOC:
2990 + struct vmcs_sm_ioctl_alloc ioparam;
2992 + /* Get the parameter data.
2994 + if (copy_from_user
2995 + (&ioparam, (void *)arg, sizeof(ioparam)) != 0) {
2996 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
3002 + ret = vc_sm_ioctl_alloc(file_data, &ioparam);
3004 + (copy_to_user((void *)arg,
3005 + &ioparam, sizeof(ioparam)) != 0)) {
3006 + struct vmcs_sm_ioctl_free freeparam = {
3009 + pr_err("[%s]: failed to copy-to-user for cmd %x\n",
3011 + vc_sm_ioctl_free(file_data, &freeparam);
3021 + /* Share existing memory allocation.
3023 + case VMCS_SM_CMD_ALLOC_SHARE:
3025 + struct vmcs_sm_ioctl_alloc_share ioparam;
3027 + /* Get the parameter data.
3029 + if (copy_from_user
3030 + (&ioparam, (void *)arg, sizeof(ioparam)) != 0) {
3031 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
3037 + ret = vc_sm_ioctl_alloc_share(file_data, &ioparam);
3039 + /* Copy result back to user.
3042 + && copy_to_user((void *)arg, &ioparam,
3043 + sizeof(ioparam)) != 0) {
3044 + struct vmcs_sm_ioctl_free freeparam = {
3047 + pr_err("[%s]: failed to copy-to-user for cmd %x\n",
3049 + vc_sm_ioctl_free(file_data, &freeparam);
3059 + /* Lock (attempt to) *and* register a cache behavior change.
3061 + case VMCS_SM_CMD_LOCK_CACHE:
3063 + struct vmcs_sm_ioctl_lock_cache ioparam;
3064 + struct vmcs_sm_ioctl_lock_unlock lock;
3066 + /* Get parameter data.
3068 + if (copy_from_user
3069 + (&ioparam, (void *)arg, sizeof(ioparam)) != 0) {
3070 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
3076 + lock.handle = ioparam.handle;
3078 + vc_sm_ioctl_lock(file_data, &lock, 1,
3079 + ioparam.cached, 0);
3087 + /* Lock (attempt to) existing memory allocation.
3089 + case VMCS_SM_CMD_LOCK:
3091 + struct vmcs_sm_ioctl_lock_unlock ioparam;
3093 + /* Get parameter data.
3095 + if (copy_from_user
3096 + (&ioparam, (void *)arg, sizeof(ioparam)) != 0) {
3097 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
3103 + ret = vc_sm_ioctl_lock(file_data, &ioparam, 0, 0, 0);
3105 + /* Copy result back to user.
3107 + if (copy_to_user((void *)arg, &ioparam, sizeof(ioparam))
3109 + pr_err("[%s]: failed to copy-to-user for cmd %x\n",
3120 + /* Unlock (attempt to) existing memory allocation.
3122 + case VMCS_SM_CMD_UNLOCK:
3124 + struct vmcs_sm_ioctl_lock_unlock ioparam;
3126 + /* Get parameter data.
3128 + if (copy_from_user
3129 + (&ioparam, (void *)arg, sizeof(ioparam)) != 0) {
3130 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
3136 + ret = vc_sm_ioctl_unlock(file_data, &ioparam, 0, 1, 0);
3144 + /* Resize (attempt to) existing memory allocation.
3146 + case VMCS_SM_CMD_RESIZE:
3148 + struct vmcs_sm_ioctl_resize ioparam;
3150 + /* Get parameter data.
3152 + if (copy_from_user
3153 + (&ioparam, (void *)arg, sizeof(ioparam)) != 0) {
3154 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
3160 + ret = vc_sm_ioctl_resize(file_data, &ioparam);
3162 + /* Copy result back to user.
3164 + if (copy_to_user((void *)arg, &ioparam, sizeof(ioparam))
3166 + pr_err("[%s]: failed to copy-to-user for cmd %x\n",
3177 + /* Terminate existing memory allocation.
3179 + case VMCS_SM_CMD_FREE:
3181 + struct vmcs_sm_ioctl_free ioparam;
3183 + /* Get parameter data.
3185 + if (copy_from_user
3186 + (&ioparam, (void *)arg, sizeof(ioparam)) != 0) {
3187 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
3193 + ret = vc_sm_ioctl_free(file_data, &ioparam);
3201 + /* Walk allocation on videocore, information shows up in the
3204 + case VMCS_SM_CMD_VC_WALK_ALLOC:
3206 + pr_debug("[%s]: invoking walk alloc\n", __func__);
3208 + if (vc_vchi_sm_walk_alloc(sm_state->sm_handle) != 0)
3209 + pr_err("[%s]: failed to walk-alloc on videocore\n",
3217 +/* Walk mapping table on host, information shows up in the
3220 + case VMCS_SM_CMD_HOST_WALK_MAP:
3222 + /* Use pid of -1 to tell to walk the whole map. */
3223 + vmcs_sm_host_walk_map_per_pid(-1);
3230 + /* Walk mapping table per process on host. */
3231 + case VMCS_SM_CMD_HOST_WALK_PID_ALLOC:
3233 + struct vmcs_sm_ioctl_walk ioparam;
3235 + /* Get parameter data. */
3236 + if (copy_from_user(&ioparam,
3237 + (void *)arg, sizeof(ioparam)) != 0) {
3238 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
3244 + vmcs_sm_host_walk_alloc(file_data);
3251 + /* Walk allocation per process on host. */
3252 + case VMCS_SM_CMD_HOST_WALK_PID_MAP:
3254 + struct vmcs_sm_ioctl_walk ioparam;
3256 + /* Get parameter data. */
3257 + if (copy_from_user(&ioparam,
3258 + (void *)arg, sizeof(ioparam)) != 0) {
3259 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
3265 + vmcs_sm_host_walk_map_per_pid(ioparam.pid);
3272 + /* Gets the size of the memory associated with a user handle. */
3273 + case VMCS_SM_CMD_SIZE_USR_HANDLE:
3275 + struct vmcs_sm_ioctl_size ioparam;
3277 + /* Get parameter data. */
3278 + if (copy_from_user(&ioparam,
3279 + (void *)arg, sizeof(ioparam)) != 0) {
3280 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
3286 + /* Locate resource from GUID. */
3288 + vmcs_sm_acquire_resource(file_data, ioparam.handle);
3289 + if (resource != NULL) {
3290 + ioparam.size = resource->res_size;
3291 + vmcs_sm_release_resource(resource, 0);
3296 + if (copy_to_user((void *)arg,
3297 + &ioparam, sizeof(ioparam)) != 0) {
3298 + pr_err("[%s]: failed to copy-to-user for cmd %x\n",
3308 + /* Verify we are dealing with a valid resource. */
3309 + case VMCS_SM_CMD_CHK_USR_HANDLE:
3311 + struct vmcs_sm_ioctl_chk ioparam;
3313 + /* Get parameter data.
3315 + if (copy_from_user(&ioparam,
3316 + (void *)arg, sizeof(ioparam)) != 0) {
3317 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
3324 + /* Locate resource from GUID. */
3326 + vmcs_sm_acquire_resource(file_data, ioparam.handle);
3327 + if (resource == NULL)
3329 + /* If the resource is cacheable, return additional
3330 + * information that may be needed to flush the cache.
3332 + else if ((resource->res_cached == VMCS_SM_CACHE_HOST) ||
3333 + (resource->res_cached == VMCS_SM_CACHE_BOTH)) {
3335 + vmcs_sm_usr_address_from_pid_and_usr_handle
3336 + (current->tgid, ioparam.handle);
3337 + ioparam.size = resource->res_size;
3338 + ioparam.cache = resource->res_cached;
3342 + ioparam.cache = resource->res_cached;
3346 + vmcs_sm_release_resource(resource, 0);
3348 + if (copy_to_user((void *)arg,
3349 + &ioparam, sizeof(ioparam)) != 0) {
3350 + pr_err("[%s]: failed to copy-to-user for cmd %x\n",
3362 + * Maps a user handle given the process and the virtual address.
3364 + case VMCS_SM_CMD_MAPPED_USR_HANDLE:
3366 + struct vmcs_sm_ioctl_map ioparam;
3368 + /* Get parameter data. */
3369 + if (copy_from_user(&ioparam,
3370 + (void *)arg, sizeof(ioparam)) != 0) {
3371 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
3379 + vmcs_sm_usr_handle_from_pid_and_address(
3380 + ioparam.pid, ioparam.addr);
3383 + vmcs_sm_acquire_resource(file_data, ioparam.handle);
3384 + if ((resource != NULL)
3385 + && ((resource->res_cached == VMCS_SM_CACHE_HOST)
3386 + || (resource->res_cached ==
3387 + VMCS_SM_CACHE_BOTH))) {
3388 + ioparam.size = resource->res_size;
3394 + vmcs_sm_release_resource(resource, 0);
3396 + if (copy_to_user((void *)arg,
3397 + &ioparam, sizeof(ioparam)) != 0) {
3398 + pr_err("[%s]: failed to copy-to-user for cmd %x\n",
3409 + * Maps a videocore handle given process and virtual address.
3411 + case VMCS_SM_CMD_MAPPED_VC_HDL_FROM_ADDR:
3413 + struct vmcs_sm_ioctl_map ioparam;
3415 + /* Get parameter data. */
3416 + if (copy_from_user(&ioparam,
3417 + (void *)arg, sizeof(ioparam)) != 0) {
3418 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
3424 + ioparam.handle = vmcs_sm_vc_handle_from_pid_and_address(
3425 + ioparam.pid, ioparam.addr);
3427 + if (copy_to_user((void *)arg,
3428 + &ioparam, sizeof(ioparam)) != 0) {
3429 + pr_err("[%s]: failed to copy-to-user for cmd %x\n",
3441 + /* Maps a videocore handle given process and user handle. */
3442 + case VMCS_SM_CMD_MAPPED_VC_HDL_FROM_HDL:
3444 + struct vmcs_sm_ioctl_map ioparam;
3446 + /* Get parameter data. */
3447 + if (copy_from_user(&ioparam,
3448 + (void *)arg, sizeof(ioparam)) != 0) {
3449 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
3455 + /* Locate resource from GUID. */
3457 + vmcs_sm_acquire_resource(file_data, ioparam.handle);
3458 + if (resource != NULL) {
3459 + ioparam.handle = resource->res_handle;
3460 + vmcs_sm_release_resource(resource, 0);
3462 + ioparam.handle = 0;
3465 + if (copy_to_user((void *)arg,
3466 + &ioparam, sizeof(ioparam)) != 0) {
3467 + pr_err("[%s]: failed to copy-to-user for cmd %x\n",
3479 + * Maps a videocore address given process and videocore handle.
3481 + case VMCS_SM_CMD_MAPPED_VC_ADDR_FROM_HDL:
3483 + struct vmcs_sm_ioctl_map ioparam;
3485 + /* Get parameter data. */
3486 + if (copy_from_user(&ioparam,
3487 + (void *)arg, sizeof(ioparam)) != 0) {
3488 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
3495 + /* Locate resource from GUID. */
3497 + vmcs_sm_acquire_resource(file_data, ioparam.handle);
3498 + if (resource != NULL) {
3500 + (unsigned int)resource->res_base_mem;
3501 + vmcs_sm_release_resource(resource, 0);
3506 + if (copy_to_user((void *)arg,
3507 + &ioparam, sizeof(ioparam)) != 0) {
3508 + pr_err("[%s]: failed to copy-to-user for cmd %x\n",
3518 + /* Maps a user address given process and vc handle.
3520 + case VMCS_SM_CMD_MAPPED_USR_ADDRESS:
3522 + struct vmcs_sm_ioctl_map ioparam;
3524 + /* Get parameter data. */
3525 + if (copy_from_user(&ioparam,
3526 + (void *)arg, sizeof(ioparam)) != 0) {
3527 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
3534 + * Return the address information from the mapping,
3535 + * 0 (ie NULL) if it cannot locate the actual mapping.
3538 + vmcs_sm_usr_address_from_pid_and_usr_handle
3539 + (ioparam.pid, ioparam.handle);
3541 + if (copy_to_user((void *)arg,
3542 + &ioparam, sizeof(ioparam)) != 0) {
3543 + pr_err("[%s]: failed to copy-to-user for cmd %x\n",
3553 + /* Flush the cache for a given mapping. */
3554 + case VMCS_SM_CMD_FLUSH:
3556 + struct vmcs_sm_ioctl_cache ioparam;
3558 + /* Get parameter data. */
3559 + if (copy_from_user(&ioparam,
3560 + (void *)arg, sizeof(ioparam)) != 0) {
3561 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
3567 + /* Locate resource from GUID. */
3569 + vmcs_sm_acquire_resource(file_data, ioparam.handle);
3571 + if ((resource != NULL) && resource->res_cached) {
3572 + dma_addr_t phys_addr = 0;
3574 + resource->res_stats[FLUSH]++;
3577 + (dma_addr_t)((uint32_t)
3578 + resource->res_base_mem &
3580 + phys_addr += (dma_addr_t)mm_vc_mem_phys_addr;
3582 + /* L1 cache flush */
3583 + down_read(&current->mm->mmap_sem);
3584 + vcsm_vma_cache_clean_page_range((unsigned long)
3589 + up_read(&current->mm->mmap_sem);
3591 + /* L2 cache flush */
3592 + outer_clean_range(phys_addr,
3594 + (size_t) ioparam.size);
3595 + } else if (resource == NULL) {
3601 + vmcs_sm_release_resource(resource, 0);
3608 + /* Invalidate the cache for a given mapping. */
3609 + case VMCS_SM_CMD_INVALID:
3611 + struct vmcs_sm_ioctl_cache ioparam;
3613 + /* Get parameter data. */
3614 + if (copy_from_user(&ioparam,
3615 + (void *)arg, sizeof(ioparam)) != 0) {
3616 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
3622 + /* Locate resource from GUID.
3625 + vmcs_sm_acquire_resource(file_data, ioparam.handle);
3627 + if ((resource != NULL) && resource->res_cached) {
3628 + dma_addr_t phys_addr = 0;
3630 + resource->res_stats[INVALID]++;
3633 + (dma_addr_t)((uint32_t)
3634 + resource->res_base_mem &
3636 + phys_addr += (dma_addr_t)mm_vc_mem_phys_addr;
3638 + /* L2 cache invalidate */
3639 + outer_inv_range(phys_addr,
3641 + (size_t) ioparam.size);
3643 + /* L1 cache invalidate */
3644 + down_read(&current->mm->mmap_sem);
3645 + vcsm_vma_cache_clean_page_range((unsigned long)
3650 + up_read(&current->mm->mmap_sem);
3651 + } else if (resource == NULL) {
3657 + vmcs_sm_release_resource(resource, 0);
3665 + /* Flush/Invalidate the cache for a given mapping. */
3666 + case VMCS_SM_CMD_CLEAN_INVALID:
3669 + struct vmcs_sm_ioctl_clean_invalid ioparam;
3671 + /* Get parameter data. */
3672 + if (copy_from_user(&ioparam,
3673 + (void *)arg, sizeof(ioparam)) != 0) {
3674 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
3679 + for (i=0; i<sizeof ioparam.s/sizeof *ioparam.s; i++) {
3680 + switch (ioparam.s[i].cmd) {
3681 + default: case 0: break; /* NOOP */
3682 + case 1: /* L1/L2 invalidate virtual range */
3683 + case 2: /* L1/L2 clean physical range */
3684 + case 3: /* L1/L2 clean+invalidate all */
3686 + /* Locate resource from GUID.
3689 + vmcs_sm_acquire_resource(file_data, ioparam.s[i].handle);
3691 + if ((resource != NULL) && resource->res_cached) {
3692 + unsigned long base = ioparam.s[i].addr & ~(PAGE_SIZE-1);
3693 + unsigned long end = (ioparam.s[i].addr + ioparam.s[i].size + PAGE_SIZE-1) & ~(PAGE_SIZE-1);
3694 + resource->res_stats[ioparam.s[i].cmd == 1 ? INVALID:FLUSH]++;
3696 + /* L1/L2 cache flush */
3697 + down_read(&current->mm->mmap_sem);
3698 + vcsm_vma_cache_clean_page_range(base, end);
3700 + up_read(&current->mm->mmap_sem);
3700 + } else if (resource == NULL) {
3706 + vmcs_sm_release_resource(resource, 0);
3726 +/* Device operations that we managed in this driver.
3728 +static const struct file_operations vmcs_sm_ops = {
3729 + .owner = THIS_MODULE,
3730 + .unlocked_ioctl = vc_sm_ioctl,
3731 + .open = vc_sm_open,
3732 + .release = vc_sm_release,
3733 + .mmap = vc_sm_mmap,
3736 +/* Creation of device.
3738 +static int vc_sm_create_sharedmemory(void)
3742 + if (sm_state == NULL) {
3747 + /* Create a device class for creating dev nodes.
3749 + sm_state->sm_class = class_create(THIS_MODULE, "vc-sm");
3750 + if (IS_ERR(sm_state->sm_class)) {
3751 + pr_err("[%s]: unable to create device class\n", __func__);
3752 + ret = PTR_ERR(sm_state->sm_class);
3756 + /* Create a character driver.
3758 + ret = alloc_chrdev_region(&sm_state->sm_devid,
3759 + DEVICE_MINOR, 1, DEVICE_NAME);
3761 + pr_err("[%s]: unable to allocate device number\n", __func__);
3762 + goto out_dev_class_destroy;
3765 + cdev_init(&sm_state->sm_cdev, &vmcs_sm_ops);
3766 + ret = cdev_add(&sm_state->sm_cdev, sm_state->sm_devid, 1);
3768 + pr_err("[%s]: unable to register device\n", __func__);
3769 + goto out_chrdev_unreg;
3772 + /* Create a device node.
3774 + sm_state->sm_dev = device_create(sm_state->sm_class,
3776 + MKDEV(MAJOR(sm_state->sm_devid),
3777 + DEVICE_MINOR), NULL,
3779 + if (IS_ERR(sm_state->sm_dev)) {
3780 + pr_err("[%s]: unable to create device node\n", __func__);
3781 + ret = PTR_ERR(sm_state->sm_dev);
3782 + goto out_chrdev_del;
3788 + cdev_del(&sm_state->sm_cdev);
3790 + unregister_chrdev_region(sm_state->sm_devid, 1);
3791 +out_dev_class_destroy:
3792 + class_destroy(sm_state->sm_class);
3793 + sm_state->sm_class = NULL;
3798 +/* Termination of the device.
3800 +static int vc_sm_remove_sharedmemory(void)
3804 + if (sm_state == NULL) {
3811 + /* Remove the sharedmemory character driver.
3813 + cdev_del(&sm_state->sm_cdev);
3815 + /* Unregister region.
3817 + unregister_chrdev_region(sm_state->sm_devid, 1);
3826 +/* Videocore connected. */
3827 +static void vc_sm_connected_init(void)
3830 + VCHI_INSTANCE_T vchi_instance;
3831 + VCHI_CONNECTION_T *vchi_connection = NULL;
3833 + pr_info("[%s]: start\n", __func__);
3835 + /* Allocate memory for the state structure.
3837 + sm_state = kzalloc(sizeof(struct SM_STATE_T), GFP_KERNEL);
3838 + if (sm_state == NULL) {
3839 + pr_err("[%s]: failed to allocate memory\n", __func__);
3844 + mutex_init(&sm_state->lock);
3845 + mutex_init(&sm_state->map_lock);
3847 + /* Initialize and create a VCHI connection for the shared memory service
3848 + ** running on videocore.
3850 + ret = vchi_initialise(&vchi_instance);
3852 + pr_err("[%s]: failed to initialise VCHI instance (ret=%d)\n",
3856 + goto err_free_mem;
3859 + ret = vchi_connect(NULL, 0, vchi_instance);
3861 + pr_err("[%s]: failed to connect VCHI instance (ret=%d)\n",
3865 + goto err_free_mem;
3868 + /* Initialize an instance of the shared memory service. */
3869 + sm_state->sm_handle =
3870 + vc_vchi_sm_init(vchi_instance, &vchi_connection, 1);
3871 + if (sm_state->sm_handle == NULL) {
3872 + pr_err("[%s]: failed to initialize shared memory service\n",
3876 + goto err_free_mem;
3879 + /* Create a debug fs directory entry (root). */
3880 + sm_state->dir_root = debugfs_create_dir(VC_SM_DIR_ROOT_NAME, NULL);
3881 + if (!sm_state->dir_root) {
3882 + pr_err("[%s]: failed to create \'%s\' directory entry\n",
3883 + __func__, VC_SM_DIR_ROOT_NAME);
3886 + goto err_stop_sm_service;
3889 + sm_state->dir_state.show = &vc_sm_global_state_show;
3890 + sm_state->dir_state.dir_entry = debugfs_create_file(VC_SM_STATE,
3891 + S_IRUGO, sm_state->dir_root, &sm_state->dir_state,
3892 + &vc_sm_debug_fs_fops);
3894 + sm_state->dir_stats.show = &vc_sm_global_statistics_show;
3895 + sm_state->dir_stats.dir_entry = debugfs_create_file(VC_SM_STATS,
3896 + S_IRUGO, sm_state->dir_root, &sm_state->dir_stats,
3897 + &vc_sm_debug_fs_fops);
3899 + /* Create the proc entry children. */
3900 + sm_state->dir_alloc = debugfs_create_dir(VC_SM_DIR_ALLOC_NAME,
3901 + sm_state->dir_root);
3903 + /* Create a shared memory device. */
3904 + ret = vc_sm_create_sharedmemory();
3906 + pr_err("[%s]: failed to create shared memory device\n",
3908 + goto err_remove_debugfs;
3911 + INIT_LIST_HEAD(&sm_state->map_list);
3912 + INIT_LIST_HEAD(&sm_state->resource_list);
3914 + sm_state->data_knl = vc_sm_create_priv_data(0);
3915 + if (sm_state->data_knl == NULL) {
3916 + pr_err("[%s]: failed to create kernel private data tracker\n",
3918 + goto err_remove_shared_memory;
3926 +err_remove_shared_memory:
3927 + vc_sm_remove_sharedmemory();
3928 +err_remove_debugfs:
3929 + debugfs_remove_recursive(sm_state->dir_root);
3930 +err_stop_sm_service:
3931 + vc_vchi_sm_stop(&sm_state->sm_handle);
3935 + pr_info("[%s]: end - returning %d\n", __func__, ret);
3938 +/* Driver loading. */
3939 +static int __init vc_sm_init(void)
3941 + pr_info("vc-sm: Videocore shared memory driver\n");
3942 + vchiq_add_connected_callback(vc_sm_connected_init);
3946 +/* Driver unloading. */
3947 +static void __exit vc_sm_exit(void)
3949 + pr_debug("[%s]: start\n", __func__);
3951 + /* Remove shared memory device.
3953 + vc_sm_remove_sharedmemory();
3955 + /* Remove all proc entries.
3957 + debugfs_remove_recursive(sm_state->dir_root);
3959 + /* Stop the videocore shared memory service.
3961 + vc_vchi_sm_stop(&sm_state->sm_handle);
3963 + /* Free the memory for the state structure.
3965 + mutex_destroy(&(sm_state->map_lock));
3969 + pr_debug("[%s]: end\n", __func__);
3972 +#if defined(__KERNEL__)
3973 +/* Allocate a shared memory handle and block. */
3974 +int vc_sm_alloc(VC_SM_ALLOC_T *alloc, int *handle)
3976 + struct vmcs_sm_ioctl_alloc ioparam = { 0 };
3978 + struct SM_RESOURCE_T *resource;
3980 + /* Validate we can work with this device.
3982 + if (sm_state == NULL || alloc == NULL || handle == NULL) {
3983 + pr_err("[%s]: invalid input\n", __func__);
3987 + ioparam.size = alloc->base_unit;
3988 + ioparam.num = alloc->num_unit;
3990 + alloc->type == VC_SM_ALLOC_CACHED ? VMCS_SM_CACHE_VC : 0;
3992 + ret = vc_sm_ioctl_alloc(sm_state->data_knl, &ioparam);
3996 + vmcs_sm_acquire_resource(sm_state->data_knl,
3999 + resource->pid = 0;
4000 + vmcs_sm_release_resource(resource, 0);
4002 + /* Assign valid handle at this time.
4004 + *handle = ioparam.handle;
4012 +EXPORT_SYMBOL_GPL(vc_sm_alloc);
4014 +/* Get an internal resource handle mapped from the external one.
4016 +int vc_sm_int_handle(int handle)
4018 + struct SM_RESOURCE_T *resource;
4021 + /* Validate we can work with this device.
4023 + if (sm_state == NULL || handle == 0) {
4024 + pr_err("[%s]: invalid input\n", __func__);
4028 + /* Locate resource from GUID.
4030 + resource = vmcs_sm_acquire_resource(sm_state->data_knl, handle);
4032 + ret = resource->res_handle;
4033 + vmcs_sm_release_resource(resource, 0);
4038 +EXPORT_SYMBOL_GPL(vc_sm_int_handle);
4040 +/* Free a previously allocated shared memory handle and block.
4042 +int vc_sm_free(int handle)
4044 + struct vmcs_sm_ioctl_free ioparam = { handle };
4046 + /* Validate we can work with this device.
4048 + if (sm_state == NULL || handle == 0) {
4049 + pr_err("[%s]: invalid input\n", __func__);
4053 + return vc_sm_ioctl_free(sm_state->data_knl, &ioparam);
4055 +EXPORT_SYMBOL_GPL(vc_sm_free);
4057 +/* Lock a memory handle for use by kernel.
4059 +int vc_sm_lock(int handle, VC_SM_LOCK_CACHE_MODE_T mode,
4060 + long unsigned int *data)
4062 + struct vmcs_sm_ioctl_lock_unlock ioparam;
4065 + /* Validate we can work with this device.
4067 + if (sm_state == NULL || handle == 0 || data == NULL) {
4068 + pr_err("[%s]: invalid input\n", __func__);
4074 + ioparam.handle = handle;
4075 + ret = vc_sm_ioctl_lock(sm_state->data_knl,
4079 + VC_SM_LOCK_CACHED) ? VMCS_SM_CACHE_HOST :
4080 + VMCS_SM_CACHE_NONE), 0);
4082 + *data = ioparam.addr;
4085 +EXPORT_SYMBOL_GPL(vc_sm_lock);
4087 +/* Unlock a memory handle in use by kernel.
4089 +int vc_sm_unlock(int handle, int flush, int no_vc_unlock)
4091 + struct vmcs_sm_ioctl_lock_unlock ioparam;
4093 + /* Validate we can work with this device.
4095 + if (sm_state == NULL || handle == 0) {
4096 + pr_err("[%s]: invalid input\n", __func__);
4100 + ioparam.handle = handle;
4101 + return vc_sm_ioctl_unlock(sm_state->data_knl,
4102 + &ioparam, flush, 0, no_vc_unlock);
4104 +EXPORT_SYMBOL_GPL(vc_sm_unlock);
4106 +/* Map a shared memory region for use by kernel.
4108 +int vc_sm_map(int handle, unsigned int sm_addr, VC_SM_LOCK_CACHE_MODE_T mode,
4109 + long unsigned int *data)
4111 + struct vmcs_sm_ioctl_lock_unlock ioparam;
4114 + /* Validate we can work with this device.
4116 + if (sm_state == NULL || handle == 0 || data == NULL || sm_addr == 0) {
4117 + pr_err("[%s]: invalid input\n", __func__);
4123 + ioparam.handle = handle;
4124 + ret = vc_sm_ioctl_lock(sm_state->data_knl,
4128 + VC_SM_LOCK_CACHED) ? VMCS_SM_CACHE_HOST :
4129 + VMCS_SM_CACHE_NONE), sm_addr);
4131 + *data = ioparam.addr;
4134 +EXPORT_SYMBOL_GPL(vc_sm_map);
4137 +late_initcall(vc_sm_init);
4138 +module_exit(vc_sm_exit);
4140 +MODULE_AUTHOR("Broadcom");
4141 +MODULE_DESCRIPTION("VideoCore SharedMemory Driver");
4142 +MODULE_LICENSE("GPL v2");
4144 +++ b/include/linux/broadcom/vmcs_sm_ioctl.h
4146 +/*****************************************************************************
4147 +* Copyright 2011 Broadcom Corporation. All rights reserved.
4149 +* Unless you and Broadcom execute a separate written software license
4150 +* agreement governing use of this software, this software is licensed to you
4151 +* under the terms of the GNU General Public License version 2, available at
4152 +* http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
4154 +* Notwithstanding the above, under no circumstances may you combine this
4155 +* software in any way with any other Broadcom software provided under a
4156 +* license other than the GPL, without Broadcom's express prior written
4159 +*****************************************************************************/
4161 +#if !defined(__VMCS_SM_IOCTL_H__INCLUDED__)
4162 +#define __VMCS_SM_IOCTL_H__INCLUDED__
4164 +/* ---- Include Files ---------------------------------------------------- */
4166 +#if defined(__KERNEL__)
4167 +#include <linux/types.h> /* Needed for standard types */
4169 +#include <stdint.h>
4172 +#include <linux/ioctl.h>
4174 +/* ---- Constants and Types ---------------------------------------------- */
4176 +#define VMCS_SM_RESOURCE_NAME 32
4177 +#define VMCS_SM_RESOURCE_NAME_DEFAULT "sm-host-resource"
4179 +/* Type define used to create unique IOCTL number */
4180 +#define VMCS_SM_MAGIC_TYPE 'I'
4182 +/* IOCTL commands */
4183 +enum vmcs_sm_cmd_e {
4184 + VMCS_SM_CMD_ALLOC = 0x5A, /* Start at 0x5A arbitrarily */
4185 + VMCS_SM_CMD_ALLOC_SHARE,
4187 + VMCS_SM_CMD_LOCK_CACHE,
4188 + VMCS_SM_CMD_UNLOCK,
4189 + VMCS_SM_CMD_RESIZE,
4190 + VMCS_SM_CMD_UNMAP,
4192 + VMCS_SM_CMD_FLUSH,
4193 + VMCS_SM_CMD_INVALID,
4195 + VMCS_SM_CMD_SIZE_USR_HANDLE,
4196 + VMCS_SM_CMD_CHK_USR_HANDLE,
4198 + VMCS_SM_CMD_MAPPED_USR_HANDLE,
4199 + VMCS_SM_CMD_MAPPED_USR_ADDRESS,
4200 + VMCS_SM_CMD_MAPPED_VC_HDL_FROM_ADDR,
4201 + VMCS_SM_CMD_MAPPED_VC_HDL_FROM_HDL,
4202 + VMCS_SM_CMD_MAPPED_VC_ADDR_FROM_HDL,
4204 + VMCS_SM_CMD_VC_WALK_ALLOC,
4205 + VMCS_SM_CMD_HOST_WALK_MAP,
4206 + VMCS_SM_CMD_HOST_WALK_PID_ALLOC,
4207 + VMCS_SM_CMD_HOST_WALK_PID_MAP,
4209 + VMCS_SM_CMD_CLEAN_INVALID,
4211 +	VMCS_SM_CMD_LAST	/* Do not delete */
4214 +/* Cache type supported, conveniently matches the user space definition in
4217 +enum vmcs_sm_cache_e {
4218 + VMCS_SM_CACHE_NONE,
4219 + VMCS_SM_CACHE_HOST,
4221 + VMCS_SM_CACHE_BOTH,
4224 +/* IOCTL Data structures */
4225 +struct vmcs_sm_ioctl_alloc {
4226 + /* user -> kernel */
4227 + unsigned int size;
4229 + enum vmcs_sm_cache_e cached;
4230 + char name[VMCS_SM_RESOURCE_NAME];
4232 + /* kernel -> user */
4233 + unsigned int handle;
4234 + /* unsigned int base_addr; */
4237 +struct vmcs_sm_ioctl_alloc_share {
4238 + /* user -> kernel */
4239 + unsigned int handle;
4240 + unsigned int size;
4243 +struct vmcs_sm_ioctl_free {
4244 + /* user -> kernel */
4245 + unsigned int handle;
4246 + /* unsigned int base_addr; */
4249 +struct vmcs_sm_ioctl_lock_unlock {
4250 + /* user -> kernel */
4251 + unsigned int handle;
4253 + /* kernel -> user */
4254 + unsigned int addr;
4257 +struct vmcs_sm_ioctl_lock_cache {
4258 + /* user -> kernel */
4259 + unsigned int handle;
4260 + enum vmcs_sm_cache_e cached;
4263 +struct vmcs_sm_ioctl_resize {
4264 + /* user -> kernel */
4265 + unsigned int handle;
4266 + unsigned int new_size;
4268 + /* kernel -> user */
4269 + unsigned int old_size;
4272 +struct vmcs_sm_ioctl_map {
4273 + /* user -> kernel */
4274 + /* and kernel -> user */
4276 + unsigned int handle;
4277 + unsigned int addr;
4279 + /* kernel -> user */
4280 + unsigned int size;
4283 +struct vmcs_sm_ioctl_walk {
4284 + /* user -> kernel */
4288 +struct vmcs_sm_ioctl_chk {
4289 + /* user -> kernel */
4290 + unsigned int handle;
4292 + /* kernel -> user */
4293 + unsigned int addr;
4294 + unsigned int size;
4295 + enum vmcs_sm_cache_e cache;
4298 +struct vmcs_sm_ioctl_size {
4299 + /* user -> kernel */
4300 + unsigned int handle;
4302 + /* kernel -> user */
4303 + unsigned int size;
4306 +struct vmcs_sm_ioctl_cache {
4307 + /* user -> kernel */
4308 + unsigned int handle;
4309 + unsigned int addr;
4310 + unsigned int size;
4313 +struct vmcs_sm_ioctl_clean_invalid {
4314 + /* user -> kernel */
4317 + unsigned int handle;
4318 + unsigned int addr;
4319 + unsigned int size;
4323 +/* IOCTL numbers */
4324 +#define VMCS_SM_IOCTL_MEM_ALLOC\
4325 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_ALLOC,\
4326 + struct vmcs_sm_ioctl_alloc)
4327 +#define VMCS_SM_IOCTL_MEM_ALLOC_SHARE\
4328 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_ALLOC_SHARE,\
4329 + struct vmcs_sm_ioctl_alloc_share)
4330 +#define VMCS_SM_IOCTL_MEM_LOCK\
4331 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_LOCK,\
4332 + struct vmcs_sm_ioctl_lock_unlock)
4333 +#define VMCS_SM_IOCTL_MEM_LOCK_CACHE\
4334 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_LOCK_CACHE,\
4335 + struct vmcs_sm_ioctl_lock_cache)
4336 +#define VMCS_SM_IOCTL_MEM_UNLOCK\
4337 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_UNLOCK,\
4338 + struct vmcs_sm_ioctl_lock_unlock)
4339 +#define VMCS_SM_IOCTL_MEM_RESIZE\
4340 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_RESIZE,\
4341 + struct vmcs_sm_ioctl_resize)
4342 +#define VMCS_SM_IOCTL_MEM_FREE\
4343 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_FREE,\
4344 + struct vmcs_sm_ioctl_free)
4345 +#define VMCS_SM_IOCTL_MEM_FLUSH\
4346 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_FLUSH,\
4347 + struct vmcs_sm_ioctl_cache)
4348 +#define VMCS_SM_IOCTL_MEM_INVALID\
4349 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_INVALID,\
4350 + struct vmcs_sm_ioctl_cache)
4351 +#define VMCS_SM_IOCTL_MEM_CLEAN_INVALID\
4352 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_CLEAN_INVALID,\
4353 + struct vmcs_sm_ioctl_clean_invalid)
4355 +#define VMCS_SM_IOCTL_SIZE_USR_HDL\
4356 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_SIZE_USR_HANDLE,\
4357 + struct vmcs_sm_ioctl_size)
4358 +#define VMCS_SM_IOCTL_CHK_USR_HDL\
4359 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_CHK_USR_HANDLE,\
4360 + struct vmcs_sm_ioctl_chk)
4362 +#define VMCS_SM_IOCTL_MAP_USR_HDL\
4363 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_MAPPED_USR_HANDLE,\
4364 + struct vmcs_sm_ioctl_map)
4365 +#define VMCS_SM_IOCTL_MAP_USR_ADDRESS\
4366 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_MAPPED_USR_ADDRESS,\
4367 + struct vmcs_sm_ioctl_map)
4368 +#define VMCS_SM_IOCTL_MAP_VC_HDL_FR_ADDR\
4369 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_MAPPED_VC_HDL_FROM_ADDR,\
4370 + struct vmcs_sm_ioctl_map)
4371 +#define VMCS_SM_IOCTL_MAP_VC_HDL_FR_HDL\
4372 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_MAPPED_VC_HDL_FROM_HDL,\
4373 + struct vmcs_sm_ioctl_map)
4374 +#define VMCS_SM_IOCTL_MAP_VC_ADDR_FR_HDL\
4375 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_MAPPED_VC_ADDR_FROM_HDL,\
4376 + struct vmcs_sm_ioctl_map)
4378 +#define VMCS_SM_IOCTL_VC_WALK_ALLOC\
4379 + _IO(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_VC_WALK_ALLOC)
4380 +#define VMCS_SM_IOCTL_HOST_WALK_MAP\
4381 + _IO(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_HOST_WALK_MAP)
4382 +#define VMCS_SM_IOCTL_HOST_WALK_PID_ALLOC\
4383 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_HOST_WALK_PID_ALLOC,\
4384 + struct vmcs_sm_ioctl_walk)
4385 +#define VMCS_SM_IOCTL_HOST_WALK_PID_MAP\
4386 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_HOST_WALK_PID_MAP,\
4387 + struct vmcs_sm_ioctl_walk)
4389 +/* ---- Variable Externs ------------------------------------------------- */
4391 +/* ---- Function Prototypes ---------------------------------------------- */
4393 +#endif /* __VMCS_SM_IOCTL_H__INCLUDED__ */