kernel: refresh patches
[openwrt/openwrt.git] / target/linux/brcm2708/patches-3.14/0008-cma-Add-vc_cma-driver-to-enable-use-of-CMA.patch
1 From 0db77c38e5817cf501d2bb29d21e7389491078d3 Mon Sep 17 00:00:00 2001
2 From: popcornmix <popcornmix@gmail.com>
3 Date: Wed, 3 Jul 2013 00:31:47 +0100
4 Subject: [PATCH 08/54] cma: Add vc_cma driver to enable use of CMA
5
6 Signed-off-by: popcornmix <popcornmix@gmail.com>
7 ---
8 drivers/char/Kconfig | 2 +
9 drivers/char/Makefile | 2 +
10 drivers/char/broadcom/Kconfig | 16 +
11 drivers/char/broadcom/Makefile | 1 +
12 drivers/char/broadcom/vc_cma/Makefile | 14 +
13 drivers/char/broadcom/vc_cma/vc_cma.c | 1143 +++++++++++++++++++++++++++++++++
14 drivers/misc/Makefile | 2 +-
15 include/linux/broadcom/vc_cma.h | 29 +
16 8 files changed, 1208 insertions(+), 1 deletion(-)
17 create mode 100644 drivers/char/broadcom/Kconfig
18 create mode 100644 drivers/char/broadcom/Makefile
19 create mode 100644 drivers/char/broadcom/vc_cma/Makefile
20 create mode 100644 drivers/char/broadcom/vc_cma/vc_cma.c
21 create mode 100644 include/linux/broadcom/vc_cma.h
22
23 --- a/drivers/char/Kconfig
24 +++ b/drivers/char/Kconfig
25 @@ -581,6 +581,8 @@ config DEVPORT
26
27 source "drivers/s390/char/Kconfig"
28
29 +source "drivers/char/broadcom/Kconfig"
30 +
31 config MSM_SMD_PKT
32 bool "Enable device interface for some SMD packet ports"
33 default n
34 --- a/drivers/char/Makefile
35 +++ b/drivers/char/Makefile
36 @@ -61,3 +61,5 @@ obj-$(CONFIG_JS_RTC) += js-rtc.o
37 js-rtc-y = rtc.o
38
39 obj-$(CONFIG_TILE_SROM) += tile-srom.o
40 +
41 +obj-$(CONFIG_BRCM_CHAR_DRIVERS) += broadcom/
42 --- /dev/null
43 +++ b/drivers/char/broadcom/Kconfig
44 @@ -0,0 +1,16 @@
45 +#
46 +# Broadcom char driver config
47 +#
48 +
49 +menuconfig BRCM_CHAR_DRIVERS
50 + bool "Broadcom Char Drivers"
51 + help
52 + Broadcom's char drivers
53 +
54 +config BCM_VC_CMA
55 + bool "Videocore CMA"
56 + depends on CMA && BRCM_CHAR_DRIVERS && BCM2708_VCHIQ
57 + default n
58 + help
59 + Helper for videocore CMA access.
60 +
61 --- /dev/null
62 +++ b/drivers/char/broadcom/Makefile
63 @@ -0,0 +1 @@
64 +obj-$(CONFIG_BCM_VC_CMA) += vc_cma/
65 --- /dev/null
66 +++ b/drivers/char/broadcom/vc_cma/Makefile
67 @@ -0,0 +1,14 @@
68 +ccflags-y += -Wall -Wstrict-prototypes -Wno-trigraphs
69 +ccflags-y += -Werror
70 +ccflags-y += -Iinclude/linux/broadcom
71 +ccflags-y += -Idrivers/misc/vc04_services
72 +ccflags-y += -Idrivers/misc/vc04_services/interface/vchi
73 +ccflags-y += -Idrivers/misc/vc04_services/interface/vchiq_arm
74 +
75 +ccflags-y += -D__KERNEL__
76 +ccflags-y += -D__linux__
77 +ccflags-y += -Werror
78 +
79 +obj-$(CONFIG_BCM_VC_CMA) += vc-cma.o
80 +
81 +vc-cma-objs := vc_cma.o
82 --- /dev/null
83 +++ b/drivers/char/broadcom/vc_cma/vc_cma.c
84 @@ -0,0 +1,1143 @@
85 +/**
86 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
87 + *
88 + * Redistribution and use in source and binary forms, with or without
89 + * modification, are permitted provided that the following conditions
90 + * are met:
91 + * 1. Redistributions of source code must retain the above copyright
92 + * notice, this list of conditions, and the following disclaimer,
93 + * without modification.
94 + * 2. Redistributions in binary form must reproduce the above copyright
95 + * notice, this list of conditions and the following disclaimer in the
96 + * documentation and/or other materials provided with the distribution.
97 + * 3. The names of the above-listed copyright holders may not be used
98 + * to endorse or promote products derived from this software without
99 + * specific prior written permission.
100 + *
101 + * ALTERNATIVELY, this software may be distributed under the terms of the
102 + * GNU General Public License ("GPL") version 2, as published by the Free
103 + * Software Foundation.
104 + *
105 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
106 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
107 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
108 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
109 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
110 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
111 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
112 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
113 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
114 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
115 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
116 + */
117 +
118 +#include <linux/kernel.h>
119 +#include <linux/module.h>
120 +#include <linux/kthread.h>
121 +#include <linux/fs.h>
122 +#include <linux/device.h>
123 +#include <linux/cdev.h>
124 +#include <linux/mm.h>
125 +#include <linux/proc_fs.h>
126 +#include <linux/seq_file.h>
127 +#include <linux/dma-mapping.h>
128 +#include <linux/dma-contiguous.h>
129 +#include <linux/platform_device.h>
130 +#include <linux/uaccess.h>
131 +#include <asm/cacheflush.h>
132 +
133 +#include "vc_cma.h"
134 +
135 +#include "vchiq_util.h"
136 +#include "vchiq_connected.h"
137 +//#include "debug_sym.h"
138 +//#include "vc_mem.h"
139 +
140 +#define DRIVER_NAME "vc-cma"
141 +
142 +#define LOG_DBG(fmt, ...) \
143 + if (vc_cma_debug) \
144 + printk(KERN_INFO fmt "\n", ##__VA_ARGS__)
145 +#define LOG_ERR(fmt, ...) \
146 + printk(KERN_ERR fmt "\n", ##__VA_ARGS__)
147 +
148 +#define VC_CMA_FOURCC VCHIQ_MAKE_FOURCC('C', 'M', 'A', ' ')
149 +#define VC_CMA_VERSION 2
150 +
151 +#define VC_CMA_CHUNK_ORDER 6 /* 256K */
152 +#define VC_CMA_CHUNK_SIZE (4096 << VC_CMA_CHUNK_ORDER)
153 +#define VC_CMA_MAX_PARAMS_PER_MSG \
154 + ((VCHIQ_MAX_MSG_SIZE - sizeof(unsigned short))/sizeof(unsigned short))
155 +#define VC_CMA_RESERVE_COUNT_MAX 16
156 +
157 +#define PAGES_PER_CHUNK (VC_CMA_CHUNK_SIZE / PAGE_SIZE)
158 +
159 +#define VCADDR_TO_PHYSADDR(vcaddr) (mm_vc_mem_phys_addr + vcaddr)
160 +
161 +#define loud_error(...) \
162 + LOG_ERR("===== " __VA_ARGS__)
163 +
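+/*
+ * Message types exchanged with VideoCore over the "CMA " VCHIQ service.
+ * Some of these values are also pushed onto the worker queue directly as
+ * internal commands; cma_worker_proc tells them apart from real message
+ * headers by comparing the queued pointer against VC_CMA_MSG_MAX.
+ */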
164 +enum {
165 + VC_CMA_MSG_QUIT,
166 + VC_CMA_MSG_OPEN,
167 + VC_CMA_MSG_TICK,
168 + VC_CMA_MSG_ALLOC, /* chunk count */
169 + VC_CMA_MSG_FREE, /* chunk, chunk, ... */
170 + VC_CMA_MSG_ALLOCATED, /* chunk, chunk, ... */
171 + VC_CMA_MSG_REQUEST_ALLOC, /* chunk count */
172 + VC_CMA_MSG_REQUEST_FREE, /* chunk count */
173 + VC_CMA_MSG_RESERVE, /* bytes lo, bytes hi */
174 + VC_CMA_MSG_UPDATE_RESERVE,
175 + VC_CMA_MSG_MAX
176 +};
177 +
178 +struct cma_msg {
179 + unsigned short type;
180 + unsigned short params[VC_CMA_MAX_PARAMS_PER_MSG];
181 +};
182 +
183 +struct vc_cma_reserve_user {
184 + unsigned int pid;
185 + unsigned int reserve;
186 +};
187 +
188 +/* Device (/dev) related variables */
189 +static dev_t vc_cma_devnum;
190 +static struct class *vc_cma_class;
191 +static struct cdev vc_cma_cdev;
192 +static int vc_cma_inited;
193 +static int vc_cma_debug;
194 +
195 +/* Proc entry */
196 +static struct proc_dir_entry *vc_cma_proc_entry;
197 +
198 +phys_addr_t vc_cma_base;
199 +struct page *vc_cma_base_page;
200 +unsigned int vc_cma_size;
201 +EXPORT_SYMBOL(vc_cma_size);
202 +unsigned int vc_cma_initial;
203 +unsigned int vc_cma_chunks;
204 +unsigned int vc_cma_chunks_used;
205 +unsigned int vc_cma_chunks_reserved;
206 +
207 +static int in_loud_error;
208 +
209 +unsigned int vc_cma_reserve_total;
210 +unsigned int vc_cma_reserve_count;
211 +struct vc_cma_reserve_user vc_cma_reserve_users[VC_CMA_RESERVE_COUNT_MAX];
212 +static DEFINE_SEMAPHORE(vc_cma_reserve_mutex);
213 +static DEFINE_SEMAPHORE(vc_cma_worker_queue_push_mutex);
214 +
215 +static u64 vc_cma_dma_mask = DMA_BIT_MASK(32);
216 +static struct platform_device vc_cma_device = {
217 + .name = "vc-cma",
218 + .id = 0,
219 + .dev = {
220 + .dma_mask = &vc_cma_dma_mask,
221 + .coherent_dma_mask = DMA_BIT_MASK(32),
222 + },
223 +};
224 +
225 +static VCHIQ_INSTANCE_T cma_instance;
226 +static VCHIQ_SERVICE_HANDLE_T cma_service;
227 +static VCHIU_QUEUE_T cma_msg_queue;
228 +static struct task_struct *cma_worker;
229 +
230 +static int vc_cma_set_reserve(unsigned int reserve, unsigned int pid);
231 +static int vc_cma_alloc_chunks(int num_chunks, struct cma_msg *reply);
232 +static VCHIQ_STATUS_T cma_service_callback(VCHIQ_REASON_T reason,
233 + VCHIQ_HEADER_T * header,
234 + VCHIQ_SERVICE_HANDLE_T service,
235 + void *bulk_userdata);
236 +static void send_vc_msg(unsigned short type,
237 + unsigned short param1, unsigned short param2);
238 +static bool send_worker_msg(VCHIQ_HEADER_T * msg);
239 +
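+/*
+ * Parse the "vc-cma-mem=<initial>[/<size>][@<base>]" kernel command line
+ * argument (e.g. "vc-cma-mem=16M/256M"); sizes accept the usual memparse
+ * suffixes. All three values are then aligned to VC_CMA_CHUNK_SIZE.
+ */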
240 +static int early_vc_cma_mem(char *p)
241 +{
242 + unsigned int new_size;
243 + printk(KERN_NOTICE "early_vc_cma_mem(%s)", p);
244 + vc_cma_size = memparse(p, &p);
245 + vc_cma_initial = vc_cma_size;
246 + if (*p == '/')
247 + vc_cma_size = memparse(p + 1, &p);
248 + if (*p == '@')
249 + vc_cma_base = memparse(p + 1, &p);
250 +
251 + new_size = (vc_cma_size - ((-vc_cma_base) & (VC_CMA_CHUNK_SIZE - 1)))
252 + & ~(VC_CMA_CHUNK_SIZE - 1);
253 + if (new_size > vc_cma_size)
254 + vc_cma_size = 0;
255 + vc_cma_initial = (vc_cma_initial + VC_CMA_CHUNK_SIZE - 1)
256 + & ~(VC_CMA_CHUNK_SIZE - 1);
257 + if (vc_cma_initial > vc_cma_size)
258 + vc_cma_initial = vc_cma_size;
259 + vc_cma_base = (vc_cma_base + VC_CMA_CHUNK_SIZE - 1)
260 + & ~(VC_CMA_CHUNK_SIZE - 1);
261 +
262 + printk(KERN_NOTICE " -> initial %x, size %x, base %x", vc_cma_initial,
263 + vc_cma_size, (unsigned int)vc_cma_base);
264 +
265 + return 0;
266 +}
267 +
268 +early_param("vc-cma-mem", early_vc_cma_mem);
269 +
270 +void vc_cma_early_init(void)
271 +{
272 + LOG_DBG("vc_cma_early_init - vc_cma_chunks = %d", vc_cma_chunks);
273 + if (vc_cma_size) {
274 + int rc = platform_device_register(&vc_cma_device);
275 + LOG_DBG("platform_device_register -> %d", rc);
276 + }
277 +}
278 +
279 +void vc_cma_reserve(void)
280 +{
281 + /* if vc_cma_size is set, then declare vc CMA area of the same
282 + * size from the end of memory
283 + */
284 + if (vc_cma_size) {
285 + if (dma_declare_contiguous(NULL /*&vc_cma_device.dev*/, vc_cma_size,
286 + vc_cma_base, 0) == 0) {
287 + } else {
288 + LOG_ERR("vc_cma: dma_declare_contiguous(%x,%x) failed",
289 + vc_cma_size, (unsigned int)vc_cma_base);
290 + vc_cma_size = 0;
291 + }
292 + }
293 + vc_cma_chunks = vc_cma_size / VC_CMA_CHUNK_SIZE;
294 +}
295 +
296 +/****************************************************************************
297 +*
298 +* vc_cma_open
299 +*
300 +***************************************************************************/
301 +
302 +static int vc_cma_open(struct inode *inode, struct file *file)
303 +{
304 + (void)inode;
305 + (void)file;
306 +
307 + return 0;
308 +}
309 +
310 +/****************************************************************************
311 +*
312 +* vc_cma_release
313 +*
314 +***************************************************************************/
315 +
316 +static int vc_cma_release(struct inode *inode, struct file *file)
317 +{
318 + (void)inode;
319 + (void)file;
320 +
321 + vc_cma_set_reserve(0, current->tgid);
322 +
323 + return 0;
324 +}
325 +
326 +/****************************************************************************
327 +*
328 +* vc_cma_ioctl
329 +*
330 +***************************************************************************/
331 +
332 +static long vc_cma_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
333 +{
334 + int rc = 0;
335 +
336 + (void)cmd;
337 + (void)arg;
338 +
339 + switch (cmd) {
340 + case VC_CMA_IOC_RESERVE:
341 + rc = vc_cma_set_reserve((unsigned int)arg, current->tgid);
342 + if (rc >= 0)
343 + rc = 0;
344 + break;
345 + default:
346 + LOG_ERR("vc-cma: Unknown ioctl %x", cmd);
347 + return -ENOTTY;
348 + }
349 +
350 + return rc;
351 +}
352 +
353 +/****************************************************************************
354 +*
355 +* File Operations for the driver.
356 +*
357 +***************************************************************************/
358 +
359 +static const struct file_operations vc_cma_fops = {
360 + .owner = THIS_MODULE,
361 + .open = vc_cma_open,
362 + .release = vc_cma_release,
363 + .unlocked_ioctl = vc_cma_ioctl,
364 +};
365 +
366 +/****************************************************************************
367 +*
368 +* vc_cma_proc_open
369 +*
370 +***************************************************************************/
371 +
372 +static int vc_cma_show_info(struct seq_file *m, void *v)
373 +{
374 + int i;
375 +
376 + seq_printf(m, "Videocore CMA:\n");
377 + seq_printf(m, " Base : %08x\n", (unsigned int)vc_cma_base);
378 + seq_printf(m, " Length : %08x\n", vc_cma_size);
379 + seq_printf(m, " Initial : %08x\n", vc_cma_initial);
380 + seq_printf(m, " Chunk size : %08x\n", VC_CMA_CHUNK_SIZE);
381 + seq_printf(m, " Chunks : %4d (%d bytes)\n",
382 + (int)vc_cma_chunks,
383 + (int)(vc_cma_chunks * VC_CMA_CHUNK_SIZE));
384 + seq_printf(m, " Used : %4d (%d bytes)\n",
385 + (int)vc_cma_chunks_used,
386 + (int)(vc_cma_chunks_used * VC_CMA_CHUNK_SIZE));
387 + seq_printf(m, " Reserved : %4d (%d bytes)\n",
388 + (unsigned int)vc_cma_chunks_reserved,
389 + (int)(vc_cma_chunks_reserved * VC_CMA_CHUNK_SIZE));
390 +
391 + for (i = 0; i < vc_cma_reserve_count; i++) {
392 + struct vc_cma_reserve_user *user = &vc_cma_reserve_users[i];
393 + seq_printf(m, " PID %5d: %d bytes\n", user->pid,
394 + user->reserve);
395 + }
396 +
397 + seq_printf(m, "\n");
398 +
399 + return 0;
400 +}
401 +
402 +static int vc_cma_proc_open(struct inode *inode, struct file *file)
403 +{
404 + return single_open(file, vc_cma_show_info, NULL);
405 +}
406 +
407 +/****************************************************************************
408 +*
409 +* vc_cma_proc_write
410 +*
411 +***************************************************************************/
412 +
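+/*
+ * Commands accepted on /proc/vc-cma: "alloc <size>", "free <size>",
+ * "debug on|off|1|0" and "reserve <size>", where <size> is parsed with
+ * memparse.
+ */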
413 +static int vc_cma_proc_write(struct file *file,
414 + const char __user *buffer,
415 + size_t size, loff_t *ppos)
416 +{
417 + int rc = -EFAULT;
418 + char input_str[20];
419 +
420 + memset(input_str, 0, sizeof(input_str));
421 +
422 + if (size > sizeof(input_str)) {
423 + LOG_ERR("%s: input string length too long", __func__);
424 + goto out;
425 + }
426 +
427 + if (copy_from_user(input_str, buffer, size - 1)) {
428 + LOG_ERR("%s: failed to get input string", __func__);
429 + goto out;
430 + }
431 +#define ALLOC_STR "alloc"
432 +#define FREE_STR "free"
433 +#define DEBUG_STR "debug"
434 +#define RESERVE_STR "reserve"
435 + if (strncmp(input_str, ALLOC_STR, strlen(ALLOC_STR)) == 0) {
436 + int size;
437 + char *p = input_str + strlen(ALLOC_STR);
438 +
439 + while (*p == ' ')
440 + p++;
441 + size = memparse(p, NULL);
442 + LOG_ERR("/proc/vc-cma: alloc %d", size);
443 + if (size)
444 + send_vc_msg(VC_CMA_MSG_REQUEST_FREE,
445 + size / VC_CMA_CHUNK_SIZE, 0);
446 + else
447 + LOG_ERR("invalid size '%s'", p);
448 + rc = size;
449 + } else if (strncmp(input_str, FREE_STR, strlen(FREE_STR)) == 0) {
450 + int size;
451 + char *p = input_str + strlen(FREE_STR);
452 +
453 + while (*p == ' ')
454 + p++;
455 + size = memparse(p, NULL);
456 + LOG_ERR("/proc/vc-cma: free %d", size);
457 + if (size)
458 + send_vc_msg(VC_CMA_MSG_REQUEST_ALLOC,
459 + size / VC_CMA_CHUNK_SIZE, 0);
460 + else
461 + LOG_ERR("invalid size '%s'", p);
462 + rc = size;
463 + } else if (strncmp(input_str, DEBUG_STR, strlen(DEBUG_STR)) == 0) {
464 + char *p = input_str + strlen(DEBUG_STR);
465 + while (*p == ' ')
466 + p++;
467 + if ((strcmp(p, "on") == 0) || (strcmp(p, "1") == 0))
468 + vc_cma_debug = 1;
469 + else if ((strcmp(p, "off") == 0) || (strcmp(p, "0") == 0))
470 + vc_cma_debug = 0;
471 + LOG_ERR("/proc/vc-cma: debug %s", vc_cma_debug ? "on" : "off");
472 + rc = size;
473 + } else if (strncmp(input_str, RESERVE_STR, strlen(RESERVE_STR)) == 0) {
474 + int size;
475 + int reserved;
476 + char *p = input_str + strlen(RESERVE_STR);
477 + while (*p == ' ')
478 + p++;
479 + size = memparse(p, NULL);
480 +
481 + reserved = vc_cma_set_reserve(size, current->tgid);
482 + rc = (reserved >= 0) ? size : reserved;
483 + }
484 +
485 +out:
486 + return rc;
487 +}
488 +
489 +/****************************************************************************
490 +*
491 +* File Operations for /proc interface.
492 +*
493 +***************************************************************************/
494 +
495 +static const struct file_operations vc_cma_proc_fops = {
496 + .open = vc_cma_proc_open,
497 + .read = seq_read,
498 + .write = vc_cma_proc_write,
499 + .llseek = seq_lseek,
500 + .release = single_release
501 +};
502 +
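+/*
+ * Record a per-process (tgid) reservation, update the running total, tell
+ * VideoCore the new total and kick the worker thread so the backing chunks
+ * get allocated. A reserve of 0 removes the caller's entry. Returns the
+ * new total in bytes, or a negative errno.
+ */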
503 +static int vc_cma_set_reserve(unsigned int reserve, unsigned int pid)
504 +{
505 + struct vc_cma_reserve_user *user = NULL;
506 + int delta = 0;
507 + int i;
508 +
509 + if (down_interruptible(&vc_cma_reserve_mutex))
510 + return -ERESTARTSYS;
511 +
512 + for (i = 0; i < vc_cma_reserve_count; i++) {
513 + if (pid == vc_cma_reserve_users[i].pid) {
514 + user = &vc_cma_reserve_users[i];
515 + delta = reserve - user->reserve;
516 + if (reserve)
517 + user->reserve = reserve;
518 + else {
519 + /* Remove this entry by copying downwards */
520 + while ((i + 1) < vc_cma_reserve_count) {
521 + user[0].pid = user[1].pid;
522 + user[0].reserve = user[1].reserve;
523 + user++;
524 + i++;
525 + }
526 + vc_cma_reserve_count--;
527 + user = NULL;
528 + }
529 + break;
530 + }
531 + }
532 +
533 + if (reserve && !user) {
534 + if (vc_cma_reserve_count == VC_CMA_RESERVE_COUNT_MAX) {
535 + LOG_ERR("vc-cma: Too many reservations - "
536 +				"increase VC_CMA_RESERVE_COUNT_MAX");
537 + up(&vc_cma_reserve_mutex);
538 + return -EBUSY;
539 + }
540 + user = &vc_cma_reserve_users[vc_cma_reserve_count];
541 + user->pid = pid;
542 + user->reserve = reserve;
543 + delta = reserve;
544 + vc_cma_reserve_count++;
545 + }
546 +
547 + vc_cma_reserve_total += delta;
548 +
549 + send_vc_msg(VC_CMA_MSG_RESERVE,
550 + vc_cma_reserve_total & 0xffff, vc_cma_reserve_total >> 16);
551 +
552 + send_worker_msg((VCHIQ_HEADER_T *) VC_CMA_MSG_UPDATE_RESERVE);
553 +
554 + LOG_DBG("/proc/vc-cma: reserve %d (PID %d) - total %u",
555 + reserve, pid, vc_cma_reserve_total);
556 +
557 + up(&vc_cma_reserve_mutex);
558 +
559 + return vc_cma_reserve_total;
560 +}
561 +
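+/*
+ * VCHIQ service callback. Incoming messages are only queued here; the
+ * actual work is done by the cma_worker thread.
+ */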
562 +static VCHIQ_STATUS_T cma_service_callback(VCHIQ_REASON_T reason,
563 + VCHIQ_HEADER_T * header,
564 + VCHIQ_SERVICE_HANDLE_T service,
565 + void *bulk_userdata)
566 +{
567 + switch (reason) {
568 + case VCHIQ_MESSAGE_AVAILABLE:
569 + if (!send_worker_msg(header))
570 + return VCHIQ_RETRY;
571 + break;
572 + case VCHIQ_SERVICE_CLOSED:
573 + LOG_DBG("CMA service closed");
574 + break;
575 + default:
576 + LOG_ERR("Unexpected CMA callback reason %d", reason);
577 + break;
578 + }
579 + return VCHIQ_SUCCESS;
580 +}
581 +
582 +static void send_vc_msg(unsigned short type,
583 + unsigned short param1, unsigned short param2)
584 +{
585 + unsigned short msg[] = { type, param1, param2 };
586 + VCHIQ_ELEMENT_T elem = { &msg, sizeof(msg) };
587 + VCHIQ_STATUS_T ret;
588 + vchiq_use_service(cma_service);
589 + ret = vchiq_queue_message(cma_service, &elem, 1);
590 + vchiq_release_service(cma_service);
591 + if (ret != VCHIQ_SUCCESS)
592 + LOG_ERR("vchiq_queue_message returned %x", ret);
593 +}
594 +
595 +static bool send_worker_msg(VCHIQ_HEADER_T * msg)
596 +{
597 + if (down_interruptible(&vc_cma_worker_queue_push_mutex))
598 + return false;
599 + vchiu_queue_push(&cma_msg_queue, msg);
600 + up(&vc_cma_worker_queue_push_mutex);
601 + return true;
602 +}
603 +
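+/*
+ * Allocate up to num_chunks chunks from the CMA region, flush them from
+ * the CPU caches and report the chunk numbers back to VideoCore in a
+ * VC_CMA_MSG_ALLOCATED reply. Returns the number of chunks allocated.
+ */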
604 +static int vc_cma_alloc_chunks(int num_chunks, struct cma_msg *reply)
605 +{
606 + int i;
607 + for (i = 0; i < num_chunks; i++) {
608 + struct page *chunk;
609 + unsigned int chunk_num;
610 + uint8_t *chunk_addr;
611 + size_t chunk_size = PAGES_PER_CHUNK << PAGE_SHIFT;
612 +
613 + chunk = dma_alloc_from_contiguous(NULL /*&vc_cma_device.dev*/,
614 + PAGES_PER_CHUNK,
615 + VC_CMA_CHUNK_ORDER);
616 + if (!chunk)
617 + break;
618 +
619 + chunk_addr = page_address(chunk);
620 + dmac_flush_range(chunk_addr, chunk_addr + chunk_size);
621 + outer_inv_range(__pa(chunk_addr), __pa(chunk_addr) +
622 + chunk_size);
623 +
624 + chunk_num =
625 + (page_to_phys(chunk) - vc_cma_base) / VC_CMA_CHUNK_SIZE;
626 + BUG_ON(((page_to_phys(chunk) - vc_cma_base) %
627 + VC_CMA_CHUNK_SIZE) != 0);
628 + if (chunk_num >= vc_cma_chunks) {
629 + LOG_ERR("%s: ===============================",
630 + __func__);
631 + LOG_ERR("%s: chunk phys %x, vc_cma %x-%x - "
632 + "bad SPARSEMEM configuration?",
633 + __func__, (unsigned int)page_to_phys(chunk),
634 + vc_cma_base, vc_cma_base + vc_cma_size - 1);
635 + LOG_ERR("%s: dev->cma_area = %p\n", __func__,
636 + (void*)0/*vc_cma_device.dev.cma_area*/);
637 + LOG_ERR("%s: ===============================",
638 + __func__);
639 + break;
640 + }
641 + reply->params[i] = chunk_num;
642 + vc_cma_chunks_used++;
643 + }
644 +
645 + if (i < num_chunks) {
646 + LOG_ERR("%s: dma_alloc_from_contiguous failed "
647 + "for %x bytes (alloc %d of %d, %d free)",
648 + __func__, VC_CMA_CHUNK_SIZE, i,
649 + num_chunks, vc_cma_chunks - vc_cma_chunks_used);
650 + num_chunks = i;
651 + }
652 +
653 + LOG_DBG("CMA allocated %d chunks -> %d used",
654 + num_chunks, vc_cma_chunks_used);
655 + reply->type = VC_CMA_MSG_ALLOCATED;
656 +
657 + {
658 + VCHIQ_ELEMENT_T elem = {
659 + reply,
660 + offsetof(struct cma_msg, params[0]) +
661 + num_chunks * sizeof(reply->params[0])
662 + };
663 + VCHIQ_STATUS_T ret;
664 + vchiq_use_service(cma_service);
665 + ret = vchiq_queue_message(cma_service, &elem, 1);
666 + vchiq_release_service(cma_service);
667 + if (ret != VCHIQ_SUCCESS)
668 +			LOG_ERR("vchiq_queue_message returned %x", ret);
669 + }
670 +
671 + return num_chunks;
672 +}
673 +
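+/*
+ * Worker thread. Pops entries from cma_msg_queue: genuine VCHIQ headers
+ * are copied and released straight away, while pointer values below
+ * VC_CMA_MSG_MAX are internal commands (VC_CMA_MSG_QUIT,
+ * VC_CMA_MSG_UPDATE_RESERVE) injected by the kernel side.
+ */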
674 +static int cma_worker_proc(void *param)
675 +{
676 + static struct cma_msg reply;
677 + (void)param;
678 +
679 + while (1) {
680 + VCHIQ_HEADER_T *msg;
681 + static struct cma_msg msg_copy;
682 + struct cma_msg *cma_msg = &msg_copy;
683 + int type, msg_size;
684 +
685 + msg = vchiu_queue_pop(&cma_msg_queue);
686 + if ((unsigned int)msg >= VC_CMA_MSG_MAX) {
687 + msg_size = msg->size;
688 + memcpy(&msg_copy, msg->data, msg_size);
689 + type = cma_msg->type;
690 + vchiq_release_message(cma_service, msg);
691 + } else {
692 + msg_size = 0;
693 + type = (int)msg;
694 + if (type == VC_CMA_MSG_QUIT)
695 + break;
696 + else if (type == VC_CMA_MSG_UPDATE_RESERVE) {
697 + msg = NULL;
698 + cma_msg = NULL;
699 + } else {
700 + BUG();
701 + continue;
702 + }
703 + }
704 +
705 + switch (type) {
706 + case VC_CMA_MSG_ALLOC:{
707 + int num_chunks, free_chunks;
708 + num_chunks = cma_msg->params[0];
709 + free_chunks =
710 + vc_cma_chunks - vc_cma_chunks_used;
711 + LOG_DBG("CMA_MSG_ALLOC(%d chunks)", num_chunks);
712 + if (num_chunks > VC_CMA_MAX_PARAMS_PER_MSG) {
713 + LOG_ERR
714 + ("CMA_MSG_ALLOC - chunk count (%d) "
715 + "exceeds VC_CMA_MAX_PARAMS_PER_MSG (%d)",
716 + num_chunks,
717 + VC_CMA_MAX_PARAMS_PER_MSG);
718 + num_chunks = VC_CMA_MAX_PARAMS_PER_MSG;
719 + }
720 +
721 + if (num_chunks > free_chunks) {
722 + LOG_ERR
723 + ("CMA_MSG_ALLOC - chunk count (%d) "
724 + "exceeds free chunks (%d)",
725 + num_chunks, free_chunks);
726 + num_chunks = free_chunks;
727 + }
728 +
729 + vc_cma_alloc_chunks(num_chunks, &reply);
730 + }
731 + break;
732 +
733 + case VC_CMA_MSG_FREE:{
734 + int chunk_count =
735 + (msg_size -
736 + offsetof(struct cma_msg,
737 + params)) /
738 + sizeof(cma_msg->params[0]);
739 + int i;
740 + BUG_ON(chunk_count <= 0);
741 +
742 + LOG_DBG("CMA_MSG_FREE(%d chunks - %x, ...)",
743 + chunk_count, cma_msg->params[0]);
744 + for (i = 0; i < chunk_count; i++) {
745 + int chunk_num = cma_msg->params[i];
746 + struct page *page = vc_cma_base_page +
747 + chunk_num * PAGES_PER_CHUNK;
748 + if (chunk_num >= vc_cma_chunks) {
749 + LOG_ERR
750 + ("CMA_MSG_FREE - chunk %d of %d"
751 + " (value %x) exceeds maximum "
752 + "(%x)", i, chunk_count,
753 + chunk_num,
754 + vc_cma_chunks - 1);
755 + break;
756 + }
757 +
758 + if (!dma_release_from_contiguous
759 + (NULL /*&vc_cma_device.dev*/, page,
760 + PAGES_PER_CHUNK)) {
761 + LOG_ERR
762 + ("CMA_MSG_FREE - failed to "
763 + "release chunk %d (phys %x, "
764 + "page %x)", chunk_num,
765 + page_to_phys(page),
766 + (unsigned int)page);
767 + }
768 + vc_cma_chunks_used--;
769 + }
770 + LOG_DBG("CMA released %d chunks -> %d used",
771 + i, vc_cma_chunks_used);
772 + }
773 + break;
774 +
775 + case VC_CMA_MSG_UPDATE_RESERVE:{
776 + int chunks_needed =
777 + ((vc_cma_reserve_total + VC_CMA_CHUNK_SIZE -
778 + 1)
779 + / VC_CMA_CHUNK_SIZE) -
780 + vc_cma_chunks_reserved;
781 +
782 + LOG_DBG
783 + ("CMA_MSG_UPDATE_RESERVE(%d chunks needed)",
784 + chunks_needed);
785 +
786 + /* Cap the reservations to what is available */
787 + if (chunks_needed > 0) {
788 + if (chunks_needed >
789 + (vc_cma_chunks -
790 + vc_cma_chunks_used))
791 + chunks_needed =
792 + (vc_cma_chunks -
793 + vc_cma_chunks_used);
794 +
795 + chunks_needed =
796 + vc_cma_alloc_chunks(chunks_needed,
797 + &reply);
798 + }
799 +
800 + LOG_DBG
801 + ("CMA_MSG_UPDATE_RESERVE(%d chunks allocated)",
802 + chunks_needed);
803 + vc_cma_chunks_reserved += chunks_needed;
804 + }
805 + break;
806 +
807 + default:
808 + LOG_ERR("unexpected msg type %d", type);
809 + break;
810 + }
811 + }
812 +
813 + LOG_DBG("quitting...");
814 + return 0;
815 +}
816 +
817 +/****************************************************************************
818 +*
819 +* vc_cma_connected_init
820 +*
821 +* This function is called once the videocore has been connected.
822 +*
823 +***************************************************************************/
824 +
825 +static void vc_cma_connected_init(void)
826 +{
827 + VCHIQ_SERVICE_PARAMS_T service_params;
828 +
829 + LOG_DBG("vc_cma_connected_init");
830 +
831 + if (!vchiu_queue_init(&cma_msg_queue, 16)) {
832 + LOG_ERR("could not create CMA msg queue");
833 + goto fail_queue;
834 + }
835 +
836 + if (vchiq_initialise(&cma_instance) != VCHIQ_SUCCESS)
837 + goto fail_vchiq_init;
838 +
839 + vchiq_connect(cma_instance);
840 +
841 + service_params.fourcc = VC_CMA_FOURCC;
842 + service_params.callback = cma_service_callback;
843 + service_params.userdata = NULL;
844 + service_params.version = VC_CMA_VERSION;
845 + service_params.version_min = VC_CMA_VERSION;
846 +
847 + if (vchiq_open_service(cma_instance, &service_params,
848 + &cma_service) != VCHIQ_SUCCESS) {
849 + LOG_ERR("failed to open service - already in use?");
850 + goto fail_vchiq_open;
851 + }
852 +
853 + vchiq_release_service(cma_service);
854 +
855 + cma_worker = kthread_create(cma_worker_proc, NULL, "cma_worker");
856 + if (!cma_worker) {
857 + LOG_ERR("could not create CMA worker thread");
858 + goto fail_worker;
859 + }
860 + set_user_nice(cma_worker, -20);
861 + wake_up_process(cma_worker);
862 +
863 + return;
864 +
865 +fail_worker:
866 + vchiq_close_service(cma_service);
867 +fail_vchiq_open:
868 + vchiq_shutdown(cma_instance);
869 +fail_vchiq_init:
870 + vchiu_queue_delete(&cma_msg_queue);
871 +fail_queue:
872 + return;
873 +}
874 +
875 +void
876 +loud_error_header(void)
877 +{
878 + if (in_loud_error)
879 + return;
880 +
881 + LOG_ERR("============================================================"
882 + "================");
883 + LOG_ERR("============================================================"
884 + "================");
885 + LOG_ERR("=====");
886 +
887 + in_loud_error = 1;
888 +}
889 +
890 +void
891 +loud_error_footer(void)
892 +{
893 + if (!in_loud_error)
894 + return;
895 +
896 + LOG_ERR("=====");
897 + LOG_ERR("============================================================"
898 + "================");
899 + LOG_ERR("============================================================"
900 + "================");
901 +
902 + in_loud_error = 0;
903 +}
904 +
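+/*
+ * The full check_cma_config() below cross-references the VideoCore
+ * memory-pool symbols against the CMA range, but it relies on the
+ * debug_sym/vc_mem helpers (see the commented-out includes above), so a
+ * stub that always succeeds is used instead.
+ */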
905 +#if 1
906 +static int check_cma_config(void) { return 1; }
907 +#else
908 +static int
909 +read_vc_debug_var(VC_MEM_ACCESS_HANDLE_T handle,
910 + const char *symbol,
911 + void *buf, size_t bufsize)
912 +{
913 + VC_MEM_ADDR_T vcMemAddr;
914 + size_t vcMemSize;
915 + uint8_t *mapAddr;
916 + off_t vcMapAddr;
917 +
918 + if (!LookupVideoCoreSymbol(handle, symbol,
919 + &vcMemAddr,
920 + &vcMemSize)) {
921 + loud_error_header();
922 + loud_error(
923 + "failed to find VC symbol \"%s\".",
924 + symbol);
925 + loud_error_footer();
926 + return 0;
927 + }
928 +
929 + if (vcMemSize != bufsize) {
930 + loud_error_header();
931 + loud_error(
932 + "VC symbol \"%s\" is the wrong size.",
933 + symbol);
934 + loud_error_footer();
935 + return 0;
936 + }
937 +
938 + vcMapAddr = (off_t)vcMemAddr & VC_MEM_TO_ARM_ADDR_MASK;
939 + vcMapAddr += mm_vc_mem_phys_addr;
940 + mapAddr = ioremap_nocache(vcMapAddr, vcMemSize);
941 + if (mapAddr == 0) {
942 + loud_error_header();
943 + loud_error(
944 + "failed to ioremap \"%s\" @ 0x%x "
945 + "(phys: 0x%x, size: %u).",
946 + symbol,
947 + (unsigned int)vcMapAddr,
948 + (unsigned int)vcMemAddr,
949 + (unsigned int)vcMemSize);
950 + loud_error_footer();
951 + return 0;
952 + }
953 +
954 + memcpy(buf, mapAddr, bufsize);
955 + iounmap(mapAddr);
956 +
957 + return 1;
958 +}
959 +
960 +
961 +static int
962 +check_cma_config(void)
963 +{
964 + VC_MEM_ACCESS_HANDLE_T mem_hndl;
965 + VC_MEM_ADDR_T mempool_start;
966 + VC_MEM_ADDR_T mempool_end;
967 + VC_MEM_ADDR_T mempool_offline_start;
968 + VC_MEM_ADDR_T mempool_offline_end;
969 + VC_MEM_ADDR_T cam_alloc_base;
970 + VC_MEM_ADDR_T cam_alloc_size;
971 + VC_MEM_ADDR_T cam_alloc_end;
972 + int success = 0;
973 +
974 + if (OpenVideoCoreMemory(&mem_hndl) != 0)
975 + goto out;
976 +
977 + /* Read the relevant VideoCore variables */
978 + if (!read_vc_debug_var(mem_hndl, "__MEMPOOL_START",
979 + &mempool_start,
980 + sizeof(mempool_start)))
981 + goto close;
982 +
983 + if (!read_vc_debug_var(mem_hndl, "__MEMPOOL_END",
984 + &mempool_end,
985 + sizeof(mempool_end)))
986 + goto close;
987 +
988 + if (!read_vc_debug_var(mem_hndl, "__MEMPOOL_OFFLINE_START",
989 + &mempool_offline_start,
990 + sizeof(mempool_offline_start)))
991 + goto close;
992 +
993 + if (!read_vc_debug_var(mem_hndl, "__MEMPOOL_OFFLINE_END",
994 + &mempool_offline_end,
995 + sizeof(mempool_offline_end)))
996 + goto close;
997 +
998 + if (!read_vc_debug_var(mem_hndl, "cam_alloc_base",
999 + &cam_alloc_base,
1000 + sizeof(cam_alloc_base)))
1001 + goto close;
1002 +
1003 + if (!read_vc_debug_var(mem_hndl, "cam_alloc_size",
1004 + &cam_alloc_size,
1005 + sizeof(cam_alloc_size)))
1006 + goto close;
1007 +
1008 + cam_alloc_end = cam_alloc_base + cam_alloc_size;
1009 +
1010 + success = 1;
1011 +
1012 + /* Now the sanity checks */
1013 + if (!mempool_offline_start)
1014 + mempool_offline_start = mempool_start;
1015 + if (!mempool_offline_end)
1016 + mempool_offline_end = mempool_end;
1017 +
1018 + if (VCADDR_TO_PHYSADDR(mempool_offline_start) != vc_cma_base) {
1019 + loud_error_header();
1020 + loud_error(
1021 + "__MEMPOOL_OFFLINE_START(%x -> %lx) doesn't match "
1022 + "vc_cma_base(%x)",
1023 + mempool_offline_start,
1024 + VCADDR_TO_PHYSADDR(mempool_offline_start),
1025 + vc_cma_base);
1026 + success = 0;
1027 + }
1028 +
1029 + if (VCADDR_TO_PHYSADDR(mempool_offline_end) !=
1030 + (vc_cma_base + vc_cma_size)) {
1031 + loud_error_header();
1032 + loud_error(
1033 + "__MEMPOOL_OFFLINE_END(%x -> %lx) doesn't match "
1034 + "vc_cma_base(%x) + vc_cma_size(%x) = %x",
1035 + mempool_offline_start,
1036 + VCADDR_TO_PHYSADDR(mempool_offline_end),
1037 + vc_cma_base, vc_cma_size, vc_cma_base + vc_cma_size);
1038 + success = 0;
1039 + }
1040 +
1041 + if (mempool_end < mempool_start) {
1042 + loud_error_header();
1043 + loud_error(
1044 + "__MEMPOOL_END(%x) must not be before "
1045 + "__MEMPOOL_START(%x)",
1046 + mempool_end,
1047 + mempool_start);
1048 + success = 0;
1049 + }
1050 +
1051 + if (mempool_offline_end < mempool_offline_start) {
1052 + loud_error_header();
1053 + loud_error(
1054 + "__MEMPOOL_OFFLINE_END(%x) must not be before "
1055 + "__MEMPOOL_OFFLINE_START(%x)",
1056 + mempool_offline_end,
1057 + mempool_offline_start);
1058 + success = 0;
1059 + }
1060 +
1061 + if (mempool_offline_start < mempool_start) {
1062 + loud_error_header();
1063 + loud_error(
1064 + "__MEMPOOL_OFFLINE_START(%x) must not be before "
1065 + "__MEMPOOL_START(%x)",
1066 + mempool_offline_start,
1067 + mempool_start);
1068 + success = 0;
1069 + }
1070 +
1071 + if (mempool_offline_end > mempool_end) {
1072 + loud_error_header();
1073 + loud_error(
1074 + "__MEMPOOL_OFFLINE_END(%x) must not be after "
1075 + "__MEMPOOL_END(%x)",
1076 + mempool_offline_end,
1077 + mempool_end);
1078 + success = 0;
1079 + }
1080 +
1081 + if ((cam_alloc_base < mempool_end) &&
1082 + (cam_alloc_end > mempool_start)) {
1083 + loud_error_header();
1084 + loud_error(
1085 + "cam_alloc pool(%x-%x) overlaps "
1086 + "mempool(%x-%x)",
1087 + cam_alloc_base, cam_alloc_end,
1088 + mempool_start, mempool_end);
1089 + success = 0;
1090 + }
1091 +
1092 + loud_error_footer();
1093 +
1094 +close:
1095 + CloseVideoCoreMemory(mem_hndl);
1096 +
1097 +out:
1098 + return success;
1099 +}
1100 +#endif
1101 +
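+/*
+ * Module init: pre-allocate the chunks VideoCore is already using
+ * (vc_cma_initial), register a connected-callback to open the VCHIQ
+ * service, then create the /dev/vc-cma device and /proc/vc-cma entry.
+ */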
1102 +static int vc_cma_init(void)
1103 +{
1104 + int rc = -EFAULT;
1105 + struct device *dev;
1106 +
1107 + if (!check_cma_config())
1108 + goto out_release;
1109 +
1110 + printk(KERN_INFO "vc-cma: Videocore CMA driver\n");
1111 + printk(KERN_INFO "vc-cma: vc_cma_base = 0x%08x\n", vc_cma_base);
1112 + printk(KERN_INFO "vc-cma: vc_cma_size = 0x%08x (%u MiB)\n",
1113 + vc_cma_size, vc_cma_size / (1024 * 1024));
1114 + printk(KERN_INFO "vc-cma: vc_cma_initial = 0x%08x (%u MiB)\n",
1115 + vc_cma_initial, vc_cma_initial / (1024 * 1024));
1116 +
1117 + vc_cma_base_page = phys_to_page(vc_cma_base);
1118 +
1119 + if (vc_cma_chunks) {
1120 + int chunks_needed = vc_cma_initial / VC_CMA_CHUNK_SIZE;
1121 +
1122 + for (vc_cma_chunks_used = 0;
1123 + vc_cma_chunks_used < chunks_needed; vc_cma_chunks_used++) {
1124 + struct page *chunk;
1125 + chunk = dma_alloc_from_contiguous(NULL /*&vc_cma_device.dev*/,
1126 + PAGES_PER_CHUNK,
1127 + VC_CMA_CHUNK_ORDER);
1128 + if (!chunk)
1129 + break;
1130 + BUG_ON(((page_to_phys(chunk) - vc_cma_base) %
1131 + VC_CMA_CHUNK_SIZE) != 0);
1132 + }
1133 + if (vc_cma_chunks_used != chunks_needed) {
1134 + LOG_ERR("%s: dma_alloc_from_contiguous failed (%d "
1135 + "bytes, allocation %d of %d)",
1136 + __func__, VC_CMA_CHUNK_SIZE,
1137 + vc_cma_chunks_used, chunks_needed);
1138 + goto out_release;
1139 + }
1140 +
1141 + vchiq_add_connected_callback(vc_cma_connected_init);
1142 + }
1143 +
1144 + rc = alloc_chrdev_region(&vc_cma_devnum, 0, 1, DRIVER_NAME);
1145 + if (rc < 0) {
1146 + LOG_ERR("%s: alloc_chrdev_region failed (rc=%d)", __func__, rc);
1147 + goto out_release;
1148 + }
1149 +
1150 + cdev_init(&vc_cma_cdev, &vc_cma_fops);
1151 + rc = cdev_add(&vc_cma_cdev, vc_cma_devnum, 1);
1152 + if (rc != 0) {
1153 + LOG_ERR("%s: cdev_add failed (rc=%d)", __func__, rc);
1154 + goto out_unregister;
1155 + }
1156 +
1157 + vc_cma_class = class_create(THIS_MODULE, DRIVER_NAME);
1158 + if (IS_ERR(vc_cma_class)) {
1159 + rc = PTR_ERR(vc_cma_class);
1160 + LOG_ERR("%s: class_create failed (rc=%d)", __func__, rc);
1161 + goto out_cdev_del;
1162 + }
1163 +
1164 + dev = device_create(vc_cma_class, NULL, vc_cma_devnum, NULL,
1165 + DRIVER_NAME);
1166 + if (IS_ERR(dev)) {
1167 + rc = PTR_ERR(dev);
1168 + LOG_ERR("%s: device_create failed (rc=%d)", __func__, rc);
1169 + goto out_class_destroy;
1170 + }
1171 +
1172 + vc_cma_proc_entry = proc_create(DRIVER_NAME, 0444, NULL, &vc_cma_proc_fops);
1173 + if (vc_cma_proc_entry == NULL) {
1174 + rc = -EFAULT;
1175 + LOG_ERR("%s: proc_create failed", __func__);
1176 + goto out_device_destroy;
1177 + }
1178 +
1179 + vc_cma_inited = 1;
1180 + return 0;
1181 +
1182 +out_device_destroy:
1183 + device_destroy(vc_cma_class, vc_cma_devnum);
1184 +
1185 +out_class_destroy:
1186 + class_destroy(vc_cma_class);
1187 + vc_cma_class = NULL;
1188 +
1189 +out_cdev_del:
1190 + cdev_del(&vc_cma_cdev);
1191 +
1192 +out_unregister:
1193 + unregister_chrdev_region(vc_cma_devnum, 1);
1194 +
1195 +out_release:
1196 + /* It is tempting to try to clean up by calling
1197 + dma_release_from_contiguous for all allocated chunks, but it isn't
1198 + a very safe thing to do. If vc_cma_initial is non-zero it is because
1199 + VideoCore is already using that memory, so giving it back to Linux
1200 + is likely to be fatal.
1201 + */
1202 + return -1;
1203 +}
1204 +
1205 +/****************************************************************************
1206 +*
1207 +* vc_cma_exit
1208 +*
1209 +***************************************************************************/
1210 +
1211 +static void __exit vc_cma_exit(void)
1212 +{
1213 + LOG_DBG("%s: called", __func__);
1214 +
1215 + if (vc_cma_inited) {
1216 + remove_proc_entry(DRIVER_NAME, NULL);
1217 + device_destroy(vc_cma_class, vc_cma_devnum);
1218 + class_destroy(vc_cma_class);
1219 + cdev_del(&vc_cma_cdev);
1220 + unregister_chrdev_region(vc_cma_devnum, 1);
1221 + }
1222 +}
1223 +
1224 +module_init(vc_cma_init);
1225 +module_exit(vc_cma_exit);
1226 +MODULE_LICENSE("GPL");
1227 +MODULE_AUTHOR("Broadcom Corporation");
1228 --- a/drivers/misc/Makefile
1229 +++ b/drivers/misc/Makefile
1230 @@ -52,6 +52,6 @@ obj-$(CONFIG_INTEL_MEI) += mei/
1231 obj-$(CONFIG_VMWARE_VMCI) += vmw_vmci/
1232 obj-$(CONFIG_LATTICE_ECP3_CONFIG) += lattice-ecp3-config.o
1233 obj-$(CONFIG_SRAM) += sram.o
1234 -obj-y += vc04_services/
1235 +obj-$(CONFIG_BCM2708_VCHIQ) += vc04_services/
1236 obj-y += mic/
1237 obj-$(CONFIG_GENWQE) += genwqe/
1238 --- /dev/null
1239 +++ b/include/linux/broadcom/vc_cma.h
1240 @@ -0,0 +1,29 @@
1241 +/*****************************************************************************
1242 +* Copyright 2012 Broadcom Corporation. All rights reserved.
1243 +*
1244 +* Unless you and Broadcom execute a separate written software license
1245 +* agreement governing use of this software, this software is licensed to you
1246 +* under the terms of the GNU General Public License version 2, available at
1247 +* http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
1248 +*
1249 +* Notwithstanding the above, under no circumstances may you combine this
1250 +* software in any way with any other Broadcom software provided under a
1251 +* license other than the GPL, without Broadcom's express prior written
1252 +* consent.
1253 +*****************************************************************************/
1254 +
1255 +#if !defined( VC_CMA_H )
1256 +#define VC_CMA_H
1257 +
1258 +#include <linux/ioctl.h>
1259 +
1260 +#define VC_CMA_IOC_MAGIC 0xc5
1261 +
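+/*
+ * Reserve <arg> bytes of VideoCore CMA memory for the calling process.
+ * The reservation is dropped automatically when the file handle is
+ * closed.
+ */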
1262 +#define VC_CMA_IOC_RESERVE _IO(VC_CMA_IOC_MAGIC, 0)
1263 +
1264 +#ifdef __KERNEL__
1265 +extern void __init vc_cma_early_init(void);
1266 +extern void __init vc_cma_reserve(void);
1267 +#endif
1268 +
1269 +#endif /* VC_CMA_H */