target/linux/brcm2708/patches-3.10/005-bcm2708-vchiq-driver.patch
1 diff -urN linux-3.10/drivers/char/broadcom/Kconfig linux-rpi-3.10.y/drivers/char/broadcom/Kconfig
2 --- linux-3.10/drivers/char/broadcom/Kconfig 1970-01-01 01:00:00.000000000 +0100
3 +++ linux-rpi-3.10.y/drivers/char/broadcom/Kconfig 2013-07-06 15:25:50.000000000 +0100
4 @@ -0,0 +1,17 @@
5 +#
6 +# Broadcom char driver config
7 +#
8 +
9 +menuconfig BRCM_CHAR_DRIVERS
10 + tristate "Broadcom Char Drivers"
11 + depends on PROC_FS
12 + help
13 + Broadcom's char drivers
14 +
15 +config BCM_VC_CMA
16 + bool "Videocore CMA"
17 + depends on CMA
18 + default n
19 + help
20 + Helper for videocore CMA access.
21 +
22 diff -urN linux-3.10/drivers/char/broadcom/Makefile linux-rpi-3.10.y/drivers/char/broadcom/Makefile
23 --- linux-3.10/drivers/char/broadcom/Makefile 1970-01-01 01:00:00.000000000 +0100
24 +++ linux-rpi-3.10.y/drivers/char/broadcom/Makefile 2013-07-06 15:25:50.000000000 +0100
25 @@ -0,0 +1,2 @@
26 +obj-$(CONFIG_BCM_VC_CMA) += vc_cma/
27 +
28 diff -urN linux-3.10/drivers/char/broadcom/vc_cma/Makefile linux-rpi-3.10.y/drivers/char/broadcom/vc_cma/Makefile
29 --- linux-3.10/drivers/char/broadcom/vc_cma/Makefile 1970-01-01 01:00:00.000000000 +0100
30 +++ linux-rpi-3.10.y/drivers/char/broadcom/vc_cma/Makefile 2013-07-06 15:25:50.000000000 +0100
31 @@ -0,0 +1,15 @@
32 +EXTRA_CFLAGS += -Wall -Wstrict-prototypes -Wno-trigraphs
33 +EXTRA_CFLAGS += -Werror
34 +EXTRA_CFLAGS += -I"include/linux/broadcom"
35 +EXTRA_CFLAGS += -I"drivers/misc/vc04_services"
36 +EXTRA_CFLAGS += -I"drivers/misc/vc04_services/interface/vchi"
37 +EXTRA_CFLAGS += -I"drivers/misc/vc04_services/interface/vchiq_arm"
38 +
39 +EXTRA_CFLAGS += -D__KERNEL__
40 +EXTRA_CFLAGS += -D__linux__
41 +EXTRA_CFLAGS += -Werror
42 +
43 +obj-$(CONFIG_BCM_VC_CMA) += vc-cma.o
44 +
45 +vc-cma-objs := vc_cma.o
46 +
47 diff -urN linux-3.10/drivers/char/broadcom/vc_cma/vc_cma.c linux-rpi-3.10.y/drivers/char/broadcom/vc_cma/vc_cma.c
48 --- linux-3.10/drivers/char/broadcom/vc_cma/vc_cma.c 1970-01-01 01:00:00.000000000 +0100
49 +++ linux-rpi-3.10.y/drivers/char/broadcom/vc_cma/vc_cma.c 2013-07-06 15:25:50.000000000 +0100
50 @@ -0,0 +1,1145 @@
51 +/**
52 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
53 + *
54 + * Redistribution and use in source and binary forms, with or without
55 + * modification, are permitted provided that the following conditions
56 + * are met:
57 + * 1. Redistributions of source code must retain the above copyright
58 + * notice, this list of conditions, and the following disclaimer,
59 + * without modification.
60 + * 2. Redistributions in binary form must reproduce the above copyright
61 + * notice, this list of conditions and the following disclaimer in the
62 + * documentation and/or other materials provided with the distribution.
63 + * 3. The names of the above-listed copyright holders may not be used
64 + * to endorse or promote products derived from this software without
65 + * specific prior written permission.
66 + *
67 + * ALTERNATIVELY, this software may be distributed under the terms of the
68 + * GNU General Public License ("GPL") version 2, as published by the Free
69 + * Software Foundation.
70 + *
71 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
72 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
73 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
74 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
75 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
76 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
77 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
78 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
79 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
80 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
81 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
82 + */
83 +
84 +#include <linux/kernel.h>
85 +#include <linux/module.h>
86 +#include <linux/kthread.h>
87 +#include <linux/fs.h>
88 +#include <linux/device.h>
89 +#include <linux/cdev.h>
90 +#include <linux/mm.h>
91 +#include <linux/proc_fs.h>
92 +#include <linux/seq_file.h>
93 +#include <linux/dma-mapping.h>
94 +#include <linux/dma-contiguous.h>
95 +#include <linux/platform_device.h>
96 +#include <linux/uaccess.h>
97 +#include <asm/cacheflush.h>
98 +
99 +#include "vc_cma.h"
100 +
101 +#include "vchiq_util.h"
102 +#include "vchiq_connected.h"
103 +//#include "debug_sym.h"
104 +//#include "vc_mem.h"
105 +
106 +#define DRIVER_NAME "vc-cma"
107 +
108 +#define LOG_DBG(fmt, ...) \
109 + if (vc_cma_debug) \
110 + printk(KERN_INFO fmt "\n", ##__VA_ARGS__)
111 +#define LOG_ERR(fmt, ...) \
112 + printk(KERN_ERR fmt "\n", ##__VA_ARGS__)
113 +
114 +#define VC_CMA_FOURCC VCHIQ_MAKE_FOURCC('C', 'M', 'A', ' ')
115 +#define VC_CMA_VERSION 2
116 +
117 +#define VC_CMA_CHUNK_ORDER 6 /* 256K */
118 +#define VC_CMA_CHUNK_SIZE (4096 << VC_CMA_CHUNK_ORDER)
119 +#define VC_CMA_MAX_PARAMS_PER_MSG \
120 + ((VCHIQ_MAX_MSG_SIZE - sizeof(unsigned short))/sizeof(unsigned short))
121 +#define VC_CMA_RESERVE_COUNT_MAX 16
122 +
123 +#define PAGES_PER_CHUNK (VC_CMA_CHUNK_SIZE / PAGE_SIZE)
124 +
125 +#define VCADDR_TO_PHYSADDR(vcaddr) (mm_vc_mem_phys_addr + vcaddr)
126 +
127 +#define loud_error(...) \
128 + LOG_ERR("===== " __VA_ARGS__)
129 +
130 +enum {
131 + VC_CMA_MSG_QUIT,
132 + VC_CMA_MSG_OPEN,
133 + VC_CMA_MSG_TICK,
134 + VC_CMA_MSG_ALLOC, /* chunk count */
135 + VC_CMA_MSG_FREE, /* chunk, chunk, ... */
136 + VC_CMA_MSG_ALLOCATED, /* chunk, chunk, ... */
137 + VC_CMA_MSG_REQUEST_ALLOC, /* chunk count */
138 + VC_CMA_MSG_REQUEST_FREE, /* chunk count */
139 + VC_CMA_MSG_RESERVE, /* bytes lo, bytes hi */
140 + VC_CMA_MSG_UPDATE_RESERVE,
141 + VC_CMA_MSG_MAX
142 +};
143 +
144 +struct cma_msg {
145 + unsigned short type;
146 + unsigned short params[VC_CMA_MAX_PARAMS_PER_MSG];
147 +};
148 +
149 +struct vc_cma_reserve_user {
150 + unsigned int pid;
151 + unsigned int reserve;
152 +};
153 +
154 +/* Device (/dev) related variables */
155 +static dev_t vc_cma_devnum;
156 +static struct class *vc_cma_class;
157 +static struct cdev vc_cma_cdev;
158 +static int vc_cma_inited;
159 +static int vc_cma_debug;
160 +
161 +/* Proc entry */
162 +static struct proc_dir_entry *vc_cma_proc_entry;
163 +
164 +phys_addr_t vc_cma_base;
165 +struct page *vc_cma_base_page;
166 +unsigned int vc_cma_size;
167 +EXPORT_SYMBOL(vc_cma_size);
168 +unsigned int vc_cma_initial;
169 +unsigned int vc_cma_chunks;
170 +unsigned int vc_cma_chunks_used;
171 +unsigned int vc_cma_chunks_reserved;
172 +
173 +static int in_loud_error;
174 +
175 +unsigned int vc_cma_reserve_total;
176 +unsigned int vc_cma_reserve_count;
177 +struct vc_cma_reserve_user vc_cma_reserve_users[VC_CMA_RESERVE_COUNT_MAX];
178 +static DEFINE_SEMAPHORE(vc_cma_reserve_mutex);
179 +static DEFINE_SEMAPHORE(vc_cma_worker_queue_push_mutex);
180 +
181 +static u64 vc_cma_dma_mask = DMA_BIT_MASK(32);
182 +static struct platform_device vc_cma_device = {
183 + .name = "vc-cma",
184 + .id = 0,
185 + .dev = {
186 + .dma_mask = &vc_cma_dma_mask,
187 + .coherent_dma_mask = DMA_BIT_MASK(32),
188 + },
189 +};
190 +
191 +static VCHIQ_INSTANCE_T cma_instance;
192 +static VCHIQ_SERVICE_HANDLE_T cma_service;
193 +static VCHIU_QUEUE_T cma_msg_queue;
194 +static struct task_struct *cma_worker;
195 +
196 +static int vc_cma_set_reserve(unsigned int reserve, unsigned int pid);
197 +static int vc_cma_alloc_chunks(int num_chunks, struct cma_msg *reply);
198 +static VCHIQ_STATUS_T cma_service_callback(VCHIQ_REASON_T reason,
199 + VCHIQ_HEADER_T * header,
200 + VCHIQ_SERVICE_HANDLE_T service,
201 + void *bulk_userdata);
202 +static void send_vc_msg(unsigned short type,
203 + unsigned short param1, unsigned short param2);
204 +static bool send_worker_msg(VCHIQ_HEADER_T * msg);
205 +
206 +static int early_vc_cma_mem(char *p)
207 +{
208 + unsigned int new_size;
209 + printk(KERN_NOTICE "early_vc_cma_mem(%s)", p);
210 + vc_cma_size = memparse(p, &p);
211 + vc_cma_initial = vc_cma_size;
212 + if (*p == '/')
213 + vc_cma_size = memparse(p + 1, &p);
214 + if (*p == '@')
215 + vc_cma_base = memparse(p + 1, &p);
216 +
217 + new_size = (vc_cma_size - ((-vc_cma_base) & (VC_CMA_CHUNK_SIZE - 1)))
218 + & ~(VC_CMA_CHUNK_SIZE - 1);
219 + if (new_size > vc_cma_size)
220 + vc_cma_size = 0;
221 + vc_cma_initial = (vc_cma_initial + VC_CMA_CHUNK_SIZE - 1)
222 + & ~(VC_CMA_CHUNK_SIZE - 1);
223 + if (vc_cma_initial > vc_cma_size)
224 + vc_cma_initial = vc_cma_size;
225 + vc_cma_base = (vc_cma_base + VC_CMA_CHUNK_SIZE - 1)
226 + & ~(VC_CMA_CHUNK_SIZE - 1);
227 +
228 + printk(KERN_NOTICE " -> initial %x, size %x, base %x", vc_cma_initial,
229 + vc_cma_size, (unsigned int)vc_cma_base);
230 +
231 + return 0;
232 +}
233 +
234 +early_param("vc-cma-mem", early_vc_cma_mem);
235 +
236 +void vc_cma_early_init(void)
237 +{
238 + LOG_DBG("vc_cma_early_init - vc_cma_chunks = %d", vc_cma_chunks);
239 + if (vc_cma_size) {
240 + int rc = platform_device_register(&vc_cma_device);
241 + LOG_DBG("platform_device_register -> %d", rc);
242 + }
243 +}
244 +
245 +void vc_cma_reserve(void)
246 +{
247 + /* if vc_cma_size is set, then declare vc CMA area of the same
248 + * size from the end of memory
249 + */
250 + if (vc_cma_size) {
251 + if (dma_declare_contiguous(NULL /*&vc_cma_device.dev*/, vc_cma_size,
252 + vc_cma_base, 0) == 0) {
253 + } else {
254 + LOG_ERR("vc_cma: dma_declare_contiguous(%x,%x) failed",
255 + vc_cma_size, (unsigned int)vc_cma_base);
256 + vc_cma_size = 0;
257 + }
258 + }
259 + vc_cma_chunks = vc_cma_size / VC_CMA_CHUNK_SIZE;
260 +}
261 +
262 +/****************************************************************************
263 +*
264 +* vc_cma_open
265 +*
266 +***************************************************************************/
267 +
268 +static int vc_cma_open(struct inode *inode, struct file *file)
269 +{
270 + (void)inode;
271 + (void)file;
272 +
273 + return 0;
274 +}
275 +
276 +/****************************************************************************
277 +*
278 +* vc_cma_release
279 +*
280 +***************************************************************************/
281 +
282 +static int vc_cma_release(struct inode *inode, struct file *file)
283 +{
284 + (void)inode;
285 + (void)file;
286 +
287 + vc_cma_set_reserve(0, current->tgid);
288 +
289 + return 0;
290 +}
291 +
292 +/****************************************************************************
293 +*
294 +* vc_cma_ioctl
295 +*
296 +***************************************************************************/
297 +
298 +static long vc_cma_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
299 +{
300 + int rc = 0;
301 +
302 + (void)cmd;
303 + (void)arg;
304 +
305 + switch (cmd) {
306 + case VC_CMA_IOC_RESERVE:
307 + rc = vc_cma_set_reserve((unsigned int)arg, current->tgid);
308 + if (rc >= 0)
309 + rc = 0;
310 + break;
311 + default:
312 + LOG_ERR("vc-cma: Unknown ioctl %x", cmd);
313 + return -ENOTTY;
314 + }
315 +
316 + return rc;
317 +}
318 +
319 +/****************************************************************************
320 +*
321 +* File Operations for the driver.
322 +*
323 +***************************************************************************/
324 +
325 +static const struct file_operations vc_cma_fops = {
326 + .owner = THIS_MODULE,
327 + .open = vc_cma_open,
328 + .release = vc_cma_release,
329 + .unlocked_ioctl = vc_cma_ioctl,
330 +};
331 +
332 +/****************************************************************************
333 +*
334 +* vc_cma_proc_open
335 +*
336 +***************************************************************************/
337 +
338 +static int vc_cma_show_info(struct seq_file *m, void *v)
339 +{
340 + int i;
341 +
342 + seq_printf(m, "Videocore CMA:\n");
343 + seq_printf(m, " Base : %08x\n", (unsigned int)vc_cma_base);
344 + seq_printf(m, " Length : %08x\n", vc_cma_size);
345 + seq_printf(m, " Initial : %08x\n", vc_cma_initial);
346 + seq_printf(m, " Chunk size : %08x\n", VC_CMA_CHUNK_SIZE);
347 + seq_printf(m, " Chunks : %4d (%d bytes)\n",
348 + (int)vc_cma_chunks,
349 + (int)(vc_cma_chunks * VC_CMA_CHUNK_SIZE));
350 + seq_printf(m, " Used : %4d (%d bytes)\n",
351 + (int)vc_cma_chunks_used,
352 + (int)(vc_cma_chunks_used * VC_CMA_CHUNK_SIZE));
353 + seq_printf(m, " Reserved : %4d (%d bytes)\n",
354 + (unsigned int)vc_cma_chunks_reserved,
355 + (int)(vc_cma_chunks_reserved * VC_CMA_CHUNK_SIZE));
356 +
357 + for (i = 0; i < vc_cma_reserve_count; i++) {
358 + struct vc_cma_reserve_user *user = &vc_cma_reserve_users[i];
359 + seq_printf(m, " PID %5d: %d bytes\n", user->pid,
360 + user->reserve);
361 + }
362 +
363 + seq_printf(m, "\n");
364 +
365 + return 0;
366 +}
367 +
368 +static int vc_cma_proc_open(struct inode *inode, struct file *file)
369 +{
370 + return single_open(file, vc_cma_show_info, NULL);
371 +}
372 +
373 +/****************************************************************************
374 +*
375 +* vc_cma_proc_write
376 +*
377 +***************************************************************************/
378 +
379 +static int vc_cma_proc_write(struct file *file,
380 + const char __user *buffer,
381 + size_t size, loff_t *ppos)
382 +{
383 + int rc = -EFAULT;
384 + char input_str[20];
385 +
386 + memset(input_str, 0, sizeof(input_str));
387 +
388 + if (size > sizeof(input_str)) {
389 + LOG_ERR("%s: input string length too long", __func__);
390 + goto out;
391 + }
392 +
393 + if (copy_from_user(input_str, buffer, size - 1)) {
394 + LOG_ERR("%s: failed to get input string", __func__);
395 + goto out;
396 + }
397 +#define ALLOC_STR "alloc"
398 +#define FREE_STR "free"
399 +#define DEBUG_STR "debug"
400 +#define RESERVE_STR "reserve"
401 + if (strncmp(input_str, ALLOC_STR, strlen(ALLOC_STR)) == 0) {
402 + int size;
403 + char *p = input_str + strlen(ALLOC_STR);
404 +
405 + while (*p == ' ')
406 + p++;
407 + size = memparse(p, NULL);
408 + LOG_ERR("/proc/vc-cma: alloc %d", size);
409 + if (size)
410 + send_vc_msg(VC_CMA_MSG_REQUEST_FREE,
411 + size / VC_CMA_CHUNK_SIZE, 0);
412 + else
413 + LOG_ERR("invalid size '%s'", p);
414 + rc = size;
415 + } else if (strncmp(input_str, FREE_STR, strlen(FREE_STR)) == 0) {
416 + int size;
417 + char *p = input_str + strlen(FREE_STR);
418 +
419 + while (*p == ' ')
420 + p++;
421 + size = memparse(p, NULL);
422 + LOG_ERR("/proc/vc-cma: free %d", size);
423 + if (size)
424 + send_vc_msg(VC_CMA_MSG_REQUEST_ALLOC,
425 + size / VC_CMA_CHUNK_SIZE, 0);
426 + else
427 + LOG_ERR("invalid size '%s'", p);
428 + rc = size;
429 + } else if (strncmp(input_str, DEBUG_STR, strlen(DEBUG_STR)) == 0) {
430 + char *p = input_str + strlen(DEBUG_STR);
431 + while (*p == ' ')
432 + p++;
433 + if ((strcmp(p, "on") == 0) || (strcmp(p, "1") == 0))
434 + vc_cma_debug = 1;
435 + else if ((strcmp(p, "off") == 0) || (strcmp(p, "0") == 0))
436 + vc_cma_debug = 0;
437 + LOG_ERR("/proc/vc-cma: debug %s", vc_cma_debug ? "on" : "off");
438 + rc = size;
439 + } else if (strncmp(input_str, RESERVE_STR, strlen(RESERVE_STR)) == 0) {
440 + int size;
441 + int reserved;
442 + char *p = input_str + strlen(RESERVE_STR);
443 + while (*p == ' ')
444 + p++;
445 + size = memparse(p, NULL);
446 +
447 + reserved = vc_cma_set_reserve(size, current->tgid);
448 + rc = (reserved >= 0) ? size : reserved;
449 + }
450 +
451 +out:
452 + return rc;
453 +}
454 +
455 +/****************************************************************************
456 +*
457 +* File Operations for /proc interface.
458 +*
459 +***************************************************************************/
460 +
461 +static const struct file_operations vc_cma_proc_fops = {
462 + .open = vc_cma_proc_open,
463 + .read = seq_read,
464 + .write = vc_cma_proc_write,
465 + .llseek = seq_lseek,
466 + .release = single_release
467 +};
468 +
469 +static int vc_cma_set_reserve(unsigned int reserve, unsigned int pid)
470 +{
471 + struct vc_cma_reserve_user *user = NULL;
472 + int delta = 0;
473 + int i;
474 +
475 + if (down_interruptible(&vc_cma_reserve_mutex))
476 + return -ERESTARTSYS;
477 +
478 + for (i = 0; i < vc_cma_reserve_count; i++) {
479 + if (pid == vc_cma_reserve_users[i].pid) {
480 + user = &vc_cma_reserve_users[i];
481 + delta = reserve - user->reserve;
482 + if (reserve)
483 + user->reserve = reserve;
484 + else {
485 + /* Remove this entry by copying downwards */
486 + while ((i + 1) < vc_cma_reserve_count) {
487 + user[0].pid = user[1].pid;
488 + user[0].reserve = user[1].reserve;
489 + user++;
490 + i++;
491 + }
492 + vc_cma_reserve_count--;
493 + user = NULL;
494 + }
495 + break;
496 + }
497 + }
498 +
499 + if (reserve && !user) {
500 + if (vc_cma_reserve_count == VC_CMA_RESERVE_COUNT_MAX) {
501 + LOG_ERR("vc-cma: Too many reservations - "
502 + "increase CMA_RESERVE_COUNT_MAX");
503 + up(&vc_cma_reserve_mutex);
504 + return -EBUSY;
505 + }
506 + user = &vc_cma_reserve_users[vc_cma_reserve_count];
507 + user->pid = pid;
508 + user->reserve = reserve;
509 + delta = reserve;
510 + vc_cma_reserve_count++;
511 + }
512 +
513 + vc_cma_reserve_total += delta;
514 +
515 + send_vc_msg(VC_CMA_MSG_RESERVE,
516 + vc_cma_reserve_total & 0xffff, vc_cma_reserve_total >> 16);
517 +
518 + send_worker_msg((VCHIQ_HEADER_T *) VC_CMA_MSG_UPDATE_RESERVE);
519 +
520 + LOG_DBG("/proc/vc-cma: reserve %d (PID %d) - total %u",
521 + reserve, pid, vc_cma_reserve_total);
522 +
523 + up(&vc_cma_reserve_mutex);
524 +
525 + return vc_cma_reserve_total;
526 +}
527 +
528 +static VCHIQ_STATUS_T cma_service_callback(VCHIQ_REASON_T reason,
529 + VCHIQ_HEADER_T * header,
530 + VCHIQ_SERVICE_HANDLE_T service,
531 + void *bulk_userdata)
532 +{
533 + switch (reason) {
534 + case VCHIQ_MESSAGE_AVAILABLE:
535 + if (!send_worker_msg(header))
536 + return VCHIQ_RETRY;
537 + break;
538 + case VCHIQ_SERVICE_CLOSED:
539 + LOG_DBG("CMA service closed");
540 + break;
541 + default:
542 + LOG_ERR("Unexpected CMA callback reason %d", reason);
543 + break;
544 + }
545 + return VCHIQ_SUCCESS;
546 +}
547 +
548 +static void send_vc_msg(unsigned short type,
549 + unsigned short param1, unsigned short param2)
550 +{
551 + unsigned short msg[] = { type, param1, param2 };
552 + VCHIQ_ELEMENT_T elem = { &msg, sizeof(msg) };
553 + VCHIQ_STATUS_T ret;
554 + vchiq_use_service(cma_service);
555 + ret = vchiq_queue_message(cma_service, &elem, 1);
556 + vchiq_release_service(cma_service);
557 + if (ret != VCHIQ_SUCCESS)
558 + LOG_ERR("vchiq_queue_message returned %x", ret);
559 +}
560 +
561 +static bool send_worker_msg(VCHIQ_HEADER_T * msg)
562 +{
563 + if (down_interruptible(&vc_cma_worker_queue_push_mutex))
564 + return false;
565 + vchiu_queue_push(&cma_msg_queue, msg);
566 + up(&vc_cma_worker_queue_push_mutex);
567 + return true;
568 +}
569 +
570 +static int vc_cma_alloc_chunks(int num_chunks, struct cma_msg *reply)
571 +{
572 + int i;
573 + for (i = 0; i < num_chunks; i++) {
574 + struct page *chunk;
575 + unsigned int chunk_num;
576 + uint8_t *chunk_addr;
577 + size_t chunk_size = PAGES_PER_CHUNK << PAGE_SHIFT;
578 +
579 + chunk = dma_alloc_from_contiguous(NULL /*&vc_cma_device.dev*/,
580 + PAGES_PER_CHUNK,
581 + VC_CMA_CHUNK_ORDER);
582 + if (!chunk)
583 + break;
584 +
585 + chunk_addr = page_address(chunk);
586 + dmac_flush_range(chunk_addr, chunk_addr + chunk_size);
587 + outer_inv_range(__pa(chunk_addr), __pa(chunk_addr) +
588 + chunk_size);
589 +
590 + chunk_num =
591 + (page_to_phys(chunk) - vc_cma_base) / VC_CMA_CHUNK_SIZE;
592 + BUG_ON(((page_to_phys(chunk) - vc_cma_base) %
593 + VC_CMA_CHUNK_SIZE) != 0);
594 + if (chunk_num >= vc_cma_chunks) {
595 + LOG_ERR("%s: ===============================",
596 + __func__);
597 + LOG_ERR("%s: chunk phys %x, vc_cma %x-%x - "
598 + "bad SPARSEMEM configuration?",
599 + __func__, (unsigned int)page_to_phys(chunk),
600 + vc_cma_base, vc_cma_base + vc_cma_size - 1);
601 + LOG_ERR("%s: dev->cma_area = %p\n", __func__,
602 + vc_cma_device.dev.cma_area);
603 + LOG_ERR("%s: ===============================",
604 + __func__);
605 + break;
606 + }
607 + reply->params[i] = chunk_num;
608 + vc_cma_chunks_used++;
609 + }
610 +
611 + if (i < num_chunks) {
612 + LOG_ERR("%s: dma_alloc_from_contiguous failed "
613 + "for %x bytes (alloc %d of %d, %d free)",
614 + __func__, VC_CMA_CHUNK_SIZE, i,
615 + num_chunks, vc_cma_chunks - vc_cma_chunks_used);
616 + num_chunks = i;
617 + }
618 +
619 + LOG_DBG("CMA allocated %d chunks -> %d used",
620 + num_chunks, vc_cma_chunks_used);
621 + reply->type = VC_CMA_MSG_ALLOCATED;
622 +
623 + {
624 + VCHIQ_ELEMENT_T elem = {
625 + reply,
626 + offsetof(struct cma_msg, params[0]) +
627 + num_chunks * sizeof(reply->params[0])
628 + };
629 + VCHIQ_STATUS_T ret;
630 + vchiq_use_service(cma_service);
631 + ret = vchiq_queue_message(cma_service, &elem, 1);
632 + vchiq_release_service(cma_service);
633 + if (ret != VCHIQ_SUCCESS)
634 + LOG_ERR("vchiq_queue_message return " "%x", ret);
635 + }
636 +
637 + return num_chunks;
638 +}
639 +
640 +static int cma_worker_proc(void *param)
641 +{
642 + static struct cma_msg reply;
643 + (void)param;
644 +
645 + while (1) {
646 + VCHIQ_HEADER_T *msg;
647 + static struct cma_msg msg_copy;
648 + struct cma_msg *cma_msg = &msg_copy;
649 + int type, msg_size;
650 +
651 + msg = vchiu_queue_pop(&cma_msg_queue);
652 + if ((unsigned int)msg >= VC_CMA_MSG_MAX) {
653 + msg_size = msg->size;
654 + memcpy(&msg_copy, msg->data, msg_size);
655 + type = cma_msg->type;
656 + vchiq_release_message(cma_service, msg);
657 + } else {
658 + msg_size = 0;
659 + type = (int)msg;
660 + if (type == VC_CMA_MSG_QUIT)
661 + break;
662 + else if (type == VC_CMA_MSG_UPDATE_RESERVE) {
663 + msg = NULL;
664 + cma_msg = NULL;
665 + } else {
666 + BUG();
667 + continue;
668 + }
669 + }
670 +
671 + switch (type) {
672 + case VC_CMA_MSG_ALLOC:{
673 + int num_chunks, free_chunks;
674 + num_chunks = cma_msg->params[0];
675 + free_chunks =
676 + vc_cma_chunks - vc_cma_chunks_used;
677 + LOG_DBG("CMA_MSG_ALLOC(%d chunks)", num_chunks);
678 + if (num_chunks > VC_CMA_MAX_PARAMS_PER_MSG) {
679 + LOG_ERR
680 + ("CMA_MSG_ALLOC - chunk count (%d) "
681 + "exceeds VC_CMA_MAX_PARAMS_PER_MSG (%d)",
682 + num_chunks,
683 + VC_CMA_MAX_PARAMS_PER_MSG);
684 + num_chunks = VC_CMA_MAX_PARAMS_PER_MSG;
685 + }
686 +
687 + if (num_chunks > free_chunks) {
688 + LOG_ERR
689 + ("CMA_MSG_ALLOC - chunk count (%d) "
690 + "exceeds free chunks (%d)",
691 + num_chunks, free_chunks);
692 + num_chunks = free_chunks;
693 + }
694 +
695 + vc_cma_alloc_chunks(num_chunks, &reply);
696 + }
697 + break;
698 +
699 + case VC_CMA_MSG_FREE:{
700 + int chunk_count =
701 + (msg_size -
702 + offsetof(struct cma_msg,
703 + params)) /
704 + sizeof(cma_msg->params[0]);
705 + int i;
706 + BUG_ON(chunk_count <= 0);
707 +
708 + LOG_DBG("CMA_MSG_FREE(%d chunks - %x, ...)",
709 + chunk_count, cma_msg->params[0]);
710 + for (i = 0; i < chunk_count; i++) {
711 + int chunk_num = cma_msg->params[i];
712 + struct page *page = vc_cma_base_page +
713 + chunk_num * PAGES_PER_CHUNK;
714 + if (chunk_num >= vc_cma_chunks) {
715 + LOG_ERR
716 + ("CMA_MSG_FREE - chunk %d of %d"
717 + " (value %x) exceeds maximum "
718 + "(%x)", i, chunk_count,
719 + chunk_num,
720 + vc_cma_chunks - 1);
721 + break;
722 + }
723 +
724 + if (!dma_release_from_contiguous
725 + (NULL /*&vc_cma_device.dev*/, page,
726 + PAGES_PER_CHUNK)) {
727 + LOG_ERR
728 + ("CMA_MSG_FREE - failed to "
729 + "release chunk %d (phys %x, "
730 + "page %x)", chunk_num,
731 + page_to_phys(page),
732 + (unsigned int)page);
733 + }
734 + vc_cma_chunks_used--;
735 + }
736 + LOG_DBG("CMA released %d chunks -> %d used",
737 + i, vc_cma_chunks_used);
738 + }
739 + break;
740 +
741 + case VC_CMA_MSG_UPDATE_RESERVE:{
742 + int chunks_needed =
743 + ((vc_cma_reserve_total + VC_CMA_CHUNK_SIZE -
744 + 1)
745 + / VC_CMA_CHUNK_SIZE) -
746 + vc_cma_chunks_reserved;
747 +
748 + LOG_DBG
749 + ("CMA_MSG_UPDATE_RESERVE(%d chunks needed)",
750 + chunks_needed);
751 +
752 + /* Cap the reservations to what is available */
753 + if (chunks_needed > 0) {
754 + if (chunks_needed >
755 + (vc_cma_chunks -
756 + vc_cma_chunks_used))
757 + chunks_needed =
758 + (vc_cma_chunks -
759 + vc_cma_chunks_used);
760 +
761 + chunks_needed =
762 + vc_cma_alloc_chunks(chunks_needed,
763 + &reply);
764 + }
765 +
766 + LOG_DBG
767 + ("CMA_MSG_UPDATE_RESERVE(%d chunks allocated)",
768 + chunks_needed);
769 + vc_cma_chunks_reserved += chunks_needed;
770 + }
771 + break;
772 +
773 + default:
774 + LOG_ERR("unexpected msg type %d", type);
775 + break;
776 + }
777 + }
778 +
779 + LOG_DBG("quitting...");
780 + return 0;
781 +}
782 +
783 +/****************************************************************************
784 +*
785 +* vc_cma_connected_init
786 +*
787 +* This function is called once the videocore has been connected.
788 +*
789 +***************************************************************************/
790 +
791 +static void vc_cma_connected_init(void)
792 +{
793 + VCHIQ_SERVICE_PARAMS_T service_params;
794 +
795 + LOG_DBG("vc_cma_connected_init");
796 +
797 + if (!vchiu_queue_init(&cma_msg_queue, 16)) {
798 + LOG_ERR("could not create CMA msg queue");
799 + goto fail_queue;
800 + }
801 +
802 + if (vchiq_initialise(&cma_instance) != VCHIQ_SUCCESS)
803 + goto fail_vchiq_init;
804 +
805 + vchiq_connect(cma_instance);
806 +
807 + service_params.fourcc = VC_CMA_FOURCC;
808 + service_params.callback = cma_service_callback;
809 + service_params.userdata = NULL;
810 + service_params.version = VC_CMA_VERSION;
811 + service_params.version_min = VC_CMA_VERSION;
812 +
813 + if (vchiq_open_service(cma_instance, &service_params,
814 + &cma_service) != VCHIQ_SUCCESS) {
815 + LOG_ERR("failed to open service - already in use?");
816 + goto fail_vchiq_open;
817 + }
818 +
819 + vchiq_release_service(cma_service);
820 +
821 + cma_worker = kthread_create(cma_worker_proc, NULL, "cma_worker");
822 + if (!cma_worker) {
823 + LOG_ERR("could not create CMA worker thread");
824 + goto fail_worker;
825 + }
826 + set_user_nice(cma_worker, -20);
827 + wake_up_process(cma_worker);
828 +
829 + return;
830 +
831 +fail_worker:
832 + vchiq_close_service(cma_service);
833 +fail_vchiq_open:
834 + vchiq_shutdown(cma_instance);
835 +fail_vchiq_init:
836 + vchiu_queue_delete(&cma_msg_queue);
837 +fail_queue:
838 + return;
839 +}
840 +
841 +void
842 +loud_error_header(void)
843 +{
844 + if (in_loud_error)
845 + return;
846 +
847 + LOG_ERR("============================================================"
848 + "================");
849 + LOG_ERR("============================================================"
850 + "================");
851 + LOG_ERR("=====");
852 +
853 + in_loud_error = 1;
854 +}
855 +
856 +void
857 +loud_error_footer(void)
858 +{
859 + if (!in_loud_error)
860 + return;
861 +
862 + LOG_ERR("=====");
863 + LOG_ERR("============================================================"
864 + "================");
865 + LOG_ERR("============================================================"
866 + "================");
867 +
868 + in_loud_error = 0;
869 +}
870 +
871 +#if 1
872 +static int check_cma_config(void) { return 1; }
873 +#else
874 +static int
875 +read_vc_debug_var(VC_MEM_ACCESS_HANDLE_T handle,
876 + const char *symbol,
877 + void *buf, size_t bufsize)
878 +{
879 + VC_MEM_ADDR_T vcMemAddr;
880 + size_t vcMemSize;
881 + uint8_t *mapAddr;
882 + off_t vcMapAddr;
883 +
884 + if (!LookupVideoCoreSymbol(handle, symbol,
885 + &vcMemAddr,
886 + &vcMemSize)) {
887 + loud_error_header();
888 + loud_error(
889 + "failed to find VC symbol \"%s\".",
890 + symbol);
891 + loud_error_footer();
892 + return 0;
893 + }
894 +
895 + if (vcMemSize != bufsize) {
896 + loud_error_header();
897 + loud_error(
898 + "VC symbol \"%s\" is the wrong size.",
899 + symbol);
900 + loud_error_footer();
901 + return 0;
902 + }
903 +
904 + vcMapAddr = (off_t)vcMemAddr & VC_MEM_TO_ARM_ADDR_MASK;
905 + vcMapAddr += mm_vc_mem_phys_addr;
906 + mapAddr = ioremap_nocache(vcMapAddr, vcMemSize);
907 + if (mapAddr == 0) {
908 + loud_error_header();
909 + loud_error(
910 + "failed to ioremap \"%s\" @ 0x%x "
911 + "(phys: 0x%x, size: %u).",
912 + symbol,
913 + (unsigned int)vcMapAddr,
914 + (unsigned int)vcMemAddr,
915 + (unsigned int)vcMemSize);
916 + loud_error_footer();
917 + return 0;
918 + }
919 +
920 + memcpy(buf, mapAddr, bufsize);
921 + iounmap(mapAddr);
922 +
923 + return 1;
924 +}
925 +
926 +
927 +static int
928 +check_cma_config(void)
929 +{
930 + VC_MEM_ACCESS_HANDLE_T mem_hndl;
931 + VC_MEM_ADDR_T mempool_start;
932 + VC_MEM_ADDR_T mempool_end;
933 + VC_MEM_ADDR_T mempool_offline_start;
934 + VC_MEM_ADDR_T mempool_offline_end;
935 + VC_MEM_ADDR_T cam_alloc_base;
936 + VC_MEM_ADDR_T cam_alloc_size;
937 + VC_MEM_ADDR_T cam_alloc_end;
938 + int success = 0;
939 +
940 + if (OpenVideoCoreMemory(&mem_hndl) != 0)
941 + goto out;
942 +
943 + /* Read the relevant VideoCore variables */
944 + if (!read_vc_debug_var(mem_hndl, "__MEMPOOL_START",
945 + &mempool_start,
946 + sizeof(mempool_start)))
947 + goto close;
948 +
949 + if (!read_vc_debug_var(mem_hndl, "__MEMPOOL_END",
950 + &mempool_end,
951 + sizeof(mempool_end)))
952 + goto close;
953 +
954 + if (!read_vc_debug_var(mem_hndl, "__MEMPOOL_OFFLINE_START",
955 + &mempool_offline_start,
956 + sizeof(mempool_offline_start)))
957 + goto close;
958 +
959 + if (!read_vc_debug_var(mem_hndl, "__MEMPOOL_OFFLINE_END",
960 + &mempool_offline_end,
961 + sizeof(mempool_offline_end)))
962 + goto close;
963 +
964 + if (!read_vc_debug_var(mem_hndl, "cam_alloc_base",
965 + &cam_alloc_base,
966 + sizeof(cam_alloc_base)))
967 + goto close;
968 +
969 + if (!read_vc_debug_var(mem_hndl, "cam_alloc_size",
970 + &cam_alloc_size,
971 + sizeof(cam_alloc_size)))
972 + goto close;
973 +
974 + cam_alloc_end = cam_alloc_base + cam_alloc_size;
975 +
976 + success = 1;
977 +
978 + /* Now the sanity checks */
979 + if (!mempool_offline_start)
980 + mempool_offline_start = mempool_start;
981 + if (!mempool_offline_end)
982 + mempool_offline_end = mempool_end;
983 +
984 + if (VCADDR_TO_PHYSADDR(mempool_offline_start) != vc_cma_base) {
985 + loud_error_header();
986 + loud_error(
987 + "__MEMPOOL_OFFLINE_START(%x -> %lx) doesn't match "
988 + "vc_cma_base(%x)",
989 + mempool_offline_start,
990 + VCADDR_TO_PHYSADDR(mempool_offline_start),
991 + vc_cma_base);
992 + success = 0;
993 + }
994 +
995 + if (VCADDR_TO_PHYSADDR(mempool_offline_end) !=
996 + (vc_cma_base + vc_cma_size)) {
997 + loud_error_header();
998 + loud_error(
999 + "__MEMPOOL_OFFLINE_END(%x -> %lx) doesn't match "
1000 + "vc_cma_base(%x) + vc_cma_size(%x) = %x",
1001 + mempool_offline_start,
1002 + VCADDR_TO_PHYSADDR(mempool_offline_end),
1003 + vc_cma_base, vc_cma_size, vc_cma_base + vc_cma_size);
1004 + success = 0;
1005 + }
1006 +
1007 + if (mempool_end < mempool_start) {
1008 + loud_error_header();
1009 + loud_error(
1010 + "__MEMPOOL_END(%x) must not be before "
1011 + "__MEMPOOL_START(%x)",
1012 + mempool_end,
1013 + mempool_start);
1014 + success = 0;
1015 + }
1016 +
1017 + if (mempool_offline_end < mempool_offline_start) {
1018 + loud_error_header();
1019 + loud_error(
1020 + "__MEMPOOL_OFFLINE_END(%x) must not be before "
1021 + "__MEMPOOL_OFFLINE_START(%x)",
1022 + mempool_offline_end,
1023 + mempool_offline_start);
1024 + success = 0;
1025 + }
1026 +
1027 + if (mempool_offline_start < mempool_start) {
1028 + loud_error_header();
1029 + loud_error(
1030 + "__MEMPOOL_OFFLINE_START(%x) must not be before "
1031 + "__MEMPOOL_START(%x)",
1032 + mempool_offline_start,
1033 + mempool_start);
1034 + success = 0;
1035 + }
1036 +
1037 + if (mempool_offline_end > mempool_end) {
1038 + loud_error_header();
1039 + loud_error(
1040 + "__MEMPOOL_OFFLINE_END(%x) must not be after "
1041 + "__MEMPOOL_END(%x)",
1042 + mempool_offline_end,
1043 + mempool_end);
1044 + success = 0;
1045 + }
1046 +
1047 + if ((cam_alloc_base < mempool_end) &&
1048 + (cam_alloc_end > mempool_start)) {
1049 + loud_error_header();
1050 + loud_error(
1051 + "cam_alloc pool(%x-%x) overlaps "
1052 + "mempool(%x-%x)",
1053 + cam_alloc_base, cam_alloc_end,
1054 + mempool_start, mempool_end);
1055 + success = 0;
1056 + }
1057 +
1058 + loud_error_footer();
1059 +
1060 +close:
1061 + CloseVideoCoreMemory(mem_hndl);
1062 +
1063 +out:
1064 + return success;
1065 +}
1066 +#endif
1067 +
1068 +static int vc_cma_init(void)
1069 +{
1070 + int rc = -EFAULT;
1071 + struct device *dev;
1072 +
1073 + if (!check_cma_config())
1074 + goto out_release;
1075 +
1076 + printk(KERN_INFO "vc-cma: Videocore CMA driver\n");
1077 + printk(KERN_INFO "vc-cma: vc_cma_base = 0x%08x\n", vc_cma_base);
1078 + printk(KERN_INFO "vc-cma: vc_cma_size = 0x%08x (%u MiB)\n",
1079 + vc_cma_size, vc_cma_size / (1024 * 1024));
1080 + printk(KERN_INFO "vc-cma: vc_cma_initial = 0x%08x (%u MiB)\n",
1081 + vc_cma_initial, vc_cma_initial / (1024 * 1024));
1082 +
1083 + vc_cma_base_page = phys_to_page(vc_cma_base);
1084 +
1085 + if (vc_cma_chunks) {
1086 + int chunks_needed = vc_cma_initial / VC_CMA_CHUNK_SIZE;
1087 +
1088 + for (vc_cma_chunks_used = 0;
1089 + vc_cma_chunks_used < chunks_needed; vc_cma_chunks_used++) {
1090 + struct page *chunk;
1091 + chunk = dma_alloc_from_contiguous(NULL /*&vc_cma_device.dev*/,
1092 + PAGES_PER_CHUNK,
1093 + VC_CMA_CHUNK_ORDER);
1094 + if (!chunk)
1095 + break;
1096 + BUG_ON(((page_to_phys(chunk) - vc_cma_base) %
1097 + VC_CMA_CHUNK_SIZE) != 0);
1098 + }
1099 + if (vc_cma_chunks_used != chunks_needed) {
1100 + LOG_ERR("%s: dma_alloc_from_contiguous failed (%d "
1101 + "bytes, allocation %d of %d)",
1102 + __func__, VC_CMA_CHUNK_SIZE,
1103 + vc_cma_chunks_used, chunks_needed);
1104 + goto out_release;
1105 + }
1106 +
1107 + vchiq_add_connected_callback(vc_cma_connected_init);
1108 + }
1109 +
1110 + rc = alloc_chrdev_region(&vc_cma_devnum, 0, 1, DRIVER_NAME);
1111 + if (rc < 0) {
1112 + LOG_ERR("%s: alloc_chrdev_region failed (rc=%d)", __func__, rc);
1113 + goto out_release;
1114 + }
1115 +
1116 + cdev_init(&vc_cma_cdev, &vc_cma_fops);
1117 + rc = cdev_add(&vc_cma_cdev, vc_cma_devnum, 1);
1118 + if (rc != 0) {
1119 + LOG_ERR("%s: cdev_add failed (rc=%d)", __func__, rc);
1120 + goto out_unregister;
1121 + }
1122 +
1123 + vc_cma_class = class_create(THIS_MODULE, DRIVER_NAME);
1124 + if (IS_ERR(vc_cma_class)) {
1125 + rc = PTR_ERR(vc_cma_class);
1126 + LOG_ERR("%s: class_create failed (rc=%d)", __func__, rc);
1127 + goto out_cdev_del;
1128 + }
1129 +
1130 + dev = device_create(vc_cma_class, NULL, vc_cma_devnum, NULL,
1131 + DRIVER_NAME);
1132 + if (IS_ERR(dev)) {
1133 + rc = PTR_ERR(dev);
1134 + LOG_ERR("%s: device_create failed (rc=%d)", __func__, rc);
1135 + goto out_class_destroy;
1136 + }
1137 +
1138 + vc_cma_proc_entry = create_proc_entry(DRIVER_NAME, 0444, NULL);
1139 + if (vc_cma_proc_entry == NULL) {
1140 + rc = -EFAULT;
1141 + LOG_ERR("%s: create_proc_entry failed", __func__);
1142 + goto out_device_destroy;
1143 + }
1144 +
1145 + vc_cma_proc_entry->proc_fops = &vc_cma_proc_fops;
1146 +
1147 + vc_cma_inited = 1;
1148 + return 0;
1149 +
1150 +out_device_destroy:
1151 + device_destroy(vc_cma_class, vc_cma_devnum);
1152 +
1153 +out_class_destroy:
1154 + class_destroy(vc_cma_class);
1155 + vc_cma_class = NULL;
1156 +
1157 +out_cdev_del:
1158 + cdev_del(&vc_cma_cdev);
1159 +
1160 +out_unregister:
1161 + unregister_chrdev_region(vc_cma_devnum, 1);
1162 +
1163 +out_release:
1164 + /* It is tempting to try to clean up by calling
1165 + dma_release_from_contiguous for all allocated chunks, but it isn't
1166 + a very safe thing to do. If vc_cma_initial is non-zero it is because
1167 + VideoCore is already using that memory, so giving it back to Linux
1168 + is likely to be fatal.
1169 + */
1170 + return -1;
1171 +}
1172 +
1173 +/****************************************************************************
1174 +*
1175 +* vc_cma_exit
1176 +*
1177 +***************************************************************************/
1178 +
1179 +static void __exit vc_cma_exit(void)
1180 +{
1181 + LOG_DBG("%s: called", __func__);
1182 +
1183 + if (vc_cma_inited) {
1184 + remove_proc_entry(vc_cma_proc_entry->name, NULL);
1185 + device_destroy(vc_cma_class, vc_cma_devnum);
1186 + class_destroy(vc_cma_class);
1187 + cdev_del(&vc_cma_cdev);
1188 + unregister_chrdev_region(vc_cma_devnum, 1);
1189 + }
1190 +}
1191 +
1192 +module_init(vc_cma_init);
1193 +module_exit(vc_cma_exit);
1194 +MODULE_LICENSE("GPL");
1195 +MODULE_AUTHOR("Broadcom Corporation");
1196 --- linux-3.10/drivers/char/Makefile 2013-06-30 23:13:29.000000000 +0100
1197 +++ linux-rpi-3.10.y/drivers/char/Makefile 2013-07-06 15:25:50.000000000 +0100
1198 @@ -62,3 +62,6 @@
1199 js-rtc-y = rtc.o
1200
1201 obj-$(CONFIG_TILE_SROM) += tile-srom.o
1202 +
1203 +obj-$(CONFIG_BRCM_CHAR_DRIVERS) += broadcom/
1204 +
1205 --- linux-3.10/drivers/char/Kconfig 2013-06-30 23:13:29.000000000 +0100
1206 +++ linux-rpi-3.10.y/drivers/char/Kconfig 2013-07-06 15:25:50.000000000 +0100
1207 @@ -586,6 +586,8 @@
1208
1209 source "drivers/s390/char/Kconfig"
1210
1211 +source "drivers/char/broadcom/Kconfig"
1212 +
1213 config MSM_SMD_PKT
1214 bool "Enable device interface for some SMD packet ports"
1215 default n
1216 diff -urN linux-3.10/drivers/misc/Kconfig linux-rpi-3.10.y/drivers/misc/Kconfig
1217 --- linux-3.10/drivers/misc/Kconfig 2013-06-30 23:13:29.000000000 +0100
1218 +++ linux-rpi-3.10.y/drivers/misc/Kconfig 2013-07-06 15:25:50.000000000 +0100
1219 @@ -536,4 +536,6 @@
1220 source "drivers/misc/altera-stapl/Kconfig"
1221 source "drivers/misc/mei/Kconfig"
1222 source "drivers/misc/vmw_vmci/Kconfig"
1223 +source "drivers/misc/vc04_services/Kconfig"
1224 endmenu
1225 +
1226 diff -urN linux-3.10/drivers/misc/Makefile linux-rpi-3.10.y/drivers/misc/Makefile
1227 --- linux-3.10/drivers/misc/Makefile 2013-06-30 23:13:29.000000000 +0100
1228 +++ linux-rpi-3.10.y/drivers/misc/Makefile 2013-07-06 15:25:50.000000000 +0100
1229 @@ -53,3 +53,4 @@
1230 obj-$(CONFIG_VMWARE_VMCI) += vmw_vmci/
1231 obj-$(CONFIG_LATTICE_ECP3_CONFIG) += lattice-ecp3-config.o
1232 obj-$(CONFIG_SRAM) += sram.o
1233 +obj-$(CONFIG_BCM2708_VCHIQ) += vc04_services/
1234 diff -urN linux-3.10/drivers/misc/vc04_services/interface/vchi/connections/connection.h linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchi/connections/connection.h
1235 --- linux-3.10/drivers/misc/vc04_services/interface/vchi/connections/connection.h 1970-01-01 01:00:00.000000000 +0100
1236 +++ linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchi/connections/connection.h 2013-07-06 15:25:50.000000000 +0100
1237 @@ -0,0 +1,328 @@
1238 +/**
1239 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
1240 + *
1241 + * Redistribution and use in source and binary forms, with or without
1242 + * modification, are permitted provided that the following conditions
1243 + * are met:
1244 + * 1. Redistributions of source code must retain the above copyright
1245 + * notice, this list of conditions, and the following disclaimer,
1246 + * without modification.
1247 + * 2. Redistributions in binary form must reproduce the above copyright
1248 + * notice, this list of conditions and the following disclaimer in the
1249 + * documentation and/or other materials provided with the distribution.
1250 + * 3. The names of the above-listed copyright holders may not be used
1251 + * to endorse or promote products derived from this software without
1252 + * specific prior written permission.
1253 + *
1254 + * ALTERNATIVELY, this software may be distributed under the terms of the
1255 + * GNU General Public License ("GPL") version 2, as published by the Free
1256 + * Software Foundation.
1257 + *
1258 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
1259 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
1260 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
1261 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
1262 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
1263 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
1264 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
1265 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
1266 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
1267 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1268 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1269 + */
1270 +
1271 +#ifndef CONNECTION_H_
1272 +#define CONNECTION_H_
1273 +
1274 +#include <linux/kernel.h>
1275 +#include <linux/types.h>
1276 +#include <linux/semaphore.h>
1277 +
1278 +#include "interface/vchi/vchi_cfg_internal.h"
1279 +#include "interface/vchi/vchi_common.h"
1280 +#include "interface/vchi/message_drivers/message.h"
1281 +
1282 +/******************************************************************************
1283 + Global defs
1284 + *****************************************************************************/
1285 +
1286 +// Opaque handle for a connection / service pair
1287 +typedef struct opaque_vchi_connection_connected_service_handle_t *VCHI_CONNECTION_SERVICE_HANDLE_T;
1288 +
1289 +// opaque handle to the connection state information
1290 +typedef struct opaque_vchi_connection_info_t VCHI_CONNECTION_STATE_T;
1291 +
1292 +typedef struct vchi_connection_t VCHI_CONNECTION_T;
1293 +
1294 +
1295 +/******************************************************************************
1296 + API
1297 + *****************************************************************************/
1298 +
1299 +// Routine to init a connection with a particular low level driver
1300 +typedef VCHI_CONNECTION_STATE_T * (*VCHI_CONNECTION_INIT_T)( struct vchi_connection_t * connection,
1301 + const VCHI_MESSAGE_DRIVER_T * driver );
1302 +
1303 +// Routine to control CRC enabling at a connection level
1304 +typedef int32_t (*VCHI_CONNECTION_CRC_CONTROL_T)( VCHI_CONNECTION_STATE_T *state_handle,
1305 + VCHI_CRC_CONTROL_T control );
1306 +
1307 +// Routine to create a service
1308 +typedef int32_t (*VCHI_CONNECTION_SERVICE_CONNECT_T)( VCHI_CONNECTION_STATE_T *state_handle,
1309 + int32_t service_id,
1310 + uint32_t rx_fifo_size,
1311 + uint32_t tx_fifo_size,
1312 + int server,
1313 + VCHI_CALLBACK_T callback,
1314 + void *callback_param,
1315 + int32_t want_crc,
1316 + int32_t want_unaligned_bulk_rx,
1317 + int32_t want_unaligned_bulk_tx,
1318 + VCHI_CONNECTION_SERVICE_HANDLE_T *service_handle );
1319 +
1320 +// Routine to close a service
1321 +typedef int32_t (*VCHI_CONNECTION_SERVICE_DISCONNECT_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle );
1322 +
1323 +// Routine to queue a message
1324 +typedef int32_t (*VCHI_CONNECTION_SERVICE_QUEUE_MESSAGE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
1325 + const void *data,
1326 + uint32_t data_size,
1327 + VCHI_FLAGS_T flags,
1328 + void *msg_handle );
1329 +
1330 +// scatter-gather (vector) message queueing
1331 +typedef int32_t (*VCHI_CONNECTION_SERVICE_QUEUE_MESSAGEV_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
1332 + VCHI_MSG_VECTOR_T *vector,
1333 + uint32_t count,
1334 + VCHI_FLAGS_T flags,
1335 + void *msg_handle );
1336 +
1337 +// Routine to dequeue a message
1338 +typedef int32_t (*VCHI_CONNECTION_SERVICE_DEQUEUE_MESSAGE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
1339 + void *data,
1340 + uint32_t max_data_size_to_read,
1341 + uint32_t *actual_msg_size,
1342 + VCHI_FLAGS_T flags );
1343 +
1344 +// Routine to peek at a message
1345 +typedef int32_t (*VCHI_CONNECTION_SERVICE_PEEK_MESSAGE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
1346 + void **data,
1347 + uint32_t *msg_size,
1348 + VCHI_FLAGS_T flags );
1349 +
1350 +// Routine to hold a message
1351 +typedef int32_t (*VCHI_CONNECTION_SERVICE_HOLD_MESSAGE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
1352 + void **data,
1353 + uint32_t *msg_size,
1354 + VCHI_FLAGS_T flags,
1355 + void **message_handle );
1356 +
1357 +// Routine to initialise a received message iterator
1358 +typedef int32_t (*VCHI_CONNECTION_SERVICE_LOOKAHEAD_MESSAGE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
1359 + VCHI_MSG_ITER_T *iter,
1360 + VCHI_FLAGS_T flags );
1361 +
1362 +// Routine to release a held message
1363 +typedef int32_t (*VCHI_CONNECTION_HELD_MSG_RELEASE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
1364 + void *message_handle );
1365 +
1366 +// Routine to get info on a held message
1367 +typedef int32_t (*VCHI_CONNECTION_HELD_MSG_INFO_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
1368 + void *message_handle,
1369 + void **data,
1370 + int32_t *msg_size,
1371 + uint32_t *tx_timestamp,
1372 + uint32_t *rx_timestamp );
1373 +
1374 +// Routine to check whether the iterator has a next message
1375 +typedef int32_t (*VCHI_CONNECTION_MSG_ITER_HAS_NEXT_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service,
1376 + const VCHI_MSG_ITER_T *iter );
1377 +
1378 +// Routine to advance the iterator
1379 +typedef int32_t (*VCHI_CONNECTION_MSG_ITER_NEXT_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service,
1380 + VCHI_MSG_ITER_T *iter,
1381 + void **data,
1382 + uint32_t *msg_size );
1383 +
1384 +// Routine to remove the last message returned by the iterator
1385 +typedef int32_t (*VCHI_CONNECTION_MSG_ITER_REMOVE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service,
1386 + VCHI_MSG_ITER_T *iter );
1387 +
1388 +// Routine to hold the last message returned by the iterator
1389 +typedef int32_t (*VCHI_CONNECTION_MSG_ITER_HOLD_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service,
1390 + VCHI_MSG_ITER_T *iter,
1391 + void **msg_handle );
1392 +
1393 +// Routine to transmit bulk data
1394 +typedef int32_t (*VCHI_CONNECTION_BULK_QUEUE_TRANSMIT_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
1395 + const void *data_src,
1396 + uint32_t data_size,
1397 + VCHI_FLAGS_T flags,
1398 + void *bulk_handle );
1399 +
1400 +// Routine to receive data
1401 +typedef int32_t (*VCHI_CONNECTION_BULK_QUEUE_RECEIVE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
1402 + void *data_dst,
1403 + uint32_t data_size,
1404 + VCHI_FLAGS_T flags,
1405 + void *bulk_handle );
1406 +
1407 +// Routine to report if a server is available
1408 +typedef int32_t (*VCHI_CONNECTION_SERVER_PRESENT)( VCHI_CONNECTION_STATE_T *state, int32_t service_id, int32_t peer_flags );
1409 +
1410 +// Routine to report the number of RX slots available
1411 +typedef int (*VCHI_CONNECTION_RX_SLOTS_AVAILABLE)( const VCHI_CONNECTION_STATE_T *state );
1412 +
1413 +// Routine to report the RX slot size
1414 +typedef uint32_t (*VCHI_CONNECTION_RX_SLOT_SIZE)( const VCHI_CONNECTION_STATE_T *state );
1415 +
1416 +// Callback to indicate that the other side has added a buffer to the rx bulk DMA FIFO
1417 +typedef void (*VCHI_CONNECTION_RX_BULK_BUFFER_ADDED)(VCHI_CONNECTION_STATE_T *state,
1418 + int32_t service,
1419 + uint32_t length,
1420 + MESSAGE_TX_CHANNEL_T channel,
1421 + uint32_t channel_params,
1422 + uint32_t data_length,
1423 + uint32_t data_offset);
1424 +
1425 +// Callback to inform a service that a Xon or Xoff message has been received
1426 +typedef void (*VCHI_CONNECTION_FLOW_CONTROL)(VCHI_CONNECTION_STATE_T *state, int32_t service_id, int32_t xoff);
1427 +
1428 +// Callback to inform a service that a server available reply message has been received
1429 +typedef void (*VCHI_CONNECTION_SERVER_AVAILABLE_REPLY)(VCHI_CONNECTION_STATE_T *state, int32_t service_id, uint32_t flags);
1430 +
1431 +// Callback to indicate that bulk auxiliary messages have arrived
1432 +typedef void (*VCHI_CONNECTION_BULK_AUX_RECEIVED)(VCHI_CONNECTION_STATE_T *state);
1433 +
1434 +// Callback to indicate that bulk auxiliary messages have arrived
1435 +typedef void (*VCHI_CONNECTION_BULK_AUX_TRANSMITTED)(VCHI_CONNECTION_STATE_T *state, void *handle);
1436 +
1437 +// Callback with all the connection info you require
1438 +typedef void (*VCHI_CONNECTION_INFO)(VCHI_CONNECTION_STATE_T *state, uint32_t protocol_version, uint32_t slot_size, uint32_t num_slots, uint32_t min_bulk_size);
1439 +
1440 +// Callback to inform of a disconnect
1441 +typedef void (*VCHI_CONNECTION_DISCONNECT)(VCHI_CONNECTION_STATE_T *state, uint32_t flags);
1442 +
1443 +// Callback to inform of a power control request
1444 +typedef void (*VCHI_CONNECTION_POWER_CONTROL)(VCHI_CONNECTION_STATE_T *state, MESSAGE_TX_CHANNEL_T channel, int32_t enable);
1445 +
1446 +// allocate memory suitably aligned for this connection
1447 +typedef void * (*VCHI_BUFFER_ALLOCATE)(VCHI_CONNECTION_SERVICE_HANDLE_T service_handle, uint32_t * length);
1448 +
1449 +// free memory allocated by buffer_allocate
1450 +typedef void (*VCHI_BUFFER_FREE)(VCHI_CONNECTION_SERVICE_HANDLE_T service_handle, void * address);
1451 +
1452 +
1453 +/******************************************************************************
1454 + System driver struct
1455 + *****************************************************************************/
1456 +
1457 +struct opaque_vchi_connection_api_t
1458 +{
1459 + // Routine to init the connection
1460 + VCHI_CONNECTION_INIT_T init;
1461 +
1462 + // Connection-level CRC control
1463 + VCHI_CONNECTION_CRC_CONTROL_T crc_control;
1464 +
1465 + // Routine to connect to or create service
1466 + VCHI_CONNECTION_SERVICE_CONNECT_T service_connect;
1467 +
1468 + // Routine to disconnect from a service
1469 + VCHI_CONNECTION_SERVICE_DISCONNECT_T service_disconnect;
1470 +
1471 + // Routine to queue a message
1472 + VCHI_CONNECTION_SERVICE_QUEUE_MESSAGE_T service_queue_msg;
1473 +
1474 + // scatter-gather (vector) message queue
1475 + VCHI_CONNECTION_SERVICE_QUEUE_MESSAGEV_T service_queue_msgv;
1476 +
1477 + // Routine to dequeue a message
1478 + VCHI_CONNECTION_SERVICE_DEQUEUE_MESSAGE_T service_dequeue_msg;
1479 +
1480 + // Routine to peek at a message
1481 + VCHI_CONNECTION_SERVICE_PEEK_MESSAGE_T service_peek_msg;
1482 +
1483 + // Routine to hold a message
1484 + VCHI_CONNECTION_SERVICE_HOLD_MESSAGE_T service_hold_msg;
1485 +
1486 + // Routine to initialise a received message iterator
1487 + VCHI_CONNECTION_SERVICE_LOOKAHEAD_MESSAGE_T service_look_ahead_msg;
1488 +
1489 + // Routine to release a message
1490 + VCHI_CONNECTION_HELD_MSG_RELEASE_T held_msg_release;
1491 +
1492 + // Routine to get information on a held message
1493 + VCHI_CONNECTION_HELD_MSG_INFO_T held_msg_info;
1494 +
1495 + // Routine to check for next message on iterator
1496 + VCHI_CONNECTION_MSG_ITER_HAS_NEXT_T msg_iter_has_next;
1497 +
1498 + // Routine to get next message on iterator
1499 + VCHI_CONNECTION_MSG_ITER_NEXT_T msg_iter_next;
1500 +
1501 + // Routine to remove the last message returned by iterator
1502 + VCHI_CONNECTION_MSG_ITER_REMOVE_T msg_iter_remove;
1503 +
1504 + // Routine to hold the last message returned by iterator
1505 + VCHI_CONNECTION_MSG_ITER_HOLD_T msg_iter_hold;
1506 +
1507 + // Routine to transmit bulk data
1508 + VCHI_CONNECTION_BULK_QUEUE_TRANSMIT_T bulk_queue_transmit;
1509 +
1510 + // Routine to receive data
1511 + VCHI_CONNECTION_BULK_QUEUE_RECEIVE_T bulk_queue_receive;
1512 +
1513 + // Routine to report the available servers
1514 + VCHI_CONNECTION_SERVER_PRESENT server_present;
1515 +
1516 + // Routine to report the number of RX slots available
1517 + VCHI_CONNECTION_RX_SLOTS_AVAILABLE connection_rx_slots_available;
1518 +
1519 + // Routine to report the RX slot size
1520 + VCHI_CONNECTION_RX_SLOT_SIZE connection_rx_slot_size;
1521 +
1522 + // Callback to indicate that the other side has added a buffer to the rx bulk DMA FIFO
1523 + VCHI_CONNECTION_RX_BULK_BUFFER_ADDED rx_bulk_buffer_added;
1524 +
1525 + // Callback to inform a service that a Xon or Xoff message has been received
1526 + VCHI_CONNECTION_FLOW_CONTROL flow_control;
1527 +
1528 + // Callback to inform a service that a server available reply message has been received
1529 + VCHI_CONNECTION_SERVER_AVAILABLE_REPLY server_available_reply;
1530 +
1531 + // Callback to indicate that bulk auxiliary messages have arrived
1532 + VCHI_CONNECTION_BULK_AUX_RECEIVED bulk_aux_received;
1533 +
1534 + // Callback to indicate that a bulk auxiliary message has been transmitted
1535 + VCHI_CONNECTION_BULK_AUX_TRANSMITTED bulk_aux_transmitted;
1536 +
1537 + // Callback to provide information about the connection
1538 + VCHI_CONNECTION_INFO connection_info;
1539 +
1540 + // Callback to notify that peer has requested disconnect
1541 + VCHI_CONNECTION_DISCONNECT disconnect;
1542 +
1543 + // Callback to notify that peer has requested power change
1544 + VCHI_CONNECTION_POWER_CONTROL power_control;
1545 +
1546 + // allocate memory suitably aligned for this connection
1547 + VCHI_BUFFER_ALLOCATE buffer_allocate;
1548 +
1549 + // free memory allocated by buffer_allocate
1550 + VCHI_BUFFER_FREE buffer_free;
1551 +
1552 +};
1553 +
1554 +struct vchi_connection_t {
1555 + const VCHI_CONNECTION_API_T *api;
1556 + VCHI_CONNECTION_STATE_T *state;
1557 +#ifdef VCHI_COARSE_LOCKING
1558 + struct semaphore sem;
1559 +#endif
1560 +};
1561 +
1562 +
1563 +#endif /* CONNECTION_H_ */
1564 +
1565 +/****************************** End of file **********************************/
1566 diff -urN linux-3.10/drivers/misc/vc04_services/interface/vchi/message_drivers/message.h linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchi/message_drivers/message.h
1567 --- linux-3.10/drivers/misc/vc04_services/interface/vchi/message_drivers/message.h 1970-01-01 01:00:00.000000000 +0100
1568 +++ linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchi/message_drivers/message.h 2013-07-06 15:25:50.000000000 +0100
1569 @@ -0,0 +1,204 @@
1570 +/**
1571 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
1572 + *
1573 + * Redistribution and use in source and binary forms, with or without
1574 + * modification, are permitted provided that the following conditions
1575 + * are met:
1576 + * 1. Redistributions of source code must retain the above copyright
1577 + * notice, this list of conditions, and the following disclaimer,
1578 + * without modification.
1579 + * 2. Redistributions in binary form must reproduce the above copyright
1580 + * notice, this list of conditions and the following disclaimer in the
1581 + * documentation and/or other materials provided with the distribution.
1582 + * 3. The names of the above-listed copyright holders may not be used
1583 + * to endorse or promote products derived from this software without
1584 + * specific prior written permission.
1585 + *
1586 + * ALTERNATIVELY, this software may be distributed under the terms of the
1587 + * GNU General Public License ("GPL") version 2, as published by the Free
1588 + * Software Foundation.
1589 + *
1590 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
1591 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
1592 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
1593 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
1594 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
1595 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
1596 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
1597 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
1598 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
1599 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1600 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1601 + */
1602 +
1603 +#ifndef _VCHI_MESSAGE_H_
1604 +#define _VCHI_MESSAGE_H_
1605 +
1606 +#include <linux/kernel.h>
1607 +#include <linux/types.h>
1608 +#include <linux/semaphore.h>
1609 +
1610 +#include "interface/vchi/vchi_cfg_internal.h"
1611 +#include "interface/vchi/vchi_common.h"
1612 +
1613 +
1614 +typedef enum message_event_type {
1615 + MESSAGE_EVENT_NONE,
1616 + MESSAGE_EVENT_NOP,
1617 + MESSAGE_EVENT_MESSAGE,
1618 + MESSAGE_EVENT_SLOT_COMPLETE,
1619 + MESSAGE_EVENT_RX_BULK_PAUSED,
1620 + MESSAGE_EVENT_RX_BULK_COMPLETE,
1621 + MESSAGE_EVENT_TX_COMPLETE,
1622 + MESSAGE_EVENT_MSG_DISCARDED
1623 +} MESSAGE_EVENT_TYPE_T;
1624 +
1625 +typedef enum vchi_msg_flags
1626 +{
1627 + VCHI_MSG_FLAGS_NONE = 0x0,
1628 + VCHI_MSG_FLAGS_TERMINATE_DMA = 0x1
1629 +} VCHI_MSG_FLAGS_T;
1630 +
1631 +typedef enum message_tx_channel
1632 +{
1633 + MESSAGE_TX_CHANNEL_MESSAGE = 0,
1634 + MESSAGE_TX_CHANNEL_BULK = 1 // drivers may provide multiple bulk channels, from 1 upwards
1635 +} MESSAGE_TX_CHANNEL_T;
1636 +
1637 +// Macros used for cycling through bulk channels
1638 +#define MESSAGE_TX_CHANNEL_BULK_PREV(c) (MESSAGE_TX_CHANNEL_BULK+((c)-MESSAGE_TX_CHANNEL_BULK+VCHI_MAX_BULK_TX_CHANNELS_PER_CONNECTION-1)%VCHI_MAX_BULK_TX_CHANNELS_PER_CONNECTION)
1639 +#define MESSAGE_TX_CHANNEL_BULK_NEXT(c) (MESSAGE_TX_CHANNEL_BULK+((c)-MESSAGE_TX_CHANNEL_BULK+1)%VCHI_MAX_BULK_TX_CHANNELS_PER_CONNECTION)
1640 +
1641 +typedef enum message_rx_channel
1642 +{
1643 + MESSAGE_RX_CHANNEL_MESSAGE = 0,
1644 + MESSAGE_RX_CHANNEL_BULK = 1 // drivers may provide multiple bulk channels, from 1 upwards
1645 +} MESSAGE_RX_CHANNEL_T;
1646 +
1647 +// Message receive slot information
1648 +typedef struct rx_msg_slot_info {
1649 +
1650 + struct rx_msg_slot_info *next;
1651 + //struct slot_info *prev;
1652 +#if !defined VCHI_COARSE_LOCKING
1653 + struct semaphore sem;
1654 +#endif
1655 +
1656 + uint8_t *addr; // base address of slot
1657 + uint32_t len; // length of slot in bytes
1658 +
1659 + uint32_t write_ptr; // hardware causes this to advance
1660 + uint32_t read_ptr; // this module does the reading
1661 + int active; // is this slot in the hardware dma fifo?
1662 + uint32_t msgs_parsed; // count how many messages are in this slot
1663 + uint32_t msgs_released; // how many messages have been released
1664 + void *state; // connection state information
1665 + uint8_t ref_count[VCHI_MAX_SERVICES_PER_CONNECTION]; // reference count for slots held by services
1666 +} RX_MSG_SLOTINFO_T;
1667 +
1668 +// The message driver no longer needs to know about the fields of RX_BULK_SLOTINFO_T - sort this out.
1669 +// In particular, it mustn't use addr and len - they're the client buffer, but the message
1670 +// driver will be tasked with sending the aligned core section.
1671 +typedef struct rx_bulk_slotinfo_t {
1672 + struct rx_bulk_slotinfo_t *next;
1673 +
1674 + struct semaphore *blocking;
1675 +
1676 + // needed by DMA
1677 + void *addr;
1678 + uint32_t len;
1679 +
1680 + // needed for the callback
1681 + void *service;
1682 + void *handle;
1683 + VCHI_FLAGS_T flags;
1684 +} RX_BULK_SLOTINFO_T;
1685 +
1686 +
1687 +/* ----------------------------------------------------------------------
1688 + * each connection driver will have a pool of the following struct.
1689 + *
1690 + * the pool will be managed by vchi_qman_*
1691 + * this means there will be multiple queues (single linked lists)
1692 + * a given struct message_info will be on exactly one of these queues
1693 + * at any one time
1694 + * -------------------------------------------------------------------- */
1695 +typedef struct rx_message_info {
1696 +
1697 + struct message_info *next;
1698 + //struct message_info *prev;
1699 +
1700 + uint8_t *addr;
1701 + uint32_t len;
1702 + RX_MSG_SLOTINFO_T *slot; // points to whichever slot contains this message
1703 + uint32_t tx_timestamp;
1704 + uint32_t rx_timestamp;
1705 +
1706 +} RX_MESSAGE_INFO_T;
1707 +
1708 +typedef struct {
1709 + MESSAGE_EVENT_TYPE_T type;
1710 +
1711 + struct {
1712 + // for messages
1713 + void *addr; // address of message
1714 + uint16_t slot_delta; // whether this message indicated slot delta
1715 + uint32_t len; // length of message
1716 + RX_MSG_SLOTINFO_T *slot; // slot this message is in
1717 + int32_t service; // service id this message is destined for
1718 + uint32_t tx_timestamp; // timestamp from the header
1719 + uint32_t rx_timestamp; // timestamp when we parsed it
1720 + } message;
1721 +
1722 + // FIXME: cleanup slot reporting...
1723 + RX_MSG_SLOTINFO_T *rx_msg;
1724 + RX_BULK_SLOTINFO_T *rx_bulk;
1725 + void *tx_handle;
1726 + MESSAGE_TX_CHANNEL_T tx_channel;
1727 +
1728 +} MESSAGE_EVENT_T;
1729 +
1730 +
1731 +// callbacks
1732 +typedef void VCHI_MESSAGE_DRIVER_EVENT_CALLBACK_T( void *state );
1733 +
1734 +typedef struct {
1735 + VCHI_MESSAGE_DRIVER_EVENT_CALLBACK_T *event_callback;
1736 +} VCHI_MESSAGE_DRIVER_OPEN_T;
1737 +
1738 +
1739 +// handle to this instance of message driver (as returned by ->open)
1740 +typedef struct opaque_mhandle_t *VCHI_MDRIVER_HANDLE_T;
1741 +
1742 +struct opaque_vchi_message_driver_t {
1743 + VCHI_MDRIVER_HANDLE_T *(*open)( VCHI_MESSAGE_DRIVER_OPEN_T *params, void *state );
1744 + int32_t (*suspending)( VCHI_MDRIVER_HANDLE_T *handle );
1745 + int32_t (*resumed)( VCHI_MDRIVER_HANDLE_T *handle );
1746 + int32_t (*power_control)( VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T, int32_t enable );
1747 + int32_t (*add_msg_rx_slot)( VCHI_MDRIVER_HANDLE_T *handle, RX_MSG_SLOTINFO_T *slot ); // rx message
1748 + int32_t (*add_bulk_rx)( VCHI_MDRIVER_HANDLE_T *handle, void *data, uint32_t len, RX_BULK_SLOTINFO_T *slot ); // rx data (bulk)
1749 + int32_t (*send)( VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T channel, const void *data, uint32_t len, VCHI_MSG_FLAGS_T flags, void *send_handle ); // tx (message & bulk)
1750 + void (*next_event)( VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_EVENT_T *event ); // get the next event from message_driver
1751 + int32_t (*enable)( VCHI_MDRIVER_HANDLE_T *handle );
1752 + int32_t (*form_message)( VCHI_MDRIVER_HANDLE_T *handle, int32_t service_id, VCHI_MSG_VECTOR_T *vector, uint32_t count, void
1753 + *address, uint32_t length_avail, uint32_t max_total_length, int32_t pad_to_fill, int32_t allow_partial );
1754 +
1755 + int32_t (*update_message)( VCHI_MDRIVER_HANDLE_T *handle, void *dest, int16_t *slot_count );
1756 + int32_t (*buffer_aligned)( VCHI_MDRIVER_HANDLE_T *handle, int tx, int uncached, const void *address, const uint32_t length );
1757 + void * (*allocate_buffer)( VCHI_MDRIVER_HANDLE_T *handle, uint32_t *length );
1758 + void (*free_buffer)( VCHI_MDRIVER_HANDLE_T *handle, void *address );
1759 + int (*rx_slot_size)( VCHI_MDRIVER_HANDLE_T *handle, int msg_size );
1760 + int (*tx_slot_size)( VCHI_MDRIVER_HANDLE_T *handle, int msg_size );
1761 +
1762 + int32_t (*tx_supports_terminate)( const VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T channel );
1763 + uint32_t (*tx_bulk_chunk_size)( const VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T channel );
1764 + int (*tx_alignment)( const VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T channel );
1765 + int (*rx_alignment)( const VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_RX_CHANNEL_T channel );
1766 + void (*form_bulk_aux)( VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T channel, const void *data, uint32_t len, uint32_t chunk_size, const void **aux_data, int32_t *aux_len );
1767 + void (*debug)( VCHI_MDRIVER_HANDLE_T *handle );
1768 +};
1769 +
1770 +
1771 +#endif // _VCHI_MESSAGE_H_
1772 +
1773 +/****************************** End of file ***********************************/
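The two channel-cycling macros above reduce to simple modular arithmetic over the bulk range. A worked check, assuming the default of 9 bulk TX channels that vchi_cfg.h sets later in this patch:

/* With VCHI_MAX_BULK_TX_CHANNELS_PER_CONNECTION == 9, the bulk channels
 * occupy MESSAGE_TX_CHANNEL_BULK .. MESSAGE_TX_CHANNEL_BULK + 8, i.e. 1..9.
 *
 *   MESSAGE_TX_CHANNEL_BULK_NEXT(c) == 1 + ((c - 1 + 1) % 9) == 1 + (c % 9)
 *     NEXT(1) -> 2,  NEXT(8) -> 9,  NEXT(9) -> 1   (wraps to the first channel)
 *
 *   MESSAGE_TX_CHANNEL_BULK_PREV(c) == 1 + ((c - 1 + 8) % 9)
 *     PREV(2) -> 1,  PREV(1) -> 9                  (wraps to the last channel)
 */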
1774 diff -urN linux-3.10/drivers/misc/vc04_services/interface/vchi/vchi_cfg.h linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchi/vchi_cfg.h
1775 --- linux-3.10/drivers/misc/vc04_services/interface/vchi/vchi_cfg.h 1970-01-01 01:00:00.000000000 +0100
1776 +++ linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchi/vchi_cfg.h 2013-07-06 15:25:50.000000000 +0100
1777 @@ -0,0 +1,224 @@
1778 +/**
1779 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
1780 + *
1781 + * Redistribution and use in source and binary forms, with or without
1782 + * modification, are permitted provided that the following conditions
1783 + * are met:
1784 + * 1. Redistributions of source code must retain the above copyright
1785 + * notice, this list of conditions, and the following disclaimer,
1786 + * without modification.
1787 + * 2. Redistributions in binary form must reproduce the above copyright
1788 + * notice, this list of conditions and the following disclaimer in the
1789 + * documentation and/or other materials provided with the distribution.
1790 + * 3. The names of the above-listed copyright holders may not be used
1791 + * to endorse or promote products derived from this software without
1792 + * specific prior written permission.
1793 + *
1794 + * ALTERNATIVELY, this software may be distributed under the terms of the
1795 + * GNU General Public License ("GPL") version 2, as published by the Free
1796 + * Software Foundation.
1797 + *
1798 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
1799 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
1800 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
1801 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
1802 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
1803 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
1804 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
1805 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
1806 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
1807 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1808 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1809 + */
1810 +
1811 +#ifndef VCHI_CFG_H_
1812 +#define VCHI_CFG_H_
1813 +
1814 +/****************************************************************************************
1815 + * Defines in this first section are part of the VCHI API and may be examined by VCHI
1816 + * services.
1817 + ***************************************************************************************/
1818 +
1819 +/* Required alignment of base addresses for bulk transfer, if unaligned transfers are not enabled */
1820 +/* Really determined by the message driver, and should be available from a run-time call. */
1821 +#ifndef VCHI_BULK_ALIGN
1822 +# if __VCCOREVER__ >= 0x04000000
1823 +# define VCHI_BULK_ALIGN 32 // Allows for the need to do cache cleans
1824 +# else
1825 +# define VCHI_BULK_ALIGN 16
1826 +# endif
1827 +#endif
1828 +
1829 +/* Required length multiple for bulk transfers, if unaligned transfers are not enabled */
1830 +/* May be less than or greater than VCHI_BULK_ALIGN */
1831 +/* Really determined by the message driver, and should be available from a run-time call. */
1832 +#ifndef VCHI_BULK_GRANULARITY
1833 +# if __VCCOREVER__ >= 0x04000000
1834 +# define VCHI_BULK_GRANULARITY 32 // Allows for the need to do cache cleans
1835 +# else
1836 +# define VCHI_BULK_GRANULARITY 16
1837 +# endif
1838 +#endif
1839 +
1840 +/* The largest possible message to be queued with vchi_msg_queue. */
1841 +#ifndef VCHI_MAX_MSG_SIZE
1842 +# if defined VCHI_LOCAL_HOST_PORT
1843 +# define VCHI_MAX_MSG_SIZE 16384 // makes file transfers fast, but should they be using bulk?
1844 +# else
1845 +# define VCHI_MAX_MSG_SIZE 4096 // NOTE: THIS MUST BE LARGER THAN OR EQUAL TO THE SIZE OF THE KHRONOS MERGE BUFFER!!
1846 +# endif
1847 +#endif
1848 +
1849 +/******************************************************************************************
1850 + * Defines below are system configuration options, and should not be used by VCHI services.
1851 + *****************************************************************************************/
1852 +
1853 +/* How many connections can we support? A localhost implementation uses 2 connections,
1854 + * 1 for host-app, 1 for VMCS, and these are hooked together by a loopback MPHI VCFW
1855 + * driver. */
1856 +#ifndef VCHI_MAX_NUM_CONNECTIONS
1857 +# define VCHI_MAX_NUM_CONNECTIONS 3
1858 +#endif
1859 +
1860 +/* How many services can we open per connection? Extending this doesn't cost processing time, just a small
1861 + * amount of static memory. */
1862 +#ifndef VCHI_MAX_SERVICES_PER_CONNECTION
1863 +# define VCHI_MAX_SERVICES_PER_CONNECTION 36
1864 +#endif
1865 +
1866 +/* Adjust if using a message driver that supports more logical TX channels */
1867 +#ifndef VCHI_MAX_BULK_TX_CHANNELS_PER_CONNECTION
1868 +# define VCHI_MAX_BULK_TX_CHANNELS_PER_CONNECTION 9 // 1 MPHI + 8 CCP2 logical channels
1869 +#endif
1870 +
1871 +/* Adjust if using a message driver that supports more logical RX channels */
1872 +#ifndef VCHI_MAX_BULK_RX_CHANNELS_PER_CONNECTION
1873 +# define VCHI_MAX_BULK_RX_CHANNELS_PER_CONNECTION 1 // 1 MPHI
1874 +#endif
1875 +
1876 +/* How many receive slots do we use. This times VCHI_MAX_MSG_SIZE gives the effective
1877 + * receive queue space, less message headers. */
1878 +#ifndef VCHI_NUM_READ_SLOTS
1879 +# if defined(VCHI_LOCAL_HOST_PORT)
1880 +# define VCHI_NUM_READ_SLOTS 4
1881 +# else
1882 +# define VCHI_NUM_READ_SLOTS 48
1883 +# endif
1884 +#endif
1885 +
1886 +/* Do we utilise overrun facility for receive message slots? Can aid peer transmit
1887 + * performance. Only define on VideoCore end, talking to host.
1888 + */
1889 +//#define VCHI_MSG_RX_OVERRUN
1890 +
1891 +/* How many transmit slots do we use. Generally don't need many, as the hardware driver
1892 + * underneath VCHI will usually have its own buffering. */
1893 +#ifndef VCHI_NUM_WRITE_SLOTS
1894 +# define VCHI_NUM_WRITE_SLOTS 4
1895 +#endif
1896 +
1897 +/* If a service has held or queued received messages in VCHI_XOFF_THRESHOLD or more slots,
1898 + * then it's taking up too much buffer space, and the peer service will be told to stop
1899 + * transmitting with an XOFF message. For this to be effective, the VCHI_NUM_READ_SLOTS
1900 + * needs to be considerably bigger than VCHI_NUM_WRITE_SLOTS, or the transmit latency
1901 + * is too high. */
1902 +#ifndef VCHI_XOFF_THRESHOLD
1903 +# define VCHI_XOFF_THRESHOLD (VCHI_NUM_READ_SLOTS / 2)
1904 +#endif
1905 +
1906 +/* After we've sent an XOFF, the peer will be told to resume transmission once the local
1907 + * service has dequeued/released enough messages that it's now occupying
1908 + * VCHI_XON_THRESHOLD slots or fewer. */
1909 +#ifndef VCHI_XON_THRESHOLD
1910 +# define VCHI_XON_THRESHOLD (VCHI_NUM_READ_SLOTS / 4)
1911 +#endif
1912 +
1913 +/* A size below which a bulk transfer omits the handshake completely and always goes
1914 + * via the message channel, if bulk auxiliary is being sent on that service. (The user
1915 + * can guarantee this by enabling unaligned transmits).
1916 + * Not API. */
1917 +#ifndef VCHI_MIN_BULK_SIZE
1918 +# define VCHI_MIN_BULK_SIZE ( VCHI_MAX_MSG_SIZE / 2 < 4096 ? VCHI_MAX_MSG_SIZE / 2 : 4096 )
1919 +#endif
1920 +
1921 +/* Maximum size of bulk transmission chunks, for each interface type. A trade-off between
1922 + * speed and latency; the smaller the chunk size the better chance of messages and other
1923 + * bulk transmissions getting in when big bulk transfers are happening. Set to 0 to not
1924 + * break transmissions into chunks.
1925 + */
1926 +#ifndef VCHI_MAX_BULK_CHUNK_SIZE_MPHI
1927 +# define VCHI_MAX_BULK_CHUNK_SIZE_MPHI (16 * 1024)
1928 +#endif
1929 +
1930 +/* NB Chunked CCP2 transmissions violate the letter of the CCP2 spec by using "JPEG8" mode
1931 + * with multiple-line frames. Only use if the receiver can cope. */
1932 +#ifndef VCHI_MAX_BULK_CHUNK_SIZE_CCP2
1933 +# define VCHI_MAX_BULK_CHUNK_SIZE_CCP2 0
1934 +#endif
1935 +
1936 +/* How many TX messages can we have pending in our transmit slots. Once exhausted,
1937 + * vchi_msg_queue will be blocked. */
1938 +#ifndef VCHI_TX_MSG_QUEUE_SIZE
1939 +# define VCHI_TX_MSG_QUEUE_SIZE 256
1940 +#endif
1941 +
1942 +/* How many RX messages can we have parsed in the receive slots. Once exhausted, parsing
1943 + * will be suspended until older messages are dequeued/released. */
1944 +#ifndef VCHI_RX_MSG_QUEUE_SIZE
1945 +# define VCHI_RX_MSG_QUEUE_SIZE 256
1946 +#endif
1947 +
1948 +/* Really should be able to cope if we run out of received message descriptors, by
1949 + * suspending parsing as the comment above says, but we don't. This sweeps the issue
1950 + * under the carpet. */
1951 +#if VCHI_RX_MSG_QUEUE_SIZE < (VCHI_MAX_MSG_SIZE/16 + 1) * VCHI_NUM_READ_SLOTS
1952 +# undef VCHI_RX_MSG_QUEUE_SIZE
1953 +# define VCHI_RX_MSG_QUEUE_SIZE (VCHI_MAX_MSG_SIZE/16 + 1) * VCHI_NUM_READ_SLOTS
1954 +#endif
1955 +
1956 +/* How many bulk transmits can we have pending. Once exhausted, vchi_bulk_queue_transmit
1957 + * will be blocked. */
1958 +#ifndef VCHI_TX_BULK_QUEUE_SIZE
1959 +# define VCHI_TX_BULK_QUEUE_SIZE 64
1960 +#endif
1961 +
1962 +/* How many bulk receives can we have pending. Once exhausted, vchi_bulk_queue_receive
1963 + * will be blocked. */
1964 +#ifndef VCHI_RX_BULK_QUEUE_SIZE
1965 +# define VCHI_RX_BULK_QUEUE_SIZE 64
1966 +#endif
1967 +
1968 +/* A limit on how many outstanding bulk requests we expect the peer to give us. If
1969 + * the peer asks for more than this, VCHI will fail and assert. The number is determined
1970 + * by the peer's hardware - it's the number of outstanding requests that can be queued
1971 + * on all bulk channels. VC3's MPHI peripheral allows 16. */
1972 +#ifndef VCHI_MAX_PEER_BULK_REQUESTS
1973 +# define VCHI_MAX_PEER_BULK_REQUESTS 32
1974 +#endif
1975 +
1976 +/* Define VCHI_CCP2TX_MANUAL_POWER if the host tells us when to turn the CCP2
1977 + * transmitter on and off.
1978 + */
1979 +/*#define VCHI_CCP2TX_MANUAL_POWER*/
1980 +
1981 +#ifndef VCHI_CCP2TX_MANUAL_POWER
1982 +
1983 +/* Timeout (in milliseconds) for putting the CCP2TX interface into IDLE state. Set
1984 + * negative for no IDLE.
1985 + */
1986 +# ifndef VCHI_CCP2TX_IDLE_TIMEOUT
1987 +# define VCHI_CCP2TX_IDLE_TIMEOUT 5
1988 +# endif
1989 +
1990 +/* Timeout (in milliseconds) for putting the CCP2TX interface into OFF state. Set
1991 + * negative for no OFF.
1992 + */
1993 +# ifndef VCHI_CCP2TX_OFF_TIMEOUT
1994 +# define VCHI_CCP2TX_OFF_TIMEOUT 1000
1995 +# endif
1996 +
1997 +#endif /* VCHI_CCP2TX_MANUAL_POWER */
1998 +
1999 +#endif /* VCHI_CFG_H_ */
2000 +
2001 +/****************************** End of file **********************************/
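Every value in vchi_cfg.h sits behind an #ifndef guard, so a platform can override it before the header is included and the derived flow-control thresholds follow automatically. A minimal sketch; the override value of 32 is purely illustrative:

/* Hypothetical build-time override, supplied via -D on the compiler command
 * line or from a platform header included ahead of vchi_cfg.h. */
#define VCHI_NUM_READ_SLOTS 32          /* RX queue space ~= 32 * VCHI_MAX_MSG_SIZE */

#include "interface/vchi/vchi_cfg.h"

/* With that override the derived thresholds become:
 *   VCHI_XOFF_THRESHOLD = 32 / 2 = 16 held slots -> peer is told to stop sending
 *   VCHI_XON_THRESHOLD  = 32 / 4 = 8  held slots -> peer is told to resume
 */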
2002 diff -urN linux-3.10/drivers/misc/vc04_services/interface/vchi/vchi_cfg_internal.h linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchi/vchi_cfg_internal.h
2003 --- linux-3.10/drivers/misc/vc04_services/interface/vchi/vchi_cfg_internal.h 1970-01-01 01:00:00.000000000 +0100
2004 +++ linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchi/vchi_cfg_internal.h 2013-07-06 15:25:50.000000000 +0100
2005 @@ -0,0 +1,71 @@
2006 +/**
2007 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
2008 + *
2009 + * Redistribution and use in source and binary forms, with or without
2010 + * modification, are permitted provided that the following conditions
2011 + * are met:
2012 + * 1. Redistributions of source code must retain the above copyright
2013 + * notice, this list of conditions, and the following disclaimer,
2014 + * without modification.
2015 + * 2. Redistributions in binary form must reproduce the above copyright
2016 + * notice, this list of conditions and the following disclaimer in the
2017 + * documentation and/or other materials provided with the distribution.
2018 + * 3. The names of the above-listed copyright holders may not be used
2019 + * to endorse or promote products derived from this software without
2020 + * specific prior written permission.
2021 + *
2022 + * ALTERNATIVELY, this software may be distributed under the terms of the
2023 + * GNU General Public License ("GPL") version 2, as published by the Free
2024 + * Software Foundation.
2025 + *
2026 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
2027 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
2028 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
2029 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
2030 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
2031 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
2032 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
2033 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
2034 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
2035 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
2036 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
2037 + */
2038 +
2039 +#ifndef VCHI_CFG_INTERNAL_H_
2040 +#define VCHI_CFG_INTERNAL_H_
2041 +
2042 +/****************************************************************************************
2043 + * Control optimisation attempts.
2044 + ***************************************************************************************/
2045 +
2046 +// Don't use lots of short-term locks - use great long ones, reducing the overall locks-per-second
2047 +#define VCHI_COARSE_LOCKING
2048 +
2049 +// Avoid lock then unlock on exit from blocking queue operations (msg tx, bulk rx/tx)
2050 +// (only relevant if VCHI_COARSE_LOCKING)
2051 +#define VCHI_ELIDE_BLOCK_EXIT_LOCK
2052 +
2053 +// Avoid lock on non-blocking peek
2054 +// (only relevant if VCHI_COARSE_LOCKING)
2055 +#define VCHI_AVOID_PEEK_LOCK
2056 +
2057 +// Use one slot-handler thread per connection, rather than 1 thread dealing with all connections in rotation.
2058 +#define VCHI_MULTIPLE_HANDLER_THREADS
2059 +
2060 +// Put free descriptors onto the head of the free queue, rather than the tail, so that we don't thrash
2061 +// our way through the pool of descriptors.
2062 +#define VCHI_PUSH_FREE_DESCRIPTORS_ONTO_HEAD
2063 +
2064 +// Don't issue a MSG_AVAILABLE callback for every single message. Possibly only safe if VCHI_COARSE_LOCKING.
2065 +#define VCHI_FEWER_MSG_AVAILABLE_CALLBACKS
2066 +
2067 +// Don't use message descriptors for TX messages that don't need them
2068 +#define VCHI_MINIMISE_TX_MSG_DESCRIPTORS
2069 +
2070 +// Nano-locks for multiqueue
2071 +//#define VCHI_MQUEUE_NANOLOCKS
2072 +
2073 +// Lock-free(er) dequeuing
2074 +//#define VCHI_RX_NANOLOCKS
2075 +
2076 +#endif /*VCHI_CFG_INTERNAL_H_*/
2077 diff -urN linux-3.10/drivers/misc/vc04_services/interface/vchi/vchi_common.h linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchi/vchi_common.h
2078 --- linux-3.10/drivers/misc/vc04_services/interface/vchi/vchi_common.h 1970-01-01 01:00:00.000000000 +0100
2079 +++ linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchi/vchi_common.h 2013-07-06 15:25:50.000000000 +0100
2080 @@ -0,0 +1,163 @@
2081 +/**
2082 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
2083 + *
2084 + * Redistribution and use in source and binary forms, with or without
2085 + * modification, are permitted provided that the following conditions
2086 + * are met:
2087 + * 1. Redistributions of source code must retain the above copyright
2088 + * notice, this list of conditions, and the following disclaimer,
2089 + * without modification.
2090 + * 2. Redistributions in binary form must reproduce the above copyright
2091 + * notice, this list of conditions and the following disclaimer in the
2092 + * documentation and/or other materials provided with the distribution.
2093 + * 3. The names of the above-listed copyright holders may not be used
2094 + * to endorse or promote products derived from this software without
2095 + * specific prior written permission.
2096 + *
2097 + * ALTERNATIVELY, this software may be distributed under the terms of the
2098 + * GNU General Public License ("GPL") version 2, as published by the Free
2099 + * Software Foundation.
2100 + *
2101 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
2102 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
2103 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
2104 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
2105 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
2106 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
2107 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
2108 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
2109 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
2110 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
2111 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
2112 + */
2113 +
2114 +#ifndef VCHI_COMMON_H_
2115 +#define VCHI_COMMON_H_
2116 +
2117 +
2118 +//flags used when sending messages (must be bitmapped)
2119 +typedef enum
2120 +{
2121 + VCHI_FLAGS_NONE = 0x0,
2122 + VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE = 0x1, // waits for message to be received, or sent (NB. not the same as being seen on other side)
2123 + VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE = 0x2, // run a callback when message sent
2124 + VCHI_FLAGS_BLOCK_UNTIL_QUEUED = 0x4, // return once the transfer is in a queue ready to go
2125 + VCHI_FLAGS_ALLOW_PARTIAL = 0x8,
2126 + VCHI_FLAGS_BLOCK_UNTIL_DATA_READ = 0x10,
2127 + VCHI_FLAGS_CALLBACK_WHEN_DATA_READ = 0x20,
2128 +
2129 + VCHI_FLAGS_ALIGN_SLOT = 0x000080, // internal use only
2130 + VCHI_FLAGS_BULK_AUX_QUEUED = 0x010000, // internal use only
2131 + VCHI_FLAGS_BULK_AUX_COMPLETE = 0x020000, // internal use only
2132 + VCHI_FLAGS_BULK_DATA_QUEUED = 0x040000, // internal use only
2133 + VCHI_FLAGS_BULK_DATA_COMPLETE = 0x080000, // internal use only
2134 + VCHI_FLAGS_INTERNAL = 0xFF0000
2135 +} VCHI_FLAGS_T;
2136 +
2137 +// constants for vchi_crc_control()
2138 +typedef enum {
2139 + VCHI_CRC_NOTHING = -1,
2140 + VCHI_CRC_PER_SERVICE = 0,
2141 + VCHI_CRC_EVERYTHING = 1,
2142 +} VCHI_CRC_CONTROL_T;
2143 +
2144 +//callback reasons when an event occurs on a service
2145 +typedef enum
2146 +{
2147 + VCHI_CALLBACK_REASON_MIN,
2148 +
2149 + //This indicates that there is data available
2150 + //handle is the msg id that was transmitted with the data
2151 + // When a message is received and there was no FULL message available previously, send callback
2152 + // Tasks get kicked by the callback, reset their event and try and read from the fifo until it fails
2153 + VCHI_CALLBACK_MSG_AVAILABLE,
2154 + VCHI_CALLBACK_MSG_SENT,
2155 + VCHI_CALLBACK_MSG_SPACE_AVAILABLE, // XXX not yet implemented
2156 +
2157 + // This indicates that a transfer from the other side has completed
2158 + VCHI_CALLBACK_BULK_RECEIVED,
2159 + //This indicates that data queued up to be sent has now gone
2160 + //handle is the msg id that was used when sending the data
2161 + VCHI_CALLBACK_BULK_SENT,
2162 + VCHI_CALLBACK_BULK_RX_SPACE_AVAILABLE, // XXX not yet implemented
2163 + VCHI_CALLBACK_BULK_TX_SPACE_AVAILABLE, // XXX not yet implemented
2164 +
2165 + VCHI_CALLBACK_SERVICE_CLOSED,
2166 +
2167 + // this side has sent XOFF to peer due to lack of data consumption by service
2168 + // (suggests the service may need to take some recovery action if it has
2169 + // been deliberately holding off consuming data)
2170 + VCHI_CALLBACK_SENT_XOFF,
2171 + VCHI_CALLBACK_SENT_XON,
2172 +
2173 + // indicates that a bulk transfer has finished reading the source buffer
2174 + VCHI_CALLBACK_BULK_DATA_READ,
2175 +
2176 + // power notification events (currently host side only)
2177 + VCHI_CALLBACK_PEER_OFF,
2178 + VCHI_CALLBACK_PEER_SUSPENDED,
2179 + VCHI_CALLBACK_PEER_ON,
2180 + VCHI_CALLBACK_PEER_RESUMED,
2181 + VCHI_CALLBACK_FORCED_POWER_OFF,
2182 +
2183 +#ifdef USE_VCHIQ_ARM
2184 + // some extra notifications provided by vchiq_arm
2185 + VCHI_CALLBACK_SERVICE_OPENED,
2186 + VCHI_CALLBACK_BULK_RECEIVE_ABORTED,
2187 + VCHI_CALLBACK_BULK_TRANSMIT_ABORTED,
2188 +#endif
2189 +
2190 + VCHI_CALLBACK_REASON_MAX
2191 +} VCHI_CALLBACK_REASON_T;
2192 +
2193 +//Callback used by all services / bulk transfers
2194 +typedef void (*VCHI_CALLBACK_T)( void *callback_param, //my service local param
2195 + VCHI_CALLBACK_REASON_T reason,
2196 + void *handle ); //for transmitting msg's only
2197 +
2198 +
2199 +
2200 +/*
2201 + * Define vector struct for scatter-gather (vector) operations
2202 + * Vectors can be nested - if a vector element has negative length, then
2203 + * the data pointer is treated as pointing to another vector array, with
2204 + * '-vec_len' elements. Thus to append a header onto an existing vector,
2205 + * you can do this:
2206 + *
2207 + * void foo(const VCHI_MSG_VECTOR_T *v, int n)
2208 + * {
2209 + * VCHI_MSG_VECTOR_T nv[2];
2210 + * nv[0].vec_base = my_header;
2211 + * nv[0].vec_len = sizeof my_header;
2212 + * nv[1].vec_base = v;
2213 + * nv[1].vec_len = -n;
2214 + * ...
2215 + *
2216 + */
2217 +typedef struct vchi_msg_vector {
2218 + const void *vec_base;
2219 + int32_t vec_len;
2220 +} VCHI_MSG_VECTOR_T;
2221 +
2222 +// Opaque type for a connection API
2223 +typedef struct opaque_vchi_connection_api_t VCHI_CONNECTION_API_T;
2224 +
2225 +// Opaque type for a message driver
2226 +typedef struct opaque_vchi_message_driver_t VCHI_MESSAGE_DRIVER_T;
2227 +
2228 +
2229 +// Iterator structure for reading ahead through received message queue. Allocated by client,
2230 +// initialised by vchi_msg_look_ahead. Fields are for internal VCHI use only.
2231 +// Iterates over messages in queue at the instant of the call to vchi_msg_look_ahead -
2232 +// will not proceed to messages received since. Behaviour is undefined if an iterator
2233 +// is used again after messages for that service are removed/dequeued by any
2234 +// means other than vchi_msg_iter_... calls on the iterator itself.
2235 +typedef struct {
2236 + struct opaque_vchi_service_t *service;
2237 + void *last;
2238 + void *next;
2239 + void *remove;
2240 +} VCHI_MSG_ITER_T;
2241 +
2242 +
2243 +#endif // VCHI_COMMON_H_
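The comments on VCHI_CALLBACK_MSG_AVAILABLE above describe the intended pattern: the callback only kicks a waiting task, which then drains the queue until a read fails. A hedged sketch of such a callback; struct my_service and its members are illustrative and not part of this patch:

#include <linux/semaphore.h>
#include <linux/completion.h>

#include "interface/vchi/vchi_common.h"

/* Illustrative per-service state owned by the client driver. */
struct my_service {
	struct semaphore msg_avail;
	struct completion bulk_done;
};

static void my_service_callback(void *callback_param,
				VCHI_CALLBACK_REASON_T reason,
				void *handle)
{
	struct my_service *svc = callback_param;

	switch (reason) {
	case VCHI_CALLBACK_MSG_AVAILABLE:
		/* One or more messages arrived: wake the reader task, which
		 * calls vchi_msg_dequeue() in a loop until it fails. */
		up(&svc->msg_avail);
		break;
	case VCHI_CALLBACK_BULK_RECEIVED:
	case VCHI_CALLBACK_BULK_SENT:
		/* 'handle' is the transfer handle supplied when the bulk
		 * operation was queued. */
		complete(&svc->bulk_done);
		break;
	default:
		break;
	}
}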
2244 diff -urN linux-3.10/drivers/misc/vc04_services/interface/vchi/vchi.h linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchi/vchi.h
2245 --- linux-3.10/drivers/misc/vc04_services/interface/vchi/vchi.h 1970-01-01 01:00:00.000000000 +0100
2246 +++ linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchi/vchi.h 2013-07-06 15:25:50.000000000 +0100
2247 @@ -0,0 +1,373 @@
2248 +/**
2249 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
2250 + *
2251 + * Redistribution and use in source and binary forms, with or without
2252 + * modification, are permitted provided that the following conditions
2253 + * are met:
2254 + * 1. Redistributions of source code must retain the above copyright
2255 + * notice, this list of conditions, and the following disclaimer,
2256 + * without modification.
2257 + * 2. Redistributions in binary form must reproduce the above copyright
2258 + * notice, this list of conditions and the following disclaimer in the
2259 + * documentation and/or other materials provided with the distribution.
2260 + * 3. The names of the above-listed copyright holders may not be used
2261 + * to endorse or promote products derived from this software without
2262 + * specific prior written permission.
2263 + *
2264 + * ALTERNATIVELY, this software may be distributed under the terms of the
2265 + * GNU General Public License ("GPL") version 2, as published by the Free
2266 + * Software Foundation.
2267 + *
2268 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
2269 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
2270 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
2271 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
2272 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
2273 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
2274 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
2275 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
2276 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
2277 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
2278 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
2279 + */
2280 +
2281 +#ifndef VCHI_H_
2282 +#define VCHI_H_
2283 +
2284 +#include "interface/vchi/vchi_cfg.h"
2285 +#include "interface/vchi/vchi_common.h"
2286 +#include "interface/vchi/connections/connection.h"
2287 +#include "vchi_mh.h"
2288 +
2289 +
2290 +/******************************************************************************
2291 + Global defs
2292 + *****************************************************************************/
2293 +
2294 +#define VCHI_BULK_ROUND_UP(x) ((((unsigned long)(x))+VCHI_BULK_ALIGN-1) & ~(VCHI_BULK_ALIGN-1))
2295 +#define VCHI_BULK_ROUND_DOWN(x) (((unsigned long)(x)) & ~(VCHI_BULK_ALIGN-1))
2296 +#define VCHI_BULK_ALIGN_NBYTES(x) (VCHI_BULK_ALIGNED(x) ? 0 : (VCHI_BULK_ALIGN - ((unsigned long)(x) & (VCHI_BULK_ALIGN-1))))
2297 +
2298 +#ifdef USE_VCHIQ_ARM
2299 +#define VCHI_BULK_ALIGNED(x) 1
2300 +#else
2301 +#define VCHI_BULK_ALIGNED(x) (((unsigned long)(x) & (VCHI_BULK_ALIGN-1)) == 0)
2302 +#endif
2303 +
2304 +struct vchi_version {
2305 + uint32_t version;
2306 + uint32_t version_min;
2307 +};
2308 +#define VCHI_VERSION(v_) { v_, v_ }
2309 +#define VCHI_VERSION_EX(v_, m_) { v_, m_ }
2310 +
2311 +typedef enum
2312 +{
2313 + VCHI_VEC_POINTER,
2314 + VCHI_VEC_HANDLE,
2315 + VCHI_VEC_LIST
2316 +} VCHI_MSG_VECTOR_TYPE_T;
2317 +
2318 +typedef struct vchi_msg_vector_ex {
2319 +
2320 + VCHI_MSG_VECTOR_TYPE_T type;
2321 + union
2322 + {
2323 + // a memory handle
2324 + struct
2325 + {
2326 + VCHI_MEM_HANDLE_T handle;
2327 + uint32_t offset;
2328 + int32_t vec_len;
2329 + } handle;
2330 +
2331 + // an ordinary data pointer
2332 + struct
2333 + {
2334 + const void *vec_base;
2335 + int32_t vec_len;
2336 + } ptr;
2337 +
2338 + // a nested vector list
2339 + struct
2340 + {
2341 + struct vchi_msg_vector_ex *vec;
2342 + uint32_t vec_len;
2343 + } list;
2344 + } u;
2345 +} VCHI_MSG_VECTOR_EX_T;
2346 +
2347 +
2348 +// Construct an entry in a msg vector for a pointer (p) of length (l)
2349 +#define VCHI_VEC_POINTER(p,l) VCHI_VEC_POINTER, { { (VCHI_MEM_HANDLE_T)(p), (l) } }
2350 +
2351 +// Construct an entry in a msg vector for a message handle (h), starting at offset (o) of length (l)
2352 +#define VCHI_VEC_HANDLE(h,o,l) VCHI_VEC_HANDLE, { { (h), (o), (l) } }
2353 +
2354 +// Macros to manipulate 'FOURCC' values
2355 +#define MAKE_FOURCC(x) ((int32_t)( (x[0] << 24) | (x[1] << 16) | (x[2] << 8) | x[3] ))
2356 +#define FOURCC_TO_CHAR(x) (x >> 24) & 0xFF,(x >> 16) & 0xFF,(x >> 8) & 0xFF, x & 0xFF
2357 +
2358 +
2359 +// Opaque service information
2360 +struct opaque_vchi_service_t;
2361 +
2362 +// Descriptor for a held message. Allocated by client, initialised by vchi_msg_hold,
2363 +// vchi_msg_iter_hold or vchi_msg_iter_hold_next. Fields are for internal VCHI use only.
2364 +typedef struct
2365 +{
2366 + struct opaque_vchi_service_t *service;
2367 + void *message;
2368 +} VCHI_HELD_MSG_T;
2369 +
2370 +
2371 +
2372 +// structure used to provide the information needed to open a server or a client
2373 +typedef struct {
2374 + struct vchi_version version;
2375 + int32_t service_id;
2376 + VCHI_CONNECTION_T *connection;
2377 + uint32_t rx_fifo_size;
2378 + uint32_t tx_fifo_size;
2379 + VCHI_CALLBACK_T callback;
2380 + void *callback_param;
2381 + /* client intends to receive bulk transfers of
2382 + odd lengths or into unaligned buffers */
2383 + int32_t want_unaligned_bulk_rx;
2384 + /* client intends to transmit bulk transfers of
2385 + odd lengths or out of unaligned buffers */
2386 + int32_t want_unaligned_bulk_tx;
2387 + /* client wants to check CRCs on (bulk) xfers.
2388 + Only needs to be set at 1 end - will do both directions. */
2389 + int32_t want_crc;
2390 +} SERVICE_CREATION_T;
2391 +
2392 +// Opaque handle for a VCHI instance
2393 +typedef struct opaque_vchi_instance_handle_t *VCHI_INSTANCE_T;
2394 +
2395 +// Opaque handle for a server or client
2396 +typedef struct opaque_vchi_service_handle_t *VCHI_SERVICE_HANDLE_T;
2397 +
2398 +// Service registration & startup
2399 +typedef void (*VCHI_SERVICE_INIT)(VCHI_INSTANCE_T initialise_instance, VCHI_CONNECTION_T **connections, uint32_t num_connections);
2400 +
2401 +typedef struct service_info_tag {
2402 + const char * const vll_filename; /* VLL to load to start this service. This is an empty string if VLL is "static" */
2403 + VCHI_SERVICE_INIT init; /* Service initialisation function */
2404 + void *vll_handle; /* VLL handle; NULL when unloaded or a "static VLL" in build */
2405 +} SERVICE_INFO_T;
2406 +
2407 +/******************************************************************************
2408 + Global funcs - implementation is specific to which side you are on (local / remote)
2409 + *****************************************************************************/
2410 +
2411 +#ifdef __cplusplus
2412 +extern "C" {
2413 +#endif
2414 +
2415 +extern /*@observer@*/ VCHI_CONNECTION_T * vchi_create_connection( const VCHI_CONNECTION_API_T * function_table,
2416 + const VCHI_MESSAGE_DRIVER_T * low_level);
2417 +
2418 +
2419 +// Routine used to initialise the vchi on both local + remote connections
2420 +extern int32_t vchi_initialise( VCHI_INSTANCE_T *instance_handle );
2421 +
2422 +extern int32_t vchi_exit( void );
2423 +
2424 +extern int32_t vchi_connect( VCHI_CONNECTION_T **connections,
2425 + const uint32_t num_connections,
2426 + VCHI_INSTANCE_T instance_handle );
2427 +
2428 +//When this is called, ensure that all services have no data pending.
2429 +//Bulk transfers can remain 'queued'
2430 +extern int32_t vchi_disconnect( VCHI_INSTANCE_T instance_handle );
2431 +
2432 +// Global control over bulk CRC checking
2433 +extern int32_t vchi_crc_control( VCHI_CONNECTION_T *connection,
2434 + VCHI_CRC_CONTROL_T control );
2435 +
2436 +// helper functions
2437 +extern void * vchi_allocate_buffer(VCHI_SERVICE_HANDLE_T handle, uint32_t *length);
2438 +extern void vchi_free_buffer(VCHI_SERVICE_HANDLE_T handle, void *address);
2439 +extern uint32_t vchi_current_time(VCHI_INSTANCE_T instance_handle);
2440 +
2441 +
2442 +/******************************************************************************
2443 + Global service API
2444 + *****************************************************************************/
2445 +// Routine to create a named service
2446 +extern int32_t vchi_service_create( VCHI_INSTANCE_T instance_handle,
2447 + SERVICE_CREATION_T *setup,
2448 + VCHI_SERVICE_HANDLE_T *handle );
2449 +
2450 +// Routine to destroy a service
2451 +extern int32_t vchi_service_destroy( const VCHI_SERVICE_HANDLE_T handle );
2452 +
2453 +// Routine to open a named service
2454 +extern int32_t vchi_service_open( VCHI_INSTANCE_T instance_handle,
2455 + SERVICE_CREATION_T *setup,
2456 + VCHI_SERVICE_HANDLE_T *handle);
2457 +
2458 +extern int32_t vchi_get_peer_version( const VCHI_SERVICE_HANDLE_T handle,
2459 + short *peer_version );
2460 +
2461 +// Routine to close a named service
2462 +extern int32_t vchi_service_close( const VCHI_SERVICE_HANDLE_T handle );
2463 +
2464 +// Routine to increment ref count on a named service
2465 +extern int32_t vchi_service_use( const VCHI_SERVICE_HANDLE_T handle );
2466 +
2467 +// Routine to decrement ref count on a named service
2468 +extern int32_t vchi_service_release( const VCHI_SERVICE_HANDLE_T handle );
2469 +
2470 +// Routine to send a message across a service
2471 +extern int32_t vchi_msg_queue( VCHI_SERVICE_HANDLE_T handle,
2472 + const void *data,
2473 + uint32_t data_size,
2474 + VCHI_FLAGS_T flags,
2475 + void *msg_handle );
2476 +
2477 +// scatter-gather (vector) and send message
2478 +int32_t vchi_msg_queuev_ex( VCHI_SERVICE_HANDLE_T handle,
2479 + VCHI_MSG_VECTOR_EX_T *vector,
2480 + uint32_t count,
2481 + VCHI_FLAGS_T flags,
2482 + void *msg_handle );
2483 +
2484 +// legacy scatter-gather (vector) and send message, only handles pointers
2485 +int32_t vchi_msg_queuev( VCHI_SERVICE_HANDLE_T handle,
2486 + VCHI_MSG_VECTOR_T *vector,
2487 + uint32_t count,
2488 + VCHI_FLAGS_T flags,
2489 + void *msg_handle );
2490 +
2491 +// Routine to receive a msg from a service
2492 +// Dequeue is equivalent to hold, copy into client buffer, release
2493 +extern int32_t vchi_msg_dequeue( VCHI_SERVICE_HANDLE_T handle,
2494 + void *data,
2495 + uint32_t max_data_size_to_read,
2496 + uint32_t *actual_msg_size,
2497 + VCHI_FLAGS_T flags );
2498 +
2499 +// Routine to look at a message in place.
2500 +// The message is not dequeued, so a subsequent call to peek or dequeue
2501 +// will return the same message.
2502 +extern int32_t vchi_msg_peek( VCHI_SERVICE_HANDLE_T handle,
2503 + void **data,
2504 + uint32_t *msg_size,
2505 + VCHI_FLAGS_T flags );
2506 +
2507 +// Routine to remove a message after it has been read in place with peek
2508 +// The first message on the queue is dequeued.
2509 +extern int32_t vchi_msg_remove( VCHI_SERVICE_HANDLE_T handle );
2510 +
2511 +// Routine to look at a message in place.
2512 +// The message is dequeued, so the caller is left holding it; the descriptor is
2513 +// filled in and must be released when the user has finished with the message.
2514 +extern int32_t vchi_msg_hold( VCHI_SERVICE_HANDLE_T handle,
2515 + void **data, // } may be NULL, as info can be
2516 + uint32_t *msg_size, // } obtained from HELD_MSG_T
2517 + VCHI_FLAGS_T flags,
2518 + VCHI_HELD_MSG_T *message_descriptor );
2519 +
2520 +// Initialise an iterator to look through messages in place
2521 +extern int32_t vchi_msg_look_ahead( VCHI_SERVICE_HANDLE_T handle,
2522 + VCHI_MSG_ITER_T *iter,
2523 + VCHI_FLAGS_T flags );
2524 +
2525 +/******************************************************************************
2526 + Global service support API - operations on held messages and message iterators
2527 + *****************************************************************************/
2528 +
2529 +// Routine to get the address of a held message
2530 +extern void *vchi_held_msg_ptr( const VCHI_HELD_MSG_T *message );
2531 +
2532 +// Routine to get the size of a held message
2533 +extern int32_t vchi_held_msg_size( const VCHI_HELD_MSG_T *message );
2534 +
2535 +// Routine to get the transmit timestamp as written into the header by the peer
2536 +extern uint32_t vchi_held_msg_tx_timestamp( const VCHI_HELD_MSG_T *message );
2537 +
2538 +// Routine to get the reception timestamp, written as we parsed the header
2539 +extern uint32_t vchi_held_msg_rx_timestamp( const VCHI_HELD_MSG_T *message );
2540 +
2541 +// Routine to release a held message after it has been processed
2542 +extern int32_t vchi_held_msg_release( VCHI_HELD_MSG_T *message );
2543 +
2544 +// Indicates whether the iterator has a next message.
2545 +extern int32_t vchi_msg_iter_has_next( const VCHI_MSG_ITER_T *iter );
2546 +
2547 +// Return the pointer and length for the next message and advance the iterator.
2548 +extern int32_t vchi_msg_iter_next( VCHI_MSG_ITER_T *iter,
2549 + void **data,
2550 + uint32_t *msg_size );
2551 +
2552 +// Remove the last message returned by vchi_msg_iter_next.
2553 +// Can only be called once after each call to vchi_msg_iter_next.
2554 +extern int32_t vchi_msg_iter_remove( VCHI_MSG_ITER_T *iter );
2555 +
2556 +// Hold the last message returned by vchi_msg_iter_next.
2557 +// Can only be called once after each call to vchi_msg_iter_next.
2558 +extern int32_t vchi_msg_iter_hold( VCHI_MSG_ITER_T *iter,
2559 + VCHI_HELD_MSG_T *message );
2560 +
2561 +// Return information for the next message, and hold it, advancing the iterator.
2562 +extern int32_t vchi_msg_iter_hold_next( VCHI_MSG_ITER_T *iter,
2563 + void **data, // } may be NULL
2564 + uint32_t *msg_size, // }
2565 + VCHI_HELD_MSG_T *message );
2566 +
2567 +
2568 +/******************************************************************************
2569 + Global bulk API
2570 + *****************************************************************************/
2571 +
2572 +// Routine to prepare interface for a transfer from the other side
2573 +extern int32_t vchi_bulk_queue_receive( VCHI_SERVICE_HANDLE_T handle,
2574 + void *data_dst,
2575 + uint32_t data_size,
2576 + VCHI_FLAGS_T flags,
2577 + void *transfer_handle );
2578 +
2579 +
2580 +// Prepare interface for a transfer from the other side into relocatable memory.
2581 +int32_t vchi_bulk_queue_receive_reloc( const VCHI_SERVICE_HANDLE_T handle,
2582 + VCHI_MEM_HANDLE_T h_dst,
2583 + uint32_t offset,
2584 + uint32_t data_size,
2585 + const VCHI_FLAGS_T flags,
2586 + void * const bulk_handle );
2587 +
2588 +// Routine to queue up data ready for transfer to the other side (once they have signalled they are ready)
2589 +extern int32_t vchi_bulk_queue_transmit( VCHI_SERVICE_HANDLE_T handle,
2590 + const void *data_src,
2591 + uint32_t data_size,
2592 + VCHI_FLAGS_T flags,
2593 + void *transfer_handle );
2594 +
2595 +
2596 +/******************************************************************************
2597 + Configuration plumbing
2598 + *****************************************************************************/
2599 +
2600 +// function prototypes for the different mid layers (the state info gives the different physical connections)
2601 +extern const VCHI_CONNECTION_API_T *single_get_func_table( void );
2602 +//extern const VCHI_CONNECTION_API_T *local_server_get_func_table( void );
2603 +//extern const VCHI_CONNECTION_API_T *local_client_get_func_table( void );
2604 +
2605 +// declare all message drivers here
2606 +const VCHI_MESSAGE_DRIVER_T *vchi_mphi_message_driver_func_table( void );
2607 +
2608 +#ifdef __cplusplus
2609 +}
2610 +#endif
2611 +
2612 +extern int32_t vchi_bulk_queue_transmit_reloc( VCHI_SERVICE_HANDLE_T handle,
2613 + VCHI_MEM_HANDLE_T h_src,
2614 + uint32_t offset,
2615 + uint32_t data_size,
2616 + VCHI_FLAGS_T flags,
2617 + void *transfer_handle );
2618 +#endif /* VCHI_H_ */
2619 +
2620 +/****************************** End of file **********************************/
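A hedged sketch of how a kernel-side client might drive this API: initialise, connect, open a service, queue a message. The names demo_client_start, my_service_state and the "DEMO" fourcc are illustrative, not defined by this patch, and error handling is abbreviated:

#include "interface/vchi/vchi.h"

static struct my_service my_service_state;	/* illustrative; see the callback sketch above */

static int32_t demo_client_start(void)
{
	VCHI_INSTANCE_T instance;
	VCHI_SERVICE_HANDLE_T service;
	static const char hello[] = "hello";
	SERVICE_CREATION_T setup = {
		.version        = VCHI_VERSION(1),
		.service_id     = MAKE_FOURCC("DEMO"),
		.connection     = NULL,		/* assumed unused by the vchiq_arm shim */
		.callback       = my_service_callback,
		.callback_param = &my_service_state,
	};
	int32_t ret;

	ret = vchi_initialise(&instance);
	if (ret != 0)
		return ret;

	/* The vchiq_arm implementation is assumed to ignore the connection list. */
	ret = vchi_connect(NULL, 0, instance);
	if (ret != 0)
		return ret;

	ret = vchi_service_open(instance, &setup, &service);
	if (ret != 0)
		return ret;

	/* Return once the message is queued for transmission to VideoCore. */
	return vchi_msg_queue(service, hello, sizeof(hello),
			      VCHI_FLAGS_BLOCK_UNTIL_QUEUED, NULL);
}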
2621 diff -urN linux-3.10/drivers/misc/vc04_services/interface/vchi/vchi_mh.h linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchi/vchi_mh.h
2622 --- linux-3.10/drivers/misc/vc04_services/interface/vchi/vchi_mh.h 1970-01-01 01:00:00.000000000 +0100
2623 +++ linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchi/vchi_mh.h 2013-07-06 15:25:50.000000000 +0100
2624 @@ -0,0 +1,42 @@
2625 +/**
2626 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
2627 + *
2628 + * Redistribution and use in source and binary forms, with or without
2629 + * modification, are permitted provided that the following conditions
2630 + * are met:
2631 + * 1. Redistributions of source code must retain the above copyright
2632 + * notice, this list of conditions, and the following disclaimer,
2633 + * without modification.
2634 + * 2. Redistributions in binary form must reproduce the above copyright
2635 + * notice, this list of conditions and the following disclaimer in the
2636 + * documentation and/or other materials provided with the distribution.
2637 + * 3. The names of the above-listed copyright holders may not be used
2638 + * to endorse or promote products derived from this software without
2639 + * specific prior written permission.
2640 + *
2641 + * ALTERNATIVELY, this software may be distributed under the terms of the
2642 + * GNU General Public License ("GPL") version 2, as published by the Free
2643 + * Software Foundation.
2644 + *
2645 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
2646 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
2647 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
2648 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
2649 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
2650 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
2651 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
2652 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
2653 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
2654 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
2655 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
2656 + */
2657 +
2658 +#ifndef VCHI_MH_H_
2659 +#define VCHI_MH_H_
2660 +
2661 +#include <linux/types.h>
2662 +
2663 +typedef int32_t VCHI_MEM_HANDLE_T;
2664 +#define VCHI_MEM_HANDLE_INVALID 0
2665 +
2666 +#endif
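On the ARM side these memory handles act as placeholders: bulk transfers are queued with plain pointers and VCHI_MEM_HANDLE_INVALID, which is what the WARN_ON in vchiq_2835_arm.c below expects. A hedged sketch of a blocking bulk transmit through the vchi.h API; demo_bulk_send is an illustrative name:

static int32_t demo_bulk_send(VCHI_SERVICE_HANDLE_T service,
			      const void *buf, uint32_t len)
{
	/* 'buf' is assumed to satisfy VCHI_BULK_ALIGN, or the service to have
	 * been opened with want_unaligned_bulk_tx set. Block until the data
	 * has gone; alternatively pass VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE
	 * and wait for VCHI_CALLBACK_BULK_SENT in the service callback. */
	return vchi_bulk_queue_transmit(service, buf, len,
					VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE,
					NULL);
}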
2667 diff -urN linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
2668 --- linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c 1970-01-01 01:00:00.000000000 +0100
2669 +++ linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c 2013-07-06 15:25:50.000000000 +0100
2670 @@ -0,0 +1,538 @@
2671 +/**
2672 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
2673 + *
2674 + * Redistribution and use in source and binary forms, with or without
2675 + * modification, are permitted provided that the following conditions
2676 + * are met:
2677 + * 1. Redistributions of source code must retain the above copyright
2678 + * notice, this list of conditions, and the following disclaimer,
2679 + * without modification.
2680 + * 2. Redistributions in binary form must reproduce the above copyright
2681 + * notice, this list of conditions and the following disclaimer in the
2682 + * documentation and/or other materials provided with the distribution.
2683 + * 3. The names of the above-listed copyright holders may not be used
2684 + * to endorse or promote products derived from this software without
2685 + * specific prior written permission.
2686 + *
2687 + * ALTERNATIVELY, this software may be distributed under the terms of the
2688 + * GNU General Public License ("GPL") version 2, as published by the Free
2689 + * Software Foundation.
2690 + *
2691 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
2692 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
2693 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
2694 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
2695 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
2696 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
2697 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
2698 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
2699 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
2700 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
2701 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
2702 + */
2703 +
2704 +#include <linux/kernel.h>
2705 +#include <linux/types.h>
2706 +#include <linux/errno.h>
2707 +#include <linux/interrupt.h>
2708 +#include <linux/irq.h>
2709 +#include <linux/pagemap.h>
2710 +#include <linux/dma-mapping.h>
2711 +#include <linux/version.h>
2712 +#include <linux/io.h>
2713 +#include <linux/uaccess.h>
2714 +#include <asm/pgtable.h>
2715 +
2716 +#include <mach/irqs.h>
2717 +
2718 +#include <mach/platform.h>
2719 +#include <mach/vcio.h>
2720 +
2721 +#define TOTAL_SLOTS (VCHIQ_SLOT_ZERO_SLOTS + 2 * 32)
2722 +
2723 +#define VCHIQ_DOORBELL_IRQ IRQ_ARM_DOORBELL_0
2724 +#define VCHIQ_ARM_ADDRESS(x) ((void *)__virt_to_bus((unsigned)x))
2725 +
2726 +#include "vchiq_arm.h"
2727 +#include "vchiq_2835.h"
2728 +#include "vchiq_connected.h"
2729 +
2730 +#define MAX_FRAGMENTS (VCHIQ_NUM_CURRENT_BULKS * 2)
2731 +
2732 +typedef struct vchiq_2835_state_struct {
2733 + int inited;
2734 + VCHIQ_ARM_STATE_T arm_state;
2735 +} VCHIQ_2835_ARM_STATE_T;
2736 +
2737 +static char *g_slot_mem;
2738 +static int g_slot_mem_size;
2739 +dma_addr_t g_slot_phys;
2740 +static FRAGMENTS_T *g_fragments_base;
2741 +static FRAGMENTS_T *g_free_fragments;
2742 +struct semaphore g_free_fragments_sema;
2743 +
2744 +extern int vchiq_arm_log_level;
2745 +
2746 +static DEFINE_SEMAPHORE(g_free_fragments_mutex);
2747 +
2748 +static irqreturn_t
2749 +vchiq_doorbell_irq(int irq, void *dev_id);
2750 +
2751 +static int
2752 +create_pagelist(char __user *buf, size_t count, unsigned short type,
2753 + struct task_struct *task, PAGELIST_T ** ppagelist);
2754 +
2755 +static void
2756 +free_pagelist(PAGELIST_T *pagelist, int actual);
2757 +
2758 +int __init
2759 +vchiq_platform_init(VCHIQ_STATE_T *state)
2760 +{
2761 + VCHIQ_SLOT_ZERO_T *vchiq_slot_zero;
2762 + int frag_mem_size;
2763 + int err;
2764 + int i;
2765 +
2766 + /* Allocate space for the channels in coherent memory */
2767 + g_slot_mem_size = PAGE_ALIGN(TOTAL_SLOTS * VCHIQ_SLOT_SIZE);
2768 + frag_mem_size = PAGE_ALIGN(sizeof(FRAGMENTS_T) * MAX_FRAGMENTS);
2769 +
2770 + g_slot_mem = dma_alloc_coherent(NULL, g_slot_mem_size + frag_mem_size,
2771 + &g_slot_phys, GFP_ATOMIC);
2772 +
2773 + if (!g_slot_mem) {
2774 + vchiq_log_error(vchiq_arm_log_level,
2775 + "Unable to allocate channel memory");
2776 + err = -ENOMEM;
2777 + goto failed_alloc;
2778 + }
2779 +
2780 + WARN_ON(((int)g_slot_mem & (PAGE_SIZE - 1)) != 0);
2781 +
2782 + vchiq_slot_zero = vchiq_init_slots(g_slot_mem, g_slot_mem_size);
2783 + if (!vchiq_slot_zero) {
2784 + err = -EINVAL;
2785 + goto failed_init_slots;
2786 + }
2787 +
2788 + vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX] =
2789 + (int)g_slot_phys + g_slot_mem_size;
2790 + vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX] =
2791 + MAX_FRAGMENTS;
2792 +
2793 + g_fragments_base = (FRAGMENTS_T *)(g_slot_mem + g_slot_mem_size);
2794 + g_slot_mem_size += frag_mem_size;
2795 +
2796 + g_free_fragments = g_fragments_base;
2797 + for (i = 0; i < (MAX_FRAGMENTS - 1); i++) {
2798 + *(FRAGMENTS_T **)&g_fragments_base[i] =
2799 + &g_fragments_base[i + 1];
2800 + }
2801 + *(FRAGMENTS_T **)&g_fragments_base[i] = NULL;
2802 + sema_init(&g_free_fragments_sema, MAX_FRAGMENTS);
2803 +
2804 + if (vchiq_init_state(state, vchiq_slot_zero, 0/*slave*/) !=
2805 + VCHIQ_SUCCESS) {
2806 + err = -EINVAL;
2807 + goto failed_vchiq_init;
2808 + }
2809 +
2810 + err = request_irq(VCHIQ_DOORBELL_IRQ, vchiq_doorbell_irq,
2811 + IRQF_IRQPOLL, "VCHIQ doorbell",
2812 + state);
2813 + if (err < 0) {
2814 + vchiq_log_error(vchiq_arm_log_level, "%s: failed to register "
2815 + "irq=%d err=%d", __func__,
2816 + VCHIQ_DOORBELL_IRQ, err);
2817 + goto failed_request_irq;
2818 + }
2819 +
2820 + /* Send the base address of the slots to VideoCore */
2821 +
2822 + dsb(); /* Ensure all writes have completed */
2823 +
2824 + bcm_mailbox_write(MBOX_CHAN_VCHIQ, (unsigned int)g_slot_phys);
2825 +
2826 + vchiq_log_info(vchiq_arm_log_level,
2827 + "vchiq_init - done (slots %x, phys %x)",
2828 + (unsigned int)vchiq_slot_zero, g_slot_phys);
2829 +
2830 + vchiq_call_connected_callbacks();
2831 +
2832 + return 0;
2833 +
2834 +failed_request_irq:
2835 +failed_vchiq_init:
2836 +failed_init_slots:
2837 + dma_free_coherent(NULL, g_slot_mem_size, g_slot_mem, g_slot_phys);
2838 +
2839 +failed_alloc:
2840 + return err;
2841 +}
2842 +
2843 +void __exit
2844 +vchiq_platform_exit(VCHIQ_STATE_T *state)
2845 +{
2846 + free_irq(VCHIQ_DOORBELL_IRQ, state);
2847 + dma_free_coherent(NULL, g_slot_mem_size,
2848 + g_slot_mem, g_slot_phys);
2849 +}
2850 +
2851 +
2852 +VCHIQ_STATUS_T
2853 +vchiq_platform_init_state(VCHIQ_STATE_T *state)
2854 +{
2855 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
2856 + state->platform_state = kzalloc(sizeof(VCHIQ_2835_ARM_STATE_T), GFP_KERNEL);
2857 + ((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->inited = 1;
2858 + status = vchiq_arm_init_state(state, &((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->arm_state);
2859 + if(status != VCHIQ_SUCCESS)
2860 + {
2861 + ((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->inited = 0;
2862 + }
2863 + return status;
2864 +}
2865 +
2866 +VCHIQ_ARM_STATE_T*
2867 +vchiq_platform_get_arm_state(VCHIQ_STATE_T *state)
2868 +{
2869 + if(!((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->inited)
2870 + {
2871 + BUG();
2872 + }
2873 + return &((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->arm_state;
2874 +}
2875 +
2876 +void
2877 +remote_event_signal(REMOTE_EVENT_T *event)
2878 +{
2879 + wmb();
2880 +
2881 + event->fired = 1;
2882 +
2883 + dsb(); /* data barrier operation */
2884 +
2885 + if (event->armed) {
2886 + /* trigger vc interrupt */
2887 +
2888 + writel(0, __io_address(ARM_0_BELL2));
2889 + }
2890 +}
2891 +
2892 +int
2893 +vchiq_copy_from_user(void *dst, const void *src, int size)
2894 +{
2895 + if ((uint32_t)src < TASK_SIZE) {
2896 + return copy_from_user(dst, src, size);
2897 + } else {
2898 + memcpy(dst, src, size);
2899 + return 0;
2900 + }
2901 +}
2902 +
2903 +VCHIQ_STATUS_T
2904 +vchiq_prepare_bulk_data(VCHIQ_BULK_T *bulk, VCHI_MEM_HANDLE_T memhandle,
2905 + void *offset, int size, int dir)
2906 +{
2907 + PAGELIST_T *pagelist;
2908 + int ret;
2909 +
2910 + WARN_ON(memhandle != VCHI_MEM_HANDLE_INVALID);
2911 +
2912 + ret = create_pagelist((char __user *)offset, size,
2913 + (dir == VCHIQ_BULK_RECEIVE)
2914 + ? PAGELIST_READ
2915 + : PAGELIST_WRITE,
2916 + current,
2917 + &pagelist);
2918 + if (ret != 0)
2919 + return VCHIQ_ERROR;
2920 +
2921 + bulk->handle = memhandle;
2922 + bulk->data = VCHIQ_ARM_ADDRESS(pagelist);
2923 +
2924 + /* Store the pagelist address in remote_data, which isn't used by the
2925 + slave. */
2926 + bulk->remote_data = pagelist;
2927 +
2928 + return VCHIQ_SUCCESS;
2929 +}
2930 +
2931 +void
2932 +vchiq_complete_bulk(VCHIQ_BULK_T *bulk)
2933 +{
2934 + if (bulk && bulk->remote_data && bulk->actual)
2935 + free_pagelist((PAGELIST_T *)bulk->remote_data, bulk->actual);
2936 +}
2937 +
2938 +void
2939 +vchiq_transfer_bulk(VCHIQ_BULK_T *bulk)
2940 +{
2941 + /*
2942 + * This should only be called on the master (VideoCore) side, but
2943 + * provide an implementation to avoid the need for ifdefery.
2944 + */
2945 + BUG();
2946 +}
2947 +
2948 +void
2949 +vchiq_dump_platform_state(void *dump_context)
2950 +{
2951 + char buf[80];
2952 + int len;
2953 + len = snprintf(buf, sizeof(buf),
2954 + " Platform: 2835 (VC master)");
2955 + vchiq_dump(dump_context, buf, len + 1);
2956 +}
2957 +
2958 +VCHIQ_STATUS_T
2959 +vchiq_platform_suspend(VCHIQ_STATE_T *state)
2960 +{
2961 + return VCHIQ_ERROR;
2962 +}
2963 +
2964 +VCHIQ_STATUS_T
2965 +vchiq_platform_resume(VCHIQ_STATE_T *state)
2966 +{
2967 + return VCHIQ_SUCCESS;
2968 +}
2969 +
2970 +void
2971 +vchiq_platform_paused(VCHIQ_STATE_T *state)
2972 +{
2973 +}
2974 +
2975 +void
2976 +vchiq_platform_resumed(VCHIQ_STATE_T *state)
2977 +{
2978 +}
2979 +
2980 +int
2981 +vchiq_platform_videocore_wanted(VCHIQ_STATE_T* state)
2982 +{
2983 +	return 1; /* autosuspend not supported - videocore always wanted */
2984 +}
2985 +
2986 +int
2987 +vchiq_platform_use_suspend_timer(void)
2988 +{
2989 + return 0;
2990 +}
2991 +void
2992 +vchiq_dump_platform_use_state(VCHIQ_STATE_T *state)
2993 +{
2994 +	vchiq_log_info(vchiq_arm_log_level, "Suspend timer not in use");
2995 +}
2996 +void
2997 +vchiq_platform_handle_timeout(VCHIQ_STATE_T *state)
2998 +{
2999 + (void)state;
3000 +}
3001 +/*
3002 + * Local functions
3003 + */
3004 +
3005 +static irqreturn_t
3006 +vchiq_doorbell_irq(int irq, void *dev_id)
3007 +{
3008 + VCHIQ_STATE_T *state = dev_id;
3009 + irqreturn_t ret = IRQ_NONE;
3010 + unsigned int status;
3011 +
3012 + /* Read (and clear) the doorbell */
3013 + status = readl(__io_address(ARM_0_BELL0));
3014 +
3015 + if (status & 0x4) { /* Was the doorbell rung? */
3016 + remote_event_pollall(state);
3017 + ret = IRQ_HANDLED;
3018 + }
3019 +
3020 + return ret;
3021 +}
3022 +
3023 +/* There is a potential problem with partial cache lines (pages?)
3024 +** at the ends of the block when reading. If the CPU accessed anything in
3025 +** the same line (page?) then it may have pulled old data into the cache,
3026 +** obscuring the new data underneath. We can solve this by transferring the
3027 +** partial cache lines separately, and allowing the ARM to copy into the
3028 +** cached area.
3029 +
3030 +** N.B. This implementation plays slightly fast and loose with the Linux
3031 +** driver programming rules, e.g. its use of __virt_to_bus instead of
3032 +** dma_map_single, but it isn't a multi-platform driver and it benefits
3033 +** from increased speed as a result.
3034 +*/
3035 +
3036 +static int
3037 +create_pagelist(char __user *buf, size_t count, unsigned short type,
3038 + struct task_struct *task, PAGELIST_T ** ppagelist)
3039 +{
3040 + PAGELIST_T *pagelist;
3041 + struct page **pages;
3042 + struct page *page;
3043 + unsigned long *addrs;
3044 + unsigned int num_pages, offset, i;
3045 + char *addr, *base_addr, *next_addr;
3046 + int run, addridx, actual_pages;
3047 +
3048 + offset = (unsigned int)buf & (PAGE_SIZE - 1);
3049 + num_pages = (count + offset + PAGE_SIZE - 1) / PAGE_SIZE;
3050 +
3051 + *ppagelist = NULL;
3052 +
3053 + /* Allocate enough storage to hold the page pointers and the page
3054 + ** list
3055 + */
3056 + pagelist = kmalloc(sizeof(PAGELIST_T) +
3057 + (num_pages * sizeof(unsigned long)) +
3058 + (num_pages * sizeof(pages[0])),
3059 + GFP_KERNEL);
3060 +
3061 + vchiq_log_trace(vchiq_arm_log_level,
3062 + "create_pagelist - %x", (unsigned int)pagelist);
3063 + if (!pagelist)
3064 + return -ENOMEM;
3065 +
3066 + addrs = pagelist->addrs;
3067 + pages = (struct page **)(addrs + num_pages);
3068 +
3069 + down_read(&task->mm->mmap_sem);
3070 + actual_pages = get_user_pages(task, task->mm,
3071 + (unsigned long)buf & ~(PAGE_SIZE - 1), num_pages,
3072 + (type == PAGELIST_READ) /*Write */ , 0 /*Force */ ,
3073 + pages, NULL /*vmas */);
3074 + up_read(&task->mm->mmap_sem);
3075 +
3076 + if (actual_pages != num_pages)
3077 + {
3078 + /* This is probably due to the process being killed */
3079 + while (actual_pages > 0)
3080 + {
3081 + actual_pages--;
3082 + page_cache_release(pages[actual_pages]);
3083 + }
3084 + kfree(pagelist);
3085 + if (actual_pages == 0)
3086 + actual_pages = -ENOMEM;
3087 + return actual_pages;
3088 + }
3089 +
3090 + pagelist->length = count;
3091 + pagelist->type = type;
3092 + pagelist->offset = offset;
3093 +
3094 + /* Group the pages into runs of contiguous pages */
3095 +
3096 + base_addr = VCHIQ_ARM_ADDRESS(page_address(pages[0]));
3097 + next_addr = base_addr + PAGE_SIZE;
3098 + addridx = 0;
3099 + run = 0;
3100 +
3101 + for (i = 1; i < num_pages; i++) {
3102 + addr = VCHIQ_ARM_ADDRESS(page_address(pages[i]));
3103 + if ((addr == next_addr) && (run < (PAGE_SIZE - 1))) {
3104 + next_addr += PAGE_SIZE;
3105 + run++;
3106 + } else {
3107 + addrs[addridx] = (unsigned long)base_addr + run;
3108 + addridx++;
3109 + base_addr = addr;
3110 + next_addr = addr + PAGE_SIZE;
3111 + run = 0;
3112 + }
3113 + }
3114 +
3115 + addrs[addridx] = (unsigned long)base_addr + run;
3116 + addridx++;
3117 +
3118 + /* Partial cache lines (fragments) require special measures */
3119 + if ((type == PAGELIST_READ) &&
3120 + ((pagelist->offset & (CACHE_LINE_SIZE - 1)) ||
3121 + ((pagelist->offset + pagelist->length) &
3122 + (CACHE_LINE_SIZE - 1)))) {
3123 + FRAGMENTS_T *fragments;
3124 +
3125 + if (down_interruptible(&g_free_fragments_sema) != 0) {
3126 + kfree(pagelist);
3127 + return -EINTR;
3128 + }
3129 +
3130 + WARN_ON(g_free_fragments == NULL);
3131 +
3132 + down(&g_free_fragments_mutex);
3133 + fragments = (FRAGMENTS_T *) g_free_fragments;
3134 + WARN_ON(fragments == NULL);
3135 + g_free_fragments = *(FRAGMENTS_T **) g_free_fragments;
3136 + up(&g_free_fragments_mutex);
3137 + pagelist->type =
3138 + PAGELIST_READ_WITH_FRAGMENTS + (fragments -
3139 + g_fragments_base);
3140 + }
3141 +
3142 + for (page = virt_to_page(pagelist);
3143 + page <= virt_to_page(addrs + num_pages - 1); page++) {
3144 + flush_dcache_page(page);
3145 + }
3146 +
3147 + *ppagelist = pagelist;
3148 +
3149 + return 0;
3150 +}
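
Editorial note: the run-length grouping above relies on page alignment. Each entry in pagelist->addrs is a page-aligned base address with the count of *extra* contiguous pages packed into the low-order bits, which are otherwise zero for an aligned address. A minimal sketch of that packing, assuming 4 KiB pages and an invented physical address, is shown below (not part of the driver).

#include <stdio.h>

#define PAGE_SIZE 4096ul   /* assumed 4 KiB pages, as on the BCM2835 */

int main(void)
{
	/* A run of four physically contiguous pages starting at 0x0c800000
	 * (address invented for illustration): base plus three extra pages. */
	unsigned long entry = 0x0c800000ul + 3;

	printf("base 0x%lx, extra pages %lu\n",
	       entry & ~(PAGE_SIZE - 1),    /* -> 0xc800000 */
	       entry & (PAGE_SIZE - 1));    /* -> 3 */
	return 0;
}
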
3151 +
3152 +static void
3153 +free_pagelist(PAGELIST_T *pagelist, int actual)
3154 +{
3155 + struct page **pages;
3156 + unsigned int num_pages, i;
3157 +
3158 + vchiq_log_trace(vchiq_arm_log_level,
3159 + "free_pagelist - %x, %d", (unsigned int)pagelist, actual);
3160 +
3161 + num_pages =
3162 + (pagelist->length + pagelist->offset + PAGE_SIZE - 1) /
3163 + PAGE_SIZE;
3164 +
3165 + pages = (struct page **)(pagelist->addrs + num_pages);
3166 +
3167 + /* Deal with any partial cache lines (fragments) */
3168 + if (pagelist->type >= PAGELIST_READ_WITH_FRAGMENTS) {
3169 + FRAGMENTS_T *fragments = g_fragments_base +
3170 + (pagelist->type - PAGELIST_READ_WITH_FRAGMENTS);
3171 + int head_bytes, tail_bytes;
3172 + head_bytes = (CACHE_LINE_SIZE - pagelist->offset) &
3173 + (CACHE_LINE_SIZE - 1);
3174 + tail_bytes = (pagelist->offset + actual) &
3175 + (CACHE_LINE_SIZE - 1);
3176 +
3177 + if ((actual >= 0) && (head_bytes != 0)) {
3178 + if (head_bytes > actual)
3179 + head_bytes = actual;
3180 +
3181 + memcpy((char *)page_address(pages[0]) +
3182 + pagelist->offset,
3183 + fragments->headbuf,
3184 + head_bytes);
3185 + }
3186 + if ((actual >= 0) && (head_bytes < actual) &&
3187 + (tail_bytes != 0)) {
3188 + memcpy((char *)page_address(pages[num_pages - 1]) +
3189 + ((pagelist->offset + actual) &
3190 + (PAGE_SIZE - 1) & ~(CACHE_LINE_SIZE - 1)),
3191 + fragments->tailbuf, tail_bytes);
3192 + }
3193 +
3194 + down(&g_free_fragments_mutex);
3195 + *(FRAGMENTS_T **) fragments = g_free_fragments;
3196 + g_free_fragments = fragments;
3197 + up(&g_free_fragments_mutex);
3198 + up(&g_free_fragments_sema);
3199 + }
3200 +
3201 + for (i = 0; i < num_pages; i++) {
3202 + if (pagelist->type != PAGELIST_WRITE)
3203 + set_page_dirty(pages[i]);
3204 + page_cache_release(pages[i]);
3205 + }
3206 +
3207 + kfree(pagelist);
3208 +}
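
Editorial note: as a worked example of the head/tail fragment sizes computed in free_pagelist() above (values invented; CACHE_LINE_SIZE is assumed to be 32 bytes here, the real constant is defined elsewhere in the driver), a 100-byte receive starting 13 bytes into a cache line splits into a 19-byte head fragment, a 64-byte aligned middle that was DMA'd directly, and a 17-byte tail fragment.

#include <stdio.h>

#define CACHE_LINE_SIZE 32   /* assumed value for illustration */

int main(void)
{
	int offset = 13, actual = 100;   /* invented transfer: 100 bytes at offset 13 */

	int head_bytes = (CACHE_LINE_SIZE - offset) & (CACHE_LINE_SIZE - 1);
	int tail_bytes = (offset + actual) & (CACHE_LINE_SIZE - 1);

	/* The head and tail bytes are memcpy'd from the fragment buffers;
	 * the bytes in between were transferred straight into the pages. */
	printf("head %d, tail %d, middle %d\n",
	       head_bytes, tail_bytes, actual - head_bytes - tail_bytes);
	return 0;
}
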
3209 diff -urN linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835.h linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835.h
3210 --- linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835.h 1970-01-01 01:00:00.000000000 +0100
3211 +++ linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835.h 2013-07-06 15:25:50.000000000 +0100
3212 @@ -0,0 +1,42 @@
3213 +/**
3214 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
3215 + *
3216 + * Redistribution and use in source and binary forms, with or without
3217 + * modification, are permitted provided that the following conditions
3218 + * are met:
3219 + * 1. Redistributions of source code must retain the above copyright
3220 + * notice, this list of conditions, and the following disclaimer,
3221 + * without modification.
3222 + * 2. Redistributions in binary form must reproduce the above copyright
3223 + * notice, this list of conditions and the following disclaimer in the
3224 + * documentation and/or other materials provided with the distribution.
3225 + * 3. The names of the above-listed copyright holders may not be used
3226 + * to endorse or promote products derived from this software without
3227 + * specific prior written permission.
3228 + *
3229 + * ALTERNATIVELY, this software may be distributed under the terms of the
3230 + * GNU General Public License ("GPL") version 2, as published by the Free
3231 + * Software Foundation.
3232 + *
3233 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
3234 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
3235 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
3236 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
3237 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
3238 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
3239 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
3240 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
3241 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
3242 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
3243 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
3244 + */
3245 +
3246 +#ifndef VCHIQ_2835_H
3247 +#define VCHIQ_2835_H
3248 +
3249 +#include "vchiq_pagelist.h"
3250 +
3251 +#define VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX 0
3252 +#define VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX 1
3253 +
3254 +#endif /* VCHIQ_2835_H */
3255 diff -urN linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.c linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.c
3256 --- linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.c 1970-01-01 01:00:00.000000000 +0100
3257 +++ linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.c 2013-07-06 15:25:50.000000000 +0100
3258 @@ -0,0 +1,2806 @@
3259 +/**
3260 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
3261 + *
3262 + * Redistribution and use in source and binary forms, with or without
3263 + * modification, are permitted provided that the following conditions
3264 + * are met:
3265 + * 1. Redistributions of source code must retain the above copyright
3266 + * notice, this list of conditions, and the following disclaimer,
3267 + * without modification.
3268 + * 2. Redistributions in binary form must reproduce the above copyright
3269 + * notice, this list of conditions and the following disclaimer in the
3270 + * documentation and/or other materials provided with the distribution.
3271 + * 3. The names of the above-listed copyright holders may not be used
3272 + * to endorse or promote products derived from this software without
3273 + * specific prior written permission.
3274 + *
3275 + * ALTERNATIVELY, this software may be distributed under the terms of the
3276 + * GNU General Public License ("GPL") version 2, as published by the Free
3277 + * Software Foundation.
3278 + *
3279 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
3280 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
3281 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
3282 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
3283 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
3284 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
3285 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
3286 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
3287 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
3288 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
3289 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
3290 + */
3291 +
3292 +#include <linux/kernel.h>
3293 +#include <linux/module.h>
3294 +#include <linux/types.h>
3295 +#include <linux/errno.h>
3296 +#include <linux/cdev.h>
3297 +#include <linux/fs.h>
3298 +#include <linux/device.h>
3299 +#include <linux/mm.h>
3300 +#include <linux/highmem.h>
3301 +#include <linux/pagemap.h>
3302 +#include <linux/bug.h>
3303 +#include <linux/semaphore.h>
3304 +#include <linux/list.h>
3305 +#include <linux/proc_fs.h>
3306 +
3307 +#include "vchiq_core.h"
3308 +#include "vchiq_ioctl.h"
3309 +#include "vchiq_arm.h"
3310 +
3311 +#define DEVICE_NAME "vchiq"
3312 +
3313 +/* Override the default prefix, which would be vchiq_arm (from the filename) */
3314 +#undef MODULE_PARAM_PREFIX
3315 +#define MODULE_PARAM_PREFIX DEVICE_NAME "."
3316 +
3317 +#define VCHIQ_MINOR 0
3318 +
3319 +/* Some per-instance constants */
3320 +#define MAX_COMPLETIONS 16
3321 +#define MAX_SERVICES 64
3322 +#define MAX_ELEMENTS 8
3323 +#define MSG_QUEUE_SIZE 64
3324 +
3325 +#define KEEPALIVE_VER 1
3326 +#define KEEPALIVE_VER_MIN KEEPALIVE_VER
3327 +
3328 +/* Run time control of log level, based on KERN_XXX level. */
3329 +int vchiq_arm_log_level = VCHIQ_LOG_DEFAULT;
3330 +int vchiq_susp_log_level = VCHIQ_LOG_ERROR;
3331 +
3332 +#define SUSPEND_TIMER_TIMEOUT_MS 100
3333 +#define SUSPEND_RETRY_TIMER_TIMEOUT_MS 1000
3334 +
3335 +#define VC_SUSPEND_NUM_OFFSET 3 /* number of values before idle which are -ve */
3336 +static const char *const suspend_state_names[] = {
3337 + "VC_SUSPEND_FORCE_CANCELED",
3338 + "VC_SUSPEND_REJECTED",
3339 + "VC_SUSPEND_FAILED",
3340 + "VC_SUSPEND_IDLE",
3341 + "VC_SUSPEND_REQUESTED",
3342 + "VC_SUSPEND_IN_PROGRESS",
3343 + "VC_SUSPEND_SUSPENDED"
3344 +};
3345 +#define VC_RESUME_NUM_OFFSET 1 /* number of values before idle which are -ve */
3346 +static const char *const resume_state_names[] = {
3347 + "VC_RESUME_FAILED",
3348 + "VC_RESUME_IDLE",
3349 + "VC_RESUME_REQUESTED",
3350 + "VC_RESUME_IN_PROGRESS",
3351 + "VC_RESUME_RESUMED"
3352 +};
3353 +/* The number of times we allow force suspend to timeout before actually
3354 +** _forcing_ suspend. This is to cater for SW which fails to release vchiq
3355 +** correctly - we don't want to prevent ARM suspend indefinitely in this case.
3356 +*/
3357 +#define FORCE_SUSPEND_FAIL_MAX 8
3358 +
3359 +/* The time in ms allowed for videocore to go idle when force suspend has been
3360 + * requested */
3361 +#define FORCE_SUSPEND_TIMEOUT_MS 200
3362 +
3363 +
3364 +static void suspend_timer_callback(unsigned long context);
3365 +static int vchiq_proc_add_instance(VCHIQ_INSTANCE_T instance);
3366 +static void vchiq_proc_remove_instance(VCHIQ_INSTANCE_T instance);
3367 +
3368 +
3369 +typedef struct user_service_struct {
3370 + VCHIQ_SERVICE_T *service;
3371 + void *userdata;
3372 + VCHIQ_INSTANCE_T instance;
3373 + int is_vchi;
3374 + int dequeue_pending;
3375 + int message_available_pos;
3376 + int msg_insert;
3377 + int msg_remove;
3378 + struct semaphore insert_event;
3379 + struct semaphore remove_event;
3380 + VCHIQ_HEADER_T * msg_queue[MSG_QUEUE_SIZE];
3381 +} USER_SERVICE_T;
3382 +
3383 +struct bulk_waiter_node {
3384 + struct bulk_waiter bulk_waiter;
3385 + int pid;
3386 + struct list_head list;
3387 +};
3388 +
3389 +struct vchiq_instance_struct {
3390 + VCHIQ_STATE_T *state;
3391 + VCHIQ_COMPLETION_DATA_T completions[MAX_COMPLETIONS];
3392 + int completion_insert;
3393 + int completion_remove;
3394 + struct semaphore insert_event;
3395 + struct semaphore remove_event;
3396 + struct mutex completion_mutex;
3397 +
3398 + int connected;
3399 + int closing;
3400 + int pid;
3401 + int mark;
3402 +
3403 + struct list_head bulk_waiter_list;
3404 + struct mutex bulk_waiter_list_mutex;
3405 +
3406 + struct proc_dir_entry *proc_entry;
3407 +};
3408 +
3409 +typedef struct dump_context_struct {
3410 + char __user *buf;
3411 + size_t actual;
3412 + size_t space;
3413 + loff_t offset;
3414 +} DUMP_CONTEXT_T;
3415 +
3416 +static struct cdev vchiq_cdev;
3417 +static dev_t vchiq_devid;
3418 +static VCHIQ_STATE_T g_state;
3419 +static struct class *vchiq_class;
3420 +static struct device *vchiq_dev;
3421 +static DEFINE_SPINLOCK(msg_queue_spinlock);
3422 +
3423 +static const char *const ioctl_names[] = {
3424 + "CONNECT",
3425 + "SHUTDOWN",
3426 + "CREATE_SERVICE",
3427 + "REMOVE_SERVICE",
3428 + "QUEUE_MESSAGE",
3429 + "QUEUE_BULK_TRANSMIT",
3430 + "QUEUE_BULK_RECEIVE",
3431 + "AWAIT_COMPLETION",
3432 + "DEQUEUE_MESSAGE",
3433 + "GET_CLIENT_ID",
3434 + "GET_CONFIG",
3435 + "CLOSE_SERVICE",
3436 + "USE_SERVICE",
3437 + "RELEASE_SERVICE",
3438 + "SET_SERVICE_OPTION",
3439 + "DUMP_PHYS_MEM"
3440 +};
3441 +
3442 +vchiq_static_assert((sizeof(ioctl_names)/sizeof(ioctl_names[0])) ==
3443 + (VCHIQ_IOC_MAX + 1));
3444 +
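Editorial note: vchiq_static_assert() is defined in the driver's headers, which are not part of this hunk; it is used here to keep ioctl_names[] in step with VCHIQ_IOC_MAX at compile time. A common pre-C11 construction of such an assertion, shown only as a sketch of the idea, makes an array size negative whenever the expression is false.

#include <stdio.h>

/* Illustrative only: a negative array size is a compile-time error. */
#define my_static_assert(expr) \
	extern int my_static_assert_failed[(expr) ? 1 : -1]

my_static_assert(sizeof(unsigned int) >= 4);         /* compiles */
/* my_static_assert(sizeof(unsigned int) == 1); */   /* would not compile */

int main(void)
{
	printf("static assertions hold\n");
	return 0;
}
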
3445 +static void
3446 +dump_phys_mem(void *virt_addr, uint32_t num_bytes);
3447 +
3448 +/****************************************************************************
3449 +*
3450 +* add_completion
3451 +*
3452 +***************************************************************************/
3453 +
3454 +static VCHIQ_STATUS_T
3455 +add_completion(VCHIQ_INSTANCE_T instance, VCHIQ_REASON_T reason,
3456 + VCHIQ_HEADER_T *header, USER_SERVICE_T *user_service,
3457 + void *bulk_userdata)
3458 +{
3459 + VCHIQ_COMPLETION_DATA_T *completion;
3460 + DEBUG_INITIALISE(g_state.local)
3461 +
3462 + while (instance->completion_insert ==
3463 + (instance->completion_remove + MAX_COMPLETIONS)) {
3464 + /* Out of space - wait for the client */
3465 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
3466 + vchiq_log_trace(vchiq_arm_log_level,
3467 + "add_completion - completion queue full");
3468 + DEBUG_COUNT(COMPLETION_QUEUE_FULL_COUNT);
3469 + if (down_interruptible(&instance->remove_event) != 0) {
3470 + vchiq_log_info(vchiq_arm_log_level,
3471 + "service_callback interrupted");
3472 + return VCHIQ_RETRY;
3473 + } else if (instance->closing) {
3474 + vchiq_log_info(vchiq_arm_log_level,
3475 + "service_callback closing");
3476 + return VCHIQ_ERROR;
3477 + }
3478 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
3479 + }
3480 +
3481 + completion =
3482 + &instance->completions[instance->completion_insert &
3483 + (MAX_COMPLETIONS - 1)];
3484 +
3485 + completion->header = header;
3486 + completion->reason = reason;
3487 + /* N.B. service_userdata is updated while processing AWAIT_COMPLETION */
3488 + completion->service_userdata = user_service->service;
3489 + completion->bulk_userdata = bulk_userdata;
3490 +
3491 + if (reason == VCHIQ_SERVICE_CLOSED)
3492 + /* Take an extra reference, to be held until
3493 + this CLOSED notification is delivered. */
3494 + lock_service(user_service->service);
3495 +
3496 + /* A write barrier is needed here to ensure that the entire completion
3497 + record is written out before the insert point. */
3498 + wmb();
3499 +
3500 + if (reason == VCHIQ_MESSAGE_AVAILABLE)
3501 + user_service->message_available_pos =
3502 + instance->completion_insert;
3503 + instance->completion_insert++;
3504 +
3505 + up(&instance->insert_event);
3506 +
3507 + return VCHIQ_SUCCESS;
3508 +}
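
Editorial note: the completion queue filled by add_completion() is a ring buffer driven by two free-running counters, completion_insert and completion_remove; masking with (MAX_COMPLETIONS - 1) maps them onto the array, which only works because MAX_COMPLETIONS is a power of two. The stand-alone sketch below shows the same indexing with illustrative names that are not from the driver.

#include <stdio.h>

#define RING_SIZE 16   /* must be a power of two, as MAX_COMPLETIONS is */

static int ring[RING_SIZE];
static unsigned int insert_idx, remove_idx;   /* free-running counters */

static int ring_full(void)  { return insert_idx == remove_idx + RING_SIZE; }
static int ring_empty(void) { return insert_idx == remove_idx; }

static void ring_push(int v) { ring[insert_idx++ & (RING_SIZE - 1)] = v; }
static int  ring_pop(void)   { return ring[remove_idx++ & (RING_SIZE - 1)]; }

int main(void)
{
	if (!ring_full())
		ring_push(42);
	if (!ring_empty())
		printf("popped %d\n", ring_pop());
	return 0;
}
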
3509 +
3510 +/****************************************************************************
3511 +*
3512 +* service_callback
3513 +*
3514 +***************************************************************************/
3515 +
3516 +static VCHIQ_STATUS_T
3517 +service_callback(VCHIQ_REASON_T reason, VCHIQ_HEADER_T *header,
3518 + VCHIQ_SERVICE_HANDLE_T handle, void *bulk_userdata)
3519 +{
3520 + /* How do we ensure the callback goes to the right client?
3521 +	** The service userdata points to a USER_SERVICE_T record containing
3522 + ** the original callback and the user state structure, which contains a
3523 + ** circular buffer for completion records.
3524 + */
3525 + USER_SERVICE_T *user_service;
3526 + VCHIQ_SERVICE_T *service;
3527 + VCHIQ_INSTANCE_T instance;
3528 + DEBUG_INITIALISE(g_state.local)
3529 +
3530 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
3531 +
3532 + service = handle_to_service(handle);
3533 + BUG_ON(!service);
3534 + user_service = (USER_SERVICE_T *)service->base.userdata;
3535 + instance = user_service->instance;
3536 +
3537 + if (!instance || instance->closing)
3538 + return VCHIQ_SUCCESS;
3539 +
3540 + vchiq_log_trace(vchiq_arm_log_level,
3541 + "service_callback - service %lx(%d), reason %d, header %lx, "
3542 + "instance %lx, bulk_userdata %lx",
3543 + (unsigned long)user_service,
3544 + service->localport,
3545 + reason, (unsigned long)header,
3546 + (unsigned long)instance, (unsigned long)bulk_userdata);
3547 +
3548 + if (header && user_service->is_vchi) {
3549 + spin_lock(&msg_queue_spinlock);
3550 + while (user_service->msg_insert ==
3551 + (user_service->msg_remove + MSG_QUEUE_SIZE)) {
3552 + spin_unlock(&msg_queue_spinlock);
3553 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
3554 + DEBUG_COUNT(MSG_QUEUE_FULL_COUNT);
3555 + vchiq_log_trace(vchiq_arm_log_level,
3556 + "service_callback - msg queue full");
3557 + /* If there is no MESSAGE_AVAILABLE in the completion
3558 + ** queue, add one
3559 + */
3560 + if ((user_service->message_available_pos -
3561 + instance->completion_remove) < 0) {
3562 + VCHIQ_STATUS_T status;
3563 + vchiq_log_info(vchiq_arm_log_level,
3564 + "Inserting extra MESSAGE_AVAILABLE");
3565 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
3566 + status = add_completion(instance, reason,
3567 + NULL, user_service, bulk_userdata);
3568 + if (status != VCHIQ_SUCCESS) {
3569 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
3570 + return status;
3571 + }
3572 + }
3573 +
3574 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
3575 + if (down_interruptible(&user_service->remove_event)
3576 + != 0) {
3577 + vchiq_log_info(vchiq_arm_log_level,
3578 + "service_callback interrupted");
3579 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
3580 + return VCHIQ_RETRY;
3581 + } else if (instance->closing) {
3582 + vchiq_log_info(vchiq_arm_log_level,
3583 + "service_callback closing");
3584 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
3585 + return VCHIQ_ERROR;
3586 + }
3587 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
3588 + spin_lock(&msg_queue_spinlock);
3589 + }
3590 +
3591 + user_service->msg_queue[user_service->msg_insert &
3592 + (MSG_QUEUE_SIZE - 1)] = header;
3593 + user_service->msg_insert++;
3594 + spin_unlock(&msg_queue_spinlock);
3595 +
3596 + up(&user_service->insert_event);
3597 +
3598 + /* If there is a thread waiting in DEQUEUE_MESSAGE, or if
3599 + ** there is a MESSAGE_AVAILABLE in the completion queue then
3600 + ** bypass the completion queue.
3601 + */
3602 + if (((user_service->message_available_pos -
3603 + instance->completion_remove) >= 0) ||
3604 + user_service->dequeue_pending) {
3605 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
3606 + user_service->dequeue_pending = 0;
3607 + return VCHIQ_SUCCESS;
3608 + }
3609 +
3610 + header = NULL;
3611 + }
3612 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
3613 +
3614 + return add_completion(instance, reason, header, user_service,
3615 + bulk_userdata);
3616 +}
3617 +
3618 +/****************************************************************************
3619 +*
3620 +* vchiq_ioctl
3621 +*
3622 +***************************************************************************/
3623 +
3624 +static long
3625 +vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
3626 +{
3627 + VCHIQ_INSTANCE_T instance = file->private_data;
3628 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
3629 + VCHIQ_SERVICE_T *service = NULL;
3630 + long ret = 0;
3631 + int i, rc;
3632 + DEBUG_INITIALISE(g_state.local)
3633 +
3634 + vchiq_log_trace(vchiq_arm_log_level,
3635 + "vchiq_ioctl - instance %x, cmd %s, arg %lx",
3636 + (unsigned int)instance,
3637 + ((_IOC_TYPE(cmd) == VCHIQ_IOC_MAGIC) &&
3638 + (_IOC_NR(cmd) <= VCHIQ_IOC_MAX)) ?
3639 + ioctl_names[_IOC_NR(cmd)] : "<invalid>", arg);
3640 +
3641 + switch (cmd) {
3642 + case VCHIQ_IOC_SHUTDOWN:
3643 + if (!instance->connected)
3644 + break;
3645 +
3646 + /* Remove all services */
3647 + i = 0;
3648 + while ((service = next_service_by_instance(instance->state,
3649 + instance, &i)) != NULL) {
3650 + status = vchiq_remove_service(service->handle);
3651 + unlock_service(service);
3652 + if (status != VCHIQ_SUCCESS)
3653 + break;
3654 + }
3655 + service = NULL;
3656 +
3657 + if (status == VCHIQ_SUCCESS) {
3658 + /* Wake the completion thread and ask it to exit */
3659 + instance->closing = 1;
3660 + up(&instance->insert_event);
3661 + }
3662 +
3663 + break;
3664 +
3665 + case VCHIQ_IOC_CONNECT:
3666 + if (instance->connected) {
3667 + ret = -EINVAL;
3668 + break;
3669 + }
3670 + rc = mutex_lock_interruptible(&instance->state->mutex);
3671 + if (rc != 0) {
3672 + vchiq_log_error(vchiq_arm_log_level,
3673 + "vchiq: connect: could not lock mutex for "
3674 + "state %d: %d",
3675 + instance->state->id, rc);
3676 + ret = -EINTR;
3677 + break;
3678 + }
3679 + status = vchiq_connect_internal(instance->state, instance);
3680 + mutex_unlock(&instance->state->mutex);
3681 +
3682 + if (status == VCHIQ_SUCCESS)
3683 + instance->connected = 1;
3684 + else
3685 + vchiq_log_error(vchiq_arm_log_level,
3686 + "vchiq: could not connect: %d", status);
3687 + break;
3688 +
3689 + case VCHIQ_IOC_CREATE_SERVICE: {
3690 + VCHIQ_CREATE_SERVICE_T args;
3691 + USER_SERVICE_T *user_service = NULL;
3692 + void *userdata;
3693 + int srvstate;
3694 +
3695 + if (copy_from_user
3696 + (&args, (const void __user *)arg,
3697 + sizeof(args)) != 0) {
3698 + ret = -EFAULT;
3699 + break;
3700 + }
3701 +
3702 + user_service = kmalloc(sizeof(USER_SERVICE_T), GFP_KERNEL);
3703 + if (!user_service) {
3704 + ret = -ENOMEM;
3705 + break;
3706 + }
3707 +
3708 + if (args.is_open) {
3709 + if (!instance->connected) {
3710 + ret = -ENOTCONN;
3711 + kfree(user_service);
3712 + break;
3713 + }
3714 + srvstate = VCHIQ_SRVSTATE_OPENING;
3715 + } else {
3716 + srvstate =
3717 + instance->connected ?
3718 + VCHIQ_SRVSTATE_LISTENING :
3719 + VCHIQ_SRVSTATE_HIDDEN;
3720 + }
3721 +
3722 + userdata = args.params.userdata;
3723 + args.params.callback = service_callback;
3724 + args.params.userdata = user_service;
3725 + service = vchiq_add_service_internal(
3726 + instance->state,
3727 + &args.params, srvstate,
3728 + instance);
3729 +
3730 + if (service != NULL) {
3731 + user_service->service = service;
3732 + user_service->userdata = userdata;
3733 + user_service->instance = instance;
3734 + user_service->is_vchi = args.is_vchi;
3735 + user_service->dequeue_pending = 0;
3736 + user_service->message_available_pos =
3737 + instance->completion_remove - 1;
3738 + user_service->msg_insert = 0;
3739 + user_service->msg_remove = 0;
3740 + sema_init(&user_service->insert_event, 0);
3741 + sema_init(&user_service->remove_event, 0);
3742 +
3743 + if (args.is_open) {
3744 + status = vchiq_open_service_internal
3745 + (service, instance->pid);
3746 + if (status != VCHIQ_SUCCESS) {
3747 + vchiq_remove_service(service->handle);
3748 + service = NULL;
3749 + ret = (status == VCHIQ_RETRY) ?
3750 + -EINTR : -EIO;
3751 + user_service->service = NULL;
3752 + user_service->instance = NULL;
3753 + break;
3754 + }
3755 + }
3756 +
3757 + if (copy_to_user((void __user *)
3758 + &(((VCHIQ_CREATE_SERVICE_T __user *)
3759 + arg)->handle),
3760 + (const void *)&service->handle,
3761 + sizeof(service->handle)) != 0) {
3762 + ret = -EFAULT;
3763 + vchiq_remove_service(service->handle);
3764 + kfree(user_service);
3765 + }
3766 +
3767 + service = NULL;
3768 + } else {
3769 + ret = -EEXIST;
3770 + kfree(user_service);
3771 + }
3772 + } break;
3773 +
3774 + case VCHIQ_IOC_CLOSE_SERVICE: {
3775 + VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
3776 +
3777 + service = find_service_for_instance(instance, handle);
3778 + if (service != NULL)
3779 + status = vchiq_close_service(service->handle);
3780 + else
3781 + ret = -EINVAL;
3782 + } break;
3783 +
3784 + case VCHIQ_IOC_REMOVE_SERVICE: {
3785 + VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
3786 +
3787 + service = find_service_for_instance(instance, handle);
3788 + if (service != NULL)
3789 + status = vchiq_remove_service(service->handle);
3790 + else
3791 + ret = -EINVAL;
3792 + } break;
3793 +
3794 + case VCHIQ_IOC_USE_SERVICE:
3795 + case VCHIQ_IOC_RELEASE_SERVICE: {
3796 + VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
3797 +
3798 + service = find_service_for_instance(instance, handle);
3799 + if (service != NULL) {
3800 + status = (cmd == VCHIQ_IOC_USE_SERVICE) ?
3801 + vchiq_use_service_internal(service) :
3802 + vchiq_release_service_internal(service);
3803 + if (status != VCHIQ_SUCCESS) {
3804 + vchiq_log_error(vchiq_susp_log_level,
3805 + "%s: cmd %s returned error %d for "
3806 + "service %c%c%c%c:%03d",
3807 + __func__,
3808 + (cmd == VCHIQ_IOC_USE_SERVICE) ?
3809 + "VCHIQ_IOC_USE_SERVICE" :
3810 + "VCHIQ_IOC_RELEASE_SERVICE",
3811 + status,
3812 + VCHIQ_FOURCC_AS_4CHARS(
3813 + service->base.fourcc),
3814 + service->client_id);
3815 + ret = -EINVAL;
3816 + }
3817 + } else
3818 + ret = -EINVAL;
3819 + } break;
3820 +
3821 + case VCHIQ_IOC_QUEUE_MESSAGE: {
3822 + VCHIQ_QUEUE_MESSAGE_T args;
3823 + if (copy_from_user
3824 + (&args, (const void __user *)arg,
3825 + sizeof(args)) != 0) {
3826 + ret = -EFAULT;
3827 + break;
3828 + }
3829 +
3830 + service = find_service_for_instance(instance, args.handle);
3831 +
3832 + if ((service != NULL) && (args.count <= MAX_ELEMENTS)) {
3833 + /* Copy elements into kernel space */
3834 + VCHIQ_ELEMENT_T elements[MAX_ELEMENTS];
3835 + if (copy_from_user(elements, args.elements,
3836 + args.count * sizeof(VCHIQ_ELEMENT_T)) == 0)
3837 + status = vchiq_queue_message
3838 + (args.handle,
3839 + elements, args.count);
3840 + else
3841 + ret = -EFAULT;
3842 + } else {
3843 + ret = -EINVAL;
3844 + }
3845 + } break;
3846 +
3847 + case VCHIQ_IOC_QUEUE_BULK_TRANSMIT:
3848 + case VCHIQ_IOC_QUEUE_BULK_RECEIVE: {
3849 + VCHIQ_QUEUE_BULK_TRANSFER_T args;
3850 + struct bulk_waiter_node *waiter = NULL;
3851 + VCHIQ_BULK_DIR_T dir =
3852 + (cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT) ?
3853 + VCHIQ_BULK_TRANSMIT : VCHIQ_BULK_RECEIVE;
3854 +
3855 + if (copy_from_user
3856 + (&args, (const void __user *)arg,
3857 + sizeof(args)) != 0) {
3858 + ret = -EFAULT;
3859 + break;
3860 + }
3861 +
3862 + service = find_service_for_instance(instance, args.handle);
3863 + if (!service) {
3864 + ret = -EINVAL;
3865 + break;
3866 + }
3867 +
3868 + if (args.mode == VCHIQ_BULK_MODE_BLOCKING) {
3869 + waiter = kzalloc(sizeof(struct bulk_waiter_node),
3870 + GFP_KERNEL);
3871 + if (!waiter) {
3872 + ret = -ENOMEM;
3873 + break;
3874 + }
3875 + args.userdata = &waiter->bulk_waiter;
3876 + } else if (args.mode == VCHIQ_BULK_MODE_WAITING) {
3877 + struct list_head *pos;
3878 + mutex_lock(&instance->bulk_waiter_list_mutex);
3879 + list_for_each(pos, &instance->bulk_waiter_list) {
3880 + if (list_entry(pos, struct bulk_waiter_node,
3881 + list)->pid == current->pid) {
3882 + waiter = list_entry(pos,
3883 + struct bulk_waiter_node,
3884 + list);
3885 + list_del(pos);
3886 + break;
3887 + }
3888 +
3889 + }
3890 + mutex_unlock(&instance->bulk_waiter_list_mutex);
3891 + if (!waiter) {
3892 + vchiq_log_error(vchiq_arm_log_level,
3893 + "no bulk_waiter found for pid %d",
3894 + current->pid);
3895 + ret = -ESRCH;
3896 + break;
3897 + }
3898 + vchiq_log_info(vchiq_arm_log_level,
3899 + "found bulk_waiter %x for pid %d",
3900 + (unsigned int)waiter, current->pid);
3901 + args.userdata = &waiter->bulk_waiter;
3902 + }
3903 + status = vchiq_bulk_transfer
3904 + (args.handle,
3905 + VCHI_MEM_HANDLE_INVALID,
3906 + args.data, args.size,
3907 + args.userdata, args.mode,
3908 + dir);
3909 + if (!waiter)
3910 + break;
3911 + if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) ||
3912 + !waiter->bulk_waiter.bulk) {
3913 + if (waiter->bulk_waiter.bulk) {
3914 + /* Cancel the signal when the transfer
3915 + ** completes. */
3916 + spin_lock(&bulk_waiter_spinlock);
3917 + waiter->bulk_waiter.bulk->userdata = NULL;
3918 + spin_unlock(&bulk_waiter_spinlock);
3919 + }
3920 + kfree(waiter);
3921 + } else {
3922 + const VCHIQ_BULK_MODE_T mode_waiting =
3923 + VCHIQ_BULK_MODE_WAITING;
3924 + waiter->pid = current->pid;
3925 + mutex_lock(&instance->bulk_waiter_list_mutex);
3926 + list_add(&waiter->list, &instance->bulk_waiter_list);
3927 + mutex_unlock(&instance->bulk_waiter_list_mutex);
3928 + vchiq_log_info(vchiq_arm_log_level,
3929 + "saved bulk_waiter %x for pid %d",
3930 + (unsigned int)waiter, current->pid);
3931 +
3932 + if (copy_to_user((void __user *)
3933 + &(((VCHIQ_QUEUE_BULK_TRANSFER_T __user *)
3934 + arg)->mode),
3935 + (const void *)&mode_waiting,
3936 + sizeof(mode_waiting)) != 0)
3937 + ret = -EFAULT;
3938 + }
3939 + } break;
3940 +
3941 + case VCHIQ_IOC_AWAIT_COMPLETION: {
3942 + VCHIQ_AWAIT_COMPLETION_T args;
3943 +
3944 + DEBUG_TRACE(AWAIT_COMPLETION_LINE);
3945 + if (!instance->connected) {
3946 + ret = -ENOTCONN;
3947 + break;
3948 + }
3949 +
3950 + if (copy_from_user(&args, (const void __user *)arg,
3951 + sizeof(args)) != 0) {
3952 + ret = -EFAULT;
3953 + break;
3954 + }
3955 +
3956 + mutex_lock(&instance->completion_mutex);
3957 +
3958 + DEBUG_TRACE(AWAIT_COMPLETION_LINE);
3959 + while ((instance->completion_remove ==
3960 + instance->completion_insert)
3961 + && !instance->closing) {
3962 + int rc;
3963 + DEBUG_TRACE(AWAIT_COMPLETION_LINE);
3964 + mutex_unlock(&instance->completion_mutex);
3965 + rc = down_interruptible(&instance->insert_event);
3966 + mutex_lock(&instance->completion_mutex);
3967 + if (rc != 0) {
3968 + DEBUG_TRACE(AWAIT_COMPLETION_LINE);
3969 + vchiq_log_info(vchiq_arm_log_level,
3970 + "AWAIT_COMPLETION interrupted");
3971 + ret = -EINTR;
3972 + break;
3973 + }
3974 + }
3975 + DEBUG_TRACE(AWAIT_COMPLETION_LINE);
3976 +
3977 + /* A read memory barrier is needed to stop prefetch of a stale
3978 + ** completion record
3979 + */
3980 + rmb();
3981 +
3982 + if (ret == 0) {
3983 + int msgbufcount = args.msgbufcount;
3984 + for (ret = 0; ret < args.count; ret++) {
3985 + VCHIQ_COMPLETION_DATA_T *completion;
3986 + VCHIQ_SERVICE_T *service;
3987 + USER_SERVICE_T *user_service;
3988 + VCHIQ_HEADER_T *header;
3989 + if (instance->completion_remove ==
3990 + instance->completion_insert)
3991 + break;
3992 + completion = &instance->completions[
3993 + instance->completion_remove &
3994 + (MAX_COMPLETIONS - 1)];
3995 +
3996 + service = completion->service_userdata;
3997 + user_service = service->base.userdata;
3998 + completion->service_userdata =
3999 + user_service->userdata;
4000 +
4001 + header = completion->header;
4002 + if (header) {
4003 + void __user *msgbuf;
4004 + int msglen;
4005 +
4006 + msglen = header->size +
4007 + sizeof(VCHIQ_HEADER_T);
4008 + /* This must be a VCHIQ-style service */
4009 + if (args.msgbufsize < msglen) {
4010 + vchiq_log_error(
4011 + vchiq_arm_log_level,
4012 + "header %x: msgbufsize"
4013 + " %x < msglen %x",
4014 + (unsigned int)header,
4015 + args.msgbufsize,
4016 + msglen);
4017 + WARN(1, "invalid message "
4018 + "size\n");
4019 + if (ret == 0)
4020 + ret = -EMSGSIZE;
4021 + break;
4022 + }
4023 + if (msgbufcount <= 0)
4024 + /* Stall here for lack of a
4025 + ** buffer for the message. */
4026 + break;
4027 + /* Get the pointer from user space */
4028 + msgbufcount--;
4029 + if (copy_from_user(&msgbuf,
4030 + (const void __user *)
4031 + &args.msgbufs[msgbufcount],
4032 + sizeof(msgbuf)) != 0) {
4033 + if (ret == 0)
4034 + ret = -EFAULT;
4035 + break;
4036 + }
4037 +
4038 + /* Copy the message to user space */
4039 + if (copy_to_user(msgbuf, header,
4040 + msglen) != 0) {
4041 + if (ret == 0)
4042 + ret = -EFAULT;
4043 + break;
4044 + }
4045 +
4046 + /* Now it has been copied, the message
4047 + ** can be released. */
4048 + vchiq_release_message(service->handle,
4049 + header);
4050 +
4051 + /* The completion must point to the
4052 + ** msgbuf. */
4053 + completion->header = msgbuf;
4054 + }
4055 +
4056 + if (completion->reason ==
4057 + VCHIQ_SERVICE_CLOSED) {
4058 + unlock_service(service);
4059 + kfree(user_service);
4060 + }
4061 +
4062 + if (copy_to_user((void __user *)(
4063 + (size_t)args.buf +
4064 + ret * sizeof(VCHIQ_COMPLETION_DATA_T)),
4065 + completion,
4066 + sizeof(VCHIQ_COMPLETION_DATA_T)) != 0) {
4067 + if (ret == 0)
4068 + ret = -EFAULT;
4069 + break;
4070 + }
4071 +
4072 + instance->completion_remove++;
4073 + }
4074 +
4075 + if (msgbufcount != args.msgbufcount) {
4076 + if (copy_to_user((void __user *)
4077 + &((VCHIQ_AWAIT_COMPLETION_T *)arg)->
4078 + msgbufcount,
4079 + &msgbufcount,
4080 + sizeof(msgbufcount)) != 0) {
4081 + ret = -EFAULT;
4082 + }
4083 + }
4084 + }
4085 +
4086 + if (ret != 0)
4087 + up(&instance->remove_event);
4088 + mutex_unlock(&instance->completion_mutex);
4089 + DEBUG_TRACE(AWAIT_COMPLETION_LINE);
4090 + } break;
4091 +
4092 + case VCHIQ_IOC_DEQUEUE_MESSAGE: {
4093 + VCHIQ_DEQUEUE_MESSAGE_T args;
4094 + USER_SERVICE_T *user_service;
4095 + VCHIQ_HEADER_T *header;
4096 +
4097 + DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
4098 + if (copy_from_user
4099 + (&args, (const void __user *)arg,
4100 + sizeof(args)) != 0) {
4101 + ret = -EFAULT;
4102 + break;
4103 + }
4104 + service = find_service_for_instance(instance, args.handle);
4105 + if (!service) {
4106 + ret = -EINVAL;
4107 + break;
4108 + }
4109 + user_service = (USER_SERVICE_T *)service->base.userdata;
4110 + if (user_service->is_vchi == 0) {
4111 + ret = -EINVAL;
4112 + break;
4113 + }
4114 +
4115 + spin_lock(&msg_queue_spinlock);
4116 + if (user_service->msg_remove == user_service->msg_insert) {
4117 + if (!args.blocking) {
4118 + spin_unlock(&msg_queue_spinlock);
4119 + DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
4120 + ret = -EWOULDBLOCK;
4121 + break;
4122 + }
4123 + user_service->dequeue_pending = 1;
4124 + do {
4125 + spin_unlock(&msg_queue_spinlock);
4126 + DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
4127 + if (down_interruptible(
4128 + &user_service->insert_event) != 0) {
4129 + vchiq_log_info(vchiq_arm_log_level,
4130 + "DEQUEUE_MESSAGE interrupted");
4131 + ret = -EINTR;
4132 + break;
4133 + }
4134 + spin_lock(&msg_queue_spinlock);
4135 + } while (user_service->msg_remove ==
4136 + user_service->msg_insert);
4137 +
4138 + if (ret)
4139 + break;
4140 + }
4141 +
4142 + BUG_ON((int)(user_service->msg_insert -
4143 + user_service->msg_remove) < 0);
4144 +
4145 + header = user_service->msg_queue[user_service->msg_remove &
4146 + (MSG_QUEUE_SIZE - 1)];
4147 + user_service->msg_remove++;
4148 + spin_unlock(&msg_queue_spinlock);
4149 +
4150 + up(&user_service->remove_event);
4151 + if (header == NULL)
4152 + ret = -ENOTCONN;
4153 + else if (header->size <= args.bufsize) {
4154 + /* Copy to user space if msgbuf is not NULL */
4155 + if ((args.buf == NULL) ||
4156 + (copy_to_user((void __user *)args.buf,
4157 + header->data,
4158 + header->size) == 0)) {
4159 + ret = header->size;
4160 + vchiq_release_message(
4161 + service->handle,
4162 + header);
4163 + } else
4164 + ret = -EFAULT;
4165 + } else {
4166 + vchiq_log_error(vchiq_arm_log_level,
4167 + "header %x: bufsize %x < size %x",
4168 + (unsigned int)header, args.bufsize,
4169 + header->size);
4170 + WARN(1, "invalid size\n");
4171 + ret = -EMSGSIZE;
4172 + }
4173 + DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
4174 + } break;
4175 +
4176 + case VCHIQ_IOC_GET_CLIENT_ID: {
4177 + VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
4178 +
4179 + ret = vchiq_get_client_id(handle);
4180 + } break;
4181 +
4182 + case VCHIQ_IOC_GET_CONFIG: {
4183 + VCHIQ_GET_CONFIG_T args;
4184 + VCHIQ_CONFIG_T config;
4185 +
4186 + if (copy_from_user(&args, (const void __user *)arg,
4187 + sizeof(args)) != 0) {
4188 + ret = -EFAULT;
4189 + break;
4190 + }
4191 + if (args.config_size > sizeof(config)) {
4192 + ret = -EINVAL;
4193 + break;
4194 + }
4195 + status = vchiq_get_config(instance, args.config_size, &config);
4196 + if (status == VCHIQ_SUCCESS) {
4197 + if (copy_to_user((void __user *)args.pconfig,
4198 + &config, args.config_size) != 0) {
4199 + ret = -EFAULT;
4200 + break;
4201 + }
4202 + }
4203 + } break;
4204 +
4205 + case VCHIQ_IOC_SET_SERVICE_OPTION: {
4206 + VCHIQ_SET_SERVICE_OPTION_T args;
4207 +
4208 + if (copy_from_user(
4209 + &args, (const void __user *)arg,
4210 + sizeof(args)) != 0) {
4211 + ret = -EFAULT;
4212 + break;
4213 + }
4214 +
4215 + service = find_service_for_instance(instance, args.handle);
4216 + if (!service) {
4217 + ret = -EINVAL;
4218 + break;
4219 + }
4220 +
4221 + status = vchiq_set_service_option(
4222 + args.handle, args.option, args.value);
4223 + } break;
4224 +
4225 + case VCHIQ_IOC_DUMP_PHYS_MEM: {
4226 + VCHIQ_DUMP_MEM_T args;
4227 +
4228 + if (copy_from_user
4229 + (&args, (const void __user *)arg,
4230 + sizeof(args)) != 0) {
4231 + ret = -EFAULT;
4232 + break;
4233 + }
4234 + dump_phys_mem(args.virt_addr, args.num_bytes);
4235 + } break;
4236 +
4237 + default:
4238 + ret = -ENOTTY;
4239 + break;
4240 + }
4241 +
4242 + if (service)
4243 + unlock_service(service);
4244 +
4245 + if (ret == 0) {
4246 + if (status == VCHIQ_ERROR)
4247 + ret = -EIO;
4248 + else if (status == VCHIQ_RETRY)
4249 + ret = -EINTR;
4250 + }
4251 +
4252 + if ((status == VCHIQ_SUCCESS) && (ret < 0) && (ret != -EINTR) &&
4253 + (ret != -EWOULDBLOCK))
4254 + vchiq_log_info(vchiq_arm_log_level,
4255 + " ioctl instance %lx, cmd %s -> status %d, %ld",
4256 + (unsigned long)instance,
4257 + (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
4258 + ioctl_names[_IOC_NR(cmd)] :
4259 + "<invalid>",
4260 + status, ret);
4261 + else
4262 + vchiq_log_trace(vchiq_arm_log_level,
4263 + " ioctl instance %lx, cmd %s -> status %d, %ld",
4264 + (unsigned long)instance,
4265 + (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
4266 + ioctl_names[_IOC_NR(cmd)] :
4267 + "<invalid>",
4268 + status, ret);
4269 +
4270 + return ret;
4271 +}
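
Editorial note: from user space the ioctl interface above is reached through the /dev/vchiq character device: a client opens the device, issues VCHIQ_IOC_CONNECT (which takes no argument, per the CONNECT case above), and then creates services and waits on completions via the other ioctls. The minimal sketch below assumes the request codes from vchiq_ioctl.h, which is not part of this hunk, and omits the service ioctls entirely; it also demonstrates that reading the device dumps the driver state (see vchiq_read() further down).

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "vchiq_ioctl.h"   /* ioctl request codes; shipped with the driver, not shown here */

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd = open("/dev/vchiq", O_RDWR);

	if (fd < 0) {
		perror("open /dev/vchiq");
		return 1;
	}

	/* VCHIQ_IOC_CONNECT takes no argument. */
	if (ioctl(fd, VCHIQ_IOC_CONNECT, 0) != 0)
		perror("VCHIQ_IOC_CONNECT");

	/* Reading the device dumps the driver state. */
	n = read(fd, buf, sizeof(buf));
	if (n > 0)
		fwrite(buf, 1, (size_t)n, stdout);

	close(fd);
	return 0;
}
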
4272 +
4273 +/****************************************************************************
4274 +*
4275 +* vchiq_open
4276 +*
4277 +***************************************************************************/
4278 +
4279 +static int
4280 +vchiq_open(struct inode *inode, struct file *file)
4281 +{
4282 + int dev = iminor(inode) & 0x0f;
4283 + vchiq_log_info(vchiq_arm_log_level, "vchiq_open");
4284 + switch (dev) {
4285 + case VCHIQ_MINOR: {
4286 + int ret;
4287 + VCHIQ_STATE_T *state = vchiq_get_state();
4288 + VCHIQ_INSTANCE_T instance;
4289 +
4290 + if (!state) {
4291 + vchiq_log_error(vchiq_arm_log_level,
4292 + "vchiq has no connection to VideoCore");
4293 + return -ENOTCONN;
4294 + }
4295 +
4296 + instance = kzalloc(sizeof(*instance), GFP_KERNEL);
4297 + if (!instance)
4298 + return -ENOMEM;
4299 +
4300 + instance->state = state;
4301 + instance->pid = current->tgid;
4302 +
4303 + ret = vchiq_proc_add_instance(instance);
4304 + if (ret != 0) {
4305 + kfree(instance);
4306 + return ret;
4307 + }
4308 +
4309 + sema_init(&instance->insert_event, 0);
4310 + sema_init(&instance->remove_event, 0);
4311 + mutex_init(&instance->completion_mutex);
4312 + mutex_init(&instance->bulk_waiter_list_mutex);
4313 + INIT_LIST_HEAD(&instance->bulk_waiter_list);
4314 +
4315 + file->private_data = instance;
4316 + } break;
4317 +
4318 + default:
4319 + vchiq_log_error(vchiq_arm_log_level,
4320 + "Unknown minor device: %d", dev);
4321 + return -ENXIO;
4322 + }
4323 +
4324 + return 0;
4325 +}
4326 +
4327 +/****************************************************************************
4328 +*
4329 +* vchiq_release
4330 +*
4331 +***************************************************************************/
4332 +
4333 +static int
4334 +vchiq_release(struct inode *inode, struct file *file)
4335 +{
4336 + int dev = iminor(inode) & 0x0f;
4337 + int ret = 0;
4338 + switch (dev) {
4339 + case VCHIQ_MINOR: {
4340 + VCHIQ_INSTANCE_T instance = file->private_data;
4341 + VCHIQ_STATE_T *state = vchiq_get_state();
4342 + VCHIQ_SERVICE_T *service;
4343 + int i;
4344 +
4345 + vchiq_log_info(vchiq_arm_log_level,
4346 + "vchiq_release: instance=%lx",
4347 + (unsigned long)instance);
4348 +
4349 + if (!state) {
4350 + ret = -EPERM;
4351 + goto out;
4352 + }
4353 +
4354 + /* Ensure videocore is awake to allow termination. */
4355 + vchiq_use_internal(instance->state, NULL,
4356 + USE_TYPE_VCHIQ);
4357 +
4358 + mutex_lock(&instance->completion_mutex);
4359 +
4360 + /* Wake the completion thread and ask it to exit */
4361 + instance->closing = 1;
4362 + up(&instance->insert_event);
4363 +
4364 + mutex_unlock(&instance->completion_mutex);
4365 +
4366 + /* Wake the slot handler if the completion queue is full. */
4367 + up(&instance->remove_event);
4368 +
4369 + /* Mark all services for termination... */
4370 + i = 0;
4371 + while ((service = next_service_by_instance(state, instance,
4372 + &i)) != NULL) {
4373 + USER_SERVICE_T *user_service = service->base.userdata;
4374 +
4375 + /* Wake the slot handler if the msg queue is full. */
4376 + up(&user_service->remove_event);
4377 +
4378 + vchiq_terminate_service_internal(service);
4379 + unlock_service(service);
4380 + }
4381 +
4382 + /* ...and wait for them to die */
4383 + i = 0;
4384 + while ((service = next_service_by_instance(state, instance, &i))
4385 + != NULL) {
4386 + USER_SERVICE_T *user_service = service->base.userdata;
4387 +
4388 + down(&service->remove_event);
4389 +
4390 + BUG_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);
4391 +
4392 + spin_lock(&msg_queue_spinlock);
4393 +
4394 + while (user_service->msg_remove !=
4395 + user_service->msg_insert) {
4396 + VCHIQ_HEADER_T *header = user_service->
4397 + msg_queue[user_service->msg_remove &
4398 + (MSG_QUEUE_SIZE - 1)];
4399 + user_service->msg_remove++;
4400 + spin_unlock(&msg_queue_spinlock);
4401 +
4402 + if (header)
4403 + vchiq_release_message(
4404 + service->handle,
4405 + header);
4406 + spin_lock(&msg_queue_spinlock);
4407 + }
4408 +
4409 + spin_unlock(&msg_queue_spinlock);
4410 +
4411 + unlock_service(service);
4412 + kfree(user_service);
4413 + }
4414 +
4415 + /* Release any closed services */
4416 + while (instance->completion_remove !=
4417 + instance->completion_insert) {
4418 + VCHIQ_COMPLETION_DATA_T *completion;
4419 + VCHIQ_SERVICE_T *service;
4420 + completion = &instance->completions[
4421 + instance->completion_remove &
4422 + (MAX_COMPLETIONS - 1)];
4423 + service = completion->service_userdata;
4424 + if (completion->reason == VCHIQ_SERVICE_CLOSED)
4425 + unlock_service(service);
4426 + instance->completion_remove++;
4427 + }
4428 +
4429 + /* Release the PEER service count. */
4430 + vchiq_release_internal(instance->state, NULL);
4431 +
4432 + {
4433 + struct list_head *pos, *next;
4434 + list_for_each_safe(pos, next,
4435 + &instance->bulk_waiter_list) {
4436 + struct bulk_waiter_node *waiter;
4437 + waiter = list_entry(pos,
4438 + struct bulk_waiter_node,
4439 + list);
4440 + list_del(pos);
4441 + vchiq_log_info(vchiq_arm_log_level,
4442 + "bulk_waiter - cleaned up %x "
4443 + "for pid %d",
4444 + (unsigned int)waiter, waiter->pid);
4445 + kfree(waiter);
4446 + }
4447 + }
4448 +
4449 + vchiq_proc_remove_instance(instance);
4450 +
4451 + kfree(instance);
4452 + file->private_data = NULL;
4453 + } break;
4454 +
4455 + default:
4456 + vchiq_log_error(vchiq_arm_log_level,
4457 + "Unknown minor device: %d", dev);
4458 + ret = -ENXIO;
4459 + }
4460 +
4461 +out:
4462 + return ret;
4463 +}
4464 +
4465 +/****************************************************************************
4466 +*
4467 +* vchiq_dump
4468 +*
4469 +***************************************************************************/
4470 +
4471 +void
4472 +vchiq_dump(void *dump_context, const char *str, int len)
4473 +{
4474 + DUMP_CONTEXT_T *context = (DUMP_CONTEXT_T *)dump_context;
4475 +
4476 + if (context->actual < context->space) {
4477 + int copy_bytes;
4478 + if (context->offset > 0) {
4479 + int skip_bytes = min(len, (int)context->offset);
4480 + str += skip_bytes;
4481 + len -= skip_bytes;
4482 + context->offset -= skip_bytes;
4483 + if (context->offset > 0)
4484 + return;
4485 + }
4486 + copy_bytes = min(len, (int)(context->space - context->actual));
4487 + if (copy_bytes == 0)
4488 + return;
4489 +		if (copy_to_user(context->buf + context->actual, str,
4490 +			copy_bytes)) {
4491 +			context->actual = -EFAULT;
+			return;
+		}
4492 +		context->actual += copy_bytes;
4493 + len -= copy_bytes;
4494 +
4495 +		/* If the terminating NUL is included in the length, then it
4496 +		** marks the end of a line and should be replaced with a
4497 +		** newline. */
4498 + if ((len == 0) && (str[copy_bytes - 1] == '\0')) {
4499 + char cr = '\n';
4500 + if (copy_to_user(context->buf + context->actual - 1,
4501 + &cr, 1))
4502 + context->actual = -EFAULT;
4503 + }
4504 + }
4505 +}
4506 +
4507 +/****************************************************************************
4508 +*
4509 +* vchiq_dump_platform_instances
4510 +*
4511 +***************************************************************************/
4512 +
4513 +void
4514 +vchiq_dump_platform_instances(void *dump_context)
4515 +{
4516 + VCHIQ_STATE_T *state = vchiq_get_state();
4517 + char buf[80];
4518 + int len;
4519 + int i;
4520 +
4521 + /* There is no list of instances, so instead scan all services,
4522 + marking those that have been dumped. */
4523 +
4524 + for (i = 0; i < state->unused_service; i++) {
4525 + VCHIQ_SERVICE_T *service = state->services[i];
4526 + VCHIQ_INSTANCE_T instance;
4527 +
4528 + if (service && (service->base.callback == service_callback)) {
4529 + instance = service->instance;
4530 + if (instance)
4531 + instance->mark = 0;
4532 + }
4533 + }
4534 +
4535 + for (i = 0; i < state->unused_service; i++) {
4536 + VCHIQ_SERVICE_T *service = state->services[i];
4537 + VCHIQ_INSTANCE_T instance;
4538 +
4539 + if (service && (service->base.callback == service_callback)) {
4540 + instance = service->instance;
4541 + if (instance && !instance->mark) {
4542 + len = snprintf(buf, sizeof(buf),
4543 + "Instance %x: pid %d,%s completions "
4544 + "%d/%d",
4545 + (unsigned int)instance, instance->pid,
4546 + instance->connected ? " connected, " :
4547 + "",
4548 + instance->completion_insert -
4549 + instance->completion_remove,
4550 + MAX_COMPLETIONS);
4551 +
4552 + vchiq_dump(dump_context, buf, len + 1);
4553 +
4554 + instance->mark = 1;
4555 + }
4556 + }
4557 + }
4558 +}
4559 +
4560 +/****************************************************************************
4561 +*
4562 +* vchiq_dump_platform_service_state
4563 +*
4564 +***************************************************************************/
4565 +
4566 +void
4567 +vchiq_dump_platform_service_state(void *dump_context, VCHIQ_SERVICE_T *service)
4568 +{
4569 + USER_SERVICE_T *user_service = (USER_SERVICE_T *)service->base.userdata;
4570 + char buf[80];
4571 + int len;
4572 +
4573 + len = snprintf(buf, sizeof(buf), " instance %x",
4574 + (unsigned int)service->instance);
4575 +
4576 + if ((service->base.callback == service_callback) &&
4577 + user_service->is_vchi) {
4578 + len += snprintf(buf + len, sizeof(buf) - len,
4579 + ", %d/%d messages",
4580 + user_service->msg_insert - user_service->msg_remove,
4581 + MSG_QUEUE_SIZE);
4582 +
4583 + if (user_service->dequeue_pending)
4584 + len += snprintf(buf + len, sizeof(buf) - len,
4585 + " (dequeue pending)");
4586 + }
4587 +
4588 + vchiq_dump(dump_context, buf, len + 1);
4589 +}
4590 +
4591 +/****************************************************************************
4592 +*
4593 +* dump_phys_mem
4594 +*
4595 +***************************************************************************/
4596 +
4597 +static void
4598 +dump_phys_mem(void *virt_addr, uint32_t num_bytes)
4599 +{
4600 + int rc;
4601 + uint8_t *end_virt_addr = virt_addr + num_bytes;
4602 + int num_pages;
4603 + int offset;
4604 + int end_offset;
4605 + int page_idx;
4606 + int prev_idx;
4607 + struct page *page;
4608 + struct page **pages;
4609 + uint8_t *kmapped_virt_ptr;
4610 +
4611 +	/* Align virt_addr and end_virt_addr to 16-byte boundaries. */
4612 +
4613 + virt_addr = (void *)((unsigned long)virt_addr & ~0x0fuL);
4614 + end_virt_addr = (void *)(((unsigned long)end_virt_addr + 15uL) &
4615 + ~0x0fuL);
4616 +
4617 + offset = (int)(long)virt_addr & (PAGE_SIZE - 1);
4618 + end_offset = (int)(long)end_virt_addr & (PAGE_SIZE - 1);
4619 +
4620 + num_pages = (offset + num_bytes + PAGE_SIZE - 1) / PAGE_SIZE;
4621 +
4622 + pages = kmalloc(sizeof(struct page *) * num_pages, GFP_KERNEL);
4623 + if (pages == NULL) {
4624 + vchiq_log_error(vchiq_arm_log_level,
4625 +			"Unable to allocate memory for %d pages\n",
4626 + num_pages);
4627 + return;
4628 + }
4629 +
4630 + down_read(&current->mm->mmap_sem);
4631 + rc = get_user_pages(current, /* task */
4632 + current->mm, /* mm */
4633 + (unsigned long)virt_addr, /* start */
4634 + num_pages, /* len */
4635 + 0, /* write */
4636 + 0, /* force */
4637 + pages, /* pages (array of page pointers) */
4638 + NULL); /* vmas */
4639 + up_read(&current->mm->mmap_sem);
4640 +
4641 + prev_idx = -1;
4642 + page = NULL;
4643 +
4644 + while (offset < end_offset) {
4645 +
4646 + int page_offset = offset % PAGE_SIZE;
4647 + page_idx = offset / PAGE_SIZE;
4648 +
4649 + if (page_idx != prev_idx) {
4650 +
4651 + if (page != NULL)
4652 + kunmap(page);
4653 + page = pages[page_idx];
4654 + kmapped_virt_ptr = kmap(page);
4655 +
4656 + prev_idx = page_idx;
4657 + }
4658 +
4659 + if (vchiq_arm_log_level >= VCHIQ_LOG_TRACE)
4660 + vchiq_log_dump_mem("ph",
4661 + (uint32_t)(unsigned long)&kmapped_virt_ptr[
4662 + page_offset],
4663 + &kmapped_virt_ptr[page_offset], 16);
4664 +
4665 + offset += 16;
4666 + }
4667 + if (page != NULL)
4668 + kunmap(page);
4669 +
4670 + for (page_idx = 0; page_idx < num_pages; page_idx++)
4671 + page_cache_release(pages[page_idx]);
4672 +
4673 + kfree(pages);
4674 +}
4675 +
4676 +/****************************************************************************
4677 +*
4678 +* vchiq_read
4679 +*
4680 +***************************************************************************/
4681 +
4682 +static ssize_t
4683 +vchiq_read(struct file *file, char __user *buf,
4684 + size_t count, loff_t *ppos)
4685 +{
4686 + DUMP_CONTEXT_T context;
4687 + context.buf = buf;
4688 + context.actual = 0;
4689 + context.space = count;
4690 + context.offset = *ppos;
4691 +
4692 + vchiq_dump_state(&context, &g_state);
4693 +
4694 + *ppos += context.actual;
4695 +
4696 + return context.actual;
4697 +}
4698 +
4699 +VCHIQ_STATE_T *
4700 +vchiq_get_state(void)
4701 +{
4702 +
4703 + if (g_state.remote == NULL)
4704 + printk(KERN_ERR "%s: g_state.remote == NULL\n", __func__);
4705 + else if (g_state.remote->initialised != 1)
4706 + printk(KERN_NOTICE "%s: g_state.remote->initialised != 1 (%d)\n",
4707 + __func__, g_state.remote->initialised);
4708 +
4709 + return ((g_state.remote != NULL) &&
4710 + (g_state.remote->initialised == 1)) ? &g_state : NULL;
4711 +}
4712 +
4713 +static const struct file_operations
4714 +vchiq_fops = {
4715 + .owner = THIS_MODULE,
4716 + .unlocked_ioctl = vchiq_ioctl,
4717 + .open = vchiq_open,
4718 + .release = vchiq_release,
4719 + .read = vchiq_read
4720 +};
4721 +
4722 +/*
4723 + * Autosuspend related functionality
4724 + */
4725 +
4726 +int
4727 +vchiq_videocore_wanted(VCHIQ_STATE_T *state)
4728 +{
4729 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4730 + if (!arm_state)
4731 + /* autosuspend not supported - always return wanted */
4732 + return 1;
4733 + else if (arm_state->blocked_count)
4734 + return 1;
4735 + else if (!arm_state->videocore_use_count)
4736 + /* usage count zero - check for override unless we're forcing */
4737 + if (arm_state->resume_blocked)
4738 + return 0;
4739 + else
4740 + return vchiq_platform_videocore_wanted(state);
4741 + else
4742 + /* non-zero usage count - videocore still required */
4743 + return 1;
4744 +}
4745 +
4746 +static VCHIQ_STATUS_T
4747 +vchiq_keepalive_vchiq_callback(VCHIQ_REASON_T reason,
4748 + VCHIQ_HEADER_T *header,
4749 + VCHIQ_SERVICE_HANDLE_T service_user,
4750 + void *bulk_user)
4751 +{
4752 + vchiq_log_error(vchiq_susp_log_level,
4753 + "%s callback reason %d", __func__, reason);
4754 + return 0;
4755 +}
4756 +
4757 +static int
4758 +vchiq_keepalive_thread_func(void *v)
4759 +{
4760 + VCHIQ_STATE_T *state = (VCHIQ_STATE_T *) v;
4761 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4762 +
4763 + VCHIQ_STATUS_T status;
4764 + VCHIQ_INSTANCE_T instance;
4765 + VCHIQ_SERVICE_HANDLE_T ka_handle;
4766 +
4767 + VCHIQ_SERVICE_PARAMS_T params = {
4768 + .fourcc = VCHIQ_MAKE_FOURCC('K', 'E', 'E', 'P'),
4769 + .callback = vchiq_keepalive_vchiq_callback,
4770 + .version = KEEPALIVE_VER,
4771 + .version_min = KEEPALIVE_VER_MIN
4772 + };
4773 +
4774 + status = vchiq_initialise(&instance);
4775 + if (status != VCHIQ_SUCCESS) {
4776 + vchiq_log_error(vchiq_susp_log_level,
4777 + "%s vchiq_initialise failed %d", __func__, status);
4778 + goto exit;
4779 + }
4780 +
4781 + status = vchiq_connect(instance);
4782 + if (status != VCHIQ_SUCCESS) {
4783 + vchiq_log_error(vchiq_susp_log_level,
4784 + "%s vchiq_connect failed %d", __func__, status);
4785 + goto shutdown;
4786 + }
4787 +
4788 + status = vchiq_add_service(instance, &params, &ka_handle);
4789 + if (status != VCHIQ_SUCCESS) {
4790 + vchiq_log_error(vchiq_susp_log_level,
4791 +			"%s vchiq_add_service failed %d", __func__, status);
4792 + goto shutdown;
4793 + }
4794 +
4795 + while (1) {
4796 + long rc = 0, uc = 0;
4797 + if (wait_for_completion_interruptible(&arm_state->ka_evt)
4798 + != 0) {
4799 + vchiq_log_error(vchiq_susp_log_level,
4800 + "%s interrupted", __func__);
4801 + flush_signals(current);
4802 + continue;
4803 + }
4804 +
4805 + /* read and clear counters. Do release_count then use_count to
4806 + * prevent getting more releases than uses */
4807 + rc = atomic_xchg(&arm_state->ka_release_count, 0);
4808 + uc = atomic_xchg(&arm_state->ka_use_count, 0);
4809 +
4810 + /* Call use/release service the requisite number of times.
4811 + * Process use before release so use counts don't go negative */
4812 + while (uc--) {
4813 + atomic_inc(&arm_state->ka_use_ack_count);
4814 + status = vchiq_use_service(ka_handle);
4815 + if (status != VCHIQ_SUCCESS) {
4816 + vchiq_log_error(vchiq_susp_log_level,
4817 + "%s vchiq_use_service error %d",
4818 + __func__, status);
4819 + }
4820 + }
4821 + while (rc--) {
4822 + status = vchiq_release_service(ka_handle);
4823 + if (status != VCHIQ_SUCCESS) {
4824 + vchiq_log_error(vchiq_susp_log_level,
4825 + "%s vchiq_release_service error %d",
4826 + __func__, status);
4827 + }
4828 + }
4829 + }
4830 +
4831 +shutdown:
4832 + vchiq_shutdown(instance);
4833 +exit:
4834 + return 0;
4835 +}
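
The keepalive thread above relies on a read-and-clear counter pattern: snapshot the release counter before the use counter, then replay uses before releases, so the balance can never go negative. A minimal user-space sketch of the same pattern, using C11 atomics in place of the kernel's atomic_t/atomic_xchg() (illustrative only, not part of the patch; do_use()/do_release() stand in for vchiq_use_service()/vchiq_release_service()):

#include <stdatomic.h>
#include <stdio.h>

static atomic_long ka_use_count;
static atomic_long ka_release_count;

/* Stand-ins for vchiq_use_service()/vchiq_release_service(). */
static void do_use(void)     { puts("use"); }
static void do_release(void) { puts("release"); }

static void drain_keepalive_counters(void)
{
	/* Snapshot release_count first, then use_count, so this round can
	 * never contain more releases than the uses that preceded them. */
	long rc = atomic_exchange(&ka_release_count, 0);
	long uc = atomic_exchange(&ka_use_count, 0);

	while (uc--)		/* process uses first ... */
		do_use();
	while (rc--)		/* ... then releases, keeping the balance >= 0 */
		do_release();
}

int main(void)
{
	atomic_fetch_add(&ka_use_count, 2);
	atomic_fetch_add(&ka_release_count, 1);
	drain_keepalive_counters();
	return 0;
}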
4836 +
4837 +
4838 +
4839 +VCHIQ_STATUS_T
4840 +vchiq_arm_init_state(VCHIQ_STATE_T *state, VCHIQ_ARM_STATE_T *arm_state)
4841 +{
4842 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
4843 +
4844 + if (arm_state) {
4845 + rwlock_init(&arm_state->susp_res_lock);
4846 +
4847 + init_completion(&arm_state->ka_evt);
4848 + atomic_set(&arm_state->ka_use_count, 0);
4849 + atomic_set(&arm_state->ka_use_ack_count, 0);
4850 + atomic_set(&arm_state->ka_release_count, 0);
4851 +
4852 + init_completion(&arm_state->vc_suspend_complete);
4853 +
4854 + init_completion(&arm_state->vc_resume_complete);
4855 + /* Initialise to 'done' state. We only want to block on resume
4856 + * completion while videocore is suspended. */
4857 + set_resume_state(arm_state, VC_RESUME_RESUMED);
4858 +
4859 + init_completion(&arm_state->resume_blocker);
4860 + /* Initialise to 'done' state. We only want to block on this
4861 + * completion while resume is blocked */
4862 + complete_all(&arm_state->resume_blocker);
4863 +
4864 + init_completion(&arm_state->blocked_blocker);
4865 + /* Initialise to 'done' state. We only want to block on this
4866 + * completion while things are waiting on the resume blocker */
4867 + complete_all(&arm_state->blocked_blocker);
4868 +
4869 + arm_state->suspend_timer_timeout = SUSPEND_TIMER_TIMEOUT_MS;
4870 + arm_state->suspend_timer_running = 0;
4871 + init_timer(&arm_state->suspend_timer);
4872 + arm_state->suspend_timer.data = (unsigned long)(state);
4873 + arm_state->suspend_timer.function = suspend_timer_callback;
4874 +
4875 + arm_state->first_connect = 0;
4876 +
4877 + }
4878 + return status;
4879 +}
4880 +
4881 +/*
4882 +** Functions to modify the state variables:
4883 +** set_suspend_state
4884 +** set_resume_state
4885 +**
4886 +** There are more state variables than we might like, so ensure they remain in
4887 +** step. Suspend and resume state are maintained separately, since most of
4888 +** these state machines can operate independently. However, there are a few
4889 +** states where state transitions in one state machine cause a reset to the
4890 +** other state machine. In addition, there are some completion events which
4891 +** need to occur on state machine reset and end-state(s), so these are also
4892 +** dealt with in these functions.
4893 +**
4894 +** In all states we set the state variable according to the input, but in some
4895 +** cases we perform additional steps outlined below;
4896 +**
4897 +** VC_SUSPEND_IDLE - Initialise the suspend completion at the same time.
4898 +** The suspend completion is completed after any suspend
4899 +** attempt. When we reset the state machine we also reset
4900 +** the completion. This reset occurs when videocore is
4901 +** resumed, and also if we initiate suspend after a suspend
4902 +** failure.
4903 +**
4904 +** VC_SUSPEND_IN_PROGRESS - This state is considered the point of no return for
4905 +** suspend - ie from this point on we must try to suspend
4906 +** before resuming can occur. We therefore also reset the
4907 +** resume state machine to VC_RESUME_IDLE in this state.
4908 +**
4909 +** VC_SUSPEND_SUSPENDED - Suspend has completed successfully. Also call
4910 +** complete_all on the suspend completion to notify
4911 +** anything waiting for suspend to happen.
4912 +**
4913 +** VC_SUSPEND_REJECTED - Videocore rejected suspend. Videocore will also
4914 +** initiate resume, so no need to alter resume state.
4915 +** We call complete_all on the suspend completion to notify
4916 +** of suspend rejection.
4917 +**
4918 +** VC_SUSPEND_FAILED - We failed to initiate videocore suspend. We notify the
4919 +** suspend completion and reset the resume state machine.
4920 +**
4921 +** VC_RESUME_IDLE - Initialise the resume completion at the same time. The
4922 +**                      resume completion is in its 'done' state whenever
4923 +**                      videocore is running. Therefore, the VC_RESUME_IDLE state
4924 +** implies that videocore is suspended.
4925 +** Hence, any thread which needs to wait until videocore is
4926 +** running can wait on this completion - it will only block
4927 +** if videocore is suspended.
4928 +**
4929 +** VC_RESUME_RESUMED - Resume has completed successfully. Videocore is running.
4930 +** Call complete_all on the resume completion to unblock
4931 +** any threads waiting for resume. Also reset the suspend
4932 +**                     state machine to its idle state.
4933 +**
4934 +** VC_RESUME_FAILED - Currently unused - no mechanism to fail resume exists.
4935 +*/
4936 +
4937 +inline void
4938 +set_suspend_state(VCHIQ_ARM_STATE_T *arm_state,
4939 + enum vc_suspend_status new_state)
4940 +{
4941 + /* set the state in all cases */
4942 + arm_state->vc_suspend_state = new_state;
4943 +
4944 + /* state specific additional actions */
4945 + switch (new_state) {
4946 + case VC_SUSPEND_FORCE_CANCELED:
4947 + complete_all(&arm_state->vc_suspend_complete);
4948 + break;
4949 + case VC_SUSPEND_REJECTED:
4950 + complete_all(&arm_state->vc_suspend_complete);
4951 + break;
4952 + case VC_SUSPEND_FAILED:
4953 + complete_all(&arm_state->vc_suspend_complete);
4954 + arm_state->vc_resume_state = VC_RESUME_RESUMED;
4955 + complete_all(&arm_state->vc_resume_complete);
4956 + break;
4957 + case VC_SUSPEND_IDLE:
4958 + INIT_COMPLETION(arm_state->vc_suspend_complete);
4959 + break;
4960 + case VC_SUSPEND_REQUESTED:
4961 + break;
4962 + case VC_SUSPEND_IN_PROGRESS:
4963 + set_resume_state(arm_state, VC_RESUME_IDLE);
4964 + break;
4965 + case VC_SUSPEND_SUSPENDED:
4966 + complete_all(&arm_state->vc_suspend_complete);
4967 + break;
4968 + default:
4969 + BUG();
4970 + break;
4971 + }
4972 +}
4973 +
4974 +inline void
4975 +set_resume_state(VCHIQ_ARM_STATE_T *arm_state,
4976 + enum vc_resume_status new_state)
4977 +{
4978 + /* set the state in all cases */
4979 + arm_state->vc_resume_state = new_state;
4980 +
4981 + /* state specific additional actions */
4982 + switch (new_state) {
4983 + case VC_RESUME_FAILED:
4984 + break;
4985 + case VC_RESUME_IDLE:
4986 + INIT_COMPLETION(arm_state->vc_resume_complete);
4987 + break;
4988 + case VC_RESUME_REQUESTED:
4989 + break;
4990 + case VC_RESUME_IN_PROGRESS:
4991 + break;
4992 + case VC_RESUME_RESUMED:
4993 + complete_all(&arm_state->vc_resume_complete);
4994 + set_suspend_state(arm_state, VC_SUSPEND_IDLE);
4995 + break;
4996 + default:
4997 + BUG();
4998 + break;
4999 + }
5000 +}
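
To make the coupling documented above concrete: entering VC_SUSPEND_IN_PROGRESS resets the resume machine to VC_RESUME_IDLE, and entering VC_RESUME_RESUMED resets the suspend machine to VC_SUSPEND_IDLE. A stripped-down user-space model of just that coupling (illustrative only; the error states and completion handling are omitted, and the enum names are simplified):

#include <stdio.h>

enum vc_suspend { S_IDLE, S_REQUESTED, S_IN_PROGRESS, S_SUSPENDED };
enum vc_resume  { R_IDLE, R_REQUESTED, R_IN_PROGRESS, R_RESUMED };

static enum vc_suspend suspend_state = S_IDLE;
static enum vc_resume  resume_state  = R_RESUMED;	/* start 'done', as at init */

static void set_resume(enum vc_resume s);

static void set_suspend(enum vc_suspend s)
{
	suspend_state = s;
	if (s == S_IN_PROGRESS)		/* point of no return: reset resume machine */
		set_resume(R_IDLE);
}

static void set_resume(enum vc_resume s)
{
	resume_state = s;
	if (s == R_RESUMED)		/* videocore running: reset suspend machine */
		set_suspend(S_IDLE);
}

int main(void)
{
	set_suspend(S_REQUESTED);
	set_suspend(S_IN_PROGRESS);	/* resume machine forced back to R_IDLE */
	set_suspend(S_SUSPENDED);
	set_resume(R_REQUESTED);
	set_resume(R_RESUMED);		/* suspend machine forced back to S_IDLE */
	printf("suspend=%d resume=%d\n", suspend_state, resume_state);
	return 0;
}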
5001 +
5002 +
5003 +/* should be called with the write lock held */
5004 +inline void
5005 +start_suspend_timer(VCHIQ_ARM_STATE_T *arm_state)
5006 +{
5007 + del_timer(&arm_state->suspend_timer);
5008 + arm_state->suspend_timer.expires = jiffies +
5009 + msecs_to_jiffies(arm_state->
5010 + suspend_timer_timeout);
5011 + add_timer(&arm_state->suspend_timer);
5012 + arm_state->suspend_timer_running = 1;
5013 +}
5014 +
5015 +/* should be called with the write lock held */
5016 +static inline void
5017 +stop_suspend_timer(VCHIQ_ARM_STATE_T *arm_state)
5018 +{
5019 + if (arm_state->suspend_timer_running) {
5020 + del_timer(&arm_state->suspend_timer);
5021 + arm_state->suspend_timer_running = 0;
5022 + }
5023 +}
5024 +
5025 +static inline int
5026 +need_resume(VCHIQ_STATE_T *state)
5027 +{
5028 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
5029 + return (arm_state->vc_suspend_state > VC_SUSPEND_IDLE) &&
5030 + (arm_state->vc_resume_state < VC_RESUME_REQUESTED) &&
5031 + vchiq_videocore_wanted(state);
5032 +}
5033 +
5034 +static int
5035 +block_resume(VCHIQ_ARM_STATE_T *arm_state)
5036 +{
5037 + int status = VCHIQ_SUCCESS;
5038 + const unsigned long timeout_val =
5039 + msecs_to_jiffies(FORCE_SUSPEND_TIMEOUT_MS);
5040 + int resume_count = 0;
5041 +
5042 + /* Allow any threads which were blocked by the last force suspend to
5043 + * complete if they haven't already. Only give this one shot; if
5044 + * blocked_count is incremented after blocked_blocker is completed
5045 + * (which only happens when blocked_count hits 0) then those threads
5046 + * will have to wait until next time around */
5047 + if (arm_state->blocked_count) {
5048 + INIT_COMPLETION(arm_state->blocked_blocker);
5049 + write_unlock_bh(&arm_state->susp_res_lock);
5050 + vchiq_log_info(vchiq_susp_log_level, "%s wait for previously "
5051 + "blocked clients", __func__);
5052 + if (wait_for_completion_interruptible_timeout(
5053 + &arm_state->blocked_blocker, timeout_val)
5054 + <= 0) {
5055 + vchiq_log_error(vchiq_susp_log_level, "%s wait for "
5056 + "previously blocked clients failed" , __func__);
5057 + status = VCHIQ_ERROR;
5058 + write_lock_bh(&arm_state->susp_res_lock);
5059 + goto out;
5060 + }
5061 + vchiq_log_info(vchiq_susp_log_level, "%s previously blocked "
5062 + "clients resumed", __func__);
5063 + write_lock_bh(&arm_state->susp_res_lock);
5064 + }
5065 +
5066 + /* We need to wait for resume to complete if it's in process */
5067 + while (arm_state->vc_resume_state != VC_RESUME_RESUMED &&
5068 + arm_state->vc_resume_state > VC_RESUME_IDLE) {
5069 + if (resume_count > 1) {
5070 + status = VCHIQ_ERROR;
5071 + vchiq_log_error(vchiq_susp_log_level, "%s waited too "
5072 + "many times for resume" , __func__);
5073 + goto out;
5074 + }
5075 + write_unlock_bh(&arm_state->susp_res_lock);
5076 + vchiq_log_info(vchiq_susp_log_level, "%s wait for resume",
5077 + __func__);
5078 + if (wait_for_completion_interruptible_timeout(
5079 + &arm_state->vc_resume_complete, timeout_val)
5080 + <= 0) {
5081 + vchiq_log_error(vchiq_susp_log_level, "%s wait for "
5082 + "resume failed (%s)", __func__,
5083 + resume_state_names[arm_state->vc_resume_state +
5084 + VC_RESUME_NUM_OFFSET]);
5085 + status = VCHIQ_ERROR;
5086 + write_lock_bh(&arm_state->susp_res_lock);
5087 + goto out;
5088 + }
5089 + vchiq_log_info(vchiq_susp_log_level, "%s resumed", __func__);
5090 + write_lock_bh(&arm_state->susp_res_lock);
5091 + resume_count++;
5092 + }
5093 + INIT_COMPLETION(arm_state->resume_blocker);
5094 + arm_state->resume_blocked = 1;
5095 +
5096 +out:
5097 + return status;
5098 +}
5099 +
5100 +static inline void
5101 +unblock_resume(VCHIQ_ARM_STATE_T *arm_state)
5102 +{
5103 + complete_all(&arm_state->resume_blocker);
5104 + arm_state->resume_blocked = 0;
5105 +}
5106 +
5107 +/* Initiate suspend via slot handler. Should be called with the write lock
5108 + * held */
5109 +VCHIQ_STATUS_T
5110 +vchiq_arm_vcsuspend(VCHIQ_STATE_T *state)
5111 +{
5112 + VCHIQ_STATUS_T status = VCHIQ_ERROR;
5113 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
5114 +
5115 + if (!arm_state)
5116 + goto out;
5117 +
5118 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
5119 + status = VCHIQ_SUCCESS;
5120 +
5121 +
5122 + switch (arm_state->vc_suspend_state) {
5123 + case VC_SUSPEND_REQUESTED:
5124 + vchiq_log_info(vchiq_susp_log_level, "%s: suspend already "
5125 + "requested", __func__);
5126 + break;
5127 + case VC_SUSPEND_IN_PROGRESS:
5128 + vchiq_log_info(vchiq_susp_log_level, "%s: suspend already in "
5129 + "progress", __func__);
5130 + break;
5131 +
5132 + default:
5133 + /* We don't expect to be in other states, so log but continue
5134 + * anyway */
5135 + vchiq_log_error(vchiq_susp_log_level,
5136 + "%s unexpected suspend state %s", __func__,
5137 + suspend_state_names[arm_state->vc_suspend_state +
5138 + VC_SUSPEND_NUM_OFFSET]);
5139 + /* fall through */
5140 + case VC_SUSPEND_REJECTED:
5141 + case VC_SUSPEND_FAILED:
5142 + /* Ensure any idle state actions have been run */
5143 + set_suspend_state(arm_state, VC_SUSPEND_IDLE);
5144 + /* fall through */
5145 + case VC_SUSPEND_IDLE:
5146 + vchiq_log_info(vchiq_susp_log_level,
5147 + "%s: suspending", __func__);
5148 + set_suspend_state(arm_state, VC_SUSPEND_REQUESTED);
5149 + /* kick the slot handler thread to initiate suspend */
5150 + request_poll(state, NULL, 0);
5151 + break;
5152 + }
5153 +
5154 +out:
5155 + vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, status);
5156 + return status;
5157 +}
5158 +
5159 +void
5160 +vchiq_platform_check_suspend(VCHIQ_STATE_T *state)
5161 +{
5162 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
5163 + int susp = 0;
5164 +
5165 + if (!arm_state)
5166 + goto out;
5167 +
5168 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
5169 +
5170 + write_lock_bh(&arm_state->susp_res_lock);
5171 + if (arm_state->vc_suspend_state == VC_SUSPEND_REQUESTED &&
5172 + arm_state->vc_resume_state == VC_RESUME_RESUMED) {
5173 + set_suspend_state(arm_state, VC_SUSPEND_IN_PROGRESS);
5174 + susp = 1;
5175 + }
5176 + write_unlock_bh(&arm_state->susp_res_lock);
5177 +
5178 + if (susp)
5179 + vchiq_platform_suspend(state);
5180 +
5181 +out:
5182 + vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
5183 + return;
5184 +}
5185 +
5186 +
5187 +static void
5188 +output_timeout_error(VCHIQ_STATE_T *state)
5189 +{
5190 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
5191 + char service_err[50] = "";
5192 + int vc_use_count = arm_state->videocore_use_count;
5193 + int active_services = state->unused_service;
5194 + int i;
5195 +
5196 + if (!arm_state->videocore_use_count) {
5197 + snprintf(service_err, 50, " Videocore usecount is 0");
5198 + goto output_msg;
5199 + }
5200 + for (i = 0; i < active_services; i++) {
5201 + VCHIQ_SERVICE_T *service_ptr = state->services[i];
5202 + if (service_ptr && service_ptr->service_use_count &&
5203 + (service_ptr->srvstate != VCHIQ_SRVSTATE_FREE)) {
5204 + snprintf(service_err, 50, " %c%c%c%c(%d) service has "
5205 + "use count %d%s", VCHIQ_FOURCC_AS_4CHARS(
5206 + service_ptr->base.fourcc),
5207 + service_ptr->client_id,
5208 + service_ptr->service_use_count,
5209 + service_ptr->service_use_count ==
5210 + vc_use_count ? "" : " (+ more)");
5211 + break;
5212 + }
5213 + }
5214 +
5215 +output_msg:
5216 + vchiq_log_error(vchiq_susp_log_level,
5217 + "timed out waiting for vc suspend (%d).%s",
5218 + arm_state->autosuspend_override, service_err);
5219 +
5220 +}
5221 +
5222 +/* Try to get videocore into suspended state, regardless of autosuspend state.
5223 +** We don't actually force suspend, since videocore may get into a bad state
5224 +** if we force suspend at a bad time. Instead, we wait for autosuspend to
5225 +** determine a good point to suspend. If this doesn't happen within 100ms we
5226 +** report failure.
5227 +**
5228 +** Returns VCHIQ_SUCCESS if videocore suspended successfully, VCHIQ_RETRY if
5229 +** videocore failed to suspend in time or VCHIQ_ERROR if interrupted.
5230 +*/
5231 +VCHIQ_STATUS_T
5232 +vchiq_arm_force_suspend(VCHIQ_STATE_T *state)
5233 +{
5234 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
5235 + VCHIQ_STATUS_T status = VCHIQ_ERROR;
5236 + long rc = 0;
5237 + int repeat = -1;
5238 +
5239 + if (!arm_state)
5240 + goto out;
5241 +
5242 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
5243 +
5244 + write_lock_bh(&arm_state->susp_res_lock);
5245 +
5246 + status = block_resume(arm_state);
5247 + if (status != VCHIQ_SUCCESS)
5248 + goto unlock;
5249 + if (arm_state->vc_suspend_state == VC_SUSPEND_SUSPENDED) {
5250 + /* Already suspended - just block resume and exit */
5251 + vchiq_log_info(vchiq_susp_log_level, "%s already suspended",
5252 + __func__);
5253 + status = VCHIQ_SUCCESS;
5254 + goto unlock;
5255 + } else if (arm_state->vc_suspend_state <= VC_SUSPEND_IDLE) {
5256 + /* initiate suspend immediately in the case that we're waiting
5257 + * for the timeout */
5258 + stop_suspend_timer(arm_state);
5259 + if (!vchiq_videocore_wanted(state)) {
5260 + vchiq_log_info(vchiq_susp_log_level, "%s videocore "
5261 + "idle, initiating suspend", __func__);
5262 + status = vchiq_arm_vcsuspend(state);
5263 + } else if (arm_state->autosuspend_override <
5264 + FORCE_SUSPEND_FAIL_MAX) {
5265 + vchiq_log_info(vchiq_susp_log_level, "%s letting "
5266 + "videocore go idle", __func__);
5267 + status = VCHIQ_SUCCESS;
5268 + } else {
5269 + vchiq_log_warning(vchiq_susp_log_level, "%s failed too "
5270 + "many times - attempting suspend", __func__);
5271 + status = vchiq_arm_vcsuspend(state);
5272 + }
5273 + } else {
5274 + vchiq_log_info(vchiq_susp_log_level, "%s videocore suspend "
5275 + "in progress - wait for completion", __func__);
5276 + status = VCHIQ_SUCCESS;
5277 + }
5278 +
5279 + /* Wait for suspend to happen due to system idle (not forced..) */
5280 + if (status != VCHIQ_SUCCESS)
5281 + goto unblock_resume;
5282 +
5283 + do {
5284 + write_unlock_bh(&arm_state->susp_res_lock);
5285 +
5286 + rc = wait_for_completion_interruptible_timeout(
5287 + &arm_state->vc_suspend_complete,
5288 + msecs_to_jiffies(FORCE_SUSPEND_TIMEOUT_MS));
5289 +
5290 + write_lock_bh(&arm_state->susp_res_lock);
5291 + if (rc < 0) {
5292 + vchiq_log_warning(vchiq_susp_log_level, "%s "
5293 + "interrupted waiting for suspend", __func__);
5294 + status = VCHIQ_ERROR;
5295 + goto unblock_resume;
5296 + } else if (rc == 0) {
5297 + if (arm_state->vc_suspend_state > VC_SUSPEND_IDLE) {
5298 + /* Repeat timeout once if in progress */
5299 + if (repeat < 0) {
5300 + repeat = 1;
5301 + continue;
5302 + }
5303 + }
5304 + arm_state->autosuspend_override++;
5305 + output_timeout_error(state);
5306 +
5307 + status = VCHIQ_RETRY;
5308 + goto unblock_resume;
5309 + }
5310 + } while (0 < (repeat--));
5311 +
5312 + /* Check and report state in case we need to abort ARM suspend */
5313 + if (arm_state->vc_suspend_state != VC_SUSPEND_SUSPENDED) {
5314 + status = VCHIQ_RETRY;
5315 + vchiq_log_error(vchiq_susp_log_level,
5316 + "%s videocore suspend failed (state %s)", __func__,
5317 + suspend_state_names[arm_state->vc_suspend_state +
5318 + VC_SUSPEND_NUM_OFFSET]);
5319 + /* Reset the state only if it's still in an error state.
5320 + * Something could have already initiated another suspend. */
5321 + if (arm_state->vc_suspend_state < VC_SUSPEND_IDLE)
5322 + set_suspend_state(arm_state, VC_SUSPEND_IDLE);
5323 +
5324 + goto unblock_resume;
5325 + }
5326 +
5327 + /* successfully suspended - unlock and exit */
5328 + goto unlock;
5329 +
5330 +unblock_resume:
5331 + /* all error states need to unblock resume before exit */
5332 + unblock_resume(arm_state);
5333 +
5334 +unlock:
5335 + write_unlock_bh(&arm_state->susp_res_lock);
5336 +
5337 +out:
5338 + vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, status);
5339 + return status;
5340 +}
5341 +
5342 +void
5343 +vchiq_check_suspend(VCHIQ_STATE_T *state)
5344 +{
5345 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
5346 +
5347 + if (!arm_state)
5348 + goto out;
5349 +
5350 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
5351 +
5352 + write_lock_bh(&arm_state->susp_res_lock);
5353 + if (arm_state->vc_suspend_state != VC_SUSPEND_SUSPENDED &&
5354 + arm_state->first_connect &&
5355 + !vchiq_videocore_wanted(state)) {
5356 + vchiq_arm_vcsuspend(state);
5357 + }
5358 + write_unlock_bh(&arm_state->susp_res_lock);
5359 +
5360 +out:
5361 + vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
5362 + return;
5363 +}
5364 +
5365 +
5366 +int
5367 +vchiq_arm_allow_resume(VCHIQ_STATE_T *state)
5368 +{
5369 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
5370 + int resume = 0;
5371 + int ret = -1;
5372 +
5373 + if (!arm_state)
5374 + goto out;
5375 +
5376 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
5377 +
5378 + write_lock_bh(&arm_state->susp_res_lock);
5379 + unblock_resume(arm_state);
5380 + resume = vchiq_check_resume(state);
5381 + write_unlock_bh(&arm_state->susp_res_lock);
5382 +
5383 + if (resume) {
5384 + if (wait_for_completion_interruptible(
5385 + &arm_state->vc_resume_complete) < 0) {
5386 + vchiq_log_error(vchiq_susp_log_level,
5387 + "%s interrupted", __func__);
5388 + /* failed, cannot accurately derive suspend
5389 + * state, so exit early. */
5390 + goto out;
5391 + }
5392 + }
5393 +
5394 + read_lock_bh(&arm_state->susp_res_lock);
5395 + if (arm_state->vc_suspend_state == VC_SUSPEND_SUSPENDED) {
5396 + vchiq_log_info(vchiq_susp_log_level,
5397 + "%s: Videocore remains suspended", __func__);
5398 + } else {
5399 + vchiq_log_info(vchiq_susp_log_level,
5400 + "%s: Videocore resumed", __func__);
5401 + ret = 0;
5402 + }
5403 + read_unlock_bh(&arm_state->susp_res_lock);
5404 +out:
5405 + vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
5406 + return ret;
5407 +}
5408 +
5409 +/* This function should be called with the write lock held */
5410 +int
5411 +vchiq_check_resume(VCHIQ_STATE_T *state)
5412 +{
5413 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
5414 + int resume = 0;
5415 +
5416 + if (!arm_state)
5417 + goto out;
5418 +
5419 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
5420 +
5421 + if (need_resume(state)) {
5422 + set_resume_state(arm_state, VC_RESUME_REQUESTED);
5423 + request_poll(state, NULL, 0);
5424 + resume = 1;
5425 + }
5426 +
5427 +out:
5428 + vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
5429 + return resume;
5430 +}
5431 +
5432 +void
5433 +vchiq_platform_check_resume(VCHIQ_STATE_T *state)
5434 +{
5435 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
5436 + int res = 0;
5437 +
5438 + if (!arm_state)
5439 + goto out;
5440 +
5441 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
5442 +
5443 + write_lock_bh(&arm_state->susp_res_lock);
5444 + if (arm_state->wake_address == 0) {
5445 + vchiq_log_info(vchiq_susp_log_level,
5446 + "%s: already awake", __func__);
5447 + goto unlock;
5448 + }
5449 + if (arm_state->vc_resume_state == VC_RESUME_IN_PROGRESS) {
5450 + vchiq_log_info(vchiq_susp_log_level,
5451 + "%s: already resuming", __func__);
5452 + goto unlock;
5453 + }
5454 +
5455 + if (arm_state->vc_resume_state == VC_RESUME_REQUESTED) {
5456 + set_resume_state(arm_state, VC_RESUME_IN_PROGRESS);
5457 + res = 1;
5458 + } else
5459 + vchiq_log_trace(vchiq_susp_log_level,
5460 + "%s: not resuming (resume state %s)", __func__,
5461 + resume_state_names[arm_state->vc_resume_state +
5462 + VC_RESUME_NUM_OFFSET]);
5463 +
5464 +unlock:
5465 + write_unlock_bh(&arm_state->susp_res_lock);
5466 +
5467 + if (res)
5468 + vchiq_platform_resume(state);
5469 +
5470 +out:
5471 + vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
5472 + return;
5473 +
5474 +}
5475 +
5476 +
5477 +
5478 +VCHIQ_STATUS_T
5479 +vchiq_use_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
5480 + enum USE_TYPE_E use_type)
5481 +{
5482 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
5483 + VCHIQ_STATUS_T ret = VCHIQ_SUCCESS;
5484 + char entity[16];
5485 + int *entity_uc;
5486 + int local_uc, local_entity_uc;
5487 +
5488 + if (!arm_state)
5489 + goto out;
5490 +
5491 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
5492 +
5493 + if (use_type == USE_TYPE_VCHIQ) {
5494 + sprintf(entity, "VCHIQ: ");
5495 + entity_uc = &arm_state->peer_use_count;
5496 + } else if (service) {
5497 + sprintf(entity, "%c%c%c%c:%03d",
5498 + VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
5499 + service->client_id);
5500 + entity_uc = &service->service_use_count;
5501 + } else {
5502 + vchiq_log_error(vchiq_susp_log_level, "%s null service "
5503 + "ptr", __func__);
5504 + ret = VCHIQ_ERROR;
5505 + goto out;
5506 + }
5507 +
5508 + write_lock_bh(&arm_state->susp_res_lock);
5509 + while (arm_state->resume_blocked) {
5510 + /* If we call 'use' while force suspend is waiting for suspend,
5511 + * then we're about to block the thread which the force is
5512 + * waiting to complete, so we're bound to just time out. In this
5513 + * case, set the suspend state such that the wait will be
5514 + * canceled, so we can complete as quickly as possible. */
5515 + if (arm_state->resume_blocked && arm_state->vc_suspend_state ==
5516 + VC_SUSPEND_IDLE) {
5517 + set_suspend_state(arm_state, VC_SUSPEND_FORCE_CANCELED);
5518 + break;
5519 + }
5520 + /* If suspend is already in progress then we need to block */
5521 + if (!try_wait_for_completion(&arm_state->resume_blocker)) {
5522 + /* Indicate that there are threads waiting on the resume
5523 + * blocker. These need to be allowed to complete before
5524 + * a _second_ call to force suspend can complete,
5525 + * otherwise low priority threads might never actually
5526 + * continue */
5527 + arm_state->blocked_count++;
5528 + write_unlock_bh(&arm_state->susp_res_lock);
5529 + vchiq_log_info(vchiq_susp_log_level, "%s %s resume "
5530 + "blocked - waiting...", __func__, entity);
5531 + if (wait_for_completion_killable(
5532 + &arm_state->resume_blocker) != 0) {
5533 + vchiq_log_error(vchiq_susp_log_level, "%s %s "
5534 + "wait for resume blocker interrupted",
5535 + __func__, entity);
5536 + ret = VCHIQ_ERROR;
5537 + write_lock_bh(&arm_state->susp_res_lock);
5538 + arm_state->blocked_count--;
5539 + write_unlock_bh(&arm_state->susp_res_lock);
5540 + goto out;
5541 + }
5542 + vchiq_log_info(vchiq_susp_log_level, "%s %s resume "
5543 + "unblocked", __func__, entity);
5544 + write_lock_bh(&arm_state->susp_res_lock);
5545 + if (--arm_state->blocked_count == 0)
5546 + complete_all(&arm_state->blocked_blocker);
5547 + }
5548 + }
5549 +
5550 + stop_suspend_timer(arm_state);
5551 +
5552 + local_uc = ++arm_state->videocore_use_count;
5553 + local_entity_uc = ++(*entity_uc);
5554 +
5555 + /* If there's a pending request which hasn't yet been serviced then
5556 + * just clear it. If we're past VC_SUSPEND_REQUESTED state then
5557 + * vc_resume_complete will block until we either resume or fail to
5558 + * suspend */
5559 + if (arm_state->vc_suspend_state <= VC_SUSPEND_REQUESTED)
5560 + set_suspend_state(arm_state, VC_SUSPEND_IDLE);
5561 +
5562 + if ((use_type != USE_TYPE_SERVICE_NO_RESUME) && need_resume(state)) {
5563 + set_resume_state(arm_state, VC_RESUME_REQUESTED);
5564 + vchiq_log_info(vchiq_susp_log_level,
5565 + "%s %s count %d, state count %d",
5566 + __func__, entity, local_entity_uc, local_uc);
5567 + request_poll(state, NULL, 0);
5568 + } else
5569 + vchiq_log_trace(vchiq_susp_log_level,
5570 + "%s %s count %d, state count %d",
5571 + __func__, entity, *entity_uc, local_uc);
5572 +
5573 +
5574 + write_unlock_bh(&arm_state->susp_res_lock);
5575 +
5576 + /* Completion is in a done state when we're not suspended, so this won't
5577 + * block for the non-suspended case. */
5578 + if (!try_wait_for_completion(&arm_state->vc_resume_complete)) {
5579 + vchiq_log_info(vchiq_susp_log_level, "%s %s wait for resume",
5580 + __func__, entity);
5581 + if (wait_for_completion_killable(
5582 + &arm_state->vc_resume_complete) != 0) {
5583 + vchiq_log_error(vchiq_susp_log_level, "%s %s wait for "
5584 + "resume interrupted", __func__, entity);
5585 + ret = VCHIQ_ERROR;
5586 + goto out;
5587 + }
5588 + vchiq_log_info(vchiq_susp_log_level, "%s %s resumed", __func__,
5589 + entity);
5590 + }
5591 +
5592 + if (ret == VCHIQ_SUCCESS) {
5593 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
5594 + long ack_cnt = atomic_xchg(&arm_state->ka_use_ack_count, 0);
5595 + while (ack_cnt && (status == VCHIQ_SUCCESS)) {
5596 + /* Send the use notify to videocore */
5597 + status = vchiq_send_remote_use_active(state);
5598 + if (status == VCHIQ_SUCCESS)
5599 + ack_cnt--;
5600 + else
5601 + atomic_add(ack_cnt,
5602 + &arm_state->ka_use_ack_count);
5603 + }
5604 + }
5605 +
5606 +out:
5607 + vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
5608 + return ret;
5609 +}
5610 +
5611 +VCHIQ_STATUS_T
5612 +vchiq_release_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service)
5613 +{
5614 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
5615 + VCHIQ_STATUS_T ret = VCHIQ_SUCCESS;
5616 + char entity[16];
5617 + int *entity_uc;
5618 + int local_uc, local_entity_uc;
5619 +
5620 + if (!arm_state)
5621 + goto out;
5622 +
5623 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
5624 +
5625 + if (service) {
5626 + sprintf(entity, "%c%c%c%c:%03d",
5627 + VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
5628 + service->client_id);
5629 + entity_uc = &service->service_use_count;
5630 + } else {
5631 + sprintf(entity, "PEER: ");
5632 + entity_uc = &arm_state->peer_use_count;
5633 + }
5634 +
5635 + write_lock_bh(&arm_state->susp_res_lock);
5636 + if (!arm_state->videocore_use_count || !(*entity_uc)) {
5637 + /* Don't use BUG_ON - don't allow user thread to crash kernel */
5638 + WARN_ON(!arm_state->videocore_use_count);
5639 + WARN_ON(!(*entity_uc));
5640 + ret = VCHIQ_ERROR;
5641 + goto unlock;
5642 + }
5643 + local_uc = --arm_state->videocore_use_count;
5644 + local_entity_uc = --(*entity_uc);
5645 +
5646 + if (!vchiq_videocore_wanted(state)) {
5647 + if (vchiq_platform_use_suspend_timer() &&
5648 + !arm_state->resume_blocked) {
5649 + /* Only use the timer if we're not trying to force
5650 + * suspend (=> resume_blocked) */
5651 + start_suspend_timer(arm_state);
5652 + } else {
5653 + vchiq_log_info(vchiq_susp_log_level,
5654 + "%s %s count %d, state count %d - suspending",
5655 + __func__, entity, *entity_uc,
5656 + arm_state->videocore_use_count);
5657 + vchiq_arm_vcsuspend(state);
5658 + }
5659 + } else
5660 + vchiq_log_trace(vchiq_susp_log_level,
5661 + "%s %s count %d, state count %d",
5662 + __func__, entity, *entity_uc,
5663 + arm_state->videocore_use_count);
5664 +
5665 +unlock:
5666 + write_unlock_bh(&arm_state->susp_res_lock);
5667 +
5668 +out:
5669 + vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
5670 + return ret;
5671 +}
5672 +
5673 +void
5674 +vchiq_on_remote_use(VCHIQ_STATE_T *state)
5675 +{
5676 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
5677 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
5678 + atomic_inc(&arm_state->ka_use_count);
5679 + complete(&arm_state->ka_evt);
5680 +}
5681 +
5682 +void
5683 +vchiq_on_remote_release(VCHIQ_STATE_T *state)
5684 +{
5685 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
5686 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
5687 + atomic_inc(&arm_state->ka_release_count);
5688 + complete(&arm_state->ka_evt);
5689 +}
5690 +
5691 +VCHIQ_STATUS_T
5692 +vchiq_use_service_internal(VCHIQ_SERVICE_T *service)
5693 +{
5694 + return vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
5695 +}
5696 +
5697 +VCHIQ_STATUS_T
5698 +vchiq_release_service_internal(VCHIQ_SERVICE_T *service)
5699 +{
5700 + return vchiq_release_internal(service->state, service);
5701 +}
5702 +
5703 +static void suspend_timer_callback(unsigned long context)
5704 +{
5705 + VCHIQ_STATE_T *state = (VCHIQ_STATE_T *)context;
5706 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
5707 + if (!arm_state)
5708 + goto out;
5709 + vchiq_log_info(vchiq_susp_log_level,
5710 + "%s - suspend timer expired - check suspend", __func__);
5711 + vchiq_check_suspend(state);
5712 +out:
5713 + return;
5714 +}
5715 +
5716 +VCHIQ_STATUS_T
5717 +vchiq_use_service_no_resume(VCHIQ_SERVICE_HANDLE_T handle)
5718 +{
5719 + VCHIQ_STATUS_T ret = VCHIQ_ERROR;
5720 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
5721 + if (service) {
5722 + ret = vchiq_use_internal(service->state, service,
5723 + USE_TYPE_SERVICE_NO_RESUME);
5724 + unlock_service(service);
5725 + }
5726 + return ret;
5727 +}
5728 +
5729 +VCHIQ_STATUS_T
5730 +vchiq_use_service(VCHIQ_SERVICE_HANDLE_T handle)
5731 +{
5732 + VCHIQ_STATUS_T ret = VCHIQ_ERROR;
5733 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
5734 + if (service) {
5735 + ret = vchiq_use_internal(service->state, service,
5736 + USE_TYPE_SERVICE);
5737 + unlock_service(service);
5738 + }
5739 + return ret;
5740 +}
5741 +
5742 +VCHIQ_STATUS_T
5743 +vchiq_release_service(VCHIQ_SERVICE_HANDLE_T handle)
5744 +{
5745 + VCHIQ_STATUS_T ret = VCHIQ_ERROR;
5746 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
5747 + if (service) {
5748 + ret = vchiq_release_internal(service->state, service);
5749 + unlock_service(service);
5750 + }
5751 + return ret;
5752 +}
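
From a kernel client's point of view, the contract is simply that every vchiq_use_service() is balanced by a vchiq_release_service(): while the use count is non-zero the videocore is kept awake, and once it drops to zero the suspend timer (or an immediate suspend) takes over. A hedged sketch of that pairing, assuming a hypothetical in-kernel client that already holds a service handle obtained via vchiq_add_service()/vchiq_open_service() (the include path and the work done on the handle are placeholders, not part of the patch):

/* Sketch only - a hypothetical in-kernel client, not part of the patch.
 * Assumes the VCHIQ kernel API header added elsewhere in this patch set. */
#include "vchiq_if.h"

static VCHIQ_STATUS_T do_one_transaction(VCHIQ_SERVICE_HANDLE_T handle)
{
	VCHIQ_STATUS_T status;

	status = vchiq_use_service(handle);	/* keeps the videocore awake */
	if (status != VCHIQ_SUCCESS)
		return status;

	/* ... queue messages / bulk transfers on 'handle' here ... */

	vchiq_release_service(handle);		/* always balance the use */
	return VCHIQ_SUCCESS;
}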
5753 +
5754 +void
5755 +vchiq_dump_service_use_state(VCHIQ_STATE_T *state)
5756 +{
5757 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
5758 + int i, j = 0;
5759 + /* Only dump 64 services */
5760 + static const int local_max_services = 64;
5761 +	/* If there are more than 64 services, only dump ones with
5762 + * non-zero counts */
5763 + int only_nonzero = 0;
5764 + static const char *nz = "<-- preventing suspend";
5765 +
5766 + enum vc_suspend_status vc_suspend_state;
5767 + enum vc_resume_status vc_resume_state;
5768 + int peer_count;
5769 + int vc_use_count;
5770 + int active_services;
5771 + struct service_data_struct {
5772 + int fourcc;
5773 + int clientid;
5774 + int use_count;
5775 + } service_data[local_max_services];
5776 +
5777 + if (!arm_state)
5778 + return;
5779 +
5780 + read_lock_bh(&arm_state->susp_res_lock);
5781 + vc_suspend_state = arm_state->vc_suspend_state;
5782 + vc_resume_state = arm_state->vc_resume_state;
5783 + peer_count = arm_state->peer_use_count;
5784 + vc_use_count = arm_state->videocore_use_count;
5785 + active_services = state->unused_service;
5786 + if (active_services > local_max_services)
5787 + only_nonzero = 1;
5788 +
5789 + for (i = 0; (i < active_services) && (j < local_max_services); i++) {
5790 + VCHIQ_SERVICE_T *service_ptr = state->services[i];
5791 + if (!service_ptr)
5792 + continue;
5793 +
5794 + if (only_nonzero && !service_ptr->service_use_count)
5795 + continue;
5796 +
5797 + if (service_ptr->srvstate != VCHIQ_SRVSTATE_FREE) {
5798 + service_data[j].fourcc = service_ptr->base.fourcc;
5799 + service_data[j].clientid = service_ptr->client_id;
5800 + service_data[j++].use_count = service_ptr->
5801 + service_use_count;
5802 + }
5803 + }
5804 +
5805 + read_unlock_bh(&arm_state->susp_res_lock);
5806 +
5807 + vchiq_log_warning(vchiq_susp_log_level,
5808 +		"-- Videocore suspend state: %s --",
5809 + suspend_state_names[vc_suspend_state + VC_SUSPEND_NUM_OFFSET]);
5810 + vchiq_log_warning(vchiq_susp_log_level,
5811 +		"-- Videocore resume state: %s --",
5812 + resume_state_names[vc_resume_state + VC_RESUME_NUM_OFFSET]);
5813 +
5814 + if (only_nonzero)
5815 + vchiq_log_warning(vchiq_susp_log_level, "Too many active "
5816 + "services (%d). Only dumping up to first %d services "
5817 + "with non-zero use-count", active_services,
5818 + local_max_services);
5819 +
5820 + for (i = 0; i < j; i++) {
5821 + vchiq_log_warning(vchiq_susp_log_level,
5822 + "----- %c%c%c%c:%d service count %d %s",
5823 + VCHIQ_FOURCC_AS_4CHARS(service_data[i].fourcc),
5824 + service_data[i].clientid,
5825 + service_data[i].use_count,
5826 + service_data[i].use_count ? nz : "");
5827 + }
5828 + vchiq_log_warning(vchiq_susp_log_level,
5829 +		"----- VCHIQ use count %d", peer_count);
5830 + vchiq_log_warning(vchiq_susp_log_level,
5831 + "--- Overall vchiq instance use count %d", vc_use_count);
5832 +
5833 + vchiq_dump_platform_use_state(state);
5834 +}
5835 +
5836 +VCHIQ_STATUS_T
5837 +vchiq_check_service(VCHIQ_SERVICE_T *service)
5838 +{
5839 + VCHIQ_ARM_STATE_T *arm_state;
5840 + VCHIQ_STATUS_T ret = VCHIQ_ERROR;
5841 +
5842 + if (!service || !service->state)
5843 + goto out;
5844 +
5845 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
5846 +
5847 + arm_state = vchiq_platform_get_arm_state(service->state);
5848 +
5849 + read_lock_bh(&arm_state->susp_res_lock);
5850 + if (service->service_use_count)
5851 + ret = VCHIQ_SUCCESS;
5852 + read_unlock_bh(&arm_state->susp_res_lock);
5853 +
5854 + if (ret == VCHIQ_ERROR) {
5855 + vchiq_log_error(vchiq_susp_log_level,
5856 + "%s ERROR - %c%c%c%c:%d service count %d, "
5857 + "state count %d, videocore suspend state %s", __func__,
5858 + VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
5859 + service->client_id, service->service_use_count,
5860 + arm_state->videocore_use_count,
5861 + suspend_state_names[arm_state->vc_suspend_state +
5862 + VC_SUSPEND_NUM_OFFSET]);
5863 + vchiq_dump_service_use_state(service->state);
5864 + }
5865 +out:
5866 + return ret;
5867 +}
5868 +
5869 +/* stub functions */
5870 +void vchiq_on_remote_use_active(VCHIQ_STATE_T *state)
5871 +{
5872 + (void)state;
5873 +}
5874 +
5875 +void vchiq_platform_conn_state_changed(VCHIQ_STATE_T *state,
5876 + VCHIQ_CONNSTATE_T oldstate, VCHIQ_CONNSTATE_T newstate)
5877 +{
5878 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
5879 + vchiq_log_info(vchiq_susp_log_level, "%d: %s->%s", state->id,
5880 + get_conn_state_name(oldstate), get_conn_state_name(newstate));
5881 + if (state->conn_state == VCHIQ_CONNSTATE_CONNECTED) {
5882 + write_lock_bh(&arm_state->susp_res_lock);
5883 + if (!arm_state->first_connect) {
5884 + char threadname[10];
5885 + arm_state->first_connect = 1;
5886 + write_unlock_bh(&arm_state->susp_res_lock);
5887 + snprintf(threadname, sizeof(threadname), "VCHIQka-%d",
5888 + state->id);
5889 + arm_state->ka_thread = kthread_create(
5890 + &vchiq_keepalive_thread_func,
5891 + (void *)state,
5892 + threadname);
5893 + if (arm_state->ka_thread == NULL) {
5894 + vchiq_log_error(vchiq_susp_log_level,
5895 + "vchiq: FATAL: couldn't create thread %s",
5896 + threadname);
5897 + } else {
5898 + wake_up_process(arm_state->ka_thread);
5899 + }
5900 + } else
5901 + write_unlock_bh(&arm_state->susp_res_lock);
5902 + }
5903 +}
5904 +
5905 +
5906 +/****************************************************************************
5907 +*
5908 +* vchiq_init - called when the module is loaded.
5909 +*
5910 +***************************************************************************/
5911 +
5912 +static int __init
5913 +vchiq_init(void)
5914 +{
5915 + int err;
5916 + void *ptr_err;
5917 +
5918 + /* create proc entries */
5919 + err = vchiq_proc_init();
5920 + if (err != 0)
5921 + goto failed_proc_init;
5922 +
5923 + err = alloc_chrdev_region(&vchiq_devid, VCHIQ_MINOR, 1, DEVICE_NAME);
5924 + if (err != 0) {
5925 + vchiq_log_error(vchiq_arm_log_level,
5926 + "Unable to allocate device number");
5927 + goto failed_alloc_chrdev;
5928 + }
5929 + cdev_init(&vchiq_cdev, &vchiq_fops);
5930 + vchiq_cdev.owner = THIS_MODULE;
5931 + err = cdev_add(&vchiq_cdev, vchiq_devid, 1);
5932 + if (err != 0) {
5933 + vchiq_log_error(vchiq_arm_log_level,
5934 + "Unable to register device");
5935 + goto failed_cdev_add;
5936 + }
5937 +
5938 + /* create sysfs entries */
5939 + vchiq_class = class_create(THIS_MODULE, DEVICE_NAME);
5940 + ptr_err = vchiq_class;
5941 + if (IS_ERR(ptr_err))
5942 + goto failed_class_create;
5943 +
5944 + vchiq_dev = device_create(vchiq_class, NULL,
5945 + vchiq_devid, NULL, "vchiq");
5946 + ptr_err = vchiq_dev;
5947 + if (IS_ERR(ptr_err))
5948 + goto failed_device_create;
5949 +
5950 + err = vchiq_platform_init(&g_state);
5951 + if (err != 0)
5952 + goto failed_platform_init;
5953 +
5954 + vchiq_log_info(vchiq_arm_log_level,
5955 + "vchiq: initialised - version %d (min %d), device %d.%d",
5956 + VCHIQ_VERSION, VCHIQ_VERSION_MIN,
5957 + MAJOR(vchiq_devid), MINOR(vchiq_devid));
5958 +
5959 + return 0;
5960 +
5961 +failed_platform_init:
5962 + device_destroy(vchiq_class, vchiq_devid);
5963 +failed_device_create:
5964 + class_destroy(vchiq_class);
5965 +failed_class_create:
5966 + cdev_del(&vchiq_cdev);
5967 + err = PTR_ERR(ptr_err);
5968 +failed_cdev_add:
5969 + unregister_chrdev_region(vchiq_devid, 1);
5970 +failed_alloc_chrdev:
5971 + vchiq_proc_deinit();
5972 +failed_proc_init:
5973 + vchiq_log_warning(vchiq_arm_log_level, "could not load vchiq");
5974 + return err;
5975 +}
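
vchiq_init() follows the usual Linux staged-initialisation idiom: each resource (proc entries, chrdev region, cdev, class, device, platform state) is acquired in order, and the error labels release what was set up in reverse. A minimal self-contained sketch of the same idiom, with made-up resource names purely for illustration:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical resources standing in for proc entries, chrdev region, etc. */
static void *acquire(const char *name) { printf("acquire %s\n", name); return malloc(1); }
static void release(const char *name, void *r) { printf("release %s\n", name); free(r); }

static int demo_init(void)
{
	void *a, *b, *c;
	int err = -1;

	a = acquire("proc");
	if (!a)
		goto failed_proc;
	b = acquire("chrdev");
	if (!b)
		goto failed_chrdev;
	c = acquire("class");
	if (!c)
		goto failed_class;
	return 0;			/* fully initialised */

failed_class:
	release("chrdev", b);		/* unwind in reverse order of acquisition */
failed_chrdev:
	release("proc", a);
failed_proc:
	return err;
}

int main(void)
{
	return demo_init();
}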
5976 +
5977 +static int vchiq_instance_get_use_count(VCHIQ_INSTANCE_T instance)
5978 +{
5979 + VCHIQ_SERVICE_T *service;
5980 + int use_count = 0, i;
5981 + i = 0;
5982 + while ((service = next_service_by_instance(instance->state,
5983 + instance, &i)) != NULL) {
5984 + use_count += service->service_use_count;
5985 + unlock_service(service);
5986 + }
5987 + return use_count;
5988 +}
5989 +
5990 +/* read the per-process use-count */
5991 +static int proc_read_use_count(char *page, char **start,
5992 + off_t off, int count,
5993 + int *eof, void *data)
5994 +{
5995 + VCHIQ_INSTANCE_T instance = data;
5996 + int len, use_count;
5997 +
5998 + use_count = vchiq_instance_get_use_count(instance);
5999 + len = snprintf(page+off, count, "%d\n", use_count);
6000 +
6001 + return len;
6002 +}
6003 +
6004 +/* add an instance (process) to the proc entries */
6005 +static int vchiq_proc_add_instance(VCHIQ_INSTANCE_T instance)
6006 +{
6007 + char pidstr[32];
6008 + struct proc_dir_entry *top, *use_count;
6009 + struct proc_dir_entry *clients = vchiq_clients_top();
6010 + int pid = instance->pid;
6011 +
6012 + snprintf(pidstr, sizeof(pidstr), "%d", pid);
6013 + top = proc_mkdir(pidstr, clients);
6014 + if (!top)
6015 + goto fail_top;
6016 +#if 0
6017 + use_count = create_proc_read_entry("use_count",
6018 + 0444, top,
6019 + proc_read_use_count,
6020 + instance);
6021 + if (!use_count)
6022 + goto fail_use_count;
6023 +
6024 + instance->proc_entry = top;
6025 +#endif
6026 + return 0;
6027 +
6028 +fail_use_count:
6029 +#if 0
6030 + remove_proc_entry(top->name, clients);
6031 +#endif
6032 +fail_top:
6033 + return -ENOMEM;
6034 +}
6035 +
6036 +static void vchiq_proc_remove_instance(VCHIQ_INSTANCE_T instance)
6037 +{
6038 +#if 0
6039 + struct proc_dir_entry *clients = vchiq_clients_top();
6040 + remove_proc_entry("use_count", instance->proc_entry);
6041 + remove_proc_entry(instance->proc_entry->name, clients);
6042 +#endif
6043 +}
6044 +
6045 +/****************************************************************************
6046 +*
6047 +* vchiq_exit - called when the module is unloaded.
6048 +*
6049 +***************************************************************************/
6050 +
6051 +static void __exit
6052 +vchiq_exit(void)
6053 +{
6054 + vchiq_platform_exit(&g_state);
6055 + device_destroy(vchiq_class, vchiq_devid);
6056 + class_destroy(vchiq_class);
6057 + cdev_del(&vchiq_cdev);
6058 + unregister_chrdev_region(vchiq_devid, 1);
6059 +}
6060 +
6061 +module_init(vchiq_init);
6062 +module_exit(vchiq_exit);
6063 +MODULE_LICENSE("GPL");
6064 +MODULE_AUTHOR("Broadcom Corporation");
6065 diff -urN linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.h linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.h
6066 --- linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.h 1970-01-01 01:00:00.000000000 +0100
6067 +++ linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.h 2013-07-06 15:25:50.000000000 +0100
6068 @@ -0,0 +1,212 @@
6069 +/**
6070 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
6071 + *
6072 + * Redistribution and use in source and binary forms, with or without
6073 + * modification, are permitted provided that the following conditions
6074 + * are met:
6075 + * 1. Redistributions of source code must retain the above copyright
6076 + * notice, this list of conditions, and the following disclaimer,
6077 + * without modification.
6078 + * 2. Redistributions in binary form must reproduce the above copyright
6079 + * notice, this list of conditions and the following disclaimer in the
6080 + * documentation and/or other materials provided with the distribution.
6081 + * 3. The names of the above-listed copyright holders may not be used
6082 + * to endorse or promote products derived from this software without
6083 + * specific prior written permission.
6084 + *
6085 + * ALTERNATIVELY, this software may be distributed under the terms of the
6086 + * GNU General Public License ("GPL") version 2, as published by the Free
6087 + * Software Foundation.
6088 + *
6089 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
6090 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
6091 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
6092 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
6093 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
6094 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
6095 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
6096 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
6097 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
6098 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
6099 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
6100 + */
6101 +
6102 +#ifndef VCHIQ_ARM_H
6103 +#define VCHIQ_ARM_H
6104 +
6105 +#include <linux/mutex.h>
6106 +#include <linux/semaphore.h>
6107 +#include <linux/atomic.h>
6108 +#include "vchiq_core.h"
6109 +
6110 +
6111 +enum vc_suspend_status {
6112 + VC_SUSPEND_FORCE_CANCELED = -3, /* Force suspend canceled, too busy */
6113 + VC_SUSPEND_REJECTED = -2, /* Videocore rejected suspend request */
6114 + VC_SUSPEND_FAILED = -1, /* Videocore suspend failed */
6115 + VC_SUSPEND_IDLE = 0, /* VC active, no suspend actions */
6116 + VC_SUSPEND_REQUESTED, /* User has requested suspend */
6117 + VC_SUSPEND_IN_PROGRESS, /* Slot handler has recvd suspend request */
6118 + VC_SUSPEND_SUSPENDED /* Videocore suspend succeeded */
6119 +};
6120 +
6121 +enum vc_resume_status {
6122 + VC_RESUME_FAILED = -1, /* Videocore resume failed */
6123 + VC_RESUME_IDLE = 0, /* VC suspended, no resume actions */
6124 + VC_RESUME_REQUESTED, /* User has requested resume */
6125 + VC_RESUME_IN_PROGRESS, /* Slot handler has received resume request */
6126 + VC_RESUME_RESUMED /* Videocore resumed successfully (active) */
6127 +};
6128 +
6129 +
6130 +enum USE_TYPE_E {
6131 + USE_TYPE_SERVICE,
6132 + USE_TYPE_SERVICE_NO_RESUME,
6133 + USE_TYPE_VCHIQ
6134 +};
6135 +
6136 +
6137 +
6138 +typedef struct vchiq_arm_state_struct {
6139 + /* Keepalive-related data */
6140 + struct task_struct *ka_thread;
6141 + struct completion ka_evt;
6142 + atomic_t ka_use_count;
6143 + atomic_t ka_use_ack_count;
6144 + atomic_t ka_release_count;
6145 +
6146 + struct completion vc_suspend_complete;
6147 + struct completion vc_resume_complete;
6148 +
6149 + rwlock_t susp_res_lock;
6150 + enum vc_suspend_status vc_suspend_state;
6151 + enum vc_resume_status vc_resume_state;
6152 +
6153 + unsigned int wake_address;
6154 +
6155 + struct timer_list suspend_timer;
6156 + int suspend_timer_timeout;
6157 + int suspend_timer_running;
6158 +
6159 + /* Global use count for videocore.
6160 + ** This is equal to the sum of the use counts for all services. When
6161 + ** this hits zero the videocore suspend procedure will be initiated.
6162 + */
6163 + int videocore_use_count;
6164 +
6165 + /* Use count to track requests from videocore peer.
6166 + ** This use count is not associated with a service, so needs to be
6167 + ** tracked separately with the state.
6168 + */
6169 + int peer_use_count;
6170 +
6171 + /* Flag to indicate whether resume is blocked. This happens when the
6172 + ** ARM is suspending
6173 + */
6174 + struct completion resume_blocker;
6175 + int resume_blocked;
6176 + struct completion blocked_blocker;
6177 + int blocked_count;
6178 +
6179 + int autosuspend_override;
6180 +
6181 + /* Flag to indicate that the first vchiq connect has made it through.
6182 + ** This means that both sides should be fully ready, and we should
6183 + ** be able to suspend after this point.
6184 + */
6185 + int first_connect;
6186 +
6187 + unsigned long long suspend_start_time;
6188 + unsigned long long sleep_start_time;
6189 + unsigned long long resume_start_time;
6190 + unsigned long long last_wake_time;
6191 +
6192 +} VCHIQ_ARM_STATE_T;
6193 +
6194 +extern int vchiq_arm_log_level;
6195 +extern int vchiq_susp_log_level;
6196 +
6197 +extern int __init
6198 +vchiq_platform_init(VCHIQ_STATE_T *state);
6199 +
6200 +extern void __exit
6201 +vchiq_platform_exit(VCHIQ_STATE_T *state);
6202 +
6203 +extern VCHIQ_STATE_T *
6204 +vchiq_get_state(void);
6205 +
6206 +extern VCHIQ_STATUS_T
6207 +vchiq_arm_vcsuspend(VCHIQ_STATE_T *state);
6208 +
6209 +extern VCHIQ_STATUS_T
6210 +vchiq_arm_force_suspend(VCHIQ_STATE_T *state);
6211 +
6212 +extern int
6213 +vchiq_arm_allow_resume(VCHIQ_STATE_T *state);
6214 +
6215 +extern VCHIQ_STATUS_T
6216 +vchiq_arm_vcresume(VCHIQ_STATE_T *state);
6217 +
6218 +extern VCHIQ_STATUS_T
6219 +vchiq_arm_init_state(VCHIQ_STATE_T *state, VCHIQ_ARM_STATE_T *arm_state);
6220 +
6221 +extern int
6222 +vchiq_check_resume(VCHIQ_STATE_T *state);
6223 +
6224 +extern void
6225 +vchiq_check_suspend(VCHIQ_STATE_T *state);
6226 +
6227 +extern VCHIQ_STATUS_T
6228 +vchiq_use_service(VCHIQ_SERVICE_HANDLE_T handle);
6229 +
6230 +extern VCHIQ_STATUS_T
6231 +vchiq_release_service(VCHIQ_SERVICE_HANDLE_T handle);
6232 +
6233 +extern VCHIQ_STATUS_T
6234 +vchiq_check_service(VCHIQ_SERVICE_T *service);
6235 +
6236 +extern VCHIQ_STATUS_T
6237 +vchiq_platform_suspend(VCHIQ_STATE_T *state);
6238 +
6239 +extern int
6240 +vchiq_platform_videocore_wanted(VCHIQ_STATE_T *state);
6241 +
6242 +extern int
6243 +vchiq_platform_use_suspend_timer(void);
6244 +
6245 +extern void
6246 +vchiq_dump_platform_use_state(VCHIQ_STATE_T *state);
6247 +
6248 +extern void
6249 +vchiq_dump_service_use_state(VCHIQ_STATE_T *state);
6250 +
6251 +extern VCHIQ_ARM_STATE_T*
6252 +vchiq_platform_get_arm_state(VCHIQ_STATE_T *state);
6253 +
6254 +extern int
6255 +vchiq_videocore_wanted(VCHIQ_STATE_T *state);
6256 +
6257 +extern VCHIQ_STATUS_T
6258 +vchiq_use_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
6259 + enum USE_TYPE_E use_type);
6260 +extern VCHIQ_STATUS_T
6261 +vchiq_release_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service);
6262 +
6263 +void
6264 +set_suspend_state(VCHIQ_ARM_STATE_T *arm_state,
6265 + enum vc_suspend_status new_state);
6266 +
6267 +void
6268 +set_resume_state(VCHIQ_ARM_STATE_T *arm_state,
6269 + enum vc_resume_status new_state);
6270 +
6271 +void
6272 +start_suspend_timer(VCHIQ_ARM_STATE_T *arm_state);
6273 +
6274 +extern int vchiq_proc_init(void);
6275 +extern void vchiq_proc_deinit(void);
6276 +extern struct proc_dir_entry *vchiq_proc_top(void);
6277 +extern struct proc_dir_entry *vchiq_clients_top(void);
6278 +
6279 +
6280 +#endif /* VCHIQ_ARM_H */
6281 diff -urN linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_build_info.h linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_build_info.h
6282 --- linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_build_info.h 1970-01-01 01:00:00.000000000 +0100
6283 +++ linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_build_info.h 2013-07-06 15:25:50.000000000 +0100
6284 @@ -0,0 +1,37 @@
6285 +/**
6286 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
6287 + *
6288 + * Redistribution and use in source and binary forms, with or without
6289 + * modification, are permitted provided that the following conditions
6290 + * are met:
6291 + * 1. Redistributions of source code must retain the above copyright
6292 + * notice, this list of conditions, and the following disclaimer,
6293 + * without modification.
6294 + * 2. Redistributions in binary form must reproduce the above copyright
6295 + * notice, this list of conditions and the following disclaimer in the
6296 + * documentation and/or other materials provided with the distribution.
6297 + * 3. The names of the above-listed copyright holders may not be used
6298 + * to endorse or promote products derived from this software without
6299 + * specific prior written permission.
6300 + *
6301 + * ALTERNATIVELY, this software may be distributed under the terms of the
6302 + * GNU General Public License ("GPL") version 2, as published by the Free
6303 + * Software Foundation.
6304 + *
6305 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
6306 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
6307 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
6308 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
6309 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
6310 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
6311 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
6312 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
6313 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
6314 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
6315 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
6316 + */
6317 +
6318 +const char *vchiq_get_build_hostname(void);
6319 +const char *vchiq_get_build_version(void);
6320 +const char *vchiq_get_build_time(void);
6321 +const char *vchiq_get_build_date(void);
6322 diff -urN linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_cfg.h linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_cfg.h
6323 --- linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_cfg.h 1970-01-01 01:00:00.000000000 +0100
6324 +++ linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_cfg.h 2013-07-06 15:25:50.000000000 +0100
6325 @@ -0,0 +1,60 @@
6326 +/**
6327 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
6328 + *
6329 + * Redistribution and use in source and binary forms, with or without
6330 + * modification, are permitted provided that the following conditions
6331 + * are met:
6332 + * 1. Redistributions of source code must retain the above copyright
6333 + * notice, this list of conditions, and the following disclaimer,
6334 + * without modification.
6335 + * 2. Redistributions in binary form must reproduce the above copyright
6336 + * notice, this list of conditions and the following disclaimer in the
6337 + * documentation and/or other materials provided with the distribution.
6338 + * 3. The names of the above-listed copyright holders may not be used
6339 + * to endorse or promote products derived from this software without
6340 + * specific prior written permission.
6341 + *
6342 + * ALTERNATIVELY, this software may be distributed under the terms of the
6343 + * GNU General Public License ("GPL") version 2, as published by the Free
6344 + * Software Foundation.
6345 + *
6346 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
6347 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
6348 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
6349 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
6350 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
6351 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
6352 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
6353 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
6354 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
6355 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
6356 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
6357 + */
6358 +
6359 +#ifndef VCHIQ_CFG_H
6360 +#define VCHIQ_CFG_H
6361 +
6362 +#define VCHIQ_MAGIC VCHIQ_MAKE_FOURCC('V', 'C', 'H', 'I')
6363 +/* The version of VCHIQ - change with any non-trivial change */
6364 +#define VCHIQ_VERSION 6
6365 +/* The minimum compatible version - update to match VCHIQ_VERSION with any
6366 +** incompatible change */
6367 +#define VCHIQ_VERSION_MIN 3
6368 +
6369 +#define VCHIQ_MAX_STATES 1
6370 +#define VCHIQ_MAX_SERVICES 4096
6371 +#define VCHIQ_MAX_SLOTS 128
6372 +#define VCHIQ_MAX_SLOTS_PER_SIDE 64
6373 +
6374 +#define VCHIQ_NUM_CURRENT_BULKS 32
6375 +#define VCHIQ_NUM_SERVICE_BULKS 4
6376 +
6377 +#ifndef VCHIQ_ENABLE_DEBUG
6378 +#define VCHIQ_ENABLE_DEBUG 1
6379 +#endif
6380 +
6381 +#ifndef VCHIQ_ENABLE_STATS
6382 +#define VCHIQ_ENABLE_STATS 1
6383 +#endif
6384 +
6385 +#endif /* VCHIQ_CFG_H */
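VCHIQ_VERSION and VCHIQ_VERSION_MIN drive the compatibility test performed when the two sides exchange versions; the same pattern is applied per service in parse_open() later in this patch. A minimal illustrative sketch (not part of the patch):

    /* Endpoints are compatible when each side's version is at least the
     * other side's minimum. With VCHIQ_VERSION 6 and VCHIQ_VERSION_MIN 3,
     * a peer at (version 4, min 3) is accepted; a peer at (2, 2) is not. */
    static int versions_compatible(short local_ver, short local_min,
                                   short peer_ver, short peer_min)
    {
        return (local_ver >= peer_min) && (peer_ver >= local_min);
    }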
6386 diff -urN linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.c linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.c
6387 --- linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.c 1970-01-01 01:00:00.000000000 +0100
6388 +++ linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.c 2013-07-06 15:25:50.000000000 +0100
6389 @@ -0,0 +1,119 @@
6390 +/**
6391 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
6392 + *
6393 + * Redistribution and use in source and binary forms, with or without
6394 + * modification, are permitted provided that the following conditions
6395 + * are met:
6396 + * 1. Redistributions of source code must retain the above copyright
6397 + * notice, this list of conditions, and the following disclaimer,
6398 + * without modification.
6399 + * 2. Redistributions in binary form must reproduce the above copyright
6400 + * notice, this list of conditions and the following disclaimer in the
6401 + * documentation and/or other materials provided with the distribution.
6402 + * 3. The names of the above-listed copyright holders may not be used
6403 + * to endorse or promote products derived from this software without
6404 + * specific prior written permission.
6405 + *
6406 + * ALTERNATIVELY, this software may be distributed under the terms of the
6407 + * GNU General Public License ("GPL") version 2, as published by the Free
6408 + * Software Foundation.
6409 + *
6410 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
6411 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
6412 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
6413 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
6414 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
6415 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
6416 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
6417 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
6418 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
6419 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
6420 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
6421 + */
6422 +
6423 +#include "vchiq_connected.h"
6424 +#include "vchiq_core.h"
6425 +#include <linux/module.h>
6426 +#include <linux/mutex.h>
6427 +
6428 +#define MAX_CALLBACKS 10
6429 +
6430 +static int g_connected;
6431 +static int g_num_deferred_callbacks;
6432 +static VCHIQ_CONNECTED_CALLBACK_T g_deferred_callback[MAX_CALLBACKS];
6433 +static int g_once_init;
6434 +static struct mutex g_connected_mutex;
6435 +
6436 +/****************************************************************************
6437 +*
6438 +* Function to initialize our lock.
6439 +*
6440 +***************************************************************************/
6441 +
6442 +static void connected_init(void)
6443 +{
6444 + if (!g_once_init) {
6445 + mutex_init(&g_connected_mutex);
6446 + g_once_init = 1;
6447 + }
6448 +}
6449 +
6450 +/****************************************************************************
6451 +*
6452 +* This function is used to defer initialization until the vchiq stack is
6453 +* initialized. If the stack is already initialized, then the callback will
6454 +* be made immediately; otherwise it will be deferred until
6455 +* vchiq_call_connected_callbacks is called.
6456 +*
6457 +***************************************************************************/
6458 +
6459 +void vchiq_add_connected_callback(VCHIQ_CONNECTED_CALLBACK_T callback)
6460 +{
6461 + connected_init();
6462 +
6463 + if (mutex_lock_interruptible(&g_connected_mutex) != 0)
6464 + return;
6465 +
6466 + if (g_connected)
6467 + /* We're already connected. Call the callback immediately. */
6468 +
6469 + callback();
6470 + else {
6471 + if (g_num_deferred_callbacks >= MAX_CALLBACKS)
6472 + vchiq_log_error(vchiq_core_log_level,
6473 +                               "There are already %d callbacks registered - "
6474 + "please increase MAX_CALLBACKS",
6475 + g_num_deferred_callbacks);
6476 + else {
6477 + g_deferred_callback[g_num_deferred_callbacks] =
6478 + callback;
6479 + g_num_deferred_callbacks++;
6480 + }
6481 + }
6482 + mutex_unlock(&g_connected_mutex);
6483 +}
6484 +
6485 +/****************************************************************************
6486 +*
6487 +* This function is called by the vchiq stack once it has been connected to
6488 +* the videocore and clients can start to use the stack.
6489 +*
6490 +***************************************************************************/
6491 +
6492 +void vchiq_call_connected_callbacks(void)
6493 +{
6494 + int i;
6495 +
6496 + connected_init();
6497 +
6498 + if (mutex_lock_interruptible(&g_connected_mutex) != 0)
6499 + return;
6500 +
6501 + for (i = 0; i < g_num_deferred_callbacks; i++)
6502 + g_deferred_callback[i]();
6503 +
6504 + g_num_deferred_callbacks = 0;
6505 + g_connected = 1;
6506 + mutex_unlock(&g_connected_mutex);
6507 +}
6508 +EXPORT_SYMBOL(vchiq_add_connected_callback);
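A typical consumer of this interface is another kernel driver that must not touch the VideoCore until the VCHIQ connection is established. A hypothetical usage sketch (the names below are illustrative, not part of the patch):

    #include "vchiq_connected.h"

    /* Runs immediately if VCHIQ is already connected; otherwise it is
     * queued (at most MAX_CALLBACKS entries) and invoked from
     * vchiq_call_connected_callbacks() once the connection comes up. */
    static void my_driver_connected(void)
    {
        /* safe to open VCHIQ services from here */
    }

    static int __init my_driver_init(void)
    {
        vchiq_add_connected_callback(my_driver_connected);
        return 0;
    }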
6509 diff -urN linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.h linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.h
6510 --- linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.h 1970-01-01 01:00:00.000000000 +0100
6511 +++ linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.h 2013-07-06 15:25:50.000000000 +0100
6512 @@ -0,0 +1,51 @@
6513 +/**
6514 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
6515 + *
6516 + * Redistribution and use in source and binary forms, with or without
6517 + * modification, are permitted provided that the following conditions
6518 + * are met:
6519 + * 1. Redistributions of source code must retain the above copyright
6520 + * notice, this list of conditions, and the following disclaimer,
6521 + * without modification.
6522 + * 2. Redistributions in binary form must reproduce the above copyright
6523 + * notice, this list of conditions and the following disclaimer in the
6524 + * documentation and/or other materials provided with the distribution.
6525 + * 3. The names of the above-listed copyright holders may not be used
6526 + * to endorse or promote products derived from this software without
6527 + * specific prior written permission.
6528 + *
6529 + * ALTERNATIVELY, this software may be distributed under the terms of the
6530 + * GNU General Public License ("GPL") version 2, as published by the Free
6531 + * Software Foundation.
6532 + *
6533 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
6534 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
6535 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
6536 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
6537 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
6538 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
6539 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
6540 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
6541 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
6542 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
6543 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
6544 + */
6545 +
6546 +#ifndef VCHIQ_CONNECTED_H
6547 +#define VCHIQ_CONNECTED_H
6548 +
6549 +/* ---- Include Files ----------------------------------------------------- */
6550 +
6551 +/* ---- Constants and Types ---------------------------------------------- */
6552 +
6553 +typedef void (*VCHIQ_CONNECTED_CALLBACK_T)(void);
6554 +
6555 +/* ---- Variable Externs ------------------------------------------------- */
6556 +
6557 +/* ---- Function Prototypes ---------------------------------------------- */
6558 +
6559 +void vchiq_add_connected_callback(VCHIQ_CONNECTED_CALLBACK_T callback);
6560 +void vchiq_call_connected_callbacks(void);
6561 +
6562 +#endif /* VCHIQ_CONNECTED_H */
6563 +
6564 diff -urN linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.c linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.c
6565 --- linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.c 1970-01-01 01:00:00.000000000 +0100
6566 +++ linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.c 2013-07-06 15:25:50.000000000 +0100
6567 @@ -0,0 +1,3818 @@
6568 +/**
6569 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
6570 + *
6571 + * Redistribution and use in source and binary forms, with or without
6572 + * modification, are permitted provided that the following conditions
6573 + * are met:
6574 + * 1. Redistributions of source code must retain the above copyright
6575 + * notice, this list of conditions, and the following disclaimer,
6576 + * without modification.
6577 + * 2. Redistributions in binary form must reproduce the above copyright
6578 + * notice, this list of conditions and the following disclaimer in the
6579 + * documentation and/or other materials provided with the distribution.
6580 + * 3. The names of the above-listed copyright holders may not be used
6581 + * to endorse or promote products derived from this software without
6582 + * specific prior written permission.
6583 + *
6584 + * ALTERNATIVELY, this software may be distributed under the terms of the
6585 + * GNU General Public License ("GPL") version 2, as published by the Free
6586 + * Software Foundation.
6587 + *
6588 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
6589 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
6590 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
6591 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
6592 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
6593 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
6594 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
6595 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
6596 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
6597 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
6598 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
6599 + */
6600 +
6601 +#include "vchiq_core.h"
6602 +
6603 +#define VCHIQ_SLOT_HANDLER_STACK 8192
6604 +
6605 +#define HANDLE_STATE_SHIFT 12
6606 +
6607 +#define SLOT_INFO_FROM_INDEX(state, index) (state->slot_info + (index))
6608 +#define SLOT_DATA_FROM_INDEX(state, index) (state->slot_data + (index))
6609 +#define SLOT_INDEX_FROM_DATA(state, data) \
6610 + (((unsigned int)((char *)data - (char *)state->slot_data)) / \
6611 + VCHIQ_SLOT_SIZE)
6612 +#define SLOT_INDEX_FROM_INFO(state, info) \
6613 + ((unsigned int)(info - state->slot_info))
6614 +#define SLOT_QUEUE_INDEX_FROM_POS(pos) \
6615 + ((int)((unsigned int)(pos) / VCHIQ_SLOT_SIZE))
6616 +
6617 +
6618 +#define BULK_INDEX(x) (x & (VCHIQ_NUM_SERVICE_BULKS - 1))
6619 +
6620 +
6621 +struct vchiq_open_payload {
6622 + int fourcc;
6623 + int client_id;
6624 + short version;
6625 + short version_min;
6626 +};
6627 +
6628 +struct vchiq_openack_payload {
6629 + short version;
6630 +};
6631 +
6632 +/* we require this for consistency between endpoints */
6633 +vchiq_static_assert(sizeof(VCHIQ_HEADER_T) == 8);
6634 +vchiq_static_assert(IS_POW2(sizeof(VCHIQ_HEADER_T)));
6635 +vchiq_static_assert(IS_POW2(VCHIQ_NUM_CURRENT_BULKS));
6636 +vchiq_static_assert(IS_POW2(VCHIQ_NUM_SERVICE_BULKS));
6637 +vchiq_static_assert(IS_POW2(VCHIQ_MAX_SERVICES));
6638 +vchiq_static_assert(VCHIQ_VERSION >= VCHIQ_VERSION_MIN);
6639 +
6640 +/* Run time control of log level, based on KERN_XXX level. */
6641 +int vchiq_core_log_level = VCHIQ_LOG_DEFAULT;
6642 +int vchiq_core_msg_log_level = VCHIQ_LOG_DEFAULT;
6643 +int vchiq_sync_log_level = VCHIQ_LOG_DEFAULT;
6644 +
6645 +static atomic_t pause_bulks_count = ATOMIC_INIT(0);
6646 +
6647 +static DEFINE_SPINLOCK(service_spinlock);
6648 +DEFINE_SPINLOCK(bulk_waiter_spinlock);
6649 +DEFINE_SPINLOCK(quota_spinlock);
6650 +
6651 +VCHIQ_STATE_T *vchiq_states[VCHIQ_MAX_STATES];
6652 +static unsigned int handle_seq;
6653 +
6654 +static const char *const srvstate_names[] = {
6655 + "FREE",
6656 + "HIDDEN",
6657 + "LISTENING",
6658 + "OPENING",
6659 + "OPEN",
6660 + "OPENSYNC",
6661 + "CLOSESENT",
6662 + "CLOSERECVD",
6663 + "CLOSEWAIT",
6664 + "CLOSED"
6665 +};
6666 +
6667 +static const char *const reason_names[] = {
6668 + "SERVICE_OPENED",
6669 + "SERVICE_CLOSED",
6670 + "MESSAGE_AVAILABLE",
6671 + "BULK_TRANSMIT_DONE",
6672 + "BULK_RECEIVE_DONE",
6673 + "BULK_TRANSMIT_ABORTED",
6674 + "BULK_RECEIVE_ABORTED"
6675 +};
6676 +
6677 +static const char *const conn_state_names[] = {
6678 + "DISCONNECTED",
6679 + "CONNECTING",
6680 + "CONNECTED",
6681 + "PAUSING",
6682 + "PAUSE_SENT",
6683 + "PAUSED",
6684 + "RESUMING",
6685 + "PAUSE_TIMEOUT",
6686 + "RESUME_TIMEOUT"
6687 +};
6688 +
6689 +
6690 +static void
6691 +release_message_sync(VCHIQ_STATE_T *state, VCHIQ_HEADER_T *header);
6692 +
6693 +static const char *msg_type_str(unsigned int msg_type)
6694 +{
6695 + switch (msg_type) {
6696 + case VCHIQ_MSG_PADDING: return "PADDING";
6697 + case VCHIQ_MSG_CONNECT: return "CONNECT";
6698 + case VCHIQ_MSG_OPEN: return "OPEN";
6699 + case VCHIQ_MSG_OPENACK: return "OPENACK";
6700 + case VCHIQ_MSG_CLOSE: return "CLOSE";
6701 + case VCHIQ_MSG_DATA: return "DATA";
6702 + case VCHIQ_MSG_BULK_RX: return "BULK_RX";
6703 + case VCHIQ_MSG_BULK_TX: return "BULK_TX";
6704 + case VCHIQ_MSG_BULK_RX_DONE: return "BULK_RX_DONE";
6705 + case VCHIQ_MSG_BULK_TX_DONE: return "BULK_TX_DONE";
6706 + case VCHIQ_MSG_PAUSE: return "PAUSE";
6707 + case VCHIQ_MSG_RESUME: return "RESUME";
6708 + case VCHIQ_MSG_REMOTE_USE: return "REMOTE_USE";
6709 + case VCHIQ_MSG_REMOTE_RELEASE: return "REMOTE_RELEASE";
6710 + case VCHIQ_MSG_REMOTE_USE_ACTIVE: return "REMOTE_USE_ACTIVE";
6711 + }
6712 + return "???";
6713 +}
6714 +
6715 +static inline void
6716 +vchiq_set_service_state(VCHIQ_SERVICE_T *service, int newstate)
6717 +{
6718 + vchiq_log_info(vchiq_core_log_level, "%d: srv:%d %s->%s",
6719 + service->state->id, service->localport,
6720 + srvstate_names[service->srvstate],
6721 + srvstate_names[newstate]);
6722 + service->srvstate = newstate;
6723 +}
6724 +
6725 +VCHIQ_SERVICE_T *
6726 +find_service_by_handle(VCHIQ_SERVICE_HANDLE_T handle)
6727 +{
6728 + VCHIQ_SERVICE_T *service;
6729 +
6730 + spin_lock(&service_spinlock);
6731 + service = handle_to_service(handle);
6732 + if (service && (service->srvstate != VCHIQ_SRVSTATE_FREE) &&
6733 + (service->handle == handle)) {
6734 + BUG_ON(service->ref_count == 0);
6735 + service->ref_count++;
6736 + } else
6737 + service = NULL;
6738 + spin_unlock(&service_spinlock);
6739 +
6740 + if (!service)
6741 + vchiq_log_info(vchiq_core_log_level,
6742 + "Invalid service handle 0x%x", handle);
6743 +
6744 + return service;
6745 +}
6746 +
6747 +VCHIQ_SERVICE_T *
6748 +find_service_by_port(VCHIQ_STATE_T *state, int localport)
6749 +{
6750 + VCHIQ_SERVICE_T *service = NULL;
6751 + if ((unsigned int)localport <= VCHIQ_PORT_MAX) {
6752 + spin_lock(&service_spinlock);
6753 + service = state->services[localport];
6754 + if (service && (service->srvstate != VCHIQ_SRVSTATE_FREE)) {
6755 + BUG_ON(service->ref_count == 0);
6756 + service->ref_count++;
6757 + } else
6758 + service = NULL;
6759 + spin_unlock(&service_spinlock);
6760 + }
6761 +
6762 + if (!service)
6763 + vchiq_log_info(vchiq_core_log_level,
6764 + "Invalid port %d", localport);
6765 +
6766 + return service;
6767 +}
6768 +
6769 +VCHIQ_SERVICE_T *
6770 +find_service_for_instance(VCHIQ_INSTANCE_T instance,
6771 + VCHIQ_SERVICE_HANDLE_T handle) {
6772 + VCHIQ_SERVICE_T *service;
6773 +
6774 + spin_lock(&service_spinlock);
6775 + service = handle_to_service(handle);
6776 + if (service && (service->srvstate != VCHIQ_SRVSTATE_FREE) &&
6777 + (service->handle == handle) &&
6778 + (service->instance == instance)) {
6779 + BUG_ON(service->ref_count == 0);
6780 + service->ref_count++;
6781 + } else
6782 + service = NULL;
6783 + spin_unlock(&service_spinlock);
6784 +
6785 + if (!service)
6786 + vchiq_log_info(vchiq_core_log_level,
6787 + "Invalid service handle 0x%x", handle);
6788 +
6789 + return service;
6790 +}
6791 +
6792 +VCHIQ_SERVICE_T *
6793 +next_service_by_instance(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance,
6794 + int *pidx)
6795 +{
6796 + VCHIQ_SERVICE_T *service = NULL;
6797 + int idx = *pidx;
6798 +
6799 + spin_lock(&service_spinlock);
6800 + while (idx < state->unused_service) {
6801 + VCHIQ_SERVICE_T *srv = state->services[idx++];
6802 + if (srv && (srv->srvstate != VCHIQ_SRVSTATE_FREE) &&
6803 + (srv->instance == instance)) {
6804 + service = srv;
6805 + BUG_ON(service->ref_count == 0);
6806 + service->ref_count++;
6807 + break;
6808 + }
6809 + }
6810 + spin_unlock(&service_spinlock);
6811 +
6812 + *pidx = idx;
6813 +
6814 + return service;
6815 +}
6816 +
6817 +void
6818 +lock_service(VCHIQ_SERVICE_T *service)
6819 +{
6820 + spin_lock(&service_spinlock);
6821 + BUG_ON(!service || (service->ref_count == 0));
6822 + if (service)
6823 + service->ref_count++;
6824 + spin_unlock(&service_spinlock);
6825 +}
6826 +
6827 +void
6828 +unlock_service(VCHIQ_SERVICE_T *service)
6829 +{
6830 + VCHIQ_STATE_T *state = service->state;
6831 + spin_lock(&service_spinlock);
6832 + BUG_ON(!service || (service->ref_count == 0));
6833 + if (service && service->ref_count) {
6834 + service->ref_count--;
6835 + if (!service->ref_count) {
6836 + BUG_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);
6837 + state->services[service->localport] = NULL;
6838 + } else
6839 + service = NULL;
6840 + }
6841 + spin_unlock(&service_spinlock);
6842 +
6843 + kfree(service);
6844 +}
6845 +
6846 +int
6847 +vchiq_get_client_id(VCHIQ_SERVICE_HANDLE_T handle)
6848 +{
6849 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
6850 + int id;
6851 +
6852 + id = service ? service->client_id : 0;
6853 + if (service)
6854 + unlock_service(service);
6855 +
6856 + return id;
6857 +}
6858 +
6859 +void *
6860 +vchiq_get_service_userdata(VCHIQ_SERVICE_HANDLE_T handle)
6861 +{
6862 + VCHIQ_SERVICE_T *service = handle_to_service(handle);
6863 +
6864 + return service ? service->base.userdata : NULL;
6865 +}
6866 +
6867 +int
6868 +vchiq_get_service_fourcc(VCHIQ_SERVICE_HANDLE_T handle)
6869 +{
6870 + VCHIQ_SERVICE_T *service = handle_to_service(handle);
6871 +
6872 + return service ? service->base.fourcc : 0;
6873 +}
6874 +
6875 +static void
6876 +mark_service_closing_internal(VCHIQ_SERVICE_T *service, int sh_thread)
6877 +{
6878 + VCHIQ_STATE_T *state = service->state;
6879 + VCHIQ_SERVICE_QUOTA_T *service_quota;
6880 +
6881 + service->closing = 1;
6882 +
6883 + /* Synchronise with other threads. */
6884 + mutex_lock(&state->recycle_mutex);
6885 + mutex_unlock(&state->recycle_mutex);
6886 + if (!sh_thread || (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT)) {
6887 + /* If we're pausing then the slot_mutex is held until resume
6888 + * by the slot handler. Therefore don't try to acquire this
6889 + * mutex if we're the slot handler and in the pause sent state.
6890 + * We don't need to in this case anyway. */
6891 + mutex_lock(&state->slot_mutex);
6892 + mutex_unlock(&state->slot_mutex);
6893 + }
6894 +
6895 + /* Unblock any sending thread. */
6896 + service_quota = &state->service_quotas[service->localport];
6897 + up(&service_quota->quota_event);
6898 +}
6899 +
6900 +static void
6901 +mark_service_closing(VCHIQ_SERVICE_T *service)
6902 +{
6903 + mark_service_closing_internal(service, 0);
6904 +}
6905 +
6906 +static inline VCHIQ_STATUS_T
6907 +make_service_callback(VCHIQ_SERVICE_T *service, VCHIQ_REASON_T reason,
6908 + VCHIQ_HEADER_T *header, void *bulk_userdata)
6909 +{
6910 + VCHIQ_STATUS_T status;
6911 + vchiq_log_trace(vchiq_core_log_level, "%d: callback:%d (%s, %x, %x)",
6912 + service->state->id, service->localport, reason_names[reason],
6913 + (unsigned int)header, (unsigned int)bulk_userdata);
6914 + status = service->base.callback(reason, header, service->handle,
6915 + bulk_userdata);
6916 + if (status == VCHIQ_ERROR) {
6917 + vchiq_log_warning(vchiq_core_log_level,
6918 + "%d: ignoring ERROR from callback to service %x",
6919 + service->state->id, service->handle);
6920 + status = VCHIQ_SUCCESS;
6921 + }
6922 + return status;
6923 +}
6924 +
6925 +inline void
6926 +vchiq_set_conn_state(VCHIQ_STATE_T *state, VCHIQ_CONNSTATE_T newstate)
6927 +{
6928 + VCHIQ_CONNSTATE_T oldstate = state->conn_state;
6929 + vchiq_log_info(vchiq_core_log_level, "%d: %s->%s", state->id,
6930 + conn_state_names[oldstate],
6931 + conn_state_names[newstate]);
6932 + state->conn_state = newstate;
6933 + vchiq_platform_conn_state_changed(state, oldstate, newstate);
6934 +}
6935 +
6936 +static inline void
6937 +remote_event_create(REMOTE_EVENT_T *event)
6938 +{
6939 + event->armed = 0;
6940 + /* Don't clear the 'fired' flag because it may already have been set
6941 + ** by the other side. */
6942 + sema_init(event->event, 0);
6943 +}
6944 +
6945 +static inline void
6946 +remote_event_destroy(REMOTE_EVENT_T *event)
6947 +{
6948 + (void)event;
6949 +}
6950 +
6951 +static inline int
6952 +remote_event_wait(REMOTE_EVENT_T *event)
6953 +{
6954 + if (!event->fired) {
6955 + event->armed = 1;
6956 + dsb();
6957 + if (!event->fired) {
6958 + if (down_interruptible(event->event) != 0) {
6959 + event->armed = 0;
6960 + return 0;
6961 + }
6962 + }
6963 + event->armed = 0;
6964 + wmb();
6965 + }
6966 +
6967 + event->fired = 0;
6968 + return 1;
6969 +}
6970 +
6971 +static inline void
6972 +remote_event_signal_local(REMOTE_EVENT_T *event)
6973 +{
6974 + event->armed = 0;
6975 + up(event->event);
6976 +}
6977 +
6978 +static inline void
6979 +remote_event_poll(REMOTE_EVENT_T *event)
6980 +{
6981 + if (event->fired && event->armed)
6982 + remote_event_signal_local(event);
6983 +}
6984 +
6985 +void
6986 +remote_event_pollall(VCHIQ_STATE_T *state)
6987 +{
6988 + remote_event_poll(&state->local->sync_trigger);
6989 + remote_event_poll(&state->local->sync_release);
6990 + remote_event_poll(&state->local->trigger);
6991 + remote_event_poll(&state->local->recycle);
6992 +}
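remote_event_wait() shows the receiving half of the doorbell protocol: the waiter publishes 'armed', re-checks 'fired', and only then sleeps on the semaphore, so a signal arriving in that window is never lost. The sending half, remote_event_signal(), lives in the platform-specific code and is not part of this hunk; a conceptual sketch of what the flags above imply it must do:

    /* Conceptual only - the real signal path is platform specific. */
    static void remote_event_signal_sketch(REMOTE_EVENT_T *event)
    {
        event->fired = 1;
        wmb();                  /* publish 'fired' before reading 'armed' */
        if (event->armed)
            ring_doorbell();    /* hypothetical wake-up of the other CPU */
    }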
6993 +
6994 +/* Round up message sizes so that any space at the end of a slot is always big
6995 +** enough for a header. This relies on header size being a power of two, which
6996 +** has been verified earlier by a static assertion. */
6997 +
6998 +static inline unsigned int
6999 +calc_stride(unsigned int size)
7000 +{
7001 + /* Allow room for the header */
7002 + size += sizeof(VCHIQ_HEADER_T);
7003 +
7004 + /* Round up */
7005 + return (size + sizeof(VCHIQ_HEADER_T) - 1) & ~(sizeof(VCHIQ_HEADER_T)
7006 + - 1);
7007 +}
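Since sizeof(VCHIQ_HEADER_T) is asserted to be 8 earlier in this file, calc_stride() adds the 8-byte header and rounds the total up to the next multiple of 8. A couple of worked values:

    /* calc_stride(13): 13 + 8 = 21, rounded up to 24.
     * calc_stride(16): 16 + 8 = 24, already a multiple of 8, unchanged.
     * Rounding to the header size guarantees that whatever is left at the
     * end of a slot can still hold a padding header. */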
7008 +
7009 +/* Called by the slot handler thread */
7010 +static VCHIQ_SERVICE_T *
7011 +get_listening_service(VCHIQ_STATE_T *state, int fourcc)
7012 +{
7013 + int i;
7014 +
7015 + WARN_ON(fourcc == VCHIQ_FOURCC_INVALID);
7016 +
7017 + for (i = 0; i < state->unused_service; i++) {
7018 + VCHIQ_SERVICE_T *service = state->services[i];
7019 + if (service &&
7020 + (service->public_fourcc == fourcc) &&
7021 + ((service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
7022 + ((service->srvstate == VCHIQ_SRVSTATE_OPEN) &&
7023 + (service->remoteport == VCHIQ_PORT_FREE)))) {
7024 + lock_service(service);
7025 + return service;
7026 + }
7027 + }
7028 +
7029 + return NULL;
7030 +}
7031 +
7032 +/* Called by the slot handler thread */
7033 +static VCHIQ_SERVICE_T *
7034 +get_connected_service(VCHIQ_STATE_T *state, unsigned int port)
7035 +{
7036 + int i;
7037 + for (i = 0; i < state->unused_service; i++) {
7038 + VCHIQ_SERVICE_T *service = state->services[i];
7039 + if (service && (service->srvstate == VCHIQ_SRVSTATE_OPEN)
7040 + && (service->remoteport == port)) {
7041 + lock_service(service);
7042 + return service;
7043 + }
7044 + }
7045 + return NULL;
7046 +}
7047 +
7048 +inline void
7049 +request_poll(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service, int poll_type)
7050 +{
7051 + uint32_t value;
7052 +
7053 + if (service) {
7054 + do {
7055 + value = atomic_read(&service->poll_flags);
7056 + } while (atomic_cmpxchg(&service->poll_flags, value,
7057 + value | (1 << poll_type)) != value);
7058 +
7059 + do {
7060 + value = atomic_read(&state->poll_services[
7061 + service->localport>>5]);
7062 + } while (atomic_cmpxchg(
7063 + &state->poll_services[service->localport>>5],
7064 + value, value | (1 << (service->localport & 0x1f)))
7065 + != value);
7066 + }
7067 +
7068 + state->poll_needed = 1;
7069 + wmb();
7070 +
7071 + /* ... and ensure the slot handler runs. */
7072 + remote_event_signal_local(&state->local->trigger);
7073 +}
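The two compare-and-swap loops set one bit per poll type in the service's poll_flags and one bit per service in a packed array of 32-bit words: localport >> 5 selects the word and localport & 0x1f selects the bit within it. Purely illustrative arithmetic:

    /* localport 37: 37 >> 5 = 1 and 37 & 0x1f = 5, so bit 5 of
     * poll_services[1] is set; each word therefore tracks 32 services. */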
7074 +
7075 +/* Called from queue_message, by the slot handler and application threads,
7076 +** with slot_mutex held */
7077 +static VCHIQ_HEADER_T *
7078 +reserve_space(VCHIQ_STATE_T *state, int space, int is_blocking)
7079 +{
7080 + VCHIQ_SHARED_STATE_T *local = state->local;
7081 + int tx_pos = state->local_tx_pos;
7082 + int slot_space = VCHIQ_SLOT_SIZE - (tx_pos & VCHIQ_SLOT_MASK);
7083 +
7084 + if (space > slot_space) {
7085 + VCHIQ_HEADER_T *header;
7086 + /* Fill the remaining space with padding */
7087 + WARN_ON(state->tx_data == NULL);
7088 + header = (VCHIQ_HEADER_T *)
7089 + (state->tx_data + (tx_pos & VCHIQ_SLOT_MASK));
7090 + header->msgid = VCHIQ_MSGID_PADDING;
7091 + header->size = slot_space - sizeof(VCHIQ_HEADER_T);
7092 +
7093 + tx_pos += slot_space;
7094 + }
7095 +
7096 + /* If necessary, get the next slot. */
7097 + if ((tx_pos & VCHIQ_SLOT_MASK) == 0) {
7098 + int slot_index;
7099 +
7100 + /* If there is no free slot... */
7101 +
7102 + if (down_trylock(&state->slot_available_event) != 0) {
7103 + /* ...wait for one. */
7104 +
7105 + VCHIQ_STATS_INC(state, slot_stalls);
7106 +
7107 + /* But first, flush through the last slot. */
7108 + state->local_tx_pos = tx_pos;
7109 + local->tx_pos = tx_pos;
7110 + remote_event_signal(&state->remote->trigger);
7111 +
7112 + if (!is_blocking ||
7113 + (down_interruptible(
7114 + &state->slot_available_event) != 0))
7115 + return NULL; /* No space available */
7116 + }
7117 +
7118 + BUG_ON(tx_pos ==
7119 + (state->slot_queue_available * VCHIQ_SLOT_SIZE));
7120 +
7121 + slot_index = local->slot_queue[
7122 + SLOT_QUEUE_INDEX_FROM_POS(tx_pos) &
7123 + VCHIQ_SLOT_QUEUE_MASK];
7124 + state->tx_data =
7125 + (char *)SLOT_DATA_FROM_INDEX(state, slot_index);
7126 + }
7127 +
7128 + state->local_tx_pos = tx_pos + space;
7129 +
7130 + return (VCHIQ_HEADER_T *)(state->tx_data + (tx_pos & VCHIQ_SLOT_MASK));
7131 +}
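reserve_space() never lets a message straddle a slot boundary: if the request does not fit in the remainder of the current slot, that remainder is consumed by a PADDING message and allocation restarts at the top of the next slot. A worked example, assuming the usual VCHIQ_SLOT_SIZE of 4096 (defined elsewhere in this patch):

    /* tx_pos & VCHIQ_SLOT_MASK == 4072, so 24 bytes remain in the slot.
     * A 40-byte request cannot fit: a padding header is written with
     * size = 24 - sizeof(VCHIQ_HEADER_T) = 16, tx_pos advances by 24 to
     * the slot boundary, and the 40-byte message starts in the next slot
     * (waiting on slot_available_event if no slot is free). */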
7132 +
7133 +/* Called by the recycle thread. */
7134 +static void
7135 +process_free_queue(VCHIQ_STATE_T *state)
7136 +{
7137 + VCHIQ_SHARED_STATE_T *local = state->local;
7138 + BITSET_T service_found[BITSET_SIZE(VCHIQ_MAX_SERVICES)];
7139 + int slot_queue_available;
7140 +
7141 + /* Use a read memory barrier to ensure that any state that may have
7142 + ** been modified by another thread is not masked by stale prefetched
7143 + ** values. */
7144 + rmb();
7145 +
7146 + /* Find slots which have been freed by the other side, and return them
7147 + ** to the available queue. */
7148 + slot_queue_available = state->slot_queue_available;
7149 +
7150 + while (slot_queue_available != local->slot_queue_recycle) {
7151 + unsigned int pos;
7152 + int slot_index = local->slot_queue[slot_queue_available++ &
7153 + VCHIQ_SLOT_QUEUE_MASK];
7154 + char *data = (char *)SLOT_DATA_FROM_INDEX(state, slot_index);
7155 + int data_found = 0;
7156 +
7157 + vchiq_log_trace(vchiq_core_log_level, "%d: pfq %d=%x %x %x",
7158 + state->id, slot_index, (unsigned int)data,
7159 + local->slot_queue_recycle, slot_queue_available);
7160 +
7161 + /* Initialise the bitmask for services which have used this
7162 + ** slot */
7163 + BITSET_ZERO(service_found);
7164 +
7165 + pos = 0;
7166 +
7167 + while (pos < VCHIQ_SLOT_SIZE) {
7168 + VCHIQ_HEADER_T *header =
7169 + (VCHIQ_HEADER_T *)(data + pos);
7170 + int msgid = header->msgid;
7171 + if (VCHIQ_MSG_TYPE(msgid) == VCHIQ_MSG_DATA) {
7172 + int port = VCHIQ_MSG_SRCPORT(msgid);
7173 + VCHIQ_SERVICE_QUOTA_T *service_quota =
7174 + &state->service_quotas[port];
7175 + int count;
7176 + spin_lock(&quota_spinlock);
7177 + count = service_quota->message_use_count;
7178 + if (count > 0)
7179 + service_quota->message_use_count =
7180 + count - 1;
7181 + spin_unlock(&quota_spinlock);
7182 +
7183 + if (count == service_quota->message_quota)
7184 + /* Signal the service that it
7185 + ** has dropped below its quota
7186 + */
7187 + up(&service_quota->quota_event);
7188 + else if (count == 0) {
7189 + vchiq_log_error(vchiq_core_log_level,
7190 + "service %d "
7191 + "message_use_count=%d "
7192 + "(header %x, msgid %x, "
7193 + "header->msgid %x, "
7194 + "header->size %x)",
7195 + port,
7196 + service_quota->
7197 + message_use_count,
7198 + (unsigned int)header, msgid,
7199 + header->msgid,
7200 + header->size);
7201 + WARN(1, "invalid message use count\n");
7202 + }
7203 + if (!BITSET_IS_SET(service_found, port)) {
7204 + /* Set the found bit for this service */
7205 + BITSET_SET(service_found, port);
7206 +
7207 + spin_lock(&quota_spinlock);
7208 + count = service_quota->slot_use_count;
7209 + if (count > 0)
7210 + service_quota->slot_use_count =
7211 + count - 1;
7212 + spin_unlock(&quota_spinlock);
7213 +
7214 + if (count > 0) {
7215 + /* Signal the service in case
7216 + ** it has dropped below its
7217 + ** quota */
7218 + up(&service_quota->quota_event);
7219 + vchiq_log_trace(
7220 + vchiq_core_log_level,
7221 + "%d: pfq:%d %x@%x - "
7222 + "slot_use->%d",
7223 + state->id, port,
7224 + header->size,
7225 + (unsigned int)header,
7226 + count - 1);
7227 + } else {
7228 + vchiq_log_error(
7229 + vchiq_core_log_level,
7230 + "service %d "
7231 + "slot_use_count"
7232 + "=%d (header %x"
7233 + ", msgid %x, "
7234 + "header->msgid"
7235 + " %x, header->"
7236 + "size %x)",
7237 + port, count,
7238 + (unsigned int)header,
7239 + msgid,
7240 + header->msgid,
7241 + header->size);
7242 + WARN(1, "bad slot use count\n");
7243 + }
7244 + }
7245 +
7246 + data_found = 1;
7247 + }
7248 +
7249 + pos += calc_stride(header->size);
7250 + if (pos > VCHIQ_SLOT_SIZE) {
7251 + vchiq_log_error(vchiq_core_log_level,
7252 + "pfq - pos %x: header %x, msgid %x, "
7253 + "header->msgid %x, header->size %x",
7254 + pos, (unsigned int)header, msgid,
7255 + header->msgid, header->size);
7256 + WARN(1, "invalid slot position\n");
7257 + }
7258 + }
7259 +
7260 + if (data_found) {
7261 + int count;
7262 + spin_lock(&quota_spinlock);
7263 + count = state->data_use_count;
7264 + if (count > 0)
7265 + state->data_use_count =
7266 + count - 1;
7267 + spin_unlock(&quota_spinlock);
7268 + if (count == state->data_quota)
7269 + up(&state->data_quota_event);
7270 + }
7271 +
7272 + state->slot_queue_available = slot_queue_available;
7273 + up(&state->slot_available_event);
7274 + }
7275 +}
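The quota bookkeeping above is the release half of the scheme enforced in queue_message() below: each DATA message found in a recycled slot decrements its service's message_use_count, and the first message found for a service in that slot decrements slot_use_count. The up(&quota_event) calls fire exactly at the transition out of the blocked condition; illustrative arithmetic only:

    /* With an assumed message_quota of 4 and a sender stalled at
     * message_use_count == 4, freeing one message sees count == quota,
     * drops the count to 3 and signals quota_event, letting the sender in
     * queue_message() re-check its quota and continue. */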
7276 +
7277 +/* Called by the slot handler and application threads */
7278 +static VCHIQ_STATUS_T
7279 +queue_message(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
7280 + int msgid, const VCHIQ_ELEMENT_T *elements,
7281 + int count, int size, int is_blocking)
7282 +{
7283 + VCHIQ_SHARED_STATE_T *local;
7284 + VCHIQ_SERVICE_QUOTA_T *service_quota = NULL;
7285 + VCHIQ_HEADER_T *header;
7286 + int type = VCHIQ_MSG_TYPE(msgid);
7287 +
7288 + unsigned int stride;
7289 +
7290 + local = state->local;
7291 +
7292 + stride = calc_stride(size);
7293 +
7294 + WARN_ON(!(stride <= VCHIQ_SLOT_SIZE));
7295 +
7296 + if ((type != VCHIQ_MSG_RESUME) &&
7297 + (mutex_lock_interruptible(&state->slot_mutex) != 0))
7298 + return VCHIQ_RETRY;
7299 +
7300 + if (type == VCHIQ_MSG_DATA) {
7301 + int tx_end_index;
7302 +
7303 + BUG_ON(!service);
7304 +
7305 + if (service->closing) {
7306 + /* The service has been closed */
7307 + mutex_unlock(&state->slot_mutex);
7308 + return VCHIQ_ERROR;
7309 + }
7310 +
7311 + service_quota = &state->service_quotas[service->localport];
7312 +
7313 + spin_lock(&quota_spinlock);
7314 +
7315 + /* Ensure this service doesn't use more than its quota of
7316 + ** messages or slots */
7317 + tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
7318 + state->local_tx_pos + stride - 1);
7319 +
7320 + /* Ensure data messages don't use more than their quota of
7321 + ** slots */
7322 + while ((tx_end_index != state->previous_data_index) &&
7323 + (state->data_use_count == state->data_quota)) {
7324 + VCHIQ_STATS_INC(state, data_stalls);
7325 + spin_unlock(&quota_spinlock);
7326 + mutex_unlock(&state->slot_mutex);
7327 +
7328 + if (down_interruptible(&state->data_quota_event)
7329 + != 0)
7330 + return VCHIQ_RETRY;
7331 +
7332 + mutex_lock(&state->slot_mutex);
7333 + spin_lock(&quota_spinlock);
7334 + tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
7335 + state->local_tx_pos + stride - 1);
7336 + if ((tx_end_index == state->previous_data_index) ||
7337 + (state->data_use_count < state->data_quota)) {
7338 + /* Pass the signal on to other waiters */
7339 + up(&state->data_quota_event);
7340 + break;
7341 + }
7342 + }
7343 +
7344 + while ((service_quota->message_use_count ==
7345 + service_quota->message_quota) ||
7346 + ((tx_end_index != service_quota->previous_tx_index) &&
7347 + (service_quota->slot_use_count ==
7348 + service_quota->slot_quota))) {
7349 + spin_unlock(&quota_spinlock);
7350 + vchiq_log_trace(vchiq_core_log_level,
7351 + "%d: qm:%d %s,%x - quota stall "
7352 + "(msg %d, slot %d)",
7353 + state->id, service->localport,
7354 + msg_type_str(type), size,
7355 + service_quota->message_use_count,
7356 + service_quota->slot_use_count);
7357 + VCHIQ_SERVICE_STATS_INC(service, quota_stalls);
7358 + mutex_unlock(&state->slot_mutex);
7359 + if (down_interruptible(&service_quota->quota_event)
7360 + != 0)
7361 + return VCHIQ_RETRY;
7362 + if (service->closing)
7363 + return VCHIQ_ERROR;
7364 + if (mutex_lock_interruptible(&state->slot_mutex) != 0)
7365 + return VCHIQ_RETRY;
7366 + if (service->srvstate != VCHIQ_SRVSTATE_OPEN) {
7367 + /* The service has been closed */
7368 + mutex_unlock(&state->slot_mutex);
7369 + return VCHIQ_ERROR;
7370 + }
7371 + spin_lock(&quota_spinlock);
7372 + tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
7373 + state->local_tx_pos + stride - 1);
7374 + }
7375 +
7376 + spin_unlock(&quota_spinlock);
7377 + }
7378 +
7379 + header = reserve_space(state, stride, is_blocking);
7380 +
7381 + if (!header) {
7382 + if (service)
7383 + VCHIQ_SERVICE_STATS_INC(service, slot_stalls);
7384 + mutex_unlock(&state->slot_mutex);
7385 + return VCHIQ_RETRY;
7386 + }
7387 +
7388 + if (type == VCHIQ_MSG_DATA) {
7389 + int i, pos;
7390 + int tx_end_index;
7391 + int slot_use_count;
7392 +
7393 + vchiq_log_info(vchiq_core_log_level,
7394 + "%d: qm %s@%x,%x (%d->%d)",
7395 + state->id,
7396 + msg_type_str(VCHIQ_MSG_TYPE(msgid)),
7397 + (unsigned int)header, size,
7398 + VCHIQ_MSG_SRCPORT(msgid),
7399 + VCHIQ_MSG_DSTPORT(msgid));
7400 +
7401 + BUG_ON(!service);
7402 +
7403 + for (i = 0, pos = 0; i < (unsigned int)count;
7404 + pos += elements[i++].size)
7405 + if (elements[i].size) {
7406 + if (vchiq_copy_from_user
7407 + (header->data + pos, elements[i].data,
7408 + (size_t) elements[i].size) !=
7409 + VCHIQ_SUCCESS) {
7410 + mutex_unlock(&state->slot_mutex);
7411 + VCHIQ_SERVICE_STATS_INC(service,
7412 + error_count);
7413 + return VCHIQ_ERROR;
7414 + }
7415 + if (i == 0) {
7416 + if (vchiq_core_msg_log_level >=
7417 + VCHIQ_LOG_INFO)
7418 + vchiq_log_dump_mem("Sent", 0,
7419 + header->data + pos,
7420 + min(64u,
7421 + elements[0].size));
7422 + }
7423 + }
7424 +
7425 + spin_lock(&quota_spinlock);
7426 + service_quota->message_use_count++;
7427 +
7428 + tx_end_index =
7429 + SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos - 1);
7430 +
7431 + /* If this transmission can't fit in the last slot used by any
7432 + ** service, the data_use_count must be increased. */
7433 + if (tx_end_index != state->previous_data_index) {
7434 + state->previous_data_index = tx_end_index;
7435 + state->data_use_count++;
7436 + }
7437 +
7438 + /* If this isn't the same slot last used by this service,
7439 + ** the service's slot_use_count must be increased. */
7440 + if (tx_end_index != service_quota->previous_tx_index) {
7441 + service_quota->previous_tx_index = tx_end_index;
7442 + slot_use_count = ++service_quota->slot_use_count;
7443 + } else {
7444 + slot_use_count = 0;
7445 + }
7446 +
7447 + spin_unlock(&quota_spinlock);
7448 +
7449 + if (slot_use_count)
7450 + vchiq_log_trace(vchiq_core_log_level,
7451 + "%d: qm:%d %s,%x - slot_use->%d (hdr %p)",
7452 + state->id, service->localport,
7453 + msg_type_str(VCHIQ_MSG_TYPE(msgid)), size,
7454 + slot_use_count, header);
7455 +
7456 + VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
7457 + VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
7458 + } else {
7459 + vchiq_log_info(vchiq_core_log_level,
7460 + "%d: qm %s@%x,%x (%d->%d)", state->id,
7461 + msg_type_str(VCHIQ_MSG_TYPE(msgid)),
7462 + (unsigned int)header, size,
7463 + VCHIQ_MSG_SRCPORT(msgid),
7464 + VCHIQ_MSG_DSTPORT(msgid));
7465 + if (size != 0) {
7466 + WARN_ON(!((count == 1) && (size == elements[0].size)));
7467 + memcpy(header->data, elements[0].data,
7468 + elements[0].size);
7469 + }
7470 + VCHIQ_STATS_INC(state, ctrl_tx_count);
7471 + }
7472 +
7473 + header->msgid = msgid;
7474 + header->size = size;
7475 +
7476 + {
7477 + int svc_fourcc;
7478 +
7479 + svc_fourcc = service
7480 + ? service->base.fourcc
7481 + : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
7482 +
7483 + vchiq_log_info(vchiq_core_msg_log_level,
7484 + "Sent Msg %s(%u) to %c%c%c%c s:%u d:%d len:%d",
7485 + msg_type_str(VCHIQ_MSG_TYPE(msgid)),
7486 + VCHIQ_MSG_TYPE(msgid),
7487 + VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
7488 + VCHIQ_MSG_SRCPORT(msgid),
7489 + VCHIQ_MSG_DSTPORT(msgid),
7490 + size);
7491 + }
7492 +
7493 + /* Make sure the new header is visible to the peer. */
7494 + wmb();
7495 +
7496 + /* Make the new tx_pos visible to the peer. */
7497 + local->tx_pos = state->local_tx_pos;
7498 + wmb();
7499 +
7500 + if (service && (type == VCHIQ_MSG_CLOSE))
7501 + vchiq_set_service_state(service, VCHIQ_SRVSTATE_CLOSESENT);
7502 +
7503 + if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_PAUSE)
7504 + mutex_unlock(&state->slot_mutex);
7505 +
7506 + remote_event_signal(&state->remote->trigger);
7507 +
7508 + return VCHIQ_SUCCESS;
7509 +}
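queue_message() treats the message body as a scatter-gather list: each element's data is copied into the slot back to back, at offsets that accumulate the preceding sizes. A hypothetical caller-side view (the names below are illustrative, not from the patch):

    /* Two fragments become one contiguous message in the slot:
     * elements[0].data lands at offset 0,
     * elements[1].data lands at offset elements[0].size. */
    VCHIQ_ELEMENT_T elements[2] = {
        { &cmd,    sizeof(cmd) },      /* hypothetical fixed header */
        { payload, payload_len }       /* hypothetical variable payload */
    };
    /* the 'size' passed to queue_message is sizeof(cmd) + payload_len */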
7510 +
7511 +/* Called by the slot handler and application threads */
7512 +static VCHIQ_STATUS_T
7513 +queue_message_sync(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
7514 + int msgid, const VCHIQ_ELEMENT_T *elements,
7515 + int count, int size, int is_blocking)
7516 +{
7517 + VCHIQ_SHARED_STATE_T *local;
7518 + VCHIQ_HEADER_T *header;
7519 +
7520 + local = state->local;
7521 +
7522 + if ((VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_RESUME) &&
7523 + (mutex_lock_interruptible(&state->sync_mutex) != 0))
7524 + return VCHIQ_RETRY;
7525 +
7526 + remote_event_wait(&local->sync_release);
7527 +
7528 + rmb();
7529 +
7530 + header = (VCHIQ_HEADER_T *)SLOT_DATA_FROM_INDEX(state,
7531 + local->slot_sync);
7532 +
7533 + {
7534 + int oldmsgid = header->msgid;
7535 + if (oldmsgid != VCHIQ_MSGID_PADDING)
7536 + vchiq_log_error(vchiq_core_log_level,
7537 + "%d: qms - msgid %x, not PADDING",
7538 + state->id, oldmsgid);
7539 + }
7540 +
7541 + if (service) {
7542 + int i, pos;
7543 +
7544 + vchiq_log_info(vchiq_sync_log_level,
7545 + "%d: qms %s@%x,%x (%d->%d)", state->id,
7546 + msg_type_str(VCHIQ_MSG_TYPE(msgid)),
7547 + (unsigned int)header, size,
7548 + VCHIQ_MSG_SRCPORT(msgid),
7549 + VCHIQ_MSG_DSTPORT(msgid));
7550 +
7551 + for (i = 0, pos = 0; i < (unsigned int)count;
7552 + pos += elements[i++].size)
7553 + if (elements[i].size) {
7554 + if (vchiq_copy_from_user
7555 + (header->data + pos, elements[i].data,
7556 + (size_t) elements[i].size) !=
7557 + VCHIQ_SUCCESS) {
7558 + mutex_unlock(&state->sync_mutex);
7559 + VCHIQ_SERVICE_STATS_INC(service,
7560 + error_count);
7561 + return VCHIQ_ERROR;
7562 + }
7563 + if (i == 0) {
7564 + if (vchiq_sync_log_level >=
7565 + VCHIQ_LOG_TRACE)
7566 + vchiq_log_dump_mem("Sent Sync",
7567 + 0, header->data + pos,
7568 + min(64u,
7569 + elements[0].size));
7570 + }
7571 + }
7572 +
7573 + VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
7574 + VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
7575 + } else {
7576 + vchiq_log_info(vchiq_sync_log_level,
7577 + "%d: qms %s@%x,%x (%d->%d)", state->id,
7578 + msg_type_str(VCHIQ_MSG_TYPE(msgid)),
7579 + (unsigned int)header, size,
7580 + VCHIQ_MSG_SRCPORT(msgid),
7581 + VCHIQ_MSG_DSTPORT(msgid));
7582 + if (size != 0) {
7583 + WARN_ON(!((count == 1) && (size == elements[0].size)));
7584 + memcpy(header->data, elements[0].data,
7585 + elements[0].size);
7586 + }
7587 + VCHIQ_STATS_INC(state, ctrl_tx_count);
7588 + }
7589 +
7590 + header->size = size;
7591 + header->msgid = msgid;
7592 +
7593 + if (vchiq_sync_log_level >= VCHIQ_LOG_TRACE) {
7594 + int svc_fourcc;
7595 +
7596 + svc_fourcc = service
7597 + ? service->base.fourcc
7598 + : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
7599 +
7600 + vchiq_log_trace(vchiq_sync_log_level,
7601 + "Sent Sync Msg %s(%u) to %c%c%c%c s:%u d:%d len:%d",
7602 + msg_type_str(VCHIQ_MSG_TYPE(msgid)),
7603 + VCHIQ_MSG_TYPE(msgid),
7604 + VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
7605 + VCHIQ_MSG_SRCPORT(msgid),
7606 + VCHIQ_MSG_DSTPORT(msgid),
7607 + size);
7608 + }
7609 +
7610 + /* Make sure the new header is visible to the peer. */
7611 + wmb();
7612 +
7613 + remote_event_signal(&state->remote->sync_trigger);
7614 +
7615 + if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_PAUSE)
7616 + mutex_unlock(&state->sync_mutex);
7617 +
7618 + return VCHIQ_SUCCESS;
7619 +}
7620 +
7621 +static inline void
7622 +claim_slot(VCHIQ_SLOT_INFO_T *slot)
7623 +{
7624 + slot->use_count++;
7625 +}
7626 +
7627 +static void
7628 +release_slot(VCHIQ_STATE_T *state, VCHIQ_SLOT_INFO_T *slot_info,
7629 + VCHIQ_HEADER_T *header, VCHIQ_SERVICE_T *service)
7630 +{
7631 + int release_count;
7632 +
7633 + mutex_lock(&state->recycle_mutex);
7634 +
7635 + if (header) {
7636 + int msgid = header->msgid;
7637 + if (((msgid & VCHIQ_MSGID_CLAIMED) == 0) ||
7638 + (service && service->closing)) {
7639 + mutex_unlock(&state->recycle_mutex);
7640 + return;
7641 + }
7642 +
7643 + /* Rewrite the message header to prevent a double
7644 + ** release */
7645 + header->msgid = msgid & ~VCHIQ_MSGID_CLAIMED;
7646 + }
7647 +
7648 + release_count = slot_info->release_count;
7649 + slot_info->release_count = ++release_count;
7650 +
7651 + if (release_count == slot_info->use_count) {
7652 + int slot_queue_recycle;
7653 + /* Add to the freed queue */
7654 +
7655 + /* A read barrier is necessary here to prevent speculative
7656 + ** fetches of remote->slot_queue_recycle from overtaking the
7657 + ** mutex. */
7658 + rmb();
7659 +
7660 + slot_queue_recycle = state->remote->slot_queue_recycle;
7661 + state->remote->slot_queue[slot_queue_recycle &
7662 + VCHIQ_SLOT_QUEUE_MASK] =
7663 + SLOT_INDEX_FROM_INFO(state, slot_info);
7664 + state->remote->slot_queue_recycle = slot_queue_recycle + 1;
7665 + vchiq_log_info(vchiq_core_log_level,
7666 + "%d: release_slot %d - recycle->%x",
7667 + state->id, SLOT_INDEX_FROM_INFO(state, slot_info),
7668 + state->remote->slot_queue_recycle);
7669 +
7670 + /* A write barrier is necessary, but remote_event_signal
7671 + ** contains one. */
7672 + remote_event_signal(&state->remote->recycle);
7673 + }
7674 +
7675 + mutex_unlock(&state->recycle_mutex);
7676 +}
7677 +
7678 +/* Called by the slot handler - don't hold the bulk mutex */
7679 +static VCHIQ_STATUS_T
7680 +notify_bulks(VCHIQ_SERVICE_T *service, VCHIQ_BULK_QUEUE_T *queue,
7681 + int retry_poll)
7682 +{
7683 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
7684 +
7685 + vchiq_log_trace(vchiq_core_log_level,
7686 + "%d: nb:%d %cx - p=%x rn=%x r=%x",
7687 + service->state->id, service->localport,
7688 + (queue == &service->bulk_tx) ? 't' : 'r',
7689 + queue->process, queue->remote_notify, queue->remove);
7690 +
7691 + if (service->state->is_master) {
7692 + while (queue->remote_notify != queue->process) {
7693 + VCHIQ_BULK_T *bulk =
7694 + &queue->bulks[BULK_INDEX(queue->remote_notify)];
7695 + int msgtype = (bulk->dir == VCHIQ_BULK_TRANSMIT) ?
7696 + VCHIQ_MSG_BULK_RX_DONE : VCHIQ_MSG_BULK_TX_DONE;
7697 + int msgid = VCHIQ_MAKE_MSG(msgtype, service->localport,
7698 + service->remoteport);
7699 + VCHIQ_ELEMENT_T element = { &bulk->actual, 4 };
7700 + /* Only reply to non-dummy bulk requests */
7701 + if (bulk->remote_data) {
7702 + status = queue_message(service->state, NULL,
7703 + msgid, &element, 1, 4, 0);
7704 + if (status != VCHIQ_SUCCESS)
7705 + break;
7706 + }
7707 + queue->remote_notify++;
7708 + }
7709 + } else {
7710 + queue->remote_notify = queue->process;
7711 + }
7712 +
7713 + if (status == VCHIQ_SUCCESS) {
7714 + while (queue->remove != queue->remote_notify) {
7715 + VCHIQ_BULK_T *bulk =
7716 + &queue->bulks[BULK_INDEX(queue->remove)];
7717 +
7718 + /* Only generate callbacks for non-dummy bulk
7719 + ** requests, and non-terminated services */
7720 + if (bulk->data && service->instance) {
7721 + if (bulk->actual != VCHIQ_BULK_ACTUAL_ABORTED) {
7722 + if (bulk->dir == VCHIQ_BULK_TRANSMIT) {
7723 + VCHIQ_SERVICE_STATS_INC(service,
7724 + bulk_tx_count);
7725 + VCHIQ_SERVICE_STATS_ADD(service,
7726 + bulk_tx_bytes,
7727 + bulk->actual);
7728 + } else {
7729 + VCHIQ_SERVICE_STATS_INC(service,
7730 + bulk_rx_count);
7731 + VCHIQ_SERVICE_STATS_ADD(service,
7732 + bulk_rx_bytes,
7733 + bulk->actual);
7734 + }
7735 + } else {
7736 + VCHIQ_SERVICE_STATS_INC(service,
7737 + bulk_aborted_count);
7738 + }
7739 + if (bulk->mode == VCHIQ_BULK_MODE_BLOCKING) {
7740 + struct bulk_waiter *waiter;
7741 + spin_lock(&bulk_waiter_spinlock);
7742 + waiter = bulk->userdata;
7743 + if (waiter) {
7744 + waiter->actual = bulk->actual;
7745 + up(&waiter->event);
7746 + }
7747 + spin_unlock(&bulk_waiter_spinlock);
7748 + } else if (bulk->mode ==
7749 + VCHIQ_BULK_MODE_CALLBACK) {
7750 + VCHIQ_REASON_T reason = (bulk->dir ==
7751 + VCHIQ_BULK_TRANSMIT) ?
7752 + ((bulk->actual ==
7753 + VCHIQ_BULK_ACTUAL_ABORTED) ?
7754 + VCHIQ_BULK_TRANSMIT_ABORTED :
7755 + VCHIQ_BULK_TRANSMIT_DONE) :
7756 + ((bulk->actual ==
7757 + VCHIQ_BULK_ACTUAL_ABORTED) ?
7758 + VCHIQ_BULK_RECEIVE_ABORTED :
7759 + VCHIQ_BULK_RECEIVE_DONE);
7760 + status = make_service_callback(service,
7761 + reason, NULL, bulk->userdata);
7762 + if (status == VCHIQ_RETRY)
7763 + break;
7764 + }
7765 + }
7766 +
7767 + queue->remove++;
7768 + up(&service->bulk_remove_event);
7769 + }
7770 + if (!retry_poll)
7771 + status = VCHIQ_SUCCESS;
7772 + }
7773 +
7774 + if (status == VCHIQ_RETRY)
7775 + request_poll(service->state, service,
7776 + (queue == &service->bulk_tx) ?
7777 + VCHIQ_POLL_TXNOTIFY : VCHIQ_POLL_RXNOTIFY);
7778 +
7779 + return status;
7780 +}
7781 +
7782 +/* Called by the slot handler thread */
7783 +static void
7784 +poll_services(VCHIQ_STATE_T *state)
7785 +{
7786 + int group, i;
7787 +
7788 + for (group = 0; group < BITSET_SIZE(state->unused_service); group++) {
7789 + uint32_t flags;
7790 + flags = atomic_xchg(&state->poll_services[group], 0);
7791 + for (i = 0; flags; i++) {
7792 + if (flags & (1 << i)) {
7793 + VCHIQ_SERVICE_T *service =
7794 + find_service_by_port(state,
7795 + (group<<5) + i);
7796 + uint32_t service_flags;
7797 + flags &= ~(1 << i);
7798 + if (!service)
7799 + continue;
7800 + service_flags =
7801 + atomic_xchg(&service->poll_flags, 0);
7802 + if (service_flags &
7803 + (1 << VCHIQ_POLL_REMOVE)) {
7804 + vchiq_log_info(vchiq_core_log_level,
7805 + "%d: ps - remove %d<->%d",
7806 + state->id, service->localport,
7807 + service->remoteport);
7808 +
7809 + /* Make it look like a client, because
7810 + it must be removed and not left in
7811 + the LISTENING state. */
7812 + service->public_fourcc =
7813 + VCHIQ_FOURCC_INVALID;
7814 +
7815 + if (vchiq_close_service_internal(
7816 + service, 0/*!close_recvd*/) !=
7817 + VCHIQ_SUCCESS)
7818 + request_poll(state, service,
7819 + VCHIQ_POLL_REMOVE);
7820 + } else if (service_flags &
7821 + (1 << VCHIQ_POLL_TERMINATE)) {
7822 + vchiq_log_info(vchiq_core_log_level,
7823 + "%d: ps - terminate %d<->%d",
7824 + state->id, service->localport,
7825 + service->remoteport);
7826 + if (vchiq_close_service_internal(
7827 + service, 0/*!close_recvd*/) !=
7828 + VCHIQ_SUCCESS)
7829 + request_poll(state, service,
7830 + VCHIQ_POLL_TERMINATE);
7831 + }
7832 + if (service_flags & (1 << VCHIQ_POLL_TXNOTIFY))
7833 + notify_bulks(service,
7834 + &service->bulk_tx,
7835 + 1/*retry_poll*/);
7836 + if (service_flags & (1 << VCHIQ_POLL_RXNOTIFY))
7837 + notify_bulks(service,
7838 + &service->bulk_rx,
7839 + 1/*retry_poll*/);
7840 + unlock_service(service);
7841 + }
7842 + }
7843 + }
7844 +}
7845 +
7846 +/* Called by the slot handler or application threads, holding the bulk mutex. */
7847 +static int
7848 +resolve_bulks(VCHIQ_SERVICE_T *service, VCHIQ_BULK_QUEUE_T *queue)
7849 +{
7850 + VCHIQ_STATE_T *state = service->state;
7851 + int resolved = 0;
7852 + int rc;
7853 +
7854 + while ((queue->process != queue->local_insert) &&
7855 + (queue->process != queue->remote_insert)) {
7856 + VCHIQ_BULK_T *bulk = &queue->bulks[BULK_INDEX(queue->process)];
7857 +
7858 + vchiq_log_trace(vchiq_core_log_level,
7859 + "%d: rb:%d %cx - li=%x ri=%x p=%x",
7860 + state->id, service->localport,
7861 + (queue == &service->bulk_tx) ? 't' : 'r',
7862 + queue->local_insert, queue->remote_insert,
7863 + queue->process);
7864 +
7865 + WARN_ON(!((int)(queue->local_insert - queue->process) > 0));
7866 + WARN_ON(!((int)(queue->remote_insert - queue->process) > 0));
7867 +
7868 + rc = mutex_lock_interruptible(&state->bulk_transfer_mutex);
7869 + if (rc != 0)
7870 + break;
7871 +
7872 + vchiq_transfer_bulk(bulk);
7873 + mutex_unlock(&state->bulk_transfer_mutex);
7874 +
7875 + if (vchiq_core_msg_log_level >= VCHIQ_LOG_INFO) {
7876 + const char *header = (queue == &service->bulk_tx) ?
7877 + "Send Bulk to" : "Recv Bulk from";
7878 + if (bulk->actual != VCHIQ_BULK_ACTUAL_ABORTED)
7879 + vchiq_log_info(vchiq_core_msg_log_level,
7880 + "%s %c%c%c%c d:%d len:%d %x<->%x",
7881 + header,
7882 + VCHIQ_FOURCC_AS_4CHARS(
7883 + service->base.fourcc),
7884 + service->remoteport,
7885 + bulk->size,
7886 + (unsigned int)bulk->data,
7887 + (unsigned int)bulk->remote_data);
7888 + else
7889 + vchiq_log_info(vchiq_core_msg_log_level,
7890 + "%s %c%c%c%c d:%d ABORTED - tx len:%d,"
7891 + " rx len:%d %x<->%x",
7892 + header,
7893 + VCHIQ_FOURCC_AS_4CHARS(
7894 + service->base.fourcc),
7895 + service->remoteport,
7896 + bulk->size,
7897 + bulk->remote_size,
7898 + (unsigned int)bulk->data,
7899 + (unsigned int)bulk->remote_data);
7900 + }
7901 +
7902 + vchiq_complete_bulk(bulk);
7903 + queue->process++;
7904 + resolved++;
7905 + }
7906 + return resolved;
7907 +}
7908 +
7909 +/* Called with the bulk_mutex held */
7910 +static void
7911 +abort_outstanding_bulks(VCHIQ_SERVICE_T *service, VCHIQ_BULK_QUEUE_T *queue)
7912 +{
7913 + int is_tx = (queue == &service->bulk_tx);
7914 + vchiq_log_trace(vchiq_core_log_level,
7915 + "%d: aob:%d %cx - li=%x ri=%x p=%x",
7916 + service->state->id, service->localport, is_tx ? 't' : 'r',
7917 + queue->local_insert, queue->remote_insert, queue->process);
7918 +
7919 + WARN_ON(!((int)(queue->local_insert - queue->process) >= 0));
7920 + WARN_ON(!((int)(queue->remote_insert - queue->process) >= 0));
7921 +
7922 + while ((queue->process != queue->local_insert) ||
7923 + (queue->process != queue->remote_insert)) {
7924 + VCHIQ_BULK_T *bulk = &queue->bulks[BULK_INDEX(queue->process)];
7925 +
7926 + if (queue->process == queue->remote_insert) {
7927 + /* fabricate a matching dummy bulk */
7928 + bulk->remote_data = NULL;
7929 + bulk->remote_size = 0;
7930 + queue->remote_insert++;
7931 + }
7932 +
7933 + if (queue->process != queue->local_insert) {
7934 + vchiq_complete_bulk(bulk);
7935 +
7936 + vchiq_log_info(vchiq_core_msg_log_level,
7937 + "%s %c%c%c%c d:%d ABORTED - tx len:%d, "
7938 + "rx len:%d",
7939 + is_tx ? "Send Bulk to" : "Recv Bulk from",
7940 + VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
7941 + service->remoteport,
7942 + bulk->size,
7943 + bulk->remote_size);
7944 + } else {
7945 + /* fabricate a matching dummy bulk */
7946 + bulk->data = NULL;
7947 + bulk->size = 0;
7948 + bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
7949 + bulk->dir = is_tx ? VCHIQ_BULK_TRANSMIT :
7950 + VCHIQ_BULK_RECEIVE;
7951 + queue->local_insert++;
7952 + }
7953 +
7954 + queue->process++;
7955 + }
7956 +}
7957 +
7958 +/* Called from the slot handler thread */
7959 +static void
7960 +pause_bulks(VCHIQ_STATE_T *state)
7961 +{
7962 + if (unlikely(atomic_inc_return(&pause_bulks_count) != 1)) {
7963 + WARN_ON_ONCE(1);
7964 + atomic_set(&pause_bulks_count, 1);
7965 + return;
7966 + }
7967 +
7968 + /* Block bulk transfers from all services */
7969 + mutex_lock(&state->bulk_transfer_mutex);
7970 +}
7971 +
7972 +/* Called from the slot handler thread */
7973 +static void
7974 +resume_bulks(VCHIQ_STATE_T *state)
7975 +{
7976 + int i;
7977 + if (unlikely(atomic_dec_return(&pause_bulks_count) != 0)) {
7978 + WARN_ON_ONCE(1);
7979 + atomic_set(&pause_bulks_count, 0);
7980 + return;
7981 + }
7982 +
7983 + /* Allow bulk transfers from all services */
7984 + mutex_unlock(&state->bulk_transfer_mutex);
7985 +
7986 + if (state->deferred_bulks == 0)
7987 + return;
7988 +
7989 +	/* Deal with any bulks which had to be deferred because we were in the
7990 +	 * paused state. Don't try to match the number of deferred bulks, in
7991 +	 * case something has come along and closed the service in the
7992 +	 * interim - just process all bulk queues for all services. */
7993 + vchiq_log_info(vchiq_core_log_level, "%s: processing %d deferred bulks",
7994 + __func__, state->deferred_bulks);
7995 +
7996 + for (i = 0; i < state->unused_service; i++) {
7997 + VCHIQ_SERVICE_T *service = state->services[i];
7998 + int resolved_rx = 0;
7999 + int resolved_tx = 0;
8000 + if (!service || (service->srvstate != VCHIQ_SRVSTATE_OPEN))
8001 + continue;
8002 +
8003 + mutex_lock(&service->bulk_mutex);
8004 + resolved_rx = resolve_bulks(service, &service->bulk_rx);
8005 + resolved_tx = resolve_bulks(service, &service->bulk_tx);
8006 + mutex_unlock(&service->bulk_mutex);
8007 + if (resolved_rx)
8008 + notify_bulks(service, &service->bulk_rx, 1);
8009 + if (resolved_tx)
8010 + notify_bulks(service, &service->bulk_tx, 1);
8011 + }
8012 + state->deferred_bulks = 0;
8013 +}
8014 +
8015 +static int
8016 +parse_open(VCHIQ_STATE_T *state, VCHIQ_HEADER_T *header)
8017 +{
8018 + VCHIQ_SERVICE_T *service = NULL;
8019 + int msgid, size;
8020 + int type;
8021 + unsigned int localport, remoteport;
8022 +
8023 + msgid = header->msgid;
8024 + size = header->size;
8025 + type = VCHIQ_MSG_TYPE(msgid);
8026 + localport = VCHIQ_MSG_DSTPORT(msgid);
8027 + remoteport = VCHIQ_MSG_SRCPORT(msgid);
8028 + if (size >= sizeof(struct vchiq_open_payload)) {
8029 + const struct vchiq_open_payload *payload =
8030 + (struct vchiq_open_payload *)header->data;
8031 + unsigned int fourcc;
8032 +
8033 + fourcc = payload->fourcc;
8034 + vchiq_log_info(vchiq_core_log_level,
8035 + "%d: prs OPEN@%x (%d->'%c%c%c%c')",
8036 + state->id, (unsigned int)header,
8037 + localport,
8038 + VCHIQ_FOURCC_AS_4CHARS(fourcc));
8039 +
8040 + service = get_listening_service(state, fourcc);
8041 +
8042 + if (service) {
8043 + /* A matching service exists */
8044 + short version = payload->version;
8045 + short version_min = payload->version_min;
8046 + if ((service->version < version_min) ||
8047 + (version < service->version_min)) {
8048 + /* Version mismatch */
8049 + vchiq_loud_error_header();
8050 + vchiq_loud_error("%d: service %d (%c%c%c%c) "
8051 + "version mismatch - local (%d, min %d)"
8052 + " vs. remote (%d, min %d)",
8053 + state->id, service->localport,
8054 + VCHIQ_FOURCC_AS_4CHARS(fourcc),
8055 + service->version, service->version_min,
8056 + version, version_min);
8057 + vchiq_loud_error_footer();
8058 + unlock_service(service);
8059 + goto fail_open;
8060 + }
8061 + service->peer_version = version;
8062 +
8063 + if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
8064 + struct vchiq_openack_payload ack_payload = {
8065 + service->version
8066 + };
8067 + VCHIQ_ELEMENT_T body = {
8068 + &ack_payload,
8069 + sizeof(ack_payload)
8070 + };
8071 +
8072 + /* Acknowledge the OPEN */
8073 + if (service->sync) {
8074 + if (queue_message_sync(state, NULL,
8075 + VCHIQ_MAKE_MSG(
8076 + VCHIQ_MSG_OPENACK,
8077 + service->localport,
8078 + remoteport),
8079 + &body, 1, sizeof(ack_payload),
8080 + 0) == VCHIQ_RETRY)
8081 + goto bail_not_ready;
8082 + } else {
8083 + if (queue_message(state, NULL,
8084 + VCHIQ_MAKE_MSG(
8085 + VCHIQ_MSG_OPENACK,
8086 + service->localport,
8087 + remoteport),
8088 + &body, 1, sizeof(ack_payload),
8089 + 0) == VCHIQ_RETRY)
8090 + goto bail_not_ready;
8091 + }
8092 +
8093 + /* The service is now open */
8094 + vchiq_set_service_state(service,
8095 + service->sync ? VCHIQ_SRVSTATE_OPENSYNC
8096 + : VCHIQ_SRVSTATE_OPEN);
8097 + }
8098 +
8099 + service->remoteport = remoteport;
8100 + service->client_id = ((int *)header->data)[1];
8101 + if (make_service_callback(service, VCHIQ_SERVICE_OPENED,
8102 + NULL, NULL) == VCHIQ_RETRY) {
8103 + /* Bail out if not ready */
8104 + service->remoteport = VCHIQ_PORT_FREE;
8105 + goto bail_not_ready;
8106 + }
8107 +
8108 + /* Success - the message has been dealt with */
8109 + unlock_service(service);
8110 + return 1;
8111 + }
8112 + }
8113 +
8114 +fail_open:
8115 + /* No available service, or an invalid request - send a CLOSE */
8116 + if (queue_message(state, NULL,
8117 + VCHIQ_MAKE_MSG(VCHIQ_MSG_CLOSE, 0, VCHIQ_MSG_SRCPORT(msgid)),
8118 + NULL, 0, 0, 0) == VCHIQ_RETRY)
8119 + goto bail_not_ready;
8120 +
8121 + return 1;
8122 +
8123 +bail_not_ready:
8124 + unlock_service(service);
8125 +
8126 + return 0;
8127 +}
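/*
 * Illustrative sketch (names are assumptions, not from the driver): the
 * version check in parse_open() above accepts an OPEN only when each end
 * offers a version the other end still supports, i.e. both advertised
 * versions are at or above the peer's minimum.
 */
static inline int example_versions_compatible(short local_version,
	short local_min, short remote_version, short remote_min)
{
	/* Rejection above: (service->version < version_min) ||
	   (version < service->version_min) */
	return (local_version >= remote_min) && (remote_version >= local_min);
}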
8128 +
8129 +/* Called by the slot handler thread */
8130 +static void
8131 +parse_rx_slots(VCHIQ_STATE_T *state)
8132 +{
8133 + VCHIQ_SHARED_STATE_T *remote = state->remote;
8134 + VCHIQ_SERVICE_T *service = NULL;
8135 + int tx_pos;
8136 + DEBUG_INITIALISE(state->local)
8137 +
8138 + tx_pos = remote->tx_pos;
8139 +
8140 + while (state->rx_pos != tx_pos) {
8141 + VCHIQ_HEADER_T *header;
8142 + int msgid, size;
8143 + int type;
8144 + unsigned int localport, remoteport;
8145 +
8146 + DEBUG_TRACE(PARSE_LINE);
8147 + if (!state->rx_data) {
8148 + int rx_index;
8149 + WARN_ON(!((state->rx_pos & VCHIQ_SLOT_MASK) == 0));
8150 + rx_index = remote->slot_queue[
8151 + SLOT_QUEUE_INDEX_FROM_POS(state->rx_pos) &
8152 + VCHIQ_SLOT_QUEUE_MASK];
8153 + state->rx_data = (char *)SLOT_DATA_FROM_INDEX(state,
8154 + rx_index);
8155 + state->rx_info = SLOT_INFO_FROM_INDEX(state, rx_index);
8156 +
8157 + /* Initialise use_count to one, and increment
8158 + ** release_count at the end of the slot to avoid
8159 + ** releasing the slot prematurely. */
8160 + state->rx_info->use_count = 1;
8161 + state->rx_info->release_count = 0;
8162 + }
8163 +
8164 + header = (VCHIQ_HEADER_T *)(state->rx_data +
8165 + (state->rx_pos & VCHIQ_SLOT_MASK));
8166 + DEBUG_VALUE(PARSE_HEADER, (int)header);
8167 + msgid = header->msgid;
8168 + DEBUG_VALUE(PARSE_MSGID, msgid);
8169 + size = header->size;
8170 + type = VCHIQ_MSG_TYPE(msgid);
8171 + localport = VCHIQ_MSG_DSTPORT(msgid);
8172 + remoteport = VCHIQ_MSG_SRCPORT(msgid);
8173 +
8174 + if (type != VCHIQ_MSG_DATA)
8175 + VCHIQ_STATS_INC(state, ctrl_rx_count);
8176 +
8177 + switch (type) {
8178 + case VCHIQ_MSG_OPENACK:
8179 + case VCHIQ_MSG_CLOSE:
8180 + case VCHIQ_MSG_DATA:
8181 + case VCHIQ_MSG_BULK_RX:
8182 + case VCHIQ_MSG_BULK_TX:
8183 + case VCHIQ_MSG_BULK_RX_DONE:
8184 + case VCHIQ_MSG_BULK_TX_DONE:
8185 + service = find_service_by_port(state, localport);
8186 + if ((!service || service->remoteport != remoteport) &&
8187 + (localport == 0) &&
8188 + (type == VCHIQ_MSG_CLOSE)) {
8189 + /* This could be a CLOSE from a client which
8190 + hadn't yet received the OPENACK - look for
8191 + the connected service */
8192 + if (service)
8193 + unlock_service(service);
8194 + service = get_connected_service(state,
8195 + remoteport);
8196 + if (service)
8197 + vchiq_log_warning(vchiq_core_log_level,
8198 + "%d: prs %s@%x (%d->%d) - "
8199 + "found connected service %d",
8200 + state->id, msg_type_str(type),
8201 + (unsigned int)header,
8202 + remoteport, localport,
8203 + service->localport);
8204 + }
8205 +
8206 + if (!service) {
8207 + vchiq_log_error(vchiq_core_log_level,
8208 + "%d: prs %s@%x (%d->%d) - "
8209 + "invalid/closed service %d",
8210 + state->id, msg_type_str(type),
8211 + (unsigned int)header,
8212 + remoteport, localport, localport);
8213 + goto skip_message;
8214 + }
8215 + break;
8216 + default:
8217 + break;
8218 + }
8219 +
8220 + if (vchiq_core_msg_log_level >= VCHIQ_LOG_INFO) {
8221 + int svc_fourcc;
8222 +
8223 + svc_fourcc = service
8224 + ? service->base.fourcc
8225 + : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
8226 + vchiq_log_info(vchiq_core_msg_log_level,
8227 + "Rcvd Msg %s(%u) from %c%c%c%c s:%d d:%d "
8228 + "len:%d",
8229 + msg_type_str(type), type,
8230 + VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
8231 + remoteport, localport, size);
8232 + if (size > 0)
8233 + vchiq_log_dump_mem("Rcvd", 0, header->data,
8234 + min(64, size));
8235 + }
8236 +
8237 + if (((unsigned int)header & VCHIQ_SLOT_MASK) + calc_stride(size)
8238 + > VCHIQ_SLOT_SIZE) {
8239 + vchiq_log_error(vchiq_core_log_level,
8240 + "header %x (msgid %x) - size %x too big for "
8241 + "slot",
8242 + (unsigned int)header, (unsigned int)msgid,
8243 + (unsigned int)size);
8244 + WARN(1, "oversized for slot\n");
8245 + }
8246 +
8247 + switch (type) {
8248 + case VCHIQ_MSG_OPEN:
8249 + WARN_ON(!(VCHIQ_MSG_DSTPORT(msgid) == 0));
8250 + if (!parse_open(state, header))
8251 + goto bail_not_ready;
8252 + break;
8253 + case VCHIQ_MSG_OPENACK:
8254 + if (size >= sizeof(struct vchiq_openack_payload)) {
8255 + const struct vchiq_openack_payload *payload =
8256 + (struct vchiq_openack_payload *)
8257 + header->data;
8258 + service->peer_version = payload->version;
8259 + }
8260 + vchiq_log_info(vchiq_core_log_level,
8261 + "%d: prs OPENACK@%x,%x (%d->%d) v:%d",
8262 + state->id, (unsigned int)header, size,
8263 + remoteport, localport, service->peer_version);
8264 + if (service->srvstate ==
8265 + VCHIQ_SRVSTATE_OPENING) {
8266 + service->remoteport = remoteport;
8267 + vchiq_set_service_state(service,
8268 + VCHIQ_SRVSTATE_OPEN);
8269 + up(&service->remove_event);
8270 + } else
8271 + vchiq_log_error(vchiq_core_log_level,
8272 + "OPENACK received in state %s",
8273 + srvstate_names[service->srvstate]);
8274 + break;
8275 + case VCHIQ_MSG_CLOSE:
8276 + WARN_ON(size != 0); /* There should be no data */
8277 +
8278 + vchiq_log_info(vchiq_core_log_level,
8279 + "%d: prs CLOSE@%x (%d->%d)",
8280 + state->id, (unsigned int)header,
8281 + remoteport, localport);
8282 +
8283 + mark_service_closing_internal(service, 1);
8284 +
8285 + if (vchiq_close_service_internal(service,
8286 + 1/*close_recvd*/) == VCHIQ_RETRY)
8287 + goto bail_not_ready;
8288 +
8289 + vchiq_log_info(vchiq_core_log_level,
8290 + "Close Service %c%c%c%c s:%u d:%d",
8291 + VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
8292 + service->localport,
8293 + service->remoteport);
8294 + break;
8295 + case VCHIQ_MSG_DATA:
8296 + vchiq_log_trace(vchiq_core_log_level,
8297 + "%d: prs DATA@%x,%x (%d->%d)",
8298 + state->id, (unsigned int)header, size,
8299 + remoteport, localport);
8300 +
8301 + if ((service->remoteport == remoteport)
8302 + && (service->srvstate ==
8303 + VCHIQ_SRVSTATE_OPEN)) {
8304 + header->msgid = msgid | VCHIQ_MSGID_CLAIMED;
8305 + claim_slot(state->rx_info);
8306 + DEBUG_TRACE(PARSE_LINE);
8307 + if (make_service_callback(service,
8308 + VCHIQ_MESSAGE_AVAILABLE, header,
8309 + NULL) == VCHIQ_RETRY) {
8310 + DEBUG_TRACE(PARSE_LINE);
8311 + goto bail_not_ready;
8312 + }
8313 + VCHIQ_SERVICE_STATS_INC(service, ctrl_rx_count);
8314 + VCHIQ_SERVICE_STATS_ADD(service, ctrl_rx_bytes,
8315 + size);
8316 + } else {
8317 + VCHIQ_STATS_INC(state, error_count);
8318 + }
8319 + break;
8320 + case VCHIQ_MSG_CONNECT:
8321 + vchiq_log_info(vchiq_core_log_level,
8322 + "%d: prs CONNECT@%x",
8323 + state->id, (unsigned int)header);
8324 + up(&state->connect);
8325 + break;
8326 + case VCHIQ_MSG_BULK_RX:
8327 + case VCHIQ_MSG_BULK_TX: {
8328 + VCHIQ_BULK_QUEUE_T *queue;
8329 + WARN_ON(!state->is_master);
8330 + queue = (type == VCHIQ_MSG_BULK_RX) ?
8331 + &service->bulk_tx : &service->bulk_rx;
8332 + if ((service->remoteport == remoteport)
8333 + && (service->srvstate ==
8334 + VCHIQ_SRVSTATE_OPEN)) {
8335 + VCHIQ_BULK_T *bulk;
8336 + int resolved = 0;
8337 +
8338 + DEBUG_TRACE(PARSE_LINE);
8339 + if (mutex_lock_interruptible(
8340 + &service->bulk_mutex) != 0) {
8341 + DEBUG_TRACE(PARSE_LINE);
8342 + goto bail_not_ready;
8343 + }
8344 +
8345 + WARN_ON(!(queue->remote_insert < queue->remove +
8346 + VCHIQ_NUM_SERVICE_BULKS));
8347 + bulk = &queue->bulks[
8348 + BULK_INDEX(queue->remote_insert)];
8349 + bulk->remote_data =
8350 + (void *)((int *)header->data)[0];
8351 + bulk->remote_size = ((int *)header->data)[1];
8352 + wmb();
8353 +
8354 + vchiq_log_info(vchiq_core_log_level,
8355 + "%d: prs %s@%x (%d->%d) %x@%x",
8356 + state->id, msg_type_str(type),
8357 + (unsigned int)header,
8358 + remoteport, localport,
8359 + bulk->remote_size,
8360 + (unsigned int)bulk->remote_data);
8361 +
8362 + queue->remote_insert++;
8363 +
8364 + if (atomic_read(&pause_bulks_count)) {
8365 + state->deferred_bulks++;
8366 + vchiq_log_info(vchiq_core_log_level,
8367 + "%s: deferring bulk (%d)",
8368 + __func__,
8369 + state->deferred_bulks);
8370 + if (state->conn_state !=
8371 + VCHIQ_CONNSTATE_PAUSE_SENT)
8372 + vchiq_log_error(
8373 + vchiq_core_log_level,
8374 + "%s: bulks paused in "
8375 + "unexpected state %s",
8376 + __func__,
8377 + conn_state_names[
8378 + state->conn_state]);
8379 + } else if (state->conn_state ==
8380 + VCHIQ_CONNSTATE_CONNECTED) {
8381 + DEBUG_TRACE(PARSE_LINE);
8382 + resolved = resolve_bulks(service,
8383 + queue);
8384 + }
8385 +
8386 + mutex_unlock(&service->bulk_mutex);
8387 + if (resolved)
8388 + notify_bulks(service, queue,
8389 + 1/*retry_poll*/);
8390 + }
8391 + } break;
8392 + case VCHIQ_MSG_BULK_RX_DONE:
8393 + case VCHIQ_MSG_BULK_TX_DONE:
8394 + WARN_ON(state->is_master);
8395 + if ((service->remoteport == remoteport)
8396 + && (service->srvstate !=
8397 + VCHIQ_SRVSTATE_FREE)) {
8398 + VCHIQ_BULK_QUEUE_T *queue;
8399 + VCHIQ_BULK_T *bulk;
8400 +
8401 + queue = (type == VCHIQ_MSG_BULK_RX_DONE) ?
8402 + &service->bulk_rx : &service->bulk_tx;
8403 +
8404 + DEBUG_TRACE(PARSE_LINE);
8405 + if (mutex_lock_interruptible(
8406 + &service->bulk_mutex) != 0) {
8407 + DEBUG_TRACE(PARSE_LINE);
8408 + goto bail_not_ready;
8409 + }
8410 + if ((int)(queue->remote_insert -
8411 + queue->local_insert) >= 0) {
8412 + vchiq_log_error(vchiq_core_log_level,
8413 + "%d: prs %s@%x (%d->%d) "
8414 + "unexpected (ri=%d,li=%d)",
8415 + state->id, msg_type_str(type),
8416 + (unsigned int)header,
8417 + remoteport, localport,
8418 + queue->remote_insert,
8419 + queue->local_insert);
8420 + mutex_unlock(&service->bulk_mutex);
8421 + break;
8422 + }
8423 +
8424 + BUG_ON(queue->process == queue->local_insert);
8425 + BUG_ON(queue->process != queue->remote_insert);
8426 +
8427 + bulk = &queue->bulks[
8428 + BULK_INDEX(queue->remote_insert)];
8429 + bulk->actual = *(int *)header->data;
8430 + queue->remote_insert++;
8431 +
8432 + vchiq_log_info(vchiq_core_log_level,
8433 + "%d: prs %s@%x (%d->%d) %x@%x",
8434 + state->id, msg_type_str(type),
8435 + (unsigned int)header,
8436 + remoteport, localport,
8437 + bulk->actual, (unsigned int)bulk->data);
8438 +
8439 + vchiq_log_trace(vchiq_core_log_level,
8440 + "%d: prs:%d %cx li=%x ri=%x p=%x",
8441 + state->id, localport,
8442 + (type == VCHIQ_MSG_BULK_RX_DONE) ?
8443 + 'r' : 't',
8444 + queue->local_insert,
8445 + queue->remote_insert, queue->process);
8446 +
8447 + DEBUG_TRACE(PARSE_LINE);
8448 + WARN_ON(queue->process == queue->local_insert);
8449 + vchiq_complete_bulk(bulk);
8450 + queue->process++;
8451 + mutex_unlock(&service->bulk_mutex);
8452 + DEBUG_TRACE(PARSE_LINE);
8453 + notify_bulks(service, queue, 1/*retry_poll*/);
8454 + DEBUG_TRACE(PARSE_LINE);
8455 + }
8456 + break;
8457 + case VCHIQ_MSG_PADDING:
8458 + vchiq_log_trace(vchiq_core_log_level,
8459 + "%d: prs PADDING@%x,%x",
8460 + state->id, (unsigned int)header, size);
8461 + break;
8462 + case VCHIQ_MSG_PAUSE:
8463 + /* If initiated, signal the application thread */
8464 + vchiq_log_trace(vchiq_core_log_level,
8465 + "%d: prs PAUSE@%x,%x",
8466 + state->id, (unsigned int)header, size);
8467 + if (state->conn_state == VCHIQ_CONNSTATE_PAUSED) {
8468 + vchiq_log_error(vchiq_core_log_level,
8469 + "%d: PAUSE received in state PAUSED",
8470 + state->id);
8471 + break;
8472 + }
8473 + if (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT) {
8474 + /* Send a PAUSE in response */
8475 + if (queue_message(state, NULL,
8476 + VCHIQ_MAKE_MSG(VCHIQ_MSG_PAUSE, 0, 0),
8477 + NULL, 0, 0, 0) == VCHIQ_RETRY)
8478 + goto bail_not_ready;
8479 + if (state->is_master)
8480 + pause_bulks(state);
8481 + }
8482 + /* At this point slot_mutex is held */
8483 + vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSED);
8484 + vchiq_platform_paused(state);
8485 + break;
8486 + case VCHIQ_MSG_RESUME:
8487 + vchiq_log_trace(vchiq_core_log_level,
8488 + "%d: prs RESUME@%x,%x",
8489 + state->id, (unsigned int)header, size);
8490 + /* Release the slot mutex */
8491 + mutex_unlock(&state->slot_mutex);
8492 + if (state->is_master)
8493 + resume_bulks(state);
8494 + vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
8495 + vchiq_platform_resumed(state);
8496 + break;
8497 +
8498 + case VCHIQ_MSG_REMOTE_USE:
8499 + vchiq_on_remote_use(state);
8500 + break;
8501 + case VCHIQ_MSG_REMOTE_RELEASE:
8502 + vchiq_on_remote_release(state);
8503 + break;
8504 + case VCHIQ_MSG_REMOTE_USE_ACTIVE:
8505 + vchiq_on_remote_use_active(state);
8506 + break;
8507 +
8508 + default:
8509 + vchiq_log_error(vchiq_core_log_level,
8510 + "%d: prs invalid msgid %x@%x,%x",
8511 + state->id, msgid, (unsigned int)header, size);
8512 + WARN(1, "invalid message\n");
8513 + break;
8514 + }
8515 +
8516 +skip_message:
8517 + if (service) {
8518 + unlock_service(service);
8519 + service = NULL;
8520 + }
8521 +
8522 + state->rx_pos += calc_stride(size);
8523 +
8524 + DEBUG_TRACE(PARSE_LINE);
8525 + /* Perform some housekeeping when the end of the slot is
8526 + ** reached. */
8527 + if ((state->rx_pos & VCHIQ_SLOT_MASK) == 0) {
8528 + /* Remove the extra reference count. */
8529 + release_slot(state, state->rx_info, NULL, NULL);
8530 + state->rx_data = NULL;
8531 + }
8532 + }
8533 +
8534 +bail_not_ready:
8535 + if (service)
8536 + unlock_service(service);
8537 +}
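/*
 * Illustrative sketch (helper name is an assumption): slot reclamation in
 * parse_rx_slots() is reference counted. A slot starts with use_count = 1
 * while it is being parsed, each claimed DATA message adds a reference via
 * claim_slot(), and the slot can only be recycled once release_count has
 * caught up with use_count - the same test release_service_messages()
 * makes further down.
 */
static inline int example_slot_still_in_use(int use_count, int release_count)
{
	return release_count != use_count;
}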
8538 +
8539 +/* Called by the slot handler thread */
8540 +static int
8541 +slot_handler_func(void *v)
8542 +{
8543 + VCHIQ_STATE_T *state = (VCHIQ_STATE_T *) v;
8544 + VCHIQ_SHARED_STATE_T *local = state->local;
8545 + DEBUG_INITIALISE(local)
8546 +
8547 + while (1) {
8548 + DEBUG_COUNT(SLOT_HANDLER_COUNT);
8549 + DEBUG_TRACE(SLOT_HANDLER_LINE);
8550 + remote_event_wait(&local->trigger);
8551 +
8552 + rmb();
8553 +
8554 + DEBUG_TRACE(SLOT_HANDLER_LINE);
8555 + if (state->poll_needed) {
8556 + /* Check if we need to suspend - may change our
8557 + * conn_state */
8558 + vchiq_platform_check_suspend(state);
8559 +
8560 + state->poll_needed = 0;
8561 +
8562 + /* Handle service polling and other rare conditions here
8563 + ** out of the mainline code */
8564 + switch (state->conn_state) {
8565 + case VCHIQ_CONNSTATE_CONNECTED:
8566 + /* Poll the services as requested */
8567 + poll_services(state);
8568 + break;
8569 +
8570 + case VCHIQ_CONNSTATE_PAUSING:
8571 + if (state->is_master)
8572 + pause_bulks(state);
8573 + if (queue_message(state, NULL,
8574 + VCHIQ_MAKE_MSG(VCHIQ_MSG_PAUSE, 0, 0),
8575 + NULL, 0, 0, 0) != VCHIQ_RETRY) {
8576 + vchiq_set_conn_state(state,
8577 + VCHIQ_CONNSTATE_PAUSE_SENT);
8578 + } else {
8579 + if (state->is_master)
8580 + resume_bulks(state);
8581 + /* Retry later */
8582 + state->poll_needed = 1;
8583 + }
8584 + break;
8585 +
8586 + case VCHIQ_CONNSTATE_PAUSED:
8587 + vchiq_platform_resume(state);
8588 + break;
8589 +
8590 + case VCHIQ_CONNSTATE_RESUMING:
8591 + if (queue_message(state, NULL,
8592 + VCHIQ_MAKE_MSG(VCHIQ_MSG_RESUME, 0, 0),
8593 + NULL, 0, 0, 0) != VCHIQ_RETRY) {
8594 + if (state->is_master)
8595 + resume_bulks(state);
8596 + vchiq_set_conn_state(state,
8597 + VCHIQ_CONNSTATE_CONNECTED);
8598 + vchiq_platform_resumed(state);
8599 + } else {
8600 + /* This should really be impossible,
8601 + ** since the PAUSE should have flushed
8602 + ** through outstanding messages. */
8603 + vchiq_log_error(vchiq_core_log_level,
8604 + "Failed to send RESUME "
8605 + "message");
8606 + BUG();
8607 + }
8608 + break;
8609 +
8610 + case VCHIQ_CONNSTATE_PAUSE_TIMEOUT:
8611 + case VCHIQ_CONNSTATE_RESUME_TIMEOUT:
8612 + vchiq_platform_handle_timeout(state);
8613 + break;
8614 + default:
8615 + break;
8616 + }
8617 +
8618 +
8619 + }
8620 +
8621 + DEBUG_TRACE(SLOT_HANDLER_LINE);
8622 + parse_rx_slots(state);
8623 + }
8624 + return 0;
8625 +}
8626 +
8627 +
8628 +/* Called by the recycle thread */
8629 +static int
8630 +recycle_func(void *v)
8631 +{
8632 + VCHIQ_STATE_T *state = (VCHIQ_STATE_T *) v;
8633 + VCHIQ_SHARED_STATE_T *local = state->local;
8634 +
8635 + while (1) {
8636 + remote_event_wait(&local->recycle);
8637 +
8638 + process_free_queue(state);
8639 + }
8640 + return 0;
8641 +}
8642 +
8643 +
8644 +/* Called by the sync thread */
8645 +static int
8646 +sync_func(void *v)
8647 +{
8648 + VCHIQ_STATE_T *state = (VCHIQ_STATE_T *) v;
8649 + VCHIQ_SHARED_STATE_T *local = state->local;
8650 + VCHIQ_HEADER_T *header = (VCHIQ_HEADER_T *)SLOT_DATA_FROM_INDEX(state,
8651 + state->remote->slot_sync);
8652 +
8653 + while (1) {
8654 + VCHIQ_SERVICE_T *service;
8655 + int msgid, size;
8656 + int type;
8657 + unsigned int localport, remoteport;
8658 +
8659 + remote_event_wait(&local->sync_trigger);
8660 +
8661 + rmb();
8662 +
8663 + msgid = header->msgid;
8664 + size = header->size;
8665 + type = VCHIQ_MSG_TYPE(msgid);
8666 + localport = VCHIQ_MSG_DSTPORT(msgid);
8667 + remoteport = VCHIQ_MSG_SRCPORT(msgid);
8668 +
8669 + service = find_service_by_port(state, localport);
8670 +
8671 + if (!service) {
8672 + vchiq_log_error(vchiq_sync_log_level,
8673 + "%d: sf %s@%x (%d->%d) - "
8674 + "invalid/closed service %d",
8675 + state->id, msg_type_str(type),
8676 + (unsigned int)header,
8677 + remoteport, localport, localport);
8678 + release_message_sync(state, header);
8679 + continue;
8680 + }
8681 +
8682 + if (vchiq_sync_log_level >= VCHIQ_LOG_TRACE) {
8683 + int svc_fourcc;
8684 +
8685 + svc_fourcc = service
8686 + ? service->base.fourcc
8687 + : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
8688 + vchiq_log_trace(vchiq_sync_log_level,
8689 + "Rcvd Msg %s from %c%c%c%c s:%d d:%d len:%d",
8690 + msg_type_str(type),
8691 + VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
8692 + remoteport, localport, size);
8693 + if (size > 0)
8694 + vchiq_log_dump_mem("Rcvd", 0, header->data,
8695 + min(64, size));
8696 + }
8697 +
8698 + switch (type) {
8699 + case VCHIQ_MSG_OPENACK:
8700 + if (size >= sizeof(struct vchiq_openack_payload)) {
8701 + const struct vchiq_openack_payload *payload =
8702 + (struct vchiq_openack_payload *)
8703 + header->data;
8704 + service->peer_version = payload->version;
8705 + }
8706 + vchiq_log_info(vchiq_sync_log_level,
8707 + "%d: sf OPENACK@%x,%x (%d->%d) v:%d",
8708 + state->id, (unsigned int)header, size,
8709 + remoteport, localport, service->peer_version);
8710 + if (service->srvstate == VCHIQ_SRVSTATE_OPENING) {
8711 + service->remoteport = remoteport;
8712 + vchiq_set_service_state(service,
8713 + VCHIQ_SRVSTATE_OPENSYNC);
8714 + up(&service->remove_event);
8715 + }
8716 + release_message_sync(state, header);
8717 + break;
8718 +
8719 + case VCHIQ_MSG_DATA:
8720 + vchiq_log_trace(vchiq_sync_log_level,
8721 + "%d: sf DATA@%x,%x (%d->%d)",
8722 + state->id, (unsigned int)header, size,
8723 + remoteport, localport);
8724 +
8725 + if ((service->remoteport == remoteport) &&
8726 + (service->srvstate ==
8727 + VCHIQ_SRVSTATE_OPENSYNC)) {
8728 + if (make_service_callback(service,
8729 + VCHIQ_MESSAGE_AVAILABLE, header,
8730 + NULL) == VCHIQ_RETRY)
8731 + vchiq_log_error(vchiq_sync_log_level,
8732 + "synchronous callback to "
8733 + "service %d returns "
8734 + "VCHIQ_RETRY",
8735 + localport);
8736 + }
8737 + break;
8738 +
8739 + default:
8740 + vchiq_log_error(vchiq_sync_log_level,
8741 + "%d: sf unexpected msgid %x@%x,%x",
8742 + state->id, msgid, (unsigned int)header, size);
8743 + release_message_sync(state, header);
8744 + break;
8745 + }
8746 +
8747 + unlock_service(service);
8748 + }
8749 +
8750 + return 0;
8751 +}
8752 +
8753 +
8754 +static void
8755 +init_bulk_queue(VCHIQ_BULK_QUEUE_T *queue)
8756 +{
8757 + queue->local_insert = 0;
8758 + queue->remote_insert = 0;
8759 + queue->process = 0;
8760 + queue->remote_notify = 0;
8761 + queue->remove = 0;
8762 +}
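/*
 * Illustrative sketch (EXAMPLE_* names are assumptions): the queue fields
 * initialised above are free-running counters that BULK_INDEX() masks into
 * the fixed-size bulks[] ring. The queue is full when local_insert is a
 * whole ring ahead of remove - the condition vchiq_bulk_transfer() blocks
 * on below.
 */
#define EXAMPLE_NUM_BULKS 4	/* stand-in for VCHIQ_NUM_SERVICE_BULKS */
#define EXAMPLE_BULK_INDEX(x) ((x) & (EXAMPLE_NUM_BULKS - 1))

static inline int example_bulk_queue_full(int local_insert, int remove)
{
	return local_insert == remove + EXAMPLE_NUM_BULKS;
}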
8763 +
8764 +
8765 +inline const char *
8766 +get_conn_state_name(VCHIQ_CONNSTATE_T conn_state)
8767 +{
8768 + return conn_state_names[conn_state];
8769 +}
8770 +
8771 +
8772 +VCHIQ_SLOT_ZERO_T *
8773 +vchiq_init_slots(void *mem_base, int mem_size)
8774 +{
8775 + int mem_align = (VCHIQ_SLOT_SIZE - (int)mem_base) & VCHIQ_SLOT_MASK;
8776 + VCHIQ_SLOT_ZERO_T *slot_zero =
8777 + (VCHIQ_SLOT_ZERO_T *)((char *)mem_base + mem_align);
8778 + int num_slots = (mem_size - mem_align)/VCHIQ_SLOT_SIZE;
8779 + int first_data_slot = VCHIQ_SLOT_ZERO_SLOTS;
8780 +
8781 + /* Ensure there is enough memory to run an absolutely minimum system */
8782 + num_slots -= first_data_slot;
8783 +
8784 + if (num_slots < 4) {
8785 + vchiq_log_error(vchiq_core_log_level,
8786 + "vchiq_init_slots - insufficient memory %x bytes",
8787 + mem_size);
8788 + return NULL;
8789 + }
8790 +
8791 + memset(slot_zero, 0, sizeof(VCHIQ_SLOT_ZERO_T));
8792 +
8793 + slot_zero->magic = VCHIQ_MAGIC;
8794 + slot_zero->version = VCHIQ_VERSION;
8795 + slot_zero->version_min = VCHIQ_VERSION_MIN;
8796 + slot_zero->slot_zero_size = sizeof(VCHIQ_SLOT_ZERO_T);
8797 + slot_zero->slot_size = VCHIQ_SLOT_SIZE;
8798 + slot_zero->max_slots = VCHIQ_MAX_SLOTS;
8799 + slot_zero->max_slots_per_side = VCHIQ_MAX_SLOTS_PER_SIDE;
8800 +
8801 + slot_zero->master.slot_sync = first_data_slot;
8802 + slot_zero->master.slot_first = first_data_slot + 1;
8803 + slot_zero->master.slot_last = first_data_slot + (num_slots/2) - 1;
8804 + slot_zero->slave.slot_sync = first_data_slot + (num_slots/2);
8805 + slot_zero->slave.slot_first = first_data_slot + (num_slots/2) + 1;
8806 + slot_zero->slave.slot_last = first_data_slot + num_slots - 1;
8807 +
8808 + return slot_zero;
8809 +}
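/*
 * Worked example (illustrative; the slot size and the VCHIQ_SLOT_ZERO_SLOTS
 * value are assumed): with 4 KiB slots, a slot-aligned mem_base and a
 * 64 KiB region, num_slots starts at 16. If slot zero consumes one slot
 * (first_data_slot = 1), 15 data slots remain: the master side gets sync
 * slot 1 and data slots 2..7, the slave side gets sync slot 8 and data
 * slots 9..15 - the same arithmetic vchiq_init_slots() performs above.
 */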
8810 +
8811 +VCHIQ_STATUS_T
8812 +vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero,
8813 + int is_master)
8814 +{
8815 + VCHIQ_SHARED_STATE_T *local;
8816 + VCHIQ_SHARED_STATE_T *remote;
8817 + VCHIQ_STATUS_T status;
8818 + char threadname[10];
8819 + static int id;
8820 + int i;
8821 +
8822 + vchiq_log_warning(vchiq_core_log_level,
8823 + "%s: slot_zero = 0x%08lx, is_master = %d",
8824 + __func__, (unsigned long)slot_zero, is_master);
8825 +
8826 + /* Check the input configuration */
8827 +
8828 + if (slot_zero->magic != VCHIQ_MAGIC) {
8829 + vchiq_loud_error_header();
8830 + vchiq_loud_error("Invalid VCHIQ magic value found.");
8831 + vchiq_loud_error("slot_zero=%x: magic=%x (expected %x)",
8832 + (unsigned int)slot_zero, slot_zero->magic, VCHIQ_MAGIC);
8833 + vchiq_loud_error_footer();
8834 + return VCHIQ_ERROR;
8835 + }
8836 +
8837 + if (slot_zero->version < VCHIQ_VERSION_MIN) {
8838 + vchiq_loud_error_header();
8839 + vchiq_loud_error("Incompatible VCHIQ versions found.");
8840 + vchiq_loud_error("slot_zero=%x: VideoCore version=%d "
8841 + "(minimum %d)",
8842 + (unsigned int)slot_zero, slot_zero->version,
8843 + VCHIQ_VERSION_MIN);
8844 + vchiq_loud_error("Restart with a newer VideoCore image.");
8845 + vchiq_loud_error_footer();
8846 + return VCHIQ_ERROR;
8847 + }
8848 +
8849 + if (VCHIQ_VERSION < slot_zero->version_min) {
8850 + vchiq_loud_error_header();
8851 + vchiq_loud_error("Incompatible VCHIQ versions found.");
8852 + vchiq_loud_error("slot_zero=%x: version=%d (VideoCore "
8853 + "minimum %d)",
8854 + (unsigned int)slot_zero, VCHIQ_VERSION,
8855 + slot_zero->version_min);
8856 + vchiq_loud_error("Restart with a newer kernel.");
8857 + vchiq_loud_error_footer();
8858 + return VCHIQ_ERROR;
8859 + }
8860 +
8861 + if ((slot_zero->slot_zero_size != sizeof(VCHIQ_SLOT_ZERO_T)) ||
8862 + (slot_zero->slot_size != VCHIQ_SLOT_SIZE) ||
8863 + (slot_zero->max_slots != VCHIQ_MAX_SLOTS) ||
8864 + (slot_zero->max_slots_per_side != VCHIQ_MAX_SLOTS_PER_SIDE)) {
8865 + vchiq_loud_error_header();
8866 + if (slot_zero->slot_zero_size != sizeof(VCHIQ_SLOT_ZERO_T))
8867 + vchiq_loud_error("slot_zero=%x: slot_zero_size=%x "
8868 + "(expected %x)",
8869 + (unsigned int)slot_zero,
8870 + slot_zero->slot_zero_size,
8871 + sizeof(VCHIQ_SLOT_ZERO_T));
8872 + if (slot_zero->slot_size != VCHIQ_SLOT_SIZE)
8873 + vchiq_loud_error("slot_zero=%x: slot_size=%d "
8874 + "(expected %d",
8875 + (unsigned int)slot_zero, slot_zero->slot_size,
8876 + VCHIQ_SLOT_SIZE);
8877 + if (slot_zero->max_slots != VCHIQ_MAX_SLOTS)
8878 + vchiq_loud_error("slot_zero=%x: max_slots=%d "
8879 + "(expected %d)",
8880 + (unsigned int)slot_zero, slot_zero->max_slots,
8881 + VCHIQ_MAX_SLOTS);
8882 + if (slot_zero->max_slots_per_side != VCHIQ_MAX_SLOTS_PER_SIDE)
8883 + vchiq_loud_error("slot_zero=%x: max_slots_per_side=%d "
8884 + "(expected %d)",
8885 + (unsigned int)slot_zero,
8886 + slot_zero->max_slots_per_side,
8887 + VCHIQ_MAX_SLOTS_PER_SIDE);
8888 + vchiq_loud_error_footer();
8889 + return VCHIQ_ERROR;
8890 + }
8891 +
8892 + if (is_master) {
8893 + local = &slot_zero->master;
8894 + remote = &slot_zero->slave;
8895 + } else {
8896 + local = &slot_zero->slave;
8897 + remote = &slot_zero->master;
8898 + }
8899 +
8900 + if (local->initialised) {
8901 + vchiq_loud_error_header();
8902 + if (remote->initialised)
8903 + vchiq_loud_error("local state has already been "
8904 + "initialised");
8905 + else
8906 + vchiq_loud_error("master/slave mismatch - two %ss",
8907 + is_master ? "master" : "slave");
8908 + vchiq_loud_error_footer();
8909 + return VCHIQ_ERROR;
8910 + }
8911 +
8912 + memset(state, 0, sizeof(VCHIQ_STATE_T));
8913 +
8914 + state->id = id++;
8915 + state->is_master = is_master;
8916 +
8917 + /*
8918 + initialize shared state pointers
8919 + */
8920 +
8921 + state->local = local;
8922 + state->remote = remote;
8923 + state->slot_data = (VCHIQ_SLOT_T *)slot_zero;
8924 +
8925 + /*
8926 + initialize events and mutexes
8927 + */
8928 +
8929 + sema_init(&state->connect, 0);
8930 + mutex_init(&state->mutex);
8931 + sema_init(&state->trigger_event, 0);
8932 + sema_init(&state->recycle_event, 0);
8933 + sema_init(&state->sync_trigger_event, 0);
8934 + sema_init(&state->sync_release_event, 0);
8935 +
8936 + mutex_init(&state->slot_mutex);
8937 + mutex_init(&state->recycle_mutex);
8938 + mutex_init(&state->sync_mutex);
8939 + mutex_init(&state->bulk_transfer_mutex);
8940 +
8941 + sema_init(&state->slot_available_event, 0);
8942 + sema_init(&state->slot_remove_event, 0);
8943 + sema_init(&state->data_quota_event, 0);
8944 +
8945 + state->slot_queue_available = 0;
8946 +
8947 + for (i = 0; i < VCHIQ_MAX_SERVICES; i++) {
8948 + VCHIQ_SERVICE_QUOTA_T *service_quota =
8949 + &state->service_quotas[i];
8950 + sema_init(&service_quota->quota_event, 0);
8951 + }
8952 +
8953 + for (i = local->slot_first; i <= local->slot_last; i++) {
8954 + local->slot_queue[state->slot_queue_available++] = i;
8955 + up(&state->slot_available_event);
8956 + }
8957 +
8958 + state->default_slot_quota = state->slot_queue_available/2;
8959 + state->default_message_quota =
8960 + min((unsigned short)(state->default_slot_quota * 256),
8961 + (unsigned short)~0);
8962 +
8963 + state->previous_data_index = -1;
8964 + state->data_use_count = 0;
8965 + state->data_quota = state->slot_queue_available - 1;
8966 +
8967 + local->trigger.event = &state->trigger_event;
8968 + remote_event_create(&local->trigger);
8969 + local->tx_pos = 0;
8970 +
8971 + local->recycle.event = &state->recycle_event;
8972 + remote_event_create(&local->recycle);
8973 + local->slot_queue_recycle = state->slot_queue_available;
8974 +
8975 + local->sync_trigger.event = &state->sync_trigger_event;
8976 + remote_event_create(&local->sync_trigger);
8977 +
8978 + local->sync_release.event = &state->sync_release_event;
8979 + remote_event_create(&local->sync_release);
8980 +
8981 + /* At start-of-day, the slot is empty and available */
8982 + ((VCHIQ_HEADER_T *)SLOT_DATA_FROM_INDEX(state, local->slot_sync))->msgid
8983 + = VCHIQ_MSGID_PADDING;
8984 + remote_event_signal_local(&local->sync_release);
8985 +
8986 + local->debug[DEBUG_ENTRIES] = DEBUG_MAX;
8987 +
8988 + status = vchiq_platform_init_state(state);
8989 +
8990 + /*
8991 + bring up slot handler thread
8992 + */
8993 + snprintf(threadname, sizeof(threadname), "VCHIQ-%d", state->id);
8994 + state->slot_handler_thread = kthread_create(&slot_handler_func,
8995 + (void *)state,
8996 + threadname);
8997 +
8998 + if (state->slot_handler_thread == NULL) {
8999 + vchiq_loud_error_header();
9000 + vchiq_loud_error("couldn't create thread %s", threadname);
9001 + vchiq_loud_error_footer();
9002 + return VCHIQ_ERROR;
9003 + }
9004 + set_user_nice(state->slot_handler_thread, -19);
9005 + wake_up_process(state->slot_handler_thread);
9006 +
9007 + snprintf(threadname, sizeof(threadname), "VCHIQr-%d", state->id);
9008 + state->recycle_thread = kthread_create(&recycle_func,
9009 + (void *)state,
9010 + threadname);
9011 + if (state->recycle_thread == NULL) {
9012 + vchiq_loud_error_header();
9013 + vchiq_loud_error("couldn't create thread %s", threadname);
9014 + vchiq_loud_error_footer();
9015 + return VCHIQ_ERROR;
9016 + }
9017 + set_user_nice(state->recycle_thread, -19);
9018 + wake_up_process(state->recycle_thread);
9019 +
9020 + snprintf(threadname, sizeof(threadname), "VCHIQs-%d", state->id);
9021 + state->sync_thread = kthread_create(&sync_func,
9022 + (void *)state,
9023 + threadname);
9024 + if (state->sync_thread == NULL) {
9025 + vchiq_loud_error_header();
9026 + vchiq_loud_error("couldn't create thread %s", threadname);
9027 + vchiq_loud_error_footer();
9028 + return VCHIQ_ERROR;
9029 + }
9030 + set_user_nice(state->sync_thread, -20);
9031 + wake_up_process(state->sync_thread);
9032 +
9033 + BUG_ON(state->id >= VCHIQ_MAX_STATES);
9034 + vchiq_states[state->id] = state;
9035 +
9036 + /* Indicate readiness to the other side */
9037 + local->initialised = 1;
9038 +
9039 + return status;
9040 +}
9041 +
9042 +/* Called from application thread when a client or server service is created. */
9043 +VCHIQ_SERVICE_T *
9044 +vchiq_add_service_internal(VCHIQ_STATE_T *state,
9045 + const VCHIQ_SERVICE_PARAMS_T *params, int srvstate,
9046 + VCHIQ_INSTANCE_T instance)
9047 +{
9048 + VCHIQ_SERVICE_T *service;
9049 +
9050 + service = kmalloc(sizeof(VCHIQ_SERVICE_T), GFP_KERNEL);
9051 + if (service) {
9052 + service->base.fourcc = params->fourcc;
9053 + service->base.callback = params->callback;
9054 + service->base.userdata = params->userdata;
9055 + service->handle = VCHIQ_SERVICE_HANDLE_INVALID;
9056 + service->ref_count = 1;
9057 + service->srvstate = VCHIQ_SRVSTATE_FREE;
9058 + service->localport = VCHIQ_PORT_FREE;
9059 + service->remoteport = VCHIQ_PORT_FREE;
9060 +
9061 + service->public_fourcc = (srvstate == VCHIQ_SRVSTATE_OPENING) ?
9062 + VCHIQ_FOURCC_INVALID : params->fourcc;
9063 + service->client_id = 0;
9064 + service->auto_close = 1;
9065 + service->sync = 0;
9066 + service->closing = 0;
9067 + atomic_set(&service->poll_flags, 0);
9068 + service->version = params->version;
9069 + service->version_min = params->version_min;
9070 + service->state = state;
9071 + service->instance = instance;
9072 + service->service_use_count = 0;
9073 + init_bulk_queue(&service->bulk_tx);
9074 + init_bulk_queue(&service->bulk_rx);
9075 + sema_init(&service->remove_event, 0);
9076 + sema_init(&service->bulk_remove_event, 0);
9077 + mutex_init(&service->bulk_mutex);
9078 + memset(&service->stats, 0, sizeof(service->stats));
9079 + } else {
9080 + vchiq_log_error(vchiq_core_log_level,
9081 + "Out of memory");
9082 + }
9083 +
9084 + if (service) {
9085 + VCHIQ_SERVICE_T **pservice = NULL;
9086 + int i;
9087 +
9088 + /* Although it is perfectly possible to use service_spinlock
9089 + ** to protect the creation of services, it is overkill as it
9090 + ** disables interrupts while the array is searched.
9091 + ** The only danger is of another thread trying to create a
9092 + ** service - service deletion is safe.
9093 + ** Therefore it is preferable to use state->mutex which,
9094 + ** although slower to claim, doesn't block interrupts while
9095 + ** it is held.
9096 + */
9097 +
9098 + mutex_lock(&state->mutex);
9099 +
9100 + /* Prepare to use a previously unused service */
9101 + if (state->unused_service < VCHIQ_MAX_SERVICES)
9102 + pservice = &state->services[state->unused_service];
9103 +
9104 + if (srvstate == VCHIQ_SRVSTATE_OPENING) {
9105 + for (i = 0; i < state->unused_service; i++) {
9106 + VCHIQ_SERVICE_T *srv = state->services[i];
9107 + if (!srv) {
9108 + pservice = &state->services[i];
9109 + break;
9110 + }
9111 + }
9112 + } else {
9113 + for (i = (state->unused_service - 1); i >= 0; i--) {
9114 + VCHIQ_SERVICE_T *srv = state->services[i];
9115 + if (!srv)
9116 + pservice = &state->services[i];
9117 + else if ((srv->public_fourcc == params->fourcc)
9118 + && ((srv->instance != instance) ||
9119 + (srv->base.callback !=
9120 + params->callback))) {
9121 + /* There is another server using this
9122 + ** fourcc which doesn't match. */
9123 + pservice = NULL;
9124 + break;
9125 + }
9126 + }
9127 + }
9128 +
9129 + if (pservice) {
9130 + service->localport = (pservice - state->services);
9131 + if (!handle_seq)
9132 + handle_seq = VCHIQ_MAX_STATES *
9133 + VCHIQ_MAX_SERVICES;
9134 + service->handle = handle_seq |
9135 + (state->id * VCHIQ_MAX_SERVICES) |
9136 + service->localport;
9137 + handle_seq += VCHIQ_MAX_STATES * VCHIQ_MAX_SERVICES;
9138 + *pservice = service;
9139 + if (pservice == &state->services[state->unused_service])
9140 + state->unused_service++;
9141 + }
9142 +
9143 + mutex_unlock(&state->mutex);
9144 +
9145 + if (!pservice) {
9146 + kfree(service);
9147 + service = NULL;
9148 + }
9149 + }
9150 +
9151 + if (service) {
9152 + VCHIQ_SERVICE_QUOTA_T *service_quota =
9153 + &state->service_quotas[service->localport];
9154 + service_quota->slot_quota = state->default_slot_quota;
9155 + service_quota->message_quota = state->default_message_quota;
9156 + if (service_quota->slot_use_count == 0)
9157 + service_quota->previous_tx_index =
9158 + SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos)
9159 + - 1;
9160 +
9161 + /* Bring this service online */
9162 + vchiq_set_service_state(service, srvstate);
9163 +
9164 + vchiq_log_info(vchiq_core_msg_log_level,
9165 + "%s Service %c%c%c%c SrcPort:%d",
9166 + (srvstate == VCHIQ_SRVSTATE_OPENING)
9167 + ? "Open" : "Add",
9168 + VCHIQ_FOURCC_AS_4CHARS(params->fourcc),
9169 + service->localport);
9170 + }
9171 +
9172 + /* Don't unlock the service - leave it with a ref_count of 1. */
9173 +
9174 + return service;
9175 +}
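/*
 * Illustrative sketch (assumes VCHIQ_MAX_SERVICES and VCHIQ_MAX_STATES are
 * powers of two, which the OR-composition above relies on): a service
 * handle packs a reuse sequence, the owning state's id and the local port
 * into one word, so the low bits recover the port directly.
 */
static inline unsigned int example_handle_to_localport(unsigned int handle,
	unsigned int max_services)
{
	return handle & (max_services - 1);
}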
9176 +
9177 +VCHIQ_STATUS_T
9178 +vchiq_open_service_internal(VCHIQ_SERVICE_T *service, int client_id)
9179 +{
9180 + struct vchiq_open_payload payload = {
9181 + service->base.fourcc,
9182 + client_id,
9183 + service->version,
9184 + service->version_min
9185 + };
9186 + VCHIQ_ELEMENT_T body = { &payload, sizeof(payload) };
9187 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
9188 +
9189 + service->client_id = client_id;
9190 + vchiq_use_service_internal(service);
9191 + status = queue_message(service->state, NULL,
9192 + VCHIQ_MAKE_MSG(VCHIQ_MSG_OPEN, service->localport, 0),
9193 + &body, 1, sizeof(payload), 1);
9194 + if (status == VCHIQ_SUCCESS) {
9195 + if (down_interruptible(&service->remove_event) != 0) {
9196 + status = VCHIQ_RETRY;
9197 + vchiq_release_service_internal(service);
9198 + } else if ((service->srvstate != VCHIQ_SRVSTATE_OPEN) &&
9199 + (service->srvstate != VCHIQ_SRVSTATE_OPENSYNC)) {
9200 + if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT)
9201 + vchiq_log_error(vchiq_core_log_level,
9202 + "%d: osi - srvstate = %s (ref %d)",
9203 + service->state->id,
9204 + srvstate_names[service->srvstate],
9205 + service->ref_count);
9206 + status = VCHIQ_ERROR;
9207 + VCHIQ_SERVICE_STATS_INC(service, error_count);
9208 + vchiq_release_service_internal(service);
9209 + }
9210 + }
9211 + return status;
9212 +}
9213 +
9214 +static void
9215 +release_service_messages(VCHIQ_SERVICE_T *service)
9216 +{
9217 + VCHIQ_STATE_T *state = service->state;
9218 + int slot_last = state->remote->slot_last;
9219 + int i;
9220 +
9221 + /* Release any claimed messages */
9222 + for (i = state->remote->slot_first; i <= slot_last; i++) {
9223 + VCHIQ_SLOT_INFO_T *slot_info =
9224 + SLOT_INFO_FROM_INDEX(state, i);
9225 + if (slot_info->release_count != slot_info->use_count) {
9226 + char *data =
9227 + (char *)SLOT_DATA_FROM_INDEX(state, i);
9228 + unsigned int pos, end;
9229 +
9230 + end = VCHIQ_SLOT_SIZE;
9231 + if (data == state->rx_data)
9232 + /* This buffer is still being read from - stop
9233 + ** at the current read position */
9234 + end = state->rx_pos & VCHIQ_SLOT_MASK;
9235 +
9236 + pos = 0;
9237 +
9238 + while (pos < end) {
9239 + VCHIQ_HEADER_T *header =
9240 + (VCHIQ_HEADER_T *)(data + pos);
9241 + int msgid = header->msgid;
9242 + int port = VCHIQ_MSG_DSTPORT(msgid);
9243 + if ((port == service->localport) &&
9244 + (msgid & VCHIQ_MSGID_CLAIMED)) {
9245 + vchiq_log_info(vchiq_core_log_level,
9246 + " fsi - hdr %x",
9247 + (unsigned int)header);
9248 + release_slot(state, slot_info, header,
9249 + NULL);
9250 + }
9251 + pos += calc_stride(header->size);
9252 + if (pos > VCHIQ_SLOT_SIZE) {
9253 + vchiq_log_error(vchiq_core_log_level,
9254 + "fsi - pos %x: header %x, "
9255 + "msgid %x, header->msgid %x, "
9256 + "header->size %x",
9257 + pos, (unsigned int)header,
9258 + msgid, header->msgid,
9259 + header->size);
9260 + WARN(1, "invalid slot position\n");
9261 + }
9262 + }
9263 + }
9264 + }
9265 +}
9266 +
9267 +static int
9268 +do_abort_bulks(VCHIQ_SERVICE_T *service)
9269 +{
9270 + VCHIQ_STATUS_T status;
9271 +
9272 + /* Abort any outstanding bulk transfers */
9273 + if (mutex_lock_interruptible(&service->bulk_mutex) != 0)
9274 + return 0;
9275 + abort_outstanding_bulks(service, &service->bulk_tx);
9276 + abort_outstanding_bulks(service, &service->bulk_rx);
9277 + mutex_unlock(&service->bulk_mutex);
9278 +
9279 + status = notify_bulks(service, &service->bulk_tx, 0/*!retry_poll*/);
9280 + if (status == VCHIQ_SUCCESS)
9281 + status = notify_bulks(service, &service->bulk_rx,
9282 + 0/*!retry_poll*/);
9283 + return (status == VCHIQ_SUCCESS);
9284 +}
9285 +
9286 +static VCHIQ_STATUS_T
9287 +close_service_complete(VCHIQ_SERVICE_T *service, int failstate)
9288 +{
9289 + VCHIQ_STATUS_T status;
9290 + int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
9291 + int newstate;
9292 +
9293 + switch (service->srvstate) {
9294 + case VCHIQ_SRVSTATE_OPEN:
9295 + case VCHIQ_SRVSTATE_CLOSESENT:
9296 + case VCHIQ_SRVSTATE_CLOSERECVD:
9297 + if (is_server) {
9298 + if (service->auto_close) {
9299 + service->client_id = 0;
9300 + service->remoteport = VCHIQ_PORT_FREE;
9301 + newstate = VCHIQ_SRVSTATE_LISTENING;
9302 + } else
9303 + newstate = VCHIQ_SRVSTATE_CLOSEWAIT;
9304 + } else
9305 + newstate = VCHIQ_SRVSTATE_CLOSED;
9306 + vchiq_set_service_state(service, newstate);
9307 + break;
9308 + case VCHIQ_SRVSTATE_LISTENING:
9309 + break;
9310 + default:
9311 + vchiq_log_error(vchiq_core_log_level,
9312 + "close_service_complete(%x) called in state %s",
9313 + service->handle, srvstate_names[service->srvstate]);
9314 + WARN(1, "close_service_complete in unexpected state\n");
9315 + return VCHIQ_ERROR;
9316 + }
9317 +
9318 + status = make_service_callback(service,
9319 + VCHIQ_SERVICE_CLOSED, NULL, NULL);
9320 +
9321 + if (status != VCHIQ_RETRY) {
9322 + int uc = service->service_use_count;
9323 + int i;
9324 + /* Complete the close process */
9325 + for (i = 0; i < uc; i++)
9326 + /* cater for cases where close is forced and the
9327 + ** client may not close all its handles */
9328 + vchiq_release_service_internal(service);
9329 +
9330 + service->client_id = 0;
9331 + service->remoteport = VCHIQ_PORT_FREE;
9332 +
9333 + if (service->srvstate == VCHIQ_SRVSTATE_CLOSED)
9334 + vchiq_free_service_internal(service);
9335 + else if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT) {
9336 + if (is_server)
9337 + service->closing = 0;
9338 +
9339 + up(&service->remove_event);
9340 + }
9341 + } else
9342 + vchiq_set_service_state(service, failstate);
9343 +
9344 + return status;
9345 +}
9346 +
9347 +/* Called by the slot handler */
9348 +VCHIQ_STATUS_T
9349 +vchiq_close_service_internal(VCHIQ_SERVICE_T *service, int close_recvd)
9350 +{
9351 + VCHIQ_STATE_T *state = service->state;
9352 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
9353 + int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
9354 +
9355 + vchiq_log_info(vchiq_core_log_level, "%d: csi:%d,%d (%s)",
9356 + service->state->id, service->localport, close_recvd,
9357 + srvstate_names[service->srvstate]);
9358 +
9359 + switch (service->srvstate) {
9360 + case VCHIQ_SRVSTATE_CLOSED:
9361 + case VCHIQ_SRVSTATE_HIDDEN:
9362 + case VCHIQ_SRVSTATE_LISTENING:
9363 + case VCHIQ_SRVSTATE_CLOSEWAIT:
9364 + if (close_recvd)
9365 + vchiq_log_error(vchiq_core_log_level,
9366 + "vchiq_close_service_internal(1) called "
9367 + "in state %s",
9368 + srvstate_names[service->srvstate]);
9369 + else if (is_server) {
9370 + if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
9371 + status = VCHIQ_ERROR;
9372 + } else {
9373 + service->client_id = 0;
9374 + service->remoteport = VCHIQ_PORT_FREE;
9375 + if (service->srvstate ==
9376 + VCHIQ_SRVSTATE_CLOSEWAIT)
9377 + vchiq_set_service_state(service,
9378 + VCHIQ_SRVSTATE_LISTENING);
9379 + }
9380 + up(&service->remove_event);
9381 + } else
9382 + vchiq_free_service_internal(service);
9383 + break;
9384 + case VCHIQ_SRVSTATE_OPENING:
9385 + if (close_recvd) {
9386 + /* The open was rejected - tell the user */
9387 + vchiq_set_service_state(service,
9388 + VCHIQ_SRVSTATE_CLOSEWAIT);
9389 + up(&service->remove_event);
9390 + } else {
9391 + /* Shutdown mid-open - let the other side know */
9392 + status = queue_message(state, service,
9393 + VCHIQ_MAKE_MSG
9394 + (VCHIQ_MSG_CLOSE,
9395 + service->localport,
9396 + VCHIQ_MSG_DSTPORT(service->remoteport)),
9397 + NULL, 0, 0, 0);
9398 + }
9399 + break;
9400 +
9401 + case VCHIQ_SRVSTATE_OPENSYNC:
9402 + mutex_lock(&state->sync_mutex);
9403 + /* Drop through */
9404 +
9405 + case VCHIQ_SRVSTATE_OPEN:
9406 + if (state->is_master || close_recvd) {
9407 + if (!do_abort_bulks(service))
9408 + status = VCHIQ_RETRY;
9409 + }
9410 +
9411 + release_service_messages(service);
9412 +
9413 + if (status == VCHIQ_SUCCESS)
9414 + status = queue_message(state, service,
9415 + VCHIQ_MAKE_MSG
9416 + (VCHIQ_MSG_CLOSE,
9417 + service->localport,
9418 + VCHIQ_MSG_DSTPORT(service->remoteport)),
9419 + NULL, 0, 0, 0);
9420 +
9421 + if (status == VCHIQ_SUCCESS) {
9422 + if (!close_recvd)
9423 + break;
9424 + } else if (service->srvstate == VCHIQ_SRVSTATE_OPENSYNC) {
9425 + mutex_unlock(&state->sync_mutex);
9426 + break;
9427 + } else
9428 + break;
9429 +
9430 + status = close_service_complete(service,
9431 + VCHIQ_SRVSTATE_CLOSERECVD);
9432 + break;
9433 +
9434 + case VCHIQ_SRVSTATE_CLOSESENT:
9435 + if (!close_recvd)
9436 + /* This happens when a process is killed mid-close */
9437 + break;
9438 +
9439 + if (!state->is_master) {
9440 + if (!do_abort_bulks(service)) {
9441 + status = VCHIQ_RETRY;
9442 + break;
9443 + }
9444 + }
9445 +
9446 + if (status == VCHIQ_SUCCESS)
9447 + status = close_service_complete(service,
9448 + VCHIQ_SRVSTATE_CLOSERECVD);
9449 + break;
9450 +
9451 + case VCHIQ_SRVSTATE_CLOSERECVD:
9452 + if (!close_recvd && is_server)
9453 + /* Force into LISTENING mode */
9454 + vchiq_set_service_state(service,
9455 + VCHIQ_SRVSTATE_LISTENING);
9456 + status = close_service_complete(service,
9457 + VCHIQ_SRVSTATE_CLOSERECVD);
9458 + break;
9459 +
9460 + default:
9461 + vchiq_log_error(vchiq_core_log_level,
9462 + "vchiq_close_service_internal(%d) called in state %s",
9463 + close_recvd, srvstate_names[service->srvstate]);
9464 + break;
9465 + }
9466 +
9467 + return status;
9468 +}
9469 +
9470 +/* Called from the application process upon process death */
9471 +void
9472 +vchiq_terminate_service_internal(VCHIQ_SERVICE_T *service)
9473 +{
9474 + VCHIQ_STATE_T *state = service->state;
9475 +
9476 + vchiq_log_info(vchiq_core_log_level, "%d: tsi - (%d<->%d)",
9477 + state->id, service->localport, service->remoteport);
9478 +
9479 + mark_service_closing(service);
9480 +
9481 + /* Mark the service for removal by the slot handler */
9482 + request_poll(state, service, VCHIQ_POLL_REMOVE);
9483 +}
9484 +
9485 +/* Called from the slot handler */
9486 +void
9487 +vchiq_free_service_internal(VCHIQ_SERVICE_T *service)
9488 +{
9489 + VCHIQ_STATE_T *state = service->state;
9490 +
9491 + vchiq_log_info(vchiq_core_log_level, "%d: fsi - (%d)",
9492 + state->id, service->localport);
9493 +
9494 + switch (service->srvstate) {
9495 + case VCHIQ_SRVSTATE_OPENING:
9496 + case VCHIQ_SRVSTATE_CLOSED:
9497 + case VCHIQ_SRVSTATE_HIDDEN:
9498 + case VCHIQ_SRVSTATE_LISTENING:
9499 + case VCHIQ_SRVSTATE_CLOSEWAIT:
9500 + break;
9501 + default:
9502 + vchiq_log_error(vchiq_core_log_level,
9503 + "%d: fsi - (%d) in state %s",
9504 + state->id, service->localport,
9505 + srvstate_names[service->srvstate]);
9506 + return;
9507 + }
9508 +
9509 + vchiq_set_service_state(service, VCHIQ_SRVSTATE_FREE);
9510 +
9511 + up(&service->remove_event);
9512 +
9513 + /* Release the initial lock */
9514 + unlock_service(service);
9515 +}
9516 +
9517 +VCHIQ_STATUS_T
9518 +vchiq_connect_internal(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance)
9519 +{
9520 + VCHIQ_SERVICE_T *service;
9521 + int i;
9522 +
9523 + /* Find all services registered to this client and enable them. */
9524 + i = 0;
9525 + while ((service = next_service_by_instance(state, instance,
9526 + &i)) != NULL) {
9527 + if (service->srvstate == VCHIQ_SRVSTATE_HIDDEN)
9528 + vchiq_set_service_state(service,
9529 + VCHIQ_SRVSTATE_LISTENING);
9530 + unlock_service(service);
9531 + }
9532 +
9533 + if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED) {
9534 + if (queue_message(state, NULL,
9535 + VCHIQ_MAKE_MSG(VCHIQ_MSG_CONNECT, 0, 0), NULL, 0,
9536 + 0, 1) == VCHIQ_RETRY)
9537 + return VCHIQ_RETRY;
9538 +
9539 + vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTING);
9540 + }
9541 +
9542 + if (state->conn_state == VCHIQ_CONNSTATE_CONNECTING) {
9543 + if (down_interruptible(&state->connect) != 0)
9544 + return VCHIQ_RETRY;
9545 +
9546 + vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
9547 + up(&state->connect);
9548 + }
9549 +
9550 + return VCHIQ_SUCCESS;
9551 +}
9552 +
9553 +VCHIQ_STATUS_T
9554 +vchiq_shutdown_internal(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance)
9555 +{
9556 + VCHIQ_SERVICE_T *service;
9557 + int i;
9558 +
9559 + /* Find all services registered to this client and remove them. */
9560 + i = 0;
9561 + while ((service = next_service_by_instance(state, instance,
9562 + &i)) != NULL) {
9563 + (void)vchiq_remove_service(service->handle);
9564 + unlock_service(service);
9565 + }
9566 +
9567 + return VCHIQ_SUCCESS;
9568 +}
9569 +
9570 +VCHIQ_STATUS_T
9571 +vchiq_pause_internal(VCHIQ_STATE_T *state)
9572 +{
9573 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
9574 +
9575 + switch (state->conn_state) {
9576 + case VCHIQ_CONNSTATE_CONNECTED:
9577 + /* Request a pause */
9578 + vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSING);
9579 + request_poll(state, NULL, 0);
9580 + break;
9581 + default:
9582 + vchiq_log_error(vchiq_core_log_level,
9583 + "vchiq_pause_internal in state %s\n",
9584 + conn_state_names[state->conn_state]);
9585 + status = VCHIQ_ERROR;
9586 + VCHIQ_STATS_INC(state, error_count);
9587 + break;
9588 + }
9589 +
9590 + return status;
9591 +}
9592 +
9593 +VCHIQ_STATUS_T
9594 +vchiq_resume_internal(VCHIQ_STATE_T *state)
9595 +{
9596 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
9597 +
9598 + if (state->conn_state == VCHIQ_CONNSTATE_PAUSED) {
9599 + vchiq_set_conn_state(state, VCHIQ_CONNSTATE_RESUMING);
9600 + request_poll(state, NULL, 0);
9601 + } else {
9602 + status = VCHIQ_ERROR;
9603 + VCHIQ_STATS_INC(state, error_count);
9604 + }
9605 +
9606 + return status;
9607 +}
9608 +
9609 +VCHIQ_STATUS_T
9610 +vchiq_close_service(VCHIQ_SERVICE_HANDLE_T handle)
9611 +{
9612 + /* Unregister the service */
9613 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
9614 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
9615 +
9616 + if (!service)
9617 + return VCHIQ_ERROR;
9618 +
9619 + vchiq_log_info(vchiq_core_log_level,
9620 + "%d: close_service:%d",
9621 + service->state->id, service->localport);
9622 +
9623 + if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
9624 + (service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
9625 + (service->srvstate == VCHIQ_SRVSTATE_HIDDEN)) {
9626 + unlock_service(service);
9627 + return VCHIQ_ERROR;
9628 + }
9629 +
9630 + mark_service_closing(service);
9631 +
9632 + if (current == service->state->slot_handler_thread) {
9633 + status = vchiq_close_service_internal(service,
9634 + 0/*!close_recvd*/);
9635 + BUG_ON(status == VCHIQ_RETRY);
9636 + } else {
9637 + /* Mark the service for termination by the slot handler */
9638 + request_poll(service->state, service, VCHIQ_POLL_TERMINATE);
9639 + }
9640 +
9641 + while (1) {
9642 + if (down_interruptible(&service->remove_event) != 0) {
9643 + status = VCHIQ_RETRY;
9644 + break;
9645 + }
9646 +
9647 + if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
9648 + (service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
9649 + (service->srvstate == VCHIQ_SRVSTATE_OPEN))
9650 + break;
9651 +
9652 + vchiq_log_warning(vchiq_core_log_level,
9653 + "%d: close_service:%d - waiting in state %s",
9654 + service->state->id, service->localport,
9655 + srvstate_names[service->srvstate]);
9656 + }
9657 +
9658 + if ((status == VCHIQ_SUCCESS) &&
9659 + (service->srvstate != VCHIQ_SRVSTATE_FREE) &&
9660 + (service->srvstate != VCHIQ_SRVSTATE_LISTENING))
9661 + status = VCHIQ_ERROR;
9662 +
9663 + unlock_service(service);
9664 +
9665 + return status;
9666 +}
9667 +
9668 +VCHIQ_STATUS_T
9669 +vchiq_remove_service(VCHIQ_SERVICE_HANDLE_T handle)
9670 +{
9671 + /* Unregister the service */
9672 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
9673 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
9674 +
9675 + if (!service)
9676 + return VCHIQ_ERROR;
9677 +
9678 + vchiq_log_info(vchiq_core_log_level,
9679 + "%d: remove_service:%d",
9680 + service->state->id, service->localport);
9681 +
9682 + if (service->srvstate == VCHIQ_SRVSTATE_FREE) {
9683 + unlock_service(service);
9684 + return VCHIQ_ERROR;
9685 + }
9686 +
9687 + mark_service_closing(service);
9688 +
9689 + if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) ||
9690 + (current == service->state->slot_handler_thread)) {
9691 + /* Make it look like a client, because it must be removed and
9692 + not left in the LISTENING state. */
9693 + service->public_fourcc = VCHIQ_FOURCC_INVALID;
9694 +
9695 + status = vchiq_close_service_internal(service,
9696 + 0/*!close_recvd*/);
9697 + BUG_ON(status == VCHIQ_RETRY);
9698 + } else {
9699 + /* Mark the service for removal by the slot handler */
9700 + request_poll(service->state, service, VCHIQ_POLL_REMOVE);
9701 + }
9702 + while (1) {
9703 + if (down_interruptible(&service->remove_event) != 0) {
9704 + status = VCHIQ_RETRY;
9705 + break;
9706 + }
9707 +
9708 + if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
9709 + (service->srvstate == VCHIQ_SRVSTATE_OPEN))
9710 + break;
9711 +
9712 + vchiq_log_warning(vchiq_core_log_level,
9713 + "%d: remove_service:%d - waiting in state %s",
9714 + service->state->id, service->localport,
9715 + srvstate_names[service->srvstate]);
9716 + }
9717 +
9718 + if ((status == VCHIQ_SUCCESS) &&
9719 + (service->srvstate != VCHIQ_SRVSTATE_FREE))
9720 + status = VCHIQ_ERROR;
9721 +
9722 + unlock_service(service);
9723 +
9724 + return status;
9725 +}
9726 +
9727 +
9728 +/* This function may be called by kernel threads or user threads.
9729 + * User threads may receive VCHIQ_RETRY to indicate that a signal has been
9730 + * received and the call should be retried after being returned to user
9731 + * context.
9732 + * When called in blocking mode, the userdata field points to a bulk_waiter
9733 + * structure.
9734 + */
9735 +VCHIQ_STATUS_T
9736 +vchiq_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle,
9737 + VCHI_MEM_HANDLE_T memhandle, void *offset, int size, void *userdata,
9738 + VCHIQ_BULK_MODE_T mode, VCHIQ_BULK_DIR_T dir)
9739 +{
9740 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
9741 + VCHIQ_BULK_QUEUE_T *queue;
9742 + VCHIQ_BULK_T *bulk;
9743 + VCHIQ_STATE_T *state;
9744 + struct bulk_waiter *bulk_waiter = NULL;
9745 + const char dir_char = (dir == VCHIQ_BULK_TRANSMIT) ? 't' : 'r';
9746 + const int dir_msgtype = (dir == VCHIQ_BULK_TRANSMIT) ?
9747 + VCHIQ_MSG_BULK_TX : VCHIQ_MSG_BULK_RX;
9748 + VCHIQ_STATUS_T status = VCHIQ_ERROR;
9749 +
9750 + if (!service ||
9751 + (service->srvstate != VCHIQ_SRVSTATE_OPEN) ||
9752 + ((memhandle == VCHI_MEM_HANDLE_INVALID) && (offset == NULL)) ||
9753 + (vchiq_check_service(service) != VCHIQ_SUCCESS))
9754 + goto error_exit;
9755 +
9756 + switch (mode) {
9757 + case VCHIQ_BULK_MODE_NOCALLBACK:
9758 + case VCHIQ_BULK_MODE_CALLBACK:
9759 + break;
9760 + case VCHIQ_BULK_MODE_BLOCKING:
9761 + bulk_waiter = (struct bulk_waiter *)userdata;
9762 + sema_init(&bulk_waiter->event, 0);
9763 + bulk_waiter->actual = 0;
9764 + bulk_waiter->bulk = NULL;
9765 + break;
9766 + case VCHIQ_BULK_MODE_WAITING:
9767 + bulk_waiter = (struct bulk_waiter *)userdata;
9768 + bulk = bulk_waiter->bulk;
9769 + goto waiting;
9770 + default:
9771 + goto error_exit;
9772 + }
9773 +
9774 + state = service->state;
9775 +
9776 + queue = (dir == VCHIQ_BULK_TRANSMIT) ?
9777 + &service->bulk_tx : &service->bulk_rx;
9778 +
9779 + if (mutex_lock_interruptible(&service->bulk_mutex) != 0) {
9780 + status = VCHIQ_RETRY;
9781 + goto error_exit;
9782 + }
9783 +
9784 + if (queue->local_insert == queue->remove + VCHIQ_NUM_SERVICE_BULKS) {
9785 + VCHIQ_SERVICE_STATS_INC(service, bulk_stalls);
9786 + do {
9787 + mutex_unlock(&service->bulk_mutex);
9788 + if (down_interruptible(&service->bulk_remove_event)
9789 + != 0) {
9790 + status = VCHIQ_RETRY;
9791 + goto error_exit;
9792 + }
9793 + if (mutex_lock_interruptible(&service->bulk_mutex)
9794 + != 0) {
9795 + status = VCHIQ_RETRY;
9796 + goto error_exit;
9797 + }
9798 + } while (queue->local_insert == queue->remove +
9799 + VCHIQ_NUM_SERVICE_BULKS);
9800 + }
9801 +
9802 + bulk = &queue->bulks[BULK_INDEX(queue->local_insert)];
9803 +
9804 + bulk->mode = mode;
9805 + bulk->dir = dir;
9806 + bulk->userdata = userdata;
9807 + bulk->size = size;
9808 + bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
9809 +
9810 + if (vchiq_prepare_bulk_data(bulk, memhandle, offset, size, dir) !=
9811 + VCHIQ_SUCCESS)
9812 + goto unlock_error_exit;
9813 +
9814 + wmb();
9815 +
9816 + vchiq_log_info(vchiq_core_log_level,
9817 + "%d: bt (%d->%d) %cx %x@%x %x",
9818 + state->id,
9819 + service->localport, service->remoteport, dir_char,
9820 + size, (unsigned int)bulk->data, (unsigned int)userdata);
9821 +
9822 + if (state->is_master) {
9823 + queue->local_insert++;
9824 + if (resolve_bulks(service, queue))
9825 + request_poll(state, service,
9826 + (dir == VCHIQ_BULK_TRANSMIT) ?
9827 + VCHIQ_POLL_TXNOTIFY : VCHIQ_POLL_RXNOTIFY);
9828 + } else {
9829 + int payload[2] = { (int)bulk->data, bulk->size };
9830 + VCHIQ_ELEMENT_T element = { payload, sizeof(payload) };
9831 +
9832 + status = queue_message(state, NULL,
9833 + VCHIQ_MAKE_MSG(dir_msgtype,
9834 + service->localport, service->remoteport),
9835 + &element, 1, sizeof(payload), 1);
9836 + if (status != VCHIQ_SUCCESS) {
9837 + vchiq_complete_bulk(bulk);
9838 + goto unlock_error_exit;
9839 + }
9840 + queue->local_insert++;
9841 + }
9842 +
9843 + mutex_unlock(&service->bulk_mutex);
9844 +
9845 + vchiq_log_trace(vchiq_core_log_level,
9846 + "%d: bt:%d %cx li=%x ri=%x p=%x",
9847 + state->id,
9848 + service->localport, dir_char,
9849 + queue->local_insert, queue->remote_insert, queue->process);
9850 +
9851 +waiting:
9852 + unlock_service(service);
9853 +
9854 + status = VCHIQ_SUCCESS;
9855 +
9856 + if (bulk_waiter) {
9857 + bulk_waiter->bulk = bulk;
9858 + if (down_interruptible(&bulk_waiter->event) != 0)
9859 + status = VCHIQ_RETRY;
9860 + else if (bulk_waiter->actual == VCHIQ_BULK_ACTUAL_ABORTED)
9861 + status = VCHIQ_ERROR;
9862 + }
9863 +
9864 + return status;
9865 +
9866 +unlock_error_exit:
9867 + mutex_unlock(&service->bulk_mutex);
9868 +
9869 +error_exit:
9870 + if (service)
9871 + unlock_service(service);
9872 + return status;
9873 +}
9874 +
9875 +VCHIQ_STATUS_T
9876 +vchiq_queue_message(VCHIQ_SERVICE_HANDLE_T handle,
9877 + const VCHIQ_ELEMENT_T *elements, unsigned int count)
9878 +{
9879 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
9880 + VCHIQ_STATUS_T status = VCHIQ_ERROR;
9881 +
9882 + unsigned int size = 0;
9883 + unsigned int i;
9884 +
9885 + if (!service ||
9886 + (vchiq_check_service(service) != VCHIQ_SUCCESS))
9887 + goto error_exit;
9888 +
9889 + for (i = 0; i < (unsigned int)count; i++) {
9890 + if (elements[i].size) {
9891 + if (elements[i].data == NULL) {
9892 + VCHIQ_SERVICE_STATS_INC(service, error_count);
9893 + goto error_exit;
9894 + }
9895 + size += elements[i].size;
9896 + }
9897 + }
9898 +
9899 + if (size > VCHIQ_MAX_MSG_SIZE) {
9900 + VCHIQ_SERVICE_STATS_INC(service, error_count);
9901 + goto error_exit;
9902 + }
9903 +
9904 + switch (service->srvstate) {
9905 + case VCHIQ_SRVSTATE_OPEN:
9906 + status = queue_message(service->state, service,
9907 + VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA,
9908 + service->localport,
9909 + service->remoteport),
9910 + elements, count, size, 1);
9911 + break;
9912 + case VCHIQ_SRVSTATE_OPENSYNC:
9913 + status = queue_message_sync(service->state, service,
9914 + VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA,
9915 + service->localport,
9916 + service->remoteport),
9917 + elements, count, size, 1);
9918 + break;
9919 + default:
9920 + status = VCHIQ_ERROR;
9921 + break;
9922 + }
9923 +
9924 +error_exit:
9925 + if (service)
9926 + unlock_service(service);
9927 +
9928 + return status;
9929 +}
9930 +
9931 +void
9932 +vchiq_release_message(VCHIQ_SERVICE_HANDLE_T handle, VCHIQ_HEADER_T *header)
9933 +{
9934 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
9935 + VCHIQ_SHARED_STATE_T *remote;
9936 + VCHIQ_STATE_T *state;
9937 + int slot_index;
9938 +
9939 + if (!service)
9940 + return;
9941 +
9942 + state = service->state;
9943 + remote = state->remote;
9944 +
9945 + slot_index = SLOT_INDEX_FROM_DATA(state, (void *)header);
9946 +
9947 + if ((slot_index >= remote->slot_first) &&
9948 + (slot_index <= remote->slot_last)) {
9949 + int msgid = header->msgid;
9950 + if (msgid & VCHIQ_MSGID_CLAIMED) {
9951 + VCHIQ_SLOT_INFO_T *slot_info =
9952 + SLOT_INFO_FROM_INDEX(state, slot_index);
9953 +
9954 + release_slot(state, slot_info, header, service);
9955 + }
9956 + } else if (slot_index == remote->slot_sync)
9957 + release_message_sync(state, header);
9958 +
9959 + unlock_service(service);
9960 +}
9961 +
9962 +static void
9963 +release_message_sync(VCHIQ_STATE_T *state, VCHIQ_HEADER_T *header)
9964 +{
9965 + header->msgid = VCHIQ_MSGID_PADDING;
9966 + wmb();
9967 + remote_event_signal(&state->remote->sync_release);
9968 +}
9969 +
9970 +VCHIQ_STATUS_T
9971 +vchiq_get_peer_version(VCHIQ_SERVICE_HANDLE_T handle, short *peer_version)
9972 +{
9973 + VCHIQ_STATUS_T status = VCHIQ_ERROR;
9974 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
9975 +
9976 + if (!service ||
9977 + (vchiq_check_service(service) != VCHIQ_SUCCESS) ||
9978 + !peer_version)
9979 + goto exit;
9980 + *peer_version = service->peer_version;
9981 + status = VCHIQ_SUCCESS;
9982 +
9983 +exit:
9984 + if (service)
9985 + unlock_service(service);
9986 + return status;
9987 +}
9988 +
9989 +VCHIQ_STATUS_T
9990 +vchiq_get_config(VCHIQ_INSTANCE_T instance,
9991 + int config_size, VCHIQ_CONFIG_T *pconfig)
9992 +{
9993 + VCHIQ_CONFIG_T config;
9994 +
9995 + (void)instance;
9996 +
9997 + config.max_msg_size = VCHIQ_MAX_MSG_SIZE;
9998 + config.bulk_threshold = VCHIQ_MAX_MSG_SIZE;
9999 + config.max_outstanding_bulks = VCHIQ_NUM_SERVICE_BULKS;
10000 + config.max_services = VCHIQ_MAX_SERVICES;
10001 + config.version = VCHIQ_VERSION;
10002 + config.version_min = VCHIQ_VERSION_MIN;
10003 +
10004 + if (config_size > sizeof(VCHIQ_CONFIG_T))
10005 + return VCHIQ_ERROR;
10006 +
10007 + memcpy(pconfig, &config,
10008 + min(config_size, (int)(sizeof(VCHIQ_CONFIG_T))));
10009 +
10010 + return VCHIQ_SUCCESS;
10011 +}
10012 +
10013 +VCHIQ_STATUS_T
10014 +vchiq_set_service_option(VCHIQ_SERVICE_HANDLE_T handle,
10015 + VCHIQ_SERVICE_OPTION_T option, int value)
10016 +{
10017 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
10018 + VCHIQ_STATUS_T status = VCHIQ_ERROR;
10019 +
10020 + if (service) {
10021 + switch (option) {
10022 + case VCHIQ_SERVICE_OPTION_AUTOCLOSE:
10023 + service->auto_close = value;
10024 + status = VCHIQ_SUCCESS;
10025 + break;
10026 +
10027 + case VCHIQ_SERVICE_OPTION_SLOT_QUOTA: {
10028 + VCHIQ_SERVICE_QUOTA_T *service_quota =
10029 + &service->state->service_quotas[
10030 + service->localport];
10031 + if (value == 0)
10032 + value = service->state->default_slot_quota;
10033 + if ((value >= service_quota->slot_use_count) &&
10034 + (value < (unsigned short)~0)) {
10035 + service_quota->slot_quota = value;
10036 + if ((value >= service_quota->slot_use_count) &&
10037 + (service_quota->message_quota >=
10038 + service_quota->message_use_count)) {
10039 + /* Signal the service that it may have
10040 + ** dropped below its quota */
10041 + up(&service_quota->quota_event);
10042 + }
10043 + status = VCHIQ_SUCCESS;
10044 + }
10045 + } break;
10046 +
10047 + case VCHIQ_SERVICE_OPTION_MESSAGE_QUOTA: {
10048 + VCHIQ_SERVICE_QUOTA_T *service_quota =
10049 + &service->state->service_quotas[
10050 + service->localport];
10051 + if (value == 0)
10052 + value = service->state->default_message_quota;
10053 + if ((value >= service_quota->message_use_count) &&
10054 + (value < (unsigned short)~0)) {
10055 + service_quota->message_quota = value;
10056 + if ((value >=
10057 + service_quota->message_use_count) &&
10058 + (service_quota->slot_quota >=
10059 + service_quota->slot_use_count))
10060 + /* Signal the service that it may have
10061 + ** dropped below its quota */
10062 + up(&service_quota->quota_event);
10063 + status = VCHIQ_SUCCESS;
10064 + }
10065 + } break;
10066 +
10067 + case VCHIQ_SERVICE_OPTION_SYNCHRONOUS:
10068 + if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) ||
10069 + (service->srvstate ==
10070 + VCHIQ_SRVSTATE_LISTENING)) {
10071 + service->sync = value;
10072 + status = VCHIQ_SUCCESS;
10073 + }
10074 + break;
10075 +
10076 + default:
10077 + break;
10078 + }
10079 + unlock_service(service);
10080 + }
10081 +
10082 + return status;
10083 +}
10084 +
10085 +void
10086 +vchiq_dump_shared_state(void *dump_context, VCHIQ_STATE_T *state,
10087 + VCHIQ_SHARED_STATE_T *shared, const char *label)
10088 +{
10089 + static const char *const debug_names[] = {
10090 + "<entries>",
10091 + "SLOT_HANDLER_COUNT",
10092 + "SLOT_HANDLER_LINE",
10093 + "PARSE_LINE",
10094 + "PARSE_HEADER",
10095 + "PARSE_MSGID",
10096 + "AWAIT_COMPLETION_LINE",
10097 + "DEQUEUE_MESSAGE_LINE",
10098 + "SERVICE_CALLBACK_LINE",
10099 + "MSG_QUEUE_FULL_COUNT",
10100 + "COMPLETION_QUEUE_FULL_COUNT"
10101 + };
10102 + int i;
10103 +
10104 + char buf[80];
10105 + int len;
10106 + len = snprintf(buf, sizeof(buf),
10107 + " %s: slots %d-%d tx_pos=%x recycle=%x",
10108 + label, shared->slot_first, shared->slot_last,
10109 + shared->tx_pos, shared->slot_queue_recycle);
10110 + vchiq_dump(dump_context, buf, len + 1);
10111 +
10112 + len = snprintf(buf, sizeof(buf),
10113 + " Slots claimed:");
10114 + vchiq_dump(dump_context, buf, len + 1);
10115 +
10116 + for (i = shared->slot_first; i <= shared->slot_last; i++) {
10117 + VCHIQ_SLOT_INFO_T slot_info = *SLOT_INFO_FROM_INDEX(state, i);
10118 + if (slot_info.use_count != slot_info.release_count) {
10119 + len = snprintf(buf, sizeof(buf),
10120 + " %d: %d/%d", i, slot_info.use_count,
10121 + slot_info.release_count);
10122 + vchiq_dump(dump_context, buf, len + 1);
10123 + }
10124 + }
10125 +
10126 + for (i = 1; i < shared->debug[DEBUG_ENTRIES]; i++) {
10127 + len = snprintf(buf, sizeof(buf), " DEBUG: %s = %d(%x)",
10128 + debug_names[i], shared->debug[i], shared->debug[i]);
10129 + vchiq_dump(dump_context, buf, len + 1);
10130 + }
10131 +}
10132 +
10133 +void
10134 +vchiq_dump_state(void *dump_context, VCHIQ_STATE_T *state)
10135 +{
10136 + char buf[80];
10137 + int len;
10138 + int i;
10139 +
10140 + len = snprintf(buf, sizeof(buf), "State %d: %s", state->id,
10141 + conn_state_names[state->conn_state]);
10142 + vchiq_dump(dump_context, buf, len + 1);
10143 +
10144 + len = snprintf(buf, sizeof(buf),
10145 + " tx_pos=%x(@%x), rx_pos=%x(@%x)",
10146 + state->local->tx_pos,
10147 + (uint32_t)state->tx_data +
10148 + (state->local_tx_pos & VCHIQ_SLOT_MASK),
10149 + state->rx_pos,
10150 + (uint32_t)state->rx_data +
10151 + (state->rx_pos & VCHIQ_SLOT_MASK));
10152 + vchiq_dump(dump_context, buf, len + 1);
10153 +
10154 + len = snprintf(buf, sizeof(buf),
10155 + " Version: %d (min %d)",
10156 + VCHIQ_VERSION, VCHIQ_VERSION_MIN);
10157 + vchiq_dump(dump_context, buf, len + 1);
10158 +
10159 + if (VCHIQ_ENABLE_STATS) {
10160 + len = snprintf(buf, sizeof(buf),
10161 + " Stats: ctrl_tx_count=%d, ctrl_rx_count=%d, "
10162 + "error_count=%d",
10163 + state->stats.ctrl_tx_count, state->stats.ctrl_rx_count,
10164 + state->stats.error_count);
10165 + vchiq_dump(dump_context, buf, len + 1);
10166 + }
10167 +
10168 + len = snprintf(buf, sizeof(buf),
10169 + " Slots: %d available (%d data), %d recyclable, %d stalls "
10170 + "(%d data)",
10171 + ((state->slot_queue_available * VCHIQ_SLOT_SIZE) -
10172 + state->local_tx_pos) / VCHIQ_SLOT_SIZE,
10173 + state->data_quota - state->data_use_count,
10174 + state->local->slot_queue_recycle - state->slot_queue_available,
10175 + state->stats.slot_stalls, state->stats.data_stalls);
10176 + vchiq_dump(dump_context, buf, len + 1);
10177 +
10178 + vchiq_dump_platform_state(dump_context);
10179 +
10180 + vchiq_dump_shared_state(dump_context, state, state->local, "Local");
10181 + vchiq_dump_shared_state(dump_context, state, state->remote, "Remote");
10182 +
10183 + vchiq_dump_platform_instances(dump_context);
10184 +
10185 + for (i = 0; i < state->unused_service; i++) {
10186 + VCHIQ_SERVICE_T *service = find_service_by_port(state, i);
10187 +
10188 + if (service) {
10189 + vchiq_dump_service_state(dump_context, service);
10190 + unlock_service(service);
10191 + }
10192 + }
10193 +}
10194 +
10195 +void
10196 +vchiq_dump_service_state(void *dump_context, VCHIQ_SERVICE_T *service)
10197 +{
10198 + char buf[80];
10199 + int len;
10200 +
10201 + len = snprintf(buf, sizeof(buf), "Service %d: %s (ref %u)",
10202 + service->localport, srvstate_names[service->srvstate],
10203 +		service->ref_count - 1); /* Don't include the lock just taken */
10204 +
10205 + if (service->srvstate != VCHIQ_SRVSTATE_FREE) {
10206 + char remoteport[30];
10207 + VCHIQ_SERVICE_QUOTA_T *service_quota =
10208 + &service->state->service_quotas[service->localport];
10209 + int fourcc = service->base.fourcc;
10210 + int tx_pending, rx_pending;
10211 + if (service->remoteport != VCHIQ_PORT_FREE) {
10212 + int len2 = snprintf(remoteport, sizeof(remoteport),
10213 + "%d", service->remoteport);
10214 + if (service->public_fourcc != VCHIQ_FOURCC_INVALID)
10215 + snprintf(remoteport + len2,
10216 + sizeof(remoteport) - len2,
10217 + " (client %x)", service->client_id);
10218 + } else
10219 + strcpy(remoteport, "n/a");
10220 +
10221 + len += snprintf(buf + len, sizeof(buf) - len,
10222 + " '%c%c%c%c' remote %s (msg use %d/%d, slot use %d/%d)",
10223 + VCHIQ_FOURCC_AS_4CHARS(fourcc),
10224 + remoteport,
10225 + service_quota->message_use_count,
10226 + service_quota->message_quota,
10227 + service_quota->slot_use_count,
10228 + service_quota->slot_quota);
10229 +
10230 + vchiq_dump(dump_context, buf, len + 1);
10231 +
10232 + tx_pending = service->bulk_tx.local_insert -
10233 + service->bulk_tx.remote_insert;
10234 +
10235 + rx_pending = service->bulk_rx.local_insert -
10236 + service->bulk_rx.remote_insert;
10237 +
10238 + len = snprintf(buf, sizeof(buf),
10239 + " Bulk: tx_pending=%d (size %d),"
10240 + " rx_pending=%d (size %d)",
10241 + tx_pending,
10242 + tx_pending ? service->bulk_tx.bulks[
10243 + BULK_INDEX(service->bulk_tx.remove)].size : 0,
10244 + rx_pending,
10245 + rx_pending ? service->bulk_rx.bulks[
10246 + BULK_INDEX(service->bulk_rx.remove)].size : 0);
10247 +
10248 + if (VCHIQ_ENABLE_STATS) {
10249 + vchiq_dump(dump_context, buf, len + 1);
10250 +
10251 + len = snprintf(buf, sizeof(buf),
10252 + " Ctrl: tx_count=%d, tx_bytes=%llu, "
10253 + "rx_count=%d, rx_bytes=%llu",
10254 + service->stats.ctrl_tx_count,
10255 + service->stats.ctrl_tx_bytes,
10256 + service->stats.ctrl_rx_count,
10257 + service->stats.ctrl_rx_bytes);
10258 + vchiq_dump(dump_context, buf, len + 1);
10259 +
10260 + len = snprintf(buf, sizeof(buf),
10261 + " Bulk: tx_count=%d, tx_bytes=%llu, "
10262 + "rx_count=%d, rx_bytes=%llu",
10263 + service->stats.bulk_tx_count,
10264 + service->stats.bulk_tx_bytes,
10265 + service->stats.bulk_rx_count,
10266 + service->stats.bulk_rx_bytes);
10267 + vchiq_dump(dump_context, buf, len + 1);
10268 +
10269 + len = snprintf(buf, sizeof(buf),
10270 + " %d quota stalls, %d slot stalls, "
10271 + "%d bulk stalls, %d aborted, %d errors",
10272 + service->stats.quota_stalls,
10273 + service->stats.slot_stalls,
10274 + service->stats.bulk_stalls,
10275 + service->stats.bulk_aborted_count,
10276 + service->stats.error_count);
10277 + }
10278 + }
10279 +
10280 + vchiq_dump(dump_context, buf, len + 1);
10281 +
10282 + if (service->srvstate != VCHIQ_SRVSTATE_FREE)
10283 + vchiq_dump_platform_service_state(dump_context, service);
10284 +}
10285 +
10286 +
10287 +void
10288 +vchiq_loud_error_header(void)
10289 +{
10290 + vchiq_log_error(vchiq_core_log_level,
10291 + "============================================================"
10292 + "================");
10293 + vchiq_log_error(vchiq_core_log_level,
10294 + "============================================================"
10295 + "================");
10296 + vchiq_log_error(vchiq_core_log_level, "=====");
10297 +}
10298 +
10299 +void
10300 +vchiq_loud_error_footer(void)
10301 +{
10302 + vchiq_log_error(vchiq_core_log_level, "=====");
10303 + vchiq_log_error(vchiq_core_log_level,
10304 + "============================================================"
10305 + "================");
10306 + vchiq_log_error(vchiq_core_log_level,
10307 + "============================================================"
10308 + "================");
10309 +}
10310 +
10311 +
10312 +VCHIQ_STATUS_T vchiq_send_remote_use(VCHIQ_STATE_T *state)
10313 +{
10314 + VCHIQ_STATUS_T status = VCHIQ_RETRY;
10315 + if (state->conn_state != VCHIQ_CONNSTATE_DISCONNECTED)
10316 + status = queue_message(state, NULL,
10317 + VCHIQ_MAKE_MSG(VCHIQ_MSG_REMOTE_USE, 0, 0),
10318 + NULL, 0, 0, 0);
10319 + return status;
10320 +}
10321 +
10322 +VCHIQ_STATUS_T vchiq_send_remote_release(VCHIQ_STATE_T *state)
10323 +{
10324 + VCHIQ_STATUS_T status = VCHIQ_RETRY;
10325 + if (state->conn_state != VCHIQ_CONNSTATE_DISCONNECTED)
10326 + status = queue_message(state, NULL,
10327 + VCHIQ_MAKE_MSG(VCHIQ_MSG_REMOTE_RELEASE, 0, 0),
10328 + NULL, 0, 0, 0);
10329 + return status;
10330 +}
10331 +
10332 +VCHIQ_STATUS_T vchiq_send_remote_use_active(VCHIQ_STATE_T *state)
10333 +{
10334 + VCHIQ_STATUS_T status = VCHIQ_RETRY;
10335 + if (state->conn_state != VCHIQ_CONNSTATE_DISCONNECTED)
10336 + status = queue_message(state, NULL,
10337 + VCHIQ_MAKE_MSG(VCHIQ_MSG_REMOTE_USE_ACTIVE, 0, 0),
10338 + NULL, 0, 0, 0);
10339 + return status;
10340 +}
10341 +
10342 +void vchiq_log_dump_mem(const char *label, uint32_t addr, const void *voidMem,
10343 + size_t numBytes)
10344 +{
10345 + const uint8_t *mem = (const uint8_t *)voidMem;
10346 + size_t offset;
10347 + char lineBuf[100];
10348 + char *s;
10349 +
10350 + while (numBytes > 0) {
10351 + s = lineBuf;
10352 +
10353 + for (offset = 0; offset < 16; offset++) {
10354 + if (offset < numBytes)
10355 + s += snprintf(s, 4, "%02x ", mem[offset]);
10356 + else
10357 + s += snprintf(s, 4, " ");
10358 + }
10359 +
10360 + for (offset = 0; offset < 16; offset++) {
10361 + if (offset < numBytes) {
10362 + uint8_t ch = mem[offset];
10363 +
10364 + if ((ch < ' ') || (ch > '~'))
10365 + ch = '.';
10366 + *s++ = (char)ch;
10367 + }
10368 + }
10369 + *s++ = '\0';
10370 +
10371 + if ((label != NULL) && (*label != '\0'))
10372 + vchiq_log_trace(VCHIQ_LOG_TRACE,
10373 + "%s: %08x: %s", label, addr, lineBuf);
10374 + else
10375 + vchiq_log_trace(VCHIQ_LOG_TRACE,
10376 + "%08x: %s", addr, lineBuf);
10377 +
10378 + addr += 16;
10379 + mem += 16;
10380 + if (numBytes > 16)
10381 + numBytes -= 16;
10382 + else
10383 + numBytes = 0;
10384 + }
10385 +}
10386 diff -urN linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.h linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.h
10387 --- linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.h 1970-01-01 01:00:00.000000000 +0100
10388 +++ linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.h 2013-07-06 15:25:50.000000000 +0100
10389 @@ -0,0 +1,703 @@
10390 +/**
10391 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
10392 + *
10393 + * Redistribution and use in source and binary forms, with or without
10394 + * modification, are permitted provided that the following conditions
10395 + * are met:
10396 + * 1. Redistributions of source code must retain the above copyright
10397 + * notice, this list of conditions, and the following disclaimer,
10398 + * without modification.
10399 + * 2. Redistributions in binary form must reproduce the above copyright
10400 + * notice, this list of conditions and the following disclaimer in the
10401 + * documentation and/or other materials provided with the distribution.
10402 + * 3. The names of the above-listed copyright holders may not be used
10403 + * to endorse or promote products derived from this software without
10404 + * specific prior written permission.
10405 + *
10406 + * ALTERNATIVELY, this software may be distributed under the terms of the
10407 + * GNU General Public License ("GPL") version 2, as published by the Free
10408 + * Software Foundation.
10409 + *
10410 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
10411 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
10412 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
10413 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
10414 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
10415 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
10416 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
10417 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
10418 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
10419 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
10420 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
10421 + */
10422 +
10423 +#ifndef VCHIQ_CORE_H
10424 +#define VCHIQ_CORE_H
10425 +
10426 +#include <linux/mutex.h>
10427 +#include <linux/semaphore.h>
10428 +#include <linux/kthread.h>
10429 +
10430 +#include "vchiq_cfg.h"
10431 +
10432 +#include "vchiq.h"
10433 +
10434 +/* Run time control of log level, based on KERN_XXX level. */
10435 +#define VCHIQ_LOG_DEFAULT 4
10436 +#define VCHIQ_LOG_ERROR 3
10437 +#define VCHIQ_LOG_WARNING 4
10438 +#define VCHIQ_LOG_INFO 6
10439 +#define VCHIQ_LOG_TRACE 7
10440 +
10441 +#define VCHIQ_LOG_PREFIX KERN_INFO "vchiq: "
10442 +
10443 +#ifndef vchiq_log_error
10444 +#define vchiq_log_error(cat, fmt, ...) \
10445 + do { if (cat >= VCHIQ_LOG_ERROR) \
10446 + printk(VCHIQ_LOG_PREFIX fmt "\n", ##__VA_ARGS__); } while (0)
10447 +#endif
10448 +#ifndef vchiq_log_warning
10449 +#define vchiq_log_warning(cat, fmt, ...) \
10450 + do { if (cat >= VCHIQ_LOG_WARNING) \
10451 + printk(VCHIQ_LOG_PREFIX fmt "\n", ##__VA_ARGS__); } while (0)
10452 +#endif
10453 +#ifndef vchiq_log_info
10454 +#define vchiq_log_info(cat, fmt, ...) \
10455 + do { if (cat >= VCHIQ_LOG_INFO) \
10456 + printk(VCHIQ_LOG_PREFIX fmt "\n", ##__VA_ARGS__); } while (0)
10457 +#endif
10458 +#ifndef vchiq_log_trace
10459 +#define vchiq_log_trace(cat, fmt, ...) \
10460 + do { if (cat >= VCHIQ_LOG_TRACE) \
10461 + printk(VCHIQ_LOG_PREFIX fmt "\n", ##__VA_ARGS__); } while (0)
10462 +#endif
10463 +
10464 +#define vchiq_loud_error(...) \
10465 + vchiq_log_error(vchiq_core_log_level, "===== " __VA_ARGS__)
10466 +
10467 +#ifndef vchiq_static_assert
10468 +#define vchiq_static_assert(cond) __attribute__((unused)) \
10469 + extern int vchiq_static_assert[(cond) ? 1 : -1]
10470 +#endif
10471 +
10472 +#define IS_POW2(x) (x && ((x & (x - 1)) == 0))
10473 +
10474 +/* Ensure that the slot size and maximum number of slots are powers of 2 */
10475 +vchiq_static_assert(IS_POW2(VCHIQ_SLOT_SIZE));
10476 +vchiq_static_assert(IS_POW2(VCHIQ_MAX_SLOTS));
10477 +vchiq_static_assert(IS_POW2(VCHIQ_MAX_SLOTS_PER_SIDE));
10478 +
10479 +#define VCHIQ_SLOT_MASK (VCHIQ_SLOT_SIZE - 1)
10480 +#define VCHIQ_SLOT_QUEUE_MASK (VCHIQ_MAX_SLOTS_PER_SIDE - 1)
10481 +#define VCHIQ_SLOT_ZERO_SLOTS ((sizeof(VCHIQ_SLOT_ZERO_T) + \
10482 + VCHIQ_SLOT_SIZE - 1) / VCHIQ_SLOT_SIZE)
10483 +
10484 +#define VCHIQ_MSG_PADDING 0 /* - */
10485 +#define VCHIQ_MSG_CONNECT 1 /* - */
10486 +#define VCHIQ_MSG_OPEN 2 /* + (srcport, -), fourcc, client_id */
10487 +#define VCHIQ_MSG_OPENACK 3 /* + (srcport, dstport) */
10488 +#define VCHIQ_MSG_CLOSE 4 /* + (srcport, dstport) */
10489 +#define VCHIQ_MSG_DATA 5 /* + (srcport, dstport) */
10490 +#define VCHIQ_MSG_BULK_RX 6 /* + (srcport, dstport), data, size */
10491 +#define VCHIQ_MSG_BULK_TX 7 /* + (srcport, dstport), data, size */
10492 +#define VCHIQ_MSG_BULK_RX_DONE 8 /* + (srcport, dstport), actual */
10493 +#define VCHIQ_MSG_BULK_TX_DONE 9 /* + (srcport, dstport), actual */
10494 +#define VCHIQ_MSG_PAUSE 10 /* - */
10495 +#define VCHIQ_MSG_RESUME 11 /* - */
10496 +#define VCHIQ_MSG_REMOTE_USE 12 /* - */
10497 +#define VCHIQ_MSG_REMOTE_RELEASE 13 /* - */
10498 +#define VCHIQ_MSG_REMOTE_USE_ACTIVE 14 /* - */
10499 +
10500 +#define VCHIQ_PORT_MAX (VCHIQ_MAX_SERVICES - 1)
10501 +#define VCHIQ_PORT_FREE 0x1000
10502 +#define VCHIQ_PORT_IS_VALID(port) (port < VCHIQ_PORT_FREE)
10503 +#define VCHIQ_MAKE_MSG(type, srcport, dstport) \
10504 + ((type<<24) | (srcport<<12) | (dstport<<0))
10505 +#define VCHIQ_MSG_TYPE(msgid) ((unsigned int)msgid >> 24)
10506 +#define VCHIQ_MSG_SRCPORT(msgid) \
10507 + (unsigned short)(((unsigned int)msgid >> 12) & 0xfff)
10508 +#define VCHIQ_MSG_DSTPORT(msgid) \
10509 + ((unsigned short)msgid & 0xfff)
10510 +
10511 +#define VCHIQ_FOURCC_AS_4CHARS(fourcc) \
10512 + ((fourcc) >> 24) & 0xff, \
10513 + ((fourcc) >> 16) & 0xff, \
10514 + ((fourcc) >> 8) & 0xff, \
10515 + (fourcc) & 0xff
10516 +
10517 +/* Ensure the fields are wide enough */
10518 +vchiq_static_assert(VCHIQ_MSG_SRCPORT(VCHIQ_MAKE_MSG(0, 0, VCHIQ_PORT_MAX))
10519 + == 0);
10520 +vchiq_static_assert(VCHIQ_MSG_TYPE(VCHIQ_MAKE_MSG(0, VCHIQ_PORT_MAX, 0)) == 0);
10521 +vchiq_static_assert((unsigned int)VCHIQ_PORT_MAX <
10522 + (unsigned int)VCHIQ_PORT_FREE);
10523 +
10524 +#define VCHIQ_MSGID_PADDING VCHIQ_MAKE_MSG(VCHIQ_MSG_PADDING, 0, 0)
10525 +#define VCHIQ_MSGID_CLAIMED 0x40000000
10526 +
10527 +#define VCHIQ_FOURCC_INVALID 0x00000000
10528 +#define VCHIQ_FOURCC_IS_LEGAL(fourcc) (fourcc != VCHIQ_FOURCC_INVALID)
10529 +
10530 +#define VCHIQ_BULK_ACTUAL_ABORTED -1
10531 +
10532 +typedef uint32_t BITSET_T;
10533 +
10534 +vchiq_static_assert((sizeof(BITSET_T) * 8) == 32);
10535 +
10536 +#define BITSET_SIZE(b) ((b + 31) >> 5)
10537 +#define BITSET_WORD(b) (b >> 5)
10538 +#define BITSET_BIT(b) (1 << (b & 31))
10539 +#define BITSET_ZERO(bs) memset(bs, 0, sizeof(bs))
10540 +#define BITSET_IS_SET(bs, b) (bs[BITSET_WORD(b)] & BITSET_BIT(b))
10541 +#define BITSET_SET(bs, b) (bs[BITSET_WORD(b)] |= BITSET_BIT(b))
10542 +#define BITSET_CLR(bs, b) (bs[BITSET_WORD(b)] &= ~BITSET_BIT(b))
10543 +
10544 +#if VCHIQ_ENABLE_STATS
10545 +#define VCHIQ_STATS_INC(state, stat) (state->stats. stat++)
10546 +#define VCHIQ_SERVICE_STATS_INC(service, stat) (service->stats. stat++)
10547 +#define VCHIQ_SERVICE_STATS_ADD(service, stat, addend) \
10548 + (service->stats. stat += addend)
10549 +#else
10550 +#define VCHIQ_STATS_INC(state, stat) ((void)0)
10551 +#define VCHIQ_SERVICE_STATS_INC(service, stat) ((void)0)
10552 +#define VCHIQ_SERVICE_STATS_ADD(service, stat, addend) ((void)0)
10553 +#endif
10554 +
10555 +enum {
10556 + DEBUG_ENTRIES,
10557 +#if VCHIQ_ENABLE_DEBUG
10558 + DEBUG_SLOT_HANDLER_COUNT,
10559 + DEBUG_SLOT_HANDLER_LINE,
10560 + DEBUG_PARSE_LINE,
10561 + DEBUG_PARSE_HEADER,
10562 + DEBUG_PARSE_MSGID,
10563 + DEBUG_AWAIT_COMPLETION_LINE,
10564 + DEBUG_DEQUEUE_MESSAGE_LINE,
10565 + DEBUG_SERVICE_CALLBACK_LINE,
10566 + DEBUG_MSG_QUEUE_FULL_COUNT,
10567 + DEBUG_COMPLETION_QUEUE_FULL_COUNT,
10568 +#endif
10569 + DEBUG_MAX
10570 +};
10571 +
10572 +#if VCHIQ_ENABLE_DEBUG
10573 +
10574 +#define DEBUG_INITIALISE(local) int *debug_ptr = (local)->debug;
10575 +#define DEBUG_TRACE(d) \
10576 + do { debug_ptr[DEBUG_ ## d] = __LINE__; dsb(); } while (0)
10577 +#define DEBUG_VALUE(d, v) \
10578 + do { debug_ptr[DEBUG_ ## d] = (v); dsb(); } while (0)
10579 +#define DEBUG_COUNT(d) \
10580 + do { debug_ptr[DEBUG_ ## d]++; dsb(); } while (0)
10581 +
10582 +#else /* VCHIQ_ENABLE_DEBUG */
10583 +
10584 +#define DEBUG_INITIALISE(local)
10585 +#define DEBUG_TRACE(d)
10586 +#define DEBUG_VALUE(d, v)
10587 +#define DEBUG_COUNT(d)
10588 +
10589 +#endif /* VCHIQ_ENABLE_DEBUG */
10590 +
10591 +typedef enum {
10592 + VCHIQ_CONNSTATE_DISCONNECTED,
10593 + VCHIQ_CONNSTATE_CONNECTING,
10594 + VCHIQ_CONNSTATE_CONNECTED,
10595 + VCHIQ_CONNSTATE_PAUSING,
10596 + VCHIQ_CONNSTATE_PAUSE_SENT,
10597 + VCHIQ_CONNSTATE_PAUSED,
10598 + VCHIQ_CONNSTATE_RESUMING,
10599 + VCHIQ_CONNSTATE_PAUSE_TIMEOUT,
10600 + VCHIQ_CONNSTATE_RESUME_TIMEOUT
10601 +} VCHIQ_CONNSTATE_T;
10602 +
10603 +enum {
10604 + VCHIQ_SRVSTATE_FREE,
10605 + VCHIQ_SRVSTATE_HIDDEN,
10606 + VCHIQ_SRVSTATE_LISTENING,
10607 + VCHIQ_SRVSTATE_OPENING,
10608 + VCHIQ_SRVSTATE_OPEN,
10609 + VCHIQ_SRVSTATE_OPENSYNC,
10610 + VCHIQ_SRVSTATE_CLOSESENT,
10611 + VCHIQ_SRVSTATE_CLOSERECVD,
10612 + VCHIQ_SRVSTATE_CLOSEWAIT,
10613 + VCHIQ_SRVSTATE_CLOSED
10614 +};
10615 +
10616 +enum {
10617 + VCHIQ_POLL_TERMINATE,
10618 + VCHIQ_POLL_REMOVE,
10619 + VCHIQ_POLL_TXNOTIFY,
10620 + VCHIQ_POLL_RXNOTIFY,
10621 + VCHIQ_POLL_COUNT
10622 +};
10623 +
10624 +typedef enum {
10625 + VCHIQ_BULK_TRANSMIT,
10626 + VCHIQ_BULK_RECEIVE
10627 +} VCHIQ_BULK_DIR_T;
10628 +
10629 +typedef struct vchiq_bulk_struct {
10630 + short mode;
10631 + short dir;
10632 + void *userdata;
10633 + VCHI_MEM_HANDLE_T handle;
10634 + void *data;
10635 + int size;
10636 + void *remote_data;
10637 + int remote_size;
10638 + int actual;
10639 +} VCHIQ_BULK_T;
10640 +
10641 +typedef struct vchiq_bulk_queue_struct {
10642 + int local_insert; /* Where to insert the next local bulk */
10643 + int remote_insert; /* Where to insert the next remote bulk (master) */
10644 + int process; /* Bulk to transfer next */
10645 + int remote_notify; /* Bulk to notify the remote client of next (mstr) */
10646 + int remove; /* Bulk to notify the local client of, and remove,
10647 + ** next */
10648 + VCHIQ_BULK_T bulks[VCHIQ_NUM_SERVICE_BULKS];
10649 +} VCHIQ_BULK_QUEUE_T;
10650 +
10651 +typedef struct remote_event_struct {
10652 + int armed;
10653 + int fired;
10654 + struct semaphore *event;
10655 +} REMOTE_EVENT_T;
10656 +
10657 +typedef struct opaque_platform_state_t *VCHIQ_PLATFORM_STATE_T;
10658 +
10659 +typedef struct vchiq_state_struct VCHIQ_STATE_T;
10660 +
10661 +typedef struct vchiq_slot_struct {
10662 + char data[VCHIQ_SLOT_SIZE];
10663 +} VCHIQ_SLOT_T;
10664 +
10665 +typedef struct vchiq_slot_info_struct {
10666 + /* Use two counters rather than one to avoid the need for a mutex. */
10667 + short use_count;
10668 + short release_count;
10669 +} VCHIQ_SLOT_INFO_T;
10670 +
10671 +typedef struct vchiq_service_struct {
10672 + VCHIQ_SERVICE_BASE_T base;
10673 + VCHIQ_SERVICE_HANDLE_T handle;
10674 + unsigned int ref_count;
10675 + int srvstate;
10676 + unsigned int localport;
10677 + unsigned int remoteport;
10678 + int public_fourcc;
10679 + int client_id;
10680 + char auto_close;
10681 + char sync;
10682 + char closing;
10683 + atomic_t poll_flags;
10684 + short version;
10685 + short version_min;
10686 + short peer_version;
10687 +
10688 + VCHIQ_STATE_T *state;
10689 + VCHIQ_INSTANCE_T instance;
10690 +
10691 + int service_use_count;
10692 +
10693 + VCHIQ_BULK_QUEUE_T bulk_tx;
10694 + VCHIQ_BULK_QUEUE_T bulk_rx;
10695 +
10696 + struct semaphore remove_event;
10697 + struct semaphore bulk_remove_event;
10698 + struct mutex bulk_mutex;
10699 +
10700 + struct service_stats_struct {
10701 + int quota_stalls;
10702 + int slot_stalls;
10703 + int bulk_stalls;
10704 + int error_count;
10705 + int ctrl_tx_count;
10706 + int ctrl_rx_count;
10707 + int bulk_tx_count;
10708 + int bulk_rx_count;
10709 + int bulk_aborted_count;
10710 + uint64_t ctrl_tx_bytes;
10711 + uint64_t ctrl_rx_bytes;
10712 + uint64_t bulk_tx_bytes;
10713 + uint64_t bulk_rx_bytes;
10714 + } stats;
10715 +} VCHIQ_SERVICE_T;
10716 +
10717 +/* The quota information is outside VCHIQ_SERVICE_T so that it can be
10718 + statically allocated, since for accounting reasons a service's slot
10719 + usage is carried over between users of the same port number.
10720 + */
10721 +typedef struct vchiq_service_quota_struct {
10722 + unsigned short slot_quota;
10723 + unsigned short slot_use_count;
10724 + unsigned short message_quota;
10725 + unsigned short message_use_count;
10726 + struct semaphore quota_event;
10727 + int previous_tx_index;
10728 +} VCHIQ_SERVICE_QUOTA_T;
10729 +
10730 +typedef struct vchiq_shared_state_struct {
10731 +
10732 + /* A non-zero value here indicates that the content is valid. */
10733 + int initialised;
10734 +
10735 + /* The first and last (inclusive) slots allocated to the owner. */
10736 + int slot_first;
10737 + int slot_last;
10738 +
10739 + /* The slot allocated to synchronous messages from the owner. */
10740 + int slot_sync;
10741 +
10742 +	/* Signalling this event indicates that the owner's slot handler thread
10743 + ** should run. */
10744 + REMOTE_EVENT_T trigger;
10745 +
10746 + /* Indicates the byte position within the stream where the next message
10747 + ** will be written. The least significant bits are an index into the
10748 + ** slot. The next bits are the index of the slot in slot_queue. */
10749 + int tx_pos;
10750 +
10751 + /* This event should be signalled when a slot is recycled. */
10752 + REMOTE_EVENT_T recycle;
10753 +
10754 + /* The slot_queue index where the next recycled slot will be written. */
10755 + int slot_queue_recycle;
10756 +
10757 + /* This event should be signalled when a synchronous message is sent. */
10758 + REMOTE_EVENT_T sync_trigger;
10759 +
10760 + /* This event should be signalled when a synchronous message has been
10761 + ** released. */
10762 + REMOTE_EVENT_T sync_release;
10763 +
10764 + /* A circular buffer of slot indexes. */
10765 + int slot_queue[VCHIQ_MAX_SLOTS_PER_SIDE];
10766 +
10767 + /* Debugging state */
10768 + int debug[DEBUG_MAX];
10769 +} VCHIQ_SHARED_STATE_T;
10770 +
10771 +typedef struct vchiq_slot_zero_struct {
10772 + int magic;
10773 + short version;
10774 + short version_min;
10775 + int slot_zero_size;
10776 + int slot_size;
10777 + int max_slots;
10778 + int max_slots_per_side;
10779 + int platform_data[2];
10780 + VCHIQ_SHARED_STATE_T master;
10781 + VCHIQ_SHARED_STATE_T slave;
10782 + VCHIQ_SLOT_INFO_T slots[VCHIQ_MAX_SLOTS];
10783 +} VCHIQ_SLOT_ZERO_T;
10784 +
10785 +struct vchiq_state_struct {
10786 + int id;
10787 + int initialised;
10788 + VCHIQ_CONNSTATE_T conn_state;
10789 + int is_master;
10790 +
10791 + VCHIQ_SHARED_STATE_T *local;
10792 + VCHIQ_SHARED_STATE_T *remote;
10793 + VCHIQ_SLOT_T *slot_data;
10794 +
10795 + unsigned short default_slot_quota;
10796 + unsigned short default_message_quota;
10797 +
10798 + /* Event indicating connect message received */
10799 + struct semaphore connect;
10800 +
10801 + /* Mutex protecting services */
10802 + struct mutex mutex;
10803 + VCHIQ_INSTANCE_T *instance;
10804 +
10805 + /* Processes incoming messages */
10806 + struct task_struct *slot_handler_thread;
10807 +
10808 + /* Processes recycled slots */
10809 + struct task_struct *recycle_thread;
10810 +
10811 + /* Processes synchronous messages */
10812 + struct task_struct *sync_thread;
10813 +
10814 + /* Local implementation of the trigger remote event */
10815 + struct semaphore trigger_event;
10816 +
10817 + /* Local implementation of the recycle remote event */
10818 + struct semaphore recycle_event;
10819 +
10820 + /* Local implementation of the sync trigger remote event */
10821 + struct semaphore sync_trigger_event;
10822 +
10823 + /* Local implementation of the sync release remote event */
10824 + struct semaphore sync_release_event;
10825 +
10826 + char *tx_data;
10827 + char *rx_data;
10828 + VCHIQ_SLOT_INFO_T *rx_info;
10829 +
10830 + struct mutex slot_mutex;
10831 +
10832 + struct mutex recycle_mutex;
10833 +
10834 + struct mutex sync_mutex;
10835 +
10836 + struct mutex bulk_transfer_mutex;
10837 +
10838 + /* Indicates the byte position within the stream from where the next
10839 + ** message will be read. The least significant bits are an index into
10840 +	** the slot. The next bits are the index of the slot in
10841 + ** remote->slot_queue. */
10842 + int rx_pos;
10843 +
10844 + /* A cached copy of local->tx_pos. Only write to local->tx_pos, and read
10845 + from remote->tx_pos. */
10846 + int local_tx_pos;
10847 +
10848 + /* The slot_queue index of the slot to become available next. */
10849 + int slot_queue_available;
10850 +
10851 + /* A flag to indicate if any poll has been requested */
10852 + int poll_needed;
10853 +
10854 +	/* The index of the previous slot used for data messages. */
10855 + int previous_data_index;
10856 +
10857 + /* The number of slots occupied by data messages. */
10858 + unsigned short data_use_count;
10859 +
10860 + /* The maximum number of slots to be occupied by data messages. */
10861 + unsigned short data_quota;
10862 +
10863 + /* An array of bit sets indicating which services must be polled. */
10864 + atomic_t poll_services[BITSET_SIZE(VCHIQ_MAX_SERVICES)];
10865 +
10866 + /* The number of the first unused service */
10867 + int unused_service;
10868 +
10869 + /* Signalled when a free slot becomes available. */
10870 + struct semaphore slot_available_event;
10871 +
10872 + struct semaphore slot_remove_event;
10873 +
10874 + /* Signalled when a free data slot becomes available. */
10875 + struct semaphore data_quota_event;
10876 +
10877 + /* Incremented when there are bulk transfers which cannot be processed
10878 + * whilst paused and must be processed on resume */
10879 + int deferred_bulks;
10880 +
10881 + struct state_stats_struct {
10882 + int slot_stalls;
10883 + int data_stalls;
10884 + int ctrl_tx_count;
10885 + int ctrl_rx_count;
10886 + int error_count;
10887 + } stats;
10888 +
10889 +	VCHIQ_SERVICE_T *services[VCHIQ_MAX_SERVICES];
10890 + VCHIQ_SERVICE_QUOTA_T service_quotas[VCHIQ_MAX_SERVICES];
10891 + VCHIQ_SLOT_INFO_T slot_info[VCHIQ_MAX_SLOTS];
10892 +
10893 + VCHIQ_PLATFORM_STATE_T platform_state;
10894 +};
10895 +
10896 +struct bulk_waiter {
10897 + VCHIQ_BULK_T *bulk;
10898 + struct semaphore event;
10899 + int actual;
10900 +};
10901 +
10902 +extern spinlock_t bulk_waiter_spinlock;
10903 +
10904 +extern int vchiq_core_log_level;
10905 +extern int vchiq_core_msg_log_level;
10906 +extern int vchiq_sync_log_level;
10907 +
10908 +extern VCHIQ_STATE_T *vchiq_states[VCHIQ_MAX_STATES];
10909 +
10910 +extern const char *
10911 +get_conn_state_name(VCHIQ_CONNSTATE_T conn_state);
10912 +
10913 +extern VCHIQ_SLOT_ZERO_T *
10914 +vchiq_init_slots(void *mem_base, int mem_size);
10915 +
10916 +extern VCHIQ_STATUS_T
10917 +vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero,
10918 + int is_master);
10919 +
10920 +extern VCHIQ_STATUS_T
10921 +vchiq_connect_internal(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance);
10922 +
10923 +extern VCHIQ_SERVICE_T *
10924 +vchiq_add_service_internal(VCHIQ_STATE_T *state,
10925 + const VCHIQ_SERVICE_PARAMS_T *params, int srvstate,
10926 + VCHIQ_INSTANCE_T instance);
10927 +
10928 +extern VCHIQ_STATUS_T
10929 +vchiq_open_service_internal(VCHIQ_SERVICE_T *service, int client_id);
10930 +
10931 +extern VCHIQ_STATUS_T
10932 +vchiq_close_service_internal(VCHIQ_SERVICE_T *service, int close_recvd);
10933 +
10934 +extern void
10935 +vchiq_terminate_service_internal(VCHIQ_SERVICE_T *service);
10936 +
10937 +extern void
10938 +vchiq_free_service_internal(VCHIQ_SERVICE_T *service);
10939 +
10940 +extern VCHIQ_STATUS_T
10941 +vchiq_shutdown_internal(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance);
10942 +
10943 +extern VCHIQ_STATUS_T
10944 +vchiq_pause_internal(VCHIQ_STATE_T *state);
10945 +
10946 +extern VCHIQ_STATUS_T
10947 +vchiq_resume_internal(VCHIQ_STATE_T *state);
10948 +
10949 +extern void
10950 +remote_event_pollall(VCHIQ_STATE_T *state);
10951 +
10952 +extern VCHIQ_STATUS_T
10953 +vchiq_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle,
10954 + VCHI_MEM_HANDLE_T memhandle, void *offset, int size, void *userdata,
10955 + VCHIQ_BULK_MODE_T mode, VCHIQ_BULK_DIR_T dir);
10956 +
10957 +extern void
10958 +vchiq_dump_state(void *dump_context, VCHIQ_STATE_T *state);
10959 +
10960 +extern void
10961 +vchiq_dump_service_state(void *dump_context, VCHIQ_SERVICE_T *service);
10962 +
10963 +extern void
10964 +vchiq_loud_error_header(void);
10965 +
10966 +extern void
10967 +vchiq_loud_error_footer(void);
10968 +
10969 +extern void
10970 +request_poll(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service, int poll_type);
10971 +
10972 +static inline VCHIQ_SERVICE_T *
10973 +handle_to_service(VCHIQ_SERVICE_HANDLE_T handle)
10974 +{
10975 + VCHIQ_STATE_T *state = vchiq_states[(handle / VCHIQ_MAX_SERVICES) &
10976 + (VCHIQ_MAX_STATES - 1)];
10977 + if (!state)
10978 + return NULL;
10979 +
10980 + return state->services[handle & (VCHIQ_MAX_SERVICES - 1)];
10981 +}
10982 +
10983 +extern VCHIQ_SERVICE_T *
10984 +find_service_by_handle(VCHIQ_SERVICE_HANDLE_T handle);
10985 +
10986 +extern VCHIQ_SERVICE_T *
10987 +find_service_by_port(VCHIQ_STATE_T *state, int localport);
10988 +
10989 +extern VCHIQ_SERVICE_T *
10990 +find_service_for_instance(VCHIQ_INSTANCE_T instance,
10991 + VCHIQ_SERVICE_HANDLE_T handle);
10992 +
10993 +extern VCHIQ_SERVICE_T *
10994 +next_service_by_instance(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance,
10995 + int *pidx);
10996 +
10997 +extern void
10998 +lock_service(VCHIQ_SERVICE_T *service);
10999 +
11000 +extern void
11001 +unlock_service(VCHIQ_SERVICE_T *service);
11002 +
11003 +/* The following functions are called from vchiq_core, and external
11004 +** implementations must be provided. */
11005 +
11006 +extern VCHIQ_STATUS_T
11007 +vchiq_prepare_bulk_data(VCHIQ_BULK_T *bulk,
11008 + VCHI_MEM_HANDLE_T memhandle, void *offset, int size, int dir);
11009 +
11010 +extern void
11011 +vchiq_transfer_bulk(VCHIQ_BULK_T *bulk);
11012 +
11013 +extern void
11014 +vchiq_complete_bulk(VCHIQ_BULK_T *bulk);
11015 +
11016 +extern VCHIQ_STATUS_T
11017 +vchiq_copy_from_user(void *dst, const void *src, int size);
11018 +
11019 +extern void
11020 +remote_event_signal(REMOTE_EVENT_T *event);
11021 +
11022 +void
11023 +vchiq_platform_check_suspend(VCHIQ_STATE_T *state);
11024 +
11025 +extern void
11026 +vchiq_platform_paused(VCHIQ_STATE_T *state);
11027 +
11028 +extern VCHIQ_STATUS_T
11029 +vchiq_platform_resume(VCHIQ_STATE_T *state);
11030 +
11031 +extern void
11032 +vchiq_platform_resumed(VCHIQ_STATE_T *state);
11033 +
11034 +extern void
11035 +vchiq_dump(void *dump_context, const char *str, int len);
11036 +
11037 +extern void
11038 +vchiq_dump_platform_state(void *dump_context);
11039 +
11040 +extern void
11041 +vchiq_dump_platform_instances(void *dump_context);
11042 +
11043 +extern void
11044 +vchiq_dump_platform_service_state(void *dump_context,
11045 + VCHIQ_SERVICE_T *service);
11046 +
11047 +extern VCHIQ_STATUS_T
11048 +vchiq_use_service_internal(VCHIQ_SERVICE_T *service);
11049 +
11050 +extern VCHIQ_STATUS_T
11051 +vchiq_release_service_internal(VCHIQ_SERVICE_T *service);
11052 +
11053 +extern void
11054 +vchiq_on_remote_use(VCHIQ_STATE_T *state);
11055 +
11056 +extern void
11057 +vchiq_on_remote_release(VCHIQ_STATE_T *state);
11058 +
11059 +extern VCHIQ_STATUS_T
11060 +vchiq_platform_init_state(VCHIQ_STATE_T *state);
11061 +
11062 +extern VCHIQ_STATUS_T
11063 +vchiq_check_service(VCHIQ_SERVICE_T *service);
11064 +
11065 +extern void
11066 +vchiq_on_remote_use_active(VCHIQ_STATE_T *state);
11067 +
11068 +extern VCHIQ_STATUS_T
11069 +vchiq_send_remote_use(VCHIQ_STATE_T *state);
11070 +
11071 +extern VCHIQ_STATUS_T
11072 +vchiq_send_remote_release(VCHIQ_STATE_T *state);
11073 +
11074 +extern VCHIQ_STATUS_T
11075 +vchiq_send_remote_use_active(VCHIQ_STATE_T *state);
11076 +
11077 +extern void
11078 +vchiq_platform_conn_state_changed(VCHIQ_STATE_T *state,
11079 + VCHIQ_CONNSTATE_T oldstate, VCHIQ_CONNSTATE_T newstate);
11080 +
11081 +extern void
11082 +vchiq_platform_handle_timeout(VCHIQ_STATE_T *state);
11083 +
11084 +extern void
11085 +vchiq_set_conn_state(VCHIQ_STATE_T *state, VCHIQ_CONNSTATE_T newstate);
11086 +
11087 +
11088 +extern void
11089 +vchiq_log_dump_mem(const char *label, uint32_t addr, const void *voidMem,
11090 + size_t numBytes);
11091 +
11092 +#endif
11093 diff -urN linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_genversion linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_genversion
11094 --- linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_genversion 1970-01-01 01:00:00.000000000 +0100
11095 +++ linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_genversion 2013-07-06 15:25:50.000000000 +0100
11096 @@ -0,0 +1,89 @@
11097 +#!/usr/bin/perl -w
11098 +
11099 +use strict;
11100 +
11101 +#
11102 +# Generate a version from available information
11103 +#
11104 +
11105 +my $prefix = shift @ARGV;
11106 +my $root = shift @ARGV;
11107 +
11108 +
11109 +if ( not defined $root ) {
11110 + die "usage: $0 prefix root-dir\n";
11111 +}
11112 +
11113 +if ( ! -d $root ) {
11114 + die "root directory $root not found\n";
11115 +}
11116 +
11117 +my $version = "unknown";
11118 +my $tainted = "";
11119 +
11120 +if ( -d "$root/.git" ) {
11121 + # attempt to work out git version. only do so
11122 + # on a linux build host, as cygwin builds are
11123 + # already slow enough
11124 +
11125 + if ( -f "/usr/bin/git" || -f "/usr/local/bin/git" ) {
11126 + if (not open(F, "git --git-dir $root/.git rev-parse --verify HEAD|")) {
11127 + $version = "no git version";
11128 + }
11129 + else {
11130 + $version = <F>;
11131 + $version =~ s/[ \r\n]*$//; # chomp may not be enough (cygwin).
11132 + $version =~ s/^[ \r\n]*//; # chomp may not be enough (cygwin).
11133 + }
11134 +
11135 + if (open(G, "git --git-dir $root/.git status --porcelain|")) {
11136 + $tainted = <G>;
11137 + $tainted =~ s/[ \r\n]*$//; # chomp may not be enough (cygwin).
11138 + $tainted =~ s/^[ \r\n]*//; # chomp may not be enough (cygwin).
11139 + if (length $tainted) {
11140 + $version = join ' ', $version, "(tainted)";
11141 + }
11142 + else {
11143 + $version = join ' ', $version, "(clean)";
11144 + }
11145 + }
11146 + }
11147 +}
11148 +
11149 +my $hostname = `hostname`;
11150 +$hostname =~ s/[ \r\n]*$//; # chomp may not be enough (cygwin).
11151 +$hostname =~ s/^[ \r\n]*//; # chomp may not be enough (cygwin).
11152 +
11153 +
11154 +print STDERR "Version $version\n";
11155 +print <<EOF;
11156 +#include "${prefix}_build_info.h"
11157 +#include <linux/broadcom/vc_debug_sym.h>
11158 +
11159 +VC_DEBUG_DECLARE_STRING_VAR( ${prefix}_build_hostname, "$hostname" );
11160 +VC_DEBUG_DECLARE_STRING_VAR( ${prefix}_build_version, "$version" );
11161 +VC_DEBUG_DECLARE_STRING_VAR( ${prefix}_build_time, __TIME__ );
11162 +VC_DEBUG_DECLARE_STRING_VAR( ${prefix}_build_date, __DATE__ );
11163 +
11164 +const char *vchiq_get_build_hostname( void )
11165 +{
11166 + return vchiq_build_hostname;
11167 +}
11168 +
11169 +const char *vchiq_get_build_version( void )
11170 +{
11171 + return vchiq_build_version;
11172 +}
11173 +
11174 +const char *vchiq_get_build_date( void )
11175 +{
11176 + return vchiq_build_date;
11177 +}
11178 +
11179 +const char *vchiq_get_build_time( void )
11180 +{
11181 + return vchiq_build_time;
11182 +}
11183 +EOF
11184 +
11185 +
11186 diff -urN linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq.h linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq.h
11187 --- linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq.h 1970-01-01 01:00:00.000000000 +0100
11188 +++ linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq.h 2013-07-06 15:25:50.000000000 +0100
11189 @@ -0,0 +1,41 @@
11190 +/**
11191 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
11192 + *
11193 + * Redistribution and use in source and binary forms, with or without
11194 + * modification, are permitted provided that the following conditions
11195 + * are met:
11196 + * 1. Redistributions of source code must retain the above copyright
11197 + * notice, this list of conditions, and the following disclaimer,
11198 + * without modification.
11199 + * 2. Redistributions in binary form must reproduce the above copyright
11200 + * notice, this list of conditions and the following disclaimer in the
11201 + * documentation and/or other materials provided with the distribution.
11202 + * 3. The names of the above-listed copyright holders may not be used
11203 + * to endorse or promote products derived from this software without
11204 + * specific prior written permission.
11205 + *
11206 + * ALTERNATIVELY, this software may be distributed under the terms of the
11207 + * GNU General Public License ("GPL") version 2, as published by the Free
11208 + * Software Foundation.
11209 + *
11210 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
11211 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
11212 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
11213 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
11214 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
11215 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
11216 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
11217 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
11218 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
11219 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
11220 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
11221 + */
11222 +
11223 +#ifndef VCHIQ_VCHIQ_H
11224 +#define VCHIQ_VCHIQ_H
11225 +
11226 +#include "vchiq_if.h"
11227 +#include "vchiq_util.h"
11228 +
11229 +#endif
11230 +
11231 diff -urN linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_if.h linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_if.h
11232 --- linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_if.h 1970-01-01 01:00:00.000000000 +0100
11233 +++ linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_if.h 2013-07-06 15:25:50.000000000 +0100
11234 @@ -0,0 +1,188 @@
11235 +/**
11236 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
11237 + *
11238 + * Redistribution and use in source and binary forms, with or without
11239 + * modification, are permitted provided that the following conditions
11240 + * are met:
11241 + * 1. Redistributions of source code must retain the above copyright
11242 + * notice, this list of conditions, and the following disclaimer,
11243 + * without modification.
11244 + * 2. Redistributions in binary form must reproduce the above copyright
11245 + * notice, this list of conditions and the following disclaimer in the
11246 + * documentation and/or other materials provided with the distribution.
11247 + * 3. The names of the above-listed copyright holders may not be used
11248 + * to endorse or promote products derived from this software without
11249 + * specific prior written permission.
11250 + *
11251 + * ALTERNATIVELY, this software may be distributed under the terms of the
11252 + * GNU General Public License ("GPL") version 2, as published by the Free
11253 + * Software Foundation.
11254 + *
11255 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
11256 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
11257 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
11258 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
11259 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
11260 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
11261 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
11262 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
11263 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
11264 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
11265 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
11266 + */
11267 +
11268 +#ifndef VCHIQ_IF_H
11269 +#define VCHIQ_IF_H
11270 +
11271 +#include "interface/vchi/vchi_mh.h"
11272 +
11273 +#define VCHIQ_SERVICE_HANDLE_INVALID 0
11274 +
11275 +#define VCHIQ_SLOT_SIZE 4096
11276 +#define VCHIQ_MAX_MSG_SIZE (VCHIQ_SLOT_SIZE - sizeof(VCHIQ_HEADER_T))
11277 +#define VCHIQ_CHANNEL_SIZE VCHIQ_MAX_MSG_SIZE /* For backwards compatibility */
11278 +
11279 +#define VCHIQ_MAKE_FOURCC(x0, x1, x2, x3) \
11280 + (((x0) << 24) | ((x1) << 16) | ((x2) << 8) | (x3))
11281 +#define VCHIQ_GET_SERVICE_USERDATA(service) vchiq_get_service_userdata(service)
11282 +#define VCHIQ_GET_SERVICE_FOURCC(service) vchiq_get_service_fourcc(service)
11283 +
11284 +typedef enum {
11285 + VCHIQ_SERVICE_OPENED, /* service, -, - */
11286 + VCHIQ_SERVICE_CLOSED, /* service, -, - */
11287 + VCHIQ_MESSAGE_AVAILABLE, /* service, header, - */
11288 + VCHIQ_BULK_TRANSMIT_DONE, /* service, -, bulk_userdata */
11289 + VCHIQ_BULK_RECEIVE_DONE, /* service, -, bulk_userdata */
11290 + VCHIQ_BULK_TRANSMIT_ABORTED, /* service, -, bulk_userdata */
11291 + VCHIQ_BULK_RECEIVE_ABORTED /* service, -, bulk_userdata */
11292 +} VCHIQ_REASON_T;
11293 +
11294 +typedef enum {
11295 + VCHIQ_ERROR = -1,
11296 + VCHIQ_SUCCESS = 0,
11297 + VCHIQ_RETRY = 1
11298 +} VCHIQ_STATUS_T;
11299 +
11300 +typedef enum {
11301 + VCHIQ_BULK_MODE_CALLBACK,
11302 + VCHIQ_BULK_MODE_BLOCKING,
11303 + VCHIQ_BULK_MODE_NOCALLBACK,
11304 + VCHIQ_BULK_MODE_WAITING /* Reserved for internal use */
11305 +} VCHIQ_BULK_MODE_T;
11306 +
11307 +typedef enum {
11308 + VCHIQ_SERVICE_OPTION_AUTOCLOSE,
11309 + VCHIQ_SERVICE_OPTION_SLOT_QUOTA,
11310 + VCHIQ_SERVICE_OPTION_MESSAGE_QUOTA,
11311 + VCHIQ_SERVICE_OPTION_SYNCHRONOUS
11312 +} VCHIQ_SERVICE_OPTION_T;
11313 +
11314 +typedef struct vchiq_header_struct {
11315 + /* The message identifier - opaque to applications. */
11316 + int msgid;
11317 +
11318 + /* Size of message data. */
11319 + unsigned int size;
11320 +
11321 + char data[0]; /* message */
11322 +} VCHIQ_HEADER_T;
11323 +
11324 +typedef struct {
11325 + const void *data;
11326 + unsigned int size;
11327 +} VCHIQ_ELEMENT_T;
11328 +
11329 +typedef unsigned int VCHIQ_SERVICE_HANDLE_T;
11330 +
11331 +typedef VCHIQ_STATUS_T (*VCHIQ_CALLBACK_T)(VCHIQ_REASON_T, VCHIQ_HEADER_T *,
11332 + VCHIQ_SERVICE_HANDLE_T, void *);
11333 +
11334 +typedef struct vchiq_service_base_struct {
11335 + int fourcc;
11336 + VCHIQ_CALLBACK_T callback;
11337 + void *userdata;
11338 +} VCHIQ_SERVICE_BASE_T;
11339 +
11340 +typedef struct vchiq_service_params_struct {
11341 + int fourcc;
11342 + VCHIQ_CALLBACK_T callback;
11343 + void *userdata;
11344 + short version; /* Increment for non-trivial changes */
11345 + short version_min; /* Update for incompatible changes */
11346 +} VCHIQ_SERVICE_PARAMS_T;
11347 +
11348 +typedef struct vchiq_config_struct {
11349 + unsigned int max_msg_size;
11350 + unsigned int bulk_threshold; /* The message size above which it
11351 + is better to use a bulk transfer
11352 + (<= max_msg_size) */
11353 + unsigned int max_outstanding_bulks;
11354 + unsigned int max_services;
11355 + short version; /* The version of VCHIQ */
11356 + short version_min; /* The minimum compatible version of VCHIQ */
11357 +} VCHIQ_CONFIG_T;
11358 +
11359 +typedef struct vchiq_instance_struct *VCHIQ_INSTANCE_T;
11360 +typedef void (*VCHIQ_REMOTE_USE_CALLBACK_T)(void *cb_arg);
11361 +
11362 +extern VCHIQ_STATUS_T vchiq_initialise(VCHIQ_INSTANCE_T *pinstance);
11363 +extern VCHIQ_STATUS_T vchiq_shutdown(VCHIQ_INSTANCE_T instance);
11364 +extern VCHIQ_STATUS_T vchiq_connect(VCHIQ_INSTANCE_T instance);
11365 +extern VCHIQ_STATUS_T vchiq_add_service(VCHIQ_INSTANCE_T instance,
11366 + const VCHIQ_SERVICE_PARAMS_T *params,
11367 + VCHIQ_SERVICE_HANDLE_T *pservice);
11368 +extern VCHIQ_STATUS_T vchiq_open_service(VCHIQ_INSTANCE_T instance,
11369 + const VCHIQ_SERVICE_PARAMS_T *params,
11370 + VCHIQ_SERVICE_HANDLE_T *pservice);
11371 +extern VCHIQ_STATUS_T vchiq_close_service(VCHIQ_SERVICE_HANDLE_T service);
11372 +extern VCHIQ_STATUS_T vchiq_remove_service(VCHIQ_SERVICE_HANDLE_T service);
11373 +extern VCHIQ_STATUS_T vchiq_use_service(VCHIQ_SERVICE_HANDLE_T service);
11374 +extern VCHIQ_STATUS_T vchiq_use_service_no_resume(
11375 + VCHIQ_SERVICE_HANDLE_T service);
11376 +extern VCHIQ_STATUS_T vchiq_release_service(VCHIQ_SERVICE_HANDLE_T service);
11377 +
11378 +extern VCHIQ_STATUS_T vchiq_queue_message(VCHIQ_SERVICE_HANDLE_T service,
11379 + const VCHIQ_ELEMENT_T *elements, unsigned int count);
11380 +extern void vchiq_release_message(VCHIQ_SERVICE_HANDLE_T service,
11381 + VCHIQ_HEADER_T *header);
11382 +extern VCHIQ_STATUS_T vchiq_queue_bulk_transmit(VCHIQ_SERVICE_HANDLE_T service,
11383 + const void *data, unsigned int size, void *userdata);
11384 +extern VCHIQ_STATUS_T vchiq_queue_bulk_receive(VCHIQ_SERVICE_HANDLE_T service,
11385 + void *data, unsigned int size, void *userdata);
11386 +extern VCHIQ_STATUS_T vchiq_queue_bulk_transmit_handle(
11387 + VCHIQ_SERVICE_HANDLE_T service, VCHI_MEM_HANDLE_T handle,
11388 + const void *offset, unsigned int size, void *userdata);
11389 +extern VCHIQ_STATUS_T vchiq_queue_bulk_receive_handle(
11390 + VCHIQ_SERVICE_HANDLE_T service, VCHI_MEM_HANDLE_T handle,
11391 + void *offset, unsigned int size, void *userdata);
11392 +extern VCHIQ_STATUS_T vchiq_bulk_transmit(VCHIQ_SERVICE_HANDLE_T service,
11393 + const void *data, unsigned int size, void *userdata,
11394 + VCHIQ_BULK_MODE_T mode);
11395 +extern VCHIQ_STATUS_T vchiq_bulk_receive(VCHIQ_SERVICE_HANDLE_T service,
11396 + void *data, unsigned int size, void *userdata,
11397 + VCHIQ_BULK_MODE_T mode);
11398 +extern VCHIQ_STATUS_T vchiq_bulk_transmit_handle(VCHIQ_SERVICE_HANDLE_T service,
11399 + VCHI_MEM_HANDLE_T handle, const void *offset, unsigned int size,
11400 + void *userdata, VCHIQ_BULK_MODE_T mode);
11401 +extern VCHIQ_STATUS_T vchiq_bulk_receive_handle(VCHIQ_SERVICE_HANDLE_T service,
11402 + VCHI_MEM_HANDLE_T handle, void *offset, unsigned int size,
11403 + void *userdata, VCHIQ_BULK_MODE_T mode);
11404 +extern int vchiq_get_client_id(VCHIQ_SERVICE_HANDLE_T service);
11405 +extern void *vchiq_get_service_userdata(VCHIQ_SERVICE_HANDLE_T service);
11406 +extern int vchiq_get_service_fourcc(VCHIQ_SERVICE_HANDLE_T service);
11407 +extern VCHIQ_STATUS_T vchiq_get_config(VCHIQ_INSTANCE_T instance,
11408 + int config_size, VCHIQ_CONFIG_T *pconfig);
11409 +extern VCHIQ_STATUS_T vchiq_set_service_option(VCHIQ_SERVICE_HANDLE_T service,
11410 + VCHIQ_SERVICE_OPTION_T option, int value);
11411 +
11412 +extern VCHIQ_STATUS_T vchiq_remote_use(VCHIQ_INSTANCE_T instance,
11413 + VCHIQ_REMOTE_USE_CALLBACK_T callback, void *cb_arg);
11414 +extern VCHIQ_STATUS_T vchiq_remote_release(VCHIQ_INSTANCE_T instance);
11415 +
11416 +extern VCHIQ_STATUS_T vchiq_dump_phys_mem(VCHIQ_SERVICE_HANDLE_T service,
11417 + void *ptr, size_t num_bytes);
11418 +
11419 +extern VCHIQ_STATUS_T vchiq_get_peer_version(VCHIQ_SERVICE_HANDLE_T handle,
11420 + short *peer_version);
11421 +
11422 +#endif /* VCHIQ_IF_H */
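A kernel-side client of the API declared above is expected to initialise an instance, connect it, open a service, and then queue messages. The sketch below illustrates that sequence; it is not part of the patch, the "demo" fourcc, version numbers and error codes are assumptions, and the VCHIQ_SERVICE_PARAMS_T fields used are the ones vchiq_shim.c later in this patch fills in (fourcc, callback, userdata, version, version_min).

#include <linux/errno.h>
#include <linux/string.h>
#include "vchiq_if.h"

/* Hypothetical example client - not part of the patch. */
static VCHIQ_STATUS_T demo_callback(VCHIQ_REASON_T reason, VCHIQ_HEADER_T *header,
	VCHIQ_SERVICE_HANDLE_T handle, void *bulk_userdata)
{
	/* Consume and immediately discard incoming messages. */
	if (reason == VCHIQ_MESSAGE_AVAILABLE)
		vchiq_release_message(handle, header);
	return VCHIQ_SUCCESS;
}

static int demo_send(void)
{
	VCHIQ_INSTANCE_T instance;
	VCHIQ_SERVICE_HANDLE_T service;
	VCHIQ_SERVICE_PARAMS_T params;
	static const char msg[] = "ping";
	VCHIQ_ELEMENT_T element = { msg, sizeof(msg) };

	if (vchiq_initialise(&instance) != VCHIQ_SUCCESS)
		return -ENXIO;
	if (vchiq_connect(instance) != VCHIQ_SUCCESS)
		goto err_shutdown;

	memset(&params, 0, sizeof(params));
	params.fourcc = 0x64656d6f;	/* "demo" - hypothetical service ID */
	params.callback = demo_callback;
	params.userdata = NULL;
	params.version = 1;		/* assumed versioning scheme */
	params.version_min = 1;
	if (vchiq_open_service(instance, &params, &service) != VCHIQ_SUCCESS)
		goto err_shutdown;

	/* vchiq_queue_message() can return VCHIQ_RETRY under load. */
	if (vchiq_queue_message(service, &element, 1) != VCHIQ_SUCCESS)
		goto err_close;

	vchiq_close_service(service);
	vchiq_shutdown(instance);
	return 0;

err_close:
	vchiq_close_service(service);
err_shutdown:
	vchiq_shutdown(instance);
	return -EIO;
}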
11423 diff -urN linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_ioctl.h linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_ioctl.h
11424 --- linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_ioctl.h 1970-01-01 01:00:00.000000000 +0100
11425 +++ linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_ioctl.h 2013-07-06 15:25:50.000000000 +0100
11426 @@ -0,0 +1,129 @@
11427 +/**
11428 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
11429 + *
11430 + * Redistribution and use in source and binary forms, with or without
11431 + * modification, are permitted provided that the following conditions
11432 + * are met:
11433 + * 1. Redistributions of source code must retain the above copyright
11434 + * notice, this list of conditions, and the following disclaimer,
11435 + * without modification.
11436 + * 2. Redistributions in binary form must reproduce the above copyright
11437 + * notice, this list of conditions and the following disclaimer in the
11438 + * documentation and/or other materials provided with the distribution.
11439 + * 3. The names of the above-listed copyright holders may not be used
11440 + * to endorse or promote products derived from this software without
11441 + * specific prior written permission.
11442 + *
11443 + * ALTERNATIVELY, this software may be distributed under the terms of the
11444 + * GNU General Public License ("GPL") version 2, as published by the Free
11445 + * Software Foundation.
11446 + *
11447 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
11448 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
11449 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
11450 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
11451 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
11452 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
11453 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
11454 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
11455 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
11456 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
11457 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
11458 + */
11459 +
11460 +#ifndef VCHIQ_IOCTLS_H
11461 +#define VCHIQ_IOCTLS_H
11462 +
11463 +#include <linux/ioctl.h>
11464 +#include "vchiq_if.h"
11465 +
11466 +#define VCHIQ_IOC_MAGIC 0xc4
11467 +#define VCHIQ_INVALID_HANDLE (~0)
11468 +
11469 +typedef struct {
11470 + VCHIQ_SERVICE_PARAMS_T params;
11471 + int is_open;
11472 + int is_vchi;
11473 + unsigned int handle; /* OUT */
11474 +} VCHIQ_CREATE_SERVICE_T;
11475 +
11476 +typedef struct {
11477 + unsigned int handle;
11478 + unsigned int count;
11479 + const VCHIQ_ELEMENT_T *elements;
11480 +} VCHIQ_QUEUE_MESSAGE_T;
11481 +
11482 +typedef struct {
11483 + unsigned int handle;
11484 + void *data;
11485 + unsigned int size;
11486 + void *userdata;
11487 + VCHIQ_BULK_MODE_T mode;
11488 +} VCHIQ_QUEUE_BULK_TRANSFER_T;
11489 +
11490 +typedef struct {
11491 + VCHIQ_REASON_T reason;
11492 + VCHIQ_HEADER_T *header;
11493 + void *service_userdata;
11494 + void *bulk_userdata;
11495 +} VCHIQ_COMPLETION_DATA_T;
11496 +
11497 +typedef struct {
11498 + unsigned int count;
11499 + VCHIQ_COMPLETION_DATA_T *buf;
11500 + unsigned int msgbufsize;
11501 + unsigned int msgbufcount; /* IN/OUT */
11502 + void **msgbufs;
11503 +} VCHIQ_AWAIT_COMPLETION_T;
11504 +
11505 +typedef struct {
11506 + unsigned int handle;
11507 + int blocking;
11508 + unsigned int bufsize;
11509 + void *buf;
11510 +} VCHIQ_DEQUEUE_MESSAGE_T;
11511 +
11512 +typedef struct {
11513 + unsigned int config_size;
11514 + VCHIQ_CONFIG_T *pconfig;
11515 +} VCHIQ_GET_CONFIG_T;
11516 +
11517 +typedef struct {
11518 + unsigned int handle;
11519 + VCHIQ_SERVICE_OPTION_T option;
11520 + int value;
11521 +} VCHIQ_SET_SERVICE_OPTION_T;
11522 +
11523 +typedef struct {
11524 + void *virt_addr;
11525 + size_t num_bytes;
11526 +} VCHIQ_DUMP_MEM_T;
11527 +
11528 +#define VCHIQ_IOC_CONNECT _IO(VCHIQ_IOC_MAGIC, 0)
11529 +#define VCHIQ_IOC_SHUTDOWN _IO(VCHIQ_IOC_MAGIC, 1)
11530 +#define VCHIQ_IOC_CREATE_SERVICE \
11531 + _IOWR(VCHIQ_IOC_MAGIC, 2, VCHIQ_CREATE_SERVICE_T)
11532 +#define VCHIQ_IOC_REMOVE_SERVICE _IO(VCHIQ_IOC_MAGIC, 3)
11533 +#define VCHIQ_IOC_QUEUE_MESSAGE \
11534 + _IOW(VCHIQ_IOC_MAGIC, 4, VCHIQ_QUEUE_MESSAGE_T)
11535 +#define VCHIQ_IOC_QUEUE_BULK_TRANSMIT \
11536 + _IOWR(VCHIQ_IOC_MAGIC, 5, VCHIQ_QUEUE_BULK_TRANSFER_T)
11537 +#define VCHIQ_IOC_QUEUE_BULK_RECEIVE \
11538 + _IOWR(VCHIQ_IOC_MAGIC, 6, VCHIQ_QUEUE_BULK_TRANSFER_T)
11539 +#define VCHIQ_IOC_AWAIT_COMPLETION \
11540 + _IOWR(VCHIQ_IOC_MAGIC, 7, VCHIQ_AWAIT_COMPLETION_T)
11541 +#define VCHIQ_IOC_DEQUEUE_MESSAGE \
11542 + _IOWR(VCHIQ_IOC_MAGIC, 8, VCHIQ_DEQUEUE_MESSAGE_T)
11543 +#define VCHIQ_IOC_GET_CLIENT_ID _IO(VCHIQ_IOC_MAGIC, 9)
11544 +#define VCHIQ_IOC_GET_CONFIG \
11545 + _IOWR(VCHIQ_IOC_MAGIC, 10, VCHIQ_GET_CONFIG_T)
11546 +#define VCHIQ_IOC_CLOSE_SERVICE _IO(VCHIQ_IOC_MAGIC, 11)
11547 +#define VCHIQ_IOC_USE_SERVICE _IO(VCHIQ_IOC_MAGIC, 12)
11548 +#define VCHIQ_IOC_RELEASE_SERVICE _IO(VCHIQ_IOC_MAGIC, 13)
11549 +#define VCHIQ_IOC_SET_SERVICE_OPTION \
11550 + _IOW(VCHIQ_IOC_MAGIC, 14, VCHIQ_SET_SERVICE_OPTION_T)
11551 +#define VCHIQ_IOC_DUMP_PHYS_MEM \
11552 + _IOW(VCHIQ_IOC_MAGIC, 15, VCHIQ_DUMP_MEM_T)
11553 +#define VCHIQ_IOC_MAX 15
11554 +
11555 +#endif
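From user space these ioctls are issued against the driver's character device in roughly the same order as the kernel API: connect the instance, create a service, then queue messages against the returned handle. A minimal sketch follows; it is not part of the patch, the /dev/vchiq node name and the "demo" fourcc are assumptions, and the service handle is assumed to be passed as the plain argument of the _IO-encoded ioctls.

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "vchiq_ioctl.h"

/* Hypothetical user-space example - not part of the patch. */
int demo_user_send(void)
{
	VCHIQ_CREATE_SERVICE_T create;
	VCHIQ_QUEUE_MESSAGE_T queue;
	VCHIQ_ELEMENT_T element;
	static const char msg[] = "ping";
	int fd = open("/dev/vchiq", O_RDWR);	/* assumed device node */

	if (fd < 0)
		return -1;
	if (ioctl(fd, VCHIQ_IOC_CONNECT, 0) != 0)
		goto fail;

	memset(&create, 0, sizeof(create));
	create.params.fourcc = 0x64656d6f;	/* "demo" - hypothetical service */
	create.is_open = 1;			/* open to the peer rather than just listen */
	create.is_vchi = 0;
	if (ioctl(fd, VCHIQ_IOC_CREATE_SERVICE, &create) != 0)
		goto fail;

	element.data = msg;
	element.size = sizeof(msg);
	queue.handle = create.handle;		/* filled in by the ioctl (OUT) */
	queue.count = 1;
	queue.elements = &element;
	if (ioctl(fd, VCHIQ_IOC_QUEUE_MESSAGE, &queue) != 0)
		goto fail;

	ioctl(fd, VCHIQ_IOC_CLOSE_SERVICE, create.handle);
	close(fd);
	return 0;

fail:
	close(fd);
	return -1;
}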
11556 diff -urN linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_kern_lib.c linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_kern_lib.c
11557 --- linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_kern_lib.c 1970-01-01 01:00:00.000000000 +0100
11558 +++ linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_kern_lib.c 2013-07-06 15:25:50.000000000 +0100
11559 @@ -0,0 +1,454 @@
11560 +/**
11561 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
11562 + *
11563 + * Redistribution and use in source and binary forms, with or without
11564 + * modification, are permitted provided that the following conditions
11565 + * are met:
11566 + * 1. Redistributions of source code must retain the above copyright
11567 + * notice, this list of conditions, and the following disclaimer,
11568 + * without modification.
11569 + * 2. Redistributions in binary form must reproduce the above copyright
11570 + * notice, this list of conditions and the following disclaimer in the
11571 + * documentation and/or other materials provided with the distribution.
11572 + * 3. The names of the above-listed copyright holders may not be used
11573 + * to endorse or promote products derived from this software without
11574 + * specific prior written permission.
11575 + *
11576 + * ALTERNATIVELY, this software may be distributed under the terms of the
11577 + * GNU General Public License ("GPL") version 2, as published by the Free
11578 + * Software Foundation.
11579 + *
11580 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
11581 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
11582 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
11583 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
11584 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
11585 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
11586 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
11587 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
11588 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
11589 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
11590 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
11591 + */
11592 +
11593 +/* ---- Include Files ---------------------------------------------------- */
11594 +
11595 +#include <linux/kernel.h>
11596 +#include <linux/module.h>
11597 +#include <linux/mutex.h>
11598 +
11599 +#include "vchiq_core.h"
11600 +#include "vchiq_arm.h"
11601 +
11602 +/* ---- Public Variables ------------------------------------------------- */
11603 +
11604 +/* ---- Private Constants and Types -------------------------------------- */
11605 +
11606 +struct bulk_waiter_node {
11607 + struct bulk_waiter bulk_waiter;
11608 + int pid;
11609 + struct list_head list;
11610 +};
11611 +
11612 +struct vchiq_instance_struct {
11613 + VCHIQ_STATE_T *state;
11614 +
11615 + int connected;
11616 +
11617 + struct list_head bulk_waiter_list;
11618 + struct mutex bulk_waiter_list_mutex;
11619 +};
11620 +
11621 +static VCHIQ_STATUS_T
11622 +vchiq_blocking_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle, void *data,
11623 + unsigned int size, VCHIQ_BULK_DIR_T dir);
11624 +
11625 +/****************************************************************************
11626 +*
11627 +* vchiq_initialise
11628 +*
11629 +***************************************************************************/
11630 +#define VCHIQ_INIT_RETRIES 10
11631 +VCHIQ_STATUS_T vchiq_initialise(VCHIQ_INSTANCE_T *instanceOut)
11632 +{
11633 + VCHIQ_STATUS_T status = VCHIQ_ERROR;
11634 + VCHIQ_STATE_T *state;
11635 + VCHIQ_INSTANCE_T instance = NULL;
11636 + int i;
11637 +
11638 + vchiq_log_trace(vchiq_core_log_level, "%s called", __func__);
11639 +
11640 + /* VideoCore may not be ready due to boot up timing.
11641 + It may never be ready if kernel and firmware are mismatched, so don't block forever. */
11642 +	for (i = 0; i < VCHIQ_INIT_RETRIES; i++) {
11643 + state = vchiq_get_state();
11644 + if (state)
11645 + break;
11646 + udelay(500);
11647 + }
11648 +	if (i == VCHIQ_INIT_RETRIES) {
11649 + vchiq_log_error(vchiq_core_log_level,
11650 + "%s: videocore not initialized\n", __func__);
11651 + goto failed;
11652 +	} else if (i > 0) {
11653 + vchiq_log_warning(vchiq_core_log_level,
11654 + "%s: videocore initialized after %d retries\n", __func__, i);
11655 + }
11656 +
11657 + instance = kzalloc(sizeof(*instance), GFP_KERNEL);
11658 + if (!instance) {
11659 + vchiq_log_error(vchiq_core_log_level,
11660 + "%s: error allocating vchiq instance\n", __func__);
11661 + goto failed;
11662 + }
11663 +
11664 + instance->connected = 0;
11665 + instance->state = state;
11666 + mutex_init(&instance->bulk_waiter_list_mutex);
11667 + INIT_LIST_HEAD(&instance->bulk_waiter_list);
11668 +
11669 + *instanceOut = instance;
11670 +
11671 + status = VCHIQ_SUCCESS;
11672 +
11673 +failed:
11674 + vchiq_log_trace(vchiq_core_log_level,
11675 + "%s(%p): returning %d", __func__, instance, status);
11676 +
11677 + return status;
11678 +}
11679 +EXPORT_SYMBOL(vchiq_initialise);
11680 +
11681 +/****************************************************************************
11682 +*
11683 +* vchiq_shutdown
11684 +*
11685 +***************************************************************************/
11686 +
11687 +VCHIQ_STATUS_T vchiq_shutdown(VCHIQ_INSTANCE_T instance)
11688 +{
11689 + VCHIQ_STATUS_T status;
11690 + VCHIQ_STATE_T *state = instance->state;
11691 +
11692 + vchiq_log_trace(vchiq_core_log_level,
11693 + "%s(%p) called", __func__, instance);
11694 +
11695 + if (mutex_lock_interruptible(&state->mutex) != 0)
11696 + return VCHIQ_RETRY;
11697 +
11698 + /* Remove all services */
11699 + status = vchiq_shutdown_internal(state, instance);
11700 +
11701 + mutex_unlock(&state->mutex);
11702 +
11703 + vchiq_log_trace(vchiq_core_log_level,
11704 + "%s(%p): returning %d", __func__, instance, status);
11705 +
11706 + if (status == VCHIQ_SUCCESS) {
11707 + struct list_head *pos, *next;
11708 + list_for_each_safe(pos, next,
11709 + &instance->bulk_waiter_list) {
11710 + struct bulk_waiter_node *waiter;
11711 + waiter = list_entry(pos,
11712 + struct bulk_waiter_node,
11713 + list);
11714 + list_del(pos);
11715 + vchiq_log_info(vchiq_arm_log_level,
11716 + "bulk_waiter - cleaned up %x "
11717 + "for pid %d",
11718 + (unsigned int)waiter, waiter->pid);
11719 + kfree(waiter);
11720 + }
11721 + kfree(instance);
11722 + }
11723 +
11724 + return status;
11725 +}
11726 +EXPORT_SYMBOL(vchiq_shutdown);
11727 +
11728 +/****************************************************************************
11729 +*
11730 +* vchiq_is_connected
11731 +*
11732 +***************************************************************************/
11733 +
11734 +int vchiq_is_connected(VCHIQ_INSTANCE_T instance)
11735 +{
11736 + return instance->connected;
11737 +}
11738 +
11739 +/****************************************************************************
11740 +*
11741 +* vchiq_connect
11742 +*
11743 +***************************************************************************/
11744 +
11745 +VCHIQ_STATUS_T vchiq_connect(VCHIQ_INSTANCE_T instance)
11746 +{
11747 + VCHIQ_STATUS_T status;
11748 + VCHIQ_STATE_T *state = instance->state;
11749 +
11750 + vchiq_log_trace(vchiq_core_log_level,
11751 + "%s(%p) called", __func__, instance);
11752 +
11753 + if (mutex_lock_interruptible(&state->mutex) != 0) {
11754 + vchiq_log_trace(vchiq_core_log_level,
11755 + "%s: call to mutex_lock failed", __func__);
11756 + status = VCHIQ_RETRY;
11757 + goto failed;
11758 + }
11759 + status = vchiq_connect_internal(state, instance);
11760 +
11761 + if (status == VCHIQ_SUCCESS)
11762 + instance->connected = 1;
11763 +
11764 + mutex_unlock(&state->mutex);
11765 +
11766 +failed:
11767 + vchiq_log_trace(vchiq_core_log_level,
11768 + "%s(%p): returning %d", __func__, instance, status);
11769 +
11770 + return status;
11771 +}
11772 +EXPORT_SYMBOL(vchiq_connect);
11773 +
11774 +/****************************************************************************
11775 +*
11776 +* vchiq_add_service
11777 +*
11778 +***************************************************************************/
11779 +
11780 +VCHIQ_STATUS_T vchiq_add_service(
11781 + VCHIQ_INSTANCE_T instance,
11782 + const VCHIQ_SERVICE_PARAMS_T *params,
11783 + VCHIQ_SERVICE_HANDLE_T *phandle)
11784 +{
11785 + VCHIQ_STATUS_T status;
11786 + VCHIQ_STATE_T *state = instance->state;
11787 + VCHIQ_SERVICE_T *service = NULL;
11788 + int srvstate;
11789 +
11790 + vchiq_log_trace(vchiq_core_log_level,
11791 + "%s(%p) called", __func__, instance);
11792 +
11793 + *phandle = VCHIQ_SERVICE_HANDLE_INVALID;
11794 +
11795 + srvstate = vchiq_is_connected(instance)
11796 + ? VCHIQ_SRVSTATE_LISTENING
11797 + : VCHIQ_SRVSTATE_HIDDEN;
11798 +
11799 + service = vchiq_add_service_internal(
11800 + state,
11801 + params,
11802 + srvstate,
11803 + instance);
11804 +
11805 + if (service) {
11806 + *phandle = service->handle;
11807 + status = VCHIQ_SUCCESS;
11808 + } else
11809 + status = VCHIQ_ERROR;
11810 +
11811 + vchiq_log_trace(vchiq_core_log_level,
11812 + "%s(%p): returning %d", __func__, instance, status);
11813 +
11814 + return status;
11815 +}
11816 +EXPORT_SYMBOL(vchiq_add_service);
11817 +
11818 +/****************************************************************************
11819 +*
11820 +* vchiq_open_service
11821 +*
11822 +***************************************************************************/
11823 +
11824 +VCHIQ_STATUS_T vchiq_open_service(
11825 + VCHIQ_INSTANCE_T instance,
11826 + const VCHIQ_SERVICE_PARAMS_T *params,
11827 + VCHIQ_SERVICE_HANDLE_T *phandle)
11828 +{
11829 + VCHIQ_STATUS_T status = VCHIQ_ERROR;
11830 + VCHIQ_STATE_T *state = instance->state;
11831 + VCHIQ_SERVICE_T *service = NULL;
11832 +
11833 + vchiq_log_trace(vchiq_core_log_level,
11834 + "%s(%p) called", __func__, instance);
11835 +
11836 + *phandle = VCHIQ_SERVICE_HANDLE_INVALID;
11837 +
11838 + if (!vchiq_is_connected(instance))
11839 + goto failed;
11840 +
11841 + service = vchiq_add_service_internal(state,
11842 + params,
11843 + VCHIQ_SRVSTATE_OPENING,
11844 + instance);
11845 +
11846 + if (service) {
11847 + status = vchiq_open_service_internal(service, current->pid);
11848 + if (status == VCHIQ_SUCCESS)
11849 + *phandle = service->handle;
11850 + else
11851 + vchiq_remove_service(service->handle);
11852 + }
11853 +
11854 +failed:
11855 + vchiq_log_trace(vchiq_core_log_level,
11856 + "%s(%p): returning %d", __func__, instance, status);
11857 +
11858 + return status;
11859 +}
11860 +EXPORT_SYMBOL(vchiq_open_service);
11861 +
11862 +VCHIQ_STATUS_T
11863 +vchiq_queue_bulk_transmit(VCHIQ_SERVICE_HANDLE_T handle,
11864 + const void *data, unsigned int size, void *userdata)
11865 +{
11866 + return vchiq_bulk_transfer(handle,
11867 + VCHI_MEM_HANDLE_INVALID, (void *)data, size, userdata,
11868 + VCHIQ_BULK_MODE_CALLBACK, VCHIQ_BULK_TRANSMIT);
11869 +}
11870 +EXPORT_SYMBOL(vchiq_queue_bulk_transmit);
11871 +
11872 +VCHIQ_STATUS_T
11873 +vchiq_queue_bulk_receive(VCHIQ_SERVICE_HANDLE_T handle, void *data,
11874 + unsigned int size, void *userdata)
11875 +{
11876 + return vchiq_bulk_transfer(handle,
11877 + VCHI_MEM_HANDLE_INVALID, data, size, userdata,
11878 + VCHIQ_BULK_MODE_CALLBACK, VCHIQ_BULK_RECEIVE);
11879 +}
11880 +EXPORT_SYMBOL(vchiq_queue_bulk_receive);
11881 +
11882 +VCHIQ_STATUS_T
11883 +vchiq_bulk_transmit(VCHIQ_SERVICE_HANDLE_T handle, const void *data,
11884 + unsigned int size, void *userdata, VCHIQ_BULK_MODE_T mode)
11885 +{
11886 + VCHIQ_STATUS_T status;
11887 +
11888 + switch (mode) {
11889 + case VCHIQ_BULK_MODE_NOCALLBACK:
11890 + case VCHIQ_BULK_MODE_CALLBACK:
11891 + status = vchiq_bulk_transfer(handle,
11892 + VCHI_MEM_HANDLE_INVALID, (void *)data, size, userdata,
11893 + mode, VCHIQ_BULK_TRANSMIT);
11894 + break;
11895 + case VCHIQ_BULK_MODE_BLOCKING:
11896 + status = vchiq_blocking_bulk_transfer(handle,
11897 + (void *)data, size, VCHIQ_BULK_TRANSMIT);
11898 + break;
11899 + default:
11900 + return VCHIQ_ERROR;
11901 + }
11902 +
11903 + return status;
11904 +}
11905 +EXPORT_SYMBOL(vchiq_bulk_transmit);
11906 +
11907 +VCHIQ_STATUS_T
11908 +vchiq_bulk_receive(VCHIQ_SERVICE_HANDLE_T handle, void *data,
11909 + unsigned int size, void *userdata, VCHIQ_BULK_MODE_T mode)
11910 +{
11911 + VCHIQ_STATUS_T status;
11912 +
11913 + switch (mode) {
11914 + case VCHIQ_BULK_MODE_NOCALLBACK:
11915 + case VCHIQ_BULK_MODE_CALLBACK:
11916 + status = vchiq_bulk_transfer(handle,
11917 + VCHI_MEM_HANDLE_INVALID, data, size, userdata,
11918 + mode, VCHIQ_BULK_RECEIVE);
11919 + break;
11920 + case VCHIQ_BULK_MODE_BLOCKING:
11921 + status = vchiq_blocking_bulk_transfer(handle,
11922 + (void *)data, size, VCHIQ_BULK_RECEIVE);
11923 + break;
11924 + default:
11925 + return VCHIQ_ERROR;
11926 + }
11927 +
11928 + return status;
11929 +}
11930 +EXPORT_SYMBOL(vchiq_bulk_receive);
11931 +
11932 +static VCHIQ_STATUS_T
11933 +vchiq_blocking_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle, void *data,
11934 + unsigned int size, VCHIQ_BULK_DIR_T dir)
11935 +{
11936 + VCHIQ_INSTANCE_T instance;
11937 + VCHIQ_SERVICE_T *service;
11938 + VCHIQ_STATUS_T status;
11939 + struct bulk_waiter_node *waiter = NULL;
11940 + struct list_head *pos;
11941 +
11942 + service = find_service_by_handle(handle);
11943 + if (!service)
11944 + return VCHIQ_ERROR;
11945 +
11946 + instance = service->instance;
11947 +
11948 + unlock_service(service);
11949 +
11950 + mutex_lock(&instance->bulk_waiter_list_mutex);
11951 + list_for_each(pos, &instance->bulk_waiter_list) {
11952 + if (list_entry(pos, struct bulk_waiter_node,
11953 + list)->pid == current->pid) {
11954 + waiter = list_entry(pos,
11955 + struct bulk_waiter_node,
11956 + list);
11957 + list_del(pos);
11958 + break;
11959 + }
11960 + }
11961 + mutex_unlock(&instance->bulk_waiter_list_mutex);
11962 +
11963 + if (waiter) {
11964 + VCHIQ_BULK_T *bulk = waiter->bulk_waiter.bulk;
11965 + if (bulk) {
11966 + /* This thread has an outstanding bulk transfer. */
11967 + if ((bulk->data != data) ||
11968 + (bulk->size != size)) {
11969 + /* This is not a retry of the previous one.
11970 + ** Cancel the signal when the transfer
11971 + ** completes. */
11972 + spin_lock(&bulk_waiter_spinlock);
11973 + bulk->userdata = NULL;
11974 + spin_unlock(&bulk_waiter_spinlock);
11975 + }
11976 + }
11977 + }
11978 +
11979 + if (!waiter) {
11980 + waiter = kzalloc(sizeof(struct bulk_waiter_node), GFP_KERNEL);
11981 + if (!waiter) {
11982 + vchiq_log_error(vchiq_core_log_level,
11983 + "%s - out of memory", __func__);
11984 + return VCHIQ_ERROR;
11985 + }
11986 + }
11987 +
11988 + status = vchiq_bulk_transfer(handle, VCHI_MEM_HANDLE_INVALID,
11989 + data, size, &waiter->bulk_waiter, VCHIQ_BULK_MODE_BLOCKING,
11990 + dir);
11991 + if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) ||
11992 + !waiter->bulk_waiter.bulk) {
11993 + VCHIQ_BULK_T *bulk = waiter->bulk_waiter.bulk;
11994 + if (bulk) {
11995 + /* Cancel the signal when the transfer
11996 + ** completes. */
11997 + spin_lock(&bulk_waiter_spinlock);
11998 + bulk->userdata = NULL;
11999 + spin_unlock(&bulk_waiter_spinlock);
12000 + }
12001 + kfree(waiter);
12002 + } else {
12003 + waiter->pid = current->pid;
12004 + mutex_lock(&instance->bulk_waiter_list_mutex);
12005 + list_add(&waiter->list, &instance->bulk_waiter_list);
12006 + mutex_unlock(&instance->bulk_waiter_list_mutex);
12007 + vchiq_log_info(vchiq_arm_log_level,
12008 + "saved bulk_waiter %x for pid %d",
12009 + (unsigned int)waiter, current->pid);
12010 + }
12011 +
12012 + return status;
12013 +}
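vchiq_blocking_bulk_transfer() above parks a per-pid bulk_waiter_node when a blocking transfer returns VCHIQ_RETRY while a bulk is still outstanding, so the same thread can call again with the same buffer and size and resume waiting on the existing transfer instead of queuing a new one. Below is a sketch of a kernel caller that leans on that behaviour, assuming a service handle obtained as in vchiq_if.h; it is not part of the patch.

#include <linux/errno.h>
#include <linux/sched.h>
#include "vchiq_if.h"

/* Hypothetical example caller - not part of the patch. */
static int demo_bulk_write(VCHIQ_SERVICE_HANDLE_T service,
	const void *buf, unsigned int len)
{
	VCHIQ_STATUS_T status;

	do {
		/* VCHIQ_BULK_MODE_BLOCKING sleeps until the transfer completes.
		 * VCHIQ_RETRY means the wait was interrupted; retrying with the
		 * same buffer and size reuses the bulk_waiter saved for this
		 * pid rather than queueing a second bulk. */
		status = vchiq_bulk_transmit(service, buf, len, NULL,
			VCHIQ_BULK_MODE_BLOCKING);
	} while (status == VCHIQ_RETRY && !fatal_signal_pending(current));

	return (status == VCHIQ_SUCCESS) ? 0 : -EIO;
}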
12014 diff -urN linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_memdrv.h linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_memdrv.h
12015 --- linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_memdrv.h 1970-01-01 01:00:00.000000000 +0100
12016 +++ linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_memdrv.h 2013-07-06 15:25:50.000000000 +0100
12017 @@ -0,0 +1,71 @@
12018 +/**
12019 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
12020 + *
12021 + * Redistribution and use in source and binary forms, with or without
12022 + * modification, are permitted provided that the following conditions
12023 + * are met:
12024 + * 1. Redistributions of source code must retain the above copyright
12025 + * notice, this list of conditions, and the following disclaimer,
12026 + * without modification.
12027 + * 2. Redistributions in binary form must reproduce the above copyright
12028 + * notice, this list of conditions and the following disclaimer in the
12029 + * documentation and/or other materials provided with the distribution.
12030 + * 3. The names of the above-listed copyright holders may not be used
12031 + * to endorse or promote products derived from this software without
12032 + * specific prior written permission.
12033 + *
12034 + * ALTERNATIVELY, this software may be distributed under the terms of the
12035 + * GNU General Public License ("GPL") version 2, as published by the Free
12036 + * Software Foundation.
12037 + *
12038 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
12039 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
12040 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
12041 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
12042 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
12043 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
12044 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
12045 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
12046 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
12047 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
12048 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
12049 + */
12050 +
12051 +#ifndef VCHIQ_MEMDRV_H
12052 +#define VCHIQ_MEMDRV_H
12053 +
12054 +/* ---- Include Files ----------------------------------------------------- */
12055 +
12056 +#include <linux/kernel.h>
12057 +#include "vchiq_if.h"
12058 +
12059 +/* ---- Constants and Types ---------------------------------------------- */
12060 +
12061 +typedef struct {
12062 + void *armSharedMemVirt;
12063 + dma_addr_t armSharedMemPhys;
12064 + size_t armSharedMemSize;
12065 +
12066 + void *vcSharedMemVirt;
12067 + dma_addr_t vcSharedMemPhys;
12068 + size_t vcSharedMemSize;
12069 +} VCHIQ_SHARED_MEM_INFO_T;
12070 +
12071 +/* ---- Variable Externs ------------------------------------------------- */
12072 +
12073 +/* ---- Function Prototypes ---------------------------------------------- */
12074 +
12075 +void vchiq_get_shared_mem_info(VCHIQ_SHARED_MEM_INFO_T *info);
12076 +
12077 +VCHIQ_STATUS_T vchiq_memdrv_initialise(void);
12078 +
12079 +VCHIQ_STATUS_T vchiq_userdrv_create_instance(
12080 + const VCHIQ_PLATFORM_DATA_T * platform_data);
12081 +
12082 +VCHIQ_STATUS_T vchiq_userdrv_suspend(
12083 + const VCHIQ_PLATFORM_DATA_T * platform_data);
12084 +
12085 +VCHIQ_STATUS_T vchiq_userdrv_resume(
12086 + const VCHIQ_PLATFORM_DATA_T * platform_data);
12087 +
12088 +#endif
12089 diff -urN linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_pagelist.h linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_pagelist.h
12090 --- linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_pagelist.h 1970-01-01 01:00:00.000000000 +0100
12091 +++ linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_pagelist.h 2013-07-06 15:25:50.000000000 +0100
12092 @@ -0,0 +1,58 @@
12093 +/**
12094 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
12095 + *
12096 + * Redistribution and use in source and binary forms, with or without
12097 + * modification, are permitted provided that the following conditions
12098 + * are met:
12099 + * 1. Redistributions of source code must retain the above copyright
12100 + * notice, this list of conditions, and the following disclaimer,
12101 + * without modification.
12102 + * 2. Redistributions in binary form must reproduce the above copyright
12103 + * notice, this list of conditions and the following disclaimer in the
12104 + * documentation and/or other materials provided with the distribution.
12105 + * 3. The names of the above-listed copyright holders may not be used
12106 + * to endorse or promote products derived from this software without
12107 + * specific prior written permission.
12108 + *
12109 + * ALTERNATIVELY, this software may be distributed under the terms of the
12110 + * GNU General Public License ("GPL") version 2, as published by the Free
12111 + * Software Foundation.
12112 + *
12113 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
12114 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
12115 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
12116 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
12117 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
12118 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
12119 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
12120 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
12121 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
12122 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
12123 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
12124 + */
12125 +
12126 +#ifndef VCHIQ_PAGELIST_H
12127 +#define VCHIQ_PAGELIST_H
12128 +
12129 +#ifndef PAGE_SIZE
12130 +#define PAGE_SIZE 4096
12131 +#endif
12132 +#define CACHE_LINE_SIZE 32
12133 +#define PAGELIST_WRITE 0
12134 +#define PAGELIST_READ 1
12135 +#define PAGELIST_READ_WITH_FRAGMENTS 2
12136 +
12137 +typedef struct pagelist_struct {
12138 + unsigned long length;
12139 + unsigned short type;
12140 + unsigned short offset;
12141 + unsigned long addrs[1]; /* N.B. 12 LSBs hold the number of following
12142 + pages at consecutive addresses. */
12143 +} PAGELIST_T;
12144 +
12145 +typedef struct fragments_struct {
12146 + char headbuf[CACHE_LINE_SIZE];
12147 + char tailbuf[CACHE_LINE_SIZE];
12148 +} FRAGMENTS_T;
12149 +
12150 +#endif /* VCHIQ_PAGELIST_H */
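Each addrs[] entry packs a page-aligned bus address together with the run length described in the comment above. Here is a minimal sketch of how an entry can be unpacked, assuming 4 KiB pages so the run count sits in the low 12 bits; it is not part of the patch, and the entry count is taken as a parameter because PAGELIST_T only records the byte length.

#include <linux/kernel.h>
#include "vchiq_pagelist.h"

/* Hypothetical example walker - not part of the patch. */
static void demo_walk_pagelist(const PAGELIST_T *pagelist, unsigned int num_entries)
{
	unsigned int i;

	for (i = 0; i < num_entries; i++) {
		unsigned long entry = pagelist->addrs[i];
		unsigned long base = entry & ~(unsigned long)(PAGE_SIZE - 1);
		/* The 12 LSBs hold the number of *following* pages at
		 * consecutive addresses, so the run spans (extra + 1) pages. */
		unsigned long extra = entry & (PAGE_SIZE - 1);

		pr_info("run %u: bus addr %#lx, %lu page(s)\n",
			i, base, extra + 1);
	}
}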
12151 diff -urN linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_proc.c linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_proc.c
12152 --- linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_proc.c 1970-01-01 01:00:00.000000000 +0100
12153 +++ linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_proc.c 2013-07-06 15:25:50.000000000 +0100
12154 @@ -0,0 +1,243 @@
12155 +/**
12156 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
12157 + *
12158 + * Redistribution and use in source and binary forms, with or without
12159 + * modification, are permitted provided that the following conditions
12160 + * are met:
12161 + * 1. Redistributions of source code must retain the above copyright
12162 + * notice, this list of conditions, and the following disclaimer,
12163 + * without modification.
12164 + * 2. Redistributions in binary form must reproduce the above copyright
12165 + * notice, this list of conditions and the following disclaimer in the
12166 + * documentation and/or other materials provided with the distribution.
12167 + * 3. The names of the above-listed copyright holders may not be used
12168 + * to endorse or promote products derived from this software without
12169 + * specific prior written permission.
12170 + *
12171 + * ALTERNATIVELY, this software may be distributed under the terms of the
12172 + * GNU General Public License ("GPL") version 2, as published by the Free
12173 + * Software Foundation.
12174 + *
12175 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
12176 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
12177 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
12178 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
12179 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
12180 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
12181 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
12182 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
12183 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
12184 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
12185 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
12186 + */
12187 +
12188 +
12189 +#include <linux/proc_fs.h>
12190 +#include "vchiq_core.h"
12191 +#include "vchiq_arm.h"
12192 +
12193 +struct vchiq_proc_info {
12194 + /* Global 'vc' proc entry used by all instances */
12195 + struct proc_dir_entry *vc_cfg_dir;
12196 +
12197 + /* one entry per client process */
12198 + struct proc_dir_entry *clients;
12199 +
12200 + /* log categories */
12201 + struct proc_dir_entry *log_categories;
12202 +};
12203 +
12204 +static struct vchiq_proc_info proc_info;
12205 +
12206 +struct proc_dir_entry *vchiq_proc_top(void)
12207 +{
12208 + BUG_ON(proc_info.vc_cfg_dir == NULL);
12209 + return proc_info.vc_cfg_dir;
12210 +}
12211 +
12212 +/****************************************************************************
12213 +*
12214 +* log category entries
12215 +*
12216 +***************************************************************************/
12217 +#define PROC_WRITE_BUF_SIZE 256
12218 +
12219 +#define VCHIQ_LOG_ERROR_STR "error"
12220 +#define VCHIQ_LOG_WARNING_STR "warning"
12221 +#define VCHIQ_LOG_INFO_STR "info"
12222 +#define VCHIQ_LOG_TRACE_STR "trace"
12223 +
12224 +static int log_cfg_read(char *buffer,
12225 + char **start,
12226 + off_t off,
12227 + int count,
12228 + int *eof,
12229 + void *data)
12230 +{
12231 + int len = 0;
12232 + char *log_value = NULL;
12233 +
12234 + switch (*((int *)data)) {
12235 + case VCHIQ_LOG_ERROR:
12236 + log_value = VCHIQ_LOG_ERROR_STR;
12237 + break;
12238 + case VCHIQ_LOG_WARNING:
12239 + log_value = VCHIQ_LOG_WARNING_STR;
12240 + break;
12241 + case VCHIQ_LOG_INFO:
12242 + log_value = VCHIQ_LOG_INFO_STR;
12243 + break;
12244 + case VCHIQ_LOG_TRACE:
12245 + log_value = VCHIQ_LOG_TRACE_STR;
12246 + break;
12247 + default:
12248 + break;
12249 + }
12250 +
12251 + len += sprintf(buffer + len,
12252 + "%s\n",
12253 + log_value ? log_value : "(null)");
12254 +
12255 + return len;
12256 +}
12257 +
12258 +
12259 +static int log_cfg_write(struct file *file,
12260 + const char __user *buffer,
12261 + unsigned long count,
12262 + void *data)
12263 +{
12264 + int *log_module = data;
12265 + char kbuf[PROC_WRITE_BUF_SIZE + 1];
12266 +
12267 + (void)file;
12268 +
12269 + memset(kbuf, 0, PROC_WRITE_BUF_SIZE + 1);
12270 + if (count >= PROC_WRITE_BUF_SIZE)
12271 + count = PROC_WRITE_BUF_SIZE;
12272 +
12273 + if (copy_from_user(kbuf,
12274 + buffer,
12275 + count) != 0)
12276 + return -EFAULT;
12277 + kbuf[count - 1] = 0;
12278 +
12279 + if (strncmp("error", kbuf, strlen("error")) == 0)
12280 + *log_module = VCHIQ_LOG_ERROR;
12281 + else if (strncmp("warning", kbuf, strlen("warning")) == 0)
12282 + *log_module = VCHIQ_LOG_WARNING;
12283 + else if (strncmp("info", kbuf, strlen("info")) == 0)
12284 + *log_module = VCHIQ_LOG_INFO;
12285 + else if (strncmp("trace", kbuf, strlen("trace")) == 0)
12286 + *log_module = VCHIQ_LOG_TRACE;
12287 + else
12288 + *log_module = VCHIQ_LOG_DEFAULT;
12289 +
12290 + return count;
12291 +}
12292 +
12293 +/* Log category proc entries */
12294 +struct vchiq_proc_log_entry {
12295 + const char *name;
12296 + int *plevel;
12297 + struct proc_dir_entry *dir;
12298 +};
12299 +
12300 +static struct vchiq_proc_log_entry vchiq_proc_log_entries[] = {
12301 + { "core", &vchiq_core_log_level },
12302 + { "msg", &vchiq_core_msg_log_level },
12303 + { "sync", &vchiq_sync_log_level },
12304 + { "susp", &vchiq_susp_log_level },
12305 + { "arm", &vchiq_arm_log_level },
12306 +};
12307 +static int n_log_entries =
12308 + sizeof(vchiq_proc_log_entries)/sizeof(vchiq_proc_log_entries[0]);
12309 +
12310 +/* create an entry under /proc/vc/log for each log category */
12311 +static int vchiq_proc_create_log_entries(struct proc_dir_entry *top)
12312 +{
12313 + struct proc_dir_entry *dir;
12314 + size_t i;
12315 + int ret = 0;
12316 +#if 0
12317 + dir = proc_mkdir("log", proc_info.vc_cfg_dir);
12318 + if (!dir)
12319 + return -ENOMEM;
12320 + proc_info.log_categories = dir;
12321 +
12322 + for (i = 0; i < n_log_entries; i++) {
12323 + dir = create_proc_entry(vchiq_proc_log_entries[i].name,
12324 + 0644,
12325 + proc_info.log_categories);
12326 + if (!dir) {
12327 + ret = -ENOMEM;
12328 + break;
12329 + }
12330 +
12331 + dir->read_proc = &log_cfg_read;
12332 + dir->write_proc = &log_cfg_write;
12333 + dir->data = (void *)vchiq_proc_log_entries[i].plevel;
12334 +
12335 + vchiq_proc_log_entries[i].dir = dir;
12336 + }
12337 +#endif
12338 + return ret;
12339 +}
12340 +
12341 +
12342 +int vchiq_proc_init(void)
12343 +{
12344 + BUG_ON(proc_info.vc_cfg_dir != NULL);
12345 +
12346 + proc_info.vc_cfg_dir = proc_mkdir("vc", NULL);
12347 + if (proc_info.vc_cfg_dir == NULL)
12348 + goto fail;
12349 +
12350 + proc_info.clients = proc_mkdir("clients",
12351 + proc_info.vc_cfg_dir);
12352 + if (!proc_info.clients)
12353 + goto fail;
12354 +
12355 + if (vchiq_proc_create_log_entries(proc_info.vc_cfg_dir) != 0)
12356 + goto fail;
12357 +
12358 + return 0;
12359 +
12360 +fail:
12361 + vchiq_proc_deinit();
12362 + vchiq_log_error(vchiq_arm_log_level,
12363 + "%s: failed to create proc directory",
12364 + __func__);
12365 +
12366 + return -ENOMEM;
12367 +}
12368 +
12369 +/* remove all the proc entries */
12370 +void vchiq_proc_deinit(void)
12371 +{
12372 + /* log category entries */
12373 +#if 0
12374 + if (proc_info.log_categories) {
12375 + size_t i;
12376 + for (i = 0; i < n_log_entries; i++)
12377 + if (vchiq_proc_log_entries[i].dir)
12378 + remove_proc_entry(
12379 + vchiq_proc_log_entries[i].name,
12380 + proc_info.log_categories);
12381 +
12382 + remove_proc_entry(proc_info.log_categories->name,
12383 + proc_info.vc_cfg_dir);
12384 + }
12385 + if (proc_info.clients)
12386 + remove_proc_entry(proc_info.clients->name,
12387 + proc_info.vc_cfg_dir);
12388 + if (proc_info.vc_cfg_dir)
12389 + remove_proc_entry(proc_info.vc_cfg_dir->name, NULL);
12390 +#endif
12391 +}
12392 +
12393 +struct proc_dir_entry *vchiq_clients_top(void)
12394 +{
12395 + return proc_info.clients;
12396 +}
12397 +
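log_cfg_write() maps the strings "error", "warning", "info" and "trace" onto the log levels, and any other input falls back to VCHIQ_LOG_DEFAULT. Note that in this revision the creation of the per-category entries is compiled out with #if 0, so the sketch below only applies if they are re-enabled; the /proc/vc/log/core path follows the proc_mkdir calls above but is otherwise an assumption, and the snippet is not part of the patch.

#include <stdio.h>

/* Hypothetical user-space example - not part of the patch. */
static int demo_set_core_log_level(const char *level)
{
	FILE *f = fopen("/proc/vc/log/core", "w");

	if (!f)
		return -1;
	/* Accepted values: "error", "warning", "info", "trace";
	 * anything else resets the category to its default level. */
	fputs(level, f);
	fclose(f);
	return 0;
}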
12398 diff -urN linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_shim.c linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_shim.c
12399 --- linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_shim.c 1970-01-01 01:00:00.000000000 +0100
12400 +++ linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_shim.c 2013-07-06 15:25:50.000000000 +0100
12401 @@ -0,0 +1,815 @@
12402 +/**
12403 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
12404 + *
12405 + * Redistribution and use in source and binary forms, with or without
12406 + * modification, are permitted provided that the following conditions
12407 + * are met:
12408 + * 1. Redistributions of source code must retain the above copyright
12409 + * notice, this list of conditions, and the following disclaimer,
12410 + * without modification.
12411 + * 2. Redistributions in binary form must reproduce the above copyright
12412 + * notice, this list of conditions and the following disclaimer in the
12413 + * documentation and/or other materials provided with the distribution.
12414 + * 3. The names of the above-listed copyright holders may not be used
12415 + * to endorse or promote products derived from this software without
12416 + * specific prior written permission.
12417 + *
12418 + * ALTERNATIVELY, this software may be distributed under the terms of the
12419 + * GNU General Public License ("GPL") version 2, as published by the Free
12420 + * Software Foundation.
12421 + *
12422 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
12423 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
12424 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
12425 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
12426 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
12427 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
12428 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
12429 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
12430 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
12431 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
12432 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
12433 + */
12434 +#include <linux/module.h>
12435 +#include <linux/types.h>
12436 +
12437 +#include "interface/vchi/vchi.h"
12438 +#include "vchiq.h"
12439 +#include "vchiq_core.h"
12440 +
12441 +#include "vchiq_util.h"
12442 +
12443 +#include <stddef.h>
12444 +
12445 +#define vchiq_status_to_vchi(status) ((int32_t)status)
12446 +
12447 +typedef struct {
12448 + VCHIQ_SERVICE_HANDLE_T handle;
12449 +
12450 + VCHIU_QUEUE_T queue;
12451 +
12452 + VCHI_CALLBACK_T callback;
12453 + void *callback_param;
12454 +} SHIM_SERVICE_T;
12455 +
12456 +/* ----------------------------------------------------------------------
12457 + * return pointer to the mphi message driver function table
12458 + * -------------------------------------------------------------------- */
12459 +const VCHI_MESSAGE_DRIVER_T *
12460 +vchi_mphi_message_driver_func_table(void)
12461 +{
12462 + return NULL;
12463 +}
12464 +
12465 +/* ----------------------------------------------------------------------
12466 + * return a pointer to the 'single' connection driver fops
12467 + * -------------------------------------------------------------------- */
12468 +const VCHI_CONNECTION_API_T *
12469 +single_get_func_table(void)
12470 +{
12471 + return NULL;
12472 +}
12473 +
12474 +VCHI_CONNECTION_T *vchi_create_connection(
12475 + const VCHI_CONNECTION_API_T *function_table,
12476 + const VCHI_MESSAGE_DRIVER_T *low_level)
12477 +{
12478 + (void)function_table;
12479 + (void)low_level;
12480 + return NULL;
12481 +}
12482 +
12483 +/***********************************************************
12484 + * Name: vchi_msg_peek
12485 + *
12486 + * Arguments: const VCHI_SERVICE_HANDLE_T handle,
12487 + * void **data,
12488 + * uint32_t *msg_size,
12489 +
12490 + *
12491 + *
12492 + *
12493 + * Description: Routine to return a pointer to the current message (to allow
12494 + *              in-place processing). The message can be removed using
12495 + *              vchi_msg_remove when you're finished.
12496 + *
12497 + * Returns: int32_t - success == 0
12498 + *
12499 + ***********************************************************/
12500 +int32_t vchi_msg_peek(VCHI_SERVICE_HANDLE_T handle,
12501 + void **data,
12502 + uint32_t *msg_size,
12503 + VCHI_FLAGS_T flags)
12504 +{
12505 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
12506 + VCHIQ_HEADER_T *header;
12507 +
12508 + WARN_ON((flags != VCHI_FLAGS_NONE) &&
12509 + (flags != VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE));
12510 +
12511 + if (flags == VCHI_FLAGS_NONE)
12512 + if (vchiu_queue_is_empty(&service->queue))
12513 + return -1;
12514 +
12515 + header = vchiu_queue_peek(&service->queue);
12516 +
12517 + *data = header->data;
12518 + *msg_size = header->size;
12519 +
12520 + return 0;
12521 +}
12522 +EXPORT_SYMBOL(vchi_msg_peek);
12523 +
12524 +/***********************************************************
12525 + * Name: vchi_msg_remove
12526 + *
12527 + * Arguments: const VCHI_SERVICE_HANDLE_T handle,
12528 + *
12529 + * Description: Routine to remove a message (after it has been read with
12530 + * vchi_msg_peek)
12531 + *
12532 + * Returns: int32_t - success == 0
12533 + *
12534 + ***********************************************************/
12535 +int32_t vchi_msg_remove(VCHI_SERVICE_HANDLE_T handle)
12536 +{
12537 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
12538 + VCHIQ_HEADER_T *header;
12539 +
12540 + header = vchiu_queue_pop(&service->queue);
12541 +
12542 + vchiq_release_message(service->handle, header);
12543 +
12544 + return 0;
12545 +}
12546 +EXPORT_SYMBOL(vchi_msg_remove);
12547 +
12548 +/***********************************************************
12549 + * Name: vchi_msg_queue
12550 + *
12551 + * Arguments: VCHI_SERVICE_HANDLE_T handle,
12552 + * const void *data,
12553 + * uint32_t data_size,
12554 + * VCHI_FLAGS_T flags,
12555 + * void *msg_handle,
12556 + *
12557 + * Description: Thin wrapper to queue a message onto a connection
12558 + *
12559 + * Returns: int32_t - success == 0
12560 + *
12561 + ***********************************************************/
12562 +int32_t vchi_msg_queue(VCHI_SERVICE_HANDLE_T handle,
12563 + const void *data,
12564 + uint32_t data_size,
12565 + VCHI_FLAGS_T flags,
12566 + void *msg_handle)
12567 +{
12568 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
12569 + VCHIQ_ELEMENT_T element = {data, data_size};
12570 + VCHIQ_STATUS_T status;
12571 +
12572 + (void)msg_handle;
12573 +
12574 + WARN_ON(flags != VCHI_FLAGS_BLOCK_UNTIL_QUEUED);
12575 +
12576 + status = vchiq_queue_message(service->handle, &element, 1);
12577 +
12578 + /* vchiq_queue_message() may return VCHIQ_RETRY, so we need to
12579 + ** implement a retry mechanism since this function is supposed
12580 + ** to block until queued
12581 + */
12582 + while (status == VCHIQ_RETRY) {
12583 + msleep(1);
12584 + status = vchiq_queue_message(service->handle, &element, 1);
12585 + }
12586 +
12587 + return vchiq_status_to_vchi(status);
12588 +}
12589 +EXPORT_SYMBOL(vchi_msg_queue);
12590 +
12591 +/***********************************************************
12592 + * Name: vchi_bulk_queue_receive
12593 + *
12594 + * Arguments: VCHI_BULK_HANDLE_T handle,
12595 + * void *data_dst,
12596 + * const uint32_t data_size,
12597 + * VCHI_FLAGS_T flags
12598 + * void *bulk_handle
12599 + *
12600 + * Description: Routine to set up a receive buffer
12601 + *
12602 + * Returns: int32_t - success == 0
12603 + *
12604 + ***********************************************************/
12605 +int32_t vchi_bulk_queue_receive(VCHI_SERVICE_HANDLE_T handle,
12606 + void *data_dst,
12607 + uint32_t data_size,
12608 + VCHI_FLAGS_T flags,
12609 + void *bulk_handle)
12610 +{
12611 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
12612 + VCHIQ_BULK_MODE_T mode;
12613 + VCHIQ_STATUS_T status;
12614 +
12615 + switch ((int)flags) {
12616 + case VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE
12617 + | VCHI_FLAGS_BLOCK_UNTIL_QUEUED:
12618 + WARN_ON(!service->callback);
12619 + mode = VCHIQ_BULK_MODE_CALLBACK;
12620 + break;
12621 + case VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE:
12622 + mode = VCHIQ_BULK_MODE_BLOCKING;
12623 + break;
12624 + case VCHI_FLAGS_BLOCK_UNTIL_QUEUED:
12625 + case VCHI_FLAGS_NONE:
12626 + mode = VCHIQ_BULK_MODE_NOCALLBACK;
12627 + break;
12628 + default:
12629 + WARN(1, "unsupported message\n");
12630 + return vchiq_status_to_vchi(VCHIQ_ERROR);
12631 + }
12632 +
12633 + status = vchiq_bulk_receive(service->handle, data_dst, data_size,
12634 + bulk_handle, mode);
12635 +
12636 + /* vchiq_bulk_receive() may return VCHIQ_RETRY, so we need to
12637 + ** implement a retry mechanism since this function is supposed
12638 + ** to block until queued
12639 + */
12640 + while (status == VCHIQ_RETRY) {
12641 + msleep(1);
12642 + status = vchiq_bulk_receive(service->handle, data_dst,
12643 + data_size, bulk_handle, mode);
12644 + }
12645 +
12646 + return vchiq_status_to_vchi(status);
12647 +}
12648 +EXPORT_SYMBOL(vchi_bulk_queue_receive);
12649 +
12650 +/***********************************************************
12651 + * Name: vchi_bulk_queue_transmit
12652 + *
12653 + * Arguments: VCHI_BULK_HANDLE_T handle,
12654 + * const void *data_src,
12655 + * uint32_t data_size,
12656 + * VCHI_FLAGS_T flags,
12657 + * void *bulk_handle
12658 + *
12659 + * Description: Routine to transmit some data
12660 + *
12661 + * Returns: int32_t - success == 0
12662 + *
12663 + ***********************************************************/
12664 +int32_t vchi_bulk_queue_transmit(VCHI_SERVICE_HANDLE_T handle,
12665 + const void *data_src,
12666 + uint32_t data_size,
12667 + VCHI_FLAGS_T flags,
12668 + void *bulk_handle)
12669 +{
12670 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
12671 + VCHIQ_BULK_MODE_T mode;
12672 + VCHIQ_STATUS_T status;
12673 +
12674 + switch ((int)flags) {
12675 + case VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE
12676 + | VCHI_FLAGS_BLOCK_UNTIL_QUEUED:
12677 + WARN_ON(!service->callback);
12678 + mode = VCHIQ_BULK_MODE_CALLBACK;
12679 + break;
12680 + case VCHI_FLAGS_BLOCK_UNTIL_DATA_READ:
12681 + case VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE:
12682 + mode = VCHIQ_BULK_MODE_BLOCKING;
12683 + break;
12684 + case VCHI_FLAGS_BLOCK_UNTIL_QUEUED:
12685 + case VCHI_FLAGS_NONE:
12686 + mode = VCHIQ_BULK_MODE_NOCALLBACK;
12687 + break;
12688 + default:
12689 + WARN(1, "unsupported message\n");
12690 + return vchiq_status_to_vchi(VCHIQ_ERROR);
12691 + }
12692 +
12693 + status = vchiq_bulk_transmit(service->handle, data_src, data_size,
12694 + bulk_handle, mode);
12695 +
12696 + /* vchiq_bulk_transmit() may return VCHIQ_RETRY, so we need to
12697 + ** implement a retry mechanism since this function is supposed
12698 + ** to block until queued
12699 + */
12700 + while (status == VCHIQ_RETRY) {
12701 + msleep(1);
12702 + status = vchiq_bulk_transmit(service->handle, data_src,
12703 + data_size, bulk_handle, mode);
12704 + }
12705 +
12706 + return vchiq_status_to_vchi(status);
12707 +}
12708 +EXPORT_SYMBOL(vchi_bulk_queue_transmit);
12709 +
12710 +/***********************************************************
12711 + * Name: vchi_msg_dequeue
12712 + *
12713 + * Arguments: VCHI_SERVICE_HANDLE_T handle,
12714 + * void *data,
12715 + * uint32_t max_data_size_to_read,
12716 + * uint32_t *actual_msg_size
12717 + * VCHI_FLAGS_T flags
12718 + *
12719 + * Description: Routine to dequeue a message into the supplied buffer
12720 + *
12721 + * Returns: int32_t - success == 0
12722 + *
12723 + ***********************************************************/
12724 +int32_t vchi_msg_dequeue(VCHI_SERVICE_HANDLE_T handle,
12725 + void *data,
12726 + uint32_t max_data_size_to_read,
12727 + uint32_t *actual_msg_size,
12728 + VCHI_FLAGS_T flags)
12729 +{
12730 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
12731 + VCHIQ_HEADER_T *header;
12732 +
12733 + WARN_ON((flags != VCHI_FLAGS_NONE) &&
12734 + (flags != VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE));
12735 +
12736 + if (flags == VCHI_FLAGS_NONE)
12737 + if (vchiu_queue_is_empty(&service->queue))
12738 + return -1;
12739 +
12740 + header = vchiu_queue_pop(&service->queue);
12741 +
12742 + memcpy(data, header->data, header->size < max_data_size_to_read ?
12743 + header->size : max_data_size_to_read);
12744 +
12745 + *actual_msg_size = header->size;
12746 +
12747 + vchiq_release_message(service->handle, header);
12748 +
12749 + return 0;
12750 +}
12751 +EXPORT_SYMBOL(vchi_msg_dequeue);
12752 +
12753 +/***********************************************************
12754 + * Name: vchi_msg_queuev
12755 + *
12756 + * Arguments: VCHI_SERVICE_HANDLE_T handle,
12757 + * VCHI_MSG_VECTOR_T *vector,
12758 + * uint32_t count,
12759 + * VCHI_FLAGS_T flags,
12760 + * void *msg_handle
12761 + *
12762 + * Description: Thin wrapper to queue a message onto a connection
12763 + *
12764 + * Returns: int32_t - success == 0
12765 + *
12766 + ***********************************************************/
12767 +
12768 +vchiq_static_assert(sizeof(VCHI_MSG_VECTOR_T) == sizeof(VCHIQ_ELEMENT_T));
12769 +vchiq_static_assert(offsetof(VCHI_MSG_VECTOR_T, vec_base) ==
12770 + offsetof(VCHIQ_ELEMENT_T, data));
12771 +vchiq_static_assert(offsetof(VCHI_MSG_VECTOR_T, vec_len) ==
12772 + offsetof(VCHIQ_ELEMENT_T, size));
12773 +
12774 +int32_t vchi_msg_queuev(VCHI_SERVICE_HANDLE_T handle,
12775 + VCHI_MSG_VECTOR_T *vector,
12776 + uint32_t count,
12777 + VCHI_FLAGS_T flags,
12778 + void *msg_handle)
12779 +{
12780 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
12781 +
12782 + (void)msg_handle;
12783 +
12784 + WARN_ON(flags != VCHI_FLAGS_BLOCK_UNTIL_QUEUED);
12785 +
12786 + return vchiq_status_to_vchi(vchiq_queue_message(service->handle,
12787 + (const VCHIQ_ELEMENT_T *)vector, count));
12788 +}
12789 +EXPORT_SYMBOL(vchi_msg_queuev);
12790 +
12791 +/***********************************************************
12792 + * Name: vchi_held_msg_release
12793 + *
12794 + * Arguments: VCHI_HELD_MSG_T *message
12795 + *
12796 + * Description: Routine to release a held message (after it has been read with
12797 + * vchi_msg_hold)
12798 + *
12799 + * Returns: int32_t - success == 0
12800 + *
12801 + ***********************************************************/
12802 +int32_t vchi_held_msg_release(VCHI_HELD_MSG_T *message)
12803 +{
12804 + vchiq_release_message((VCHIQ_SERVICE_HANDLE_T)message->service,
12805 + (VCHIQ_HEADER_T *)message->message);
12806 +
12807 + return 0;
12808 +}
12809 +
12810 +/***********************************************************
12811 + * Name: vchi_msg_hold
12812 + *
12813 + * Arguments: VCHI_SERVICE_HANDLE_T handle,
12814 + * void **data,
12815 + * uint32_t *msg_size,
12816 + * VCHI_FLAGS_T flags,
12817 + * VCHI_HELD_MSG_T *message_handle
12818 + *
12819 + * Description: Routine to return a pointer to the current message (to allow
12820 + *              in-place processing). The message is dequeued; don't forget
12821 + * to release the message using vchi_held_msg_release when you're
12822 + * finished.
12823 + *
12824 + * Returns: int32_t - success == 0
12825 + *
12826 + ***********************************************************/
12827 +int32_t vchi_msg_hold(VCHI_SERVICE_HANDLE_T handle,
12828 + void **data,
12829 + uint32_t *msg_size,
12830 + VCHI_FLAGS_T flags,
12831 + VCHI_HELD_MSG_T *message_handle)
12832 +{
12833 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
12834 + VCHIQ_HEADER_T *header;
12835 +
12836 + WARN_ON((flags != VCHI_FLAGS_NONE) &&
12837 + (flags != VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE));
12838 +
12839 + if (flags == VCHI_FLAGS_NONE)
12840 + if (vchiu_queue_is_empty(&service->queue))
12841 + return -1;
12842 +
12843 + header = vchiu_queue_pop(&service->queue);
12844 +
12845 + *data = header->data;
12846 + *msg_size = header->size;
12847 +
12848 + message_handle->service =
12849 + (struct opaque_vchi_service_t *)service->handle;
12850 + message_handle->message = header;
12851 +
12852 + return 0;
12853 +}
12854 +
12855 +/***********************************************************
12856 + * Name: vchi_initialise
12857 + *
12858 + * Arguments: VCHI_INSTANCE_T *instance_handle
12859 + * VCHI_CONNECTION_T **connections
12860 + * const uint32_t num_connections
12861 + *
12862 + * Description: Initialises the hardware but does not transmit anything.
12863 + *              When run as a Host App this will be called twice, hence the
12864 + *              need to malloc the state information.
12865 + *
12866 + * Returns: 0 if successful, failure otherwise
12867 + *
12868 + ***********************************************************/
12869 +
12870 +int32_t vchi_initialise(VCHI_INSTANCE_T *instance_handle)
12871 +{
12872 + VCHIQ_INSTANCE_T instance;
12873 + VCHIQ_STATUS_T status;
12874 +
12875 + status = vchiq_initialise(&instance);
12876 +
12877 + *instance_handle = (VCHI_INSTANCE_T)instance;
12878 +
12879 + return vchiq_status_to_vchi(status);
12880 +}
12881 +EXPORT_SYMBOL(vchi_initialise);
12882 +
12883 +/***********************************************************
12884 + * Name: vchi_connect
12885 + *
12886 + * Arguments: VCHI_CONNECTION_T **connections
12887 + * const uint32_t num_connections
12888 + * VCHI_INSTANCE_T instance_handle)
12889 + *
12890 + * Description: Starts the command service on each connection,
12891 + * causing INIT messages to be pinged back and forth
12892 + *
12893 + * Returns: 0 if successful, failure otherwise
12894 + *
12895 + ***********************************************************/
12896 +int32_t vchi_connect(VCHI_CONNECTION_T **connections,
12897 + const uint32_t num_connections,
12898 + VCHI_INSTANCE_T instance_handle)
12899 +{
12900 + VCHIQ_INSTANCE_T instance = (VCHIQ_INSTANCE_T)instance_handle;
12901 +
12902 + (void)connections;
12903 + (void)num_connections;
12904 +
12905 +	return vchiq_status_to_vchi(vchiq_connect(instance));
12906 +}
12907 +EXPORT_SYMBOL(vchi_connect);
12908 +
12909 +
12910 +/***********************************************************
12911 + * Name: vchi_disconnect
12912 + *
12913 + * Arguments: VCHI_INSTANCE_T instance_handle
12914 + *
12915 + * Description: Stops the command service on each connection,
12916 + * causing DE-INIT messages to be pinged back and forth
12917 + *
12918 + * Returns: 0 if successful, failure otherwise
12919 + *
12920 + ***********************************************************/
12921 +int32_t vchi_disconnect(VCHI_INSTANCE_T instance_handle)
12922 +{
12923 + VCHIQ_INSTANCE_T instance = (VCHIQ_INSTANCE_T)instance_handle;
12924 + return vchiq_status_to_vchi(vchiq_shutdown(instance));
12925 +}
12926 +EXPORT_SYMBOL(vchi_disconnect);
12927 +
12928 +
12929 +/***********************************************************
12930 + * Name: vchi_service_open
12931 + * Name: vchi_service_create
12932 + *
12933 + * Arguments: VCHI_INSTANCE_T *instance_handle
12934 + * SERVICE_CREATION_T *setup,
12935 + * VCHI_SERVICE_HANDLE_T *handle
12936 + *
12937 + * Description: Routine to open a service
12938 + *
12939 + * Returns: int32_t - success == 0
12940 + *
12941 + ***********************************************************/
12942 +
12943 +static VCHIQ_STATUS_T shim_callback(VCHIQ_REASON_T reason,
12944 + VCHIQ_HEADER_T *header, VCHIQ_SERVICE_HANDLE_T handle, void *bulk_user)
12945 +{
12946 + SHIM_SERVICE_T *service =
12947 + (SHIM_SERVICE_T *)VCHIQ_GET_SERVICE_USERDATA(handle);
12948 +
12949 + switch (reason) {
12950 + case VCHIQ_MESSAGE_AVAILABLE:
12951 + vchiu_queue_push(&service->queue, header);
12952 +
12953 + if (service->callback)
12954 + service->callback(service->callback_param,
12955 + VCHI_CALLBACK_MSG_AVAILABLE, NULL);
12956 + break;
12957 + case VCHIQ_BULK_TRANSMIT_DONE:
12958 + if (service->callback)
12959 + service->callback(service->callback_param,
12960 + VCHI_CALLBACK_BULK_SENT, bulk_user);
12961 + break;
12962 + case VCHIQ_BULK_RECEIVE_DONE:
12963 + if (service->callback)
12964 + service->callback(service->callback_param,
12965 + VCHI_CALLBACK_BULK_RECEIVED, bulk_user);
12966 + break;
12967 + case VCHIQ_SERVICE_CLOSED:
12968 + if (service->callback)
12969 + service->callback(service->callback_param,
12970 + VCHI_CALLBACK_SERVICE_CLOSED, NULL);
12971 + break;
12972 + case VCHIQ_SERVICE_OPENED:
12973 + /* No equivalent VCHI reason */
12974 + break;
12975 + case VCHIQ_BULK_TRANSMIT_ABORTED:
12976 + if (service->callback)
12977 + service->callback(service->callback_param,
12978 + VCHI_CALLBACK_BULK_TRANSMIT_ABORTED, bulk_user);
12979 + break;
12980 + case VCHIQ_BULK_RECEIVE_ABORTED:
12981 + if (service->callback)
12982 + service->callback(service->callback_param,
12983 + VCHI_CALLBACK_BULK_RECEIVE_ABORTED, bulk_user);
12984 + break;
12985 + default:
12986 + WARN(1, "not supported\n");
12987 + break;
12988 + }
12989 +
12990 + return VCHIQ_SUCCESS;
12991 +}
12992 +
12993 +static SHIM_SERVICE_T *service_alloc(VCHIQ_INSTANCE_T instance,
12994 + SERVICE_CREATION_T *setup)
12995 +{
12996 + SHIM_SERVICE_T *service = kzalloc(sizeof(SHIM_SERVICE_T), GFP_KERNEL);
12997 +
12998 + (void)instance;
12999 +
13000 + if (service) {
13001 + if (vchiu_queue_init(&service->queue, 64)) {
13002 + service->callback = setup->callback;
13003 + service->callback_param = setup->callback_param;
13004 + } else {
13005 + kfree(service);
13006 + service = NULL;
13007 + }
13008 + }
13009 +
13010 + return service;
13011 +}
13012 +
13013 +static void service_free(SHIM_SERVICE_T *service)
13014 +{
13015 + if (service) {
13016 + vchiu_queue_delete(&service->queue);
13017 + kfree(service);
13018 + }
13019 +}
13020 +
13021 +int32_t vchi_service_open(VCHI_INSTANCE_T instance_handle,
13022 + SERVICE_CREATION_T *setup,
13023 + VCHI_SERVICE_HANDLE_T *handle)
13024 +{
13025 + VCHIQ_INSTANCE_T instance = (VCHIQ_INSTANCE_T)instance_handle;
13026 + SHIM_SERVICE_T *service = service_alloc(instance, setup);
13027 + if (service) {
13028 + VCHIQ_SERVICE_PARAMS_T params;
13029 + VCHIQ_STATUS_T status;
13030 +
13031 + memset(&params, 0, sizeof(params));
13032 + params.fourcc = setup->service_id;
13033 + params.callback = shim_callback;
13034 + params.userdata = service;
13035 + params.version = setup->version.version;
13036 + params.version_min = setup->version.version_min;
13037 +
13038 + status = vchiq_open_service(instance, &params,
13039 + &service->handle);
13040 + if (status != VCHIQ_SUCCESS) {
13041 + service_free(service);
13042 + service = NULL;
13043 + }
13044 + }
13045 +
13046 + *handle = (VCHI_SERVICE_HANDLE_T)service;
13047 +
13048 + return (service != NULL) ? 0 : -1;
13049 +}
13050 +EXPORT_SYMBOL(vchi_service_open);
13051 +
13052 +int32_t vchi_service_create(VCHI_INSTANCE_T instance_handle,
13053 + SERVICE_CREATION_T *setup,
13054 + VCHI_SERVICE_HANDLE_T *handle)
13055 +{
13056 + VCHIQ_INSTANCE_T instance = (VCHIQ_INSTANCE_T)instance_handle;
13057 + SHIM_SERVICE_T *service = service_alloc(instance, setup);
13058 + if (service) {
13059 + VCHIQ_SERVICE_PARAMS_T params;
13060 + VCHIQ_STATUS_T status;
13061 +
13062 + memset(&params, 0, sizeof(params));
13063 + params.fourcc = setup->service_id;
13064 + params.callback = shim_callback;
13065 + params.userdata = service;
13066 + params.version = setup->version.version;
13067 + params.version_min = setup->version.version_min;
13068 + status = vchiq_add_service(instance, &params, &service->handle);
13069 +
13070 + if (status != VCHIQ_SUCCESS) {
13071 + service_free(service);
13072 + service = NULL;
13073 + }
13074 + }
13075 +
13076 + *handle = (VCHI_SERVICE_HANDLE_T)service;
13077 +
13078 + return (service != NULL) ? 0 : -1;
13079 +}
13080 +EXPORT_SYMBOL(vchi_service_create);
13081 +
13082 +int32_t vchi_service_close(const VCHI_SERVICE_HANDLE_T handle)
13083 +{
13084 + int32_t ret = -1;
13085 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
13086 + if (service) {
13087 + VCHIQ_STATUS_T status = vchiq_close_service(service->handle);
13088 + if (status == VCHIQ_SUCCESS) {
13089 + service_free(service);
13090 + service = NULL;
13091 + }
13092 +
13093 + ret = vchiq_status_to_vchi(status);
13094 + }
13095 + return ret;
13096 +}
13097 +EXPORT_SYMBOL(vchi_service_close);
13098 +
13099 +int32_t vchi_service_destroy(const VCHI_SERVICE_HANDLE_T handle)
13100 +{
13101 + int32_t ret = -1;
13102 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
13103 + if (service) {
13104 + VCHIQ_STATUS_T status = vchiq_remove_service(service->handle);
13105 + if (status == VCHIQ_SUCCESS) {
13106 + service_free(service);
13107 + service = NULL;
13108 + }
13109 +
13110 + ret = vchiq_status_to_vchi(status);
13111 + }
13112 + return ret;
13113 +}
13114 +EXPORT_SYMBOL(vchi_service_destroy);
13115 +
13116 +int32_t vchi_get_peer_version(const VCHI_SERVICE_HANDLE_T handle, short *peer_version)
13117 +{
13118 + int32_t ret = -1;
13119 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
13120 + if (service) {
13121 + VCHIQ_STATUS_T status =
13122 + vchiq_get_peer_version(service->handle, peer_version);
13123 + ret = vchiq_status_to_vchi(status);
13124 + }
13125 + return ret;
13126 +}
13127 +EXPORT_SYMBOL(vchi_get_peer_version);
13128 +
13129 +/* ----------------------------------------------------------------------
13130 + * read a uint32_t from buffer.
13131 + * network format is defined to be little endian
13132 + * -------------------------------------------------------------------- */
13133 +uint32_t
13134 +vchi_readbuf_uint32(const void *_ptr)
13135 +{
13136 + const unsigned char *ptr = _ptr;
13137 + return ptr[0] | (ptr[1] << 8) | (ptr[2] << 16) | (ptr[3] << 24);
13138 +}
13139 +
13140 +/* ----------------------------------------------------------------------
13141 + * write a uint32_t to buffer.
13142 + * network format is defined to be little endian
13143 + * -------------------------------------------------------------------- */
13144 +void
13145 +vchi_writebuf_uint32(void *_ptr, uint32_t value)
13146 +{
13147 + unsigned char *ptr = _ptr;
13148 + ptr[0] = (unsigned char)((value >> 0) & 0xFF);
13149 + ptr[1] = (unsigned char)((value >> 8) & 0xFF);
13150 + ptr[2] = (unsigned char)((value >> 16) & 0xFF);
13151 + ptr[3] = (unsigned char)((value >> 24) & 0xFF);
13152 +}
13153 +
13154 +/* ----------------------------------------------------------------------
13155 + * read a uint16_t from buffer.
13156 + * network format is defined to be little endian
13157 + * -------------------------------------------------------------------- */
13158 +uint16_t
13159 +vchi_readbuf_uint16(const void *_ptr)
13160 +{
13161 + const unsigned char *ptr = _ptr;
13162 + return ptr[0] | (ptr[1] << 8);
13163 +}
13164 +
13165 +/* ----------------------------------------------------------------------
13166 + * write a uint16_t into the buffer.
13167 + * network format is defined to be little endian
13168 + * -------------------------------------------------------------------- */
13169 +void
13170 +vchi_writebuf_uint16(void *_ptr, uint16_t value)
13171 +{
13172 + unsigned char *ptr = _ptr;
13173 + ptr[0] = (value >> 0) & 0xFF;
13174 + ptr[1] = (value >> 8) & 0xFF;
13175 +}
13176 +
13177 +/***********************************************************
13178 + * Name: vchi_service_use
13179 + *
13180 + * Arguments: const VCHI_SERVICE_HANDLE_T handle
13181 + *
13182 + * Description: Routine to increment refcount on a service
13183 + *
13184 + * Returns: int32_t - success == 0
13185 + *
13186 + ***********************************************************/
13187 +int32_t vchi_service_use(const VCHI_SERVICE_HANDLE_T handle)
13188 +{
13189 + int32_t ret = -1;
13190 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
13191 + if (service)
13192 + ret = vchiq_status_to_vchi(vchiq_use_service(service->handle));
13193 + return ret;
13194 +}
13195 +EXPORT_SYMBOL(vchi_service_use);
13196 +
13197 +/***********************************************************
13198 + * Name: vchi_service_release
13199 + *
13200 + * Arguments: const VCHI_SERVICE_HANDLE_T handle
13201 + *
13202 + * Description: Routine to decrement refcount on a service
13203 + *
13204 + * Returns: int32_t - success == 0
13205 + *
13206 + ***********************************************************/
13207 +int32_t vchi_service_release(const VCHI_SERVICE_HANDLE_T handle)
13208 +{
13209 + int32_t ret = -1;
13210 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
13211 + if (service)
13212 + ret = vchiq_status_to_vchi(
13213 + vchiq_release_service(service->handle));
13214 + return ret;
13215 +}
13216 +EXPORT_SYMBOL(vchi_service_release);
13217 diff -urN linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.c linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.c
13218 --- linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.c 1970-01-01 01:00:00.000000000 +0100
13219 +++ linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.c 2013-07-06 15:25:50.000000000 +0100
13220 @@ -0,0 +1,120 @@
13221 +/**
13222 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
13223 + *
13224 + * Redistribution and use in source and binary forms, with or without
13225 + * modification, are permitted provided that the following conditions
13226 + * are met:
13227 + * 1. Redistributions of source code must retain the above copyright
13228 + * notice, this list of conditions, and the following disclaimer,
13229 + * without modification.
13230 + * 2. Redistributions in binary form must reproduce the above copyright
13231 + * notice, this list of conditions and the following disclaimer in the
13232 + * documentation and/or other materials provided with the distribution.
13233 + * 3. The names of the above-listed copyright holders may not be used
13234 + * to endorse or promote products derived from this software without
13235 + * specific prior written permission.
13236 + *
13237 + * ALTERNATIVELY, this software may be distributed under the terms of the
13238 + * GNU General Public License ("GPL") version 2, as published by the Free
13239 + * Software Foundation.
13240 + *
13241 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
13242 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
13243 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
13244 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
13245 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
13246 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
13247 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
13248 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
13249 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
13250 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
13251 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
13252 + */
13253 +
13254 +#include "vchiq_util.h"
13255 +
13256 +static inline int is_pow2(int i)
13257 +{
13258 + return i && !(i & (i - 1));
13259 +}
13260 +
13261 +int vchiu_queue_init(VCHIU_QUEUE_T *queue, int size)
13262 +{
13263 + WARN_ON(!is_pow2(size));
13264 +
13265 + queue->size = size;
13266 + queue->read = 0;
13267 + queue->write = 0;
13268 +
13269 + sema_init(&queue->pop, 0);
13270 + sema_init(&queue->push, 0);
13271 +
13272 + queue->storage = kzalloc(size * sizeof(VCHIQ_HEADER_T *), GFP_KERNEL);
13273 + if (queue->storage == NULL) {
13274 + vchiu_queue_delete(queue);
13275 + return 0;
13276 + }
13277 + return 1;
13278 +}
13279 +
13280 +void vchiu_queue_delete(VCHIU_QUEUE_T *queue)
13281 +{
13282 + if (queue->storage != NULL)
13283 + kfree(queue->storage);
13284 +}
13285 +
13286 +int vchiu_queue_is_empty(VCHIU_QUEUE_T *queue)
13287 +{
13288 + return queue->read == queue->write;
13289 +}
13290 +
13291 +int vchiu_queue_is_full(VCHIU_QUEUE_T *queue)
13292 +{
13293 + return queue->write == queue->read + queue->size;
13294 +}
13295 +
13296 +void vchiu_queue_push(VCHIU_QUEUE_T *queue, VCHIQ_HEADER_T *header)
13297 +{
13298 + while (queue->write == queue->read + queue->size) {
13299 + if (down_interruptible(&queue->pop) != 0) {
13300 + flush_signals(current);
13301 + }
13302 + }
13303 +
13304 + queue->storage[queue->write & (queue->size - 1)] = header;
13305 +
13306 + queue->write++;
13307 +
13308 + up(&queue->push);
13309 +}
13310 +
13311 +VCHIQ_HEADER_T *vchiu_queue_peek(VCHIU_QUEUE_T *queue)
13312 +{
13313 + while (queue->write == queue->read) {
13314 + if (down_interruptible(&queue->push) != 0) {
13315 + flush_signals(current);
13316 + }
13317 + }
13318 +
13319 + up(&queue->push); // We haven't removed anything from the queue.
13320 + return queue->storage[queue->read & (queue->size - 1)];
13321 +}
13322 +
13323 +VCHIQ_HEADER_T *vchiu_queue_pop(VCHIU_QUEUE_T *queue)
13324 +{
13325 + VCHIQ_HEADER_T *header;
13326 +
13327 + while (queue->write == queue->read) {
13328 + if (down_interruptible(&queue->push) != 0) {
13329 + flush_signals(current);
13330 + }
13331 + }
13332 +
13333 + header = queue->storage[queue->read & (queue->size - 1)];
13334 +
13335 + queue->read++;
13336 +
13337 + up(&queue->pop);
13338 +
13339 + return header;
13340 +}
13341 diff -urN linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.h linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.h
13342 --- linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.h 1970-01-01 01:00:00.000000000 +0100
13343 +++ linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.h 2013-07-06 15:25:50.000000000 +0100
13344 @@ -0,0 +1,82 @@
13345 +/**
13346 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
13347 + *
13348 + * Redistribution and use in source and binary forms, with or without
13349 + * modification, are permitted provided that the following conditions
13350 + * are met:
13351 + * 1. Redistributions of source code must retain the above copyright
13352 + * notice, this list of conditions, and the following disclaimer,
13353 + * without modification.
13354 + * 2. Redistributions in binary form must reproduce the above copyright
13355 + * notice, this list of conditions and the following disclaimer in the
13356 + * documentation and/or other materials provided with the distribution.
13357 + * 3. The names of the above-listed copyright holders may not be used
13358 + * to endorse or promote products derived from this software without
13359 + * specific prior written permission.
13360 + *
13361 + * ALTERNATIVELY, this software may be distributed under the terms of the
13362 + * GNU General Public License ("GPL") version 2, as published by the Free
13363 + * Software Foundation.
13364 + *
13365 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
13366 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
13367 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
13368 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
13369 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
13370 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
13371 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
13372 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
13373 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
13374 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
13375 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
13376 + */
13377 +
13378 +#ifndef VCHIQ_UTIL_H
13379 +#define VCHIQ_UTIL_H
13380 +
13381 +#include <linux/types.h>
13382 +#include <linux/semaphore.h>
13383 +#include <linux/mutex.h>
13384 +#include <linux/bitops.h>
13385 +#include <linux/kthread.h>
13386 +#include <linux/wait.h>
13387 +#include <linux/vmalloc.h>
13388 +#include <linux/jiffies.h>
13389 +#include <linux/delay.h>
13390 +#include <linux/string.h>
13391 +#include <linux/types.h>
13392 +#include <linux/interrupt.h>
13393 +#include <linux/random.h>
13394 +#include <linux/sched.h>
13395 +#include <linux/ctype.h>
13396 +#include <linux/uaccess.h>
13397 +#include <linux/time.h> /* for time_t */
13398 +#include <linux/slab.h>
13399 +#include <linux/vmalloc.h>
13400 +
13401 +#include "vchiq_if.h"
13402 +
13403 +typedef struct {
13404 + int size;
13405 + int read;
13406 + int write;
13407 +
13408 + struct semaphore pop;
13409 + struct semaphore push;
13410 +
13411 + VCHIQ_HEADER_T **storage;
13412 +} VCHIU_QUEUE_T;
13413 +
13414 +extern int vchiu_queue_init(VCHIU_QUEUE_T *queue, int size);
13415 +extern void vchiu_queue_delete(VCHIU_QUEUE_T *queue);
13416 +
13417 +extern int vchiu_queue_is_empty(VCHIU_QUEUE_T *queue);
13418 +extern int vchiu_queue_is_full(VCHIU_QUEUE_T *queue);
13419 +
13420 +extern void vchiu_queue_push(VCHIU_QUEUE_T *queue, VCHIQ_HEADER_T *header);
13421 +
13422 +extern VCHIQ_HEADER_T *vchiu_queue_peek(VCHIU_QUEUE_T *queue);
13423 +extern VCHIQ_HEADER_T *vchiu_queue_pop(VCHIU_QUEUE_T *queue);
13424 +
13425 +#endif
13426 +
13427 diff -urN linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_version.c linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_version.c
13428 --- linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_version.c 1970-01-01 01:00:00.000000000 +0100
13429 +++ linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_version.c 2013-07-06 15:25:50.000000000 +0100
13430 @@ -0,0 +1,59 @@
13431 +/**
13432 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
13433 + *
13434 + * Redistribution and use in source and binary forms, with or without
13435 + * modification, are permitted provided that the following conditions
13436 + * are met:
13437 + * 1. Redistributions of source code must retain the above copyright
13438 + * notice, this list of conditions, and the following disclaimer,
13439 + * without modification.
13440 + * 2. Redistributions in binary form must reproduce the above copyright
13441 + * notice, this list of conditions and the following disclaimer in the
13442 + * documentation and/or other materials provided with the distribution.
13443 + * 3. The names of the above-listed copyright holders may not be used
13444 + * to endorse or promote products derived from this software without
13445 + * specific prior written permission.
13446 + *
13447 + * ALTERNATIVELY, this software may be distributed under the terms of the
13448 + * GNU General Public License ("GPL") version 2, as published by the Free
13449 + * Software Foundation.
13450 + *
13451 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
13452 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
13453 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
13454 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
13455 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
13456 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
13457 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
13458 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
13459 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
13460 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
13461 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
13462 + */
13463 +#include "vchiq_build_info.h"
13464 +#include <linux/broadcom/vc_debug_sym.h>
13465 +
13466 +VC_DEBUG_DECLARE_STRING_VAR( vchiq_build_hostname, "dc4-arm-01" );
13467 +VC_DEBUG_DECLARE_STRING_VAR( vchiq_build_version, "9245b4c35b99b3870e1f7dc598c5692b3c66a6f0 (tainted)" );
13468 +VC_DEBUG_DECLARE_STRING_VAR( vchiq_build_time, __TIME__ );
13469 +VC_DEBUG_DECLARE_STRING_VAR( vchiq_build_date, __DATE__ );
13470 +
13471 +const char *vchiq_get_build_hostname( void )
13472 +{
13473 + return vchiq_build_hostname;
13474 +}
13475 +
13476 +const char *vchiq_get_build_version( void )
13477 +{
13478 + return vchiq_build_version;
13479 +}
13480 +
13481 +const char *vchiq_get_build_date( void )
13482 +{
13483 + return vchiq_build_date;
13484 +}
13485 +
13486 +const char *vchiq_get_build_time( void )
13487 +{
13488 + return vchiq_build_time;
13489 +}
13490 diff -urN linux-3.10/drivers/misc/vc04_services/Kconfig linux-rpi-3.10.y/drivers/misc/vc04_services/Kconfig
13491 --- linux-3.10/drivers/misc/vc04_services/Kconfig 1970-01-01 01:00:00.000000000 +0100
13492 +++ linux-rpi-3.10.y/drivers/misc/vc04_services/Kconfig 2013-07-06 15:25:50.000000000 +0100
13493 @@ -0,0 +1,10 @@
13494 +config BCM2708_VCHIQ
13495 + tristate "Videocore VCHIQ"
13496 + depends on MACH_BCM2708
13497 + default y
13498 + help
13499 + Kernel to VideoCore communication interface for the
13500 + BCM2708 family of products.
13501 + Defaults to Y when the Broadcom Videocore services
13502 + are included in the build, N otherwise.
13503 +
13504 diff -urN linux-3.10/drivers/misc/vc04_services/Makefile linux-rpi-3.10.y/drivers/misc/vc04_services/Makefile
13505 --- linux-3.10/drivers/misc/vc04_services/Makefile 1970-01-01 01:00:00.000000000 +0100
13506 +++ linux-rpi-3.10.y/drivers/misc/vc04_services/Makefile 2013-07-06 15:25:50.000000000 +0100
13507 @@ -0,0 +1,18 @@
13508 +ifeq ($(CONFIG_MACH_BCM2708),y)
13509 +
13510 +obj-$(CONFIG_BCM2708_VCHIQ) += vchiq.o
13511 +
13512 +vchiq-objs := \
13513 + interface/vchiq_arm/vchiq_core.o \
13514 + interface/vchiq_arm/vchiq_arm.o \
13515 + interface/vchiq_arm/vchiq_kern_lib.o \
13516 + interface/vchiq_arm/vchiq_2835_arm.o \
13517 + interface/vchiq_arm/vchiq_proc.o \
13518 + interface/vchiq_arm/vchiq_shim.o \
13519 + interface/vchiq_arm/vchiq_util.o \
13520 + interface/vchiq_arm/vchiq_connected.o \
13521 +
13522 +EXTRA_CFLAGS += -DVCOS_VERIFY_BKPTS=1 -Idrivers/misc/vc04_services -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000
13523 +
13524 +endif
13525 +
13526 diff -urN linux-3.10/include/linux/broadcom/vc_cma.h linux-rpi-3.10.y/include/linux/broadcom/vc_cma.h
13527 --- linux-3.10/include/linux/broadcom/vc_cma.h 1970-01-01 01:00:00.000000000 +0100
13528 +++ linux-rpi-3.10.y/include/linux/broadcom/vc_cma.h 2013-07-06 15:25:50.000000000 +0100
13529 @@ -0,0 +1,30 @@
13530 +/*****************************************************************************
13531 +* Copyright 2012 Broadcom Corporation. All rights reserved.
13532 +*
13533 +* Unless you and Broadcom execute a separate written software license
13534 +* agreement governing use of this software, this software is licensed to you
13535 +* under the terms of the GNU General Public License version 2, available at
13536 +* http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
13537 +*
13538 +* Notwithstanding the above, under no circumstances may you combine this
13539 +* software in any way with any other Broadcom software provided under a
13540 +* license other than the GPL, without Broadcom's express prior written
13541 +* consent.
13542 +*****************************************************************************/
13543 +
13544 +#if !defined( VC_CMA_H )
13545 +#define VC_CMA_H
13546 +
13547 +#include <linux/ioctl.h>
13548 +
13549 +#define VC_CMA_IOC_MAGIC 0xc5
13550 +
13551 +#define VC_CMA_IOC_RESERVE _IO(VC_CMA_IOC_MAGIC, 0)
13552 +
13553 +#ifdef __KERNEL__
13554 +extern void __init vc_cma_early_init(void);
13555 +extern void __init vc_cma_reserve(void);
13556 +#endif
13557 +
13558 +#endif /* VC_CMA_H */
13559 +