brcm2708: update 4.1 patches
[openwrt/openwrt.git] target/linux/brcm2708/patches-4.1/0012-bcm2708-vchiq-driver.patch
1 From 48dac350e45f5e45aa29fedb1b79247f9b771233 Mon Sep 17 00:00:00 2001
2 From: popcornmix <popcornmix@gmail.com>
3 Date: Tue, 2 Jul 2013 23:42:01 +0100
4 Subject: [PATCH 012/148] bcm2708 vchiq driver
5 MIME-Version: 1.0
6 Content-Type: text/plain; charset=UTF-8
7 Content-Transfer-Encoding: 8bit
8
9 Signed-off-by: popcornmix <popcornmix@gmail.com>
10
11 vchiq: create_pagelist copes with vmalloc memory
12
13 Signed-off-by: Daniel Stone <daniels@collabora.com>
14
15 vchiq: fix the shim message release
16
17 Signed-off-by: Daniel Stone <daniels@collabora.com>
18
19 vchiq: export additional symbols
20
21 Signed-off-by: Daniel Stone <daniels@collabora.com>
22
23 VCHIQ: Make service closure fully synchronous (drv)
24
25 This is one half of a two-part patch, the other half of which is to
26 the vchiq_lib user library. With these patches, calls to
27 vchiq_close_service and vchiq_remove_service won't return until any
28 associated callbacks have been delivered to the callback thread.
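As a hedged illustration of what this guarantee buys a kernel-side client (vchiq_close_service() and VCHIQ_SUCCESS come from the vchiq_if.h added by this patch; "demo_shutdown" and "struct demo_ctx" are purely hypothetical):

    #include <linux/slab.h>
    #include "interface/vchiq_arm/vchiq_if.h"

    /* sketch only: free per-service state after a now-synchronous close */
    static void demo_shutdown(VCHIQ_SERVICE_HANDLE_T service, struct demo_ctx *ctx)
    {
        if (vchiq_close_service(service) == VCHIQ_SUCCESS) {
            /* All callbacks for this service have already been delivered
             * to the callback thread, so freeing the context cannot race
             * a late callback. */
            kfree(ctx);
        }
    }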
29
30 VCHIQ: Add per-service tracing
31
32 The new service option VCHIQ_SERVICE_OPTION_TRACE is a boolean that
33 toggles tracing for the specified service.
34
35 This commit also introduces vchi_service_set_option and the associated
36 option VCHI_SERVICE_OPTION_TRACE.
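A minimal, hedged sketch of switching tracing on for an open service with the call introduced here (the handle is assumed to come from an earlier vchi_service_open(); the signature matches the vchi.h declaration later in this patch):

    /* enable per-service tracing; pass 0 as the value to turn it back off */
    int32_t ret = vchi_service_set_option(handle, VCHI_SERVICE_OPTION_TRACE, 1);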
37
38 vchiq: Make the synchronous-CLOSE logic more tolerant
39
40 vchiq: Move logging control into debugfs
41
42 vchiq: Take care of a corner case tickled by VCSM
43
44 Closing a connection that isn't fully open requires care, since one
45 side does not know the other side's port number. Code was present to
46 handle the case where a CLOSE is sent immediately after an OPEN, i.e.
47 before the OPENACK has been received, but this was incorrectly being
48 used when an OPEN from a client using port 0 was rejected.
49
50 (In the observed failure, the host was attempting to use the VCSM
51 service, which isn't present in the 'cutdown' firmware. The failure
52 was intermittent because sometimes the keepalive service would
53 grab port 0.)
54
55 This case can be distinguished because the client's remoteport will
56 still be VCHIQ_PORT_FREE, and the srvstate will be OPENING. Either
57 condition is sufficient to differentiate it from the special case
58 described above.
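Roughly, the distinguishing test reads like the following simplified fragment (field and constant names follow the vchiq_core.h added later in this patch; the surrounding close-handling context is omitted):

    /* A rejected OPEN (no OPENACK ever received), not a CLOSE racing an OPENACK */
    if ((service->remoteport == VCHIQ_PORT_FREE) ||
        (service->srvstate == VCHIQ_SRVSTATE_OPENING)) {
        /* treat as a plain open failure rather than the early-CLOSE case */
    }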
59
60 vchiq: Avoid high load when blocked and unkillable
61
62 vchiq: Include SIGSTOP and SIGCONT in list of signals not-masked by vchiq to allow gdb to work
63
64 vchiq_arm: Complete support for SYNCHRONOUS mode
65
66 vchiq: Remove inline from suspend/resume
67
68 vchiq: Allocation does not need to be atomic
69
70 vchiq: Fix wrong condition check
71
72     The log level is already checked from within the log call, so remove the redundant check at the call site.
73
74 Signed-off-by: Pranith Kumar <bobby.prani@gmail.com>
75
76 BCM270x: Add vchiq device to platform file and Device Tree
77
78 Prepare to turn the vchiq module into a driver.
79
80 Signed-off-by: Noralf Trønnes <noralf@tronnes.org>
81
82 bcm2708: vchiq: Add Device Tree support
83
84 Turn vchiq into a driver and stop hardcoding resources.
85 Use devm_* functions in probe path to simplify cleanup.
86 A global variable is used to hold the register address. This is done
87 to keep this patch as small as possible.
88 Also make available on ARCH_BCM2835.
89 Based on work by Lubomir Rintel.
90
91 Signed-off-by: Noralf Trønnes <noralf@tronnes.org>
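As a hedged sketch of the devm_*-based probe pattern and global register variable described above (function and variable names below are illustrative, not taken verbatim from the patch):

    #include <linux/platform_device.h>
    #include <linux/err.h>
    #include <linux/io.h>

    static void __iomem *g_regbase;    /* global register address, as noted above */

    static int vchiq_probe(struct platform_device *pdev)
    {
        struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

        g_regbase = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(g_regbase))
            return PTR_ERR(g_regbase);
        /* IRQ and DMA setup follow, also via devm_* helpers, so the error
         * paths need no explicit unwinding */
        return 0;
    }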
92
93 vchiq: Change logging level for inbound data
94 ---
95 arch/arm/mach-bcm2708/bcm2708.c | 26 +
96 arch/arm/mach-bcm2708/include/mach/platform.h | 2 +
97 arch/arm/mach-bcm2709/bcm2709.c | 26 +
98 arch/arm/mach-bcm2709/include/mach/platform.h | 2 +
99 drivers/misc/Kconfig | 1 +
100 drivers/misc/Makefile | 1 +
101 drivers/misc/vc04_services/Kconfig | 9 +
102 drivers/misc/vc04_services/Makefile | 14 +
103 .../interface/vchi/connections/connection.h | 328 ++
104 .../interface/vchi/message_drivers/message.h | 204 +
105 drivers/misc/vc04_services/interface/vchi/vchi.h | 378 ++
106 .../misc/vc04_services/interface/vchi/vchi_cfg.h | 224 ++
107 .../interface/vchi/vchi_cfg_internal.h | 71 +
108 .../vc04_services/interface/vchi/vchi_common.h | 175 +
109 .../misc/vc04_services/interface/vchi/vchi_mh.h | 42 +
110 .../misc/vc04_services/interface/vchiq_arm/vchiq.h | 40 +
111 .../vc04_services/interface/vchiq_arm/vchiq_2835.h | 42 +
112 .../interface/vchiq_arm/vchiq_2835_arm.c | 547 +++
113 .../vc04_services/interface/vchiq_arm/vchiq_arm.c | 2886 ++++++++++++++
114 .../vc04_services/interface/vchiq_arm/vchiq_arm.h | 220 ++
115 .../interface/vchiq_arm/vchiq_build_info.h | 37 +
116 .../vc04_services/interface/vchiq_arm/vchiq_cfg.h | 69 +
117 .../interface/vchiq_arm/vchiq_connected.c | 120 +
118 .../interface/vchiq_arm/vchiq_connected.h | 50 +
119 .../vc04_services/interface/vchiq_arm/vchiq_core.c | 3934 ++++++++++++++++++++
120 .../vc04_services/interface/vchiq_arm/vchiq_core.h | 712 ++++
121 .../interface/vchiq_arm/vchiq_debugfs.c | 383 ++
122 .../interface/vchiq_arm/vchiq_debugfs.h | 52 +
123 .../interface/vchiq_arm/vchiq_genversion | 87 +
124 .../vc04_services/interface/vchiq_arm/vchiq_if.h | 189 +
125 .../interface/vchiq_arm/vchiq_ioctl.h | 131 +
126 .../interface/vchiq_arm/vchiq_kern_lib.c | 458 +++
127 .../interface/vchiq_arm/vchiq_killable.h | 69 +
128 .../interface/vchiq_arm/vchiq_memdrv.h | 71 +
129 .../interface/vchiq_arm/vchiq_pagelist.h | 58 +
130 .../vc04_services/interface/vchiq_arm/vchiq_shim.c | 860 +++++
131 .../vc04_services/interface/vchiq_arm/vchiq_util.c | 152 +
132 .../vc04_services/interface/vchiq_arm/vchiq_util.h | 81 +
133 .../interface/vchiq_arm/vchiq_version.c | 59 +
134 39 files changed, 12810 insertions(+)
135 create mode 100644 drivers/misc/vc04_services/Kconfig
136 create mode 100644 drivers/misc/vc04_services/Makefile
137 create mode 100644 drivers/misc/vc04_services/interface/vchi/connections/connection.h
138 create mode 100644 drivers/misc/vc04_services/interface/vchi/message_drivers/message.h
139 create mode 100644 drivers/misc/vc04_services/interface/vchi/vchi.h
140 create mode 100644 drivers/misc/vc04_services/interface/vchi/vchi_cfg.h
141 create mode 100644 drivers/misc/vc04_services/interface/vchi/vchi_cfg_internal.h
142 create mode 100644 drivers/misc/vc04_services/interface/vchi/vchi_common.h
143 create mode 100644 drivers/misc/vc04_services/interface/vchi/vchi_mh.h
144 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq.h
145 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835.h
146 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
147 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.c
148 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.h
149 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_build_info.h
150 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_cfg.h
151 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.c
152 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.h
153 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.c
154 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.h
155 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_debugfs.c
156 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_debugfs.h
157 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_genversion
158 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_if.h
159 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_ioctl.h
160 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_kern_lib.c
161 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_killable.h
162 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_memdrv.h
163 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_pagelist.h
164 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_shim.c
165 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.c
166 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.h
167 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_version.c
168
169 --- a/arch/arm/mach-bcm2708/bcm2708.c
170 +++ b/arch/arm/mach-bcm2708/bcm2708.c
171 @@ -376,6 +376,31 @@ static struct platform_device bcm2708_vc
172 },
173 };
174
175 +static struct resource bcm2708_vchiq_resources[] = {
176 + {
177 + .start = ARMCTRL_0_BELL_BASE,
178 + .end = ARMCTRL_0_BELL_BASE + 16,
179 + .flags = IORESOURCE_MEM,
180 + }, {
181 + .start = IRQ_ARM_DOORBELL_0,
182 + .end = IRQ_ARM_DOORBELL_0,
183 + .flags = IORESOURCE_IRQ,
184 + },
185 +};
186 +
187 +static u64 vchiq_dmamask = DMA_BIT_MASK(DMA_MASK_BITS_COMMON);
188 +
189 +static struct platform_device bcm2708_vchiq_device = {
190 + .name = "bcm2835_vchiq",
191 + .id = -1,
192 + .resource = bcm2708_vchiq_resources,
193 + .num_resources = ARRAY_SIZE(bcm2708_vchiq_resources),
194 + .dev = {
195 + .dma_mask = &vchiq_dmamask,
196 + .coherent_dma_mask = DMA_BIT_MASK(DMA_MASK_BITS_COMMON),
197 + },
198 +};
199 +
200 #ifdef CONFIG_BCM2708_GPIO
201 #define BCM_GPIO_DRIVER_NAME "bcm2708_gpio"
202
203 @@ -611,6 +636,7 @@ void __init bcm2708_init(void)
204
205 bcm_register_device_dt(&bcm2708_dmaengine_device);
206 bcm_register_device(&bcm2708_vcio_device);
207 + bcm_register_device_dt(&bcm2708_vchiq_device);
208 #ifdef CONFIG_BCM2708_GPIO
209 bcm_register_device_dt(&bcm2708_gpio_device);
210 #endif
211 --- a/arch/arm/mach-bcm2708/include/mach/platform.h
212 +++ b/arch/arm/mach-bcm2708/include/mach/platform.h
213 @@ -81,6 +81,8 @@
214 #define ARMCTRL_IC_BASE (ARM_BASE + 0x200) /* ARM interrupt controller */
215 #define ARMCTRL_TIMER0_1_BASE (ARM_BASE + 0x400) /* Timer 0 and 1 */
216 #define ARMCTRL_0_SBM_BASE (ARM_BASE + 0x800) /* User 0 (ARM)'s Semaphores Doorbells and Mailboxes */
217 +#define ARMCTRL_0_BELL_BASE (ARMCTRL_0_SBM_BASE + 0x40) /* User 0 (ARM)'s Doorbell */
218 +#define ARMCTRL_0_MAIL0_BASE (ARMCTRL_0_SBM_BASE + 0x80) /* User 0 (ARM)'s Mailbox 0 */
219
220
221 /*
222 --- a/arch/arm/mach-bcm2709/bcm2709.c
223 +++ b/arch/arm/mach-bcm2709/bcm2709.c
224 @@ -396,6 +396,31 @@ static struct platform_device bcm2708_vc
225 },
226 };
227
228 +static struct resource bcm2708_vchiq_resources[] = {
229 + {
230 + .start = ARMCTRL_0_BELL_BASE,
231 + .end = ARMCTRL_0_BELL_BASE + 16,
232 + .flags = IORESOURCE_MEM,
233 + }, {
234 + .start = IRQ_ARM_DOORBELL_0,
235 + .end = IRQ_ARM_DOORBELL_0,
236 + .flags = IORESOURCE_IRQ,
237 + },
238 +};
239 +
240 +static u64 vchiq_dmamask = DMA_BIT_MASK(DMA_MASK_BITS_COMMON);
241 +
242 +static struct platform_device bcm2708_vchiq_device = {
243 + .name = "bcm2835_vchiq",
244 + .id = -1,
245 + .resource = bcm2708_vchiq_resources,
246 + .num_resources = ARRAY_SIZE(bcm2708_vchiq_resources),
247 + .dev = {
248 + .dma_mask = &vchiq_dmamask,
249 + .coherent_dma_mask = DMA_BIT_MASK(DMA_MASK_BITS_COMMON),
250 + },
251 +};
252 +
253 #ifdef CONFIG_BCM2708_GPIO
254 #define BCM_GPIO_DRIVER_NAME "bcm2708_gpio"
255
256 @@ -631,6 +656,7 @@ void __init bcm2709_init(void)
257
258 bcm_register_device_dt(&bcm2708_dmaengine_device);
259 bcm_register_device(&bcm2708_vcio_device);
260 + bcm_register_device_dt(&bcm2708_vchiq_device);
261 #ifdef CONFIG_BCM2708_GPIO
262 bcm_register_device_dt(&bcm2708_gpio_device);
263 #endif
264 --- a/arch/arm/mach-bcm2709/include/mach/platform.h
265 +++ b/arch/arm/mach-bcm2709/include/mach/platform.h
266 @@ -81,6 +81,8 @@
267 #define ARMCTRL_IC_BASE (ARM_BASE + 0x200) /* ARM interrupt controller */
268 #define ARMCTRL_TIMER0_1_BASE (ARM_BASE + 0x400) /* Timer 0 and 1 */
269 #define ARMCTRL_0_SBM_BASE (ARM_BASE + 0x800) /* User 0 (ARM)'s Semaphores Doorbells and Mailboxes */
270 +#define ARMCTRL_0_BELL_BASE (ARMCTRL_0_SBM_BASE + 0x40) /* User 0 (ARM)'s Doorbell */
271 +#define ARMCTRL_0_MAIL0_BASE (ARMCTRL_0_SBM_BASE + 0x80) /* User 0 (ARM)'s Mailbox 0 */
272
273
274 /*
275 --- a/drivers/misc/Kconfig
276 +++ b/drivers/misc/Kconfig
277 @@ -524,6 +524,7 @@ source "drivers/misc/carma/Kconfig"
278 source "drivers/misc/altera-stapl/Kconfig"
279 source "drivers/misc/mei/Kconfig"
280 source "drivers/misc/vmw_vmci/Kconfig"
281 +source "drivers/misc/vc04_services/Kconfig"
282 source "drivers/misc/mic/Kconfig"
283 source "drivers/misc/genwqe/Kconfig"
284 source "drivers/misc/echo/Kconfig"
285 --- a/drivers/misc/Makefile
286 +++ b/drivers/misc/Makefile
287 @@ -51,6 +51,7 @@ obj-$(CONFIG_INTEL_MEI) += mei/
288 obj-$(CONFIG_VMWARE_VMCI) += vmw_vmci/
289 obj-$(CONFIG_LATTICE_ECP3_CONFIG) += lattice-ecp3-config.o
290 obj-$(CONFIG_SRAM) += sram.o
291 +obj-$(CONFIG_BCM2708_VCHIQ) += vc04_services/
292 obj-y += mic/
293 obj-$(CONFIG_GENWQE) += genwqe/
294 obj-$(CONFIG_ECHO) += echo/
295 --- /dev/null
296 +++ b/drivers/misc/vc04_services/Kconfig
297 @@ -0,0 +1,9 @@
298 +config BCM2708_VCHIQ
299 + tristate "Videocore VCHIQ"
300 + depends on (MACH_BCM2708 || MACH_BCM2709 || ARCH_BCM2835) && BCM2708_MBOX
301 + default y
302 + help
303 + Kernel to VideoCore communication interface for the
304 + BCM2708 family of products.
305 + Defaults to Y when the Broadcom Videocore services
306 + are included in the build, N otherwise.
307 --- /dev/null
308 +++ b/drivers/misc/vc04_services/Makefile
309 @@ -0,0 +1,14 @@
310 +obj-$(CONFIG_BCM2708_VCHIQ) += vchiq.o
311 +
312 +vchiq-objs := \
313 + interface/vchiq_arm/vchiq_core.o \
314 + interface/vchiq_arm/vchiq_arm.o \
315 + interface/vchiq_arm/vchiq_kern_lib.o \
316 + interface/vchiq_arm/vchiq_2835_arm.o \
317 + interface/vchiq_arm/vchiq_debugfs.o \
318 + interface/vchiq_arm/vchiq_shim.o \
319 + interface/vchiq_arm/vchiq_util.o \
320 + interface/vchiq_arm/vchiq_connected.o \
321 +
322 +ccflags-y += -DVCOS_VERIFY_BKPTS=1 -Idrivers/misc/vc04_services -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000
323 +
324 --- /dev/null
325 +++ b/drivers/misc/vc04_services/interface/vchi/connections/connection.h
326 @@ -0,0 +1,328 @@
327 +/**
328 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
329 + *
330 + * Redistribution and use in source and binary forms, with or without
331 + * modification, are permitted provided that the following conditions
332 + * are met:
333 + * 1. Redistributions of source code must retain the above copyright
334 + * notice, this list of conditions, and the following disclaimer,
335 + * without modification.
336 + * 2. Redistributions in binary form must reproduce the above copyright
337 + * notice, this list of conditions and the following disclaimer in the
338 + * documentation and/or other materials provided with the distribution.
339 + * 3. The names of the above-listed copyright holders may not be used
340 + * to endorse or promote products derived from this software without
341 + * specific prior written permission.
342 + *
343 + * ALTERNATIVELY, this software may be distributed under the terms of the
344 + * GNU General Public License ("GPL") version 2, as published by the Free
345 + * Software Foundation.
346 + *
347 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
348 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
349 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
350 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
351 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
352 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
353 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
354 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
355 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
356 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
357 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
358 + */
359 +
360 +#ifndef CONNECTION_H_
361 +#define CONNECTION_H_
362 +
363 +#include <linux/kernel.h>
364 +#include <linux/types.h>
365 +#include <linux/semaphore.h>
366 +
367 +#include "interface/vchi/vchi_cfg_internal.h"
368 +#include "interface/vchi/vchi_common.h"
369 +#include "interface/vchi/message_drivers/message.h"
370 +
371 +/******************************************************************************
372 + Global defs
373 + *****************************************************************************/
374 +
375 +// Opaque handle for a connection / service pair
376 +typedef struct opaque_vchi_connection_connected_service_handle_t *VCHI_CONNECTION_SERVICE_HANDLE_T;
377 +
378 +// opaque handle to the connection state information
379 +typedef struct opaque_vchi_connection_info_t VCHI_CONNECTION_STATE_T;
380 +
381 +typedef struct vchi_connection_t VCHI_CONNECTION_T;
382 +
383 +
384 +/******************************************************************************
385 + API
386 + *****************************************************************************/
387 +
388 +// Routine to init a connection with a particular low level driver
389 +typedef VCHI_CONNECTION_STATE_T * (*VCHI_CONNECTION_INIT_T)( struct vchi_connection_t * connection,
390 + const VCHI_MESSAGE_DRIVER_T * driver );
391 +
392 +// Routine to control CRC enabling at a connection level
393 +typedef int32_t (*VCHI_CONNECTION_CRC_CONTROL_T)( VCHI_CONNECTION_STATE_T *state_handle,
394 + VCHI_CRC_CONTROL_T control );
395 +
396 +// Routine to create a service
397 +typedef int32_t (*VCHI_CONNECTION_SERVICE_CONNECT_T)( VCHI_CONNECTION_STATE_T *state_handle,
398 + int32_t service_id,
399 + uint32_t rx_fifo_size,
400 + uint32_t tx_fifo_size,
401 + int server,
402 + VCHI_CALLBACK_T callback,
403 + void *callback_param,
404 + int32_t want_crc,
405 + int32_t want_unaligned_bulk_rx,
406 + int32_t want_unaligned_bulk_tx,
407 + VCHI_CONNECTION_SERVICE_HANDLE_T *service_handle );
408 +
409 +// Routine to close a service
410 +typedef int32_t (*VCHI_CONNECTION_SERVICE_DISCONNECT_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle );
411 +
412 +// Routine to queue a message
413 +typedef int32_t (*VCHI_CONNECTION_SERVICE_QUEUE_MESSAGE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
414 + const void *data,
415 + uint32_t data_size,
416 + VCHI_FLAGS_T flags,
417 + void *msg_handle );
418 +
419 +// scatter-gather (vector) message queueing
420 +typedef int32_t (*VCHI_CONNECTION_SERVICE_QUEUE_MESSAGEV_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
421 + VCHI_MSG_VECTOR_T *vector,
422 + uint32_t count,
423 + VCHI_FLAGS_T flags,
424 + void *msg_handle );
425 +
426 +// Routine to dequeue a message
427 +typedef int32_t (*VCHI_CONNECTION_SERVICE_DEQUEUE_MESSAGE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
428 + void *data,
429 + uint32_t max_data_size_to_read,
430 + uint32_t *actual_msg_size,
431 + VCHI_FLAGS_T flags );
432 +
433 +// Routine to peek at a message
434 +typedef int32_t (*VCHI_CONNECTION_SERVICE_PEEK_MESSAGE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
435 + void **data,
436 + uint32_t *msg_size,
437 + VCHI_FLAGS_T flags );
438 +
439 +// Routine to hold a message
440 +typedef int32_t (*VCHI_CONNECTION_SERVICE_HOLD_MESSAGE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
441 + void **data,
442 + uint32_t *msg_size,
443 + VCHI_FLAGS_T flags,
444 + void **message_handle );
445 +
446 +// Routine to initialise a received message iterator
447 +typedef int32_t (*VCHI_CONNECTION_SERVICE_LOOKAHEAD_MESSAGE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
448 + VCHI_MSG_ITER_T *iter,
449 + VCHI_FLAGS_T flags );
450 +
451 +// Routine to release a held message
452 +typedef int32_t (*VCHI_CONNECTION_HELD_MSG_RELEASE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
453 + void *message_handle );
454 +
455 +// Routine to get info on a held message
456 +typedef int32_t (*VCHI_CONNECTION_HELD_MSG_INFO_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
457 + void *message_handle,
458 + void **data,
459 + int32_t *msg_size,
460 + uint32_t *tx_timestamp,
461 + uint32_t *rx_timestamp );
462 +
463 +// Routine to check whether the iterator has a next message
464 +typedef int32_t (*VCHI_CONNECTION_MSG_ITER_HAS_NEXT_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service,
465 + const VCHI_MSG_ITER_T *iter );
466 +
467 +// Routine to advance the iterator
468 +typedef int32_t (*VCHI_CONNECTION_MSG_ITER_NEXT_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service,
469 + VCHI_MSG_ITER_T *iter,
470 + void **data,
471 + uint32_t *msg_size );
472 +
473 +// Routine to remove the last message returned by the iterator
474 +typedef int32_t (*VCHI_CONNECTION_MSG_ITER_REMOVE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service,
475 + VCHI_MSG_ITER_T *iter );
476 +
477 +// Routine to hold the last message returned by the iterator
478 +typedef int32_t (*VCHI_CONNECTION_MSG_ITER_HOLD_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service,
479 + VCHI_MSG_ITER_T *iter,
480 + void **msg_handle );
481 +
482 +// Routine to transmit bulk data
483 +typedef int32_t (*VCHI_CONNECTION_BULK_QUEUE_TRANSMIT_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
484 + const void *data_src,
485 + uint32_t data_size,
486 + VCHI_FLAGS_T flags,
487 + void *bulk_handle );
488 +
489 +// Routine to receive data
490 +typedef int32_t (*VCHI_CONNECTION_BULK_QUEUE_RECEIVE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
491 + void *data_dst,
492 + uint32_t data_size,
493 + VCHI_FLAGS_T flags,
494 + void *bulk_handle );
495 +
496 +// Routine to report if a server is available
497 +typedef int32_t (*VCHI_CONNECTION_SERVER_PRESENT)( VCHI_CONNECTION_STATE_T *state, int32_t service_id, int32_t peer_flags );
498 +
499 +// Routine to report the number of RX slots available
500 +typedef int (*VCHI_CONNECTION_RX_SLOTS_AVAILABLE)( const VCHI_CONNECTION_STATE_T *state );
501 +
502 +// Routine to report the RX slot size
503 +typedef uint32_t (*VCHI_CONNECTION_RX_SLOT_SIZE)( const VCHI_CONNECTION_STATE_T *state );
504 +
505 +// Callback to indicate that the other side has added a buffer to the rx bulk DMA FIFO
506 +typedef void (*VCHI_CONNECTION_RX_BULK_BUFFER_ADDED)(VCHI_CONNECTION_STATE_T *state,
507 + int32_t service,
508 + uint32_t length,
509 + MESSAGE_TX_CHANNEL_T channel,
510 + uint32_t channel_params,
511 + uint32_t data_length,
512 + uint32_t data_offset);
513 +
514 +// Callback to inform a service that a Xon or Xoff message has been received
515 +typedef void (*VCHI_CONNECTION_FLOW_CONTROL)(VCHI_CONNECTION_STATE_T *state, int32_t service_id, int32_t xoff);
516 +
517 +// Callback to inform a service that a server available reply message has been received
518 +typedef void (*VCHI_CONNECTION_SERVER_AVAILABLE_REPLY)(VCHI_CONNECTION_STATE_T *state, int32_t service_id, uint32_t flags);
519 +
520 +// Callback to indicate that bulk auxiliary messages have arrived
521 +typedef void (*VCHI_CONNECTION_BULK_AUX_RECEIVED)(VCHI_CONNECTION_STATE_T *state);
522 +
523 +// Callback to indicate that bulk auxiliary messages have arrived
524 +typedef void (*VCHI_CONNECTION_BULK_AUX_TRANSMITTED)(VCHI_CONNECTION_STATE_T *state, void *handle);
525 +
526 +// Callback with all the connection info you require
527 +typedef void (*VCHI_CONNECTION_INFO)(VCHI_CONNECTION_STATE_T *state, uint32_t protocol_version, uint32_t slot_size, uint32_t num_slots, uint32_t min_bulk_size);
528 +
529 +// Callback to inform of a disconnect
530 +typedef void (*VCHI_CONNECTION_DISCONNECT)(VCHI_CONNECTION_STATE_T *state, uint32_t flags);
531 +
532 +// Callback to inform of a power control request
533 +typedef void (*VCHI_CONNECTION_POWER_CONTROL)(VCHI_CONNECTION_STATE_T *state, MESSAGE_TX_CHANNEL_T channel, int32_t enable);
534 +
535 +// allocate memory suitably aligned for this connection
536 +typedef void * (*VCHI_BUFFER_ALLOCATE)(VCHI_CONNECTION_SERVICE_HANDLE_T service_handle, uint32_t * length);
537 +
538 +// free memory allocated by buffer_allocate
539 +typedef void (*VCHI_BUFFER_FREE)(VCHI_CONNECTION_SERVICE_HANDLE_T service_handle, void * address);
540 +
541 +
542 +/******************************************************************************
543 + System driver struct
544 + *****************************************************************************/
545 +
546 +struct opaque_vchi_connection_api_t
547 +{
548 + // Routine to init the connection
549 + VCHI_CONNECTION_INIT_T init;
550 +
551 + // Connection-level CRC control
552 + VCHI_CONNECTION_CRC_CONTROL_T crc_control;
553 +
554 + // Routine to connect to or create service
555 + VCHI_CONNECTION_SERVICE_CONNECT_T service_connect;
556 +
557 + // Routine to disconnect from a service
558 + VCHI_CONNECTION_SERVICE_DISCONNECT_T service_disconnect;
559 +
560 + // Routine to queue a message
561 + VCHI_CONNECTION_SERVICE_QUEUE_MESSAGE_T service_queue_msg;
562 +
563 + // scatter-gather (vector) message queue
564 + VCHI_CONNECTION_SERVICE_QUEUE_MESSAGEV_T service_queue_msgv;
565 +
566 + // Routine to dequeue a message
567 + VCHI_CONNECTION_SERVICE_DEQUEUE_MESSAGE_T service_dequeue_msg;
568 +
569 + // Routine to peek at a message
570 + VCHI_CONNECTION_SERVICE_PEEK_MESSAGE_T service_peek_msg;
571 +
572 + // Routine to hold a message
573 + VCHI_CONNECTION_SERVICE_HOLD_MESSAGE_T service_hold_msg;
574 +
575 + // Routine to initialise a received message iterator
576 + VCHI_CONNECTION_SERVICE_LOOKAHEAD_MESSAGE_T service_look_ahead_msg;
577 +
578 + // Routine to release a message
579 + VCHI_CONNECTION_HELD_MSG_RELEASE_T held_msg_release;
580 +
581 + // Routine to get information on a held message
582 + VCHI_CONNECTION_HELD_MSG_INFO_T held_msg_info;
583 +
584 + // Routine to check for next message on iterator
585 + VCHI_CONNECTION_MSG_ITER_HAS_NEXT_T msg_iter_has_next;
586 +
587 + // Routine to get next message on iterator
588 + VCHI_CONNECTION_MSG_ITER_NEXT_T msg_iter_next;
589 +
590 + // Routine to remove the last message returned by iterator
591 + VCHI_CONNECTION_MSG_ITER_REMOVE_T msg_iter_remove;
592 +
593 + // Routine to hold the last message returned by iterator
594 + VCHI_CONNECTION_MSG_ITER_HOLD_T msg_iter_hold;
595 +
596 + // Routine to transmit bulk data
597 + VCHI_CONNECTION_BULK_QUEUE_TRANSMIT_T bulk_queue_transmit;
598 +
599 + // Routine to receive data
600 + VCHI_CONNECTION_BULK_QUEUE_RECEIVE_T bulk_queue_receive;
601 +
602 + // Routine to report the available servers
603 + VCHI_CONNECTION_SERVER_PRESENT server_present;
604 +
605 + // Routine to report the number of RX slots available
606 + VCHI_CONNECTION_RX_SLOTS_AVAILABLE connection_rx_slots_available;
607 +
608 + // Routine to report the RX slot size
609 + VCHI_CONNECTION_RX_SLOT_SIZE connection_rx_slot_size;
610 +
611 + // Callback to indicate that the other side has added a buffer to the rx bulk DMA FIFO
612 + VCHI_CONNECTION_RX_BULK_BUFFER_ADDED rx_bulk_buffer_added;
613 +
614 + // Callback to inform a service that a Xon or Xoff message has been received
615 + VCHI_CONNECTION_FLOW_CONTROL flow_control;
616 +
617 + // Callback to inform a service that a server available reply message has been received
618 + VCHI_CONNECTION_SERVER_AVAILABLE_REPLY server_available_reply;
619 +
620 + // Callback to indicate that bulk auxiliary messages have arrived
621 + VCHI_CONNECTION_BULK_AUX_RECEIVED bulk_aux_received;
622 +
623 + // Callback to indicate that a bulk auxiliary message has been transmitted
624 + VCHI_CONNECTION_BULK_AUX_TRANSMITTED bulk_aux_transmitted;
625 +
626 + // Callback to provide information about the connection
627 + VCHI_CONNECTION_INFO connection_info;
628 +
629 + // Callback to notify that peer has requested disconnect
630 + VCHI_CONNECTION_DISCONNECT disconnect;
631 +
632 + // Callback to notify that peer has requested power change
633 + VCHI_CONNECTION_POWER_CONTROL power_control;
634 +
635 + // allocate memory suitably aligned for this connection
636 + VCHI_BUFFER_ALLOCATE buffer_allocate;
637 +
638 + // free memory allocated by buffer_allocate
639 + VCHI_BUFFER_FREE buffer_free;
640 +
641 +};
642 +
643 +struct vchi_connection_t {
644 + const VCHI_CONNECTION_API_T *api;
645 + VCHI_CONNECTION_STATE_T *state;
646 +#ifdef VCHI_COARSE_LOCKING
647 + struct semaphore sem;
648 +#endif
649 +};
650 +
651 +
652 +#endif /* CONNECTION_H_ */
653 +
654 +/****************************** End of file **********************************/
655 --- /dev/null
656 +++ b/drivers/misc/vc04_services/interface/vchi/message_drivers/message.h
657 @@ -0,0 +1,204 @@
658 +/**
659 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
660 + *
661 + * Redistribution and use in source and binary forms, with or without
662 + * modification, are permitted provided that the following conditions
663 + * are met:
664 + * 1. Redistributions of source code must retain the above copyright
665 + * notice, this list of conditions, and the following disclaimer,
666 + * without modification.
667 + * 2. Redistributions in binary form must reproduce the above copyright
668 + * notice, this list of conditions and the following disclaimer in the
669 + * documentation and/or other materials provided with the distribution.
670 + * 3. The names of the above-listed copyright holders may not be used
671 + * to endorse or promote products derived from this software without
672 + * specific prior written permission.
673 + *
674 + * ALTERNATIVELY, this software may be distributed under the terms of the
675 + * GNU General Public License ("GPL") version 2, as published by the Free
676 + * Software Foundation.
677 + *
678 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
679 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
680 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
681 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
682 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
683 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
684 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
685 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
686 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
687 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
688 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
689 + */
690 +
691 +#ifndef _VCHI_MESSAGE_H_
692 +#define _VCHI_MESSAGE_H_
693 +
694 +#include <linux/kernel.h>
695 +#include <linux/types.h>
696 +#include <linux/semaphore.h>
697 +
698 +#include "interface/vchi/vchi_cfg_internal.h"
699 +#include "interface/vchi/vchi_common.h"
700 +
701 +
702 +typedef enum message_event_type {
703 + MESSAGE_EVENT_NONE,
704 + MESSAGE_EVENT_NOP,
705 + MESSAGE_EVENT_MESSAGE,
706 + MESSAGE_EVENT_SLOT_COMPLETE,
707 + MESSAGE_EVENT_RX_BULK_PAUSED,
708 + MESSAGE_EVENT_RX_BULK_COMPLETE,
709 + MESSAGE_EVENT_TX_COMPLETE,
710 + MESSAGE_EVENT_MSG_DISCARDED
711 +} MESSAGE_EVENT_TYPE_T;
712 +
713 +typedef enum vchi_msg_flags
714 +{
715 + VCHI_MSG_FLAGS_NONE = 0x0,
716 + VCHI_MSG_FLAGS_TERMINATE_DMA = 0x1
717 +} VCHI_MSG_FLAGS_T;
718 +
719 +typedef enum message_tx_channel
720 +{
721 + MESSAGE_TX_CHANNEL_MESSAGE = 0,
722 + MESSAGE_TX_CHANNEL_BULK = 1 // drivers may provide multiple bulk channels, from 1 upwards
723 +} MESSAGE_TX_CHANNEL_T;
724 +
725 +// Macros used for cycling through bulk channels
726 +#define MESSAGE_TX_CHANNEL_BULK_PREV(c) (MESSAGE_TX_CHANNEL_BULK+((c)-MESSAGE_TX_CHANNEL_BULK+VCHI_MAX_BULK_TX_CHANNELS_PER_CONNECTION-1)%VCHI_MAX_BULK_TX_CHANNELS_PER_CONNECTION)
727 +#define MESSAGE_TX_CHANNEL_BULK_NEXT(c) (MESSAGE_TX_CHANNEL_BULK+((c)-MESSAGE_TX_CHANNEL_BULK+1)%VCHI_MAX_BULK_TX_CHANNELS_PER_CONNECTION)
728 +
729 +typedef enum message_rx_channel
730 +{
731 + MESSAGE_RX_CHANNEL_MESSAGE = 0,
732 + MESSAGE_RX_CHANNEL_BULK = 1 // drivers may provide multiple bulk channels, from 1 upwards
733 +} MESSAGE_RX_CHANNEL_T;
734 +
735 +// Message receive slot information
736 +typedef struct rx_msg_slot_info {
737 +
738 + struct rx_msg_slot_info *next;
739 + //struct slot_info *prev;
740 +#if !defined VCHI_COARSE_LOCKING
741 + struct semaphore sem;
742 +#endif
743 +
744 + uint8_t *addr; // base address of slot
745 + uint32_t len; // length of slot in bytes
746 +
747 + uint32_t write_ptr; // hardware causes this to advance
748 + uint32_t read_ptr; // this module does the reading
749 + int active; // is this slot in the hardware dma fifo?
750 + uint32_t msgs_parsed; // count how many messages are in this slot
751 + uint32_t msgs_released; // how many messages have been released
752 + void *state; // connection state information
753 + uint8_t ref_count[VCHI_MAX_SERVICES_PER_CONNECTION]; // reference count for slots held by services
754 +} RX_MSG_SLOTINFO_T;
755 +
756 +// The message driver no longer needs to know about the fields of RX_BULK_SLOTINFO_T - sort this out.
757 +// In particular, it mustn't use addr and len - they're the client buffer, but the message
758 +// driver will be tasked with sending the aligned core section.
759 +typedef struct rx_bulk_slotinfo_t {
760 + struct rx_bulk_slotinfo_t *next;
761 +
762 + struct semaphore *blocking;
763 +
764 + // needed by DMA
765 + void *addr;
766 + uint32_t len;
767 +
768 + // needed for the callback
769 + void *service;
770 + void *handle;
771 + VCHI_FLAGS_T flags;
772 +} RX_BULK_SLOTINFO_T;
773 +
774 +
775 +/* ----------------------------------------------------------------------
776 + * each connection driver will have a pool of the following struct.
777 + *
778 + * the pool will be managed by vchi_qman_*
779 + * this means there will be multiple queues (single linked lists)
780 + * a given struct message_info will be on exactly one of these queues
781 + * at any one time
782 + * -------------------------------------------------------------------- */
783 +typedef struct rx_message_info {
784 +
785 + struct message_info *next;
786 + //struct message_info *prev;
787 +
788 + uint8_t *addr;
789 + uint32_t len;
790 + RX_MSG_SLOTINFO_T *slot; // points to whichever slot contains this message
791 + uint32_t tx_timestamp;
792 + uint32_t rx_timestamp;
793 +
794 +} RX_MESSAGE_INFO_T;
795 +
796 +typedef struct {
797 + MESSAGE_EVENT_TYPE_T type;
798 +
799 + struct {
800 + // for messages
801 + void *addr; // address of message
802 + uint16_t slot_delta; // whether this message indicated slot delta
803 + uint32_t len; // length of message
804 + RX_MSG_SLOTINFO_T *slot; // slot this message is in
805 + int32_t service; // service id this message is destined for
806 + uint32_t tx_timestamp; // timestamp from the header
807 + uint32_t rx_timestamp; // timestamp when we parsed it
808 + } message;
809 +
810 + // FIXME: cleanup slot reporting...
811 + RX_MSG_SLOTINFO_T *rx_msg;
812 + RX_BULK_SLOTINFO_T *rx_bulk;
813 + void *tx_handle;
814 + MESSAGE_TX_CHANNEL_T tx_channel;
815 +
816 +} MESSAGE_EVENT_T;
817 +
818 +
819 +// callbacks
820 +typedef void VCHI_MESSAGE_DRIVER_EVENT_CALLBACK_T( void *state );
821 +
822 +typedef struct {
823 + VCHI_MESSAGE_DRIVER_EVENT_CALLBACK_T *event_callback;
824 +} VCHI_MESSAGE_DRIVER_OPEN_T;
825 +
826 +
827 +// handle to this instance of message driver (as returned by ->open)
828 +typedef struct opaque_mhandle_t *VCHI_MDRIVER_HANDLE_T;
829 +
830 +struct opaque_vchi_message_driver_t {
831 + VCHI_MDRIVER_HANDLE_T *(*open)( VCHI_MESSAGE_DRIVER_OPEN_T *params, void *state );
832 + int32_t (*suspending)( VCHI_MDRIVER_HANDLE_T *handle );
833 + int32_t (*resumed)( VCHI_MDRIVER_HANDLE_T *handle );
834 + int32_t (*power_control)( VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T, int32_t enable );
835 + int32_t (*add_msg_rx_slot)( VCHI_MDRIVER_HANDLE_T *handle, RX_MSG_SLOTINFO_T *slot ); // rx message
836 + int32_t (*add_bulk_rx)( VCHI_MDRIVER_HANDLE_T *handle, void *data, uint32_t len, RX_BULK_SLOTINFO_T *slot ); // rx data (bulk)
837 + int32_t (*send)( VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T channel, const void *data, uint32_t len, VCHI_MSG_FLAGS_T flags, void *send_handle ); // tx (message & bulk)
838 + void (*next_event)( VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_EVENT_T *event ); // get the next event from message_driver
839 + int32_t (*enable)( VCHI_MDRIVER_HANDLE_T *handle );
840 + int32_t (*form_message)( VCHI_MDRIVER_HANDLE_T *handle, int32_t service_id, VCHI_MSG_VECTOR_T *vector, uint32_t count, void
841 + *address, uint32_t length_avail, uint32_t max_total_length, int32_t pad_to_fill, int32_t allow_partial );
842 +
843 + int32_t (*update_message)( VCHI_MDRIVER_HANDLE_T *handle, void *dest, int16_t *slot_count );
844 + int32_t (*buffer_aligned)( VCHI_MDRIVER_HANDLE_T *handle, int tx, int uncached, const void *address, const uint32_t length );
845 + void * (*allocate_buffer)( VCHI_MDRIVER_HANDLE_T *handle, uint32_t *length );
846 + void (*free_buffer)( VCHI_MDRIVER_HANDLE_T *handle, void *address );
847 + int (*rx_slot_size)( VCHI_MDRIVER_HANDLE_T *handle, int msg_size );
848 + int (*tx_slot_size)( VCHI_MDRIVER_HANDLE_T *handle, int msg_size );
849 +
850 + int32_t (*tx_supports_terminate)( const VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T channel );
851 + uint32_t (*tx_bulk_chunk_size)( const VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T channel );
852 + int (*tx_alignment)( const VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T channel );
853 + int (*rx_alignment)( const VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_RX_CHANNEL_T channel );
854 + void (*form_bulk_aux)( VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T channel, const void *data, uint32_t len, uint32_t chunk_size, const void **aux_data, int32_t *aux_len );
855 + void (*debug)( VCHI_MDRIVER_HANDLE_T *handle );
856 +};
857 +
858 +
859 +#endif // _VCHI_MESSAGE_H_
860 +
861 +/****************************** End of file ***********************************/
862 --- /dev/null
863 +++ b/drivers/misc/vc04_services/interface/vchi/vchi.h
864 @@ -0,0 +1,378 @@
865 +/**
866 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
867 + *
868 + * Redistribution and use in source and binary forms, with or without
869 + * modification, are permitted provided that the following conditions
870 + * are met:
871 + * 1. Redistributions of source code must retain the above copyright
872 + * notice, this list of conditions, and the following disclaimer,
873 + * without modification.
874 + * 2. Redistributions in binary form must reproduce the above copyright
875 + * notice, this list of conditions and the following disclaimer in the
876 + * documentation and/or other materials provided with the distribution.
877 + * 3. The names of the above-listed copyright holders may not be used
878 + * to endorse or promote products derived from this software without
879 + * specific prior written permission.
880 + *
881 + * ALTERNATIVELY, this software may be distributed under the terms of the
882 + * GNU General Public License ("GPL") version 2, as published by the Free
883 + * Software Foundation.
884 + *
885 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
886 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
887 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
888 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
889 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
890 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
891 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
892 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
893 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
894 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
895 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
896 + */
897 +
898 +#ifndef VCHI_H_
899 +#define VCHI_H_
900 +
901 +#include "interface/vchi/vchi_cfg.h"
902 +#include "interface/vchi/vchi_common.h"
903 +#include "interface/vchi/connections/connection.h"
904 +#include "vchi_mh.h"
905 +
906 +
907 +/******************************************************************************
908 + Global defs
909 + *****************************************************************************/
910 +
911 +#define VCHI_BULK_ROUND_UP(x) ((((unsigned long)(x))+VCHI_BULK_ALIGN-1) & ~(VCHI_BULK_ALIGN-1))
912 +#define VCHI_BULK_ROUND_DOWN(x) (((unsigned long)(x)) & ~(VCHI_BULK_ALIGN-1))
913 +#define VCHI_BULK_ALIGN_NBYTES(x) (VCHI_BULK_ALIGNED(x) ? 0 : (VCHI_BULK_ALIGN - ((unsigned long)(x) & (VCHI_BULK_ALIGN-1))))
914 +
915 +#ifdef USE_VCHIQ_ARM
916 +#define VCHI_BULK_ALIGNED(x) 1
917 +#else
918 +#define VCHI_BULK_ALIGNED(x) (((unsigned long)(x) & (VCHI_BULK_ALIGN-1)) == 0)
919 +#endif
920 +
921 +struct vchi_version {
922 + uint32_t version;
923 + uint32_t version_min;
924 +};
925 +#define VCHI_VERSION(v_) { v_, v_ }
926 +#define VCHI_VERSION_EX(v_, m_) { v_, m_ }
927 +
928 +typedef enum
929 +{
930 + VCHI_VEC_POINTER,
931 + VCHI_VEC_HANDLE,
932 + VCHI_VEC_LIST
933 +} VCHI_MSG_VECTOR_TYPE_T;
934 +
935 +typedef struct vchi_msg_vector_ex {
936 +
937 + VCHI_MSG_VECTOR_TYPE_T type;
938 + union
939 + {
940 + // a memory handle
941 + struct
942 + {
943 + VCHI_MEM_HANDLE_T handle;
944 + uint32_t offset;
945 + int32_t vec_len;
946 + } handle;
947 +
948 + // an ordinary data pointer
949 + struct
950 + {
951 + const void *vec_base;
952 + int32_t vec_len;
953 + } ptr;
954 +
955 + // a nested vector list
956 + struct
957 + {
958 + struct vchi_msg_vector_ex *vec;
959 + uint32_t vec_len;
960 + } list;
961 + } u;
962 +} VCHI_MSG_VECTOR_EX_T;
963 +
964 +
965 +// Construct an entry in a msg vector for a pointer (p) of length (l)
966 +#define VCHI_VEC_POINTER(p,l) VCHI_VEC_POINTER, { { (VCHI_MEM_HANDLE_T)(p), (l) } }
967 +
968 +// Construct an entry in a msg vector for a message handle (h), starting at offset (o) of length (l)
969 +#define VCHI_VEC_HANDLE(h,o,l) VCHI_VEC_HANDLE, { { (h), (o), (l) } }
970 +
971 +// Macros to manipulate 'FOURCC' values
972 +#define MAKE_FOURCC(x) ((int32_t)( (x[0] << 24) | (x[1] << 16) | (x[2] << 8) | x[3] ))
973 +#define FOURCC_TO_CHAR(x) (x >> 24) & 0xFF,(x >> 16) & 0xFF,(x >> 8) & 0xFF, x & 0xFF
974 +
975 +
976 +// Opaque service information
977 +struct opaque_vchi_service_t;
978 +
979 +// Descriptor for a held message. Allocated by client, initialised by vchi_msg_hold,
980 +// vchi_msg_iter_hold or vchi_msg_iter_hold_next. Fields are for internal VCHI use only.
981 +typedef struct
982 +{
983 + struct opaque_vchi_service_t *service;
984 + void *message;
985 +} VCHI_HELD_MSG_T;
986 +
987 +
988 +
989 +// structure used to provide the information needed to open a server or a client
990 +typedef struct {
991 + struct vchi_version version;
992 + int32_t service_id;
993 + VCHI_CONNECTION_T *connection;
994 + uint32_t rx_fifo_size;
995 + uint32_t tx_fifo_size;
996 + VCHI_CALLBACK_T callback;
997 + void *callback_param;
998 + /* client intends to receive bulk transfers of
999 + odd lengths or into unaligned buffers */
1000 + int32_t want_unaligned_bulk_rx;
1001 + /* client intends to transmit bulk transfers of
1002 + odd lengths or out of unaligned buffers */
1003 + int32_t want_unaligned_bulk_tx;
1004 + /* client wants to check CRCs on (bulk) xfers.
1005 + Only needs to be set at 1 end - will do both directions. */
1006 + int32_t want_crc;
1007 +} SERVICE_CREATION_T;
1008 +
1009 +// Opaque handle for a VCHI instance
1010 +typedef struct opaque_vchi_instance_handle_t *VCHI_INSTANCE_T;
1011 +
1012 +// Opaque handle for a server or client
1013 +typedef struct opaque_vchi_service_handle_t *VCHI_SERVICE_HANDLE_T;
1014 +
1015 +// Service registration & startup
1016 +typedef void (*VCHI_SERVICE_INIT)(VCHI_INSTANCE_T initialise_instance, VCHI_CONNECTION_T **connections, uint32_t num_connections);
1017 +
1018 +typedef struct service_info_tag {
1019 + const char * const vll_filename; /* VLL to load to start this service. This is an empty string if VLL is "static" */
1020 + VCHI_SERVICE_INIT init; /* Service initialisation function */
1021 + void *vll_handle; /* VLL handle; NULL when unloaded or a "static VLL" in build */
1022 +} SERVICE_INFO_T;
1023 +
1024 +/******************************************************************************
1025 + Global funcs - implementation is specific to which side you are on (local / remote)
1026 + *****************************************************************************/
1027 +
1028 +#ifdef __cplusplus
1029 +extern "C" {
1030 +#endif
1031 +
1032 +extern /*@observer@*/ VCHI_CONNECTION_T * vchi_create_connection( const VCHI_CONNECTION_API_T * function_table,
1033 + const VCHI_MESSAGE_DRIVER_T * low_level);
1034 +
1035 +
1036 +// Routine used to initialise the vchi on both local + remote connections
1037 +extern int32_t vchi_initialise( VCHI_INSTANCE_T *instance_handle );
1038 +
1039 +extern int32_t vchi_exit( void );
1040 +
1041 +extern int32_t vchi_connect( VCHI_CONNECTION_T **connections,
1042 + const uint32_t num_connections,
1043 + VCHI_INSTANCE_T instance_handle );
1044 +
1045 +//When this is called, ensure that all services have no data pending.
1046 +//Bulk transfers can remain 'queued'
1047 +extern int32_t vchi_disconnect( VCHI_INSTANCE_T instance_handle );
1048 +
1049 +// Global control over bulk CRC checking
1050 +extern int32_t vchi_crc_control( VCHI_CONNECTION_T *connection,
1051 + VCHI_CRC_CONTROL_T control );
1052 +
1053 +// helper functions
1054 +extern void * vchi_allocate_buffer(VCHI_SERVICE_HANDLE_T handle, uint32_t *length);
1055 +extern void vchi_free_buffer(VCHI_SERVICE_HANDLE_T handle, void *address);
1056 +extern uint32_t vchi_current_time(VCHI_INSTANCE_T instance_handle);
1057 +
1058 +
1059 +/******************************************************************************
1060 + Global service API
1061 + *****************************************************************************/
1062 +// Routine to create a named service
1063 +extern int32_t vchi_service_create( VCHI_INSTANCE_T instance_handle,
1064 + SERVICE_CREATION_T *setup,
1065 + VCHI_SERVICE_HANDLE_T *handle );
1066 +
1067 +// Routine to destroy a service
1068 +extern int32_t vchi_service_destroy( const VCHI_SERVICE_HANDLE_T handle );
1069 +
1070 +// Routine to open a named service
1071 +extern int32_t vchi_service_open( VCHI_INSTANCE_T instance_handle,
1072 + SERVICE_CREATION_T *setup,
1073 + VCHI_SERVICE_HANDLE_T *handle);
1074 +
1075 +extern int32_t vchi_get_peer_version( const VCHI_SERVICE_HANDLE_T handle,
1076 + short *peer_version );
1077 +
1078 +// Routine to close a named service
1079 +extern int32_t vchi_service_close( const VCHI_SERVICE_HANDLE_T handle );
1080 +
1081 +// Routine to increment ref count on a named service
1082 +extern int32_t vchi_service_use( const VCHI_SERVICE_HANDLE_T handle );
1083 +
1084 +// Routine to decrement ref count on a named service
1085 +extern int32_t vchi_service_release( const VCHI_SERVICE_HANDLE_T handle );
1086 +
1087 +// Routine to set a control option for a named service
1088 +extern int32_t vchi_service_set_option( const VCHI_SERVICE_HANDLE_T handle,
1089 + VCHI_SERVICE_OPTION_T option,
1090 + int value);
1091 +
1092 +// Routine to send a message across a service
1093 +extern int32_t vchi_msg_queue( VCHI_SERVICE_HANDLE_T handle,
1094 + const void *data,
1095 + uint32_t data_size,
1096 + VCHI_FLAGS_T flags,
1097 + void *msg_handle );
1098 +
1099 +// scatter-gather (vector) and send message
1100 +int32_t vchi_msg_queuev_ex( VCHI_SERVICE_HANDLE_T handle,
1101 + VCHI_MSG_VECTOR_EX_T *vector,
1102 + uint32_t count,
1103 + VCHI_FLAGS_T flags,
1104 + void *msg_handle );
1105 +
1106 +// legacy scatter-gather (vector) and send message, only handles pointers
1107 +int32_t vchi_msg_queuev( VCHI_SERVICE_HANDLE_T handle,
1108 + VCHI_MSG_VECTOR_T *vector,
1109 + uint32_t count,
1110 + VCHI_FLAGS_T flags,
1111 + void *msg_handle );
1112 +
1113 +// Routine to receive a msg from a service
1114 +// Dequeue is equivalent to hold, copy into client buffer, release
1115 +extern int32_t vchi_msg_dequeue( VCHI_SERVICE_HANDLE_T handle,
1116 + void *data,
1117 + uint32_t max_data_size_to_read,
1118 + uint32_t *actual_msg_size,
1119 + VCHI_FLAGS_T flags );
1120 +
1121 +// Routine to look at a message in place.
1122 +// The message is not dequeued, so a subsequent call to peek or dequeue
1123 +// will return the same message.
1124 +extern int32_t vchi_msg_peek( VCHI_SERVICE_HANDLE_T handle,
1125 + void **data,
1126 + uint32_t *msg_size,
1127 + VCHI_FLAGS_T flags );
1128 +
1129 +// Routine to remove a message after it has been read in place with peek
1130 +// The first message on the queue is dequeued.
1131 +extern int32_t vchi_msg_remove( VCHI_SERVICE_HANDLE_T handle );
1132 +
1133 +// Routine to look at a message in place.
1134 +// The message is dequeued, so the caller is left holding it; the descriptor is
1135 +// filled in and must be released when the user has finished with the message.
1136 +extern int32_t vchi_msg_hold( VCHI_SERVICE_HANDLE_T handle,
1137 + void **data, // } may be NULL, as info can be
1138 + uint32_t *msg_size, // } obtained from HELD_MSG_T
1139 + VCHI_FLAGS_T flags,
1140 + VCHI_HELD_MSG_T *message_descriptor );
1141 +
1142 +// Initialise an iterator to look through messages in place
1143 +extern int32_t vchi_msg_look_ahead( VCHI_SERVICE_HANDLE_T handle,
1144 + VCHI_MSG_ITER_T *iter,
1145 + VCHI_FLAGS_T flags );
1146 +
1147 +/******************************************************************************
1148 + Global service support API - operations on held messages and message iterators
1149 + *****************************************************************************/
1150 +
1151 +// Routine to get the address of a held message
1152 +extern void *vchi_held_msg_ptr( const VCHI_HELD_MSG_T *message );
1153 +
1154 +// Routine to get the size of a held message
1155 +extern int32_t vchi_held_msg_size( const VCHI_HELD_MSG_T *message );
1156 +
1157 +// Routine to get the transmit timestamp as written into the header by the peer
1158 +extern uint32_t vchi_held_msg_tx_timestamp( const VCHI_HELD_MSG_T *message );
1159 +
1160 +// Routine to get the reception timestamp, written as we parsed the header
1161 +extern uint32_t vchi_held_msg_rx_timestamp( const VCHI_HELD_MSG_T *message );
1162 +
1163 +// Routine to release a held message after it has been processed
1164 +extern int32_t vchi_held_msg_release( VCHI_HELD_MSG_T *message );
1165 +
1166 +// Indicates whether the iterator has a next message.
1167 +extern int32_t vchi_msg_iter_has_next( const VCHI_MSG_ITER_T *iter );
1168 +
1169 +// Return the pointer and length for the next message and advance the iterator.
1170 +extern int32_t vchi_msg_iter_next( VCHI_MSG_ITER_T *iter,
1171 + void **data,
1172 + uint32_t *msg_size );
1173 +
1174 +// Remove the last message returned by vchi_msg_iter_next.
1175 +// Can only be called once after each call to vchi_msg_iter_next.
1176 +extern int32_t vchi_msg_iter_remove( VCHI_MSG_ITER_T *iter );
1177 +
1178 +// Hold the last message returned by vchi_msg_iter_next.
1179 +// Can only be called once after each call to vchi_msg_iter_next.
1180 +extern int32_t vchi_msg_iter_hold( VCHI_MSG_ITER_T *iter,
1181 + VCHI_HELD_MSG_T *message );
1182 +
1183 +// Return information for the next message, and hold it, advancing the iterator.
1184 +extern int32_t vchi_msg_iter_hold_next( VCHI_MSG_ITER_T *iter,
1185 + void **data, // } may be NULL
1186 + uint32_t *msg_size, // }
1187 + VCHI_HELD_MSG_T *message );
1188 +
1189 +
1190 +/******************************************************************************
1191 + Global bulk API
1192 + *****************************************************************************/
1193 +
1194 +// Routine to prepare interface for a transfer from the other side
1195 +extern int32_t vchi_bulk_queue_receive( VCHI_SERVICE_HANDLE_T handle,
1196 + void *data_dst,
1197 + uint32_t data_size,
1198 + VCHI_FLAGS_T flags,
1199 + void *transfer_handle );
1200 +
1201 +
1202 +// Prepare interface for a transfer from the other side into relocatable memory.
1203 +int32_t vchi_bulk_queue_receive_reloc( const VCHI_SERVICE_HANDLE_T handle,
1204 + VCHI_MEM_HANDLE_T h_dst,
1205 + uint32_t offset,
1206 + uint32_t data_size,
1207 + const VCHI_FLAGS_T flags,
1208 + void * const bulk_handle );
1209 +
1210 +// Routine to queue up data ready for transfer to the other (once they have signalled they are ready)
1211 +extern int32_t vchi_bulk_queue_transmit( VCHI_SERVICE_HANDLE_T handle,
1212 + const void *data_src,
1213 + uint32_t data_size,
1214 + VCHI_FLAGS_T flags,
1215 + void *transfer_handle );
1216 +
1217 +
1218 +/******************************************************************************
1219 + Configuration plumbing
1220 + *****************************************************************************/
1221 +
1222 +// function prototypes for the different mid layers (the state info gives the different physical connections)
1223 +extern const VCHI_CONNECTION_API_T *single_get_func_table( void );
1224 +//extern const VCHI_CONNECTION_API_T *local_server_get_func_table( void );
1225 +//extern const VCHI_CONNECTION_API_T *local_client_get_func_table( void );
1226 +
1227 +// declare all message drivers here
1228 +const VCHI_MESSAGE_DRIVER_T *vchi_mphi_message_driver_func_table( void );
1229 +
1230 +#ifdef __cplusplus
1231 +}
1232 +#endif
1233 +
1234 +extern int32_t vchi_bulk_queue_transmit_reloc( VCHI_SERVICE_HANDLE_T handle,
1235 + VCHI_MEM_HANDLE_T h_src,
1236 + uint32_t offset,
1237 + uint32_t data_size,
1238 + VCHI_FLAGS_T flags,
1239 + void *transfer_handle );
1240 +#endif /* VCHI_H_ */
1241 +
1242 +/****************************** End of file **********************************/
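To make the client-side flow concrete, the following is a hedged, self-contained sketch built from the declarations above; the service FOURCC, callback body and payload are placeholders, and the VCHI_CALLBACK_REASON_T/VCHI_FLAGS_BLOCK_UNTIL_QUEUED names are assumed to match vchi_common.h, which is not excerpted here:

    #include "interface/vchi/vchi.h"

    /* placeholder callback; a real client would signal a waiter here */
    static void demo_callback(void *param, VCHI_CALLBACK_REASON_T reason, void *handle)
    {
    }

    static int32_t demo_open_and_send(VCHI_INSTANCE_T instance, VCHI_CONNECTION_T *connection)
    {
        VCHI_SERVICE_HANDLE_T handle;
        SERVICE_CREATION_T setup = {
            .version        = VCHI_VERSION(1),
            .service_id     = MAKE_FOURCC("DEMO"),   /* placeholder service FOURCC */
            .connection     = connection,
            .callback       = demo_callback,
            .callback_param = NULL,
        };
        int32_t ret = vchi_service_open(instance, &setup, &handle);

        if (ret == 0)
            ret = vchi_msg_queue(handle, "hello", 5,
                                 VCHI_FLAGS_BLOCK_UNTIL_QUEUED, NULL);
        if (ret == 0)
            ret = vchi_service_close(handle);
        return ret;
    }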
1243 --- /dev/null
1244 +++ b/drivers/misc/vc04_services/interface/vchi/vchi_cfg.h
1245 @@ -0,0 +1,224 @@
1246 +/**
1247 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
1248 + *
1249 + * Redistribution and use in source and binary forms, with or without
1250 + * modification, are permitted provided that the following conditions
1251 + * are met:
1252 + * 1. Redistributions of source code must retain the above copyright
1253 + * notice, this list of conditions, and the following disclaimer,
1254 + * without modification.
1255 + * 2. Redistributions in binary form must reproduce the above copyright
1256 + * notice, this list of conditions and the following disclaimer in the
1257 + * documentation and/or other materials provided with the distribution.
1258 + * 3. The names of the above-listed copyright holders may not be used
1259 + * to endorse or promote products derived from this software without
1260 + * specific prior written permission.
1261 + *
1262 + * ALTERNATIVELY, this software may be distributed under the terms of the
1263 + * GNU General Public License ("GPL") version 2, as published by the Free
1264 + * Software Foundation.
1265 + *
1266 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
1267 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
1268 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
1269 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
1270 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
1271 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
1272 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
1273 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
1274 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
1275 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1276 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1277 + */
1278 +
1279 +#ifndef VCHI_CFG_H_
1280 +#define VCHI_CFG_H_
1281 +
1282 +/****************************************************************************************
1283 + * Defines in this first section are part of the VCHI API and may be examined by VCHI
1284 + * services.
1285 + ***************************************************************************************/
1286 +
1287 +/* Required alignment of base addresses for bulk transfer, if unaligned transfers are not enabled */
1288 +/* Really determined by the message driver, and should be available from a run-time call. */
1289 +#ifndef VCHI_BULK_ALIGN
1290 +# if __VCCOREVER__ >= 0x04000000
1291 +# define VCHI_BULK_ALIGN 32 // Allows for the need to do cache cleans
1292 +# else
1293 +# define VCHI_BULK_ALIGN 16
1294 +# endif
1295 +#endif
1296 +
1297 +/* Required length multiple for bulk transfers, if unaligned transfers are not enabled */
1298 +/* May be less than or greater than VCHI_BULK_ALIGN */
1299 +/* Really determined by the message driver, and should be available from a run-time call. */
1300 +#ifndef VCHI_BULK_GRANULARITY
1301 +# if __VCCOREVER__ >= 0x04000000
1302 +# define VCHI_BULK_GRANULARITY 32 // Allows for the need to do cache cleans
1303 +# else
1304 +# define VCHI_BULK_GRANULARITY 16
1305 +# endif
1306 +#endif
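Taken together, VCHI_BULK_ALIGN and VCHI_BULK_GRANULARITY constrain the buffers a caller may hand to the bulk API when unaligned transfers are disabled. A minimal illustrative check (a hypothetical helper, not part of this patch) might look like:

    #include <stdint.h>

    /* Illustrative sketch only: reject a bulk buffer whose base address or
     * length violates the alignment/granularity rules above (assumes
     * unaligned transfers are not enabled). */
    static int bulk_buffer_ok(const void *addr, uint32_t len)
    {
        return (((uintptr_t)addr % VCHI_BULK_ALIGN) == 0) &&
               ((len % VCHI_BULK_GRANULARITY) == 0);
    }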
1307 +
1308 +/* The largest possible message to be queued with vchi_msg_queue. */
1309 +#ifndef VCHI_MAX_MSG_SIZE
1310 +# if defined VCHI_LOCAL_HOST_PORT
1311 +# define VCHI_MAX_MSG_SIZE 16384 // makes file transfers fast, but should they be using bulk?
1312 +# else
1313 +# define VCHI_MAX_MSG_SIZE 4096 // NOTE: THIS MUST BE LARGER THAN OR EQUAL TO THE SIZE OF THE KHRONOS MERGE BUFFER!!
1314 +# endif
1315 +#endif
1316 +
1317 +/******************************************************************************************
1318 + * Defines below are system configuration options, and should not be used by VCHI services.
1319 + *****************************************************************************************/
1320 +
1321 +/* How many connections can we support? A localhost implementation uses 2 connections,
1322 + * 1 for host-app, 1 for VMCS, and these are hooked together by a loopback MPHI VCFW
1323 + * driver. */
1324 +#ifndef VCHI_MAX_NUM_CONNECTIONS
1325 +# define VCHI_MAX_NUM_CONNECTIONS 3
1326 +#endif
1327 +
1328 +/* How many services can we open per connection? Extending this doesn't cost processing time, just a small
1329 + * amount of static memory. */
1330 +#ifndef VCHI_MAX_SERVICES_PER_CONNECTION
1331 +# define VCHI_MAX_SERVICES_PER_CONNECTION 36
1332 +#endif
1333 +
1334 +/* Adjust if using a message driver that supports more logical TX channels */
1335 +#ifndef VCHI_MAX_BULK_TX_CHANNELS_PER_CONNECTION
1336 +# define VCHI_MAX_BULK_TX_CHANNELS_PER_CONNECTION 9 // 1 MPHI + 8 CCP2 logical channels
1337 +#endif
1338 +
1339 +/* Adjust if using a message driver that supports more logical RX channels */
1340 +#ifndef VCHI_MAX_BULK_RX_CHANNELS_PER_CONNECTION
1341 +# define VCHI_MAX_BULK_RX_CHANNELS_PER_CONNECTION 1 // 1 MPHI
1342 +#endif
1343 +
1344 +/* How many receive slots do we use? This times VCHI_MAX_MSG_SIZE gives the effective
1345 + * receive queue space, less message headers. */
1346 +#ifndef VCHI_NUM_READ_SLOTS
1347 +# if defined(VCHI_LOCAL_HOST_PORT)
1348 +# define VCHI_NUM_READ_SLOTS 4
1349 +# else
1350 +# define VCHI_NUM_READ_SLOTS 48
1351 +# endif
1352 +#endif
1353 +
1354 +/* Do we utilise the overrun facility for receive message slots? It can aid peer transmit
1355 + * performance. Only define on the VideoCore end, talking to the host.
1356 + */
1357 +//#define VCHI_MSG_RX_OVERRUN
1358 +
1359 +/* How many transmit slots do we use? Generally don't need many, as the hardware driver
1360 + * underneath VCHI will usually have its own buffering. */
1361 +#ifndef VCHI_NUM_WRITE_SLOTS
1362 +# define VCHI_NUM_WRITE_SLOTS 4
1363 +#endif
1364 +
1365 +/* If a service has held or queued received messages in VCHI_XOFF_THRESHOLD or more slots,
1366 + * then it's taking up too much buffer space, and the peer service will be told to stop
1367 + * transmitting with an XOFF message. For this to be effective, the VCHI_NUM_READ_SLOTS
1368 + * needs to be considerably bigger than VCHI_NUM_WRITE_SLOTS, or the transmit latency
1369 + * is too high. */
1370 +#ifndef VCHI_XOFF_THRESHOLD
1371 +# define VCHI_XOFF_THRESHOLD (VCHI_NUM_READ_SLOTS / 2)
1372 +#endif
1373 +
1374 +/* After we've sent an XOFF, the peer will be told to resume transmission once the local
1375 + * service has dequeued/released enough messages that it's now occupying
1376 + * VCHI_XON_THRESHOLD slots or fewer. */
1377 +#ifndef VCHI_XON_THRESHOLD
1378 +# define VCHI_XON_THRESHOLD (VCHI_NUM_READ_SLOTS / 4)
1379 +#endif
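With the default host-side values above, the flow-control window works out as follows (worked arithmetic, for illustration only):

    /* VCHI_NUM_READ_SLOTS = 48 (VCHI_LOCAL_HOST_PORT not defined)
     * VCHI_XOFF_THRESHOLD = 48 / 2 = 24  -> XOFF is sent once a service
     *                                       ties up 24 or more slots
     * VCHI_XON_THRESHOLD  = 48 / 4 = 12  -> XON is sent once it drops back
     *                                       to 12 slots or fewer
     */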
1380 +
1381 +/* A size below which a bulk transfer omits the handshake completely and always goes
1382 + * via the message channel, if bulk auxiliary is being sent on that service. (The user
1383 + * can guarantee this by enabling unaligned transmits).
1384 + * Not API. */
1385 +#ifndef VCHI_MIN_BULK_SIZE
1386 +# define VCHI_MIN_BULK_SIZE ( VCHI_MAX_MSG_SIZE / 2 < 4096 ? VCHI_MAX_MSG_SIZE / 2 : 4096 )
1387 +#endif
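For the default VCHI_MAX_MSG_SIZE of 4096 the expression above evaluates as shown below (illustration only):

    /* VCHI_MAX_MSG_SIZE / 2 = 2048 < 4096, so VCHI_MIN_BULK_SIZE = 2048:
     * bulk transfers under 2 KiB may skip the handshake and travel over
     * the message channel instead. */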
1388 +
1389 +/* Maximum size of bulk transmission chunks, for each interface type. A trade-off between
1390 + * speed and latency; the smaller the chunk size the better chance of messages and other
1391 + * bulk transmissions getting in when big bulk transfers are happening. Set to 0 to not
1392 + * break transmissions into chunks.
1393 + */
1394 +#ifndef VCHI_MAX_BULK_CHUNK_SIZE_MPHI
1395 +# define VCHI_MAX_BULK_CHUNK_SIZE_MPHI (16 * 1024)
1396 +#endif
1397 +
1398 +/* NB Chunked CCP2 transmissions violate the letter of the CCP2 spec by using "JPEG8" mode
1399 + * with multiple-line frames. Only use if the receiver can cope. */
1400 +#ifndef VCHI_MAX_BULK_CHUNK_SIZE_CCP2
1401 +# define VCHI_MAX_BULK_CHUNK_SIZE_CCP2 0
1402 +#endif
1403 +
1404 +/* How many TX messages can we have pending in our transmit slots? Once exhausted,
1405 + * vchi_msg_queue will be blocked. */
1406 +#ifndef VCHI_TX_MSG_QUEUE_SIZE
1407 +# define VCHI_TX_MSG_QUEUE_SIZE 256
1408 +#endif
1409 +
1410 +/* How many RX messages can we have parsed in the receive slots? Once exhausted, parsing
1411 + * will be suspended until older messages are dequeued/released. */
1412 +#ifndef VCHI_RX_MSG_QUEUE_SIZE
1413 +# define VCHI_RX_MSG_QUEUE_SIZE 256
1414 +#endif
1415 +
1416 +/* Really should be able to cope if we run out of received message descriptors, by
1417 + * suspending parsing as the comment above says, but we don't. This sweeps the issue
1418 + * under the carpet. */
1419 +#if VCHI_RX_MSG_QUEUE_SIZE < (VCHI_MAX_MSG_SIZE/16 + 1) * VCHI_NUM_READ_SLOTS
1420 +# undef VCHI_RX_MSG_QUEUE_SIZE
1421 +# define VCHI_RX_MSG_QUEUE_SIZE (VCHI_MAX_MSG_SIZE/16 + 1) * VCHI_NUM_READ_SLOTS
1422 +#endif
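The guard above sizes the RX queue for the worst case in which every read slot is packed with minimal messages (the /16 implies a minimum per-message footprint of 16 bytes). With the default values this gives (illustration only):

    /* messages per slot (worst case) = 4096 / 16 + 1 = 257
     * required queue size            = 257 * 48      = 12336
     * so the initial value of 256 is raised by the #if above. */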
1423 +
1424 +/* How many bulk transmits can we have pending? Once exhausted, vchiq_bulk_queue_transmit
1425 + * will be blocked. */
1426 +#ifndef VCHI_TX_BULK_QUEUE_SIZE
1427 +# define VCHI_TX_BULK_QUEUE_SIZE 64
1428 +#endif
1429 +
1430 +/* How many bulk receives can we have pending? Once exhausted, vchiq_bulk_queue_receive
1431 + * will be blocked. */
1432 +#ifndef VCHI_RX_BULK_QUEUE_SIZE
1433 +# define VCHI_RX_BULK_QUEUE_SIZE 64
1434 +#endif
1435 +
1436 +/* A limit on how many outstanding bulk requests we expect the peer to give us. If
1437 + * the peer asks for more than this, VCHI will fail and assert. The number is determined
1438 + * by the peer's hardware - it's the number of outstanding requests that can be queued
1439 + * on all bulk channels. VC3's MPHI peripheral allows 16. */
1440 +#ifndef VCHI_MAX_PEER_BULK_REQUESTS
1441 +# define VCHI_MAX_PEER_BULK_REQUESTS 32
1442 +#endif
1443 +
1444 +/* Define VCHI_CCP2TX_MANUAL_POWER if the host tells us when to turn the CCP2
1445 + * transmitter on and off.
1446 + */
1447 +/*#define VCHI_CCP2TX_MANUAL_POWER*/
1448 +
1449 +#ifndef VCHI_CCP2TX_MANUAL_POWER
1450 +
1451 +/* Timeout (in milliseconds) for putting the CCP2TX interface into IDLE state. Set
1452 + * negative for no IDLE.
1453 + */
1454 +# ifndef VCHI_CCP2TX_IDLE_TIMEOUT
1455 +# define VCHI_CCP2TX_IDLE_TIMEOUT 5
1456 +# endif
1457 +
1458 +/* Timeout (in milliseconds) for putting the CCP2TX interface into OFF state. Set
1459 + * negative for no OFF.
1460 + */
1461 +# ifndef VCHI_CCP2TX_OFF_TIMEOUT
1462 +# define VCHI_CCP2TX_OFF_TIMEOUT 1000
1463 +# endif
1464 +
1465 +#endif /* VCHI_CCP2TX_MANUAL_POWER */
1466 +
1467 +#endif /* VCHI_CFG_H_ */
1468 +
1469 +/****************************** End of file **********************************/
1470 --- /dev/null
1471 +++ b/drivers/misc/vc04_services/interface/vchi/vchi_cfg_internal.h
1472 @@ -0,0 +1,71 @@
1473 +/**
1474 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
1475 + *
1476 + * Redistribution and use in source and binary forms, with or without
1477 + * modification, are permitted provided that the following conditions
1478 + * are met:
1479 + * 1. Redistributions of source code must retain the above copyright
1480 + * notice, this list of conditions, and the following disclaimer,
1481 + * without modification.
1482 + * 2. Redistributions in binary form must reproduce the above copyright
1483 + * notice, this list of conditions and the following disclaimer in the
1484 + * documentation and/or other materials provided with the distribution.
1485 + * 3. The names of the above-listed copyright holders may not be used
1486 + * to endorse or promote products derived from this software without
1487 + * specific prior written permission.
1488 + *
1489 + * ALTERNATIVELY, this software may be distributed under the terms of the
1490 + * GNU General Public License ("GPL") version 2, as published by the Free
1491 + * Software Foundation.
1492 + *
1493 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
1494 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
1495 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
1496 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
1497 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
1498 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
1499 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
1500 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
1501 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
1502 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1503 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1504 + */
1505 +
1506 +#ifndef VCHI_CFG_INTERNAL_H_
1507 +#define VCHI_CFG_INTERNAL_H_
1508 +
1509 +/****************************************************************************************
1510 + * Control optimisation attempts.
1511 + ***************************************************************************************/
1512 +
1513 +// Don't use lots of short-term locks - use great long ones, reducing the overall locks-per-second
1514 +#define VCHI_COARSE_LOCKING
1515 +
1516 +// Avoid lock then unlock on exit from blocking queue operations (msg tx, bulk rx/tx)
1517 +// (only relevant if VCHI_COARSE_LOCKING)
1518 +#define VCHI_ELIDE_BLOCK_EXIT_LOCK
1519 +
1520 +// Avoid lock on non-blocking peek
1521 +// (only relevant if VCHI_COARSE_LOCKING)
1522 +#define VCHI_AVOID_PEEK_LOCK
1523 +
1524 +// Use one slot-handler thread per connection, rather than 1 thread dealing with all connections in rotation.
1525 +#define VCHI_MULTIPLE_HANDLER_THREADS
1526 +
1527 +// Put free descriptors onto the head of the free queue, rather than the tail, so that we don't thrash
1528 +// our way through the pool of descriptors.
1529 +#define VCHI_PUSH_FREE_DESCRIPTORS_ONTO_HEAD
1530 +
1531 +// Don't issue a MSG_AVAILABLE callback for every single message. Possibly only safe if VCHI_COARSE_LOCKING.
1532 +#define VCHI_FEWER_MSG_AVAILABLE_CALLBACKS
1533 +
1534 +// Don't use message descriptors for TX messages that don't need them
1535 +#define VCHI_MINIMISE_TX_MSG_DESCRIPTORS
1536 +
1537 +// Nano-locks for multiqueue
1538 +//#define VCHI_MQUEUE_NANOLOCKS
1539 +
1540 +// Lock-free(er) dequeuing
1541 +//#define VCHI_RX_NANOLOCKS
1542 +
1543 +#endif /*VCHI_CFG_INTERNAL_H_*/
1544 --- /dev/null
1545 +++ b/drivers/misc/vc04_services/interface/vchi/vchi_common.h
1546 @@ -0,0 +1,175 @@
1547 +/**
1548 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
1549 + *
1550 + * Redistribution and use in source and binary forms, with or without
1551 + * modification, are permitted provided that the following conditions
1552 + * are met:
1553 + * 1. Redistributions of source code must retain the above copyright
1554 + * notice, this list of conditions, and the following disclaimer,
1555 + * without modification.
1556 + * 2. Redistributions in binary form must reproduce the above copyright
1557 + * notice, this list of conditions and the following disclaimer in the
1558 + * documentation and/or other materials provided with the distribution.
1559 + * 3. The names of the above-listed copyright holders may not be used
1560 + * to endorse or promote products derived from this software without
1561 + * specific prior written permission.
1562 + *
1563 + * ALTERNATIVELY, this software may be distributed under the terms of the
1564 + * GNU General Public License ("GPL") version 2, as published by the Free
1565 + * Software Foundation.
1566 + *
1567 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
1568 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
1569 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
1570 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
1571 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
1572 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
1573 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
1574 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
1575 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
1576 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1577 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1578 + */
1579 +
1580 +#ifndef VCHI_COMMON_H_
1581 +#define VCHI_COMMON_H_
1582 +
1583 +
1584 +//flags used when sending messages (must be bitmapped)
1585 +typedef enum
1586 +{
1587 + VCHI_FLAGS_NONE = 0x0,
1588 + VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE = 0x1, // waits for message to be received, or sent (NB. not the same as being seen on other side)
1589 + VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE = 0x2, // run a callback when message sent
1590 + VCHI_FLAGS_BLOCK_UNTIL_QUEUED = 0x4, // return once the transfer is in a queue ready to go
1591 + VCHI_FLAGS_ALLOW_PARTIAL = 0x8,
1592 + VCHI_FLAGS_BLOCK_UNTIL_DATA_READ = 0x10,
1593 + VCHI_FLAGS_CALLBACK_WHEN_DATA_READ = 0x20,
1594 +
1595 + VCHI_FLAGS_ALIGN_SLOT = 0x000080, // internal use only
1596 + VCHI_FLAGS_BULK_AUX_QUEUED = 0x010000, // internal use only
1597 + VCHI_FLAGS_BULK_AUX_COMPLETE = 0x020000, // internal use only
1598 + VCHI_FLAGS_BULK_DATA_QUEUED = 0x040000, // internal use only
1599 + VCHI_FLAGS_BULK_DATA_COMPLETE = 0x080000, // internal use only
1600 + VCHI_FLAGS_INTERNAL = 0xFF0000
1601 +} VCHI_FLAGS_T;
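Since the public flags occupy distinct bits, callers combine them with bitwise OR; a hypothetical call site (illustrative only) could look like:

    /* Illustrative sketch only: combine two public flags for one operation
     * and test for one of them later. */
    VCHI_FLAGS_T flags = (VCHI_FLAGS_T)(VCHI_FLAGS_BLOCK_UNTIL_QUEUED |
                                        VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE);

    if (flags & VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE) {
        /* arrange to handle the completion callback */
    }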
1602 +
1603 +// constants for vchi_crc_control()
1604 +typedef enum {
1605 + VCHI_CRC_NOTHING = -1,
1606 + VCHI_CRC_PER_SERVICE = 0,
1607 + VCHI_CRC_EVERYTHING = 1,
1608 +} VCHI_CRC_CONTROL_T;
1609 +
1610 +//callback reasons when an event occurs on a service
1611 +typedef enum
1612 +{
1613 + VCHI_CALLBACK_REASON_MIN,
1614 +
1615 + //This indicates that there is data available
1616 + //handle is the msg id that was transmitted with the data
1617 + // When a message is received and there was no FULL message available previously, send callback
1618 + // Tasks get kicked by the callback, reset their event and try and read from the fifo until it fails
1619 + VCHI_CALLBACK_MSG_AVAILABLE,
1620 + VCHI_CALLBACK_MSG_SENT,
1621 + VCHI_CALLBACK_MSG_SPACE_AVAILABLE, // XXX not yet implemented
1622 +
1623 + // This indicates that a transfer from the other side has completed
1624 + VCHI_CALLBACK_BULK_RECEIVED,
1625 + //This indicates that data queued up to be sent has now gone
1626 + //handle is the msg id that was used when sending the data
1627 + VCHI_CALLBACK_BULK_SENT,
1628 + VCHI_CALLBACK_BULK_RX_SPACE_AVAILABLE, // XXX not yet implemented
1629 + VCHI_CALLBACK_BULK_TX_SPACE_AVAILABLE, // XXX not yet implemented
1630 +
1631 + VCHI_CALLBACK_SERVICE_CLOSED,
1632 +
1633 + // this side has sent XOFF to peer due to lack of data consumption by service
1634 + // (suggests the service may need to take some recovery action if it has
1635 + // been deliberately holding off consuming data)
1636 + VCHI_CALLBACK_SENT_XOFF,
1637 + VCHI_CALLBACK_SENT_XON,
1638 +
1639 + // indicates that a bulk transfer has finished reading the source buffer
1640 + VCHI_CALLBACK_BULK_DATA_READ,
1641 +
1642 + // power notification events (currently host side only)
1643 + VCHI_CALLBACK_PEER_OFF,
1644 + VCHI_CALLBACK_PEER_SUSPENDED,
1645 + VCHI_CALLBACK_PEER_ON,
1646 + VCHI_CALLBACK_PEER_RESUMED,
1647 + VCHI_CALLBACK_FORCED_POWER_OFF,
1648 +
1649 +#ifdef USE_VCHIQ_ARM
1650 + // some extra notifications provided by vchiq_arm
1651 + VCHI_CALLBACK_SERVICE_OPENED,
1652 + VCHI_CALLBACK_BULK_RECEIVE_ABORTED,
1653 + VCHI_CALLBACK_BULK_TRANSMIT_ABORTED,
1654 +#endif
1655 +
1656 + VCHI_CALLBACK_REASON_MAX
1657 +} VCHI_CALLBACK_REASON_T;
1658 +
1659 +// service control options
1660 +typedef enum
1661 +{
1662 + VCHI_SERVICE_OPTION_MIN,
1663 +
1664 + VCHI_SERVICE_OPTION_TRACE,
1665 + VCHI_SERVICE_OPTION_SYNCHRONOUS,
1666 +
1667 + VCHI_SERVICE_OPTION_MAX
1668 +} VCHI_SERVICE_OPTION_T;
1669 +
1670 +
1671 +//Callback used by all services / bulk transfers
1672 +typedef void (*VCHI_CALLBACK_T)( void *callback_param, //my service local param
1673 + VCHI_CALLBACK_REASON_T reason,
1674 + void *handle ); //for transmitting msg's only
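The callback is invoked with one of the reasons listed above; a minimal hypothetical handler (an illustrative sketch, not part of the driver) might dispatch on the reason like this:

    /* Illustrative sketch only: skeleton of a VCHI service callback. */
    static void my_service_callback(void *callback_param,
                                    VCHI_CALLBACK_REASON_T reason,
                                    void *handle)
    {
        /* callback_param is whatever the client registered when opening
         * the service (hypothetical per-service state). */
        (void)callback_param;

        switch (reason) {
        case VCHI_CALLBACK_MSG_AVAILABLE:
            /* kick a worker to drain the receive FIFO until it is empty */
            break;
        case VCHI_CALLBACK_BULK_SENT:
        case VCHI_CALLBACK_BULK_RECEIVED:
            /* 'handle' identifies the transfer that completed */
            break;
        case VCHI_CALLBACK_SERVICE_CLOSED:
            /* the peer has closed the service */
            break;
        default:
            break;
        }
        (void)handle;
    }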
1675 +
1676 +
1677 +
1678 +/*
1679 + * Define vector struct for scatter-gather (vector) operations
1680 + * Vectors can be nested - if a vector element has negative length, then
1681 + * the data pointer is treated as pointing to another vector array, with
1682 + * '-vec_len' elements. Thus to append a header onto an existing vector,
1683 + * you can do this:
1684 + *
1685 + * void foo(const VCHI_MSG_VECTOR_T *v, int n)
1686 + * {
1687 + * VCHI_MSG_VECTOR_T nv[2];
1688 + * nv[0].vec_base = my_header;
1689 + * nv[0].vec_len = sizeof my_header;
1690 + * nv[1].vec_base = v;
1691 + * nv[1].vec_len = -n;
1692 + * ...
1693 + *
1694 + */
1695 +typedef struct vchi_msg_vector {
1696 + const void *vec_base;
1697 + int32_t vec_len;
1698 +} VCHI_MSG_VECTOR_T;
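To complement the prepend example in the comment above, a hypothetical recursive walk over a (possibly nested) vector array could look like this (illustrative sketch only):

    /* Illustrative sketch only: visit every flat data element in a nested
     * vector array, following the negative-length convention above. */
    static void walk_vector(const VCHI_MSG_VECTOR_T *vec, int count,
                            void (*visit)(const void *base, int32_t len))
    {
        int i;

        for (i = 0; i < count; i++) {
            if (vec[i].vec_len < 0)
                /* nested array of -vec_len elements */
                walk_vector(vec[i].vec_base, -vec[i].vec_len, visit);
            else
                visit(vec[i].vec_base, vec[i].vec_len);
        }
    }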
1699 +
1700 +// Opaque type for a connection API
1701 +typedef struct opaque_vchi_connection_api_t VCHI_CONNECTION_API_T;
1702 +
1703 +// Opaque type for a message driver
1704 +typedef struct opaque_vchi_message_driver_t VCHI_MESSAGE_DRIVER_T;
1705 +
1706 +
1707 +// Iterator structure for reading ahead through received message queue. Allocated by client,
1708 +// initialised by vchi_msg_look_ahead. Fields are for internal VCHI use only.
1709 +// Iterates over messages in queue at the instant of the call to vchi_msg_lookahead -
1710 +// will not proceed to messages received since. Behaviour is undefined if an iterator
1711 +// is used again after messages for that service are removed/dequeued by any
1712 +// means other than vchi_msg_iter_... calls on the iterator itself.
1713 +typedef struct {
1714 + struct opaque_vchi_service_t *service;
1715 + void *last;
1716 + void *next;
1717 + void *remove;
1718 +} VCHI_MSG_ITER_T;
1719 +
1720 +
1721 +#endif // VCHI_COMMON_H_
1722 --- /dev/null
1723 +++ b/drivers/misc/vc04_services/interface/vchi/vchi_mh.h
1724 @@ -0,0 +1,42 @@
1725 +/**
1726 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
1727 + *
1728 + * Redistribution and use in source and binary forms, with or without
1729 + * modification, are permitted provided that the following conditions
1730 + * are met:
1731 + * 1. Redistributions of source code must retain the above copyright
1732 + * notice, this list of conditions, and the following disclaimer,
1733 + * without modification.
1734 + * 2. Redistributions in binary form must reproduce the above copyright
1735 + * notice, this list of conditions and the following disclaimer in the
1736 + * documentation and/or other materials provided with the distribution.
1737 + * 3. The names of the above-listed copyright holders may not be used
1738 + * to endorse or promote products derived from this software without
1739 + * specific prior written permission.
1740 + *
1741 + * ALTERNATIVELY, this software may be distributed under the terms of the
1742 + * GNU General Public License ("GPL") version 2, as published by the Free
1743 + * Software Foundation.
1744 + *
1745 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
1746 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
1747 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
1748 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
1749 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
1750 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
1751 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
1752 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
1753 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
1754 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1755 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1756 + */
1757 +
1758 +#ifndef VCHI_MH_H_
1759 +#define VCHI_MH_H_
1760 +
1761 +#include <linux/types.h>
1762 +
1763 +typedef int32_t VCHI_MEM_HANDLE_T;
1764 +#define VCHI_MEM_HANDLE_INVALID 0
1765 +
1766 +#endif
1767 --- /dev/null
1768 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq.h
1769 @@ -0,0 +1,40 @@
1770 +/**
1771 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
1772 + *
1773 + * Redistribution and use in source and binary forms, with or without
1774 + * modification, are permitted provided that the following conditions
1775 + * are met:
1776 + * 1. Redistributions of source code must retain the above copyright
1777 + * notice, this list of conditions, and the following disclaimer,
1778 + * without modification.
1779 + * 2. Redistributions in binary form must reproduce the above copyright
1780 + * notice, this list of conditions and the following disclaimer in the
1781 + * documentation and/or other materials provided with the distribution.
1782 + * 3. The names of the above-listed copyright holders may not be used
1783 + * to endorse or promote products derived from this software without
1784 + * specific prior written permission.
1785 + *
1786 + * ALTERNATIVELY, this software may be distributed under the terms of the
1787 + * GNU General Public License ("GPL") version 2, as published by the Free
1788 + * Software Foundation.
1789 + *
1790 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
1791 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
1792 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
1793 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
1794 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
1795 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
1796 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
1797 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
1798 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
1799 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1800 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1801 + */
1802 +
1803 +#ifndef VCHIQ_VCHIQ_H
1804 +#define VCHIQ_VCHIQ_H
1805 +
1806 +#include "vchiq_if.h"
1807 +#include "vchiq_util.h"
1808 +
1809 +#endif
1810 --- /dev/null
1811 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835.h
1812 @@ -0,0 +1,42 @@
1813 +/**
1814 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
1815 + *
1816 + * Redistribution and use in source and binary forms, with or without
1817 + * modification, are permitted provided that the following conditions
1818 + * are met:
1819 + * 1. Redistributions of source code must retain the above copyright
1820 + * notice, this list of conditions, and the following disclaimer,
1821 + * without modification.
1822 + * 2. Redistributions in binary form must reproduce the above copyright
1823 + * notice, this list of conditions and the following disclaimer in the
1824 + * documentation and/or other materials provided with the distribution.
1825 + * 3. The names of the above-listed copyright holders may not be used
1826 + * to endorse or promote products derived from this software without
1827 + * specific prior written permission.
1828 + *
1829 + * ALTERNATIVELY, this software may be distributed under the terms of the
1830 + * GNU General Public License ("GPL") version 2, as published by the Free
1831 + * Software Foundation.
1832 + *
1833 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
1834 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
1835 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
1836 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
1837 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
1838 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
1839 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
1840 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
1841 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
1842 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1843 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1844 + */
1845 +
1846 +#ifndef VCHIQ_2835_H
1847 +#define VCHIQ_2835_H
1848 +
1849 +#include "vchiq_pagelist.h"
1850 +
1851 +#define VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX 0
1852 +#define VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX 1
1853 +
1854 +#endif /* VCHIQ_2835_H */
1855 --- /dev/null
1856 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
1857 @@ -0,0 +1,547 @@
1858 +/**
1859 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
1860 + *
1861 + * Redistribution and use in source and binary forms, with or without
1862 + * modification, are permitted provided that the following conditions
1863 + * are met:
1864 + * 1. Redistributions of source code must retain the above copyright
1865 + * notice, this list of conditions, and the following disclaimer,
1866 + * without modification.
1867 + * 2. Redistributions in binary form must reproduce the above copyright
1868 + * notice, this list of conditions and the following disclaimer in the
1869 + * documentation and/or other materials provided with the distribution.
1870 + * 3. The names of the above-listed copyright holders may not be used
1871 + * to endorse or promote products derived from this software without
1872 + * specific prior written permission.
1873 + *
1874 + * ALTERNATIVELY, this software may be distributed under the terms of the
1875 + * GNU General Public License ("GPL") version 2, as published by the Free
1876 + * Software Foundation.
1877 + *
1878 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
1879 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
1880 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
1881 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
1882 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
1883 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
1884 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
1885 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
1886 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
1887 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1888 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1889 + */
1890 +
1891 +#include <linux/kernel.h>
1892 +#include <linux/types.h>
1893 +#include <linux/errno.h>
1894 +#include <linux/interrupt.h>
1895 +#include <linux/pagemap.h>
1896 +#include <linux/dma-mapping.h>
1897 +#include <linux/version.h>
1898 +#include <linux/io.h>
1899 +#include <linux/platform_data/mailbox-bcm2708.h>
1900 +#include <linux/platform_device.h>
1901 +#include <linux/uaccess.h>
1902 +#include <asm/pgtable.h>
1903 +
1904 +#define TOTAL_SLOTS (VCHIQ_SLOT_ZERO_SLOTS + 2 * 32)
1905 +
1906 +#define VCHIQ_ARM_ADDRESS(x) ((void *)((char *)x + g_virt_to_bus_offset))
1907 +
1908 +#include "vchiq_arm.h"
1909 +#include "vchiq_2835.h"
1910 +#include "vchiq_connected.h"
1911 +#include "vchiq_killable.h"
1912 +
1913 +#define MAX_FRAGMENTS (VCHIQ_NUM_CURRENT_BULKS * 2)
1914 +
1915 +#define BELL0 0x00
1916 +#define BELL2 0x08
1917 +
1918 +typedef struct vchiq_2835_state_struct {
1919 + int inited;
1920 + VCHIQ_ARM_STATE_T arm_state;
1921 +} VCHIQ_2835_ARM_STATE_T;
1922 +
1923 +static void __iomem *g_regs;
1924 +static FRAGMENTS_T *g_fragments_base;
1925 +static FRAGMENTS_T *g_free_fragments;
1926 +static struct semaphore g_free_fragments_sema;
1927 +static unsigned long g_virt_to_bus_offset;
1928 +
1929 +extern int vchiq_arm_log_level;
1930 +
1931 +static DEFINE_SEMAPHORE(g_free_fragments_mutex);
1932 +
1933 +static irqreturn_t
1934 +vchiq_doorbell_irq(int irq, void *dev_id);
1935 +
1936 +static int
1937 +create_pagelist(char __user *buf, size_t count, unsigned short type,
1938 + struct task_struct *task, PAGELIST_T ** ppagelist);
1939 +
1940 +static void
1941 +free_pagelist(PAGELIST_T *pagelist, int actual);
1942 +
1943 +int vchiq_platform_init(struct platform_device *pdev, VCHIQ_STATE_T *state)
1944 +{
1945 + struct device *dev = &pdev->dev;
1946 + VCHIQ_SLOT_ZERO_T *vchiq_slot_zero;
1947 + struct resource *res;
1948 + void *slot_mem;
1949 + dma_addr_t slot_phys;
1950 + int slot_mem_size, frag_mem_size;
1951 + int err, irq, i;
1952 +
1953 + g_virt_to_bus_offset = virt_to_dma(dev, (void *)0);
1954 +
1955 + /* Allocate space for the channels in coherent memory */
1956 + slot_mem_size = PAGE_ALIGN(TOTAL_SLOTS * VCHIQ_SLOT_SIZE);
1957 + frag_mem_size = PAGE_ALIGN(sizeof(FRAGMENTS_T) * MAX_FRAGMENTS);
1958 +
1959 + slot_mem = dmam_alloc_coherent(dev, slot_mem_size + frag_mem_size,
1960 + &slot_phys, GFP_KERNEL);
1961 + if (!slot_mem) {
1962 + dev_err(dev, "could not allocate DMA memory\n");
1963 + return -ENOMEM;
1964 + }
1965 +
1966 + WARN_ON(((int)slot_mem & (PAGE_SIZE - 1)) != 0);
1967 +
1968 + vchiq_slot_zero = vchiq_init_slots(slot_mem, slot_mem_size);
1969 + if (!vchiq_slot_zero)
1970 + return -EINVAL;
1971 +
1972 + vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX] =
1973 + (int)slot_phys + slot_mem_size;
1974 + vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX] =
1975 + MAX_FRAGMENTS;
1976 +
1977 + g_fragments_base = (FRAGMENTS_T *)(slot_mem + slot_mem_size);
1978 + slot_mem_size += frag_mem_size;
1979 +
1980 + g_free_fragments = g_fragments_base;
1981 + for (i = 0; i < (MAX_FRAGMENTS - 1); i++) {
1982 + *(FRAGMENTS_T **)&g_fragments_base[i] =
1983 + &g_fragments_base[i + 1];
1984 + }
1985 + *(FRAGMENTS_T **)&g_fragments_base[i] = NULL;
1986 + sema_init(&g_free_fragments_sema, MAX_FRAGMENTS);
1987 +
1988 + if (vchiq_init_state(state, vchiq_slot_zero, 0) != VCHIQ_SUCCESS)
1989 + return -EINVAL;
1990 +
1991 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1992 + g_regs = devm_ioremap_resource(&pdev->dev, res);
1993 + if (IS_ERR(g_regs))
1994 + return PTR_ERR(g_regs);
1995 +
1996 + irq = platform_get_irq(pdev, 0);
1997 + if (irq <= 0) {
1998 + dev_err(dev, "failed to get IRQ\n");
1999 + return irq;
2000 + }
2001 +
2002 + err = devm_request_irq(dev, irq, vchiq_doorbell_irq, IRQF_IRQPOLL,
2003 + "VCHIQ doorbell", state);
2004 + if (err) {
2005 + dev_err(dev, "failed to register irq=%d\n", irq);
2006 + return err;
2007 + }
2008 +
2009 + /* Send the base address of the slots to VideoCore */
2010 +
2011 + dsb(); /* Ensure all writes have completed */
2012 +
2013 + err = bcm_mailbox_write(MBOX_CHAN_VCHIQ, (unsigned int)slot_phys);
2014 + if (err) {
2015 + dev_err(dev, "mailbox write failed\n");
2016 + return err;
2017 + }
2018 +
2019 + vchiq_log_info(vchiq_arm_log_level,
2020 + "vchiq_init - done (slots %x, phys %pad)",
2021 + (unsigned int)vchiq_slot_zero, &slot_phys);
2022 +
2023 + vchiq_call_connected_callbacks();
2024 +
2025 + return 0;
2026 +}
2027 +
2028 +VCHIQ_STATUS_T
2029 +vchiq_platform_init_state(VCHIQ_STATE_T *state)
2030 +{
2031 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
2032 + state->platform_state = kzalloc(sizeof(VCHIQ_2835_ARM_STATE_T), GFP_KERNEL);
2033 + ((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->inited = 1;
2034 + status = vchiq_arm_init_state(state, &((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->arm_state);
2035 + if(status != VCHIQ_SUCCESS)
2036 + {
2037 + ((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->inited = 0;
2038 + }
2039 + return status;
2040 +}
2041 +
2042 +VCHIQ_ARM_STATE_T*
2043 +vchiq_platform_get_arm_state(VCHIQ_STATE_T *state)
2044 +{
2045 + if(!((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->inited)
2046 + {
2047 + BUG();
2048 + }
2049 + return &((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->arm_state;
2050 +}
2051 +
2052 +void
2053 +remote_event_signal(REMOTE_EVENT_T *event)
2054 +{
2055 + wmb();
2056 +
2057 + event->fired = 1;
2058 +
2059 + dsb(); /* data barrier operation */
2060 +
2061 + if (event->armed)
2062 + writel(0, g_regs + BELL2); /* trigger vc interrupt */
2063 +}
2064 +
2065 +int
2066 +vchiq_copy_from_user(void *dst, const void *src, int size)
2067 +{
2068 + if ((uint32_t)src < TASK_SIZE) {
2069 + return copy_from_user(dst, src, size);
2070 + } else {
2071 + memcpy(dst, src, size);
2072 + return 0;
2073 + }
2074 +}
2075 +
2076 +VCHIQ_STATUS_T
2077 +vchiq_prepare_bulk_data(VCHIQ_BULK_T *bulk, VCHI_MEM_HANDLE_T memhandle,
2078 + void *offset, int size, int dir)
2079 +{
2080 + PAGELIST_T *pagelist;
2081 + int ret;
2082 +
2083 + WARN_ON(memhandle != VCHI_MEM_HANDLE_INVALID);
2084 +
2085 + ret = create_pagelist((char __user *)offset, size,
2086 + (dir == VCHIQ_BULK_RECEIVE)
2087 + ? PAGELIST_READ
2088 + : PAGELIST_WRITE,
2089 + current,
2090 + &pagelist);
2091 + if (ret != 0)
2092 + return VCHIQ_ERROR;
2093 +
2094 + bulk->handle = memhandle;
2095 + bulk->data = VCHIQ_ARM_ADDRESS(pagelist);
2096 +
2097 + /* Store the pagelist address in remote_data, which isn't used by the
2098 + slave. */
2099 + bulk->remote_data = pagelist;
2100 +
2101 + return VCHIQ_SUCCESS;
2102 +}
2103 +
2104 +void
2105 +vchiq_complete_bulk(VCHIQ_BULK_T *bulk)
2106 +{
2107 + if (bulk && bulk->remote_data && bulk->actual)
2108 + free_pagelist((PAGELIST_T *)bulk->remote_data, bulk->actual);
2109 +}
2110 +
2111 +void
2112 +vchiq_transfer_bulk(VCHIQ_BULK_T *bulk)
2113 +{
2114 + /*
2115 + * This should only be called on the master (VideoCore) side, but
2116 + * provide an implementation to avoid the need for ifdefery.
2117 + */
2118 + BUG();
2119 +}
2120 +
2121 +void
2122 +vchiq_dump_platform_state(void *dump_context)
2123 +{
2124 + char buf[80];
2125 + int len;
2126 + len = snprintf(buf, sizeof(buf),
2127 + " Platform: 2835 (VC master)");
2128 + vchiq_dump(dump_context, buf, len + 1);
2129 +}
2130 +
2131 +VCHIQ_STATUS_T
2132 +vchiq_platform_suspend(VCHIQ_STATE_T *state)
2133 +{
2134 + return VCHIQ_ERROR;
2135 +}
2136 +
2137 +VCHIQ_STATUS_T
2138 +vchiq_platform_resume(VCHIQ_STATE_T *state)
2139 +{
2140 + return VCHIQ_SUCCESS;
2141 +}
2142 +
2143 +void
2144 +vchiq_platform_paused(VCHIQ_STATE_T *state)
2145 +{
2146 +}
2147 +
2148 +void
2149 +vchiq_platform_resumed(VCHIQ_STATE_T *state)
2150 +{
2151 +}
2152 +
2153 +int
2154 +vchiq_platform_videocore_wanted(VCHIQ_STATE_T* state)
2155 +{
2156 + return 1; // autosuspend not supported - videocore always wanted
2157 +}
2158 +
2159 +int
2160 +vchiq_platform_use_suspend_timer(void)
2161 +{
2162 + return 0;
2163 +}
2164 +void
2165 +vchiq_dump_platform_use_state(VCHIQ_STATE_T *state)
2166 +{
2167 + vchiq_log_info(vchiq_arm_log_level, "Suspend timer not in use");
2168 +}
2169 +void
2170 +vchiq_platform_handle_timeout(VCHIQ_STATE_T *state)
2171 +{
2172 + (void)state;
2173 +}
2174 +/*
2175 + * Local functions
2176 + */
2177 +
2178 +static irqreturn_t
2179 +vchiq_doorbell_irq(int irq, void *dev_id)
2180 +{
2181 + VCHIQ_STATE_T *state = dev_id;
2182 + irqreturn_t ret = IRQ_NONE;
2183 + unsigned int status;
2184 +
2185 + /* Read (and clear) the doorbell */
2186 + status = readl(g_regs + BELL0);
2187 +
2188 + if (status & 0x4) { /* Was the doorbell rung? */
2189 + remote_event_pollall(state);
2190 + ret = IRQ_HANDLED;
2191 + }
2192 +
2193 + return ret;
2194 +}
2195 +
2196 +/* There is a potential problem with partial cache lines (pages?)
2197 +** at the ends of the block when reading. If the CPU accessed anything in
2198 +** the same line (page?) then it may have pulled old data into the cache,
2199 +** obscuring the new data underneath. We can solve this by transferring the
2200 +** partial cache lines separately, and allowing the ARM to copy into the
2201 +** cached area.
2202 +
2203 +** N.B. This implementation plays slightly fast and loose with the Linux
2204 +** driver programming rules, e.g. its use of __virt_to_bus instead of
2205 +** dma_map_single, but it isn't a multi-platform driver and it benefits
2206 +** from increased speed as a result.
2207 +*/
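As a worked illustration of the fragment handling described above (assuming CACHE_LINE_SIZE is 32 on this core):

    /* Example: a 100-byte read starting at offset 10 within the buffer.
     * head_bytes = (32 - 10) & 31 = 22  -> copied from fragments->headbuf
     * tail_bytes = (10 + 100) & 31 = 14 -> copied from fragments->tailbuf
     * The remaining 64 bytes are cache-line aligned and are transferred
     * directly into the cached pages. */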
2208 +
2209 +static int
2210 +create_pagelist(char __user *buf, size_t count, unsigned short type,
2211 + struct task_struct *task, PAGELIST_T ** ppagelist)
2212 +{
2213 + PAGELIST_T *pagelist;
2214 + struct page **pages;
2215 + struct page *page;
2216 + unsigned long *addrs;
2217 + unsigned int num_pages, offset, i;
2218 + char *addr, *base_addr, *next_addr;
2219 + int run, addridx, actual_pages;
2220 + unsigned long *need_release;
2221 +
2222 + offset = (unsigned int)buf & (PAGE_SIZE - 1);
2223 + num_pages = (count + offset + PAGE_SIZE - 1) / PAGE_SIZE;
2224 +
2225 + *ppagelist = NULL;
2226 +
2227 + /* Allocate enough storage to hold the page pointers and the page
2228 + ** list
2229 + */
2230 + pagelist = kmalloc(sizeof(PAGELIST_T) +
2231 + (num_pages * sizeof(unsigned long)) +
2232 + sizeof(unsigned long) +
2233 + (num_pages * sizeof(pages[0])),
2234 + GFP_KERNEL);
2235 +
2236 + vchiq_log_trace(vchiq_arm_log_level,
2237 + "create_pagelist - %x", (unsigned int)pagelist);
2238 + if (!pagelist)
2239 + return -ENOMEM;
2240 +
2241 + addrs = pagelist->addrs;
2242 + need_release = (unsigned long *)(addrs + num_pages);
2243 + pages = (struct page **)(addrs + num_pages + 1);
2244 +
2245 + if (is_vmalloc_addr(buf)) {
2246 + for (actual_pages = 0; actual_pages < num_pages; actual_pages++) {
2247 + pages[actual_pages] = vmalloc_to_page(buf + (actual_pages * PAGE_SIZE));
2248 + }
2249 + *need_release = 0; /* do not try and release vmalloc pages */
2250 + } else {
2251 + down_read(&task->mm->mmap_sem);
2252 + actual_pages = get_user_pages(task, task->mm,
2253 + (unsigned long)buf & ~(PAGE_SIZE - 1),
2254 + num_pages,
2255 + (type == PAGELIST_READ) /*Write */ ,
2256 + 0 /*Force */ ,
2257 + pages,
2258 + NULL /*vmas */);
2259 + up_read(&task->mm->mmap_sem);
2260 +
2261 + if (actual_pages != num_pages) {
2262 + vchiq_log_info(vchiq_arm_log_level,
2263 + "create_pagelist - only %d/%d pages locked",
2264 + actual_pages,
2265 + num_pages);
2266 +
2267 + /* This is probably due to the process being killed */
2268 + while (actual_pages > 0)
2269 + {
2270 + actual_pages--;
2271 + page_cache_release(pages[actual_pages]);
2272 + }
2273 + kfree(pagelist);
2274 + if (actual_pages == 0)
2275 + actual_pages = -ENOMEM;
2276 + return actual_pages;
2277 + }
2278 + *need_release = 1; /* release user pages */
2279 + }
2280 +
2281 + pagelist->length = count;
2282 + pagelist->type = type;
2283 + pagelist->offset = offset;
2284 +
2285 + /* Group the pages into runs of contiguous pages */
2286 +
2287 + base_addr = VCHIQ_ARM_ADDRESS(page_address(pages[0]));
2288 + next_addr = base_addr + PAGE_SIZE;
2289 + addridx = 0;
2290 + run = 0;
2291 +
2292 + for (i = 1; i < num_pages; i++) {
2293 + addr = VCHIQ_ARM_ADDRESS(page_address(pages[i]));
2294 + if ((addr == next_addr) && (run < (PAGE_SIZE - 1))) {
2295 + next_addr += PAGE_SIZE;
2296 + run++;
2297 + } else {
2298 + addrs[addridx] = (unsigned long)base_addr + run;
2299 + addridx++;
2300 + base_addr = addr;
2301 + next_addr = addr + PAGE_SIZE;
2302 + run = 0;
2303 + }
2304 + }
2305 +
2306 + addrs[addridx] = (unsigned long)base_addr + run;
2307 + addridx++;
2308 +
2309 + /* Partial cache lines (fragments) require special measures */
2310 + if ((type == PAGELIST_READ) &&
2311 + ((pagelist->offset & (CACHE_LINE_SIZE - 1)) ||
2312 + ((pagelist->offset + pagelist->length) &
2313 + (CACHE_LINE_SIZE - 1)))) {
2314 + FRAGMENTS_T *fragments;
2315 +
2316 + if (down_interruptible(&g_free_fragments_sema) != 0) {
2317 + kfree(pagelist);
2318 + return -EINTR;
2319 + }
2320 +
2321 + WARN_ON(g_free_fragments == NULL);
2322 +
2323 + down(&g_free_fragments_mutex);
2324 + fragments = (FRAGMENTS_T *) g_free_fragments;
2325 + WARN_ON(fragments == NULL);
2326 + g_free_fragments = *(FRAGMENTS_T **) g_free_fragments;
2327 + up(&g_free_fragments_mutex);
2328 + pagelist->type =
2329 + PAGELIST_READ_WITH_FRAGMENTS + (fragments -
2330 + g_fragments_base);
2331 + }
2332 +
2333 + for (page = virt_to_page(pagelist);
2334 + page <= virt_to_page(addrs + num_pages - 1); page++) {
2335 + flush_dcache_page(page);
2336 + }
2337 +
2338 + *ppagelist = pagelist;
2339 +
2340 + return 0;
2341 +}
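Because each base address stored by the loop above is page aligned, its low bits are free to carry the run length; a sketch of the packing (illustrative only; how the peer consumes the entries is assumed):

    /* Illustrative sketch only: each pagelist entry packs a page-aligned
     * bus address together with the number of additional contiguous pages
     * in its low bits. */
    static unsigned long entry_base(unsigned long entry)
    {
        return entry & ~(PAGE_SIZE - 1);   /* page-aligned bus address */
    }

    static unsigned int entry_extra_pages(unsigned long entry)
    {
        return entry & (PAGE_SIZE - 1);    /* the 'run' count added above */
    }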
2342 +
2343 +static void
2344 +free_pagelist(PAGELIST_T *pagelist, int actual)
2345 +{
2346 + unsigned long *need_release;
2347 + struct page **pages;
2348 + unsigned int num_pages, i;
2349 +
2350 + vchiq_log_trace(vchiq_arm_log_level,
2351 + "free_pagelist - %x, %d", (unsigned int)pagelist, actual);
2352 +
2353 + num_pages =
2354 + (pagelist->length + pagelist->offset + PAGE_SIZE - 1) /
2355 + PAGE_SIZE;
2356 +
2357 + need_release = (unsigned long *)(pagelist->addrs + num_pages);
2358 + pages = (struct page **)(pagelist->addrs + num_pages + 1);
2359 +
2360 + /* Deal with any partial cache lines (fragments) */
2361 + if (pagelist->type >= PAGELIST_READ_WITH_FRAGMENTS) {
2362 + FRAGMENTS_T *fragments = g_fragments_base +
2363 + (pagelist->type - PAGELIST_READ_WITH_FRAGMENTS);
2364 + int head_bytes, tail_bytes;
2365 + head_bytes = (CACHE_LINE_SIZE - pagelist->offset) &
2366 + (CACHE_LINE_SIZE - 1);
2367 + tail_bytes = (pagelist->offset + actual) &
2368 + (CACHE_LINE_SIZE - 1);
2369 +
2370 + if ((actual >= 0) && (head_bytes != 0)) {
2371 + if (head_bytes > actual)
2372 + head_bytes = actual;
2373 +
2374 + memcpy((char *)page_address(pages[0]) +
2375 + pagelist->offset,
2376 + fragments->headbuf,
2377 + head_bytes);
2378 + }
2379 + if ((actual >= 0) && (head_bytes < actual) &&
2380 + (tail_bytes != 0)) {
2381 + memcpy((char *)page_address(pages[num_pages - 1]) +
2382 + ((pagelist->offset + actual) &
2383 + (PAGE_SIZE - 1) & ~(CACHE_LINE_SIZE - 1)),
2384 + fragments->tailbuf, tail_bytes);
2385 + }
2386 +
2387 + down(&g_free_fragments_mutex);
2388 + *(FRAGMENTS_T **) fragments = g_free_fragments;
2389 + g_free_fragments = fragments;
2390 + up(&g_free_fragments_mutex);
2391 + up(&g_free_fragments_sema);
2392 + }
2393 +
2394 + if (*need_release) {
2395 + for (i = 0; i < num_pages; i++) {
2396 + if (pagelist->type != PAGELIST_WRITE)
2397 + set_page_dirty(pages[i]);
2398 +
2399 + page_cache_release(pages[i]);
2400 + }
2401 + }
2402 +
2403 + kfree(pagelist);
2404 +}
2405 --- /dev/null
2406 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.c
2407 @@ -0,0 +1,2886 @@
2408 +/**
2409 + * Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved.
2410 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
2411 + *
2412 + * Redistribution and use in source and binary forms, with or without
2413 + * modification, are permitted provided that the following conditions
2414 + * are met:
2415 + * 1. Redistributions of source code must retain the above copyright
2416 + * notice, this list of conditions, and the following disclaimer,
2417 + * without modification.
2418 + * 2. Redistributions in binary form must reproduce the above copyright
2419 + * notice, this list of conditions and the following disclaimer in the
2420 + * documentation and/or other materials provided with the distribution.
2421 + * 3. The names of the above-listed copyright holders may not be used
2422 + * to endorse or promote products derived from this software without
2423 + * specific prior written permission.
2424 + *
2425 + * ALTERNATIVELY, this software may be distributed under the terms of the
2426 + * GNU General Public License ("GPL") version 2, as published by the Free
2427 + * Software Foundation.
2428 + *
2429 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
2430 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
2431 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
2432 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
2433 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
2434 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
2435 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
2436 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
2437 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
2438 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
2439 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
2440 + */
2441 +
2442 +#include <linux/kernel.h>
2443 +#include <linux/module.h>
2444 +#include <linux/types.h>
2445 +#include <linux/errno.h>
2446 +#include <linux/cdev.h>
2447 +#include <linux/fs.h>
2448 +#include <linux/device.h>
2449 +#include <linux/mm.h>
2450 +#include <linux/highmem.h>
2451 +#include <linux/pagemap.h>
2452 +#include <linux/bug.h>
2453 +#include <linux/semaphore.h>
2454 +#include <linux/list.h>
2455 +#include <linux/platform_device.h>
2456 +
2457 +#include "vchiq_core.h"
2458 +#include "vchiq_ioctl.h"
2459 +#include "vchiq_arm.h"
2460 +#include "vchiq_debugfs.h"
2461 +#include "vchiq_killable.h"
2462 +
2463 +#define DEVICE_NAME "vchiq"
2464 +
2465 +/* Override the default prefix, which would be vchiq_arm (from the filename) */
2466 +#undef MODULE_PARAM_PREFIX
2467 +#define MODULE_PARAM_PREFIX DEVICE_NAME "."
2468 +
2469 +#define VCHIQ_MINOR 0
2470 +
2471 +/* Some per-instance constants */
2472 +#define MAX_COMPLETIONS 16
2473 +#define MAX_SERVICES 64
2474 +#define MAX_ELEMENTS 8
2475 +#define MSG_QUEUE_SIZE 64
2476 +
2477 +#define KEEPALIVE_VER 1
2478 +#define KEEPALIVE_VER_MIN KEEPALIVE_VER
2479 +
2480 +/* Run time control of log level, based on KERN_XXX level. */
2481 +int vchiq_arm_log_level = VCHIQ_LOG_DEFAULT;
2482 +int vchiq_susp_log_level = VCHIQ_LOG_ERROR;
2483 +
2484 +#define SUSPEND_TIMER_TIMEOUT_MS 100
2485 +#define SUSPEND_RETRY_TIMER_TIMEOUT_MS 1000
2486 +
2487 +#define VC_SUSPEND_NUM_OFFSET 3 /* number of values before idle which are -ve */
2488 +static const char *const suspend_state_names[] = {
2489 + "VC_SUSPEND_FORCE_CANCELED",
2490 + "VC_SUSPEND_REJECTED",
2491 + "VC_SUSPEND_FAILED",
2492 + "VC_SUSPEND_IDLE",
2493 + "VC_SUSPEND_REQUESTED",
2494 + "VC_SUSPEND_IN_PROGRESS",
2495 + "VC_SUSPEND_SUSPENDED"
2496 +};
2497 +#define VC_RESUME_NUM_OFFSET 1 /* number of values before idle which are -ve */
2498 +static const char *const resume_state_names[] = {
2499 + "VC_RESUME_FAILED",
2500 + "VC_RESUME_IDLE",
2501 + "VC_RESUME_REQUESTED",
2502 + "VC_RESUME_IN_PROGRESS",
2503 + "VC_RESUME_RESUMED"
2504 +};
2505 +/* The number of times we allow force suspend to timeout before actually
2506 +** _forcing_ suspend. This is to cater for SW which fails to release vchiq
2507 +** correctly - we don't want to prevent ARM suspend indefinitely in this case.
2508 +*/
2509 +#define FORCE_SUSPEND_FAIL_MAX 8
2510 +
2511 +/* The time in ms allowed for videocore to go idle when force suspend has been
2512 + * requested */
2513 +#define FORCE_SUSPEND_TIMEOUT_MS 200
2514 +
2515 +
2516 +static void suspend_timer_callback(unsigned long context);
2517 +
2518 +
2519 +typedef struct user_service_struct {
2520 + VCHIQ_SERVICE_T *service;
2521 + void *userdata;
2522 + VCHIQ_INSTANCE_T instance;
2523 + char is_vchi;
2524 + char dequeue_pending;
2525 + char close_pending;
2526 + int message_available_pos;
2527 + int msg_insert;
2528 + int msg_remove;
2529 + struct semaphore insert_event;
2530 + struct semaphore remove_event;
2531 + struct semaphore close_event;
2532 + VCHIQ_HEADER_T * msg_queue[MSG_QUEUE_SIZE];
2533 +} USER_SERVICE_T;
2534 +
2535 +struct bulk_waiter_node {
2536 + struct bulk_waiter bulk_waiter;
2537 + int pid;
2538 + struct list_head list;
2539 +};
2540 +
2541 +struct vchiq_instance_struct {
2542 + VCHIQ_STATE_T *state;
2543 + VCHIQ_COMPLETION_DATA_T completions[MAX_COMPLETIONS];
2544 + int completion_insert;
2545 + int completion_remove;
2546 + struct semaphore insert_event;
2547 + struct semaphore remove_event;
2548 + struct mutex completion_mutex;
2549 +
2550 + int connected;
2551 + int closing;
2552 + int pid;
2553 + int mark;
2554 + int use_close_delivered;
2555 + int trace;
2556 +
2557 + struct list_head bulk_waiter_list;
2558 + struct mutex bulk_waiter_list_mutex;
2559 +
2560 + VCHIQ_DEBUGFS_NODE_T debugfs_node;
2561 +};
2562 +
2563 +typedef struct dump_context_struct {
2564 + char __user *buf;
2565 + size_t actual;
2566 + size_t space;
2567 + loff_t offset;
2568 +} DUMP_CONTEXT_T;
2569 +
2570 +static struct cdev vchiq_cdev;
2571 +static dev_t vchiq_devid;
2572 +static VCHIQ_STATE_T g_state;
2573 +static struct class *vchiq_class;
2574 +static struct device *vchiq_dev;
2575 +static DEFINE_SPINLOCK(msg_queue_spinlock);
2576 +
2577 +static const char *const ioctl_names[] = {
2578 + "CONNECT",
2579 + "SHUTDOWN",
2580 + "CREATE_SERVICE",
2581 + "REMOVE_SERVICE",
2582 + "QUEUE_MESSAGE",
2583 + "QUEUE_BULK_TRANSMIT",
2584 + "QUEUE_BULK_RECEIVE",
2585 + "AWAIT_COMPLETION",
2586 + "DEQUEUE_MESSAGE",
2587 + "GET_CLIENT_ID",
2588 + "GET_CONFIG",
2589 + "CLOSE_SERVICE",
2590 + "USE_SERVICE",
2591 + "RELEASE_SERVICE",
2592 + "SET_SERVICE_OPTION",
2593 + "DUMP_PHYS_MEM",
2594 + "LIB_VERSION",
2595 + "CLOSE_DELIVERED"
2596 +};
2597 +
2598 +vchiq_static_assert((sizeof(ioctl_names)/sizeof(ioctl_names[0])) ==
2599 + (VCHIQ_IOC_MAX + 1));
2600 +
2601 +static void
2602 +dump_phys_mem(void *virt_addr, uint32_t num_bytes);
2603 +
2604 +/****************************************************************************
2605 +*
2606 +* add_completion
2607 +*
2608 +***************************************************************************/
2609 +
2610 +static VCHIQ_STATUS_T
2611 +add_completion(VCHIQ_INSTANCE_T instance, VCHIQ_REASON_T reason,
2612 + VCHIQ_HEADER_T *header, USER_SERVICE_T *user_service,
2613 + void *bulk_userdata)
2614 +{
2615 + VCHIQ_COMPLETION_DATA_T *completion;
2616 + DEBUG_INITIALISE(g_state.local)
2617 +
2618 + while (instance->completion_insert ==
2619 + (instance->completion_remove + MAX_COMPLETIONS)) {
2620 + /* Out of space - wait for the client */
2621 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2622 + vchiq_log_trace(vchiq_arm_log_level,
2623 + "add_completion - completion queue full");
2624 + DEBUG_COUNT(COMPLETION_QUEUE_FULL_COUNT);
2625 + if (down_interruptible(&instance->remove_event) != 0) {
2626 + vchiq_log_info(vchiq_arm_log_level,
2627 + "service_callback interrupted");
2628 + return VCHIQ_RETRY;
2629 + } else if (instance->closing) {
2630 + vchiq_log_info(vchiq_arm_log_level,
2631 + "service_callback closing");
2632 + return VCHIQ_ERROR;
2633 + }
2634 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2635 + }
2636 +
2637 + completion =
2638 + &instance->completions[instance->completion_insert &
2639 + (MAX_COMPLETIONS - 1)];
2640 +
2641 + completion->header = header;
2642 + completion->reason = reason;
2643 + /* N.B. service_userdata is updated while processing AWAIT_COMPLETION */
2644 + completion->service_userdata = user_service->service;
2645 + completion->bulk_userdata = bulk_userdata;
2646 +
2647 + if (reason == VCHIQ_SERVICE_CLOSED) {
2648 + /* Take an extra reference, to be held until
2649 + this CLOSED notification is delivered. */
2650 + lock_service(user_service->service);
2651 + if (instance->use_close_delivered)
2652 + user_service->close_pending = 1;
2653 + }
2654 +
2655 + /* A write barrier is needed here to ensure that the entire completion
2656 + record is written out before the insert point. */
2657 + wmb();
2658 +
2659 + if (reason == VCHIQ_MESSAGE_AVAILABLE)
2660 + user_service->message_available_pos =
2661 + instance->completion_insert;
2662 + instance->completion_insert++;
2663 +
2664 + up(&instance->insert_event);
2665 +
2666 + return VCHIQ_SUCCESS;
2667 +}
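add_completion relies on free-running insert/remove counters and a power-of-two queue size, so the index arithmetic reduces to a mask; a condensed sketch of the invariants (illustration only):

    /* Illustrative sketch only: the ring-buffer arithmetic used above.
     * MAX_COMPLETIONS (16) must be a power of two for the mask to work. */
    static int completion_slot(int insert_count)
    {
        return insert_count & (MAX_COMPLETIONS - 1);   /* wrap to 0..15 */
    }

    static int completion_queue_full(int insert_count, int remove_count)
    {
        /* the counters run freely; their difference is the occupancy */
        return insert_count == remove_count + MAX_COMPLETIONS;
    }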
2668 +
2669 +/****************************************************************************
2670 +*
2671 +* service_callback
2672 +*
2673 +***************************************************************************/
2674 +
2675 +static VCHIQ_STATUS_T
2676 +service_callback(VCHIQ_REASON_T reason, VCHIQ_HEADER_T *header,
2677 + VCHIQ_SERVICE_HANDLE_T handle, void *bulk_userdata)
2678 +{
2679 + /* How do we ensure the callback goes to the right client?
2680 + ** The service_user data points to a USER_SERVICE_T record containing
2681 + ** the original callback and the user state structure, which contains a
2682 + ** circular buffer for completion records.
2683 + */
2684 + USER_SERVICE_T *user_service;
2685 + VCHIQ_SERVICE_T *service;
2686 + VCHIQ_INSTANCE_T instance;
2687 + DEBUG_INITIALISE(g_state.local)
2688 +
2689 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2690 +
2691 + service = handle_to_service(handle);
2692 + BUG_ON(!service);
2693 + user_service = (USER_SERVICE_T *)service->base.userdata;
2694 + instance = user_service->instance;
2695 +
2696 + if (!instance || instance->closing)
2697 + return VCHIQ_SUCCESS;
2698 +
2699 + vchiq_log_trace(vchiq_arm_log_level,
2700 + "service_callback - service %lx(%d,%p), reason %d, header %lx, "
2701 + "instance %lx, bulk_userdata %lx",
2702 + (unsigned long)user_service,
2703 + service->localport, user_service->userdata,
2704 + reason, (unsigned long)header,
2705 + (unsigned long)instance, (unsigned long)bulk_userdata);
2706 +
2707 + if (header && user_service->is_vchi) {
2708 + spin_lock(&msg_queue_spinlock);
2709 + while (user_service->msg_insert ==
2710 + (user_service->msg_remove + MSG_QUEUE_SIZE)) {
2711 + spin_unlock(&msg_queue_spinlock);
2712 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2713 + DEBUG_COUNT(MSG_QUEUE_FULL_COUNT);
2714 + vchiq_log_trace(vchiq_arm_log_level,
2715 + "service_callback - msg queue full");
2716 + /* If there is no MESSAGE_AVAILABLE in the completion
2717 + ** queue, add one
2718 + */
2719 + if ((user_service->message_available_pos -
2720 + instance->completion_remove) < 0) {
2721 + VCHIQ_STATUS_T status;
2722 + vchiq_log_info(vchiq_arm_log_level,
2723 + "Inserting extra MESSAGE_AVAILABLE");
2724 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2725 + status = add_completion(instance, reason,
2726 + NULL, user_service, bulk_userdata);
2727 + if (status != VCHIQ_SUCCESS) {
2728 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2729 + return status;
2730 + }
2731 + }
2732 +
2733 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2734 + if (down_interruptible(&user_service->remove_event)
2735 + != 0) {
2736 + vchiq_log_info(vchiq_arm_log_level,
2737 + "service_callback interrupted");
2738 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2739 + return VCHIQ_RETRY;
2740 + } else if (instance->closing) {
2741 + vchiq_log_info(vchiq_arm_log_level,
2742 + "service_callback closing");
2743 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2744 + return VCHIQ_ERROR;
2745 + }
2746 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2747 + spin_lock(&msg_queue_spinlock);
2748 + }
2749 +
2750 + user_service->msg_queue[user_service->msg_insert &
2751 + (MSG_QUEUE_SIZE - 1)] = header;
2752 + user_service->msg_insert++;
2753 + spin_unlock(&msg_queue_spinlock);
2754 +
2755 + up(&user_service->insert_event);
2756 +
2757 + /* If there is a thread waiting in DEQUEUE_MESSAGE, or if
2758 + ** there is a MESSAGE_AVAILABLE in the completion queue then
2759 + ** bypass the completion queue.
2760 + */
2761 + if (((user_service->message_available_pos -
2762 + instance->completion_remove) >= 0) ||
2763 + user_service->dequeue_pending) {
2764 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2765 + user_service->dequeue_pending = 0;
2766 + return VCHIQ_SUCCESS;
2767 + }
2768 +
2769 + header = NULL;
2770 + }
2771 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2772 +
2773 + return add_completion(instance, reason, header, user_service,
2774 + bulk_userdata);
2775 +}
2776 +
2777 +/****************************************************************************
2778 +*
2779 +* user_service_free
2780 +*
2781 +***************************************************************************/
2782 +static void
2783 +user_service_free(void *userdata)
2784 +{
2785 + kfree(userdata);
2786 +}
2787 +
2788 +/****************************************************************************
2789 +*
2790 +* close_delivered
2791 +*
2792 +***************************************************************************/
2793 +static void close_delivered(USER_SERVICE_T *user_service)
2794 +{
2795 + vchiq_log_info(vchiq_arm_log_level,
2796 + "close_delivered(handle=%x)",
2797 + user_service->service->handle);
2798 +
2799 + if (user_service->close_pending) {
2800 + /* Allow the underlying service to be culled */
2801 + unlock_service(user_service->service);
2802 +
2803 + /* Wake the user-thread blocked in close_ or remove_service */
2804 + up(&user_service->close_event);
2805 +
2806 + user_service->close_pending = 0;
2807 + }
2808 +}
2809 +
2810 +/****************************************************************************
2811 +*
2812 +* vchiq_ioctl
2813 +*
2814 +***************************************************************************/
2815 +static long
2816 +vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2817 +{
2818 + VCHIQ_INSTANCE_T instance = file->private_data;
2819 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
2820 + VCHIQ_SERVICE_T *service = NULL;
2821 + long ret = 0;
2822 + int i, rc;
2823 + DEBUG_INITIALISE(g_state.local)
2824 +
2825 + vchiq_log_trace(vchiq_arm_log_level,
2826 + "vchiq_ioctl - instance %x, cmd %s, arg %lx",
2827 + (unsigned int)instance,
2828 + ((_IOC_TYPE(cmd) == VCHIQ_IOC_MAGIC) &&
2829 + (_IOC_NR(cmd) <= VCHIQ_IOC_MAX)) ?
2830 + ioctl_names[_IOC_NR(cmd)] : "<invalid>", arg);
2831 +
2832 + switch (cmd) {
2833 + case VCHIQ_IOC_SHUTDOWN:
2834 + if (!instance->connected)
2835 + break;
2836 +
2837 + /* Remove all services */
2838 + i = 0;
2839 + while ((service = next_service_by_instance(instance->state,
2840 + instance, &i)) != NULL) {
2841 + status = vchiq_remove_service(service->handle);
2842 + unlock_service(service);
2843 + if (status != VCHIQ_SUCCESS)
2844 + break;
2845 + }
2846 + service = NULL;
2847 +
2848 + if (status == VCHIQ_SUCCESS) {
2849 + /* Wake the completion thread and ask it to exit */
2850 + instance->closing = 1;
2851 + up(&instance->insert_event);
2852 + }
2853 +
2854 + break;
2855 +
2856 + case VCHIQ_IOC_CONNECT:
2857 + if (instance->connected) {
2858 + ret = -EINVAL;
2859 + break;
2860 + }
2861 + rc = mutex_lock_interruptible(&instance->state->mutex);
2862 + if (rc != 0) {
2863 + vchiq_log_error(vchiq_arm_log_level,
2864 + "vchiq: connect: could not lock mutex for "
2865 + "state %d: %d",
2866 + instance->state->id, rc);
2867 + ret = -EINTR;
2868 + break;
2869 + }
2870 + status = vchiq_connect_internal(instance->state, instance);
2871 + mutex_unlock(&instance->state->mutex);
2872 +
2873 + if (status == VCHIQ_SUCCESS)
2874 + instance->connected = 1;
2875 + else
2876 + vchiq_log_error(vchiq_arm_log_level,
2877 + "vchiq: could not connect: %d", status);
2878 + break;
2879 +
2880 + case VCHIQ_IOC_CREATE_SERVICE: {
2881 + VCHIQ_CREATE_SERVICE_T args;
2882 + USER_SERVICE_T *user_service = NULL;
2883 + void *userdata;
2884 + int srvstate;
2885 +
2886 + if (copy_from_user
2887 + (&args, (const void __user *)arg,
2888 + sizeof(args)) != 0) {
2889 + ret = -EFAULT;
2890 + break;
2891 + }
2892 +
2893 + user_service = kmalloc(sizeof(USER_SERVICE_T), GFP_KERNEL);
2894 + if (!user_service) {
2895 + ret = -ENOMEM;
2896 + break;
2897 + }
2898 +
2899 + if (args.is_open) {
2900 + if (!instance->connected) {
2901 + ret = -ENOTCONN;
2902 + kfree(user_service);
2903 + break;
2904 + }
2905 + srvstate = VCHIQ_SRVSTATE_OPENING;
2906 + } else {
2907 + srvstate =
2908 + instance->connected ?
2909 + VCHIQ_SRVSTATE_LISTENING :
2910 + VCHIQ_SRVSTATE_HIDDEN;
2911 + }
2912 +
2913 + userdata = args.params.userdata;
2914 + args.params.callback = service_callback;
2915 + args.params.userdata = user_service;
2916 + service = vchiq_add_service_internal(
2917 + instance->state,
2918 + &args.params, srvstate,
2919 + instance, user_service_free);
2920 +
2921 + if (service != NULL) {
2922 + user_service->service = service;
2923 + user_service->userdata = userdata;
2924 + user_service->instance = instance;
2925 + user_service->is_vchi = (args.is_vchi != 0);
2926 + user_service->dequeue_pending = 0;
2927 + user_service->close_pending = 0;
2928 + user_service->message_available_pos =
2929 + instance->completion_remove - 1;
2930 + user_service->msg_insert = 0;
2931 + user_service->msg_remove = 0;
2932 + sema_init(&user_service->insert_event, 0);
2933 + sema_init(&user_service->remove_event, 0);
2934 + sema_init(&user_service->close_event, 0);
2935 +
2936 + if (args.is_open) {
2937 + status = vchiq_open_service_internal
2938 + (service, instance->pid);
2939 + if (status != VCHIQ_SUCCESS) {
2940 + vchiq_remove_service(service->handle);
2941 + service = NULL;
2942 + ret = (status == VCHIQ_RETRY) ?
2943 + -EINTR : -EIO;
2944 + break;
2945 + }
2946 + }
2947 +
2948 + if (copy_to_user((void __user *)
2949 + &(((VCHIQ_CREATE_SERVICE_T __user *)
2950 + arg)->handle),
2951 + (const void *)&service->handle,
2952 + sizeof(service->handle)) != 0) {
2953 + ret = -EFAULT;
2954 + vchiq_remove_service(service->handle);
2955 + }
2956 +
2957 + service = NULL;
2958 + } else {
2959 + ret = -EEXIST;
2960 + kfree(user_service);
2961 + }
2962 + } break;
2963 +
2964 + case VCHIQ_IOC_CLOSE_SERVICE: {
2965 + VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
2966 +
2967 + service = find_service_for_instance(instance, handle);
2968 + if (service != NULL) {
2969 + USER_SERVICE_T *user_service =
2970 + (USER_SERVICE_T *)service->base.userdata;
2971 + /* close_pending is false on first entry, and when the
2972 + wait in vchiq_close_service has been interrupted. */
2973 + if (!user_service->close_pending) {
2974 + status = vchiq_close_service(service->handle);
2975 + if (status != VCHIQ_SUCCESS)
2976 + break;
2977 + }
2978 +
2979 + /* close_pending is true once the underlying service
2980 + has been closed until the client library calls the
2981 + CLOSE_DELIVERED ioctl, signalling close_event. */
2982 + if (user_service->close_pending &&
2983 + down_interruptible(&user_service->close_event))
2984 + status = VCHIQ_RETRY;
2985 + }
2986 + else
2987 + ret = -EINVAL;
2988 + } break;
2989 +
2990 + case VCHIQ_IOC_REMOVE_SERVICE: {
2991 + VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
2992 +
2993 + service = find_service_for_instance(instance, handle);
2994 + if (service != NULL) {
2995 + USER_SERVICE_T *user_service =
2996 + (USER_SERVICE_T *)service->base.userdata;
2997 + /* close_pending is false on first entry, and when the
2998 + wait in vchiq_close_service has been interrupted. */
2999 + if (!user_service->close_pending) {
3000 + status = vchiq_remove_service(service->handle);
3001 + if (status != VCHIQ_SUCCESS)
3002 + break;
3003 + }
3004 +
3005 + /* close_pending is true once the underlying service
3006 + has been closed until the client library calls the
3007 + CLOSE_DELIVERED ioctl, signalling close_event. */
3008 + if (user_service->close_pending &&
3009 + down_interruptible(&user_service->close_event))
3010 + status = VCHIQ_RETRY;
3011 + }
3012 + else
3013 + ret = -EINVAL;
3014 + } break;
3015 +
3016 + case VCHIQ_IOC_USE_SERVICE:
3017 + case VCHIQ_IOC_RELEASE_SERVICE: {
3018 + VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
3019 +
3020 + service = find_service_for_instance(instance, handle);
3021 + if (service != NULL) {
3022 + status = (cmd == VCHIQ_IOC_USE_SERVICE) ?
3023 + vchiq_use_service_internal(service) :
3024 + vchiq_release_service_internal(service);
3025 + if (status != VCHIQ_SUCCESS) {
3026 + vchiq_log_error(vchiq_susp_log_level,
3027 + "%s: cmd %s returned error %d for "
3028 + "service %c%c%c%c:%03d",
3029 + __func__,
3030 + (cmd == VCHIQ_IOC_USE_SERVICE) ?
3031 + "VCHIQ_IOC_USE_SERVICE" :
3032 + "VCHIQ_IOC_RELEASE_SERVICE",
3033 + status,
3034 + VCHIQ_FOURCC_AS_4CHARS(
3035 + service->base.fourcc),
3036 + service->client_id);
3037 + ret = -EINVAL;
3038 + }
3039 + } else
3040 + ret = -EINVAL;
3041 + } break;
3042 +
3043 + case VCHIQ_IOC_QUEUE_MESSAGE: {
3044 + VCHIQ_QUEUE_MESSAGE_T args;
3045 + if (copy_from_user
3046 + (&args, (const void __user *)arg,
3047 + sizeof(args)) != 0) {
3048 + ret = -EFAULT;
3049 + break;
3050 + }
3051 +
3052 + service = find_service_for_instance(instance, args.handle);
3053 +
3054 + if ((service != NULL) && (args.count <= MAX_ELEMENTS)) {
3055 + /* Copy elements into kernel space */
3056 + VCHIQ_ELEMENT_T elements[MAX_ELEMENTS];
3057 + if (copy_from_user(elements, args.elements,
3058 + args.count * sizeof(VCHIQ_ELEMENT_T)) == 0)
3059 + status = vchiq_queue_message
3060 + (args.handle,
3061 + elements, args.count);
3062 + else
3063 + ret = -EFAULT;
3064 + } else {
3065 + ret = -EINVAL;
3066 + }
3067 + } break;
3068 +
3069 + case VCHIQ_IOC_QUEUE_BULK_TRANSMIT:
3070 + case VCHIQ_IOC_QUEUE_BULK_RECEIVE: {
3071 + VCHIQ_QUEUE_BULK_TRANSFER_T args;
3072 + struct bulk_waiter_node *waiter = NULL;
3073 + VCHIQ_BULK_DIR_T dir =
3074 + (cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT) ?
3075 + VCHIQ_BULK_TRANSMIT : VCHIQ_BULK_RECEIVE;
3076 +
3077 + if (copy_from_user
3078 + (&args, (const void __user *)arg,
3079 + sizeof(args)) != 0) {
3080 + ret = -EFAULT;
3081 + break;
3082 + }
3083 +
3084 + service = find_service_for_instance(instance, args.handle);
3085 + if (!service) {
3086 + ret = -EINVAL;
3087 + break;
3088 + }
3089 +
3090 + if (args.mode == VCHIQ_BULK_MODE_BLOCKING) {
3091 + waiter = kzalloc(sizeof(struct bulk_waiter_node),
3092 + GFP_KERNEL);
3093 + if (!waiter) {
3094 + ret = -ENOMEM;
3095 + break;
3096 + }
3097 + args.userdata = &waiter->bulk_waiter;
3098 + } else if (args.mode == VCHIQ_BULK_MODE_WAITING) {
3099 + struct list_head *pos;
3100 + mutex_lock(&instance->bulk_waiter_list_mutex);
3101 + list_for_each(pos, &instance->bulk_waiter_list) {
3102 + if (list_entry(pos, struct bulk_waiter_node,
3103 + list)->pid == current->pid) {
3104 + waiter = list_entry(pos,
3105 + struct bulk_waiter_node,
3106 + list);
3107 + list_del(pos);
3108 + break;
3109 + }
3110 +
3111 + }
3112 + mutex_unlock(&instance->bulk_waiter_list_mutex);
3113 + if (!waiter) {
3114 + vchiq_log_error(vchiq_arm_log_level,
3115 + "no bulk_waiter found for pid %d",
3116 + current->pid);
3117 + ret = -ESRCH;
3118 + break;
3119 + }
3120 + vchiq_log_info(vchiq_arm_log_level,
3121 + "found bulk_waiter %x for pid %d",
3122 + (unsigned int)waiter, current->pid);
3123 + args.userdata = &waiter->bulk_waiter;
3124 + }
3125 + status = vchiq_bulk_transfer
3126 + (args.handle,
3127 + VCHI_MEM_HANDLE_INVALID,
3128 + args.data, args.size,
3129 + args.userdata, args.mode,
3130 + dir);
3131 + if (!waiter)
3132 + break;
3133 + if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) ||
3134 + !waiter->bulk_waiter.bulk) {
3135 + if (waiter->bulk_waiter.bulk) {
3136 + /* Cancel the signal when the transfer
3137 + ** completes. */
3138 + spin_lock(&bulk_waiter_spinlock);
3139 + waiter->bulk_waiter.bulk->userdata = NULL;
3140 + spin_unlock(&bulk_waiter_spinlock);
3141 + }
3142 + kfree(waiter);
3143 + } else {
3144 + const VCHIQ_BULK_MODE_T mode_waiting =
3145 + VCHIQ_BULK_MODE_WAITING;
3146 + waiter->pid = current->pid;
3147 + mutex_lock(&instance->bulk_waiter_list_mutex);
3148 + list_add(&waiter->list, &instance->bulk_waiter_list);
3149 + mutex_unlock(&instance->bulk_waiter_list_mutex);
3150 + vchiq_log_info(vchiq_arm_log_level,
3151 + "saved bulk_waiter %x for pid %d",
3152 + (unsigned int)waiter, current->pid);
3153 +
3154 + if (copy_to_user((void __user *)
3155 + &(((VCHIQ_QUEUE_BULK_TRANSFER_T __user *)
3156 + arg)->mode),
3157 + (const void *)&mode_waiting,
3158 + sizeof(mode_waiting)) != 0)
3159 + ret = -EFAULT;
3160 + }
3161 + } break;
3162 +
3163 + case VCHIQ_IOC_AWAIT_COMPLETION: {
3164 + VCHIQ_AWAIT_COMPLETION_T args;
3165 +
3166 + DEBUG_TRACE(AWAIT_COMPLETION_LINE);
3167 + if (!instance->connected) {
3168 + ret = -ENOTCONN;
3169 + break;
3170 + }
3171 +
3172 + if (copy_from_user(&args, (const void __user *)arg,
3173 + sizeof(args)) != 0) {
3174 + ret = -EFAULT;
3175 + break;
3176 + }
3177 +
3178 + mutex_lock(&instance->completion_mutex);
3179 +
3180 + DEBUG_TRACE(AWAIT_COMPLETION_LINE);
3181 + while ((instance->completion_remove ==
3182 + instance->completion_insert)
3183 + && !instance->closing) {
3184 + int rc;
3185 + DEBUG_TRACE(AWAIT_COMPLETION_LINE);
3186 + mutex_unlock(&instance->completion_mutex);
3187 + rc = down_interruptible(&instance->insert_event);
3188 + mutex_lock(&instance->completion_mutex);
3189 + if (rc != 0) {
3190 + DEBUG_TRACE(AWAIT_COMPLETION_LINE);
3191 + vchiq_log_info(vchiq_arm_log_level,
3192 + "AWAIT_COMPLETION interrupted");
3193 + ret = -EINTR;
3194 + break;
3195 + }
3196 + }
3197 + DEBUG_TRACE(AWAIT_COMPLETION_LINE);
3198 +
3199 + /* A read memory barrier is needed to stop prefetch of a stale
3200 + ** completion record
3201 + */
3202 + rmb();
3203 +
3204 + if (ret == 0) {
3205 + int msgbufcount = args.msgbufcount;
3206 + for (ret = 0; ret < args.count; ret++) {
3207 + VCHIQ_COMPLETION_DATA_T *completion;
3208 + VCHIQ_SERVICE_T *service;
3209 + USER_SERVICE_T *user_service;
3210 + VCHIQ_HEADER_T *header;
3211 + if (instance->completion_remove ==
3212 + instance->completion_insert)
3213 + break;
3214 + completion = &instance->completions[
3215 + instance->completion_remove &
3216 + (MAX_COMPLETIONS - 1)];
3217 +
3218 + service = completion->service_userdata;
3219 + user_service = service->base.userdata;
3220 + completion->service_userdata =
3221 + user_service->userdata;
3222 +
3223 + header = completion->header;
3224 + if (header) {
3225 + void __user *msgbuf;
3226 + int msglen;
3227 +
3228 + msglen = header->size +
3229 + sizeof(VCHIQ_HEADER_T);
3230 + /* This must be a VCHIQ-style service */
3231 + if (args.msgbufsize < msglen) {
3232 + vchiq_log_error(
3233 + vchiq_arm_log_level,
3234 + "header %x: msgbufsize"
3235 + " %x < msglen %x",
3236 + (unsigned int)header,
3237 + args.msgbufsize,
3238 + msglen);
3239 + WARN(1, "invalid message "
3240 + "size\n");
3241 + if (ret == 0)
3242 + ret = -EMSGSIZE;
3243 + break;
3244 + }
3245 + if (msgbufcount <= 0)
3246 + /* Stall here for lack of a
3247 + ** buffer for the message. */
3248 + break;
3249 + /* Get the pointer from user space */
3250 + msgbufcount--;
3251 + if (copy_from_user(&msgbuf,
3252 + (const void __user *)
3253 + &args.msgbufs[msgbufcount],
3254 + sizeof(msgbuf)) != 0) {
3255 + if (ret == 0)
3256 + ret = -EFAULT;
3257 + break;
3258 + }
3259 +
3260 + /* Copy the message to user space */
3261 + if (copy_to_user(msgbuf, header,
3262 + msglen) != 0) {
3263 + if (ret == 0)
3264 + ret = -EFAULT;
3265 + break;
3266 + }
3267 +
3268 + /* Now it has been copied, the message
3269 + ** can be released. */
3270 + vchiq_release_message(service->handle,
3271 + header);
3272 +
3273 + /* The completion must point to the
3274 + ** msgbuf. */
3275 + completion->header = msgbuf;
3276 + }
3277 +
3278 + if ((completion->reason ==
3279 + VCHIQ_SERVICE_CLOSED) &&
3280 + !instance->use_close_delivered)
3281 + unlock_service(service);
3282 +
3283 + if (copy_to_user((void __user *)(
3284 + (size_t)args.buf +
3285 + ret * sizeof(VCHIQ_COMPLETION_DATA_T)),
3286 + completion,
3287 + sizeof(VCHIQ_COMPLETION_DATA_T)) != 0) {
3288 + if (ret == 0)
3289 + ret = -EFAULT;
3290 + break;
3291 + }
3292 +
3293 + instance->completion_remove++;
3294 + }
3295 +
3296 + if (msgbufcount != args.msgbufcount) {
3297 + if (copy_to_user((void __user *)
3298 + &((VCHIQ_AWAIT_COMPLETION_T *)arg)->
3299 + msgbufcount,
3300 + &msgbufcount,
3301 + sizeof(msgbufcount)) != 0) {
3302 + ret = -EFAULT;
3303 + }
3304 + }
3305 + }
3306 +
3307 + if (ret != 0)
3308 + up(&instance->remove_event);
3309 + mutex_unlock(&instance->completion_mutex);
3310 + DEBUG_TRACE(AWAIT_COMPLETION_LINE);
3311 + } break;
3312 +
3313 + case VCHIQ_IOC_DEQUEUE_MESSAGE: {
3314 + VCHIQ_DEQUEUE_MESSAGE_T args;
3315 + USER_SERVICE_T *user_service;
3316 + VCHIQ_HEADER_T *header;
3317 +
3318 + DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
3319 + if (copy_from_user
3320 + (&args, (const void __user *)arg,
3321 + sizeof(args)) != 0) {
3322 + ret = -EFAULT;
3323 + break;
3324 + }
3325 + service = find_service_for_instance(instance, args.handle);
3326 + if (!service) {
3327 + ret = -EINVAL;
3328 + break;
3329 + }
3330 + user_service = (USER_SERVICE_T *)service->base.userdata;
3331 + if (user_service->is_vchi == 0) {
3332 + ret = -EINVAL;
3333 + break;
3334 + }
3335 +
3336 + spin_lock(&msg_queue_spinlock);
3337 + if (user_service->msg_remove == user_service->msg_insert) {
3338 + if (!args.blocking) {
3339 + spin_unlock(&msg_queue_spinlock);
3340 + DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
3341 + ret = -EWOULDBLOCK;
3342 + break;
3343 + }
3344 + user_service->dequeue_pending = 1;
3345 + do {
3346 + spin_unlock(&msg_queue_spinlock);
3347 + DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
3348 + if (down_interruptible(
3349 + &user_service->insert_event) != 0) {
3350 + vchiq_log_info(vchiq_arm_log_level,
3351 + "DEQUEUE_MESSAGE interrupted");
3352 + ret = -EINTR;
3353 + break;
3354 + }
3355 + spin_lock(&msg_queue_spinlock);
3356 + } while (user_service->msg_remove ==
3357 + user_service->msg_insert);
3358 +
3359 + if (ret)
3360 + break;
3361 + }
3362 +
3363 + BUG_ON((int)(user_service->msg_insert -
3364 + user_service->msg_remove) < 0);
3365 +
3366 + header = user_service->msg_queue[user_service->msg_remove &
3367 + (MSG_QUEUE_SIZE - 1)];
3368 + user_service->msg_remove++;
3369 + spin_unlock(&msg_queue_spinlock);
3370 +
3371 + up(&user_service->remove_event);
3372 + if (header == NULL)
3373 + ret = -ENOTCONN;
3374 + else if (header->size <= args.bufsize) {
3375 +			/* Copy to user space if args.buf is not NULL */
3376 + if ((args.buf == NULL) ||
3377 + (copy_to_user((void __user *)args.buf,
3378 + header->data,
3379 + header->size) == 0)) {
3380 + ret = header->size;
3381 + vchiq_release_message(
3382 + service->handle,
3383 + header);
3384 + } else
3385 + ret = -EFAULT;
3386 + } else {
3387 + vchiq_log_error(vchiq_arm_log_level,
3388 + "header %x: bufsize %x < size %x",
3389 + (unsigned int)header, args.bufsize,
3390 + header->size);
3391 + WARN(1, "invalid size\n");
3392 + ret = -EMSGSIZE;
3393 + }
3394 + DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
3395 + } break;
3396 +
3397 + case VCHIQ_IOC_GET_CLIENT_ID: {
3398 + VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
3399 +
3400 + ret = vchiq_get_client_id(handle);
3401 + } break;
3402 +
3403 + case VCHIQ_IOC_GET_CONFIG: {
3404 + VCHIQ_GET_CONFIG_T args;
3405 + VCHIQ_CONFIG_T config;
3406 +
3407 + if (copy_from_user(&args, (const void __user *)arg,
3408 + sizeof(args)) != 0) {
3409 + ret = -EFAULT;
3410 + break;
3411 + }
3412 + if (args.config_size > sizeof(config)) {
3413 + ret = -EINVAL;
3414 + break;
3415 + }
3416 + status = vchiq_get_config(instance, args.config_size, &config);
3417 + if (status == VCHIQ_SUCCESS) {
3418 + if (copy_to_user((void __user *)args.pconfig,
3419 + &config, args.config_size) != 0) {
3420 + ret = -EFAULT;
3421 + break;
3422 + }
3423 + }
3424 + } break;
3425 +
3426 + case VCHIQ_IOC_SET_SERVICE_OPTION: {
3427 + VCHIQ_SET_SERVICE_OPTION_T args;
3428 +
3429 + if (copy_from_user(
3430 + &args, (const void __user *)arg,
3431 + sizeof(args)) != 0) {
3432 + ret = -EFAULT;
3433 + break;
3434 + }
3435 +
3436 + service = find_service_for_instance(instance, args.handle);
3437 + if (!service) {
3438 + ret = -EINVAL;
3439 + break;
3440 + }
3441 +
3442 + status = vchiq_set_service_option(
3443 + args.handle, args.option, args.value);
3444 + } break;
3445 +
3446 + case VCHIQ_IOC_DUMP_PHYS_MEM: {
3447 + VCHIQ_DUMP_MEM_T args;
3448 +
3449 + if (copy_from_user
3450 + (&args, (const void __user *)arg,
3451 + sizeof(args)) != 0) {
3452 + ret = -EFAULT;
3453 + break;
3454 + }
3455 + dump_phys_mem(args.virt_addr, args.num_bytes);
3456 + } break;
3457 +
3458 + case VCHIQ_IOC_LIB_VERSION: {
3459 + unsigned int lib_version = (unsigned int)arg;
3460 +
3461 + if (lib_version < VCHIQ_VERSION_MIN)
3462 + ret = -EINVAL;
3463 + else if (lib_version >= VCHIQ_VERSION_CLOSE_DELIVERED)
3464 + instance->use_close_delivered = 1;
3465 + } break;
3466 +
3467 + case VCHIQ_IOC_CLOSE_DELIVERED: {
3468 + VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
3469 +
3470 + service = find_closed_service_for_instance(instance, handle);
3471 + if (service != NULL) {
3472 + USER_SERVICE_T *user_service =
3473 + (USER_SERVICE_T *)service->base.userdata;
3474 + close_delivered(user_service);
3475 + }
3476 + else
3477 + ret = -EINVAL;
3478 + } break;
3479 +
3480 + default:
3481 + ret = -ENOTTY;
3482 + break;
3483 + }
3484 +
3485 + if (service)
3486 + unlock_service(service);
3487 +
3488 + if (ret == 0) {
3489 + if (status == VCHIQ_ERROR)
3490 + ret = -EIO;
3491 + else if (status == VCHIQ_RETRY)
3492 + ret = -EINTR;
3493 + }
3494 +
3495 + if ((status == VCHIQ_SUCCESS) && (ret < 0) && (ret != -EINTR) &&
3496 + (ret != -EWOULDBLOCK))
3497 + vchiq_log_info(vchiq_arm_log_level,
3498 + " ioctl instance %lx, cmd %s -> status %d, %ld",
3499 + (unsigned long)instance,
3500 + (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
3501 + ioctl_names[_IOC_NR(cmd)] :
3502 + "<invalid>",
3503 + status, ret);
3504 + else
3505 + vchiq_log_trace(vchiq_arm_log_level,
3506 + " ioctl instance %lx, cmd %s -> status %d, %ld",
3507 + (unsigned long)instance,
3508 + (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
3509 + ioctl_names[_IOC_NR(cmd)] :
3510 + "<invalid>",
3511 + status, ret);
3512 +
3513 + return ret;
3514 +}
3515 +
3516 +/****************************************************************************
3517 +*
3518 +* vchiq_open
3519 +*
3520 +***************************************************************************/
3521 +
3522 +static int
3523 +vchiq_open(struct inode *inode, struct file *file)
3524 +{
3525 + int dev = iminor(inode) & 0x0f;
3526 + vchiq_log_info(vchiq_arm_log_level, "vchiq_open");
3527 + switch (dev) {
3528 + case VCHIQ_MINOR: {
3529 + int ret;
3530 + VCHIQ_STATE_T *state = vchiq_get_state();
3531 + VCHIQ_INSTANCE_T instance;
3532 +
3533 + if (!state) {
3534 + vchiq_log_error(vchiq_arm_log_level,
3535 + "vchiq has no connection to VideoCore");
3536 + return -ENOTCONN;
3537 + }
3538 +
3539 + instance = kzalloc(sizeof(*instance), GFP_KERNEL);
3540 + if (!instance)
3541 + return -ENOMEM;
3542 +
3543 + instance->state = state;
3544 + instance->pid = current->tgid;
3545 +
3546 + ret = vchiq_debugfs_add_instance(instance);
3547 + if (ret != 0) {
3548 + kfree(instance);
3549 + return ret;
3550 + }
3551 +
3552 + sema_init(&instance->insert_event, 0);
3553 + sema_init(&instance->remove_event, 0);
3554 + mutex_init(&instance->completion_mutex);
3555 + mutex_init(&instance->bulk_waiter_list_mutex);
3556 + INIT_LIST_HEAD(&instance->bulk_waiter_list);
3557 +
3558 + file->private_data = instance;
3559 + } break;
3560 +
3561 + default:
3562 + vchiq_log_error(vchiq_arm_log_level,
3563 + "Unknown minor device: %d", dev);
3564 + return -ENXIO;
3565 + }
3566 +
3567 + return 0;
3568 +}
3569 +
3570 +/****************************************************************************
3571 +*
3572 +* vchiq_release
3573 +*
3574 +***************************************************************************/
3575 +
3576 +static int
3577 +vchiq_release(struct inode *inode, struct file *file)
3578 +{
3579 + int dev = iminor(inode) & 0x0f;
3580 + int ret = 0;
3581 + switch (dev) {
3582 + case VCHIQ_MINOR: {
3583 + VCHIQ_INSTANCE_T instance = file->private_data;
3584 + VCHIQ_STATE_T *state = vchiq_get_state();
3585 + VCHIQ_SERVICE_T *service;
3586 + int i;
3587 +
3588 + vchiq_log_info(vchiq_arm_log_level,
3589 + "vchiq_release: instance=%lx",
3590 + (unsigned long)instance);
3591 +
3592 + if (!state) {
3593 + ret = -EPERM;
3594 + goto out;
3595 + }
3596 +
3597 + /* Ensure videocore is awake to allow termination. */
3598 + vchiq_use_internal(instance->state, NULL,
3599 + USE_TYPE_VCHIQ);
3600 +
3601 + mutex_lock(&instance->completion_mutex);
3602 +
3603 + /* Wake the completion thread and ask it to exit */
3604 + instance->closing = 1;
3605 + up(&instance->insert_event);
3606 +
3607 + mutex_unlock(&instance->completion_mutex);
3608 +
3609 + /* Wake the slot handler if the completion queue is full. */
3610 + up(&instance->remove_event);
3611 +
3612 + /* Mark all services for termination... */
3613 + i = 0;
3614 + while ((service = next_service_by_instance(state, instance,
3615 + &i)) != NULL) {
3616 + USER_SERVICE_T *user_service = service->base.userdata;
3617 +
3618 + /* Wake the slot handler if the msg queue is full. */
3619 + up(&user_service->remove_event);
3620 +
3621 + vchiq_terminate_service_internal(service);
3622 + unlock_service(service);
3623 + }
3624 +
3625 + /* ...and wait for them to die */
3626 + i = 0;
3627 + while ((service = next_service_by_instance(state, instance, &i))
3628 + != NULL) {
3629 + USER_SERVICE_T *user_service = service->base.userdata;
3630 +
3631 + down(&service->remove_event);
3632 +
3633 + BUG_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);
3634 +
3635 + spin_lock(&msg_queue_spinlock);
3636 +
3637 + while (user_service->msg_remove !=
3638 + user_service->msg_insert) {
3639 + VCHIQ_HEADER_T *header = user_service->
3640 + msg_queue[user_service->msg_remove &
3641 + (MSG_QUEUE_SIZE - 1)];
3642 + user_service->msg_remove++;
3643 + spin_unlock(&msg_queue_spinlock);
3644 +
3645 + if (header)
3646 + vchiq_release_message(
3647 + service->handle,
3648 + header);
3649 + spin_lock(&msg_queue_spinlock);
3650 + }
3651 +
3652 + spin_unlock(&msg_queue_spinlock);
3653 +
3654 + unlock_service(service);
3655 + }
3656 +
3657 + /* Release any closed services */
3658 + while (instance->completion_remove !=
3659 + instance->completion_insert) {
3660 + VCHIQ_COMPLETION_DATA_T *completion;
3661 + VCHIQ_SERVICE_T *service;
3662 + completion = &instance->completions[
3663 + instance->completion_remove &
3664 + (MAX_COMPLETIONS - 1)];
3665 + service = completion->service_userdata;
3666 + if (completion->reason == VCHIQ_SERVICE_CLOSED)
3667 + {
3668 + USER_SERVICE_T *user_service =
3669 + service->base.userdata;
3670 +
3671 + /* Wake any blocked user-thread */
3672 + if (instance->use_close_delivered)
3673 + up(&user_service->close_event);
3674 + unlock_service(service);
3675 + }
3676 + instance->completion_remove++;
3677 + }
3678 +
3679 + /* Release the PEER service count. */
3680 + vchiq_release_internal(instance->state, NULL);
3681 +
3682 + {
3683 + struct list_head *pos, *next;
3684 + list_for_each_safe(pos, next,
3685 + &instance->bulk_waiter_list) {
3686 + struct bulk_waiter_node *waiter;
3687 + waiter = list_entry(pos,
3688 + struct bulk_waiter_node,
3689 + list);
3690 + list_del(pos);
3691 + vchiq_log_info(vchiq_arm_log_level,
3692 + "bulk_waiter - cleaned up %x "
3693 + "for pid %d",
3694 + (unsigned int)waiter, waiter->pid);
3695 + kfree(waiter);
3696 + }
3697 + }
3698 +
3699 + vchiq_debugfs_remove_instance(instance);
3700 +
3701 + kfree(instance);
3702 + file->private_data = NULL;
3703 + } break;
3704 +
3705 + default:
3706 + vchiq_log_error(vchiq_arm_log_level,
3707 + "Unknown minor device: %d", dev);
3708 + ret = -ENXIO;
3709 + }
3710 +
3711 +out:
3712 + return ret;
3713 +}
3714 +
3715 +/****************************************************************************
3716 +*
3717 +* vchiq_dump
3718 +*
3719 +***************************************************************************/
3720 +
3721 +void
3722 +vchiq_dump(void *dump_context, const char *str, int len)
3723 +{
3724 + DUMP_CONTEXT_T *context = (DUMP_CONTEXT_T *)dump_context;
3725 +
3726 + if (context->actual < context->space) {
3727 + int copy_bytes;
3728 + if (context->offset > 0) {
3729 + int skip_bytes = min(len, (int)context->offset);
3730 + str += skip_bytes;
3731 + len -= skip_bytes;
3732 + context->offset -= skip_bytes;
3733 + if (context->offset > 0)
3734 + return;
3735 + }
3736 + copy_bytes = min(len, (int)(context->space - context->actual));
3737 + if (copy_bytes == 0)
3738 + return;
3739 + if (copy_to_user(context->buf + context->actual, str,
3740 + copy_bytes))
3741 + context->actual = -EFAULT;
3742 + context->actual += copy_bytes;
3743 + len -= copy_bytes;
3744 +
3745 +	/* If the terminating NUL is included in the length, then it
3746 +	** marks the end of a line and should be replaced with a
3747 +	** newline. */
3748 + if ((len == 0) && (str[copy_bytes - 1] == '\0')) {
3749 + char cr = '\n';
3750 + if (copy_to_user(context->buf + context->actual - 1,
3751 + &cr, 1))
3752 + context->actual = -EFAULT;
3753 + }
3754 + }
3755 +}
3756 +
3757 +/****************************************************************************
3758 +*
3759 +* vchiq_dump_platform_instances
3760 +*
3761 +***************************************************************************/
3762 +
3763 +void
3764 +vchiq_dump_platform_instances(void *dump_context)
3765 +{
3766 + VCHIQ_STATE_T *state = vchiq_get_state();
3767 + char buf[80];
3768 + int len;
3769 + int i;
3770 +
3771 + /* There is no list of instances, so instead scan all services,
3772 + marking those that have been dumped. */
3773 +
3774 + for (i = 0; i < state->unused_service; i++) {
3775 + VCHIQ_SERVICE_T *service = state->services[i];
3776 + VCHIQ_INSTANCE_T instance;
3777 +
3778 + if (service && (service->base.callback == service_callback)) {
3779 + instance = service->instance;
3780 + if (instance)
3781 + instance->mark = 0;
3782 + }
3783 + }
3784 +
3785 + for (i = 0; i < state->unused_service; i++) {
3786 + VCHIQ_SERVICE_T *service = state->services[i];
3787 + VCHIQ_INSTANCE_T instance;
3788 +
3789 + if (service && (service->base.callback == service_callback)) {
3790 + instance = service->instance;
3791 + if (instance && !instance->mark) {
3792 + len = snprintf(buf, sizeof(buf),
3793 + "Instance %x: pid %d,%s completions "
3794 + "%d/%d",
3795 + (unsigned int)instance, instance->pid,
3796 + instance->connected ? " connected, " :
3797 + "",
3798 + instance->completion_insert -
3799 + instance->completion_remove,
3800 + MAX_COMPLETIONS);
3801 +
3802 + vchiq_dump(dump_context, buf, len + 1);
3803 +
3804 + instance->mark = 1;
3805 + }
3806 + }
3807 + }
3808 +}
3809 +
3810 +/****************************************************************************
3811 +*
3812 +* vchiq_dump_platform_service_state
3813 +*
3814 +***************************************************************************/
3815 +
3816 +void
3817 +vchiq_dump_platform_service_state(void *dump_context, VCHIQ_SERVICE_T *service)
3818 +{
3819 + USER_SERVICE_T *user_service = (USER_SERVICE_T *)service->base.userdata;
3820 + char buf[80];
3821 + int len;
3822 +
3823 + len = snprintf(buf, sizeof(buf), " instance %x",
3824 + (unsigned int)service->instance);
3825 +
3826 + if ((service->base.callback == service_callback) &&
3827 + user_service->is_vchi) {
3828 + len += snprintf(buf + len, sizeof(buf) - len,
3829 + ", %d/%d messages",
3830 + user_service->msg_insert - user_service->msg_remove,
3831 + MSG_QUEUE_SIZE);
3832 +
3833 + if (user_service->dequeue_pending)
3834 + len += snprintf(buf + len, sizeof(buf) - len,
3835 + " (dequeue pending)");
3836 + }
3837 +
3838 + vchiq_dump(dump_context, buf, len + 1);
3839 +}
3840 +
3841 +/****************************************************************************
3842 +*
3843 +* dump_phys_mem
3844 +*
3845 +***************************************************************************/
3846 +
3847 +static void
3848 +dump_phys_mem(void *virt_addr, uint32_t num_bytes)
3849 +{
3850 + int rc;
3851 + uint8_t *end_virt_addr = virt_addr + num_bytes;
3852 + int num_pages;
3853 + int offset;
3854 + int end_offset;
3855 + int page_idx;
3856 + int prev_idx;
3857 + struct page *page;
3858 + struct page **pages;
3859 + uint8_t *kmapped_virt_ptr;
3860 +
3861 +	/* Align virt_addr and end_virt_addr to 16-byte boundaries. */
3862 +
3863 + virt_addr = (void *)((unsigned long)virt_addr & ~0x0fuL);
3864 + end_virt_addr = (void *)(((unsigned long)end_virt_addr + 15uL) &
3865 + ~0x0fuL);
3866 +
3867 + offset = (int)(long)virt_addr & (PAGE_SIZE - 1);
3868 + end_offset = (int)(long)end_virt_addr & (PAGE_SIZE - 1);
3869 +
3870 + num_pages = (offset + num_bytes + PAGE_SIZE - 1) / PAGE_SIZE;
3871 +
3872 + pages = kmalloc(sizeof(struct page *) * num_pages, GFP_KERNEL);
3873 + if (pages == NULL) {
3874 + vchiq_log_error(vchiq_arm_log_level,
3875 +			"Unable to allocate memory for %d pages\n",
3876 + num_pages);
3877 + return;
3878 + }
3879 +
3880 + down_read(&current->mm->mmap_sem);
3881 + rc = get_user_pages(current, /* task */
3882 + current->mm, /* mm */
3883 + (unsigned long)virt_addr, /* start */
3884 + num_pages, /* len */
3885 + 0, /* write */
3886 + 0, /* force */
3887 + pages, /* pages (array of page pointers) */
3888 + NULL); /* vmas */
3889 + up_read(&current->mm->mmap_sem);
3890 +
3891 + prev_idx = -1;
3892 + page = NULL;
3893 +
3894 + while (offset < end_offset) {
3895 +
3896 + int page_offset = offset % PAGE_SIZE;
3897 + page_idx = offset / PAGE_SIZE;
3898 +
3899 + if (page_idx != prev_idx) {
3900 +
3901 + if (page != NULL)
3902 + kunmap(page);
3903 + page = pages[page_idx];
3904 + kmapped_virt_ptr = kmap(page);
3905 +
3906 + prev_idx = page_idx;
3907 + }
3908 +
3909 + if (vchiq_arm_log_level >= VCHIQ_LOG_TRACE)
3910 + vchiq_log_dump_mem("ph",
3911 + (uint32_t)(unsigned long)&kmapped_virt_ptr[
3912 + page_offset],
3913 + &kmapped_virt_ptr[page_offset], 16);
3914 +
3915 + offset += 16;
3916 + }
3917 + if (page != NULL)
3918 + kunmap(page);
3919 +
3920 + for (page_idx = 0; page_idx < num_pages; page_idx++)
3921 + page_cache_release(pages[page_idx]);
3922 +
3923 + kfree(pages);
3924 +}
3925 +
3926 +/****************************************************************************
3927 +*
3928 +* vchiq_read
3929 +*
3930 +***************************************************************************/
3931 +
3932 +static ssize_t
3933 +vchiq_read(struct file *file, char __user *buf,
3934 + size_t count, loff_t *ppos)
3935 +{
3936 + DUMP_CONTEXT_T context;
3937 + context.buf = buf;
3938 + context.actual = 0;
3939 + context.space = count;
3940 + context.offset = *ppos;
3941 +
3942 + vchiq_dump_state(&context, &g_state);
3943 +
3944 + *ppos += context.actual;
3945 +
3946 + return context.actual;
3947 +}
3948 +
3949 +VCHIQ_STATE_T *
3950 +vchiq_get_state(void)
3951 +{
3952 +
3953 + if (g_state.remote == NULL)
3954 + printk(KERN_ERR "%s: g_state.remote == NULL\n", __func__);
3955 + else if (g_state.remote->initialised != 1)
3956 + printk(KERN_NOTICE "%s: g_state.remote->initialised != 1 (%d)\n",
3957 + __func__, g_state.remote->initialised);
3958 +
3959 + return ((g_state.remote != NULL) &&
3960 + (g_state.remote->initialised == 1)) ? &g_state : NULL;
3961 +}
3962 +
3963 +static const struct file_operations
3964 +vchiq_fops = {
3965 + .owner = THIS_MODULE,
3966 + .unlocked_ioctl = vchiq_ioctl,
3967 + .open = vchiq_open,
3968 + .release = vchiq_release,
3969 + .read = vchiq_read
3970 +};
3971 +
3972 +/*
3973 + * Autosuspend related functionality
3974 + */
3975 +
3976 +int
3977 +vchiq_videocore_wanted(VCHIQ_STATE_T *state)
3978 +{
3979 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
3980 + if (!arm_state)
3981 + /* autosuspend not supported - always return wanted */
3982 + return 1;
3983 + else if (arm_state->blocked_count)
3984 + return 1;
3985 + else if (!arm_state->videocore_use_count)
3986 + /* usage count zero - check for override unless we're forcing */
3987 + if (arm_state->resume_blocked)
3988 + return 0;
3989 + else
3990 + return vchiq_platform_videocore_wanted(state);
3991 + else
3992 + /* non-zero usage count - videocore still required */
3993 + return 1;
3994 +}
3995 +
3996 +static VCHIQ_STATUS_T
3997 +vchiq_keepalive_vchiq_callback(VCHIQ_REASON_T reason,
3998 + VCHIQ_HEADER_T *header,
3999 + VCHIQ_SERVICE_HANDLE_T service_user,
4000 + void *bulk_user)
4001 +{
4002 + vchiq_log_error(vchiq_susp_log_level,
4003 + "%s callback reason %d", __func__, reason);
4004 + return 0;
4005 +}
4006 +
4007 +static int
4008 +vchiq_keepalive_thread_func(void *v)
4009 +{
4010 + VCHIQ_STATE_T *state = (VCHIQ_STATE_T *) v;
4011 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4012 +
4013 + VCHIQ_STATUS_T status;
4014 + VCHIQ_INSTANCE_T instance;
4015 + VCHIQ_SERVICE_HANDLE_T ka_handle;
4016 +
4017 + VCHIQ_SERVICE_PARAMS_T params = {
4018 + .fourcc = VCHIQ_MAKE_FOURCC('K', 'E', 'E', 'P'),
4019 + .callback = vchiq_keepalive_vchiq_callback,
4020 + .version = KEEPALIVE_VER,
4021 + .version_min = KEEPALIVE_VER_MIN
4022 + };
4023 +
4024 + status = vchiq_initialise(&instance);
4025 + if (status != VCHIQ_SUCCESS) {
4026 + vchiq_log_error(vchiq_susp_log_level,
4027 + "%s vchiq_initialise failed %d", __func__, status);
4028 + goto exit;
4029 + }
4030 +
4031 + status = vchiq_connect(instance);
4032 + if (status != VCHIQ_SUCCESS) {
4033 + vchiq_log_error(vchiq_susp_log_level,
4034 + "%s vchiq_connect failed %d", __func__, status);
4035 + goto shutdown;
4036 + }
4037 +
4038 + status = vchiq_add_service(instance, &params, &ka_handle);
4039 + if (status != VCHIQ_SUCCESS) {
4040 + vchiq_log_error(vchiq_susp_log_level,
4041 +			"%s vchiq_add_service failed %d", __func__, status);
4042 + goto shutdown;
4043 + }
4044 +
4045 + while (1) {
4046 + long rc = 0, uc = 0;
4047 + if (wait_for_completion_interruptible(&arm_state->ka_evt)
4048 + != 0) {
4049 + vchiq_log_error(vchiq_susp_log_level,
4050 + "%s interrupted", __func__);
4051 + flush_signals(current);
4052 + continue;
4053 + }
4054 +
4055 + /* read and clear counters. Do release_count then use_count to
4056 + * prevent getting more releases than uses */
4057 + rc = atomic_xchg(&arm_state->ka_release_count, 0);
4058 + uc = atomic_xchg(&arm_state->ka_use_count, 0);
4059 +
4060 + /* Call use/release service the requisite number of times.
4061 + * Process use before release so use counts don't go negative */
4062 + while (uc--) {
4063 + atomic_inc(&arm_state->ka_use_ack_count);
4064 + status = vchiq_use_service(ka_handle);
4065 + if (status != VCHIQ_SUCCESS) {
4066 + vchiq_log_error(vchiq_susp_log_level,
4067 + "%s vchiq_use_service error %d",
4068 + __func__, status);
4069 + }
4070 + }
4071 + while (rc--) {
4072 + status = vchiq_release_service(ka_handle);
4073 + if (status != VCHIQ_SUCCESS) {
4074 + vchiq_log_error(vchiq_susp_log_level,
4075 + "%s vchiq_release_service error %d",
4076 + __func__, status);
4077 + }
4078 + }
4079 + }
4080 +
4081 +shutdown:
4082 + vchiq_shutdown(instance);
4083 +exit:
4084 + return 0;
4085 +}
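The release-then-use snapshot order commented in the loop above is what stops the keep-alive thread from issuing more releases than uses: a use that slips in between the two exchanges is drained now, while its matching release can only be picked up on the next pass. A minimal userspace model of that ordering (a sketch only, not part of the patch; the printf calls merely stand in for vchiq_use_service and vchiq_release_service):

#include <stdatomic.h>
#include <stdio.h>

static atomic_long ka_use_count;
static atomic_long ka_release_count;

/* Drain the counters in the same order as the keep-alive thread:
 * snapshot releases first, then uses, and process uses before releases
 * so the modelled use count never goes negative. */
static void drain_keepalive_counters(void)
{
	long rc = atomic_exchange(&ka_release_count, 0);	/* releases first */
	long uc = atomic_exchange(&ka_use_count, 0);		/* then uses */

	while (uc--)
		printf("use\n");	/* stands in for vchiq_use_service() */
	while (rc--)
		printf("release\n");	/* stands in for vchiq_release_service() */
}

int main(void)
{
	atomic_fetch_add(&ka_use_count, 2);
	atomic_fetch_add(&ka_release_count, 1);
	drain_keepalive_counters();	/* prints "use", "use", "release" */
	return 0;
}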
4086 +
4087 +
4088 +
4089 +VCHIQ_STATUS_T
4090 +vchiq_arm_init_state(VCHIQ_STATE_T *state, VCHIQ_ARM_STATE_T *arm_state)
4091 +{
4092 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
4093 +
4094 + if (arm_state) {
4095 + rwlock_init(&arm_state->susp_res_lock);
4096 +
4097 + init_completion(&arm_state->ka_evt);
4098 + atomic_set(&arm_state->ka_use_count, 0);
4099 + atomic_set(&arm_state->ka_use_ack_count, 0);
4100 + atomic_set(&arm_state->ka_release_count, 0);
4101 +
4102 + init_completion(&arm_state->vc_suspend_complete);
4103 +
4104 + init_completion(&arm_state->vc_resume_complete);
4105 + /* Initialise to 'done' state. We only want to block on resume
4106 + * completion while videocore is suspended. */
4107 + set_resume_state(arm_state, VC_RESUME_RESUMED);
4108 +
4109 + init_completion(&arm_state->resume_blocker);
4110 + /* Initialise to 'done' state. We only want to block on this
4111 + * completion while resume is blocked */
4112 + complete_all(&arm_state->resume_blocker);
4113 +
4114 + init_completion(&arm_state->blocked_blocker);
4115 + /* Initialise to 'done' state. We only want to block on this
4116 + * completion while things are waiting on the resume blocker */
4117 + complete_all(&arm_state->blocked_blocker);
4118 +
4119 + arm_state->suspend_timer_timeout = SUSPEND_TIMER_TIMEOUT_MS;
4120 + arm_state->suspend_timer_running = 0;
4121 + init_timer(&arm_state->suspend_timer);
4122 + arm_state->suspend_timer.data = (unsigned long)(state);
4123 + arm_state->suspend_timer.function = suspend_timer_callback;
4124 +
4125 + arm_state->first_connect = 0;
4126 +
4127 + }
4128 + return status;
4129 +}
4130 +
4131 +/*
4132 +** Functions to modify the state variables:
4133 +** set_suspend_state
4134 +** set_resume_state
4135 +**
4136 +** There are more state variables than we might like, so ensure they remain in
4137 +** step. Suspend and resume state are maintained separately, since most of
4138 +** these state machines can operate independently. However, there are a few
4139 +** states where state transitions in one state machine cause a reset to the
4140 +** other state machine. In addition, there are some completion events which
4141 +** need to occur on state machine reset and end-state(s), so these are also
4142 +** dealt with in these functions.
4143 +**
4144 +** In all states we set the state variable according to the input, but in some
4145 +** cases we perform additional steps outlined below:
4146 +**
4147 +** VC_SUSPEND_IDLE - Initialise the suspend completion at the same time.
4148 +** The suspend completion is completed after any suspend
4149 +** attempt. When we reset the state machine we also reset
4150 +** the completion. This reset occurs when videocore is
4151 +** resumed, and also if we initiate suspend after a suspend
4152 +** failure.
4153 +**
4154 +** VC_SUSPEND_IN_PROGRESS - This state is considered the point of no return for
4155 +** suspend - ie from this point on we must try to suspend
4156 +** before resuming can occur. We therefore also reset the
4157 +** resume state machine to VC_RESUME_IDLE in this state.
4158 +**
4159 +** VC_SUSPEND_SUSPENDED - Suspend has completed successfully. Also call
4160 +** complete_all on the suspend completion to notify
4161 +** anything waiting for suspend to happen.
4162 +**
4163 +** VC_SUSPEND_REJECTED - Videocore rejected suspend. Videocore will also
4164 +** initiate resume, so no need to alter resume state.
4165 +** We call complete_all on the suspend completion to notify
4166 +** of suspend rejection.
4167 +**
4168 +** VC_SUSPEND_FAILED - We failed to initiate videocore suspend. We notify the
4169 +** suspend completion and reset the resume state machine.
4170 +**
4171 +** VC_RESUME_IDLE - Initialise the resume completion at the same time. The
4172 +**			resume completion is in its 'done' state whenever
4173 +**			videocore is running. Therefore, the VC_RESUME_IDLE state
4174 +** implies that videocore is suspended.
4175 +** Hence, any thread which needs to wait until videocore is
4176 +** running can wait on this completion - it will only block
4177 +** if videocore is suspended.
4178 +**
4179 +** VC_RESUME_RESUMED - Resume has completed successfully. Videocore is running.
4180 +** Call complete_all on the resume completion to unblock
4181 +** any threads waiting for resume. Also reset the suspend
4182 +**			state machine to its idle state.
4183 +**
4184 +** VC_RESUME_FAILED - Currently unused - no mechanism to fail resume exists.
4185 +*/
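The coupling described above reduces to two rules: entering VC_SUSPEND_IN_PROGRESS resets the resume machine to idle, and reaching VC_RESUME_RESUMED resets the suspend machine to idle. The sketch below is a userspace illustration of just that coupling (not part of the patch; the enum and function names are invented for the model), mirroring the two setters that follow:

#include <stdio.h>

enum model_suspend { SUSP_IDLE, SUSP_REQUESTED, SUSP_IN_PROGRESS, SUSP_SUSPENDED };
enum model_resume  { RES_IDLE, RES_REQUESTED, RES_IN_PROGRESS, RES_RESUMED };

struct model_state {
	enum model_suspend suspend;
	enum model_resume resume;
};

static void model_set_resume(struct model_state *m, enum model_resume s);

static void model_set_suspend(struct model_state *m, enum model_suspend s)
{
	m->suspend = s;
	if (s == SUSP_IN_PROGRESS)
		/* point of no return: any resume must start from scratch */
		model_set_resume(m, RES_IDLE);
}

static void model_set_resume(struct model_state *m, enum model_resume s)
{
	m->resume = s;
	if (s == RES_RESUMED)
		/* VideoCore is running again: suspend machine goes idle */
		model_set_suspend(m, SUSP_IDLE);
}

int main(void)
{
	struct model_state m = { SUSP_IDLE, RES_RESUMED };

	model_set_suspend(&m, SUSP_IN_PROGRESS);
	printf("suspend=%d resume=%d\n", m.suspend, m.resume);	/* 2 0 */
	model_set_resume(&m, RES_RESUMED);
	printf("suspend=%d resume=%d\n", m.suspend, m.resume);	/* 0 3 */
	return 0;
}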
4186 +
4187 +void
4188 +set_suspend_state(VCHIQ_ARM_STATE_T *arm_state,
4189 + enum vc_suspend_status new_state)
4190 +{
4191 + /* set the state in all cases */
4192 + arm_state->vc_suspend_state = new_state;
4193 +
4194 + /* state specific additional actions */
4195 + switch (new_state) {
4196 + case VC_SUSPEND_FORCE_CANCELED:
4197 + complete_all(&arm_state->vc_suspend_complete);
4198 + break;
4199 + case VC_SUSPEND_REJECTED:
4200 + complete_all(&arm_state->vc_suspend_complete);
4201 + break;
4202 + case VC_SUSPEND_FAILED:
4203 + complete_all(&arm_state->vc_suspend_complete);
4204 + arm_state->vc_resume_state = VC_RESUME_RESUMED;
4205 + complete_all(&arm_state->vc_resume_complete);
4206 + break;
4207 + case VC_SUSPEND_IDLE:
4208 + reinit_completion(&arm_state->vc_suspend_complete);
4209 + break;
4210 + case VC_SUSPEND_REQUESTED:
4211 + break;
4212 + case VC_SUSPEND_IN_PROGRESS:
4213 + set_resume_state(arm_state, VC_RESUME_IDLE);
4214 + break;
4215 + case VC_SUSPEND_SUSPENDED:
4216 + complete_all(&arm_state->vc_suspend_complete);
4217 + break;
4218 + default:
4219 + BUG();
4220 + break;
4221 + }
4222 +}
4223 +
4224 +void
4225 +set_resume_state(VCHIQ_ARM_STATE_T *arm_state,
4226 + enum vc_resume_status new_state)
4227 +{
4228 + /* set the state in all cases */
4229 + arm_state->vc_resume_state = new_state;
4230 +
4231 + /* state specific additional actions */
4232 + switch (new_state) {
4233 + case VC_RESUME_FAILED:
4234 + break;
4235 + case VC_RESUME_IDLE:
4236 + reinit_completion(&arm_state->vc_resume_complete);
4237 + break;
4238 + case VC_RESUME_REQUESTED:
4239 + break;
4240 + case VC_RESUME_IN_PROGRESS:
4241 + break;
4242 + case VC_RESUME_RESUMED:
4243 + complete_all(&arm_state->vc_resume_complete);
4244 + set_suspend_state(arm_state, VC_SUSPEND_IDLE);
4245 + break;
4246 + default:
4247 + BUG();
4248 + break;
4249 + }
4250 +}
4251 +
4252 +
4253 +/* should be called with the write lock held */
4254 +inline void
4255 +start_suspend_timer(VCHIQ_ARM_STATE_T *arm_state)
4256 +{
4257 + del_timer(&arm_state->suspend_timer);
4258 + arm_state->suspend_timer.expires = jiffies +
4259 + msecs_to_jiffies(arm_state->
4260 + suspend_timer_timeout);
4261 + add_timer(&arm_state->suspend_timer);
4262 + arm_state->suspend_timer_running = 1;
4263 +}
4264 +
4265 +/* should be called with the write lock held */
4266 +static inline void
4267 +stop_suspend_timer(VCHIQ_ARM_STATE_T *arm_state)
4268 +{
4269 + if (arm_state->suspend_timer_running) {
4270 + del_timer(&arm_state->suspend_timer);
4271 + arm_state->suspend_timer_running = 0;
4272 + }
4273 +}
4274 +
4275 +static inline int
4276 +need_resume(VCHIQ_STATE_T *state)
4277 +{
4278 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4279 + return (arm_state->vc_suspend_state > VC_SUSPEND_IDLE) &&
4280 + (arm_state->vc_resume_state < VC_RESUME_REQUESTED) &&
4281 + vchiq_videocore_wanted(state);
4282 +}
4283 +
4284 +static int
4285 +block_resume(VCHIQ_ARM_STATE_T *arm_state)
4286 +{
4287 + int status = VCHIQ_SUCCESS;
4288 + const unsigned long timeout_val =
4289 + msecs_to_jiffies(FORCE_SUSPEND_TIMEOUT_MS);
4290 + int resume_count = 0;
4291 +
4292 + /* Allow any threads which were blocked by the last force suspend to
4293 + * complete if they haven't already. Only give this one shot; if
4294 + * blocked_count is incremented after blocked_blocker is completed
4295 + * (which only happens when blocked_count hits 0) then those threads
4296 + * will have to wait until next time around */
4297 + if (arm_state->blocked_count) {
4298 + reinit_completion(&arm_state->blocked_blocker);
4299 + write_unlock_bh(&arm_state->susp_res_lock);
4300 + vchiq_log_info(vchiq_susp_log_level, "%s wait for previously "
4301 + "blocked clients", __func__);
4302 + if (wait_for_completion_interruptible_timeout(
4303 + &arm_state->blocked_blocker, timeout_val)
4304 + <= 0) {
4305 + vchiq_log_error(vchiq_susp_log_level, "%s wait for "
4306 + "previously blocked clients failed" , __func__);
4307 + status = VCHIQ_ERROR;
4308 + write_lock_bh(&arm_state->susp_res_lock);
4309 + goto out;
4310 + }
4311 + vchiq_log_info(vchiq_susp_log_level, "%s previously blocked "
4312 + "clients resumed", __func__);
4313 + write_lock_bh(&arm_state->susp_res_lock);
4314 + }
4315 +
4316 +	/* We need to wait for resume to complete if it's in progress */
4317 + while (arm_state->vc_resume_state != VC_RESUME_RESUMED &&
4318 + arm_state->vc_resume_state > VC_RESUME_IDLE) {
4319 + if (resume_count > 1) {
4320 + status = VCHIQ_ERROR;
4321 + vchiq_log_error(vchiq_susp_log_level, "%s waited too "
4322 + "many times for resume" , __func__);
4323 + goto out;
4324 + }
4325 + write_unlock_bh(&arm_state->susp_res_lock);
4326 + vchiq_log_info(vchiq_susp_log_level, "%s wait for resume",
4327 + __func__);
4328 + if (wait_for_completion_interruptible_timeout(
4329 + &arm_state->vc_resume_complete, timeout_val)
4330 + <= 0) {
4331 + vchiq_log_error(vchiq_susp_log_level, "%s wait for "
4332 + "resume failed (%s)", __func__,
4333 + resume_state_names[arm_state->vc_resume_state +
4334 + VC_RESUME_NUM_OFFSET]);
4335 + status = VCHIQ_ERROR;
4336 + write_lock_bh(&arm_state->susp_res_lock);
4337 + goto out;
4338 + }
4339 + vchiq_log_info(vchiq_susp_log_level, "%s resumed", __func__);
4340 + write_lock_bh(&arm_state->susp_res_lock);
4341 + resume_count++;
4342 + }
4343 + reinit_completion(&arm_state->resume_blocker);
4344 + arm_state->resume_blocked = 1;
4345 +
4346 +out:
4347 + return status;
4348 +}
4349 +
4350 +static inline void
4351 +unblock_resume(VCHIQ_ARM_STATE_T *arm_state)
4352 +{
4353 + complete_all(&arm_state->resume_blocker);
4354 + arm_state->resume_blocked = 0;
4355 +}
4356 +
4357 +/* Initiate suspend via slot handler. Should be called with the write lock
4358 + * held */
4359 +VCHIQ_STATUS_T
4360 +vchiq_arm_vcsuspend(VCHIQ_STATE_T *state)
4361 +{
4362 + VCHIQ_STATUS_T status = VCHIQ_ERROR;
4363 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4364 +
4365 + if (!arm_state)
4366 + goto out;
4367 +
4368 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4369 + status = VCHIQ_SUCCESS;
4370 +
4371 +
4372 + switch (arm_state->vc_suspend_state) {
4373 + case VC_SUSPEND_REQUESTED:
4374 + vchiq_log_info(vchiq_susp_log_level, "%s: suspend already "
4375 + "requested", __func__);
4376 + break;
4377 + case VC_SUSPEND_IN_PROGRESS:
4378 + vchiq_log_info(vchiq_susp_log_level, "%s: suspend already in "
4379 + "progress", __func__);
4380 + break;
4381 +
4382 + default:
4383 + /* We don't expect to be in other states, so log but continue
4384 + * anyway */
4385 + vchiq_log_error(vchiq_susp_log_level,
4386 + "%s unexpected suspend state %s", __func__,
4387 + suspend_state_names[arm_state->vc_suspend_state +
4388 + VC_SUSPEND_NUM_OFFSET]);
4389 + /* fall through */
4390 + case VC_SUSPEND_REJECTED:
4391 + case VC_SUSPEND_FAILED:
4392 + /* Ensure any idle state actions have been run */
4393 + set_suspend_state(arm_state, VC_SUSPEND_IDLE);
4394 + /* fall through */
4395 + case VC_SUSPEND_IDLE:
4396 + vchiq_log_info(vchiq_susp_log_level,
4397 + "%s: suspending", __func__);
4398 + set_suspend_state(arm_state, VC_SUSPEND_REQUESTED);
4399 + /* kick the slot handler thread to initiate suspend */
4400 + request_poll(state, NULL, 0);
4401 + break;
4402 + }
4403 +
4404 +out:
4405 + vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, status);
4406 + return status;
4407 +}
4408 +
4409 +void
4410 +vchiq_platform_check_suspend(VCHIQ_STATE_T *state)
4411 +{
4412 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4413 + int susp = 0;
4414 +
4415 + if (!arm_state)
4416 + goto out;
4417 +
4418 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4419 +
4420 + write_lock_bh(&arm_state->susp_res_lock);
4421 + if (arm_state->vc_suspend_state == VC_SUSPEND_REQUESTED &&
4422 + arm_state->vc_resume_state == VC_RESUME_RESUMED) {
4423 + set_suspend_state(arm_state, VC_SUSPEND_IN_PROGRESS);
4424 + susp = 1;
4425 + }
4426 + write_unlock_bh(&arm_state->susp_res_lock);
4427 +
4428 + if (susp)
4429 + vchiq_platform_suspend(state);
4430 +
4431 +out:
4432 + vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
4433 + return;
4434 +}
4435 +
4436 +
4437 +static void
4438 +output_timeout_error(VCHIQ_STATE_T *state)
4439 +{
4440 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4441 + char service_err[50] = "";
4442 + int vc_use_count = arm_state->videocore_use_count;
4443 + int active_services = state->unused_service;
4444 + int i;
4445 +
4446 + if (!arm_state->videocore_use_count) {
4447 + snprintf(service_err, 50, " Videocore usecount is 0");
4448 + goto output_msg;
4449 + }
4450 + for (i = 0; i < active_services; i++) {
4451 + VCHIQ_SERVICE_T *service_ptr = state->services[i];
4452 + if (service_ptr && service_ptr->service_use_count &&
4453 + (service_ptr->srvstate != VCHIQ_SRVSTATE_FREE)) {
4454 + snprintf(service_err, 50, " %c%c%c%c(%d) service has "
4455 + "use count %d%s", VCHIQ_FOURCC_AS_4CHARS(
4456 + service_ptr->base.fourcc),
4457 + service_ptr->client_id,
4458 + service_ptr->service_use_count,
4459 + service_ptr->service_use_count ==
4460 + vc_use_count ? "" : " (+ more)");
4461 + break;
4462 + }
4463 + }
4464 +
4465 +output_msg:
4466 + vchiq_log_error(vchiq_susp_log_level,
4467 + "timed out waiting for vc suspend (%d).%s",
4468 + arm_state->autosuspend_override, service_err);
4469 +
4470 +}
4471 +
4472 +/* Try to get videocore into suspended state, regardless of autosuspend state.
4473 +** We don't actually force suspend, since videocore may get into a bad state
4474 +** if we force suspend at a bad time. Instead, we wait for autosuspend to
4475 +** determine a good point to suspend. If this doesn't happen within 100ms we
4476 +** report failure.
4477 +**
4478 +** Returns VCHIQ_SUCCESS if videocore suspended successfully, VCHIQ_RETRY if
4479 +** videocore failed to suspend in time or VCHIQ_ERROR if interrupted.
4480 +*/
4481 +VCHIQ_STATUS_T
4482 +vchiq_arm_force_suspend(VCHIQ_STATE_T *state)
4483 +{
4484 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4485 + VCHIQ_STATUS_T status = VCHIQ_ERROR;
4486 + long rc = 0;
4487 + int repeat = -1;
4488 +
4489 + if (!arm_state)
4490 + goto out;
4491 +
4492 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4493 +
4494 + write_lock_bh(&arm_state->susp_res_lock);
4495 +
4496 + status = block_resume(arm_state);
4497 + if (status != VCHIQ_SUCCESS)
4498 + goto unlock;
4499 + if (arm_state->vc_suspend_state == VC_SUSPEND_SUSPENDED) {
4500 + /* Already suspended - just block resume and exit */
4501 + vchiq_log_info(vchiq_susp_log_level, "%s already suspended",
4502 + __func__);
4503 + status = VCHIQ_SUCCESS;
4504 + goto unlock;
4505 + } else if (arm_state->vc_suspend_state <= VC_SUSPEND_IDLE) {
4506 + /* initiate suspend immediately in the case that we're waiting
4507 + * for the timeout */
4508 + stop_suspend_timer(arm_state);
4509 + if (!vchiq_videocore_wanted(state)) {
4510 + vchiq_log_info(vchiq_susp_log_level, "%s videocore "
4511 + "idle, initiating suspend", __func__);
4512 + status = vchiq_arm_vcsuspend(state);
4513 + } else if (arm_state->autosuspend_override <
4514 + FORCE_SUSPEND_FAIL_MAX) {
4515 + vchiq_log_info(vchiq_susp_log_level, "%s letting "
4516 + "videocore go idle", __func__);
4517 + status = VCHIQ_SUCCESS;
4518 + } else {
4519 + vchiq_log_warning(vchiq_susp_log_level, "%s failed too "
4520 + "many times - attempting suspend", __func__);
4521 + status = vchiq_arm_vcsuspend(state);
4522 + }
4523 + } else {
4524 + vchiq_log_info(vchiq_susp_log_level, "%s videocore suspend "
4525 + "in progress - wait for completion", __func__);
4526 + status = VCHIQ_SUCCESS;
4527 + }
4528 +
4529 +	/* Wait for suspend to happen due to system idle (not forced) */
4530 + if (status != VCHIQ_SUCCESS)
4531 + goto unblock_resume;
4532 +
4533 + do {
4534 + write_unlock_bh(&arm_state->susp_res_lock);
4535 +
4536 + rc = wait_for_completion_interruptible_timeout(
4537 + &arm_state->vc_suspend_complete,
4538 + msecs_to_jiffies(FORCE_SUSPEND_TIMEOUT_MS));
4539 +
4540 + write_lock_bh(&arm_state->susp_res_lock);
4541 + if (rc < 0) {
4542 + vchiq_log_warning(vchiq_susp_log_level, "%s "
4543 + "interrupted waiting for suspend", __func__);
4544 + status = VCHIQ_ERROR;
4545 + goto unblock_resume;
4546 + } else if (rc == 0) {
4547 + if (arm_state->vc_suspend_state > VC_SUSPEND_IDLE) {
4548 + /* Repeat timeout once if in progress */
4549 + if (repeat < 0) {
4550 + repeat = 1;
4551 + continue;
4552 + }
4553 + }
4554 + arm_state->autosuspend_override++;
4555 + output_timeout_error(state);
4556 +
4557 + status = VCHIQ_RETRY;
4558 + goto unblock_resume;
4559 + }
4560 + } while (0 < (repeat--));
4561 +
4562 + /* Check and report state in case we need to abort ARM suspend */
4563 + if (arm_state->vc_suspend_state != VC_SUSPEND_SUSPENDED) {
4564 + status = VCHIQ_RETRY;
4565 + vchiq_log_error(vchiq_susp_log_level,
4566 + "%s videocore suspend failed (state %s)", __func__,
4567 + suspend_state_names[arm_state->vc_suspend_state +
4568 + VC_SUSPEND_NUM_OFFSET]);
4569 + /* Reset the state only if it's still in an error state.
4570 + * Something could have already initiated another suspend. */
4571 + if (arm_state->vc_suspend_state < VC_SUSPEND_IDLE)
4572 + set_suspend_state(arm_state, VC_SUSPEND_IDLE);
4573 +
4574 + goto unblock_resume;
4575 + }
4576 +
4577 + /* successfully suspended - unlock and exit */
4578 + goto unlock;
4579 +
4580 +unblock_resume:
4581 + /* all error states need to unblock resume before exit */
4582 + unblock_resume(arm_state);
4583 +
4584 +unlock:
4585 + write_unlock_bh(&arm_state->susp_res_lock);
4586 +
4587 +out:
4588 + vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, status);
4589 + return status;
4590 +}
4591 +
4592 +void
4593 +vchiq_check_suspend(VCHIQ_STATE_T *state)
4594 +{
4595 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4596 +
4597 + if (!arm_state)
4598 + goto out;
4599 +
4600 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4601 +
4602 + write_lock_bh(&arm_state->susp_res_lock);
4603 + if (arm_state->vc_suspend_state != VC_SUSPEND_SUSPENDED &&
4604 + arm_state->first_connect &&
4605 + !vchiq_videocore_wanted(state)) {
4606 + vchiq_arm_vcsuspend(state);
4607 + }
4608 + write_unlock_bh(&arm_state->susp_res_lock);
4609 +
4610 +out:
4611 + vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
4612 + return;
4613 +}
4614 +
4615 +
4616 +int
4617 +vchiq_arm_allow_resume(VCHIQ_STATE_T *state)
4618 +{
4619 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4620 + int resume = 0;
4621 + int ret = -1;
4622 +
4623 + if (!arm_state)
4624 + goto out;
4625 +
4626 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4627 +
4628 + write_lock_bh(&arm_state->susp_res_lock);
4629 + unblock_resume(arm_state);
4630 + resume = vchiq_check_resume(state);
4631 + write_unlock_bh(&arm_state->susp_res_lock);
4632 +
4633 + if (resume) {
4634 + if (wait_for_completion_interruptible(
4635 + &arm_state->vc_resume_complete) < 0) {
4636 + vchiq_log_error(vchiq_susp_log_level,
4637 + "%s interrupted", __func__);
4638 + /* failed, cannot accurately derive suspend
4639 + * state, so exit early. */
4640 + goto out;
4641 + }
4642 + }
4643 +
4644 + read_lock_bh(&arm_state->susp_res_lock);
4645 + if (arm_state->vc_suspend_state == VC_SUSPEND_SUSPENDED) {
4646 + vchiq_log_info(vchiq_susp_log_level,
4647 + "%s: Videocore remains suspended", __func__);
4648 + } else {
4649 + vchiq_log_info(vchiq_susp_log_level,
4650 + "%s: Videocore resumed", __func__);
4651 + ret = 0;
4652 + }
4653 + read_unlock_bh(&arm_state->susp_res_lock);
4654 +out:
4655 + vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
4656 + return ret;
4657 +}
4658 +
4659 +/* This function should be called with the write lock held */
4660 +int
4661 +vchiq_check_resume(VCHIQ_STATE_T *state)
4662 +{
4663 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4664 + int resume = 0;
4665 +
4666 + if (!arm_state)
4667 + goto out;
4668 +
4669 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4670 +
4671 + if (need_resume(state)) {
4672 + set_resume_state(arm_state, VC_RESUME_REQUESTED);
4673 + request_poll(state, NULL, 0);
4674 + resume = 1;
4675 + }
4676 +
4677 +out:
4678 + vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
4679 + return resume;
4680 +}
4681 +
4682 +void
4683 +vchiq_platform_check_resume(VCHIQ_STATE_T *state)
4684 +{
4685 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4686 + int res = 0;
4687 +
4688 + if (!arm_state)
4689 + goto out;
4690 +
4691 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4692 +
4693 + write_lock_bh(&arm_state->susp_res_lock);
4694 + if (arm_state->wake_address == 0) {
4695 + vchiq_log_info(vchiq_susp_log_level,
4696 + "%s: already awake", __func__);
4697 + goto unlock;
4698 + }
4699 + if (arm_state->vc_resume_state == VC_RESUME_IN_PROGRESS) {
4700 + vchiq_log_info(vchiq_susp_log_level,
4701 + "%s: already resuming", __func__);
4702 + goto unlock;
4703 + }
4704 +
4705 + if (arm_state->vc_resume_state == VC_RESUME_REQUESTED) {
4706 + set_resume_state(arm_state, VC_RESUME_IN_PROGRESS);
4707 + res = 1;
4708 + } else
4709 + vchiq_log_trace(vchiq_susp_log_level,
4710 + "%s: not resuming (resume state %s)", __func__,
4711 + resume_state_names[arm_state->vc_resume_state +
4712 + VC_RESUME_NUM_OFFSET]);
4713 +
4714 +unlock:
4715 + write_unlock_bh(&arm_state->susp_res_lock);
4716 +
4717 + if (res)
4718 + vchiq_platform_resume(state);
4719 +
4720 +out:
4721 + vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
4722 + return;
4723 +
4724 +}
4725 +
4726 +
4727 +
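+/* Take a "use" on the videocore on behalf of a service (or of VCHIQ itself
+ * for USE_TYPE_VCHIQ). This bumps both the global videocore_use_count and the
+ * per-entity count, clears any pending suspend request, requests a resume if
+ * one is needed (except for USE_TYPE_SERVICE_NO_RESUME), and waits on
+ * vc_resume_complete before returning. Any keepalive use-acks accumulated
+ * while suspended are then sent via vchiq_send_remote_use_active(). */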
4728 +VCHIQ_STATUS_T
4729 +vchiq_use_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
4730 + enum USE_TYPE_E use_type)
4731 +{
4732 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4733 + VCHIQ_STATUS_T ret = VCHIQ_SUCCESS;
4734 + char entity[16];
4735 + int *entity_uc;
4736 + int local_uc, local_entity_uc;
4737 +
4738 + if (!arm_state)
4739 + goto out;
4740 +
4741 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4742 +
4743 + if (use_type == USE_TYPE_VCHIQ) {
4744 + sprintf(entity, "VCHIQ: ");
4745 + entity_uc = &arm_state->peer_use_count;
4746 + } else if (service) {
4747 + sprintf(entity, "%c%c%c%c:%03d",
4748 + VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
4749 + service->client_id);
4750 + entity_uc = &service->service_use_count;
4751 + } else {
4752 + vchiq_log_error(vchiq_susp_log_level, "%s null service "
4753 + "ptr", __func__);
4754 + ret = VCHIQ_ERROR;
4755 + goto out;
4756 + }
4757 +
4758 + write_lock_bh(&arm_state->susp_res_lock);
4759 + while (arm_state->resume_blocked) {
4760 + /* If we call 'use' while force suspend is waiting for suspend,
4761 + * then we're about to block the thread which the force is
4762 + * waiting to complete, so we're bound to just time out. In this
4763 + * case, set the suspend state such that the wait will be
4764 + * canceled, so we can complete as quickly as possible. */
4765 + if (arm_state->resume_blocked && arm_state->vc_suspend_state ==
4766 + VC_SUSPEND_IDLE) {
4767 + set_suspend_state(arm_state, VC_SUSPEND_FORCE_CANCELED);
4768 + break;
4769 + }
4770 + /* If suspend is already in progress then we need to block */
4771 + if (!try_wait_for_completion(&arm_state->resume_blocker)) {
4772 + /* Indicate that there are threads waiting on the resume
4773 + * blocker. These need to be allowed to complete before
4774 + * a _second_ call to force suspend can complete,
4775 + * otherwise low priority threads might never actually
4776 + * continue */
4777 + arm_state->blocked_count++;
4778 + write_unlock_bh(&arm_state->susp_res_lock);
4779 + vchiq_log_info(vchiq_susp_log_level, "%s %s resume "
4780 + "blocked - waiting...", __func__, entity);
4781 + if (wait_for_completion_killable(
4782 + &arm_state->resume_blocker) != 0) {
4783 + vchiq_log_error(vchiq_susp_log_level, "%s %s "
4784 + "wait for resume blocker interrupted",
4785 + __func__, entity);
4786 + ret = VCHIQ_ERROR;
4787 + write_lock_bh(&arm_state->susp_res_lock);
4788 + arm_state->blocked_count--;
4789 + write_unlock_bh(&arm_state->susp_res_lock);
4790 + goto out;
4791 + }
4792 + vchiq_log_info(vchiq_susp_log_level, "%s %s resume "
4793 + "unblocked", __func__, entity);
4794 + write_lock_bh(&arm_state->susp_res_lock);
4795 + if (--arm_state->blocked_count == 0)
4796 + complete_all(&arm_state->blocked_blocker);
4797 + }
4798 + }
4799 +
4800 + stop_suspend_timer(arm_state);
4801 +
4802 + local_uc = ++arm_state->videocore_use_count;
4803 + local_entity_uc = ++(*entity_uc);
4804 +
4805 + /* If there's a pending request which hasn't yet been serviced then
4806 + * just clear it. If we're past VC_SUSPEND_REQUESTED state then
4807 + * vc_resume_complete will block until we either resume or fail to
4808 + * suspend */
4809 + if (arm_state->vc_suspend_state <= VC_SUSPEND_REQUESTED)
4810 + set_suspend_state(arm_state, VC_SUSPEND_IDLE);
4811 +
4812 + if ((use_type != USE_TYPE_SERVICE_NO_RESUME) && need_resume(state)) {
4813 + set_resume_state(arm_state, VC_RESUME_REQUESTED);
4814 + vchiq_log_info(vchiq_susp_log_level,
4815 + "%s %s count %d, state count %d",
4816 + __func__, entity, local_entity_uc, local_uc);
4817 + request_poll(state, NULL, 0);
4818 + } else
4819 + vchiq_log_trace(vchiq_susp_log_level,
4820 + "%s %s count %d, state count %d",
4821 + __func__, entity, *entity_uc, local_uc);
4822 +
4823 +
4824 + write_unlock_bh(&arm_state->susp_res_lock);
4825 +
4826 + /* Completion is in a done state when we're not suspended, so this won't
4827 + * block for the non-suspended case. */
4828 + if (!try_wait_for_completion(&arm_state->vc_resume_complete)) {
4829 + vchiq_log_info(vchiq_susp_log_level, "%s %s wait for resume",
4830 + __func__, entity);
4831 + if (wait_for_completion_killable(
4832 + &arm_state->vc_resume_complete) != 0) {
4833 + vchiq_log_error(vchiq_susp_log_level, "%s %s wait for "
4834 + "resume interrupted", __func__, entity);
4835 + ret = VCHIQ_ERROR;
4836 + goto out;
4837 + }
4838 + vchiq_log_info(vchiq_susp_log_level, "%s %s resumed", __func__,
4839 + entity);
4840 + }
4841 +
4842 + if (ret == VCHIQ_SUCCESS) {
4843 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
4844 + long ack_cnt = atomic_xchg(&arm_state->ka_use_ack_count, 0);
4845 + while (ack_cnt && (status == VCHIQ_SUCCESS)) {
4846 + /* Send the use notify to videocore */
4847 + status = vchiq_send_remote_use_active(state);
4848 + if (status == VCHIQ_SUCCESS)
4849 + ack_cnt--;
4850 + else
4851 + atomic_add(ack_cnt,
4852 + &arm_state->ka_use_ack_count);
4853 + }
4854 + }
4855 +
4856 +out:
4857 + vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
4858 + return ret;
4859 +}
4860 +
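+/* Drop a "use" previously taken with vchiq_use_internal. When the counts show
+ * the videocore is no longer wanted, either arm the suspend timer (if the
+ * platform uses one and resume isn't blocked by a forced suspend) or initiate
+ * suspend immediately. Underflow is reported with WARN_ON rather than BUG_ON
+ * so a misbehaving user thread cannot crash the kernel. */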
4861 +VCHIQ_STATUS_T
4862 +vchiq_release_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service)
4863 +{
4864 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4865 + VCHIQ_STATUS_T ret = VCHIQ_SUCCESS;
4866 + char entity[16];
4867 + int *entity_uc;
4868 + int local_uc, local_entity_uc;
4869 +
4870 + if (!arm_state)
4871 + goto out;
4872 +
4873 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4874 +
4875 + if (service) {
4876 + sprintf(entity, "%c%c%c%c:%03d",
4877 + VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
4878 + service->client_id);
4879 + entity_uc = &service->service_use_count;
4880 + } else {
4881 + sprintf(entity, "PEER: ");
4882 + entity_uc = &arm_state->peer_use_count;
4883 + }
4884 +
4885 + write_lock_bh(&arm_state->susp_res_lock);
4886 + if (!arm_state->videocore_use_count || !(*entity_uc)) {
4887 + /* Don't use BUG_ON - don't allow user thread to crash kernel */
4888 + WARN_ON(!arm_state->videocore_use_count);
4889 + WARN_ON(!(*entity_uc));
4890 + ret = VCHIQ_ERROR;
4891 + goto unlock;
4892 + }
4893 + local_uc = --arm_state->videocore_use_count;
4894 + local_entity_uc = --(*entity_uc);
4895 +
4896 + if (!vchiq_videocore_wanted(state)) {
4897 + if (vchiq_platform_use_suspend_timer() &&
4898 + !arm_state->resume_blocked) {
4899 + /* Only use the timer if we're not trying to force
4900 + * suspend (=> resume_blocked) */
4901 + start_suspend_timer(arm_state);
4902 + } else {
4903 + vchiq_log_info(vchiq_susp_log_level,
4904 + "%s %s count %d, state count %d - suspending",
4905 + __func__, entity, *entity_uc,
4906 + arm_state->videocore_use_count);
4907 + vchiq_arm_vcsuspend(state);
4908 + }
4909 + } else
4910 + vchiq_log_trace(vchiq_susp_log_level,
4911 + "%s %s count %d, state count %d",
4912 + __func__, entity, *entity_uc,
4913 + arm_state->videocore_use_count);
4914 +
4915 +unlock:
4916 + write_unlock_bh(&arm_state->susp_res_lock);
4917 +
4918 +out:
4919 + vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
4920 + return ret;
4921 +}
4922 +
4923 +void
4924 +vchiq_on_remote_use(VCHIQ_STATE_T *state)
4925 +{
4926 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4927 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4928 + atomic_inc(&arm_state->ka_use_count);
4929 + complete(&arm_state->ka_evt);
4930 +}
4931 +
4932 +void
4933 +vchiq_on_remote_release(VCHIQ_STATE_T *state)
4934 +{
4935 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4936 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4937 + atomic_inc(&arm_state->ka_release_count);
4938 + complete(&arm_state->ka_evt);
4939 +}
4940 +
4941 +VCHIQ_STATUS_T
4942 +vchiq_use_service_internal(VCHIQ_SERVICE_T *service)
4943 +{
4944 + return vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
4945 +}
4946 +
4947 +VCHIQ_STATUS_T
4948 +vchiq_release_service_internal(VCHIQ_SERVICE_T *service)
4949 +{
4950 + return vchiq_release_internal(service->state, service);
4951 +}
4952 +
4953 +VCHIQ_DEBUGFS_NODE_T *
4954 +vchiq_instance_get_debugfs_node(VCHIQ_INSTANCE_T instance)
4955 +{
4956 + return &instance->debugfs_node;
4957 +}
4958 +
4959 +int
4960 +vchiq_instance_get_use_count(VCHIQ_INSTANCE_T instance)
4961 +{
4962 + VCHIQ_SERVICE_T *service;
4963 + int use_count = 0, i;
4964 + i = 0;
4965 + while ((service = next_service_by_instance(instance->state,
4966 + instance, &i)) != NULL) {
4967 + use_count += service->service_use_count;
4968 + unlock_service(service);
4969 + }
4970 + return use_count;
4971 +}
4972 +
4973 +int
4974 +vchiq_instance_get_pid(VCHIQ_INSTANCE_T instance)
4975 +{
4976 + return instance->pid;
4977 +}
4978 +
4979 +int
4980 +vchiq_instance_get_trace(VCHIQ_INSTANCE_T instance)
4981 +{
4982 + return instance->trace;
4983 +}
4984 +
4985 +void
4986 +vchiq_instance_set_trace(VCHIQ_INSTANCE_T instance, int trace)
4987 +{
4988 + VCHIQ_SERVICE_T *service;
4989 + int i;
4990 + i = 0;
4991 + while ((service = next_service_by_instance(instance->state,
4992 + instance, &i)) != NULL) {
4993 + service->trace = trace;
4994 + unlock_service(service);
4995 + }
4996 + instance->trace = (trace != 0);
4997 +}
4998 +
4999 +static void suspend_timer_callback(unsigned long context)
5000 +{
5001 + VCHIQ_STATE_T *state = (VCHIQ_STATE_T *)context;
5002 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
5003 + if (!arm_state)
5004 + goto out;
5005 + vchiq_log_info(vchiq_susp_log_level,
5006 + "%s - suspend timer expired - check suspend", __func__);
5007 + vchiq_check_suspend(state);
5008 +out:
5009 + return;
5010 +}
5011 +
5012 +VCHIQ_STATUS_T
5013 +vchiq_use_service_no_resume(VCHIQ_SERVICE_HANDLE_T handle)
5014 +{
5015 + VCHIQ_STATUS_T ret = VCHIQ_ERROR;
5016 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
5017 + if (service) {
5018 + ret = vchiq_use_internal(service->state, service,
5019 + USE_TYPE_SERVICE_NO_RESUME);
5020 + unlock_service(service);
5021 + }
5022 + return ret;
5023 +}
5024 +
5025 +VCHIQ_STATUS_T
5026 +vchiq_use_service(VCHIQ_SERVICE_HANDLE_T handle)
5027 +{
5028 + VCHIQ_STATUS_T ret = VCHIQ_ERROR;
5029 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
5030 + if (service) {
5031 + ret = vchiq_use_internal(service->state, service,
5032 + USE_TYPE_SERVICE);
5033 + unlock_service(service);
5034 + }
5035 + return ret;
5036 +}
5037 +
5038 +VCHIQ_STATUS_T
5039 +vchiq_release_service(VCHIQ_SERVICE_HANDLE_T handle)
5040 +{
5041 + VCHIQ_STATUS_T ret = VCHIQ_ERROR;
5042 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
5043 + if (service) {
5044 + ret = vchiq_release_internal(service->state, service);
5045 + unlock_service(service);
5046 + }
5047 + return ret;
5048 +}
5049 +
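+/* Log a snapshot of the per-service and global use counts. The counts are
+ * copied into a local array under the read lock and logged afterwards, so the
+ * lock is not held while logging. At most 64 services are dumped; if more are
+ * active, only those with a non-zero use count are shown. */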
5050 +void
5051 +vchiq_dump_service_use_state(VCHIQ_STATE_T *state)
5052 +{
5053 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
5054 + int i, j = 0;
5055 + /* Only dump 64 services */
5056 + static const int local_max_services = 64;
5057 + /* If there's more than 64 services, only dump ones with
5058 + * non-zero counts */
5059 + int only_nonzero = 0;
5060 + static const char *nz = "<-- preventing suspend";
5061 +
5062 + enum vc_suspend_status vc_suspend_state;
5063 + enum vc_resume_status vc_resume_state;
5064 + int peer_count;
5065 + int vc_use_count;
5066 + int active_services;
5067 + struct service_data_struct {
5068 + int fourcc;
5069 + int clientid;
5070 + int use_count;
5071 + } service_data[local_max_services];
5072 +
5073 + if (!arm_state)
5074 + return;
5075 +
5076 + read_lock_bh(&arm_state->susp_res_lock);
5077 + vc_suspend_state = arm_state->vc_suspend_state;
5078 + vc_resume_state = arm_state->vc_resume_state;
5079 + peer_count = arm_state->peer_use_count;
5080 + vc_use_count = arm_state->videocore_use_count;
5081 + active_services = state->unused_service;
5082 + if (active_services > local_max_services)
5083 + only_nonzero = 1;
5084 +
5085 + for (i = 0; (i < active_services) && (j < local_max_services); i++) {
5086 + VCHIQ_SERVICE_T *service_ptr = state->services[i];
5087 + if (!service_ptr)
5088 + continue;
5089 +
5090 + if (only_nonzero && !service_ptr->service_use_count)
5091 + continue;
5092 +
5093 + if (service_ptr->srvstate != VCHIQ_SRVSTATE_FREE) {
5094 + service_data[j].fourcc = service_ptr->base.fourcc;
5095 + service_data[j].clientid = service_ptr->client_id;
5096 + service_data[j++].use_count = service_ptr->
5097 + service_use_count;
5098 + }
5099 + }
5100 +
5101 + read_unlock_bh(&arm_state->susp_res_lock);
5102 +
5103 + vchiq_log_warning(vchiq_susp_log_level,
5104 +		"-- Videocore suspend state: %s --",
5105 + suspend_state_names[vc_suspend_state + VC_SUSPEND_NUM_OFFSET]);
5106 + vchiq_log_warning(vchiq_susp_log_level,
5107 +		"-- Videocore resume state: %s --",
5108 + resume_state_names[vc_resume_state + VC_RESUME_NUM_OFFSET]);
5109 +
5110 + if (only_nonzero)
5111 + vchiq_log_warning(vchiq_susp_log_level, "Too many active "
5112 + "services (%d). Only dumping up to first %d services "
5113 + "with non-zero use-count", active_services,
5114 + local_max_services);
5115 +
5116 + for (i = 0; i < j; i++) {
5117 + vchiq_log_warning(vchiq_susp_log_level,
5118 + "----- %c%c%c%c:%d service count %d %s",
5119 + VCHIQ_FOURCC_AS_4CHARS(service_data[i].fourcc),
5120 + service_data[i].clientid,
5121 + service_data[i].use_count,
5122 + service_data[i].use_count ? nz : "");
5123 + }
5124 + vchiq_log_warning(vchiq_susp_log_level,
5125 +		"----- VCHIQ use count %d", peer_count);
5126 + vchiq_log_warning(vchiq_susp_log_level,
5127 + "--- Overall vchiq instance use count %d", vc_use_count);
5128 +
5129 + vchiq_dump_platform_use_state(state);
5130 +}
5131 +
5132 +VCHIQ_STATUS_T
5133 +vchiq_check_service(VCHIQ_SERVICE_T *service)
5134 +{
5135 + VCHIQ_ARM_STATE_T *arm_state;
5136 + VCHIQ_STATUS_T ret = VCHIQ_ERROR;
5137 +
5138 + if (!service || !service->state)
5139 + goto out;
5140 +
5141 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
5142 +
5143 + arm_state = vchiq_platform_get_arm_state(service->state);
5144 +
5145 + read_lock_bh(&arm_state->susp_res_lock);
5146 + if (service->service_use_count)
5147 + ret = VCHIQ_SUCCESS;
5148 + read_unlock_bh(&arm_state->susp_res_lock);
5149 +
5150 + if (ret == VCHIQ_ERROR) {
5151 + vchiq_log_error(vchiq_susp_log_level,
5152 + "%s ERROR - %c%c%c%c:%d service count %d, "
5153 + "state count %d, videocore suspend state %s", __func__,
5154 + VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
5155 + service->client_id, service->service_use_count,
5156 + arm_state->videocore_use_count,
5157 + suspend_state_names[arm_state->vc_suspend_state +
5158 + VC_SUSPEND_NUM_OFFSET]);
5159 + vchiq_dump_service_use_state(service->state);
5160 + }
5161 +out:
5162 + return ret;
5163 +}
5164 +
5165 +/* stub functions */
5166 +void vchiq_on_remote_use_active(VCHIQ_STATE_T *state)
5167 +{
5168 + (void)state;
5169 +}
5170 +
5171 +void vchiq_platform_conn_state_changed(VCHIQ_STATE_T *state,
5172 + VCHIQ_CONNSTATE_T oldstate, VCHIQ_CONNSTATE_T newstate)
5173 +{
5174 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
5175 + vchiq_log_info(vchiq_susp_log_level, "%d: %s->%s", state->id,
5176 + get_conn_state_name(oldstate), get_conn_state_name(newstate));
5177 + if (state->conn_state == VCHIQ_CONNSTATE_CONNECTED) {
5178 + write_lock_bh(&arm_state->susp_res_lock);
5179 + if (!arm_state->first_connect) {
5180 + char threadname[10];
5181 + arm_state->first_connect = 1;
5182 + write_unlock_bh(&arm_state->susp_res_lock);
5183 + snprintf(threadname, sizeof(threadname), "VCHIQka-%d",
5184 + state->id);
5185 + arm_state->ka_thread = kthread_create(
5186 + &vchiq_keepalive_thread_func,
5187 + (void *)state,
5188 + threadname);
5189 +			if (IS_ERR(arm_state->ka_thread)) {
5190 + vchiq_log_error(vchiq_susp_log_level,
5191 + "vchiq: FATAL: couldn't create thread %s",
5192 + threadname);
5193 + } else {
5194 + wake_up_process(arm_state->ka_thread);
5195 + }
5196 + } else
5197 + write_unlock_bh(&arm_state->susp_res_lock);
5198 + }
5199 +}
5200 +
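+/* Platform driver probe: create the debugfs entries, allocate the character
+ * device region, register the cdev, create the sysfs class and device node,
+ * then perform the platform-specific initialisation. Each failure path
+ * unwinds the steps already completed, in reverse order. */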
5201 +static int vchiq_probe(struct platform_device *pdev)
5202 +{
5203 + int err;
5204 + void *ptr_err;
5205 +
5206 + /* create debugfs entries */
5207 + err = vchiq_debugfs_init();
5208 + if (err != 0)
5209 + goto failed_debugfs_init;
5210 +
5211 + err = alloc_chrdev_region(&vchiq_devid, VCHIQ_MINOR, 1, DEVICE_NAME);
5212 + if (err != 0) {
5213 + vchiq_log_error(vchiq_arm_log_level,
5214 + "Unable to allocate device number");
5215 + goto failed_alloc_chrdev;
5216 + }
5217 + cdev_init(&vchiq_cdev, &vchiq_fops);
5218 + vchiq_cdev.owner = THIS_MODULE;
5219 + err = cdev_add(&vchiq_cdev, vchiq_devid, 1);
5220 + if (err != 0) {
5221 + vchiq_log_error(vchiq_arm_log_level,
5222 + "Unable to register device");
5223 + goto failed_cdev_add;
5224 + }
5225 +
5226 + /* create sysfs entries */
5227 + vchiq_class = class_create(THIS_MODULE, DEVICE_NAME);
5228 + ptr_err = vchiq_class;
5229 + if (IS_ERR(ptr_err))
5230 + goto failed_class_create;
5231 +
5232 + vchiq_dev = device_create(vchiq_class, NULL,
5233 + vchiq_devid, NULL, "vchiq");
5234 + ptr_err = vchiq_dev;
5235 + if (IS_ERR(ptr_err))
5236 + goto failed_device_create;
5237 +
5238 + err = vchiq_platform_init(pdev, &g_state);
5239 + if (err != 0)
5240 + goto failed_platform_init;
5241 +
5242 + vchiq_log_info(vchiq_arm_log_level,
5243 + "vchiq: initialised - version %d (min %d), device %d.%d",
5244 + VCHIQ_VERSION, VCHIQ_VERSION_MIN,
5245 + MAJOR(vchiq_devid), MINOR(vchiq_devid));
5246 +
5247 + return 0;
5248 +
5249 +failed_platform_init:
5250 + device_destroy(vchiq_class, vchiq_devid);
5251 +failed_device_create:
5252 + class_destroy(vchiq_class);
5253 +failed_class_create:
5254 + cdev_del(&vchiq_cdev);
5255 + err = PTR_ERR(ptr_err);
5256 +failed_cdev_add:
5257 + unregister_chrdev_region(vchiq_devid, 1);
5258 +failed_alloc_chrdev:
5259 + vchiq_debugfs_deinit();
5260 +failed_debugfs_init:
5261 + vchiq_log_warning(vchiq_arm_log_level, "could not load vchiq");
5262 + return err;
5263 +}
5264 +
5265 +static int vchiq_remove(struct platform_device *pdev)
5266 +{
5267 + device_destroy(vchiq_class, vchiq_devid);
5268 + class_destroy(vchiq_class);
5269 + cdev_del(&vchiq_cdev);
5270 + unregister_chrdev_region(vchiq_devid, 1);
5271 +
5272 + return 0;
5273 +}
5274 +
5275 +static const struct of_device_id vchiq_of_match[] = {
5276 + { .compatible = "brcm,bcm2835-vchiq", },
5277 + {},
5278 +};
5279 +MODULE_DEVICE_TABLE(of, vchiq_of_match);
5280 +
5281 +static struct platform_driver vchiq_driver = {
5282 + .driver = {
5283 + .name = "bcm2835_vchiq",
5284 + .owner = THIS_MODULE,
5285 + .of_match_table = vchiq_of_match,
5286 + },
5287 + .probe = vchiq_probe,
5288 + .remove = vchiq_remove,
5289 +};
5290 +module_platform_driver(vchiq_driver);
5291 +
5292 +MODULE_LICENSE("GPL");
5293 +MODULE_AUTHOR("Broadcom Corporation");
5294 --- /dev/null
5295 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.h
5296 @@ -0,0 +1,220 @@
5297 +/**
5298 + * Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved.
5299 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
5300 + *
5301 + * Redistribution and use in source and binary forms, with or without
5302 + * modification, are permitted provided that the following conditions
5303 + * are met:
5304 + * 1. Redistributions of source code must retain the above copyright
5305 + * notice, this list of conditions, and the following disclaimer,
5306 + * without modification.
5307 + * 2. Redistributions in binary form must reproduce the above copyright
5308 + * notice, this list of conditions and the following disclaimer in the
5309 + * documentation and/or other materials provided with the distribution.
5310 + * 3. The names of the above-listed copyright holders may not be used
5311 + * to endorse or promote products derived from this software without
5312 + * specific prior written permission.
5313 + *
5314 + * ALTERNATIVELY, this software may be distributed under the terms of the
5315 + * GNU General Public License ("GPL") version 2, as published by the Free
5316 + * Software Foundation.
5317 + *
5318 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
5319 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
5320 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
5321 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
5322 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
5323 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
5324 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
5325 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
5326 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
5327 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
5328 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5329 + */
5330 +
5331 +#ifndef VCHIQ_ARM_H
5332 +#define VCHIQ_ARM_H
5333 +
5334 +#include <linux/mutex.h>
5335 +#include <linux/platform_device.h>
5336 +#include <linux/semaphore.h>
5337 +#include <linux/atomic.h>
5338 +#include "vchiq_core.h"
5339 +#include "vchiq_debugfs.h"
5340 +
5341 +
5342 +enum vc_suspend_status {
5343 + VC_SUSPEND_FORCE_CANCELED = -3, /* Force suspend canceled, too busy */
5344 + VC_SUSPEND_REJECTED = -2, /* Videocore rejected suspend request */
5345 + VC_SUSPEND_FAILED = -1, /* Videocore suspend failed */
5346 + VC_SUSPEND_IDLE = 0, /* VC active, no suspend actions */
5347 + VC_SUSPEND_REQUESTED, /* User has requested suspend */
5348 + VC_SUSPEND_IN_PROGRESS, /* Slot handler has recvd suspend request */
5349 + VC_SUSPEND_SUSPENDED /* Videocore suspend succeeded */
5350 +};
5351 +
5352 +enum vc_resume_status {
5353 + VC_RESUME_FAILED = -1, /* Videocore resume failed */
5354 + VC_RESUME_IDLE = 0, /* VC suspended, no resume actions */
5355 + VC_RESUME_REQUESTED, /* User has requested resume */
5356 + VC_RESUME_IN_PROGRESS, /* Slot handler has received resume request */
5357 + VC_RESUME_RESUMED /* Videocore resumed successfully (active) */
5358 +};
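+/* The numeric values of the two enums above are significant: error states are
+ * negative and the idle state is zero, so the code can use ordered comparisons
+ * (e.g. against VC_SUSPEND_IDLE) and index the state-name tables using the
+ * corresponding NUM_OFFSET constants. */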
5359 +
5360 +
5361 +enum USE_TYPE_E {
5362 + USE_TYPE_SERVICE,
5363 + USE_TYPE_SERVICE_NO_RESUME,
5364 + USE_TYPE_VCHIQ
5365 +};
5366 +
5367 +
5368 +
5369 +typedef struct vchiq_arm_state_struct {
5370 + /* Keepalive-related data */
5371 + struct task_struct *ka_thread;
5372 + struct completion ka_evt;
5373 + atomic_t ka_use_count;
5374 + atomic_t ka_use_ack_count;
5375 + atomic_t ka_release_count;
5376 +
5377 + struct completion vc_suspend_complete;
5378 + struct completion vc_resume_complete;
5379 +
5380 + rwlock_t susp_res_lock;
5381 + enum vc_suspend_status vc_suspend_state;
5382 + enum vc_resume_status vc_resume_state;
5383 +
5384 + unsigned int wake_address;
5385 +
5386 + struct timer_list suspend_timer;
5387 + int suspend_timer_timeout;
5388 + int suspend_timer_running;
5389 +
5390 + /* Global use count for videocore.
5391 + ** This is equal to the sum of the use counts for all services. When
5392 + ** this hits zero the videocore suspend procedure will be initiated.
5393 + */
5394 + int videocore_use_count;
5395 +
5396 + /* Use count to track requests from videocore peer.
5397 + ** This use count is not associated with a service, so needs to be
5398 + ** tracked separately with the state.
5399 + */
5400 + int peer_use_count;
5401 +
5402 + /* Flag to indicate whether resume is blocked. This happens when the
5403 + ** ARM is suspending
5404 + */
5405 + struct completion resume_blocker;
5406 + int resume_blocked;
5407 + struct completion blocked_blocker;
5408 + int blocked_count;
5409 +
5410 + int autosuspend_override;
5411 +
5412 + /* Flag to indicate that the first vchiq connect has made it through.
5413 + ** This means that both sides should be fully ready, and we should
5414 + ** be able to suspend after this point.
5415 + */
5416 + int first_connect;
5417 +
5418 + unsigned long long suspend_start_time;
5419 + unsigned long long sleep_start_time;
5420 + unsigned long long resume_start_time;
5421 + unsigned long long last_wake_time;
5422 +
5423 +} VCHIQ_ARM_STATE_T;
5424 +
5425 +extern int vchiq_arm_log_level;
5426 +extern int vchiq_susp_log_level;
5427 +
5428 +int vchiq_platform_init(struct platform_device *pdev, VCHIQ_STATE_T *state);
5429 +
5430 +extern VCHIQ_STATE_T *
5431 +vchiq_get_state(void);
5432 +
5433 +extern VCHIQ_STATUS_T
5434 +vchiq_arm_vcsuspend(VCHIQ_STATE_T *state);
5435 +
5436 +extern VCHIQ_STATUS_T
5437 +vchiq_arm_force_suspend(VCHIQ_STATE_T *state);
5438 +
5439 +extern int
5440 +vchiq_arm_allow_resume(VCHIQ_STATE_T *state);
5441 +
5442 +extern VCHIQ_STATUS_T
5443 +vchiq_arm_vcresume(VCHIQ_STATE_T *state);
5444 +
5445 +extern VCHIQ_STATUS_T
5446 +vchiq_arm_init_state(VCHIQ_STATE_T *state, VCHIQ_ARM_STATE_T *arm_state);
5447 +
5448 +extern int
5449 +vchiq_check_resume(VCHIQ_STATE_T *state);
5450 +
5451 +extern void
5452 +vchiq_check_suspend(VCHIQ_STATE_T *state);
5453 +extern VCHIQ_STATUS_T
5454 +vchiq_use_service(VCHIQ_SERVICE_HANDLE_T handle);
5455 +
5456 +extern VCHIQ_STATUS_T
5457 +vchiq_release_service(VCHIQ_SERVICE_HANDLE_T handle);
5458 +
5459 +extern VCHIQ_STATUS_T
5460 +vchiq_check_service(VCHIQ_SERVICE_T *service);
5461 +
5462 +extern VCHIQ_STATUS_T
5463 +vchiq_platform_suspend(VCHIQ_STATE_T *state);
5464 +
5465 +extern int
5466 +vchiq_platform_videocore_wanted(VCHIQ_STATE_T *state);
5467 +
5468 +extern int
5469 +vchiq_platform_use_suspend_timer(void);
5470 +
5471 +extern void
5472 +vchiq_dump_platform_use_state(VCHIQ_STATE_T *state);
5473 +
5474 +extern void
5475 +vchiq_dump_service_use_state(VCHIQ_STATE_T *state);
5476 +
5477 +extern VCHIQ_ARM_STATE_T*
5478 +vchiq_platform_get_arm_state(VCHIQ_STATE_T *state);
5479 +
5480 +extern int
5481 +vchiq_videocore_wanted(VCHIQ_STATE_T *state);
5482 +
5483 +extern VCHIQ_STATUS_T
5484 +vchiq_use_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
5485 + enum USE_TYPE_E use_type);
5486 +extern VCHIQ_STATUS_T
5487 +vchiq_release_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service);
5488 +
5489 +extern VCHIQ_DEBUGFS_NODE_T *
5490 +vchiq_instance_get_debugfs_node(VCHIQ_INSTANCE_T instance);
5491 +
5492 +extern int
5493 +vchiq_instance_get_use_count(VCHIQ_INSTANCE_T instance);
5494 +
5495 +extern int
5496 +vchiq_instance_get_pid(VCHIQ_INSTANCE_T instance);
5497 +
5498 +extern int
5499 +vchiq_instance_get_trace(VCHIQ_INSTANCE_T instance);
5500 +
5501 +extern void
5502 +vchiq_instance_set_trace(VCHIQ_INSTANCE_T instance, int trace);
5503 +
5504 +extern void
5505 +set_suspend_state(VCHIQ_ARM_STATE_T *arm_state,
5506 + enum vc_suspend_status new_state);
5507 +
5508 +extern void
5509 +set_resume_state(VCHIQ_ARM_STATE_T *arm_state,
5510 + enum vc_resume_status new_state);
5511 +
5512 +extern void
5513 +start_suspend_timer(VCHIQ_ARM_STATE_T *arm_state);
5514 +
5515 +
5516 +#endif /* VCHIQ_ARM_H */
5517 --- /dev/null
5518 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_build_info.h
5519 @@ -0,0 +1,37 @@
5520 +/**
5521 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
5522 + *
5523 + * Redistribution and use in source and binary forms, with or without
5524 + * modification, are permitted provided that the following conditions
5525 + * are met:
5526 + * 1. Redistributions of source code must retain the above copyright
5527 + * notice, this list of conditions, and the following disclaimer,
5528 + * without modification.
5529 + * 2. Redistributions in binary form must reproduce the above copyright
5530 + * notice, this list of conditions and the following disclaimer in the
5531 + * documentation and/or other materials provided with the distribution.
5532 + * 3. The names of the above-listed copyright holders may not be used
5533 + * to endorse or promote products derived from this software without
5534 + * specific prior written permission.
5535 + *
5536 + * ALTERNATIVELY, this software may be distributed under the terms of the
5537 + * GNU General Public License ("GPL") version 2, as published by the Free
5538 + * Software Foundation.
5539 + *
5540 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
5541 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
5542 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
5543 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
5544 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
5545 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
5546 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
5547 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
5548 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
5549 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
5550 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5551 + */
5552 +
5553 +const char *vchiq_get_build_hostname(void);
5554 +const char *vchiq_get_build_version(void);
5555 +const char *vchiq_get_build_time(void);
5556 +const char *vchiq_get_build_date(void);
5557 --- /dev/null
5558 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_cfg.h
5559 @@ -0,0 +1,69 @@
5560 +/**
5561 + * Copyright (c) 2010-2014 Broadcom. All rights reserved.
5562 + *
5563 + * Redistribution and use in source and binary forms, with or without
5564 + * modification, are permitted provided that the following conditions
5565 + * are met:
5566 + * 1. Redistributions of source code must retain the above copyright
5567 + * notice, this list of conditions, and the following disclaimer,
5568 + * without modification.
5569 + * 2. Redistributions in binary form must reproduce the above copyright
5570 + * notice, this list of conditions and the following disclaimer in the
5571 + * documentation and/or other materials provided with the distribution.
5572 + * 3. The names of the above-listed copyright holders may not be used
5573 + * to endorse or promote products derived from this software without
5574 + * specific prior written permission.
5575 + *
5576 + * ALTERNATIVELY, this software may be distributed under the terms of the
5577 + * GNU General Public License ("GPL") version 2, as published by the Free
5578 + * Software Foundation.
5579 + *
5580 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
5581 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
5582 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
5583 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
5584 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
5585 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
5586 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
5587 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
5588 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
5589 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
5590 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5591 + */
5592 +
5593 +#ifndef VCHIQ_CFG_H
5594 +#define VCHIQ_CFG_H
5595 +
5596 +#define VCHIQ_MAGIC VCHIQ_MAKE_FOURCC('V', 'C', 'H', 'I')
5597 +/* The version of VCHIQ - change with any non-trivial change */
5598 +#define VCHIQ_VERSION 8
5599 +/* The minimum compatible version - update to match VCHIQ_VERSION with any
5600 +** incompatible change */
5601 +#define VCHIQ_VERSION_MIN 3
5602 +
5603 +/* The version that introduced the VCHIQ_IOC_LIB_VERSION ioctl */
5604 +#define VCHIQ_VERSION_LIB_VERSION 7
5605 +
5606 +/* The version that introduced the VCHIQ_IOC_CLOSE_DELIVERED ioctl */
5607 +#define VCHIQ_VERSION_CLOSE_DELIVERED 7
5608 +
5609 +/* The version that made it safe to use SYNCHRONOUS mode */
5610 +#define VCHIQ_VERSION_SYNCHRONOUS_MODE 8
5611 +
5612 +#define VCHIQ_MAX_STATES 1
5613 +#define VCHIQ_MAX_SERVICES 4096
5614 +#define VCHIQ_MAX_SLOTS 128
5615 +#define VCHIQ_MAX_SLOTS_PER_SIDE 64
5616 +
5617 +#define VCHIQ_NUM_CURRENT_BULKS 32
5618 +#define VCHIQ_NUM_SERVICE_BULKS 4
5619 +
5620 +#ifndef VCHIQ_ENABLE_DEBUG
5621 +#define VCHIQ_ENABLE_DEBUG 1
5622 +#endif
5623 +
5624 +#ifndef VCHIQ_ENABLE_STATS
5625 +#define VCHIQ_ENABLE_STATS 1
5626 +#endif
5627 +
5628 +#endif /* VCHIQ_CFG_H */
5629 --- /dev/null
5630 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.c
5631 @@ -0,0 +1,120 @@
5632 +/**
5633 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
5634 + *
5635 + * Redistribution and use in source and binary forms, with or without
5636 + * modification, are permitted provided that the following conditions
5637 + * are met:
5638 + * 1. Redistributions of source code must retain the above copyright
5639 + * notice, this list of conditions, and the following disclaimer,
5640 + * without modification.
5641 + * 2. Redistributions in binary form must reproduce the above copyright
5642 + * notice, this list of conditions and the following disclaimer in the
5643 + * documentation and/or other materials provided with the distribution.
5644 + * 3. The names of the above-listed copyright holders may not be used
5645 + * to endorse or promote products derived from this software without
5646 + * specific prior written permission.
5647 + *
5648 + * ALTERNATIVELY, this software may be distributed under the terms of the
5649 + * GNU General Public License ("GPL") version 2, as published by the Free
5650 + * Software Foundation.
5651 + *
5652 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
5653 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
5654 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
5655 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
5656 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
5657 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
5658 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
5659 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
5660 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
5661 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
5662 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5663 + */
5664 +
5665 +#include "vchiq_connected.h"
5666 +#include "vchiq_core.h"
5667 +#include "vchiq_killable.h"
5668 +#include <linux/module.h>
5669 +#include <linux/mutex.h>
5670 +
5671 +#define MAX_CALLBACKS 10
5672 +
5673 +static int g_connected;
5674 +static int g_num_deferred_callbacks;
5675 +static VCHIQ_CONNECTED_CALLBACK_T g_deferred_callback[MAX_CALLBACKS];
5676 +static int g_once_init;
5677 +static struct mutex g_connected_mutex;
5678 +
5679 +/****************************************************************************
5680 +*
5681 +* Function to initialize our lock.
5682 +*
5683 +***************************************************************************/
5684 +
5685 +static void connected_init(void)
5686 +{
5687 + if (!g_once_init) {
5688 + mutex_init(&g_connected_mutex);
5689 + g_once_init = 1;
5690 + }
5691 +}
5692 +
5693 +/****************************************************************************
5694 +*
5695 +* This function is used to defer initialization until the vchiq stack is
5696 +* initialized. If the stack is already initialized, then the callback will
5697 +* be made immediately, otherwise it will be deferred until
5698 +* vchiq_call_connected_callbacks is called.
5699 +*
5700 +***************************************************************************/
5701 +
5702 +void vchiq_add_connected_callback(VCHIQ_CONNECTED_CALLBACK_T callback)
5703 +{
5704 + connected_init();
5705 +
5706 + if (mutex_lock_interruptible(&g_connected_mutex) != 0)
5707 + return;
5708 +
5709 + if (g_connected)
5710 + /* We're already connected. Call the callback immediately. */
5712 + callback();
5713 + else {
5714 + if (g_num_deferred_callbacks >= MAX_CALLBACKS)
5715 + vchiq_log_error(vchiq_core_log_level,
5716 +				"There are already %d callbacks registered - "
5717 + "please increase MAX_CALLBACKS",
5718 + g_num_deferred_callbacks);
5719 + else {
5720 + g_deferred_callback[g_num_deferred_callbacks] =
5721 + callback;
5722 + g_num_deferred_callbacks++;
5723 + }
5724 + }
5725 + mutex_unlock(&g_connected_mutex);
5726 +}
5727 +
5728 +/****************************************************************************
5729 +*
5730 +* This function is called by the vchiq stack once it has been connected to
5731 +* the videocore and clients can start to use the stack.
5732 +*
5733 +***************************************************************************/
5734 +
5735 +void vchiq_call_connected_callbacks(void)
5736 +{
5737 + int i;
5738 +
5739 + connected_init();
5740 +
5741 + if (mutex_lock_interruptible(&g_connected_mutex) != 0)
5742 + return;
5743 +
5744 + for (i = 0; i < g_num_deferred_callbacks; i++)
5745 + g_deferred_callback[i]();
5746 +
5747 + g_num_deferred_callbacks = 0;
5748 + g_connected = 1;
5749 + mutex_unlock(&g_connected_mutex);
5750 +}
5751 +EXPORT_SYMBOL(vchiq_add_connected_callback);
5752 --- /dev/null
5753 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.h
5754 @@ -0,0 +1,50 @@
5755 +/**
5756 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
5757 + *
5758 + * Redistribution and use in source and binary forms, with or without
5759 + * modification, are permitted provided that the following conditions
5760 + * are met:
5761 + * 1. Redistributions of source code must retain the above copyright
5762 + * notice, this list of conditions, and the following disclaimer,
5763 + * without modification.
5764 + * 2. Redistributions in binary form must reproduce the above copyright
5765 + * notice, this list of conditions and the following disclaimer in the
5766 + * documentation and/or other materials provided with the distribution.
5767 + * 3. The names of the above-listed copyright holders may not be used
5768 + * to endorse or promote products derived from this software without
5769 + * specific prior written permission.
5770 + *
5771 + * ALTERNATIVELY, this software may be distributed under the terms of the
5772 + * GNU General Public License ("GPL") version 2, as published by the Free
5773 + * Software Foundation.
5774 + *
5775 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
5776 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
5777 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
5778 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
5779 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
5780 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
5781 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
5782 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
5783 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
5784 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
5785 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5786 + */
5787 +
5788 +#ifndef VCHIQ_CONNECTED_H
5789 +#define VCHIQ_CONNECTED_H
5790 +
5791 +/* ---- Include Files ----------------------------------------------------- */
5792 +
5793 +/* ---- Constants and Types ---------------------------------------------- */
5794 +
5795 +typedef void (*VCHIQ_CONNECTED_CALLBACK_T)(void);
5796 +
5797 +/* ---- Variable Externs ------------------------------------------------- */
5798 +
5799 +/* ---- Function Prototypes ---------------------------------------------- */
5800 +
5801 +void vchiq_add_connected_callback(VCHIQ_CONNECTED_CALLBACK_T callback);
5802 +void vchiq_call_connected_callbacks(void);
5803 +
5804 +#endif /* VCHIQ_CONNECTED_H */
5805 --- /dev/null
5806 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.c
5807 @@ -0,0 +1,3934 @@
5808 +/**
5809 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
5810 + *
5811 + * Redistribution and use in source and binary forms, with or without
5812 + * modification, are permitted provided that the following conditions
5813 + * are met:
5814 + * 1. Redistributions of source code must retain the above copyright
5815 + * notice, this list of conditions, and the following disclaimer,
5816 + * without modification.
5817 + * 2. Redistributions in binary form must reproduce the above copyright
5818 + * notice, this list of conditions and the following disclaimer in the
5819 + * documentation and/or other materials provided with the distribution.
5820 + * 3. The names of the above-listed copyright holders may not be used
5821 + * to endorse or promote products derived from this software without
5822 + * specific prior written permission.
5823 + *
5824 + * ALTERNATIVELY, this software may be distributed under the terms of the
5825 + * GNU General Public License ("GPL") version 2, as published by the Free
5826 + * Software Foundation.
5827 + *
5828 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
5829 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
5830 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
5831 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
5832 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
5833 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
5834 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
5835 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
5836 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
5837 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
5838 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5839 + */
5840 +
5841 +#include "vchiq_core.h"
5842 +#include "vchiq_killable.h"
5843 +
5844 +#define VCHIQ_SLOT_HANDLER_STACK 8192
5845 +
5846 +#define HANDLE_STATE_SHIFT 12
5847 +
5848 +#define SLOT_INFO_FROM_INDEX(state, index) (state->slot_info + (index))
5849 +#define SLOT_DATA_FROM_INDEX(state, index) (state->slot_data + (index))
5850 +#define SLOT_INDEX_FROM_DATA(state, data) \
5851 + (((unsigned int)((char *)data - (char *)state->slot_data)) / \
5852 + VCHIQ_SLOT_SIZE)
5853 +#define SLOT_INDEX_FROM_INFO(state, info) \
5854 + ((unsigned int)(info - state->slot_info))
5855 +#define SLOT_QUEUE_INDEX_FROM_POS(pos) \
5856 + ((int)((unsigned int)(pos) / VCHIQ_SLOT_SIZE))
5857 +
5858 +#define BULK_INDEX(x) (x & (VCHIQ_NUM_SERVICE_BULKS - 1))
5859 +
5860 +#define SRVTRACE_LEVEL(srv) \
5861 + (((srv) && (srv)->trace) ? VCHIQ_LOG_TRACE : vchiq_core_msg_log_level)
5862 +#define SRVTRACE_ENABLED(srv, lev) \
5863 + (((srv) && (srv)->trace) || (vchiq_core_msg_log_level >= (lev)))
5864 +
5865 +struct vchiq_open_payload {
5866 + int fourcc;
5867 + int client_id;
5868 + short version;
5869 + short version_min;
5870 +};
5871 +
5872 +struct vchiq_openack_payload {
5873 + short version;
5874 +};
5875 +
5876 +enum
5877 +{
5878 + QMFLAGS_IS_BLOCKING = (1 << 0),
5879 + QMFLAGS_NO_MUTEX_LOCK = (1 << 1),
5880 + QMFLAGS_NO_MUTEX_UNLOCK = (1 << 2)
5881 +};
5882 +
5883 +/* we require this for consistency between endpoints */
5884 +vchiq_static_assert(sizeof(VCHIQ_HEADER_T) == 8);
5885 +vchiq_static_assert(IS_POW2(sizeof(VCHIQ_HEADER_T)));
5886 +vchiq_static_assert(IS_POW2(VCHIQ_NUM_CURRENT_BULKS));
5887 +vchiq_static_assert(IS_POW2(VCHIQ_NUM_SERVICE_BULKS));
5888 +vchiq_static_assert(IS_POW2(VCHIQ_MAX_SERVICES));
5889 +vchiq_static_assert(VCHIQ_VERSION >= VCHIQ_VERSION_MIN);
5890 +
5891 +/* Run time control of log level, based on KERN_XXX level. */
5892 +int vchiq_core_log_level = VCHIQ_LOG_DEFAULT;
5893 +int vchiq_core_msg_log_level = VCHIQ_LOG_DEFAULT;
5894 +int vchiq_sync_log_level = VCHIQ_LOG_DEFAULT;
5895 +
5896 +static atomic_t pause_bulks_count = ATOMIC_INIT(0);
5897 +
5898 +static DEFINE_SPINLOCK(service_spinlock);
5899 +DEFINE_SPINLOCK(bulk_waiter_spinlock);
5900 +DEFINE_SPINLOCK(quota_spinlock);
5901 +
5902 +VCHIQ_STATE_T *vchiq_states[VCHIQ_MAX_STATES];
5903 +static unsigned int handle_seq;
5904 +
5905 +static const char *const srvstate_names[] = {
5906 + "FREE",
5907 + "HIDDEN",
5908 + "LISTENING",
5909 + "OPENING",
5910 + "OPEN",
5911 + "OPENSYNC",
5912 + "CLOSESENT",
5913 + "CLOSERECVD",
5914 + "CLOSEWAIT",
5915 + "CLOSED"
5916 +};
5917 +
5918 +static const char *const reason_names[] = {
5919 + "SERVICE_OPENED",
5920 + "SERVICE_CLOSED",
5921 + "MESSAGE_AVAILABLE",
5922 + "BULK_TRANSMIT_DONE",
5923 + "BULK_RECEIVE_DONE",
5924 + "BULK_TRANSMIT_ABORTED",
5925 + "BULK_RECEIVE_ABORTED"
5926 +};
5927 +
5928 +static const char *const conn_state_names[] = {
5929 + "DISCONNECTED",
5930 + "CONNECTING",
5931 + "CONNECTED",
5932 + "PAUSING",
5933 + "PAUSE_SENT",
5934 + "PAUSED",
5935 + "RESUMING",
5936 + "PAUSE_TIMEOUT",
5937 + "RESUME_TIMEOUT"
5938 +};
5939 +
5940 +
5941 +static void
5942 +release_message_sync(VCHIQ_STATE_T *state, VCHIQ_HEADER_T *header);
5943 +
5944 +static const char *msg_type_str(unsigned int msg_type)
5945 +{
5946 + switch (msg_type) {
5947 + case VCHIQ_MSG_PADDING: return "PADDING";
5948 + case VCHIQ_MSG_CONNECT: return "CONNECT";
5949 + case VCHIQ_MSG_OPEN: return "OPEN";
5950 + case VCHIQ_MSG_OPENACK: return "OPENACK";
5951 + case VCHIQ_MSG_CLOSE: return "CLOSE";
5952 + case VCHIQ_MSG_DATA: return "DATA";
5953 + case VCHIQ_MSG_BULK_RX: return "BULK_RX";
5954 + case VCHIQ_MSG_BULK_TX: return "BULK_TX";
5955 + case VCHIQ_MSG_BULK_RX_DONE: return "BULK_RX_DONE";
5956 + case VCHIQ_MSG_BULK_TX_DONE: return "BULK_TX_DONE";
5957 + case VCHIQ_MSG_PAUSE: return "PAUSE";
5958 + case VCHIQ_MSG_RESUME: return "RESUME";
5959 + case VCHIQ_MSG_REMOTE_USE: return "REMOTE_USE";
5960 + case VCHIQ_MSG_REMOTE_RELEASE: return "REMOTE_RELEASE";
5961 + case VCHIQ_MSG_REMOTE_USE_ACTIVE: return "REMOTE_USE_ACTIVE";
5962 + }
5963 + return "???";
5964 +}
5965 +
5966 +static inline void
5967 +vchiq_set_service_state(VCHIQ_SERVICE_T *service, int newstate)
5968 +{
5969 + vchiq_log_info(vchiq_core_log_level, "%d: srv:%d %s->%s",
5970 + service->state->id, service->localport,
5971 + srvstate_names[service->srvstate],
5972 + srvstate_names[newstate]);
5973 + service->srvstate = newstate;
5974 +}
5975 +
5976 +VCHIQ_SERVICE_T *
5977 +find_service_by_handle(VCHIQ_SERVICE_HANDLE_T handle)
5978 +{
5979 + VCHIQ_SERVICE_T *service;
5980 +
5981 + spin_lock(&service_spinlock);
5982 + service = handle_to_service(handle);
5983 + if (service && (service->srvstate != VCHIQ_SRVSTATE_FREE) &&
5984 + (service->handle == handle)) {
5985 + BUG_ON(service->ref_count == 0);
5986 + service->ref_count++;
5987 + } else
5988 + service = NULL;
5989 + spin_unlock(&service_spinlock);
5990 +
5991 + if (!service)
5992 + vchiq_log_info(vchiq_core_log_level,
5993 + "Invalid service handle 0x%x", handle);
5994 +
5995 + return service;
5996 +}
5997 +
5998 +VCHIQ_SERVICE_T *
5999 +find_service_by_port(VCHIQ_STATE_T *state, int localport)
6000 +{
6001 + VCHIQ_SERVICE_T *service = NULL;
6002 + if ((unsigned int)localport <= VCHIQ_PORT_MAX) {
6003 + spin_lock(&service_spinlock);
6004 + service = state->services[localport];
6005 + if (service && (service->srvstate != VCHIQ_SRVSTATE_FREE)) {
6006 + BUG_ON(service->ref_count == 0);
6007 + service->ref_count++;
6008 + } else
6009 + service = NULL;
6010 + spin_unlock(&service_spinlock);
6011 + }
6012 +
6013 + if (!service)
6014 + vchiq_log_info(vchiq_core_log_level,
6015 + "Invalid port %d", localport);
6016 +
6017 + return service;
6018 +}
6019 +
6020 +VCHIQ_SERVICE_T *
6021 +find_service_for_instance(VCHIQ_INSTANCE_T instance,
6022 + VCHIQ_SERVICE_HANDLE_T handle) {
6023 + VCHIQ_SERVICE_T *service;
6024 +
6025 + spin_lock(&service_spinlock);
6026 + service = handle_to_service(handle);
6027 + if (service && (service->srvstate != VCHIQ_SRVSTATE_FREE) &&
6028 + (service->handle == handle) &&
6029 + (service->instance == instance)) {
6030 + BUG_ON(service->ref_count == 0);
6031 + service->ref_count++;
6032 + } else
6033 + service = NULL;
6034 + spin_unlock(&service_spinlock);
6035 +
6036 + if (!service)
6037 + vchiq_log_info(vchiq_core_log_level,
6038 + "Invalid service handle 0x%x", handle);
6039 +
6040 + return service;
6041 +}
6042 +
6043 +VCHIQ_SERVICE_T *
6044 +find_closed_service_for_instance(VCHIQ_INSTANCE_T instance,
6045 + VCHIQ_SERVICE_HANDLE_T handle) {
6046 + VCHIQ_SERVICE_T *service;
6047 +
6048 + spin_lock(&service_spinlock);
6049 + service = handle_to_service(handle);
6050 + if (service &&
6051 + ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
6052 + (service->srvstate == VCHIQ_SRVSTATE_CLOSED)) &&
6053 + (service->handle == handle) &&
6054 + (service->instance == instance)) {
6055 + BUG_ON(service->ref_count == 0);
6056 + service->ref_count++;
6057 + } else
6058 + service = NULL;
6059 + spin_unlock(&service_spinlock);
6060 +
6061 + if (!service)
6062 + vchiq_log_info(vchiq_core_log_level,
6063 + "Invalid service handle 0x%x", handle);
6064 +
6065 + return service;
6066 +}
6067 +
6068 +VCHIQ_SERVICE_T *
6069 +next_service_by_instance(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance,
6070 + int *pidx)
6071 +{
6072 + VCHIQ_SERVICE_T *service = NULL;
6073 + int idx = *pidx;
6074 +
6075 + spin_lock(&service_spinlock);
6076 + while (idx < state->unused_service) {
6077 + VCHIQ_SERVICE_T *srv = state->services[idx++];
6078 + if (srv && (srv->srvstate != VCHIQ_SRVSTATE_FREE) &&
6079 + (srv->instance == instance)) {
6080 + service = srv;
6081 + BUG_ON(service->ref_count == 0);
6082 + service->ref_count++;
6083 + break;
6084 + }
6085 + }
6086 + spin_unlock(&service_spinlock);
6087 +
6088 + *pidx = idx;
6089 +
6090 + return service;
6091 +}
6092 +
6093 +void
6094 +lock_service(VCHIQ_SERVICE_T *service)
6095 +{
6096 + spin_lock(&service_spinlock);
6097 + BUG_ON(!service || (service->ref_count == 0));
6098 + if (service)
6099 + service->ref_count++;
6100 + spin_unlock(&service_spinlock);
6101 +}
6102 +
6103 +void
6104 +unlock_service(VCHIQ_SERVICE_T *service)
6105 +{
6106 + VCHIQ_STATE_T *state = service->state;
6107 + spin_lock(&service_spinlock);
6108 + BUG_ON(!service || (service->ref_count == 0));
6109 + if (service && service->ref_count) {
6110 + service->ref_count--;
6111 + if (!service->ref_count) {
6112 + BUG_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);
6113 + state->services[service->localport] = NULL;
6114 + } else
6115 + service = NULL;
6116 + }
6117 + spin_unlock(&service_spinlock);
6118 +
6119 + if (service && service->userdata_term)
6120 + service->userdata_term(service->base.userdata);
6121 +
6122 + kfree(service);
6123 +}
6124 +
6125 +int
6126 +vchiq_get_client_id(VCHIQ_SERVICE_HANDLE_T handle)
6127 +{
6128 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
6129 + int id;
6130 +
6131 + id = service ? service->client_id : 0;
6132 + if (service)
6133 + unlock_service(service);
6134 +
6135 + return id;
6136 +}
6137 +
6138 +void *
6139 +vchiq_get_service_userdata(VCHIQ_SERVICE_HANDLE_T handle)
6140 +{
6141 + VCHIQ_SERVICE_T *service = handle_to_service(handle);
6142 +
6143 + return service ? service->base.userdata : NULL;
6144 +}
6145 +
6146 +int
6147 +vchiq_get_service_fourcc(VCHIQ_SERVICE_HANDLE_T handle)
6148 +{
6149 + VCHIQ_SERVICE_T *service = handle_to_service(handle);
6150 +
6151 + return service ? service->base.fourcc : 0;
6152 +}
6153 +
6154 +static void
6155 +mark_service_closing_internal(VCHIQ_SERVICE_T *service, int sh_thread)
6156 +{
6157 + VCHIQ_STATE_T *state = service->state;
6158 + VCHIQ_SERVICE_QUOTA_T *service_quota;
6159 +
6160 + service->closing = 1;
6161 +
6162 + /* Synchronise with other threads. */
6163 + mutex_lock(&state->recycle_mutex);
6164 + mutex_unlock(&state->recycle_mutex);
6165 + if (!sh_thread || (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT)) {
6166 + /* If we're pausing then the slot_mutex is held until resume
6167 + * by the slot handler. Therefore don't try to acquire this
6168 + * mutex if we're the slot handler and in the pause sent state.
6169 + * We don't need to in this case anyway. */
6170 + mutex_lock(&state->slot_mutex);
6171 + mutex_unlock(&state->slot_mutex);
6172 + }
6173 +
6174 + /* Unblock any sending thread. */
6175 + service_quota = &state->service_quotas[service->localport];
6176 + up(&service_quota->quota_event);
6177 +}
6178 +
6179 +static void
6180 +mark_service_closing(VCHIQ_SERVICE_T *service)
6181 +{
6182 + mark_service_closing_internal(service, 0);
6183 +}
6184 +
6185 +static inline VCHIQ_STATUS_T
6186 +make_service_callback(VCHIQ_SERVICE_T *service, VCHIQ_REASON_T reason,
6187 + VCHIQ_HEADER_T *header, void *bulk_userdata)
6188 +{
6189 + VCHIQ_STATUS_T status;
6190 + vchiq_log_trace(vchiq_core_log_level, "%d: callback:%d (%s, %x, %x)",
6191 + service->state->id, service->localport, reason_names[reason],
6192 + (unsigned int)header, (unsigned int)bulk_userdata);
6193 + status = service->base.callback(reason, header, service->handle,
6194 + bulk_userdata);
6195 + if (status == VCHIQ_ERROR) {
6196 + vchiq_log_warning(vchiq_core_log_level,
6197 + "%d: ignoring ERROR from callback to service %x",
6198 + service->state->id, service->handle);
6199 + status = VCHIQ_SUCCESS;
6200 + }
6201 + return status;
6202 +}
6203 +
6204 +inline void
6205 +vchiq_set_conn_state(VCHIQ_STATE_T *state, VCHIQ_CONNSTATE_T newstate)
6206 +{
6207 + VCHIQ_CONNSTATE_T oldstate = state->conn_state;
6208 + vchiq_log_info(vchiq_core_log_level, "%d: %s->%s", state->id,
6209 + conn_state_names[oldstate],
6210 + conn_state_names[newstate]);
6211 + state->conn_state = newstate;
6212 + vchiq_platform_conn_state_changed(state, oldstate, newstate);
6213 +}
6214 +
6215 +static inline void
6216 +remote_event_create(REMOTE_EVENT_T *event)
6217 +{
6218 + event->armed = 0;
6219 + /* Don't clear the 'fired' flag because it may already have been set
6220 + ** by the other side. */
6221 + sema_init(event->event, 0);
6222 +}
6223 +
6224 +static inline void
6225 +remote_event_destroy(REMOTE_EVENT_T *event)
6226 +{
6227 + (void)event;
6228 +}
6229 +
6230 +static inline int
6231 +remote_event_wait(REMOTE_EVENT_T *event)
6232 +{
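+	/* Descriptive note (editorial): doorbell handshake - 'fired' is set
+	** by the other side when it signals this event, while 'armed'
+	** advertises that this side is about to sleep and needs a real
+	** signal.  The dsb() orders the write to 'armed' against the
+	** re-read of 'fired'. */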
6233 + if (!event->fired) {
6234 + event->armed = 1;
6235 + dsb();
6236 + if (!event->fired) {
6237 + if (down_interruptible(event->event) != 0) {
6238 + event->armed = 0;
6239 + return 0;
6240 + }
6241 + }
6242 + event->armed = 0;
6243 + wmb();
6244 + }
6245 +
6246 + event->fired = 0;
6247 + return 1;
6248 +}
6249 +
6250 +static inline void
6251 +remote_event_signal_local(REMOTE_EVENT_T *event)
6252 +{
6253 + event->armed = 0;
6254 + up(event->event);
6255 +}
6256 +
6257 +static inline void
6258 +remote_event_poll(REMOTE_EVENT_T *event)
6259 +{
6260 + if (event->fired && event->armed)
6261 + remote_event_signal_local(event);
6262 +}
6263 +
6264 +void
6265 +remote_event_pollall(VCHIQ_STATE_T *state)
6266 +{
6267 + remote_event_poll(&state->local->sync_trigger);
6268 + remote_event_poll(&state->local->sync_release);
6269 + remote_event_poll(&state->local->trigger);
6270 + remote_event_poll(&state->local->recycle);
6271 +}
6272 +
6273 +/* Round up message sizes so that any space at the end of a slot is always big
6274 +** enough for a header. This relies on header size being a power of two, which
6275 +** has been verified earlier by a static assertion. */
6276 +
6277 +static inline unsigned int
6278 +calc_stride(unsigned int size)
6279 +{
6280 + /* Allow room for the header */
6281 + size += sizeof(VCHIQ_HEADER_T);
6282 +
6283 + /* Round up */
6284 + return (size + sizeof(VCHIQ_HEADER_T) - 1) & ~(sizeof(VCHIQ_HEADER_T)
6285 + - 1);
6286 +}
6287 +
6288 +/* Called by the slot handler thread */
6289 +static VCHIQ_SERVICE_T *
6290 +get_listening_service(VCHIQ_STATE_T *state, int fourcc)
6291 +{
6292 + int i;
6293 +
6294 + WARN_ON(fourcc == VCHIQ_FOURCC_INVALID);
6295 +
6296 + for (i = 0; i < state->unused_service; i++) {
6297 + VCHIQ_SERVICE_T *service = state->services[i];
6298 + if (service &&
6299 + (service->public_fourcc == fourcc) &&
6300 + ((service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
6301 + ((service->srvstate == VCHIQ_SRVSTATE_OPEN) &&
6302 + (service->remoteport == VCHIQ_PORT_FREE)))) {
6303 + lock_service(service);
6304 + return service;
6305 + }
6306 + }
6307 +
6308 + return NULL;
6309 +}
6310 +
6311 +/* Called by the slot handler thread */
6312 +static VCHIQ_SERVICE_T *
6313 +get_connected_service(VCHIQ_STATE_T *state, unsigned int port)
6314 +{
6315 + int i;
6316 + for (i = 0; i < state->unused_service; i++) {
6317 + VCHIQ_SERVICE_T *service = state->services[i];
6318 + if (service && (service->srvstate == VCHIQ_SRVSTATE_OPEN)
6319 + && (service->remoteport == port)) {
6320 + lock_service(service);
6321 + return service;
6322 + }
6323 + }
6324 + return NULL;
6325 +}
6326 +
6327 +inline void
6328 +request_poll(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service, int poll_type)
6329 +{
6330 + uint32_t value;
6331 +
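+	/* Descriptive note (editorial): atomically OR the poll type into the
+	** service's poll_flags and set the service's bit in the per-group
+	** poll_services bitmap, retrying the cmpxchg if another thread
+	** updates the word first. */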
6332 + if (service) {
6333 + do {
6334 + value = atomic_read(&service->poll_flags);
6335 + } while (atomic_cmpxchg(&service->poll_flags, value,
6336 + value | (1 << poll_type)) != value);
6337 +
6338 + do {
6339 + value = atomic_read(&state->poll_services[
6340 + service->localport>>5]);
6341 + } while (atomic_cmpxchg(
6342 + &state->poll_services[service->localport>>5],
6343 + value, value | (1 << (service->localport & 0x1f)))
6344 + != value);
6345 + }
6346 +
6347 + state->poll_needed = 1;
6348 + wmb();
6349 +
6350 + /* ... and ensure the slot handler runs. */
6351 + remote_event_signal_local(&state->local->trigger);
6352 +}
6353 +
6354 +/* Called from queue_message, by the slot handler and application threads,
6355 +** with slot_mutex held */
6356 +static VCHIQ_HEADER_T *
6357 +reserve_space(VCHIQ_STATE_T *state, int space, int is_blocking)
6358 +{
6359 + VCHIQ_SHARED_STATE_T *local = state->local;
6360 + int tx_pos = state->local_tx_pos;
6361 + int slot_space = VCHIQ_SLOT_SIZE - (tx_pos & VCHIQ_SLOT_MASK);
6362 +
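+	/* Descriptive note (editorial): a message may not straddle a slot
+	** boundary, so if it does not fit in the space remaining in the
+	** current slot, pad that space with a PADDING message and start a
+	** fresh slot. */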
6363 + if (space > slot_space) {
6364 + VCHIQ_HEADER_T *header;
6365 + /* Fill the remaining space with padding */
6366 + WARN_ON(state->tx_data == NULL);
6367 + header = (VCHIQ_HEADER_T *)
6368 + (state->tx_data + (tx_pos & VCHIQ_SLOT_MASK));
6369 + header->msgid = VCHIQ_MSGID_PADDING;
6370 + header->size = slot_space - sizeof(VCHIQ_HEADER_T);
6371 +
6372 + tx_pos += slot_space;
6373 + }
6374 +
6375 + /* If necessary, get the next slot. */
6376 + if ((tx_pos & VCHIQ_SLOT_MASK) == 0) {
6377 + int slot_index;
6378 +
6379 + /* If there is no free slot... */
6380 +
6381 + if (down_trylock(&state->slot_available_event) != 0) {
6382 + /* ...wait for one. */
6383 +
6384 + VCHIQ_STATS_INC(state, slot_stalls);
6385 +
6386 + /* But first, flush through the last slot. */
6387 + state->local_tx_pos = tx_pos;
6388 + local->tx_pos = tx_pos;
6389 + remote_event_signal(&state->remote->trigger);
6390 +
6391 + if (!is_blocking ||
6392 + (down_interruptible(
6393 + &state->slot_available_event) != 0))
6394 + return NULL; /* No space available */
6395 + }
6396 +
6397 + BUG_ON(tx_pos ==
6398 + (state->slot_queue_available * VCHIQ_SLOT_SIZE));
6399 +
6400 + slot_index = local->slot_queue[
6401 + SLOT_QUEUE_INDEX_FROM_POS(tx_pos) &
6402 + VCHIQ_SLOT_QUEUE_MASK];
6403 + state->tx_data =
6404 + (char *)SLOT_DATA_FROM_INDEX(state, slot_index);
6405 + }
6406 +
6407 + state->local_tx_pos = tx_pos + space;
6408 +
6409 + return (VCHIQ_HEADER_T *)(state->tx_data + (tx_pos & VCHIQ_SLOT_MASK));
6410 +}
6411 +
6412 +/* Called by the recycle thread. */
6413 +static void
6414 +process_free_queue(VCHIQ_STATE_T *state)
6415 +{
6416 + VCHIQ_SHARED_STATE_T *local = state->local;
6417 + BITSET_T service_found[BITSET_SIZE(VCHIQ_MAX_SERVICES)];
6418 + int slot_queue_available;
6419 +
6420 + /* Use a read memory barrier to ensure that any state that may have
6421 + ** been modified by another thread is not masked by stale prefetched
6422 + ** values. */
6423 + rmb();
6424 +
6425 + /* Find slots which have been freed by the other side, and return them
6426 + ** to the available queue. */
6427 + slot_queue_available = state->slot_queue_available;
6428 +
6429 + while (slot_queue_available != local->slot_queue_recycle) {
6430 + unsigned int pos;
6431 + int slot_index = local->slot_queue[slot_queue_available++ &
6432 + VCHIQ_SLOT_QUEUE_MASK];
6433 + char *data = (char *)SLOT_DATA_FROM_INDEX(state, slot_index);
6434 + int data_found = 0;
6435 +
6436 + vchiq_log_trace(vchiq_core_log_level, "%d: pfq %d=%x %x %x",
6437 + state->id, slot_index, (unsigned int)data,
6438 + local->slot_queue_recycle, slot_queue_available);
6439 +
6440 + /* Initialise the bitmask for services which have used this
6441 + ** slot */
6442 + BITSET_ZERO(service_found);
6443 +
6444 + pos = 0;
6445 +
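+		/* Descriptive note (editorial): walk every message in the
+		** recycled slot, crediting the owning service's message
+		** quota and (once per service per slot) its slot quota,
+		** waking any sender stalled on quota_event. */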
6446 + while (pos < VCHIQ_SLOT_SIZE) {
6447 + VCHIQ_HEADER_T *header =
6448 + (VCHIQ_HEADER_T *)(data + pos);
6449 + int msgid = header->msgid;
6450 + if (VCHIQ_MSG_TYPE(msgid) == VCHIQ_MSG_DATA) {
6451 + int port = VCHIQ_MSG_SRCPORT(msgid);
6452 + VCHIQ_SERVICE_QUOTA_T *service_quota =
6453 + &state->service_quotas[port];
6454 + int count;
6455 + spin_lock(&quota_spinlock);
6456 + count = service_quota->message_use_count;
6457 + if (count > 0)
6458 + service_quota->message_use_count =
6459 + count - 1;
6460 + spin_unlock(&quota_spinlock);
6461 +
6462 + if (count == service_quota->message_quota)
6463 + /* Signal the service that it
6464 + ** has dropped below its quota
6465 + */
6466 + up(&service_quota->quota_event);
6467 + else if (count == 0) {
6468 + vchiq_log_error(vchiq_core_log_level,
6469 + "service %d "
6470 + "message_use_count=%d "
6471 + "(header %x, msgid %x, "
6472 + "header->msgid %x, "
6473 + "header->size %x)",
6474 + port,
6475 + service_quota->
6476 + message_use_count,
6477 + (unsigned int)header, msgid,
6478 + header->msgid,
6479 + header->size);
6480 + WARN(1, "invalid message use count\n");
6481 + }
6482 + if (!BITSET_IS_SET(service_found, port)) {
6483 + /* Set the found bit for this service */
6484 + BITSET_SET(service_found, port);
6485 +
6486 + spin_lock(&quota_spinlock);
6487 + count = service_quota->slot_use_count;
6488 + if (count > 0)
6489 + service_quota->slot_use_count =
6490 + count - 1;
6491 + spin_unlock(&quota_spinlock);
6492 +
6493 + if (count > 0) {
6494 + /* Signal the service in case
6495 + ** it has dropped below its
6496 + ** quota */
6497 + up(&service_quota->quota_event);
6498 + vchiq_log_trace(
6499 + vchiq_core_log_level,
6500 + "%d: pfq:%d %x@%x - "
6501 + "slot_use->%d",
6502 + state->id, port,
6503 + header->size,
6504 + (unsigned int)header,
6505 + count - 1);
6506 + } else {
6507 + vchiq_log_error(
6508 + vchiq_core_log_level,
6509 + "service %d "
6510 + "slot_use_count"
6511 + "=%d (header %x"
6512 + ", msgid %x, "
6513 + "header->msgid"
6514 + " %x, header->"
6515 + "size %x)",
6516 + port, count,
6517 + (unsigned int)header,
6518 + msgid,
6519 + header->msgid,
6520 + header->size);
6521 + WARN(1, "bad slot use count\n");
6522 + }
6523 + }
6524 +
6525 + data_found = 1;
6526 + }
6527 +
6528 + pos += calc_stride(header->size);
6529 + if (pos > VCHIQ_SLOT_SIZE) {
6530 + vchiq_log_error(vchiq_core_log_level,
6531 + "pfq - pos %x: header %x, msgid %x, "
6532 + "header->msgid %x, header->size %x",
6533 + pos, (unsigned int)header, msgid,
6534 + header->msgid, header->size);
6535 + WARN(1, "invalid slot position\n");
6536 + }
6537 + }
6538 +
6539 + if (data_found) {
6540 + int count;
6541 + spin_lock(&quota_spinlock);
6542 + count = state->data_use_count;
6543 + if (count > 0)
6544 + state->data_use_count =
6545 + count - 1;
6546 + spin_unlock(&quota_spinlock);
6547 + if (count == state->data_quota)
6548 + up(&state->data_quota_event);
6549 + }
6550 +
6551 + state->slot_queue_available = slot_queue_available;
6552 + up(&state->slot_available_event);
6553 + }
6554 +}
6555 +
6556 +/* Called by the slot handler and application threads */
6557 +static VCHIQ_STATUS_T
6558 +queue_message(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
6559 + int msgid, const VCHIQ_ELEMENT_T *elements,
6560 + int count, int size, int flags)
6561 +{
6562 + VCHIQ_SHARED_STATE_T *local;
6563 + VCHIQ_SERVICE_QUOTA_T *service_quota = NULL;
6564 + VCHIQ_HEADER_T *header;
6565 + int type = VCHIQ_MSG_TYPE(msgid);
6566 +
6567 + unsigned int stride;
6568 +
6569 + local = state->local;
6570 +
6571 + stride = calc_stride(size);
6572 +
6573 + WARN_ON(!(stride <= VCHIQ_SLOT_SIZE));
6574 +
6575 + if (!(flags & QMFLAGS_NO_MUTEX_LOCK) &&
6576 + (mutex_lock_interruptible(&state->slot_mutex) != 0))
6577 + return VCHIQ_RETRY;
6578 +
6579 + if (type == VCHIQ_MSG_DATA) {
6580 + int tx_end_index;
6581 +
6582 + BUG_ON(!service);
6583 + BUG_ON((flags & (QMFLAGS_NO_MUTEX_LOCK |
6584 + QMFLAGS_NO_MUTEX_UNLOCK)) != 0);
6585 +
6586 + if (service->closing) {
6587 + /* The service has been closed */
6588 + mutex_unlock(&state->slot_mutex);
6589 + return VCHIQ_ERROR;
6590 + }
6591 +
6592 + service_quota = &state->service_quotas[service->localport];
6593 +
6594 + spin_lock(&quota_spinlock);
6595 +
6596 + /* Ensure this service doesn't use more than its quota of
6597 + ** messages or slots */
6598 + tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
6599 + state->local_tx_pos + stride - 1);
6600 +
6601 +		/* Ensure data messages overall don't use more than the
6602 +		** state-wide quota of slots */
6603 + while ((tx_end_index != state->previous_data_index) &&
6604 + (state->data_use_count == state->data_quota)) {
6605 + VCHIQ_STATS_INC(state, data_stalls);
6606 + spin_unlock(&quota_spinlock);
6607 + mutex_unlock(&state->slot_mutex);
6608 +
6609 + if (down_interruptible(&state->data_quota_event)
6610 + != 0)
6611 + return VCHIQ_RETRY;
6612 +
6613 + mutex_lock(&state->slot_mutex);
6614 + spin_lock(&quota_spinlock);
6615 + tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
6616 + state->local_tx_pos + stride - 1);
6617 + if ((tx_end_index == state->previous_data_index) ||
6618 + (state->data_use_count < state->data_quota)) {
6619 + /* Pass the signal on to other waiters */
6620 + up(&state->data_quota_event);
6621 + break;
6622 + }
6623 + }
6624 +
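+		/* Descriptive note (editorial): block while this service is
+		** at its message or slot quota, dropping the spinlock and
+		** slot_mutex so the recycle thread can free space and wake
+		** us via quota_event. */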
6625 + while ((service_quota->message_use_count ==
6626 + service_quota->message_quota) ||
6627 + ((tx_end_index != service_quota->previous_tx_index) &&
6628 + (service_quota->slot_use_count ==
6629 + service_quota->slot_quota))) {
6630 + spin_unlock(&quota_spinlock);
6631 + vchiq_log_trace(vchiq_core_log_level,
6632 + "%d: qm:%d %s,%x - quota stall "
6633 + "(msg %d, slot %d)",
6634 + state->id, service->localport,
6635 + msg_type_str(type), size,
6636 + service_quota->message_use_count,
6637 + service_quota->slot_use_count);
6638 + VCHIQ_SERVICE_STATS_INC(service, quota_stalls);
6639 + mutex_unlock(&state->slot_mutex);
6640 + if (down_interruptible(&service_quota->quota_event)
6641 + != 0)
6642 + return VCHIQ_RETRY;
6643 + if (service->closing)
6644 + return VCHIQ_ERROR;
6645 + if (mutex_lock_interruptible(&state->slot_mutex) != 0)
6646 + return VCHIQ_RETRY;
6647 + if (service->srvstate != VCHIQ_SRVSTATE_OPEN) {
6648 + /* The service has been closed */
6649 + mutex_unlock(&state->slot_mutex);
6650 + return VCHIQ_ERROR;
6651 + }
6652 + spin_lock(&quota_spinlock);
6653 + tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
6654 + state->local_tx_pos + stride - 1);
6655 + }
6656 +
6657 + spin_unlock(&quota_spinlock);
6658 + }
6659 +
6660 + header = reserve_space(state, stride, flags & QMFLAGS_IS_BLOCKING);
6661 +
6662 + if (!header) {
6663 + if (service)
6664 + VCHIQ_SERVICE_STATS_INC(service, slot_stalls);
6665 + /* In the event of a failure, return the mutex to the
6666 + state it was in */
6667 + if (!(flags & QMFLAGS_NO_MUTEX_LOCK))
6668 + mutex_unlock(&state->slot_mutex);
6669 + return VCHIQ_RETRY;
6670 + }
6671 +
6672 + if (type == VCHIQ_MSG_DATA) {
6673 + int i, pos;
6674 + int tx_end_index;
6675 + int slot_use_count;
6676 +
6677 + vchiq_log_info(vchiq_core_log_level,
6678 + "%d: qm %s@%x,%x (%d->%d)",
6679 + state->id,
6680 + msg_type_str(VCHIQ_MSG_TYPE(msgid)),
6681 + (unsigned int)header, size,
6682 + VCHIQ_MSG_SRCPORT(msgid),
6683 + VCHIQ_MSG_DSTPORT(msgid));
6684 +
6685 + BUG_ON(!service);
6686 + BUG_ON((flags & (QMFLAGS_NO_MUTEX_LOCK |
6687 + QMFLAGS_NO_MUTEX_UNLOCK)) != 0);
6688 +
6689 + for (i = 0, pos = 0; i < (unsigned int)count;
6690 + pos += elements[i++].size)
6691 + if (elements[i].size) {
6692 + if (vchiq_copy_from_user
6693 + (header->data + pos, elements[i].data,
6694 + (size_t) elements[i].size) !=
6695 + VCHIQ_SUCCESS) {
6696 + mutex_unlock(&state->slot_mutex);
6697 + VCHIQ_SERVICE_STATS_INC(service,
6698 + error_count);
6699 + return VCHIQ_ERROR;
6700 + }
6701 + if (i == 0) {
6702 + if (SRVTRACE_ENABLED(service,
6703 + VCHIQ_LOG_INFO))
6704 + vchiq_log_dump_mem("Sent", 0,
6705 + header->data + pos,
6706 + min(64u,
6707 + elements[0].size));
6708 + }
6709 + }
6710 +
6711 + spin_lock(&quota_spinlock);
6712 + service_quota->message_use_count++;
6713 +
6714 + tx_end_index =
6715 + SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos - 1);
6716 +
6717 + /* If this transmission can't fit in the last slot used by any
6718 + ** service, the data_use_count must be increased. */
6719 + if (tx_end_index != state->previous_data_index) {
6720 + state->previous_data_index = tx_end_index;
6721 + state->data_use_count++;
6722 + }
6723 +
6724 + /* If this isn't the same slot last used by this service,
6725 + ** the service's slot_use_count must be increased. */
6726 + if (tx_end_index != service_quota->previous_tx_index) {
6727 + service_quota->previous_tx_index = tx_end_index;
6728 + slot_use_count = ++service_quota->slot_use_count;
6729 + } else {
6730 + slot_use_count = 0;
6731 + }
6732 +
6733 + spin_unlock(&quota_spinlock);
6734 +
6735 + if (slot_use_count)
6736 + vchiq_log_trace(vchiq_core_log_level,
6737 + "%d: qm:%d %s,%x - slot_use->%d (hdr %p)",
6738 + state->id, service->localport,
6739 + msg_type_str(VCHIQ_MSG_TYPE(msgid)), size,
6740 + slot_use_count, header);
6741 +
6742 + VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
6743 + VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
6744 + } else {
6745 + vchiq_log_info(vchiq_core_log_level,
6746 + "%d: qm %s@%x,%x (%d->%d)", state->id,
6747 + msg_type_str(VCHIQ_MSG_TYPE(msgid)),
6748 + (unsigned int)header, size,
6749 + VCHIQ_MSG_SRCPORT(msgid),
6750 + VCHIQ_MSG_DSTPORT(msgid));
6751 + if (size != 0) {
6752 + WARN_ON(!((count == 1) && (size == elements[0].size)));
6753 + memcpy(header->data, elements[0].data,
6754 + elements[0].size);
6755 + }
6756 + VCHIQ_STATS_INC(state, ctrl_tx_count);
6757 + }
6758 +
6759 + header->msgid = msgid;
6760 + header->size = size;
6761 +
6762 + {
6763 + int svc_fourcc;
6764 +
6765 + svc_fourcc = service
6766 + ? service->base.fourcc
6767 + : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
6768 +
6769 + vchiq_log_info(SRVTRACE_LEVEL(service),
6770 + "Sent Msg %s(%u) to %c%c%c%c s:%u d:%d len:%d",
6771 + msg_type_str(VCHIQ_MSG_TYPE(msgid)),
6772 + VCHIQ_MSG_TYPE(msgid),
6773 + VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
6774 + VCHIQ_MSG_SRCPORT(msgid),
6775 + VCHIQ_MSG_DSTPORT(msgid),
6776 + size);
6777 + }
6778 +
6779 + /* Make sure the new header is visible to the peer. */
6780 + wmb();
6781 +
6782 + /* Make the new tx_pos visible to the peer. */
6783 + local->tx_pos = state->local_tx_pos;
6784 + wmb();
6785 +
6786 + if (service && (type == VCHIQ_MSG_CLOSE))
6787 + vchiq_set_service_state(service, VCHIQ_SRVSTATE_CLOSESENT);
6788 +
6789 + if (!(flags & QMFLAGS_NO_MUTEX_UNLOCK))
6790 + mutex_unlock(&state->slot_mutex);
6791 +
6792 + remote_event_signal(&state->remote->trigger);
6793 +
6794 + return VCHIQ_SUCCESS;
6795 +}
6796 +
6797 +/* Called by the slot handler and application threads */
6798 +static VCHIQ_STATUS_T
6799 +queue_message_sync(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
6800 + int msgid, const VCHIQ_ELEMENT_T *elements,
6801 + int count, int size, int is_blocking)
6802 +{
6803 + VCHIQ_SHARED_STATE_T *local;
6804 + VCHIQ_HEADER_T *header;
6805 +
6806 + local = state->local;
6807 +
6808 + if ((VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_RESUME) &&
6809 + (mutex_lock_interruptible(&state->sync_mutex) != 0))
6810 + return VCHIQ_RETRY;
6811 +
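+	/* Descriptive note (editorial): the synchronous channel has a single
+	** slot; wait until the peer has released the previous message
+	** before overwriting it. */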
6812 + remote_event_wait(&local->sync_release);
6813 +
6814 + rmb();
6815 +
6816 + header = (VCHIQ_HEADER_T *)SLOT_DATA_FROM_INDEX(state,
6817 + local->slot_sync);
6818 +
6819 + {
6820 + int oldmsgid = header->msgid;
6821 + if (oldmsgid != VCHIQ_MSGID_PADDING)
6822 + vchiq_log_error(vchiq_core_log_level,
6823 + "%d: qms - msgid %x, not PADDING",
6824 + state->id, oldmsgid);
6825 + }
6826 +
6827 + if (service) {
6828 + int i, pos;
6829 +
6830 + vchiq_log_info(vchiq_sync_log_level,
6831 + "%d: qms %s@%x,%x (%d->%d)", state->id,
6832 + msg_type_str(VCHIQ_MSG_TYPE(msgid)),
6833 + (unsigned int)header, size,
6834 + VCHIQ_MSG_SRCPORT(msgid),
6835 + VCHIQ_MSG_DSTPORT(msgid));
6836 +
6837 + for (i = 0, pos = 0; i < (unsigned int)count;
6838 + pos += elements[i++].size)
6839 + if (elements[i].size) {
6840 + if (vchiq_copy_from_user
6841 + (header->data + pos, elements[i].data,
6842 + (size_t) elements[i].size) !=
6843 + VCHIQ_SUCCESS) {
6844 + mutex_unlock(&state->sync_mutex);
6845 + VCHIQ_SERVICE_STATS_INC(service,
6846 + error_count);
6847 + return VCHIQ_ERROR;
6848 + }
6849 + if (i == 0) {
6850 + if (vchiq_sync_log_level >=
6851 + VCHIQ_LOG_TRACE)
6852 + vchiq_log_dump_mem("Sent Sync",
6853 + 0, header->data + pos,
6854 + min(64u,
6855 + elements[0].size));
6856 + }
6857 + }
6858 +
6859 + VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
6860 + VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
6861 + } else {
6862 + vchiq_log_info(vchiq_sync_log_level,
6863 + "%d: qms %s@%x,%x (%d->%d)", state->id,
6864 + msg_type_str(VCHIQ_MSG_TYPE(msgid)),
6865 + (unsigned int)header, size,
6866 + VCHIQ_MSG_SRCPORT(msgid),
6867 + VCHIQ_MSG_DSTPORT(msgid));
6868 + if (size != 0) {
6869 + WARN_ON(!((count == 1) && (size == elements[0].size)));
6870 + memcpy(header->data, elements[0].data,
6871 + elements[0].size);
6872 + }
6873 + VCHIQ_STATS_INC(state, ctrl_tx_count);
6874 + }
6875 +
6876 + header->size = size;
6877 + header->msgid = msgid;
6878 +
6879 + if (vchiq_sync_log_level >= VCHIQ_LOG_TRACE) {
6880 + int svc_fourcc;
6881 +
6882 + svc_fourcc = service
6883 + ? service->base.fourcc
6884 + : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
6885 +
6886 + vchiq_log_trace(vchiq_sync_log_level,
6887 + "Sent Sync Msg %s(%u) to %c%c%c%c s:%u d:%d len:%d",
6888 + msg_type_str(VCHIQ_MSG_TYPE(msgid)),
6889 + VCHIQ_MSG_TYPE(msgid),
6890 + VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
6891 + VCHIQ_MSG_SRCPORT(msgid),
6892 + VCHIQ_MSG_DSTPORT(msgid),
6893 + size);
6894 + }
6895 +
6896 + /* Make sure the new header is visible to the peer. */
6897 + wmb();
6898 +
6899 + remote_event_signal(&state->remote->sync_trigger);
6900 +
6901 + if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_PAUSE)
6902 + mutex_unlock(&state->sync_mutex);
6903 +
6904 + return VCHIQ_SUCCESS;
6905 +}
6906 +
6907 +static inline void
6908 +claim_slot(VCHIQ_SLOT_INFO_T *slot)
6909 +{
6910 + slot->use_count++;
6911 +}
6912 +
6913 +static void
6914 +release_slot(VCHIQ_STATE_T *state, VCHIQ_SLOT_INFO_T *slot_info,
6915 + VCHIQ_HEADER_T *header, VCHIQ_SERVICE_T *service)
6916 +{
6917 + int release_count;
6918 +
6919 + mutex_lock(&state->recycle_mutex);
6920 +
6921 + if (header) {
6922 + int msgid = header->msgid;
6923 + if (((msgid & VCHIQ_MSGID_CLAIMED) == 0) ||
6924 + (service && service->closing)) {
6925 + mutex_unlock(&state->recycle_mutex);
6926 + return;
6927 + }
6928 +
6929 + /* Rewrite the message header to prevent a double
6930 + ** release */
6931 + header->msgid = msgid & ~VCHIQ_MSGID_CLAIMED;
6932 + }
6933 +
6934 + release_count = slot_info->release_count;
6935 + slot_info->release_count = ++release_count;
6936 +
6937 + if (release_count == slot_info->use_count) {
6938 + int slot_queue_recycle;
6939 + /* Add to the freed queue */
6940 +
6941 + /* A read barrier is necessary here to prevent speculative
6942 + ** fetches of remote->slot_queue_recycle from overtaking the
6943 + ** mutex. */
6944 + rmb();
6945 +
6946 + slot_queue_recycle = state->remote->slot_queue_recycle;
6947 + state->remote->slot_queue[slot_queue_recycle &
6948 + VCHIQ_SLOT_QUEUE_MASK] =
6949 + SLOT_INDEX_FROM_INFO(state, slot_info);
6950 + state->remote->slot_queue_recycle = slot_queue_recycle + 1;
6951 + vchiq_log_info(vchiq_core_log_level,
6952 + "%d: release_slot %d - recycle->%x",
6953 + state->id, SLOT_INDEX_FROM_INFO(state, slot_info),
6954 + state->remote->slot_queue_recycle);
6955 +
6956 + /* A write barrier is necessary, but remote_event_signal
6957 + ** contains one. */
6958 + remote_event_signal(&state->remote->recycle);
6959 + }
6960 +
6961 + mutex_unlock(&state->recycle_mutex);
6962 +}
6963 +
6964 +/* Called by the slot handler - don't hold the bulk mutex */
6965 +static VCHIQ_STATUS_T
6966 +notify_bulks(VCHIQ_SERVICE_T *service, VCHIQ_BULK_QUEUE_T *queue,
6967 + int retry_poll)
6968 +{
6969 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
6970 +
6971 + vchiq_log_trace(vchiq_core_log_level,
6972 + "%d: nb:%d %cx - p=%x rn=%x r=%x",
6973 + service->state->id, service->localport,
6974 + (queue == &service->bulk_tx) ? 't' : 'r',
6975 + queue->process, queue->remote_notify, queue->remove);
6976 +
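+	/* Descriptive note (editorial): only the master sends
+	** BULK_RX_DONE/BULK_TX_DONE completions to the peer; on the slave
+	** they arrive from the remote, so its remote_notify position simply
+	** tracks process. */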
6977 + if (service->state->is_master) {
6978 + while (queue->remote_notify != queue->process) {
6979 + VCHIQ_BULK_T *bulk =
6980 + &queue->bulks[BULK_INDEX(queue->remote_notify)];
6981 + int msgtype = (bulk->dir == VCHIQ_BULK_TRANSMIT) ?
6982 + VCHIQ_MSG_BULK_RX_DONE : VCHIQ_MSG_BULK_TX_DONE;
6983 + int msgid = VCHIQ_MAKE_MSG(msgtype, service->localport,
6984 + service->remoteport);
6985 + VCHIQ_ELEMENT_T element = { &bulk->actual, 4 };
6986 + /* Only reply to non-dummy bulk requests */
6987 + if (bulk->remote_data) {
6988 + status = queue_message(service->state, NULL,
6989 + msgid, &element, 1, 4, 0);
6990 + if (status != VCHIQ_SUCCESS)
6991 + break;
6992 + }
6993 + queue->remote_notify++;
6994 + }
6995 + } else {
6996 + queue->remote_notify = queue->process;
6997 + }
6998 +
6999 + if (status == VCHIQ_SUCCESS) {
7000 + while (queue->remove != queue->remote_notify) {
7001 + VCHIQ_BULK_T *bulk =
7002 + &queue->bulks[BULK_INDEX(queue->remove)];
7003 +
7004 + /* Only generate callbacks for non-dummy bulk
7005 + ** requests, and non-terminated services */
7006 + if (bulk->data && service->instance) {
7007 + if (bulk->actual != VCHIQ_BULK_ACTUAL_ABORTED) {
7008 + if (bulk->dir == VCHIQ_BULK_TRANSMIT) {
7009 + VCHIQ_SERVICE_STATS_INC(service,
7010 + bulk_tx_count);
7011 + VCHIQ_SERVICE_STATS_ADD(service,
7012 + bulk_tx_bytes,
7013 + bulk->actual);
7014 + } else {
7015 + VCHIQ_SERVICE_STATS_INC(service,
7016 + bulk_rx_count);
7017 + VCHIQ_SERVICE_STATS_ADD(service,
7018 + bulk_rx_bytes,
7019 + bulk->actual);
7020 + }
7021 + } else {
7022 + VCHIQ_SERVICE_STATS_INC(service,
7023 + bulk_aborted_count);
7024 + }
7025 + if (bulk->mode == VCHIQ_BULK_MODE_BLOCKING) {
7026 + struct bulk_waiter *waiter;
7027 + spin_lock(&bulk_waiter_spinlock);
7028 + waiter = bulk->userdata;
7029 + if (waiter) {
7030 + waiter->actual = bulk->actual;
7031 + up(&waiter->event);
7032 + }
7033 + spin_unlock(&bulk_waiter_spinlock);
7034 + } else if (bulk->mode ==
7035 + VCHIQ_BULK_MODE_CALLBACK) {
7036 + VCHIQ_REASON_T reason = (bulk->dir ==
7037 + VCHIQ_BULK_TRANSMIT) ?
7038 + ((bulk->actual ==
7039 + VCHIQ_BULK_ACTUAL_ABORTED) ?
7040 + VCHIQ_BULK_TRANSMIT_ABORTED :
7041 + VCHIQ_BULK_TRANSMIT_DONE) :
7042 + ((bulk->actual ==
7043 + VCHIQ_BULK_ACTUAL_ABORTED) ?
7044 + VCHIQ_BULK_RECEIVE_ABORTED :
7045 + VCHIQ_BULK_RECEIVE_DONE);
7046 + status = make_service_callback(service,
7047 + reason, NULL, bulk->userdata);
7048 + if (status == VCHIQ_RETRY)
7049 + break;
7050 + }
7051 + }
7052 +
7053 + queue->remove++;
7054 + up(&service->bulk_remove_event);
7055 + }
7056 + if (!retry_poll)
7057 + status = VCHIQ_SUCCESS;
7058 + }
7059 +
7060 + if (status == VCHIQ_RETRY)
7061 + request_poll(service->state, service,
7062 + (queue == &service->bulk_tx) ?
7063 + VCHIQ_POLL_TXNOTIFY : VCHIQ_POLL_RXNOTIFY);
7064 +
7065 + return status;
7066 +}
7067 +
7068 +/* Called by the slot handler thread */
7069 +static void
7070 +poll_services(VCHIQ_STATE_T *state)
7071 +{
7072 + int group, i;
7073 +
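+	/* Descriptive note (editorial): each word of poll_services covers 32
+	** local ports; atomic_xchg claims the whole word, so any flags
+	** raised after this point are picked up on the next poll. */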
7074 + for (group = 0; group < BITSET_SIZE(state->unused_service); group++) {
7075 + uint32_t flags;
7076 + flags = atomic_xchg(&state->poll_services[group], 0);
7077 + for (i = 0; flags; i++) {
7078 + if (flags & (1 << i)) {
7079 + VCHIQ_SERVICE_T *service =
7080 + find_service_by_port(state,
7081 + (group<<5) + i);
7082 + uint32_t service_flags;
7083 + flags &= ~(1 << i);
7084 + if (!service)
7085 + continue;
7086 + service_flags =
7087 + atomic_xchg(&service->poll_flags, 0);
7088 + if (service_flags &
7089 + (1 << VCHIQ_POLL_REMOVE)) {
7090 + vchiq_log_info(vchiq_core_log_level,
7091 + "%d: ps - remove %d<->%d",
7092 + state->id, service->localport,
7093 + service->remoteport);
7094 +
7095 + /* Make it look like a client, because
7096 + it must be removed and not left in
7097 + the LISTENING state. */
7098 + service->public_fourcc =
7099 + VCHIQ_FOURCC_INVALID;
7100 +
7101 + if (vchiq_close_service_internal(
7102 + service, 0/*!close_recvd*/) !=
7103 + VCHIQ_SUCCESS)
7104 + request_poll(state, service,
7105 + VCHIQ_POLL_REMOVE);
7106 + } else if (service_flags &
7107 + (1 << VCHIQ_POLL_TERMINATE)) {
7108 + vchiq_log_info(vchiq_core_log_level,
7109 + "%d: ps - terminate %d<->%d",
7110 + state->id, service->localport,
7111 + service->remoteport);
7112 + if (vchiq_close_service_internal(
7113 + service, 0/*!close_recvd*/) !=
7114 + VCHIQ_SUCCESS)
7115 + request_poll(state, service,
7116 + VCHIQ_POLL_TERMINATE);
7117 + }
7118 + if (service_flags & (1 << VCHIQ_POLL_TXNOTIFY))
7119 + notify_bulks(service,
7120 + &service->bulk_tx,
7121 + 1/*retry_poll*/);
7122 + if (service_flags & (1 << VCHIQ_POLL_RXNOTIFY))
7123 + notify_bulks(service,
7124 + &service->bulk_rx,
7125 + 1/*retry_poll*/);
7126 + unlock_service(service);
7127 + }
7128 + }
7129 + }
7130 +}
7131 +
7132 +/* Called by the slot handler or application threads, holding the bulk mutex. */
7133 +static int
7134 +resolve_bulks(VCHIQ_SERVICE_T *service, VCHIQ_BULK_QUEUE_T *queue)
7135 +{
7136 + VCHIQ_STATE_T *state = service->state;
7137 + int resolved = 0;
7138 + int rc;
7139 +
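+	/* Descriptive note (editorial): a transfer can only proceed once
+	** both sides have queued their half, i.e. while process lags both
+	** local_insert and remote_insert. */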
7140 + while ((queue->process != queue->local_insert) &&
7141 + (queue->process != queue->remote_insert)) {
7142 + VCHIQ_BULK_T *bulk = &queue->bulks[BULK_INDEX(queue->process)];
7143 +
7144 + vchiq_log_trace(vchiq_core_log_level,
7145 + "%d: rb:%d %cx - li=%x ri=%x p=%x",
7146 + state->id, service->localport,
7147 + (queue == &service->bulk_tx) ? 't' : 'r',
7148 + queue->local_insert, queue->remote_insert,
7149 + queue->process);
7150 +
7151 + WARN_ON(!((int)(queue->local_insert - queue->process) > 0));
7152 + WARN_ON(!((int)(queue->remote_insert - queue->process) > 0));
7153 +
7154 + rc = mutex_lock_interruptible(&state->bulk_transfer_mutex);
7155 + if (rc != 0)
7156 + break;
7157 +
7158 + vchiq_transfer_bulk(bulk);
7159 + mutex_unlock(&state->bulk_transfer_mutex);
7160 +
7161 + if (SRVTRACE_ENABLED(service, VCHIQ_LOG_INFO)) {
7162 + const char *header = (queue == &service->bulk_tx) ?
7163 + "Send Bulk to" : "Recv Bulk from";
7164 + if (bulk->actual != VCHIQ_BULK_ACTUAL_ABORTED)
7165 + vchiq_log_info(SRVTRACE_LEVEL(service),
7166 + "%s %c%c%c%c d:%d len:%d %x<->%x",
7167 + header,
7168 + VCHIQ_FOURCC_AS_4CHARS(
7169 + service->base.fourcc),
7170 + service->remoteport,
7171 + bulk->size,
7172 + (unsigned int)bulk->data,
7173 + (unsigned int)bulk->remote_data);
7174 + else
7175 + vchiq_log_info(SRVTRACE_LEVEL(service),
7176 + "%s %c%c%c%c d:%d ABORTED - tx len:%d,"
7177 + " rx len:%d %x<->%x",
7178 + header,
7179 + VCHIQ_FOURCC_AS_4CHARS(
7180 + service->base.fourcc),
7181 + service->remoteport,
7182 + bulk->size,
7183 + bulk->remote_size,
7184 + (unsigned int)bulk->data,
7185 + (unsigned int)bulk->remote_data);
7186 + }
7187 +
7188 + vchiq_complete_bulk(bulk);
7189 + queue->process++;
7190 + resolved++;
7191 + }
7192 + return resolved;
7193 +}
7194 +
7195 +/* Called with the bulk_mutex held */
7196 +static void
7197 +abort_outstanding_bulks(VCHIQ_SERVICE_T *service, VCHIQ_BULK_QUEUE_T *queue)
7198 +{
7199 + int is_tx = (queue == &service->bulk_tx);
7200 + vchiq_log_trace(vchiq_core_log_level,
7201 + "%d: aob:%d %cx - li=%x ri=%x p=%x",
7202 + service->state->id, service->localport, is_tx ? 't' : 'r',
7203 + queue->local_insert, queue->remote_insert, queue->process);
7204 +
7205 + WARN_ON(!((int)(queue->local_insert - queue->process) >= 0));
7206 + WARN_ON(!((int)(queue->remote_insert - queue->process) >= 0));
7207 +
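+	/* Descriptive note (editorial): every queued bulk must be completed;
+	** where only one side has queued its half, fabricate the missing
+	** half as a dummy so the transfer can be reported as ABORTED. */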
7208 + while ((queue->process != queue->local_insert) ||
7209 + (queue->process != queue->remote_insert)) {
7210 + VCHIQ_BULK_T *bulk = &queue->bulks[BULK_INDEX(queue->process)];
7211 +
7212 + if (queue->process == queue->remote_insert) {
7213 + /* fabricate a matching dummy bulk */
7214 + bulk->remote_data = NULL;
7215 + bulk->remote_size = 0;
7216 + queue->remote_insert++;
7217 + }
7218 +
7219 + if (queue->process != queue->local_insert) {
7220 + vchiq_complete_bulk(bulk);
7221 +
7222 + vchiq_log_info(SRVTRACE_LEVEL(service),
7223 + "%s %c%c%c%c d:%d ABORTED - tx len:%d, "
7224 + "rx len:%d",
7225 + is_tx ? "Send Bulk to" : "Recv Bulk from",
7226 + VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
7227 + service->remoteport,
7228 + bulk->size,
7229 + bulk->remote_size);
7230 + } else {
7231 + /* fabricate a matching dummy bulk */
7232 + bulk->data = NULL;
7233 + bulk->size = 0;
7234 + bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
7235 + bulk->dir = is_tx ? VCHIQ_BULK_TRANSMIT :
7236 + VCHIQ_BULK_RECEIVE;
7237 + queue->local_insert++;
7238 + }
7239 +
7240 + queue->process++;
7241 + }
7242 +}
7243 +
7244 +/* Called from the slot handler thread */
7245 +static void
7246 +pause_bulks(VCHIQ_STATE_T *state)
7247 +{
7248 + if (unlikely(atomic_inc_return(&pause_bulks_count) != 1)) {
7249 + WARN_ON_ONCE(1);
7250 + atomic_set(&pause_bulks_count, 1);
7251 + return;
7252 + }
7253 +
7254 + /* Block bulk transfers from all services */
7255 + mutex_lock(&state->bulk_transfer_mutex);
7256 +}
7257 +
7258 +/* Called from the slot handler thread */
7259 +static void
7260 +resume_bulks(VCHIQ_STATE_T *state)
7261 +{
7262 + int i;
7263 + if (unlikely(atomic_dec_return(&pause_bulks_count) != 0)) {
7264 + WARN_ON_ONCE(1);
7265 + atomic_set(&pause_bulks_count, 0);
7266 + return;
7267 + }
7268 +
7269 + /* Allow bulk transfers from all services */
7270 + mutex_unlock(&state->bulk_transfer_mutex);
7271 +
7272 + if (state->deferred_bulks == 0)
7273 + return;
7274 +
7275 +	/* Deal with any bulks which had to be deferred due to being in
7276 +	 * the paused state. Don't try to match the number of deferred
7277 +	 * bulks, in case a service has been closed in the interim -
7278 +	 * just process all bulk queues for all services */
7279 + vchiq_log_info(vchiq_core_log_level, "%s: processing %d deferred bulks",
7280 + __func__, state->deferred_bulks);
7281 +
7282 + for (i = 0; i < state->unused_service; i++) {
7283 + VCHIQ_SERVICE_T *service = state->services[i];
7284 + int resolved_rx = 0;
7285 + int resolved_tx = 0;
7286 + if (!service || (service->srvstate != VCHIQ_SRVSTATE_OPEN))
7287 + continue;
7288 +
7289 + mutex_lock(&service->bulk_mutex);
7290 + resolved_rx = resolve_bulks(service, &service->bulk_rx);
7291 + resolved_tx = resolve_bulks(service, &service->bulk_tx);
7292 + mutex_unlock(&service->bulk_mutex);
7293 + if (resolved_rx)
7294 + notify_bulks(service, &service->bulk_rx, 1);
7295 + if (resolved_tx)
7296 + notify_bulks(service, &service->bulk_tx, 1);
7297 + }
7298 + state->deferred_bulks = 0;
7299 +}
7300 +
7301 +static int
7302 +parse_open(VCHIQ_STATE_T *state, VCHIQ_HEADER_T *header)
7303 +{
7304 + VCHIQ_SERVICE_T *service = NULL;
7305 + int msgid, size;
7306 + int type;
7307 + unsigned int localport, remoteport;
7308 +
7309 + msgid = header->msgid;
7310 + size = header->size;
7311 + type = VCHIQ_MSG_TYPE(msgid);
7312 + localport = VCHIQ_MSG_DSTPORT(msgid);
7313 + remoteport = VCHIQ_MSG_SRCPORT(msgid);
7314 + if (size >= sizeof(struct vchiq_open_payload)) {
7315 + const struct vchiq_open_payload *payload =
7316 + (struct vchiq_open_payload *)header->data;
7317 + unsigned int fourcc;
7318 +
7319 + fourcc = payload->fourcc;
7320 + vchiq_log_info(vchiq_core_log_level,
7321 + "%d: prs OPEN@%x (%d->'%c%c%c%c')",
7322 + state->id, (unsigned int)header,
7323 + localport,
7324 + VCHIQ_FOURCC_AS_4CHARS(fourcc));
7325 +
7326 + service = get_listening_service(state, fourcc);
7327 +
7328 + if (service) {
7329 + /* A matching service exists */
7330 + short version = payload->version;
7331 + short version_min = payload->version_min;
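+			/* Descriptive note (editorial): reject the OPEN
+			** unless the two version ranges
+			** [version_min..version] overlap. */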
7332 + if ((service->version < version_min) ||
7333 + (version < service->version_min)) {
7334 + /* Version mismatch */
7335 + vchiq_loud_error_header();
7336 + vchiq_loud_error("%d: service %d (%c%c%c%c) "
7337 + "version mismatch - local (%d, min %d)"
7338 + " vs. remote (%d, min %d)",
7339 + state->id, service->localport,
7340 + VCHIQ_FOURCC_AS_4CHARS(fourcc),
7341 + service->version, service->version_min,
7342 + version, version_min);
7343 + vchiq_loud_error_footer();
7344 + unlock_service(service);
7345 + service = NULL;
7346 + goto fail_open;
7347 + }
7348 + service->peer_version = version;
7349 +
7350 + if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
7351 + struct vchiq_openack_payload ack_payload = {
7352 + service->version
7353 + };
7354 + VCHIQ_ELEMENT_T body = {
7355 + &ack_payload,
7356 + sizeof(ack_payload)
7357 + };
7358 +
7359 + if (state->version_common <
7360 + VCHIQ_VERSION_SYNCHRONOUS_MODE)
7361 + service->sync = 0;
7362 +
7363 + /* Acknowledge the OPEN */
7364 + if (service->sync &&
7365 + (state->version_common >=
7366 + VCHIQ_VERSION_SYNCHRONOUS_MODE)) {
7367 + if (queue_message_sync(state, NULL,
7368 + VCHIQ_MAKE_MSG(
7369 + VCHIQ_MSG_OPENACK,
7370 + service->localport,
7371 + remoteport),
7372 + &body, 1, sizeof(ack_payload),
7373 + 0) == VCHIQ_RETRY)
7374 + goto bail_not_ready;
7375 + } else {
7376 + if (queue_message(state, NULL,
7377 + VCHIQ_MAKE_MSG(
7378 + VCHIQ_MSG_OPENACK,
7379 + service->localport,
7380 + remoteport),
7381 + &body, 1, sizeof(ack_payload),
7382 + 0) == VCHIQ_RETRY)
7383 + goto bail_not_ready;
7384 + }
7385 +
7386 + /* The service is now open */
7387 + vchiq_set_service_state(service,
7388 + service->sync ? VCHIQ_SRVSTATE_OPENSYNC
7389 + : VCHIQ_SRVSTATE_OPEN);
7390 + }
7391 +
7392 + service->remoteport = remoteport;
7393 + service->client_id = ((int *)header->data)[1];
7394 + if (make_service_callback(service, VCHIQ_SERVICE_OPENED,
7395 + NULL, NULL) == VCHIQ_RETRY) {
7396 + /* Bail out if not ready */
7397 + service->remoteport = VCHIQ_PORT_FREE;
7398 + goto bail_not_ready;
7399 + }
7400 +
7401 + /* Success - the message has been dealt with */
7402 + unlock_service(service);
7403 + return 1;
7404 + }
7405 + }
7406 +
7407 +fail_open:
7408 + /* No available service, or an invalid request - send a CLOSE */
7409 + if (queue_message(state, NULL,
7410 + VCHIQ_MAKE_MSG(VCHIQ_MSG_CLOSE, 0, VCHIQ_MSG_SRCPORT(msgid)),
7411 + NULL, 0, 0, 0) == VCHIQ_RETRY)
7412 + goto bail_not_ready;
7413 +
7414 + return 1;
7415 +
7416 +bail_not_ready:
7417 + if (service)
7418 + unlock_service(service);
7419 +
7420 + return 0;
7421 +}
7422 +
7423 +/* Called by the slot handler thread */
7424 +static void
7425 +parse_rx_slots(VCHIQ_STATE_T *state)
7426 +{
7427 + VCHIQ_SHARED_STATE_T *remote = state->remote;
7428 + VCHIQ_SERVICE_T *service = NULL;
7429 + int tx_pos;
7430 + DEBUG_INITIALISE(state->local)
7431 +
7432 + tx_pos = remote->tx_pos;
7433 +
7434 + while (state->rx_pos != tx_pos) {
7435 + VCHIQ_HEADER_T *header;
7436 + int msgid, size;
7437 + int type;
7438 + unsigned int localport, remoteport;
7439 +
7440 + DEBUG_TRACE(PARSE_LINE);
7441 + if (!state->rx_data) {
7442 + int rx_index;
7443 + WARN_ON(!((state->rx_pos & VCHIQ_SLOT_MASK) == 0));
7444 + rx_index = remote->slot_queue[
7445 + SLOT_QUEUE_INDEX_FROM_POS(state->rx_pos) &
7446 + VCHIQ_SLOT_QUEUE_MASK];
7447 + state->rx_data = (char *)SLOT_DATA_FROM_INDEX(state,
7448 + rx_index);
7449 + state->rx_info = SLOT_INFO_FROM_INDEX(state, rx_index);
7450 +
7451 + /* Initialise use_count to one, and increment
7452 + ** release_count at the end of the slot to avoid
7453 + ** releasing the slot prematurely. */
7454 + state->rx_info->use_count = 1;
7455 + state->rx_info->release_count = 0;
7456 + }
7457 +
7458 + header = (VCHIQ_HEADER_T *)(state->rx_data +
7459 + (state->rx_pos & VCHIQ_SLOT_MASK));
7460 + DEBUG_VALUE(PARSE_HEADER, (int)header);
7461 + msgid = header->msgid;
7462 + DEBUG_VALUE(PARSE_MSGID, msgid);
7463 + size = header->size;
7464 + type = VCHIQ_MSG_TYPE(msgid);
7465 + localport = VCHIQ_MSG_DSTPORT(msgid);
7466 + remoteport = VCHIQ_MSG_SRCPORT(msgid);
7467 +
7468 + if (type != VCHIQ_MSG_DATA)
7469 + VCHIQ_STATS_INC(state, ctrl_rx_count);
7470 +
7471 + switch (type) {
7472 + case VCHIQ_MSG_OPENACK:
7473 + case VCHIQ_MSG_CLOSE:
7474 + case VCHIQ_MSG_DATA:
7475 + case VCHIQ_MSG_BULK_RX:
7476 + case VCHIQ_MSG_BULK_TX:
7477 + case VCHIQ_MSG_BULK_RX_DONE:
7478 + case VCHIQ_MSG_BULK_TX_DONE:
7479 + service = find_service_by_port(state, localport);
7480 + if ((!service ||
7481 + ((service->remoteport != remoteport) &&
7482 + (service->remoteport != VCHIQ_PORT_FREE))) &&
7483 + (localport == 0) &&
7484 + (type == VCHIQ_MSG_CLOSE)) {
7485 + /* This could be a CLOSE from a client which
7486 + hadn't yet received the OPENACK - look for
7487 + the connected service */
7488 + if (service)
7489 + unlock_service(service);
7490 + service = get_connected_service(state,
7491 + remoteport);
7492 + if (service)
7493 + vchiq_log_warning(vchiq_core_log_level,
7494 + "%d: prs %s@%x (%d->%d) - "
7495 + "found connected service %d",
7496 + state->id, msg_type_str(type),
7497 + (unsigned int)header,
7498 + remoteport, localport,
7499 + service->localport);
7500 + }
7501 +
7502 + if (!service) {
7503 + vchiq_log_error(vchiq_core_log_level,
7504 + "%d: prs %s@%x (%d->%d) - "
7505 + "invalid/closed service %d",
7506 + state->id, msg_type_str(type),
7507 + (unsigned int)header,
7508 + remoteport, localport, localport);
7509 + goto skip_message;
7510 + }
7511 + break;
7512 + default:
7513 + break;
7514 + }
7515 +
7516 + if (SRVTRACE_ENABLED(service, VCHIQ_LOG_INFO)) {
7517 + int svc_fourcc;
7518 +
7519 + svc_fourcc = service
7520 + ? service->base.fourcc
7521 + : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
7522 + vchiq_log_info(SRVTRACE_LEVEL(service),
7523 + "Rcvd Msg %s(%u) from %c%c%c%c s:%d d:%d "
7524 + "len:%d",
7525 + msg_type_str(type), type,
7526 + VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
7527 + remoteport, localport, size);
7528 + if (size > 0)
7529 + vchiq_log_dump_mem("Rcvd", 0, header->data,
7530 + min(64, size));
7531 + }
7532 +
7533 + if (((unsigned int)header & VCHIQ_SLOT_MASK) + calc_stride(size)
7534 + > VCHIQ_SLOT_SIZE) {
7535 + vchiq_log_error(vchiq_core_log_level,
7536 + "header %x (msgid %x) - size %x too big for "
7537 + "slot",
7538 + (unsigned int)header, (unsigned int)msgid,
7539 + (unsigned int)size);
7540 + WARN(1, "oversized for slot\n");
7541 + }
7542 +
7543 + switch (type) {
7544 + case VCHIQ_MSG_OPEN:
7545 + WARN_ON(!(VCHIQ_MSG_DSTPORT(msgid) == 0));
7546 + if (!parse_open(state, header))
7547 + goto bail_not_ready;
7548 + break;
7549 + case VCHIQ_MSG_OPENACK:
7550 + if (size >= sizeof(struct vchiq_openack_payload)) {
7551 + const struct vchiq_openack_payload *payload =
7552 + (struct vchiq_openack_payload *)
7553 + header->data;
7554 + service->peer_version = payload->version;
7555 + }
7556 + vchiq_log_info(vchiq_core_log_level,
7557 + "%d: prs OPENACK@%x,%x (%d->%d) v:%d",
7558 + state->id, (unsigned int)header, size,
7559 + remoteport, localport, service->peer_version);
7560 + if (service->srvstate ==
7561 + VCHIQ_SRVSTATE_OPENING) {
7562 + service->remoteport = remoteport;
7563 + vchiq_set_service_state(service,
7564 + VCHIQ_SRVSTATE_OPEN);
7565 + up(&service->remove_event);
7566 + } else
7567 + vchiq_log_error(vchiq_core_log_level,
7568 + "OPENACK received in state %s",
7569 + srvstate_names[service->srvstate]);
7570 + break;
7571 + case VCHIQ_MSG_CLOSE:
7572 + WARN_ON(size != 0); /* There should be no data */
7573 +
7574 + vchiq_log_info(vchiq_core_log_level,
7575 + "%d: prs CLOSE@%x (%d->%d)",
7576 + state->id, (unsigned int)header,
7577 + remoteport, localport);
7578 +
7579 + mark_service_closing_internal(service, 1);
7580 +
7581 + if (vchiq_close_service_internal(service,
7582 + 1/*close_recvd*/) == VCHIQ_RETRY)
7583 + goto bail_not_ready;
7584 +
7585 + vchiq_log_info(vchiq_core_log_level,
7586 + "Close Service %c%c%c%c s:%u d:%d",
7587 + VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
7588 + service->localport,
7589 + service->remoteport);
7590 + break;
7591 + case VCHIQ_MSG_DATA:
7592 + vchiq_log_info(vchiq_core_log_level,
7593 + "%d: prs DATA@%x,%x (%d->%d)",
7594 + state->id, (unsigned int)header, size,
7595 + remoteport, localport);
7596 +
7597 + if ((service->remoteport == remoteport)
7598 + && (service->srvstate ==
7599 + VCHIQ_SRVSTATE_OPEN)) {
7600 + header->msgid = msgid | VCHIQ_MSGID_CLAIMED;
7601 + claim_slot(state->rx_info);
7602 + DEBUG_TRACE(PARSE_LINE);
7603 + if (make_service_callback(service,
7604 + VCHIQ_MESSAGE_AVAILABLE, header,
7605 + NULL) == VCHIQ_RETRY) {
7606 + DEBUG_TRACE(PARSE_LINE);
7607 + goto bail_not_ready;
7608 + }
7609 + VCHIQ_SERVICE_STATS_INC(service, ctrl_rx_count);
7610 + VCHIQ_SERVICE_STATS_ADD(service, ctrl_rx_bytes,
7611 + size);
7612 + } else {
7613 + VCHIQ_STATS_INC(state, error_count);
7614 + }
7615 + break;
7616 + case VCHIQ_MSG_CONNECT:
7617 + vchiq_log_info(vchiq_core_log_level,
7618 + "%d: prs CONNECT@%x",
7619 + state->id, (unsigned int)header);
7620 + state->version_common = ((VCHIQ_SLOT_ZERO_T *)
7621 + state->slot_data)->version;
7622 + up(&state->connect);
7623 + break;
7624 + case VCHIQ_MSG_BULK_RX:
7625 + case VCHIQ_MSG_BULK_TX: {
7626 + VCHIQ_BULK_QUEUE_T *queue;
7627 + WARN_ON(!state->is_master);
7628 + queue = (type == VCHIQ_MSG_BULK_RX) ?
7629 + &service->bulk_tx : &service->bulk_rx;
7630 + if ((service->remoteport == remoteport)
7631 + && (service->srvstate ==
7632 + VCHIQ_SRVSTATE_OPEN)) {
7633 + VCHIQ_BULK_T *bulk;
7634 + int resolved = 0;
7635 +
7636 + DEBUG_TRACE(PARSE_LINE);
7637 + if (mutex_lock_interruptible(
7638 + &service->bulk_mutex) != 0) {
7639 + DEBUG_TRACE(PARSE_LINE);
7640 + goto bail_not_ready;
7641 + }
7642 +
7643 + WARN_ON(!(queue->remote_insert < queue->remove +
7644 + VCHIQ_NUM_SERVICE_BULKS));
7645 + bulk = &queue->bulks[
7646 + BULK_INDEX(queue->remote_insert)];
7647 + bulk->remote_data =
7648 + (void *)((int *)header->data)[0];
7649 + bulk->remote_size = ((int *)header->data)[1];
7650 + wmb();
7651 +
7652 + vchiq_log_info(vchiq_core_log_level,
7653 + "%d: prs %s@%x (%d->%d) %x@%x",
7654 + state->id, msg_type_str(type),
7655 + (unsigned int)header,
7656 + remoteport, localport,
7657 + bulk->remote_size,
7658 + (unsigned int)bulk->remote_data);
7659 +
7660 + queue->remote_insert++;
7661 +
7662 + if (atomic_read(&pause_bulks_count)) {
7663 + state->deferred_bulks++;
7664 + vchiq_log_info(vchiq_core_log_level,
7665 + "%s: deferring bulk (%d)",
7666 + __func__,
7667 + state->deferred_bulks);
7668 + if (state->conn_state !=
7669 + VCHIQ_CONNSTATE_PAUSE_SENT)
7670 + vchiq_log_error(
7671 + vchiq_core_log_level,
7672 + "%s: bulks paused in "
7673 + "unexpected state %s",
7674 + __func__,
7675 + conn_state_names[
7676 + state->conn_state]);
7677 + } else if (state->conn_state ==
7678 + VCHIQ_CONNSTATE_CONNECTED) {
7679 + DEBUG_TRACE(PARSE_LINE);
7680 + resolved = resolve_bulks(service,
7681 + queue);
7682 + }
7683 +
7684 + mutex_unlock(&service->bulk_mutex);
7685 + if (resolved)
7686 + notify_bulks(service, queue,
7687 + 1/*retry_poll*/);
7688 + }
7689 + } break;
7690 + case VCHIQ_MSG_BULK_RX_DONE:
7691 + case VCHIQ_MSG_BULK_TX_DONE:
7692 + WARN_ON(state->is_master);
7693 + if ((service->remoteport == remoteport)
7694 + && (service->srvstate !=
7695 + VCHIQ_SRVSTATE_FREE)) {
7696 + VCHIQ_BULK_QUEUE_T *queue;
7697 + VCHIQ_BULK_T *bulk;
7698 +
7699 + queue = (type == VCHIQ_MSG_BULK_RX_DONE) ?
7700 + &service->bulk_rx : &service->bulk_tx;
7701 +
7702 + DEBUG_TRACE(PARSE_LINE);
7703 + if (mutex_lock_interruptible(
7704 + &service->bulk_mutex) != 0) {
7705 + DEBUG_TRACE(PARSE_LINE);
7706 + goto bail_not_ready;
7707 + }
7708 + if ((int)(queue->remote_insert -
7709 + queue->local_insert) >= 0) {
7710 + vchiq_log_error(vchiq_core_log_level,
7711 + "%d: prs %s@%x (%d->%d) "
7712 + "unexpected (ri=%d,li=%d)",
7713 + state->id, msg_type_str(type),
7714 + (unsigned int)header,
7715 + remoteport, localport,
7716 + queue->remote_insert,
7717 + queue->local_insert);
7718 + mutex_unlock(&service->bulk_mutex);
7719 + break;
7720 + }
7721 +
7722 + BUG_ON(queue->process == queue->local_insert);
7723 + BUG_ON(queue->process != queue->remote_insert);
7724 +
7725 + bulk = &queue->bulks[
7726 + BULK_INDEX(queue->remote_insert)];
7727 + bulk->actual = *(int *)header->data;
7728 + queue->remote_insert++;
7729 +
7730 + vchiq_log_info(vchiq_core_log_level,
7731 + "%d: prs %s@%x (%d->%d) %x@%x",
7732 + state->id, msg_type_str(type),
7733 + (unsigned int)header,
7734 + remoteport, localport,
7735 + bulk->actual, (unsigned int)bulk->data);
7736 +
7737 + vchiq_log_trace(vchiq_core_log_level,
7738 + "%d: prs:%d %cx li=%x ri=%x p=%x",
7739 + state->id, localport,
7740 + (type == VCHIQ_MSG_BULK_RX_DONE) ?
7741 + 'r' : 't',
7742 + queue->local_insert,
7743 + queue->remote_insert, queue->process);
7744 +
7745 + DEBUG_TRACE(PARSE_LINE);
7746 + WARN_ON(queue->process == queue->local_insert);
7747 + vchiq_complete_bulk(bulk);
7748 + queue->process++;
7749 + mutex_unlock(&service->bulk_mutex);
7750 + DEBUG_TRACE(PARSE_LINE);
7751 + notify_bulks(service, queue, 1/*retry_poll*/);
7752 + DEBUG_TRACE(PARSE_LINE);
7753 + }
7754 + break;
7755 + case VCHIQ_MSG_PADDING:
7756 + vchiq_log_trace(vchiq_core_log_level,
7757 + "%d: prs PADDING@%x,%x",
7758 + state->id, (unsigned int)header, size);
7759 + break;
7760 + case VCHIQ_MSG_PAUSE:
7761 + /* If initiated, signal the application thread */
7762 + vchiq_log_trace(vchiq_core_log_level,
7763 + "%d: prs PAUSE@%x,%x",
7764 + state->id, (unsigned int)header, size);
7765 + if (state->conn_state == VCHIQ_CONNSTATE_PAUSED) {
7766 + vchiq_log_error(vchiq_core_log_level,
7767 + "%d: PAUSE received in state PAUSED",
7768 + state->id);
7769 + break;
7770 + }
7771 + if (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT) {
7772 + /* Send a PAUSE in response */
7773 + if (queue_message(state, NULL,
7774 + VCHIQ_MAKE_MSG(VCHIQ_MSG_PAUSE, 0, 0),
7775 + NULL, 0, 0, QMFLAGS_NO_MUTEX_UNLOCK)
7776 + == VCHIQ_RETRY)
7777 + goto bail_not_ready;
7778 + if (state->is_master)
7779 + pause_bulks(state);
7780 + }
7781 + /* At this point slot_mutex is held */
7782 + vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSED);
7783 + vchiq_platform_paused(state);
7784 + break;
7785 + case VCHIQ_MSG_RESUME:
7786 + vchiq_log_trace(vchiq_core_log_level,
7787 + "%d: prs RESUME@%x,%x",
7788 + state->id, (unsigned int)header, size);
7789 + /* Release the slot mutex */
7790 + mutex_unlock(&state->slot_mutex);
7791 + if (state->is_master)
7792 + resume_bulks(state);
7793 + vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
7794 + vchiq_platform_resumed(state);
7795 + break;
7796 +
7797 + case VCHIQ_MSG_REMOTE_USE:
7798 + vchiq_on_remote_use(state);
7799 + break;
7800 + case VCHIQ_MSG_REMOTE_RELEASE:
7801 + vchiq_on_remote_release(state);
7802 + break;
7803 + case VCHIQ_MSG_REMOTE_USE_ACTIVE:
7804 + vchiq_on_remote_use_active(state);
7805 + break;
7806 +
7807 + default:
7808 + vchiq_log_error(vchiq_core_log_level,
7809 + "%d: prs invalid msgid %x@%x,%x",
7810 + state->id, msgid, (unsigned int)header, size);
7811 + WARN(1, "invalid message\n");
7812 + break;
7813 + }
7814 +
7815 +skip_message:
7816 + if (service) {
7817 + unlock_service(service);
7818 + service = NULL;
7819 + }
7820 +
7821 + state->rx_pos += calc_stride(size);
7822 +
7823 + DEBUG_TRACE(PARSE_LINE);
7824 + /* Perform some housekeeping when the end of the slot is
7825 + ** reached. */
7826 + if ((state->rx_pos & VCHIQ_SLOT_MASK) == 0) {
7827 + /* Remove the extra reference count. */
7828 + release_slot(state, state->rx_info, NULL, NULL);
7829 + state->rx_data = NULL;
7830 + }
7831 + }
7832 +
7833 +bail_not_ready:
7834 + if (service)
7835 + unlock_service(service);
7836 +}
7837 +
7838 +/* Called by the slot handler thread */
7839 +static int
7840 +slot_handler_func(void *v)
7841 +{
7842 + VCHIQ_STATE_T *state = (VCHIQ_STATE_T *) v;
7843 + VCHIQ_SHARED_STATE_T *local = state->local;
7844 + DEBUG_INITIALISE(local)
7845 +
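+	/* Descriptive note (editorial): main dispatch loop - wait for the
+	** remote trigger doorbell, handle any deferred poll/pause/resume
+	** work, then parse the newly arrived messages in the rx slots. */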
7846 + while (1) {
7847 + DEBUG_COUNT(SLOT_HANDLER_COUNT);
7848 + DEBUG_TRACE(SLOT_HANDLER_LINE);
7849 + remote_event_wait(&local->trigger);
7850 +
7851 + rmb();
7852 +
7853 + DEBUG_TRACE(SLOT_HANDLER_LINE);
7854 + if (state->poll_needed) {
7855 + /* Check if we need to suspend - may change our
7856 + * conn_state */
7857 + vchiq_platform_check_suspend(state);
7858 +
7859 + state->poll_needed = 0;
7860 +
7861 + /* Handle service polling and other rare conditions here
7862 + ** out of the mainline code */
7863 + switch (state->conn_state) {
7864 + case VCHIQ_CONNSTATE_CONNECTED:
7865 + /* Poll the services as requested */
7866 + poll_services(state);
7867 + break;
7868 +
7869 + case VCHIQ_CONNSTATE_PAUSING:
7870 + if (state->is_master)
7871 + pause_bulks(state);
7872 + if (queue_message(state, NULL,
7873 + VCHIQ_MAKE_MSG(VCHIQ_MSG_PAUSE, 0, 0),
7874 + NULL, 0, 0,
7875 + QMFLAGS_NO_MUTEX_UNLOCK)
7876 + != VCHIQ_RETRY) {
7877 + vchiq_set_conn_state(state,
7878 + VCHIQ_CONNSTATE_PAUSE_SENT);
7879 + } else {
7880 + if (state->is_master)
7881 + resume_bulks(state);
7882 + /* Retry later */
7883 + state->poll_needed = 1;
7884 + }
7885 + break;
7886 +
7887 + case VCHIQ_CONNSTATE_PAUSED:
7888 + vchiq_platform_resume(state);
7889 + break;
7890 +
7891 + case VCHIQ_CONNSTATE_RESUMING:
7892 + if (queue_message(state, NULL,
7893 + VCHIQ_MAKE_MSG(VCHIQ_MSG_RESUME, 0, 0),
7894 + NULL, 0, 0, QMFLAGS_NO_MUTEX_LOCK)
7895 + != VCHIQ_RETRY) {
7896 + if (state->is_master)
7897 + resume_bulks(state);
7898 + vchiq_set_conn_state(state,
7899 + VCHIQ_CONNSTATE_CONNECTED);
7900 + vchiq_platform_resumed(state);
7901 + } else {
7902 + /* This should really be impossible,
7903 + ** since the PAUSE should have flushed
7904 + ** through outstanding messages. */
7905 + vchiq_log_error(vchiq_core_log_level,
7906 + "Failed to send RESUME "
7907 + "message");
7908 + BUG();
7909 + }
7910 + break;
7911 +
7912 + case VCHIQ_CONNSTATE_PAUSE_TIMEOUT:
7913 + case VCHIQ_CONNSTATE_RESUME_TIMEOUT:
7914 + vchiq_platform_handle_timeout(state);
7915 + break;
7916 + default:
7917 + break;
7918 + }
7919 +
7920 +
7921 + }
7922 +
7923 + DEBUG_TRACE(SLOT_HANDLER_LINE);
7924 + parse_rx_slots(state);
7925 + }
7926 + return 0;
7927 +}
7928 +
7929 +
7930 +/* Called by the recycle thread */
7931 +static int
7932 +recycle_func(void *v)
7933 +{
7934 + VCHIQ_STATE_T *state = (VCHIQ_STATE_T *) v;
7935 + VCHIQ_SHARED_STATE_T *local = state->local;
7936 +
7937 + while (1) {
7938 + remote_event_wait(&local->recycle);
7939 +
7940 + process_free_queue(state);
7941 + }
7942 + return 0;
7943 +}
7944 +
7945 +
7946 +/* Called by the sync thread */
7947 +static int
7948 +sync_func(void *v)
7949 +{
7950 + VCHIQ_STATE_T *state = (VCHIQ_STATE_T *) v;
7951 + VCHIQ_SHARED_STATE_T *local = state->local;
7952 + VCHIQ_HEADER_T *header = (VCHIQ_HEADER_T *)SLOT_DATA_FROM_INDEX(state,
7953 + state->remote->slot_sync);
7954 +
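+	/* Descriptive note (editorial): the synchronous channel delivers one
+	** message at a time through its dedicated slot; each sync_trigger
+	** wakeup corresponds to a single message, released back to the peer
+	** via sync_release. */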
7955 + while (1) {
7956 + VCHIQ_SERVICE_T *service;
7957 + int msgid, size;
7958 + int type;
7959 + unsigned int localport, remoteport;
7960 +
7961 + remote_event_wait(&local->sync_trigger);
7962 +
7963 + rmb();
7964 +
7965 + msgid = header->msgid;
7966 + size = header->size;
7967 + type = VCHIQ_MSG_TYPE(msgid);
7968 + localport = VCHIQ_MSG_DSTPORT(msgid);
7969 + remoteport = VCHIQ_MSG_SRCPORT(msgid);
7970 +
7971 + service = find_service_by_port(state, localport);
7972 +
7973 + if (!service) {
7974 + vchiq_log_error(vchiq_sync_log_level,
7975 + "%d: sf %s@%x (%d->%d) - "
7976 + "invalid/closed service %d",
7977 + state->id, msg_type_str(type),
7978 + (unsigned int)header,
7979 + remoteport, localport, localport);
7980 + release_message_sync(state, header);
7981 + continue;
7982 + }
7983 +
7984 + if (vchiq_sync_log_level >= VCHIQ_LOG_TRACE) {
7985 + int svc_fourcc;
7986 +
7987 + svc_fourcc = service
7988 + ? service->base.fourcc
7989 + : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
7990 + vchiq_log_trace(vchiq_sync_log_level,
7991 + "Rcvd Msg %s from %c%c%c%c s:%d d:%d len:%d",
7992 + msg_type_str(type),
7993 + VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
7994 + remoteport, localport, size);
7995 + if (size > 0)
7996 + vchiq_log_dump_mem("Rcvd", 0, header->data,
7997 + min(64, size));
7998 + }
7999 +
8000 + switch (type) {
8001 + case VCHIQ_MSG_OPENACK:
8002 + if (size >= sizeof(struct vchiq_openack_payload)) {
8003 + const struct vchiq_openack_payload *payload =
8004 + (struct vchiq_openack_payload *)
8005 + header->data;
8006 + service->peer_version = payload->version;
8007 + }
8008 + vchiq_log_info(vchiq_sync_log_level,
8009 + "%d: sf OPENACK@%x,%x (%d->%d) v:%d",
8010 + state->id, (unsigned int)header, size,
8011 + remoteport, localport, service->peer_version);
8012 + if (service->srvstate == VCHIQ_SRVSTATE_OPENING) {
8013 + service->remoteport = remoteport;
8014 + vchiq_set_service_state(service,
8015 + VCHIQ_SRVSTATE_OPENSYNC);
8016 + service->sync = 1;
8017 + up(&service->remove_event);
8018 + }
8019 + release_message_sync(state, header);
8020 + break;
8021 +
8022 + case VCHIQ_MSG_DATA:
8023 + vchiq_log_trace(vchiq_sync_log_level,
8024 + "%d: sf DATA@%x,%x (%d->%d)",
8025 + state->id, (unsigned int)header, size,
8026 + remoteport, localport);
8027 +
8028 + if ((service->remoteport == remoteport) &&
8029 + (service->srvstate ==
8030 + VCHIQ_SRVSTATE_OPENSYNC)) {
8031 + if (make_service_callback(service,
8032 + VCHIQ_MESSAGE_AVAILABLE, header,
8033 + NULL) == VCHIQ_RETRY)
8034 + vchiq_log_error(vchiq_sync_log_level,
8035 + "synchronous callback to "
8036 + "service %d returns "
8037 + "VCHIQ_RETRY",
8038 + localport);
8039 + }
8040 + break;
8041 +
8042 + default:
8043 + vchiq_log_error(vchiq_sync_log_level,
8044 + "%d: sf unexpected msgid %x@%x,%x",
8045 + state->id, msgid, (unsigned int)header, size);
8046 + release_message_sync(state, header);
8047 + break;
8048 + }
8049 +
8050 + unlock_service(service);
8051 + }
8052 +
8053 + return 0;
8054 +}
8055 +
8056 +
8057 +static void
8058 +init_bulk_queue(VCHIQ_BULK_QUEUE_T *queue)
8059 +{
8060 + queue->local_insert = 0;
8061 + queue->remote_insert = 0;
8062 + queue->process = 0;
8063 + queue->remote_notify = 0;
8064 + queue->remove = 0;
8065 +}
8066 +
8067 +
8068 +inline const char *
8069 +get_conn_state_name(VCHIQ_CONNSTATE_T conn_state)
8070 +{
8071 + return conn_state_names[conn_state];
8072 +}
8073 +
8074 +
8075 +VCHIQ_SLOT_ZERO_T *
8076 +vchiq_init_slots(void *mem_base, int mem_size)
8077 +{
8078 + int mem_align = (VCHIQ_SLOT_SIZE - (int)mem_base) & VCHIQ_SLOT_MASK;
8079 + VCHIQ_SLOT_ZERO_T *slot_zero =
8080 + (VCHIQ_SLOT_ZERO_T *)((char *)mem_base + mem_align);
8081 + int num_slots = (mem_size - mem_align)/VCHIQ_SLOT_SIZE;
8082 + int first_data_slot = VCHIQ_SLOT_ZERO_SLOTS;
8083 +
8084 + /* Ensure there is enough memory to run an absolute minimum system */
8085 + num_slots -= first_data_slot;
8086 +
8087 + if (num_slots < 4) {
8088 + vchiq_log_error(vchiq_core_log_level,
8089 + "vchiq_init_slots - insufficient memory %x bytes",
8090 + mem_size);
8091 + return NULL;
8092 + }
8093 +
8094 + memset(slot_zero, 0, sizeof(VCHIQ_SLOT_ZERO_T));
8095 +
8096 + slot_zero->magic = VCHIQ_MAGIC;
8097 + slot_zero->version = VCHIQ_VERSION;
8098 + slot_zero->version_min = VCHIQ_VERSION_MIN;
8099 + slot_zero->slot_zero_size = sizeof(VCHIQ_SLOT_ZERO_T);
8100 + slot_zero->slot_size = VCHIQ_SLOT_SIZE;
8101 + slot_zero->max_slots = VCHIQ_MAX_SLOTS;
8102 + slot_zero->max_slots_per_side = VCHIQ_MAX_SLOTS_PER_SIDE;
8103 +
8104 + slot_zero->master.slot_sync = first_data_slot;
8105 + slot_zero->master.slot_first = first_data_slot + 1;
8106 + slot_zero->master.slot_last = first_data_slot + (num_slots/2) - 1;
8107 + slot_zero->slave.slot_sync = first_data_slot + (num_slots/2);
8108 + slot_zero->slave.slot_first = first_data_slot + (num_slots/2) + 1;
8109 + slot_zero->slave.slot_last = first_data_slot + num_slots - 1;
8110 +
8111 + return slot_zero;
8112 +}
8113 +
8114 +VCHIQ_STATUS_T
8115 +vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero,
8116 + int is_master)
8117 +{
8118 + VCHIQ_SHARED_STATE_T *local;
8119 + VCHIQ_SHARED_STATE_T *remote;
8120 + VCHIQ_STATUS_T status;
8121 + char threadname[10];
8122 + static int id;
8123 + int i;
8124 +
8125 + vchiq_log_warning(vchiq_core_log_level,
8126 + "%s: slot_zero = 0x%08lx, is_master = %d",
8127 + __func__, (unsigned long)slot_zero, is_master);
8128 +
8129 + /* Check the input configuration */
8130 +
8131 + if (slot_zero->magic != VCHIQ_MAGIC) {
8132 + vchiq_loud_error_header();
8133 + vchiq_loud_error("Invalid VCHIQ magic value found.");
8134 + vchiq_loud_error("slot_zero=%x: magic=%x (expected %x)",
8135 + (unsigned int)slot_zero, slot_zero->magic, VCHIQ_MAGIC);
8136 + vchiq_loud_error_footer();
8137 + return VCHIQ_ERROR;
8138 + }
8139 +
8140 + if (slot_zero->version < VCHIQ_VERSION_MIN) {
8141 + vchiq_loud_error_header();
8142 + vchiq_loud_error("Incompatible VCHIQ versions found.");
8143 + vchiq_loud_error("slot_zero=%x: VideoCore version=%d "
8144 + "(minimum %d)",
8145 + (unsigned int)slot_zero, slot_zero->version,
8146 + VCHIQ_VERSION_MIN);
8147 + vchiq_loud_error("Restart with a newer VideoCore image.");
8148 + vchiq_loud_error_footer();
8149 + return VCHIQ_ERROR;
8150 + }
8151 +
8152 + if (VCHIQ_VERSION < slot_zero->version_min) {
8153 + vchiq_loud_error_header();
8154 + vchiq_loud_error("Incompatible VCHIQ versions found.");
8155 + vchiq_loud_error("slot_zero=%x: version=%d (VideoCore "
8156 + "minimum %d)",
8157 + (unsigned int)slot_zero, VCHIQ_VERSION,
8158 + slot_zero->version_min);
8159 + vchiq_loud_error("Restart with a newer kernel.");
8160 + vchiq_loud_error_footer();
8161 + return VCHIQ_ERROR;
8162 + }
8163 +
8164 + if ((slot_zero->slot_zero_size != sizeof(VCHIQ_SLOT_ZERO_T)) ||
8165 + (slot_zero->slot_size != VCHIQ_SLOT_SIZE) ||
8166 + (slot_zero->max_slots != VCHIQ_MAX_SLOTS) ||
8167 + (slot_zero->max_slots_per_side != VCHIQ_MAX_SLOTS_PER_SIDE)) {
8168 + vchiq_loud_error_header();
8169 + if (slot_zero->slot_zero_size != sizeof(VCHIQ_SLOT_ZERO_T))
8170 + vchiq_loud_error("slot_zero=%x: slot_zero_size=%x "
8171 + "(expected %x)",
8172 + (unsigned int)slot_zero,
8173 + slot_zero->slot_zero_size,
8174 + sizeof(VCHIQ_SLOT_ZERO_T));
8175 + if (slot_zero->slot_size != VCHIQ_SLOT_SIZE)
8176 + vchiq_loud_error("slot_zero=%x: slot_size=%d "
8177 + "(expected %d)",
8178 + (unsigned int)slot_zero, slot_zero->slot_size,
8179 + VCHIQ_SLOT_SIZE);
8180 + if (slot_zero->max_slots != VCHIQ_MAX_SLOTS)
8181 + vchiq_loud_error("slot_zero=%x: max_slots=%d "
8182 + "(expected %d)",
8183 + (unsigned int)slot_zero, slot_zero->max_slots,
8184 + VCHIQ_MAX_SLOTS);
8185 + if (slot_zero->max_slots_per_side != VCHIQ_MAX_SLOTS_PER_SIDE)
8186 + vchiq_loud_error("slot_zero=%x: max_slots_per_side=%d "
8187 + "(expected %d)",
8188 + (unsigned int)slot_zero,
8189 + slot_zero->max_slots_per_side,
8190 + VCHIQ_MAX_SLOTS_PER_SIDE);
8191 + vchiq_loud_error_footer();
8192 + return VCHIQ_ERROR;
8193 + }
8194 +
8195 + if (VCHIQ_VERSION < slot_zero->version)
8196 + slot_zero->version = VCHIQ_VERSION;
8197 +
8198 + if (is_master) {
8199 + local = &slot_zero->master;
8200 + remote = &slot_zero->slave;
8201 + } else {
8202 + local = &slot_zero->slave;
8203 + remote = &slot_zero->master;
8204 + }
8205 +
8206 + if (local->initialised) {
8207 + vchiq_loud_error_header();
8208 + if (remote->initialised)
8209 + vchiq_loud_error("local state has already been "
8210 + "initialised");
8211 + else
8212 + vchiq_loud_error("master/slave mismatch - two %ss",
8213 + is_master ? "master" : "slave");
8214 + vchiq_loud_error_footer();
8215 + return VCHIQ_ERROR;
8216 + }
8217 +
8218 + memset(state, 0, sizeof(VCHIQ_STATE_T));
8219 +
8220 + state->id = id++;
8221 + state->is_master = is_master;
8222 +
8223 + /*
8224 + initialize shared state pointers
8225 + */
8226 +
8227 + state->local = local;
8228 + state->remote = remote;
8229 + state->slot_data = (VCHIQ_SLOT_T *)slot_zero;
8230 +
8231 + /*
8232 + initialize events and mutexes
8233 + */
8234 +
8235 + sema_init(&state->connect, 0);
8236 + mutex_init(&state->mutex);
8237 + sema_init(&state->trigger_event, 0);
8238 + sema_init(&state->recycle_event, 0);
8239 + sema_init(&state->sync_trigger_event, 0);
8240 + sema_init(&state->sync_release_event, 0);
8241 +
8242 + mutex_init(&state->slot_mutex);
8243 + mutex_init(&state->recycle_mutex);
8244 + mutex_init(&state->sync_mutex);
8245 + mutex_init(&state->bulk_transfer_mutex);
8246 +
8247 + sema_init(&state->slot_available_event, 0);
8248 + sema_init(&state->slot_remove_event, 0);
8249 + sema_init(&state->data_quota_event, 0);
8250 +
8251 + state->slot_queue_available = 0;
8252 +
8253 + for (i = 0; i < VCHIQ_MAX_SERVICES; i++) {
8254 + VCHIQ_SERVICE_QUOTA_T *service_quota =
8255 + &state->service_quotas[i];
8256 + sema_init(&service_quota->quota_event, 0);
8257 + }
8258 +
8259 + for (i = local->slot_first; i <= local->slot_last; i++) {
8260 + local->slot_queue[state->slot_queue_available++] = i;
8261 + up(&state->slot_available_event);
8262 + }
8263 +
8264 + state->default_slot_quota = state->slot_queue_available/2;
8265 + state->default_message_quota =
8266 + min((unsigned short)(state->default_slot_quota * 256),
8267 + (unsigned short)~0);
8268 +
8269 + state->previous_data_index = -1;
8270 + state->data_use_count = 0;
8271 + state->data_quota = state->slot_queue_available - 1;
8272 +
8273 + local->trigger.event = &state->trigger_event;
8274 + remote_event_create(&local->trigger);
8275 + local->tx_pos = 0;
8276 +
8277 + local->recycle.event = &state->recycle_event;
8278 + remote_event_create(&local->recycle);
8279 + local->slot_queue_recycle = state->slot_queue_available;
8280 +
8281 + local->sync_trigger.event = &state->sync_trigger_event;
8282 + remote_event_create(&local->sync_trigger);
8283 +
8284 + local->sync_release.event = &state->sync_release_event;
8285 + remote_event_create(&local->sync_release);
8286 +
8287 + /* At start-of-day, the slot is empty and available */
8288 + ((VCHIQ_HEADER_T *)SLOT_DATA_FROM_INDEX(state, local->slot_sync))->msgid
8289 + = VCHIQ_MSGID_PADDING;
8290 + remote_event_signal_local(&local->sync_release);
8291 +
8292 + local->debug[DEBUG_ENTRIES] = DEBUG_MAX;
8293 +
8294 + status = vchiq_platform_init_state(state);
8295 +
8296 + /*
8297 + bring up slot handler thread
8298 + */
8299 + snprintf(threadname, sizeof(threadname), "VCHIQ-%d", state->id);
8300 + state->slot_handler_thread = kthread_create(&slot_handler_func,
8301 + (void *)state,
8302 + threadname);
8303 +
8304 + if (state->slot_handler_thread == NULL) {
8305 + vchiq_loud_error_header();
8306 + vchiq_loud_error("couldn't create thread %s", threadname);
8307 + vchiq_loud_error_footer();
8308 + return VCHIQ_ERROR;
8309 + }
8310 + set_user_nice(state->slot_handler_thread, -19);
8311 + wake_up_process(state->slot_handler_thread);
8312 +
8313 + snprintf(threadname, sizeof(threadname), "VCHIQr-%d", state->id);
8314 + state->recycle_thread = kthread_create(&recycle_func,
8315 + (void *)state,
8316 + threadname);
8317 + if (state->recycle_thread == NULL) {
8318 + vchiq_loud_error_header();
8319 + vchiq_loud_error("couldn't create thread %s", threadname);
8320 + vchiq_loud_error_footer();
8321 + return VCHIQ_ERROR;
8322 + }
8323 + set_user_nice(state->recycle_thread, -19);
8324 + wake_up_process(state->recycle_thread);
8325 +
8326 + snprintf(threadname, sizeof(threadname), "VCHIQs-%d", state->id);
8327 + state->sync_thread = kthread_create(&sync_func,
8328 + (void *)state,
8329 + threadname);
8330 + if (state->sync_thread == NULL) {
8331 + vchiq_loud_error_header();
8332 + vchiq_loud_error("couldn't create thread %s", threadname);
8333 + vchiq_loud_error_footer();
8334 + return VCHIQ_ERROR;
8335 + }
8336 + set_user_nice(state->sync_thread, -20);
8337 + wake_up_process(state->sync_thread);
8338 +
8339 + BUG_ON(state->id >= VCHIQ_MAX_STATES);
8340 + vchiq_states[state->id] = state;
8341 +
8342 + /* Indicate readiness to the other side */
8343 + local->initialised = 1;
8344 +
8345 + return status;
8346 +}
8347 +
8348 +/* Called from application thread when a client or server service is created. */
8349 +VCHIQ_SERVICE_T *
8350 +vchiq_add_service_internal(VCHIQ_STATE_T *state,
8351 + const VCHIQ_SERVICE_PARAMS_T *params, int srvstate,
8352 + VCHIQ_INSTANCE_T instance, VCHIQ_USERDATA_TERM_T userdata_term)
8353 +{
8354 + VCHIQ_SERVICE_T *service;
8355 +
8356 + service = kmalloc(sizeof(VCHIQ_SERVICE_T), GFP_KERNEL);
8357 + if (service) {
8358 + service->base.fourcc = params->fourcc;
8359 + service->base.callback = params->callback;
8360 + service->base.userdata = params->userdata;
8361 + service->handle = VCHIQ_SERVICE_HANDLE_INVALID;
8362 + service->ref_count = 1;
8363 + service->srvstate = VCHIQ_SRVSTATE_FREE;
8364 + service->userdata_term = userdata_term;
8365 + service->localport = VCHIQ_PORT_FREE;
8366 + service->remoteport = VCHIQ_PORT_FREE;
8367 +
8368 + service->public_fourcc = (srvstate == VCHIQ_SRVSTATE_OPENING) ?
8369 + VCHIQ_FOURCC_INVALID : params->fourcc;
8370 + service->client_id = 0;
8371 + service->auto_close = 1;
8372 + service->sync = 0;
8373 + service->closing = 0;
8374 + service->trace = 0;
8375 + atomic_set(&service->poll_flags, 0);
8376 + service->version = params->version;
8377 + service->version_min = params->version_min;
8378 + service->state = state;
8379 + service->instance = instance;
8380 + service->service_use_count = 0;
8381 + init_bulk_queue(&service->bulk_tx);
8382 + init_bulk_queue(&service->bulk_rx);
8383 + sema_init(&service->remove_event, 0);
8384 + sema_init(&service->bulk_remove_event, 0);
8385 + mutex_init(&service->bulk_mutex);
8386 + memset(&service->stats, 0, sizeof(service->stats));
8387 + } else {
8388 + vchiq_log_error(vchiq_core_log_level,
8389 + "Out of memory");
8390 + }
8391 +
8392 + if (service) {
8393 + VCHIQ_SERVICE_T **pservice = NULL;
8394 + int i;
8395 +
8396 + /* Although it is perfectly possible to use service_spinlock
8397 + ** to protect the creation of services, it is overkill as it
8398 + ** disables interrupts while the array is searched.
8399 + ** The only danger is of another thread trying to create a
8400 + ** service - service deletion is safe.
8401 + ** Therefore it is preferable to use state->mutex which,
8402 + ** although slower to claim, doesn't block interrupts while
8403 + ** it is held.
8404 + */
8405 +
8406 + mutex_lock(&state->mutex);
8407 +
8408 + /* Prepare to use a previously unused service */
8409 + if (state->unused_service < VCHIQ_MAX_SERVICES)
8410 + pservice = &state->services[state->unused_service];
8411 +
8412 + if (srvstate == VCHIQ_SRVSTATE_OPENING) {
8413 + for (i = 0; i < state->unused_service; i++) {
8414 + VCHIQ_SERVICE_T *srv = state->services[i];
8415 + if (!srv) {
8416 + pservice = &state->services[i];
8417 + break;
8418 + }
8419 + }
8420 + } else {
8421 + for (i = (state->unused_service - 1); i >= 0; i--) {
8422 + VCHIQ_SERVICE_T *srv = state->services[i];
8423 + if (!srv)
8424 + pservice = &state->services[i];
8425 + else if ((srv->public_fourcc == params->fourcc)
8426 + && ((srv->instance != instance) ||
8427 + (srv->base.callback !=
8428 + params->callback))) {
8429 + /* There is another server using this fourcc
8430 + ** with a different instance or callback. */
8431 + pservice = NULL;
8432 + break;
8433 + }
8434 + }
8435 + }
8436 +
8437 + if (pservice) {
8438 + service->localport = (pservice - state->services);
8439 + if (!handle_seq)
8440 + handle_seq = VCHIQ_MAX_STATES *
8441 + VCHIQ_MAX_SERVICES;
8442 + service->handle = handle_seq |
8443 + (state->id * VCHIQ_MAX_SERVICES) |
8444 + service->localport;
8445 + handle_seq += VCHIQ_MAX_STATES * VCHIQ_MAX_SERVICES;
8446 + *pservice = service;
8447 + if (pservice == &state->services[state->unused_service])
8448 + state->unused_service++;
8449 + }
8450 +
8451 + mutex_unlock(&state->mutex);
8452 +
8453 + if (!pservice) {
8454 + kfree(service);
8455 + service = NULL;
8456 + }
8457 + }
8458 +
8459 + if (service) {
8460 + VCHIQ_SERVICE_QUOTA_T *service_quota =
8461 + &state->service_quotas[service->localport];
8462 + service_quota->slot_quota = state->default_slot_quota;
8463 + service_quota->message_quota = state->default_message_quota;
8464 + if (service_quota->slot_use_count == 0)
8465 + service_quota->previous_tx_index =
8466 + SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos)
8467 + - 1;
8468 +
8469 + /* Bring this service online */
8470 + vchiq_set_service_state(service, srvstate);
8471 +
8472 + vchiq_log_info(vchiq_core_msg_log_level,
8473 + "%s Service %c%c%c%c SrcPort:%d",
8474 + (srvstate == VCHIQ_SRVSTATE_OPENING)
8475 + ? "Open" : "Add",
8476 + VCHIQ_FOURCC_AS_4CHARS(params->fourcc),
8477 + service->localport);
8478 + }
8479 +
8480 + /* Don't unlock the service - leave it with a ref_count of 1. */
8481 +
8482 + return service;
8483 +}
8484 +
8485 +VCHIQ_STATUS_T
8486 +vchiq_open_service_internal(VCHIQ_SERVICE_T *service, int client_id)
8487 +{
8488 + struct vchiq_open_payload payload = {
8489 + service->base.fourcc,
8490 + client_id,
8491 + service->version,
8492 + service->version_min
8493 + };
8494 + VCHIQ_ELEMENT_T body = { &payload, sizeof(payload) };
8495 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
8496 +
8497 + service->client_id = client_id;
8498 + vchiq_use_service_internal(service);
8499 + status = queue_message(service->state, NULL,
8500 + VCHIQ_MAKE_MSG(VCHIQ_MSG_OPEN, service->localport, 0),
8501 + &body, 1, sizeof(payload), QMFLAGS_IS_BLOCKING);
8502 + if (status == VCHIQ_SUCCESS) {
8503 + /* Wait for the ACK/NAK */
8504 + if (down_interruptible(&service->remove_event) != 0) {
8505 + status = VCHIQ_RETRY;
8506 + vchiq_release_service_internal(service);
8507 + } else if ((service->srvstate != VCHIQ_SRVSTATE_OPEN) &&
8508 + (service->srvstate != VCHIQ_SRVSTATE_OPENSYNC)) {
8509 + if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT)
8510 + vchiq_log_error(vchiq_core_log_level,
8511 + "%d: osi - srvstate = %s (ref %d)",
8512 + service->state->id,
8513 + srvstate_names[service->srvstate],
8514 + service->ref_count);
8515 + status = VCHIQ_ERROR;
8516 + VCHIQ_SERVICE_STATS_INC(service, error_count);
8517 + vchiq_release_service_internal(service);
8518 + }
8519 + }
8520 + return status;
8521 +}
8522 +
8523 +static void
8524 +release_service_messages(VCHIQ_SERVICE_T *service)
8525 +{
8526 + VCHIQ_STATE_T *state = service->state;
8527 + int slot_last = state->remote->slot_last;
8528 + int i;
8529 +
8530 + /* Release any claimed messages aimed at this service */
8531 +
8532 + if (service->sync) {
8533 + VCHIQ_HEADER_T *header =
8534 + (VCHIQ_HEADER_T *)SLOT_DATA_FROM_INDEX(state,
8535 + state->remote->slot_sync);
8536 + if (VCHIQ_MSG_DSTPORT(header->msgid) == service->localport)
8537 + release_message_sync(state, header);
8538 +
8539 + return;
8540 + }
8541 +
8542 + for (i = state->remote->slot_first; i <= slot_last; i++) {
8543 + VCHIQ_SLOT_INFO_T *slot_info =
8544 + SLOT_INFO_FROM_INDEX(state, i);
8545 + if (slot_info->release_count != slot_info->use_count) {
8546 + char *data =
8547 + (char *)SLOT_DATA_FROM_INDEX(state, i);
8548 + unsigned int pos, end;
8549 +
8550 + end = VCHIQ_SLOT_SIZE;
8551 + if (data == state->rx_data)
8552 + /* This buffer is still being read from - stop
8553 + ** at the current read position */
8554 + end = state->rx_pos & VCHIQ_SLOT_MASK;
8555 +
8556 + pos = 0;
8557 +
8558 + while (pos < end) {
8559 + VCHIQ_HEADER_T *header =
8560 + (VCHIQ_HEADER_T *)(data + pos);
8561 + int msgid = header->msgid;
8562 + int port = VCHIQ_MSG_DSTPORT(msgid);
8563 + if ((port == service->localport) &&
8564 + (msgid & VCHIQ_MSGID_CLAIMED)) {
8565 + vchiq_log_info(vchiq_core_log_level,
8566 + " fsi - hdr %x",
8567 + (unsigned int)header);
8568 + release_slot(state, slot_info, header,
8569 + NULL);
8570 + }
8571 + pos += calc_stride(header->size);
8572 + if (pos > VCHIQ_SLOT_SIZE) {
8573 + vchiq_log_error(vchiq_core_log_level,
8574 + "fsi - pos %x: header %x, "
8575 + "msgid %x, header->msgid %x, "
8576 + "header->size %x",
8577 + pos, (unsigned int)header,
8578 + msgid, header->msgid,
8579 + header->size);
8580 + WARN(1, "invalid slot position\n");
8581 + }
8582 + }
8583 + }
8584 + }
8585 +}
8586 +
8587 +static int
8588 +do_abort_bulks(VCHIQ_SERVICE_T *service)
8589 +{
8590 + VCHIQ_STATUS_T status;
8591 +
8592 + /* Abort any outstanding bulk transfers */
8593 + if (mutex_lock_interruptible(&service->bulk_mutex) != 0)
8594 + return 0;
8595 + abort_outstanding_bulks(service, &service->bulk_tx);
8596 + abort_outstanding_bulks(service, &service->bulk_rx);
8597 + mutex_unlock(&service->bulk_mutex);
8598 +
8599 + status = notify_bulks(service, &service->bulk_tx, 0/*!retry_poll*/);
8600 + if (status == VCHIQ_SUCCESS)
8601 + status = notify_bulks(service, &service->bulk_rx,
8602 + 0/*!retry_poll*/);
8603 + return (status == VCHIQ_SUCCESS);
8604 +}
8605 +
8606 +static VCHIQ_STATUS_T
8607 +close_service_complete(VCHIQ_SERVICE_T *service, int failstate)
8608 +{
8609 + VCHIQ_STATUS_T status;
8610 + int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
8611 + int newstate;
8612 +
8613 + switch (service->srvstate) {
8614 + case VCHIQ_SRVSTATE_OPEN:
8615 + case VCHIQ_SRVSTATE_CLOSESENT:
8616 + case VCHIQ_SRVSTATE_CLOSERECVD:
8617 + if (is_server) {
8618 + if (service->auto_close) {
8619 + service->client_id = 0;
8620 + service->remoteport = VCHIQ_PORT_FREE;
8621 + newstate = VCHIQ_SRVSTATE_LISTENING;
8622 + } else
8623 + newstate = VCHIQ_SRVSTATE_CLOSEWAIT;
8624 + } else
8625 + newstate = VCHIQ_SRVSTATE_CLOSED;
8626 + vchiq_set_service_state(service, newstate);
8627 + break;
8628 + case VCHIQ_SRVSTATE_LISTENING:
8629 + break;
8630 + default:
8631 + vchiq_log_error(vchiq_core_log_level,
8632 + "close_service_complete(%x) called in state %s",
8633 + service->handle, srvstate_names[service->srvstate]);
8634 + WARN(1, "close_service_complete in unexpected state\n");
8635 + return VCHIQ_ERROR;
8636 + }
8637 +
8638 + status = make_service_callback(service,
8639 + VCHIQ_SERVICE_CLOSED, NULL, NULL);
8640 +
8641 + if (status != VCHIQ_RETRY) {
8642 + int uc = service->service_use_count;
8643 + int i;
8644 + /* Complete the close process */
8645 + for (i = 0; i < uc; i++)
8646 + /* cater for cases where close is forced and the
8647 + ** client may not close all its handles */
8648 + vchiq_release_service_internal(service);
8649 +
8650 + service->client_id = 0;
8651 + service->remoteport = VCHIQ_PORT_FREE;
8652 +
8653 + if (service->srvstate == VCHIQ_SRVSTATE_CLOSED)
8654 + vchiq_free_service_internal(service);
8655 + else if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT) {
8656 + if (is_server)
8657 + service->closing = 0;
8658 +
8659 + up(&service->remove_event);
8660 + }
8661 + } else
8662 + vchiq_set_service_state(service, failstate);
8663 +
8664 + return status;
8665 +}
8666 +
8667 +/* Called by the slot handler */
8668 +VCHIQ_STATUS_T
8669 +vchiq_close_service_internal(VCHIQ_SERVICE_T *service, int close_recvd)
8670 +{
8671 + VCHIQ_STATE_T *state = service->state;
8672 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
8673 + int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
8674 +
8675 + vchiq_log_info(vchiq_core_log_level, "%d: csi:%d,%d (%s)",
8676 + service->state->id, service->localport, close_recvd,
8677 + srvstate_names[service->srvstate]);
8678 +
8679 + switch (service->srvstate) {
8680 + case VCHIQ_SRVSTATE_CLOSED:
8681 + case VCHIQ_SRVSTATE_HIDDEN:
8682 + case VCHIQ_SRVSTATE_LISTENING:
8683 + case VCHIQ_SRVSTATE_CLOSEWAIT:
8684 + if (close_recvd)
8685 + vchiq_log_error(vchiq_core_log_level,
8686 + "vchiq_close_service_internal(1) called "
8687 + "in state %s",
8688 + srvstate_names[service->srvstate]);
8689 + else if (is_server) {
8690 + if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
8691 + status = VCHIQ_ERROR;
8692 + } else {
8693 + service->client_id = 0;
8694 + service->remoteport = VCHIQ_PORT_FREE;
8695 + if (service->srvstate ==
8696 + VCHIQ_SRVSTATE_CLOSEWAIT)
8697 + vchiq_set_service_state(service,
8698 + VCHIQ_SRVSTATE_LISTENING);
8699 + }
8700 + up(&service->remove_event);
8701 + } else
8702 + vchiq_free_service_internal(service);
8703 + break;
8704 + case VCHIQ_SRVSTATE_OPENING:
8705 + if (close_recvd) {
8706 + /* The open was rejected - tell the user */
8707 + vchiq_set_service_state(service,
8708 + VCHIQ_SRVSTATE_CLOSEWAIT);
8709 + up(&service->remove_event);
8710 + } else {
8711 + /* Shutdown mid-open - let the other side know */
8712 + status = queue_message(state, service,
8713 + VCHIQ_MAKE_MSG
8714 + (VCHIQ_MSG_CLOSE,
8715 + service->localport,
8716 + VCHIQ_MSG_DSTPORT(service->remoteport)),
8717 + NULL, 0, 0, 0);
8718 + }
8719 + break;
8720 +
8721 + case VCHIQ_SRVSTATE_OPENSYNC:
8722 + mutex_lock(&state->sync_mutex);
8723 + /* Drop through */
8724 +
8725 + case VCHIQ_SRVSTATE_OPEN:
8726 + if (state->is_master || close_recvd) {
8727 + if (!do_abort_bulks(service))
8728 + status = VCHIQ_RETRY;
8729 + }
8730 +
8731 + release_service_messages(service);
8732 +
8733 + if (status == VCHIQ_SUCCESS)
8734 + status = queue_message(state, service,
8735 + VCHIQ_MAKE_MSG
8736 + (VCHIQ_MSG_CLOSE,
8737 + service->localport,
8738 + VCHIQ_MSG_DSTPORT(service->remoteport)),
8739 + NULL, 0, 0, QMFLAGS_NO_MUTEX_UNLOCK);
8740 +
8741 + if (status == VCHIQ_SUCCESS) {
8742 + if (!close_recvd) {
8743 + /* Change the state while the mutex is
8744 + still held */
8745 + vchiq_set_service_state(service,
8746 + VCHIQ_SRVSTATE_CLOSESENT);
8747 + mutex_unlock(&state->slot_mutex);
8748 + if (service->sync)
8749 + mutex_unlock(&state->sync_mutex);
8750 + break;
8751 + }
8752 + } else if (service->srvstate == VCHIQ_SRVSTATE_OPENSYNC) {
8753 + mutex_unlock(&state->sync_mutex);
8754 + break;
8755 + } else
8756 + break;
8757 +
8758 + /* Change the state while the mutex is still held */
8759 + vchiq_set_service_state(service, VCHIQ_SRVSTATE_CLOSERECVD);
8760 + mutex_unlock(&state->slot_mutex);
8761 + if (service->sync)
8762 + mutex_unlock(&state->sync_mutex);
8763 +
8764 + status = close_service_complete(service,
8765 + VCHIQ_SRVSTATE_CLOSERECVD);
8766 + break;
8767 +
8768 + case VCHIQ_SRVSTATE_CLOSESENT:
8769 + if (!close_recvd)
8770 + /* This happens when a process is killed mid-close */
8771 + break;
8772 +
8773 + if (!state->is_master) {
8774 + if (!do_abort_bulks(service)) {
8775 + status = VCHIQ_RETRY;
8776 + break;
8777 + }
8778 + }
8779 +
8780 + if (status == VCHIQ_SUCCESS)
8781 + status = close_service_complete(service,
8782 + VCHIQ_SRVSTATE_CLOSERECVD);
8783 + break;
8784 +
8785 + case VCHIQ_SRVSTATE_CLOSERECVD:
8786 + if (!close_recvd && is_server)
8787 + /* Force into LISTENING mode */
8788 + vchiq_set_service_state(service,
8789 + VCHIQ_SRVSTATE_LISTENING);
8790 + status = close_service_complete(service,
8791 + VCHIQ_SRVSTATE_CLOSERECVD);
8792 + break;
8793 +
8794 + default:
8795 + vchiq_log_error(vchiq_core_log_level,
8796 + "vchiq_close_service_internal(%d) called in state %s",
8797 + close_recvd, srvstate_names[service->srvstate]);
8798 + break;
8799 + }
8800 +
8801 + return status;
8802 +}
8803 +
8804 +/* Called from the application process upon process death */
8805 +void
8806 +vchiq_terminate_service_internal(VCHIQ_SERVICE_T *service)
8807 +{
8808 + VCHIQ_STATE_T *state = service->state;
8809 +
8810 + vchiq_log_info(vchiq_core_log_level, "%d: tsi - (%d<->%d)",
8811 + state->id, service->localport, service->remoteport);
8812 +
8813 + mark_service_closing(service);
8814 +
8815 + /* Mark the service for removal by the slot handler */
8816 + request_poll(state, service, VCHIQ_POLL_REMOVE);
8817 +}
8818 +
8819 +/* Called from the slot handler */
8820 +void
8821 +vchiq_free_service_internal(VCHIQ_SERVICE_T *service)
8822 +{
8823 + VCHIQ_STATE_T *state = service->state;
8824 +
8825 + vchiq_log_info(vchiq_core_log_level, "%d: fsi - (%d)",
8826 + state->id, service->localport);
8827 +
8828 + switch (service->srvstate) {
8829 + case VCHIQ_SRVSTATE_OPENING:
8830 + case VCHIQ_SRVSTATE_CLOSED:
8831 + case VCHIQ_SRVSTATE_HIDDEN:
8832 + case VCHIQ_SRVSTATE_LISTENING:
8833 + case VCHIQ_SRVSTATE_CLOSEWAIT:
8834 + break;
8835 + default:
8836 + vchiq_log_error(vchiq_core_log_level,
8837 + "%d: fsi - (%d) in state %s",
8838 + state->id, service->localport,
8839 + srvstate_names[service->srvstate]);
8840 + return;
8841 + }
8842 +
8843 + vchiq_set_service_state(service, VCHIQ_SRVSTATE_FREE);
8844 +
8845 + up(&service->remove_event);
8846 +
8847 + /* Release the initial lock */
8848 + unlock_service(service);
8849 +}
8850 +
8851 +VCHIQ_STATUS_T
8852 +vchiq_connect_internal(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance)
8853 +{
8854 + VCHIQ_SERVICE_T *service;
8855 + int i;
8856 +
8857 + /* Find all services registered to this client and enable them. */
8858 + i = 0;
8859 + while ((service = next_service_by_instance(state, instance,
8860 + &i)) != NULL) {
8861 + if (service->srvstate == VCHIQ_SRVSTATE_HIDDEN)
8862 + vchiq_set_service_state(service,
8863 + VCHIQ_SRVSTATE_LISTENING);
8864 + unlock_service(service);
8865 + }
8866 +
8867 + if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED) {
8868 + if (queue_message(state, NULL,
8869 + VCHIQ_MAKE_MSG(VCHIQ_MSG_CONNECT, 0, 0), NULL, 0,
8870 + 0, QMFLAGS_IS_BLOCKING) == VCHIQ_RETRY)
8871 + return VCHIQ_RETRY;
8872 +
8873 + vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTING);
8874 + }
8875 +
8876 + if (state->conn_state == VCHIQ_CONNSTATE_CONNECTING) {
8877 + if (down_interruptible(&state->connect) != 0)
8878 + return VCHIQ_RETRY;
8879 +
8880 + vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
8881 + up(&state->connect);
8882 + }
8883 +
8884 + return VCHIQ_SUCCESS;
8885 +}
8886 +
8887 +VCHIQ_STATUS_T
8888 +vchiq_shutdown_internal(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance)
8889 +{
8890 + VCHIQ_SERVICE_T *service;
8891 + int i;
8892 +
8893 + /* Find all services registered to this client and remove them. */
8894 + i = 0;
8895 + while ((service = next_service_by_instance(state, instance,
8896 + &i)) != NULL) {
8897 + (void)vchiq_remove_service(service->handle);
8898 + unlock_service(service);
8899 + }
8900 +
8901 + return VCHIQ_SUCCESS;
8902 +}
8903 +
8904 +VCHIQ_STATUS_T
8905 +vchiq_pause_internal(VCHIQ_STATE_T *state)
8906 +{
8907 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
8908 +
8909 + switch (state->conn_state) {
8910 + case VCHIQ_CONNSTATE_CONNECTED:
8911 + /* Request a pause */
8912 + vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSING);
8913 + request_poll(state, NULL, 0);
8914 + break;
8915 + default:
8916 + vchiq_log_error(vchiq_core_log_level,
8917 + "vchiq_pause_internal in state %s\n",
8918 + conn_state_names[state->conn_state]);
8919 + status = VCHIQ_ERROR;
8920 + VCHIQ_STATS_INC(state, error_count);
8921 + break;
8922 + }
8923 +
8924 + return status;
8925 +}
8926 +
8927 +VCHIQ_STATUS_T
8928 +vchiq_resume_internal(VCHIQ_STATE_T *state)
8929 +{
8930 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
8931 +
8932 + if (state->conn_state == VCHIQ_CONNSTATE_PAUSED) {
8933 + vchiq_set_conn_state(state, VCHIQ_CONNSTATE_RESUMING);
8934 + request_poll(state, NULL, 0);
8935 + } else {
8936 + status = VCHIQ_ERROR;
8937 + VCHIQ_STATS_INC(state, error_count);
8938 + }
8939 +
8940 + return status;
8941 +}
8942 +
8943 +VCHIQ_STATUS_T
8944 +vchiq_close_service(VCHIQ_SERVICE_HANDLE_T handle)
8945 +{
8946 + /* Unregister the service */
8947 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
8948 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
8949 +
8950 + if (!service)
8951 + return VCHIQ_ERROR;
8952 +
8953 + vchiq_log_info(vchiq_core_log_level,
8954 + "%d: close_service:%d",
8955 + service->state->id, service->localport);
8956 +
8957 + if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
8958 + (service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
8959 + (service->srvstate == VCHIQ_SRVSTATE_HIDDEN)) {
8960 + unlock_service(service);
8961 + return VCHIQ_ERROR;
8962 + }
8963 +
8964 + mark_service_closing(service);
8965 +
8966 + if (current == service->state->slot_handler_thread) {
8967 + status = vchiq_close_service_internal(service,
8968 + 0/*!close_recvd*/);
8969 + BUG_ON(status == VCHIQ_RETRY);
8970 + } else {
8971 + /* Mark the service for termination by the slot handler */
8972 + request_poll(service->state, service, VCHIQ_POLL_TERMINATE);
8973 + }
8974 +
8975 + while (1) {
8976 + if (down_interruptible(&service->remove_event) != 0) {
8977 + status = VCHIQ_RETRY;
8978 + break;
8979 + }
8980 +
8981 + if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
8982 + (service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
8983 + (service->srvstate == VCHIQ_SRVSTATE_OPEN))
8984 + break;
8985 +
8986 + vchiq_log_warning(vchiq_core_log_level,
8987 + "%d: close_service:%d - waiting in state %s",
8988 + service->state->id, service->localport,
8989 + srvstate_names[service->srvstate]);
8990 + }
8991 +
8992 + if ((status == VCHIQ_SUCCESS) &&
8993 + (service->srvstate != VCHIQ_SRVSTATE_FREE) &&
8994 + (service->srvstate != VCHIQ_SRVSTATE_LISTENING))
8995 + status = VCHIQ_ERROR;
8996 +
8997 + unlock_service(service);
8998 +
8999 + return status;
9000 +}
9001 +
9002 +VCHIQ_STATUS_T
9003 +vchiq_remove_service(VCHIQ_SERVICE_HANDLE_T handle)
9004 +{
9005 + /* Unregister the service */
9006 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
9007 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
9008 +
9009 + if (!service)
9010 + return VCHIQ_ERROR;
9011 +
9012 + vchiq_log_info(vchiq_core_log_level,
9013 + "%d: remove_service:%d",
9014 + service->state->id, service->localport);
9015 +
9016 + if (service->srvstate == VCHIQ_SRVSTATE_FREE) {
9017 + unlock_service(service);
9018 + return VCHIQ_ERROR;
9019 + }
9020 +
9021 + mark_service_closing(service);
9022 +
9023 + if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) ||
9024 + (current == service->state->slot_handler_thread)) {
9025 + /* Make it look like a client, because it must be removed and
9026 + not left in the LISTENING state. */
9027 + service->public_fourcc = VCHIQ_FOURCC_INVALID;
9028 +
9029 + status = vchiq_close_service_internal(service,
9030 + 0/*!close_recvd*/);
9031 + BUG_ON(status == VCHIQ_RETRY);
9032 + } else {
9033 + /* Mark the service for removal by the slot handler */
9034 + request_poll(service->state, service, VCHIQ_POLL_REMOVE);
9035 + }
9036 + while (1) {
9037 + if (down_interruptible(&service->remove_event) != 0) {
9038 + status = VCHIQ_RETRY;
9039 + break;
9040 + }
9041 +
9042 + if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
9043 + (service->srvstate == VCHIQ_SRVSTATE_OPEN))
9044 + break;
9045 +
9046 + vchiq_log_warning(vchiq_core_log_level,
9047 + "%d: remove_service:%d - waiting in state %s",
9048 + service->state->id, service->localport,
9049 + srvstate_names[service->srvstate]);
9050 + }
9051 +
9052 + if ((status == VCHIQ_SUCCESS) &&
9053 + (service->srvstate != VCHIQ_SRVSTATE_FREE))
9054 + status = VCHIQ_ERROR;
9055 +
9056 + unlock_service(service);
9057 +
9058 + return status;
9059 +}
9060 +
9061 +
9062 +/* This function may be called by kernel threads or user threads.
9063 + * User threads may receive VCHIQ_RETRY to indicate that a signal has been
9064 + * received and the call should be retried after being returned to user
9065 + * context.
9066 + * When called in blocking mode, the userdata field points to a bulk_waiter
9067 + * structure.
9068 + */
9069 +VCHIQ_STATUS_T
9070 +vchiq_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle,
9071 + VCHI_MEM_HANDLE_T memhandle, void *offset, int size, void *userdata,
9072 + VCHIQ_BULK_MODE_T mode, VCHIQ_BULK_DIR_T dir)
9073 +{
9074 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
9075 + VCHIQ_BULK_QUEUE_T *queue;
9076 + VCHIQ_BULK_T *bulk;
9077 + VCHIQ_STATE_T *state;
9078 + struct bulk_waiter *bulk_waiter = NULL;
9079 + const char dir_char = (dir == VCHIQ_BULK_TRANSMIT) ? 't' : 'r';
9080 + const int dir_msgtype = (dir == VCHIQ_BULK_TRANSMIT) ?
9081 + VCHIQ_MSG_BULK_TX : VCHIQ_MSG_BULK_RX;
9082 + VCHIQ_STATUS_T status = VCHIQ_ERROR;
9083 +
9084 + if (!service ||
9085 + (service->srvstate != VCHIQ_SRVSTATE_OPEN) ||
9086 + ((memhandle == VCHI_MEM_HANDLE_INVALID) && (offset == NULL)) ||
9087 + (vchiq_check_service(service) != VCHIQ_SUCCESS))
9088 + goto error_exit;
9089 +
9090 + switch (mode) {
9091 + case VCHIQ_BULK_MODE_NOCALLBACK:
9092 + case VCHIQ_BULK_MODE_CALLBACK:
9093 + break;
9094 + case VCHIQ_BULK_MODE_BLOCKING:
9095 + bulk_waiter = (struct bulk_waiter *)userdata;
9096 + sema_init(&bulk_waiter->event, 0);
9097 + bulk_waiter->actual = 0;
9098 + bulk_waiter->bulk = NULL;
9099 + break;
9100 + case VCHIQ_BULK_MODE_WAITING:
9101 + bulk_waiter = (struct bulk_waiter *)userdata;
9102 + bulk = bulk_waiter->bulk;
9103 + goto waiting;
9104 + default:
9105 + goto error_exit;
9106 + }
9107 +
9108 + state = service->state;
9109 +
9110 + queue = (dir == VCHIQ_BULK_TRANSMIT) ?
9111 + &service->bulk_tx : &service->bulk_rx;
9112 +
9113 + if (mutex_lock_interruptible(&service->bulk_mutex) != 0) {
9114 + status = VCHIQ_RETRY;
9115 + goto error_exit;
9116 + }
9117 +
9118 + if (queue->local_insert == queue->remove + VCHIQ_NUM_SERVICE_BULKS) {
9119 + VCHIQ_SERVICE_STATS_INC(service, bulk_stalls);
9120 + do {
9121 + mutex_unlock(&service->bulk_mutex);
9122 + if (down_interruptible(&service->bulk_remove_event)
9123 + != 0) {
9124 + status = VCHIQ_RETRY;
9125 + goto error_exit;
9126 + }
9127 + if (mutex_lock_interruptible(&service->bulk_mutex)
9128 + != 0) {
9129 + status = VCHIQ_RETRY;
9130 + goto error_exit;
9131 + }
9132 + } while (queue->local_insert == queue->remove +
9133 + VCHIQ_NUM_SERVICE_BULKS);
9134 + }
9135 +
9136 + bulk = &queue->bulks[BULK_INDEX(queue->local_insert)];
9137 +
9138 + bulk->mode = mode;
9139 + bulk->dir = dir;
9140 + bulk->userdata = userdata;
9141 + bulk->size = size;
9142 + bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
9143 +
9144 + if (vchiq_prepare_bulk_data(bulk, memhandle, offset, size, dir) !=
9145 + VCHIQ_SUCCESS)
9146 + goto unlock_error_exit;
9147 +
9148 + wmb();
9149 +
9150 + vchiq_log_info(vchiq_core_log_level,
9151 + "%d: bt (%d->%d) %cx %x@%x %x",
9152 + state->id,
9153 + service->localport, service->remoteport, dir_char,
9154 + size, (unsigned int)bulk->data, (unsigned int)userdata);
9155 +
9156 + /* The slot mutex must be held when the service is being closed, so
9157 + claim it here to ensure that isn't happening */
9158 + if (mutex_lock_interruptible(&state->slot_mutex) != 0) {
9159 + status = VCHIQ_RETRY;
9160 + goto cancel_bulk_error_exit;
9161 + }
9162 +
9163 + if (service->srvstate != VCHIQ_SRVSTATE_OPEN)
9164 + goto unlock_both_error_exit;
9165 +
9166 + if (state->is_master) {
9167 + queue->local_insert++;
9168 + if (resolve_bulks(service, queue))
9169 + request_poll(state, service,
9170 + (dir == VCHIQ_BULK_TRANSMIT) ?
9171 + VCHIQ_POLL_TXNOTIFY : VCHIQ_POLL_RXNOTIFY);
9172 + } else {
9173 + int payload[2] = { (int)bulk->data, bulk->size };
9174 + VCHIQ_ELEMENT_T element = { payload, sizeof(payload) };
9175 +
9176 + status = queue_message(state, NULL,
9177 + VCHIQ_MAKE_MSG(dir_msgtype,
9178 + service->localport, service->remoteport),
9179 + &element, 1, sizeof(payload),
9180 + QMFLAGS_IS_BLOCKING |
9181 + QMFLAGS_NO_MUTEX_LOCK |
9182 + QMFLAGS_NO_MUTEX_UNLOCK);
9183 + if (status != VCHIQ_SUCCESS) {
9184 + goto unlock_both_error_exit;
9185 + }
9186 + queue->local_insert++;
9187 + }
9188 +
9189 + mutex_unlock(&state->slot_mutex);
9190 + mutex_unlock(&service->bulk_mutex);
9191 +
9192 + vchiq_log_trace(vchiq_core_log_level,
9193 + "%d: bt:%d %cx li=%x ri=%x p=%x",
9194 + state->id,
9195 + service->localport, dir_char,
9196 + queue->local_insert, queue->remote_insert, queue->process);
9197 +
9198 +waiting:
9199 + unlock_service(service);
9200 +
9201 + status = VCHIQ_SUCCESS;
9202 +
9203 + if (bulk_waiter) {
9204 + bulk_waiter->bulk = bulk;
9205 + if (down_interruptible(&bulk_waiter->event) != 0)
9206 + status = VCHIQ_RETRY;
9207 + else if (bulk_waiter->actual == VCHIQ_BULK_ACTUAL_ABORTED)
9208 + status = VCHIQ_ERROR;
9209 + }
9210 +
9211 + return status;
9212 +
9213 +unlock_both_error_exit:
9214 + mutex_unlock(&state->slot_mutex);
9215 +cancel_bulk_error_exit:
9216 + vchiq_complete_bulk(bulk);
9217 +unlock_error_exit:
9218 + mutex_unlock(&service->bulk_mutex);
9219 +
9220 +error_exit:
9221 + if (service)
9222 + unlock_service(service);
9223 + return status;
9224 +}
9225 +
9226 +VCHIQ_STATUS_T
9227 +vchiq_queue_message(VCHIQ_SERVICE_HANDLE_T handle,
9228 + const VCHIQ_ELEMENT_T *elements, unsigned int count)
9229 +{
9230 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
9231 + VCHIQ_STATUS_T status = VCHIQ_ERROR;
9232 +
9233 + unsigned int size = 0;
9234 + unsigned int i;
9235 +
9236 + if (!service ||
9237 + (vchiq_check_service(service) != VCHIQ_SUCCESS))
9238 + goto error_exit;
9239 +
9240 + for (i = 0; i < (unsigned int)count; i++) {
9241 + if (elements[i].size) {
9242 + if (elements[i].data == NULL) {
9243 + VCHIQ_SERVICE_STATS_INC(service, error_count);
9244 + goto error_exit;
9245 + }
9246 + size += elements[i].size;
9247 + }
9248 + }
9249 +
9250 + if (size > VCHIQ_MAX_MSG_SIZE) {
9251 + VCHIQ_SERVICE_STATS_INC(service, error_count);
9252 + goto error_exit;
9253 + }
9254 +
9255 + switch (service->srvstate) {
9256 + case VCHIQ_SRVSTATE_OPEN:
9257 + status = queue_message(service->state, service,
9258 + VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA,
9259 + service->localport,
9260 + service->remoteport),
9261 + elements, count, size, 1);
9262 + break;
9263 + case VCHIQ_SRVSTATE_OPENSYNC:
9264 + status = queue_message_sync(service->state, service,
9265 + VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA,
9266 + service->localport,
9267 + service->remoteport),
9268 + elements, count, size, 1);
9269 + break;
9270 + default:
9271 + status = VCHIQ_ERROR;
9272 + break;
9273 + }
9274 +
9275 +error_exit:
9276 + if (service)
9277 + unlock_service(service);
9278 +
9279 + return status;
9280 +}
9281 +
9282 +void
9283 +vchiq_release_message(VCHIQ_SERVICE_HANDLE_T handle, VCHIQ_HEADER_T *header)
9284 +{
9285 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
9286 + VCHIQ_SHARED_STATE_T *remote;
9287 + VCHIQ_STATE_T *state;
9288 + int slot_index;
9289 +
9290 + if (!service)
9291 + return;
9292 +
9293 + state = service->state;
9294 + remote = state->remote;
9295 +
9296 + slot_index = SLOT_INDEX_FROM_DATA(state, (void *)header);
9297 +
9298 + if ((slot_index >= remote->slot_first) &&
9299 + (slot_index <= remote->slot_last)) {
9300 + int msgid = header->msgid;
9301 + if (msgid & VCHIQ_MSGID_CLAIMED) {
9302 + VCHIQ_SLOT_INFO_T *slot_info =
9303 + SLOT_INFO_FROM_INDEX(state, slot_index);
9304 +
9305 + release_slot(state, slot_info, header, service);
9306 + }
9307 + } else if (slot_index == remote->slot_sync)
9308 + release_message_sync(state, header);
9309 +
9310 + unlock_service(service);
9311 +}
9312 +
9313 +static void
9314 +release_message_sync(VCHIQ_STATE_T *state, VCHIQ_HEADER_T *header)
9315 +{
9316 + header->msgid = VCHIQ_MSGID_PADDING;
9317 + wmb();
9318 + remote_event_signal(&state->remote->sync_release);
9319 +}
9320 +
9321 +VCHIQ_STATUS_T
9322 +vchiq_get_peer_version(VCHIQ_SERVICE_HANDLE_T handle, short *peer_version)
9323 +{
9324 + VCHIQ_STATUS_T status = VCHIQ_ERROR;
9325 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
9326 +
9327 + if (!service ||
9328 + (vchiq_check_service(service) != VCHIQ_SUCCESS) ||
9329 + !peer_version)
9330 + goto exit;
9331 + *peer_version = service->peer_version;
9332 + status = VCHIQ_SUCCESS;
9333 +
9334 +exit:
9335 + if (service)
9336 + unlock_service(service);
9337 + return status;
9338 +}
9339 +
9340 +VCHIQ_STATUS_T
9341 +vchiq_get_config(VCHIQ_INSTANCE_T instance,
9342 + int config_size, VCHIQ_CONFIG_T *pconfig)
9343 +{
9344 + VCHIQ_CONFIG_T config;
9345 +
9346 + (void)instance;
9347 +
9348 + config.max_msg_size = VCHIQ_MAX_MSG_SIZE;
9349 + config.bulk_threshold = VCHIQ_MAX_MSG_SIZE;
9350 + config.max_outstanding_bulks = VCHIQ_NUM_SERVICE_BULKS;
9351 + config.max_services = VCHIQ_MAX_SERVICES;
9352 + config.version = VCHIQ_VERSION;
9353 + config.version_min = VCHIQ_VERSION_MIN;
9354 +
9355 + if (config_size > sizeof(VCHIQ_CONFIG_T))
9356 + return VCHIQ_ERROR;
9357 +
9358 + memcpy(pconfig, &config,
9359 + min(config_size, (int)(sizeof(VCHIQ_CONFIG_T))));
9360 +
9361 + return VCHIQ_SUCCESS;
9362 +}
9363 +
9364 +VCHIQ_STATUS_T
9365 +vchiq_set_service_option(VCHIQ_SERVICE_HANDLE_T handle,
9366 + VCHIQ_SERVICE_OPTION_T option, int value)
9367 +{
9368 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
9369 + VCHIQ_STATUS_T status = VCHIQ_ERROR;
9370 +
9371 + if (service) {
9372 + switch (option) {
9373 + case VCHIQ_SERVICE_OPTION_AUTOCLOSE:
9374 + service->auto_close = value;
9375 + status = VCHIQ_SUCCESS;
9376 + break;
9377 +
9378 + case VCHIQ_SERVICE_OPTION_SLOT_QUOTA: {
9379 + VCHIQ_SERVICE_QUOTA_T *service_quota =
9380 + &service->state->service_quotas[
9381 + service->localport];
9382 + if (value == 0)
9383 + value = service->state->default_slot_quota;
9384 + if ((value >= service_quota->slot_use_count) &&
9385 + (value < (unsigned short)~0)) {
9386 + service_quota->slot_quota = value;
9387 + if ((value >= service_quota->slot_use_count) &&
9388 + (service_quota->message_quota >=
9389 + service_quota->message_use_count)) {
9390 + /* Signal the service that it may have
9391 + ** dropped below its quota */
9392 + up(&service_quota->quota_event);
9393 + }
9394 + status = VCHIQ_SUCCESS;
9395 + }
9396 + } break;
9397 +
9398 + case VCHIQ_SERVICE_OPTION_MESSAGE_QUOTA: {
9399 + VCHIQ_SERVICE_QUOTA_T *service_quota =
9400 + &service->state->service_quotas[
9401 + service->localport];
9402 + if (value == 0)
9403 + value = service->state->default_message_quota;
9404 + if ((value >= service_quota->message_use_count) &&
9405 + (value < (unsigned short)~0)) {
9406 + service_quota->message_quota = value;
9407 + if ((value >=
9408 + service_quota->message_use_count) &&
9409 + (service_quota->slot_quota >=
9410 + service_quota->slot_use_count))
9411 + /* Signal the service that it may have
9412 + ** dropped below its quota */
9413 + up(&service_quota->quota_event);
9414 + status = VCHIQ_SUCCESS;
9415 + }
9416 + } break;
9417 +
9418 + case VCHIQ_SERVICE_OPTION_SYNCHRONOUS:
9419 + if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) ||
9420 + (service->srvstate ==
9421 + VCHIQ_SRVSTATE_LISTENING)) {
9422 + service->sync = value;
9423 + status = VCHIQ_SUCCESS;
9424 + }
9425 + break;
9426 +
9427 + case VCHIQ_SERVICE_OPTION_TRACE:
9428 + service->trace = value;
9429 + status = VCHIQ_SUCCESS;
9430 + break;
9431 +
9432 + default:
9433 + break;
9434 + }
9435 + unlock_service(service);
9436 + }
9437 +
9438 + return status;
9439 +}
9440 +
9441 +void
9442 +vchiq_dump_shared_state(void *dump_context, VCHIQ_STATE_T *state,
9443 + VCHIQ_SHARED_STATE_T *shared, const char *label)
9444 +{
9445 + static const char *const debug_names[] = {
9446 + "<entries>",
9447 + "SLOT_HANDLER_COUNT",
9448 + "SLOT_HANDLER_LINE",
9449 + "PARSE_LINE",
9450 + "PARSE_HEADER",
9451 + "PARSE_MSGID",
9452 + "AWAIT_COMPLETION_LINE",
9453 + "DEQUEUE_MESSAGE_LINE",
9454 + "SERVICE_CALLBACK_LINE",
9455 + "MSG_QUEUE_FULL_COUNT",
9456 + "COMPLETION_QUEUE_FULL_COUNT"
9457 + };
9458 + int i;
9459 +
9460 + char buf[80];
9461 + int len;
9462 + len = snprintf(buf, sizeof(buf),
9463 + " %s: slots %d-%d tx_pos=%x recycle=%x",
9464 + label, shared->slot_first, shared->slot_last,
9465 + shared->tx_pos, shared->slot_queue_recycle);
9466 + vchiq_dump(dump_context, buf, len + 1);
9467 +
9468 + len = snprintf(buf, sizeof(buf),
9469 + " Slots claimed:");
9470 + vchiq_dump(dump_context, buf, len + 1);
9471 +
9472 + for (i = shared->slot_first; i <= shared->slot_last; i++) {
9473 + VCHIQ_SLOT_INFO_T slot_info = *SLOT_INFO_FROM_INDEX(state, i);
9474 + if (slot_info.use_count != slot_info.release_count) {
9475 + len = snprintf(buf, sizeof(buf),
9476 + " %d: %d/%d", i, slot_info.use_count,
9477 + slot_info.release_count);
9478 + vchiq_dump(dump_context, buf, len + 1);
9479 + }
9480 + }
9481 +
9482 + for (i = 1; i < shared->debug[DEBUG_ENTRIES]; i++) {
9483 + len = snprintf(buf, sizeof(buf), " DEBUG: %s = %d(%x)",
9484 + debug_names[i], shared->debug[i], shared->debug[i]);
9485 + vchiq_dump(dump_context, buf, len + 1);
9486 + }
9487 +}
9488 +
9489 +void
9490 +vchiq_dump_state(void *dump_context, VCHIQ_STATE_T *state)
9491 +{
9492 + char buf[80];
9493 + int len;
9494 + int i;
9495 +
9496 + len = snprintf(buf, sizeof(buf), "State %d: %s", state->id,
9497 + conn_state_names[state->conn_state]);
9498 + vchiq_dump(dump_context, buf, len + 1);
9499 +
9500 + len = snprintf(buf, sizeof(buf),
9501 + " tx_pos=%x(@%x), rx_pos=%x(@%x)",
9502 + state->local->tx_pos,
9503 + (uint32_t)state->tx_data +
9504 + (state->local_tx_pos & VCHIQ_SLOT_MASK),
9505 + state->rx_pos,
9506 + (uint32_t)state->rx_data +
9507 + (state->rx_pos & VCHIQ_SLOT_MASK));
9508 + vchiq_dump(dump_context, buf, len + 1);
9509 +
9510 + len = snprintf(buf, sizeof(buf),
9511 + " Version: %d (min %d)",
9512 + VCHIQ_VERSION, VCHIQ_VERSION_MIN);
9513 + vchiq_dump(dump_context, buf, len + 1);
9514 +
9515 + if (VCHIQ_ENABLE_STATS) {
9516 + len = snprintf(buf, sizeof(buf),
9517 + " Stats: ctrl_tx_count=%d, ctrl_rx_count=%d, "
9518 + "error_count=%d",
9519 + state->stats.ctrl_tx_count, state->stats.ctrl_rx_count,
9520 + state->stats.error_count);
9521 + vchiq_dump(dump_context, buf, len + 1);
9522 + }
9523 +
9524 + len = snprintf(buf, sizeof(buf),
9525 + " Slots: %d available (%d data), %d recyclable, %d stalls "
9526 + "(%d data)",
9527 + ((state->slot_queue_available * VCHIQ_SLOT_SIZE) -
9528 + state->local_tx_pos) / VCHIQ_SLOT_SIZE,
9529 + state->data_quota - state->data_use_count,
9530 + state->local->slot_queue_recycle - state->slot_queue_available,
9531 + state->stats.slot_stalls, state->stats.data_stalls);
9532 + vchiq_dump(dump_context, buf, len + 1);
9533 +
9534 + vchiq_dump_platform_state(dump_context);
9535 +
9536 + vchiq_dump_shared_state(dump_context, state, state->local, "Local");
9537 + vchiq_dump_shared_state(dump_context, state, state->remote, "Remote");
9538 +
9539 + vchiq_dump_platform_instances(dump_context);
9540 +
9541 + for (i = 0; i < state->unused_service; i++) {
9542 + VCHIQ_SERVICE_T *service = find_service_by_port(state, i);
9543 +
9544 + if (service) {
9545 + vchiq_dump_service_state(dump_context, service);
9546 + unlock_service(service);
9547 + }
9548 + }
9549 +}
9550 +
9551 +void
9552 +vchiq_dump_service_state(void *dump_context, VCHIQ_SERVICE_T *service)
9553 +{
9554 + char buf[80];
9555 + int len;
9556 +
9557 + len = snprintf(buf, sizeof(buf), "Service %d: %s (ref %u)",
9558 + service->localport, srvstate_names[service->srvstate],
9559 + service->ref_count - 1); /*Don't include the lock just taken*/
9560 +
9561 + if (service->srvstate != VCHIQ_SRVSTATE_FREE) {
9562 + char remoteport[30];
9563 + VCHIQ_SERVICE_QUOTA_T *service_quota =
9564 + &service->state->service_quotas[service->localport];
9565 + int fourcc = service->base.fourcc;
9566 + int tx_pending, rx_pending;
9567 + if (service->remoteport != VCHIQ_PORT_FREE) {
9568 + int len2 = snprintf(remoteport, sizeof(remoteport),
9569 + "%d", service->remoteport);
9570 + if (service->public_fourcc != VCHIQ_FOURCC_INVALID)
9571 + snprintf(remoteport + len2,
9572 + sizeof(remoteport) - len2,
9573 + " (client %x)", service->client_id);
9574 + } else
9575 + strcpy(remoteport, "n/a");
9576 +
9577 + len += snprintf(buf + len, sizeof(buf) - len,
9578 + " '%c%c%c%c' remote %s (msg use %d/%d, slot use %d/%d)",
9579 + VCHIQ_FOURCC_AS_4CHARS(fourcc),
9580 + remoteport,
9581 + service_quota->message_use_count,
9582 + service_quota->message_quota,
9583 + service_quota->slot_use_count,
9584 + service_quota->slot_quota);
9585 +
9586 + vchiq_dump(dump_context, buf, len + 1);
9587 +
9588 + tx_pending = service->bulk_tx.local_insert -
9589 + service->bulk_tx.remote_insert;
9590 +
9591 + rx_pending = service->bulk_rx.local_insert -
9592 + service->bulk_rx.remote_insert;
9593 +
9594 + len = snprintf(buf, sizeof(buf),
9595 + " Bulk: tx_pending=%d (size %d),"
9596 + " rx_pending=%d (size %d)",
9597 + tx_pending,
9598 + tx_pending ? service->bulk_tx.bulks[
9599 + BULK_INDEX(service->bulk_tx.remove)].size : 0,
9600 + rx_pending,
9601 + rx_pending ? service->bulk_rx.bulks[
9602 + BULK_INDEX(service->bulk_rx.remove)].size : 0);
9603 +
9604 + if (VCHIQ_ENABLE_STATS) {
9605 + vchiq_dump(dump_context, buf, len + 1);
9606 +
9607 + len = snprintf(buf, sizeof(buf),
9608 + " Ctrl: tx_count=%d, tx_bytes=%llu, "
9609 + "rx_count=%d, rx_bytes=%llu",
9610 + service->stats.ctrl_tx_count,
9611 + service->stats.ctrl_tx_bytes,
9612 + service->stats.ctrl_rx_count,
9613 + service->stats.ctrl_rx_bytes);
9614 + vchiq_dump(dump_context, buf, len + 1);
9615 +
9616 + len = snprintf(buf, sizeof(buf),
9617 + " Bulk: tx_count=%d, tx_bytes=%llu, "
9618 + "rx_count=%d, rx_bytes=%llu",
9619 + service->stats.bulk_tx_count,
9620 + service->stats.bulk_tx_bytes,
9621 + service->stats.bulk_rx_count,
9622 + service->stats.bulk_rx_bytes);
9623 + vchiq_dump(dump_context, buf, len + 1);
9624 +
9625 + len = snprintf(buf, sizeof(buf),
9626 + " %d quota stalls, %d slot stalls, "
9627 + "%d bulk stalls, %d aborted, %d errors",
9628 + service->stats.quota_stalls,
9629 + service->stats.slot_stalls,
9630 + service->stats.bulk_stalls,
9631 + service->stats.bulk_aborted_count,
9632 + service->stats.error_count);
9633 + }
9634 + }
9635 +
9636 + vchiq_dump(dump_context, buf, len + 1);
9637 +
9638 + if (service->srvstate != VCHIQ_SRVSTATE_FREE)
9639 + vchiq_dump_platform_service_state(dump_context, service);
9640 +}
9641 +
9642 +
9643 +void
9644 +vchiq_loud_error_header(void)
9645 +{
9646 + vchiq_log_error(vchiq_core_log_level,
9647 + "============================================================"
9648 + "================");
9649 + vchiq_log_error(vchiq_core_log_level,
9650 + "============================================================"
9651 + "================");
9652 + vchiq_log_error(vchiq_core_log_level, "=====");
9653 +}
9654 +
9655 +void
9656 +vchiq_loud_error_footer(void)
9657 +{
9658 + vchiq_log_error(vchiq_core_log_level, "=====");
9659 + vchiq_log_error(vchiq_core_log_level,
9660 + "============================================================"
9661 + "================");
9662 + vchiq_log_error(vchiq_core_log_level,
9663 + "============================================================"
9664 + "================");
9665 +}
9666 +
9667 +
9668 +VCHIQ_STATUS_T vchiq_send_remote_use(VCHIQ_STATE_T *state)
9669 +{
9670 + VCHIQ_STATUS_T status = VCHIQ_RETRY;
9671 + if (state->conn_state != VCHIQ_CONNSTATE_DISCONNECTED)
9672 + status = queue_message(state, NULL,
9673 + VCHIQ_MAKE_MSG(VCHIQ_MSG_REMOTE_USE, 0, 0),
9674 + NULL, 0, 0, 0);
9675 + return status;
9676 +}
9677 +
9678 +VCHIQ_STATUS_T vchiq_send_remote_release(VCHIQ_STATE_T *state)
9679 +{
9680 + VCHIQ_STATUS_T status = VCHIQ_RETRY;
9681 + if (state->conn_state != VCHIQ_CONNSTATE_DISCONNECTED)
9682 + status = queue_message(state, NULL,
9683 + VCHIQ_MAKE_MSG(VCHIQ_MSG_REMOTE_RELEASE, 0, 0),
9684 + NULL, 0, 0, 0);
9685 + return status;
9686 +}
9687 +
9688 +VCHIQ_STATUS_T vchiq_send_remote_use_active(VCHIQ_STATE_T *state)
9689 +{
9690 + VCHIQ_STATUS_T status = VCHIQ_RETRY;
9691 + if (state->conn_state != VCHIQ_CONNSTATE_DISCONNECTED)
9692 + status = queue_message(state, NULL,
9693 + VCHIQ_MAKE_MSG(VCHIQ_MSG_REMOTE_USE_ACTIVE, 0, 0),
9694 + NULL, 0, 0, 0);
9695 + return status;
9696 +}
9697 +
9698 +void vchiq_log_dump_mem(const char *label, uint32_t addr, const void *voidMem,
9699 + size_t numBytes)
9700 +{
9701 + const uint8_t *mem = (const uint8_t *)voidMem;
9702 + size_t offset;
9703 + char lineBuf[100];
9704 + char *s;
9705 +
9706 + while (numBytes > 0) {
9707 + s = lineBuf;
9708 +
9709 + for (offset = 0; offset < 16; offset++) {
9710 + if (offset < numBytes)
9711 + s += snprintf(s, 4, "%02x ", mem[offset]);
9712 + else
9713 + s += snprintf(s, 4, " ");
9714 + }
9715 +
9716 + for (offset = 0; offset < 16; offset++) {
9717 + if (offset < numBytes) {
9718 + uint8_t ch = mem[offset];
9719 +
9720 + if ((ch < ' ') || (ch > '~'))
9721 + ch = '.';
9722 + *s++ = (char)ch;
9723 + }
9724 + }
9725 + *s++ = '\0';
9726 +
9727 + if ((label != NULL) && (*label != '\0'))
9728 + vchiq_log_trace(VCHIQ_LOG_TRACE,
9729 + "%s: %08x: %s", label, addr, lineBuf);
9730 + else
9731 + vchiq_log_trace(VCHIQ_LOG_TRACE,
9732 + "%08x: %s", addr, lineBuf);
9733 +
9734 + addr += 16;
9735 + mem += 16;
9736 + if (numBytes > 16)
9737 + numBytes -= 16;
9738 + else
9739 + numBytes = 0;
9740 + }
9741 +}
9742 --- /dev/null
9743 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.h
9744 @@ -0,0 +1,712 @@
9745 +/**
9746 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
9747 + *
9748 + * Redistribution and use in source and binary forms, with or without
9749 + * modification, are permitted provided that the following conditions
9750 + * are met:
9751 + * 1. Redistributions of source code must retain the above copyright
9752 + * notice, this list of conditions, and the following disclaimer,
9753 + * without modification.
9754 + * 2. Redistributions in binary form must reproduce the above copyright
9755 + * notice, this list of conditions and the following disclaimer in the
9756 + * documentation and/or other materials provided with the distribution.
9757 + * 3. The names of the above-listed copyright holders may not be used
9758 + * to endorse or promote products derived from this software without
9759 + * specific prior written permission.
9760 + *
9761 + * ALTERNATIVELY, this software may be distributed under the terms of the
9762 + * GNU General Public License ("GPL") version 2, as published by the Free
9763 + * Software Foundation.
9764 + *
9765 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
9766 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
9767 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
9768 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
9769 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
9770 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
9771 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
9772 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
9773 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
9774 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
9775 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
9776 + */
9777 +
9778 +#ifndef VCHIQ_CORE_H
9779 +#define VCHIQ_CORE_H
9780 +
9781 +#include <linux/mutex.h>
9782 +#include <linux/semaphore.h>
9783 +#include <linux/kthread.h>
9784 +
9785 +#include "vchiq_cfg.h"
9786 +
9787 +#include "vchiq.h"
9788 +
9789 +/* Run time control of log level, based on KERN_XXX level. */
9790 +#define VCHIQ_LOG_DEFAULT 4
9791 +#define VCHIQ_LOG_ERROR 3
9792 +#define VCHIQ_LOG_WARNING 4
9793 +#define VCHIQ_LOG_INFO 6
9794 +#define VCHIQ_LOG_TRACE 7
9795 +
9796 +#define VCHIQ_LOG_PREFIX KERN_INFO "vchiq: "
9797 +
9798 +#ifndef vchiq_log_error
9799 +#define vchiq_log_error(cat, fmt, ...) \
9800 + do { if (cat >= VCHIQ_LOG_ERROR) \
9801 + printk(VCHIQ_LOG_PREFIX fmt "\n", ##__VA_ARGS__); } while (0)
9802 +#endif
9803 +#ifndef vchiq_log_warning
9804 +#define vchiq_log_warning(cat, fmt, ...) \
9805 + do { if (cat >= VCHIQ_LOG_WARNING) \
9806 + printk(VCHIQ_LOG_PREFIX fmt "\n", ##__VA_ARGS__); } while (0)
9807 +#endif
9808 +#ifndef vchiq_log_info
9809 +#define vchiq_log_info(cat, fmt, ...) \
9810 + do { if (cat >= VCHIQ_LOG_INFO) \
9811 + printk(VCHIQ_LOG_PREFIX fmt "\n", ##__VA_ARGS__); } while (0)
9812 +#endif
9813 +#ifndef vchiq_log_trace
9814 +#define vchiq_log_trace(cat, fmt, ...) \
9815 + do { if (cat >= VCHIQ_LOG_TRACE) \
9816 + printk(VCHIQ_LOG_PREFIX fmt "\n", ##__VA_ARGS__); } while (0)
9817 +#endif
9818 +
9819 +#define vchiq_loud_error(...) \
9820 + vchiq_log_error(vchiq_core_log_level, "===== " __VA_ARGS__)
9821 +
9822 +#ifndef vchiq_static_assert
9823 +#define vchiq_static_assert(cond) __attribute__((unused)) \
9824 + extern int vchiq_static_assert[(cond) ? 1 : -1]
9825 +#endif
9826 +
9827 +#define IS_POW2(x) (x && ((x & (x - 1)) == 0))
9828 +
9829 +/* Ensure that the slot size and maximum number of slots are powers of 2 */
9830 +vchiq_static_assert(IS_POW2(VCHIQ_SLOT_SIZE));
9831 +vchiq_static_assert(IS_POW2(VCHIQ_MAX_SLOTS));
9832 +vchiq_static_assert(IS_POW2(VCHIQ_MAX_SLOTS_PER_SIDE));
9833 +
9834 +#define VCHIQ_SLOT_MASK (VCHIQ_SLOT_SIZE - 1)
9835 +#define VCHIQ_SLOT_QUEUE_MASK (VCHIQ_MAX_SLOTS_PER_SIDE - 1)
9836 +#define VCHIQ_SLOT_ZERO_SLOTS ((sizeof(VCHIQ_SLOT_ZERO_T) + \
9837 + VCHIQ_SLOT_SIZE - 1) / VCHIQ_SLOT_SIZE)
9838 +
9839 +#define VCHIQ_MSG_PADDING 0 /* - */
9840 +#define VCHIQ_MSG_CONNECT 1 /* - */
9841 +#define VCHIQ_MSG_OPEN 2 /* + (srcport, -), fourcc, client_id */
9842 +#define VCHIQ_MSG_OPENACK 3 /* + (srcport, dstport) */
9843 +#define VCHIQ_MSG_CLOSE 4 /* + (srcport, dstport) */
9844 +#define VCHIQ_MSG_DATA 5 /* + (srcport, dstport) */
9845 +#define VCHIQ_MSG_BULK_RX 6 /* + (srcport, dstport), data, size */
9846 +#define VCHIQ_MSG_BULK_TX 7 /* + (srcport, dstport), data, size */
9847 +#define VCHIQ_MSG_BULK_RX_DONE 8 /* + (srcport, dstport), actual */
9848 +#define VCHIQ_MSG_BULK_TX_DONE 9 /* + (srcport, dstport), actual */
9849 +#define VCHIQ_MSG_PAUSE 10 /* - */
9850 +#define VCHIQ_MSG_RESUME 11 /* - */
9851 +#define VCHIQ_MSG_REMOTE_USE 12 /* - */
9852 +#define VCHIQ_MSG_REMOTE_RELEASE 13 /* - */
9853 +#define VCHIQ_MSG_REMOTE_USE_ACTIVE 14 /* - */
9854 +
9855 +#define VCHIQ_PORT_MAX (VCHIQ_MAX_SERVICES - 1)
9856 +#define VCHIQ_PORT_FREE 0x1000
9857 +#define VCHIQ_PORT_IS_VALID(port) (port < VCHIQ_PORT_FREE)
9858 +#define VCHIQ_MAKE_MSG(type, srcport, dstport) \
9859 + ((type<<24) | (srcport<<12) | (dstport<<0))
9860 +#define VCHIQ_MSG_TYPE(msgid) ((unsigned int)msgid >> 24)
9861 +#define VCHIQ_MSG_SRCPORT(msgid) \
9862 + (unsigned short)(((unsigned int)msgid >> 12) & 0xfff)
9863 +#define VCHIQ_MSG_DSTPORT(msgid) \
9864 + ((unsigned short)msgid & 0xfff)
9865 +
9866 +#define VCHIQ_FOURCC_AS_4CHARS(fourcc) \
9867 + ((fourcc) >> 24) & 0xff, \
9868 + ((fourcc) >> 16) & 0xff, \
9869 + ((fourcc) >> 8) & 0xff, \
9870 + (fourcc) & 0xff
9871 +
9872 +/* Ensure the fields are wide enough */
9873 +vchiq_static_assert(VCHIQ_MSG_SRCPORT(VCHIQ_MAKE_MSG(0, 0, VCHIQ_PORT_MAX))
9874 + == 0);
9875 +vchiq_static_assert(VCHIQ_MSG_TYPE(VCHIQ_MAKE_MSG(0, VCHIQ_PORT_MAX, 0)) == 0);
9876 +vchiq_static_assert((unsigned int)VCHIQ_PORT_MAX <
9877 + (unsigned int)VCHIQ_PORT_FREE);
9878 +
9879 +#define VCHIQ_MSGID_PADDING VCHIQ_MAKE_MSG(VCHIQ_MSG_PADDING, 0, 0)
9880 +#define VCHIQ_MSGID_CLAIMED 0x40000000
9881 +
9882 +#define VCHIQ_FOURCC_INVALID 0x00000000
9883 +#define VCHIQ_FOURCC_IS_LEGAL(fourcc) (fourcc != VCHIQ_FOURCC_INVALID)
9884 +
9885 +#define VCHIQ_BULK_ACTUAL_ABORTED -1
9886 +
9887 +typedef uint32_t BITSET_T;
9888 +
9889 +vchiq_static_assert((sizeof(BITSET_T) * 8) == 32);
9890 +
9891 +#define BITSET_SIZE(b) ((b + 31) >> 5)
9892 +#define BITSET_WORD(b) (b >> 5)
9893 +#define BITSET_BIT(b) (1 << (b & 31))
9894 +#define BITSET_ZERO(bs) memset(bs, 0, sizeof(bs))
9895 +#define BITSET_IS_SET(bs, b) (bs[BITSET_WORD(b)] & BITSET_BIT(b))
9896 +#define BITSET_SET(bs, b) (bs[BITSET_WORD(b)] |= BITSET_BIT(b))
9897 +#define BITSET_CLR(bs, b) (bs[BITSET_WORD(b)] &= ~BITSET_BIT(b))
9898 +
9899 +#if VCHIQ_ENABLE_STATS
9900 +#define VCHIQ_STATS_INC(state, stat) (state->stats. stat++)
9901 +#define VCHIQ_SERVICE_STATS_INC(service, stat) (service->stats. stat++)
9902 +#define VCHIQ_SERVICE_STATS_ADD(service, stat, addend) \
9903 + (service->stats. stat += addend)
9904 +#else
9905 +#define VCHIQ_STATS_INC(state, stat) ((void)0)
9906 +#define VCHIQ_SERVICE_STATS_INC(service, stat) ((void)0)
9907 +#define VCHIQ_SERVICE_STATS_ADD(service, stat, addend) ((void)0)
9908 +#endif
9909 +
9910 +enum {
9911 + DEBUG_ENTRIES,
9912 +#if VCHIQ_ENABLE_DEBUG
9913 + DEBUG_SLOT_HANDLER_COUNT,
9914 + DEBUG_SLOT_HANDLER_LINE,
9915 + DEBUG_PARSE_LINE,
9916 + DEBUG_PARSE_HEADER,
9917 + DEBUG_PARSE_MSGID,
9918 + DEBUG_AWAIT_COMPLETION_LINE,
9919 + DEBUG_DEQUEUE_MESSAGE_LINE,
9920 + DEBUG_SERVICE_CALLBACK_LINE,
9921 + DEBUG_MSG_QUEUE_FULL_COUNT,
9922 + DEBUG_COMPLETION_QUEUE_FULL_COUNT,
9923 +#endif
9924 + DEBUG_MAX
9925 +};
9926 +
9927 +#if VCHIQ_ENABLE_DEBUG
9928 +
9929 +#define DEBUG_INITIALISE(local) int *debug_ptr = (local)->debug;
9930 +#define DEBUG_TRACE(d) \
9931 + do { debug_ptr[DEBUG_ ## d] = __LINE__; dsb(); } while (0)
9932 +#define DEBUG_VALUE(d, v) \
9933 + do { debug_ptr[DEBUG_ ## d] = (v); dsb(); } while (0)
9934 +#define DEBUG_COUNT(d) \
9935 + do { debug_ptr[DEBUG_ ## d]++; dsb(); } while (0)
9936 +
9937 +#else /* VCHIQ_ENABLE_DEBUG */
9938 +
9939 +#define DEBUG_INITIALISE(local)
9940 +#define DEBUG_TRACE(d)
9941 +#define DEBUG_VALUE(d, v)
9942 +#define DEBUG_COUNT(d)
9943 +
9944 +#endif /* VCHIQ_ENABLE_DEBUG */
9945 +
9946 +typedef enum {
9947 + VCHIQ_CONNSTATE_DISCONNECTED,
9948 + VCHIQ_CONNSTATE_CONNECTING,
9949 + VCHIQ_CONNSTATE_CONNECTED,
9950 + VCHIQ_CONNSTATE_PAUSING,
9951 + VCHIQ_CONNSTATE_PAUSE_SENT,
9952 + VCHIQ_CONNSTATE_PAUSED,
9953 + VCHIQ_CONNSTATE_RESUMING,
9954 + VCHIQ_CONNSTATE_PAUSE_TIMEOUT,
9955 + VCHIQ_CONNSTATE_RESUME_TIMEOUT
9956 +} VCHIQ_CONNSTATE_T;
9957 +
9958 +enum {
9959 + VCHIQ_SRVSTATE_FREE,
9960 + VCHIQ_SRVSTATE_HIDDEN,
9961 + VCHIQ_SRVSTATE_LISTENING,
9962 + VCHIQ_SRVSTATE_OPENING,
9963 + VCHIQ_SRVSTATE_OPEN,
9964 + VCHIQ_SRVSTATE_OPENSYNC,
9965 + VCHIQ_SRVSTATE_CLOSESENT,
9966 + VCHIQ_SRVSTATE_CLOSERECVD,
9967 + VCHIQ_SRVSTATE_CLOSEWAIT,
9968 + VCHIQ_SRVSTATE_CLOSED
9969 +};
9970 +
9971 +enum {
9972 + VCHIQ_POLL_TERMINATE,
9973 + VCHIQ_POLL_REMOVE,
9974 + VCHIQ_POLL_TXNOTIFY,
9975 + VCHIQ_POLL_RXNOTIFY,
9976 + VCHIQ_POLL_COUNT
9977 +};
9978 +
9979 +typedef enum {
9980 + VCHIQ_BULK_TRANSMIT,
9981 + VCHIQ_BULK_RECEIVE
9982 +} VCHIQ_BULK_DIR_T;
9983 +
9984 +typedef void (*VCHIQ_USERDATA_TERM_T)(void *userdata);
9985 +
9986 +typedef struct vchiq_bulk_struct {
9987 + short mode;
9988 + short dir;
9989 + void *userdata;
9990 + VCHI_MEM_HANDLE_T handle;
9991 + void *data;
9992 + int size;
9993 + void *remote_data;
9994 + int remote_size;
9995 + int actual;
9996 +} VCHIQ_BULK_T;
9997 +
9998 +typedef struct vchiq_bulk_queue_struct {
9999 + int local_insert; /* Where to insert the next local bulk */
10000 + int remote_insert; /* Where to insert the next remote bulk (master) */
10001 + int process; /* Bulk to transfer next */
10002 + int remote_notify; /* Bulk to notify the remote client of next (mstr) */
10003 + int remove; /* Bulk to notify the local client of, and remove,
10004 + ** next */
10005 + VCHIQ_BULK_T bulks[VCHIQ_NUM_SERVICE_BULKS];
10006 +} VCHIQ_BULK_QUEUE_T;
10007 +
10008 +typedef struct remote_event_struct {
10009 + int armed;
10010 + int fired;
10011 + struct semaphore *event;
10012 +} REMOTE_EVENT_T;
10013 +
10014 +typedef struct opaque_platform_state_t *VCHIQ_PLATFORM_STATE_T;
10015 +
10016 +typedef struct vchiq_state_struct VCHIQ_STATE_T;
10017 +
10018 +typedef struct vchiq_slot_struct {
10019 + char data[VCHIQ_SLOT_SIZE];
10020 +} VCHIQ_SLOT_T;
10021 +
10022 +typedef struct vchiq_slot_info_struct {
10023 + /* Use two counters rather than one to avoid the need for a mutex. */
10024 + short use_count;
10025 + short release_count;
10026 +} VCHIQ_SLOT_INFO_T;
10027 +
10028 +typedef struct vchiq_service_struct {
10029 + VCHIQ_SERVICE_BASE_T base;
10030 + VCHIQ_SERVICE_HANDLE_T handle;
10031 + unsigned int ref_count;
10032 + int srvstate;
10033 + VCHIQ_USERDATA_TERM_T userdata_term;
10034 + unsigned int localport;
10035 + unsigned int remoteport;
10036 + int public_fourcc;
10037 + int client_id;
10038 + char auto_close;
10039 + char sync;
10040 + char closing;
10041 + char trace;
10042 + atomic_t poll_flags;
10043 + short version;
10044 + short version_min;
10045 + short peer_version;
10046 +
10047 + VCHIQ_STATE_T *state;
10048 + VCHIQ_INSTANCE_T instance;
10049 +
10050 + int service_use_count;
10051 +
10052 + VCHIQ_BULK_QUEUE_T bulk_tx;
10053 + VCHIQ_BULK_QUEUE_T bulk_rx;
10054 +
10055 + struct semaphore remove_event;
10056 + struct semaphore bulk_remove_event;
10057 + struct mutex bulk_mutex;
10058 +
10059 + struct service_stats_struct {
10060 + int quota_stalls;
10061 + int slot_stalls;
10062 + int bulk_stalls;
10063 + int error_count;
10064 + int ctrl_tx_count;
10065 + int ctrl_rx_count;
10066 + int bulk_tx_count;
10067 + int bulk_rx_count;
10068 + int bulk_aborted_count;
10069 + uint64_t ctrl_tx_bytes;
10070 + uint64_t ctrl_rx_bytes;
10071 + uint64_t bulk_tx_bytes;
10072 + uint64_t bulk_rx_bytes;
10073 + } stats;
10074 +} VCHIQ_SERVICE_T;
10075 +
10076 +/* The quota information is outside VCHIQ_SERVICE_T so that it can be
10077 + statically allocated, since for accounting reasons a service's slot
10078 + usage is carried over between users of the same port number.
10079 + */
10080 +typedef struct vchiq_service_quota_struct {
10081 + unsigned short slot_quota;
10082 + unsigned short slot_use_count;
10083 + unsigned short message_quota;
10084 + unsigned short message_use_count;
10085 + struct semaphore quota_event;
10086 + int previous_tx_index;
10087 +} VCHIQ_SERVICE_QUOTA_T;
10088 +
10089 +typedef struct vchiq_shared_state_struct {
10090 +
10091 + /* A non-zero value here indicates that the content is valid. */
10092 + int initialised;
10093 +
10094 + /* The first and last (inclusive) slots allocated to the owner. */
10095 + int slot_first;
10096 + int slot_last;
10097 +
10098 + /* The slot allocated to synchronous messages from the owner. */
10099 + int slot_sync;
10100 +
10101 + /* Signalling this event indicates that the owner's slot handler thread
10102 + ** should run. */
10103 + REMOTE_EVENT_T trigger;
10104 +
10105 + /* Indicates the byte position within the stream where the next message
10106 + ** will be written. The least significant bits are an index into the
10107 + ** slot. The next bits are the index of the slot in slot_queue. */
10108 + int tx_pos;
10109 +
10110 + /* This event should be signalled when a slot is recycled. */
10111 + REMOTE_EVENT_T recycle;
10112 +
10113 + /* The slot_queue index where the next recycled slot will be written. */
10114 + int slot_queue_recycle;
10115 +
10116 + /* This event should be signalled when a synchronous message is sent. */
10117 + REMOTE_EVENT_T sync_trigger;
10118 +
10119 + /* This event should be signalled when a synchronous message has been
10120 + ** released. */
10121 + REMOTE_EVENT_T sync_release;
10122 +
10123 + /* A circular buffer of slot indexes. */
10124 + int slot_queue[VCHIQ_MAX_SLOTS_PER_SIDE];
10125 +
10126 + /* Debugging state */
10127 + int debug[DEBUG_MAX];
10128 +} VCHIQ_SHARED_STATE_T;
10129 +
10130 +typedef struct vchiq_slot_zero_struct {
10131 + int magic;
10132 + short version;
10133 + short version_min;
10134 + int slot_zero_size;
10135 + int slot_size;
10136 + int max_slots;
10137 + int max_slots_per_side;
10138 + int platform_data[2];
10139 + VCHIQ_SHARED_STATE_T master;
10140 + VCHIQ_SHARED_STATE_T slave;
10141 + VCHIQ_SLOT_INFO_T slots[VCHIQ_MAX_SLOTS];
10142 +} VCHIQ_SLOT_ZERO_T;
10143 +
10144 +struct vchiq_state_struct {
10145 + int id;
10146 + int initialised;
10147 + VCHIQ_CONNSTATE_T conn_state;
10148 + int is_master;
10149 + short version_common;
10150 +
10151 + VCHIQ_SHARED_STATE_T *local;
10152 + VCHIQ_SHARED_STATE_T *remote;
10153 + VCHIQ_SLOT_T *slot_data;
10154 +
10155 + unsigned short default_slot_quota;
10156 + unsigned short default_message_quota;
10157 +
10158 + /* Event indicating connect message received */
10159 + struct semaphore connect;
10160 +
10161 + /* Mutex protecting services */
10162 + struct mutex mutex;
10163 + VCHIQ_INSTANCE_T *instance;
10164 +
10165 + /* Processes incoming messages */
10166 + struct task_struct *slot_handler_thread;
10167 +
10168 + /* Processes recycled slots */
10169 + struct task_struct *recycle_thread;
10170 +
10171 + /* Processes synchronous messages */
10172 + struct task_struct *sync_thread;
10173 +
10174 + /* Local implementation of the trigger remote event */
10175 + struct semaphore trigger_event;
10176 +
10177 + /* Local implementation of the recycle remote event */
10178 + struct semaphore recycle_event;
10179 +
10180 + /* Local implementation of the sync trigger remote event */
10181 + struct semaphore sync_trigger_event;
10182 +
10183 + /* Local implementation of the sync release remote event */
10184 + struct semaphore sync_release_event;
10185 +
10186 + char *tx_data;
10187 + char *rx_data;
10188 + VCHIQ_SLOT_INFO_T *rx_info;
10189 +
10190 + struct mutex slot_mutex;
10191 +
10192 + struct mutex recycle_mutex;
10193 +
10194 + struct mutex sync_mutex;
10195 +
10196 + struct mutex bulk_transfer_mutex;
10197 +
10198 + /* Indicates the byte position within the stream from where the next
10199 + ** message will be read. The least significant bits are an index into
10200 + ** the slot. The next bits are the index of the slot in
10201 + ** remote->slot_queue. */
10202 + int rx_pos;
10203 +
10204 + /* A cached copy of local->tx_pos. Only write to local->tx_pos, and read
10205 + from remote->tx_pos. */
10206 + int local_tx_pos;
10207 +
10208 + /* The slot_queue index of the slot to become available next. */
10209 + int slot_queue_available;
10210 +
10211 + /* A flag to indicate if any poll has been requested */
10212 + int poll_needed;
10213 +
10214 + /* The index of the previous slot used for data messages. */
10215 + int previous_data_index;
10216 +
10217 + /* The number of slots occupied by data messages. */
10218 + unsigned short data_use_count;
10219 +
10220 + /* The maximum number of slots to be occupied by data messages. */
10221 + unsigned short data_quota;
10222 +
10223 + /* An array of bit sets indicating which services must be polled. */
10224 + atomic_t poll_services[BITSET_SIZE(VCHIQ_MAX_SERVICES)];
10225 +
10226 + /* The number of the first unused service */
10227 + int unused_service;
10228 +
10229 + /* Signalled when a free slot becomes available. */
10230 + struct semaphore slot_available_event;
10231 +
10232 + struct semaphore slot_remove_event;
10233 +
10234 + /* Signalled when a free data slot becomes available. */
10235 + struct semaphore data_quota_event;
10236 +
10237 + /* Incremented when there are bulk transfers which cannot be processed
10238 + * whilst paused and must be processed on resume */
10239 + int deferred_bulks;
10240 +
10241 + struct state_stats_struct {
10242 + int slot_stalls;
10243 + int data_stalls;
10244 + int ctrl_tx_count;
10245 + int ctrl_rx_count;
10246 + int error_count;
10247 + } stats;
10248 +
10249 + VCHIQ_SERVICE_T *services[VCHIQ_MAX_SERVICES];
10250 + VCHIQ_SERVICE_QUOTA_T service_quotas[VCHIQ_MAX_SERVICES];
10251 + VCHIQ_SLOT_INFO_T slot_info[VCHIQ_MAX_SLOTS];
10252 +
10253 + VCHIQ_PLATFORM_STATE_T platform_state;
10254 +};
10255 +
10256 +struct bulk_waiter {
10257 + VCHIQ_BULK_T *bulk;
10258 + struct semaphore event;
10259 + int actual;
10260 +};
10261 +
10262 +extern spinlock_t bulk_waiter_spinlock;
10263 +
10264 +extern int vchiq_core_log_level;
10265 +extern int vchiq_core_msg_log_level;
10266 +extern int vchiq_sync_log_level;
10267 +
10268 +extern VCHIQ_STATE_T *vchiq_states[VCHIQ_MAX_STATES];
10269 +
10270 +extern const char *
10271 +get_conn_state_name(VCHIQ_CONNSTATE_T conn_state);
10272 +
10273 +extern VCHIQ_SLOT_ZERO_T *
10274 +vchiq_init_slots(void *mem_base, int mem_size);
10275 +
10276 +extern VCHIQ_STATUS_T
10277 +vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero,
10278 + int is_master);
10279 +
10280 +extern VCHIQ_STATUS_T
10281 +vchiq_connect_internal(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance);
10282 +
10283 +extern VCHIQ_SERVICE_T *
10284 +vchiq_add_service_internal(VCHIQ_STATE_T *state,
10285 + const VCHIQ_SERVICE_PARAMS_T *params, int srvstate,
10286 + VCHIQ_INSTANCE_T instance, VCHIQ_USERDATA_TERM_T userdata_term);
10287 +
10288 +extern VCHIQ_STATUS_T
10289 +vchiq_open_service_internal(VCHIQ_SERVICE_T *service, int client_id);
10290 +
10291 +extern VCHIQ_STATUS_T
10292 +vchiq_close_service_internal(VCHIQ_SERVICE_T *service, int close_recvd);
10293 +
10294 +extern void
10295 +vchiq_terminate_service_internal(VCHIQ_SERVICE_T *service);
10296 +
10297 +extern void
10298 +vchiq_free_service_internal(VCHIQ_SERVICE_T *service);
10299 +
10300 +extern VCHIQ_STATUS_T
10301 +vchiq_shutdown_internal(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance);
10302 +
10303 +extern VCHIQ_STATUS_T
10304 +vchiq_pause_internal(VCHIQ_STATE_T *state);
10305 +
10306 +extern VCHIQ_STATUS_T
10307 +vchiq_resume_internal(VCHIQ_STATE_T *state);
10308 +
10309 +extern void
10310 +remote_event_pollall(VCHIQ_STATE_T *state);
10311 +
10312 +extern VCHIQ_STATUS_T
10313 +vchiq_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle,
10314 + VCHI_MEM_HANDLE_T memhandle, void *offset, int size, void *userdata,
10315 + VCHIQ_BULK_MODE_T mode, VCHIQ_BULK_DIR_T dir);
10316 +
10317 +extern void
10318 +vchiq_dump_state(void *dump_context, VCHIQ_STATE_T *state);
10319 +
10320 +extern void
10321 +vchiq_dump_service_state(void *dump_context, VCHIQ_SERVICE_T *service);
10322 +
10323 +extern void
10324 +vchiq_loud_error_header(void);
10325 +
10326 +extern void
10327 +vchiq_loud_error_footer(void);
10328 +
10329 +extern void
10330 +request_poll(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service, int poll_type);
10331 +
10332 +static inline VCHIQ_SERVICE_T *
10333 +handle_to_service(VCHIQ_SERVICE_HANDLE_T handle)
10334 +{
10335 + VCHIQ_STATE_T *state = vchiq_states[(handle / VCHIQ_MAX_SERVICES) &
10336 + (VCHIQ_MAX_STATES - 1)];
10337 + if (!state)
10338 + return NULL;
10339 +
10340 + return state->services[handle & (VCHIQ_MAX_SERVICES - 1)];
10341 +}
10342 +
10343 +extern VCHIQ_SERVICE_T *
10344 +find_service_by_handle(VCHIQ_SERVICE_HANDLE_T handle);
10345 +
10346 +extern VCHIQ_SERVICE_T *
10347 +find_service_by_port(VCHIQ_STATE_T *state, int localport);
10348 +
10349 +extern VCHIQ_SERVICE_T *
10350 +find_service_for_instance(VCHIQ_INSTANCE_T instance,
10351 + VCHIQ_SERVICE_HANDLE_T handle);
10352 +
10353 +extern VCHIQ_SERVICE_T *
10354 +find_closed_service_for_instance(VCHIQ_INSTANCE_T instance,
10355 + VCHIQ_SERVICE_HANDLE_T handle);
10356 +
10357 +extern VCHIQ_SERVICE_T *
10358 +next_service_by_instance(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance,
10359 + int *pidx);
10360 +
10361 +extern void
10362 +lock_service(VCHIQ_SERVICE_T *service);
10363 +
10364 +extern void
10365 +unlock_service(VCHIQ_SERVICE_T *service);
10366 +
10367 +/* The following functions are called from vchiq_core, and external
10368 +** implementations must be provided. */
10369 +
10370 +extern VCHIQ_STATUS_T
10371 +vchiq_prepare_bulk_data(VCHIQ_BULK_T *bulk,
10372 + VCHI_MEM_HANDLE_T memhandle, void *offset, int size, int dir);
10373 +
10374 +extern void
10375 +vchiq_transfer_bulk(VCHIQ_BULK_T *bulk);
10376 +
10377 +extern void
10378 +vchiq_complete_bulk(VCHIQ_BULK_T *bulk);
10379 +
10380 +extern VCHIQ_STATUS_T
10381 +vchiq_copy_from_user(void *dst, const void *src, int size);
10382 +
10383 +extern void
10384 +remote_event_signal(REMOTE_EVENT_T *event);
10385 +
10386 +void
10387 +vchiq_platform_check_suspend(VCHIQ_STATE_T *state);
10388 +
10389 +extern void
10390 +vchiq_platform_paused(VCHIQ_STATE_T *state);
10391 +
10392 +extern VCHIQ_STATUS_T
10393 +vchiq_platform_resume(VCHIQ_STATE_T *state);
10394 +
10395 +extern void
10396 +vchiq_platform_resumed(VCHIQ_STATE_T *state);
10397 +
10398 +extern void
10399 +vchiq_dump(void *dump_context, const char *str, int len);
10400 +
10401 +extern void
10402 +vchiq_dump_platform_state(void *dump_context);
10403 +
10404 +extern void
10405 +vchiq_dump_platform_instances(void *dump_context);
10406 +
10407 +extern void
10408 +vchiq_dump_platform_service_state(void *dump_context,
10409 + VCHIQ_SERVICE_T *service);
10410 +
10411 +extern VCHIQ_STATUS_T
10412 +vchiq_use_service_internal(VCHIQ_SERVICE_T *service);
10413 +
10414 +extern VCHIQ_STATUS_T
10415 +vchiq_release_service_internal(VCHIQ_SERVICE_T *service);
10416 +
10417 +extern void
10418 +vchiq_on_remote_use(VCHIQ_STATE_T *state);
10419 +
10420 +extern void
10421 +vchiq_on_remote_release(VCHIQ_STATE_T *state);
10422 +
10423 +extern VCHIQ_STATUS_T
10424 +vchiq_platform_init_state(VCHIQ_STATE_T *state);
10425 +
10426 +extern VCHIQ_STATUS_T
10427 +vchiq_check_service(VCHIQ_SERVICE_T *service);
10428 +
10429 +extern void
10430 +vchiq_on_remote_use_active(VCHIQ_STATE_T *state);
10431 +
10432 +extern VCHIQ_STATUS_T
10433 +vchiq_send_remote_use(VCHIQ_STATE_T *state);
10434 +
10435 +extern VCHIQ_STATUS_T
10436 +vchiq_send_remote_release(VCHIQ_STATE_T *state);
10437 +
10438 +extern VCHIQ_STATUS_T
10439 +vchiq_send_remote_use_active(VCHIQ_STATE_T *state);
10440 +
10441 +extern void
10442 +vchiq_platform_conn_state_changed(VCHIQ_STATE_T *state,
10443 + VCHIQ_CONNSTATE_T oldstate, VCHIQ_CONNSTATE_T newstate);
10444 +
10445 +extern void
10446 +vchiq_platform_handle_timeout(VCHIQ_STATE_T *state);
10447 +
10448 +extern void
10449 +vchiq_set_conn_state(VCHIQ_STATE_T *state, VCHIQ_CONNSTATE_T newstate);
10450 +
10451 +
10452 +extern void
10453 +vchiq_log_dump_mem(const char *label, uint32_t addr, const void *voidMem,
10454 + size_t numBytes);
10455 +
10456 +#endif
10457 --- /dev/null
10458 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_debugfs.c
10459 @@ -0,0 +1,383 @@
10460 +/**
10461 + * Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved.
10462 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
10463 + *
10464 + * Redistribution and use in source and binary forms, with or without
10465 + * modification, are permitted provided that the following conditions
10466 + * are met:
10467 + * 1. Redistributions of source code must retain the above copyright
10468 + * notice, this list of conditions, and the following disclaimer,
10469 + * without modification.
10470 + * 2. Redistributions in binary form must reproduce the above copyright
10471 + * notice, this list of conditions and the following disclaimer in the
10472 + * documentation and/or other materials provided with the distribution.
10473 + * 3. The names of the above-listed copyright holders may not be used
10474 + * to endorse or promote products derived from this software without
10475 + * specific prior written permission.
10476 + *
10477 + * ALTERNATIVELY, this software may be distributed under the terms of the
10478 + * GNU General Public License ("GPL") version 2, as published by the Free
10479 + * Software Foundation.
10480 + *
10481 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
10482 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
10483 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
10484 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
10485 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
10486 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
10487 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
10488 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
10489 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
10490 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
10491 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
10492 + */
10493 +
10494 +
10495 +#include <linux/debugfs.h>
10496 +#include "vchiq_core.h"
10497 +#include "vchiq_arm.h"
10498 +#include "vchiq_debugfs.h"
10499 +
10500 +#ifdef CONFIG_DEBUG_FS
10501 +
10502 +/****************************************************************************
10503 +*
10504 +* log category entries
10505 +*
10506 +***************************************************************************/
10507 +#define DEBUGFS_WRITE_BUF_SIZE 256
10508 +
10509 +#define VCHIQ_LOG_ERROR_STR "error"
10510 +#define VCHIQ_LOG_WARNING_STR "warning"
10511 +#define VCHIQ_LOG_INFO_STR "info"
10512 +#define VCHIQ_LOG_TRACE_STR "trace"
10513 +
10514 +
10515 +/* Top-level debug info */
10516 +struct vchiq_debugfs_info {
10517 + /* Global 'vchiq' debugfs entry used by all instances */
10518 + struct dentry *vchiq_cfg_dir;
10519 +
10520 + /* one entry per client process */
10521 + struct dentry *clients;
10522 +
10523 + /* log categories */
10524 + struct dentry *log_categories;
10525 +};
10526 +
10527 +static struct vchiq_debugfs_info debugfs_info;
10528 +
10529 +/* Log category debugfs entries */
10530 +struct vchiq_debugfs_log_entry {
10531 + const char *name;
10532 + int *plevel;
10533 + struct dentry *dir;
10534 +};
10535 +
10536 +static struct vchiq_debugfs_log_entry vchiq_debugfs_log_entries[] = {
10537 + { "core", &vchiq_core_log_level },
10538 + { "msg", &vchiq_core_msg_log_level },
10539 + { "sync", &vchiq_sync_log_level },
10540 + { "susp", &vchiq_susp_log_level },
10541 + { "arm", &vchiq_arm_log_level },
10542 +};
10543 +static int n_log_entries =
10544 + sizeof(vchiq_debugfs_log_entries)/sizeof(vchiq_debugfs_log_entries[0]);
10545 +
10546 +
10547 +static struct dentry *vchiq_clients_top(void);
10548 +static struct dentry *vchiq_debugfs_top(void);
10549 +
10550 +static int debugfs_log_show(struct seq_file *f, void *offset)
10551 +{
10552 + int *levp = f->private;
10553 + char *log_value = NULL;
10554 +
10555 + switch (*levp) {
10556 + case VCHIQ_LOG_ERROR:
10557 + log_value = VCHIQ_LOG_ERROR_STR;
10558 + break;
10559 + case VCHIQ_LOG_WARNING:
10560 + log_value = VCHIQ_LOG_WARNING_STR;
10561 + break;
10562 + case VCHIQ_LOG_INFO:
10563 + log_value = VCHIQ_LOG_INFO_STR;
10564 + break;
10565 + case VCHIQ_LOG_TRACE:
10566 + log_value = VCHIQ_LOG_TRACE_STR;
10567 + break;
10568 + default:
10569 + break;
10570 + }
10571 +
10572 + seq_printf(f, "%s\n", log_value ? log_value : "(null)");
10573 +
10574 + return 0;
10575 +}
10576 +
10577 +static int debugfs_log_open(struct inode *inode, struct file *file)
10578 +{
10579 + return single_open(file, debugfs_log_show, inode->i_private);
10580 +}
10581 +
10582 +static ssize_t debugfs_log_write(struct file *file,
10583 + const char __user *buffer,
10584 + size_t count, loff_t *ppos)
10585 +{
10586 + struct seq_file *f = (struct seq_file *)file->private_data;
10587 + int *levp = f->private;
10588 + char kbuf[DEBUGFS_WRITE_BUF_SIZE + 1];
10589 +
10590 + memset(kbuf, 0, DEBUGFS_WRITE_BUF_SIZE + 1);
10591 + if (count >= DEBUGFS_WRITE_BUF_SIZE)
10592 + count = DEBUGFS_WRITE_BUF_SIZE;
10593 +
10594 + if (copy_from_user(kbuf, buffer, count) != 0)
10595 + return -EFAULT;
10596 + kbuf[count - 1] = 0;
10597 +
10598 + if (strncmp("error", kbuf, strlen("error")) == 0)
10599 + *levp = VCHIQ_LOG_ERROR;
10600 + else if (strncmp("warning", kbuf, strlen("warning")) == 0)
10601 + *levp = VCHIQ_LOG_WARNING;
10602 + else if (strncmp("info", kbuf, strlen("info")) == 0)
10603 + *levp = VCHIQ_LOG_INFO;
10604 + else if (strncmp("trace", kbuf, strlen("trace")) == 0)
10605 + *levp = VCHIQ_LOG_TRACE;
10606 + else
10607 + *levp = VCHIQ_LOG_DEFAULT;
10608 +
10609 + *ppos += count;
10610 +
10611 + return count;
10612 +}
10613 +
10614 +static const struct file_operations debugfs_log_fops = {
10615 + .owner = THIS_MODULE,
10616 + .open = debugfs_log_open,
10617 + .write = debugfs_log_write,
10618 + .read = seq_read,
10619 + .llseek = seq_lseek,
10620 + .release = single_release,
10621 +};
10622 +
10623 +/* create an entry under <debugfs>/vchiq/log for each log category */
10624 +static int vchiq_debugfs_create_log_entries(struct dentry *top)
10625 +{
10626 + struct dentry *dir;
10627 + size_t i;
10628 + int ret = 0;
10629 + dir = debugfs_create_dir("log", vchiq_debugfs_top());
10630 + if (!dir)
10631 + return -ENOMEM;
10632 + debugfs_info.log_categories = dir;
10633 +
10634 + for (i = 0; i < n_log_entries; i++) {
10635 + void *levp = (void *)vchiq_debugfs_log_entries[i].plevel;
10636 + dir = debugfs_create_file(vchiq_debugfs_log_entries[i].name,
10637 + 0644,
10638 + debugfs_info.log_categories,
10639 + levp,
10640 + &debugfs_log_fops);
10641 + if (!dir) {
10642 + ret = -ENOMEM;
10643 + break;
10644 + }
10645 +
10646 + vchiq_debugfs_log_entries[i].dir = dir;
10647 + }
10648 + return ret;
10649 +}
10650 +
10651 +static int debugfs_usecount_show(struct seq_file *f, void *offset)
10652 +{
10653 + VCHIQ_INSTANCE_T instance = f->private;
10654 + int use_count;
10655 +
10656 + use_count = vchiq_instance_get_use_count(instance);
10657 + seq_printf(f, "%d\n", use_count);
10658 +
10659 + return 0;
10660 +}
10661 +
10662 +static int debugfs_usecount_open(struct inode *inode, struct file *file)
10663 +{
10664 + return single_open(file, debugfs_usecount_show, inode->i_private);
10665 +}
10666 +
10667 +static const struct file_operations debugfs_usecount_fops = {
10668 + .owner = THIS_MODULE,
10669 + .open = debugfs_usecount_open,
10670 + .read = seq_read,
10671 + .llseek = seq_lseek,
10672 + .release = single_release,
10673 +};
10674 +
10675 +static int debugfs_trace_show(struct seq_file *f, void *offset)
10676 +{
10677 + VCHIQ_INSTANCE_T instance = f->private;
10678 + int trace;
10679 +
10680 + trace = vchiq_instance_get_trace(instance);
10681 + seq_printf(f, "%s\n", trace ? "Y" : "N");
10682 +
10683 + return 0;
10684 +}
10685 +
10686 +static int debugfs_trace_open(struct inode *inode, struct file *file)
10687 +{
10688 + return single_open(file, debugfs_trace_show, inode->i_private);
10689 +}
10690 +
10691 +static ssize_t debugfs_trace_write(struct file *file,
10692 + const char __user *buffer,
10693 + size_t count, loff_t *ppos)
10694 +{
10695 + struct seq_file *f = (struct seq_file *)file->private_data;
10696 + VCHIQ_INSTANCE_T instance = f->private;
10697 + char firstchar;
10698 +
10699 + if (copy_from_user(&firstchar, buffer, 1) != 0)
10700 + return -EFAULT;
10701 +
10702 + switch (firstchar) {
10703 + case 'Y':
10704 + case 'y':
10705 + case '1':
10706 + vchiq_instance_set_trace(instance, 1);
10707 + break;
10708 + case 'N':
10709 + case 'n':
10710 + case '0':
10711 + vchiq_instance_set_trace(instance, 0);
10712 + break;
10713 + default:
10714 + break;
10715 + }
10716 +
10717 + *ppos += count;
10718 +
10719 + return count;
10720 +}
10721 +
10722 +static const struct file_operations debugfs_trace_fops = {
10723 + .owner = THIS_MODULE,
10724 + .open = debugfs_trace_open,
10725 + .write = debugfs_trace_write,
10726 + .read = seq_read,
10727 + .llseek = seq_lseek,
10728 + .release = single_release,
10729 +};
10730 +
10731 +/* add an instance (process) to the debugfs entries */
10732 +int vchiq_debugfs_add_instance(VCHIQ_INSTANCE_T instance)
10733 +{
10734 + char pidstr[16];
10735 + struct dentry *top, *use_count, *trace;
10736 + struct dentry *clients = vchiq_clients_top();
10737 +
10738 + snprintf(pidstr, sizeof(pidstr), "%d",
10739 + vchiq_instance_get_pid(instance));
10740 +
10741 + top = debugfs_create_dir(pidstr, clients);
10742 + if (!top)
10743 + goto fail_top;
10744 +
10745 + use_count = debugfs_create_file("use_count",
10746 + 0444, top,
10747 + instance,
10748 + &debugfs_usecount_fops);
10749 + if (!use_count)
10750 + goto fail_use_count;
10751 +
10752 + trace = debugfs_create_file("trace",
10753 + 0644, top,
10754 + instance,
10755 + &debugfs_trace_fops);
10756 + if (!trace)
10757 + goto fail_trace;
10758 +
10759 + vchiq_instance_get_debugfs_node(instance)->dentry = top;
10760 +
10761 + return 0;
10762 +
10763 +fail_trace:
10764 + debugfs_remove(use_count);
10765 +fail_use_count:
10766 + debugfs_remove(top);
10767 +fail_top:
10768 + return -ENOMEM;
10769 +}
10770 +
10771 +void vchiq_debugfs_remove_instance(VCHIQ_INSTANCE_T instance)
10772 +{
10773 + VCHIQ_DEBUGFS_NODE_T *node = vchiq_instance_get_debugfs_node(instance);
10774 + debugfs_remove_recursive(node->dentry);
10775 +}
10776 +
10777 +
10778 +int vchiq_debugfs_init(void)
10779 +{
10780 + BUG_ON(debugfs_info.vchiq_cfg_dir != NULL);
10781 +
10782 + debugfs_info.vchiq_cfg_dir = debugfs_create_dir("vchiq", NULL);
10783 + if (debugfs_info.vchiq_cfg_dir == NULL)
10784 + goto fail;
10785 +
10786 + debugfs_info.clients = debugfs_create_dir("clients",
10787 + vchiq_debugfs_top());
10788 + if (!debugfs_info.clients)
10789 + goto fail;
10790 +
10791 + if (vchiq_debugfs_create_log_entries(vchiq_debugfs_top()) != 0)
10792 + goto fail;
10793 +
10794 + return 0;
10795 +
10796 +fail:
10797 + vchiq_debugfs_deinit();
10798 + vchiq_log_error(vchiq_arm_log_level,
10799 + "%s: failed to create debugfs directory",
10800 + __func__);
10801 +
10802 + return -ENOMEM;
10803 +}
10804 +
10805 +/* remove all the debugfs entries */
10806 +void vchiq_debugfs_deinit(void)
10807 +{
10808 + debugfs_remove_recursive(vchiq_debugfs_top());
10809 +}
10810 +
10811 +static struct dentry *vchiq_clients_top(void)
10812 +{
10813 + return debugfs_info.clients;
10814 +}
10815 +
10816 +static struct dentry *vchiq_debugfs_top(void)
10817 +{
10818 + BUG_ON(debugfs_info.vchiq_cfg_dir == NULL);
10819 + return debugfs_info.vchiq_cfg_dir;
10820 +}
10821 +
10822 +#else /* CONFIG_DEBUG_FS */
10823 +
10824 +int vchiq_debugfs_init(void)
10825 +{
10826 + return 0;
10827 +}
10828 +
10829 +void vchiq_debugfs_deinit(void)
10830 +{
10831 +}
10832 +
10833 +int vchiq_debugfs_add_instance(VCHIQ_INSTANCE_T instance)
10834 +{
10835 + return 0;
10836 +}
10837 +
10838 +void vchiq_debugfs_remove_instance(VCHIQ_INSTANCE_T instance)
10839 +{
10840 +}
10841 +
10842 +#endif /* CONFIG_DEBUG_FS */
10843 --- /dev/null
10844 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_debugfs.h
10845 @@ -0,0 +1,52 @@
10846 +/**
10847 + * Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved.
10848 + *
10849 + * Redistribution and use in source and binary forms, with or without
10850 + * modification, are permitted provided that the following conditions
10851 + * are met:
10852 + * 1. Redistributions of source code must retain the above copyright
10853 + * notice, this list of conditions, and the following disclaimer,
10854 + * without modification.
10855 + * 2. Redistributions in binary form must reproduce the above copyright
10856 + * notice, this list of conditions and the following disclaimer in the
10857 + * documentation and/or other materials provided with the distribution.
10858 + * 3. The names of the above-listed copyright holders may not be used
10859 + * to endorse or promote products derived from this software without
10860 + * specific prior written permission.
10861 + *
10862 + * ALTERNATIVELY, this software may be distributed under the terms of the
10863 + * GNU General Public License ("GPL") version 2, as published by the Free
10864 + * Software Foundation.
10865 + *
10866 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
10867 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
10868 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
10869 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
10870 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
10871 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
10872 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
10873 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
10874 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
10875 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
10876 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
10877 + */
10878 +
10879 +#ifndef VCHIQ_DEBUGFS_H
10880 +#define VCHIQ_DEBUGFS_H
10881 +
10882 +#include "vchiq_core.h"
10883 +
10884 +typedef struct vchiq_debugfs_node_struct
10885 +{
10886 + struct dentry *dentry;
10887 +} VCHIQ_DEBUGFS_NODE_T;
10888 +
10889 +int vchiq_debugfs_init(void);
10890 +
10891 +void vchiq_debugfs_deinit(void);
10892 +
10893 +int vchiq_debugfs_add_instance(VCHIQ_INSTANCE_T instance);
10894 +
10895 +void vchiq_debugfs_remove_instance(VCHIQ_INSTANCE_T instance);
10896 +
10897 +#endif /* VCHIQ_DEBUGFS_H */
10898 --- /dev/null
10899 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_genversion
10900 @@ -0,0 +1,87 @@
10901 +#!/usr/bin/perl -w
10902 +
10903 +use strict;
10904 +
10905 +#
10906 +# Generate a version from available information
10907 +#
10908 +
10909 +my $prefix = shift @ARGV;
10910 +my $root = shift @ARGV;
10911 +
10912 +
10913 +if ( not defined $root ) {
10914 + die "usage: $0 prefix root-dir\n";
10915 +}
10916 +
10917 +if ( ! -d $root ) {
10918 + die "root directory $root not found\n";
10919 +}
10920 +
10921 +my $version = "unknown";
10922 +my $tainted = "";
10923 +
10924 +if ( -d "$root/.git" ) {
10925 + # attempt to work out git version. only do so
10926 + # on a linux build host, as cygwin builds are
10927 + # already slow enough
10928 +
10929 + if ( -f "/usr/bin/git" || -f "/usr/local/bin/git" ) {
10930 + if (not open(F, "git --git-dir $root/.git rev-parse --verify HEAD|")) {
10931 + $version = "no git version";
10932 + }
10933 + else {
10934 + $version = <F>;
10935 + $version =~ s/[ \r\n]*$//; # chomp may not be enough (cygwin).
10936 + $version =~ s/^[ \r\n]*//; # chomp may not be enough (cygwin).
10937 + }
10938 +
10939 + if (open(G, "git --git-dir $root/.git status --porcelain|")) {
10940 + $tainted = <G>;
10941 + $tainted =~ s/[ \r\n]*$//; # chomp may not be enough (cygwin).
10942 + $tainted =~ s/^[ \r\n]*//; # chomp may not be enough (cygwin).
10943 + if (length $tainted) {
10944 + $version = join ' ', $version, "(tainted)";
10945 + }
10946 + else {
10947 + $version = join ' ', $version, "(clean)";
10948 + }
10949 + }
10950 + }
10951 +}
10952 +
10953 +my $hostname = `hostname`;
10954 +$hostname =~ s/[ \r\n]*$//; # chomp may not be enough (cygwin).
10955 +$hostname =~ s/^[ \r\n]*//; # chomp may not be enough (cygwin).
10956 +
10957 +
10958 +print STDERR "Version $version\n";
10959 +print <<EOF;
10960 +#include "${prefix}_build_info.h"
10961 +#include <linux/broadcom/vc_debug_sym.h>
10962 +
10963 +VC_DEBUG_DECLARE_STRING_VAR( ${prefix}_build_hostname, "$hostname" );
10964 +VC_DEBUG_DECLARE_STRING_VAR( ${prefix}_build_version, "$version" );
10965 +VC_DEBUG_DECLARE_STRING_VAR( ${prefix}_build_time, __TIME__ );
10966 +VC_DEBUG_DECLARE_STRING_VAR( ${prefix}_build_date, __DATE__ );
10967 +
10968 +const char *vchiq_get_build_hostname( void )
10969 +{
10970 + return vchiq_build_hostname;
10971 +}
10972 +
10973 +const char *vchiq_get_build_version( void )
10974 +{
10975 + return vchiq_build_version;
10976 +}
10977 +
10978 +const char *vchiq_get_build_date( void )
10979 +{
10980 + return vchiq_build_date;
10981 +}
10982 +
10983 +const char *vchiq_get_build_time( void )
10984 +{
10985 + return vchiq_build_time;
10986 +}
10987 +EOF
10988 --- /dev/null
10989 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_if.h
10990 @@ -0,0 +1,189 @@
10991 +/**
10992 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
10993 + *
10994 + * Redistribution and use in source and binary forms, with or without
10995 + * modification, are permitted provided that the following conditions
10996 + * are met:
10997 + * 1. Redistributions of source code must retain the above copyright
10998 + * notice, this list of conditions, and the following disclaimer,
10999 + * without modification.
11000 + * 2. Redistributions in binary form must reproduce the above copyright
11001 + * notice, this list of conditions and the following disclaimer in the
11002 + * documentation and/or other materials provided with the distribution.
11003 + * 3. The names of the above-listed copyright holders may not be used
11004 + * to endorse or promote products derived from this software without
11005 + * specific prior written permission.
11006 + *
11007 + * ALTERNATIVELY, this software may be distributed under the terms of the
11008 + * GNU General Public License ("GPL") version 2, as published by the Free
11009 + * Software Foundation.
11010 + *
11011 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
11012 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
11013 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
11014 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
11015 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
11016 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
11017 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
11018 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
11019 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
11020 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
11021 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
11022 + */
11023 +
11024 +#ifndef VCHIQ_IF_H
11025 +#define VCHIQ_IF_H
11026 +
11027 +#include "interface/vchi/vchi_mh.h"
11028 +
11029 +#define VCHIQ_SERVICE_HANDLE_INVALID 0
11030 +
11031 +#define VCHIQ_SLOT_SIZE 4096
11032 +#define VCHIQ_MAX_MSG_SIZE (VCHIQ_SLOT_SIZE - sizeof(VCHIQ_HEADER_T))
11033 +#define VCHIQ_CHANNEL_SIZE VCHIQ_MAX_MSG_SIZE /* For backwards compatibility */
11034 +
11035 +#define VCHIQ_MAKE_FOURCC(x0, x1, x2, x3) \
11036 + (((x0) << 24) | ((x1) << 16) | ((x2) << 8) | (x3))
11037 +#define VCHIQ_GET_SERVICE_USERDATA(service) vchiq_get_service_userdata(service)
11038 +#define VCHIQ_GET_SERVICE_FOURCC(service) vchiq_get_service_fourcc(service)
11039 +
11040 +typedef enum {
11041 + VCHIQ_SERVICE_OPENED, /* service, -, - */
11042 + VCHIQ_SERVICE_CLOSED, /* service, -, - */
11043 + VCHIQ_MESSAGE_AVAILABLE, /* service, header, - */
11044 + VCHIQ_BULK_TRANSMIT_DONE, /* service, -, bulk_userdata */
11045 + VCHIQ_BULK_RECEIVE_DONE, /* service, -, bulk_userdata */
11046 + VCHIQ_BULK_TRANSMIT_ABORTED, /* service, -, bulk_userdata */
11047 + VCHIQ_BULK_RECEIVE_ABORTED /* service, -, bulk_userdata */
11048 +} VCHIQ_REASON_T;
11049 +
11050 +typedef enum {
11051 + VCHIQ_ERROR = -1,
11052 + VCHIQ_SUCCESS = 0,
11053 + VCHIQ_RETRY = 1
11054 +} VCHIQ_STATUS_T;
11055 +
11056 +typedef enum {
11057 + VCHIQ_BULK_MODE_CALLBACK,
11058 + VCHIQ_BULK_MODE_BLOCKING,
11059 + VCHIQ_BULK_MODE_NOCALLBACK,
11060 + VCHIQ_BULK_MODE_WAITING /* Reserved for internal use */
11061 +} VCHIQ_BULK_MODE_T;
11062 +
11063 +typedef enum {
11064 + VCHIQ_SERVICE_OPTION_AUTOCLOSE,
11065 + VCHIQ_SERVICE_OPTION_SLOT_QUOTA,
11066 + VCHIQ_SERVICE_OPTION_MESSAGE_QUOTA,
11067 + VCHIQ_SERVICE_OPTION_SYNCHRONOUS,
11068 + VCHIQ_SERVICE_OPTION_TRACE
11069 +} VCHIQ_SERVICE_OPTION_T;
11070 +
11071 +typedef struct vchiq_header_struct {
11072 + /* The message identifier - opaque to applications. */
11073 + int msgid;
11074 +
11075 + /* Size of message data. */
11076 + unsigned int size;
11077 +
11078 + char data[0]; /* message */
11079 +} VCHIQ_HEADER_T;
11080 +
11081 +typedef struct {
11082 + const void *data;
11083 + unsigned int size;
11084 +} VCHIQ_ELEMENT_T;
11085 +
11086 +typedef unsigned int VCHIQ_SERVICE_HANDLE_T;
11087 +
11088 +typedef VCHIQ_STATUS_T (*VCHIQ_CALLBACK_T)(VCHIQ_REASON_T, VCHIQ_HEADER_T *,
11089 + VCHIQ_SERVICE_HANDLE_T, void *);
11090 +
11091 +typedef struct vchiq_service_base_struct {
11092 + int fourcc;
11093 + VCHIQ_CALLBACK_T callback;
11094 + void *userdata;
11095 +} VCHIQ_SERVICE_BASE_T;
11096 +
11097 +typedef struct vchiq_service_params_struct {
11098 + int fourcc;
11099 + VCHIQ_CALLBACK_T callback;
11100 + void *userdata;
11101 + short version; /* Increment for non-trivial changes */
11102 + short version_min; /* Update for incompatible changes */
11103 +} VCHIQ_SERVICE_PARAMS_T;
11104 +
11105 +typedef struct vchiq_config_struct {
11106 + unsigned int max_msg_size;
11107 + unsigned int bulk_threshold; /* The message size above which it
11108 + is better to use a bulk transfer
11109 + (<= max_msg_size) */
11110 + unsigned int max_outstanding_bulks;
11111 + unsigned int max_services;
11112 + short version; /* The version of VCHIQ */
11113 + short version_min; /* The minimum compatible version of VCHIQ */
11114 +} VCHIQ_CONFIG_T;
11115 +
11116 +typedef struct vchiq_instance_struct *VCHIQ_INSTANCE_T;
11117 +typedef void (*VCHIQ_REMOTE_USE_CALLBACK_T)(void *cb_arg);
11118 +
11119 +extern VCHIQ_STATUS_T vchiq_initialise(VCHIQ_INSTANCE_T *pinstance);
11120 +extern VCHIQ_STATUS_T vchiq_shutdown(VCHIQ_INSTANCE_T instance);
11121 +extern VCHIQ_STATUS_T vchiq_connect(VCHIQ_INSTANCE_T instance);
11122 +extern VCHIQ_STATUS_T vchiq_add_service(VCHIQ_INSTANCE_T instance,
11123 + const VCHIQ_SERVICE_PARAMS_T *params,
11124 + VCHIQ_SERVICE_HANDLE_T *pservice);
11125 +extern VCHIQ_STATUS_T vchiq_open_service(VCHIQ_INSTANCE_T instance,
11126 + const VCHIQ_SERVICE_PARAMS_T *params,
11127 + VCHIQ_SERVICE_HANDLE_T *pservice);
11128 +extern VCHIQ_STATUS_T vchiq_close_service(VCHIQ_SERVICE_HANDLE_T service);
11129 +extern VCHIQ_STATUS_T vchiq_remove_service(VCHIQ_SERVICE_HANDLE_T service);
11130 +extern VCHIQ_STATUS_T vchiq_use_service(VCHIQ_SERVICE_HANDLE_T service);
11131 +extern VCHIQ_STATUS_T vchiq_use_service_no_resume(
11132 + VCHIQ_SERVICE_HANDLE_T service);
11133 +extern VCHIQ_STATUS_T vchiq_release_service(VCHIQ_SERVICE_HANDLE_T service);
11134 +
11135 +extern VCHIQ_STATUS_T vchiq_queue_message(VCHIQ_SERVICE_HANDLE_T service,
11136 + const VCHIQ_ELEMENT_T *elements, unsigned int count);
11137 +extern void vchiq_release_message(VCHIQ_SERVICE_HANDLE_T service,
11138 + VCHIQ_HEADER_T *header);
11139 +extern VCHIQ_STATUS_T vchiq_queue_bulk_transmit(VCHIQ_SERVICE_HANDLE_T service,
11140 + const void *data, unsigned int size, void *userdata);
11141 +extern VCHIQ_STATUS_T vchiq_queue_bulk_receive(VCHIQ_SERVICE_HANDLE_T service,
11142 + void *data, unsigned int size, void *userdata);
11143 +extern VCHIQ_STATUS_T vchiq_queue_bulk_transmit_handle(
11144 + VCHIQ_SERVICE_HANDLE_T service, VCHI_MEM_HANDLE_T handle,
11145 + const void *offset, unsigned int size, void *userdata);
11146 +extern VCHIQ_STATUS_T vchiq_queue_bulk_receive_handle(
11147 + VCHIQ_SERVICE_HANDLE_T service, VCHI_MEM_HANDLE_T handle,
11148 + void *offset, unsigned int size, void *userdata);
11149 +extern VCHIQ_STATUS_T vchiq_bulk_transmit(VCHIQ_SERVICE_HANDLE_T service,
11150 + const void *data, unsigned int size, void *userdata,
11151 + VCHIQ_BULK_MODE_T mode);
11152 +extern VCHIQ_STATUS_T vchiq_bulk_receive(VCHIQ_SERVICE_HANDLE_T service,
11153 + void *data, unsigned int size, void *userdata,
11154 + VCHIQ_BULK_MODE_T mode);
11155 +extern VCHIQ_STATUS_T vchiq_bulk_transmit_handle(VCHIQ_SERVICE_HANDLE_T service,
11156 + VCHI_MEM_HANDLE_T handle, const void *offset, unsigned int size,
11157 + void *userdata, VCHIQ_BULK_MODE_T mode);
11158 +extern VCHIQ_STATUS_T vchiq_bulk_receive_handle(VCHIQ_SERVICE_HANDLE_T service,
11159 + VCHI_MEM_HANDLE_T handle, void *offset, unsigned int size,
11160 + void *userdata, VCHIQ_BULK_MODE_T mode);
11161 +extern int vchiq_get_client_id(VCHIQ_SERVICE_HANDLE_T service);
11162 +extern void *vchiq_get_service_userdata(VCHIQ_SERVICE_HANDLE_T service);
11163 +extern int vchiq_get_service_fourcc(VCHIQ_SERVICE_HANDLE_T service);
11164 +extern VCHIQ_STATUS_T vchiq_get_config(VCHIQ_INSTANCE_T instance,
11165 + int config_size, VCHIQ_CONFIG_T *pconfig);
11166 +extern VCHIQ_STATUS_T vchiq_set_service_option(VCHIQ_SERVICE_HANDLE_T service,
11167 + VCHIQ_SERVICE_OPTION_T option, int value);
11168 +
11169 +extern VCHIQ_STATUS_T vchiq_remote_use(VCHIQ_INSTANCE_T instance,
11170 + VCHIQ_REMOTE_USE_CALLBACK_T callback, void *cb_arg);
11171 +extern VCHIQ_STATUS_T vchiq_remote_release(VCHIQ_INSTANCE_T instance);
11172 +
11173 +extern VCHIQ_STATUS_T vchiq_dump_phys_mem(VCHIQ_SERVICE_HANDLE_T service,
11174 + void *ptr, size_t num_bytes);
11175 +
11176 +extern VCHIQ_STATUS_T vchiq_get_peer_version(VCHIQ_SERVICE_HANDLE_T handle,
11177 + short *peer_version);
11178 +
11179 +#endif /* VCHIQ_IF_H */
11180 --- /dev/null
11181 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_ioctl.h
11182 @@ -0,0 +1,131 @@
11183 +/**
11184 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
11185 + *
11186 + * Redistribution and use in source and binary forms, with or without
11187 + * modification, are permitted provided that the following conditions
11188 + * are met:
11189 + * 1. Redistributions of source code must retain the above copyright
11190 + * notice, this list of conditions, and the following disclaimer,
11191 + * without modification.
11192 + * 2. Redistributions in binary form must reproduce the above copyright
11193 + * notice, this list of conditions and the following disclaimer in the
11194 + * documentation and/or other materials provided with the distribution.
11195 + * 3. The names of the above-listed copyright holders may not be used
11196 + * to endorse or promote products derived from this software without
11197 + * specific prior written permission.
11198 + *
11199 + * ALTERNATIVELY, this software may be distributed under the terms of the
11200 + * GNU General Public License ("GPL") version 2, as published by the Free
11201 + * Software Foundation.
11202 + *
11203 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
11204 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
11205 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
11206 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
11207 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
11208 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
11209 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
11210 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
11211 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
11212 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
11213 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
11214 + */
11215 +
11216 +#ifndef VCHIQ_IOCTLS_H
11217 +#define VCHIQ_IOCTLS_H
11218 +
11219 +#include <linux/ioctl.h>
11220 +#include "vchiq_if.h"
11221 +
11222 +#define VCHIQ_IOC_MAGIC 0xc4
11223 +#define VCHIQ_INVALID_HANDLE (~0)
11224 +
11225 +typedef struct {
11226 + VCHIQ_SERVICE_PARAMS_T params;
11227 + int is_open;
11228 + int is_vchi;
11229 + unsigned int handle; /* OUT */
11230 +} VCHIQ_CREATE_SERVICE_T;
11231 +
11232 +typedef struct {
11233 + unsigned int handle;
11234 + unsigned int count;
11235 + const VCHIQ_ELEMENT_T *elements;
11236 +} VCHIQ_QUEUE_MESSAGE_T;
11237 +
11238 +typedef struct {
11239 + unsigned int handle;
11240 + void *data;
11241 + unsigned int size;
11242 + void *userdata;
11243 + VCHIQ_BULK_MODE_T mode;
11244 +} VCHIQ_QUEUE_BULK_TRANSFER_T;
11245 +
11246 +typedef struct {
11247 + VCHIQ_REASON_T reason;
11248 + VCHIQ_HEADER_T *header;
11249 + void *service_userdata;
11250 + void *bulk_userdata;
11251 +} VCHIQ_COMPLETION_DATA_T;
11252 +
11253 +typedef struct {
11254 + unsigned int count;
11255 + VCHIQ_COMPLETION_DATA_T *buf;
11256 + unsigned int msgbufsize;
11257 + unsigned int msgbufcount; /* IN/OUT */
11258 + void **msgbufs;
11259 +} VCHIQ_AWAIT_COMPLETION_T;
11260 +
11261 +typedef struct {
11262 + unsigned int handle;
11263 + int blocking;
11264 + unsigned int bufsize;
11265 + void *buf;
11266 +} VCHIQ_DEQUEUE_MESSAGE_T;
11267 +
11268 +typedef struct {
11269 + unsigned int config_size;
11270 + VCHIQ_CONFIG_T *pconfig;
11271 +} VCHIQ_GET_CONFIG_T;
11272 +
11273 +typedef struct {
11274 + unsigned int handle;
11275 + VCHIQ_SERVICE_OPTION_T option;
11276 + int value;
11277 +} VCHIQ_SET_SERVICE_OPTION_T;
11278 +
11279 +typedef struct {
11280 + void *virt_addr;
11281 + size_t num_bytes;
11282 +} VCHIQ_DUMP_MEM_T;
11283 +
11284 +#define VCHIQ_IOC_CONNECT _IO(VCHIQ_IOC_MAGIC, 0)
11285 +#define VCHIQ_IOC_SHUTDOWN _IO(VCHIQ_IOC_MAGIC, 1)
11286 +#define VCHIQ_IOC_CREATE_SERVICE \
11287 + _IOWR(VCHIQ_IOC_MAGIC, 2, VCHIQ_CREATE_SERVICE_T)
11288 +#define VCHIQ_IOC_REMOVE_SERVICE _IO(VCHIQ_IOC_MAGIC, 3)
11289 +#define VCHIQ_IOC_QUEUE_MESSAGE \
11290 + _IOW(VCHIQ_IOC_MAGIC, 4, VCHIQ_QUEUE_MESSAGE_T)
11291 +#define VCHIQ_IOC_QUEUE_BULK_TRANSMIT \
11292 + _IOWR(VCHIQ_IOC_MAGIC, 5, VCHIQ_QUEUE_BULK_TRANSFER_T)
11293 +#define VCHIQ_IOC_QUEUE_BULK_RECEIVE \
11294 + _IOWR(VCHIQ_IOC_MAGIC, 6, VCHIQ_QUEUE_BULK_TRANSFER_T)
11295 +#define VCHIQ_IOC_AWAIT_COMPLETION \
11296 + _IOWR(VCHIQ_IOC_MAGIC, 7, VCHIQ_AWAIT_COMPLETION_T)
11297 +#define VCHIQ_IOC_DEQUEUE_MESSAGE \
11298 + _IOWR(VCHIQ_IOC_MAGIC, 8, VCHIQ_DEQUEUE_MESSAGE_T)
11299 +#define VCHIQ_IOC_GET_CLIENT_ID _IO(VCHIQ_IOC_MAGIC, 9)
11300 +#define VCHIQ_IOC_GET_CONFIG \
11301 + _IOWR(VCHIQ_IOC_MAGIC, 10, VCHIQ_GET_CONFIG_T)
11302 +#define VCHIQ_IOC_CLOSE_SERVICE _IO(VCHIQ_IOC_MAGIC, 11)
11303 +#define VCHIQ_IOC_USE_SERVICE _IO(VCHIQ_IOC_MAGIC, 12)
11304 +#define VCHIQ_IOC_RELEASE_SERVICE _IO(VCHIQ_IOC_MAGIC, 13)
11305 +#define VCHIQ_IOC_SET_SERVICE_OPTION \
11306 + _IOW(VCHIQ_IOC_MAGIC, 14, VCHIQ_SET_SERVICE_OPTION_T)
11307 +#define VCHIQ_IOC_DUMP_PHYS_MEM \
11308 + _IOW(VCHIQ_IOC_MAGIC, 15, VCHIQ_DUMP_MEM_T)
11309 +#define VCHIQ_IOC_LIB_VERSION _IO(VCHIQ_IOC_MAGIC, 16)
11310 +#define VCHIQ_IOC_CLOSE_DELIVERED _IO(VCHIQ_IOC_MAGIC, 17)
11311 +#define VCHIQ_IOC_MAX 17
11312 +
11313 +#endif
11314 --- /dev/null
11315 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_kern_lib.c
11316 @@ -0,0 +1,458 @@
11317 +/**
11318 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
11319 + *
11320 + * Redistribution and use in source and binary forms, with or without
11321 + * modification, are permitted provided that the following conditions
11322 + * are met:
11323 + * 1. Redistributions of source code must retain the above copyright
11324 + * notice, this list of conditions, and the following disclaimer,
11325 + * without modification.
11326 + * 2. Redistributions in binary form must reproduce the above copyright
11327 + * notice, this list of conditions and the following disclaimer in the
11328 + * documentation and/or other materials provided with the distribution.
11329 + * 3. The names of the above-listed copyright holders may not be used
11330 + * to endorse or promote products derived from this software without
11331 + * specific prior written permission.
11332 + *
11333 + * ALTERNATIVELY, this software may be distributed under the terms of the
11334 + * GNU General Public License ("GPL") version 2, as published by the Free
11335 + * Software Foundation.
11336 + *
11337 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
11338 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
11339 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
11340 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
11341 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
11342 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
11343 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
11344 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
11345 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
11346 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
11347 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
11348 + */
11349 +
11350 +/* ---- Include Files ---------------------------------------------------- */
11351 +
11352 +#include <linux/kernel.h>
11353 +#include <linux/module.h>
11354 +#include <linux/mutex.h>
11355 +
11356 +#include "vchiq_core.h"
11357 +#include "vchiq_arm.h"
11358 +#include "vchiq_killable.h"
11359 +
11360 +/* ---- Public Variables ------------------------------------------------- */
11361 +
11362 +/* ---- Private Constants and Types -------------------------------------- */
11363 +
11364 +struct bulk_waiter_node {
11365 + struct bulk_waiter bulk_waiter;
11366 + int pid;
11367 + struct list_head list;
11368 +};
11369 +
11370 +struct vchiq_instance_struct {
11371 + VCHIQ_STATE_T *state;
11372 +
11373 + int connected;
11374 +
11375 + struct list_head bulk_waiter_list;
11376 + struct mutex bulk_waiter_list_mutex;
11377 +};
11378 +
11379 +static VCHIQ_STATUS_T
11380 +vchiq_blocking_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle, void *data,
11381 + unsigned int size, VCHIQ_BULK_DIR_T dir);
11382 +
11383 +/****************************************************************************
11384 +*
11385 +* vchiq_initialise
11386 +*
11387 +***************************************************************************/
11388 +#define VCHIQ_INIT_RETRIES 10
11389 +VCHIQ_STATUS_T vchiq_initialise(VCHIQ_INSTANCE_T *instanceOut)
11390 +{
11391 + VCHIQ_STATUS_T status = VCHIQ_ERROR;
11392 + VCHIQ_STATE_T *state;
11393 + VCHIQ_INSTANCE_T instance = NULL;
11394 + int i;
11395 +
11396 + vchiq_log_trace(vchiq_core_log_level, "%s called", __func__);
11397 +
11398 + /* VideoCore may not be ready due to boot up timing.
11399 + It may never be ready if kernel and firmware are mismatched, so don't block forever. */
11400 +	for (i = 0; i < VCHIQ_INIT_RETRIES; i++) {
11401 +		state = vchiq_get_state();
11402 +		if (state)
11403 +			break;
11404 +		udelay(500);
11405 +	}
11406 +	if (i == VCHIQ_INIT_RETRIES) {
11407 +		vchiq_log_error(vchiq_core_log_level,
11408 +			"%s: videocore not initialized\n", __func__);
11409 +		goto failed;
11410 +	} else if (i > 0) {
11411 + vchiq_log_warning(vchiq_core_log_level,
11412 + "%s: videocore initialized after %d retries\n", __func__, i);
11413 + }
11414 +
11415 + instance = kzalloc(sizeof(*instance), GFP_KERNEL);
11416 + if (!instance) {
11417 + vchiq_log_error(vchiq_core_log_level,
11418 + "%s: error allocating vchiq instance\n", __func__);
11419 + goto failed;
11420 + }
11421 +
11422 + instance->connected = 0;
11423 + instance->state = state;
11424 + mutex_init(&instance->bulk_waiter_list_mutex);
11425 + INIT_LIST_HEAD(&instance->bulk_waiter_list);
11426 +
11427 + *instanceOut = instance;
11428 +
11429 + status = VCHIQ_SUCCESS;
11430 +
11431 +failed:
11432 + vchiq_log_trace(vchiq_core_log_level,
11433 + "%s(%p): returning %d", __func__, instance, status);
11434 +
11435 + return status;
11436 +}
11437 +EXPORT_SYMBOL(vchiq_initialise);
11438 +
11439 +/****************************************************************************
11440 +*
11441 +* vchiq_shutdown
11442 +*
11443 +***************************************************************************/
11444 +
11445 +VCHIQ_STATUS_T vchiq_shutdown(VCHIQ_INSTANCE_T instance)
11446 +{
11447 + VCHIQ_STATUS_T status;
11448 + VCHIQ_STATE_T *state = instance->state;
11449 +
11450 + vchiq_log_trace(vchiq_core_log_level,
11451 + "%s(%p) called", __func__, instance);
11452 +
11453 + if (mutex_lock_interruptible(&state->mutex) != 0)
11454 + return VCHIQ_RETRY;
11455 +
11456 + /* Remove all services */
11457 + status = vchiq_shutdown_internal(state, instance);
11458 +
11459 + mutex_unlock(&state->mutex);
11460 +
11461 + vchiq_log_trace(vchiq_core_log_level,
11462 + "%s(%p): returning %d", __func__, instance, status);
11463 +
11464 + if (status == VCHIQ_SUCCESS) {
11465 + struct list_head *pos, *next;
11466 + list_for_each_safe(pos, next,
11467 + &instance->bulk_waiter_list) {
11468 + struct bulk_waiter_node *waiter;
11469 + waiter = list_entry(pos,
11470 + struct bulk_waiter_node,
11471 + list);
11472 + list_del(pos);
11473 + vchiq_log_info(vchiq_arm_log_level,
11474 + "bulk_waiter - cleaned up %x "
11475 + "for pid %d",
11476 + (unsigned int)waiter, waiter->pid);
11477 + kfree(waiter);
11478 + }
11479 + kfree(instance);
11480 + }
11481 +
11482 + return status;
11483 +}
11484 +EXPORT_SYMBOL(vchiq_shutdown);
11485 +
11486 +/****************************************************************************
11487 +*
11488 +* vchiq_is_connected
11489 +*
11490 +***************************************************************************/
11491 +
11492 +int vchiq_is_connected(VCHIQ_INSTANCE_T instance)
11493 +{
11494 + return instance->connected;
11495 +}
11496 +
11497 +/****************************************************************************
11498 +*
11499 +* vchiq_connect
11500 +*
11501 +***************************************************************************/
11502 +
11503 +VCHIQ_STATUS_T vchiq_connect(VCHIQ_INSTANCE_T instance)
11504 +{
11505 + VCHIQ_STATUS_T status;
11506 + VCHIQ_STATE_T *state = instance->state;
11507 +
11508 + vchiq_log_trace(vchiq_core_log_level,
11509 + "%s(%p) called", __func__, instance);
11510 +
11511 + if (mutex_lock_interruptible(&state->mutex) != 0) {
11512 + vchiq_log_trace(vchiq_core_log_level,
11513 + "%s: call to mutex_lock failed", __func__);
11514 + status = VCHIQ_RETRY;
11515 + goto failed;
11516 + }
11517 + status = vchiq_connect_internal(state, instance);
11518 +
11519 + if (status == VCHIQ_SUCCESS)
11520 + instance->connected = 1;
11521 +
11522 + mutex_unlock(&state->mutex);
11523 +
11524 +failed:
11525 + vchiq_log_trace(vchiq_core_log_level,
11526 + "%s(%p): returning %d", __func__, instance, status);
11527 +
11528 + return status;
11529 +}
11530 +EXPORT_SYMBOL(vchiq_connect);
11531 +
11532 +/****************************************************************************
11533 +*
11534 +* vchiq_add_service
11535 +*
11536 +***************************************************************************/
11537 +
11538 +VCHIQ_STATUS_T vchiq_add_service(
11539 + VCHIQ_INSTANCE_T instance,
11540 + const VCHIQ_SERVICE_PARAMS_T *params,
11541 + VCHIQ_SERVICE_HANDLE_T *phandle)
11542 +{
11543 + VCHIQ_STATUS_T status;
11544 + VCHIQ_STATE_T *state = instance->state;
11545 + VCHIQ_SERVICE_T *service = NULL;
11546 + int srvstate;
11547 +
11548 + vchiq_log_trace(vchiq_core_log_level,
11549 + "%s(%p) called", __func__, instance);
11550 +
11551 + *phandle = VCHIQ_SERVICE_HANDLE_INVALID;
11552 +
11553 + srvstate = vchiq_is_connected(instance)
11554 + ? VCHIQ_SRVSTATE_LISTENING
11555 + : VCHIQ_SRVSTATE_HIDDEN;
11556 +
11557 + service = vchiq_add_service_internal(
11558 + state,
11559 + params,
11560 + srvstate,
11561 + instance,
11562 + NULL);
11563 +
11564 + if (service) {
11565 + *phandle = service->handle;
11566 + status = VCHIQ_SUCCESS;
11567 + } else
11568 + status = VCHIQ_ERROR;
11569 +
11570 + vchiq_log_trace(vchiq_core_log_level,
11571 + "%s(%p): returning %d", __func__, instance, status);
11572 +
11573 + return status;
11574 +}
11575 +EXPORT_SYMBOL(vchiq_add_service);
11576 +
11577 +/****************************************************************************
11578 +*
11579 +* vchiq_open_service
11580 +*
11581 +***************************************************************************/
11582 +
11583 +VCHIQ_STATUS_T vchiq_open_service(
11584 + VCHIQ_INSTANCE_T instance,
11585 + const VCHIQ_SERVICE_PARAMS_T *params,
11586 + VCHIQ_SERVICE_HANDLE_T *phandle)
11587 +{
11588 + VCHIQ_STATUS_T status = VCHIQ_ERROR;
11589 + VCHIQ_STATE_T *state = instance->state;
11590 + VCHIQ_SERVICE_T *service = NULL;
11591 +
11592 + vchiq_log_trace(vchiq_core_log_level,
11593 + "%s(%p) called", __func__, instance);
11594 +
11595 + *phandle = VCHIQ_SERVICE_HANDLE_INVALID;
11596 +
11597 + if (!vchiq_is_connected(instance))
11598 + goto failed;
11599 +
11600 + service = vchiq_add_service_internal(state,
11601 + params,
11602 + VCHIQ_SRVSTATE_OPENING,
11603 + instance,
11604 + NULL);
11605 +
11606 + if (service) {
11607 + *phandle = service->handle;
11608 + status = vchiq_open_service_internal(service, current->pid);
11609 + if (status != VCHIQ_SUCCESS) {
11610 + vchiq_remove_service(service->handle);
11611 + *phandle = VCHIQ_SERVICE_HANDLE_INVALID;
11612 + }
11613 + }
11614 +
11615 +failed:
11616 + vchiq_log_trace(vchiq_core_log_level,
11617 + "%s(%p): returning %d", __func__, instance, status);
11618 +
11619 + return status;
11620 +}
11621 +EXPORT_SYMBOL(vchiq_open_service);
11622 +
11623 +VCHIQ_STATUS_T
11624 +vchiq_queue_bulk_transmit(VCHIQ_SERVICE_HANDLE_T handle,
11625 + const void *data, unsigned int size, void *userdata)
11626 +{
11627 + return vchiq_bulk_transfer(handle,
11628 + VCHI_MEM_HANDLE_INVALID, (void *)data, size, userdata,
11629 + VCHIQ_BULK_MODE_CALLBACK, VCHIQ_BULK_TRANSMIT);
11630 +}
11631 +EXPORT_SYMBOL(vchiq_queue_bulk_transmit);
11632 +
11633 +VCHIQ_STATUS_T
11634 +vchiq_queue_bulk_receive(VCHIQ_SERVICE_HANDLE_T handle, void *data,
11635 + unsigned int size, void *userdata)
11636 +{
11637 + return vchiq_bulk_transfer(handle,
11638 + VCHI_MEM_HANDLE_INVALID, data, size, userdata,
11639 + VCHIQ_BULK_MODE_CALLBACK, VCHIQ_BULK_RECEIVE);
11640 +}
11641 +EXPORT_SYMBOL(vchiq_queue_bulk_receive);
11642 +
11643 +VCHIQ_STATUS_T
11644 +vchiq_bulk_transmit(VCHIQ_SERVICE_HANDLE_T handle, const void *data,
11645 + unsigned int size, void *userdata, VCHIQ_BULK_MODE_T mode)
11646 +{
11647 + VCHIQ_STATUS_T status;
11648 +
11649 + switch (mode) {
11650 + case VCHIQ_BULK_MODE_NOCALLBACK:
11651 + case VCHIQ_BULK_MODE_CALLBACK:
11652 + status = vchiq_bulk_transfer(handle,
11653 + VCHI_MEM_HANDLE_INVALID, (void *)data, size, userdata,
11654 + mode, VCHIQ_BULK_TRANSMIT);
11655 + break;
11656 + case VCHIQ_BULK_MODE_BLOCKING:
11657 + status = vchiq_blocking_bulk_transfer(handle,
11658 + (void *)data, size, VCHIQ_BULK_TRANSMIT);
11659 + break;
11660 + default:
11661 + return VCHIQ_ERROR;
11662 + }
11663 +
11664 + return status;
11665 +}
11666 +EXPORT_SYMBOL(vchiq_bulk_transmit);
11667 +
11668 +VCHIQ_STATUS_T
11669 +vchiq_bulk_receive(VCHIQ_SERVICE_HANDLE_T handle, void *data,
11670 + unsigned int size, void *userdata, VCHIQ_BULK_MODE_T mode)
11671 +{
11672 + VCHIQ_STATUS_T status;
11673 +
11674 + switch (mode) {
11675 + case VCHIQ_BULK_MODE_NOCALLBACK:
11676 + case VCHIQ_BULK_MODE_CALLBACK:
11677 + status = vchiq_bulk_transfer(handle,
11678 + VCHI_MEM_HANDLE_INVALID, data, size, userdata,
11679 + mode, VCHIQ_BULK_RECEIVE);
11680 + break;
11681 + case VCHIQ_BULK_MODE_BLOCKING:
11682 + status = vchiq_blocking_bulk_transfer(handle,
11683 + (void *)data, size, VCHIQ_BULK_RECEIVE);
11684 + break;
11685 + default:
11686 + return VCHIQ_ERROR;
11687 + }
11688 +
11689 + return status;
11690 +}
11691 +EXPORT_SYMBOL(vchiq_bulk_receive);
11692 +
11693 +static VCHIQ_STATUS_T
11694 +vchiq_blocking_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle, void *data,
11695 + unsigned int size, VCHIQ_BULK_DIR_T dir)
11696 +{
11697 + VCHIQ_INSTANCE_T instance;
11698 + VCHIQ_SERVICE_T *service;
11699 + VCHIQ_STATUS_T status;
11700 + struct bulk_waiter_node *waiter = NULL;
11701 + struct list_head *pos;
11702 +
11703 + service = find_service_by_handle(handle);
11704 + if (!service)
11705 + return VCHIQ_ERROR;
11706 +
11707 + instance = service->instance;
11708 +
11709 + unlock_service(service);
11710 +
11711 + mutex_lock(&instance->bulk_waiter_list_mutex);
11712 + list_for_each(pos, &instance->bulk_waiter_list) {
11713 + if (list_entry(pos, struct bulk_waiter_node,
11714 + list)->pid == current->pid) {
11715 + waiter = list_entry(pos,
11716 + struct bulk_waiter_node,
11717 + list);
11718 + list_del(pos);
11719 + break;
11720 + }
11721 + }
11722 + mutex_unlock(&instance->bulk_waiter_list_mutex);
11723 +
11724 + if (waiter) {
11725 + VCHIQ_BULK_T *bulk = waiter->bulk_waiter.bulk;
11726 + if (bulk) {
11727 + /* This thread has an outstanding bulk transfer. */
11728 + if ((bulk->data != data) ||
11729 + (bulk->size != size)) {
11730 + /* This is not a retry of the previous one.
11731 + ** Cancel the signal when the transfer
11732 + ** completes. */
11733 + spin_lock(&bulk_waiter_spinlock);
11734 + bulk->userdata = NULL;
11735 + spin_unlock(&bulk_waiter_spinlock);
11736 + }
11737 + }
11738 + }
11739 +
11740 + if (!waiter) {
11741 + waiter = kzalloc(sizeof(struct bulk_waiter_node), GFP_KERNEL);
11742 + if (!waiter) {
11743 + vchiq_log_error(vchiq_core_log_level,
11744 + "%s - out of memory", __func__);
11745 + return VCHIQ_ERROR;
11746 + }
11747 + }
11748 +
11749 + status = vchiq_bulk_transfer(handle, VCHI_MEM_HANDLE_INVALID,
11750 + data, size, &waiter->bulk_waiter, VCHIQ_BULK_MODE_BLOCKING,
11751 + dir);
11752 + if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) ||
11753 + !waiter->bulk_waiter.bulk) {
11754 + VCHIQ_BULK_T *bulk = waiter->bulk_waiter.bulk;
11755 + if (bulk) {
11756 + /* Cancel the signal when the transfer
11757 + ** completes. */
11758 + spin_lock(&bulk_waiter_spinlock);
11759 + bulk->userdata = NULL;
11760 + spin_unlock(&bulk_waiter_spinlock);
11761 + }
11762 + kfree(waiter);
11763 + } else {
11764 + waiter->pid = current->pid;
11765 + mutex_lock(&instance->bulk_waiter_list_mutex);
11766 + list_add(&waiter->list, &instance->bulk_waiter_list);
11767 + mutex_unlock(&instance->bulk_waiter_list_mutex);
11768 + vchiq_log_info(vchiq_arm_log_level,
11769 + "saved bulk_waiter %x for pid %d",
11770 + (unsigned int)waiter, current->pid);
11771 + }
11772 +
11773 + return status;
11774 +}
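Taken together, the exported functions above form the in-kernel client API. A minimal sketch of how a client module would use them follows; it is illustrative only (not part of the patch), the "EXMP" fourcc and version numbers are placeholders, and vchiq_if.h is the public header that declares these calls and the VCHIQ_MAKE_FOURCC helper.

#include <linux/errno.h>
#include <linux/string.h>
#include "vchiq_if.h"

static VCHIQ_STATUS_T example_callback(VCHIQ_REASON_T reason,
	VCHIQ_HEADER_T *header, VCHIQ_SERVICE_HANDLE_T handle, void *bulk_user)
{
	if (reason == VCHIQ_MESSAGE_AVAILABLE)
		/* Consume the message, then hand the slot space back. */
		vchiq_release_message(handle, header);
	return VCHIQ_SUCCESS;
}

static int example_client_init(void)
{
	VCHIQ_INSTANCE_T instance;
	VCHIQ_SERVICE_HANDLE_T service;
	VCHIQ_SERVICE_PARAMS_T params;

	if (vchiq_initialise(&instance) != VCHIQ_SUCCESS)
		return -ENXIO;

	if (vchiq_connect(instance) != VCHIQ_SUCCESS)
		goto fail;

	memset(&params, 0, sizeof(params));
	params.fourcc      = VCHIQ_MAKE_FOURCC('E', 'X', 'M', 'P'); /* placeholder */
	params.callback    = example_callback;
	params.userdata    = NULL;
	params.version     = 1;
	params.version_min = 1;

	if (vchiq_open_service(instance, &params, &service) != VCHIQ_SUCCESS)
		goto fail;

	/* The service is now usable, e.g. via vchiq_queue_bulk_transmit(). */
	return 0;

fail:
	vchiq_shutdown(instance);
	return -ENXIO;
}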
11775 --- /dev/null
11776 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_killable.h
11777 @@ -0,0 +1,69 @@
11778 +/**
11779 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
11780 + *
11781 + * Redistribution and use in source and binary forms, with or without
11782 + * modification, are permitted provided that the following conditions
11783 + * are met:
11784 + * 1. Redistributions of source code must retain the above copyright
11785 + * notice, this list of conditions, and the following disclaimer,
11786 + * without modification.
11787 + * 2. Redistributions in binary form must reproduce the above copyright
11788 + * notice, this list of conditions and the following disclaimer in the
11789 + * documentation and/or other materials provided with the distribution.
11790 + * 3. The names of the above-listed copyright holders may not be used
11791 + * to endorse or promote products derived from this software without
11792 + * specific prior written permission.
11793 + *
11794 + * ALTERNATIVELY, this software may be distributed under the terms of the
11795 + * GNU General Public License ("GPL") version 2, as published by the Free
11796 + * Software Foundation.
11797 + *
11798 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
11799 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
11800 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
11801 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
11802 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
11803 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
11804 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
11805 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
11806 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
11807 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
11808 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
11809 + */
11810 +
11811 +#ifndef VCHIQ_KILLABLE_H
11812 +#define VCHIQ_KILLABLE_H
11813 +
11814 +#include <linux/mutex.h>
11815 +#include <linux/semaphore.h>
11816 +
11817 +#define SHUTDOWN_SIGS (sigmask(SIGKILL) | sigmask(SIGINT) | sigmask(SIGQUIT) | sigmask(SIGTRAP) | sigmask(SIGSTOP) | sigmask(SIGCONT))
11818 +
11819 +static inline int __must_check down_interruptible_killable(struct semaphore *sem)
11820 +{
11821 + /* Allow interception of killable signals only. We don't want to be interrupted by harmless signals like SIGALRM */
11822 + int ret;
11823 + sigset_t blocked, oldset;
11824 + siginitsetinv(&blocked, SHUTDOWN_SIGS);
11825 + sigprocmask(SIG_SETMASK, &blocked, &oldset);
11826 + ret = down_interruptible(sem);
11827 + sigprocmask(SIG_SETMASK, &oldset, NULL);
11828 + return ret;
11829 +}
11830 +#define down_interruptible down_interruptible_killable
11831 +
11832 +
11833 +static inline int __must_check mutex_lock_interruptible_killable(struct mutex *lock)
11834 +{
11835 + /* Allow interception of killable signals only. We don't want to be interrupted by harmless signals like SIGALRM */
11836 + int ret;
11837 + sigset_t blocked, oldset;
11838 + siginitsetinv(&blocked, SHUTDOWN_SIGS);
11839 + sigprocmask(SIG_SETMASK, &blocked, &oldset);
11840 + ret = mutex_lock_interruptible(lock);
11841 + sigprocmask(SIG_SETMASK, &oldset, NULL);
11842 + return ret;
11843 +}
11844 +#define mutex_lock_interruptible mutex_lock_interruptible_killable
11845 +
11846 +#endif
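The two macros at the end are the whole trick: any vchiq source file that includes this header after the standard locking headers silently swaps the interruptible primitives for killable variants, so only the shutdown-class signals listed above can abort a wait. A hypothetical caller, for illustration only:

#include <linux/errno.h>
#include "vchiq_killable.h"

static int example_wait(struct semaphore *sem)
{
	/* Compiles to down_interruptible_killable(): a stray SIGALRM no
	 * longer interrupts the wait, while SIGKILL/SIGINT/etc. still do. */
	if (down_interruptible(sem) != 0)
		return -EAGAIN;
	return 0;
}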
11847 --- /dev/null
11848 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_memdrv.h
11849 @@ -0,0 +1,71 @@
11850 +/**
11851 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
11852 + *
11853 + * Redistribution and use in source and binary forms, with or without
11854 + * modification, are permitted provided that the following conditions
11855 + * are met:
11856 + * 1. Redistributions of source code must retain the above copyright
11857 + * notice, this list of conditions, and the following disclaimer,
11858 + * without modification.
11859 + * 2. Redistributions in binary form must reproduce the above copyright
11860 + * notice, this list of conditions and the following disclaimer in the
11861 + * documentation and/or other materials provided with the distribution.
11862 + * 3. The names of the above-listed copyright holders may not be used
11863 + * to endorse or promote products derived from this software without
11864 + * specific prior written permission.
11865 + *
11866 + * ALTERNATIVELY, this software may be distributed under the terms of the
11867 + * GNU General Public License ("GPL") version 2, as published by the Free
11868 + * Software Foundation.
11869 + *
11870 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
11871 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
11872 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
11873 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
11874 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
11875 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
11876 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
11877 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
11878 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
11879 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
11880 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
11881 + */
11882 +
11883 +#ifndef VCHIQ_MEMDRV_H
11884 +#define VCHIQ_MEMDRV_H
11885 +
11886 +/* ---- Include Files ----------------------------------------------------- */
11887 +
11888 +#include <linux/kernel.h>
11889 +#include "vchiq_if.h"
11890 +
11891 +/* ---- Constants and Types ---------------------------------------------- */
11892 +
11893 +typedef struct {
11894 + void *armSharedMemVirt;
11895 + dma_addr_t armSharedMemPhys;
11896 + size_t armSharedMemSize;
11897 +
11898 + void *vcSharedMemVirt;
11899 + dma_addr_t vcSharedMemPhys;
11900 + size_t vcSharedMemSize;
11901 +} VCHIQ_SHARED_MEM_INFO_T;
11902 +
11903 +/* ---- Variable Externs ------------------------------------------------- */
11904 +
11905 +/* ---- Function Prototypes ---------------------------------------------- */
11906 +
11907 +void vchiq_get_shared_mem_info(VCHIQ_SHARED_MEM_INFO_T *info);
11908 +
11909 +VCHIQ_STATUS_T vchiq_memdrv_initialise(void);
11910 +
11911 +VCHIQ_STATUS_T vchiq_userdrv_create_instance(
11912 +	const VCHIQ_PLATFORM_DATA_T *platform_data);
11913 +
11914 +VCHIQ_STATUS_T vchiq_userdrv_suspend(
11915 +	const VCHIQ_PLATFORM_DATA_T *platform_data);
11916 +
11917 +VCHIQ_STATUS_T vchiq_userdrv_resume(
11918 +	const VCHIQ_PLATFORM_DATA_T *platform_data);
11919 +
11920 +#endif
11921 --- /dev/null
11922 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_pagelist.h
11923 @@ -0,0 +1,58 @@
11924 +/**
11925 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
11926 + *
11927 + * Redistribution and use in source and binary forms, with or without
11928 + * modification, are permitted provided that the following conditions
11929 + * are met:
11930 + * 1. Redistributions of source code must retain the above copyright
11931 + * notice, this list of conditions, and the following disclaimer,
11932 + * without modification.
11933 + * 2. Redistributions in binary form must reproduce the above copyright
11934 + * notice, this list of conditions and the following disclaimer in the
11935 + * documentation and/or other materials provided with the distribution.
11936 + * 3. The names of the above-listed copyright holders may not be used
11937 + * to endorse or promote products derived from this software without
11938 + * specific prior written permission.
11939 + *
11940 + * ALTERNATIVELY, this software may be distributed under the terms of the
11941 + * GNU General Public License ("GPL") version 2, as published by the Free
11942 + * Software Foundation.
11943 + *
11944 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
11945 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
11946 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
11947 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
11948 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
11949 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
11950 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
11951 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
11952 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
11953 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
11954 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
11955 + */
11956 +
11957 +#ifndef VCHIQ_PAGELIST_H
11958 +#define VCHIQ_PAGELIST_H
11959 +
11960 +#ifndef PAGE_SIZE
11961 +#define PAGE_SIZE 4096
11962 +#endif
11963 +#define CACHE_LINE_SIZE 32
11964 +#define PAGELIST_WRITE 0
11965 +#define PAGELIST_READ 1
11966 +#define PAGELIST_READ_WITH_FRAGMENTS 2
11967 +
11968 +typedef struct pagelist_struct {
11969 + unsigned long length;
11970 + unsigned short type;
11971 + unsigned short offset;
11972 + unsigned long addrs[1]; /* N.B. 12 LSBs hold the number of following
11973 + pages at consecutive addresses. */
11974 +} PAGELIST_T;
11975 +
11976 +typedef struct fragments_struct {
11977 + char headbuf[CACHE_LINE_SIZE];
11978 + char tailbuf[CACHE_LINE_SIZE];
11979 +} FRAGMENTS_T;
11980 +
11981 +#endif /* VCHIQ_PAGELIST_H */
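The addrs[] comment is terse, so the helpers below spell out the packing it describes: each entry is a 4 KiB-aligned bus address whose otherwise-unused low 12 bits carry the number of additional physically consecutive pages in the run. These helpers are illustrative only and are not part of the patch.

#include "vchiq_pagelist.h"

/* Pack a run: base page address plus the count of *extra* consecutive pages. */
static unsigned long pagelist_pack_run(unsigned long page_addr,
	unsigned int extra_pages)
{
	return (page_addr & ~((unsigned long)PAGE_SIZE - 1)) |
		(extra_pages & (PAGE_SIZE - 1));
}

static unsigned long pagelist_run_base(unsigned long entry)
{
	return entry & ~((unsigned long)PAGE_SIZE - 1);
}

static unsigned int pagelist_run_extra_pages(unsigned long entry)
{
	return entry & (PAGE_SIZE - 1);
}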
11982 --- /dev/null
11983 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_shim.c
11984 @@ -0,0 +1,860 @@
11985 +/**
11986 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
11987 + *
11988 + * Redistribution and use in source and binary forms, with or without
11989 + * modification, are permitted provided that the following conditions
11990 + * are met:
11991 + * 1. Redistributions of source code must retain the above copyright
11992 + * notice, this list of conditions, and the following disclaimer,
11993 + * without modification.
11994 + * 2. Redistributions in binary form must reproduce the above copyright
11995 + * notice, this list of conditions and the following disclaimer in the
11996 + * documentation and/or other materials provided with the distribution.
11997 + * 3. The names of the above-listed copyright holders may not be used
11998 + * to endorse or promote products derived from this software without
11999 + * specific prior written permission.
12000 + *
12001 + * ALTERNATIVELY, this software may be distributed under the terms of the
12002 + * GNU General Public License ("GPL") version 2, as published by the Free
12003 + * Software Foundation.
12004 + *
12005 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
12006 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
12007 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
12008 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
12009 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
12010 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
12011 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
12012 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
12013 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
12014 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
12015 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
12016 + */
12017 +#include <linux/module.h>
12018 +#include <linux/types.h>
12019 +
12020 +#include "interface/vchi/vchi.h"
12021 +#include "vchiq.h"
12022 +#include "vchiq_core.h"
12023 +
12024 +#include "vchiq_util.h"
12025 +
12026 +#include <stddef.h>
12027 +
12028 +#define vchiq_status_to_vchi(status) ((int32_t)status)
12029 +
12030 +typedef struct {
12031 + VCHIQ_SERVICE_HANDLE_T handle;
12032 +
12033 + VCHIU_QUEUE_T queue;
12034 +
12035 + VCHI_CALLBACK_T callback;
12036 + void *callback_param;
12037 +} SHIM_SERVICE_T;
12038 +
12039 +/* ----------------------------------------------------------------------
12040 + * return pointer to the mphi message driver function table
12041 + * -------------------------------------------------------------------- */
12042 +const VCHI_MESSAGE_DRIVER_T *
12043 +vchi_mphi_message_driver_func_table(void)
12044 +{
12045 + return NULL;
12046 +}
12047 +
12048 +/* ----------------------------------------------------------------------
12049 + * return a pointer to the 'single' connection driver fops
12050 + * -------------------------------------------------------------------- */
12051 +const VCHI_CONNECTION_API_T *
12052 +single_get_func_table(void)
12053 +{
12054 + return NULL;
12055 +}
12056 +
12057 +VCHI_CONNECTION_T *vchi_create_connection(
12058 + const VCHI_CONNECTION_API_T *function_table,
12059 + const VCHI_MESSAGE_DRIVER_T *low_level)
12060 +{
12061 + (void)function_table;
12062 + (void)low_level;
12063 + return NULL;
12064 +}
12065 +
12066 +/***********************************************************
12067 + * Name: vchi_msg_peek
12068 + *
12069 + * Arguments: const VCHI_SERVICE_HANDLE_T handle,
12070 + * void **data,
12071 + * uint32_t *msg_size,
12072 +
12073 + *
12074 + *
12075 + *
12076 + * Description: Routine to return a pointer to the current message (to allow in
12077 + * place processing). The message can be removed using
12078 + * vchi_msg_remove when you're finished
12079 + *
12080 + * Returns: int32_t - success == 0
12081 + *
12082 + ***********************************************************/
12083 +int32_t vchi_msg_peek(VCHI_SERVICE_HANDLE_T handle,
12084 + void **data,
12085 + uint32_t *msg_size,
12086 + VCHI_FLAGS_T flags)
12087 +{
12088 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
12089 + VCHIQ_HEADER_T *header;
12090 +
12091 + WARN_ON((flags != VCHI_FLAGS_NONE) &&
12092 + (flags != VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE));
12093 +
12094 + if (flags == VCHI_FLAGS_NONE)
12095 + if (vchiu_queue_is_empty(&service->queue))
12096 + return -1;
12097 +
12098 + header = vchiu_queue_peek(&service->queue);
12099 +
12100 + *data = header->data;
12101 + *msg_size = header->size;
12102 +
12103 + return 0;
12104 +}
12105 +EXPORT_SYMBOL(vchi_msg_peek);
12106 +
12107 +/***********************************************************
12108 + * Name: vchi_msg_remove
12109 + *
12110 + * Arguments: const VCHI_SERVICE_HANDLE_T handle,
12111 + *
12112 + * Description: Routine to remove a message (after it has been read with
12113 + * vchi_msg_peek)
12114 + *
12115 + * Returns: int32_t - success == 0
12116 + *
12117 + ***********************************************************/
12118 +int32_t vchi_msg_remove(VCHI_SERVICE_HANDLE_T handle)
12119 +{
12120 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
12121 + VCHIQ_HEADER_T *header;
12122 +
12123 + header = vchiu_queue_pop(&service->queue);
12124 +
12125 + vchiq_release_message(service->handle, header);
12126 +
12127 + return 0;
12128 +}
12129 +EXPORT_SYMBOL(vchi_msg_remove);
12130 +
12131 +/***********************************************************
12132 + * Name: vchi_msg_queue
12133 + *
12134 + * Arguments: VCHI_SERVICE_HANDLE_T handle,
12135 + * const void *data,
12136 + * uint32_t data_size,
12137 + * VCHI_FLAGS_T flags,
12138 + * void *msg_handle,
12139 + *
12140 + * Description: Thin wrapper to queue a message onto a connection
12141 + *
12142 + * Returns: int32_t - success == 0
12143 + *
12144 + ***********************************************************/
12145 +int32_t vchi_msg_queue(VCHI_SERVICE_HANDLE_T handle,
12146 + const void *data,
12147 + uint32_t data_size,
12148 + VCHI_FLAGS_T flags,
12149 + void *msg_handle)
12150 +{
12151 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
12152 + VCHIQ_ELEMENT_T element = {data, data_size};
12153 + VCHIQ_STATUS_T status;
12154 +
12155 + (void)msg_handle;
12156 +
12157 + WARN_ON(flags != VCHI_FLAGS_BLOCK_UNTIL_QUEUED);
12158 +
12159 + status = vchiq_queue_message(service->handle, &element, 1);
12160 +
12161 + /* vchiq_queue_message() may return VCHIQ_RETRY, so we need to
12162 + ** implement a retry mechanism since this function is supposed
12163 + ** to block until queued
12164 + */
12165 + while (status == VCHIQ_RETRY) {
12166 + msleep(1);
12167 + status = vchiq_queue_message(service->handle, &element, 1);
12168 + }
12169 +
12170 + return vchiq_status_to_vchi(status);
12171 +}
12172 +EXPORT_SYMBOL(vchi_msg_queue);
12173 +
12174 +/***********************************************************
12175 + * Name: vchi_bulk_queue_receive
12176 + *
12177 + * Arguments: VCHI_BULK_HANDLE_T handle,
12178 + * void *data_dst,
12179 + * const uint32_t data_size,
12180 + * VCHI_FLAGS_T flags
12181 + * void *bulk_handle
12182 + *
12183 + * Description: Routine to set up a receive buffer
12184 + *
12185 + * Returns: int32_t - success == 0
12186 + *
12187 + ***********************************************************/
12188 +int32_t vchi_bulk_queue_receive(VCHI_SERVICE_HANDLE_T handle,
12189 + void *data_dst,
12190 + uint32_t data_size,
12191 + VCHI_FLAGS_T flags,
12192 + void *bulk_handle)
12193 +{
12194 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
12195 + VCHIQ_BULK_MODE_T mode;
12196 + VCHIQ_STATUS_T status;
12197 +
12198 + switch ((int)flags) {
12199 + case VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE
12200 + | VCHI_FLAGS_BLOCK_UNTIL_QUEUED:
12201 + WARN_ON(!service->callback);
12202 + mode = VCHIQ_BULK_MODE_CALLBACK;
12203 + break;
12204 + case VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE:
12205 + mode = VCHIQ_BULK_MODE_BLOCKING;
12206 + break;
12207 + case VCHI_FLAGS_BLOCK_UNTIL_QUEUED:
12208 + case VCHI_FLAGS_NONE:
12209 + mode = VCHIQ_BULK_MODE_NOCALLBACK;
12210 + break;
12211 + default:
12212 + WARN(1, "unsupported message\n");
12213 + return vchiq_status_to_vchi(VCHIQ_ERROR);
12214 + }
12215 +
12216 + status = vchiq_bulk_receive(service->handle, data_dst, data_size,
12217 + bulk_handle, mode);
12218 +
12219 + /* vchiq_bulk_receive() may return VCHIQ_RETRY, so we need to
12220 + ** implement a retry mechanism since this function is supposed
12221 + ** to block until queued
12222 + */
12223 + while (status == VCHIQ_RETRY) {
12224 + msleep(1);
12225 + status = vchiq_bulk_receive(service->handle, data_dst,
12226 + data_size, bulk_handle, mode);
12227 + }
12228 +
12229 + return vchiq_status_to_vchi(status);
12230 +}
12231 +EXPORT_SYMBOL(vchi_bulk_queue_receive);
12232 +
12233 +/***********************************************************
12234 + * Name: vchi_bulk_queue_transmit
12235 + *
12236 + * Arguments: VCHI_BULK_HANDLE_T handle,
12237 + * const void *data_src,
12238 + * uint32_t data_size,
12239 + * VCHI_FLAGS_T flags,
12240 + * void *bulk_handle
12241 + *
12242 + * Description: Routine to transmit some data
12243 + *
12244 + * Returns: int32_t - success == 0
12245 + *
12246 + ***********************************************************/
12247 +int32_t vchi_bulk_queue_transmit(VCHI_SERVICE_HANDLE_T handle,
12248 + const void *data_src,
12249 + uint32_t data_size,
12250 + VCHI_FLAGS_T flags,
12251 + void *bulk_handle)
12252 +{
12253 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
12254 + VCHIQ_BULK_MODE_T mode;
12255 + VCHIQ_STATUS_T status;
12256 +
12257 + switch ((int)flags) {
12258 + case VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE
12259 + | VCHI_FLAGS_BLOCK_UNTIL_QUEUED:
12260 + WARN_ON(!service->callback);
12261 + mode = VCHIQ_BULK_MODE_CALLBACK;
12262 + break;
12263 + case VCHI_FLAGS_BLOCK_UNTIL_DATA_READ:
12264 + case VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE:
12265 + mode = VCHIQ_BULK_MODE_BLOCKING;
12266 + break;
12267 + case VCHI_FLAGS_BLOCK_UNTIL_QUEUED:
12268 + case VCHI_FLAGS_NONE:
12269 + mode = VCHIQ_BULK_MODE_NOCALLBACK;
12270 + break;
12271 + default:
12272 + WARN(1, "unsupported message\n");
12273 + return vchiq_status_to_vchi(VCHIQ_ERROR);
12274 + }
12275 +
12276 + status = vchiq_bulk_transmit(service->handle, data_src, data_size,
12277 + bulk_handle, mode);
12278 +
12279 + /* vchiq_bulk_transmit() may return VCHIQ_RETRY, so we need to
12280 + ** implement a retry mechanism since this function is supposed
12281 + ** to block until queued
12282 + */
12283 + while (status == VCHIQ_RETRY) {
12284 + msleep(1);
12285 + status = vchiq_bulk_transmit(service->handle, data_src,
12286 + data_size, bulk_handle, mode);
12287 + }
12288 +
12289 + return vchiq_status_to_vchi(status);
12290 +}
12291 +EXPORT_SYMBOL(vchi_bulk_queue_transmit);
12292 +
12293 +/***********************************************************
12294 + * Name: vchi_msg_dequeue
12295 + *
12296 + * Arguments: VCHI_SERVICE_HANDLE_T handle,
12297 + * void *data,
12298 + * uint32_t max_data_size_to_read,
12299 + * uint32_t *actual_msg_size
12300 + * VCHI_FLAGS_T flags
12301 + *
12302 + * Description: Routine to dequeue a message into the supplied buffer
12303 + *
12304 + * Returns: int32_t - success == 0
12305 + *
12306 + ***********************************************************/
12307 +int32_t vchi_msg_dequeue(VCHI_SERVICE_HANDLE_T handle,
12308 + void *data,
12309 + uint32_t max_data_size_to_read,
12310 + uint32_t *actual_msg_size,
12311 + VCHI_FLAGS_T flags)
12312 +{
12313 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
12314 + VCHIQ_HEADER_T *header;
12315 +
12316 + WARN_ON((flags != VCHI_FLAGS_NONE) &&
12317 + (flags != VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE));
12318 +
12319 + if (flags == VCHI_FLAGS_NONE)
12320 + if (vchiu_queue_is_empty(&service->queue))
12321 + return -1;
12322 +
12323 + header = vchiu_queue_pop(&service->queue);
12324 +
12325 + memcpy(data, header->data, header->size < max_data_size_to_read ?
12326 + header->size : max_data_size_to_read);
12327 +
12328 + *actual_msg_size = header->size;
12329 +
12330 + vchiq_release_message(service->handle, header);
12331 +
12332 + return 0;
12333 +}
12334 +EXPORT_SYMBOL(vchi_msg_dequeue);
12335 +
12336 +/***********************************************************
12337 + * Name: vchi_msg_queuev
12338 + *
12339 + * Arguments: VCHI_SERVICE_HANDLE_T handle,
12340 + * VCHI_MSG_VECTOR_T *vector,
12341 + * uint32_t count,
12342 + * VCHI_FLAGS_T flags,
12343 + * void *msg_handle
12344 + *
12345 + * Description: Thin wrapper to queue a message onto a connection
12346 + *
12347 + * Returns: int32_t - success == 0
12348 + *
12349 + ***********************************************************/
12350 +
12351 +vchiq_static_assert(sizeof(VCHI_MSG_VECTOR_T) == sizeof(VCHIQ_ELEMENT_T));
12352 +vchiq_static_assert(offsetof(VCHI_MSG_VECTOR_T, vec_base) ==
12353 + offsetof(VCHIQ_ELEMENT_T, data));
12354 +vchiq_static_assert(offsetof(VCHI_MSG_VECTOR_T, vec_len) ==
12355 + offsetof(VCHIQ_ELEMENT_T, size));
12356 +
12357 +int32_t vchi_msg_queuev(VCHI_SERVICE_HANDLE_T handle,
12358 + VCHI_MSG_VECTOR_T *vector,
12359 + uint32_t count,
12360 + VCHI_FLAGS_T flags,
12361 + void *msg_handle)
12362 +{
12363 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
12364 +
12365 + (void)msg_handle;
12366 +
12367 + WARN_ON(flags != VCHI_FLAGS_BLOCK_UNTIL_QUEUED);
12368 +
12369 + return vchiq_status_to_vchi(vchiq_queue_message(service->handle,
12370 + (const VCHIQ_ELEMENT_T *)vector, count));
12371 +}
12372 +EXPORT_SYMBOL(vchi_msg_queuev);
12373 +
12374 +/***********************************************************
12375 + * Name: vchi_held_msg_release
12376 + *
12377 + * Arguments: VCHI_HELD_MSG_T *message
12378 + *
12379 + * Description: Routine to release a held message (after it has been read with
12380 + * vchi_msg_hold)
12381 + *
12382 + * Returns: int32_t - success == 0
12383 + *
12384 + ***********************************************************/
12385 +int32_t vchi_held_msg_release(VCHI_HELD_MSG_T *message)
12386 +{
12387 + vchiq_release_message((VCHIQ_SERVICE_HANDLE_T)message->service,
12388 + (VCHIQ_HEADER_T *)message->message);
12389 +
12390 + return 0;
12391 +}
12392 +EXPORT_SYMBOL(vchi_held_msg_release);
12393 +
12394 +/***********************************************************
12395 + * Name: vchi_msg_hold
12396 + *
12397 + * Arguments: VCHI_SERVICE_HANDLE_T handle,
12398 + * void **data,
12399 + * uint32_t *msg_size,
12400 + * VCHI_FLAGS_T flags,
12401 + * VCHI_HELD_MSG_T *message_handle
12402 + *
12403 + * Description: Routine to return a pointer to the current message (to allow
12404 + * in place processing). The message is dequeued - don't forget
12405 + * to release the message using vchi_held_msg_release when you're
12406 + * finished.
12407 + *
12408 + * Returns: int32_t - success == 0
12409 + *
12410 + ***********************************************************/
12411 +int32_t vchi_msg_hold(VCHI_SERVICE_HANDLE_T handle,
12412 + void **data,
12413 + uint32_t *msg_size,
12414 + VCHI_FLAGS_T flags,
12415 + VCHI_HELD_MSG_T *message_handle)
12416 +{
12417 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
12418 + VCHIQ_HEADER_T *header;
12419 +
12420 + WARN_ON((flags != VCHI_FLAGS_NONE) &&
12421 + (flags != VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE));
12422 +
12423 + if (flags == VCHI_FLAGS_NONE)
12424 + if (vchiu_queue_is_empty(&service->queue))
12425 + return -1;
12426 +
12427 + header = vchiu_queue_pop(&service->queue);
12428 +
12429 + *data = header->data;
12430 + *msg_size = header->size;
12431 +
12432 + message_handle->service =
12433 + (struct opaque_vchi_service_t *)service->handle;
12434 + message_handle->message = header;
12435 +
12436 + return 0;
12437 +}
12438 +EXPORT_SYMBOL(vchi_msg_hold);
12439 +
12440 +/***********************************************************
12441 + * Name: vchi_initialise
12442 + *
12443 + * Arguments: VCHI_INSTANCE_T *instance_handle
12444 + *
12445 + * Description: Initialises the hardware but does not transmit anything.
12446 + *              When run as a Host App this will be called twice, hence the
12447 + *              need to malloc the state information.
12448 + *
12449 + * Returns: 0 if successful, failure otherwise
12450 + *
12451 + ***********************************************************/
12452 +
12453 +int32_t vchi_initialise(VCHI_INSTANCE_T *instance_handle)
12454 +{
12455 + VCHIQ_INSTANCE_T instance;
12456 + VCHIQ_STATUS_T status;
12457 +
12458 + status = vchiq_initialise(&instance);
12459 +
12460 + *instance_handle = (VCHI_INSTANCE_T)instance;
12461 +
12462 + return vchiq_status_to_vchi(status);
12463 +}
12464 +EXPORT_SYMBOL(vchi_initialise);
12465 +
12466 +/***********************************************************
12467 + * Name: vchi_connect
12468 + *
12469 + * Arguments: VCHI_CONNECTION_T **connections
12470 + * const uint32_t num_connections
12471 + * VCHI_INSTANCE_T instance_handle)
12472 + *
12473 + * Description: Starts the command service on each connection,
12474 + * causing INIT messages to be pinged back and forth
12475 + *
12476 + * Returns: 0 if successful, failure otherwise
12477 + *
12478 + ***********************************************************/
12479 +int32_t vchi_connect(VCHI_CONNECTION_T **connections,
12480 + const uint32_t num_connections,
12481 + VCHI_INSTANCE_T instance_handle)
12482 +{
12483 + VCHIQ_INSTANCE_T instance = (VCHIQ_INSTANCE_T)instance_handle;
12484 +
12485 + (void)connections;
12486 + (void)num_connections;
12487 +
12488 + return vchiq_connect(instance);
12489 +}
12490 +EXPORT_SYMBOL(vchi_connect);
12491 +
12492 +
12493 +/***********************************************************
12494 + * Name: vchi_disconnect
12495 + *
12496 + * Arguments: VCHI_INSTANCE_T instance_handle
12497 + *
12498 + * Description: Stops the command service on each connection,
12499 + * causing DE-INIT messages to be pinged back and forth
12500 + *
12501 + * Returns: 0 if successful, failure otherwise
12502 + *
12503 + ***********************************************************/
12504 +int32_t vchi_disconnect(VCHI_INSTANCE_T instance_handle)
12505 +{
12506 + VCHIQ_INSTANCE_T instance = (VCHIQ_INSTANCE_T)instance_handle;
12507 + return vchiq_status_to_vchi(vchiq_shutdown(instance));
12508 +}
12509 +EXPORT_SYMBOL(vchi_disconnect);
12510 +
12511 +
12512 +/***********************************************************
12513 + * Name: vchi_service_open
12514 + * Name: vchi_service_create
12515 + *
12516 + * Arguments: VCHI_INSTANCE_T *instance_handle
12517 + * SERVICE_CREATION_T *setup,
12518 + * VCHI_SERVICE_HANDLE_T *handle
12519 + *
12520 + * Description: Routine to open a service
12521 + *
12522 + * Returns: int32_t - success == 0
12523 + *
12524 + ***********************************************************/
12525 +
12526 +static VCHIQ_STATUS_T shim_callback(VCHIQ_REASON_T reason,
12527 + VCHIQ_HEADER_T *header, VCHIQ_SERVICE_HANDLE_T handle, void *bulk_user)
12528 +{
12529 + SHIM_SERVICE_T *service =
12530 + (SHIM_SERVICE_T *)VCHIQ_GET_SERVICE_USERDATA(handle);
12531 +
12532 + if (!service->callback)
12533 + goto release;
12534 +
12535 + switch (reason) {
12536 + case VCHIQ_MESSAGE_AVAILABLE:
12537 + vchiu_queue_push(&service->queue, header);
12538 +
12539 + service->callback(service->callback_param,
12540 + VCHI_CALLBACK_MSG_AVAILABLE, NULL);
12541 +
12542 + goto done;
12543 + break;
12544 +
12545 + case VCHIQ_BULK_TRANSMIT_DONE:
12546 + service->callback(service->callback_param,
12547 + VCHI_CALLBACK_BULK_SENT, bulk_user);
12548 + break;
12549 +
12550 + case VCHIQ_BULK_RECEIVE_DONE:
12551 + service->callback(service->callback_param,
12552 + VCHI_CALLBACK_BULK_RECEIVED, bulk_user);
12553 + break;
12554 +
12555 + case VCHIQ_SERVICE_CLOSED:
12556 + service->callback(service->callback_param,
12557 + VCHI_CALLBACK_SERVICE_CLOSED, NULL);
12558 + break;
12559 +
12560 + case VCHIQ_SERVICE_OPENED:
12561 + /* No equivalent VCHI reason */
12562 + break;
12563 +
12564 + case VCHIQ_BULK_TRANSMIT_ABORTED:
12565 + service->callback(service->callback_param,
12566 + VCHI_CALLBACK_BULK_TRANSMIT_ABORTED,
12567 + bulk_user);
12568 + break;
12569 +
12570 + case VCHIQ_BULK_RECEIVE_ABORTED:
12571 + service->callback(service->callback_param,
12572 + VCHI_CALLBACK_BULK_RECEIVE_ABORTED,
12573 + bulk_user);
12574 + break;
12575 +
12576 + default:
12577 + WARN(1, "not supported\n");
12578 + break;
12579 + }
12580 +
12581 +release:
12582 + vchiq_release_message(service->handle, header);
12583 +done:
12584 + return VCHIQ_SUCCESS;
12585 +}
12586 +
12587 +static SHIM_SERVICE_T *service_alloc(VCHIQ_INSTANCE_T instance,
12588 + SERVICE_CREATION_T *setup)
12589 +{
12590 + SHIM_SERVICE_T *service = kzalloc(sizeof(SHIM_SERVICE_T), GFP_KERNEL);
12591 +
12592 + (void)instance;
12593 +
12594 + if (service) {
12595 + if (vchiu_queue_init(&service->queue, 64)) {
12596 + service->callback = setup->callback;
12597 + service->callback_param = setup->callback_param;
12598 + } else {
12599 + kfree(service);
12600 + service = NULL;
12601 + }
12602 + }
12603 +
12604 + return service;
12605 +}
12606 +
12607 +static void service_free(SHIM_SERVICE_T *service)
12608 +{
12609 + if (service) {
12610 + vchiu_queue_delete(&service->queue);
12611 + kfree(service);
12612 + }
12613 +}
12614 +
12615 +int32_t vchi_service_open(VCHI_INSTANCE_T instance_handle,
12616 + SERVICE_CREATION_T *setup,
12617 + VCHI_SERVICE_HANDLE_T *handle)
12618 +{
12619 + VCHIQ_INSTANCE_T instance = (VCHIQ_INSTANCE_T)instance_handle;
12620 + SHIM_SERVICE_T *service = service_alloc(instance, setup);
12621 +
12622 + *handle = (VCHI_SERVICE_HANDLE_T)service;
12623 +
12624 + if (service) {
12625 + VCHIQ_SERVICE_PARAMS_T params;
12626 + VCHIQ_STATUS_T status;
12627 +
12628 + memset(&params, 0, sizeof(params));
12629 + params.fourcc = setup->service_id;
12630 + params.callback = shim_callback;
12631 + params.userdata = service;
12632 + params.version = setup->version.version;
12633 + params.version_min = setup->version.version_min;
12634 +
12635 + status = vchiq_open_service(instance, &params,
12636 + &service->handle);
12637 + if (status != VCHIQ_SUCCESS) {
12638 + service_free(service);
12639 + service = NULL;
12640 + *handle = NULL;
12641 + }
12642 + }
12643 +
12644 + return (service != NULL) ? 0 : -1;
12645 +}
12646 +EXPORT_SYMBOL(vchi_service_open);
12647 +
12648 +int32_t vchi_service_create(VCHI_INSTANCE_T instance_handle,
12649 + SERVICE_CREATION_T *setup,
12650 + VCHI_SERVICE_HANDLE_T *handle)
12651 +{
12652 + VCHIQ_INSTANCE_T instance = (VCHIQ_INSTANCE_T)instance_handle;
12653 + SHIM_SERVICE_T *service = service_alloc(instance, setup);
12654 +
12655 + *handle = (VCHI_SERVICE_HANDLE_T)service;
12656 +
12657 + if (service) {
12658 + VCHIQ_SERVICE_PARAMS_T params;
12659 + VCHIQ_STATUS_T status;
12660 +
12661 + memset(&params, 0, sizeof(params));
12662 + params.fourcc = setup->service_id;
12663 + params.callback = shim_callback;
12664 + params.userdata = service;
12665 + params.version = setup->version.version;
12666 + params.version_min = setup->version.version_min;
12667 + status = vchiq_add_service(instance, &params, &service->handle);
12668 +
12669 + if (status != VCHIQ_SUCCESS) {
12670 + service_free(service);
12671 + service = NULL;
12672 + *handle = NULL;
12673 + }
12674 + }
12675 +
12676 + return (service != NULL) ? 0 : -1;
12677 +}
12678 +EXPORT_SYMBOL(vchi_service_create);
12679 +
12680 +int32_t vchi_service_close(const VCHI_SERVICE_HANDLE_T handle)
12681 +{
12682 + int32_t ret = -1;
12683 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
12684 + if (service) {
12685 + VCHIQ_STATUS_T status = vchiq_close_service(service->handle);
12686 + if (status == VCHIQ_SUCCESS) {
12687 + service_free(service);
12688 + service = NULL;
12689 + }
12690 +
12691 + ret = vchiq_status_to_vchi(status);
12692 + }
12693 + return ret;
12694 +}
12695 +EXPORT_SYMBOL(vchi_service_close);
12696 +
12697 +int32_t vchi_service_destroy(const VCHI_SERVICE_HANDLE_T handle)
12698 +{
12699 + int32_t ret = -1;
12700 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
12701 + if (service) {
12702 + VCHIQ_STATUS_T status = vchiq_remove_service(service->handle);
12703 + if (status == VCHIQ_SUCCESS) {
12704 + service_free(service);
12705 + service = NULL;
12706 + }
12707 +
12708 + ret = vchiq_status_to_vchi(status);
12709 + }
12710 + return ret;
12711 +}
12712 +EXPORT_SYMBOL(vchi_service_destroy);
12713 +
12714 +int32_t vchi_service_set_option(const VCHI_SERVICE_HANDLE_T handle,
12715 + VCHI_SERVICE_OPTION_T option,
12716 + int value)
12717 +{
12718 + int32_t ret = -1;
12719 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
12720 + VCHIQ_SERVICE_OPTION_T vchiq_option;
12721 + switch (option) {
12722 + case VCHI_SERVICE_OPTION_TRACE:
12723 + vchiq_option = VCHIQ_SERVICE_OPTION_TRACE;
12724 + break;
12725 + case VCHI_SERVICE_OPTION_SYNCHRONOUS:
12726 + vchiq_option = VCHIQ_SERVICE_OPTION_SYNCHRONOUS;
12727 + break;
12728 + default:
12729 + service = NULL;
12730 + break;
12731 + }
12732 + if (service) {
12733 + VCHIQ_STATUS_T status =
12734 + vchiq_set_service_option(service->handle,
12735 + vchiq_option,
12736 + value);
12737 +
12738 + ret = vchiq_status_to_vchi(status);
12739 + }
12740 + return ret;
12741 +}
12742 +EXPORT_SYMBOL(vchi_service_set_option);
12743 +
12744 +int32_t vchi_get_peer_version(const VCHI_SERVICE_HANDLE_T handle, short *peer_version)
12745 +{
12746 +	int32_t ret = -1;
12747 +	SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
12748 +	if (service) {
12749 +		VCHIQ_STATUS_T status =
12750 +			vchiq_get_peer_version(service->handle, peer_version);
12751 +		ret = vchiq_status_to_vchi(status);
12752 +	}
12753 +	return ret;
12754 +}
12755 +EXPORT_SYMBOL(vchi_get_peer_version);
12756 +
12757 +/* ----------------------------------------------------------------------
12758 + * read a uint32_t from buffer.
12759 + * network format is defined to be little endian
12760 + * -------------------------------------------------------------------- */
12761 +uint32_t
12762 +vchi_readbuf_uint32(const void *_ptr)
12763 +{
12764 + const unsigned char *ptr = _ptr;
12765 + return ptr[0] | (ptr[1] << 8) | (ptr[2] << 16) | (ptr[3] << 24);
12766 +}
12767 +
12768 +/* ----------------------------------------------------------------------
12769 + * write a uint32_t to buffer.
12770 + * network format is defined to be little endian
12771 + * -------------------------------------------------------------------- */
12772 +void
12773 +vchi_writebuf_uint32(void *_ptr, uint32_t value)
12774 +{
12775 + unsigned char *ptr = _ptr;
12776 + ptr[0] = (unsigned char)((value >> 0) & 0xFF);
12777 + ptr[1] = (unsigned char)((value >> 8) & 0xFF);
12778 + ptr[2] = (unsigned char)((value >> 16) & 0xFF);
12779 + ptr[3] = (unsigned char)((value >> 24) & 0xFF);
12780 +}
12781 +
12782 +/* ----------------------------------------------------------------------
12783 + * read a uint16_t from buffer.
12784 + * network format is defined to be little endian
12785 + * -------------------------------------------------------------------- */
12786 +uint16_t
12787 +vchi_readbuf_uint16(const void *_ptr)
12788 +{
12789 + const unsigned char *ptr = _ptr;
12790 + return ptr[0] | (ptr[1] << 8);
12791 +}
12792 +
12793 +/* ----------------------------------------------------------------------
12794 + * write a uint16_t into the buffer.
12795 + * network format is defined to be little endian
12796 + * -------------------------------------------------------------------- */
12797 +void
12798 +vchi_writebuf_uint16(void *_ptr, uint16_t value)
12799 +{
12800 + unsigned char *ptr = _ptr;
12801 + ptr[0] = (value >> 0) & 0xFF;
12802 + ptr[1] = (value >> 8) & 0xFF;
12803 +}
12804 +
12805 +/***********************************************************
12806 + * Name: vchi_service_use
12807 + *
12808 + * Arguments: const VCHI_SERVICE_HANDLE_T handle
12809 + *
12810 + * Description: Routine to increment refcount on a service
12811 + *
12812 + * Returns: void
12813 + *
12814 + ***********************************************************/
12815 +int32_t vchi_service_use(const VCHI_SERVICE_HANDLE_T handle)
12816 +{
12817 + int32_t ret = -1;
12818 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
12819 + if (service)
12820 + ret = vchiq_status_to_vchi(vchiq_use_service(service->handle));
12821 + return ret;
12822 +}
12823 +EXPORT_SYMBOL(vchi_service_use);
12824 +
12825 +/***********************************************************
12826 + * Name: vchi_service_release
12827 + *
12828 + * Arguments: const VCHI_SERVICE_HANDLE_T handle
12829 + *
12830 + * Description: Routine to decrement refcount on a service
12831 + *
12832 + * Returns: void
12833 + *
12834 + ***********************************************************/
12835 +int32_t vchi_service_release(const VCHI_SERVICE_HANDLE_T handle)
12836 +{
12837 + int32_t ret = -1;
12838 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
12839 + if (service)
12840 + ret = vchiq_status_to_vchi(
12841 + vchiq_release_service(service->handle));
12842 + return ret;
12843 +}
12844 +EXPORT_SYMBOL(vchi_service_release);
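For completeness, a minimal VCHI client sitting on top of this shim would look roughly like the sketch below. It is illustrative only (not part of the patch); the fourcc value and callback body are placeholders, and error handling is reduced to early returns.

#include <linux/string.h>
#include "interface/vchi/vchi.h"

static void example_vchi_callback(void *param, VCHI_CALLBACK_REASON_T reason,
	void *msg_handle)
{
	/* A real client would wake a worker here to call vchi_msg_dequeue(). */
	(void)param;
	(void)reason;
	(void)msg_handle;
}

static int32_t example_vchi_client(void)
{
	VCHI_INSTANCE_T instance;
	VCHI_SERVICE_HANDLE_T handle;
	SERVICE_CREATION_T setup;
	static const char ping[] = "ping";

	if (vchi_initialise(&instance) != 0)
		return -1;
	if (vchi_connect(NULL, 0, instance) != 0)	/* connection args are ignored */
		return -1;

	memset(&setup, 0, sizeof(setup));
	setup.service_id = ('E' << 24) | ('X' << 16) | ('M' << 8) | 'P'; /* placeholder fourcc */
	setup.callback = example_vchi_callback;
	setup.callback_param = NULL;
	setup.version.version = 1;
	setup.version.version_min = 1;

	if (vchi_service_open(instance, &setup, &handle) != 0)
		return -1;

	/* Retries internally on VCHIQ_RETRY until the message is queued. */
	return vchi_msg_queue(handle, ping, sizeof(ping),
		VCHI_FLAGS_BLOCK_UNTIL_QUEUED, NULL);
}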
12845 --- /dev/null
12846 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.c
12847 @@ -0,0 +1,152 @@
12848 +/**
12849 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
12850 + *
12851 + * Redistribution and use in source and binary forms, with or without
12852 + * modification, are permitted provided that the following conditions
12853 + * are met:
12854 + * 1. Redistributions of source code must retain the above copyright
12855 + * notice, this list of conditions, and the following disclaimer,
12856 + * without modification.
12857 + * 2. Redistributions in binary form must reproduce the above copyright
12858 + * notice, this list of conditions and the following disclaimer in the
12859 + * documentation and/or other materials provided with the distribution.
12860 + * 3. The names of the above-listed copyright holders may not be used
12861 + * to endorse or promote products derived from this software without
12862 + * specific prior written permission.
12863 + *
12864 + * ALTERNATIVELY, this software may be distributed under the terms of the
12865 + * GNU General Public License ("GPL") version 2, as published by the Free
12866 + * Software Foundation.
12867 + *
12868 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
12869 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
12870 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
12871 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
12872 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
12873 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
12874 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
12875 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
12876 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
12877 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
12878 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
12879 + */
12880 +
12881 +#include "vchiq_util.h"
12882 +#include "vchiq_killable.h"
12883 +
12884 +static inline int is_pow2(int i)
12885 +{
12886 + return i && !(i & (i - 1));
12887 +}
12888 +
12889 +int vchiu_queue_init(VCHIU_QUEUE_T *queue, int size)
12890 +{
12891 + WARN_ON(!is_pow2(size));
12892 +
12893 + queue->size = size;
12894 + queue->read = 0;
12895 + queue->write = 0;
12896 +
12897 + sema_init(&queue->pop, 0);
12898 + sema_init(&queue->push, 0);
12899 +
12900 + queue->storage = kzalloc(size * sizeof(VCHIQ_HEADER_T *), GFP_KERNEL);
12901 + if (queue->storage == NULL) {
12902 + vchiu_queue_delete(queue);
12903 + return 0;
12904 + }
12905 + return 1;
12906 +}
12907 +
12908 +void vchiu_queue_delete(VCHIU_QUEUE_T *queue)
12909 +{
12910 + if (queue->storage != NULL)
12911 + kfree(queue->storage);
12912 +}
12913 +
12914 +int vchiu_queue_is_empty(VCHIU_QUEUE_T *queue)
12915 +{
12916 + return queue->read == queue->write;
12917 +}
12918 +
12919 +int vchiu_queue_is_full(VCHIU_QUEUE_T *queue)
12920 +{
12921 + return queue->write == queue->read + queue->size;
12922 +}
12923 +
12924 +void vchiu_queue_push(VCHIU_QUEUE_T *queue, VCHIQ_HEADER_T *header)
12925 +{
12926 + while (queue->write == queue->read + queue->size) {
12927 + if (down_interruptible(&queue->pop) != 0) {
12928 + flush_signals(current);
12929 + }
12930 + }
12931 +
12932 + /*
12933 + * Write to queue->storage must be visible after read from
12934 + * queue->read
12935 + */
12936 + smp_mb();
12937 +
12938 + queue->storage[queue->write & (queue->size - 1)] = header;
12939 +
12940 + /*
12941 + * Write to queue->storage must be visible before write to
12942 + * queue->write
12943 + */
12944 + smp_wmb();
12945 +
12946 + queue->write++;
12947 +
12948 + up(&queue->push);
12949 +}
12950 +
12951 +VCHIQ_HEADER_T *vchiu_queue_peek(VCHIU_QUEUE_T *queue)
12952 +{
12953 + while (queue->write == queue->read) {
12954 + if (down_interruptible(&queue->push) != 0) {
12955 + flush_signals(current);
12956 + }
12957 + }
12958 +
12959 +	up(&queue->push); /* We haven't removed anything from the queue. */
12960 +
12961 + /*
12962 + * Read from queue->storage must be visible after read from
12963 + * queue->write
12964 + */
12965 + smp_rmb();
12966 +
12967 + return queue->storage[queue->read & (queue->size - 1)];
12968 +}
12969 +
12970 +VCHIQ_HEADER_T *vchiu_queue_pop(VCHIU_QUEUE_T *queue)
12971 +{
12972 + VCHIQ_HEADER_T *header;
12973 +
12974 + while (queue->write == queue->read) {
12975 + if (down_interruptible(&queue->push) != 0) {
12976 + flush_signals(current);
12977 + }
12978 + }
12979 +
12980 + /*
12981 + * Read from queue->storage must be visible after read from
12982 + * queue->write
12983 + */
12984 + smp_rmb();
12985 +
12986 + header = queue->storage[queue->read & (queue->size - 1)];
12987 +
12988 + /*
12989 + * Read from queue->storage must be visible before write to
12990 + * queue->read
12991 + */
12992 + smp_mb();
12993 +
12994 + queue->read++;
12995 +
12996 + up(&queue->pop);
12997 +
12998 + return header;
12999 +}
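
The queue above keeps free-running read and write counters and maps them onto storage slots with `index & (size - 1)`, which only wraps correctly when the size is a power of two -- hence the is_pow2() check in vchiu_queue_init(). A minimal userspace sketch of that indexing scheme (illustrative only, not part of the patch):

    #include <assert.h>

    /* Sketch of the free-running-counter scheme used by vchiu_queue_*():
     * the counters are never reduced modulo size; only the array index is
     * masked, so fullness can be tested as write == read + size. */
    int main(void)
    {
        int size = 4;               /* must be a power of two */
        int read = 3, write = 7;    /* free-running counters */

        assert(write == read + size);        /* queue is full */
        assert((write & (size - 1)) == 3);   /* next write slot wraps to index 3 */
        assert((read  & (size - 1)) == 3);   /* same slot the reader will take next */
        return 0;
    }

With a size that is not a power of two, (size - 1) is not an all-ones mask and the indexing would skip slots, which is what the WARN_ON in vchiu_queue_init() guards against.
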
13000 --- /dev/null
13001 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.h
13002 @@ -0,0 +1,81 @@
13003 +/**
13004 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
13005 + *
13006 + * Redistribution and use in source and binary forms, with or without
13007 + * modification, are permitted provided that the following conditions
13008 + * are met:
13009 + * 1. Redistributions of source code must retain the above copyright
13010 + * notice, this list of conditions, and the following disclaimer,
13011 + * without modification.
13012 + * 2. Redistributions in binary form must reproduce the above copyright
13013 + * notice, this list of conditions and the following disclaimer in the
13014 + * documentation and/or other materials provided with the distribution.
13015 + * 3. The names of the above-listed copyright holders may not be used
13016 + * to endorse or promote products derived from this software without
13017 + * specific prior written permission.
13018 + *
13019 + * ALTERNATIVELY, this software may be distributed under the terms of the
13020 + * GNU General Public License ("GPL") version 2, as published by the Free
13021 + * Software Foundation.
13022 + *
13023 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
13024 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
13025 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
13026 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
13027 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
13028 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
13029 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
13030 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
13031 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
13032 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
13033 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
13034 + */
13035 +
13036 +#ifndef VCHIQ_UTIL_H
13037 +#define VCHIQ_UTIL_H
13038 +
13039 +#include <linux/types.h>
13040 +#include <linux/semaphore.h>
13041 +#include <linux/mutex.h>
13042 +#include <linux/bitops.h>
13043 +#include <linux/kthread.h>
13044 +#include <linux/wait.h>
13045 +#include <linux/vmalloc.h>
13046 +#include <linux/jiffies.h>
13047 +#include <linux/delay.h>
13048 +#include <linux/string.h>
13049 +#include <linux/types.h>
13050 +#include <linux/interrupt.h>
13051 +#include <linux/random.h>
13052 +#include <linux/sched.h>
13053 +#include <linux/ctype.h>
13054 +#include <linux/uaccess.h>
13055 +#include <linux/time.h> /* for time_t */
13056 +#include <linux/slab.h>
13057 +#include <linux/vmalloc.h>
13058 +
13059 +#include "vchiq_if.h"
13060 +
13061 +typedef struct {
13062 + int size;
13063 + int read;
13064 + int write;
13065 +
13066 + struct semaphore pop;
13067 + struct semaphore push;
13068 +
13069 + VCHIQ_HEADER_T **storage;
13070 +} VCHIU_QUEUE_T;
13071 +
13072 +extern int vchiu_queue_init(VCHIU_QUEUE_T *queue, int size);
13073 +extern void vchiu_queue_delete(VCHIU_QUEUE_T *queue);
13074 +
13075 +extern int vchiu_queue_is_empty(VCHIU_QUEUE_T *queue);
13076 +extern int vchiu_queue_is_full(VCHIU_QUEUE_T *queue);
13077 +
13078 +extern void vchiu_queue_push(VCHIU_QUEUE_T *queue, VCHIQ_HEADER_T *header);
13079 +
13080 +extern VCHIQ_HEADER_T *vchiu_queue_peek(VCHIU_QUEUE_T *queue);
13081 +extern VCHIQ_HEADER_T *vchiu_queue_pop(VCHIU_QUEUE_T *queue);
13082 +
13083 +#endif
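
For context, the VCHIU_QUEUE_T API declared above is used roughly as follows. This is a minimal usage sketch assuming a single pusher and a single popper; the names example_queue and example_use are hypothetical and do not appear in the patch:

    #include <linux/errno.h>
    #include "vchiq_util.h"

    static VCHIU_QUEUE_T example_queue;

    /* Illustrative only: queue a header received from the VPU, then drain it. */
    static int example_use(VCHIQ_HEADER_T *header)
    {
        if (!vchiu_queue_init(&example_queue, 64))   /* size must be a power of two */
            return -ENOMEM;

        vchiu_queue_push(&example_queue, header);    /* blocks while the queue is full */

        if (!vchiu_queue_is_empty(&example_queue))
            header = vchiu_queue_pop(&example_queue); /* blocks while the queue is empty */

        vchiu_queue_delete(&example_queue);
        return 0;
    }

Note that vchiu_queue_init() returns 1 on success and 0 on failure, and that push/pop never fail: they sleep on the pop/push semaphores until space or data is available.
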
13084 --- /dev/null
13085 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_version.c
13086 @@ -0,0 +1,59 @@
13087 +/**
13088 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
13089 + *
13090 + * Redistribution and use in source and binary forms, with or without
13091 + * modification, are permitted provided that the following conditions
13092 + * are met:
13093 + * 1. Redistributions of source code must retain the above copyright
13094 + * notice, this list of conditions, and the following disclaimer,
13095 + * without modification.
13096 + * 2. Redistributions in binary form must reproduce the above copyright
13097 + * notice, this list of conditions and the following disclaimer in the
13098 + * documentation and/or other materials provided with the distribution.
13099 + * 3. The names of the above-listed copyright holders may not be used
13100 + * to endorse or promote products derived from this software without
13101 + * specific prior written permission.
13102 + *
13103 + * ALTERNATIVELY, this software may be distributed under the terms of the
13104 + * GNU General Public License ("GPL") version 2, as published by the Free
13105 + * Software Foundation.
13106 + *
13107 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
13108 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
13109 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
13110 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
13111 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
13112 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
13113 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
13114 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
13115 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
13116 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
13117 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
13118 + */
13119 +#include "vchiq_build_info.h"
13120 +#include <linux/broadcom/vc_debug_sym.h>
13121 +
13122 +VC_DEBUG_DECLARE_STRING_VAR( vchiq_build_hostname, "dc4-arm-01" );
13123 +VC_DEBUG_DECLARE_STRING_VAR( vchiq_build_version, "9245b4c35b99b3870e1f7dc598c5692b3c66a6f0 (tainted)" );
13124 +VC_DEBUG_DECLARE_STRING_VAR( vchiq_build_time, __TIME__ );
13125 +VC_DEBUG_DECLARE_STRING_VAR( vchiq_build_date, __DATE__ );
13126 +
13127 +const char *vchiq_get_build_hostname( void )
13128 +{
13129 + return vchiq_build_hostname;
13130 +}
13131 +
13132 +const char *vchiq_get_build_version( void )
13133 +{
13134 + return vchiq_build_version;
13135 +}
13136 +
13137 +const char *vchiq_get_build_date( void )
13138 +{
13139 + return vchiq_build_date;
13140 +}
13141 +
13142 +const char *vchiq_get_build_time( void )
13143 +{
13144 + return vchiq_build_time;
13145 +}