brcm2708: update to v3.18
[openwrt/staging/wigyori.git] / target / linux / brcm2708 / patches-3.18 / 0010-bcm2708-vchiq-driver.patch
1 From adee2a81f0be488e079498ac457bf01c954a029e Mon Sep 17 00:00:00 2001
2 From: popcornmix <popcornmix@gmail.com>
3 Date: Tue, 2 Jul 2013 23:42:01 +0100
4 Subject: [PATCH 010/114] bcm2708 vchiq driver
5
6 Signed-off-by: popcornmix <popcornmix@gmail.com>
7
8 vchiq: create_pagelist copes with vmalloc memory
9
10 Signed-off-by: Daniel Stone <daniels@collabora.com>
11
12 vchiq: fix the shim message release
13
14 Signed-off-by: Daniel Stone <daniels@collabora.com>
15
16 vchiq: export additional symbols
17
18 Signed-off-by: Daniel Stone <daniels@collabora.com>
19
20 VCHIQ: Make service closure fully synchronous (drv)
21
22 This is one half of a two-part patch, the other half of which is to
23 the vchiq_lib user library. With these patches, calls to
24 vchiq_close_service and vchiq_remove_service won't return until any
25 associated callbacks have been delivered to the callback thread.
26
27 VCHIQ: Add per-service tracing
28
29 The new service option VCHIQ_SERVICE_OPTION_TRACE is a boolean that
30 toggles tracing for the specified service.
31
32 This commit also introduces vchi_service_set_option and the associated
33 option VCHI_SERVICE_OPTION_TRACE.
34
35 vchiq: Make the synchronous-CLOSE logic more tolerant
36
37 vchiq: Move logging control into debugfs
38
39 vchiq: Take care of a corner case tickled by VCSM
40
41 Closing a connection that isn't fully open requires care, since one
42 side does not know the other side's port number. Code was present to
43 handle the case where a CLOSE is sent immediately after an OPEN, i.e.
44 before the OPENACK has been received, but this was incorrectly being
45 used when an OPEN from a client using port 0 was rejected.
46
47 (In the observed failure, the host was attempting to use the VCSM
48 service, which isn't present in the 'cutdown' firmware. The failure
49 was intermittent because sometimes the keepalive service would
50 grab port 0.)
51
52 This case can be distinguished because the client's remoteport will
53 still be VCHIQ_PORT_FREE, and the srvstate will be OPENING. Either
54 condition is sufficient to differentiate it from the special case
55 described above.
56 ---
57 drivers/misc/Kconfig | 1 +
58 drivers/misc/Makefile | 1 +
59 drivers/misc/vc04_services/Kconfig | 9 +
60 drivers/misc/vc04_services/Makefile | 17 +
61 .../interface/vchi/connections/connection.h | 328 ++
62 .../interface/vchi/message_drivers/message.h | 204 ++
63 drivers/misc/vc04_services/interface/vchi/vchi.h | 378 ++
64 .../misc/vc04_services/interface/vchi/vchi_cfg.h | 224 ++
65 .../interface/vchi/vchi_cfg_internal.h | 71 +
66 .../vc04_services/interface/vchi/vchi_common.h | 174 +
67 .../misc/vc04_services/interface/vchi/vchi_mh.h | 42 +
68 .../misc/vc04_services/interface/vchiq_arm/vchiq.h | 40 +
69 .../vc04_services/interface/vchiq_arm/vchiq_2835.h | 42 +
70 .../interface/vchiq_arm/vchiq_2835_arm.c | 561 +++
71 .../vc04_services/interface/vchiq_arm/vchiq_arm.c | 2883 +++++++++++++++
72 .../vc04_services/interface/vchiq_arm/vchiq_arm.h | 223 ++
73 .../interface/vchiq_arm/vchiq_build_info.h | 37 +
74 .../vc04_services/interface/vchiq_arm/vchiq_cfg.h | 66 +
75 .../interface/vchiq_arm/vchiq_connected.c | 119 +
76 .../interface/vchiq_arm/vchiq_connected.h | 50 +
77 .../vc04_services/interface/vchiq_arm/vchiq_core.c | 3861 ++++++++++++++++++++
78 .../vc04_services/interface/vchiq_arm/vchiq_core.h | 711 ++++
79 .../interface/vchiq_arm/vchiq_debugfs.c | 383 ++
80 .../interface/vchiq_arm/vchiq_debugfs.h | 52 +
81 .../interface/vchiq_arm/vchiq_genversion | 87 +
82 .../vc04_services/interface/vchiq_arm/vchiq_if.h | 189 +
83 .../interface/vchiq_arm/vchiq_ioctl.h | 131 +
84 .../interface/vchiq_arm/vchiq_kern_lib.c | 456 +++
85 .../interface/vchiq_arm/vchiq_memdrv.h | 71 +
86 .../interface/vchiq_arm/vchiq_pagelist.h | 58 +
87 .../vc04_services/interface/vchiq_arm/vchiq_shim.c | 853 +++++
88 .../vc04_services/interface/vchiq_arm/vchiq_util.c | 151 +
89 .../vc04_services/interface/vchiq_arm/vchiq_util.h | 81 +
90 .../interface/vchiq_arm/vchiq_version.c | 59 +
91 34 files changed, 12613 insertions(+)
92 create mode 100644 drivers/misc/vc04_services/Kconfig
93 create mode 100644 drivers/misc/vc04_services/Makefile
94 create mode 100644 drivers/misc/vc04_services/interface/vchi/connections/connection.h
95 create mode 100644 drivers/misc/vc04_services/interface/vchi/message_drivers/message.h
96 create mode 100644 drivers/misc/vc04_services/interface/vchi/vchi.h
97 create mode 100644 drivers/misc/vc04_services/interface/vchi/vchi_cfg.h
98 create mode 100644 drivers/misc/vc04_services/interface/vchi/vchi_cfg_internal.h
99 create mode 100644 drivers/misc/vc04_services/interface/vchi/vchi_common.h
100 create mode 100644 drivers/misc/vc04_services/interface/vchi/vchi_mh.h
101 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq.h
102 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835.h
103 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
104 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.c
105 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.h
106 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_build_info.h
107 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_cfg.h
108 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.c
109 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.h
110 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.c
111 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.h
112 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_debugfs.c
113 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_debugfs.h
114 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_genversion
115 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_if.h
116 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_ioctl.h
117 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_kern_lib.c
118 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_memdrv.h
119 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_pagelist.h
120 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_shim.c
121 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.c
122 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.h
123 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_version.c
124
125 diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
126 index bbeb451..b6109a2 100644
127 --- a/drivers/misc/Kconfig
128 +++ b/drivers/misc/Kconfig
129 @@ -524,6 +524,7 @@ source "drivers/misc/carma/Kconfig"
130 source "drivers/misc/altera-stapl/Kconfig"
131 source "drivers/misc/mei/Kconfig"
132 source "drivers/misc/vmw_vmci/Kconfig"
133 +source "drivers/misc/vc04_services/Kconfig"
134 source "drivers/misc/mic/Kconfig"
135 source "drivers/misc/genwqe/Kconfig"
136 source "drivers/misc/echo/Kconfig"
137 diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
138 index 7d5c4cd..c085ede 100644
139 --- a/drivers/misc/Makefile
140 +++ b/drivers/misc/Makefile
141 @@ -51,6 +51,7 @@ obj-$(CONFIG_INTEL_MEI) += mei/
142 obj-$(CONFIG_VMWARE_VMCI) += vmw_vmci/
143 obj-$(CONFIG_LATTICE_ECP3_CONFIG) += lattice-ecp3-config.o
144 obj-$(CONFIG_SRAM) += sram.o
145 +obj-y += vc04_services/
146 obj-y += mic/
147 obj-$(CONFIG_GENWQE) += genwqe/
148 obj-$(CONFIG_ECHO) += echo/
149 diff --git a/drivers/misc/vc04_services/Kconfig b/drivers/misc/vc04_services/Kconfig
150 new file mode 100644
151 index 0000000..2663933
152 --- /dev/null
153 +++ b/drivers/misc/vc04_services/Kconfig
154 @@ -0,0 +1,9 @@
155 +config BCM2708_VCHIQ
156 + tristate "Videocore VCHIQ"
157 + depends on MACH_BCM2708
158 + default y
159 + help
160 + Kernel to VideoCore communication interface for the
161 + BCM2708 family of products.
162 + Defaults to Y when the Broadcom Videocore services
163 + are included in the build, N otherwise.
164 diff --git a/drivers/misc/vc04_services/Makefile b/drivers/misc/vc04_services/Makefile
165 new file mode 100644
166 index 0000000..0c82520
167 --- /dev/null
168 +++ b/drivers/misc/vc04_services/Makefile
169 @@ -0,0 +1,17 @@
170 +ifeq ($(CONFIG_MACH_BCM2708),y)
171 +
172 +obj-$(CONFIG_BCM2708_VCHIQ) += vchiq.o
173 +
174 +vchiq-objs := \
175 + interface/vchiq_arm/vchiq_core.o \
176 + interface/vchiq_arm/vchiq_arm.o \
177 + interface/vchiq_arm/vchiq_kern_lib.o \
178 + interface/vchiq_arm/vchiq_2835_arm.o \
179 + interface/vchiq_arm/vchiq_debugfs.o \
180 + interface/vchiq_arm/vchiq_shim.o \
181 + interface/vchiq_arm/vchiq_util.o \
182 + interface/vchiq_arm/vchiq_connected.o \
183 +
184 +ccflags-y += -DVCOS_VERIFY_BKPTS=1 -Idrivers/misc/vc04_services -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000
185 +
186 +endif
187 diff --git a/drivers/misc/vc04_services/interface/vchi/connections/connection.h b/drivers/misc/vc04_services/interface/vchi/connections/connection.h
188 new file mode 100644
189 index 0000000..fef6ac3
190 --- /dev/null
191 +++ b/drivers/misc/vc04_services/interface/vchi/connections/connection.h
192 @@ -0,0 +1,328 @@
193 +/**
194 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
195 + *
196 + * Redistribution and use in source and binary forms, with or without
197 + * modification, are permitted provided that the following conditions
198 + * are met:
199 + * 1. Redistributions of source code must retain the above copyright
200 + * notice, this list of conditions, and the following disclaimer,
201 + * without modification.
202 + * 2. Redistributions in binary form must reproduce the above copyright
203 + * notice, this list of conditions and the following disclaimer in the
204 + * documentation and/or other materials provided with the distribution.
205 + * 3. The names of the above-listed copyright holders may not be used
206 + * to endorse or promote products derived from this software without
207 + * specific prior written permission.
208 + *
209 + * ALTERNATIVELY, this software may be distributed under the terms of the
210 + * GNU General Public License ("GPL") version 2, as published by the Free
211 + * Software Foundation.
212 + *
213 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
214 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
215 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
216 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
217 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
218 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
219 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
220 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
221 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
222 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
223 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
224 + */
225 +
226 +#ifndef CONNECTION_H_
227 +#define CONNECTION_H_
228 +
229 +#include <linux/kernel.h>
230 +#include <linux/types.h>
231 +#include <linux/semaphore.h>
232 +
233 +#include "interface/vchi/vchi_cfg_internal.h"
234 +#include "interface/vchi/vchi_common.h"
235 +#include "interface/vchi/message_drivers/message.h"
236 +
237 +/******************************************************************************
238 + Global defs
239 + *****************************************************************************/
240 +
241 +// Opaque handle for a connection / service pair
242 +typedef struct opaque_vchi_connection_connected_service_handle_t *VCHI_CONNECTION_SERVICE_HANDLE_T;
243 +
244 +// opaque handle to the connection state information
245 +typedef struct opaque_vchi_connection_info_t VCHI_CONNECTION_STATE_T;
246 +
247 +typedef struct vchi_connection_t VCHI_CONNECTION_T;
248 +
249 +
250 +/******************************************************************************
251 + API
252 + *****************************************************************************/
253 +
254 +// Routine to init a connection with a particular low level driver
255 +typedef VCHI_CONNECTION_STATE_T * (*VCHI_CONNECTION_INIT_T)( struct vchi_connection_t * connection,
256 + const VCHI_MESSAGE_DRIVER_T * driver );
257 +
258 +// Routine to control CRC enabling at a connection level
259 +typedef int32_t (*VCHI_CONNECTION_CRC_CONTROL_T)( VCHI_CONNECTION_STATE_T *state_handle,
260 + VCHI_CRC_CONTROL_T control );
261 +
262 +// Routine to create a service
263 +typedef int32_t (*VCHI_CONNECTION_SERVICE_CONNECT_T)( VCHI_CONNECTION_STATE_T *state_handle,
264 + int32_t service_id,
265 + uint32_t rx_fifo_size,
266 + uint32_t tx_fifo_size,
267 + int server,
268 + VCHI_CALLBACK_T callback,
269 + void *callback_param,
270 + int32_t want_crc,
271 + int32_t want_unaligned_bulk_rx,
272 + int32_t want_unaligned_bulk_tx,
273 + VCHI_CONNECTION_SERVICE_HANDLE_T *service_handle );
274 +
275 +// Routine to close a service
276 +typedef int32_t (*VCHI_CONNECTION_SERVICE_DISCONNECT_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle );
277 +
278 +// Routine to queue a message
279 +typedef int32_t (*VCHI_CONNECTION_SERVICE_QUEUE_MESSAGE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
280 + const void *data,
281 + uint32_t data_size,
282 + VCHI_FLAGS_T flags,
283 + void *msg_handle );
284 +
285 +// scatter-gather (vector) message queueing
286 +typedef int32_t (*VCHI_CONNECTION_SERVICE_QUEUE_MESSAGEV_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
287 + VCHI_MSG_VECTOR_T *vector,
288 + uint32_t count,
289 + VCHI_FLAGS_T flags,
290 + void *msg_handle );
291 +
292 +// Routine to dequeue a message
293 +typedef int32_t (*VCHI_CONNECTION_SERVICE_DEQUEUE_MESSAGE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
294 + void *data,
295 + uint32_t max_data_size_to_read,
296 + uint32_t *actual_msg_size,
297 + VCHI_FLAGS_T flags );
298 +
299 +// Routine to peek at a message
300 +typedef int32_t (*VCHI_CONNECTION_SERVICE_PEEK_MESSAGE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
301 + void **data,
302 + uint32_t *msg_size,
303 + VCHI_FLAGS_T flags );
304 +
305 +// Routine to hold a message
306 +typedef int32_t (*VCHI_CONNECTION_SERVICE_HOLD_MESSAGE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
307 + void **data,
308 + uint32_t *msg_size,
309 + VCHI_FLAGS_T flags,
310 + void **message_handle );
311 +
312 +// Routine to initialise a received message iterator
313 +typedef int32_t (*VCHI_CONNECTION_SERVICE_LOOKAHEAD_MESSAGE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
314 + VCHI_MSG_ITER_T *iter,
315 + VCHI_FLAGS_T flags );
316 +
317 +// Routine to release a held message
318 +typedef int32_t (*VCHI_CONNECTION_HELD_MSG_RELEASE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
319 + void *message_handle );
320 +
321 +// Routine to get info on a held message
322 +typedef int32_t (*VCHI_CONNECTION_HELD_MSG_INFO_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
323 + void *message_handle,
324 + void **data,
325 + int32_t *msg_size,
326 + uint32_t *tx_timestamp,
327 + uint32_t *rx_timestamp );
328 +
329 +// Routine to check whether the iterator has a next message
330 +typedef int32_t (*VCHI_CONNECTION_MSG_ITER_HAS_NEXT_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service,
331 + const VCHI_MSG_ITER_T *iter );
332 +
333 +// Routine to advance the iterator
334 +typedef int32_t (*VCHI_CONNECTION_MSG_ITER_NEXT_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service,
335 + VCHI_MSG_ITER_T *iter,
336 + void **data,
337 + uint32_t *msg_size );
338 +
339 +// Routine to remove the last message returned by the iterator
340 +typedef int32_t (*VCHI_CONNECTION_MSG_ITER_REMOVE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service,
341 + VCHI_MSG_ITER_T *iter );
342 +
343 +// Routine to hold the last message returned by the iterator
344 +typedef int32_t (*VCHI_CONNECTION_MSG_ITER_HOLD_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service,
345 + VCHI_MSG_ITER_T *iter,
346 + void **msg_handle );
347 +
348 +// Routine to transmit bulk data
349 +typedef int32_t (*VCHI_CONNECTION_BULK_QUEUE_TRANSMIT_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
350 + const void *data_src,
351 + uint32_t data_size,
352 + VCHI_FLAGS_T flags,
353 + void *bulk_handle );
354 +
355 +// Routine to receive data
356 +typedef int32_t (*VCHI_CONNECTION_BULK_QUEUE_RECEIVE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
357 + void *data_dst,
358 + uint32_t data_size,
359 + VCHI_FLAGS_T flags,
360 + void *bulk_handle );
361 +
362 +// Routine to report if a server is available
363 +typedef int32_t (*VCHI_CONNECTION_SERVER_PRESENT)( VCHI_CONNECTION_STATE_T *state, int32_t service_id, int32_t peer_flags );
364 +
365 +// Routine to report the number of RX slots available
366 +typedef int (*VCHI_CONNECTION_RX_SLOTS_AVAILABLE)( const VCHI_CONNECTION_STATE_T *state );
367 +
368 +// Routine to report the RX slot size
369 +typedef uint32_t (*VCHI_CONNECTION_RX_SLOT_SIZE)( const VCHI_CONNECTION_STATE_T *state );
370 +
371 +// Callback to indicate that the other side has added a buffer to the rx bulk DMA FIFO
372 +typedef void (*VCHI_CONNECTION_RX_BULK_BUFFER_ADDED)(VCHI_CONNECTION_STATE_T *state,
373 + int32_t service,
374 + uint32_t length,
375 + MESSAGE_TX_CHANNEL_T channel,
376 + uint32_t channel_params,
377 + uint32_t data_length,
378 + uint32_t data_offset);
379 +
380 +// Callback to inform a service that a Xon or Xoff message has been received
381 +typedef void (*VCHI_CONNECTION_FLOW_CONTROL)(VCHI_CONNECTION_STATE_T *state, int32_t service_id, int32_t xoff);
382 +
383 +// Callback to inform a service that a server available reply message has been received
384 +typedef void (*VCHI_CONNECTION_SERVER_AVAILABLE_REPLY)(VCHI_CONNECTION_STATE_T *state, int32_t service_id, uint32_t flags);
385 +
386 +// Callback to indicate that bulk auxiliary messages have arrived
387 +typedef void (*VCHI_CONNECTION_BULK_AUX_RECEIVED)(VCHI_CONNECTION_STATE_T *state);
388 +
389 +// Callback to indicate that bulk auxiliary messages have arrived
390 +typedef void (*VCHI_CONNECTION_BULK_AUX_TRANSMITTED)(VCHI_CONNECTION_STATE_T *state, void *handle);
391 +
392 +// Callback with all the connection info you require
393 +typedef void (*VCHI_CONNECTION_INFO)(VCHI_CONNECTION_STATE_T *state, uint32_t protocol_version, uint32_t slot_size, uint32_t num_slots, uint32_t min_bulk_size);
394 +
395 +// Callback to inform of a disconnect
396 +typedef void (*VCHI_CONNECTION_DISCONNECT)(VCHI_CONNECTION_STATE_T *state, uint32_t flags);
397 +
398 +// Callback to inform of a power control request
399 +typedef void (*VCHI_CONNECTION_POWER_CONTROL)(VCHI_CONNECTION_STATE_T *state, MESSAGE_TX_CHANNEL_T channel, int32_t enable);
400 +
401 +// allocate memory suitably aligned for this connection
402 +typedef void * (*VCHI_BUFFER_ALLOCATE)(VCHI_CONNECTION_SERVICE_HANDLE_T service_handle, uint32_t * length);
403 +
404 +// free memory allocated by buffer_allocate
405 +typedef void (*VCHI_BUFFER_FREE)(VCHI_CONNECTION_SERVICE_HANDLE_T service_handle, void * address);
406 +
407 +
408 +/******************************************************************************
409 + System driver struct
410 + *****************************************************************************/
411 +
412 +struct opaque_vchi_connection_api_t
413 +{
414 + // Routine to init the connection
415 + VCHI_CONNECTION_INIT_T init;
416 +
417 + // Connection-level CRC control
418 + VCHI_CONNECTION_CRC_CONTROL_T crc_control;
419 +
420 + // Routine to connect to or create service
421 + VCHI_CONNECTION_SERVICE_CONNECT_T service_connect;
422 +
423 + // Routine to disconnect from a service
424 + VCHI_CONNECTION_SERVICE_DISCONNECT_T service_disconnect;
425 +
426 + // Routine to queue a message
427 + VCHI_CONNECTION_SERVICE_QUEUE_MESSAGE_T service_queue_msg;
428 +
429 + // scatter-gather (vector) message queue
430 + VCHI_CONNECTION_SERVICE_QUEUE_MESSAGEV_T service_queue_msgv;
431 +
432 + // Routine to dequeue a message
433 + VCHI_CONNECTION_SERVICE_DEQUEUE_MESSAGE_T service_dequeue_msg;
434 +
435 + // Routine to peek at a message
436 + VCHI_CONNECTION_SERVICE_PEEK_MESSAGE_T service_peek_msg;
437 +
438 + // Routine to hold a message
439 + VCHI_CONNECTION_SERVICE_HOLD_MESSAGE_T service_hold_msg;
440 +
441 + // Routine to initialise a received message iterator
442 + VCHI_CONNECTION_SERVICE_LOOKAHEAD_MESSAGE_T service_look_ahead_msg;
443 +
444 + // Routine to release a message
445 + VCHI_CONNECTION_HELD_MSG_RELEASE_T held_msg_release;
446 +
447 + // Routine to get information on a held message
448 + VCHI_CONNECTION_HELD_MSG_INFO_T held_msg_info;
449 +
450 + // Routine to check for next message on iterator
451 + VCHI_CONNECTION_MSG_ITER_HAS_NEXT_T msg_iter_has_next;
452 +
453 + // Routine to get next message on iterator
454 + VCHI_CONNECTION_MSG_ITER_NEXT_T msg_iter_next;
455 +
456 + // Routine to remove the last message returned by iterator
457 + VCHI_CONNECTION_MSG_ITER_REMOVE_T msg_iter_remove;
458 +
459 + // Routine to hold the last message returned by iterator
460 + VCHI_CONNECTION_MSG_ITER_HOLD_T msg_iter_hold;
461 +
462 + // Routine to transmit bulk data
463 + VCHI_CONNECTION_BULK_QUEUE_TRANSMIT_T bulk_queue_transmit;
464 +
465 + // Routine to receive data
466 + VCHI_CONNECTION_BULK_QUEUE_RECEIVE_T bulk_queue_receive;
467 +
468 + // Routine to report the available servers
469 + VCHI_CONNECTION_SERVER_PRESENT server_present;
470 +
471 + // Routine to report the number of RX slots available
472 + VCHI_CONNECTION_RX_SLOTS_AVAILABLE connection_rx_slots_available;
473 +
474 + // Routine to report the RX slot size
475 + VCHI_CONNECTION_RX_SLOT_SIZE connection_rx_slot_size;
476 +
477 + // Callback to indicate that the other side has added a buffer to the rx bulk DMA FIFO
478 + VCHI_CONNECTION_RX_BULK_BUFFER_ADDED rx_bulk_buffer_added;
479 +
480 + // Callback to inform a service that a Xon or Xoff message has been received
481 + VCHI_CONNECTION_FLOW_CONTROL flow_control;
482 +
483 + // Callback to inform a service that a server available reply message has been received
484 + VCHI_CONNECTION_SERVER_AVAILABLE_REPLY server_available_reply;
485 +
486 + // Callback to indicate that bulk auxiliary messages have arrived
487 + VCHI_CONNECTION_BULK_AUX_RECEIVED bulk_aux_received;
488 +
489 + // Callback to indicate that a bulk auxiliary message has been transmitted
490 + VCHI_CONNECTION_BULK_AUX_TRANSMITTED bulk_aux_transmitted;
491 +
492 + // Callback to provide information about the connection
493 + VCHI_CONNECTION_INFO connection_info;
494 +
495 + // Callback to notify that peer has requested disconnect
496 + VCHI_CONNECTION_DISCONNECT disconnect;
497 +
498 + // Callback to notify that peer has requested power change
499 + VCHI_CONNECTION_POWER_CONTROL power_control;
500 +
501 + // allocate memory suitably aligned for this connection
502 + VCHI_BUFFER_ALLOCATE buffer_allocate;
503 +
504 + // free memory allocated by buffer_allocate
505 + VCHI_BUFFER_FREE buffer_free;
506 +
507 +};
508 +
509 +struct vchi_connection_t {
510 + const VCHI_CONNECTION_API_T *api;
511 + VCHI_CONNECTION_STATE_T *state;
512 +#ifdef VCHI_COARSE_LOCKING
513 + struct semaphore sem;
514 +#endif
515 +};
516 +
517 +
518 +#endif /* CONNECTION_H_ */
519 +
520 +/****************************** End of file **********************************/
521 diff --git a/drivers/misc/vc04_services/interface/vchi/message_drivers/message.h b/drivers/misc/vc04_services/interface/vchi/message_drivers/message.h
522 new file mode 100644
523 index 0000000..8b3f767
524 --- /dev/null
525 +++ b/drivers/misc/vc04_services/interface/vchi/message_drivers/message.h
526 @@ -0,0 +1,204 @@
527 +/**
528 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
529 + *
530 + * Redistribution and use in source and binary forms, with or without
531 + * modification, are permitted provided that the following conditions
532 + * are met:
533 + * 1. Redistributions of source code must retain the above copyright
534 + * notice, this list of conditions, and the following disclaimer,
535 + * without modification.
536 + * 2. Redistributions in binary form must reproduce the above copyright
537 + * notice, this list of conditions and the following disclaimer in the
538 + * documentation and/or other materials provided with the distribution.
539 + * 3. The names of the above-listed copyright holders may not be used
540 + * to endorse or promote products derived from this software without
541 + * specific prior written permission.
542 + *
543 + * ALTERNATIVELY, this software may be distributed under the terms of the
544 + * GNU General Public License ("GPL") version 2, as published by the Free
545 + * Software Foundation.
546 + *
547 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
548 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
549 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
550 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
551 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
552 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
553 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
554 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
555 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
556 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
557 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
558 + */
559 +
560 +#ifndef _VCHI_MESSAGE_H_
561 +#define _VCHI_MESSAGE_H_
562 +
563 +#include <linux/kernel.h>
564 +#include <linux/types.h>
565 +#include <linux/semaphore.h>
566 +
567 +#include "interface/vchi/vchi_cfg_internal.h"
568 +#include "interface/vchi/vchi_common.h"
569 +
570 +
571 +typedef enum message_event_type {
572 + MESSAGE_EVENT_NONE,
573 + MESSAGE_EVENT_NOP,
574 + MESSAGE_EVENT_MESSAGE,
575 + MESSAGE_EVENT_SLOT_COMPLETE,
576 + MESSAGE_EVENT_RX_BULK_PAUSED,
577 + MESSAGE_EVENT_RX_BULK_COMPLETE,
578 + MESSAGE_EVENT_TX_COMPLETE,
579 + MESSAGE_EVENT_MSG_DISCARDED
580 +} MESSAGE_EVENT_TYPE_T;
581 +
582 +typedef enum vchi_msg_flags
583 +{
584 + VCHI_MSG_FLAGS_NONE = 0x0,
585 + VCHI_MSG_FLAGS_TERMINATE_DMA = 0x1
586 +} VCHI_MSG_FLAGS_T;
587 +
588 +typedef enum message_tx_channel
589 +{
590 + MESSAGE_TX_CHANNEL_MESSAGE = 0,
591 + MESSAGE_TX_CHANNEL_BULK = 1 // drivers may provide multiple bulk channels, from 1 upwards
592 +} MESSAGE_TX_CHANNEL_T;
593 +
594 +// Macros used for cycling through bulk channels
595 +#define MESSAGE_TX_CHANNEL_BULK_PREV(c) (MESSAGE_TX_CHANNEL_BULK+((c)-MESSAGE_TX_CHANNEL_BULK+VCHI_MAX_BULK_TX_CHANNELS_PER_CONNECTION-1)%VCHI_MAX_BULK_TX_CHANNELS_PER_CONNECTION)
596 +#define MESSAGE_TX_CHANNEL_BULK_NEXT(c) (MESSAGE_TX_CHANNEL_BULK+((c)-MESSAGE_TX_CHANNEL_BULK+1)%VCHI_MAX_BULK_TX_CHANNELS_PER_CONNECTION)
597 +
598 +typedef enum message_rx_channel
599 +{
600 + MESSAGE_RX_CHANNEL_MESSAGE = 0,
601 + MESSAGE_RX_CHANNEL_BULK = 1 // drivers may provide multiple bulk channels, from 1 upwards
602 +} MESSAGE_RX_CHANNEL_T;
603 +
604 +// Message receive slot information
605 +typedef struct rx_msg_slot_info {
606 +
607 + struct rx_msg_slot_info *next;
608 + //struct slot_info *prev;
609 +#if !defined VCHI_COARSE_LOCKING
610 + struct semaphore sem;
611 +#endif
612 +
613 + uint8_t *addr; // base address of slot
614 + uint32_t len; // length of slot in bytes
615 +
616 + uint32_t write_ptr; // hardware causes this to advance
617 + uint32_t read_ptr; // this module does the reading
618 + int active; // is this slot in the hardware dma fifo?
619 + uint32_t msgs_parsed; // count how many messages are in this slot
620 + uint32_t msgs_released; // how many messages have been released
621 + void *state; // connection state information
622 + uint8_t ref_count[VCHI_MAX_SERVICES_PER_CONNECTION]; // reference count for slots held by services
623 +} RX_MSG_SLOTINFO_T;
624 +
625 +// The message driver no longer needs to know about the fields of RX_BULK_SLOTINFO_T - sort this out.
626 +// In particular, it mustn't use addr and len - they're the client buffer, but the message
627 +// driver will be tasked with sending the aligned core section.
628 +typedef struct rx_bulk_slotinfo_t {
629 + struct rx_bulk_slotinfo_t *next;
630 +
631 + struct semaphore *blocking;
632 +
633 + // needed by DMA
634 + void *addr;
635 + uint32_t len;
636 +
637 + // needed for the callback
638 + void *service;
639 + void *handle;
640 + VCHI_FLAGS_T flags;
641 +} RX_BULK_SLOTINFO_T;
642 +
643 +
644 +/* ----------------------------------------------------------------------
645 + * each connection driver will have a pool of the following struct.
646 + *
647 + * the pool will be managed by vchi_qman_*
648 + * this means there will be multiple queues (single linked lists)
649 + * a given struct message_info will be on exactly one of these queues
650 + * at any one time
651 + * -------------------------------------------------------------------- */
652 +typedef struct rx_message_info {
653 +
654 + struct message_info *next;
655 + //struct message_info *prev;
656 +
657 + uint8_t *addr;
658 + uint32_t len;
659 + RX_MSG_SLOTINFO_T *slot; // points to whichever slot contains this message
660 + uint32_t tx_timestamp;
661 + uint32_t rx_timestamp;
662 +
663 +} RX_MESSAGE_INFO_T;
664 +
665 +typedef struct {
666 + MESSAGE_EVENT_TYPE_T type;
667 +
668 + struct {
669 + // for messages
670 + void *addr; // address of message
671 + uint16_t slot_delta; // whether this message indicated slot delta
672 + uint32_t len; // length of message
673 + RX_MSG_SLOTINFO_T *slot; // slot this message is in
674 + int32_t service; // service id this message is destined for
675 + uint32_t tx_timestamp; // timestamp from the header
676 + uint32_t rx_timestamp; // timestamp when we parsed it
677 + } message;
678 +
679 + // FIXME: cleanup slot reporting...
680 + RX_MSG_SLOTINFO_T *rx_msg;
681 + RX_BULK_SLOTINFO_T *rx_bulk;
682 + void *tx_handle;
683 + MESSAGE_TX_CHANNEL_T tx_channel;
684 +
685 +} MESSAGE_EVENT_T;
686 +
687 +
688 +// callbacks
689 +typedef void VCHI_MESSAGE_DRIVER_EVENT_CALLBACK_T( void *state );
690 +
691 +typedef struct {
692 + VCHI_MESSAGE_DRIVER_EVENT_CALLBACK_T *event_callback;
693 +} VCHI_MESSAGE_DRIVER_OPEN_T;
694 +
695 +
696 +// handle to this instance of message driver (as returned by ->open)
697 +typedef struct opaque_mhandle_t *VCHI_MDRIVER_HANDLE_T;
698 +
699 +struct opaque_vchi_message_driver_t {
700 + VCHI_MDRIVER_HANDLE_T *(*open)( VCHI_MESSAGE_DRIVER_OPEN_T *params, void *state );
701 + int32_t (*suspending)( VCHI_MDRIVER_HANDLE_T *handle );
702 + int32_t (*resumed)( VCHI_MDRIVER_HANDLE_T *handle );
703 + int32_t (*power_control)( VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T, int32_t enable );
704 + int32_t (*add_msg_rx_slot)( VCHI_MDRIVER_HANDLE_T *handle, RX_MSG_SLOTINFO_T *slot ); // rx message
705 + int32_t (*add_bulk_rx)( VCHI_MDRIVER_HANDLE_T *handle, void *data, uint32_t len, RX_BULK_SLOTINFO_T *slot ); // rx data (bulk)
706 + int32_t (*send)( VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T channel, const void *data, uint32_t len, VCHI_MSG_FLAGS_T flags, void *send_handle ); // tx (message & bulk)
707 + void (*next_event)( VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_EVENT_T *event ); // get the next event from message_driver
708 + int32_t (*enable)( VCHI_MDRIVER_HANDLE_T *handle );
709 + int32_t (*form_message)( VCHI_MDRIVER_HANDLE_T *handle, int32_t service_id, VCHI_MSG_VECTOR_T *vector, uint32_t count, void
710 + *address, uint32_t length_avail, uint32_t max_total_length, int32_t pad_to_fill, int32_t allow_partial );
711 +
712 + int32_t (*update_message)( VCHI_MDRIVER_HANDLE_T *handle, void *dest, int16_t *slot_count );
713 + int32_t (*buffer_aligned)( VCHI_MDRIVER_HANDLE_T *handle, int tx, int uncached, const void *address, const uint32_t length );
714 + void * (*allocate_buffer)( VCHI_MDRIVER_HANDLE_T *handle, uint32_t *length );
715 + void (*free_buffer)( VCHI_MDRIVER_HANDLE_T *handle, void *address );
716 + int (*rx_slot_size)( VCHI_MDRIVER_HANDLE_T *handle, int msg_size );
717 + int (*tx_slot_size)( VCHI_MDRIVER_HANDLE_T *handle, int msg_size );
718 +
719 + int32_t (*tx_supports_terminate)( const VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T channel );
720 + uint32_t (*tx_bulk_chunk_size)( const VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T channel );
721 + int (*tx_alignment)( const VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T channel );
722 + int (*rx_alignment)( const VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_RX_CHANNEL_T channel );
723 + void (*form_bulk_aux)( VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T channel, const void *data, uint32_t len, uint32_t chunk_size, const void **aux_data, int32_t *aux_len );
724 + void (*debug)( VCHI_MDRIVER_HANDLE_T *handle );
725 +};
726 +
727 +
728 +#endif // _VCHI_MESSAGE_H_
729 +
730 +/****************************** End of file ***********************************/
731 diff --git a/drivers/misc/vc04_services/interface/vchi/vchi.h b/drivers/misc/vc04_services/interface/vchi/vchi.h
732 new file mode 100644
733 index 0000000..1b17e98
734 --- /dev/null
735 +++ b/drivers/misc/vc04_services/interface/vchi/vchi.h
736 @@ -0,0 +1,378 @@
737 +/**
738 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
739 + *
740 + * Redistribution and use in source and binary forms, with or without
741 + * modification, are permitted provided that the following conditions
742 + * are met:
743 + * 1. Redistributions of source code must retain the above copyright
744 + * notice, this list of conditions, and the following disclaimer,
745 + * without modification.
746 + * 2. Redistributions in binary form must reproduce the above copyright
747 + * notice, this list of conditions and the following disclaimer in the
748 + * documentation and/or other materials provided with the distribution.
749 + * 3. The names of the above-listed copyright holders may not be used
750 + * to endorse or promote products derived from this software without
751 + * specific prior written permission.
752 + *
753 + * ALTERNATIVELY, this software may be distributed under the terms of the
754 + * GNU General Public License ("GPL") version 2, as published by the Free
755 + * Software Foundation.
756 + *
757 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
758 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
759 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
760 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
761 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
762 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
763 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
764 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
765 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
766 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
767 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
768 + */
769 +
770 +#ifndef VCHI_H_
771 +#define VCHI_H_
772 +
773 +#include "interface/vchi/vchi_cfg.h"
774 +#include "interface/vchi/vchi_common.h"
775 +#include "interface/vchi/connections/connection.h"
776 +#include "vchi_mh.h"
777 +
778 +
779 +/******************************************************************************
780 + Global defs
781 + *****************************************************************************/
782 +
783 +#define VCHI_BULK_ROUND_UP(x) ((((unsigned long)(x))+VCHI_BULK_ALIGN-1) & ~(VCHI_BULK_ALIGN-1))
784 +#define VCHI_BULK_ROUND_DOWN(x) (((unsigned long)(x)) & ~(VCHI_BULK_ALIGN-1))
785 +#define VCHI_BULK_ALIGN_NBYTES(x) (VCHI_BULK_ALIGNED(x) ? 0 : (VCHI_BULK_ALIGN - ((unsigned long)(x) & (VCHI_BULK_ALIGN-1))))
786 +
787 +#ifdef USE_VCHIQ_ARM
788 +#define VCHI_BULK_ALIGNED(x) 1
789 +#else
790 +#define VCHI_BULK_ALIGNED(x) (((unsigned long)(x) & (VCHI_BULK_ALIGN-1)) == 0)
791 +#endif
792 +
793 +struct vchi_version {
794 + uint32_t version;
795 + uint32_t version_min;
796 +};
797 +#define VCHI_VERSION(v_) { v_, v_ }
798 +#define VCHI_VERSION_EX(v_, m_) { v_, m_ }
799 +
800 +typedef enum
801 +{
802 + VCHI_VEC_POINTER,
803 + VCHI_VEC_HANDLE,
804 + VCHI_VEC_LIST
805 +} VCHI_MSG_VECTOR_TYPE_T;
806 +
807 +typedef struct vchi_msg_vector_ex {
808 +
809 + VCHI_MSG_VECTOR_TYPE_T type;
810 + union
811 + {
812 + // a memory handle
813 + struct
814 + {
815 + VCHI_MEM_HANDLE_T handle;
816 + uint32_t offset;
817 + int32_t vec_len;
818 + } handle;
819 +
820 + // an ordinary data pointer
821 + struct
822 + {
823 + const void *vec_base;
824 + int32_t vec_len;
825 + } ptr;
826 +
827 + // a nested vector list
828 + struct
829 + {
830 + struct vchi_msg_vector_ex *vec;
831 + uint32_t vec_len;
832 + } list;
833 + } u;
834 +} VCHI_MSG_VECTOR_EX_T;
835 +
836 +
837 +// Construct an entry in a msg vector for a pointer (p) of length (l)
838 +#define VCHI_VEC_POINTER(p,l) VCHI_VEC_POINTER, { { (VCHI_MEM_HANDLE_T)(p), (l) } }
839 +
840 +// Construct an entry in a msg vector for a message handle (h), starting at offset (o) of length (l)
841 +#define VCHI_VEC_HANDLE(h,o,l) VCHI_VEC_HANDLE, { { (h), (o), (l) } }
842 +
843 +// Macros to manipulate 'FOURCC' values
844 +#define MAKE_FOURCC(x) ((int32_t)( (x[0] << 24) | (x[1] << 16) | (x[2] << 8) | x[3] ))
845 +#define FOURCC_TO_CHAR(x) (x >> 24) & 0xFF,(x >> 16) & 0xFF,(x >> 8) & 0xFF, x & 0xFF
846 +
847 +
848 +// Opaque service information
849 +struct opaque_vchi_service_t;
850 +
851 +// Descriptor for a held message. Allocated by client, initialised by vchi_msg_hold,
852 +// vchi_msg_iter_hold or vchi_msg_iter_hold_next. Fields are for internal VCHI use only.
853 +typedef struct
854 +{
855 + struct opaque_vchi_service_t *service;
856 + void *message;
857 +} VCHI_HELD_MSG_T;
858 +
859 +
860 +
861 +// structure used to provide the information needed to open a server or a client
862 +typedef struct {
863 + struct vchi_version version;
864 + int32_t service_id;
865 + VCHI_CONNECTION_T *connection;
866 + uint32_t rx_fifo_size;
867 + uint32_t tx_fifo_size;
868 + VCHI_CALLBACK_T callback;
869 + void *callback_param;
870 + /* client intends to receive bulk transfers of
871 + odd lengths or into unaligned buffers */
872 + int32_t want_unaligned_bulk_rx;
873 + /* client intends to transmit bulk transfers of
874 + odd lengths or out of unaligned buffers */
875 + int32_t want_unaligned_bulk_tx;
876 + /* client wants to check CRCs on (bulk) xfers.
877 + Only needs to be set at 1 end - will do both directions. */
878 + int32_t want_crc;
879 +} SERVICE_CREATION_T;
880 +
881 +// Opaque handle for a VCHI instance
882 +typedef struct opaque_vchi_instance_handle_t *VCHI_INSTANCE_T;
883 +
884 +// Opaque handle for a server or client
885 +typedef struct opaque_vchi_service_handle_t *VCHI_SERVICE_HANDLE_T;
886 +
887 +// Service registration & startup
888 +typedef void (*VCHI_SERVICE_INIT)(VCHI_INSTANCE_T initialise_instance, VCHI_CONNECTION_T **connections, uint32_t num_connections);
889 +
890 +typedef struct service_info_tag {
891 + const char * const vll_filename; /* VLL to load to start this service. This is an empty string if VLL is "static" */
892 + VCHI_SERVICE_INIT init; /* Service initialisation function */
893 + void *vll_handle; /* VLL handle; NULL when unloaded or a "static VLL" in build */
894 +} SERVICE_INFO_T;
895 +
896 +/******************************************************************************
897 + Global funcs - implementation is specific to which side you are on (local / remote)
898 + *****************************************************************************/
899 +
900 +#ifdef __cplusplus
901 +extern "C" {
902 +#endif
903 +
904 +extern /*@observer@*/ VCHI_CONNECTION_T * vchi_create_connection( const VCHI_CONNECTION_API_T * function_table,
905 + const VCHI_MESSAGE_DRIVER_T * low_level);
906 +
907 +
908 +// Routine used to initialise the vchi on both local + remote connections
909 +extern int32_t vchi_initialise( VCHI_INSTANCE_T *instance_handle );
910 +
911 +extern int32_t vchi_exit( void );
912 +
913 +extern int32_t vchi_connect( VCHI_CONNECTION_T **connections,
914 + const uint32_t num_connections,
915 + VCHI_INSTANCE_T instance_handle );
916 +
917 +//When this is called, ensure that all services have no data pending.
918 +//Bulk transfers can remain 'queued'
919 +extern int32_t vchi_disconnect( VCHI_INSTANCE_T instance_handle );
920 +
921 +// Global control over bulk CRC checking
922 +extern int32_t vchi_crc_control( VCHI_CONNECTION_T *connection,
923 + VCHI_CRC_CONTROL_T control );
924 +
925 +// helper functions
926 +extern void * vchi_allocate_buffer(VCHI_SERVICE_HANDLE_T handle, uint32_t *length);
927 +extern void vchi_free_buffer(VCHI_SERVICE_HANDLE_T handle, void *address);
928 +extern uint32_t vchi_current_time(VCHI_INSTANCE_T instance_handle);
929 +
930 +
931 +/******************************************************************************
932 + Global service API
933 + *****************************************************************************/
934 +// Routine to create a named service
935 +extern int32_t vchi_service_create( VCHI_INSTANCE_T instance_handle,
936 + SERVICE_CREATION_T *setup,
937 + VCHI_SERVICE_HANDLE_T *handle );
938 +
939 +// Routine to destory a service
940 +extern int32_t vchi_service_destroy( const VCHI_SERVICE_HANDLE_T handle );
941 +
942 +// Routine to open a named service
943 +extern int32_t vchi_service_open( VCHI_INSTANCE_T instance_handle,
944 + SERVICE_CREATION_T *setup,
945 + VCHI_SERVICE_HANDLE_T *handle);
946 +
947 +extern int32_t vchi_get_peer_version( const VCHI_SERVICE_HANDLE_T handle,
948 + short *peer_version );
949 +
950 +// Routine to close a named service
951 +extern int32_t vchi_service_close( const VCHI_SERVICE_HANDLE_T handle );
952 +
953 +// Routine to increment ref count on a named service
954 +extern int32_t vchi_service_use( const VCHI_SERVICE_HANDLE_T handle );
955 +
956 +// Routine to decrement ref count on a named service
957 +extern int32_t vchi_service_release( const VCHI_SERVICE_HANDLE_T handle );
958 +
959 +// Routine to set a control option for a named service
960 +extern int32_t vchi_service_set_option( const VCHI_SERVICE_HANDLE_T handle,
961 + VCHI_SERVICE_OPTION_T option,
962 + int value);
963 +
964 +// Routine to send a message across a service
965 +extern int32_t vchi_msg_queue( VCHI_SERVICE_HANDLE_T handle,
966 + const void *data,
967 + uint32_t data_size,
968 + VCHI_FLAGS_T flags,
969 + void *msg_handle );
970 +
971 +// scatter-gather (vector) and send message
972 +int32_t vchi_msg_queuev_ex( VCHI_SERVICE_HANDLE_T handle,
973 + VCHI_MSG_VECTOR_EX_T *vector,
974 + uint32_t count,
975 + VCHI_FLAGS_T flags,
976 + void *msg_handle );
977 +
978 +// legacy scatter-gather (vector) and send message, only handles pointers
979 +int32_t vchi_msg_queuev( VCHI_SERVICE_HANDLE_T handle,
980 + VCHI_MSG_VECTOR_T *vector,
981 + uint32_t count,
982 + VCHI_FLAGS_T flags,
983 + void *msg_handle );
984 +
985 +// Routine to receive a msg from a service
986 +// Dequeue is equivalent to hold, copy into client buffer, release
987 +extern int32_t vchi_msg_dequeue( VCHI_SERVICE_HANDLE_T handle,
988 + void *data,
989 + uint32_t max_data_size_to_read,
990 + uint32_t *actual_msg_size,
991 + VCHI_FLAGS_T flags );
992 +
993 +// Routine to look at a message in place.
994 +// The message is not dequeued, so a subsequent call to peek or dequeue
995 +// will return the same message.
996 +extern int32_t vchi_msg_peek( VCHI_SERVICE_HANDLE_T handle,
997 + void **data,
998 + uint32_t *msg_size,
999 + VCHI_FLAGS_T flags );
1000 +
1001 +// Routine to remove a message after it has been read in place with peek
1002 +// The first message on the queue is dequeued.
1003 +extern int32_t vchi_msg_remove( VCHI_SERVICE_HANDLE_T handle );
1004 +
1005 +// Routine to look at a message in place.
1006 +// The message is dequeued, so the caller is left holding it; the descriptor is
1007 +// filled in and must be released when the user has finished with the message.
1008 +extern int32_t vchi_msg_hold( VCHI_SERVICE_HANDLE_T handle,
1009 + void **data, // } may be NULL, as info can be
1010 + uint32_t *msg_size, // } obtained from HELD_MSG_T
1011 + VCHI_FLAGS_T flags,
1012 + VCHI_HELD_MSG_T *message_descriptor );
1013 +
1014 +// Initialise an iterator to look through messages in place
1015 +extern int32_t vchi_msg_look_ahead( VCHI_SERVICE_HANDLE_T handle,
1016 + VCHI_MSG_ITER_T *iter,
1017 + VCHI_FLAGS_T flags );
1018 +
1019 +/******************************************************************************
1020 + Global service support API - operations on held messages and message iterators
1021 + *****************************************************************************/
1022 +
1023 +// Routine to get the address of a held message
1024 +extern void *vchi_held_msg_ptr( const VCHI_HELD_MSG_T *message );
1025 +
1026 +// Routine to get the size of a held message
1027 +extern int32_t vchi_held_msg_size( const VCHI_HELD_MSG_T *message );
1028 +
1029 +// Routine to get the transmit timestamp as written into the header by the peer
1030 +extern uint32_t vchi_held_msg_tx_timestamp( const VCHI_HELD_MSG_T *message );
1031 +
1032 +// Routine to get the reception timestamp, written as we parsed the header
1033 +extern uint32_t vchi_held_msg_rx_timestamp( const VCHI_HELD_MSG_T *message );
1034 +
1035 +// Routine to release a held message after it has been processed
1036 +extern int32_t vchi_held_msg_release( VCHI_HELD_MSG_T *message );
1037 +
1038 +// Indicates whether the iterator has a next message.
1039 +extern int32_t vchi_msg_iter_has_next( const VCHI_MSG_ITER_T *iter );
1040 +
1041 +// Return the pointer and length for the next message and advance the iterator.
1042 +extern int32_t vchi_msg_iter_next( VCHI_MSG_ITER_T *iter,
1043 + void **data,
1044 + uint32_t *msg_size );
1045 +
1046 +// Remove the last message returned by vchi_msg_iter_next.
1047 +// Can only be called once after each call to vchi_msg_iter_next.
1048 +extern int32_t vchi_msg_iter_remove( VCHI_MSG_ITER_T *iter );
1049 +
1050 +// Hold the last message returned by vchi_msg_iter_next.
1051 +// Can only be called once after each call to vchi_msg_iter_next.
1052 +extern int32_t vchi_msg_iter_hold( VCHI_MSG_ITER_T *iter,
1053 + VCHI_HELD_MSG_T *message );
1054 +
1055 +// Return information for the next message, and hold it, advancing the iterator.
1056 +extern int32_t vchi_msg_iter_hold_next( VCHI_MSG_ITER_T *iter,
1057 + void **data, // } may be NULL
1058 + uint32_t *msg_size, // }
1059 + VCHI_HELD_MSG_T *message );
1060 +
1061 +
1062 +/******************************************************************************
1063 + Global bulk API
1064 + *****************************************************************************/
1065 +
1066 +// Routine to prepare interface for a transfer from the other side
1067 +extern int32_t vchi_bulk_queue_receive( VCHI_SERVICE_HANDLE_T handle,
1068 + void *data_dst,
1069 + uint32_t data_size,
1070 + VCHI_FLAGS_T flags,
1071 + void *transfer_handle );
1072 +
1073 +
1074 +// Prepare interface for a transfer from the other side into relocatable memory.
1075 +int32_t vchi_bulk_queue_receive_reloc( const VCHI_SERVICE_HANDLE_T handle,
1076 + VCHI_MEM_HANDLE_T h_dst,
1077 + uint32_t offset,
1078 + uint32_t data_size,
1079 + const VCHI_FLAGS_T flags,
1080 + void * const bulk_handle );
1081 +
1082 +// Routine to queue up data ready for transfer to the other (once they have signalled they are ready)
1083 +extern int32_t vchi_bulk_queue_transmit( VCHI_SERVICE_HANDLE_T handle,
1084 + const void *data_src,
1085 + uint32_t data_size,
1086 + VCHI_FLAGS_T flags,
1087 + void *transfer_handle );
1088 +
1089 +
1090 +/******************************************************************************
1091 + Configuration plumbing
1092 + *****************************************************************************/
1093 +
1094 +// function prototypes for the different mid layers (the state info gives the different physical connections)
1095 +extern const VCHI_CONNECTION_API_T *single_get_func_table( void );
1096 +//extern const VCHI_CONNECTION_API_T *local_server_get_func_table( void );
1097 +//extern const VCHI_CONNECTION_API_T *local_client_get_func_table( void );
1098 +
1099 +// declare all message drivers here
1100 +const VCHI_MESSAGE_DRIVER_T *vchi_mphi_message_driver_func_table( void );
1101 +
1102 +#ifdef __cplusplus
1103 +}
1104 +#endif
1105 +
1106 +extern int32_t vchi_bulk_queue_transmit_reloc( VCHI_SERVICE_HANDLE_T handle,
1107 + VCHI_MEM_HANDLE_T h_src,
1108 + uint32_t offset,
1109 + uint32_t data_size,
1110 + VCHI_FLAGS_T flags,
1111 + void *transfer_handle );
1112 +#endif /* VCHI_H_ */
1113 +
1114 +/****************************** End of file **********************************/
1115 diff --git a/drivers/misc/vc04_services/interface/vchi/vchi_cfg.h b/drivers/misc/vc04_services/interface/vchi/vchi_cfg.h
1116 new file mode 100644
1117 index 0000000..26bc2d3
1118 --- /dev/null
1119 +++ b/drivers/misc/vc04_services/interface/vchi/vchi_cfg.h
1120 @@ -0,0 +1,224 @@
1121 +/**
1122 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
1123 + *
1124 + * Redistribution and use in source and binary forms, with or without
1125 + * modification, are permitted provided that the following conditions
1126 + * are met:
1127 + * 1. Redistributions of source code must retain the above copyright
1128 + * notice, this list of conditions, and the following disclaimer,
1129 + * without modification.
1130 + * 2. Redistributions in binary form must reproduce the above copyright
1131 + * notice, this list of conditions and the following disclaimer in the
1132 + * documentation and/or other materials provided with the distribution.
1133 + * 3. The names of the above-listed copyright holders may not be used
1134 + * to endorse or promote products derived from this software without
1135 + * specific prior written permission.
1136 + *
1137 + * ALTERNATIVELY, this software may be distributed under the terms of the
1138 + * GNU General Public License ("GPL") version 2, as published by the Free
1139 + * Software Foundation.
1140 + *
1141 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
1142 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
1143 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
1144 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
1145 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
1146 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
1147 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
1148 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
1149 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
1150 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1151 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1152 + */
1153 +
1154 +#ifndef VCHI_CFG_H_
1155 +#define VCHI_CFG_H_
1156 +
1157 +/****************************************************************************************
1158 + * Defines in this first section are part of the VCHI API and may be examined by VCHI
1159 + * services.
1160 + ***************************************************************************************/
1161 +
1162 +/* Required alignment of base addresses for bulk transfer, if unaligned transfers are not enabled */
1163 +/* Really determined by the message driver, and should be available from a run-time call. */
1164 +#ifndef VCHI_BULK_ALIGN
1165 +# if __VCCOREVER__ >= 0x04000000
1166 +# define VCHI_BULK_ALIGN 32 // Allows for the need to do cache cleans
1167 +# else
1168 +# define VCHI_BULK_ALIGN 16
1169 +# endif
1170 +#endif
1171 +
1172 +/* Required length multiple for bulk transfers, if unaligned transfers are not enabled */
1173 +/* May be less than or greater than VCHI_BULK_ALIGN */
1174 +/* Really determined by the message driver, and should be available from a run-time call. */
1175 +#ifndef VCHI_BULK_GRANULARITY
1176 +# if __VCCOREVER__ >= 0x04000000
1177 +# define VCHI_BULK_GRANULARITY 32 // Allows for the need to do cache cleans
1178 +# else
1179 +# define VCHI_BULK_GRANULARITY 16
1180 +# endif
1181 +#endif
1182 +
1183 +/* The largest possible message to be queued with vchi_msg_queue. */
1184 +#ifndef VCHI_MAX_MSG_SIZE
1185 +# if defined VCHI_LOCAL_HOST_PORT
1186 +# define VCHI_MAX_MSG_SIZE 16384 // makes file transfers fast, but should they be using bulk?
1187 +# else
1188 +# define VCHI_MAX_MSG_SIZE 4096 // NOTE: THIS MUST BE LARGER THAN OR EQUAL TO THE SIZE OF THE KHRONOS MERGE BUFFER!!
1189 +# endif
1190 +#endif
1191 +
1192 +/******************************************************************************************
1193 + * Defines below are system configuration options, and should not be used by VCHI services.
1194 + *****************************************************************************************/
1195 +
1196 +/* How many connections can we support? A localhost implementation uses 2 connections,
1197 + * 1 for host-app, 1 for VMCS, and these are hooked together by a loopback MPHI VCFW
1198 + * driver. */
1199 +#ifndef VCHI_MAX_NUM_CONNECTIONS
1200 +# define VCHI_MAX_NUM_CONNECTIONS 3
1201 +#endif
1202 +
1203 +/* How many services can we open per connection? Extending this doesn't cost processing time, just a small
1204 + * amount of static memory. */
1205 +#ifndef VCHI_MAX_SERVICES_PER_CONNECTION
1206 +# define VCHI_MAX_SERVICES_PER_CONNECTION 36
1207 +#endif
1208 +
1209 +/* Adjust if using a message driver that supports more logical TX channels */
1210 +#ifndef VCHI_MAX_BULK_TX_CHANNELS_PER_CONNECTION
1211 +# define VCHI_MAX_BULK_TX_CHANNELS_PER_CONNECTION 9 // 1 MPHI + 8 CCP2 logical channels
1212 +#endif
1213 +
1214 +/* Adjust if using a message driver that supports more logical RX channels */
1215 +#ifndef VCHI_MAX_BULK_RX_CHANNELS_PER_CONNECTION
1216 +# define VCHI_MAX_BULK_RX_CHANNELS_PER_CONNECTION 1 // 1 MPHI
1217 +#endif
1218 +
1219 +/* How many receive slots do we use. This times VCHI_MAX_MSG_SIZE gives the effective
1220 + * receive queue space, less message headers. */
1221 +#ifndef VCHI_NUM_READ_SLOTS
1222 +# if defined(VCHI_LOCAL_HOST_PORT)
1223 +# define VCHI_NUM_READ_SLOTS 4
1224 +# else
1225 +# define VCHI_NUM_READ_SLOTS 48
1226 +# endif
1227 +#endif
1228 +
1229 +/* Do we utilise overrun facility for receive message slots? Can aid peer transmit
1230 + * performance. Only define on VideoCore end, talking to host.
1231 + */
1232 +//#define VCHI_MSG_RX_OVERRUN
1233 +
1234 +/* How many transmit slots do we use. Generally don't need many, as the hardware driver
1235 + * underneath VCHI will usually have its own buffering. */
1236 +#ifndef VCHI_NUM_WRITE_SLOTS
1237 +# define VCHI_NUM_WRITE_SLOTS 4
1238 +#endif
1239 +
1240 +/* If a service has held or queued received messages in VCHI_XOFF_THRESHOLD or more slots,
1241 + * then it's taking up too much buffer space, and the peer service will be told to stop
1242 + * transmitting with an XOFF message. For this to be effective, the VCHI_NUM_READ_SLOTS
1243 + * needs to be considerably bigger than VCHI_NUM_WRITE_SLOTS, or the transmit latency
1244 + * is too high. */
1245 +#ifndef VCHI_XOFF_THRESHOLD
1246 +# define VCHI_XOFF_THRESHOLD (VCHI_NUM_READ_SLOTS / 2)
1247 +#endif
1248 +
1249 +/* After we've sent an XOFF, the peer will be told to resume transmission once the local
1250 + * service has dequeued/released enough messages that it's now occupying
1251 + * VCHI_XON_THRESHOLD slots or fewer. */
1252 +#ifndef VCHI_XON_THRESHOLD
1253 +# define VCHI_XON_THRESHOLD (VCHI_NUM_READ_SLOTS / 4)
1254 +#endif
1255 +
1256 +/* A size below which a bulk transfer omits the handshake completely and always goes
1257 + * via the message channel, if bulk auxiliary is being sent on that service. (The user
1258 + * can guarantee this by enabling unaligned transmits).
1259 + * Not API. */
1260 +#ifndef VCHI_MIN_BULK_SIZE
1261 +# define VCHI_MIN_BULK_SIZE ( VCHI_MAX_MSG_SIZE / 2 < 4096 ? VCHI_MAX_MSG_SIZE / 2 : 4096 )
1262 +#endif
1263 +
1264 +/* Maximum size of bulk transmission chunks, for each interface type. A trade-off between
1265 + * speed and latency; the smaller the chunk size the better change of messages and other
1266 + * bulk transmissions getting in when big bulk transfers are happening. Set to 0 to not
1267 + * break transmissions into chunks.
1268 + */
1269 +#ifndef VCHI_MAX_BULK_CHUNK_SIZE_MPHI
1270 +# define VCHI_MAX_BULK_CHUNK_SIZE_MPHI (16 * 1024)
1271 +#endif
1272 +
1273 +/* NB Chunked CCP2 transmissions violate the letter of the CCP2 spec by using "JPEG8" mode
1274 + * with multiple-line frames. Only use if the receiver can cope. */
1275 +#ifndef VCHI_MAX_BULK_CHUNK_SIZE_CCP2
1276 +# define VCHI_MAX_BULK_CHUNK_SIZE_CCP2 0
1277 +#endif
1278 +
1279 +/* How many TX messages can we have pending in our transmit slots. Once exhausted,
1280 + * vchi_msg_queue will be blocked. */
1281 +#ifndef VCHI_TX_MSG_QUEUE_SIZE
1282 +# define VCHI_TX_MSG_QUEUE_SIZE 256
1283 +#endif
1284 +
1285 +/* How many RX messages can we have parsed in the receive slots. Once exhausted, parsing
1286 + * will be suspended until older messages are dequeued/released. */
1287 +#ifndef VCHI_RX_MSG_QUEUE_SIZE
1288 +# define VCHI_RX_MSG_QUEUE_SIZE 256
1289 +#endif
1290 +
1291 +/* Really should be able to cope if we run out of received message descriptors, by
1292 + * suspending parsing as the comment above says, but we don't. This sweeps the issue
1293 + * under the carpet. */
1294 +#if VCHI_RX_MSG_QUEUE_SIZE < (VCHI_MAX_MSG_SIZE/16 + 1) * VCHI_NUM_READ_SLOTS
1295 +# undef VCHI_RX_MSG_QUEUE_SIZE
1296 +# define VCHI_RX_MSG_QUEUE_SIZE (VCHI_MAX_MSG_SIZE/16 + 1) * VCHI_NUM_READ_SLOTS
1297 +#endif
1298 +
1299 +/* How many bulk transmits can we have pending. Once exhausted, vchi_bulk_queue_transmit
1300 + * will be blocked. */
1301 +#ifndef VCHI_TX_BULK_QUEUE_SIZE
1302 +# define VCHI_TX_BULK_QUEUE_SIZE 64
1303 +#endif
1304 +
1305 +/* How many bulk receives can we have pending. Once exhausted, vchi_bulk_queue_receive
1306 + * will be blocked. */
1307 +#ifndef VCHI_RX_BULK_QUEUE_SIZE
1308 +# define VCHI_RX_BULK_QUEUE_SIZE 64
1309 +#endif
1310 +
1311 +/* A limit on how many outstanding bulk requests we expect the peer to give us. If
1312 + * the peer asks for more than this, VCHI will fail and assert. The number is determined
1313 + * by the peer's hardware - it's the number of outstanding requests that can be queued
1314 + * on all bulk channels. VC3's MPHI peripheral allows 16. */
1315 +#ifndef VCHI_MAX_PEER_BULK_REQUESTS
1316 +# define VCHI_MAX_PEER_BULK_REQUESTS 32
1317 +#endif
1318 +
1319 +/* Define VCHI_CCP2TX_MANUAL_POWER if the host tells us when to turn the CCP2
1320 + * transmitter on and off.
1321 + */
1322 +/*#define VCHI_CCP2TX_MANUAL_POWER*/
1323 +
1324 +#ifndef VCHI_CCP2TX_MANUAL_POWER
1325 +
1326 +/* Timeout (in milliseconds) for putting the CCP2TX interface into IDLE state. Set
1327 + * negative for no IDLE.
1328 + */
1329 +# ifndef VCHI_CCP2TX_IDLE_TIMEOUT
1330 +# define VCHI_CCP2TX_IDLE_TIMEOUT 5
1331 +# endif
1332 +
1333 +/* Timeout (in milliseconds) for putting the CCP2TX interface into OFF state. Set
1334 + * negative for no OFF.
1335 + */
1336 +# ifndef VCHI_CCP2TX_OFF_TIMEOUT
1337 +# define VCHI_CCP2TX_OFF_TIMEOUT 1000
1338 +# endif
1339 +
1340 +#endif /* VCHI_CCP2TX_MANUAL_POWER */
1341 +
1342 +#endif /* VCHI_CFG_H_ */
1343 +
1344 +/****************************** End of file **********************************/
1345 diff --git a/drivers/misc/vc04_services/interface/vchi/vchi_cfg_internal.h b/drivers/misc/vc04_services/interface/vchi/vchi_cfg_internal.h
1346 new file mode 100644
1347 index 0000000..35dcba4
1348 --- /dev/null
1349 +++ b/drivers/misc/vc04_services/interface/vchi/vchi_cfg_internal.h
1350 @@ -0,0 +1,71 @@
1351 +/**
1352 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
1353 + *
1354 + * Redistribution and use in source and binary forms, with or without
1355 + * modification, are permitted provided that the following conditions
1356 + * are met:
1357 + * 1. Redistributions of source code must retain the above copyright
1358 + * notice, this list of conditions, and the following disclaimer,
1359 + * without modification.
1360 + * 2. Redistributions in binary form must reproduce the above copyright
1361 + * notice, this list of conditions and the following disclaimer in the
1362 + * documentation and/or other materials provided with the distribution.
1363 + * 3. The names of the above-listed copyright holders may not be used
1364 + * to endorse or promote products derived from this software without
1365 + * specific prior written permission.
1366 + *
1367 + * ALTERNATIVELY, this software may be distributed under the terms of the
1368 + * GNU General Public License ("GPL") version 2, as published by the Free
1369 + * Software Foundation.
1370 + *
1371 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
1372 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
1373 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
1374 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
1375 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
1376 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
1377 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
1378 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
1379 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
1380 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1381 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1382 + */
1383 +
1384 +#ifndef VCHI_CFG_INTERNAL_H_
1385 +#define VCHI_CFG_INTERNAL_H_
1386 +
1387 +/****************************************************************************************
1388 + * Control optimisation attempts.
1389 + ***************************************************************************************/
1390 +
1391 +// Don't use lots of short-term locks - use great long ones, reducing the overall locks-per-second
1392 +#define VCHI_COARSE_LOCKING
1393 +
1394 +// Avoid lock then unlock on exit from blocking queue operations (msg tx, bulk rx/tx)
1395 +// (only relevant if VCHI_COARSE_LOCKING)
1396 +#define VCHI_ELIDE_BLOCK_EXIT_LOCK
1397 +
1398 +// Avoid lock on non-blocking peek
1399 +// (only relevant if VCHI_COARSE_LOCKING)
1400 +#define VCHI_AVOID_PEEK_LOCK
1401 +
1402 +// Use one slot-handler thread per connection, rather than 1 thread dealing with all connections in rotation.
1403 +#define VCHI_MULTIPLE_HANDLER_THREADS
1404 +
1405 +// Put free descriptors onto the head of the free queue, rather than the tail, so that we don't thrash
1406 +// our way through the pool of descriptors.
1407 +#define VCHI_PUSH_FREE_DESCRIPTORS_ONTO_HEAD
1408 +
1409 +// Don't issue a MSG_AVAILABLE callback for every single message. Possibly only safe if VCHI_COARSE_LOCKING.
1410 +#define VCHI_FEWER_MSG_AVAILABLE_CALLBACKS
1411 +
1412 +// Don't use message descriptors for TX messages that don't need them
1413 +#define VCHI_MINIMISE_TX_MSG_DESCRIPTORS
1414 +
1415 +// Nano-locks for multiqueue
1416 +//#define VCHI_MQUEUE_NANOLOCKS
1417 +
1418 +// Lock-free(er) dequeuing
1419 +//#define VCHI_RX_NANOLOCKS
1420 +
1421 +#endif /*VCHI_CFG_INTERNAL_H_*/
1422 diff --git a/drivers/misc/vc04_services/interface/vchi/vchi_common.h b/drivers/misc/vc04_services/interface/vchi/vchi_common.h
1423 new file mode 100644
1424 index 0000000..d76118c
1425 --- /dev/null
1426 +++ b/drivers/misc/vc04_services/interface/vchi/vchi_common.h
1427 @@ -0,0 +1,174 @@
1428 +/**
1429 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
1430 + *
1431 + * Redistribution and use in source and binary forms, with or without
1432 + * modification, are permitted provided that the following conditions
1433 + * are met:
1434 + * 1. Redistributions of source code must retain the above copyright
1435 + * notice, this list of conditions, and the following disclaimer,
1436 + * without modification.
1437 + * 2. Redistributions in binary form must reproduce the above copyright
1438 + * notice, this list of conditions and the following disclaimer in the
1439 + * documentation and/or other materials provided with the distribution.
1440 + * 3. The names of the above-listed copyright holders may not be used
1441 + * to endorse or promote products derived from this software without
1442 + * specific prior written permission.
1443 + *
1444 + * ALTERNATIVELY, this software may be distributed under the terms of the
1445 + * GNU General Public License ("GPL") version 2, as published by the Free
1446 + * Software Foundation.
1447 + *
1448 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
1449 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
1450 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
1451 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
1452 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
1453 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
1454 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
1455 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
1456 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
1457 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1458 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1459 + */
1460 +
1461 +#ifndef VCHI_COMMON_H_
1462 +#define VCHI_COMMON_H_
1463 +
1464 +
1465 +//flags used when sending messages (must be bitmapped)
1466 +typedef enum
1467 +{
1468 + VCHI_FLAGS_NONE = 0x0,
1469 + VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE = 0x1, // waits for message to be received, or sent (NB. not the same as being seen on other side)
1470 + VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE = 0x2, // run a callback when message sent
1471 + VCHI_FLAGS_BLOCK_UNTIL_QUEUED = 0x4, // return once the transfer is in a queue ready to go
1472 + VCHI_FLAGS_ALLOW_PARTIAL = 0x8,
1473 + VCHI_FLAGS_BLOCK_UNTIL_DATA_READ = 0x10,
1474 + VCHI_FLAGS_CALLBACK_WHEN_DATA_READ = 0x20,
1475 +
1476 + VCHI_FLAGS_ALIGN_SLOT = 0x000080, // internal use only
1477 + VCHI_FLAGS_BULK_AUX_QUEUED = 0x010000, // internal use only
1478 + VCHI_FLAGS_BULK_AUX_COMPLETE = 0x020000, // internal use only
1479 + VCHI_FLAGS_BULK_DATA_QUEUED = 0x040000, // internal use only
1480 + VCHI_FLAGS_BULK_DATA_COMPLETE = 0x080000, // internal use only
1481 + VCHI_FLAGS_INTERNAL = 0xFF0000
1482 +} VCHI_FLAGS_T;
1483 +
1484 +// constants for vchi_crc_control()
1485 +typedef enum {
1486 + VCHI_CRC_NOTHING = -1,
1487 + VCHI_CRC_PER_SERVICE = 0,
1488 + VCHI_CRC_EVERYTHING = 1,
1489 +} VCHI_CRC_CONTROL_T;
1490 +
1491 +//callback reasons when an event occurs on a service
1492 +typedef enum
1493 +{
1494 + VCHI_CALLBACK_REASON_MIN,
1495 +
1496 + //This indicates that there is data available
1497 + //handle is the msg id that was transmitted with the data
1498 + // When a message is received and there was no FULL message available previously, send callback
1499 + // Tasks get kicked by the callback, reset their event and try and read from the fifo until it fails
1500 + VCHI_CALLBACK_MSG_AVAILABLE,
1501 + VCHI_CALLBACK_MSG_SENT,
1502 + VCHI_CALLBACK_MSG_SPACE_AVAILABLE, // XXX not yet implemented
1503 +
1504 + // This indicates that a transfer from the other side has completed
1505 + VCHI_CALLBACK_BULK_RECEIVED,
1506 + //This indicates that data queued up to be sent has now gone
1507 + //handle is the msg id that was used when sending the data
1508 + VCHI_CALLBACK_BULK_SENT,
1509 + VCHI_CALLBACK_BULK_RX_SPACE_AVAILABLE, // XXX not yet implemented
1510 + VCHI_CALLBACK_BULK_TX_SPACE_AVAILABLE, // XXX not yet implemented
1511 +
1512 + VCHI_CALLBACK_SERVICE_CLOSED,
1513 +
1514 + // this side has sent XOFF to peer due to lack of data consumption by service
1515 + // (suggests the service may need to take some recovery action if it has
1516 + // been deliberately holding off consuming data)
1517 + VCHI_CALLBACK_SENT_XOFF,
1518 + VCHI_CALLBACK_SENT_XON,
1519 +
1520 + // indicates that a bulk transfer has finished reading the source buffer
1521 + VCHI_CALLBACK_BULK_DATA_READ,
1522 +
1523 + // power notification events (currently host side only)
1524 + VCHI_CALLBACK_PEER_OFF,
1525 + VCHI_CALLBACK_PEER_SUSPENDED,
1526 + VCHI_CALLBACK_PEER_ON,
1527 + VCHI_CALLBACK_PEER_RESUMED,
1528 + VCHI_CALLBACK_FORCED_POWER_OFF,
1529 +
1530 +#ifdef USE_VCHIQ_ARM
1531 + // some extra notifications provided by vchiq_arm
1532 + VCHI_CALLBACK_SERVICE_OPENED,
1533 + VCHI_CALLBACK_BULK_RECEIVE_ABORTED,
1534 + VCHI_CALLBACK_BULK_TRANSMIT_ABORTED,
1535 +#endif
1536 +
1537 + VCHI_CALLBACK_REASON_MAX
1538 +} VCHI_CALLBACK_REASON_T;
1539 +
1540 +// service control options
1541 +typedef enum
1542 +{
1543 + VCHI_SERVICE_OPTION_MIN,
1544 +
1545 + VCHI_SERVICE_OPTION_TRACE,
1546 +
1547 + VCHI_SERVICE_OPTION_MAX
1548 +} VCHI_SERVICE_OPTION_T;
1549 +
1550 +
1551 +//Callback used by all services / bulk transfers
1552 +typedef void (*VCHI_CALLBACK_T)( void *callback_param, //my service local param
1553 + VCHI_CALLBACK_REASON_T reason,
1554 + void *handle ); //for transmitting msg's only
1555 +
1556 +
1557 +
1558 +/*
1559 + * Define vector struct for scatter-gather (vector) operations
1560 + * Vectors can be nested - if a vector element has negative length, then
1561 + * the data pointer is treated as pointing to another vector array, with
1562 + * '-vec_len' elements. Thus to append a header onto an existing vector,
1563 + * you can do this:
1564 + *
1565 + * void foo(const VCHI_MSG_VECTOR_T *v, int n)
1566 + * {
1567 + * VCHI_MSG_VECTOR_T nv[2];
1568 + * nv[0].vec_base = my_header;
1569 + * nv[0].vec_len = sizeof my_header;
1570 + * nv[1].vec_base = v;
1571 + * nv[1].vec_len = -n;
1572 + * ...
1573 + *
1574 + */
1575 +typedef struct vchi_msg_vector {
1576 + const void *vec_base;
1577 + int32_t vec_len;
1578 +} VCHI_MSG_VECTOR_T;
1579 +
1580 +// Opaque type for a connection API
1581 +typedef struct opaque_vchi_connection_api_t VCHI_CONNECTION_API_T;
1582 +
1583 +// Opaque type for a message driver
1584 +typedef struct opaque_vchi_message_driver_t VCHI_MESSAGE_DRIVER_T;
1585 +
1586 +
1587 +// Iterator structure for reading ahead through received message queue. Allocated by client,
1588 +// initialised by vchi_msg_look_ahead. Fields are for internal VCHI use only.
1589 +// Iterates over messages in queue at the instant of the call to vchi_msg_look_ahead -
1590 +// will not proceed to messages received since. Behaviour is undefined if an iterator
1591 +// is used again after messages for that service are removed/dequeued by any
1592 +// means other than vchi_msg_iter_... calls on the iterator itself.
1593 +typedef struct {
1594 + struct opaque_vchi_service_t *service;
1595 + void *last;
1596 + void *next;
1597 + void *remove;
1598 +} VCHI_MSG_ITER_T;
1599 +
1600 +
1601 +#endif // VCHI_COMMON_H_
1602 diff --git a/drivers/misc/vc04_services/interface/vchi/vchi_mh.h b/drivers/misc/vc04_services/interface/vchi/vchi_mh.h
1603 new file mode 100644
1604 index 0000000..198bd07
1605 --- /dev/null
1606 +++ b/drivers/misc/vc04_services/interface/vchi/vchi_mh.h
1607 @@ -0,0 +1,42 @@
1608 +/**
1609 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
1610 + *
1611 + * Redistribution and use in source and binary forms, with or without
1612 + * modification, are permitted provided that the following conditions
1613 + * are met:
1614 + * 1. Redistributions of source code must retain the above copyright
1615 + * notice, this list of conditions, and the following disclaimer,
1616 + * without modification.
1617 + * 2. Redistributions in binary form must reproduce the above copyright
1618 + * notice, this list of conditions and the following disclaimer in the
1619 + * documentation and/or other materials provided with the distribution.
1620 + * 3. The names of the above-listed copyright holders may not be used
1621 + * to endorse or promote products derived from this software without
1622 + * specific prior written permission.
1623 + *
1624 + * ALTERNATIVELY, this software may be distributed under the terms of the
1625 + * GNU General Public License ("GPL") version 2, as published by the Free
1626 + * Software Foundation.
1627 + *
1628 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
1629 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
1630 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
1631 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
1632 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
1633 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
1634 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
1635 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
1636 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
1637 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1638 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1639 + */
1640 +
1641 +#ifndef VCHI_MH_H_
1642 +#define VCHI_MH_H_
1643 +
1644 +#include <linux/types.h>
1645 +
1646 +typedef int32_t VCHI_MEM_HANDLE_T;
1647 +#define VCHI_MEM_HANDLE_INVALID 0
1648 +
1649 +#endif
1650 diff --git a/drivers/misc/vc04_services/interface/vchiq_arm/vchiq.h b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq.h
1651 new file mode 100644
1652 index 0000000..ad398ba
1653 --- /dev/null
1654 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq.h
1655 @@ -0,0 +1,40 @@
1656 +/**
1657 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
1658 + *
1659 + * Redistribution and use in source and binary forms, with or without
1660 + * modification, are permitted provided that the following conditions
1661 + * are met:
1662 + * 1. Redistributions of source code must retain the above copyright
1663 + * notice, this list of conditions, and the following disclaimer,
1664 + * without modification.
1665 + * 2. Redistributions in binary form must reproduce the above copyright
1666 + * notice, this list of conditions and the following disclaimer in the
1667 + * documentation and/or other materials provided with the distribution.
1668 + * 3. The names of the above-listed copyright holders may not be used
1669 + * to endorse or promote products derived from this software without
1670 + * specific prior written permission.
1671 + *
1672 + * ALTERNATIVELY, this software may be distributed under the terms of the
1673 + * GNU General Public License ("GPL") version 2, as published by the Free
1674 + * Software Foundation.
1675 + *
1676 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
1677 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
1678 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
1679 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
1680 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
1681 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
1682 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
1683 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
1684 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
1685 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1686 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1687 + */
1688 +
1689 +#ifndef VCHIQ_VCHIQ_H
1690 +#define VCHIQ_VCHIQ_H
1691 +
1692 +#include "vchiq_if.h"
1693 +#include "vchiq_util.h"
1694 +
1695 +#endif
1696 diff --git a/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835.h b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835.h
1697 new file mode 100644
1698 index 0000000..7ea5c64
1699 --- /dev/null
1700 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835.h
1701 @@ -0,0 +1,42 @@
1702 +/**
1703 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
1704 + *
1705 + * Redistribution and use in source and binary forms, with or without
1706 + * modification, are permitted provided that the following conditions
1707 + * are met:
1708 + * 1. Redistributions of source code must retain the above copyright
1709 + * notice, this list of conditions, and the following disclaimer,
1710 + * without modification.
1711 + * 2. Redistributions in binary form must reproduce the above copyright
1712 + * notice, this list of conditions and the following disclaimer in the
1713 + * documentation and/or other materials provided with the distribution.
1714 + * 3. The names of the above-listed copyright holders may not be used
1715 + * to endorse or promote products derived from this software without
1716 + * specific prior written permission.
1717 + *
1718 + * ALTERNATIVELY, this software may be distributed under the terms of the
1719 + * GNU General Public License ("GPL") version 2, as published by the Free
1720 + * Software Foundation.
1721 + *
1722 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
1723 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
1724 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
1725 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
1726 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
1727 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
1728 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
1729 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
1730 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
1731 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1732 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1733 + */
1734 +
1735 +#ifndef VCHIQ_2835_H
1736 +#define VCHIQ_2835_H
1737 +
1738 +#include "vchiq_pagelist.h"
1739 +
1740 +#define VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX 0
1741 +#define VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX 1
1742 +
1743 +#endif /* VCHIQ_2835_H */
1744 diff --git a/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
1745 new file mode 100644
1746 index 0000000..b3bdaa2
1747 --- /dev/null
1748 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
1749 @@ -0,0 +1,561 @@
1750 +/**
1751 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
1752 + *
1753 + * Redistribution and use in source and binary forms, with or without
1754 + * modification, are permitted provided that the following conditions
1755 + * are met:
1756 + * 1. Redistributions of source code must retain the above copyright
1757 + * notice, this list of conditions, and the following disclaimer,
1758 + * without modification.
1759 + * 2. Redistributions in binary form must reproduce the above copyright
1760 + * notice, this list of conditions and the following disclaimer in the
1761 + * documentation and/or other materials provided with the distribution.
1762 + * 3. The names of the above-listed copyright holders may not be used
1763 + * to endorse or promote products derived from this software without
1764 + * specific prior written permission.
1765 + *
1766 + * ALTERNATIVELY, this software may be distributed under the terms of the
1767 + * GNU General Public License ("GPL") version 2, as published by the Free
1768 + * Software Foundation.
1769 + *
1770 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
1771 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
1772 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
1773 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
1774 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
1775 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
1776 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
1777 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
1778 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
1779 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1780 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1781 + */
1782 +
1783 +#include <linux/kernel.h>
1784 +#include <linux/types.h>
1785 +#include <linux/errno.h>
1786 +#include <linux/interrupt.h>
1787 +#include <linux/irq.h>
1788 +#include <linux/pagemap.h>
1789 +#include <linux/dma-mapping.h>
1790 +#include <linux/version.h>
1791 +#include <linux/io.h>
1792 +#include <linux/uaccess.h>
1793 +#include <asm/pgtable.h>
1794 +
1795 +#include <mach/irqs.h>
1796 +
1797 +#include <mach/platform.h>
1798 +#include <mach/vcio.h>
1799 +
1800 +#define TOTAL_SLOTS (VCHIQ_SLOT_ZERO_SLOTS + 2 * 32)
1801 +
1802 +#define VCHIQ_DOORBELL_IRQ IRQ_ARM_DOORBELL_0
1803 +#define VCHIQ_ARM_ADDRESS(x) ((void *)__virt_to_bus((unsigned)x))
1804 +
1805 +#include "vchiq_arm.h"
1806 +#include "vchiq_2835.h"
1807 +#include "vchiq_connected.h"
1808 +
1809 +#define MAX_FRAGMENTS (VCHIQ_NUM_CURRENT_BULKS * 2)
1810 +
1811 +typedef struct vchiq_2835_state_struct {
1812 + int inited;
1813 + VCHIQ_ARM_STATE_T arm_state;
1814 +} VCHIQ_2835_ARM_STATE_T;
1815 +
1816 +static char *g_slot_mem;
1817 +static int g_slot_mem_size;
1818 +dma_addr_t g_slot_phys;
1819 +static FRAGMENTS_T *g_fragments_base;
1820 +static FRAGMENTS_T *g_free_fragments;
1821 +struct semaphore g_free_fragments_sema;
1822 +
1823 +extern int vchiq_arm_log_level;
1824 +
1825 +static DEFINE_SEMAPHORE(g_free_fragments_mutex);
1826 +
1827 +static irqreturn_t
1828 +vchiq_doorbell_irq(int irq, void *dev_id);
1829 +
1830 +static int
1831 +create_pagelist(char __user *buf, size_t count, unsigned short type,
1832 + struct task_struct *task, PAGELIST_T ** ppagelist);
1833 +
1834 +static void
1835 +free_pagelist(PAGELIST_T *pagelist, int actual);
1836 +
1837 +int __init
1838 +vchiq_platform_init(VCHIQ_STATE_T *state)
1839 +{
1840 + VCHIQ_SLOT_ZERO_T *vchiq_slot_zero;
1841 + int frag_mem_size;
1842 + int err;
1843 + int i;
1844 +
1845 + /* Allocate space for the channels in coherent memory */
1846 + g_slot_mem_size = PAGE_ALIGN(TOTAL_SLOTS * VCHIQ_SLOT_SIZE);
1847 + frag_mem_size = PAGE_ALIGN(sizeof(FRAGMENTS_T) * MAX_FRAGMENTS);
1848 +
1849 + g_slot_mem = dma_alloc_coherent(NULL, g_slot_mem_size + frag_mem_size,
1850 + &g_slot_phys, GFP_ATOMIC);
1851 +
1852 + if (!g_slot_mem) {
1853 + vchiq_log_error(vchiq_arm_log_level,
1854 + "Unable to allocate channel memory");
1855 + err = -ENOMEM;
1856 + goto failed_alloc;
1857 + }
1858 +
1859 + WARN_ON(((int)g_slot_mem & (PAGE_SIZE - 1)) != 0);
1860 +
1861 + vchiq_slot_zero = vchiq_init_slots(g_slot_mem, g_slot_mem_size);
1862 + if (!vchiq_slot_zero) {
1863 + err = -EINVAL;
1864 + goto failed_init_slots;
1865 + }
1866 +
1867 + vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX] =
1868 + (int)g_slot_phys + g_slot_mem_size;
1869 + vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX] =
1870 + MAX_FRAGMENTS;
1871 +
1872 + g_fragments_base = (FRAGMENTS_T *)(g_slot_mem + g_slot_mem_size);
1873 + g_slot_mem_size += frag_mem_size;
1874 +
1875 + g_free_fragments = g_fragments_base;
1876 + for (i = 0; i < (MAX_FRAGMENTS - 1); i++) {
1877 + *(FRAGMENTS_T **)&g_fragments_base[i] =
1878 + &g_fragments_base[i + 1];
1879 + }
1880 + *(FRAGMENTS_T **)&g_fragments_base[i] = NULL;
1881 + sema_init(&g_free_fragments_sema, MAX_FRAGMENTS);
1882 +
1883 + if (vchiq_init_state(state, vchiq_slot_zero, 0/*slave*/) !=
1884 + VCHIQ_SUCCESS) {
1885 + err = -EINVAL;
1886 + goto failed_vchiq_init;
1887 + }
1888 +
1889 + err = request_irq(VCHIQ_DOORBELL_IRQ, vchiq_doorbell_irq,
1890 + IRQF_IRQPOLL, "VCHIQ doorbell",
1891 + state);
1892 + if (err < 0) {
1893 + vchiq_log_error(vchiq_arm_log_level, "%s: failed to register "
1894 + "irq=%d err=%d", __func__,
1895 + VCHIQ_DOORBELL_IRQ, err);
1896 + goto failed_request_irq;
1897 + }
1898 +
1899 + /* Send the base address of the slots to VideoCore */
1900 +
1901 + dsb(); /* Ensure all writes have completed */
1902 +
1903 + bcm_mailbox_write(MBOX_CHAN_VCHIQ, (unsigned int)g_slot_phys);
1904 +
1905 + vchiq_log_info(vchiq_arm_log_level,
1906 + "vchiq_init - done (slots %x, phys %x)",
1907 + (unsigned int)vchiq_slot_zero, g_slot_phys);
1908 +
1909 + vchiq_call_connected_callbacks();
1910 +
1911 + return 0;
1912 +
1913 +failed_request_irq:
1914 +failed_vchiq_init:
1915 +	g_slot_mem_size -= frag_mem_size; /* undo the post-init_slots accounting */
1916 +failed_init_slots:
1917 +	dma_free_coherent(NULL, g_slot_mem_size + frag_mem_size, g_slot_mem, g_slot_phys);
1918 +failed_alloc:
1919 +	return err;
1920 +}
1921 +
1922 +void __exit
1923 +vchiq_platform_exit(VCHIQ_STATE_T *state)
1924 +{
1925 + free_irq(VCHIQ_DOORBELL_IRQ, state);
1926 + dma_free_coherent(NULL, g_slot_mem_size,
1927 + g_slot_mem, g_slot_phys);
1928 +}
1929 +
1930 +
1931 +VCHIQ_STATUS_T
1932 +vchiq_platform_init_state(VCHIQ_STATE_T *state)
1933 +{
1934 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
1935 + state->platform_state = kzalloc(sizeof(VCHIQ_2835_ARM_STATE_T), GFP_KERNEL);
1936 + ((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->inited = 1;
1937 + status = vchiq_arm_init_state(state, &((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->arm_state);
1938 + if(status != VCHIQ_SUCCESS)
1939 + {
1940 + ((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->inited = 0;
1941 + }
1942 + return status;
1943 +}
1944 +
1945 +VCHIQ_ARM_STATE_T*
1946 +vchiq_platform_get_arm_state(VCHIQ_STATE_T *state)
1947 +{
1948 + if(!((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->inited)
1949 + {
1950 + BUG();
1951 + }
1952 + return &((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->arm_state;
1953 +}
1954 +
1955 +void
1956 +remote_event_signal(REMOTE_EVENT_T *event)
1957 +{
1958 + wmb();
1959 +
1960 + event->fired = 1;
1961 +
1962 + dsb(); /* data barrier operation */
1963 +
1964 + if (event->armed) {
1965 + /* trigger vc interrupt */
1966 +
1967 + writel(0, __io_address(ARM_0_BELL2));
1968 + }
1969 +}
1970 +
1971 +int
1972 +vchiq_copy_from_user(void *dst, const void *src, int size)
1973 +{
1974 + if ((uint32_t)src < TASK_SIZE) {
1975 + return copy_from_user(dst, src, size);
1976 + } else {
1977 + memcpy(dst, src, size);
1978 + return 0;
1979 + }
1980 +}
1981 +
1982 +VCHIQ_STATUS_T
1983 +vchiq_prepare_bulk_data(VCHIQ_BULK_T *bulk, VCHI_MEM_HANDLE_T memhandle,
1984 + void *offset, int size, int dir)
1985 +{
1986 + PAGELIST_T *pagelist;
1987 + int ret;
1988 +
1989 + WARN_ON(memhandle != VCHI_MEM_HANDLE_INVALID);
1990 +
1991 + ret = create_pagelist((char __user *)offset, size,
1992 + (dir == VCHIQ_BULK_RECEIVE)
1993 + ? PAGELIST_READ
1994 + : PAGELIST_WRITE,
1995 + current,
1996 + &pagelist);
1997 + if (ret != 0)
1998 + return VCHIQ_ERROR;
1999 +
2000 + bulk->handle = memhandle;
2001 + bulk->data = VCHIQ_ARM_ADDRESS(pagelist);
2002 +
2003 + /* Store the pagelist address in remote_data, which isn't used by the
2004 + slave. */
2005 + bulk->remote_data = pagelist;
2006 +
2007 + return VCHIQ_SUCCESS;
2008 +}
2009 +
2010 +void
2011 +vchiq_complete_bulk(VCHIQ_BULK_T *bulk)
2012 +{
2013 + if (bulk && bulk->remote_data && bulk->actual)
2014 + free_pagelist((PAGELIST_T *)bulk->remote_data, bulk->actual);
2015 +}
2016 +
2017 +void
2018 +vchiq_transfer_bulk(VCHIQ_BULK_T *bulk)
2019 +{
2020 + /*
2021 + * This should only be called on the master (VideoCore) side, but
2022 + * provide an implementation to avoid the need for ifdefery.
2023 + */
2024 + BUG();
2025 +}
2026 +
2027 +void
2028 +vchiq_dump_platform_state(void *dump_context)
2029 +{
2030 + char buf[80];
2031 + int len;
2032 + len = snprintf(buf, sizeof(buf),
2033 + " Platform: 2835 (VC master)");
2034 + vchiq_dump(dump_context, buf, len + 1);
2035 +}
2036 +
2037 +VCHIQ_STATUS_T
2038 +vchiq_platform_suspend(VCHIQ_STATE_T *state)
2039 +{
2040 + return VCHIQ_ERROR;
2041 +}
2042 +
2043 +VCHIQ_STATUS_T
2044 +vchiq_platform_resume(VCHIQ_STATE_T *state)
2045 +{
2046 + return VCHIQ_SUCCESS;
2047 +}
2048 +
2049 +void
2050 +vchiq_platform_paused(VCHIQ_STATE_T *state)
2051 +{
2052 +}
2053 +
2054 +void
2055 +vchiq_platform_resumed(VCHIQ_STATE_T *state)
2056 +{
2057 +}
2058 +
2059 +int
2060 +vchiq_platform_videocore_wanted(VCHIQ_STATE_T* state)
2061 +{
2062 + return 1; // autosuspend not supported - videocore always wanted
2063 +}
2064 +
2065 +int
2066 +vchiq_platform_use_suspend_timer(void)
2067 +{
2068 + return 0;
2069 +}
2070 +void
2071 +vchiq_dump_platform_use_state(VCHIQ_STATE_T *state)
2072 +{
2073 +	vchiq_log_info(vchiq_arm_log_level, "Suspend timer not in use"); /* category arg, not a bool */
2074 +}
2075 +void
2076 +vchiq_platform_handle_timeout(VCHIQ_STATE_T *state)
2077 +{
2078 + (void)state;
2079 +}
2080 +/*
2081 + * Local functions
2082 + */
2083 +
2084 +static irqreturn_t
2085 +vchiq_doorbell_irq(int irq, void *dev_id)
2086 +{
2087 + VCHIQ_STATE_T *state = dev_id;
2088 + irqreturn_t ret = IRQ_NONE;
2089 + unsigned int status;
2090 +
2091 + /* Read (and clear) the doorbell */
2092 + status = readl(__io_address(ARM_0_BELL0));
2093 +
2094 + if (status & 0x4) { /* Was the doorbell rung? */
2095 + remote_event_pollall(state);
2096 + ret = IRQ_HANDLED;
2097 + }
2098 +
2099 + return ret;
2100 +}
2101 +
2102 +/* There is a potential problem with partial cache lines (pages?)
2103 +** at the ends of the block when reading. If the CPU accessed anything in
2104 +** the same line (page?) then it may have pulled old data into the cache,
2105 +** obscuring the new data underneath. We can solve this by transferring the
2106 +** partial cache lines separately, and allowing the ARM to copy into the
2107 +** cached area.
2108 +
2109 +** N.B. This implementation plays slightly fast and loose with the Linux
2110 +** driver programming rules, e.g. its use of __virt_to_bus instead of
2111 +** dma_map_single, but it isn't a multi-platform driver and it benefits
2112 +** from increased speed as a result.
2113 +*/
2114 +
2115 +static int
2116 +create_pagelist(char __user *buf, size_t count, unsigned short type,
2117 + struct task_struct *task, PAGELIST_T ** ppagelist)
2118 +{
2119 + PAGELIST_T *pagelist;
2120 + struct page **pages;
2121 + struct page *page;
2122 + unsigned long *addrs;
2123 + unsigned int num_pages, offset, i;
2124 + char *addr, *base_addr, *next_addr;
2125 + int run, addridx, actual_pages;
2126 + unsigned long *need_release;
2127 +
2128 + offset = (unsigned int)buf & (PAGE_SIZE - 1);
2129 + num_pages = (count + offset + PAGE_SIZE - 1) / PAGE_SIZE;
2130 +
2131 + *ppagelist = NULL;
2132 +
2133 + /* Allocate enough storage to hold the page pointers and the page
2134 + ** list
2135 + */
2136 + pagelist = kmalloc(sizeof(PAGELIST_T) +
2137 + (num_pages * sizeof(unsigned long)) +
2138 + sizeof(unsigned long) +
2139 + (num_pages * sizeof(pages[0])),
2140 + GFP_KERNEL);
2141 +
2142 + vchiq_log_trace(vchiq_arm_log_level,
2143 + "create_pagelist - %x", (unsigned int)pagelist);
2144 + if (!pagelist)
2145 + return -ENOMEM;
2146 +
2147 + addrs = pagelist->addrs;
2148 + need_release = (unsigned long *)(addrs + num_pages);
2149 + pages = (struct page **)(addrs + num_pages + 1);
2150 +
2151 + if (is_vmalloc_addr(buf)) {
2152 + for (actual_pages = 0; actual_pages < num_pages; actual_pages++) {
2153 + pages[actual_pages] = vmalloc_to_page(buf + (actual_pages * PAGE_SIZE));
2154 + }
2155 + *need_release = 0; /* do not try and release vmalloc pages */
2156 + } else {
2157 + down_read(&task->mm->mmap_sem);
2158 + actual_pages = get_user_pages(task, task->mm,
2159 + (unsigned long)buf & ~(PAGE_SIZE - 1),
2160 + num_pages,
2161 + (type == PAGELIST_READ) /*Write */ ,
2162 + 0 /*Force */ ,
2163 + pages,
2164 + NULL /*vmas */);
2165 + up_read(&task->mm->mmap_sem);
2166 +
2167 + if (actual_pages != num_pages) {
2168 + vchiq_log_info(vchiq_arm_log_level,
2169 + "create_pagelist - only %d/%d pages locked",
2170 + actual_pages,
2171 + num_pages);
2172 +
2173 + /* This is probably due to the process being killed */
2174 + while (actual_pages > 0)
2175 + {
2176 + actual_pages--;
2177 + page_cache_release(pages[actual_pages]);
2178 + }
2179 + kfree(pagelist);
2180 + if (actual_pages == 0)
2181 + actual_pages = -ENOMEM;
2182 + return actual_pages;
2183 + }
2184 + *need_release = 1; /* release user pages */
2185 + }
2186 +
2187 + pagelist->length = count;
2188 + pagelist->type = type;
2189 + pagelist->offset = offset;
2190 +
2191 + /* Group the pages into runs of contiguous pages */
2192 +
2193 + base_addr = VCHIQ_ARM_ADDRESS(page_address(pages[0]));
2194 + next_addr = base_addr + PAGE_SIZE;
2195 + addridx = 0;
2196 + run = 0;
2197 +
2198 + for (i = 1; i < num_pages; i++) {
2199 + addr = VCHIQ_ARM_ADDRESS(page_address(pages[i]));
2200 + if ((addr == next_addr) && (run < (PAGE_SIZE - 1))) {
2201 + next_addr += PAGE_SIZE;
2202 + run++;
2203 + } else {
2204 + addrs[addridx] = (unsigned long)base_addr + run;
2205 + addridx++;
2206 + base_addr = addr;
2207 + next_addr = addr + PAGE_SIZE;
2208 + run = 0;
2209 + }
2210 + }
2211 +
2212 + addrs[addridx] = (unsigned long)base_addr + run;
2213 + addridx++;
2214 +
2215 + /* Partial cache lines (fragments) require special measures */
2216 + if ((type == PAGELIST_READ) &&
2217 + ((pagelist->offset & (CACHE_LINE_SIZE - 1)) ||
2218 + ((pagelist->offset + pagelist->length) &
2219 + (CACHE_LINE_SIZE - 1)))) {
2220 + FRAGMENTS_T *fragments;
2221 +
2222 + if (down_interruptible(&g_free_fragments_sema) != 0) {
2223 + kfree(pagelist);
2224 + return -EINTR;
2225 + }
2226 +
2227 + WARN_ON(g_free_fragments == NULL);
2228 +
2229 + down(&g_free_fragments_mutex);
2230 + fragments = (FRAGMENTS_T *) g_free_fragments;
2231 + WARN_ON(fragments == NULL);
2232 + g_free_fragments = *(FRAGMENTS_T **) g_free_fragments;
2233 + up(&g_free_fragments_mutex);
2234 + pagelist->type =
2235 + PAGELIST_READ_WITH_FRAGMENTS + (fragments -
2236 + g_fragments_base);
2237 + }
2238 +
2239 + for (page = virt_to_page(pagelist);
2240 + page <= virt_to_page(addrs + num_pages - 1); page++) {
2241 + flush_dcache_page(page);
2242 + }
2243 +
2244 + *ppagelist = pagelist;
2245 +
2246 + return 0;
2247 +}
2248 +
2249 +static void
2250 +free_pagelist(PAGELIST_T *pagelist, int actual)
2251 +{
2252 + unsigned long *need_release;
2253 + struct page **pages;
2254 + unsigned int num_pages, i;
2255 +
2256 + vchiq_log_trace(vchiq_arm_log_level,
2257 + "free_pagelist - %x, %d", (unsigned int)pagelist, actual);
2258 +
2259 + num_pages =
2260 + (pagelist->length + pagelist->offset + PAGE_SIZE - 1) /
2261 + PAGE_SIZE;
2262 +
2263 + need_release = (unsigned long *)(pagelist->addrs + num_pages);
2264 + pages = (struct page **)(pagelist->addrs + num_pages + 1);
2265 +
2266 + /* Deal with any partial cache lines (fragments) */
2267 + if (pagelist->type >= PAGELIST_READ_WITH_FRAGMENTS) {
2268 + FRAGMENTS_T *fragments = g_fragments_base +
2269 + (pagelist->type - PAGELIST_READ_WITH_FRAGMENTS);
2270 + int head_bytes, tail_bytes;
2271 + head_bytes = (CACHE_LINE_SIZE - pagelist->offset) &
2272 + (CACHE_LINE_SIZE - 1);
2273 + tail_bytes = (pagelist->offset + actual) &
2274 + (CACHE_LINE_SIZE - 1);
2275 +
2276 + if ((actual >= 0) && (head_bytes != 0)) {
2277 + if (head_bytes > actual)
2278 + head_bytes = actual;
2279 +
2280 + memcpy((char *)page_address(pages[0]) +
2281 + pagelist->offset,
2282 + fragments->headbuf,
2283 + head_bytes);
2284 + }
2285 + if ((actual >= 0) && (head_bytes < actual) &&
2286 + (tail_bytes != 0)) {
2287 + memcpy((char *)page_address(pages[num_pages - 1]) +
2288 + ((pagelist->offset + actual) &
2289 + (PAGE_SIZE - 1) & ~(CACHE_LINE_SIZE - 1)),
2290 + fragments->tailbuf, tail_bytes);
2291 + }
2292 +
2293 + down(&g_free_fragments_mutex);
2294 + *(FRAGMENTS_T **) fragments = g_free_fragments;
2295 + g_free_fragments = fragments;
2296 + up(&g_free_fragments_mutex);
2297 + up(&g_free_fragments_sema);
2298 + }
2299 +
2300 + if (*need_release) {
2301 + for (i = 0; i < num_pages; i++) {
2302 + if (pagelist->type != PAGELIST_WRITE)
2303 + set_page_dirty(pages[i]);
2304 +
2305 + page_cache_release(pages[i]);
2306 + }
2307 + }
2308 +
2309 + kfree(pagelist);
2310 +}
2311 diff --git a/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.c
2312 new file mode 100644
2313 index 0000000..2596818
2314 --- /dev/null
2315 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.c
2316 @@ -0,0 +1,2883 @@
2317 +/**
2318 + * Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved.
2319 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
2320 + *
2321 + * Redistribution and use in source and binary forms, with or without
2322 + * modification, are permitted provided that the following conditions
2323 + * are met:
2324 + * 1. Redistributions of source code must retain the above copyright
2325 + * notice, this list of conditions, and the following disclaimer,
2326 + * without modification.
2327 + * 2. Redistributions in binary form must reproduce the above copyright
2328 + * notice, this list of conditions and the following disclaimer in the
2329 + * documentation and/or other materials provided with the distribution.
2330 + * 3. The names of the above-listed copyright holders may not be used
2331 + * to endorse or promote products derived from this software without
2332 + * specific prior written permission.
2333 + *
2334 + * ALTERNATIVELY, this software may be distributed under the terms of the
2335 + * GNU General Public License ("GPL") version 2, as published by the Free
2336 + * Software Foundation.
2337 + *
2338 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
2339 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
2340 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
2341 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
2342 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
2343 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
2344 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
2345 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
2346 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
2347 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
2348 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
2349 + */
2350 +
2351 +#include <linux/kernel.h>
2352 +#include <linux/module.h>
2353 +#include <linux/types.h>
2354 +#include <linux/errno.h>
2355 +#include <linux/cdev.h>
2356 +#include <linux/fs.h>
2357 +#include <linux/device.h>
2358 +#include <linux/mm.h>
2359 +#include <linux/highmem.h>
2360 +#include <linux/pagemap.h>
2361 +#include <linux/bug.h>
2362 +#include <linux/semaphore.h>
2363 +#include <linux/list.h>
2364 +
2365 +#include "vchiq_core.h"
2366 +#include "vchiq_ioctl.h"
2367 +#include "vchiq_arm.h"
2368 +#include "vchiq_debugfs.h"
2369 +
2370 +#define DEVICE_NAME "vchiq"
2371 +
2372 +/* Override the default prefix, which would be vchiq_arm (from the filename) */
2373 +#undef MODULE_PARAM_PREFIX
2374 +#define MODULE_PARAM_PREFIX DEVICE_NAME "."
2375 +
2376 +#define VCHIQ_MINOR 0
2377 +
2378 +/* Some per-instance constants */
2379 +#define MAX_COMPLETIONS 16
2380 +#define MAX_SERVICES 64
2381 +#define MAX_ELEMENTS 8
2382 +#define MSG_QUEUE_SIZE 64
2383 +
2384 +#define KEEPALIVE_VER 1
2385 +#define KEEPALIVE_VER_MIN KEEPALIVE_VER
2386 +
2387 +/* Run time control of log level, based on KERN_XXX level. */
2388 +int vchiq_arm_log_level = VCHIQ_LOG_DEFAULT;
2389 +int vchiq_susp_log_level = VCHIQ_LOG_ERROR;
2390 +
2391 +#define SUSPEND_TIMER_TIMEOUT_MS 100
2392 +#define SUSPEND_RETRY_TIMER_TIMEOUT_MS 1000
2393 +
2394 +#define VC_SUSPEND_NUM_OFFSET 3 /* number of values before idle which are -ve */
2395 +static const char *const suspend_state_names[] = {
2396 + "VC_SUSPEND_FORCE_CANCELED",
2397 + "VC_SUSPEND_REJECTED",
2398 + "VC_SUSPEND_FAILED",
2399 + "VC_SUSPEND_IDLE",
2400 + "VC_SUSPEND_REQUESTED",
2401 + "VC_SUSPEND_IN_PROGRESS",
2402 + "VC_SUSPEND_SUSPENDED"
2403 +};
2404 +#define VC_RESUME_NUM_OFFSET 1 /* number of values before idle which are -ve */
2405 +static const char *const resume_state_names[] = {
2406 + "VC_RESUME_FAILED",
2407 + "VC_RESUME_IDLE",
2408 + "VC_RESUME_REQUESTED",
2409 + "VC_RESUME_IN_PROGRESS",
2410 + "VC_RESUME_RESUMED"
2411 +};
2412 +/* The number of times we allow force suspend to timeout before actually
2413 +** _forcing_ suspend. This is to cater for SW which fails to release vchiq
2414 +** correctly - we don't want to prevent ARM suspend indefinitely in this case.
2415 +*/
2416 +#define FORCE_SUSPEND_FAIL_MAX 8
2417 +
2418 +/* The time in ms allowed for videocore to go idle when force suspend has been
2419 + * requested */
2420 +#define FORCE_SUSPEND_TIMEOUT_MS 200
2421 +
2422 +
2423 +static void suspend_timer_callback(unsigned long context);
2424 +
2425 +
2426 +typedef struct user_service_struct {
2427 + VCHIQ_SERVICE_T *service;
2428 + void *userdata;
2429 + VCHIQ_INSTANCE_T instance;
2430 + char is_vchi;
2431 + char dequeue_pending;
2432 + char close_pending;
2433 + int message_available_pos;
2434 + int msg_insert;
2435 + int msg_remove;
2436 + struct semaphore insert_event;
2437 + struct semaphore remove_event;
2438 + struct semaphore close_event;
2439 + VCHIQ_HEADER_T * msg_queue[MSG_QUEUE_SIZE];
2440 +} USER_SERVICE_T;
2441 +
2442 +struct bulk_waiter_node {
2443 + struct bulk_waiter bulk_waiter;
2444 + int pid;
2445 + struct list_head list;
2446 +};
2447 +
2448 +struct vchiq_instance_struct {
2449 + VCHIQ_STATE_T *state;
2450 + VCHIQ_COMPLETION_DATA_T completions[MAX_COMPLETIONS];
2451 + int completion_insert;
2452 + int completion_remove;
2453 + struct semaphore insert_event;
2454 + struct semaphore remove_event;
2455 + struct mutex completion_mutex;
2456 +
2457 + int connected;
2458 + int closing;
2459 + int pid;
2460 + int mark;
2461 + int use_close_delivered;
2462 + int trace;
2463 +
2464 + struct list_head bulk_waiter_list;
2465 + struct mutex bulk_waiter_list_mutex;
2466 +
2467 + VCHIQ_DEBUGFS_NODE_T debugfs_node;
2468 +};
2469 +
2470 +typedef struct dump_context_struct {
2471 + char __user *buf;
2472 + size_t actual;
2473 + size_t space;
2474 + loff_t offset;
2475 +} DUMP_CONTEXT_T;
2476 +
2477 +static struct cdev vchiq_cdev;
2478 +static dev_t vchiq_devid;
2479 +static VCHIQ_STATE_T g_state;
2480 +static struct class *vchiq_class;
2481 +static struct device *vchiq_dev;
2482 +static DEFINE_SPINLOCK(msg_queue_spinlock);
2483 +
2484 +static const char *const ioctl_names[] = {
2485 + "CONNECT",
2486 + "SHUTDOWN",
2487 + "CREATE_SERVICE",
2488 + "REMOVE_SERVICE",
2489 + "QUEUE_MESSAGE",
2490 + "QUEUE_BULK_TRANSMIT",
2491 + "QUEUE_BULK_RECEIVE",
2492 + "AWAIT_COMPLETION",
2493 + "DEQUEUE_MESSAGE",
2494 + "GET_CLIENT_ID",
2495 + "GET_CONFIG",
2496 + "CLOSE_SERVICE",
2497 + "USE_SERVICE",
2498 + "RELEASE_SERVICE",
2499 + "SET_SERVICE_OPTION",
2500 + "DUMP_PHYS_MEM",
2501 + "LIB_VERSION",
2502 + "CLOSE_DELIVERED"
2503 +};
2504 +
2505 +vchiq_static_assert((sizeof(ioctl_names)/sizeof(ioctl_names[0])) ==
2506 + (VCHIQ_IOC_MAX + 1));
2507 +
2508 +static void
2509 +dump_phys_mem(void *virt_addr, uint32_t num_bytes);
2510 +
2511 +/****************************************************************************
2512 +*
2513 +* add_completion
2514 +*
2515 +***************************************************************************/
2516 +
2517 +static VCHIQ_STATUS_T
2518 +add_completion(VCHIQ_INSTANCE_T instance, VCHIQ_REASON_T reason,
2519 + VCHIQ_HEADER_T *header, USER_SERVICE_T *user_service,
2520 + void *bulk_userdata)
2521 +{
2522 + VCHIQ_COMPLETION_DATA_T *completion;
2523 + DEBUG_INITIALISE(g_state.local)
2524 +
2525 + while (instance->completion_insert ==
2526 + (instance->completion_remove + MAX_COMPLETIONS)) {
2527 + /* Out of space - wait for the client */
2528 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2529 + vchiq_log_trace(vchiq_arm_log_level,
2530 + "add_completion - completion queue full");
2531 + DEBUG_COUNT(COMPLETION_QUEUE_FULL_COUNT);
2532 + if (down_interruptible(&instance->remove_event) != 0) {
2533 + vchiq_log_info(vchiq_arm_log_level,
2534 + "service_callback interrupted");
2535 + return VCHIQ_RETRY;
2536 + } else if (instance->closing) {
2537 + vchiq_log_info(vchiq_arm_log_level,
2538 + "service_callback closing");
2539 + return VCHIQ_ERROR;
2540 + }
2541 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2542 + }
2543 +
2544 + completion =
2545 + &instance->completions[instance->completion_insert &
2546 + (MAX_COMPLETIONS - 1)];
2547 +
2548 + completion->header = header;
2549 + completion->reason = reason;
2550 + /* N.B. service_userdata is updated while processing AWAIT_COMPLETION */
2551 + completion->service_userdata = user_service->service;
2552 + completion->bulk_userdata = bulk_userdata;
2553 +
2554 + if (reason == VCHIQ_SERVICE_CLOSED) {
2555 + /* Take an extra reference, to be held until
2556 + this CLOSED notification is delivered. */
2557 + lock_service(user_service->service);
2558 + if (instance->use_close_delivered)
2559 + user_service->close_pending = 1;
2560 + }
2561 +
2562 + /* A write barrier is needed here to ensure that the entire completion
2563 + record is written out before the insert point. */
2564 + wmb();
2565 +
2566 + if (reason == VCHIQ_MESSAGE_AVAILABLE)
2567 + user_service->message_available_pos =
2568 + instance->completion_insert;
2569 + instance->completion_insert++;
2570 +
2571 + up(&instance->insert_event);
2572 +
2573 + return VCHIQ_SUCCESS;
2574 +}
2575 +
2576 +/****************************************************************************
2577 +*
2578 +* service_callback
2579 +*
2580 +***************************************************************************/
2581 +
2582 +static VCHIQ_STATUS_T
2583 +service_callback(VCHIQ_REASON_T reason, VCHIQ_HEADER_T *header,
2584 + VCHIQ_SERVICE_HANDLE_T handle, void *bulk_userdata)
2585 +{
2586 + /* How do we ensure the callback goes to the right client?
2587 + ** The service_user data points to a USER_SERVICE_T record containing
2588 + ** the original callback and the user state structure, which contains a
2589 + ** circular buffer for completion records.
2590 + */
2591 + USER_SERVICE_T *user_service;
2592 + VCHIQ_SERVICE_T *service;
2593 + VCHIQ_INSTANCE_T instance;
2594 + DEBUG_INITIALISE(g_state.local)
2595 +
2596 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2597 +
2598 + service = handle_to_service(handle);
2599 + BUG_ON(!service);
2600 + user_service = (USER_SERVICE_T *)service->base.userdata;
2601 + instance = user_service->instance;
2602 +
2603 + if (!instance || instance->closing)
2604 + return VCHIQ_SUCCESS;
2605 +
2606 + vchiq_log_trace(vchiq_arm_log_level,
2607 + "service_callback - service %lx(%d,%p), reason %d, header %lx, "
2608 + "instance %lx, bulk_userdata %lx",
2609 + (unsigned long)user_service,
2610 + service->localport, user_service->userdata,
2611 + reason, (unsigned long)header,
2612 + (unsigned long)instance, (unsigned long)bulk_userdata);
2613 +
2614 + if (header && user_service->is_vchi) {
2615 + spin_lock(&msg_queue_spinlock);
2616 + while (user_service->msg_insert ==
2617 + (user_service->msg_remove + MSG_QUEUE_SIZE)) {
2618 + spin_unlock(&msg_queue_spinlock);
2619 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2620 + DEBUG_COUNT(MSG_QUEUE_FULL_COUNT);
2621 + vchiq_log_trace(vchiq_arm_log_level,
2622 + "service_callback - msg queue full");
2623 + /* If there is no MESSAGE_AVAILABLE in the completion
2624 + ** queue, add one
2625 + */
2626 + if ((user_service->message_available_pos -
2627 + instance->completion_remove) < 0) {
2628 + VCHIQ_STATUS_T status;
2629 + vchiq_log_info(vchiq_arm_log_level,
2630 + "Inserting extra MESSAGE_AVAILABLE");
2631 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2632 + status = add_completion(instance, reason,
2633 + NULL, user_service, bulk_userdata);
2634 + if (status != VCHIQ_SUCCESS) {
2635 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2636 + return status;
2637 + }
2638 + }
2639 +
2640 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2641 + if (down_interruptible(&user_service->remove_event)
2642 + != 0) {
2643 + vchiq_log_info(vchiq_arm_log_level,
2644 + "service_callback interrupted");
2645 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2646 + return VCHIQ_RETRY;
2647 + } else if (instance->closing) {
2648 + vchiq_log_info(vchiq_arm_log_level,
2649 + "service_callback closing");
2650 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2651 + return VCHIQ_ERROR;
2652 + }
2653 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2654 + spin_lock(&msg_queue_spinlock);
2655 + }
2656 +
2657 + user_service->msg_queue[user_service->msg_insert &
2658 + (MSG_QUEUE_SIZE - 1)] = header;
2659 + user_service->msg_insert++;
2660 + spin_unlock(&msg_queue_spinlock);
2661 +
2662 + up(&user_service->insert_event);
2663 +
2664 + /* If there is a thread waiting in DEQUEUE_MESSAGE, or if
2665 + ** there is a MESSAGE_AVAILABLE in the completion queue then
2666 + ** bypass the completion queue.
2667 + */
2668 + if (((user_service->message_available_pos -
2669 + instance->completion_remove) >= 0) ||
2670 + user_service->dequeue_pending) {
2671 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2672 + user_service->dequeue_pending = 0;
2673 + return VCHIQ_SUCCESS;
2674 + }
2675 +
2676 + header = NULL;
2677 + }
2678 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2679 +
2680 + return add_completion(instance, reason, header, user_service,
2681 + bulk_userdata);
2682 +}
2683 +
2684 +/****************************************************************************
2685 +*
2686 +* user_service_free
2687 +*
2688 +***************************************************************************/
2689 +static void
2690 +user_service_free(void *userdata)
2691 +{
2692 + kfree(userdata);
2693 +}
2694 +
2695 +/****************************************************************************
2696 +*
2697 +* close_delivered
2698 +*
2699 +***************************************************************************/
2700 +static void close_delivered(USER_SERVICE_T *user_service)
2701 +{
2702 + vchiq_log_info(vchiq_arm_log_level,
2703 + "close_delivered(handle=%x)",
2704 + user_service->service->handle);
2705 +
2706 + if (user_service->close_pending) {
2707 + /* Allow the underlying service to be culled */
2708 + unlock_service(user_service->service);
2709 +
2710 + /* Wake the user-thread blocked in close_ or remove_service */
2711 + up(&user_service->close_event);
2712 +
2713 + user_service->close_pending = 0;
2714 + }
2715 +}
2716 +
2717 +/****************************************************************************
2718 +*
2719 +* vchiq_ioctl
2720 +*
2721 +***************************************************************************/
2722 +static long
2723 +vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2724 +{
2725 + VCHIQ_INSTANCE_T instance = file->private_data;
2726 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
2727 + VCHIQ_SERVICE_T *service = NULL;
2728 + long ret = 0;
2729 + int i, rc;
2730 + DEBUG_INITIALISE(g_state.local)
2731 +
2732 + vchiq_log_trace(vchiq_arm_log_level,
2733 + "vchiq_ioctl - instance %x, cmd %s, arg %lx",
2734 + (unsigned int)instance,
2735 + ((_IOC_TYPE(cmd) == VCHIQ_IOC_MAGIC) &&
2736 + (_IOC_NR(cmd) <= VCHIQ_IOC_MAX)) ?
2737 + ioctl_names[_IOC_NR(cmd)] : "<invalid>", arg);
2738 +
2739 + switch (cmd) {
2740 + case VCHIQ_IOC_SHUTDOWN:
2741 + if (!instance->connected)
2742 + break;
2743 +
2744 + /* Remove all services */
2745 + i = 0;
2746 + while ((service = next_service_by_instance(instance->state,
2747 + instance, &i)) != NULL) {
2748 + status = vchiq_remove_service(service->handle);
2749 + unlock_service(service);
2750 + if (status != VCHIQ_SUCCESS)
2751 + break;
2752 + }
2753 + service = NULL;
2754 +
2755 + if (status == VCHIQ_SUCCESS) {
2756 + /* Wake the completion thread and ask it to exit */
2757 + instance->closing = 1;
2758 + up(&instance->insert_event);
2759 + }
2760 +
2761 + break;
2762 +
2763 + case VCHIQ_IOC_CONNECT:
2764 + if (instance->connected) {
2765 + ret = -EINVAL;
2766 + break;
2767 + }
2768 + rc = mutex_lock_interruptible(&instance->state->mutex);
2769 + if (rc != 0) {
2770 + vchiq_log_error(vchiq_arm_log_level,
2771 + "vchiq: connect: could not lock mutex for "
2772 + "state %d: %d",
2773 + instance->state->id, rc);
2774 + ret = -EINTR;
2775 + break;
2776 + }
2777 + status = vchiq_connect_internal(instance->state, instance);
2778 + mutex_unlock(&instance->state->mutex);
2779 +
2780 + if (status == VCHIQ_SUCCESS)
2781 + instance->connected = 1;
2782 + else
2783 + vchiq_log_error(vchiq_arm_log_level,
2784 + "vchiq: could not connect: %d", status);
2785 + break;
2786 +
2787 + case VCHIQ_IOC_CREATE_SERVICE: {
2788 + VCHIQ_CREATE_SERVICE_T args;
2789 + USER_SERVICE_T *user_service = NULL;
2790 + void *userdata;
2791 + int srvstate;
2792 +
2793 + if (copy_from_user
2794 + (&args, (const void __user *)arg,
2795 + sizeof(args)) != 0) {
2796 + ret = -EFAULT;
2797 + break;
2798 + }
2799 +
2800 + user_service = kmalloc(sizeof(USER_SERVICE_T), GFP_KERNEL);
2801 + if (!user_service) {
2802 + ret = -ENOMEM;
2803 + break;
2804 + }
2805 +
2806 + if (args.is_open) {
2807 + if (!instance->connected) {
2808 + ret = -ENOTCONN;
2809 + kfree(user_service);
2810 + break;
2811 + }
2812 + srvstate = VCHIQ_SRVSTATE_OPENING;
2813 + } else {
2814 + srvstate =
2815 + instance->connected ?
2816 + VCHIQ_SRVSTATE_LISTENING :
2817 + VCHIQ_SRVSTATE_HIDDEN;
2818 + }
2819 +
2820 + userdata = args.params.userdata;
2821 + args.params.callback = service_callback;
2822 + args.params.userdata = user_service;
2823 + service = vchiq_add_service_internal(
2824 + instance->state,
2825 + &args.params, srvstate,
2826 + instance, user_service_free);
2827 +
2828 + if (service != NULL) {
2829 + user_service->service = service;
2830 + user_service->userdata = userdata;
2831 + user_service->instance = instance;
2832 + user_service->is_vchi = (args.is_vchi != 0);
2833 + user_service->dequeue_pending = 0;
2834 + user_service->close_pending = 0;
2835 + user_service->message_available_pos =
2836 + instance->completion_remove - 1;
2837 + user_service->msg_insert = 0;
2838 + user_service->msg_remove = 0;
2839 + sema_init(&user_service->insert_event, 0);
2840 + sema_init(&user_service->remove_event, 0);
2841 + sema_init(&user_service->close_event, 0);
2842 +
2843 + if (args.is_open) {
2844 + status = vchiq_open_service_internal
2845 + (service, instance->pid);
2846 + if (status != VCHIQ_SUCCESS) {
2847 + vchiq_remove_service(service->handle);
2848 + service = NULL;
2849 + ret = (status == VCHIQ_RETRY) ?
2850 + -EINTR : -EIO;
2851 + break;
2852 + }
2853 + }
2854 +
2855 + if (copy_to_user((void __user *)
2856 + &(((VCHIQ_CREATE_SERVICE_T __user *)
2857 + arg)->handle),
2858 + (const void *)&service->handle,
2859 + sizeof(service->handle)) != 0) {
2860 + ret = -EFAULT;
2861 + vchiq_remove_service(service->handle);
2862 + }
2863 +
2864 + service = NULL;
2865 + } else {
2866 + ret = -EEXIST;
2867 + kfree(user_service);
2868 + }
2869 + } break;
2870 +
2871 + case VCHIQ_IOC_CLOSE_SERVICE: {
2872 + VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
2873 +
2874 + service = find_service_for_instance(instance, handle);
2875 + if (service != NULL) {
2876 + USER_SERVICE_T *user_service =
2877 + (USER_SERVICE_T *)service->base.userdata;
2878 + /* close_pending is false on first entry, and when the
2879 + wait in vchiq_close_service has been interrupted. */
2880 + if (!user_service->close_pending) {
2881 + status = vchiq_close_service(service->handle);
2882 + if (status != VCHIQ_SUCCESS)
2883 + break;
2884 + }
2885 +
2886 + /* close_pending is true once the underlying service
2887 + has been closed until the client library calls the
2888 + CLOSE_DELIVERED ioctl, signalling close_event. */
2889 + if (user_service->close_pending &&
2890 + down_interruptible(&user_service->close_event))
2891 + status = VCHIQ_RETRY;
2892 + }
2893 + else
2894 + ret = -EINVAL;
2895 + } break;
2896 +
2897 + case VCHIQ_IOC_REMOVE_SERVICE: {
2898 + VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
2899 +
2900 + service = find_service_for_instance(instance, handle);
2901 + if (service != NULL) {
2902 + USER_SERVICE_T *user_service =
2903 + (USER_SERVICE_T *)service->base.userdata;
2904 + /* close_pending is false on first entry, and when the
2905 + wait in vchiq_close_service has been interrupted. */
2906 + if (!user_service->close_pending) {
2907 + status = vchiq_remove_service(service->handle);
2908 + if (status != VCHIQ_SUCCESS)
2909 + break;
2910 + }
2911 +
2912 + /* close_pending is true once the underlying service
2913 + has been closed until the client library calls the
2914 + CLOSE_DELIVERED ioctl, signalling close_event. */
2915 + if (user_service->close_pending &&
2916 + down_interruptible(&user_service->close_event))
2917 + status = VCHIQ_RETRY;
2918 + }
2919 + else
2920 + ret = -EINVAL;
2921 + } break;
2922 +
2923 + case VCHIQ_IOC_USE_SERVICE:
2924 + case VCHIQ_IOC_RELEASE_SERVICE: {
2925 + VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
2926 +
2927 + service = find_service_for_instance(instance, handle);
2928 + if (service != NULL) {
2929 + status = (cmd == VCHIQ_IOC_USE_SERVICE) ?
2930 + vchiq_use_service_internal(service) :
2931 + vchiq_release_service_internal(service);
2932 + if (status != VCHIQ_SUCCESS) {
2933 + vchiq_log_error(vchiq_susp_log_level,
2934 + "%s: cmd %s returned error %d for "
2935 + "service %c%c%c%c:%03d",
2936 + __func__,
2937 + (cmd == VCHIQ_IOC_USE_SERVICE) ?
2938 + "VCHIQ_IOC_USE_SERVICE" :
2939 + "VCHIQ_IOC_RELEASE_SERVICE",
2940 + status,
2941 + VCHIQ_FOURCC_AS_4CHARS(
2942 + service->base.fourcc),
2943 + service->client_id);
2944 + ret = -EINVAL;
2945 + }
2946 + } else
2947 + ret = -EINVAL;
2948 + } break;
2949 +
2950 + case VCHIQ_IOC_QUEUE_MESSAGE: {
2951 + VCHIQ_QUEUE_MESSAGE_T args;
2952 + if (copy_from_user
2953 + (&args, (const void __user *)arg,
2954 + sizeof(args)) != 0) {
2955 + ret = -EFAULT;
2956 + break;
2957 + }
2958 +
2959 + service = find_service_for_instance(instance, args.handle);
2960 +
2961 + if ((service != NULL) && (args.count <= MAX_ELEMENTS)) {
2962 + /* Copy elements into kernel space */
2963 + VCHIQ_ELEMENT_T elements[MAX_ELEMENTS];
2964 + if (copy_from_user(elements, args.elements,
2965 + args.count * sizeof(VCHIQ_ELEMENT_T)) == 0)
2966 + status = vchiq_queue_message
2967 + (args.handle,
2968 + elements, args.count);
2969 + else
2970 + ret = -EFAULT;
2971 + } else {
2972 + ret = -EINVAL;
2973 + }
2974 + } break;
2975 +
2976 + case VCHIQ_IOC_QUEUE_BULK_TRANSMIT:
2977 + case VCHIQ_IOC_QUEUE_BULK_RECEIVE: {
2978 + VCHIQ_QUEUE_BULK_TRANSFER_T args;
2979 + struct bulk_waiter_node *waiter = NULL;
2980 + VCHIQ_BULK_DIR_T dir =
2981 + (cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT) ?
2982 + VCHIQ_BULK_TRANSMIT : VCHIQ_BULK_RECEIVE;
2983 +
2984 + if (copy_from_user
2985 + (&args, (const void __user *)arg,
2986 + sizeof(args)) != 0) {
2987 + ret = -EFAULT;
2988 + break;
2989 + }
2990 +
2991 + service = find_service_for_instance(instance, args.handle);
2992 + if (!service) {
2993 + ret = -EINVAL;
2994 + break;
2995 + }
2996 +
2997 + if (args.mode == VCHIQ_BULK_MODE_BLOCKING) {
2998 + waiter = kzalloc(sizeof(struct bulk_waiter_node),
2999 + GFP_KERNEL);
3000 + if (!waiter) {
3001 + ret = -ENOMEM;
3002 + break;
3003 + }
3004 + args.userdata = &waiter->bulk_waiter;
3005 + } else if (args.mode == VCHIQ_BULK_MODE_WAITING) {
3006 + struct list_head *pos;
3007 + mutex_lock(&instance->bulk_waiter_list_mutex);
3008 + list_for_each(pos, &instance->bulk_waiter_list) {
3009 + if (list_entry(pos, struct bulk_waiter_node,
3010 + list)->pid == current->pid) {
3011 + waiter = list_entry(pos,
3012 + struct bulk_waiter_node,
3013 + list);
3014 + list_del(pos);
3015 + break;
3016 + }
3017 +
3018 + }
3019 + mutex_unlock(&instance->bulk_waiter_list_mutex);
3020 + if (!waiter) {
3021 + vchiq_log_error(vchiq_arm_log_level,
3022 + "no bulk_waiter found for pid %d",
3023 + current->pid);
3024 + ret = -ESRCH;
3025 + break;
3026 + }
3027 + vchiq_log_info(vchiq_arm_log_level,
3028 + "found bulk_waiter %x for pid %d",
3029 + (unsigned int)waiter, current->pid);
3030 + args.userdata = &waiter->bulk_waiter;
3031 + }
3032 + status = vchiq_bulk_transfer
3033 + (args.handle,
3034 + VCHI_MEM_HANDLE_INVALID,
3035 + args.data, args.size,
3036 + args.userdata, args.mode,
3037 + dir);
3038 + if (!waiter)
3039 + break;
3040 + if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) ||
3041 + !waiter->bulk_waiter.bulk) {
3042 + if (waiter->bulk_waiter.bulk) {
3043 + /* Cancel the signal when the transfer
3044 + ** completes. */
3045 + spin_lock(&bulk_waiter_spinlock);
3046 + waiter->bulk_waiter.bulk->userdata = NULL;
3047 + spin_unlock(&bulk_waiter_spinlock);
3048 + }
3049 + kfree(waiter);
3050 + } else {
3051 + const VCHIQ_BULK_MODE_T mode_waiting =
3052 + VCHIQ_BULK_MODE_WAITING;
3053 + waiter->pid = current->pid;
3054 + mutex_lock(&instance->bulk_waiter_list_mutex);
3055 + list_add(&waiter->list, &instance->bulk_waiter_list);
3056 + mutex_unlock(&instance->bulk_waiter_list_mutex);
3057 + vchiq_log_info(vchiq_arm_log_level,
3058 + "saved bulk_waiter %x for pid %d",
3059 + (unsigned int)waiter, current->pid);
3060 +
3061 + if (copy_to_user((void __user *)
3062 + &(((VCHIQ_QUEUE_BULK_TRANSFER_T __user *)
3063 + arg)->mode),
3064 + (const void *)&mode_waiting,
3065 + sizeof(mode_waiting)) != 0)
3066 + ret = -EFAULT;
3067 + }
3068 + } break;
3069 +
3070 + case VCHIQ_IOC_AWAIT_COMPLETION: {
3071 + VCHIQ_AWAIT_COMPLETION_T args;
3072 +
3073 + DEBUG_TRACE(AWAIT_COMPLETION_LINE);
3074 + if (!instance->connected) {
3075 + ret = -ENOTCONN;
3076 + break;
3077 + }
3078 +
3079 + if (copy_from_user(&args, (const void __user *)arg,
3080 + sizeof(args)) != 0) {
3081 + ret = -EFAULT;
3082 + break;
3083 + }
3084 +
3085 + mutex_lock(&instance->completion_mutex);
3086 +
3087 + DEBUG_TRACE(AWAIT_COMPLETION_LINE);
3088 + while ((instance->completion_remove ==
3089 + instance->completion_insert)
3090 + && !instance->closing) {
3091 + int rc;
3092 + DEBUG_TRACE(AWAIT_COMPLETION_LINE);
3093 + mutex_unlock(&instance->completion_mutex);
3094 + rc = down_interruptible(&instance->insert_event);
3095 + mutex_lock(&instance->completion_mutex);
3096 + if (rc != 0) {
3097 + DEBUG_TRACE(AWAIT_COMPLETION_LINE);
3098 + vchiq_log_info(vchiq_arm_log_level,
3099 + "AWAIT_COMPLETION interrupted");
3100 + ret = -EINTR;
3101 + break;
3102 + }
3103 + }
3104 + DEBUG_TRACE(AWAIT_COMPLETION_LINE);
3105 +
3106 + /* A read memory barrier is needed to stop prefetch of a stale
3107 + ** completion record
3108 + */
3109 + rmb();
3110 +
3111 + if (ret == 0) {
3112 + int msgbufcount = args.msgbufcount;
3113 + for (ret = 0; ret < args.count; ret++) {
3114 + VCHIQ_COMPLETION_DATA_T *completion;
3115 + VCHIQ_SERVICE_T *service;
3116 + USER_SERVICE_T *user_service;
3117 + VCHIQ_HEADER_T *header;
3118 + if (instance->completion_remove ==
3119 + instance->completion_insert)
3120 + break;
3121 + completion = &instance->completions[
3122 + instance->completion_remove &
3123 + (MAX_COMPLETIONS - 1)];
3124 +
3125 + service = completion->service_userdata;
3126 + user_service = service->base.userdata;
3127 + completion->service_userdata =
3128 + user_service->userdata;
3129 +
3130 + header = completion->header;
3131 + if (header) {
3132 + void __user *msgbuf;
3133 + int msglen;
3134 +
3135 + msglen = header->size +
3136 + sizeof(VCHIQ_HEADER_T);
3137 + /* This must be a VCHIQ-style service */
3138 + if (args.msgbufsize < msglen) {
3139 + vchiq_log_error(
3140 + vchiq_arm_log_level,
3141 + "header %x: msgbufsize"
3142 + " %x < msglen %x",
3143 + (unsigned int)header,
3144 + args.msgbufsize,
3145 + msglen);
3146 + WARN(1, "invalid message "
3147 + "size\n");
3148 + if (ret == 0)
3149 + ret = -EMSGSIZE;
3150 + break;
3151 + }
3152 + if (msgbufcount <= 0)
3153 + /* Stall here for lack of a
3154 + ** buffer for the message. */
3155 + break;
3156 + /* Get the pointer from user space */
3157 + msgbufcount--;
3158 + if (copy_from_user(&msgbuf,
3159 + (const void __user *)
3160 + &args.msgbufs[msgbufcount],
3161 + sizeof(msgbuf)) != 0) {
3162 + if (ret == 0)
3163 + ret = -EFAULT;
3164 + break;
3165 + }
3166 +
3167 + /* Copy the message to user space */
3168 + if (copy_to_user(msgbuf, header,
3169 + msglen) != 0) {
3170 + if (ret == 0)
3171 + ret = -EFAULT;
3172 + break;
3173 + }
3174 +
3175 + /* Now it has been copied, the message
3176 + ** can be released. */
3177 + vchiq_release_message(service->handle,
3178 + header);
3179 +
3180 + /* The completion must point to the
3181 + ** msgbuf. */
3182 + completion->header = msgbuf;
3183 + }
3184 +
3185 + if ((completion->reason ==
3186 + VCHIQ_SERVICE_CLOSED) &&
3187 + !instance->use_close_delivered)
3188 + unlock_service(service);
3189 +
3190 + if (copy_to_user((void __user *)(
3191 + (size_t)args.buf +
3192 + ret * sizeof(VCHIQ_COMPLETION_DATA_T)),
3193 + completion,
3194 + sizeof(VCHIQ_COMPLETION_DATA_T)) != 0) {
3195 + if (ret == 0)
3196 + ret = -EFAULT;
3197 + break;
3198 + }
3199 +
3200 + instance->completion_remove++;
3201 + }
3202 +
3203 + if (msgbufcount != args.msgbufcount) {
3204 + if (copy_to_user((void __user *)
3205 + &((VCHIQ_AWAIT_COMPLETION_T *)arg)->
3206 + msgbufcount,
3207 + &msgbufcount,
3208 + sizeof(msgbufcount)) != 0) {
3209 + ret = -EFAULT;
3210 + }
3211 + }
3212 + }
3213 +
3214 + if (ret != 0)
3215 + up(&instance->remove_event);
3216 + mutex_unlock(&instance->completion_mutex);
3217 + DEBUG_TRACE(AWAIT_COMPLETION_LINE);
3218 + } break;
3219 +
3220 + case VCHIQ_IOC_DEQUEUE_MESSAGE: {
3221 + VCHIQ_DEQUEUE_MESSAGE_T args;
3222 + USER_SERVICE_T *user_service;
3223 + VCHIQ_HEADER_T *header;
3224 +
3225 + DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
3226 + if (copy_from_user
3227 + (&args, (const void __user *)arg,
3228 + sizeof(args)) != 0) {
3229 + ret = -EFAULT;
3230 + break;
3231 + }
3232 + service = find_service_for_instance(instance, args.handle);
3233 + if (!service) {
3234 + ret = -EINVAL;
3235 + break;
3236 + }
3237 + user_service = (USER_SERVICE_T *)service->base.userdata;
3238 + if (user_service->is_vchi == 0) {
3239 + ret = -EINVAL;
3240 + break;
3241 + }
3242 +
3243 + spin_lock(&msg_queue_spinlock);
3244 + if (user_service->msg_remove == user_service->msg_insert) {
3245 + if (!args.blocking) {
3246 + spin_unlock(&msg_queue_spinlock);
3247 + DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
3248 + ret = -EWOULDBLOCK;
3249 + break;
3250 + }
3251 + user_service->dequeue_pending = 1;
3252 + do {
3253 + spin_unlock(&msg_queue_spinlock);
3254 + DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
3255 + if (down_interruptible(
3256 + &user_service->insert_event) != 0) {
3257 + vchiq_log_info(vchiq_arm_log_level,
3258 + "DEQUEUE_MESSAGE interrupted");
3259 + ret = -EINTR;
3260 + break;
3261 + }
3262 + spin_lock(&msg_queue_spinlock);
3263 + } while (user_service->msg_remove ==
3264 + user_service->msg_insert);
3265 +
3266 + if (ret)
3267 + break;
3268 + }
3269 +
3270 + BUG_ON((int)(user_service->msg_insert -
3271 + user_service->msg_remove) < 0);
3272 +
3273 + header = user_service->msg_queue[user_service->msg_remove &
3274 + (MSG_QUEUE_SIZE - 1)];
3275 + user_service->msg_remove++;
3276 + spin_unlock(&msg_queue_spinlock);
3277 +
3278 + up(&user_service->remove_event);
3279 + if (header == NULL)
3280 + ret = -ENOTCONN;
3281 + else if (header->size <= args.bufsize) {
3282 + /* Copy to user space if msgbuf is not NULL */
3283 + if ((args.buf == NULL) ||
3284 + (copy_to_user((void __user *)args.buf,
3285 + header->data,
3286 + header->size) == 0)) {
3287 + ret = header->size;
3288 + vchiq_release_message(
3289 + service->handle,
3290 + header);
3291 + } else
3292 + ret = -EFAULT;
3293 + } else {
3294 + vchiq_log_error(vchiq_arm_log_level,
3295 + "header %x: bufsize %x < size %x",
3296 + (unsigned int)header, args.bufsize,
3297 + header->size);
3298 + WARN(1, "invalid size\n");
3299 + ret = -EMSGSIZE;
3300 + }
3301 + DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
3302 + } break;
3303 +
3304 + case VCHIQ_IOC_GET_CLIENT_ID: {
3305 + VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
3306 +
3307 + ret = vchiq_get_client_id(handle);
3308 + } break;
3309 +
3310 + case VCHIQ_IOC_GET_CONFIG: {
3311 + VCHIQ_GET_CONFIG_T args;
3312 + VCHIQ_CONFIG_T config;
3313 +
3314 + if (copy_from_user(&args, (const void __user *)arg,
3315 + sizeof(args)) != 0) {
3316 + ret = -EFAULT;
3317 + break;
3318 + }
3319 + if (args.config_size > sizeof(config)) {
3320 + ret = -EINVAL;
3321 + break;
3322 + }
3323 + status = vchiq_get_config(instance, args.config_size, &config);
3324 + if (status == VCHIQ_SUCCESS) {
3325 + if (copy_to_user((void __user *)args.pconfig,
3326 + &config, args.config_size) != 0) {
3327 + ret = -EFAULT;
3328 + break;
3329 + }
3330 + }
3331 + } break;
3332 +
3333 + case VCHIQ_IOC_SET_SERVICE_OPTION: {
3334 + VCHIQ_SET_SERVICE_OPTION_T args;
3335 +
3336 + if (copy_from_user(
3337 + &args, (const void __user *)arg,
3338 + sizeof(args)) != 0) {
3339 + ret = -EFAULT;
3340 + break;
3341 + }
3342 +
3343 + service = find_service_for_instance(instance, args.handle);
3344 + if (!service) {
3345 + ret = -EINVAL;
3346 + break;
3347 + }
3348 +
3349 + status = vchiq_set_service_option(
3350 + args.handle, args.option, args.value);
3351 + } break;
3352 +
3353 + case VCHIQ_IOC_DUMP_PHYS_MEM: {
3354 + VCHIQ_DUMP_MEM_T args;
3355 +
3356 + if (copy_from_user
3357 + (&args, (const void __user *)arg,
3358 + sizeof(args)) != 0) {
3359 + ret = -EFAULT;
3360 + break;
3361 + }
3362 + dump_phys_mem(args.virt_addr, args.num_bytes);
3363 + } break;
3364 +
3365 + case VCHIQ_IOC_LIB_VERSION: {
3366 + unsigned int lib_version = (unsigned int)arg;
3367 +
3368 + if (lib_version < VCHIQ_VERSION_MIN)
3369 + ret = -EINVAL;
3370 + else if (lib_version >= VCHIQ_VERSION_CLOSE_DELIVERED)
3371 + instance->use_close_delivered = 1;
3372 + } break;
3373 +
3374 + case VCHIQ_IOC_CLOSE_DELIVERED: {
3375 + VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
3376 +
3377 + service = find_closed_service_for_instance(instance, handle);
3378 + if (service != NULL) {
3379 + USER_SERVICE_T *user_service =
3380 + (USER_SERVICE_T *)service->base.userdata;
3381 + close_delivered(user_service);
3382 + }
3383 + else
3384 + ret = -EINVAL;
3385 + } break;
3386 +
3387 + default:
3388 + ret = -ENOTTY;
3389 + break;
3390 + }
3391 +
3392 + if (service)
3393 + unlock_service(service);
3394 +
3395 + if (ret == 0) {
3396 + if (status == VCHIQ_ERROR)
3397 + ret = -EIO;
3398 + else if (status == VCHIQ_RETRY)
3399 + ret = -EINTR;
3400 + }
3401 +
3402 + if ((status == VCHIQ_SUCCESS) && (ret < 0) && (ret != -EINTR) &&
3403 + (ret != -EWOULDBLOCK))
3404 + vchiq_log_info(vchiq_arm_log_level,
3405 + " ioctl instance %lx, cmd %s -> status %d, %ld",
3406 + (unsigned long)instance,
3407 + (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
3408 + ioctl_names[_IOC_NR(cmd)] :
3409 + "<invalid>",
3410 + status, ret);
3411 + else
3412 + vchiq_log_trace(vchiq_arm_log_level,
3413 + " ioctl instance %lx, cmd %s -> status %d, %ld",
3414 + (unsigned long)instance,
3415 + (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
3416 + ioctl_names[_IOC_NR(cmd)] :
3417 + "<invalid>",
3418 + status, ret);
3419 +
3420 + return ret;
3421 +}
3422 +
3423 +/****************************************************************************
3424 +*
3425 +* vchiq_open
3426 +*
3427 +***************************************************************************/
3428 +
3429 +static int
3430 +vchiq_open(struct inode *inode, struct file *file)
3431 +{
3432 + int dev = iminor(inode) & 0x0f;
3433 + vchiq_log_info(vchiq_arm_log_level, "vchiq_open");
3434 + switch (dev) {
3435 + case VCHIQ_MINOR: {
3436 + int ret;
3437 + VCHIQ_STATE_T *state = vchiq_get_state();
3438 + VCHIQ_INSTANCE_T instance;
3439 +
3440 + if (!state) {
3441 + vchiq_log_error(vchiq_arm_log_level,
3442 + "vchiq has no connection to VideoCore");
3443 + return -ENOTCONN;
3444 + }
3445 +
3446 + instance = kzalloc(sizeof(*instance), GFP_KERNEL);
3447 + if (!instance)
3448 + return -ENOMEM;
3449 +
3450 + instance->state = state;
3451 + instance->pid = current->tgid;
3452 +
3453 + ret = vchiq_debugfs_add_instance(instance);
3454 + if (ret != 0) {
3455 + kfree(instance);
3456 + return ret;
3457 + }
3458 +
3459 + sema_init(&instance->insert_event, 0);
3460 + sema_init(&instance->remove_event, 0);
3461 + mutex_init(&instance->completion_mutex);
3462 + mutex_init(&instance->bulk_waiter_list_mutex);
3463 + INIT_LIST_HEAD(&instance->bulk_waiter_list);
3464 +
3465 + file->private_data = instance;
3466 + } break;
3467 +
3468 + default:
3469 + vchiq_log_error(vchiq_arm_log_level,
3470 + "Unknown minor device: %d", dev);
3471 + return -ENXIO;
3472 + }
3473 +
3474 + return 0;
3475 +}
3476 +
3477 +/****************************************************************************
3478 +*
3479 +* vchiq_release
3480 +*
3481 +***************************************************************************/
3482 +
3483 +static int
3484 +vchiq_release(struct inode *inode, struct file *file)
3485 +{
3486 + int dev = iminor(inode) & 0x0f;
3487 + int ret = 0;
3488 + switch (dev) {
3489 + case VCHIQ_MINOR: {
3490 + VCHIQ_INSTANCE_T instance = file->private_data;
3491 + VCHIQ_STATE_T *state = vchiq_get_state();
3492 + VCHIQ_SERVICE_T *service;
3493 + int i;
3494 +
3495 + vchiq_log_info(vchiq_arm_log_level,
3496 + "vchiq_release: instance=%lx",
3497 + (unsigned long)instance);
3498 +
3499 + if (!state) {
3500 + ret = -EPERM;
3501 + goto out;
3502 + }
3503 +
3504 + /* Ensure videocore is awake to allow termination. */
3505 + vchiq_use_internal(instance->state, NULL,
3506 + USE_TYPE_VCHIQ);
3507 +
3508 + mutex_lock(&instance->completion_mutex);
3509 +
3510 + /* Wake the completion thread and ask it to exit */
3511 + instance->closing = 1;
3512 + up(&instance->insert_event);
3513 +
3514 + mutex_unlock(&instance->completion_mutex);
3515 +
3516 + /* Wake the slot handler if the completion queue is full. */
3517 + up(&instance->remove_event);
3518 +
3519 + /* Mark all services for termination... */
3520 + i = 0;
3521 + while ((service = next_service_by_instance(state, instance,
3522 + &i)) != NULL) {
3523 + USER_SERVICE_T *user_service = service->base.userdata;
3524 +
3525 + /* Wake the slot handler if the msg queue is full. */
3526 + up(&user_service->remove_event);
3527 +
3528 + vchiq_terminate_service_internal(service);
3529 + unlock_service(service);
3530 + }
3531 +
3532 + /* ...and wait for them to die */
3533 + i = 0;
3534 + while ((service = next_service_by_instance(state, instance, &i))
3535 + != NULL) {
3536 + USER_SERVICE_T *user_service = service->base.userdata;
3537 +
3538 + down(&service->remove_event);
3539 +
3540 + BUG_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);
3541 +
3542 + spin_lock(&msg_queue_spinlock);
3543 +
3544 + while (user_service->msg_remove !=
3545 + user_service->msg_insert) {
3546 + VCHIQ_HEADER_T *header = user_service->
3547 + msg_queue[user_service->msg_remove &
3548 + (MSG_QUEUE_SIZE - 1)];
3549 + user_service->msg_remove++;
3550 + spin_unlock(&msg_queue_spinlock);
3551 +
3552 + if (header)
3553 + vchiq_release_message(
3554 + service->handle,
3555 + header);
3556 + spin_lock(&msg_queue_spinlock);
3557 + }
3558 +
3559 + spin_unlock(&msg_queue_spinlock);
3560 +
3561 + unlock_service(service);
3562 + }
3563 +
3564 + /* Release any closed services */
3565 + while (instance->completion_remove !=
3566 + instance->completion_insert) {
3567 + VCHIQ_COMPLETION_DATA_T *completion;
3568 + VCHIQ_SERVICE_T *service;
3569 + completion = &instance->completions[
3570 + instance->completion_remove &
3571 + (MAX_COMPLETIONS - 1)];
3572 + service = completion->service_userdata;
3573 + if (completion->reason == VCHIQ_SERVICE_CLOSED)
3574 + {
3575 + USER_SERVICE_T *user_service =
3576 + service->base.userdata;
3577 +
3578 + /* Wake any blocked user-thread */
3579 + if (instance->use_close_delivered)
3580 + up(&user_service->close_event);
3581 + unlock_service(service);
3582 + }
3583 + instance->completion_remove++;
3584 + }
3585 +
3586 + /* Release the PEER service count. */
3587 + vchiq_release_internal(instance->state, NULL);
3588 +
3589 + {
3590 + struct list_head *pos, *next;
3591 + list_for_each_safe(pos, next,
3592 + &instance->bulk_waiter_list) {
3593 + struct bulk_waiter_node *waiter;
3594 + waiter = list_entry(pos,
3595 + struct bulk_waiter_node,
3596 + list);
3597 + list_del(pos);
3598 + vchiq_log_info(vchiq_arm_log_level,
3599 + "bulk_waiter - cleaned up %x "
3600 + "for pid %d",
3601 + (unsigned int)waiter, waiter->pid);
3602 + kfree(waiter);
3603 + }
3604 + }
3605 +
3606 + vchiq_debugfs_remove_instance(instance);
3607 +
3608 + kfree(instance);
3609 + file->private_data = NULL;
3610 + } break;
3611 +
3612 + default:
3613 + vchiq_log_error(vchiq_arm_log_level,
3614 + "Unknown minor device: %d", dev);
3615 + ret = -ENXIO;
3616 + }
3617 +
3618 +out:
3619 + return ret;
3620 +}
3621 +
3622 +/****************************************************************************
3623 +*
3624 +* vchiq_dump
3625 +*
3626 +***************************************************************************/
3627 +
3628 +void
3629 +vchiq_dump(void *dump_context, const char *str, int len)
3630 +{
3631 + DUMP_CONTEXT_T *context = (DUMP_CONTEXT_T *)dump_context;
3632 +
3633 + if (context->actual < context->space) {
3634 + int copy_bytes;
3635 + if (context->offset > 0) {
3636 + int skip_bytes = min(len, (int)context->offset);
3637 + str += skip_bytes;
3638 + len -= skip_bytes;
3639 + context->offset -= skip_bytes;
3640 + if (context->offset > 0)
3641 + return;
3642 + }
3643 + copy_bytes = min(len, (int)(context->space - context->actual));
3644 + if (copy_bytes == 0)
3645 + return;
3646 + if (copy_to_user(context->buf + context->actual, str,
3647 + copy_bytes))
3648 + context->actual = -EFAULT;
3649 + context->actual += copy_bytes;
3650 + len -= copy_bytes;
3651 +
3652 + /* If tne terminating NUL is included in the length, then it
3653 + ** marks the end of a line and should be replaced with a
3654 + ** carriage return. */
3655 + if ((len == 0) && (str[copy_bytes - 1] == '\0')) {
3656 + char cr = '\n';
3657 + if (copy_to_user(context->buf + context->actual - 1,
3658 + &cr, 1))
3659 + context->actual = -EFAULT;
3660 + }
3661 + }
3662 +}
3663 +
3664 +/****************************************************************************
3665 +*
3666 +* vchiq_dump_platform_instance_state
3667 +*
3668 +***************************************************************************/
3669 +
3670 +void
3671 +vchiq_dump_platform_instances(void *dump_context)
3672 +{
3673 + VCHIQ_STATE_T *state = vchiq_get_state();
3674 + char buf[80];
3675 + int len;
3676 + int i;
3677 +
3678 + /* There is no list of instances, so instead scan all services,
3679 + marking those that have been dumped. */
3680 +
3681 + for (i = 0; i < state->unused_service; i++) {
3682 + VCHIQ_SERVICE_T *service = state->services[i];
3683 + VCHIQ_INSTANCE_T instance;
3684 +
3685 + if (service && (service->base.callback == service_callback)) {
3686 + instance = service->instance;
3687 + if (instance)
3688 + instance->mark = 0;
3689 + }
3690 + }
3691 +
3692 + for (i = 0; i < state->unused_service; i++) {
3693 + VCHIQ_SERVICE_T *service = state->services[i];
3694 + VCHIQ_INSTANCE_T instance;
3695 +
3696 + if (service && (service->base.callback == service_callback)) {
3697 + instance = service->instance;
3698 + if (instance && !instance->mark) {
3699 + len = snprintf(buf, sizeof(buf),
3700 + "Instance %x: pid %d,%s completions "
3701 + "%d/%d",
3702 + (unsigned int)instance, instance->pid,
3703 + instance->connected ? " connected, " :
3704 + "",
3705 + instance->completion_insert -
3706 + instance->completion_remove,
3707 + MAX_COMPLETIONS);
3708 +
3709 + vchiq_dump(dump_context, buf, len + 1);
3710 +
3711 + instance->mark = 1;
3712 + }
3713 + }
3714 + }
3715 +}
3716 +
3717 +/****************************************************************************
3718 +*
3719 +* vchiq_dump_platform_service_state
3720 +*
3721 +***************************************************************************/
3722 +
3723 +void
3724 +vchiq_dump_platform_service_state(void *dump_context, VCHIQ_SERVICE_T *service)
3725 +{
3726 + USER_SERVICE_T *user_service = (USER_SERVICE_T *)service->base.userdata;
3727 + char buf[80];
3728 + int len;
3729 +
3730 + len = snprintf(buf, sizeof(buf), " instance %x",
3731 + (unsigned int)service->instance);
3732 +
3733 + if ((service->base.callback == service_callback) &&
3734 + user_service->is_vchi) {
3735 + len += snprintf(buf + len, sizeof(buf) - len,
3736 + ", %d/%d messages",
3737 + user_service->msg_insert - user_service->msg_remove,
3738 + MSG_QUEUE_SIZE);
3739 +
3740 + if (user_service->dequeue_pending)
3741 + len += snprintf(buf + len, sizeof(buf) - len,
3742 + " (dequeue pending)");
3743 + }
3744 +
3745 + vchiq_dump(dump_context, buf, len + 1);
3746 +}
3747 +
3748 +/****************************************************************************
3749 +*
3750 +* dump_user_mem
3751 +*
3752 +***************************************************************************/
3753 +
3754 +static void
3755 +dump_phys_mem(void *virt_addr, uint32_t num_bytes)
3756 +{
3757 + int rc;
3758 + uint8_t *end_virt_addr = virt_addr + num_bytes;
3759 + int num_pages;
3760 + int offset;
3761 + int end_offset;
3762 + int page_idx;
3763 + int prev_idx;
3764 + struct page *page;
3765 + struct page **pages;
3766 + uint8_t *kmapped_virt_ptr;
3767 +
3768 + /* Align virtAddr and endVirtAddr to 16 byte boundaries. */
3769 +
3770 + virt_addr = (void *)((unsigned long)virt_addr & ~0x0fuL);
3771 + end_virt_addr = (void *)(((unsigned long)end_virt_addr + 15uL) &
3772 + ~0x0fuL);
3773 +
3774 + offset = (int)(long)virt_addr & (PAGE_SIZE - 1);
3775 + end_offset = (int)(long)end_virt_addr & (PAGE_SIZE - 1);
3776 +
3777 + num_pages = (offset + num_bytes + PAGE_SIZE - 1) / PAGE_SIZE;
3778 +
3779 + pages = kmalloc(sizeof(struct page *) * num_pages, GFP_KERNEL);
3780 + if (pages == NULL) {
3781 + vchiq_log_error(vchiq_arm_log_level,
3782 + "Unable to allocation memory for %d pages\n",
3783 + num_pages);
3784 + return;
3785 + }
3786 +
3787 + down_read(&current->mm->mmap_sem);
3788 + rc = get_user_pages(current, /* task */
3789 + current->mm, /* mm */
3790 + (unsigned long)virt_addr, /* start */
3791 + num_pages, /* len */
3792 + 0, /* write */
3793 + 0, /* force */
3794 + pages, /* pages (array of page pointers) */
3795 + NULL); /* vmas */
3796 + up_read(&current->mm->mmap_sem);
3797 +
3798 + prev_idx = -1;
3799 + page = NULL;
3800 +
3801 + while (offset < end_offset) {
3802 +
3803 + int page_offset = offset % PAGE_SIZE;
3804 + page_idx = offset / PAGE_SIZE;
3805 +
3806 + if (page_idx != prev_idx) {
3807 +
3808 + if (page != NULL)
3809 + kunmap(page);
3810 + page = pages[page_idx];
3811 + kmapped_virt_ptr = kmap(page);
3812 +
3813 + prev_idx = page_idx;
3814 + }
3815 +
3816 + if (vchiq_arm_log_level >= VCHIQ_LOG_TRACE)
3817 + vchiq_log_dump_mem("ph",
3818 + (uint32_t)(unsigned long)&kmapped_virt_ptr[
3819 + page_offset],
3820 + &kmapped_virt_ptr[page_offset], 16);
3821 +
3822 + offset += 16;
3823 + }
3824 + if (page != NULL)
3825 + kunmap(page);
3826 +
3827 + for (page_idx = 0; page_idx < num_pages; page_idx++)
3828 + page_cache_release(pages[page_idx]);
3829 +
3830 + kfree(pages);
3831 +}
3832 +
3833 +/****************************************************************************
3834 +*
3835 +* vchiq_read
3836 +*
3837 +***************************************************************************/
3838 +
3839 +static ssize_t
3840 +vchiq_read(struct file *file, char __user *buf,
3841 + size_t count, loff_t *ppos)
3842 +{
3843 + DUMP_CONTEXT_T context;
3844 + context.buf = buf;
3845 + context.actual = 0;
3846 + context.space = count;
3847 + context.offset = *ppos;
3848 +
3849 + vchiq_dump_state(&context, &g_state);
3850 +
3851 + *ppos += context.actual;
3852 +
3853 + return context.actual;
3854 +}
3855 +
3856 +VCHIQ_STATE_T *
3857 +vchiq_get_state(void)
3858 +{
3859 +
3860 + if (g_state.remote == NULL)
3861 + printk(KERN_ERR "%s: g_state.remote == NULL\n", __func__);
3862 + else if (g_state.remote->initialised != 1)
3863 + printk(KERN_NOTICE "%s: g_state.remote->initialised != 1 (%d)\n",
3864 + __func__, g_state.remote->initialised);
3865 +
3866 + return ((g_state.remote != NULL) &&
3867 + (g_state.remote->initialised == 1)) ? &g_state : NULL;
3868 +}
3869 +
3870 +static const struct file_operations
3871 +vchiq_fops = {
3872 + .owner = THIS_MODULE,
3873 + .unlocked_ioctl = vchiq_ioctl,
3874 + .open = vchiq_open,
3875 + .release = vchiq_release,
3876 + .read = vchiq_read
3877 +};
3878 +
3879 +/*
3880 + * Autosuspend related functionality
3881 + */
3882 +
3883 +int
3884 +vchiq_videocore_wanted(VCHIQ_STATE_T *state)
3885 +{
3886 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
3887 + if (!arm_state)
3888 + /* autosuspend not supported - always return wanted */
3889 + return 1;
3890 + else if (arm_state->blocked_count)
3891 + return 1;
3892 + else if (!arm_state->videocore_use_count)
3893 + /* usage count zero - check for override unless we're forcing */
3894 + if (arm_state->resume_blocked)
3895 + return 0;
3896 + else
3897 + return vchiq_platform_videocore_wanted(state);
3898 + else
3899 + /* non-zero usage count - videocore still required */
3900 + return 1;
3901 +}
3902 +
3903 +static VCHIQ_STATUS_T
3904 +vchiq_keepalive_vchiq_callback(VCHIQ_REASON_T reason,
3905 + VCHIQ_HEADER_T *header,
3906 + VCHIQ_SERVICE_HANDLE_T service_user,
3907 + void *bulk_user)
3908 +{
3909 + vchiq_log_error(vchiq_susp_log_level,
3910 + "%s callback reason %d", __func__, reason);
3911 + return 0;
3912 +}
3913 +
3914 +static int
3915 +vchiq_keepalive_thread_func(void *v)
3916 +{
3917 + VCHIQ_STATE_T *state = (VCHIQ_STATE_T *) v;
3918 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
3919 +
3920 + VCHIQ_STATUS_T status;
3921 + VCHIQ_INSTANCE_T instance;
3922 + VCHIQ_SERVICE_HANDLE_T ka_handle;
3923 +
3924 + VCHIQ_SERVICE_PARAMS_T params = {
3925 + .fourcc = VCHIQ_MAKE_FOURCC('K', 'E', 'E', 'P'),
3926 + .callback = vchiq_keepalive_vchiq_callback,
3927 + .version = KEEPALIVE_VER,
3928 + .version_min = KEEPALIVE_VER_MIN
3929 + };
3930 +
3931 + status = vchiq_initialise(&instance);
3932 + if (status != VCHIQ_SUCCESS) {
3933 + vchiq_log_error(vchiq_susp_log_level,
3934 + "%s vchiq_initialise failed %d", __func__, status);
3935 + goto exit;
3936 + }
3937 +
3938 + status = vchiq_connect(instance);
3939 + if (status != VCHIQ_SUCCESS) {
3940 + vchiq_log_error(vchiq_susp_log_level,
3941 + "%s vchiq_connect failed %d", __func__, status);
3942 + goto shutdown;
3943 + }
3944 +
3945 + status = vchiq_add_service(instance, &params, &ka_handle);
3946 + if (status != VCHIQ_SUCCESS) {
3947 + vchiq_log_error(vchiq_susp_log_level,
3948 + "%s vchiq_open_service failed %d", __func__, status);
3949 + goto shutdown;
3950 + }
3951 +
3952 + while (1) {
3953 + long rc = 0, uc = 0;
3954 + if (wait_for_completion_interruptible(&arm_state->ka_evt)
3955 + != 0) {
3956 + vchiq_log_error(vchiq_susp_log_level,
3957 + "%s interrupted", __func__);
3958 + flush_signals(current);
3959 + continue;
3960 + }
3961 +
3962 + /* read and clear counters. Do release_count then use_count to
3963 + * prevent getting more releases than uses */
3964 + rc = atomic_xchg(&arm_state->ka_release_count, 0);
3965 + uc = atomic_xchg(&arm_state->ka_use_count, 0);
3966 +
3967 + /* Call use/release service the requisite number of times.
3968 + * Process use before release so use counts don't go negative */
3969 + while (uc--) {
3970 + atomic_inc(&arm_state->ka_use_ack_count);
3971 + status = vchiq_use_service(ka_handle);
3972 + if (status != VCHIQ_SUCCESS) {
3973 + vchiq_log_error(vchiq_susp_log_level,
3974 + "%s vchiq_use_service error %d",
3975 + __func__, status);
3976 + }
3977 + }
3978 + while (rc--) {
3979 + status = vchiq_release_service(ka_handle);
3980 + if (status != VCHIQ_SUCCESS) {
3981 + vchiq_log_error(vchiq_susp_log_level,
3982 + "%s vchiq_release_service error %d",
3983 + __func__, status);
3984 + }
3985 + }
3986 + }
3987 +
3988 +shutdown:
3989 + vchiq_shutdown(instance);
3990 +exit:
3991 + return 0;
3992 +}
3993 +
3994 +
3995 +
3996 +VCHIQ_STATUS_T
3997 +vchiq_arm_init_state(VCHIQ_STATE_T *state, VCHIQ_ARM_STATE_T *arm_state)
3998 +{
3999 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
4000 +
4001 + if (arm_state) {
4002 + rwlock_init(&arm_state->susp_res_lock);
4003 +
4004 + init_completion(&arm_state->ka_evt);
4005 + atomic_set(&arm_state->ka_use_count, 0);
4006 + atomic_set(&arm_state->ka_use_ack_count, 0);
4007 + atomic_set(&arm_state->ka_release_count, 0);
4008 +
4009 + init_completion(&arm_state->vc_suspend_complete);
4010 +
4011 + init_completion(&arm_state->vc_resume_complete);
4012 + /* Initialise to 'done' state. We only want to block on resume
4013 + * completion while videocore is suspended. */
4014 + set_resume_state(arm_state, VC_RESUME_RESUMED);
4015 +
4016 + init_completion(&arm_state->resume_blocker);
4017 + /* Initialise to 'done' state. We only want to block on this
4018 + * completion while resume is blocked */
4019 + complete_all(&arm_state->resume_blocker);
4020 +
4021 + init_completion(&arm_state->blocked_blocker);
4022 + /* Initialise to 'done' state. We only want to block on this
4023 + * completion while things are waiting on the resume blocker */
4024 + complete_all(&arm_state->blocked_blocker);
4025 +
4026 + arm_state->suspend_timer_timeout = SUSPEND_TIMER_TIMEOUT_MS;
4027 + arm_state->suspend_timer_running = 0;
4028 + init_timer(&arm_state->suspend_timer);
4029 + arm_state->suspend_timer.data = (unsigned long)(state);
4030 + arm_state->suspend_timer.function = suspend_timer_callback;
4031 +
4032 + arm_state->first_connect = 0;
4033 +
4034 + }
4035 + return status;
4036 +}
4037 +
4038 +/*
4039 +** Functions to modify the state variables;
4040 +** set_suspend_state
4041 +** set_resume_state
4042 +**
4043 +** There are more state variables than we might like, so ensure they remain in
4044 +** step. Suspend and resume state are maintained separately, since most of
4045 +** these state machines can operate independently. However, there are a few
4046 +** states where state transitions in one state machine cause a reset to the
4047 +** other state machine. In addition, there are some completion events which
4048 +** need to occur on state machine reset and end-state(s), so these are also
4049 +** dealt with in these functions.
4050 +**
4051 +** In all states we set the state variable according to the input, but in some
4052 +** cases we perform additional steps outlined below;
4053 +**
4054 +** VC_SUSPEND_IDLE - Initialise the suspend completion at the same time.
4055 +** The suspend completion is completed after any suspend
4056 +** attempt. When we reset the state machine we also reset
4057 +** the completion. This reset occurs when videocore is
4058 +** resumed, and also if we initiate suspend after a suspend
4059 +** failure.
4060 +**
4061 +** VC_SUSPEND_IN_PROGRESS - This state is considered the point of no return for
4062 +** suspend - ie from this point on we must try to suspend
4063 +** before resuming can occur. We therefore also reset the
4064 +** resume state machine to VC_RESUME_IDLE in this state.
4065 +**
4066 +** VC_SUSPEND_SUSPENDED - Suspend has completed successfully. Also call
4067 +** complete_all on the suspend completion to notify
4068 +** anything waiting for suspend to happen.
4069 +**
4070 +** VC_SUSPEND_REJECTED - Videocore rejected suspend. Videocore will also
4071 +** initiate resume, so no need to alter resume state.
4072 +** We call complete_all on the suspend completion to notify
4073 +** of suspend rejection.
4074 +**
4075 +** VC_SUSPEND_FAILED - We failed to initiate videocore suspend. We notify the
4076 +** suspend completion and reset the resume state machine.
4077 +**
4078 +** VC_RESUME_IDLE - Initialise the resume completion at the same time. The
4079 +** resume completion is in it's 'done' state whenever
4080 +** videcore is running. Therfore, the VC_RESUME_IDLE state
4081 +** implies that videocore is suspended.
4082 +** Hence, any thread which needs to wait until videocore is
4083 +** running can wait on this completion - it will only block
4084 +** if videocore is suspended.
4085 +**
4086 +** VC_RESUME_RESUMED - Resume has completed successfully. Videocore is running.
4087 +** Call complete_all on the resume completion to unblock
4088 +** any threads waiting for resume. Also reset the suspend
4089 +** state machine to it's idle state.
4090 +**
4091 +** VC_RESUME_FAILED - Currently unused - no mechanism to fail resume exists.
4092 +*/
4093 +
4094 +inline void
4095 +set_suspend_state(VCHIQ_ARM_STATE_T *arm_state,
4096 + enum vc_suspend_status new_state)
4097 +{
4098 + /* set the state in all cases */
4099 + arm_state->vc_suspend_state = new_state;
4100 +
4101 + /* state specific additional actions */
4102 + switch (new_state) {
4103 + case VC_SUSPEND_FORCE_CANCELED:
4104 + complete_all(&arm_state->vc_suspend_complete);
4105 + break;
4106 + case VC_SUSPEND_REJECTED:
4107 + complete_all(&arm_state->vc_suspend_complete);
4108 + break;
4109 + case VC_SUSPEND_FAILED:
4110 + complete_all(&arm_state->vc_suspend_complete);
4111 + arm_state->vc_resume_state = VC_RESUME_RESUMED;
4112 + complete_all(&arm_state->vc_resume_complete);
4113 + break;
4114 + case VC_SUSPEND_IDLE:
4115 + reinit_completion(&arm_state->vc_suspend_complete);
4116 + break;
4117 + case VC_SUSPEND_REQUESTED:
4118 + break;
4119 + case VC_SUSPEND_IN_PROGRESS:
4120 + set_resume_state(arm_state, VC_RESUME_IDLE);
4121 + break;
4122 + case VC_SUSPEND_SUSPENDED:
4123 + complete_all(&arm_state->vc_suspend_complete);
4124 + break;
4125 + default:
4126 + BUG();
4127 + break;
4128 + }
4129 +}
4130 +
4131 +inline void
4132 +set_resume_state(VCHIQ_ARM_STATE_T *arm_state,
4133 + enum vc_resume_status new_state)
4134 +{
4135 + /* set the state in all cases */
4136 + arm_state->vc_resume_state = new_state;
4137 +
4138 + /* state specific additional actions */
4139 + switch (new_state) {
4140 + case VC_RESUME_FAILED:
4141 + break;
4142 + case VC_RESUME_IDLE:
4143 + reinit_completion(&arm_state->vc_resume_complete);
4144 + break;
4145 + case VC_RESUME_REQUESTED:
4146 + break;
4147 + case VC_RESUME_IN_PROGRESS:
4148 + break;
4149 + case VC_RESUME_RESUMED:
4150 + complete_all(&arm_state->vc_resume_complete);
4151 + set_suspend_state(arm_state, VC_SUSPEND_IDLE);
4152 + break;
4153 + default:
4154 + BUG();
4155 + break;
4156 + }
4157 +}
4158 +
4159 +
4160 +/* should be called with the write lock held */
4161 +inline void
4162 +start_suspend_timer(VCHIQ_ARM_STATE_T *arm_state)
4163 +{
4164 + del_timer(&arm_state->suspend_timer);
4165 + arm_state->suspend_timer.expires = jiffies +
4166 + msecs_to_jiffies(arm_state->
4167 + suspend_timer_timeout);
4168 + add_timer(&arm_state->suspend_timer);
4169 + arm_state->suspend_timer_running = 1;
4170 +}
4171 +
4172 +/* should be called with the write lock held */
4173 +static inline void
4174 +stop_suspend_timer(VCHIQ_ARM_STATE_T *arm_state)
4175 +{
4176 + if (arm_state->suspend_timer_running) {
4177 + del_timer(&arm_state->suspend_timer);
4178 + arm_state->suspend_timer_running = 0;
4179 + }
4180 +}
4181 +
4182 +static inline int
4183 +need_resume(VCHIQ_STATE_T *state)
4184 +{
4185 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4186 + return (arm_state->vc_suspend_state > VC_SUSPEND_IDLE) &&
4187 + (arm_state->vc_resume_state < VC_RESUME_REQUESTED) &&
4188 + vchiq_videocore_wanted(state);
4189 +}
4190 +
4191 +static int
4192 +block_resume(VCHIQ_ARM_STATE_T *arm_state)
4193 +{
4194 + int status = VCHIQ_SUCCESS;
4195 + const unsigned long timeout_val =
4196 + msecs_to_jiffies(FORCE_SUSPEND_TIMEOUT_MS);
4197 + int resume_count = 0;
4198 +
4199 + /* Allow any threads which were blocked by the last force suspend to
4200 + * complete if they haven't already. Only give this one shot; if
4201 + * blocked_count is incremented after blocked_blocker is completed
4202 + * (which only happens when blocked_count hits 0) then those threads
4203 + * will have to wait until next time around */
4204 + if (arm_state->blocked_count) {
4205 + reinit_completion(&arm_state->blocked_blocker);
4206 + write_unlock_bh(&arm_state->susp_res_lock);
4207 + vchiq_log_info(vchiq_susp_log_level, "%s wait for previously "
4208 + "blocked clients", __func__);
4209 + if (wait_for_completion_interruptible_timeout(
4210 + &arm_state->blocked_blocker, timeout_val)
4211 + <= 0) {
4212 + vchiq_log_error(vchiq_susp_log_level, "%s wait for "
4213 + "previously blocked clients failed" , __func__);
4214 + status = VCHIQ_ERROR;
4215 + write_lock_bh(&arm_state->susp_res_lock);
4216 + goto out;
4217 + }
4218 + vchiq_log_info(vchiq_susp_log_level, "%s previously blocked "
4219 + "clients resumed", __func__);
4220 + write_lock_bh(&arm_state->susp_res_lock);
4221 + }
4222 +
4223 + /* We need to wait for resume to complete if it's in process */
4224 + while (arm_state->vc_resume_state != VC_RESUME_RESUMED &&
4225 + arm_state->vc_resume_state > VC_RESUME_IDLE) {
4226 + if (resume_count > 1) {
4227 + status = VCHIQ_ERROR;
4228 + vchiq_log_error(vchiq_susp_log_level, "%s waited too "
4229 + "many times for resume" , __func__);
4230 + goto out;
4231 + }
4232 + write_unlock_bh(&arm_state->susp_res_lock);
4233 + vchiq_log_info(vchiq_susp_log_level, "%s wait for resume",
4234 + __func__);
4235 + if (wait_for_completion_interruptible_timeout(
4236 + &arm_state->vc_resume_complete, timeout_val)
4237 + <= 0) {
4238 + vchiq_log_error(vchiq_susp_log_level, "%s wait for "
4239 + "resume failed (%s)", __func__,
4240 + resume_state_names[arm_state->vc_resume_state +
4241 + VC_RESUME_NUM_OFFSET]);
4242 + status = VCHIQ_ERROR;
4243 + write_lock_bh(&arm_state->susp_res_lock);
4244 + goto out;
4245 + }
4246 + vchiq_log_info(vchiq_susp_log_level, "%s resumed", __func__);
4247 + write_lock_bh(&arm_state->susp_res_lock);
4248 + resume_count++;
4249 + }
4250 + reinit_completion(&arm_state->resume_blocker);
4251 + arm_state->resume_blocked = 1;
4252 +
4253 +out:
4254 + return status;
4255 +}
4256 +
4257 +static inline void
4258 +unblock_resume(VCHIQ_ARM_STATE_T *arm_state)
4259 +{
4260 + complete_all(&arm_state->resume_blocker);
4261 + arm_state->resume_blocked = 0;
4262 +}
4263 +
4264 +/* Initiate suspend via slot handler. Should be called with the write lock
4265 + * held */
4266 +VCHIQ_STATUS_T
4267 +vchiq_arm_vcsuspend(VCHIQ_STATE_T *state)
4268 +{
4269 + VCHIQ_STATUS_T status = VCHIQ_ERROR;
4270 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4271 +
4272 + if (!arm_state)
4273 + goto out;
4274 +
4275 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4276 + status = VCHIQ_SUCCESS;
4277 +
4278 +
4279 + switch (arm_state->vc_suspend_state) {
4280 + case VC_SUSPEND_REQUESTED:
4281 + vchiq_log_info(vchiq_susp_log_level, "%s: suspend already "
4282 + "requested", __func__);
4283 + break;
4284 + case VC_SUSPEND_IN_PROGRESS:
4285 + vchiq_log_info(vchiq_susp_log_level, "%s: suspend already in "
4286 + "progress", __func__);
4287 + break;
4288 +
4289 + default:
4290 + /* We don't expect to be in other states, so log but continue
4291 + * anyway */
4292 + vchiq_log_error(vchiq_susp_log_level,
4293 + "%s unexpected suspend state %s", __func__,
4294 + suspend_state_names[arm_state->vc_suspend_state +
4295 + VC_SUSPEND_NUM_OFFSET]);
4296 + /* fall through */
4297 + case VC_SUSPEND_REJECTED:
4298 + case VC_SUSPEND_FAILED:
4299 + /* Ensure any idle state actions have been run */
4300 + set_suspend_state(arm_state, VC_SUSPEND_IDLE);
4301 + /* fall through */
4302 + case VC_SUSPEND_IDLE:
4303 + vchiq_log_info(vchiq_susp_log_level,
4304 + "%s: suspending", __func__);
4305 + set_suspend_state(arm_state, VC_SUSPEND_REQUESTED);
4306 + /* kick the slot handler thread to initiate suspend */
4307 + request_poll(state, NULL, 0);
4308 + break;
4309 + }
4310 +
4311 +out:
4312 + vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, status);
4313 + return status;
4314 +}
4315 +
4316 +void
4317 +vchiq_platform_check_suspend(VCHIQ_STATE_T *state)
4318 +{
4319 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4320 + int susp = 0;
4321 +
4322 + if (!arm_state)
4323 + goto out;
4324 +
4325 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4326 +
4327 + write_lock_bh(&arm_state->susp_res_lock);
4328 + if (arm_state->vc_suspend_state == VC_SUSPEND_REQUESTED &&
4329 + arm_state->vc_resume_state == VC_RESUME_RESUMED) {
4330 + set_suspend_state(arm_state, VC_SUSPEND_IN_PROGRESS);
4331 + susp = 1;
4332 + }
4333 + write_unlock_bh(&arm_state->susp_res_lock);
4334 +
4335 + if (susp)
4336 + vchiq_platform_suspend(state);
4337 +
4338 +out:
4339 + vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
4340 + return;
4341 +}
4342 +
4343 +
4344 +static void
4345 +output_timeout_error(VCHIQ_STATE_T *state)
4346 +{
4347 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4348 + char service_err[50] = "";
4349 + int vc_use_count = arm_state->videocore_use_count;
4350 + int active_services = state->unused_service;
4351 + int i;
4352 +
4353 + if (!arm_state->videocore_use_count) {
4354 + snprintf(service_err, 50, " Videocore usecount is 0");
4355 + goto output_msg;
4356 + }
4357 + for (i = 0; i < active_services; i++) {
4358 + VCHIQ_SERVICE_T *service_ptr = state->services[i];
4359 + if (service_ptr && service_ptr->service_use_count &&
4360 + (service_ptr->srvstate != VCHIQ_SRVSTATE_FREE)) {
4361 + snprintf(service_err, 50, " %c%c%c%c(%d) service has "
4362 + "use count %d%s", VCHIQ_FOURCC_AS_4CHARS(
4363 + service_ptr->base.fourcc),
4364 + service_ptr->client_id,
4365 + service_ptr->service_use_count,
4366 + service_ptr->service_use_count ==
4367 + vc_use_count ? "" : " (+ more)");
4368 + break;
4369 + }
4370 + }
4371 +
4372 +output_msg:
4373 + vchiq_log_error(vchiq_susp_log_level,
4374 + "timed out waiting for vc suspend (%d).%s",
4375 + arm_state->autosuspend_override, service_err);
4376 +
4377 +}
4378 +
4379 +/* Try to get videocore into suspended state, regardless of autosuspend state.
4380 +** We don't actually force suspend, since videocore may get into a bad state
4381 +** if we force suspend at a bad time. Instead, we wait for autosuspend to
4382 +** determine a good point to suspend. If this doesn't happen within 100ms we
4383 +** report failure.
4384 +**
4385 +** Returns VCHIQ_SUCCESS if videocore suspended successfully, VCHIQ_RETRY if
4386 +** videocore failed to suspend in time or VCHIQ_ERROR if interrupted.
4387 +*/
4388 +VCHIQ_STATUS_T
4389 +vchiq_arm_force_suspend(VCHIQ_STATE_T *state)
4390 +{
4391 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4392 + VCHIQ_STATUS_T status = VCHIQ_ERROR;
4393 + long rc = 0;
4394 + int repeat = -1;
4395 +
4396 + if (!arm_state)
4397 + goto out;
4398 +
4399 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4400 +
4401 + write_lock_bh(&arm_state->susp_res_lock);
4402 +
4403 + status = block_resume(arm_state);
4404 + if (status != VCHIQ_SUCCESS)
4405 + goto unlock;
4406 + if (arm_state->vc_suspend_state == VC_SUSPEND_SUSPENDED) {
4407 + /* Already suspended - just block resume and exit */
4408 + vchiq_log_info(vchiq_susp_log_level, "%s already suspended",
4409 + __func__);
4410 + status = VCHIQ_SUCCESS;
4411 + goto unlock;
4412 + } else if (arm_state->vc_suspend_state <= VC_SUSPEND_IDLE) {
4413 + /* initiate suspend immediately in the case that we're waiting
4414 + * for the timeout */
4415 + stop_suspend_timer(arm_state);
4416 + if (!vchiq_videocore_wanted(state)) {
4417 + vchiq_log_info(vchiq_susp_log_level, "%s videocore "
4418 + "idle, initiating suspend", __func__);
4419 + status = vchiq_arm_vcsuspend(state);
4420 + } else if (arm_state->autosuspend_override <
4421 + FORCE_SUSPEND_FAIL_MAX) {
4422 + vchiq_log_info(vchiq_susp_log_level, "%s letting "
4423 + "videocore go idle", __func__);
4424 + status = VCHIQ_SUCCESS;
4425 + } else {
4426 + vchiq_log_warning(vchiq_susp_log_level, "%s failed too "
4427 + "many times - attempting suspend", __func__);
4428 + status = vchiq_arm_vcsuspend(state);
4429 + }
4430 + } else {
4431 + vchiq_log_info(vchiq_susp_log_level, "%s videocore suspend "
4432 + "in progress - wait for completion", __func__);
4433 + status = VCHIQ_SUCCESS;
4434 + }
4435 +
4436 + /* Wait for suspend to happen due to system idle (not forced..) */
4437 + if (status != VCHIQ_SUCCESS)
4438 + goto unblock_resume;
4439 +
4440 + do {
4441 + write_unlock_bh(&arm_state->susp_res_lock);
4442 +
4443 + rc = wait_for_completion_interruptible_timeout(
4444 + &arm_state->vc_suspend_complete,
4445 + msecs_to_jiffies(FORCE_SUSPEND_TIMEOUT_MS));
4446 +
4447 + write_lock_bh(&arm_state->susp_res_lock);
4448 + if (rc < 0) {
4449 + vchiq_log_warning(vchiq_susp_log_level, "%s "
4450 + "interrupted waiting for suspend", __func__);
4451 + status = VCHIQ_ERROR;
4452 + goto unblock_resume;
4453 + } else if (rc == 0) {
4454 + if (arm_state->vc_suspend_state > VC_SUSPEND_IDLE) {
4455 + /* Repeat timeout once if in progress */
4456 + if (repeat < 0) {
4457 + repeat = 1;
4458 + continue;
4459 + }
4460 + }
4461 + arm_state->autosuspend_override++;
4462 + output_timeout_error(state);
4463 +
4464 + status = VCHIQ_RETRY;
4465 + goto unblock_resume;
4466 + }
4467 + } while (0 < (repeat--));
4468 +
4469 + /* Check and report state in case we need to abort ARM suspend */
4470 + if (arm_state->vc_suspend_state != VC_SUSPEND_SUSPENDED) {
4471 + status = VCHIQ_RETRY;
4472 + vchiq_log_error(vchiq_susp_log_level,
4473 + "%s videocore suspend failed (state %s)", __func__,
4474 + suspend_state_names[arm_state->vc_suspend_state +
4475 + VC_SUSPEND_NUM_OFFSET]);
4476 + /* Reset the state only if it's still in an error state.
4477 + * Something could have already initiated another suspend. */
4478 + if (arm_state->vc_suspend_state < VC_SUSPEND_IDLE)
4479 + set_suspend_state(arm_state, VC_SUSPEND_IDLE);
4480 +
4481 + goto unblock_resume;
4482 + }
4483 +
4484 + /* successfully suspended - unlock and exit */
4485 + goto unlock;
4486 +
4487 +unblock_resume:
4488 + /* all error states need to unblock resume before exit */
4489 + unblock_resume(arm_state);
4490 +
4491 +unlock:
4492 + write_unlock_bh(&arm_state->susp_res_lock);
4493 +
4494 +out:
4495 + vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, status);
4496 + return status;
4497 +}
4498 +
4499 +void
4500 +vchiq_check_suspend(VCHIQ_STATE_T *state)
4501 +{
4502 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4503 +
4504 + if (!arm_state)
4505 + goto out;
4506 +
4507 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4508 +
4509 + write_lock_bh(&arm_state->susp_res_lock);
4510 + if (arm_state->vc_suspend_state != VC_SUSPEND_SUSPENDED &&
4511 + arm_state->first_connect &&
4512 + !vchiq_videocore_wanted(state)) {
4513 + vchiq_arm_vcsuspend(state);
4514 + }
4515 + write_unlock_bh(&arm_state->susp_res_lock);
4516 +
4517 +out:
4518 + vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
4519 + return;
4520 +}
4521 +
4522 +
4523 +int
4524 +vchiq_arm_allow_resume(VCHIQ_STATE_T *state)
4525 +{
4526 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4527 + int resume = 0;
4528 + int ret = -1;
4529 +
4530 + if (!arm_state)
4531 + goto out;
4532 +
4533 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4534 +
4535 + write_lock_bh(&arm_state->susp_res_lock);
4536 + unblock_resume(arm_state);
4537 + resume = vchiq_check_resume(state);
4538 + write_unlock_bh(&arm_state->susp_res_lock);
4539 +
4540 + if (resume) {
4541 + if (wait_for_completion_interruptible(
4542 + &arm_state->vc_resume_complete) < 0) {
4543 + vchiq_log_error(vchiq_susp_log_level,
4544 + "%s interrupted", __func__);
4545 + /* failed, cannot accurately derive suspend
4546 + * state, so exit early. */
4547 + goto out;
4548 + }
4549 + }
4550 +
4551 + read_lock_bh(&arm_state->susp_res_lock);
4552 + if (arm_state->vc_suspend_state == VC_SUSPEND_SUSPENDED) {
4553 + vchiq_log_info(vchiq_susp_log_level,
4554 + "%s: Videocore remains suspended", __func__);
4555 + } else {
4556 + vchiq_log_info(vchiq_susp_log_level,
4557 + "%s: Videocore resumed", __func__);
4558 + ret = 0;
4559 + }
4560 + read_unlock_bh(&arm_state->susp_res_lock);
4561 +out:
4562 + vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
4563 + return ret;
4564 +}
4565 +
4566 +/* This function should be called with the write lock held */
4567 +int
4568 +vchiq_check_resume(VCHIQ_STATE_T *state)
4569 +{
4570 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4571 + int resume = 0;
4572 +
4573 + if (!arm_state)
4574 + goto out;
4575 +
4576 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4577 +
4578 + if (need_resume(state)) {
4579 + set_resume_state(arm_state, VC_RESUME_REQUESTED);
4580 + request_poll(state, NULL, 0);
4581 + resume = 1;
4582 + }
4583 +
4584 +out:
4585 + vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
4586 + return resume;
4587 +}
4588 +
4589 +void
4590 +vchiq_platform_check_resume(VCHIQ_STATE_T *state)
4591 +{
4592 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4593 + int res = 0;
4594 +
4595 + if (!arm_state)
4596 + goto out;
4597 +
4598 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4599 +
4600 + write_lock_bh(&arm_state->susp_res_lock);
4601 + if (arm_state->wake_address == 0) {
4602 + vchiq_log_info(vchiq_susp_log_level,
4603 + "%s: already awake", __func__);
4604 + goto unlock;
4605 + }
4606 + if (arm_state->vc_resume_state == VC_RESUME_IN_PROGRESS) {
4607 + vchiq_log_info(vchiq_susp_log_level,
4608 + "%s: already resuming", __func__);
4609 + goto unlock;
4610 + }
4611 +
4612 + if (arm_state->vc_resume_state == VC_RESUME_REQUESTED) {
4613 + set_resume_state(arm_state, VC_RESUME_IN_PROGRESS);
4614 + res = 1;
4615 + } else
4616 + vchiq_log_trace(vchiq_susp_log_level,
4617 + "%s: not resuming (resume state %s)", __func__,
4618 + resume_state_names[arm_state->vc_resume_state +
4619 + VC_RESUME_NUM_OFFSET]);
4620 +
4621 +unlock:
4622 + write_unlock_bh(&arm_state->susp_res_lock);
4623 +
4624 + if (res)
4625 + vchiq_platform_resume(state);
4626 +
4627 +out:
4628 + vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
4629 + return;
4630 +
4631 +}
4632 +
4633 +
4634 +
4635 +VCHIQ_STATUS_T
4636 +vchiq_use_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
4637 + enum USE_TYPE_E use_type)
4638 +{
4639 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4640 + VCHIQ_STATUS_T ret = VCHIQ_SUCCESS;
4641 + char entity[16];
4642 + int *entity_uc;
4643 + int local_uc, local_entity_uc;
4644 +
4645 + if (!arm_state)
4646 + goto out;
4647 +
4648 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4649 +
4650 + if (use_type == USE_TYPE_VCHIQ) {
4651 + sprintf(entity, "VCHIQ: ");
4652 + entity_uc = &arm_state->peer_use_count;
4653 + } else if (service) {
4654 + sprintf(entity, "%c%c%c%c:%03d",
4655 + VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
4656 + service->client_id);
4657 + entity_uc = &service->service_use_count;
4658 + } else {
4659 + vchiq_log_error(vchiq_susp_log_level, "%s null service "
4660 + "ptr", __func__);
4661 + ret = VCHIQ_ERROR;
4662 + goto out;
4663 + }
4664 +
4665 + write_lock_bh(&arm_state->susp_res_lock);
4666 + while (arm_state->resume_blocked) {
4667 + /* If we call 'use' while force suspend is waiting for suspend,
4668 + * then we're about to block the thread which the force is
4669 + * waiting to complete, so we're bound to just time out. In this
4670 + * case, set the suspend state such that the wait will be
4671 + * canceled, so we can complete as quickly as possible. */
4672 + if (arm_state->resume_blocked && arm_state->vc_suspend_state ==
4673 + VC_SUSPEND_IDLE) {
4674 + set_suspend_state(arm_state, VC_SUSPEND_FORCE_CANCELED);
4675 + break;
4676 + }
4677 + /* If suspend is already in progress then we need to block */
4678 + if (!try_wait_for_completion(&arm_state->resume_blocker)) {
4679 + /* Indicate that there are threads waiting on the resume
4680 + * blocker. These need to be allowed to complete before
4681 + * a _second_ call to force suspend can complete,
4682 + * otherwise low priority threads might never actually
4683 + * continue */
4684 + arm_state->blocked_count++;
4685 + write_unlock_bh(&arm_state->susp_res_lock);
4686 + vchiq_log_info(vchiq_susp_log_level, "%s %s resume "
4687 + "blocked - waiting...", __func__, entity);
4688 + if (wait_for_completion_killable(
4689 + &arm_state->resume_blocker) != 0) {
4690 + vchiq_log_error(vchiq_susp_log_level, "%s %s "
4691 + "wait for resume blocker interrupted",
4692 + __func__, entity);
4693 + ret = VCHIQ_ERROR;
4694 + write_lock_bh(&arm_state->susp_res_lock);
4695 + arm_state->blocked_count--;
4696 + write_unlock_bh(&arm_state->susp_res_lock);
4697 + goto out;
4698 + }
4699 + vchiq_log_info(vchiq_susp_log_level, "%s %s resume "
4700 + "unblocked", __func__, entity);
4701 + write_lock_bh(&arm_state->susp_res_lock);
4702 + if (--arm_state->blocked_count == 0)
4703 + complete_all(&arm_state->blocked_blocker);
4704 + }
4705 + }
4706 +
4707 + stop_suspend_timer(arm_state);
4708 +
4709 + local_uc = ++arm_state->videocore_use_count;
4710 + local_entity_uc = ++(*entity_uc);
4711 +
4712 + /* If there's a pending request which hasn't yet been serviced then
4713 + * just clear it. If we're past VC_SUSPEND_REQUESTED state then
4714 + * vc_resume_complete will block until we either resume or fail to
4715 + * suspend */
4716 + if (arm_state->vc_suspend_state <= VC_SUSPEND_REQUESTED)
4717 + set_suspend_state(arm_state, VC_SUSPEND_IDLE);
4718 +
4719 + if ((use_type != USE_TYPE_SERVICE_NO_RESUME) && need_resume(state)) {
4720 + set_resume_state(arm_state, VC_RESUME_REQUESTED);
4721 + vchiq_log_info(vchiq_susp_log_level,
4722 + "%s %s count %d, state count %d",
4723 + __func__, entity, local_entity_uc, local_uc);
4724 + request_poll(state, NULL, 0);
4725 + } else
4726 + vchiq_log_trace(vchiq_susp_log_level,
4727 + "%s %s count %d, state count %d",
4728 + __func__, entity, *entity_uc, local_uc);
4729 +
4730 +
4731 + write_unlock_bh(&arm_state->susp_res_lock);
4732 +
4733 + /* Completion is in a done state when we're not suspended, so this won't
4734 + * block for the non-suspended case. */
4735 + if (!try_wait_for_completion(&arm_state->vc_resume_complete)) {
4736 + vchiq_log_info(vchiq_susp_log_level, "%s %s wait for resume",
4737 + __func__, entity);
4738 + if (wait_for_completion_killable(
4739 + &arm_state->vc_resume_complete) != 0) {
4740 + vchiq_log_error(vchiq_susp_log_level, "%s %s wait for "
4741 + "resume interrupted", __func__, entity);
4742 + ret = VCHIQ_ERROR;
4743 + goto out;
4744 + }
4745 + vchiq_log_info(vchiq_susp_log_level, "%s %s resumed", __func__,
4746 + entity);
4747 + }
4748 +
4749 + if (ret == VCHIQ_SUCCESS) {
4750 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
4751 + long ack_cnt = atomic_xchg(&arm_state->ka_use_ack_count, 0);
4752 + while (ack_cnt && (status == VCHIQ_SUCCESS)) {
4753 + /* Send the use notify to videocore */
4754 + status = vchiq_send_remote_use_active(state);
4755 + if (status == VCHIQ_SUCCESS)
4756 + ack_cnt--;
4757 + else
4758 + atomic_add(ack_cnt,
4759 + &arm_state->ka_use_ack_count);
4760 + }
4761 + }
4762 +
4763 +out:
4764 + vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
4765 + return ret;
4766 +}
4767 +
4768 +VCHIQ_STATUS_T
4769 +vchiq_release_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service)
4770 +{
4771 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4772 + VCHIQ_STATUS_T ret = VCHIQ_SUCCESS;
4773 + char entity[16];
4774 + int *entity_uc;
4775 + int local_uc, local_entity_uc;
4776 +
4777 + if (!arm_state)
4778 + goto out;
4779 +
4780 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4781 +
4782 + if (service) {
4783 + sprintf(entity, "%c%c%c%c:%03d",
4784 + VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
4785 + service->client_id);
4786 + entity_uc = &service->service_use_count;
4787 + } else {
4788 + sprintf(entity, "PEER: ");
4789 + entity_uc = &arm_state->peer_use_count;
4790 + }
4791 +
4792 + write_lock_bh(&arm_state->susp_res_lock);
4793 + if (!arm_state->videocore_use_count || !(*entity_uc)) {
4794 + /* Don't use BUG_ON - don't allow user thread to crash kernel */
4795 + WARN_ON(!arm_state->videocore_use_count);
4796 + WARN_ON(!(*entity_uc));
4797 + ret = VCHIQ_ERROR;
4798 + goto unlock;
4799 + }
4800 + local_uc = --arm_state->videocore_use_count;
4801 + local_entity_uc = --(*entity_uc);
4802 +
4803 + if (!vchiq_videocore_wanted(state)) {
4804 + if (vchiq_platform_use_suspend_timer() &&
4805 + !arm_state->resume_blocked) {
4806 + /* Only use the timer if we're not trying to force
4807 + * suspend (=> resume_blocked) */
4808 + start_suspend_timer(arm_state);
4809 + } else {
4810 + vchiq_log_info(vchiq_susp_log_level,
4811 + "%s %s count %d, state count %d - suspending",
4812 + __func__, entity, *entity_uc,
4813 + arm_state->videocore_use_count);
4814 + vchiq_arm_vcsuspend(state);
4815 + }
4816 + } else
4817 + vchiq_log_trace(vchiq_susp_log_level,
4818 + "%s %s count %d, state count %d",
4819 + __func__, entity, *entity_uc,
4820 + arm_state->videocore_use_count);
4821 +
4822 +unlock:
4823 + write_unlock_bh(&arm_state->susp_res_lock);
4824 +
4825 +out:
4826 + vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
4827 + return ret;
4828 +}
4829 +
4830 +void
4831 +vchiq_on_remote_use(VCHIQ_STATE_T *state)
4832 +{
4833 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4834 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4835 + atomic_inc(&arm_state->ka_use_count);
4836 + complete(&arm_state->ka_evt);
4837 +}
4838 +
4839 +void
4840 +vchiq_on_remote_release(VCHIQ_STATE_T *state)
4841 +{
4842 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4843 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4844 + atomic_inc(&arm_state->ka_release_count);
4845 + complete(&arm_state->ka_evt);
4846 +}
4847 +
4848 +VCHIQ_STATUS_T
4849 +vchiq_use_service_internal(VCHIQ_SERVICE_T *service)
4850 +{
4851 + return vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
4852 +}
4853 +
4854 +VCHIQ_STATUS_T
4855 +vchiq_release_service_internal(VCHIQ_SERVICE_T *service)
4856 +{
4857 + return vchiq_release_internal(service->state, service);
4858 +}
4859 +
4860 +VCHIQ_DEBUGFS_NODE_T *
4861 +vchiq_instance_get_debugfs_node(VCHIQ_INSTANCE_T instance)
4862 +{
4863 + return &instance->debugfs_node;
4864 +}
4865 +
4866 +int
4867 +vchiq_instance_get_use_count(VCHIQ_INSTANCE_T instance)
4868 +{
4869 + VCHIQ_SERVICE_T *service;
4870 + int use_count = 0, i;
4871 + i = 0;
4872 + while ((service = next_service_by_instance(instance->state,
4873 + instance, &i)) != NULL) {
4874 + use_count += service->service_use_count;
4875 + unlock_service(service);
4876 + }
4877 + return use_count;
4878 +}
4879 +
4880 +int
4881 +vchiq_instance_get_pid(VCHIQ_INSTANCE_T instance)
4882 +{
4883 + return instance->pid;
4884 +}
4885 +
4886 +int
4887 +vchiq_instance_get_trace(VCHIQ_INSTANCE_T instance)
4888 +{
4889 + return instance->trace;
4890 +}
4891 +
4892 +void
4893 +vchiq_instance_set_trace(VCHIQ_INSTANCE_T instance, int trace)
4894 +{
4895 + VCHIQ_SERVICE_T *service;
4896 + int i;
4897 + i = 0;
4898 + while ((service = next_service_by_instance(instance->state,
4899 + instance, &i)) != NULL) {
4900 + service->trace = trace;
4901 + unlock_service(service);
4902 + }
4903 + instance->trace = (trace != 0);
4904 +}
4905 +
4906 +static void suspend_timer_callback(unsigned long context)
4907 +{
4908 + VCHIQ_STATE_T *state = (VCHIQ_STATE_T *)context;
4909 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4910 + if (!arm_state)
4911 + goto out;
4912 + vchiq_log_info(vchiq_susp_log_level,
4913 + "%s - suspend timer expired - check suspend", __func__);
4914 + vchiq_check_suspend(state);
4915 +out:
4916 + return;
4917 +}
4918 +
4919 +VCHIQ_STATUS_T
4920 +vchiq_use_service_no_resume(VCHIQ_SERVICE_HANDLE_T handle)
4921 +{
4922 + VCHIQ_STATUS_T ret = VCHIQ_ERROR;
4923 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
4924 + if (service) {
4925 + ret = vchiq_use_internal(service->state, service,
4926 + USE_TYPE_SERVICE_NO_RESUME);
4927 + unlock_service(service);
4928 + }
4929 + return ret;
4930 +}
4931 +
4932 +VCHIQ_STATUS_T
4933 +vchiq_use_service(VCHIQ_SERVICE_HANDLE_T handle)
4934 +{
4935 + VCHIQ_STATUS_T ret = VCHIQ_ERROR;
4936 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
4937 + if (service) {
4938 + ret = vchiq_use_internal(service->state, service,
4939 + USE_TYPE_SERVICE);
4940 + unlock_service(service);
4941 + }
4942 + return ret;
4943 +}
4944 +
4945 +VCHIQ_STATUS_T
4946 +vchiq_release_service(VCHIQ_SERVICE_HANDLE_T handle)
4947 +{
4948 + VCHIQ_STATUS_T ret = VCHIQ_ERROR;
4949 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
4950 + if (service) {
4951 + ret = vchiq_release_internal(service->state, service);
4952 + unlock_service(service);
4953 + }
4954 + return ret;
4955 +}
4956 +
4957 +void
4958 +vchiq_dump_service_use_state(VCHIQ_STATE_T *state)
4959 +{
4960 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4961 + int i, j = 0;
4962 + /* Only dump 64 services */
4963 + static const int local_max_services = 64;
4964 + /* If there's more than 64 services, only dump ones with
4965 + * non-zero counts */
4966 + int only_nonzero = 0;
4967 + static const char *nz = "<-- preventing suspend";
4968 +
4969 + enum vc_suspend_status vc_suspend_state;
4970 + enum vc_resume_status vc_resume_state;
4971 + int peer_count;
4972 + int vc_use_count;
4973 + int active_services;
4974 + struct service_data_struct {
4975 + int fourcc;
4976 + int clientid;
4977 + int use_count;
4978 + } service_data[local_max_services];
4979 +
4980 + if (!arm_state)
4981 + return;
4982 +
4983 + read_lock_bh(&arm_state->susp_res_lock);
4984 + vc_suspend_state = arm_state->vc_suspend_state;
4985 + vc_resume_state = arm_state->vc_resume_state;
4986 + peer_count = arm_state->peer_use_count;
4987 + vc_use_count = arm_state->videocore_use_count;
4988 + active_services = state->unused_service;
4989 + if (active_services > local_max_services)
4990 + only_nonzero = 1;
4991 +
4992 + for (i = 0; (i < active_services) && (j < local_max_services); i++) {
4993 + VCHIQ_SERVICE_T *service_ptr = state->services[i];
4994 + if (!service_ptr)
4995 + continue;
4996 +
4997 + if (only_nonzero && !service_ptr->service_use_count)
4998 + continue;
4999 +
5000 + if (service_ptr->srvstate != VCHIQ_SRVSTATE_FREE) {
5001 + service_data[j].fourcc = service_ptr->base.fourcc;
5002 + service_data[j].clientid = service_ptr->client_id;
5003 + service_data[j++].use_count = service_ptr->
5004 + service_use_count;
5005 + }
5006 + }
5007 +
5008 + read_unlock_bh(&arm_state->susp_res_lock);
5009 +
5010 + vchiq_log_warning(vchiq_susp_log_level,
5011 +		"-- Videocore suspend state: %s --",
5012 + suspend_state_names[vc_suspend_state + VC_SUSPEND_NUM_OFFSET]);
5013 + vchiq_log_warning(vchiq_susp_log_level,
5014 +		"-- Videocore resume state: %s --",
5015 + resume_state_names[vc_resume_state + VC_RESUME_NUM_OFFSET]);
5016 +
5017 + if (only_nonzero)
5018 + vchiq_log_warning(vchiq_susp_log_level, "Too many active "
5019 + "services (%d). Only dumping up to first %d services "
5020 + "with non-zero use-count", active_services,
5021 + local_max_services);
5022 +
5023 + for (i = 0; i < j; i++) {
5024 + vchiq_log_warning(vchiq_susp_log_level,
5025 + "----- %c%c%c%c:%d service count %d %s",
5026 + VCHIQ_FOURCC_AS_4CHARS(service_data[i].fourcc),
5027 + service_data[i].clientid,
5028 + service_data[i].use_count,
5029 + service_data[i].use_count ? nz : "");
5030 + }
5031 + vchiq_log_warning(vchiq_susp_log_level,
5032 + "----- VCHIQ use count count %d", peer_count);
5033 + vchiq_log_warning(vchiq_susp_log_level,
5034 + "--- Overall vchiq instance use count %d", vc_use_count);
5035 +
5036 + vchiq_dump_platform_use_state(state);
5037 +}
5038 +
5039 +VCHIQ_STATUS_T
5040 +vchiq_check_service(VCHIQ_SERVICE_T *service)
5041 +{
5042 + VCHIQ_ARM_STATE_T *arm_state;
5043 + VCHIQ_STATUS_T ret = VCHIQ_ERROR;
5044 +
5045 + if (!service || !service->state)
5046 + goto out;
5047 +
5048 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
5049 +
5050 + arm_state = vchiq_platform_get_arm_state(service->state);
5051 +
5052 + read_lock_bh(&arm_state->susp_res_lock);
5053 + if (service->service_use_count)
5054 + ret = VCHIQ_SUCCESS;
5055 + read_unlock_bh(&arm_state->susp_res_lock);
5056 +
5057 + if (ret == VCHIQ_ERROR) {
5058 + vchiq_log_error(vchiq_susp_log_level,
5059 + "%s ERROR - %c%c%c%c:%d service count %d, "
5060 + "state count %d, videocore suspend state %s", __func__,
5061 + VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
5062 + service->client_id, service->service_use_count,
5063 + arm_state->videocore_use_count,
5064 + suspend_state_names[arm_state->vc_suspend_state +
5065 + VC_SUSPEND_NUM_OFFSET]);
5066 + vchiq_dump_service_use_state(service->state);
5067 + }
5068 +out:
5069 + return ret;
5070 +}
5071 +
5072 +/* stub functions */
5073 +void vchiq_on_remote_use_active(VCHIQ_STATE_T *state)
5074 +{
5075 + (void)state;
5076 +}
5077 +
5078 +void vchiq_platform_conn_state_changed(VCHIQ_STATE_T *state,
5079 + VCHIQ_CONNSTATE_T oldstate, VCHIQ_CONNSTATE_T newstate)
5080 +{
5081 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
5082 + vchiq_log_info(vchiq_susp_log_level, "%d: %s->%s", state->id,
5083 + get_conn_state_name(oldstate), get_conn_state_name(newstate));
5084 + if (state->conn_state == VCHIQ_CONNSTATE_CONNECTED) {
5085 + write_lock_bh(&arm_state->susp_res_lock);
5086 + if (!arm_state->first_connect) {
5087 + char threadname[10];
5088 + arm_state->first_connect = 1;
5089 + write_unlock_bh(&arm_state->susp_res_lock);
5090 + snprintf(threadname, sizeof(threadname), "VCHIQka-%d",
5091 + state->id);
5092 + arm_state->ka_thread = kthread_create(
5093 + &vchiq_keepalive_thread_func,
5094 + (void *)state,
5095 + threadname);
5096 + if (arm_state->ka_thread == NULL) {
5097 + vchiq_log_error(vchiq_susp_log_level,
5098 + "vchiq: FATAL: couldn't create thread %s",
5099 + threadname);
5100 + } else {
5101 + wake_up_process(arm_state->ka_thread);
5102 + }
5103 + } else
5104 + write_unlock_bh(&arm_state->susp_res_lock);
5105 + }
5106 +}
5107 +
5108 +
5109 +/****************************************************************************
5110 +*
5111 +* vchiq_init - called when the module is loaded.
5112 +*
5113 +***************************************************************************/
5114 +
5115 +static int __init
5116 +vchiq_init(void)
5117 +{
5118 + int err;
5119 + void *ptr_err;
5120 +
5121 + /* create debugfs entries */
5122 + err = vchiq_debugfs_init();
5123 + if (err != 0)
5124 + goto failed_debugfs_init;
5125 +
5126 + err = alloc_chrdev_region(&vchiq_devid, VCHIQ_MINOR, 1, DEVICE_NAME);
5127 + if (err != 0) {
5128 + vchiq_log_error(vchiq_arm_log_level,
5129 + "Unable to allocate device number");
5130 + goto failed_alloc_chrdev;
5131 + }
5132 + cdev_init(&vchiq_cdev, &vchiq_fops);
5133 + vchiq_cdev.owner = THIS_MODULE;
5134 + err = cdev_add(&vchiq_cdev, vchiq_devid, 1);
5135 + if (err != 0) {
5136 + vchiq_log_error(vchiq_arm_log_level,
5137 + "Unable to register device");
5138 + goto failed_cdev_add;
5139 + }
5140 +
5141 + /* create sysfs entries */
5142 + vchiq_class = class_create(THIS_MODULE, DEVICE_NAME);
5143 + ptr_err = vchiq_class;
5144 + if (IS_ERR(ptr_err))
5145 + goto failed_class_create;
5146 +
5147 + vchiq_dev = device_create(vchiq_class, NULL,
5148 + vchiq_devid, NULL, "vchiq");
5149 + ptr_err = vchiq_dev;
5150 + if (IS_ERR(ptr_err))
5151 + goto failed_device_create;
5152 +
5153 + err = vchiq_platform_init(&g_state);
5154 + if (err != 0)
5155 + goto failed_platform_init;
5156 +
5157 + vchiq_log_info(vchiq_arm_log_level,
5158 + "vchiq: initialised - version %d (min %d), device %d.%d",
5159 + VCHIQ_VERSION, VCHIQ_VERSION_MIN,
5160 + MAJOR(vchiq_devid), MINOR(vchiq_devid));
5161 +
5162 + return 0;
5163 +
5164 +failed_platform_init:
5165 + device_destroy(vchiq_class, vchiq_devid);
5166 +failed_device_create:
5167 + class_destroy(vchiq_class);
5168 +failed_class_create:
5169 + cdev_del(&vchiq_cdev);
5170 + err = PTR_ERR(ptr_err);
5171 +failed_cdev_add:
5172 + unregister_chrdev_region(vchiq_devid, 1);
5173 +failed_alloc_chrdev:
5174 + vchiq_debugfs_deinit();
5175 +failed_debugfs_init:
5176 + vchiq_log_warning(vchiq_arm_log_level, "could not load vchiq");
5177 + return err;
5178 +}
5179 +
5180 +/****************************************************************************
5181 +*
5182 +* vchiq_exit - called when the module is unloaded.
5183 +*
5184 +***************************************************************************/
5185 +
5186 +static void __exit
5187 +vchiq_exit(void)
5188 +{
5189 + vchiq_platform_exit(&g_state);
5190 + device_destroy(vchiq_class, vchiq_devid);
5191 + class_destroy(vchiq_class);
5192 + cdev_del(&vchiq_cdev);
5193 + unregister_chrdev_region(vchiq_devid, 1);
5194 +}
5195 +
5196 +module_init(vchiq_init);
5197 +module_exit(vchiq_exit);
5198 +MODULE_LICENSE("GPL");
5199 +MODULE_AUTHOR("Broadcom Corporation");
5200 diff --git a/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.h b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.h
5201 new file mode 100644
5202 index 0000000..d1e2741
5203 --- /dev/null
5204 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.h
5205 @@ -0,0 +1,223 @@
5206 +/**
5207 + * Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved.
5208 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
5209 + *
5210 + * Redistribution and use in source and binary forms, with or without
5211 + * modification, are permitted provided that the following conditions
5212 + * are met:
5213 + * 1. Redistributions of source code must retain the above copyright
5214 + * notice, this list of conditions, and the following disclaimer,
5215 + * without modification.
5216 + * 2. Redistributions in binary form must reproduce the above copyright
5217 + * notice, this list of conditions and the following disclaimer in the
5218 + * documentation and/or other materials provided with the distribution.
5219 + * 3. The names of the above-listed copyright holders may not be used
5220 + * to endorse or promote products derived from this software without
5221 + * specific prior written permission.
5222 + *
5223 + * ALTERNATIVELY, this software may be distributed under the terms of the
5224 + * GNU General Public License ("GPL") version 2, as published by the Free
5225 + * Software Foundation.
5226 + *
5227 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
5228 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
5229 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
5230 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
5231 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
5232 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
5233 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
5234 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
5235 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
5236 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
5237 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5238 + */
5239 +
5240 +#ifndef VCHIQ_ARM_H
5241 +#define VCHIQ_ARM_H
5242 +
5243 +#include <linux/mutex.h>
5244 +#include <linux/semaphore.h>
5245 +#include <linux/atomic.h>
5246 +#include "vchiq_core.h"
5247 +#include "vchiq_debugfs.h"
5248 +
5249 +
5250 +enum vc_suspend_status {
5251 + VC_SUSPEND_FORCE_CANCELED = -3, /* Force suspend canceled, too busy */
5252 + VC_SUSPEND_REJECTED = -2, /* Videocore rejected suspend request */
5253 + VC_SUSPEND_FAILED = -1, /* Videocore suspend failed */
5254 + VC_SUSPEND_IDLE = 0, /* VC active, no suspend actions */
5255 + VC_SUSPEND_REQUESTED, /* User has requested suspend */
5256 + VC_SUSPEND_IN_PROGRESS, /* Slot handler has recvd suspend request */
5257 + VC_SUSPEND_SUSPENDED /* Videocore suspend succeeded */
5258 +};
5259 +
5260 +enum vc_resume_status {
5261 + VC_RESUME_FAILED = -1, /* Videocore resume failed */
5262 + VC_RESUME_IDLE = 0, /* VC suspended, no resume actions */
5263 + VC_RESUME_REQUESTED, /* User has requested resume */
5264 + VC_RESUME_IN_PROGRESS, /* Slot handler has received resume request */
5265 + VC_RESUME_RESUMED /* Videocore resumed successfully (active) */
5266 +};
5267 +
5268 +
5269 +enum USE_TYPE_E {
5270 + USE_TYPE_SERVICE,
5271 + USE_TYPE_SERVICE_NO_RESUME,
5272 + USE_TYPE_VCHIQ
5273 +};
5274 +
5275 +
5276 +
5277 +typedef struct vchiq_arm_state_struct {
5278 + /* Keepalive-related data */
5279 + struct task_struct *ka_thread;
5280 + struct completion ka_evt;
5281 + atomic_t ka_use_count;
5282 + atomic_t ka_use_ack_count;
5283 + atomic_t ka_release_count;
5284 +
5285 + struct completion vc_suspend_complete;
5286 + struct completion vc_resume_complete;
5287 +
5288 + rwlock_t susp_res_lock;
5289 + enum vc_suspend_status vc_suspend_state;
5290 + enum vc_resume_status vc_resume_state;
5291 +
5292 + unsigned int wake_address;
5293 +
5294 + struct timer_list suspend_timer;
5295 + int suspend_timer_timeout;
5296 + int suspend_timer_running;
5297 +
5298 + /* Global use count for videocore.
5299 + ** This is equal to the sum of the use counts for all services. When
5300 + ** this hits zero the videocore suspend procedure will be initiated.
5301 + */
5302 + int videocore_use_count;
5303 +
5304 + /* Use count to track requests from videocore peer.
5305 + ** This use count is not associated with a service, so needs to be
5306 + ** tracked separately with the state.
5307 + */
5308 + int peer_use_count;
5309 +
5310 + /* Flag to indicate whether resume is blocked. This happens when the
5311 + ** ARM is suspending
5312 + */
5313 + struct completion resume_blocker;
5314 + int resume_blocked;
5315 + struct completion blocked_blocker;
5316 + int blocked_count;
5317 +
5318 + int autosuspend_override;
5319 +
5320 + /* Flag to indicate that the first vchiq connect has made it through.
5321 + ** This means that both sides should be fully ready, and we should
5322 + ** be able to suspend after this point.
5323 + */
5324 + int first_connect;
5325 +
5326 + unsigned long long suspend_start_time;
5327 + unsigned long long sleep_start_time;
5328 + unsigned long long resume_start_time;
5329 + unsigned long long last_wake_time;
5330 +
5331 +} VCHIQ_ARM_STATE_T;
5332 +
5333 +extern int vchiq_arm_log_level;
5334 +extern int vchiq_susp_log_level;
5335 +
5336 +extern int __init
5337 +vchiq_platform_init(VCHIQ_STATE_T *state);
5338 +
5339 +extern void __exit
5340 +vchiq_platform_exit(VCHIQ_STATE_T *state);
5341 +
5342 +extern VCHIQ_STATE_T *
5343 +vchiq_get_state(void);
5344 +
5345 +extern VCHIQ_STATUS_T
5346 +vchiq_arm_vcsuspend(VCHIQ_STATE_T *state);
5347 +
5348 +extern VCHIQ_STATUS_T
5349 +vchiq_arm_force_suspend(VCHIQ_STATE_T *state);
5350 +
5351 +extern int
5352 +vchiq_arm_allow_resume(VCHIQ_STATE_T *state);
5353 +
5354 +extern VCHIQ_STATUS_T
5355 +vchiq_arm_vcresume(VCHIQ_STATE_T *state);
5356 +
5357 +extern VCHIQ_STATUS_T
5358 +vchiq_arm_init_state(VCHIQ_STATE_T *state, VCHIQ_ARM_STATE_T *arm_state);
5359 +
5360 +extern int
5361 +vchiq_check_resume(VCHIQ_STATE_T *state);
5362 +
5363 +extern void
5364 +vchiq_check_suspend(VCHIQ_STATE_T *state);
5365 +extern VCHIQ_STATUS_T
5366 +vchiq_use_service(VCHIQ_SERVICE_HANDLE_T handle);
5367 +
5368 +extern VCHIQ_STATUS_T
5369 +vchiq_release_service(VCHIQ_SERVICE_HANDLE_T handle);
5370 +
5371 +extern VCHIQ_STATUS_T
5372 +vchiq_check_service(VCHIQ_SERVICE_T *service);
5373 +
5374 +extern VCHIQ_STATUS_T
5375 +vchiq_platform_suspend(VCHIQ_STATE_T *state);
5376 +
5377 +extern int
5378 +vchiq_platform_videocore_wanted(VCHIQ_STATE_T *state);
5379 +
5380 +extern int
5381 +vchiq_platform_use_suspend_timer(void);
5382 +
5383 +extern void
5384 +vchiq_dump_platform_use_state(VCHIQ_STATE_T *state);
5385 +
5386 +extern void
5387 +vchiq_dump_service_use_state(VCHIQ_STATE_T *state);
5388 +
5389 +extern VCHIQ_ARM_STATE_T*
5390 +vchiq_platform_get_arm_state(VCHIQ_STATE_T *state);
5391 +
5392 +extern int
5393 +vchiq_videocore_wanted(VCHIQ_STATE_T *state);
5394 +
5395 +extern VCHIQ_STATUS_T
5396 +vchiq_use_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
5397 + enum USE_TYPE_E use_type);
5398 +extern VCHIQ_STATUS_T
5399 +vchiq_release_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service);
5400 +
5401 +extern VCHIQ_DEBUGFS_NODE_T *
5402 +vchiq_instance_get_debugfs_node(VCHIQ_INSTANCE_T instance);
5403 +
5404 +extern int
5405 +vchiq_instance_get_use_count(VCHIQ_INSTANCE_T instance);
5406 +
5407 +extern int
5408 +vchiq_instance_get_pid(VCHIQ_INSTANCE_T instance);
5409 +
5410 +extern int
5411 +vchiq_instance_get_trace(VCHIQ_INSTANCE_T instance);
5412 +
5413 +extern void
5414 +vchiq_instance_set_trace(VCHIQ_INSTANCE_T instance, int trace);
5415 +
5416 +extern void
5417 +set_suspend_state(VCHIQ_ARM_STATE_T *arm_state,
5418 + enum vc_suspend_status new_state);
5419 +
5420 +extern void
5421 +set_resume_state(VCHIQ_ARM_STATE_T *arm_state,
5422 + enum vc_resume_status new_state);
5423 +
5424 +extern void
5425 +start_suspend_timer(VCHIQ_ARM_STATE_T *arm_state);
5426 +
5427 +
5428 +#endif /* VCHIQ_ARM_H */
5429 diff --git a/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_build_info.h b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_build_info.h
5430 new file mode 100644
5431 index 0000000..df64581
5432 --- /dev/null
5433 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_build_info.h
5434 @@ -0,0 +1,37 @@
5435 +/**
5436 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
5437 + *
5438 + * Redistribution and use in source and binary forms, with or without
5439 + * modification, are permitted provided that the following conditions
5440 + * are met:
5441 + * 1. Redistributions of source code must retain the above copyright
5442 + * notice, this list of conditions, and the following disclaimer,
5443 + * without modification.
5444 + * 2. Redistributions in binary form must reproduce the above copyright
5445 + * notice, this list of conditions and the following disclaimer in the
5446 + * documentation and/or other materials provided with the distribution.
5447 + * 3. The names of the above-listed copyright holders may not be used
5448 + * to endorse or promote products derived from this software without
5449 + * specific prior written permission.
5450 + *
5451 + * ALTERNATIVELY, this software may be distributed under the terms of the
5452 + * GNU General Public License ("GPL") version 2, as published by the Free
5453 + * Software Foundation.
5454 + *
5455 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
5456 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
5457 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
5458 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
5459 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
5460 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
5461 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
5462 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
5463 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
5464 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
5465 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5466 + */
5467 +
5468 +const char *vchiq_get_build_hostname(void);
5469 +const char *vchiq_get_build_version(void);
5470 +const char *vchiq_get_build_time(void);
5471 +const char *vchiq_get_build_date(void);
5472 diff --git a/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_cfg.h b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_cfg.h
5473 new file mode 100644
5474 index 0000000..c382740
5475 --- /dev/null
5476 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_cfg.h
5477 @@ -0,0 +1,66 @@
5478 +/**
5479 + * Copyright (c) 2010-2014 Broadcom. All rights reserved.
5480 + *
5481 + * Redistribution and use in source and binary forms, with or without
5482 + * modification, are permitted provided that the following conditions
5483 + * are met:
5484 + * 1. Redistributions of source code must retain the above copyright
5485 + * notice, this list of conditions, and the following disclaimer,
5486 + * without modification.
5487 + * 2. Redistributions in binary form must reproduce the above copyright
5488 + * notice, this list of conditions and the following disclaimer in the
5489 + * documentation and/or other materials provided with the distribution.
5490 + * 3. The names of the above-listed copyright holders may not be used
5491 + * to endorse or promote products derived from this software without
5492 + * specific prior written permission.
5493 + *
5494 + * ALTERNATIVELY, this software may be distributed under the terms of the
5495 + * GNU General Public License ("GPL") version 2, as published by the Free
5496 + * Software Foundation.
5497 + *
5498 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
5499 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
5500 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
5501 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
5502 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
5503 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
5504 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
5505 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
5506 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
5507 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
5508 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5509 + */
5510 +
5511 +#ifndef VCHIQ_CFG_H
5512 +#define VCHIQ_CFG_H
5513 +
5514 +#define VCHIQ_MAGIC VCHIQ_MAKE_FOURCC('V', 'C', 'H', 'I')
5515 +/* The version of VCHIQ - change with any non-trivial change */
5516 +#define VCHIQ_VERSION 7
5517 +/* The minimum compatible version - update to match VCHIQ_VERSION with any
5518 +** incompatible change */
5519 +#define VCHIQ_VERSION_MIN 3
5520 +
5521 +/* The version that introduced the VCHIQ_IOC_LIB_VERSION ioctl */
5522 +#define VCHIQ_VERSION_LIB_VERSION 7
5523 +
5524 +/* The version that introduced the VCHIQ_IOC_CLOSE_DELIVERED ioctl */
5525 +#define VCHIQ_VERSION_CLOSE_DELIVERED 7
5526 +
5527 +#define VCHIQ_MAX_STATES 1
5528 +#define VCHIQ_MAX_SERVICES 4096
5529 +#define VCHIQ_MAX_SLOTS 128
5530 +#define VCHIQ_MAX_SLOTS_PER_SIDE 64
5531 +
5532 +#define VCHIQ_NUM_CURRENT_BULKS 32
5533 +#define VCHIQ_NUM_SERVICE_BULKS 4
5534 +
5535 +#ifndef VCHIQ_ENABLE_DEBUG
5536 +#define VCHIQ_ENABLE_DEBUG 1
5537 +#endif
5538 +
5539 +#ifndef VCHIQ_ENABLE_STATS
5540 +#define VCHIQ_ENABLE_STATS 1
5541 +#endif
5542 +
5543 +#endif /* VCHIQ_CFG_H */
5544 diff --git a/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.c b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.c
5545 new file mode 100644
5546 index 0000000..65f4b52
5547 --- /dev/null
5548 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.c
5549 @@ -0,0 +1,119 @@
5550 +/**
5551 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
5552 + *
5553 + * Redistribution and use in source and binary forms, with or without
5554 + * modification, are permitted provided that the following conditions
5555 + * are met:
5556 + * 1. Redistributions of source code must retain the above copyright
5557 + * notice, this list of conditions, and the following disclaimer,
5558 + * without modification.
5559 + * 2. Redistributions in binary form must reproduce the above copyright
5560 + * notice, this list of conditions and the following disclaimer in the
5561 + * documentation and/or other materials provided with the distribution.
5562 + * 3. The names of the above-listed copyright holders may not be used
5563 + * to endorse or promote products derived from this software without
5564 + * specific prior written permission.
5565 + *
5566 + * ALTERNATIVELY, this software may be distributed under the terms of the
5567 + * GNU General Public License ("GPL") version 2, as published by the Free
5568 + * Software Foundation.
5569 + *
5570 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
5571 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
5572 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
5573 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
5574 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
5575 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
5576 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
5577 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
5578 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
5579 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
5580 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5581 + */
5582 +
5583 +#include "vchiq_connected.h"
5584 +#include "vchiq_core.h"
5585 +#include <linux/module.h>
5586 +#include <linux/mutex.h>
5587 +
5588 +#define MAX_CALLBACKS 10
5589 +
5590 +static int g_connected;
5591 +static int g_num_deferred_callbacks;
5592 +static VCHIQ_CONNECTED_CALLBACK_T g_deferred_callback[MAX_CALLBACKS];
5593 +static int g_once_init;
5594 +static struct mutex g_connected_mutex;
5595 +
5596 +/****************************************************************************
5597 +*
5598 +* Function to initialize our lock.
5599 +*
5600 +***************************************************************************/
5601 +
5602 +static void connected_init(void)
5603 +{
5604 + if (!g_once_init) {
5605 + mutex_init(&g_connected_mutex);
5606 + g_once_init = 1;
5607 + }
5608 +}
5609 +
5610 +/****************************************************************************
5611 +*
5612 +* This function is used to defer initialization until the vchiq stack is
5613 +* initialized. If the stack is already initialized, then the callback will
5614 +* be made immediately, otherwise it will be deferred until
5615 +* vchiq_call_connected_callbacks is called.
5616 +*
5617 +***************************************************************************/
5618 +
5619 +void vchiq_add_connected_callback(VCHIQ_CONNECTED_CALLBACK_T callback)
5620 +{
5621 + connected_init();
5622 +
5623 + if (mutex_lock_interruptible(&g_connected_mutex) != 0)
5624 + return;
5625 +
5626 + if (g_connected)
5627 + /* We're already connected. Call the callback immediately. */
5628 +
5629 + callback();
5630 + else {
5631 + if (g_num_deferred_callbacks >= MAX_CALLBACKS)
5632 + vchiq_log_error(vchiq_core_log_level,
5633 + "There already %d callback registered - "
5634 + "please increase MAX_CALLBACKS",
5635 + g_num_deferred_callbacks);
5636 + else {
5637 + g_deferred_callback[g_num_deferred_callbacks] =
5638 + callback;
5639 + g_num_deferred_callbacks++;
5640 + }
5641 + }
5642 + mutex_unlock(&g_connected_mutex);
5643 +}
5644 +
5645 +/****************************************************************************
5646 +*
5647 +* This function is called by the vchiq stack once it has been connected to
5648 +* the videocore and clients can start to use the stack.
5649 +*
5650 +***************************************************************************/
5651 +
5652 +void vchiq_call_connected_callbacks(void)
5653 +{
5654 + int i;
5655 +
5656 + connected_init();
5657 +
5658 + if (mutex_lock_interruptible(&g_connected_mutex) != 0)
5659 + return;
5660 +
5661 + for (i = 0; i < g_num_deferred_callbacks; i++)
5662 + g_deferred_callback[i]();
5663 +
5664 + g_num_deferred_callbacks = 0;
5665 + g_connected = 1;
5666 + mutex_unlock(&g_connected_mutex);
5667 +}
5668 +EXPORT_SYMBOL(vchiq_add_connected_callback);
5669 diff --git a/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.h b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.h
5670 new file mode 100644
5671 index 0000000..863b3e3
5672 --- /dev/null
5673 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.h
5674 @@ -0,0 +1,50 @@
5675 +/**
5676 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
5677 + *
5678 + * Redistribution and use in source and binary forms, with or without
5679 + * modification, are permitted provided that the following conditions
5680 + * are met:
5681 + * 1. Redistributions of source code must retain the above copyright
5682 + * notice, this list of conditions, and the following disclaimer,
5683 + * without modification.
5684 + * 2. Redistributions in binary form must reproduce the above copyright
5685 + * notice, this list of conditions and the following disclaimer in the
5686 + * documentation and/or other materials provided with the distribution.
5687 + * 3. The names of the above-listed copyright holders may not be used
5688 + * to endorse or promote products derived from this software without
5689 + * specific prior written permission.
5690 + *
5691 + * ALTERNATIVELY, this software may be distributed under the terms of the
5692 + * GNU General Public License ("GPL") version 2, as published by the Free
5693 + * Software Foundation.
5694 + *
5695 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
5696 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
5697 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
5698 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
5699 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
5700 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
5701 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
5702 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
5703 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
5704 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
5705 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5706 + */
5707 +
5708 +#ifndef VCHIQ_CONNECTED_H
5709 +#define VCHIQ_CONNECTED_H
5710 +
5711 +/* ---- Include Files ----------------------------------------------------- */
5712 +
5713 +/* ---- Constants and Types ---------------------------------------------- */
5714 +
5715 +typedef void (*VCHIQ_CONNECTED_CALLBACK_T)(void);
5716 +
5717 +/* ---- Variable Externs ------------------------------------------------- */
5718 +
5719 +/* ---- Function Prototypes ---------------------------------------------- */
5720 +
5721 +void vchiq_add_connected_callback(VCHIQ_CONNECTED_CALLBACK_T callback);
5722 +void vchiq_call_connected_callbacks(void);
5723 +
5724 +#endif /* VCHIQ_CONNECTED_H */
5725 diff --git a/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.c b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.c
5726 new file mode 100644
5727 index 0000000..f962027
5728 --- /dev/null
5729 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.c
5730 @@ -0,0 +1,3861 @@
5731 +/**
5732 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
5733 + *
5734 + * Redistribution and use in source and binary forms, with or without
5735 + * modification, are permitted provided that the following conditions
5736 + * are met:
5737 + * 1. Redistributions of source code must retain the above copyright
5738 + * notice, this list of conditions, and the following disclaimer,
5739 + * without modification.
5740 + * 2. Redistributions in binary form must reproduce the above copyright
5741 + * notice, this list of conditions and the following disclaimer in the
5742 + * documentation and/or other materials provided with the distribution.
5743 + * 3. The names of the above-listed copyright holders may not be used
5744 + * to endorse or promote products derived from this software without
5745 + * specific prior written permission.
5746 + *
5747 + * ALTERNATIVELY, this software may be distributed under the terms of the
5748 + * GNU General Public License ("GPL") version 2, as published by the Free
5749 + * Software Foundation.
5750 + *
5751 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
5752 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
5753 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
5754 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
5755 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
5756 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
5757 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
5758 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
5759 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
5760 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
5761 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5762 + */
5763 +
5764 +#include "vchiq_core.h"
5765 +
5766 +#define VCHIQ_SLOT_HANDLER_STACK 8192
5767 +
5768 +#define HANDLE_STATE_SHIFT 12
5769 +
5770 +#define SLOT_INFO_FROM_INDEX(state, index) (state->slot_info + (index))
5771 +#define SLOT_DATA_FROM_INDEX(state, index) (state->slot_data + (index))
5772 +#define SLOT_INDEX_FROM_DATA(state, data) \
5773 + (((unsigned int)((char *)data - (char *)state->slot_data)) / \
5774 + VCHIQ_SLOT_SIZE)
5775 +#define SLOT_INDEX_FROM_INFO(state, info) \
5776 + ((unsigned int)(info - state->slot_info))
5777 +#define SLOT_QUEUE_INDEX_FROM_POS(pos) \
5778 + ((int)((unsigned int)(pos) / VCHIQ_SLOT_SIZE))
5779 +
5780 +#define BULK_INDEX(x) (x & (VCHIQ_NUM_SERVICE_BULKS - 1))
5781 +
5782 +#define SRVTRACE_LEVEL(srv) \
5783 + (((srv) && (srv)->trace) ? VCHIQ_LOG_TRACE : vchiq_core_msg_log_level)
5784 +#define SRVTRACE_ENABLED(srv, lev) \
5785 + (((srv) && (srv)->trace) || (vchiq_core_msg_log_level >= (lev)))
5786 +
5787 +struct vchiq_open_payload {
5788 + int fourcc;
5789 + int client_id;
5790 + short version;
5791 + short version_min;
5792 +};
5793 +
5794 +struct vchiq_openack_payload {
5795 + short version;
5796 +};
5797 +
5798 +/* we require this for consistency between endpoints */
5799 +vchiq_static_assert(sizeof(VCHIQ_HEADER_T) == 8);
5800 +vchiq_static_assert(IS_POW2(sizeof(VCHIQ_HEADER_T)));
5801 +vchiq_static_assert(IS_POW2(VCHIQ_NUM_CURRENT_BULKS));
5802 +vchiq_static_assert(IS_POW2(VCHIQ_NUM_SERVICE_BULKS));
5803 +vchiq_static_assert(IS_POW2(VCHIQ_MAX_SERVICES));
5804 +vchiq_static_assert(VCHIQ_VERSION >= VCHIQ_VERSION_MIN);
5805 +
5806 +/* Run time control of log level, based on KERN_XXX level. */
5807 +int vchiq_core_log_level = VCHIQ_LOG_DEFAULT;
5808 +int vchiq_core_msg_log_level = VCHIQ_LOG_DEFAULT;
5809 +int vchiq_sync_log_level = VCHIQ_LOG_DEFAULT;
5810 +
5811 +static atomic_t pause_bulks_count = ATOMIC_INIT(0);
5812 +
5813 +static DEFINE_SPINLOCK(service_spinlock);
5814 +DEFINE_SPINLOCK(bulk_waiter_spinlock);
5815 +DEFINE_SPINLOCK(quota_spinlock);
5816 +
5817 +VCHIQ_STATE_T *vchiq_states[VCHIQ_MAX_STATES];
5818 +static unsigned int handle_seq;
5819 +
5820 +static const char *const srvstate_names[] = {
5821 + "FREE",
5822 + "HIDDEN",
5823 + "LISTENING",
5824 + "OPENING",
5825 + "OPEN",
5826 + "OPENSYNC",
5827 + "CLOSESENT",
5828 + "CLOSERECVD",
5829 + "CLOSEWAIT",
5830 + "CLOSED"
5831 +};
5832 +
5833 +static const char *const reason_names[] = {
5834 + "SERVICE_OPENED",
5835 + "SERVICE_CLOSED",
5836 + "MESSAGE_AVAILABLE",
5837 + "BULK_TRANSMIT_DONE",
5838 + "BULK_RECEIVE_DONE",
5839 + "BULK_TRANSMIT_ABORTED",
5840 + "BULK_RECEIVE_ABORTED"
5841 +};
5842 +
5843 +static const char *const conn_state_names[] = {
5844 + "DISCONNECTED",
5845 + "CONNECTING",
5846 + "CONNECTED",
5847 + "PAUSING",
5848 + "PAUSE_SENT",
5849 + "PAUSED",
5850 + "RESUMING",
5851 + "PAUSE_TIMEOUT",
5852 + "RESUME_TIMEOUT"
5853 +};
5854 +
5855 +
5856 +static void
5857 +release_message_sync(VCHIQ_STATE_T *state, VCHIQ_HEADER_T *header);
5858 +
5859 +static const char *msg_type_str(unsigned int msg_type)
5860 +{
5861 + switch (msg_type) {
5862 + case VCHIQ_MSG_PADDING: return "PADDING";
5863 + case VCHIQ_MSG_CONNECT: return "CONNECT";
5864 + case VCHIQ_MSG_OPEN: return "OPEN";
5865 + case VCHIQ_MSG_OPENACK: return "OPENACK";
5866 + case VCHIQ_MSG_CLOSE: return "CLOSE";
5867 + case VCHIQ_MSG_DATA: return "DATA";
5868 + case VCHIQ_MSG_BULK_RX: return "BULK_RX";
5869 + case VCHIQ_MSG_BULK_TX: return "BULK_TX";
5870 + case VCHIQ_MSG_BULK_RX_DONE: return "BULK_RX_DONE";
5871 + case VCHIQ_MSG_BULK_TX_DONE: return "BULK_TX_DONE";
5872 + case VCHIQ_MSG_PAUSE: return "PAUSE";
5873 + case VCHIQ_MSG_RESUME: return "RESUME";
5874 + case VCHIQ_MSG_REMOTE_USE: return "REMOTE_USE";
5875 + case VCHIQ_MSG_REMOTE_RELEASE: return "REMOTE_RELEASE";
5876 + case VCHIQ_MSG_REMOTE_USE_ACTIVE: return "REMOTE_USE_ACTIVE";
5877 + }
5878 + return "???";
5879 +}
5880 +
5881 +static inline void
5882 +vchiq_set_service_state(VCHIQ_SERVICE_T *service, int newstate)
5883 +{
5884 + vchiq_log_info(vchiq_core_log_level, "%d: srv:%d %s->%s",
5885 + service->state->id, service->localport,
5886 + srvstate_names[service->srvstate],
5887 + srvstate_names[newstate]);
5888 + service->srvstate = newstate;
5889 +}
5890 +
5891 +VCHIQ_SERVICE_T *
5892 +find_service_by_handle(VCHIQ_SERVICE_HANDLE_T handle)
5893 +{
5894 + VCHIQ_SERVICE_T *service;
5895 +
5896 + spin_lock(&service_spinlock);
5897 + service = handle_to_service(handle);
5898 + if (service && (service->srvstate != VCHIQ_SRVSTATE_FREE) &&
5899 + (service->handle == handle)) {
5900 + BUG_ON(service->ref_count == 0);
5901 + service->ref_count++;
5902 + } else
5903 + service = NULL;
5904 + spin_unlock(&service_spinlock);
5905 +
5906 + if (!service)
5907 + vchiq_log_info(vchiq_core_log_level,
5908 + "Invalid service handle 0x%x", handle);
5909 +
5910 + return service;
5911 +}
5912 +
5913 +VCHIQ_SERVICE_T *
5914 +find_service_by_port(VCHIQ_STATE_T *state, int localport)
5915 +{
5916 + VCHIQ_SERVICE_T *service = NULL;
5917 + if ((unsigned int)localport <= VCHIQ_PORT_MAX) {
5918 + spin_lock(&service_spinlock);
5919 + service = state->services[localport];
5920 + if (service && (service->srvstate != VCHIQ_SRVSTATE_FREE)) {
5921 + BUG_ON(service->ref_count == 0);
5922 + service->ref_count++;
5923 + } else
5924 + service = NULL;
5925 + spin_unlock(&service_spinlock);
5926 + }
5927 +
5928 + if (!service)
5929 + vchiq_log_info(vchiq_core_log_level,
5930 + "Invalid port %d", localport);
5931 +
5932 + return service;
5933 +}
5934 +
5935 +VCHIQ_SERVICE_T *
5936 +find_service_for_instance(VCHIQ_INSTANCE_T instance,
5937 + VCHIQ_SERVICE_HANDLE_T handle) {
5938 + VCHIQ_SERVICE_T *service;
5939 +
5940 + spin_lock(&service_spinlock);
5941 + service = handle_to_service(handle);
5942 + if (service && (service->srvstate != VCHIQ_SRVSTATE_FREE) &&
5943 + (service->handle == handle) &&
5944 + (service->instance == instance)) {
5945 + BUG_ON(service->ref_count == 0);
5946 + service->ref_count++;
5947 + } else
5948 + service = NULL;
5949 + spin_unlock(&service_spinlock);
5950 +
5951 + if (!service)
5952 + vchiq_log_info(vchiq_core_log_level,
5953 + "Invalid service handle 0x%x", handle);
5954 +
5955 + return service;
5956 +}
5957 +
5958 +VCHIQ_SERVICE_T *
5959 +find_closed_service_for_instance(VCHIQ_INSTANCE_T instance,
5960 + VCHIQ_SERVICE_HANDLE_T handle) {
5961 + VCHIQ_SERVICE_T *service;
5962 +
5963 + spin_lock(&service_spinlock);
5964 + service = handle_to_service(handle);
5965 + if (service &&
5966 + ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
5967 + (service->srvstate == VCHIQ_SRVSTATE_CLOSED)) &&
5968 + (service->handle == handle) &&
5969 + (service->instance == instance)) {
5970 + BUG_ON(service->ref_count == 0);
5971 + service->ref_count++;
5972 + } else
5973 + service = NULL;
5974 + spin_unlock(&service_spinlock);
5975 +
5976 + if (!service)
5977 + vchiq_log_info(vchiq_core_log_level,
5978 + "Invalid service handle 0x%x", handle);
5979 +
5980 + return service;
5981 +}
5982 +
5983 +VCHIQ_SERVICE_T *
5984 +next_service_by_instance(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance,
5985 + int *pidx)
5986 +{
5987 + VCHIQ_SERVICE_T *service = NULL;
5988 + int idx = *pidx;
5989 +
5990 + spin_lock(&service_spinlock);
5991 + while (idx < state->unused_service) {
5992 + VCHIQ_SERVICE_T *srv = state->services[idx++];
5993 + if (srv && (srv->srvstate != VCHIQ_SRVSTATE_FREE) &&
5994 + (srv->instance == instance)) {
5995 + service = srv;
5996 + BUG_ON(service->ref_count == 0);
5997 + service->ref_count++;
5998 + break;
5999 + }
6000 + }
6001 + spin_unlock(&service_spinlock);
6002 +
6003 + *pidx = idx;
6004 +
6005 + return service;
6006 +}
6007 +
6008 +void
6009 +lock_service(VCHIQ_SERVICE_T *service)
6010 +{
6011 + spin_lock(&service_spinlock);
6012 + BUG_ON(!service || (service->ref_count == 0));
6013 + if (service)
6014 + service->ref_count++;
6015 + spin_unlock(&service_spinlock);
6016 +}
6017 +
6018 +void
6019 +unlock_service(VCHIQ_SERVICE_T *service)
6020 +{
6021 + VCHIQ_STATE_T *state = service->state;
6022 + spin_lock(&service_spinlock);
6023 + BUG_ON(!service || (service->ref_count == 0));
6024 + if (service && service->ref_count) {
6025 + service->ref_count--;
6026 + if (!service->ref_count) {
6027 + BUG_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);
6028 + state->services[service->localport] = NULL;
6029 + } else
6030 + service = NULL;
6031 + }
6032 + spin_unlock(&service_spinlock);
6033 +
6034 + if (service && service->userdata_term)
6035 + service->userdata_term(service->base.userdata);
6036 +
6037 + kfree(service);
6038 +}
6039 +
6040 +int
6041 +vchiq_get_client_id(VCHIQ_SERVICE_HANDLE_T handle)
6042 +{
6043 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
6044 + int id;
6045 +
6046 + id = service ? service->client_id : 0;
6047 + if (service)
6048 + unlock_service(service);
6049 +
6050 + return id;
6051 +}
6052 +
6053 +void *
6054 +vchiq_get_service_userdata(VCHIQ_SERVICE_HANDLE_T handle)
6055 +{
6056 + VCHIQ_SERVICE_T *service = handle_to_service(handle);
6057 +
6058 + return service ? service->base.userdata : NULL;
6059 +}
6060 +
6061 +int
6062 +vchiq_get_service_fourcc(VCHIQ_SERVICE_HANDLE_T handle)
6063 +{
6064 + VCHIQ_SERVICE_T *service = handle_to_service(handle);
6065 +
6066 + return service ? service->base.fourcc : 0;
6067 +}
6068 +
6069 +static void
6070 +mark_service_closing_internal(VCHIQ_SERVICE_T *service, int sh_thread)
6071 +{
6072 + VCHIQ_STATE_T *state = service->state;
6073 + VCHIQ_SERVICE_QUOTA_T *service_quota;
6074 +
6075 + service->closing = 1;
6076 +
6077 + /* Synchronise with other threads. */
6078 + mutex_lock(&state->recycle_mutex);
6079 + mutex_unlock(&state->recycle_mutex);
6080 + if (!sh_thread || (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT)) {
6081 + /* If we're pausing then the slot_mutex is held until resume
6082 + * by the slot handler. Therefore don't try to acquire this
6083 + * mutex if we're the slot handler and in the pause sent state.
6084 + * We don't need to in this case anyway. */
6085 + mutex_lock(&state->slot_mutex);
6086 + mutex_unlock(&state->slot_mutex);
6087 + }
6088 +
6089 + /* Unblock any sending thread. */
6090 + service_quota = &state->service_quotas[service->localport];
6091 + up(&service_quota->quota_event);
6092 +}
6093 +
6094 +static void
6095 +mark_service_closing(VCHIQ_SERVICE_T *service)
6096 +{
6097 + mark_service_closing_internal(service, 0);
6098 +}
6099 +
6100 +static inline VCHIQ_STATUS_T
6101 +make_service_callback(VCHIQ_SERVICE_T *service, VCHIQ_REASON_T reason,
6102 + VCHIQ_HEADER_T *header, void *bulk_userdata)
6103 +{
6104 + VCHIQ_STATUS_T status;
6105 + vchiq_log_trace(vchiq_core_log_level, "%d: callback:%d (%s, %x, %x)",
6106 + service->state->id, service->localport, reason_names[reason],
6107 + (unsigned int)header, (unsigned int)bulk_userdata);
6108 + status = service->base.callback(reason, header, service->handle,
6109 + bulk_userdata);
6110 + if (status == VCHIQ_ERROR) {
6111 + vchiq_log_warning(vchiq_core_log_level,
6112 + "%d: ignoring ERROR from callback to service %x",
6113 + service->state->id, service->handle);
6114 + status = VCHIQ_SUCCESS;
6115 + }
6116 + return status;
6117 +}
6118 +
6119 +inline void
6120 +vchiq_set_conn_state(VCHIQ_STATE_T *state, VCHIQ_CONNSTATE_T newstate)
6121 +{
6122 + VCHIQ_CONNSTATE_T oldstate = state->conn_state;
6123 + vchiq_log_info(vchiq_core_log_level, "%d: %s->%s", state->id,
6124 + conn_state_names[oldstate],
6125 + conn_state_names[newstate]);
6126 + state->conn_state = newstate;
6127 + vchiq_platform_conn_state_changed(state, oldstate, newstate);
6128 +}
6129 +
6130 +static inline void
6131 +remote_event_create(REMOTE_EVENT_T *event)
6132 +{
6133 + event->armed = 0;
6134 + /* Don't clear the 'fired' flag because it may already have been set
6135 + ** by the other side. */
6136 + sema_init(event->event, 0);
6137 +}
6138 +
6139 +static inline void
6140 +remote_event_destroy(REMOTE_EVENT_T *event)
6141 +{
6142 + (void)event;
6143 +}
6144 +
6145 +static inline int
6146 +remote_event_wait(REMOTE_EVENT_T *event)
6147 +{
6148 + if (!event->fired) {
6149 + event->armed = 1;
6150 + dsb();
6151 + if (!event->fired) {
6152 + if (down_interruptible(event->event) != 0) {
6153 + event->armed = 0;
6154 + return 0;
6155 + }
6156 + }
6157 + event->armed = 0;
6158 + wmb();
6159 + }
6160 +
6161 + event->fired = 0;
6162 + return 1;
6163 +}
6164 +
6165 +static inline void
6166 +remote_event_signal_local(REMOTE_EVENT_T *event)
6167 +{
6168 + event->armed = 0;
6169 + up(event->event);
6170 +}
6171 +
6172 +static inline void
6173 +remote_event_poll(REMOTE_EVENT_T *event)
6174 +{
6175 + if (event->fired && event->armed)
6176 + remote_event_signal_local(event);
6177 +}
6178 +
6179 +void
6180 +remote_event_pollall(VCHIQ_STATE_T *state)
6181 +{
6182 + remote_event_poll(&state->local->sync_trigger);
6183 + remote_event_poll(&state->local->sync_release);
6184 + remote_event_poll(&state->local->trigger);
6185 + remote_event_poll(&state->local->recycle);
6186 +}
6187 +
6188 +/* Round up message sizes so that any space at the end of a slot is always big
6189 +** enough for a header. This relies on header size being a power of two, which
6190 +** has been verified earlier by a static assertion. */
6191 +
6192 +static inline unsigned int
6193 +calc_stride(unsigned int size)
6194 +{
6195 + /* Allow room for the header */
6196 + size += sizeof(VCHIQ_HEADER_T);
6197 +
6198 + /* Round up */
6199 + return (size + sizeof(VCHIQ_HEADER_T) - 1) & ~(sizeof(VCHIQ_HEADER_T)
6200 + - 1);
6201 +}
6202 +
6203 +/* Called by the slot handler thread */
6204 +static VCHIQ_SERVICE_T *
6205 +get_listening_service(VCHIQ_STATE_T *state, int fourcc)
6206 +{
6207 + int i;
6208 +
6209 + WARN_ON(fourcc == VCHIQ_FOURCC_INVALID);
6210 +
6211 + for (i = 0; i < state->unused_service; i++) {
6212 + VCHIQ_SERVICE_T *service = state->services[i];
6213 + if (service &&
6214 + (service->public_fourcc == fourcc) &&
6215 + ((service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
6216 + ((service->srvstate == VCHIQ_SRVSTATE_OPEN) &&
6217 + (service->remoteport == VCHIQ_PORT_FREE)))) {
6218 + lock_service(service);
6219 + return service;
6220 + }
6221 + }
6222 +
6223 + return NULL;
6224 +}
6225 +
6226 +/* Called by the slot handler thread */
6227 +static VCHIQ_SERVICE_T *
6228 +get_connected_service(VCHIQ_STATE_T *state, unsigned int port)
6229 +{
6230 + int i;
6231 + for (i = 0; i < state->unused_service; i++) {
6232 + VCHIQ_SERVICE_T *service = state->services[i];
6233 + if (service && (service->srvstate == VCHIQ_SRVSTATE_OPEN)
6234 + && (service->remoteport == port)) {
6235 + lock_service(service);
6236 + return service;
6237 + }
6238 + }
6239 + return NULL;
6240 +}
6241 +
6242 +inline void
6243 +request_poll(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service, int poll_type)
6244 +{
6245 + uint32_t value;
6246 +
6247 + if (service) {
6248 + do {
6249 + value = atomic_read(&service->poll_flags);
6250 + } while (atomic_cmpxchg(&service->poll_flags, value,
6251 + value | (1 << poll_type)) != value);
6252 +
6253 + do {
6254 + value = atomic_read(&state->poll_services[
6255 + service->localport>>5]);
6256 + } while (atomic_cmpxchg(
6257 + &state->poll_services[service->localport>>5],
6258 + value, value | (1 << (service->localport & 0x1f)))
6259 + != value);
6260 + }
6261 +
6262 + state->poll_needed = 1;
6263 + wmb();
6264 +
6265 + /* ... and ensure the slot handler runs. */
6266 + remote_event_signal_local(&state->local->trigger);
6267 +}
6268 +
6269 +/* Called from queue_message, by the slot handler and application threads,
6270 +** with slot_mutex held */
6271 +static VCHIQ_HEADER_T *
6272 +reserve_space(VCHIQ_STATE_T *state, int space, int is_blocking)
6273 +{
6274 + VCHIQ_SHARED_STATE_T *local = state->local;
6275 + int tx_pos = state->local_tx_pos;
6276 + int slot_space = VCHIQ_SLOT_SIZE - (tx_pos & VCHIQ_SLOT_MASK);
6277 +
6278 + if (space > slot_space) {
6279 + VCHIQ_HEADER_T *header;
6280 + /* Fill the remaining space with padding */
6281 + WARN_ON(state->tx_data == NULL);
6282 + header = (VCHIQ_HEADER_T *)
6283 + (state->tx_data + (tx_pos & VCHIQ_SLOT_MASK));
6284 + header->msgid = VCHIQ_MSGID_PADDING;
6285 + header->size = slot_space - sizeof(VCHIQ_HEADER_T);
6286 +
6287 + tx_pos += slot_space;
6288 + }
6289 +
6290 + /* If necessary, get the next slot. */
6291 + if ((tx_pos & VCHIQ_SLOT_MASK) == 0) {
6292 + int slot_index;
6293 +
6294 + /* If there is no free slot... */
6295 +
6296 + if (down_trylock(&state->slot_available_event) != 0) {
6297 + /* ...wait for one. */
6298 +
6299 + VCHIQ_STATS_INC(state, slot_stalls);
6300 +
6301 + /* But first, flush through the last slot. */
6302 + state->local_tx_pos = tx_pos;
6303 + local->tx_pos = tx_pos;
6304 + remote_event_signal(&state->remote->trigger);
6305 +
6306 + if (!is_blocking ||
6307 + (down_interruptible(
6308 + &state->slot_available_event) != 0))
6309 + return NULL; /* No space available */
6310 + }
6311 +
6312 + BUG_ON(tx_pos ==
6313 + (state->slot_queue_available * VCHIQ_SLOT_SIZE));
6314 +
6315 + slot_index = local->slot_queue[
6316 + SLOT_QUEUE_INDEX_FROM_POS(tx_pos) &
6317 + VCHIQ_SLOT_QUEUE_MASK];
6318 + state->tx_data =
6319 + (char *)SLOT_DATA_FROM_INDEX(state, slot_index);
6320 + }
6321 +
6322 + state->local_tx_pos = tx_pos + space;
6323 +
6324 + return (VCHIQ_HEADER_T *)(state->tx_data + (tx_pos & VCHIQ_SLOT_MASK));
6325 +}
6326 +
6327 +/* Called by the recycle thread. */
6328 +static void
6329 +process_free_queue(VCHIQ_STATE_T *state)
6330 +{
6331 + VCHIQ_SHARED_STATE_T *local = state->local;
6332 + BITSET_T service_found[BITSET_SIZE(VCHIQ_MAX_SERVICES)];
6333 + int slot_queue_available;
6334 +
6335 + /* Use a read memory barrier to ensure that any state that may have
6336 + ** been modified by another thread is not masked by stale prefetched
6337 + ** values. */
6338 + rmb();
6339 +
6340 + /* Find slots which have been freed by the other side, and return them
6341 + ** to the available queue. */
6342 + slot_queue_available = state->slot_queue_available;
6343 +
6344 + while (slot_queue_available != local->slot_queue_recycle) {
6345 + unsigned int pos;
6346 + int slot_index = local->slot_queue[slot_queue_available++ &
6347 + VCHIQ_SLOT_QUEUE_MASK];
6348 + char *data = (char *)SLOT_DATA_FROM_INDEX(state, slot_index);
6349 + int data_found = 0;
6350 +
6351 + vchiq_log_trace(vchiq_core_log_level, "%d: pfq %d=%x %x %x",
6352 + state->id, slot_index, (unsigned int)data,
6353 + local->slot_queue_recycle, slot_queue_available);
6354 +
6355 + /* Initialise the bitmask for services which have used this
6356 + ** slot */
6357 + BITSET_ZERO(service_found);
6358 +
6359 + pos = 0;
6360 +
6361 + while (pos < VCHIQ_SLOT_SIZE) {
6362 + VCHIQ_HEADER_T *header =
6363 + (VCHIQ_HEADER_T *)(data + pos);
6364 + int msgid = header->msgid;
6365 + if (VCHIQ_MSG_TYPE(msgid) == VCHIQ_MSG_DATA) {
6366 + int port = VCHIQ_MSG_SRCPORT(msgid);
6367 + VCHIQ_SERVICE_QUOTA_T *service_quota =
6368 + &state->service_quotas[port];
6369 + int count;
6370 + spin_lock(&quota_spinlock);
6371 + count = service_quota->message_use_count;
6372 + if (count > 0)
6373 + service_quota->message_use_count =
6374 + count - 1;
6375 + spin_unlock(&quota_spinlock);
6376 +
6377 + if (count == service_quota->message_quota)
6378 + /* Signal the service that it
6379 + ** has dropped below its quota
6380 + */
6381 + up(&service_quota->quota_event);
6382 + else if (count == 0) {
6383 + vchiq_log_error(vchiq_core_log_level,
6384 + "service %d "
6385 + "message_use_count=%d "
6386 + "(header %x, msgid %x, "
6387 + "header->msgid %x, "
6388 + "header->size %x)",
6389 + port,
6390 + service_quota->
6391 + message_use_count,
6392 + (unsigned int)header, msgid,
6393 + header->msgid,
6394 + header->size);
6395 + WARN(1, "invalid message use count\n");
6396 + }
6397 + if (!BITSET_IS_SET(service_found, port)) {
6398 + /* Set the found bit for this service */
6399 + BITSET_SET(service_found, port);
6400 +
6401 + spin_lock(&quota_spinlock);
6402 + count = service_quota->slot_use_count;
6403 + if (count > 0)
6404 + service_quota->slot_use_count =
6405 + count - 1;
6406 + spin_unlock(&quota_spinlock);
6407 +
6408 + if (count > 0) {
6409 + /* Signal the service in case
6410 + ** it has dropped below its
6411 + ** quota */
6412 + up(&service_quota->quota_event);
6413 + vchiq_log_trace(
6414 + vchiq_core_log_level,
6415 + "%d: pfq:%d %x@%x - "
6416 + "slot_use->%d",
6417 + state->id, port,
6418 + header->size,
6419 + (unsigned int)header,
6420 + count - 1);
6421 + } else {
6422 + vchiq_log_error(
6423 + vchiq_core_log_level,
6424 + "service %d "
6425 + "slot_use_count"
6426 + "=%d (header %x"
6427 + ", msgid %x, "
6428 + "header->msgid"
6429 + " %x, header->"
6430 + "size %x)",
6431 + port, count,
6432 + (unsigned int)header,
6433 + msgid,
6434 + header->msgid,
6435 + header->size);
6436 + WARN(1, "bad slot use count\n");
6437 + }
6438 + }
6439 +
6440 + data_found = 1;
6441 + }
6442 +
6443 + pos += calc_stride(header->size);
6444 + if (pos > VCHIQ_SLOT_SIZE) {
6445 + vchiq_log_error(vchiq_core_log_level,
6446 + "pfq - pos %x: header %x, msgid %x, "
6447 + "header->msgid %x, header->size %x",
6448 + pos, (unsigned int)header, msgid,
6449 + header->msgid, header->size);
6450 + WARN(1, "invalid slot position\n");
6451 + }
6452 + }
6453 +
6454 + if (data_found) {
6455 + int count;
6456 + spin_lock(&quota_spinlock);
6457 + count = state->data_use_count;
6458 + if (count > 0)
6459 + state->data_use_count =
6460 + count - 1;
6461 + spin_unlock(&quota_spinlock);
6462 + if (count == state->data_quota)
6463 + up(&state->data_quota_event);
6464 + }
6465 +
6466 + state->slot_queue_available = slot_queue_available;
6467 + up(&state->slot_available_event);
6468 + }
6469 +}
6470 +
6471 +/* Called by the slot handler and application threads */
6472 +static VCHIQ_STATUS_T
6473 +queue_message(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
6474 + int msgid, const VCHIQ_ELEMENT_T *elements,
6475 + int count, int size, int is_blocking)
6476 +{
6477 + VCHIQ_SHARED_STATE_T *local;
6478 + VCHIQ_SERVICE_QUOTA_T *service_quota = NULL;
6479 + VCHIQ_HEADER_T *header;
6480 + int type = VCHIQ_MSG_TYPE(msgid);
6481 +
6482 + unsigned int stride;
6483 +
6484 + local = state->local;
6485 +
6486 + stride = calc_stride(size);
6487 +
6488 + WARN_ON(!(stride <= VCHIQ_SLOT_SIZE));
6489 +
6490 + if ((type != VCHIQ_MSG_RESUME) &&
6491 + (mutex_lock_interruptible(&state->slot_mutex) != 0))
6492 + return VCHIQ_RETRY;
6493 +
6494 + if (type == VCHIQ_MSG_DATA) {
6495 + int tx_end_index;
6496 +
6497 + BUG_ON(!service);
6498 +
6499 + if (service->closing) {
6500 + /* The service has been closed */
6501 + mutex_unlock(&state->slot_mutex);
6502 + return VCHIQ_ERROR;
6503 + }
6504 +
6505 + service_quota = &state->service_quotas[service->localport];
6506 +
6507 + spin_lock(&quota_spinlock);
6508 +
6509 + /* Ensure this service doesn't use more than its quota of
6510 + ** messages or slots */
6511 + tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
6512 + state->local_tx_pos + stride - 1);
6513 +
6514 + /* Ensure data messages don't use more than their quota of
6515 + ** slots */
6516 + while ((tx_end_index != state->previous_data_index) &&
6517 + (state->data_use_count == state->data_quota)) {
6518 + VCHIQ_STATS_INC(state, data_stalls);
6519 + spin_unlock(&quota_spinlock);
6520 + mutex_unlock(&state->slot_mutex);
6521 +
6522 + if (down_interruptible(&state->data_quota_event)
6523 + != 0)
6524 + return VCHIQ_RETRY;
6525 +
6526 + mutex_lock(&state->slot_mutex);
6527 + spin_lock(&quota_spinlock);
6528 + tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
6529 + state->local_tx_pos + stride - 1);
6530 + if ((tx_end_index == state->previous_data_index) ||
6531 + (state->data_use_count < state->data_quota)) {
6532 + /* Pass the signal on to other waiters */
6533 + up(&state->data_quota_event);
6534 + break;
6535 + }
6536 + }
6537 +
6538 + while ((service_quota->message_use_count ==
6539 + service_quota->message_quota) ||
6540 + ((tx_end_index != service_quota->previous_tx_index) &&
6541 + (service_quota->slot_use_count ==
6542 + service_quota->slot_quota))) {
6543 + spin_unlock(&quota_spinlock);
6544 + vchiq_log_trace(vchiq_core_log_level,
6545 + "%d: qm:%d %s,%x - quota stall "
6546 + "(msg %d, slot %d)",
6547 + state->id, service->localport,
6548 + msg_type_str(type), size,
6549 + service_quota->message_use_count,
6550 + service_quota->slot_use_count);
6551 + VCHIQ_SERVICE_STATS_INC(service, quota_stalls);
6552 + mutex_unlock(&state->slot_mutex);
6553 + if (down_interruptible(&service_quota->quota_event)
6554 + != 0)
6555 + return VCHIQ_RETRY;
6556 + if (service->closing)
6557 + return VCHIQ_ERROR;
6558 + if (mutex_lock_interruptible(&state->slot_mutex) != 0)
6559 + return VCHIQ_RETRY;
6560 + if (service->srvstate != VCHIQ_SRVSTATE_OPEN) {
6561 + /* The service has been closed */
6562 + mutex_unlock(&state->slot_mutex);
6563 + return VCHIQ_ERROR;
6564 + }
6565 + spin_lock(&quota_spinlock);
6566 + tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
6567 + state->local_tx_pos + stride - 1);
6568 + }
6569 +
6570 + spin_unlock(&quota_spinlock);
6571 + }
6572 +
6573 + header = reserve_space(state, stride, is_blocking);
6574 +
6575 + if (!header) {
6576 + if (service)
6577 + VCHIQ_SERVICE_STATS_INC(service, slot_stalls);
6578 + mutex_unlock(&state->slot_mutex);
6579 + return VCHIQ_RETRY;
6580 + }
6581 +
6582 + if (type == VCHIQ_MSG_DATA) {
6583 + int i, pos;
6584 + int tx_end_index;
6585 + int slot_use_count;
6586 +
6587 + vchiq_log_info(vchiq_core_log_level,
6588 + "%d: qm %s@%x,%x (%d->%d)",
6589 + state->id,
6590 + msg_type_str(VCHIQ_MSG_TYPE(msgid)),
6591 + (unsigned int)header, size,
6592 + VCHIQ_MSG_SRCPORT(msgid),
6593 + VCHIQ_MSG_DSTPORT(msgid));
6594 +
6595 + BUG_ON(!service);
6596 +
6597 + for (i = 0, pos = 0; i < (unsigned int)count;
6598 + pos += elements[i++].size)
6599 + if (elements[i].size) {
6600 + if (vchiq_copy_from_user
6601 + (header->data + pos, elements[i].data,
6602 + (size_t) elements[i].size) !=
6603 + VCHIQ_SUCCESS) {
6604 + mutex_unlock(&state->slot_mutex);
6605 + VCHIQ_SERVICE_STATS_INC(service,
6606 + error_count);
6607 + return VCHIQ_ERROR;
6608 + }
6609 + if (i == 0) {
6610 + if (SRVTRACE_ENABLED(service,
6611 + VCHIQ_LOG_INFO))
6612 + vchiq_log_dump_mem("Sent", 0,
6613 + header->data + pos,
6614 + min(64u,
6615 + elements[0].size));
6616 + }
6617 + }
6618 +
6619 + spin_lock(&quota_spinlock);
6620 + service_quota->message_use_count++;
6621 +
6622 + tx_end_index =
6623 + SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos - 1);
6624 +
6625 + /* If this transmission can't fit in the last slot used by any
6626 + ** service, the data_use_count must be increased. */
6627 + if (tx_end_index != state->previous_data_index) {
6628 + state->previous_data_index = tx_end_index;
6629 + state->data_use_count++;
6630 + }
6631 +
6632 + /* If this isn't the same slot last used by this service,
6633 + ** the service's slot_use_count must be increased. */
6634 + if (tx_end_index != service_quota->previous_tx_index) {
6635 + service_quota->previous_tx_index = tx_end_index;
6636 + slot_use_count = ++service_quota->slot_use_count;
6637 + } else {
6638 + slot_use_count = 0;
6639 + }
6640 +
6641 + spin_unlock(&quota_spinlock);
6642 +
6643 + if (slot_use_count)
6644 + vchiq_log_trace(vchiq_core_log_level,
6645 + "%d: qm:%d %s,%x - slot_use->%d (hdr %p)",
6646 + state->id, service->localport,
6647 + msg_type_str(VCHIQ_MSG_TYPE(msgid)), size,
6648 + slot_use_count, header);
6649 +
6650 + VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
6651 + VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
6652 + } else {
6653 + vchiq_log_info(vchiq_core_log_level,
6654 + "%d: qm %s@%x,%x (%d->%d)", state->id,
6655 + msg_type_str(VCHIQ_MSG_TYPE(msgid)),
6656 + (unsigned int)header, size,
6657 + VCHIQ_MSG_SRCPORT(msgid),
6658 + VCHIQ_MSG_DSTPORT(msgid));
6659 + if (size != 0) {
6660 + WARN_ON(!((count == 1) && (size == elements[0].size)));
6661 + memcpy(header->data, elements[0].data,
6662 + elements[0].size);
6663 + }
6664 + VCHIQ_STATS_INC(state, ctrl_tx_count);
6665 + }
6666 +
6667 + header->msgid = msgid;
6668 + header->size = size;
6669 +
6670 + {
6671 + int svc_fourcc;
6672 +
6673 + svc_fourcc = service
6674 + ? service->base.fourcc
6675 + : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
6676 +
6677 + vchiq_log_info(SRVTRACE_LEVEL(service),
6678 + "Sent Msg %s(%u) to %c%c%c%c s:%u d:%d len:%d",
6679 + msg_type_str(VCHIQ_MSG_TYPE(msgid)),
6680 + VCHIQ_MSG_TYPE(msgid),
6681 + VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
6682 + VCHIQ_MSG_SRCPORT(msgid),
6683 + VCHIQ_MSG_DSTPORT(msgid),
6684 + size);
6685 + }
6686 +
6687 + /* Make sure the new header is visible to the peer. */
6688 + wmb();
6689 +
6690 + /* Make the new tx_pos visible to the peer. */
6691 + local->tx_pos = state->local_tx_pos;
6692 + wmb();
6693 +
6694 + if (service && (type == VCHIQ_MSG_CLOSE))
6695 + vchiq_set_service_state(service, VCHIQ_SRVSTATE_CLOSESENT);
6696 +
6697 + if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_PAUSE)
6698 + mutex_unlock(&state->slot_mutex);
6699 +
6700 + remote_event_signal(&state->remote->trigger);
6701 +
6702 + return VCHIQ_SUCCESS;
6703 +}
6704 +
6705 +/* Called by the slot handler and application threads */
6706 +static VCHIQ_STATUS_T
6707 +queue_message_sync(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
6708 + int msgid, const VCHIQ_ELEMENT_T *elements,
6709 + int count, int size, int is_blocking)
6710 +{
6711 + VCHIQ_SHARED_STATE_T *local;
6712 + VCHIQ_HEADER_T *header;
6713 +
6714 + local = state->local;
6715 +
6716 + if ((VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_RESUME) &&
6717 + (mutex_lock_interruptible(&state->sync_mutex) != 0))
6718 + return VCHIQ_RETRY;
6719 +
6720 + remote_event_wait(&local->sync_release);
6721 +
6722 + rmb();
6723 +
6724 + header = (VCHIQ_HEADER_T *)SLOT_DATA_FROM_INDEX(state,
6725 + local->slot_sync);
6726 +
6727 + {
6728 + int oldmsgid = header->msgid;
6729 + if (oldmsgid != VCHIQ_MSGID_PADDING)
6730 + vchiq_log_error(vchiq_core_log_level,
6731 + "%d: qms - msgid %x, not PADDING",
6732 + state->id, oldmsgid);
6733 + }
6734 +
6735 + if (service) {
6736 + int i, pos;
6737 +
6738 + vchiq_log_info(vchiq_sync_log_level,
6739 + "%d: qms %s@%x,%x (%d->%d)", state->id,
6740 + msg_type_str(VCHIQ_MSG_TYPE(msgid)),
6741 + (unsigned int)header, size,
6742 + VCHIQ_MSG_SRCPORT(msgid),
6743 + VCHIQ_MSG_DSTPORT(msgid));
6744 +
6745 + for (i = 0, pos = 0; i < (unsigned int)count;
6746 + pos += elements[i++].size)
6747 + if (elements[i].size) {
6748 + if (vchiq_copy_from_user
6749 + (header->data + pos, elements[i].data,
6750 + (size_t) elements[i].size) !=
6751 + VCHIQ_SUCCESS) {
6752 + mutex_unlock(&state->sync_mutex);
6753 + VCHIQ_SERVICE_STATS_INC(service,
6754 + error_count);
6755 + return VCHIQ_ERROR;
6756 + }
6757 + if (i == 0) {
6758 + if (vchiq_sync_log_level >=
6759 + VCHIQ_LOG_TRACE)
6760 + vchiq_log_dump_mem("Sent Sync",
6761 + 0, header->data + pos,
6762 + min(64u,
6763 + elements[0].size));
6764 + }
6765 + }
6766 +
6767 + VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
6768 + VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
6769 + } else {
6770 + vchiq_log_info(vchiq_sync_log_level,
6771 + "%d: qms %s@%x,%x (%d->%d)", state->id,
6772 + msg_type_str(VCHIQ_MSG_TYPE(msgid)),
6773 + (unsigned int)header, size,
6774 + VCHIQ_MSG_SRCPORT(msgid),
6775 + VCHIQ_MSG_DSTPORT(msgid));
6776 + if (size != 0) {
6777 + WARN_ON(!((count == 1) && (size == elements[0].size)));
6778 + memcpy(header->data, elements[0].data,
6779 + elements[0].size);
6780 + }
6781 + VCHIQ_STATS_INC(state, ctrl_tx_count);
6782 + }
6783 +
6784 + header->size = size;
6785 + header->msgid = msgid;
6786 +
6787 + if (vchiq_sync_log_level >= VCHIQ_LOG_TRACE) {
6788 + int svc_fourcc;
6789 +
6790 + svc_fourcc = service
6791 + ? service->base.fourcc
6792 + : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
6793 +
6794 + vchiq_log_trace(vchiq_sync_log_level,
6795 + "Sent Sync Msg %s(%u) to %c%c%c%c s:%u d:%d len:%d",
6796 + msg_type_str(VCHIQ_MSG_TYPE(msgid)),
6797 + VCHIQ_MSG_TYPE(msgid),
6798 + VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
6799 + VCHIQ_MSG_SRCPORT(msgid),
6800 + VCHIQ_MSG_DSTPORT(msgid),
6801 + size);
6802 + }
6803 +
6804 + /* Make sure the new header is visible to the peer. */
6805 + wmb();
6806 +
6807 + remote_event_signal(&state->remote->sync_trigger);
6808 +
6809 + if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_PAUSE)
6810 + mutex_unlock(&state->sync_mutex);
6811 +
6812 + return VCHIQ_SUCCESS;
6813 +}
6814 +
6815 +static inline void
6816 +claim_slot(VCHIQ_SLOT_INFO_T *slot)
6817 +{
6818 + slot->use_count++;
6819 +}
6820 +
6821 +static void
6822 +release_slot(VCHIQ_STATE_T *state, VCHIQ_SLOT_INFO_T *slot_info,
6823 + VCHIQ_HEADER_T *header, VCHIQ_SERVICE_T *service)
6824 +{
6825 + int release_count;
6826 +
6827 + mutex_lock(&state->recycle_mutex);
6828 +
6829 + if (header) {
6830 + int msgid = header->msgid;
6831 + if (((msgid & VCHIQ_MSGID_CLAIMED) == 0) ||
6832 + (service && service->closing)) {
6833 + mutex_unlock(&state->recycle_mutex);
6834 + return;
6835 + }
6836 +
6837 + /* Rewrite the message header to prevent a double
6838 + ** release */
6839 + header->msgid = msgid & ~VCHIQ_MSGID_CLAIMED;
6840 + }
6841 +
6842 + release_count = slot_info->release_count;
6843 + slot_info->release_count = ++release_count;
6844 +
6845 + if (release_count == slot_info->use_count) {
6846 + int slot_queue_recycle;
6847 + /* Add to the freed queue */
6848 +
6849 + /* A read barrier is necessary here to prevent speculative
6850 + ** fetches of remote->slot_queue_recycle from overtaking the
6851 + ** mutex. */
6852 + rmb();
6853 +
6854 + slot_queue_recycle = state->remote->slot_queue_recycle;
6855 + state->remote->slot_queue[slot_queue_recycle &
6856 + VCHIQ_SLOT_QUEUE_MASK] =
6857 + SLOT_INDEX_FROM_INFO(state, slot_info);
6858 + state->remote->slot_queue_recycle = slot_queue_recycle + 1;
6859 + vchiq_log_info(vchiq_core_log_level,
6860 + "%d: release_slot %d - recycle->%x",
6861 + state->id, SLOT_INDEX_FROM_INFO(state, slot_info),
6862 + state->remote->slot_queue_recycle);
6863 +
6864 + /* A write barrier is necessary, but remote_event_signal
6865 + ** contains one. */
6866 + remote_event_signal(&state->remote->recycle);
6867 + }
6868 +
6869 + mutex_unlock(&state->recycle_mutex);
6870 +}
6871 +
6872 +/* Called by the slot handler - don't hold the bulk mutex */
6873 +static VCHIQ_STATUS_T
6874 +notify_bulks(VCHIQ_SERVICE_T *service, VCHIQ_BULK_QUEUE_T *queue,
6875 + int retry_poll)
6876 +{
6877 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
6878 +
6879 + vchiq_log_trace(vchiq_core_log_level,
6880 + "%d: nb:%d %cx - p=%x rn=%x r=%x",
6881 + service->state->id, service->localport,
6882 + (queue == &service->bulk_tx) ? 't' : 'r',
6883 + queue->process, queue->remote_notify, queue->remove);
6884 +
6885 + if (service->state->is_master) {
6886 + while (queue->remote_notify != queue->process) {
6887 + VCHIQ_BULK_T *bulk =
6888 + &queue->bulks[BULK_INDEX(queue->remote_notify)];
6889 + int msgtype = (bulk->dir == VCHIQ_BULK_TRANSMIT) ?
6890 + VCHIQ_MSG_BULK_RX_DONE : VCHIQ_MSG_BULK_TX_DONE;
6891 + int msgid = VCHIQ_MAKE_MSG(msgtype, service->localport,
6892 + service->remoteport);
6893 + VCHIQ_ELEMENT_T element = { &bulk->actual, 4 };
6894 + /* Only reply to non-dummy bulk requests */
6895 + if (bulk->remote_data) {
6896 + status = queue_message(service->state, NULL,
6897 + msgid, &element, 1, 4, 0);
6898 + if (status != VCHIQ_SUCCESS)
6899 + break;
6900 + }
6901 + queue->remote_notify++;
6902 + }
6903 + } else {
6904 + queue->remote_notify = queue->process;
6905 + }
6906 +
6907 + if (status == VCHIQ_SUCCESS) {
6908 + while (queue->remove != queue->remote_notify) {
6909 + VCHIQ_BULK_T *bulk =
6910 + &queue->bulks[BULK_INDEX(queue->remove)];
6911 +
6912 + /* Only generate callbacks for non-dummy bulk
6913 + ** requests, and non-terminated services */
6914 + if (bulk->data && service->instance) {
6915 + if (bulk->actual != VCHIQ_BULK_ACTUAL_ABORTED) {
6916 + if (bulk->dir == VCHIQ_BULK_TRANSMIT) {
6917 + VCHIQ_SERVICE_STATS_INC(service,
6918 + bulk_tx_count);
6919 + VCHIQ_SERVICE_STATS_ADD(service,
6920 + bulk_tx_bytes,
6921 + bulk->actual);
6922 + } else {
6923 + VCHIQ_SERVICE_STATS_INC(service,
6924 + bulk_rx_count);
6925 + VCHIQ_SERVICE_STATS_ADD(service,
6926 + bulk_rx_bytes,
6927 + bulk->actual);
6928 + }
6929 + } else {
6930 + VCHIQ_SERVICE_STATS_INC(service,
6931 + bulk_aborted_count);
6932 + }
6933 + if (bulk->mode == VCHIQ_BULK_MODE_BLOCKING) {
6934 + struct bulk_waiter *waiter;
6935 + spin_lock(&bulk_waiter_spinlock);
6936 + waiter = bulk->userdata;
6937 + if (waiter) {
6938 + waiter->actual = bulk->actual;
6939 + up(&waiter->event);
6940 + }
6941 + spin_unlock(&bulk_waiter_spinlock);
6942 + } else if (bulk->mode ==
6943 + VCHIQ_BULK_MODE_CALLBACK) {
6944 + VCHIQ_REASON_T reason = (bulk->dir ==
6945 + VCHIQ_BULK_TRANSMIT) ?
6946 + ((bulk->actual ==
6947 + VCHIQ_BULK_ACTUAL_ABORTED) ?
6948 + VCHIQ_BULK_TRANSMIT_ABORTED :
6949 + VCHIQ_BULK_TRANSMIT_DONE) :
6950 + ((bulk->actual ==
6951 + VCHIQ_BULK_ACTUAL_ABORTED) ?
6952 + VCHIQ_BULK_RECEIVE_ABORTED :
6953 + VCHIQ_BULK_RECEIVE_DONE);
6954 + status = make_service_callback(service,
6955 + reason, NULL, bulk->userdata);
6956 + if (status == VCHIQ_RETRY)
6957 + break;
6958 + }
6959 + }
6960 +
6961 + queue->remove++;
6962 + up(&service->bulk_remove_event);
6963 + }
6964 + if (!retry_poll)
6965 + status = VCHIQ_SUCCESS;
6966 + }
6967 +
6968 + if (status == VCHIQ_RETRY)
6969 + request_poll(service->state, service,
6970 + (queue == &service->bulk_tx) ?
6971 + VCHIQ_POLL_TXNOTIFY : VCHIQ_POLL_RXNOTIFY);
6972 +
6973 + return status;
6974 +}
6975 +
6976 +/* Called by the slot handler thread */
6977 +static void
6978 +poll_services(VCHIQ_STATE_T *state)
6979 +{
6980 + int group, i;
6981 +
6982 + for (group = 0; group < BITSET_SIZE(state->unused_service); group++) {
6983 + uint32_t flags;
6984 + flags = atomic_xchg(&state->poll_services[group], 0);
6985 + for (i = 0; flags; i++) {
6986 + if (flags & (1 << i)) {
6987 + VCHIQ_SERVICE_T *service =
6988 + find_service_by_port(state,
6989 + (group<<5) + i);
6990 + uint32_t service_flags;
6991 + flags &= ~(1 << i);
6992 + if (!service)
6993 + continue;
6994 + service_flags =
6995 + atomic_xchg(&service->poll_flags, 0);
6996 + if (service_flags &
6997 + (1 << VCHIQ_POLL_REMOVE)) {
6998 + vchiq_log_info(vchiq_core_log_level,
6999 + "%d: ps - remove %d<->%d",
7000 + state->id, service->localport,
7001 + service->remoteport);
7002 +
7003 + /* Make it look like a client, because
7004 + it must be removed and not left in
7005 + the LISTENING state. */
7006 + service->public_fourcc =
7007 + VCHIQ_FOURCC_INVALID;
7008 +
7009 + if (vchiq_close_service_internal(
7010 + service, 0/*!close_recvd*/) !=
7011 + VCHIQ_SUCCESS)
7012 + request_poll(state, service,
7013 + VCHIQ_POLL_REMOVE);
7014 + } else if (service_flags &
7015 + (1 << VCHIQ_POLL_TERMINATE)) {
7016 + vchiq_log_info(vchiq_core_log_level,
7017 + "%d: ps - terminate %d<->%d",
7018 + state->id, service->localport,
7019 + service->remoteport);
7020 + if (vchiq_close_service_internal(
7021 + service, 0/*!close_recvd*/) !=
7022 + VCHIQ_SUCCESS)
7023 + request_poll(state, service,
7024 + VCHIQ_POLL_TERMINATE);
7025 + }
7026 + if (service_flags & (1 << VCHIQ_POLL_TXNOTIFY))
7027 + notify_bulks(service,
7028 + &service->bulk_tx,
7029 + 1/*retry_poll*/);
7030 + if (service_flags & (1 << VCHIQ_POLL_RXNOTIFY))
7031 + notify_bulks(service,
7032 + &service->bulk_rx,
7033 + 1/*retry_poll*/);
7034 + unlock_service(service);
7035 + }
7036 + }
7037 + }
7038 +}
7039 +
7040 +/* Called by the slot handler or application threads, holding the bulk mutex. */
7041 +static int
7042 +resolve_bulks(VCHIQ_SERVICE_T *service, VCHIQ_BULK_QUEUE_T *queue)
7043 +{
7044 + VCHIQ_STATE_T *state = service->state;
7045 + int resolved = 0;
7046 + int rc;
7047 +
7048 + while ((queue->process != queue->local_insert) &&
7049 + (queue->process != queue->remote_insert)) {
7050 + VCHIQ_BULK_T *bulk = &queue->bulks[BULK_INDEX(queue->process)];
7051 +
7052 + vchiq_log_trace(vchiq_core_log_level,
7053 + "%d: rb:%d %cx - li=%x ri=%x p=%x",
7054 + state->id, service->localport,
7055 + (queue == &service->bulk_tx) ? 't' : 'r',
7056 + queue->local_insert, queue->remote_insert,
7057 + queue->process);
7058 +
7059 + WARN_ON(!((int)(queue->local_insert - queue->process) > 0));
7060 + WARN_ON(!((int)(queue->remote_insert - queue->process) > 0));
7061 +
7062 + rc = mutex_lock_interruptible(&state->bulk_transfer_mutex);
7063 + if (rc != 0)
7064 + break;
7065 +
7066 + vchiq_transfer_bulk(bulk);
7067 + mutex_unlock(&state->bulk_transfer_mutex);
7068 +
7069 + if (SRVTRACE_ENABLED(service, VCHIQ_LOG_INFO)) {
7070 + const char *header = (queue == &service->bulk_tx) ?
7071 + "Send Bulk to" : "Recv Bulk from";
7072 + if (bulk->actual != VCHIQ_BULK_ACTUAL_ABORTED)
7073 + vchiq_log_info(SRVTRACE_LEVEL(service),
7074 + "%s %c%c%c%c d:%d len:%d %x<->%x",
7075 + header,
7076 + VCHIQ_FOURCC_AS_4CHARS(
7077 + service->base.fourcc),
7078 + service->remoteport,
7079 + bulk->size,
7080 + (unsigned int)bulk->data,
7081 + (unsigned int)bulk->remote_data);
7082 + else
7083 + vchiq_log_info(SRVTRACE_LEVEL(service),
7084 + "%s %c%c%c%c d:%d ABORTED - tx len:%d,"
7085 + " rx len:%d %x<->%x",
7086 + header,
7087 + VCHIQ_FOURCC_AS_4CHARS(
7088 + service->base.fourcc),
7089 + service->remoteport,
7090 + bulk->size,
7091 + bulk->remote_size,
7092 + (unsigned int)bulk->data,
7093 + (unsigned int)bulk->remote_data);
7094 + }
7095 +
7096 + vchiq_complete_bulk(bulk);
7097 + queue->process++;
7098 + resolved++;
7099 + }
7100 + return resolved;
7101 +}
7102 +
7103 +/* Called with the bulk_mutex held */
7104 +static void
7105 +abort_outstanding_bulks(VCHIQ_SERVICE_T *service, VCHIQ_BULK_QUEUE_T *queue)
7106 +{
7107 + int is_tx = (queue == &service->bulk_tx);
7108 + vchiq_log_trace(vchiq_core_log_level,
7109 + "%d: aob:%d %cx - li=%x ri=%x p=%x",
7110 + service->state->id, service->localport, is_tx ? 't' : 'r',
7111 + queue->local_insert, queue->remote_insert, queue->process);
7112 +
7113 + WARN_ON(!((int)(queue->local_insert - queue->process) >= 0));
7114 + WARN_ON(!((int)(queue->remote_insert - queue->process) >= 0));
7115 +
7116 + while ((queue->process != queue->local_insert) ||
7117 + (queue->process != queue->remote_insert)) {
7118 + VCHIQ_BULK_T *bulk = &queue->bulks[BULK_INDEX(queue->process)];
7119 +
7120 + if (queue->process == queue->remote_insert) {
7121 + /* fabricate a matching dummy bulk */
7122 + bulk->remote_data = NULL;
7123 + bulk->remote_size = 0;
7124 + queue->remote_insert++;
7125 + }
7126 +
7127 + if (queue->process != queue->local_insert) {
7128 + vchiq_complete_bulk(bulk);
7129 +
7130 + vchiq_log_info(SRVTRACE_LEVEL(service),
7131 + "%s %c%c%c%c d:%d ABORTED - tx len:%d, "
7132 + "rx len:%d",
7133 + is_tx ? "Send Bulk to" : "Recv Bulk from",
7134 + VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
7135 + service->remoteport,
7136 + bulk->size,
7137 + bulk->remote_size);
7138 + } else {
7139 + /* fabricate a matching dummy bulk */
7140 + bulk->data = NULL;
7141 + bulk->size = 0;
7142 + bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
7143 + bulk->dir = is_tx ? VCHIQ_BULK_TRANSMIT :
7144 + VCHIQ_BULK_RECEIVE;
7145 + queue->local_insert++;
7146 + }
7147 +
7148 + queue->process++;
7149 + }
7150 +}
7151 +
7152 +/* Called from the slot handler thread */
7153 +static void
7154 +pause_bulks(VCHIQ_STATE_T *state)
7155 +{
7156 + if (unlikely(atomic_inc_return(&pause_bulks_count) != 1)) {
7157 + WARN_ON_ONCE(1);
7158 + atomic_set(&pause_bulks_count, 1);
7159 + return;
7160 + }
7161 +
7162 + /* Block bulk transfers from all services */
7163 + mutex_lock(&state->bulk_transfer_mutex);
7164 +}
7165 +
7166 +/* Called from the slot handler thread */
7167 +static void
7168 +resume_bulks(VCHIQ_STATE_T *state)
7169 +{
7170 + int i;
7171 + if (unlikely(atomic_dec_return(&pause_bulks_count) != 0)) {
7172 + WARN_ON_ONCE(1);
7173 + atomic_set(&pause_bulks_count, 0);
7174 + return;
7175 + }
7176 +
7177 + /* Allow bulk transfers from all services */
7178 + mutex_unlock(&state->bulk_transfer_mutex);
7179 +
7180 + if (state->deferred_bulks == 0)
7181 + return;
7182 +
7183 + /* Deal with any bulks which had to be deferred due to being in
7184 + * paused state. Don't try to match up to number of deferred bulks
7185 + * in case we've had something come and close the service in the
7186 + * interim - just process all bulk queues for all services */
7187 + vchiq_log_info(vchiq_core_log_level, "%s: processing %d deferred bulks",
7188 + __func__, state->deferred_bulks);
7189 +
7190 + for (i = 0; i < state->unused_service; i++) {
7191 + VCHIQ_SERVICE_T *service = state->services[i];
7192 + int resolved_rx = 0;
7193 + int resolved_tx = 0;
7194 + if (!service || (service->srvstate != VCHIQ_SRVSTATE_OPEN))
7195 + continue;
7196 +
7197 + mutex_lock(&service->bulk_mutex);
7198 + resolved_rx = resolve_bulks(service, &service->bulk_rx);
7199 + resolved_tx = resolve_bulks(service, &service->bulk_tx);
7200 + mutex_unlock(&service->bulk_mutex);
7201 + if (resolved_rx)
7202 + notify_bulks(service, &service->bulk_rx, 1);
7203 + if (resolved_tx)
7204 + notify_bulks(service, &service->bulk_tx, 1);
7205 + }
7206 + state->deferred_bulks = 0;
7207 +}
7208 +
7209 +static int
7210 +parse_open(VCHIQ_STATE_T *state, VCHIQ_HEADER_T *header)
7211 +{
7212 + VCHIQ_SERVICE_T *service = NULL;
7213 + int msgid, size;
7214 + int type;
7215 + unsigned int localport, remoteport;
7216 +
7217 + msgid = header->msgid;
7218 + size = header->size;
7219 + type = VCHIQ_MSG_TYPE(msgid);
7220 + localport = VCHIQ_MSG_DSTPORT(msgid);
7221 + remoteport = VCHIQ_MSG_SRCPORT(msgid);
7222 + if (size >= sizeof(struct vchiq_open_payload)) {
7223 + const struct vchiq_open_payload *payload =
7224 + (struct vchiq_open_payload *)header->data;
7225 + unsigned int fourcc;
7226 +
7227 + fourcc = payload->fourcc;
7228 + vchiq_log_info(vchiq_core_log_level,
7229 + "%d: prs OPEN@%x (%d->'%c%c%c%c')",
7230 + state->id, (unsigned int)header,
7231 + localport,
7232 + VCHIQ_FOURCC_AS_4CHARS(fourcc));
7233 +
7234 + service = get_listening_service(state, fourcc);
7235 +
7236 + if (service) {
7237 + /* A matching service exists */
7238 + short version = payload->version;
7239 + short version_min = payload->version_min;
7240 + if ((service->version < version_min) ||
7241 + (version < service->version_min)) {
7242 + /* Version mismatch */
7243 + vchiq_loud_error_header();
7244 + vchiq_loud_error("%d: service %d (%c%c%c%c) "
7245 + "version mismatch - local (%d, min %d)"
7246 + " vs. remote (%d, min %d)",
7247 + state->id, service->localport,
7248 + VCHIQ_FOURCC_AS_4CHARS(fourcc),
7249 + service->version, service->version_min,
7250 + version, version_min);
7251 + vchiq_loud_error_footer();
7252 + unlock_service(service);
7253 + service = NULL;
7254 + goto fail_open;
7255 + }
7256 + service->peer_version = version;
7257 +
7258 + if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
7259 + struct vchiq_openack_payload ack_payload = {
7260 + service->version
7261 + };
7262 + VCHIQ_ELEMENT_T body = {
7263 + &ack_payload,
7264 + sizeof(ack_payload)
7265 + };
7266 +
7267 + /* Acknowledge the OPEN */
7268 + if (service->sync) {
7269 + if (queue_message_sync(state, NULL,
7270 + VCHIQ_MAKE_MSG(
7271 + VCHIQ_MSG_OPENACK,
7272 + service->localport,
7273 + remoteport),
7274 + &body, 1, sizeof(ack_payload),
7275 + 0) == VCHIQ_RETRY)
7276 + goto bail_not_ready;
7277 + } else {
7278 + if (queue_message(state, NULL,
7279 + VCHIQ_MAKE_MSG(
7280 + VCHIQ_MSG_OPENACK,
7281 + service->localport,
7282 + remoteport),
7283 + &body, 1, sizeof(ack_payload),
7284 + 0) == VCHIQ_RETRY)
7285 + goto bail_not_ready;
7286 + }
7287 +
7288 + /* The service is now open */
7289 + vchiq_set_service_state(service,
7290 + service->sync ? VCHIQ_SRVSTATE_OPENSYNC
7291 + : VCHIQ_SRVSTATE_OPEN);
7292 + }
7293 +
7294 + service->remoteport = remoteport;
7295 + service->client_id = ((int *)header->data)[1];
7296 + if (make_service_callback(service, VCHIQ_SERVICE_OPENED,
7297 + NULL, NULL) == VCHIQ_RETRY) {
7298 + /* Bail out if not ready */
7299 + service->remoteport = VCHIQ_PORT_FREE;
7300 + goto bail_not_ready;
7301 + }
7302 +
7303 + /* Success - the message has been dealt with */
7304 + unlock_service(service);
7305 + return 1;
7306 + }
7307 + }
7308 +
7309 +fail_open:
7310 + /* No available service, or an invalid request - send a CLOSE */
7311 + if (queue_message(state, NULL,
7312 + VCHIQ_MAKE_MSG(VCHIQ_MSG_CLOSE, 0, VCHIQ_MSG_SRCPORT(msgid)),
7313 + NULL, 0, 0, 0) == VCHIQ_RETRY)
7314 + goto bail_not_ready;
7315 +
7316 + return 1;
7317 +
7318 +bail_not_ready:
7319 + if (service)
7320 + unlock_service(service);
7321 +
7322 + return 0;
7323 +}
7324 +
7325 +/* Called by the slot handler thread */
7326 +static void
7327 +parse_rx_slots(VCHIQ_STATE_T *state)
7328 +{
7329 + VCHIQ_SHARED_STATE_T *remote = state->remote;
7330 + VCHIQ_SERVICE_T *service = NULL;
7331 + int tx_pos;
7332 + DEBUG_INITIALISE(state->local)
7333 +
7334 + tx_pos = remote->tx_pos;
7335 +
7336 + while (state->rx_pos != tx_pos) {
7337 + VCHIQ_HEADER_T *header;
7338 + int msgid, size;
7339 + int type;
7340 + unsigned int localport, remoteport;
7341 +
7342 + DEBUG_TRACE(PARSE_LINE);
7343 + if (!state->rx_data) {
7344 + int rx_index;
7345 + WARN_ON(!((state->rx_pos & VCHIQ_SLOT_MASK) == 0));
7346 + rx_index = remote->slot_queue[
7347 + SLOT_QUEUE_INDEX_FROM_POS(state->rx_pos) &
7348 + VCHIQ_SLOT_QUEUE_MASK];
7349 + state->rx_data = (char *)SLOT_DATA_FROM_INDEX(state,
7350 + rx_index);
7351 + state->rx_info = SLOT_INFO_FROM_INDEX(state, rx_index);
7352 +
7353 + /* Initialise use_count to one, and increment
7354 + ** release_count at the end of the slot to avoid
7355 + ** releasing the slot prematurely. */
7356 + state->rx_info->use_count = 1;
7357 + state->rx_info->release_count = 0;
7358 + }
7359 +
7360 + header = (VCHIQ_HEADER_T *)(state->rx_data +
7361 + (state->rx_pos & VCHIQ_SLOT_MASK));
7362 + DEBUG_VALUE(PARSE_HEADER, (int)header);
7363 + msgid = header->msgid;
7364 + DEBUG_VALUE(PARSE_MSGID, msgid);
7365 + size = header->size;
7366 + type = VCHIQ_MSG_TYPE(msgid);
7367 + localport = VCHIQ_MSG_DSTPORT(msgid);
7368 + remoteport = VCHIQ_MSG_SRCPORT(msgid);
7369 +
7370 + if (type != VCHIQ_MSG_DATA)
7371 + VCHIQ_STATS_INC(state, ctrl_rx_count);
7372 +
7373 + switch (type) {
7374 + case VCHIQ_MSG_OPENACK:
7375 + case VCHIQ_MSG_CLOSE:
7376 + case VCHIQ_MSG_DATA:
7377 + case VCHIQ_MSG_BULK_RX:
7378 + case VCHIQ_MSG_BULK_TX:
7379 + case VCHIQ_MSG_BULK_RX_DONE:
7380 + case VCHIQ_MSG_BULK_TX_DONE:
7381 + service = find_service_by_port(state, localport);
7382 + if ((!service ||
7383 + ((service->remoteport != remoteport) &&
7384 + (service->remoteport != VCHIQ_PORT_FREE))) &&
7385 + (localport == 0) &&
7386 + (type == VCHIQ_MSG_CLOSE)) {
7387 + /* This could be a CLOSE from a client which
7388 + hadn't yet received the OPENACK - look for
7389 + the connected service */
7390 + if (service)
7391 + unlock_service(service);
7392 + service = get_connected_service(state,
7393 + remoteport);
7394 + if (service)
7395 + vchiq_log_warning(vchiq_core_log_level,
7396 + "%d: prs %s@%x (%d->%d) - "
7397 + "found connected service %d",
7398 + state->id, msg_type_str(type),
7399 + (unsigned int)header,
7400 + remoteport, localport,
7401 + service->localport);
7402 + }
7403 +
7404 + if (!service) {
7405 + vchiq_log_error(vchiq_core_log_level,
7406 + "%d: prs %s@%x (%d->%d) - "
7407 + "invalid/closed service %d",
7408 + state->id, msg_type_str(type),
7409 + (unsigned int)header,
7410 + remoteport, localport, localport);
7411 + goto skip_message;
7412 + }
7413 + break;
7414 + default:
7415 + break;
7416 + }
7417 +
7418 + if (SRVTRACE_ENABLED(service, VCHIQ_LOG_INFO)) {
7419 + int svc_fourcc;
7420 +
7421 + svc_fourcc = service
7422 + ? service->base.fourcc
7423 + : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
7424 + vchiq_log_info(SRVTRACE_LEVEL(service),
7425 + "Rcvd Msg %s(%u) from %c%c%c%c s:%d d:%d "
7426 + "len:%d",
7427 + msg_type_str(type), type,
7428 + VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
7429 + remoteport, localport, size);
7430 + if (size > 0)
7431 + vchiq_log_dump_mem("Rcvd", 0, header->data,
7432 + min(64, size));
7433 + }
7434 +
7435 + if (((unsigned int)header & VCHIQ_SLOT_MASK) + calc_stride(size)
7436 + > VCHIQ_SLOT_SIZE) {
7437 + vchiq_log_error(vchiq_core_log_level,
7438 + "header %x (msgid %x) - size %x too big for "
7439 + "slot",
7440 + (unsigned int)header, (unsigned int)msgid,
7441 + (unsigned int)size);
7442 + WARN(1, "oversized for slot\n");
7443 + }
7444 +
7445 + switch (type) {
7446 + case VCHIQ_MSG_OPEN:
7447 + WARN_ON(!(VCHIQ_MSG_DSTPORT(msgid) == 0));
7448 + if (!parse_open(state, header))
7449 + goto bail_not_ready;
7450 + break;
7451 + case VCHIQ_MSG_OPENACK:
7452 + if (size >= sizeof(struct vchiq_openack_payload)) {
7453 + const struct vchiq_openack_payload *payload =
7454 + (struct vchiq_openack_payload *)
7455 + header->data;
7456 + service->peer_version = payload->version;
7457 + }
7458 + vchiq_log_info(vchiq_core_log_level,
7459 + "%d: prs OPENACK@%x,%x (%d->%d) v:%d",
7460 + state->id, (unsigned int)header, size,
7461 + remoteport, localport, service->peer_version);
7462 + if (service->srvstate ==
7463 + VCHIQ_SRVSTATE_OPENING) {
7464 + service->remoteport = remoteport;
7465 + vchiq_set_service_state(service,
7466 + VCHIQ_SRVSTATE_OPEN);
7467 + up(&service->remove_event);
7468 + } else
7469 + vchiq_log_error(vchiq_core_log_level,
7470 + "OPENACK received in state %s",
7471 + srvstate_names[service->srvstate]);
7472 + break;
7473 + case VCHIQ_MSG_CLOSE:
7474 + WARN_ON(size != 0); /* There should be no data */
7475 +
7476 + vchiq_log_info(vchiq_core_log_level,
7477 + "%d: prs CLOSE@%x (%d->%d)",
7478 + state->id, (unsigned int)header,
7479 + remoteport, localport);
7480 +
7481 + mark_service_closing_internal(service, 1);
7482 +
7483 + if (vchiq_close_service_internal(service,
7484 + 1/*close_recvd*/) == VCHIQ_RETRY)
7485 + goto bail_not_ready;
7486 +
7487 + vchiq_log_info(vchiq_core_log_level,
7488 + "Close Service %c%c%c%c s:%u d:%d",
7489 + VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
7490 + service->localport,
7491 + service->remoteport);
7492 + break;
7493 + case VCHIQ_MSG_DATA:
7494 + vchiq_log_trace(vchiq_core_log_level,
7495 + "%d: prs DATA@%x,%x (%d->%d)",
7496 + state->id, (unsigned int)header, size,
7497 + remoteport, localport);
7498 +
7499 + if ((service->remoteport == remoteport)
7500 + && (service->srvstate ==
7501 + VCHIQ_SRVSTATE_OPEN)) {
7502 + header->msgid = msgid | VCHIQ_MSGID_CLAIMED;
7503 + claim_slot(state->rx_info);
7504 + DEBUG_TRACE(PARSE_LINE);
7505 + if (make_service_callback(service,
7506 + VCHIQ_MESSAGE_AVAILABLE, header,
7507 + NULL) == VCHIQ_RETRY) {
7508 + DEBUG_TRACE(PARSE_LINE);
7509 + goto bail_not_ready;
7510 + }
7511 + VCHIQ_SERVICE_STATS_INC(service, ctrl_rx_count);
7512 + VCHIQ_SERVICE_STATS_ADD(service, ctrl_rx_bytes,
7513 + size);
7514 + } else {
7515 + VCHIQ_STATS_INC(state, error_count);
7516 + }
7517 + break;
7518 + case VCHIQ_MSG_CONNECT:
7519 + vchiq_log_info(vchiq_core_log_level,
7520 + "%d: prs CONNECT@%x",
7521 + state->id, (unsigned int)header);
7522 + up(&state->connect);
7523 + break;
7524 + case VCHIQ_MSG_BULK_RX:
7525 + case VCHIQ_MSG_BULK_TX: {
7526 + VCHIQ_BULK_QUEUE_T *queue;
7527 + WARN_ON(!state->is_master);
7528 + queue = (type == VCHIQ_MSG_BULK_RX) ?
7529 + &service->bulk_tx : &service->bulk_rx;
7530 + if ((service->remoteport == remoteport)
7531 + && (service->srvstate ==
7532 + VCHIQ_SRVSTATE_OPEN)) {
7533 + VCHIQ_BULK_T *bulk;
7534 + int resolved = 0;
7535 +
7536 + DEBUG_TRACE(PARSE_LINE);
7537 + if (mutex_lock_interruptible(
7538 + &service->bulk_mutex) != 0) {
7539 + DEBUG_TRACE(PARSE_LINE);
7540 + goto bail_not_ready;
7541 + }
7542 +
7543 + WARN_ON(!(queue->remote_insert < queue->remove +
7544 + VCHIQ_NUM_SERVICE_BULKS));
7545 + bulk = &queue->bulks[
7546 + BULK_INDEX(queue->remote_insert)];
7547 + bulk->remote_data =
7548 + (void *)((int *)header->data)[0];
7549 + bulk->remote_size = ((int *)header->data)[1];
7550 + wmb();
7551 +
7552 + vchiq_log_info(vchiq_core_log_level,
7553 + "%d: prs %s@%x (%d->%d) %x@%x",
7554 + state->id, msg_type_str(type),
7555 + (unsigned int)header,
7556 + remoteport, localport,
7557 + bulk->remote_size,
7558 + (unsigned int)bulk->remote_data);
7559 +
7560 + queue->remote_insert++;
7561 +
7562 + if (atomic_read(&pause_bulks_count)) {
7563 + state->deferred_bulks++;
7564 + vchiq_log_info(vchiq_core_log_level,
7565 + "%s: deferring bulk (%d)",
7566 + __func__,
7567 + state->deferred_bulks);
7568 + if (state->conn_state !=
7569 + VCHIQ_CONNSTATE_PAUSE_SENT)
7570 + vchiq_log_error(
7571 + vchiq_core_log_level,
7572 + "%s: bulks paused in "
7573 + "unexpected state %s",
7574 + __func__,
7575 + conn_state_names[
7576 + state->conn_state]);
7577 + } else if (state->conn_state ==
7578 + VCHIQ_CONNSTATE_CONNECTED) {
7579 + DEBUG_TRACE(PARSE_LINE);
7580 + resolved = resolve_bulks(service,
7581 + queue);
7582 + }
7583 +
7584 + mutex_unlock(&service->bulk_mutex);
7585 + if (resolved)
7586 + notify_bulks(service, queue,
7587 + 1/*retry_poll*/);
7588 + }
7589 + } break;
7590 + case VCHIQ_MSG_BULK_RX_DONE:
7591 + case VCHIQ_MSG_BULK_TX_DONE:
7592 + WARN_ON(state->is_master);
7593 + if ((service->remoteport == remoteport)
7594 + && (service->srvstate !=
7595 + VCHIQ_SRVSTATE_FREE)) {
7596 + VCHIQ_BULK_QUEUE_T *queue;
7597 + VCHIQ_BULK_T *bulk;
7598 +
7599 + queue = (type == VCHIQ_MSG_BULK_RX_DONE) ?
7600 + &service->bulk_rx : &service->bulk_tx;
7601 +
7602 + DEBUG_TRACE(PARSE_LINE);
7603 + if (mutex_lock_interruptible(
7604 + &service->bulk_mutex) != 0) {
7605 + DEBUG_TRACE(PARSE_LINE);
7606 + goto bail_not_ready;
7607 + }
7608 + if ((int)(queue->remote_insert -
7609 + queue->local_insert) >= 0) {
7610 + vchiq_log_error(vchiq_core_log_level,
7611 + "%d: prs %s@%x (%d->%d) "
7612 + "unexpected (ri=%d,li=%d)",
7613 + state->id, msg_type_str(type),
7614 + (unsigned int)header,
7615 + remoteport, localport,
7616 + queue->remote_insert,
7617 + queue->local_insert);
7618 + mutex_unlock(&service->bulk_mutex);
7619 + break;
7620 + }
7621 +
7622 + BUG_ON(queue->process == queue->local_insert);
7623 + BUG_ON(queue->process != queue->remote_insert);
7624 +
7625 + bulk = &queue->bulks[
7626 + BULK_INDEX(queue->remote_insert)];
7627 + bulk->actual = *(int *)header->data;
7628 + queue->remote_insert++;
7629 +
7630 + vchiq_log_info(vchiq_core_log_level,
7631 + "%d: prs %s@%x (%d->%d) %x@%x",
7632 + state->id, msg_type_str(type),
7633 + (unsigned int)header,
7634 + remoteport, localport,
7635 + bulk->actual, (unsigned int)bulk->data);
7636 +
7637 + vchiq_log_trace(vchiq_core_log_level,
7638 + "%d: prs:%d %cx li=%x ri=%x p=%x",
7639 + state->id, localport,
7640 + (type == VCHIQ_MSG_BULK_RX_DONE) ?
7641 + 'r' : 't',
7642 + queue->local_insert,
7643 + queue->remote_insert, queue->process);
7644 +
7645 + DEBUG_TRACE(PARSE_LINE);
7646 + WARN_ON(queue->process == queue->local_insert);
7647 + vchiq_complete_bulk(bulk);
7648 + queue->process++;
7649 + mutex_unlock(&service->bulk_mutex);
7650 + DEBUG_TRACE(PARSE_LINE);
7651 + notify_bulks(service, queue, 1/*retry_poll*/);
7652 + DEBUG_TRACE(PARSE_LINE);
7653 + }
7654 + break;
7655 + case VCHIQ_MSG_PADDING:
7656 + vchiq_log_trace(vchiq_core_log_level,
7657 + "%d: prs PADDING@%x,%x",
7658 + state->id, (unsigned int)header, size);
7659 + break;
7660 + case VCHIQ_MSG_PAUSE:
7661 + /* If initiated, signal the application thread */
7662 + vchiq_log_trace(vchiq_core_log_level,
7663 + "%d: prs PAUSE@%x,%x",
7664 + state->id, (unsigned int)header, size);
7665 + if (state->conn_state == VCHIQ_CONNSTATE_PAUSED) {
7666 + vchiq_log_error(vchiq_core_log_level,
7667 + "%d: PAUSE received in state PAUSED",
7668 + state->id);
7669 + break;
7670 + }
7671 + if (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT) {
7672 + /* Send a PAUSE in response */
7673 + if (queue_message(state, NULL,
7674 + VCHIQ_MAKE_MSG(VCHIQ_MSG_PAUSE, 0, 0),
7675 + NULL, 0, 0, 0) == VCHIQ_RETRY)
7676 + goto bail_not_ready;
7677 + if (state->is_master)
7678 + pause_bulks(state);
7679 + }
7680 + /* At this point slot_mutex is held */
7681 + vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSED);
7682 + vchiq_platform_paused(state);
7683 + break;
7684 + case VCHIQ_MSG_RESUME:
7685 + vchiq_log_trace(vchiq_core_log_level,
7686 + "%d: prs RESUME@%x,%x",
7687 + state->id, (unsigned int)header, size);
7688 + /* Release the slot mutex */
7689 + mutex_unlock(&state->slot_mutex);
7690 + if (state->is_master)
7691 + resume_bulks(state);
7692 + vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
7693 + vchiq_platform_resumed(state);
7694 + break;
7695 +
7696 + case VCHIQ_MSG_REMOTE_USE:
7697 + vchiq_on_remote_use(state);
7698 + break;
7699 + case VCHIQ_MSG_REMOTE_RELEASE:
7700 + vchiq_on_remote_release(state);
7701 + break;
7702 + case VCHIQ_MSG_REMOTE_USE_ACTIVE:
7703 + vchiq_on_remote_use_active(state);
7704 + break;
7705 +
7706 + default:
7707 + vchiq_log_error(vchiq_core_log_level,
7708 + "%d: prs invalid msgid %x@%x,%x",
7709 + state->id, msgid, (unsigned int)header, size);
7710 + WARN(1, "invalid message\n");
7711 + break;
7712 + }
7713 +
7714 +skip_message:
7715 + if (service) {
7716 + unlock_service(service);
7717 + service = NULL;
7718 + }
7719 +
7720 + state->rx_pos += calc_stride(size);
7721 +
7722 + DEBUG_TRACE(PARSE_LINE);
7723 + /* Perform some housekeeping when the end of the slot is
7724 + ** reached. */
7725 + if ((state->rx_pos & VCHIQ_SLOT_MASK) == 0) {
7726 + /* Remove the extra reference count. */
7727 + release_slot(state, state->rx_info, NULL, NULL);
7728 + state->rx_data = NULL;
7729 + }
7730 + }
7731 +
7732 +bail_not_ready:
7733 + if (service)
7734 + unlock_service(service);
7735 +}
7736 +
7737 +/* Called by the slot handler thread */
7738 +static int
7739 +slot_handler_func(void *v)
7740 +{
7741 + VCHIQ_STATE_T *state = (VCHIQ_STATE_T *) v;
7742 + VCHIQ_SHARED_STATE_T *local = state->local;
7743 + DEBUG_INITIALISE(local)
7744 +
7745 + while (1) {
7746 + DEBUG_COUNT(SLOT_HANDLER_COUNT);
7747 + DEBUG_TRACE(SLOT_HANDLER_LINE);
7748 + remote_event_wait(&local->trigger);
7749 +
7750 + rmb();
7751 +
7752 + DEBUG_TRACE(SLOT_HANDLER_LINE);
7753 + if (state->poll_needed) {
7754 + /* Check if we need to suspend - may change our
7755 + * conn_state */
7756 + vchiq_platform_check_suspend(state);
7757 +
7758 + state->poll_needed = 0;
7759 +
7760 + /* Handle service polling and other rare conditions here
7761 + ** out of the mainline code */
7762 + switch (state->conn_state) {
7763 + case VCHIQ_CONNSTATE_CONNECTED:
7764 + /* Poll the services as requested */
7765 + poll_services(state);
7766 + break;
7767 +
7768 + case VCHIQ_CONNSTATE_PAUSING:
7769 + if (state->is_master)
7770 + pause_bulks(state);
7771 + if (queue_message(state, NULL,
7772 + VCHIQ_MAKE_MSG(VCHIQ_MSG_PAUSE, 0, 0),
7773 + NULL, 0, 0, 0) != VCHIQ_RETRY) {
7774 + vchiq_set_conn_state(state,
7775 + VCHIQ_CONNSTATE_PAUSE_SENT);
7776 + } else {
7777 + if (state->is_master)
7778 + resume_bulks(state);
7779 + /* Retry later */
7780 + state->poll_needed = 1;
7781 + }
7782 + break;
7783 +
7784 + case VCHIQ_CONNSTATE_PAUSED:
7785 + vchiq_platform_resume(state);
7786 + break;
7787 +
7788 + case VCHIQ_CONNSTATE_RESUMING:
7789 + if (queue_message(state, NULL,
7790 + VCHIQ_MAKE_MSG(VCHIQ_MSG_RESUME, 0, 0),
7791 + NULL, 0, 0, 0) != VCHIQ_RETRY) {
7792 + if (state->is_master)
7793 + resume_bulks(state);
7794 + vchiq_set_conn_state(state,
7795 + VCHIQ_CONNSTATE_CONNECTED);
7796 + vchiq_platform_resumed(state);
7797 + } else {
7798 + /* This should really be impossible,
7799 + ** since the PAUSE should have flushed
7800 + ** through outstanding messages. */
7801 + vchiq_log_error(vchiq_core_log_level,
7802 + "Failed to send RESUME "
7803 + "message");
7804 + BUG();
7805 + }
7806 + break;
7807 +
7808 + case VCHIQ_CONNSTATE_PAUSE_TIMEOUT:
7809 + case VCHIQ_CONNSTATE_RESUME_TIMEOUT:
7810 + vchiq_platform_handle_timeout(state);
7811 + break;
7812 + default:
7813 + break;
7814 + }
7815 +
7816 +
7817 + }
7818 +
7819 + DEBUG_TRACE(SLOT_HANDLER_LINE);
7820 + parse_rx_slots(state);
7821 + }
7822 + return 0;
7823 +}
7824 +
7825 +
7826 +/* Called by the recycle thread */
7827 +static int
7828 +recycle_func(void *v)
7829 +{
7830 + VCHIQ_STATE_T *state = (VCHIQ_STATE_T *) v;
7831 + VCHIQ_SHARED_STATE_T *local = state->local;
7832 +
7833 + while (1) {
7834 + remote_event_wait(&local->recycle);
7835 +
7836 + process_free_queue(state);
7837 + }
7838 + return 0;
7839 +}
7840 +
7841 +
7842 +/* Called by the sync thread */
7843 +static int
7844 +sync_func(void *v)
7845 +{
7846 + VCHIQ_STATE_T *state = (VCHIQ_STATE_T *) v;
7847 + VCHIQ_SHARED_STATE_T *local = state->local;
7848 + VCHIQ_HEADER_T *header = (VCHIQ_HEADER_T *)SLOT_DATA_FROM_INDEX(state,
7849 + state->remote->slot_sync);
7850 +
7851 + while (1) {
7852 + VCHIQ_SERVICE_T *service;
7853 + int msgid, size;
7854 + int type;
7855 + unsigned int localport, remoteport;
7856 +
7857 + remote_event_wait(&local->sync_trigger);
7858 +
7859 + rmb();
7860 +
7861 + msgid = header->msgid;
7862 + size = header->size;
7863 + type = VCHIQ_MSG_TYPE(msgid);
7864 + localport = VCHIQ_MSG_DSTPORT(msgid);
7865 + remoteport = VCHIQ_MSG_SRCPORT(msgid);
7866 +
7867 + service = find_service_by_port(state, localport);
7868 +
7869 + if (!service) {
7870 + vchiq_log_error(vchiq_sync_log_level,
7871 + "%d: sf %s@%x (%d->%d) - "
7872 + "invalid/closed service %d",
7873 + state->id, msg_type_str(type),
7874 + (unsigned int)header,
7875 + remoteport, localport, localport);
7876 + release_message_sync(state, header);
7877 + continue;
7878 + }
7879 +
7880 + if (vchiq_sync_log_level >= VCHIQ_LOG_TRACE) {
7881 + int svc_fourcc;
7882 +
7883 + svc_fourcc = service
7884 + ? service->base.fourcc
7885 + : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
7886 + vchiq_log_trace(vchiq_sync_log_level,
7887 + "Rcvd Msg %s from %c%c%c%c s:%d d:%d len:%d",
7888 + msg_type_str(type),
7889 + VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
7890 + remoteport, localport, size);
7891 + if (size > 0)
7892 + vchiq_log_dump_mem("Rcvd", 0, header->data,
7893 + min(64, size));
7894 + }
7895 +
7896 + switch (type) {
7897 + case VCHIQ_MSG_OPENACK:
7898 + if (size >= sizeof(struct vchiq_openack_payload)) {
7899 + const struct vchiq_openack_payload *payload =
7900 + (struct vchiq_openack_payload *)
7901 + header->data;
7902 + service->peer_version = payload->version;
7903 + }
7904 + vchiq_log_info(vchiq_sync_log_level,
7905 + "%d: sf OPENACK@%x,%x (%d->%d) v:%d",
7906 + state->id, (unsigned int)header, size,
7907 + remoteport, localport, service->peer_version);
7908 + if (service->srvstate == VCHIQ_SRVSTATE_OPENING) {
7909 + service->remoteport = remoteport;
7910 + vchiq_set_service_state(service,
7911 + VCHIQ_SRVSTATE_OPENSYNC);
7912 + up(&service->remove_event);
7913 + }
7914 + release_message_sync(state, header);
7915 + break;
7916 +
7917 + case VCHIQ_MSG_DATA:
7918 + vchiq_log_trace(vchiq_sync_log_level,
7919 + "%d: sf DATA@%x,%x (%d->%d)",
7920 + state->id, (unsigned int)header, size,
7921 + remoteport, localport);
7922 +
7923 + if ((service->remoteport == remoteport) &&
7924 + (service->srvstate ==
7925 + VCHIQ_SRVSTATE_OPENSYNC)) {
7926 + if (make_service_callback(service,
7927 + VCHIQ_MESSAGE_AVAILABLE, header,
7928 + NULL) == VCHIQ_RETRY)
7929 + vchiq_log_error(vchiq_sync_log_level,
7930 + "synchronous callback to "
7931 + "service %d returns "
7932 + "VCHIQ_RETRY",
7933 + localport);
7934 + }
7935 + break;
7936 +
7937 + default:
7938 + vchiq_log_error(vchiq_sync_log_level,
7939 + "%d: sf unexpected msgid %x@%x,%x",
7940 + state->id, msgid, (unsigned int)header, size);
7941 + release_message_sync(state, header);
7942 + break;
7943 + }
7944 +
7945 + unlock_service(service);
7946 + }
7947 +
7948 + return 0;
7949 +}
7950 +
7951 +
7952 +static void
7953 +init_bulk_queue(VCHIQ_BULK_QUEUE_T *queue)
7954 +{
7955 + queue->local_insert = 0;
7956 + queue->remote_insert = 0;
7957 + queue->process = 0;
7958 + queue->remote_notify = 0;
7959 + queue->remove = 0;
7960 +}
7961 +
7962 +
7963 +inline const char *
7964 +get_conn_state_name(VCHIQ_CONNSTATE_T conn_state)
7965 +{
7966 + return conn_state_names[conn_state];
7967 +}
7968 +
7969 +
7970 +VCHIQ_SLOT_ZERO_T *
7971 +vchiq_init_slots(void *mem_base, int mem_size)
7972 +{
7973 + int mem_align = (VCHIQ_SLOT_SIZE - (int)mem_base) & VCHIQ_SLOT_MASK;
7974 + VCHIQ_SLOT_ZERO_T *slot_zero =
7975 + (VCHIQ_SLOT_ZERO_T *)((char *)mem_base + mem_align);
7976 + int num_slots = (mem_size - mem_align)/VCHIQ_SLOT_SIZE;
7977 + int first_data_slot = VCHIQ_SLOT_ZERO_SLOTS;
7978 +
7979 + /* Ensure there is enough memory to run an absolutely minimum system */
7980 + num_slots -= first_data_slot;
7981 +
7982 + if (num_slots < 4) {
7983 + vchiq_log_error(vchiq_core_log_level,
7984 + "vchiq_init_slots - insufficient memory %x bytes",
7985 + mem_size);
7986 + return NULL;
7987 + }
7988 +
7989 + memset(slot_zero, 0, sizeof(VCHIQ_SLOT_ZERO_T));
7990 +
7991 + slot_zero->magic = VCHIQ_MAGIC;
7992 + slot_zero->version = VCHIQ_VERSION;
7993 + slot_zero->version_min = VCHIQ_VERSION_MIN;
7994 + slot_zero->slot_zero_size = sizeof(VCHIQ_SLOT_ZERO_T);
7995 + slot_zero->slot_size = VCHIQ_SLOT_SIZE;
7996 + slot_zero->max_slots = VCHIQ_MAX_SLOTS;
7997 + slot_zero->max_slots_per_side = VCHIQ_MAX_SLOTS_PER_SIDE;
7998 +
7999 + slot_zero->master.slot_sync = first_data_slot;
8000 + slot_zero->master.slot_first = first_data_slot + 1;
8001 + slot_zero->master.slot_last = first_data_slot + (num_slots/2) - 1;
8002 + slot_zero->slave.slot_sync = first_data_slot + (num_slots/2);
8003 + slot_zero->slave.slot_first = first_data_slot + (num_slots/2) + 1;
8004 + slot_zero->slave.slot_last = first_data_slot + num_slots - 1;
8005 +
8006 + return slot_zero;
8007 +}
8008 +
8009 +VCHIQ_STATUS_T
8010 +vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero,
8011 + int is_master)
8012 +{
8013 + VCHIQ_SHARED_STATE_T *local;
8014 + VCHIQ_SHARED_STATE_T *remote;
8015 + VCHIQ_STATUS_T status;
8016 + char threadname[10];
8017 + static int id;
8018 + int i;
8019 +
8020 + vchiq_log_warning(vchiq_core_log_level,
8021 + "%s: slot_zero = 0x%08lx, is_master = %d",
8022 + __func__, (unsigned long)slot_zero, is_master);
8023 +
8024 + /* Check the input configuration */
8025 +
8026 + if (slot_zero->magic != VCHIQ_MAGIC) {
8027 + vchiq_loud_error_header();
8028 + vchiq_loud_error("Invalid VCHIQ magic value found.");
8029 + vchiq_loud_error("slot_zero=%x: magic=%x (expected %x)",
8030 + (unsigned int)slot_zero, slot_zero->magic, VCHIQ_MAGIC);
8031 + vchiq_loud_error_footer();
8032 + return VCHIQ_ERROR;
8033 + }
8034 +
8035 + if (slot_zero->version < VCHIQ_VERSION_MIN) {
8036 + vchiq_loud_error_header();
8037 + vchiq_loud_error("Incompatible VCHIQ versions found.");
8038 + vchiq_loud_error("slot_zero=%x: VideoCore version=%d "
8039 + "(minimum %d)",
8040 + (unsigned int)slot_zero, slot_zero->version,
8041 + VCHIQ_VERSION_MIN);
8042 + vchiq_loud_error("Restart with a newer VideoCore image.");
8043 + vchiq_loud_error_footer();
8044 + return VCHIQ_ERROR;
8045 + }
8046 +
8047 + if (VCHIQ_VERSION < slot_zero->version_min) {
8048 + vchiq_loud_error_header();
8049 + vchiq_loud_error("Incompatible VCHIQ versions found.");
8050 + vchiq_loud_error("slot_zero=%x: version=%d (VideoCore "
8051 + "minimum %d)",
8052 + (unsigned int)slot_zero, VCHIQ_VERSION,
8053 + slot_zero->version_min);
8054 + vchiq_loud_error("Restart with a newer kernel.");
8055 + vchiq_loud_error_footer();
8056 + return VCHIQ_ERROR;
8057 + }
8058 +
8059 + if ((slot_zero->slot_zero_size != sizeof(VCHIQ_SLOT_ZERO_T)) ||
8060 + (slot_zero->slot_size != VCHIQ_SLOT_SIZE) ||
8061 + (slot_zero->max_slots != VCHIQ_MAX_SLOTS) ||
8062 + (slot_zero->max_slots_per_side != VCHIQ_MAX_SLOTS_PER_SIDE)) {
8063 + vchiq_loud_error_header();
8064 + if (slot_zero->slot_zero_size != sizeof(VCHIQ_SLOT_ZERO_T))
8065 + vchiq_loud_error("slot_zero=%x: slot_zero_size=%x "
8066 + "(expected %x)",
8067 + (unsigned int)slot_zero,
8068 + slot_zero->slot_zero_size,
8069 + sizeof(VCHIQ_SLOT_ZERO_T));
8070 + if (slot_zero->slot_size != VCHIQ_SLOT_SIZE)
8071 + vchiq_loud_error("slot_zero=%x: slot_size=%d "
8072 + "(expected %d",
8073 + (unsigned int)slot_zero, slot_zero->slot_size,
8074 + VCHIQ_SLOT_SIZE);
8075 + if (slot_zero->max_slots != VCHIQ_MAX_SLOTS)
8076 + vchiq_loud_error("slot_zero=%x: max_slots=%d "
8077 + "(expected %d)",
8078 + (unsigned int)slot_zero, slot_zero->max_slots,
8079 + VCHIQ_MAX_SLOTS);
8080 + if (slot_zero->max_slots_per_side != VCHIQ_MAX_SLOTS_PER_SIDE)
8081 + vchiq_loud_error("slot_zero=%x: max_slots_per_side=%d "
8082 + "(expected %d)",
8083 + (unsigned int)slot_zero,
8084 + slot_zero->max_slots_per_side,
8085 + VCHIQ_MAX_SLOTS_PER_SIDE);
8086 + vchiq_loud_error_footer();
8087 + return VCHIQ_ERROR;
8088 + }
8089 +
8090 + if (is_master) {
8091 + local = &slot_zero->master;
8092 + remote = &slot_zero->slave;
8093 + } else {
8094 + local = &slot_zero->slave;
8095 + remote = &slot_zero->master;
8096 + }
8097 +
8098 + if (local->initialised) {
8099 + vchiq_loud_error_header();
8100 + if (remote->initialised)
8101 + vchiq_loud_error("local state has already been "
8102 + "initialised");
8103 + else
8104 + vchiq_loud_error("master/slave mismatch - two %ss",
8105 + is_master ? "master" : "slave");
8106 + vchiq_loud_error_footer();
8107 + return VCHIQ_ERROR;
8108 + }
8109 +
8110 + memset(state, 0, sizeof(VCHIQ_STATE_T));
8111 +
8112 + state->id = id++;
8113 + state->is_master = is_master;
8114 +
8115 + /*
8116 + initialize shared state pointers
8117 + */
8118 +
8119 + state->local = local;
8120 + state->remote = remote;
8121 + state->slot_data = (VCHIQ_SLOT_T *)slot_zero;
8122 +
8123 + /*
8124 + initialize events and mutexes
8125 + */
8126 +
8127 + sema_init(&state->connect, 0);
8128 + mutex_init(&state->mutex);
8129 + sema_init(&state->trigger_event, 0);
8130 + sema_init(&state->recycle_event, 0);
8131 + sema_init(&state->sync_trigger_event, 0);
8132 + sema_init(&state->sync_release_event, 0);
8133 +
8134 + mutex_init(&state->slot_mutex);
8135 + mutex_init(&state->recycle_mutex);
8136 + mutex_init(&state->sync_mutex);
8137 + mutex_init(&state->bulk_transfer_mutex);
8138 +
8139 + sema_init(&state->slot_available_event, 0);
8140 + sema_init(&state->slot_remove_event, 0);
8141 + sema_init(&state->data_quota_event, 0);
8142 +
8143 + state->slot_queue_available = 0;
8144 +
8145 + for (i = 0; i < VCHIQ_MAX_SERVICES; i++) {
8146 + VCHIQ_SERVICE_QUOTA_T *service_quota =
8147 + &state->service_quotas[i];
8148 + sema_init(&service_quota->quota_event, 0);
8149 + }
8150 +
8151 + for (i = local->slot_first; i <= local->slot_last; i++) {
8152 + local->slot_queue[state->slot_queue_available++] = i;
8153 + up(&state->slot_available_event);
8154 + }
8155 +
8156 + state->default_slot_quota = state->slot_queue_available/2;
8157 + state->default_message_quota =
8158 + min((unsigned short)(state->default_slot_quota * 256),
8159 + (unsigned short)~0);
8160 +
8161 + state->previous_data_index = -1;
8162 + state->data_use_count = 0;
8163 + state->data_quota = state->slot_queue_available - 1;
8164 +
8165 + local->trigger.event = &state->trigger_event;
8166 + remote_event_create(&local->trigger);
8167 + local->tx_pos = 0;
8168 +
8169 + local->recycle.event = &state->recycle_event;
8170 + remote_event_create(&local->recycle);
8171 + local->slot_queue_recycle = state->slot_queue_available;
8172 +
8173 + local->sync_trigger.event = &state->sync_trigger_event;
8174 + remote_event_create(&local->sync_trigger);
8175 +
8176 + local->sync_release.event = &state->sync_release_event;
8177 + remote_event_create(&local->sync_release);
8178 +
8179 + /* At start-of-day, the slot is empty and available */
8180 + ((VCHIQ_HEADER_T *)SLOT_DATA_FROM_INDEX(state, local->slot_sync))->msgid
8181 + = VCHIQ_MSGID_PADDING;
8182 + remote_event_signal_local(&local->sync_release);
8183 +
8184 + local->debug[DEBUG_ENTRIES] = DEBUG_MAX;
8185 +
8186 + status = vchiq_platform_init_state(state);
8187 +
8188 + /*
8189 + bring up slot handler thread
8190 + */
8191 + snprintf(threadname, sizeof(threadname), "VCHIQ-%d", state->id);
8192 + state->slot_handler_thread = kthread_create(&slot_handler_func,
8193 + (void *)state,
8194 + threadname);
8195 +
8196 + if (state->slot_handler_thread == NULL) {
8197 + vchiq_loud_error_header();
8198 + vchiq_loud_error("couldn't create thread %s", threadname);
8199 + vchiq_loud_error_footer();
8200 + return VCHIQ_ERROR;
8201 + }
8202 + set_user_nice(state->slot_handler_thread, -19);
8203 + wake_up_process(state->slot_handler_thread);
8204 +
8205 + snprintf(threadname, sizeof(threadname), "VCHIQr-%d", state->id);
8206 + state->recycle_thread = kthread_create(&recycle_func,
8207 + (void *)state,
8208 + threadname);
8209 + if (state->recycle_thread == NULL) {
8210 + vchiq_loud_error_header();
8211 + vchiq_loud_error("couldn't create thread %s", threadname);
8212 + vchiq_loud_error_footer();
8213 + return VCHIQ_ERROR;
8214 + }
8215 + set_user_nice(state->recycle_thread, -19);
8216 + wake_up_process(state->recycle_thread);
8217 +
8218 + snprintf(threadname, sizeof(threadname), "VCHIQs-%d", state->id);
8219 + state->sync_thread = kthread_create(&sync_func,
8220 + (void *)state,
8221 + threadname);
8222 + if (state->sync_thread == NULL) {
8223 + vchiq_loud_error_header();
8224 + vchiq_loud_error("couldn't create thread %s", threadname);
8225 + vchiq_loud_error_footer();
8226 + return VCHIQ_ERROR;
8227 + }
8228 + set_user_nice(state->sync_thread, -20);
8229 + wake_up_process(state->sync_thread);
8230 +
8231 + BUG_ON(state->id >= VCHIQ_MAX_STATES);
8232 + vchiq_states[state->id] = state;
8233 +
8234 + /* Indicate readiness to the other side */
8235 + local->initialised = 1;
8236 +
8237 + return status;
8238 +}
8239 +
8240 +/* Called from application thread when a client or server service is created. */
8241 +VCHIQ_SERVICE_T *
8242 +vchiq_add_service_internal(VCHIQ_STATE_T *state,
8243 + const VCHIQ_SERVICE_PARAMS_T *params, int srvstate,
8244 + VCHIQ_INSTANCE_T instance, VCHIQ_USERDATA_TERM_T userdata_term)
8245 +{
8246 + VCHIQ_SERVICE_T *service;
8247 +
8248 + service = kmalloc(sizeof(VCHIQ_SERVICE_T), GFP_KERNEL);
8249 + if (service) {
8250 + service->base.fourcc = params->fourcc;
8251 + service->base.callback = params->callback;
8252 + service->base.userdata = params->userdata;
8253 + service->handle = VCHIQ_SERVICE_HANDLE_INVALID;
8254 + service->ref_count = 1;
8255 + service->srvstate = VCHIQ_SRVSTATE_FREE;
8256 + service->userdata_term = userdata_term;
8257 + service->localport = VCHIQ_PORT_FREE;
8258 + service->remoteport = VCHIQ_PORT_FREE;
8259 +
8260 + service->public_fourcc = (srvstate == VCHIQ_SRVSTATE_OPENING) ?
8261 + VCHIQ_FOURCC_INVALID : params->fourcc;
8262 + service->client_id = 0;
8263 + service->auto_close = 1;
8264 + service->sync = 0;
8265 + service->closing = 0;
8266 + service->trace = 0;
8267 + atomic_set(&service->poll_flags, 0);
8268 + service->version = params->version;
8269 + service->version_min = params->version_min;
8270 + service->state = state;
8271 + service->instance = instance;
8272 + service->service_use_count = 0;
8273 + init_bulk_queue(&service->bulk_tx);
8274 + init_bulk_queue(&service->bulk_rx);
8275 + sema_init(&service->remove_event, 0);
8276 + sema_init(&service->bulk_remove_event, 0);
8277 + mutex_init(&service->bulk_mutex);
8278 + memset(&service->stats, 0, sizeof(service->stats));
8279 + } else {
8280 + vchiq_log_error(vchiq_core_log_level,
8281 + "Out of memory");
8282 + }
8283 +
8284 + if (service) {
8285 + VCHIQ_SERVICE_T **pservice = NULL;
8286 + int i;
8287 +
8288 + /* Although it is perfectly possible to use service_spinlock
8289 + ** to protect the creation of services, it is overkill as it
8290 + ** disables interrupts while the array is searched.
8291 + ** The only danger is of another thread trying to create a
8292 + ** service - service deletion is safe.
8293 + ** Therefore it is preferable to use state->mutex which,
8294 + ** although slower to claim, doesn't block interrupts while
8295 + ** it is held.
8296 + */
8297 +
8298 + mutex_lock(&state->mutex);
8299 +
8300 + /* Prepare to use a previously unused service */
8301 + if (state->unused_service < VCHIQ_MAX_SERVICES)
8302 + pservice = &state->services[state->unused_service];
8303 +
8304 + if (srvstate == VCHIQ_SRVSTATE_OPENING) {
8305 + for (i = 0; i < state->unused_service; i++) {
8306 + VCHIQ_SERVICE_T *srv = state->services[i];
8307 + if (!srv) {
8308 + pservice = &state->services[i];
8309 + break;
8310 + }
8311 + }
8312 + } else {
8313 + for (i = (state->unused_service - 1); i >= 0; i--) {
8314 + VCHIQ_SERVICE_T *srv = state->services[i];
8315 + if (!srv)
8316 + pservice = &state->services[i];
8317 + else if ((srv->public_fourcc == params->fourcc)
8318 + && ((srv->instance != instance) ||
8319 + (srv->base.callback !=
8320 + params->callback))) {
8321 + /* There is another server using this
8322 + ** fourcc which doesn't match. */
8323 + pservice = NULL;
8324 + break;
8325 + }
8326 + }
8327 + }
8328 +
8329 + if (pservice) {
8330 + service->localport = (pservice - state->services);
8331 + if (!handle_seq)
8332 + handle_seq = VCHIQ_MAX_STATES *
8333 + VCHIQ_MAX_SERVICES;
8334 + service->handle = handle_seq |
8335 + (state->id * VCHIQ_MAX_SERVICES) |
8336 + service->localport;
8337 + handle_seq += VCHIQ_MAX_STATES * VCHIQ_MAX_SERVICES;
8338 + *pservice = service;
8339 + if (pservice == &state->services[state->unused_service])
8340 + state->unused_service++;
8341 + }
8342 +
8343 + mutex_unlock(&state->mutex);
8344 +
8345 + if (!pservice) {
8346 + kfree(service);
8347 + service = NULL;
8348 + }
8349 + }
8350 +
8351 + if (service) {
8352 + VCHIQ_SERVICE_QUOTA_T *service_quota =
8353 + &state->service_quotas[service->localport];
8354 + service_quota->slot_quota = state->default_slot_quota;
8355 + service_quota->message_quota = state->default_message_quota;
8356 + if (service_quota->slot_use_count == 0)
8357 + service_quota->previous_tx_index =
8358 + SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos)
8359 + - 1;
8360 +
8361 + /* Bring this service online */
8362 + vchiq_set_service_state(service, srvstate);
8363 +
8364 + vchiq_log_info(vchiq_core_msg_log_level,
8365 + "%s Service %c%c%c%c SrcPort:%d",
8366 + (srvstate == VCHIQ_SRVSTATE_OPENING)
8367 + ? "Open" : "Add",
8368 + VCHIQ_FOURCC_AS_4CHARS(params->fourcc),
8369 + service->localport);
8370 + }
8371 +
8372 + /* Don't unlock the service - leave it with a ref_count of 1. */
8373 +
8374 + return service;
8375 +}
8376 +
8377 +VCHIQ_STATUS_T
8378 +vchiq_open_service_internal(VCHIQ_SERVICE_T *service, int client_id)
8379 +{
8380 + struct vchiq_open_payload payload = {
8381 + service->base.fourcc,
8382 + client_id,
8383 + service->version,
8384 + service->version_min
8385 + };
8386 + VCHIQ_ELEMENT_T body = { &payload, sizeof(payload) };
8387 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
8388 +
8389 + service->client_id = client_id;
8390 + vchiq_use_service_internal(service);
8391 + status = queue_message(service->state, NULL,
8392 + VCHIQ_MAKE_MSG(VCHIQ_MSG_OPEN, service->localport, 0),
8393 + &body, 1, sizeof(payload), 1);
8394 + if (status == VCHIQ_SUCCESS) {
8395 + /* Wait for the ACK/NAK */
8396 + if (down_interruptible(&service->remove_event) != 0) {
8397 + status = VCHIQ_RETRY;
8398 + vchiq_release_service_internal(service);
8399 + } else if ((service->srvstate != VCHIQ_SRVSTATE_OPEN) &&
8400 + (service->srvstate != VCHIQ_SRVSTATE_OPENSYNC)) {
8401 + if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT)
8402 + vchiq_log_error(vchiq_core_log_level,
8403 + "%d: osi - srvstate = %s (ref %d)",
8404 + service->state->id,
8405 + srvstate_names[service->srvstate],
8406 + service->ref_count);
8407 + status = VCHIQ_ERROR;
8408 + VCHIQ_SERVICE_STATS_INC(service, error_count);
8409 + vchiq_release_service_internal(service);
8410 + }
8411 + }
8412 + return status;
8413 +}
8414 +
8415 +static void
8416 +release_service_messages(VCHIQ_SERVICE_T *service)
8417 +{
8418 + VCHIQ_STATE_T *state = service->state;
8419 + int slot_last = state->remote->slot_last;
8420 + int i;
8421 +
8422 + /* Release any claimed messages */
8423 + for (i = state->remote->slot_first; i <= slot_last; i++) {
8424 + VCHIQ_SLOT_INFO_T *slot_info =
8425 + SLOT_INFO_FROM_INDEX(state, i);
8426 + if (slot_info->release_count != slot_info->use_count) {
8427 + char *data =
8428 + (char *)SLOT_DATA_FROM_INDEX(state, i);
8429 + unsigned int pos, end;
8430 +
8431 + end = VCHIQ_SLOT_SIZE;
8432 + if (data == state->rx_data)
8433 + /* This buffer is still being read from - stop
8434 + ** at the current read position */
8435 + end = state->rx_pos & VCHIQ_SLOT_MASK;
8436 +
8437 + pos = 0;
8438 +
8439 + while (pos < end) {
8440 + VCHIQ_HEADER_T *header =
8441 + (VCHIQ_HEADER_T *)(data + pos);
8442 + int msgid = header->msgid;
8443 + int port = VCHIQ_MSG_DSTPORT(msgid);
8444 + if ((port == service->localport) &&
8445 + (msgid & VCHIQ_MSGID_CLAIMED)) {
8446 + vchiq_log_info(vchiq_core_log_level,
8447 + " fsi - hdr %x",
8448 + (unsigned int)header);
8449 + release_slot(state, slot_info, header,
8450 + NULL);
8451 + }
8452 + pos += calc_stride(header->size);
8453 + if (pos > VCHIQ_SLOT_SIZE) {
8454 + vchiq_log_error(vchiq_core_log_level,
8455 + "fsi - pos %x: header %x, "
8456 + "msgid %x, header->msgid %x, "
8457 + "header->size %x",
8458 + pos, (unsigned int)header,
8459 + msgid, header->msgid,
8460 + header->size);
8461 + WARN(1, "invalid slot position\n");
8462 + }
8463 + }
8464 + }
8465 + }
8466 +}
8467 +
8468 +static int
8469 +do_abort_bulks(VCHIQ_SERVICE_T *service)
8470 +{
8471 + VCHIQ_STATUS_T status;
8472 +
8473 + /* Abort any outstanding bulk transfers */
8474 + if (mutex_lock_interruptible(&service->bulk_mutex) != 0)
8475 + return 0;
8476 + abort_outstanding_bulks(service, &service->bulk_tx);
8477 + abort_outstanding_bulks(service, &service->bulk_rx);
8478 + mutex_unlock(&service->bulk_mutex);
8479 +
8480 + status = notify_bulks(service, &service->bulk_tx, 0/*!retry_poll*/);
8481 + if (status == VCHIQ_SUCCESS)
8482 + status = notify_bulks(service, &service->bulk_rx,
8483 + 0/*!retry_poll*/);
8484 + return (status == VCHIQ_SUCCESS);
8485 +}
8486 +
8487 +static VCHIQ_STATUS_T
8488 +close_service_complete(VCHIQ_SERVICE_T *service, int failstate)
8489 +{
8490 + VCHIQ_STATUS_T status;
8491 + int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
8492 + int newstate;
8493 +
8494 + switch (service->srvstate) {
8495 + case VCHIQ_SRVSTATE_OPEN:
8496 + case VCHIQ_SRVSTATE_CLOSESENT:
8497 + case VCHIQ_SRVSTATE_CLOSERECVD:
8498 + if (is_server) {
8499 + if (service->auto_close) {
8500 + service->client_id = 0;
8501 + service->remoteport = VCHIQ_PORT_FREE;
8502 + newstate = VCHIQ_SRVSTATE_LISTENING;
8503 + } else
8504 + newstate = VCHIQ_SRVSTATE_CLOSEWAIT;
8505 + } else
8506 + newstate = VCHIQ_SRVSTATE_CLOSED;
8507 + vchiq_set_service_state(service, newstate);
8508 + break;
8509 + case VCHIQ_SRVSTATE_LISTENING:
8510 + break;
8511 + default:
8512 + vchiq_log_error(vchiq_core_log_level,
8513 + "close_service_complete(%x) called in state %s",
8514 + service->handle, srvstate_names[service->srvstate]);
8515 + WARN(1, "close_service_complete in unexpected state\n");
8516 + return VCHIQ_ERROR;
8517 + }
8518 +
8519 + status = make_service_callback(service,
8520 + VCHIQ_SERVICE_CLOSED, NULL, NULL);
8521 +
8522 + if (status != VCHIQ_RETRY) {
8523 + int uc = service->service_use_count;
8524 + int i;
8525 + /* Complete the close process */
8526 + for (i = 0; i < uc; i++)
8527 + /* cater for cases where close is forced and the
8528 + ** client may not close all it's handles */
8529 + vchiq_release_service_internal(service);
8530 +
8531 + service->client_id = 0;
8532 + service->remoteport = VCHIQ_PORT_FREE;
8533 +
8534 + if (service->srvstate == VCHIQ_SRVSTATE_CLOSED)
8535 + vchiq_free_service_internal(service);
8536 + else if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT) {
8537 + if (is_server)
8538 + service->closing = 0;
8539 +
8540 + up(&service->remove_event);
8541 + }
8542 + } else
8543 + vchiq_set_service_state(service, failstate);
8544 +
8545 + return status;
8546 +}
8547 +
8548 +/* Called by the slot handler */
8549 +VCHIQ_STATUS_T
8550 +vchiq_close_service_internal(VCHIQ_SERVICE_T *service, int close_recvd)
8551 +{
8552 + VCHIQ_STATE_T *state = service->state;
8553 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
8554 + int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
8555 +
8556 + vchiq_log_info(vchiq_core_log_level, "%d: csi:%d,%d (%s)",
8557 + service->state->id, service->localport, close_recvd,
8558 + srvstate_names[service->srvstate]);
8559 +
8560 + switch (service->srvstate) {
8561 + case VCHIQ_SRVSTATE_CLOSED:
8562 + case VCHIQ_SRVSTATE_HIDDEN:
8563 + case VCHIQ_SRVSTATE_LISTENING:
8564 + case VCHIQ_SRVSTATE_CLOSEWAIT:
8565 + if (close_recvd)
8566 + vchiq_log_error(vchiq_core_log_level,
8567 + "vchiq_close_service_internal(1) called "
8568 + "in state %s",
8569 + srvstate_names[service->srvstate]);
8570 + else if (is_server) {
8571 + if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
8572 + status = VCHIQ_ERROR;
8573 + } else {
8574 + service->client_id = 0;
8575 + service->remoteport = VCHIQ_PORT_FREE;
8576 + if (service->srvstate ==
8577 + VCHIQ_SRVSTATE_CLOSEWAIT)
8578 + vchiq_set_service_state(service,
8579 + VCHIQ_SRVSTATE_LISTENING);
8580 + }
8581 + up(&service->remove_event);
8582 + } else
8583 + vchiq_free_service_internal(service);
8584 + break;
8585 + case VCHIQ_SRVSTATE_OPENING:
8586 + if (close_recvd) {
8587 + /* The open was rejected - tell the user */
8588 + vchiq_set_service_state(service,
8589 + VCHIQ_SRVSTATE_CLOSEWAIT);
8590 + up(&service->remove_event);
8591 + } else {
8592 + /* Shutdown mid-open - let the other side know */
8593 + status = queue_message(state, service,
8594 + VCHIQ_MAKE_MSG
8595 + (VCHIQ_MSG_CLOSE,
8596 + service->localport,
8597 + VCHIQ_MSG_DSTPORT(service->remoteport)),
8598 + NULL, 0, 0, 0);
8599 + }
8600 + break;
8601 +
8602 + case VCHIQ_SRVSTATE_OPENSYNC:
8603 + mutex_lock(&state->sync_mutex);
8604 + /* Drop through */
8605 +
8606 + case VCHIQ_SRVSTATE_OPEN:
8607 + if (state->is_master || close_recvd) {
8608 + if (!do_abort_bulks(service))
8609 + status = VCHIQ_RETRY;
8610 + }
8611 +
8612 + release_service_messages(service);
8613 +
8614 + if (status == VCHIQ_SUCCESS)
8615 + status = queue_message(state, service,
8616 + VCHIQ_MAKE_MSG
8617 + (VCHIQ_MSG_CLOSE,
8618 + service->localport,
8619 + VCHIQ_MSG_DSTPORT(service->remoteport)),
8620 + NULL, 0, 0, 0);
8621 +
8622 + if (status == VCHIQ_SUCCESS) {
8623 + if (!close_recvd)
8624 + break;
8625 + } else if (service->srvstate == VCHIQ_SRVSTATE_OPENSYNC) {
8626 + mutex_unlock(&state->sync_mutex);
8627 + break;
8628 + } else
8629 + break;
8630 +
8631 + status = close_service_complete(service,
8632 + VCHIQ_SRVSTATE_CLOSERECVD);
8633 + break;
8634 +
8635 + case VCHIQ_SRVSTATE_CLOSESENT:
8636 + if (!close_recvd)
8637 + /* This happens when a process is killed mid-close */
8638 + break;
8639 +
8640 + if (!state->is_master) {
8641 + if (!do_abort_bulks(service)) {
8642 + status = VCHIQ_RETRY;
8643 + break;
8644 + }
8645 + }
8646 +
8647 + if (status == VCHIQ_SUCCESS)
8648 + status = close_service_complete(service,
8649 + VCHIQ_SRVSTATE_CLOSERECVD);
8650 + break;
8651 +
8652 + case VCHIQ_SRVSTATE_CLOSERECVD:
8653 + if (!close_recvd && is_server)
8654 + /* Force into LISTENING mode */
8655 + vchiq_set_service_state(service,
8656 + VCHIQ_SRVSTATE_LISTENING);
8657 + status = close_service_complete(service,
8658 + VCHIQ_SRVSTATE_CLOSERECVD);
8659 + break;
8660 +
8661 + default:
8662 + vchiq_log_error(vchiq_core_log_level,
8663 + "vchiq_close_service_internal(%d) called in state %s",
8664 + close_recvd, srvstate_names[service->srvstate]);
8665 + break;
8666 + }
8667 +
8668 + return status;
8669 +}
8670 +
8671 +/* Called from the application process upon process death */
8672 +void
8673 +vchiq_terminate_service_internal(VCHIQ_SERVICE_T *service)
8674 +{
8675 + VCHIQ_STATE_T *state = service->state;
8676 +
8677 + vchiq_log_info(vchiq_core_log_level, "%d: tsi - (%d<->%d)",
8678 + state->id, service->localport, service->remoteport);
8679 +
8680 + mark_service_closing(service);
8681 +
8682 + /* Mark the service for removal by the slot handler */
8683 + request_poll(state, service, VCHIQ_POLL_REMOVE);
8684 +}
8685 +
8686 +/* Called from the slot handler */
8687 +void
8688 +vchiq_free_service_internal(VCHIQ_SERVICE_T *service)
8689 +{
8690 + VCHIQ_STATE_T *state = service->state;
8691 +
8692 + vchiq_log_info(vchiq_core_log_level, "%d: fsi - (%d)",
8693 + state->id, service->localport);
8694 +
8695 + switch (service->srvstate) {
8696 + case VCHIQ_SRVSTATE_OPENING:
8697 + case VCHIQ_SRVSTATE_CLOSED:
8698 + case VCHIQ_SRVSTATE_HIDDEN:
8699 + case VCHIQ_SRVSTATE_LISTENING:
8700 + case VCHIQ_SRVSTATE_CLOSEWAIT:
8701 + break;
8702 + default:
8703 + vchiq_log_error(vchiq_core_log_level,
8704 + "%d: fsi - (%d) in state %s",
8705 + state->id, service->localport,
8706 + srvstate_names[service->srvstate]);
8707 + return;
8708 + }
8709 +
8710 + vchiq_set_service_state(service, VCHIQ_SRVSTATE_FREE);
8711 +
8712 + up(&service->remove_event);
8713 +
8714 + /* Release the initial lock */
8715 + unlock_service(service);
8716 +}
8717 +
8718 +VCHIQ_STATUS_T
8719 +vchiq_connect_internal(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance)
8720 +{
8721 + VCHIQ_SERVICE_T *service;
8722 + int i;
8723 +
8724 + /* Find all services registered to this client and enable them. */
8725 + i = 0;
8726 + while ((service = next_service_by_instance(state, instance,
8727 + &i)) != NULL) {
8728 + if (service->srvstate == VCHIQ_SRVSTATE_HIDDEN)
8729 + vchiq_set_service_state(service,
8730 + VCHIQ_SRVSTATE_LISTENING);
8731 + unlock_service(service);
8732 + }
8733 +
8734 + if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED) {
8735 + if (queue_message(state, NULL,
8736 + VCHIQ_MAKE_MSG(VCHIQ_MSG_CONNECT, 0, 0), NULL, 0,
8737 + 0, 1) == VCHIQ_RETRY)
8738 + return VCHIQ_RETRY;
8739 +
8740 + vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTING);
8741 + }
8742 +
8743 + if (state->conn_state == VCHIQ_CONNSTATE_CONNECTING) {
8744 + if (down_interruptible(&state->connect) != 0)
8745 + return VCHIQ_RETRY;
8746 +
8747 + vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
8748 + up(&state->connect);
8749 + }
8750 +
8751 + return VCHIQ_SUCCESS;
8752 +}
8753 +
8754 +VCHIQ_STATUS_T
8755 +vchiq_shutdown_internal(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance)
8756 +{
8757 + VCHIQ_SERVICE_T *service;
8758 + int i;
8759 +
8760 + /* Find all services registered to this client and enable them. */
8761 + i = 0;
8762 + while ((service = next_service_by_instance(state, instance,
8763 + &i)) != NULL) {
8764 + (void)vchiq_remove_service(service->handle);
8765 + unlock_service(service);
8766 + }
8767 +
8768 + return VCHIQ_SUCCESS;
8769 +}
8770 +
8771 +VCHIQ_STATUS_T
8772 +vchiq_pause_internal(VCHIQ_STATE_T *state)
8773 +{
8774 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
8775 +
8776 + switch (state->conn_state) {
8777 + case VCHIQ_CONNSTATE_CONNECTED:
8778 + /* Request a pause */
8779 + vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSING);
8780 + request_poll(state, NULL, 0);
8781 + break;
8782 + default:
8783 + vchiq_log_error(vchiq_core_log_level,
8784 + "vchiq_pause_internal in state %s\n",
8785 + conn_state_names[state->conn_state]);
8786 + status = VCHIQ_ERROR;
8787 + VCHIQ_STATS_INC(state, error_count);
8788 + break;
8789 + }
8790 +
8791 + return status;
8792 +}
8793 +
8794 +VCHIQ_STATUS_T
8795 +vchiq_resume_internal(VCHIQ_STATE_T *state)
8796 +{
8797 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
8798 +
8799 + if (state->conn_state == VCHIQ_CONNSTATE_PAUSED) {
8800 + vchiq_set_conn_state(state, VCHIQ_CONNSTATE_RESUMING);
8801 + request_poll(state, NULL, 0);
8802 + } else {
8803 + status = VCHIQ_ERROR;
8804 + VCHIQ_STATS_INC(state, error_count);
8805 + }
8806 +
8807 + return status;
8808 +}
8809 +
8810 +VCHIQ_STATUS_T
8811 +vchiq_close_service(VCHIQ_SERVICE_HANDLE_T handle)
8812 +{
8813 + /* Unregister the service */
8814 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
8815 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
8816 +
8817 + if (!service)
8818 + return VCHIQ_ERROR;
8819 +
8820 + vchiq_log_info(vchiq_core_log_level,
8821 + "%d: close_service:%d",
8822 + service->state->id, service->localport);
8823 +
8824 + if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
8825 + (service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
8826 + (service->srvstate == VCHIQ_SRVSTATE_HIDDEN)) {
8827 + unlock_service(service);
8828 + return VCHIQ_ERROR;
8829 + }
8830 +
8831 + mark_service_closing(service);
8832 +
8833 + if (current == service->state->slot_handler_thread) {
8834 + status = vchiq_close_service_internal(service,
8835 + 0/*!close_recvd*/);
8836 + BUG_ON(status == VCHIQ_RETRY);
8837 + } else {
8838 + /* Mark the service for termination by the slot handler */
8839 + request_poll(service->state, service, VCHIQ_POLL_TERMINATE);
8840 + }
8841 +
8842 + while (1) {
8843 + if (down_interruptible(&service->remove_event) != 0) {
8844 + status = VCHIQ_RETRY;
8845 + break;
8846 + }
8847 +
8848 + if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
8849 + (service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
8850 + (service->srvstate == VCHIQ_SRVSTATE_OPEN))
8851 + break;
8852 +
8853 + vchiq_log_warning(vchiq_core_log_level,
8854 + "%d: close_service:%d - waiting in state %s",
8855 + service->state->id, service->localport,
8856 + srvstate_names[service->srvstate]);
8857 + }
8858 +
8859 + if ((status == VCHIQ_SUCCESS) &&
8860 + (service->srvstate != VCHIQ_SRVSTATE_FREE) &&
8861 + (service->srvstate != VCHIQ_SRVSTATE_LISTENING))
8862 + status = VCHIQ_ERROR;
8863 +
8864 + unlock_service(service);
8865 +
8866 + return status;
8867 +}
8868 +
8869 +VCHIQ_STATUS_T
8870 +vchiq_remove_service(VCHIQ_SERVICE_HANDLE_T handle)
8871 +{
8872 + /* Unregister the service */
8873 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
8874 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
8875 +
8876 + if (!service)
8877 + return VCHIQ_ERROR;
8878 +
8879 + vchiq_log_info(vchiq_core_log_level,
8880 + "%d: remove_service:%d",
8881 + service->state->id, service->localport);
8882 +
8883 + if (service->srvstate == VCHIQ_SRVSTATE_FREE) {
8884 + unlock_service(service);
8885 + return VCHIQ_ERROR;
8886 + }
8887 +
8888 + mark_service_closing(service);
8889 +
8890 + if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) ||
8891 + (current == service->state->slot_handler_thread)) {
8892 + /* Make it look like a client, because it must be removed and
8893 + not left in the LISTENING state. */
8894 + service->public_fourcc = VCHIQ_FOURCC_INVALID;
8895 +
8896 + status = vchiq_close_service_internal(service,
8897 + 0/*!close_recvd*/);
8898 + BUG_ON(status == VCHIQ_RETRY);
8899 + } else {
8900 + /* Mark the service for removal by the slot handler */
8901 + request_poll(service->state, service, VCHIQ_POLL_REMOVE);
8902 + }
8903 + while (1) {
8904 + if (down_interruptible(&service->remove_event) != 0) {
8905 + status = VCHIQ_RETRY;
8906 + break;
8907 + }
8908 +
8909 + if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
8910 + (service->srvstate == VCHIQ_SRVSTATE_OPEN))
8911 + break;
8912 +
8913 + vchiq_log_warning(vchiq_core_log_level,
8914 + "%d: remove_service:%d - waiting in state %s",
8915 + service->state->id, service->localport,
8916 + srvstate_names[service->srvstate]);
8917 + }
8918 +
8919 + if ((status == VCHIQ_SUCCESS) &&
8920 + (service->srvstate != VCHIQ_SRVSTATE_FREE))
8921 + status = VCHIQ_ERROR;
8922 +
8923 + unlock_service(service);
8924 +
8925 + return status;
8926 +}
8927 +
8928 +
8929 +/* This function may be called by kernel threads or user threads.
8930 + * User threads may receive VCHIQ_RETRY to indicate that a signal has been
8931 + * received and the call should be retried after being returned to user
8932 + * context.
8933 + * When called in blocking mode, the userdata field points to a bulk_waiter
8934 + * structure.
8935 + */
8936 +VCHIQ_STATUS_T
8937 +vchiq_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle,
8938 + VCHI_MEM_HANDLE_T memhandle, void *offset, int size, void *userdata,
8939 + VCHIQ_BULK_MODE_T mode, VCHIQ_BULK_DIR_T dir)
8940 +{
8941 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
8942 + VCHIQ_BULK_QUEUE_T *queue;
8943 + VCHIQ_BULK_T *bulk;
8944 + VCHIQ_STATE_T *state;
8945 + struct bulk_waiter *bulk_waiter = NULL;
8946 + const char dir_char = (dir == VCHIQ_BULK_TRANSMIT) ? 't' : 'r';
8947 + const int dir_msgtype = (dir == VCHIQ_BULK_TRANSMIT) ?
8948 + VCHIQ_MSG_BULK_TX : VCHIQ_MSG_BULK_RX;
8949 + VCHIQ_STATUS_T status = VCHIQ_ERROR;
8950 +
8951 + if (!service ||
8952 + (service->srvstate != VCHIQ_SRVSTATE_OPEN) ||
8953 + ((memhandle == VCHI_MEM_HANDLE_INVALID) && (offset == NULL)) ||
8954 + (vchiq_check_service(service) != VCHIQ_SUCCESS))
8955 + goto error_exit;
8956 +
8957 + switch (mode) {
8958 + case VCHIQ_BULK_MODE_NOCALLBACK:
8959 + case VCHIQ_BULK_MODE_CALLBACK:
8960 + break;
8961 + case VCHIQ_BULK_MODE_BLOCKING:
8962 + bulk_waiter = (struct bulk_waiter *)userdata;
8963 + sema_init(&bulk_waiter->event, 0);
8964 + bulk_waiter->actual = 0;
8965 + bulk_waiter->bulk = NULL;
8966 + break;
8967 + case VCHIQ_BULK_MODE_WAITING:
8968 + bulk_waiter = (struct bulk_waiter *)userdata;
8969 + bulk = bulk_waiter->bulk;
8970 + goto waiting;
8971 + default:
8972 + goto error_exit;
8973 + }
8974 +
8975 + state = service->state;
8976 +
8977 + queue = (dir == VCHIQ_BULK_TRANSMIT) ?
8978 + &service->bulk_tx : &service->bulk_rx;
8979 +
8980 + if (mutex_lock_interruptible(&service->bulk_mutex) != 0) {
8981 + status = VCHIQ_RETRY;
8982 + goto error_exit;
8983 + }
8984 +
8985 + if (queue->local_insert == queue->remove + VCHIQ_NUM_SERVICE_BULKS) {
8986 + VCHIQ_SERVICE_STATS_INC(service, bulk_stalls);
8987 + do {
8988 + mutex_unlock(&service->bulk_mutex);
8989 + if (down_interruptible(&service->bulk_remove_event)
8990 + != 0) {
8991 + status = VCHIQ_RETRY;
8992 + goto error_exit;
8993 + }
8994 + if (mutex_lock_interruptible(&service->bulk_mutex)
8995 + != 0) {
8996 + status = VCHIQ_RETRY;
8997 + goto error_exit;
8998 + }
8999 + } while (queue->local_insert == queue->remove +
9000 + VCHIQ_NUM_SERVICE_BULKS);
9001 + }
9002 +
9003 + bulk = &queue->bulks[BULK_INDEX(queue->local_insert)];
9004 +
9005 + bulk->mode = mode;
9006 + bulk->dir = dir;
9007 + bulk->userdata = userdata;
9008 + bulk->size = size;
9009 + bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
9010 +
9011 + if (vchiq_prepare_bulk_data(bulk, memhandle, offset, size, dir) !=
9012 + VCHIQ_SUCCESS)
9013 + goto unlock_error_exit;
9014 +
9015 + wmb();
9016 +
9017 + vchiq_log_info(vchiq_core_log_level,
9018 + "%d: bt (%d->%d) %cx %x@%x %x",
9019 + state->id,
9020 + service->localport, service->remoteport, dir_char,
9021 + size, (unsigned int)bulk->data, (unsigned int)userdata);
9022 +
9023 + if (state->is_master) {
9024 + queue->local_insert++;
9025 + if (resolve_bulks(service, queue))
9026 + request_poll(state, service,
9027 + (dir == VCHIQ_BULK_TRANSMIT) ?
9028 + VCHIQ_POLL_TXNOTIFY : VCHIQ_POLL_RXNOTIFY);
9029 + } else {
9030 + int payload[2] = { (int)bulk->data, bulk->size };
9031 + VCHIQ_ELEMENT_T element = { payload, sizeof(payload) };
9032 +
9033 + status = queue_message(state, NULL,
9034 + VCHIQ_MAKE_MSG(dir_msgtype,
9035 + service->localport, service->remoteport),
9036 + &element, 1, sizeof(payload), 1);
9037 + if (status != VCHIQ_SUCCESS) {
9038 + vchiq_complete_bulk(bulk);
9039 + goto unlock_error_exit;
9040 + }
9041 + queue->local_insert++;
9042 + }
9043 +
9044 + mutex_unlock(&service->bulk_mutex);
9045 +
9046 + vchiq_log_trace(vchiq_core_log_level,
9047 + "%d: bt:%d %cx li=%x ri=%x p=%x",
9048 + state->id,
9049 + service->localport, dir_char,
9050 + queue->local_insert, queue->remote_insert, queue->process);
9051 +
9052 +waiting:
9053 + unlock_service(service);
9054 +
9055 + status = VCHIQ_SUCCESS;
9056 +
9057 + if (bulk_waiter) {
9058 + bulk_waiter->bulk = bulk;
9059 + if (down_interruptible(&bulk_waiter->event) != 0)
9060 + status = VCHIQ_RETRY;
9061 + else if (bulk_waiter->actual == VCHIQ_BULK_ACTUAL_ABORTED)
9062 + status = VCHIQ_ERROR;
9063 + }
9064 +
9065 + return status;
9066 +
9067 +unlock_error_exit:
9068 + mutex_unlock(&service->bulk_mutex);
9069 +
9070 +error_exit:
9071 + if (service)
9072 + unlock_service(service);
9073 + return status;
9074 +}
9075 +
9076 +VCHIQ_STATUS_T
9077 +vchiq_queue_message(VCHIQ_SERVICE_HANDLE_T handle,
9078 + const VCHIQ_ELEMENT_T *elements, unsigned int count)
9079 +{
9080 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
9081 + VCHIQ_STATUS_T status = VCHIQ_ERROR;
9082 +
9083 + unsigned int size = 0;
9084 + unsigned int i;
9085 +
9086 + if (!service ||
9087 + (vchiq_check_service(service) != VCHIQ_SUCCESS))
9088 + goto error_exit;
9089 +
9090 + for (i = 0; i < (unsigned int)count; i++) {
9091 + if (elements[i].size) {
9092 + if (elements[i].data == NULL) {
9093 + VCHIQ_SERVICE_STATS_INC(service, error_count);
9094 + goto error_exit;
9095 + }
9096 + size += elements[i].size;
9097 + }
9098 + }
9099 +
9100 + if (size > VCHIQ_MAX_MSG_SIZE) {
9101 + VCHIQ_SERVICE_STATS_INC(service, error_count);
9102 + goto error_exit;
9103 + }
9104 +
9105 + switch (service->srvstate) {
9106 + case VCHIQ_SRVSTATE_OPEN:
9107 + status = queue_message(service->state, service,
9108 + VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA,
9109 + service->localport,
9110 + service->remoteport),
9111 + elements, count, size, 1);
9112 + break;
9113 + case VCHIQ_SRVSTATE_OPENSYNC:
9114 + status = queue_message_sync(service->state, service,
9115 + VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA,
9116 + service->localport,
9117 + service->remoteport),
9118 + elements, count, size, 1);
9119 + break;
9120 + default:
9121 + status = VCHIQ_ERROR;
9122 + break;
9123 + }
9124 +
9125 +error_exit:
9126 + if (service)
9127 + unlock_service(service);
9128 +
9129 + return status;
9130 +}
9131 +
9132 +void
9133 +vchiq_release_message(VCHIQ_SERVICE_HANDLE_T handle, VCHIQ_HEADER_T *header)
9134 +{
9135 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
9136 + VCHIQ_SHARED_STATE_T *remote;
9137 + VCHIQ_STATE_T *state;
9138 + int slot_index;
9139 +
9140 + if (!service)
9141 + return;
9142 +
9143 + state = service->state;
9144 + remote = state->remote;
9145 +
9146 + slot_index = SLOT_INDEX_FROM_DATA(state, (void *)header);
9147 +
9148 + if ((slot_index >= remote->slot_first) &&
9149 + (slot_index <= remote->slot_last)) {
9150 + int msgid = header->msgid;
9151 + if (msgid & VCHIQ_MSGID_CLAIMED) {
9152 + VCHIQ_SLOT_INFO_T *slot_info =
9153 + SLOT_INFO_FROM_INDEX(state, slot_index);
9154 +
9155 + release_slot(state, slot_info, header, service);
9156 + }
9157 + } else if (slot_index == remote->slot_sync)
9158 + release_message_sync(state, header);
9159 +
9160 + unlock_service(service);
9161 +}
9162 +
9163 +static void
9164 +release_message_sync(VCHIQ_STATE_T *state, VCHIQ_HEADER_T *header)
9165 +{
9166 + header->msgid = VCHIQ_MSGID_PADDING;
9167 + wmb();
9168 + remote_event_signal(&state->remote->sync_release);
9169 +}
9170 +
9171 +VCHIQ_STATUS_T
9172 +vchiq_get_peer_version(VCHIQ_SERVICE_HANDLE_T handle, short *peer_version)
9173 +{
9174 + VCHIQ_STATUS_T status = VCHIQ_ERROR;
9175 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
9176 +
9177 + if (!service ||
9178 + (vchiq_check_service(service) != VCHIQ_SUCCESS) ||
9179 + !peer_version)
9180 + goto exit;
9181 + *peer_version = service->peer_version;
9182 + status = VCHIQ_SUCCESS;
9183 +
9184 +exit:
9185 + if (service)
9186 + unlock_service(service);
9187 + return status;
9188 +}
9189 +
9190 +VCHIQ_STATUS_T
9191 +vchiq_get_config(VCHIQ_INSTANCE_T instance,
9192 + int config_size, VCHIQ_CONFIG_T *pconfig)
9193 +{
9194 + VCHIQ_CONFIG_T config;
9195 +
9196 + (void)instance;
9197 +
9198 + config.max_msg_size = VCHIQ_MAX_MSG_SIZE;
9199 + config.bulk_threshold = VCHIQ_MAX_MSG_SIZE;
9200 + config.max_outstanding_bulks = VCHIQ_NUM_SERVICE_BULKS;
9201 + config.max_services = VCHIQ_MAX_SERVICES;
9202 + config.version = VCHIQ_VERSION;
9203 + config.version_min = VCHIQ_VERSION_MIN;
9204 +
9205 + if (config_size > sizeof(VCHIQ_CONFIG_T))
9206 + return VCHIQ_ERROR;
9207 +
9208 + memcpy(pconfig, &config,
9209 + min(config_size, (int)(sizeof(VCHIQ_CONFIG_T))));
9210 +
9211 + return VCHIQ_SUCCESS;
9212 +}
9213 +
9214 +VCHIQ_STATUS_T
9215 +vchiq_set_service_option(VCHIQ_SERVICE_HANDLE_T handle,
9216 + VCHIQ_SERVICE_OPTION_T option, int value)
9217 +{
9218 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
9219 + VCHIQ_STATUS_T status = VCHIQ_ERROR;
9220 +
9221 + if (service) {
9222 + switch (option) {
9223 + case VCHIQ_SERVICE_OPTION_AUTOCLOSE:
9224 + service->auto_close = value;
9225 + status = VCHIQ_SUCCESS;
9226 + break;
9227 +
9228 + case VCHIQ_SERVICE_OPTION_SLOT_QUOTA: {
9229 + VCHIQ_SERVICE_QUOTA_T *service_quota =
9230 + &service->state->service_quotas[
9231 + service->localport];
9232 + if (value == 0)
9233 + value = service->state->default_slot_quota;
9234 + if ((value >= service_quota->slot_use_count) &&
9235 + (value < (unsigned short)~0)) {
9236 + service_quota->slot_quota = value;
9237 + if ((value >= service_quota->slot_use_count) &&
9238 + (service_quota->message_quota >=
9239 + service_quota->message_use_count)) {
9240 + /* Signal the service that it may have
9241 + ** dropped below its quota */
9242 + up(&service_quota->quota_event);
9243 + }
9244 + status = VCHIQ_SUCCESS;
9245 + }
9246 + } break;
9247 +
9248 + case VCHIQ_SERVICE_OPTION_MESSAGE_QUOTA: {
9249 + VCHIQ_SERVICE_QUOTA_T *service_quota =
9250 + &service->state->service_quotas[
9251 + service->localport];
9252 + if (value == 0)
9253 + value = service->state->default_message_quota;
9254 + if ((value >= service_quota->message_use_count) &&
9255 + (value < (unsigned short)~0)) {
9256 + service_quota->message_quota = value;
9257 + if ((value >=
9258 + service_quota->message_use_count) &&
9259 + (service_quota->slot_quota >=
9260 + service_quota->slot_use_count))
9261 + /* Signal the service that it may have
9262 + ** dropped below its quota */
9263 + up(&service_quota->quota_event);
9264 + status = VCHIQ_SUCCESS;
9265 + }
9266 + } break;
9267 +
9268 + case VCHIQ_SERVICE_OPTION_SYNCHRONOUS:
9269 + if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) ||
9270 + (service->srvstate ==
9271 + VCHIQ_SRVSTATE_LISTENING)) {
9272 + service->sync = value;
9273 + status = VCHIQ_SUCCESS;
9274 + }
9275 + break;
9276 +
9277 + case VCHIQ_SERVICE_OPTION_TRACE:
9278 + service->trace = value;
9279 + status = VCHIQ_SUCCESS;
9280 + break;
9281 +
9282 + default:
9283 + break;
9284 + }
9285 + unlock_service(service);
9286 + }
9287 +
9288 + return status;
9289 +}
9290 +
9291 +void
9292 +vchiq_dump_shared_state(void *dump_context, VCHIQ_STATE_T *state,
9293 + VCHIQ_SHARED_STATE_T *shared, const char *label)
9294 +{
9295 + static const char *const debug_names[] = {
9296 + "<entries>",
9297 + "SLOT_HANDLER_COUNT",
9298 + "SLOT_HANDLER_LINE",
9299 + "PARSE_LINE",
9300 + "PARSE_HEADER",
9301 + "PARSE_MSGID",
9302 + "AWAIT_COMPLETION_LINE",
9303 + "DEQUEUE_MESSAGE_LINE",
9304 + "SERVICE_CALLBACK_LINE",
9305 + "MSG_QUEUE_FULL_COUNT",
9306 + "COMPLETION_QUEUE_FULL_COUNT"
9307 + };
9308 + int i;
9309 +
9310 + char buf[80];
9311 + int len;
9312 + len = snprintf(buf, sizeof(buf),
9313 + " %s: slots %d-%d tx_pos=%x recycle=%x",
9314 + label, shared->slot_first, shared->slot_last,
9315 + shared->tx_pos, shared->slot_queue_recycle);
9316 + vchiq_dump(dump_context, buf, len + 1);
9317 +
9318 + len = snprintf(buf, sizeof(buf),
9319 + " Slots claimed:");
9320 + vchiq_dump(dump_context, buf, len + 1);
9321 +
9322 + for (i = shared->slot_first; i <= shared->slot_last; i++) {
9323 + VCHIQ_SLOT_INFO_T slot_info = *SLOT_INFO_FROM_INDEX(state, i);
9324 + if (slot_info.use_count != slot_info.release_count) {
9325 + len = snprintf(buf, sizeof(buf),
9326 + " %d: %d/%d", i, slot_info.use_count,
9327 + slot_info.release_count);
9328 + vchiq_dump(dump_context, buf, len + 1);
9329 + }
9330 + }
9331 +
9332 + for (i = 1; i < shared->debug[DEBUG_ENTRIES]; i++) {
9333 + len = snprintf(buf, sizeof(buf), " DEBUG: %s = %d(%x)",
9334 + debug_names[i], shared->debug[i], shared->debug[i]);
9335 + vchiq_dump(dump_context, buf, len + 1);
9336 + }
9337 +}
9338 +
9339 +void
9340 +vchiq_dump_state(void *dump_context, VCHIQ_STATE_T *state)
9341 +{
9342 + char buf[80];
9343 + int len;
9344 + int i;
9345 +
9346 + len = snprintf(buf, sizeof(buf), "State %d: %s", state->id,
9347 + conn_state_names[state->conn_state]);
9348 + vchiq_dump(dump_context, buf, len + 1);
9349 +
9350 + len = snprintf(buf, sizeof(buf),
9351 + " tx_pos=%x(@%x), rx_pos=%x(@%x)",
9352 + state->local->tx_pos,
9353 + (uint32_t)state->tx_data +
9354 + (state->local_tx_pos & VCHIQ_SLOT_MASK),
9355 + state->rx_pos,
9356 + (uint32_t)state->rx_data +
9357 + (state->rx_pos & VCHIQ_SLOT_MASK));
9358 + vchiq_dump(dump_context, buf, len + 1);
9359 +
9360 + len = snprintf(buf, sizeof(buf),
9361 + " Version: %d (min %d)",
9362 + VCHIQ_VERSION, VCHIQ_VERSION_MIN);
9363 + vchiq_dump(dump_context, buf, len + 1);
9364 +
9365 + if (VCHIQ_ENABLE_STATS) {
9366 + len = snprintf(buf, sizeof(buf),
9367 + " Stats: ctrl_tx_count=%d, ctrl_rx_count=%d, "
9368 + "error_count=%d",
9369 + state->stats.ctrl_tx_count, state->stats.ctrl_rx_count,
9370 + state->stats.error_count);
9371 + vchiq_dump(dump_context, buf, len + 1);
9372 + }
9373 +
9374 + len = snprintf(buf, sizeof(buf),
9375 + " Slots: %d available (%d data), %d recyclable, %d stalls "
9376 + "(%d data)",
9377 + ((state->slot_queue_available * VCHIQ_SLOT_SIZE) -
9378 + state->local_tx_pos) / VCHIQ_SLOT_SIZE,
9379 + state->data_quota - state->data_use_count,
9380 + state->local->slot_queue_recycle - state->slot_queue_available,
9381 + state->stats.slot_stalls, state->stats.data_stalls);
9382 + vchiq_dump(dump_context, buf, len + 1);
9383 +
9384 + vchiq_dump_platform_state(dump_context);
9385 +
9386 + vchiq_dump_shared_state(dump_context, state, state->local, "Local");
9387 + vchiq_dump_shared_state(dump_context, state, state->remote, "Remote");
9388 +
9389 + vchiq_dump_platform_instances(dump_context);
9390 +
9391 + for (i = 0; i < state->unused_service; i++) {
9392 + VCHIQ_SERVICE_T *service = find_service_by_port(state, i);
9393 +
9394 + if (service) {
9395 + vchiq_dump_service_state(dump_context, service);
9396 + unlock_service(service);
9397 + }
9398 + }
9399 +}
9400 +
9401 +void
9402 +vchiq_dump_service_state(void *dump_context, VCHIQ_SERVICE_T *service)
9403 +{
9404 + char buf[80];
9405 + int len;
9406 +
9407 + len = snprintf(buf, sizeof(buf), "Service %d: %s (ref %u)",
9408 + service->localport, srvstate_names[service->srvstate],
9409 + service->ref_count - 1); /*Don't include the lock just taken*/
9410 +
9411 + if (service->srvstate != VCHIQ_SRVSTATE_FREE) {
9412 + char remoteport[30];
9413 + VCHIQ_SERVICE_QUOTA_T *service_quota =
9414 + &service->state->service_quotas[service->localport];
9415 + int fourcc = service->base.fourcc;
9416 + int tx_pending, rx_pending;
9417 + if (service->remoteport != VCHIQ_PORT_FREE) {
9418 + int len2 = snprintf(remoteport, sizeof(remoteport),
9419 + "%d", service->remoteport);
9420 + if (service->public_fourcc != VCHIQ_FOURCC_INVALID)
9421 + snprintf(remoteport + len2,
9422 + sizeof(remoteport) - len2,
9423 + " (client %x)", service->client_id);
9424 + } else
9425 + strcpy(remoteport, "n/a");
9426 +
9427 + len += snprintf(buf + len, sizeof(buf) - len,
9428 + " '%c%c%c%c' remote %s (msg use %d/%d, slot use %d/%d)",
9429 + VCHIQ_FOURCC_AS_4CHARS(fourcc),
9430 + remoteport,
9431 + service_quota->message_use_count,
9432 + service_quota->message_quota,
9433 + service_quota->slot_use_count,
9434 + service_quota->slot_quota);
9435 +
9436 + vchiq_dump(dump_context, buf, len + 1);
9437 +
9438 + tx_pending = service->bulk_tx.local_insert -
9439 + service->bulk_tx.remote_insert;
9440 +
9441 + rx_pending = service->bulk_rx.local_insert -
9442 + service->bulk_rx.remote_insert;
9443 +
9444 + len = snprintf(buf, sizeof(buf),
9445 + " Bulk: tx_pending=%d (size %d),"
9446 + " rx_pending=%d (size %d)",
9447 + tx_pending,
9448 + tx_pending ? service->bulk_tx.bulks[
9449 + BULK_INDEX(service->bulk_tx.remove)].size : 0,
9450 + rx_pending,
9451 + rx_pending ? service->bulk_rx.bulks[
9452 + BULK_INDEX(service->bulk_rx.remove)].size : 0);
9453 +
9454 + if (VCHIQ_ENABLE_STATS) {
9455 + vchiq_dump(dump_context, buf, len + 1);
9456 +
9457 + len = snprintf(buf, sizeof(buf),
9458 + " Ctrl: tx_count=%d, tx_bytes=%llu, "
9459 + "rx_count=%d, rx_bytes=%llu",
9460 + service->stats.ctrl_tx_count,
9461 + service->stats.ctrl_tx_bytes,
9462 + service->stats.ctrl_rx_count,
9463 + service->stats.ctrl_rx_bytes);
9464 + vchiq_dump(dump_context, buf, len + 1);
9465 +
9466 + len = snprintf(buf, sizeof(buf),
9467 + " Bulk: tx_count=%d, tx_bytes=%llu, "
9468 + "rx_count=%d, rx_bytes=%llu",
9469 + service->stats.bulk_tx_count,
9470 + service->stats.bulk_tx_bytes,
9471 + service->stats.bulk_rx_count,
9472 + service->stats.bulk_rx_bytes);
9473 + vchiq_dump(dump_context, buf, len + 1);
9474 +
9475 + len = snprintf(buf, sizeof(buf),
9476 + " %d quota stalls, %d slot stalls, "
9477 + "%d bulk stalls, %d aborted, %d errors",
9478 + service->stats.quota_stalls,
9479 + service->stats.slot_stalls,
9480 + service->stats.bulk_stalls,
9481 + service->stats.bulk_aborted_count,
9482 + service->stats.error_count);
9483 + }
9484 + }
9485 +
9486 + vchiq_dump(dump_context, buf, len + 1);
9487 +
9488 + if (service->srvstate != VCHIQ_SRVSTATE_FREE)
9489 + vchiq_dump_platform_service_state(dump_context, service);
9490 +}
9491 +
9492 +
9493 +void
9494 +vchiq_loud_error_header(void)
9495 +{
9496 + vchiq_log_error(vchiq_core_log_level,
9497 + "============================================================"
9498 + "================");
9499 + vchiq_log_error(vchiq_core_log_level,
9500 + "============================================================"
9501 + "================");
9502 + vchiq_log_error(vchiq_core_log_level, "=====");
9503 +}
9504 +
9505 +void
9506 +vchiq_loud_error_footer(void)
9507 +{
9508 + vchiq_log_error(vchiq_core_log_level, "=====");
9509 + vchiq_log_error(vchiq_core_log_level,
9510 + "============================================================"
9511 + "================");
9512 + vchiq_log_error(vchiq_core_log_level,
9513 + "============================================================"
9514 + "================");
9515 +}
9516 +
9517 +
9518 +VCHIQ_STATUS_T vchiq_send_remote_use(VCHIQ_STATE_T *state)
9519 +{
9520 + VCHIQ_STATUS_T status = VCHIQ_RETRY;
9521 + if (state->conn_state != VCHIQ_CONNSTATE_DISCONNECTED)
9522 + status = queue_message(state, NULL,
9523 + VCHIQ_MAKE_MSG(VCHIQ_MSG_REMOTE_USE, 0, 0),
9524 + NULL, 0, 0, 0);
9525 + return status;
9526 +}
9527 +
9528 +VCHIQ_STATUS_T vchiq_send_remote_release(VCHIQ_STATE_T *state)
9529 +{
9530 + VCHIQ_STATUS_T status = VCHIQ_RETRY;
9531 + if (state->conn_state != VCHIQ_CONNSTATE_DISCONNECTED)
9532 + status = queue_message(state, NULL,
9533 + VCHIQ_MAKE_MSG(VCHIQ_MSG_REMOTE_RELEASE, 0, 0),
9534 + NULL, 0, 0, 0);
9535 + return status;
9536 +}
9537 +
9538 +VCHIQ_STATUS_T vchiq_send_remote_use_active(VCHIQ_STATE_T *state)
9539 +{
9540 + VCHIQ_STATUS_T status = VCHIQ_RETRY;
9541 + if (state->conn_state != VCHIQ_CONNSTATE_DISCONNECTED)
9542 + status = queue_message(state, NULL,
9543 + VCHIQ_MAKE_MSG(VCHIQ_MSG_REMOTE_USE_ACTIVE, 0, 0),
9544 + NULL, 0, 0, 0);
9545 + return status;
9546 +}
9547 +
9548 +void vchiq_log_dump_mem(const char *label, uint32_t addr, const void *voidMem,
9549 + size_t numBytes)
9550 +{
9551 + const uint8_t *mem = (const uint8_t *)voidMem;
9552 + size_t offset;
9553 + char lineBuf[100];
9554 + char *s;
9555 +
9556 + while (numBytes > 0) {
9557 + s = lineBuf;
9558 +
9559 + for (offset = 0; offset < 16; offset++) {
9560 + if (offset < numBytes)
9561 + s += snprintf(s, 4, "%02x ", mem[offset]);
9562 + else
9563 + s += snprintf(s, 4, " ");
9564 + }
9565 +
9566 + for (offset = 0; offset < 16; offset++) {
9567 + if (offset < numBytes) {
9568 + uint8_t ch = mem[offset];
9569 +
9570 + if ((ch < ' ') || (ch > '~'))
9571 + ch = '.';
9572 + *s++ = (char)ch;
9573 + }
9574 + }
9575 + *s++ = '\0';
9576 +
9577 + if ((label != NULL) && (*label != '\0'))
9578 + vchiq_log_trace(VCHIQ_LOG_TRACE,
9579 + "%s: %08x: %s", label, addr, lineBuf);
9580 + else
9581 + vchiq_log_trace(VCHIQ_LOG_TRACE,
9582 + "%08x: %s", addr, lineBuf);
9583 +
9584 + addr += 16;
9585 + mem += 16;
9586 + if (numBytes > 16)
9587 + numBytes -= 16;
9588 + else
9589 + numBytes = 0;
9590 + }
9591 +}
9592 diff --git a/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.h b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.h
9593 new file mode 100644
9594 index 0000000..1b27917
9595 --- /dev/null
9596 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.h
9597 @@ -0,0 +1,711 @@
9598 +/**
9599 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
9600 + *
9601 + * Redistribution and use in source and binary forms, with or without
9602 + * modification, are permitted provided that the following conditions
9603 + * are met:
9604 + * 1. Redistributions of source code must retain the above copyright
9605 + * notice, this list of conditions, and the following disclaimer,
9606 + * without modification.
9607 + * 2. Redistributions in binary form must reproduce the above copyright
9608 + * notice, this list of conditions and the following disclaimer in the
9609 + * documentation and/or other materials provided with the distribution.
9610 + * 3. The names of the above-listed copyright holders may not be used
9611 + * to endorse or promote products derived from this software without
9612 + * specific prior written permission.
9613 + *
9614 + * ALTERNATIVELY, this software may be distributed under the terms of the
9615 + * GNU General Public License ("GPL") version 2, as published by the Free
9616 + * Software Foundation.
9617 + *
9618 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
9619 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
9620 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
9621 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
9622 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
9623 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
9624 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
9625 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
9626 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
9627 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
9628 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
9629 + */
9630 +
9631 +#ifndef VCHIQ_CORE_H
9632 +#define VCHIQ_CORE_H
9633 +
9634 +#include <linux/mutex.h>
9635 +#include <linux/semaphore.h>
9636 +#include <linux/kthread.h>
9637 +
9638 +#include "vchiq_cfg.h"
9639 +
9640 +#include "vchiq.h"
9641 +
9642 +/* Run time control of log level, based on KERN_XXX level. */
9643 +#define VCHIQ_LOG_DEFAULT 4
9644 +#define VCHIQ_LOG_ERROR 3
9645 +#define VCHIQ_LOG_WARNING 4
9646 +#define VCHIQ_LOG_INFO 6
9647 +#define VCHIQ_LOG_TRACE 7
9648 +
9649 +#define VCHIQ_LOG_PREFIX KERN_INFO "vchiq: "
9650 +
9651 +#ifndef vchiq_log_error
9652 +#define vchiq_log_error(cat, fmt, ...) \
9653 + do { if (cat >= VCHIQ_LOG_ERROR) \
9654 + printk(VCHIQ_LOG_PREFIX fmt "\n", ##__VA_ARGS__); } while (0)
9655 +#endif
9656 +#ifndef vchiq_log_warning
9657 +#define vchiq_log_warning(cat, fmt, ...) \
9658 + do { if (cat >= VCHIQ_LOG_WARNING) \
9659 + printk(VCHIQ_LOG_PREFIX fmt "\n", ##__VA_ARGS__); } while (0)
9660 +#endif
9661 +#ifndef vchiq_log_info
9662 +#define vchiq_log_info(cat, fmt, ...) \
9663 + do { if (cat >= VCHIQ_LOG_INFO) \
9664 + printk(VCHIQ_LOG_PREFIX fmt "\n", ##__VA_ARGS__); } while (0)
9665 +#endif
9666 +#ifndef vchiq_log_trace
9667 +#define vchiq_log_trace(cat, fmt, ...) \
9668 + do { if (cat >= VCHIQ_LOG_TRACE) \
9669 + printk(VCHIQ_LOG_PREFIX fmt "\n", ##__VA_ARGS__); } while (0)
9670 +#endif
9671 +
9672 +#define vchiq_loud_error(...) \
9673 + vchiq_log_error(vchiq_core_log_level, "===== " __VA_ARGS__)
9674 +
9675 +#ifndef vchiq_static_assert
9676 +#define vchiq_static_assert(cond) __attribute__((unused)) \
9677 + extern int vchiq_static_assert[(cond) ? 1 : -1]
9678 +#endif
9679 +
9680 +#define IS_POW2(x) (x && ((x & (x - 1)) == 0))
9681 +
9682 +/* Ensure that the slot size and maximum number of slots are powers of 2 */
9683 +vchiq_static_assert(IS_POW2(VCHIQ_SLOT_SIZE));
9684 +vchiq_static_assert(IS_POW2(VCHIQ_MAX_SLOTS));
9685 +vchiq_static_assert(IS_POW2(VCHIQ_MAX_SLOTS_PER_SIDE));
9686 +
9687 +#define VCHIQ_SLOT_MASK (VCHIQ_SLOT_SIZE - 1)
9688 +#define VCHIQ_SLOT_QUEUE_MASK (VCHIQ_MAX_SLOTS_PER_SIDE - 1)
9689 +#define VCHIQ_SLOT_ZERO_SLOTS ((sizeof(VCHIQ_SLOT_ZERO_T) + \
9690 + VCHIQ_SLOT_SIZE - 1) / VCHIQ_SLOT_SIZE)
9691 +
9692 +#define VCHIQ_MSG_PADDING 0 /* - */
9693 +#define VCHIQ_MSG_CONNECT 1 /* - */
9694 +#define VCHIQ_MSG_OPEN 2 /* + (srcport, -), fourcc, client_id */
9695 +#define VCHIQ_MSG_OPENACK 3 /* + (srcport, dstport) */
9696 +#define VCHIQ_MSG_CLOSE 4 /* + (srcport, dstport) */
9697 +#define VCHIQ_MSG_DATA 5 /* + (srcport, dstport) */
9698 +#define VCHIQ_MSG_BULK_RX 6 /* + (srcport, dstport), data, size */
9699 +#define VCHIQ_MSG_BULK_TX 7 /* + (srcport, dstport), data, size */
9700 +#define VCHIQ_MSG_BULK_RX_DONE 8 /* + (srcport, dstport), actual */
9701 +#define VCHIQ_MSG_BULK_TX_DONE 9 /* + (srcport, dstport), actual */
9702 +#define VCHIQ_MSG_PAUSE 10 /* - */
9703 +#define VCHIQ_MSG_RESUME 11 /* - */
9704 +#define VCHIQ_MSG_REMOTE_USE 12 /* - */
9705 +#define VCHIQ_MSG_REMOTE_RELEASE 13 /* - */
9706 +#define VCHIQ_MSG_REMOTE_USE_ACTIVE 14 /* - */
9707 +
9708 +#define VCHIQ_PORT_MAX (VCHIQ_MAX_SERVICES - 1)
9709 +#define VCHIQ_PORT_FREE 0x1000
9710 +#define VCHIQ_PORT_IS_VALID(port) (port < VCHIQ_PORT_FREE)
9711 +#define VCHIQ_MAKE_MSG(type, srcport, dstport) \
9712 + ((type<<24) | (srcport<<12) | (dstport<<0))
9713 +#define VCHIQ_MSG_TYPE(msgid) ((unsigned int)msgid >> 24)
9714 +#define VCHIQ_MSG_SRCPORT(msgid) \
9715 + (unsigned short)(((unsigned int)msgid >> 12) & 0xfff)
9716 +#define VCHIQ_MSG_DSTPORT(msgid) \
9717 + ((unsigned short)msgid & 0xfff)
9718 +
9719 +#define VCHIQ_FOURCC_AS_4CHARS(fourcc) \
9720 + ((fourcc) >> 24) & 0xff, \
9721 + ((fourcc) >> 16) & 0xff, \
9722 + ((fourcc) >> 8) & 0xff, \
9723 + (fourcc) & 0xff
9724 +
9725 +/* Ensure the fields are wide enough */
9726 +vchiq_static_assert(VCHIQ_MSG_SRCPORT(VCHIQ_MAKE_MSG(0, 0, VCHIQ_PORT_MAX))
9727 + == 0);
9728 +vchiq_static_assert(VCHIQ_MSG_TYPE(VCHIQ_MAKE_MSG(0, VCHIQ_PORT_MAX, 0)) == 0);
9729 +vchiq_static_assert((unsigned int)VCHIQ_PORT_MAX <
9730 + (unsigned int)VCHIQ_PORT_FREE);
9731 +
9732 +#define VCHIQ_MSGID_PADDING VCHIQ_MAKE_MSG(VCHIQ_MSG_PADDING, 0, 0)
9733 +#define VCHIQ_MSGID_CLAIMED 0x40000000
9734 +
9735 +#define VCHIQ_FOURCC_INVALID 0x00000000
9736 +#define VCHIQ_FOURCC_IS_LEGAL(fourcc) (fourcc != VCHIQ_FOURCC_INVALID)
9737 +
9738 +#define VCHIQ_BULK_ACTUAL_ABORTED -1
9739 +
9740 +typedef uint32_t BITSET_T;
9741 +
9742 +vchiq_static_assert((sizeof(BITSET_T) * 8) == 32);
9743 +
9744 +#define BITSET_SIZE(b) ((b + 31) >> 5)
9745 +#define BITSET_WORD(b) (b >> 5)
9746 +#define BITSET_BIT(b) (1 << (b & 31))
9747 +#define BITSET_ZERO(bs) memset(bs, 0, sizeof(bs))
9748 +#define BITSET_IS_SET(bs, b) (bs[BITSET_WORD(b)] & BITSET_BIT(b))
9749 +#define BITSET_SET(bs, b) (bs[BITSET_WORD(b)] |= BITSET_BIT(b))
9750 +#define BITSET_CLR(bs, b) (bs[BITSET_WORD(b)] &= ~BITSET_BIT(b))
9751 +
9752 +#if VCHIQ_ENABLE_STATS
9753 +#define VCHIQ_STATS_INC(state, stat) (state->stats. stat++)
9754 +#define VCHIQ_SERVICE_STATS_INC(service, stat) (service->stats. stat++)
9755 +#define VCHIQ_SERVICE_STATS_ADD(service, stat, addend) \
9756 + (service->stats. stat += addend)
9757 +#else
9758 +#define VCHIQ_STATS_INC(state, stat) ((void)0)
9759 +#define VCHIQ_SERVICE_STATS_INC(service, stat) ((void)0)
9760 +#define VCHIQ_SERVICE_STATS_ADD(service, stat, addend) ((void)0)
9761 +#endif
9762 +
9763 +enum {
9764 + DEBUG_ENTRIES,
9765 +#if VCHIQ_ENABLE_DEBUG
9766 + DEBUG_SLOT_HANDLER_COUNT,
9767 + DEBUG_SLOT_HANDLER_LINE,
9768 + DEBUG_PARSE_LINE,
9769 + DEBUG_PARSE_HEADER,
9770 + DEBUG_PARSE_MSGID,
9771 + DEBUG_AWAIT_COMPLETION_LINE,
9772 + DEBUG_DEQUEUE_MESSAGE_LINE,
9773 + DEBUG_SERVICE_CALLBACK_LINE,
9774 + DEBUG_MSG_QUEUE_FULL_COUNT,
9775 + DEBUG_COMPLETION_QUEUE_FULL_COUNT,
9776 +#endif
9777 + DEBUG_MAX
9778 +};
9779 +
9780 +#if VCHIQ_ENABLE_DEBUG
9781 +
9782 +#define DEBUG_INITIALISE(local) int *debug_ptr = (local)->debug;
9783 +#define DEBUG_TRACE(d) \
9784 + do { debug_ptr[DEBUG_ ## d] = __LINE__; dsb(); } while (0)
9785 +#define DEBUG_VALUE(d, v) \
9786 + do { debug_ptr[DEBUG_ ## d] = (v); dsb(); } while (0)
9787 +#define DEBUG_COUNT(d) \
9788 + do { debug_ptr[DEBUG_ ## d]++; dsb(); } while (0)
9789 +
9790 +#else /* VCHIQ_ENABLE_DEBUG */
9791 +
9792 +#define DEBUG_INITIALISE(local)
9793 +#define DEBUG_TRACE(d)
9794 +#define DEBUG_VALUE(d, v)
9795 +#define DEBUG_COUNT(d)
9796 +
9797 +#endif /* VCHIQ_ENABLE_DEBUG */
9798 +
9799 +typedef enum {
9800 + VCHIQ_CONNSTATE_DISCONNECTED,
9801 + VCHIQ_CONNSTATE_CONNECTING,
9802 + VCHIQ_CONNSTATE_CONNECTED,
9803 + VCHIQ_CONNSTATE_PAUSING,
9804 + VCHIQ_CONNSTATE_PAUSE_SENT,
9805 + VCHIQ_CONNSTATE_PAUSED,
9806 + VCHIQ_CONNSTATE_RESUMING,
9807 + VCHIQ_CONNSTATE_PAUSE_TIMEOUT,
9808 + VCHIQ_CONNSTATE_RESUME_TIMEOUT
9809 +} VCHIQ_CONNSTATE_T;
9810 +
9811 +enum {
9812 + VCHIQ_SRVSTATE_FREE,
9813 + VCHIQ_SRVSTATE_HIDDEN,
9814 + VCHIQ_SRVSTATE_LISTENING,
9815 + VCHIQ_SRVSTATE_OPENING,
9816 + VCHIQ_SRVSTATE_OPEN,
9817 + VCHIQ_SRVSTATE_OPENSYNC,
9818 + VCHIQ_SRVSTATE_CLOSESENT,
9819 + VCHIQ_SRVSTATE_CLOSERECVD,
9820 + VCHIQ_SRVSTATE_CLOSEWAIT,
9821 + VCHIQ_SRVSTATE_CLOSED
9822 +};
9823 +
9824 +enum {
9825 + VCHIQ_POLL_TERMINATE,
9826 + VCHIQ_POLL_REMOVE,
9827 + VCHIQ_POLL_TXNOTIFY,
9828 + VCHIQ_POLL_RXNOTIFY,
9829 + VCHIQ_POLL_COUNT
9830 +};
9831 +
9832 +typedef enum {
9833 + VCHIQ_BULK_TRANSMIT,
9834 + VCHIQ_BULK_RECEIVE
9835 +} VCHIQ_BULK_DIR_T;
9836 +
9837 +typedef void (*VCHIQ_USERDATA_TERM_T)(void *userdata);
9838 +
9839 +typedef struct vchiq_bulk_struct {
9840 + short mode;
9841 + short dir;
9842 + void *userdata;
9843 + VCHI_MEM_HANDLE_T handle;
9844 + void *data;
9845 + int size;
9846 + void *remote_data;
9847 + int remote_size;
9848 + int actual;
9849 +} VCHIQ_BULK_T;
9850 +
9851 +typedef struct vchiq_bulk_queue_struct {
9852 + int local_insert; /* Where to insert the next local bulk */
9853 + int remote_insert; /* Where to insert the next remote bulk (master) */
9854 + int process; /* Bulk to transfer next */
9855 + int remote_notify; /* Bulk to notify the remote client of next (mstr) */
9856 + int remove; /* Bulk to notify the local client of, and remove,
9857 + ** next */
9858 + VCHIQ_BULK_T bulks[VCHIQ_NUM_SERVICE_BULKS];
9859 +} VCHIQ_BULK_QUEUE_T;
9860 +
9861 +typedef struct remote_event_struct {
9862 + int armed;
9863 + int fired;
9864 + struct semaphore *event;
9865 +} REMOTE_EVENT_T;
9866 +
9867 +typedef struct opaque_platform_state_t *VCHIQ_PLATFORM_STATE_T;
9868 +
9869 +typedef struct vchiq_state_struct VCHIQ_STATE_T;
9870 +
9871 +typedef struct vchiq_slot_struct {
9872 + char data[VCHIQ_SLOT_SIZE];
9873 +} VCHIQ_SLOT_T;
9874 +
9875 +typedef struct vchiq_slot_info_struct {
9876 + /* Use two counters rather than one to avoid the need for a mutex. */
9877 + short use_count;
9878 + short release_count;
9879 +} VCHIQ_SLOT_INFO_T;
9880 +
9881 +typedef struct vchiq_service_struct {
9882 + VCHIQ_SERVICE_BASE_T base;
9883 + VCHIQ_SERVICE_HANDLE_T handle;
9884 + unsigned int ref_count;
9885 + int srvstate;
9886 + VCHIQ_USERDATA_TERM_T userdata_term;
9887 + unsigned int localport;
9888 + unsigned int remoteport;
9889 + int public_fourcc;
9890 + int client_id;
9891 + char auto_close;
9892 + char sync;
9893 + char closing;
9894 + char trace;
9895 + atomic_t poll_flags;
9896 + short version;
9897 + short version_min;
9898 + short peer_version;
9899 +
9900 + VCHIQ_STATE_T *state;
9901 + VCHIQ_INSTANCE_T instance;
9902 +
9903 + int service_use_count;
9904 +
9905 + VCHIQ_BULK_QUEUE_T bulk_tx;
9906 + VCHIQ_BULK_QUEUE_T bulk_rx;
9907 +
9908 + struct semaphore remove_event;
9909 + struct semaphore bulk_remove_event;
9910 + struct mutex bulk_mutex;
9911 +
9912 + struct service_stats_struct {
9913 + int quota_stalls;
9914 + int slot_stalls;
9915 + int bulk_stalls;
9916 + int error_count;
9917 + int ctrl_tx_count;
9918 + int ctrl_rx_count;
9919 + int bulk_tx_count;
9920 + int bulk_rx_count;
9921 + int bulk_aborted_count;
9922 + uint64_t ctrl_tx_bytes;
9923 + uint64_t ctrl_rx_bytes;
9924 + uint64_t bulk_tx_bytes;
9925 + uint64_t bulk_rx_bytes;
9926 + } stats;
9927 +} VCHIQ_SERVICE_T;
9928 +
9929 +/* The quota information is outside VCHIQ_SERVICE_T so that it can be
9930 + statically allocated, since for accounting reasons a service's slot
9931 + usage is carried over between users of the same port number.
9932 + */
9933 +typedef struct vchiq_service_quota_struct {
9934 + unsigned short slot_quota;
9935 + unsigned short slot_use_count;
9936 + unsigned short message_quota;
9937 + unsigned short message_use_count;
9938 + struct semaphore quota_event;
9939 + int previous_tx_index;
9940 +} VCHIQ_SERVICE_QUOTA_T;
9941 +
9942 +typedef struct vchiq_shared_state_struct {
9943 +
9944 + /* A non-zero value here indicates that the content is valid. */
9945 + int initialised;
9946 +
9947 + /* The first and last (inclusive) slots allocated to the owner. */
9948 + int slot_first;
9949 + int slot_last;
9950 +
9951 + /* The slot allocated to synchronous messages from the owner. */
9952 + int slot_sync;
9953 +
9954 + /* Signalling this event indicates that owner's slot handler thread
9955 + ** should run. */
9956 + REMOTE_EVENT_T trigger;
9957 +
9958 + /* Indicates the byte position within the stream where the next message
9959 + ** will be written. The least significant bits are an index into the
9960 + ** slot. The next bits are the index of the slot in slot_queue. */
9961 + int tx_pos;
9962 +
9963 + /* This event should be signalled when a slot is recycled. */
9964 + REMOTE_EVENT_T recycle;
9965 +
9966 + /* The slot_queue index where the next recycled slot will be written. */
9967 + int slot_queue_recycle;
9968 +
9969 + /* This event should be signalled when a synchronous message is sent. */
9970 + REMOTE_EVENT_T sync_trigger;
9971 +
9972 + /* This event should be signalled when a synchronous message has been
9973 + ** released. */
9974 + REMOTE_EVENT_T sync_release;
9975 +
9976 + /* A circular buffer of slot indexes. */
9977 + int slot_queue[VCHIQ_MAX_SLOTS_PER_SIDE];
9978 +
9979 + /* Debugging state */
9980 + int debug[DEBUG_MAX];
9981 +} VCHIQ_SHARED_STATE_T;
9982 +
9983 +typedef struct vchiq_slot_zero_struct {
9984 + int magic;
9985 + short version;
9986 + short version_min;
9987 + int slot_zero_size;
9988 + int slot_size;
9989 + int max_slots;
9990 + int max_slots_per_side;
9991 + int platform_data[2];
9992 + VCHIQ_SHARED_STATE_T master;
9993 + VCHIQ_SHARED_STATE_T slave;
9994 + VCHIQ_SLOT_INFO_T slots[VCHIQ_MAX_SLOTS];
9995 +} VCHIQ_SLOT_ZERO_T;
9996 +
9997 +struct vchiq_state_struct {
9998 + int id;
9999 + int initialised;
10000 + VCHIQ_CONNSTATE_T conn_state;
10001 + int is_master;
10002 +
10003 + VCHIQ_SHARED_STATE_T *local;
10004 + VCHIQ_SHARED_STATE_T *remote;
10005 + VCHIQ_SLOT_T *slot_data;
10006 +
10007 + unsigned short default_slot_quota;
10008 + unsigned short default_message_quota;
10009 +
10010 + /* Event indicating connect message received */
10011 + struct semaphore connect;
10012 +
10013 + /* Mutex protecting services */
10014 + struct mutex mutex;
10015 + VCHIQ_INSTANCE_T *instance;
10016 +
10017 + /* Processes incoming messages */
10018 + struct task_struct *slot_handler_thread;
10019 +
10020 + /* Processes recycled slots */
10021 + struct task_struct *recycle_thread;
10022 +
10023 + /* Processes synchronous messages */
10024 + struct task_struct *sync_thread;
10025 +
10026 + /* Local implementation of the trigger remote event */
10027 + struct semaphore trigger_event;
10028 +
10029 + /* Local implementation of the recycle remote event */
10030 + struct semaphore recycle_event;
10031 +
10032 + /* Local implementation of the sync trigger remote event */
10033 + struct semaphore sync_trigger_event;
10034 +
10035 + /* Local implementation of the sync release remote event */
10036 + struct semaphore sync_release_event;
10037 +
10038 + char *tx_data;
10039 + char *rx_data;
10040 + VCHIQ_SLOT_INFO_T *rx_info;
10041 +
10042 + struct mutex slot_mutex;
10043 +
10044 + struct mutex recycle_mutex;
10045 +
10046 + struct mutex sync_mutex;
10047 +
10048 + struct mutex bulk_transfer_mutex;
10049 +
10050 + /* Indicates the byte position within the stream from where the next
10051 + ** message will be read. The least significant bits are an index into
10052 + ** the slot.The next bits are the index of the slot in
10053 + ** remote->slot_queue. */
10054 + int rx_pos;
10055 +
10056 + /* A cached copy of local->tx_pos. Only write to local->tx_pos, and read
10057 + from remote->tx_pos. */
10058 + int local_tx_pos;
10059 +
10060 + /* The slot_queue index of the slot to become available next. */
10061 + int slot_queue_available;
10062 +
10063 + /* A flag to indicate if any poll has been requested */
10064 + int poll_needed;
10065 +
10066 + /* Ths index of the previous slot used for data messages. */
10067 + int previous_data_index;
10068 +
10069 + /* The number of slots occupied by data messages. */
10070 + unsigned short data_use_count;
10071 +
10072 + /* The maximum number of slots to be occupied by data messages. */
10073 + unsigned short data_quota;
10074 +
10075 + /* An array of bit sets indicating which services must be polled. */
10076 + atomic_t poll_services[BITSET_SIZE(VCHIQ_MAX_SERVICES)];
10077 +
10078 + /* The number of the first unused service */
10079 + int unused_service;
10080 +
10081 + /* Signalled when a free slot becomes available. */
10082 + struct semaphore slot_available_event;
10083 +
10084 + struct semaphore slot_remove_event;
10085 +
10086 + /* Signalled when a free data slot becomes available. */
10087 + struct semaphore data_quota_event;
10088 +
10089 + /* Incremented when there are bulk transfers which cannot be processed
10090 + * whilst paused and must be processed on resume */
10091 + int deferred_bulks;
10092 +
10093 + struct state_stats_struct {
10094 + int slot_stalls;
10095 + int data_stalls;
10096 + int ctrl_tx_count;
10097 + int ctrl_rx_count;
10098 + int error_count;
10099 + } stats;
10100 +
10101 + VCHIQ_SERVICE_T * services[VCHIQ_MAX_SERVICES];
10102 + VCHIQ_SERVICE_QUOTA_T service_quotas[VCHIQ_MAX_SERVICES];
10103 + VCHIQ_SLOT_INFO_T slot_info[VCHIQ_MAX_SLOTS];
10104 +
10105 + VCHIQ_PLATFORM_STATE_T platform_state;
10106 +};
10107 +
10108 +struct bulk_waiter {
10109 + VCHIQ_BULK_T *bulk;
10110 + struct semaphore event;
10111 + int actual;
10112 +};
10113 +
10114 +extern spinlock_t bulk_waiter_spinlock;
10115 +
10116 +extern int vchiq_core_log_level;
10117 +extern int vchiq_core_msg_log_level;
10118 +extern int vchiq_sync_log_level;
10119 +
10120 +extern VCHIQ_STATE_T *vchiq_states[VCHIQ_MAX_STATES];
10121 +
10122 +extern const char *
10123 +get_conn_state_name(VCHIQ_CONNSTATE_T conn_state);
10124 +
10125 +extern VCHIQ_SLOT_ZERO_T *
10126 +vchiq_init_slots(void *mem_base, int mem_size);
10127 +
10128 +extern VCHIQ_STATUS_T
10129 +vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero,
10130 + int is_master);
10131 +
10132 +extern VCHIQ_STATUS_T
10133 +vchiq_connect_internal(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance);
10134 +
10135 +extern VCHIQ_SERVICE_T *
10136 +vchiq_add_service_internal(VCHIQ_STATE_T *state,
10137 + const VCHIQ_SERVICE_PARAMS_T *params, int srvstate,
10138 + VCHIQ_INSTANCE_T instance, VCHIQ_USERDATA_TERM_T userdata_term);
10139 +
10140 +extern VCHIQ_STATUS_T
10141 +vchiq_open_service_internal(VCHIQ_SERVICE_T *service, int client_id);
10142 +
10143 +extern VCHIQ_STATUS_T
10144 +vchiq_close_service_internal(VCHIQ_SERVICE_T *service, int close_recvd);
10145 +
10146 +extern void
10147 +vchiq_terminate_service_internal(VCHIQ_SERVICE_T *service);
10148 +
10149 +extern void
10150 +vchiq_free_service_internal(VCHIQ_SERVICE_T *service);
10151 +
10152 +extern VCHIQ_STATUS_T
10153 +vchiq_shutdown_internal(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance);
10154 +
10155 +extern VCHIQ_STATUS_T
10156 +vchiq_pause_internal(VCHIQ_STATE_T *state);
10157 +
10158 +extern VCHIQ_STATUS_T
10159 +vchiq_resume_internal(VCHIQ_STATE_T *state);
10160 +
10161 +extern void
10162 +remote_event_pollall(VCHIQ_STATE_T *state);
10163 +
10164 +extern VCHIQ_STATUS_T
10165 +vchiq_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle,
10166 + VCHI_MEM_HANDLE_T memhandle, void *offset, int size, void *userdata,
10167 + VCHIQ_BULK_MODE_T mode, VCHIQ_BULK_DIR_T dir);
10168 +
10169 +extern void
10170 +vchiq_dump_state(void *dump_context, VCHIQ_STATE_T *state);
10171 +
10172 +extern void
10173 +vchiq_dump_service_state(void *dump_context, VCHIQ_SERVICE_T *service);
10174 +
10175 +extern void
10176 +vchiq_loud_error_header(void);
10177 +
10178 +extern void
10179 +vchiq_loud_error_footer(void);
10180 +
10181 +extern void
10182 +request_poll(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service, int poll_type);
10183 +
10184 +static inline VCHIQ_SERVICE_T *
10185 +handle_to_service(VCHIQ_SERVICE_HANDLE_T handle)
10186 +{
10187 + VCHIQ_STATE_T *state = vchiq_states[(handle / VCHIQ_MAX_SERVICES) &
10188 + (VCHIQ_MAX_STATES - 1)];
10189 + if (!state)
10190 + return NULL;
10191 +
10192 + return state->services[handle & (VCHIQ_MAX_SERVICES - 1)];
10193 +}
10194 +
10195 +extern VCHIQ_SERVICE_T *
10196 +find_service_by_handle(VCHIQ_SERVICE_HANDLE_T handle);
10197 +
10198 +extern VCHIQ_SERVICE_T *
10199 +find_service_by_port(VCHIQ_STATE_T *state, int localport);
10200 +
10201 +extern VCHIQ_SERVICE_T *
10202 +find_service_for_instance(VCHIQ_INSTANCE_T instance,
10203 + VCHIQ_SERVICE_HANDLE_T handle);
10204 +
10205 +extern VCHIQ_SERVICE_T *
10206 +find_closed_service_for_instance(VCHIQ_INSTANCE_T instance,
10207 + VCHIQ_SERVICE_HANDLE_T handle);
10208 +
10209 +extern VCHIQ_SERVICE_T *
10210 +next_service_by_instance(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance,
10211 + int *pidx);
10212 +
10213 +extern void
10214 +lock_service(VCHIQ_SERVICE_T *service);
10215 +
10216 +extern void
10217 +unlock_service(VCHIQ_SERVICE_T *service);
10218 +
10219 +/* The following functions are called from vchiq_core, and external
10220 +** implementations must be provided. */
10221 +
10222 +extern VCHIQ_STATUS_T
10223 +vchiq_prepare_bulk_data(VCHIQ_BULK_T *bulk,
10224 + VCHI_MEM_HANDLE_T memhandle, void *offset, int size, int dir);
10225 +
10226 +extern void
10227 +vchiq_transfer_bulk(VCHIQ_BULK_T *bulk);
10228 +
10229 +extern void
10230 +vchiq_complete_bulk(VCHIQ_BULK_T *bulk);
10231 +
10232 +extern VCHIQ_STATUS_T
10233 +vchiq_copy_from_user(void *dst, const void *src, int size);
10234 +
10235 +extern void
10236 +remote_event_signal(REMOTE_EVENT_T *event);
10237 +
10238 +void
10239 +vchiq_platform_check_suspend(VCHIQ_STATE_T *state);
10240 +
10241 +extern void
10242 +vchiq_platform_paused(VCHIQ_STATE_T *state);
10243 +
10244 +extern VCHIQ_STATUS_T
10245 +vchiq_platform_resume(VCHIQ_STATE_T *state);
10246 +
10247 +extern void
10248 +vchiq_platform_resumed(VCHIQ_STATE_T *state);
10249 +
10250 +extern void
10251 +vchiq_dump(void *dump_context, const char *str, int len);
10252 +
10253 +extern void
10254 +vchiq_dump_platform_state(void *dump_context);
10255 +
10256 +extern void
10257 +vchiq_dump_platform_instances(void *dump_context);
10258 +
10259 +extern void
10260 +vchiq_dump_platform_service_state(void *dump_context,
10261 + VCHIQ_SERVICE_T *service);
10262 +
10263 +extern VCHIQ_STATUS_T
10264 +vchiq_use_service_internal(VCHIQ_SERVICE_T *service);
10265 +
10266 +extern VCHIQ_STATUS_T
10267 +vchiq_release_service_internal(VCHIQ_SERVICE_T *service);
10268 +
10269 +extern void
10270 +vchiq_on_remote_use(VCHIQ_STATE_T *state);
10271 +
10272 +extern void
10273 +vchiq_on_remote_release(VCHIQ_STATE_T *state);
10274 +
10275 +extern VCHIQ_STATUS_T
10276 +vchiq_platform_init_state(VCHIQ_STATE_T *state);
10277 +
10278 +extern VCHIQ_STATUS_T
10279 +vchiq_check_service(VCHIQ_SERVICE_T *service);
10280 +
10281 +extern void
10282 +vchiq_on_remote_use_active(VCHIQ_STATE_T *state);
10283 +
10284 +extern VCHIQ_STATUS_T
10285 +vchiq_send_remote_use(VCHIQ_STATE_T *state);
10286 +
10287 +extern VCHIQ_STATUS_T
10288 +vchiq_send_remote_release(VCHIQ_STATE_T *state);
10289 +
10290 +extern VCHIQ_STATUS_T
10291 +vchiq_send_remote_use_active(VCHIQ_STATE_T *state);
10292 +
10293 +extern void
10294 +vchiq_platform_conn_state_changed(VCHIQ_STATE_T *state,
10295 + VCHIQ_CONNSTATE_T oldstate, VCHIQ_CONNSTATE_T newstate);
10296 +
10297 +extern void
10298 +vchiq_platform_handle_timeout(VCHIQ_STATE_T *state);
10299 +
10300 +extern void
10301 +vchiq_set_conn_state(VCHIQ_STATE_T *state, VCHIQ_CONNSTATE_T newstate);
10302 +
10303 +
10304 +extern void
10305 +vchiq_log_dump_mem(const char *label, uint32_t addr, const void *voidMem,
10306 + size_t numBytes);
10307 +
10308 +#endif
10309 diff --git a/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_debugfs.c b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_debugfs.c
10310 new file mode 100644
10311 index 0000000..7e03213
10312 --- /dev/null
10313 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_debugfs.c
10314 @@ -0,0 +1,383 @@
10315 +/**
10316 + * Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved.
10317 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
10318 + *
10319 + * Redistribution and use in source and binary forms, with or without
10320 + * modification, are permitted provided that the following conditions
10321 + * are met:
10322 + * 1. Redistributions of source code must retain the above copyright
10323 + * notice, this list of conditions, and the following disclaimer,
10324 + * without modification.
10325 + * 2. Redistributions in binary form must reproduce the above copyright
10326 + * notice, this list of conditions and the following disclaimer in the
10327 + * documentation and/or other materials provided with the distribution.
10328 + * 3. The names of the above-listed copyright holders may not be used
10329 + * to endorse or promote products derived from this software without
10330 + * specific prior written permission.
10331 + *
10332 + * ALTERNATIVELY, this software may be distributed under the terms of the
10333 + * GNU General Public License ("GPL") version 2, as published by the Free
10334 + * Software Foundation.
10335 + *
10336 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
10337 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
10338 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
10339 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
10340 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
10341 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
10342 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
10343 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
10344 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
10345 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
10346 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
10347 + */
10348 +
10349 +
10350 +#include <linux/debugfs.h>
10351 +#include "vchiq_core.h"
10352 +#include "vchiq_arm.h"
10353 +#include "vchiq_debugfs.h"
10354 +
10355 +#ifdef CONFIG_DEBUG_FS
10356 +
10357 +/****************************************************************************
10358 +*
10359 +* log category entries
10360 +*
10361 +***************************************************************************/
10362 +#define DEBUGFS_WRITE_BUF_SIZE 256
10363 +
10364 +#define VCHIQ_LOG_ERROR_STR "error"
10365 +#define VCHIQ_LOG_WARNING_STR "warning"
10366 +#define VCHIQ_LOG_INFO_STR "info"
10367 +#define VCHIQ_LOG_TRACE_STR "trace"
10368 +
10369 +
10370 +/* Top-level debug info */
10371 +struct vchiq_debugfs_info {
10372 + /* Global 'vchiq' debugfs entry used by all instances */
10373 + struct dentry *vchiq_cfg_dir;
10374 +
10375 + /* one entry per client process */
10376 + struct dentry *clients;
10377 +
10378 + /* log categories */
10379 + struct dentry *log_categories;
10380 +};
10381 +
10382 +static struct vchiq_debugfs_info debugfs_info;
10383 +
10384 +/* Log category debugfs entries */
10385 +struct vchiq_debugfs_log_entry {
10386 + const char *name;
10387 + int *plevel;
10388 + struct dentry *dir;
10389 +};
10390 +
10391 +static struct vchiq_debugfs_log_entry vchiq_debugfs_log_entries[] = {
10392 + { "core", &vchiq_core_log_level },
10393 + { "msg", &vchiq_core_msg_log_level },
10394 + { "sync", &vchiq_sync_log_level },
10395 + { "susp", &vchiq_susp_log_level },
10396 + { "arm", &vchiq_arm_log_level },
10397 +};
10398 +static int n_log_entries =
10399 + sizeof(vchiq_debugfs_log_entries)/sizeof(vchiq_debugfs_log_entries[0]);
10400 +
10401 +
10402 +static struct dentry *vchiq_clients_top(void);
10403 +static struct dentry *vchiq_debugfs_top(void);
10404 +
10405 +static int debugfs_log_show(struct seq_file *f, void *offset)
10406 +{
10407 + int *levp = f->private;
10408 + char *log_value = NULL;
10409 +
10410 + switch (*levp) {
10411 + case VCHIQ_LOG_ERROR:
10412 + log_value = VCHIQ_LOG_ERROR_STR;
10413 + break;
10414 + case VCHIQ_LOG_WARNING:
10415 + log_value = VCHIQ_LOG_WARNING_STR;
10416 + break;
10417 + case VCHIQ_LOG_INFO:
10418 + log_value = VCHIQ_LOG_INFO_STR;
10419 + break;
10420 + case VCHIQ_LOG_TRACE:
10421 + log_value = VCHIQ_LOG_TRACE_STR;
10422 + break;
10423 + default:
10424 + break;
10425 + }
10426 +
10427 + seq_printf(f, "%s\n", log_value ? log_value : "(null)");
10428 +
10429 + return 0;
10430 +}
10431 +
10432 +static int debugfs_log_open(struct inode *inode, struct file *file)
10433 +{
10434 + return single_open(file, debugfs_log_show, inode->i_private);
10435 +}
10436 +
10437 +static int debugfs_log_write(struct file *file,
10438 + const char __user *buffer,
10439 + size_t count, loff_t *ppos)
10440 +{
10441 + struct seq_file *f = (struct seq_file *)file->private_data;
10442 + int *levp = f->private;
10443 + char kbuf[DEBUGFS_WRITE_BUF_SIZE + 1];
10444 +
10445 + memset(kbuf, 0, DEBUGFS_WRITE_BUF_SIZE + 1);
10446 + if (count >= DEBUGFS_WRITE_BUF_SIZE)
10447 + count = DEBUGFS_WRITE_BUF_SIZE;
10448 +
10449 + if (copy_from_user(kbuf, buffer, count) != 0)
10450 + return -EFAULT;
10451 + kbuf[count - 1] = 0;
10452 +
10453 + if (strncmp("error", kbuf, strlen("error")) == 0)
10454 + *levp = VCHIQ_LOG_ERROR;
10455 + else if (strncmp("warning", kbuf, strlen("warning")) == 0)
10456 + *levp = VCHIQ_LOG_WARNING;
10457 + else if (strncmp("info", kbuf, strlen("info")) == 0)
10458 + *levp = VCHIQ_LOG_INFO;
10459 + else if (strncmp("trace", kbuf, strlen("trace")) == 0)
10460 + *levp = VCHIQ_LOG_TRACE;
10461 + else
10462 + *levp = VCHIQ_LOG_DEFAULT;
10463 +
10464 + *ppos += count;
10465 +
10466 + return count;
10467 +}
10468 +
10469 +static const struct file_operations debugfs_log_fops = {
10470 + .owner = THIS_MODULE,
10471 + .open = debugfs_log_open,
10472 + .write = debugfs_log_write,
10473 + .read = seq_read,
10474 + .llseek = seq_lseek,
10475 + .release = single_release,
10476 +};
10477 +
10478 +/* create an entry under <debugfs>/vchiq/log for each log category */
10479 +static int vchiq_debugfs_create_log_entries(struct dentry *top)
10480 +{
10481 + struct dentry *dir;
10482 + size_t i;
10483 + int ret = 0;
10484 + dir = debugfs_create_dir("log", vchiq_debugfs_top());
10485 + if (!dir)
10486 + return -ENOMEM;
10487 + debugfs_info.log_categories = dir;
10488 +
10489 + for (i = 0; i < n_log_entries; i++) {
10490 + void *levp = (void *)vchiq_debugfs_log_entries[i].plevel;
10491 + dir = debugfs_create_file(vchiq_debugfs_log_entries[i].name,
10492 + 0644,
10493 + debugfs_info.log_categories,
10494 + levp,
10495 + &debugfs_log_fops);
10496 + if (!dir) {
10497 + ret = -ENOMEM;
10498 + break;
10499 + }
10500 +
10501 + vchiq_debugfs_log_entries[i].dir = dir;
10502 + }
10503 + return ret;
10504 +}
10505 +
10506 +static int debugfs_usecount_show(struct seq_file *f, void *offset)
10507 +{
10508 + VCHIQ_INSTANCE_T instance = f->private;
10509 + int use_count;
10510 +
10511 + use_count = vchiq_instance_get_use_count(instance);
10512 + seq_printf(f, "%d\n", use_count);
10513 +
10514 + return 0;
10515 +}
10516 +
10517 +static int debugfs_usecount_open(struct inode *inode, struct file *file)
10518 +{
10519 + return single_open(file, debugfs_usecount_show, inode->i_private);
10520 +}
10521 +
10522 +static const struct file_operations debugfs_usecount_fops = {
10523 + .owner = THIS_MODULE,
10524 + .open = debugfs_usecount_open,
10525 + .read = seq_read,
10526 + .llseek = seq_lseek,
10527 + .release = single_release,
10528 +};
10529 +
10530 +static int debugfs_trace_show(struct seq_file *f, void *offset)
10531 +{
10532 + VCHIQ_INSTANCE_T instance = f->private;
10533 + int trace;
10534 +
10535 + trace = vchiq_instance_get_trace(instance);
10536 + seq_printf(f, "%s\n", trace ? "Y" : "N");
10537 +
10538 + return 0;
10539 +}
10540 +
10541 +static int debugfs_trace_open(struct inode *inode, struct file *file)
10542 +{
10543 + return single_open(file, debugfs_trace_show, inode->i_private);
10544 +}
10545 +
10546 +static int debugfs_trace_write(struct file *file,
10547 + const char __user *buffer,
10548 + size_t count, loff_t *ppos)
10549 +{
10550 + struct seq_file *f = (struct seq_file *)file->private_data;
10551 + VCHIQ_INSTANCE_T instance = f->private;
10552 + char firstchar;
10553 +
10554 + if (copy_from_user(&firstchar, buffer, 1) != 0)
10555 + return -EFAULT;
10556 +
10557 + switch (firstchar) {
10558 + case 'Y':
10559 + case 'y':
10560 + case '1':
10561 + vchiq_instance_set_trace(instance, 1);
10562 + break;
10563 + case 'N':
10564 + case 'n':
10565 + case '0':
10566 + vchiq_instance_set_trace(instance, 0);
10567 + break;
10568 + default:
10569 + break;
10570 + }
10571 +
10572 + *ppos += count;
10573 +
10574 + return count;
10575 +}
10576 +
10577 +static const struct file_operations debugfs_trace_fops = {
10578 + .owner = THIS_MODULE,
10579 + .open = debugfs_trace_open,
10580 + .write = debugfs_trace_write,
10581 + .read = seq_read,
10582 + .llseek = seq_lseek,
10583 + .release = single_release,
10584 +};
10585 +
10586 +/* add an instance (process) to the debugfs entries */
10587 +int vchiq_debugfs_add_instance(VCHIQ_INSTANCE_T instance)
10588 +{
10589 + char pidstr[16];
10590 + struct dentry *top, *use_count, *trace;
10591 + struct dentry *clients = vchiq_clients_top();
10592 +
10593 + snprintf(pidstr, sizeof(pidstr), "%d",
10594 + vchiq_instance_get_pid(instance));
10595 +
10596 + top = debugfs_create_dir(pidstr, clients);
10597 + if (!top)
10598 + goto fail_top;
10599 +
10600 + use_count = debugfs_create_file("use_count",
10601 + 0444, top,
10602 + instance,
10603 + &debugfs_usecount_fops);
10604 + if (!use_count)
10605 + goto fail_use_count;
10606 +
10607 + trace = debugfs_create_file("trace",
10608 + 0644, top,
10609 + instance,
10610 + &debugfs_trace_fops);
10611 + if (!trace)
10612 + goto fail_trace;
10613 +
10614 + vchiq_instance_get_debugfs_node(instance)->dentry = top;
10615 +
10616 + return 0;
10617 +
10618 +fail_trace:
10619 + debugfs_remove(use_count);
10620 +fail_use_count:
10621 + debugfs_remove(top);
10622 +fail_top:
10623 + return -ENOMEM;
10624 +}
10625 +
10626 +void vchiq_debugfs_remove_instance(VCHIQ_INSTANCE_T instance)
10627 +{
10628 + VCHIQ_DEBUGFS_NODE_T *node = vchiq_instance_get_debugfs_node(instance);
10629 + debugfs_remove_recursive(node->dentry);
10630 +}
10631 +
10632 +
10633 +int vchiq_debugfs_init(void)
10634 +{
10635 + BUG_ON(debugfs_info.vchiq_cfg_dir != NULL);
10636 +
10637 + debugfs_info.vchiq_cfg_dir = debugfs_create_dir("vchiq", NULL);
10638 + if (debugfs_info.vchiq_cfg_dir == NULL)
10639 + goto fail;
10640 +
10641 + debugfs_info.clients = debugfs_create_dir("clients",
10642 + vchiq_debugfs_top());
10643 + if (!debugfs_info.clients)
10644 + goto fail;
10645 +
10646 + if (vchiq_debugfs_create_log_entries(vchiq_debugfs_top()) != 0)
10647 + goto fail;
10648 +
10649 + return 0;
10650 +
10651 +fail:
10652 + vchiq_debugfs_deinit();
10653 + vchiq_log_error(vchiq_arm_log_level,
10654 + "%s: failed to create debugfs directory",
10655 + __func__);
10656 +
10657 + return -ENOMEM;
10658 +}
10659 +
10660 +/* remove all the debugfs entries */
10661 +void vchiq_debugfs_deinit(void)
10662 +{
10663 + debugfs_remove_recursive(vchiq_debugfs_top());
10664 +}
10665 +
10666 +static struct dentry *vchiq_clients_top(void)
10667 +{
10668 + return debugfs_info.clients;
10669 +}
10670 +
10671 +static struct dentry *vchiq_debugfs_top(void)
10672 +{
10673 + BUG_ON(debugfs_info.vchiq_cfg_dir == NULL);
10674 + return debugfs_info.vchiq_cfg_dir;
10675 +}
10676 +
10677 +#else /* CONFIG_DEBUG_FS */
10678 +
10679 +int vchiq_debugfs_init(void)
10680 +{
10681 + return 0;
10682 +}
10683 +
10684 +void vchiq_debugfs_deinit(void)
10685 +{
10686 +}
10687 +
10688 +int vchiq_debugfs_add_instance(VCHIQ_INSTANCE_T instance)
10689 +{
10690 + return 0;
10691 +}
10692 +
10693 +void vchiq_debugfs_remove_instance(VCHIQ_INSTANCE_T instance)
10694 +{
10695 +}
10696 +
10697 +#endif /* CONFIG_DEBUG_FS */
10698 diff --git a/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_debugfs.h b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_debugfs.h
10699 new file mode 100644
10700 index 0000000..4d6a378
10701 --- /dev/null
10702 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_debugfs.h
10703 @@ -0,0 +1,52 @@
10704 +/**
10705 + * Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved.
10706 + *
10707 + * Redistribution and use in source and binary forms, with or without
10708 + * modification, are permitted provided that the following conditions
10709 + * are met:
10710 + * 1. Redistributions of source code must retain the above copyright
10711 + * notice, this list of conditions, and the following disclaimer,
10712 + * without modification.
10713 + * 2. Redistributions in binary form must reproduce the above copyright
10714 + * notice, this list of conditions and the following disclaimer in the
10715 + * documentation and/or other materials provided with the distribution.
10716 + * 3. The names of the above-listed copyright holders may not be used
10717 + * to endorse or promote products derived from this software without
10718 + * specific prior written permission.
10719 + *
10720 + * ALTERNATIVELY, this software may be distributed under the terms of the
10721 + * GNU General Public License ("GPL") version 2, as published by the Free
10722 + * Software Foundation.
10723 + *
10724 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
10725 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
10726 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
10727 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
10728 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
10729 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
10730 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
10731 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
10732 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
10733 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
10734 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
10735 + */
10736 +
10737 +#ifndef VCHIQ_DEBUGFS_H
10738 +#define VCHIQ_DEBUGFS_H
10739 +
10740 +#include "vchiq_core.h"
10741 +
10742 +typedef struct vchiq_debugfs_node_struct
10743 +{
10744 + struct dentry *dentry;
10745 +} VCHIQ_DEBUGFS_NODE_T;
10746 +
10747 +int vchiq_debugfs_init(void);
10748 +
10749 +void vchiq_debugfs_deinit(void);
10750 +
10751 +int vchiq_debugfs_add_instance(VCHIQ_INSTANCE_T instance);
10752 +
10753 +void vchiq_debugfs_remove_instance(VCHIQ_INSTANCE_T instance);
10754 +
10755 +#endif /* VCHIQ_DEBUGFS_H */
10756 diff --git a/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_genversion b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_genversion
10757 new file mode 100644
10758 index 0000000..9f5b634
10759 --- /dev/null
10760 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_genversion
10761 @@ -0,0 +1,87 @@
10762 +#!/usr/bin/perl -w
10763 +
10764 +use strict;
10765 +
10766 +#
10767 +# Generate a version from available information
10768 +#
10769 +
10770 +my $prefix = shift @ARGV;
10771 +my $root = shift @ARGV;
10772 +
10773 +
10774 +if ( not defined $root ) {
10775 + die "usage: $0 prefix root-dir\n";
10776 +}
10777 +
10778 +if ( ! -d $root ) {
10779 + die "root directory $root not found\n";
10780 +}
10781 +
10782 +my $version = "unknown";
10783 +my $tainted = "";
10784 +
10785 +if ( -d "$root/.git" ) {
10786 + # attempt to work out git version. only do so
10787 + # on a linux build host, as cygwin builds are
10788 + # already slow enough
10789 +
10790 + if ( -f "/usr/bin/git" || -f "/usr/local/bin/git" ) {
10791 + if (not open(F, "git --git-dir $root/.git rev-parse --verify HEAD|")) {
10792 + $version = "no git version";
10793 + }
10794 + else {
10795 + $version = <F>;
10796 + $version =~ s/[ \r\n]*$//; # chomp may not be enough (cygwin).
10797 + $version =~ s/^[ \r\n]*//; # chomp may not be enough (cygwin).
10798 + }
10799 +
10800 + if (open(G, "git --git-dir $root/.git status --porcelain|")) {
10801 + $tainted = <G>;
10802 + $tainted =~ s/[ \r\n]*$//; # chomp may not be enough (cygwin).
10803 + $tainted =~ s/^[ \r\n]*//; # chomp may not be enough (cygwin).
10804 + if (length $tainted) {
10805 + $version = join ' ', $version, "(tainted)";
10806 + }
10807 + else {
10808 + $version = join ' ', $version, "(clean)";
10809 + }
10810 + }
10811 + }
10812 +}
10813 +
10814 +my $hostname = `hostname`;
10815 +$hostname =~ s/[ \r\n]*$//; # chomp may not be enough (cygwin).
10816 +$hostname =~ s/^[ \r\n]*//; # chomp may not be enough (cygwin).
10817 +
10818 +
10819 +print STDERR "Version $version\n";
10820 +print <<EOF;
10821 +#include "${prefix}_build_info.h"
10822 +#include <linux/broadcom/vc_debug_sym.h>
10823 +
10824 +VC_DEBUG_DECLARE_STRING_VAR( ${prefix}_build_hostname, "$hostname" );
10825 +VC_DEBUG_DECLARE_STRING_VAR( ${prefix}_build_version, "$version" );
10826 +VC_DEBUG_DECLARE_STRING_VAR( ${prefix}_build_time, __TIME__ );
10827 +VC_DEBUG_DECLARE_STRING_VAR( ${prefix}_build_date, __DATE__ );
10828 +
10829 +const char *vchiq_get_build_hostname( void )
10830 +{
10831 + return vchiq_build_hostname;
10832 +}
10833 +
10834 +const char *vchiq_get_build_version( void )
10835 +{
10836 + return vchiq_build_version;
10837 +}
10838 +
10839 +const char *vchiq_get_build_date( void )
10840 +{
10841 + return vchiq_build_date;
10842 +}
10843 +
10844 +const char *vchiq_get_build_time( void )
10845 +{
10846 + return vchiq_build_time;
10847 +}
10848 +EOF
10849 diff --git a/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_if.h b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_if.h
10850 new file mode 100644
10851 index 0000000..8067bbe
10852 --- /dev/null
10853 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_if.h
10854 @@ -0,0 +1,189 @@
10855 +/**
10856 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
10857 + *
10858 + * Redistribution and use in source and binary forms, with or without
10859 + * modification, are permitted provided that the following conditions
10860 + * are met:
10861 + * 1. Redistributions of source code must retain the above copyright
10862 + * notice, this list of conditions, and the following disclaimer,
10863 + * without modification.
10864 + * 2. Redistributions in binary form must reproduce the above copyright
10865 + * notice, this list of conditions and the following disclaimer in the
10866 + * documentation and/or other materials provided with the distribution.
10867 + * 3. The names of the above-listed copyright holders may not be used
10868 + * to endorse or promote products derived from this software without
10869 + * specific prior written permission.
10870 + *
10871 + * ALTERNATIVELY, this software may be distributed under the terms of the
10872 + * GNU General Public License ("GPL") version 2, as published by the Free
10873 + * Software Foundation.
10874 + *
10875 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
10876 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
10877 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
10878 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
10879 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
10880 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
10881 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
10882 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
10883 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
10884 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
10885 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
10886 + */
10887 +
10888 +#ifndef VCHIQ_IF_H
10889 +#define VCHIQ_IF_H
10890 +
10891 +#include "interface/vchi/vchi_mh.h"
10892 +
10893 +#define VCHIQ_SERVICE_HANDLE_INVALID 0
10894 +
10895 +#define VCHIQ_SLOT_SIZE 4096
10896 +#define VCHIQ_MAX_MSG_SIZE (VCHIQ_SLOT_SIZE - sizeof(VCHIQ_HEADER_T))
10897 +#define VCHIQ_CHANNEL_SIZE VCHIQ_MAX_MSG_SIZE /* For backwards compatibility */
10898 +
10899 +#define VCHIQ_MAKE_FOURCC(x0, x1, x2, x3) \
10900 + (((x0) << 24) | ((x1) << 16) | ((x2) << 8) | (x3))
10901 +#define VCHIQ_GET_SERVICE_USERDATA(service) vchiq_get_service_userdata(service)
10902 +#define VCHIQ_GET_SERVICE_FOURCC(service) vchiq_get_service_fourcc(service)
10903 +
10904 +typedef enum {
10905 + VCHIQ_SERVICE_OPENED, /* service, -, - */
10906 + VCHIQ_SERVICE_CLOSED, /* service, -, - */
10907 + VCHIQ_MESSAGE_AVAILABLE, /* service, header, - */
10908 + VCHIQ_BULK_TRANSMIT_DONE, /* service, -, bulk_userdata */
10909 + VCHIQ_BULK_RECEIVE_DONE, /* service, -, bulk_userdata */
10910 + VCHIQ_BULK_TRANSMIT_ABORTED, /* service, -, bulk_userdata */
10911 + VCHIQ_BULK_RECEIVE_ABORTED /* service, -, bulk_userdata */
10912 +} VCHIQ_REASON_T;
10913 +
10914 +typedef enum {
10915 + VCHIQ_ERROR = -1,
10916 + VCHIQ_SUCCESS = 0,
10917 + VCHIQ_RETRY = 1
10918 +} VCHIQ_STATUS_T;
10919 +
10920 +typedef enum {
10921 + VCHIQ_BULK_MODE_CALLBACK,
10922 + VCHIQ_BULK_MODE_BLOCKING,
10923 + VCHIQ_BULK_MODE_NOCALLBACK,
10924 + VCHIQ_BULK_MODE_WAITING /* Reserved for internal use */
10925 +} VCHIQ_BULK_MODE_T;
10926 +
10927 +typedef enum {
10928 + VCHIQ_SERVICE_OPTION_AUTOCLOSE,
10929 + VCHIQ_SERVICE_OPTION_SLOT_QUOTA,
10930 + VCHIQ_SERVICE_OPTION_MESSAGE_QUOTA,
10931 + VCHIQ_SERVICE_OPTION_SYNCHRONOUS,
10932 + VCHIQ_SERVICE_OPTION_TRACE
10933 +} VCHIQ_SERVICE_OPTION_T;
10934 +
10935 +typedef struct vchiq_header_struct {
10936 + /* The message identifier - opaque to applications. */
10937 + int msgid;
10938 +
10939 + /* Size of message data. */
10940 + unsigned int size;
10941 +
10942 + char data[0]; /* message */
10943 +} VCHIQ_HEADER_T;
10944 +
10945 +typedef struct {
10946 + const void *data;
10947 + unsigned int size;
10948 +} VCHIQ_ELEMENT_T;
10949 +
10950 +typedef unsigned int VCHIQ_SERVICE_HANDLE_T;
10951 +
10952 +typedef VCHIQ_STATUS_T (*VCHIQ_CALLBACK_T)(VCHIQ_REASON_T, VCHIQ_HEADER_T *,
10953 + VCHIQ_SERVICE_HANDLE_T, void *);
10954 +
10955 +typedef struct vchiq_service_base_struct {
10956 + int fourcc;
10957 + VCHIQ_CALLBACK_T callback;
10958 + void *userdata;
10959 +} VCHIQ_SERVICE_BASE_T;
10960 +
10961 +typedef struct vchiq_service_params_struct {
10962 + int fourcc;
10963 + VCHIQ_CALLBACK_T callback;
10964 + void *userdata;
10965 + short version; /* Increment for non-trivial changes */
10966 + short version_min; /* Update for incompatible changes */
10967 +} VCHIQ_SERVICE_PARAMS_T;
10968 +
10969 +typedef struct vchiq_config_struct {
10970 + unsigned int max_msg_size;
10971 + unsigned int bulk_threshold; /* The message size above which it
10972 + is better to use a bulk transfer
10973 + (<= max_msg_size) */
10974 + unsigned int max_outstanding_bulks;
10975 + unsigned int max_services;
10976 + short version; /* The version of VCHIQ */
10977 + short version_min; /* The minimum compatible version of VCHIQ */
10978 +} VCHIQ_CONFIG_T;
10979 +
10980 +typedef struct vchiq_instance_struct *VCHIQ_INSTANCE_T;
10981 +typedef void (*VCHIQ_REMOTE_USE_CALLBACK_T)(void *cb_arg);
10982 +
10983 +extern VCHIQ_STATUS_T vchiq_initialise(VCHIQ_INSTANCE_T *pinstance);
10984 +extern VCHIQ_STATUS_T vchiq_shutdown(VCHIQ_INSTANCE_T instance);
10985 +extern VCHIQ_STATUS_T vchiq_connect(VCHIQ_INSTANCE_T instance);
10986 +extern VCHIQ_STATUS_T vchiq_add_service(VCHIQ_INSTANCE_T instance,
10987 + const VCHIQ_SERVICE_PARAMS_T *params,
10988 + VCHIQ_SERVICE_HANDLE_T *pservice);
10989 +extern VCHIQ_STATUS_T vchiq_open_service(VCHIQ_INSTANCE_T instance,
10990 + const VCHIQ_SERVICE_PARAMS_T *params,
10991 + VCHIQ_SERVICE_HANDLE_T *pservice);
10992 +extern VCHIQ_STATUS_T vchiq_close_service(VCHIQ_SERVICE_HANDLE_T service);
10993 +extern VCHIQ_STATUS_T vchiq_remove_service(VCHIQ_SERVICE_HANDLE_T service);
10994 +extern VCHIQ_STATUS_T vchiq_use_service(VCHIQ_SERVICE_HANDLE_T service);
10995 +extern VCHIQ_STATUS_T vchiq_use_service_no_resume(
10996 + VCHIQ_SERVICE_HANDLE_T service);
10997 +extern VCHIQ_STATUS_T vchiq_release_service(VCHIQ_SERVICE_HANDLE_T service);
10998 +
10999 +extern VCHIQ_STATUS_T vchiq_queue_message(VCHIQ_SERVICE_HANDLE_T service,
11000 + const VCHIQ_ELEMENT_T *elements, unsigned int count);
11001 +extern void vchiq_release_message(VCHIQ_SERVICE_HANDLE_T service,
11002 + VCHIQ_HEADER_T *header);
11003 +extern VCHIQ_STATUS_T vchiq_queue_bulk_transmit(VCHIQ_SERVICE_HANDLE_T service,
11004 + const void *data, unsigned int size, void *userdata);
11005 +extern VCHIQ_STATUS_T vchiq_queue_bulk_receive(VCHIQ_SERVICE_HANDLE_T service,
11006 + void *data, unsigned int size, void *userdata);
11007 +extern VCHIQ_STATUS_T vchiq_queue_bulk_transmit_handle(
11008 + VCHIQ_SERVICE_HANDLE_T service, VCHI_MEM_HANDLE_T handle,
11009 + const void *offset, unsigned int size, void *userdata);
11010 +extern VCHIQ_STATUS_T vchiq_queue_bulk_receive_handle(
11011 + VCHIQ_SERVICE_HANDLE_T service, VCHI_MEM_HANDLE_T handle,
11012 + void *offset, unsigned int size, void *userdata);
11013 +extern VCHIQ_STATUS_T vchiq_bulk_transmit(VCHIQ_SERVICE_HANDLE_T service,
11014 + const void *data, unsigned int size, void *userdata,
11015 + VCHIQ_BULK_MODE_T mode);
11016 +extern VCHIQ_STATUS_T vchiq_bulk_receive(VCHIQ_SERVICE_HANDLE_T service,
11017 + void *data, unsigned int size, void *userdata,
11018 + VCHIQ_BULK_MODE_T mode);
11019 +extern VCHIQ_STATUS_T vchiq_bulk_transmit_handle(VCHIQ_SERVICE_HANDLE_T service,
11020 + VCHI_MEM_HANDLE_T handle, const void *offset, unsigned int size,
11021 + void *userdata, VCHIQ_BULK_MODE_T mode);
11022 +extern VCHIQ_STATUS_T vchiq_bulk_receive_handle(VCHIQ_SERVICE_HANDLE_T service,
11023 + VCHI_MEM_HANDLE_T handle, void *offset, unsigned int size,
11024 + void *userdata, VCHIQ_BULK_MODE_T mode);
11025 +extern int vchiq_get_client_id(VCHIQ_SERVICE_HANDLE_T service);
11026 +extern void *vchiq_get_service_userdata(VCHIQ_SERVICE_HANDLE_T service);
11027 +extern int vchiq_get_service_fourcc(VCHIQ_SERVICE_HANDLE_T service);
11028 +extern VCHIQ_STATUS_T vchiq_get_config(VCHIQ_INSTANCE_T instance,
11029 + int config_size, VCHIQ_CONFIG_T *pconfig);
11030 +extern VCHIQ_STATUS_T vchiq_set_service_option(VCHIQ_SERVICE_HANDLE_T service,
11031 + VCHIQ_SERVICE_OPTION_T option, int value);
11032 +
11033 +extern VCHIQ_STATUS_T vchiq_remote_use(VCHIQ_INSTANCE_T instance,
11034 + VCHIQ_REMOTE_USE_CALLBACK_T callback, void *cb_arg);
11035 +extern VCHIQ_STATUS_T vchiq_remote_release(VCHIQ_INSTANCE_T instance);
11036 +
11037 +extern VCHIQ_STATUS_T vchiq_dump_phys_mem(VCHIQ_SERVICE_HANDLE_T service,
11038 + void *ptr, size_t num_bytes);
11039 +
11040 +extern VCHIQ_STATUS_T vchiq_get_peer_version(VCHIQ_SERVICE_HANDLE_T handle,
11041 + short *peer_version);
11042 +
11043 +#endif /* VCHIQ_IF_H */
11044 diff --git a/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_ioctl.h b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_ioctl.h
11045 new file mode 100644
11046 index 0000000..6137ae9
11047 --- /dev/null
11048 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_ioctl.h
11049 @@ -0,0 +1,131 @@
11050 +/**
11051 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
11052 + *
11053 + * Redistribution and use in source and binary forms, with or without
11054 + * modification, are permitted provided that the following conditions
11055 + * are met:
11056 + * 1. Redistributions of source code must retain the above copyright
11057 + * notice, this list of conditions, and the following disclaimer,
11058 + * without modification.
11059 + * 2. Redistributions in binary form must reproduce the above copyright
11060 + * notice, this list of conditions and the following disclaimer in the
11061 + * documentation and/or other materials provided with the distribution.
11062 + * 3. The names of the above-listed copyright holders may not be used
11063 + * to endorse or promote products derived from this software without
11064 + * specific prior written permission.
11065 + *
11066 + * ALTERNATIVELY, this software may be distributed under the terms of the
11067 + * GNU General Public License ("GPL") version 2, as published by the Free
11068 + * Software Foundation.
11069 + *
11070 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
11071 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
11072 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
11073 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
11074 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
11075 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
11076 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
11077 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
11078 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
11079 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
11080 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
11081 + */
11082 +
11083 +#ifndef VCHIQ_IOCTLS_H
11084 +#define VCHIQ_IOCTLS_H
11085 +
11086 +#include <linux/ioctl.h>
11087 +#include "vchiq_if.h"
11088 +
11089 +#define VCHIQ_IOC_MAGIC 0xc4
11090 +#define VCHIQ_INVALID_HANDLE (~0)
11091 +
11092 +typedef struct {
11093 + VCHIQ_SERVICE_PARAMS_T params;
11094 + int is_open;
11095 + int is_vchi;
11096 + unsigned int handle; /* OUT */
11097 +} VCHIQ_CREATE_SERVICE_T;
11098 +
11099 +typedef struct {
11100 + unsigned int handle;
11101 + unsigned int count;
11102 + const VCHIQ_ELEMENT_T *elements;
11103 +} VCHIQ_QUEUE_MESSAGE_T;
11104 +
11105 +typedef struct {
11106 + unsigned int handle;
11107 + void *data;
11108 + unsigned int size;
11109 + void *userdata;
11110 + VCHIQ_BULK_MODE_T mode;
11111 +} VCHIQ_QUEUE_BULK_TRANSFER_T;
11112 +
11113 +typedef struct {
11114 + VCHIQ_REASON_T reason;
11115 + VCHIQ_HEADER_T *header;
11116 + void *service_userdata;
11117 + void *bulk_userdata;
11118 +} VCHIQ_COMPLETION_DATA_T;
11119 +
11120 +typedef struct {
11121 + unsigned int count;
11122 + VCHIQ_COMPLETION_DATA_T *buf;
11123 + unsigned int msgbufsize;
11124 + unsigned int msgbufcount; /* IN/OUT */
11125 + void **msgbufs;
11126 +} VCHIQ_AWAIT_COMPLETION_T;
11127 +
11128 +typedef struct {
11129 + unsigned int handle;
11130 + int blocking;
11131 + unsigned int bufsize;
11132 + void *buf;
11133 +} VCHIQ_DEQUEUE_MESSAGE_T;
11134 +
11135 +typedef struct {
11136 + unsigned int config_size;
11137 + VCHIQ_CONFIG_T *pconfig;
11138 +} VCHIQ_GET_CONFIG_T;
11139 +
11140 +typedef struct {
11141 + unsigned int handle;
11142 + VCHIQ_SERVICE_OPTION_T option;
11143 + int value;
11144 +} VCHIQ_SET_SERVICE_OPTION_T;
11145 +
11146 +typedef struct {
11147 + void *virt_addr;
11148 + size_t num_bytes;
11149 +} VCHIQ_DUMP_MEM_T;
11150 +
11151 +#define VCHIQ_IOC_CONNECT _IO(VCHIQ_IOC_MAGIC, 0)
11152 +#define VCHIQ_IOC_SHUTDOWN _IO(VCHIQ_IOC_MAGIC, 1)
11153 +#define VCHIQ_IOC_CREATE_SERVICE \
11154 + _IOWR(VCHIQ_IOC_MAGIC, 2, VCHIQ_CREATE_SERVICE_T)
11155 +#define VCHIQ_IOC_REMOVE_SERVICE _IO(VCHIQ_IOC_MAGIC, 3)
11156 +#define VCHIQ_IOC_QUEUE_MESSAGE \
11157 + _IOW(VCHIQ_IOC_MAGIC, 4, VCHIQ_QUEUE_MESSAGE_T)
11158 +#define VCHIQ_IOC_QUEUE_BULK_TRANSMIT \
11159 + _IOWR(VCHIQ_IOC_MAGIC, 5, VCHIQ_QUEUE_BULK_TRANSFER_T)
11160 +#define VCHIQ_IOC_QUEUE_BULK_RECEIVE \
11161 + _IOWR(VCHIQ_IOC_MAGIC, 6, VCHIQ_QUEUE_BULK_TRANSFER_T)
11162 +#define VCHIQ_IOC_AWAIT_COMPLETION \
11163 + _IOWR(VCHIQ_IOC_MAGIC, 7, VCHIQ_AWAIT_COMPLETION_T)
11164 +#define VCHIQ_IOC_DEQUEUE_MESSAGE \
11165 + _IOWR(VCHIQ_IOC_MAGIC, 8, VCHIQ_DEQUEUE_MESSAGE_T)
11166 +#define VCHIQ_IOC_GET_CLIENT_ID _IO(VCHIQ_IOC_MAGIC, 9)
11167 +#define VCHIQ_IOC_GET_CONFIG \
11168 + _IOWR(VCHIQ_IOC_MAGIC, 10, VCHIQ_GET_CONFIG_T)
11169 +#define VCHIQ_IOC_CLOSE_SERVICE _IO(VCHIQ_IOC_MAGIC, 11)
11170 +#define VCHIQ_IOC_USE_SERVICE _IO(VCHIQ_IOC_MAGIC, 12)
11171 +#define VCHIQ_IOC_RELEASE_SERVICE _IO(VCHIQ_IOC_MAGIC, 13)
11172 +#define VCHIQ_IOC_SET_SERVICE_OPTION \
11173 + _IOW(VCHIQ_IOC_MAGIC, 14, VCHIQ_SET_SERVICE_OPTION_T)
11174 +#define VCHIQ_IOC_DUMP_PHYS_MEM \
11175 + _IOW(VCHIQ_IOC_MAGIC, 15, VCHIQ_DUMP_MEM_T)
11176 +#define VCHIQ_IOC_LIB_VERSION _IO(VCHIQ_IOC_MAGIC, 16)
11177 +#define VCHIQ_IOC_CLOSE_DELIVERED _IO(VCHIQ_IOC_MAGIC, 17)
11178 +#define VCHIQ_IOC_MAX 17
11179 +
11180 +#endif
11181 diff --git a/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_kern_lib.c b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_kern_lib.c
11182 new file mode 100644
11183 index 0000000..be9735f
11184 --- /dev/null
11185 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_kern_lib.c
11186 @@ -0,0 +1,456 @@
11187 +/**
11188 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
11189 + *
11190 + * Redistribution and use in source and binary forms, with or without
11191 + * modification, are permitted provided that the following conditions
11192 + * are met:
11193 + * 1. Redistributions of source code must retain the above copyright
11194 + * notice, this list of conditions, and the following disclaimer,
11195 + * without modification.
11196 + * 2. Redistributions in binary form must reproduce the above copyright
11197 + * notice, this list of conditions and the following disclaimer in the
11198 + * documentation and/or other materials provided with the distribution.
11199 + * 3. The names of the above-listed copyright holders may not be used
11200 + * to endorse or promote products derived from this software without
11201 + * specific prior written permission.
11202 + *
11203 + * ALTERNATIVELY, this software may be distributed under the terms of the
11204 + * GNU General Public License ("GPL") version 2, as published by the Free
11205 + * Software Foundation.
11206 + *
11207 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
11208 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
11209 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
11210 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
11211 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
11212 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
11213 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
11214 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
11215 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
11216 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
11217 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
11218 + */
11219 +
11220 +/* ---- Include Files ---------------------------------------------------- */
11221 +
11222 +#include <linux/kernel.h>
11223 +#include <linux/module.h>
11224 +#include <linux/mutex.h>
11225 +
11226 +#include "vchiq_core.h"
11227 +#include "vchiq_arm.h"
11228 +
11229 +/* ---- Public Variables ------------------------------------------------- */
11230 +
11231 +/* ---- Private Constants and Types -------------------------------------- */
11232 +
11233 +struct bulk_waiter_node {
11234 + struct bulk_waiter bulk_waiter;
11235 + int pid;
11236 + struct list_head list;
11237 +};
11238 +
11239 +struct vchiq_instance_struct {
11240 + VCHIQ_STATE_T *state;
11241 +
11242 + int connected;
11243 +
11244 + struct list_head bulk_waiter_list;
11245 + struct mutex bulk_waiter_list_mutex;
11246 +};
11247 +
11248 +static VCHIQ_STATUS_T
11249 +vchiq_blocking_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle, void *data,
11250 + unsigned int size, VCHIQ_BULK_DIR_T dir);
11251 +
11252 +/****************************************************************************
11253 +*
11254 +* vchiq_initialise
11255 +*
11256 +***************************************************************************/
11257 +#define VCHIQ_INIT_RETRIES 10
11258 +VCHIQ_STATUS_T vchiq_initialise(VCHIQ_INSTANCE_T *instanceOut)
11259 +{
11260 + VCHIQ_STATUS_T status = VCHIQ_ERROR;
11261 + VCHIQ_STATE_T *state;
11262 + VCHIQ_INSTANCE_T instance = NULL;
11263 + int i;
11264 +
11265 + vchiq_log_trace(vchiq_core_log_level, "%s called", __func__);
11266 +
11267 + /* VideoCore may not be ready due to boot up timing.
11268 + It may never be ready if kernel and firmware are mismatched, so don't block forever. */
11269 + for (i=0; i<VCHIQ_INIT_RETRIES; i++) {
11270 + state = vchiq_get_state();
11271 + if (state)
11272 + break;
11273 + udelay(500);
11274 + }
11275 + if (i==VCHIQ_INIT_RETRIES) {
11276 + vchiq_log_error(vchiq_core_log_level,
11277 + "%s: videocore not initialized\n", __func__);
11278 + goto failed;
11279 + } else if (i>0) {
11280 + vchiq_log_warning(vchiq_core_log_level,
11281 + "%s: videocore initialized after %d retries\n", __func__, i);
11282 + }
11283 +
11284 + instance = kzalloc(sizeof(*instance), GFP_KERNEL);
11285 + if (!instance) {
11286 + vchiq_log_error(vchiq_core_log_level,
11287 + "%s: error allocating vchiq instance\n", __func__);
11288 + goto failed;
11289 + }
11290 +
11291 + instance->connected = 0;
11292 + instance->state = state;
11293 + mutex_init(&instance->bulk_waiter_list_mutex);
11294 + INIT_LIST_HEAD(&instance->bulk_waiter_list);
11295 +
11296 + *instanceOut = instance;
11297 +
11298 + status = VCHIQ_SUCCESS;
11299 +
11300 +failed:
11301 + vchiq_log_trace(vchiq_core_log_level,
11302 + "%s(%p): returning %d", __func__, instance, status);
11303 +
11304 + return status;
11305 +}
11306 +EXPORT_SYMBOL(vchiq_initialise);
11307 +
11308 +/****************************************************************************
11309 +*
11310 +* vchiq_shutdown
11311 +*
11312 +***************************************************************************/
11313 +
11314 +VCHIQ_STATUS_T vchiq_shutdown(VCHIQ_INSTANCE_T instance)
11315 +{
11316 + VCHIQ_STATUS_T status;
11317 + VCHIQ_STATE_T *state = instance->state;
11318 +
11319 + vchiq_log_trace(vchiq_core_log_level,
11320 + "%s(%p) called", __func__, instance);
11321 +
11322 + if (mutex_lock_interruptible(&state->mutex) != 0)
11323 + return VCHIQ_RETRY;
11324 +
11325 + /* Remove all services */
11326 + status = vchiq_shutdown_internal(state, instance);
11327 +
11328 + mutex_unlock(&state->mutex);
11329 +
11330 + vchiq_log_trace(vchiq_core_log_level,
11331 + "%s(%p): returning %d", __func__, instance, status);
11332 +
11333 + if (status == VCHIQ_SUCCESS) {
11334 + struct list_head *pos, *next;
11335 + list_for_each_safe(pos, next,
11336 + &instance->bulk_waiter_list) {
11337 + struct bulk_waiter_node *waiter;
11338 + waiter = list_entry(pos,
11339 + struct bulk_waiter_node,
11340 + list);
11341 + list_del(pos);
11342 + vchiq_log_info(vchiq_arm_log_level,
11343 + "bulk_waiter - cleaned up %x "
11344 + "for pid %d",
11345 + (unsigned int)waiter, waiter->pid);
11346 + kfree(waiter);
11347 + }
11348 + kfree(instance);
11349 + }
11350 +
11351 + return status;
11352 +}
11353 +EXPORT_SYMBOL(vchiq_shutdown);
11354 +
11355 +/****************************************************************************
11356 +*
11357 +* vchiq_is_connected
11358 +*
11359 +***************************************************************************/
11360 +
11361 +int vchiq_is_connected(VCHIQ_INSTANCE_T instance)
11362 +{
11363 + return instance->connected;
11364 +}
11365 +
11366 +/****************************************************************************
11367 +*
11368 +* vchiq_connect
11369 +*
11370 +***************************************************************************/
11371 +
11372 +VCHIQ_STATUS_T vchiq_connect(VCHIQ_INSTANCE_T instance)
11373 +{
11374 + VCHIQ_STATUS_T status;
11375 + VCHIQ_STATE_T *state = instance->state;
11376 +
11377 + vchiq_log_trace(vchiq_core_log_level,
11378 + "%s(%p) called", __func__, instance);
11379 +
11380 + if (mutex_lock_interruptible(&state->mutex) != 0) {
11381 + vchiq_log_trace(vchiq_core_log_level,
11382 + "%s: call to mutex_lock failed", __func__);
11383 + status = VCHIQ_RETRY;
11384 + goto failed;
11385 + }
11386 + status = vchiq_connect_internal(state, instance);
11387 +
11388 + if (status == VCHIQ_SUCCESS)
11389 + instance->connected = 1;
11390 +
11391 + mutex_unlock(&state->mutex);
11392 +
11393 +failed:
11394 + vchiq_log_trace(vchiq_core_log_level,
11395 + "%s(%p): returning %d", __func__, instance, status);
11396 +
11397 + return status;
11398 +}
11399 +EXPORT_SYMBOL(vchiq_connect);
11400 +
11401 +/****************************************************************************
11402 +*
11403 +* vchiq_add_service
11404 +*
11405 +***************************************************************************/
11406 +
11407 +VCHIQ_STATUS_T vchiq_add_service(
11408 + VCHIQ_INSTANCE_T instance,
11409 + const VCHIQ_SERVICE_PARAMS_T *params,
11410 + VCHIQ_SERVICE_HANDLE_T *phandle)
11411 +{
11412 + VCHIQ_STATUS_T status;
11413 + VCHIQ_STATE_T *state = instance->state;
11414 + VCHIQ_SERVICE_T *service = NULL;
11415 + int srvstate;
11416 +
11417 + vchiq_log_trace(vchiq_core_log_level,
11418 + "%s(%p) called", __func__, instance);
11419 +
11420 + *phandle = VCHIQ_SERVICE_HANDLE_INVALID;
11421 +
11422 + srvstate = vchiq_is_connected(instance)
11423 + ? VCHIQ_SRVSTATE_LISTENING
11424 + : VCHIQ_SRVSTATE_HIDDEN;
11425 +
11426 + service = vchiq_add_service_internal(
11427 + state,
11428 + params,
11429 + srvstate,
11430 + instance,
11431 + NULL);
11432 +
11433 + if (service) {
11434 + *phandle = service->handle;
11435 + status = VCHIQ_SUCCESS;
11436 + } else
11437 + status = VCHIQ_ERROR;
11438 +
11439 + vchiq_log_trace(vchiq_core_log_level,
11440 + "%s(%p): returning %d", __func__, instance, status);
11441 +
11442 + return status;
11443 +}
11444 +EXPORT_SYMBOL(vchiq_add_service);
11445 +
11446 +/****************************************************************************
11447 +*
11448 +* vchiq_open_service
11449 +*
11450 +***************************************************************************/
11451 +
11452 +VCHIQ_STATUS_T vchiq_open_service(
11453 + VCHIQ_INSTANCE_T instance,
11454 + const VCHIQ_SERVICE_PARAMS_T *params,
11455 + VCHIQ_SERVICE_HANDLE_T *phandle)
11456 +{
11457 + VCHIQ_STATUS_T status = VCHIQ_ERROR;
11458 + VCHIQ_STATE_T *state = instance->state;
11459 + VCHIQ_SERVICE_T *service = NULL;
11460 +
11461 + vchiq_log_trace(vchiq_core_log_level,
11462 + "%s(%p) called", __func__, instance);
11463 +
11464 + *phandle = VCHIQ_SERVICE_HANDLE_INVALID;
11465 +
11466 + if (!vchiq_is_connected(instance))
11467 + goto failed;
11468 +
11469 + service = vchiq_add_service_internal(state,
11470 + params,
11471 + VCHIQ_SRVSTATE_OPENING,
11472 + instance,
11473 + NULL);
11474 +
11475 + if (service) {
11476 + status = vchiq_open_service_internal(service, current->pid);
11477 + if (status == VCHIQ_SUCCESS)
11478 + *phandle = service->handle;
11479 + else
11480 + vchiq_remove_service(service->handle);
11481 + }
11482 +
11483 +failed:
11484 + vchiq_log_trace(vchiq_core_log_level,
11485 + "%s(%p): returning %d", __func__, instance, status);
11486 +
11487 + return status;
11488 +}
11489 +EXPORT_SYMBOL(vchiq_open_service);
11490 +
11491 +VCHIQ_STATUS_T
11492 +vchiq_queue_bulk_transmit(VCHIQ_SERVICE_HANDLE_T handle,
11493 + const void *data, unsigned int size, void *userdata)
11494 +{
11495 + return vchiq_bulk_transfer(handle,
11496 + VCHI_MEM_HANDLE_INVALID, (void *)data, size, userdata,
11497 + VCHIQ_BULK_MODE_CALLBACK, VCHIQ_BULK_TRANSMIT);
11498 +}
11499 +EXPORT_SYMBOL(vchiq_queue_bulk_transmit);
11500 +
11501 +VCHIQ_STATUS_T
11502 +vchiq_queue_bulk_receive(VCHIQ_SERVICE_HANDLE_T handle, void *data,
11503 + unsigned int size, void *userdata)
11504 +{
11505 + return vchiq_bulk_transfer(handle,
11506 + VCHI_MEM_HANDLE_INVALID, data, size, userdata,
11507 + VCHIQ_BULK_MODE_CALLBACK, VCHIQ_BULK_RECEIVE);
11508 +}
11509 +EXPORT_SYMBOL(vchiq_queue_bulk_receive);
11510 +
11511 +VCHIQ_STATUS_T
11512 +vchiq_bulk_transmit(VCHIQ_SERVICE_HANDLE_T handle, const void *data,
11513 + unsigned int size, void *userdata, VCHIQ_BULK_MODE_T mode)
11514 +{
11515 + VCHIQ_STATUS_T status;
11516 +
11517 + switch (mode) {
11518 + case VCHIQ_BULK_MODE_NOCALLBACK:
11519 + case VCHIQ_BULK_MODE_CALLBACK:
11520 + status = vchiq_bulk_transfer(handle,
11521 + VCHI_MEM_HANDLE_INVALID, (void *)data, size, userdata,
11522 + mode, VCHIQ_BULK_TRANSMIT);
11523 + break;
11524 + case VCHIQ_BULK_MODE_BLOCKING:
11525 + status = vchiq_blocking_bulk_transfer(handle,
11526 + (void *)data, size, VCHIQ_BULK_TRANSMIT);
11527 + break;
11528 + default:
11529 + return VCHIQ_ERROR;
11530 + }
11531 +
11532 + return status;
11533 +}
11534 +EXPORT_SYMBOL(vchiq_bulk_transmit);
11535 +
11536 +VCHIQ_STATUS_T
11537 +vchiq_bulk_receive(VCHIQ_SERVICE_HANDLE_T handle, void *data,
11538 + unsigned int size, void *userdata, VCHIQ_BULK_MODE_T mode)
11539 +{
11540 + VCHIQ_STATUS_T status;
11541 +
11542 + switch (mode) {
11543 + case VCHIQ_BULK_MODE_NOCALLBACK:
11544 + case VCHIQ_BULK_MODE_CALLBACK:
11545 + status = vchiq_bulk_transfer(handle,
11546 + VCHI_MEM_HANDLE_INVALID, data, size, userdata,
11547 + mode, VCHIQ_BULK_RECEIVE);
11548 + break;
11549 + case VCHIQ_BULK_MODE_BLOCKING:
11550 + status = vchiq_blocking_bulk_transfer(handle,
11551 + (void *)data, size, VCHIQ_BULK_RECEIVE);
11552 + break;
11553 + default:
11554 + return VCHIQ_ERROR;
11555 + }
11556 +
11557 + return status;
11558 +}
11559 +EXPORT_SYMBOL(vchiq_bulk_receive);
11560 +
11561 +static VCHIQ_STATUS_T
11562 +vchiq_blocking_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle, void *data,
11563 + unsigned int size, VCHIQ_BULK_DIR_T dir)
11564 +{
11565 + VCHIQ_INSTANCE_T instance;
11566 + VCHIQ_SERVICE_T *service;
11567 + VCHIQ_STATUS_T status;
11568 + struct bulk_waiter_node *waiter = NULL;
11569 + struct list_head *pos;
11570 +
11571 + service = find_service_by_handle(handle);
11572 + if (!service)
11573 + return VCHIQ_ERROR;
11574 +
11575 + instance = service->instance;
11576 +
11577 + unlock_service(service);
11578 +
11579 + mutex_lock(&instance->bulk_waiter_list_mutex);
11580 + list_for_each(pos, &instance->bulk_waiter_list) {
11581 + if (list_entry(pos, struct bulk_waiter_node,
11582 + list)->pid == current->pid) {
11583 + waiter = list_entry(pos,
11584 + struct bulk_waiter_node,
11585 + list);
11586 + list_del(pos);
11587 + break;
11588 + }
11589 + }
11590 + mutex_unlock(&instance->bulk_waiter_list_mutex);
11591 +
11592 + if (waiter) {
11593 + VCHIQ_BULK_T *bulk = waiter->bulk_waiter.bulk;
11594 + if (bulk) {
11595 + /* This thread has an outstanding bulk transfer. */
11596 + if ((bulk->data != data) ||
11597 + (bulk->size != size)) {
11598 + /* This is not a retry of the previous one.
11599 + ** Cancel the signal when the transfer
11600 + ** completes. */
11601 + spin_lock(&bulk_waiter_spinlock);
11602 + bulk->userdata = NULL;
11603 + spin_unlock(&bulk_waiter_spinlock);
11604 + }
11605 + }
11606 + }
11607 +
11608 + if (!waiter) {
11609 + waiter = kzalloc(sizeof(struct bulk_waiter_node), GFP_KERNEL);
11610 + if (!waiter) {
11611 + vchiq_log_error(vchiq_core_log_level,
11612 + "%s - out of memory", __func__);
11613 + return VCHIQ_ERROR;
11614 + }
11615 + }
11616 +
11617 + status = vchiq_bulk_transfer(handle, VCHI_MEM_HANDLE_INVALID,
11618 + data, size, &waiter->bulk_waiter, VCHIQ_BULK_MODE_BLOCKING,
11619 + dir);
11620 + if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) ||
11621 + !waiter->bulk_waiter.bulk) {
11622 + VCHIQ_BULK_T *bulk = waiter->bulk_waiter.bulk;
11623 + if (bulk) {
11624 + /* Cancel the signal when the transfer
11625 + ** completes. */
11626 + spin_lock(&bulk_waiter_spinlock);
11627 + bulk->userdata = NULL;
11628 + spin_unlock(&bulk_waiter_spinlock);
11629 + }
11630 + kfree(waiter);
11631 + } else {
11632 + waiter->pid = current->pid;
11633 + mutex_lock(&instance->bulk_waiter_list_mutex);
11634 + list_add(&waiter->list, &instance->bulk_waiter_list);
11635 + mutex_unlock(&instance->bulk_waiter_list_mutex);
11636 + vchiq_log_info(vchiq_arm_log_level,
11637 + "saved bulk_waiter %x for pid %d",
11638 + (unsigned int)waiter, current->pid);
11639 + }
11640 +
11641 + return status;
11642 +}
11643 diff --git a/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_memdrv.h b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_memdrv.h
11644 new file mode 100644
11645 index 0000000..d02e776
11646 --- /dev/null
11647 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_memdrv.h
11648 @@ -0,0 +1,71 @@
11649 +/**
11650 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
11651 + *
11652 + * Redistribution and use in source and binary forms, with or without
11653 + * modification, are permitted provided that the following conditions
11654 + * are met:
11655 + * 1. Redistributions of source code must retain the above copyright
11656 + * notice, this list of conditions, and the following disclaimer,
11657 + * without modification.
11658 + * 2. Redistributions in binary form must reproduce the above copyright
11659 + * notice, this list of conditions and the following disclaimer in the
11660 + * documentation and/or other materials provided with the distribution.
11661 + * 3. The names of the above-listed copyright holders may not be used
11662 + * to endorse or promote products derived from this software without
11663 + * specific prior written permission.
11664 + *
11665 + * ALTERNATIVELY, this software may be distributed under the terms of the
11666 + * GNU General Public License ("GPL") version 2, as published by the Free
11667 + * Software Foundation.
11668 + *
11669 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
11670 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
11671 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
11672 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
11673 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
11674 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
11675 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
11676 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
11677 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
11678 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
11679 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
11680 + */
11681 +
11682 +#ifndef VCHIQ_MEMDRV_H
11683 +#define VCHIQ_MEMDRV_H
11684 +
11685 +/* ---- Include Files ----------------------------------------------------- */
11686 +
11687 +#include <linux/kernel.h>
11688 +#include "vchiq_if.h"
11689 +
11690 +/* ---- Constants and Types ---------------------------------------------- */
11691 +
11692 +typedef struct {
11693 + void *armSharedMemVirt;
11694 + dma_addr_t armSharedMemPhys;
11695 + size_t armSharedMemSize;
11696 +
11697 + void *vcSharedMemVirt;
11698 + dma_addr_t vcSharedMemPhys;
11699 + size_t vcSharedMemSize;
11700 +} VCHIQ_SHARED_MEM_INFO_T;
11701 +
11702 +/* ---- Variable Externs ------------------------------------------------- */
11703 +
11704 +/* ---- Function Prototypes ---------------------------------------------- */
11705 +
11706 +void vchiq_get_shared_mem_info(VCHIQ_SHARED_MEM_INFO_T *info);
11707 +
11708 +VCHIQ_STATUS_T vchiq_memdrv_initialise(void);
11709 +
11710 +VCHIQ_STATUS_T vchiq_userdrv_create_instance(
11711 + const VCHIQ_PLATFORM_DATA_T * platform_data);
11712 +
11713 +VCHIQ_STATUS_T vchiq_userdrv_suspend(
11714 + const VCHIQ_PLATFORM_DATA_T * platform_data);
11715 +
11716 +VCHIQ_STATUS_T vchiq_userdrv_resume(
11717 + const VCHIQ_PLATFORM_DATA_T * platform_data);
11718 +
11719 +#endif
11720 diff --git a/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_pagelist.h b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_pagelist.h
11721 new file mode 100644
11722 index 0000000..54a3ece
11723 --- /dev/null
11724 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_pagelist.h
11725 @@ -0,0 +1,58 @@
11726 +/**
11727 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
11728 + *
11729 + * Redistribution and use in source and binary forms, with or without
11730 + * modification, are permitted provided that the following conditions
11731 + * are met:
11732 + * 1. Redistributions of source code must retain the above copyright
11733 + * notice, this list of conditions, and the following disclaimer,
11734 + * without modification.
11735 + * 2. Redistributions in binary form must reproduce the above copyright
11736 + * notice, this list of conditions and the following disclaimer in the
11737 + * documentation and/or other materials provided with the distribution.
11738 + * 3. The names of the above-listed copyright holders may not be used
11739 + * to endorse or promote products derived from this software without
11740 + * specific prior written permission.
11741 + *
11742 + * ALTERNATIVELY, this software may be distributed under the terms of the
11743 + * GNU General Public License ("GPL") version 2, as published by the Free
11744 + * Software Foundation.
11745 + *
11746 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
11747 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
11748 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
11749 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
11750 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
11751 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
11752 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
11753 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
11754 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
11755 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
11756 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
11757 + */
11758 +
11759 +#ifndef VCHIQ_PAGELIST_H
11760 +#define VCHIQ_PAGELIST_H
11761 +
11762 +#ifndef PAGE_SIZE
11763 +#define PAGE_SIZE 4096
11764 +#endif
11765 +#define CACHE_LINE_SIZE 32
11766 +#define PAGELIST_WRITE 0
11767 +#define PAGELIST_READ 1
11768 +#define PAGELIST_READ_WITH_FRAGMENTS 2
11769 +
11770 +typedef struct pagelist_struct {
11771 + unsigned long length;
11772 + unsigned short type;
11773 + unsigned short offset;
11774 + unsigned long addrs[1]; /* N.B. 12 LSBs hold the number of following
11775 + pages at consecutive addresses. */
11776 +} PAGELIST_T;
11777 +
11778 +typedef struct fragments_struct {
11779 + char headbuf[CACHE_LINE_SIZE];
11780 + char tailbuf[CACHE_LINE_SIZE];
11781 +} FRAGMENTS_T;
11782 +
11783 +#endif /* VCHIQ_PAGELIST_H */
11784 diff --git a/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_shim.c b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_shim.c
11785 new file mode 100644
11786 index 0000000..72eacdaf
11787 --- /dev/null
11788 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_shim.c
11789 @@ -0,0 +1,853 @@
11790 +/**
11791 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
11792 + *
11793 + * Redistribution and use in source and binary forms, with or without
11794 + * modification, are permitted provided that the following conditions
11795 + * are met:
11796 + * 1. Redistributions of source code must retain the above copyright
11797 + * notice, this list of conditions, and the following disclaimer,
11798 + * without modification.
11799 + * 2. Redistributions in binary form must reproduce the above copyright
11800 + * notice, this list of conditions and the following disclaimer in the
11801 + * documentation and/or other materials provided with the distribution.
11802 + * 3. The names of the above-listed copyright holders may not be used
11803 + * to endorse or promote products derived from this software without
11804 + * specific prior written permission.
11805 + *
11806 + * ALTERNATIVELY, this software may be distributed under the terms of the
11807 + * GNU General Public License ("GPL") version 2, as published by the Free
11808 + * Software Foundation.
11809 + *
11810 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
11811 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
11812 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
11813 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
11814 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
11815 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
11816 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
11817 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
11818 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
11819 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
11820 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
11821 + */
11822 +#include <linux/module.h>
11823 +#include <linux/types.h>
11824 +
11825 +#include "interface/vchi/vchi.h"
11826 +#include "vchiq.h"
11827 +#include "vchiq_core.h"
11828 +
11829 +#include "vchiq_util.h"
11830 +
11831 +#include <stddef.h>
11832 +
11833 +#define vchiq_status_to_vchi(status) ((int32_t)status)
11834 +
11835 +typedef struct {
11836 + VCHIQ_SERVICE_HANDLE_T handle;
11837 +
11838 + VCHIU_QUEUE_T queue;
11839 +
11840 + VCHI_CALLBACK_T callback;
11841 + void *callback_param;
11842 +} SHIM_SERVICE_T;
11843 +
11844 +/* ----------------------------------------------------------------------
11845 + * return pointer to the mphi message driver function table
11846 + * -------------------------------------------------------------------- */
11847 +const VCHI_MESSAGE_DRIVER_T *
11848 +vchi_mphi_message_driver_func_table(void)
11849 +{
11850 + return NULL;
11851 +}
11852 +
11853 +/* ----------------------------------------------------------------------
11854 + * return a pointer to the 'single' connection driver fops
11855 + * -------------------------------------------------------------------- */
11856 +const VCHI_CONNECTION_API_T *
11857 +single_get_func_table(void)
11858 +{
11859 + return NULL;
11860 +}
11861 +
11862 +VCHI_CONNECTION_T *vchi_create_connection(
11863 + const VCHI_CONNECTION_API_T *function_table,
11864 + const VCHI_MESSAGE_DRIVER_T *low_level)
11865 +{
11866 + (void)function_table;
11867 + (void)low_level;
11868 + return NULL;
11869 +}
11870 +
11871 +/***********************************************************
11872 + * Name: vchi_msg_peek
11873 + *
11874 + * Arguments: const VCHI_SERVICE_HANDLE_T handle,
11875 + * void **data,
11876 + * uint32_t *msg_size,
11877 + *
11878 + *
11879 + * VCHI_FLAGS_T flags
11880 + *
11881 + * Description: Routine to return a pointer to the current message (to allow in
11882 + * place processing). The message can be removed using
11883 + * vchi_msg_remove when you're finished
11884 + *
11885 + * Returns: int32_t - success == 0
11886 + *
11887 + ***********************************************************/
11888 +int32_t vchi_msg_peek(VCHI_SERVICE_HANDLE_T handle,
11889 + void **data,
11890 + uint32_t *msg_size,
11891 + VCHI_FLAGS_T flags)
11892 +{
11893 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
11894 + VCHIQ_HEADER_T *header;
11895 +
11896 + WARN_ON((flags != VCHI_FLAGS_NONE) &&
11897 + (flags != VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE));
11898 +
11899 + if (flags == VCHI_FLAGS_NONE)
11900 + if (vchiu_queue_is_empty(&service->queue))
11901 + return -1;
11902 +
11903 + header = vchiu_queue_peek(&service->queue);
11904 +
11905 + *data = header->data;
11906 + *msg_size = header->size;
11907 +
11908 + return 0;
11909 +}
11910 +EXPORT_SYMBOL(vchi_msg_peek);
11911 +
11912 +/***********************************************************
11913 + * Name: vchi_msg_remove
11914 + *
11915 + * Arguments: const VCHI_SERVICE_HANDLE_T handle,
11916 + *
11917 + * Description: Routine to remove a message (after it has been read with
11918 + * vchi_msg_peek)
11919 + *
11920 + * Returns: int32_t - success == 0
11921 + *
11922 + ***********************************************************/
11923 +int32_t vchi_msg_remove(VCHI_SERVICE_HANDLE_T handle)
11924 +{
11925 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
11926 + VCHIQ_HEADER_T *header;
11927 +
11928 + header = vchiu_queue_pop(&service->queue);
11929 +
11930 + vchiq_release_message(service->handle, header);
11931 +
11932 + return 0;
11933 +}
11934 +EXPORT_SYMBOL(vchi_msg_remove);
11935 +
11936 +/***********************************************************
11937 + * Name: vchi_msg_queue
11938 + *
11939 + * Arguments: VCHI_SERVICE_HANDLE_T handle,
11940 + * const void *data,
11941 + * uint32_t data_size,
11942 + * VCHI_FLAGS_T flags,
11943 + * void *msg_handle,
11944 + *
11945 + * Description: Thin wrapper to queue a message onto a connection
11946 + *
11947 + * Returns: int32_t - success == 0
11948 + *
11949 + ***********************************************************/
11950 +int32_t vchi_msg_queue(VCHI_SERVICE_HANDLE_T handle,
11951 + const void *data,
11952 + uint32_t data_size,
11953 + VCHI_FLAGS_T flags,
11954 + void *msg_handle)
11955 +{
11956 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
11957 + VCHIQ_ELEMENT_T element = {data, data_size};
11958 + VCHIQ_STATUS_T status;
11959 +
11960 + (void)msg_handle;
11961 +
11962 + WARN_ON(flags != VCHI_FLAGS_BLOCK_UNTIL_QUEUED);
11963 +
11964 + status = vchiq_queue_message(service->handle, &element, 1);
11965 +
11966 + /* vchiq_queue_message() may return VCHIQ_RETRY, so we need to
11967 + ** implement a retry mechanism since this function is supposed
11968 + ** to block until queued
11969 + */
11970 + while (status == VCHIQ_RETRY) {
11971 + msleep(1);
11972 + status = vchiq_queue_message(service->handle, &element, 1);
11973 + }
11974 +
11975 + return vchiq_status_to_vchi(status);
11976 +}
11977 +EXPORT_SYMBOL(vchi_msg_queue);
11978 +
11979 +/***********************************************************
11980 + * Name: vchi_bulk_queue_receive
11981 + *
11982 + * Arguments:  VCHI_SERVICE_HANDLE_T handle,
11983 + * void *data_dst,
11984 + * const uint32_t data_size,
11985 + * VCHI_FLAGS_T flags
11986 + * void *bulk_handle
11987 + *
11988 + * Description: Routine to set up a receive buffer
11989 + *
11990 + * Returns: int32_t - success == 0
11991 + *
11992 + ***********************************************************/
11993 +int32_t vchi_bulk_queue_receive(VCHI_SERVICE_HANDLE_T handle,
11994 + void *data_dst,
11995 + uint32_t data_size,
11996 + VCHI_FLAGS_T flags,
11997 + void *bulk_handle)
11998 +{
11999 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
12000 + VCHIQ_BULK_MODE_T mode;
12001 + VCHIQ_STATUS_T status;
12002 +
12003 + switch ((int)flags) {
12004 + case VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE
12005 + | VCHI_FLAGS_BLOCK_UNTIL_QUEUED:
12006 + WARN_ON(!service->callback);
12007 + mode = VCHIQ_BULK_MODE_CALLBACK;
12008 + break;
12009 + case VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE:
12010 + mode = VCHIQ_BULK_MODE_BLOCKING;
12011 + break;
12012 + case VCHI_FLAGS_BLOCK_UNTIL_QUEUED:
12013 + case VCHI_FLAGS_NONE:
12014 + mode = VCHIQ_BULK_MODE_NOCALLBACK;
12015 + break;
12016 + default:
12017 + WARN(1, "unsupported message\n");
12018 + return vchiq_status_to_vchi(VCHIQ_ERROR);
12019 + }
12020 +
12021 + status = vchiq_bulk_receive(service->handle, data_dst, data_size,
12022 + bulk_handle, mode);
12023 +
12024 + /* vchiq_bulk_receive() may return VCHIQ_RETRY, so we need to
12025 + ** implement a retry mechanism since this function is supposed
12026 + ** to block until queued
12027 + */
12028 + while (status == VCHIQ_RETRY) {
12029 + msleep(1);
12030 + status = vchiq_bulk_receive(service->handle, data_dst,
12031 + data_size, bulk_handle, mode);
12032 + }
12033 +
12034 + return vchiq_status_to_vchi(status);
12035 +}
12036 +EXPORT_SYMBOL(vchi_bulk_queue_receive);
12037 +
12038 +/***********************************************************
12039 + * Name: vchi_bulk_queue_transmit
12040 + *
12041 + * Arguments:  VCHI_SERVICE_HANDLE_T handle,
12042 + * const void *data_src,
12043 + * uint32_t data_size,
12044 + * VCHI_FLAGS_T flags,
12045 + * void *bulk_handle
12046 + *
12047 + * Description: Routine to transmit some data
12048 + *
12049 + * Returns: int32_t - success == 0
12050 + *
12051 + ***********************************************************/
12052 +int32_t vchi_bulk_queue_transmit(VCHI_SERVICE_HANDLE_T handle,
12053 + const void *data_src,
12054 + uint32_t data_size,
12055 + VCHI_FLAGS_T flags,
12056 + void *bulk_handle)
12057 +{
12058 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
12059 + VCHIQ_BULK_MODE_T mode;
12060 + VCHIQ_STATUS_T status;
12061 +
12062 + switch ((int)flags) {
12063 + case VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE
12064 + | VCHI_FLAGS_BLOCK_UNTIL_QUEUED:
12065 + WARN_ON(!service->callback);
12066 + mode = VCHIQ_BULK_MODE_CALLBACK;
12067 + break;
12068 + case VCHI_FLAGS_BLOCK_UNTIL_DATA_READ:
12069 + case VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE:
12070 + mode = VCHIQ_BULK_MODE_BLOCKING;
12071 + break;
12072 + case VCHI_FLAGS_BLOCK_UNTIL_QUEUED:
12073 + case VCHI_FLAGS_NONE:
12074 + mode = VCHIQ_BULK_MODE_NOCALLBACK;
12075 + break;
12076 + default:
12077 + WARN(1, "unsupported message\n");
12078 + return vchiq_status_to_vchi(VCHIQ_ERROR);
12079 + }
12080 +
12081 + status = vchiq_bulk_transmit(service->handle, data_src, data_size,
12082 + bulk_handle, mode);
12083 +
12084 + /* vchiq_bulk_transmit() may return VCHIQ_RETRY, so we need to
12085 + ** implement a retry mechanism since this function is supposed
12086 + ** to block until queued
12087 + */
12088 + while (status == VCHIQ_RETRY) {
12089 + msleep(1);
12090 + status = vchiq_bulk_transmit(service->handle, data_src,
12091 + data_size, bulk_handle, mode);
12092 + }
12093 +
12094 + return vchiq_status_to_vchi(status);
12095 +}
12096 +EXPORT_SYMBOL(vchi_bulk_queue_transmit);
12097 +
12098 +/***********************************************************
12099 + * Name: vchi_msg_dequeue
12100 + *
12101 + * Arguments: VCHI_SERVICE_HANDLE_T handle,
12102 + * void *data,
12103 + * uint32_t max_data_size_to_read,
12104 + * uint32_t *actual_msg_size
12105 + * VCHI_FLAGS_T flags
12106 + *
12107 + * Description: Routine to dequeue a message into the supplied buffer
12108 + *
12109 + * Returns: int32_t - success == 0
12110 + *
12111 + ***********************************************************/
12112 +int32_t vchi_msg_dequeue(VCHI_SERVICE_HANDLE_T handle,
12113 + void *data,
12114 + uint32_t max_data_size_to_read,
12115 + uint32_t *actual_msg_size,
12116 + VCHI_FLAGS_T flags)
12117 +{
12118 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
12119 + VCHIQ_HEADER_T *header;
12120 +
12121 + WARN_ON((flags != VCHI_FLAGS_NONE) &&
12122 + (flags != VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE));
12123 +
12124 + if (flags == VCHI_FLAGS_NONE)
12125 + if (vchiu_queue_is_empty(&service->queue))
12126 + return -1;
12127 +
12128 + header = vchiu_queue_pop(&service->queue);
12129 +
12130 + memcpy(data, header->data, header->size < max_data_size_to_read ?
12131 + header->size : max_data_size_to_read);
12132 +
12133 + *actual_msg_size = header->size;
12134 +
12135 + vchiq_release_message(service->handle, header);
12136 +
12137 + return 0;
12138 +}
12139 +EXPORT_SYMBOL(vchi_msg_dequeue);
12140 +
12141 +/***********************************************************
12142 + * Name: vchi_msg_queuev
12143 + *
12144 + * Arguments: VCHI_SERVICE_HANDLE_T handle,
12145 + * VCHI_MSG_VECTOR_T *vector,
12146 + * uint32_t count,
12147 + * VCHI_FLAGS_T flags,
12148 + * void *msg_handle
12149 + *
12150 + * Description: Thin wrapper to queue a message onto a connection
12151 + *
12152 + * Returns: int32_t - success == 0
12153 + *
12154 + ***********************************************************/
12155 +
12156 +vchiq_static_assert(sizeof(VCHI_MSG_VECTOR_T) == sizeof(VCHIQ_ELEMENT_T));
12157 +vchiq_static_assert(offsetof(VCHI_MSG_VECTOR_T, vec_base) ==
12158 + offsetof(VCHIQ_ELEMENT_T, data));
12159 +vchiq_static_assert(offsetof(VCHI_MSG_VECTOR_T, vec_len) ==
12160 + offsetof(VCHIQ_ELEMENT_T, size));
12161 +
12162 +int32_t vchi_msg_queuev(VCHI_SERVICE_HANDLE_T handle,
12163 + VCHI_MSG_VECTOR_T *vector,
12164 + uint32_t count,
12165 + VCHI_FLAGS_T flags,
12166 + void *msg_handle)
12167 +{
12168 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
12169 +
12170 + (void)msg_handle;
12171 +
12172 + WARN_ON(flags != VCHI_FLAGS_BLOCK_UNTIL_QUEUED);
12173 +
12174 + return vchiq_status_to_vchi(vchiq_queue_message(service->handle,
12175 + (const VCHIQ_ELEMENT_T *)vector, count));
12176 +}
12177 +EXPORT_SYMBOL(vchi_msg_queuev);
12178 +
12179 +/***********************************************************
12180 + * Name: vchi_held_msg_release
12181 + *
12182 + * Arguments: VCHI_HELD_MSG_T *message
12183 + *
12184 + * Description: Routine to release a held message (after it has been read with
12185 + * vchi_msg_hold)
12186 + *
12187 + * Returns: int32_t - success == 0
12188 + *
12189 + ***********************************************************/
12190 +int32_t vchi_held_msg_release(VCHI_HELD_MSG_T *message)
12191 +{
12192 + vchiq_release_message((VCHIQ_SERVICE_HANDLE_T)message->service,
12193 + (VCHIQ_HEADER_T *)message->message);
12194 +
12195 + return 0;
12196 +}
12197 +EXPORT_SYMBOL(vchi_held_msg_release);
12198 +
12199 +/***********************************************************
12200 + * Name: vchi_msg_hold
12201 + *
12202 + * Arguments: VCHI_SERVICE_HANDLE_T handle,
12203 + * void **data,
12204 + * uint32_t *msg_size,
12205 + * VCHI_FLAGS_T flags,
12206 + * VCHI_HELD_MSG_T *message_handle
12207 + *
12208 + * Description: Routine to return a pointer to the current message (to allow
12209 + * in place processing). The message is dequeued - don't forget
12210 + * to release the message using vchi_held_msg_release when you're
12211 + * finished.
12212 + *
12213 + * Returns: int32_t - success == 0
12214 + *
12215 + ***********************************************************/
12216 +int32_t vchi_msg_hold(VCHI_SERVICE_HANDLE_T handle,
12217 + void **data,
12218 + uint32_t *msg_size,
12219 + VCHI_FLAGS_T flags,
12220 + VCHI_HELD_MSG_T *message_handle)
12221 +{
12222 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
12223 + VCHIQ_HEADER_T *header;
12224 +
12225 + WARN_ON((flags != VCHI_FLAGS_NONE) &&
12226 + (flags != VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE));
12227 +
12228 + if (flags == VCHI_FLAGS_NONE)
12229 + if (vchiu_queue_is_empty(&service->queue))
12230 + return -1;
12231 +
12232 + header = vchiu_queue_pop(&service->queue);
12233 +
12234 + *data = header->data;
12235 + *msg_size = header->size;
12236 +
12237 + message_handle->service =
12238 + (struct opaque_vchi_service_t *)service->handle;
12239 + message_handle->message = header;
12240 +
12241 + return 0;
12242 +}
12243 +EXPORT_SYMBOL(vchi_msg_hold);
12244 +
12245 +/***********************************************************
12246 + * Name: vchi_initialise
12247 + *
12248 + * Arguments: VCHI_INSTANCE_T *instance_handle
12249 + *
12250 + * Description: Initialises the hardware but does not transmit anything
12251 + * When run as a Host App this will be called twice hence the need
12252 + * to malloc the state information
12253 + *
12254 + * Returns: 0 if successful, failure otherwise
12255 + *
12256 + ***********************************************************/
12257 +
12258 +int32_t vchi_initialise(VCHI_INSTANCE_T *instance_handle)
12259 +{
12260 + VCHIQ_INSTANCE_T instance;
12261 + VCHIQ_STATUS_T status;
12262 +
12263 + status = vchiq_initialise(&instance);
12264 +
12265 + *instance_handle = (VCHI_INSTANCE_T)instance;
12266 +
12267 + return vchiq_status_to_vchi(status);
12268 +}
12269 +EXPORT_SYMBOL(vchi_initialise);
12270 +
12271 +/***********************************************************
12272 + * Name: vchi_connect
12273 + *
12274 + * Arguments: VCHI_CONNECTION_T **connections
12275 + * const uint32_t num_connections
12276 + * VCHI_INSTANCE_T instance_handle)
12277 + *
12278 + * Description: Starts the command service on each connection,
12279 + * causing INIT messages to be pinged back and forth
12280 + *
12281 + * Returns: 0 if successful, failure otherwise
12282 + *
12283 + ***********************************************************/
12284 +int32_t vchi_connect(VCHI_CONNECTION_T **connections,
12285 + const uint32_t num_connections,
12286 + VCHI_INSTANCE_T instance_handle)
12287 +{
12288 + VCHIQ_INSTANCE_T instance = (VCHIQ_INSTANCE_T)instance_handle;
12289 +
12290 + (void)connections;
12291 + (void)num_connections;
12292 +
12293 + return vchiq_connect(instance);
12294 +}
12295 +EXPORT_SYMBOL(vchi_connect);
12296 +
12297 +
12298 +/***********************************************************
12299 + * Name: vchi_disconnect
12300 + *
12301 + * Arguments: VCHI_INSTANCE_T instance_handle
12302 + *
12303 + * Description: Stops the command service on each connection,
12304 + * causing DE-INIT messages to be pinged back and forth
12305 + *
12306 + * Returns: 0 if successful, failure otherwise
12307 + *
12308 + ***********************************************************/
12309 +int32_t vchi_disconnect(VCHI_INSTANCE_T instance_handle)
12310 +{
12311 + VCHIQ_INSTANCE_T instance = (VCHIQ_INSTANCE_T)instance_handle;
12312 + return vchiq_status_to_vchi(vchiq_shutdown(instance));
12313 +}
12314 +EXPORT_SYMBOL(vchi_disconnect);
12315 +
12316 +
12317 +/***********************************************************
12318 + * Name: vchi_service_open
12319 + * Name: vchi_service_create
12320 + *
12321 + * Arguments: VCHI_INSTANCE_T *instance_handle
12322 + * SERVICE_CREATION_T *setup,
12323 + * VCHI_SERVICE_HANDLE_T *handle
12324 + *
12325 + * Description: Routine to open a service
12326 + *
12327 + * Returns: int32_t - success == 0
12328 + *
12329 + ***********************************************************/
12330 +
12331 +static VCHIQ_STATUS_T shim_callback(VCHIQ_REASON_T reason,
12332 + VCHIQ_HEADER_T *header, VCHIQ_SERVICE_HANDLE_T handle, void *bulk_user)
12333 +{
12334 + SHIM_SERVICE_T *service =
12335 + (SHIM_SERVICE_T *)VCHIQ_GET_SERVICE_USERDATA(handle);
12336 +
12337 + if (!service->callback)
12338 + goto release;
12339 +
12340 + switch (reason) {
12341 + case VCHIQ_MESSAGE_AVAILABLE:
12342 + vchiu_queue_push(&service->queue, header);
12343 +
12344 + service->callback(service->callback_param,
12345 + VCHI_CALLBACK_MSG_AVAILABLE, NULL);
12346 +
12347 + goto done;
12348 + break;
12349 +
12350 + case VCHIQ_BULK_TRANSMIT_DONE:
12351 + service->callback(service->callback_param,
12352 + VCHI_CALLBACK_BULK_SENT, bulk_user);
12353 + break;
12354 +
12355 + case VCHIQ_BULK_RECEIVE_DONE:
12356 + service->callback(service->callback_param,
12357 + VCHI_CALLBACK_BULK_RECEIVED, bulk_user);
12358 + break;
12359 +
12360 + case VCHIQ_SERVICE_CLOSED:
12361 + service->callback(service->callback_param,
12362 + VCHI_CALLBACK_SERVICE_CLOSED, NULL);
12363 + break;
12364 +
12365 + case VCHIQ_SERVICE_OPENED:
12366 + /* No equivalent VCHI reason */
12367 + break;
12368 +
12369 + case VCHIQ_BULK_TRANSMIT_ABORTED:
12370 + service->callback(service->callback_param,
12371 + VCHI_CALLBACK_BULK_TRANSMIT_ABORTED,
12372 + bulk_user);
12373 + break;
12374 +
12375 + case VCHIQ_BULK_RECEIVE_ABORTED:
12376 + service->callback(service->callback_param,
12377 + VCHI_CALLBACK_BULK_RECEIVE_ABORTED,
12378 + bulk_user);
12379 + break;
12380 +
12381 + default:
12382 + WARN(1, "not supported\n");
12383 + break;
12384 + }
12385 +
12386 +release:
12387 + vchiq_release_message(service->handle, header);
12388 +done:
12389 + return VCHIQ_SUCCESS;
12390 +}
12391 +
12392 +static SHIM_SERVICE_T *service_alloc(VCHIQ_INSTANCE_T instance,
12393 + SERVICE_CREATION_T *setup)
12394 +{
12395 + SHIM_SERVICE_T *service = kzalloc(sizeof(SHIM_SERVICE_T), GFP_KERNEL);
12396 +
12397 + (void)instance;
12398 +
12399 + if (service) {
12400 + if (vchiu_queue_init(&service->queue, 64)) {
12401 + service->callback = setup->callback;
12402 + service->callback_param = setup->callback_param;
12403 + } else {
12404 + kfree(service);
12405 + service = NULL;
12406 + }
12407 + }
12408 +
12409 + return service;
12410 +}
12411 +
12412 +static void service_free(SHIM_SERVICE_T *service)
12413 +{
12414 + if (service) {
12415 + vchiu_queue_delete(&service->queue);
12416 + kfree(service);
12417 + }
12418 +}
12419 +
12420 +int32_t vchi_service_open(VCHI_INSTANCE_T instance_handle,
12421 + SERVICE_CREATION_T *setup,
12422 + VCHI_SERVICE_HANDLE_T *handle)
12423 +{
12424 + VCHIQ_INSTANCE_T instance = (VCHIQ_INSTANCE_T)instance_handle;
12425 + SHIM_SERVICE_T *service = service_alloc(instance, setup);
12426 + if (service) {
12427 + VCHIQ_SERVICE_PARAMS_T params;
12428 + VCHIQ_STATUS_T status;
12429 +
12430 + memset(&params, 0, sizeof(params));
12431 + params.fourcc = setup->service_id;
12432 + params.callback = shim_callback;
12433 + params.userdata = service;
12434 + params.version = setup->version.version;
12435 + params.version_min = setup->version.version_min;
12436 +
12437 + status = vchiq_open_service(instance, &params,
12438 + &service->handle);
12439 + if (status != VCHIQ_SUCCESS) {
12440 + service_free(service);
12441 + service = NULL;
12442 + }
12443 + }
12444 +
12445 + *handle = (VCHI_SERVICE_HANDLE_T)service;
12446 +
12447 + return (service != NULL) ? 0 : -1;
12448 +}
12449 +EXPORT_SYMBOL(vchi_service_open);
12450 +
12451 +int32_t vchi_service_create(VCHI_INSTANCE_T instance_handle,
12452 + SERVICE_CREATION_T *setup,
12453 + VCHI_SERVICE_HANDLE_T *handle)
12454 +{
12455 + VCHIQ_INSTANCE_T instance = (VCHIQ_INSTANCE_T)instance_handle;
12456 + SHIM_SERVICE_T *service = service_alloc(instance, setup);
12457 + if (service) {
12458 + VCHIQ_SERVICE_PARAMS_T params;
12459 + VCHIQ_STATUS_T status;
12460 +
12461 + memset(&params, 0, sizeof(params));
12462 + params.fourcc = setup->service_id;
12463 + params.callback = shim_callback;
12464 + params.userdata = service;
12465 + params.version = setup->version.version;
12466 + params.version_min = setup->version.version_min;
12467 + status = vchiq_add_service(instance, &params, &service->handle);
12468 +
12469 + if (status != VCHIQ_SUCCESS) {
12470 + service_free(service);
12471 + service = NULL;
12472 + }
12473 + }
12474 +
12475 + *handle = (VCHI_SERVICE_HANDLE_T)service;
12476 +
12477 + return (service != NULL) ? 0 : -1;
12478 +}
12479 +EXPORT_SYMBOL(vchi_service_create);
12480 +
12481 +int32_t vchi_service_close(const VCHI_SERVICE_HANDLE_T handle)
12482 +{
12483 + int32_t ret = -1;
12484 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
12485 + if (service) {
12486 + VCHIQ_STATUS_T status = vchiq_close_service(service->handle);
12487 + if (status == VCHIQ_SUCCESS) {
12488 + service_free(service);
12489 + service = NULL;
12490 + }
12491 +
12492 + ret = vchiq_status_to_vchi(status);
12493 + }
12494 + return ret;
12495 +}
12496 +EXPORT_SYMBOL(vchi_service_close);
12497 +
12498 +int32_t vchi_service_destroy(const VCHI_SERVICE_HANDLE_T handle)
12499 +{
12500 + int32_t ret = -1;
12501 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
12502 + if (service) {
12503 + VCHIQ_STATUS_T status = vchiq_remove_service(service->handle);
12504 + if (status == VCHIQ_SUCCESS) {
12505 + service_free(service);
12506 + service = NULL;
12507 + }
12508 +
12509 + ret = vchiq_status_to_vchi(status);
12510 + }
12511 + return ret;
12512 +}
12513 +EXPORT_SYMBOL(vchi_service_destroy);
12514 +
12515 +int32_t vchi_service_set_option(const VCHI_SERVICE_HANDLE_T handle,
12516 + VCHI_SERVICE_OPTION_T option,
12517 + int value)
12518 +{
12519 + int32_t ret = -1;
12520 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
12521 + VCHIQ_SERVICE_OPTION_T vchiq_option;
12522 + switch (option) {
12523 + case VCHI_SERVICE_OPTION_TRACE:
12524 + vchiq_option = VCHIQ_SERVICE_OPTION_TRACE;
12525 + break;
12526 + default:
12527 + service = NULL;
12528 + break;
12529 + }
12530 + if (service) {
12531 + VCHIQ_STATUS_T status =
12532 + vchiq_set_service_option(service->handle,
12533 + vchiq_option,
12534 + value);
12535 +
12536 + ret = vchiq_status_to_vchi(status);
12537 + }
12538 + return ret;
12539 +}
12540 +EXPORT_SYMBOL(vchi_service_set_option);
12541 +
12542 +int32_t vchi_get_peer_version( const VCHI_SERVICE_HANDLE_T handle, short *peer_version )
12543 +{
12544 + int32_t ret = -1;
12545 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
12546 + if(service)
12547 + {
12548 + VCHIQ_STATUS_T status = vchiq_get_peer_version(service->handle, peer_version);
12549 + ret = vchiq_status_to_vchi( status );
12550 + }
12551 + return ret;
12552 +}
12553 +EXPORT_SYMBOL(vchi_get_peer_version);
12554 +
12555 +/* ----------------------------------------------------------------------
12556 + * read a uint32_t from buffer.
12557 + * network format is defined to be little endian
12558 + * -------------------------------------------------------------------- */
12559 +uint32_t
12560 +vchi_readbuf_uint32(const void *_ptr)
12561 +{
12562 + const unsigned char *ptr = _ptr;
12563 + return ptr[0] | (ptr[1] << 8) | (ptr[2] << 16) | (ptr[3] << 24);
12564 +}
12565 +
12566 +/* ----------------------------------------------------------------------
12567 + * write a uint32_t to buffer.
12568 + * network format is defined to be little endian
12569 + * -------------------------------------------------------------------- */
12570 +void
12571 +vchi_writebuf_uint32(void *_ptr, uint32_t value)
12572 +{
12573 + unsigned char *ptr = _ptr;
12574 + ptr[0] = (unsigned char)((value >> 0) & 0xFF);
12575 + ptr[1] = (unsigned char)((value >> 8) & 0xFF);
12576 + ptr[2] = (unsigned char)((value >> 16) & 0xFF);
12577 + ptr[3] = (unsigned char)((value >> 24) & 0xFF);
12578 +}
12579 +
12580 +/* ----------------------------------------------------------------------
12581 + * read a uint16_t from buffer.
12582 + * network format is defined to be little endian
12583 + * -------------------------------------------------------------------- */
12584 +uint16_t
12585 +vchi_readbuf_uint16(const void *_ptr)
12586 +{
12587 + const unsigned char *ptr = _ptr;
12588 + return ptr[0] | (ptr[1] << 8);
12589 +}
12590 +
12591 +/* ----------------------------------------------------------------------
12592 + * write a uint16_t into the buffer.
12593 + * network format is defined to be little endian
12594 + * -------------------------------------------------------------------- */
12595 +void
12596 +vchi_writebuf_uint16(void *_ptr, uint16_t value)
12597 +{
12598 + unsigned char *ptr = _ptr;
12599 + ptr[0] = (value >> 0) & 0xFF;
12600 + ptr[1] = (value >> 8) & 0xFF;
12601 +}
12602 +
12603 +/***********************************************************
12604 + * Name: vchi_service_use
12605 + *
12606 + * Arguments: const VCHI_SERVICE_HANDLE_T handle
12607 + *
12608 + * Description: Routine to increment refcount on a service
12609 + *
12610 + * Returns: int32_t - success == 0
12611 + *
12612 + ***********************************************************/
12613 +int32_t vchi_service_use(const VCHI_SERVICE_HANDLE_T handle)
12614 +{
12615 + int32_t ret = -1;
12616 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
12617 + if (service)
12618 + ret = vchiq_status_to_vchi(vchiq_use_service(service->handle));
12619 + return ret;
12620 +}
12621 +EXPORT_SYMBOL(vchi_service_use);
12622 +
12623 +/***********************************************************
12624 + * Name: vchi_service_release
12625 + *
12626 + * Arguments: const VCHI_SERVICE_HANDLE_T handle
12627 + *
12628 + * Description: Routine to decrement refcount on a service
12629 + *
12630 + * Returns: int32_t - success == 0
12631 + *
12632 + ***********************************************************/
12633 +int32_t vchi_service_release(const VCHI_SERVICE_HANDLE_T handle)
12634 +{
12635 + int32_t ret = -1;
12636 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
12637 + if (service)
12638 + ret = vchiq_status_to_vchi(
12639 + vchiq_release_service(service->handle));
12640 + return ret;
12641 +}
12642 +EXPORT_SYMBOL(vchi_service_release);
12643 diff --git a/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.c b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.c
12644 new file mode 100644
12645 index 0000000..c2eefef
12646 --- /dev/null
12647 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.c
12648 @@ -0,0 +1,151 @@
12649 +/**
12650 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
12651 + *
12652 + * Redistribution and use in source and binary forms, with or without
12653 + * modification, are permitted provided that the following conditions
12654 + * are met:
12655 + * 1. Redistributions of source code must retain the above copyright
12656 + * notice, this list of conditions, and the following disclaimer,
12657 + * without modification.
12658 + * 2. Redistributions in binary form must reproduce the above copyright
12659 + * notice, this list of conditions and the following disclaimer in the
12660 + * documentation and/or other materials provided with the distribution.
12661 + * 3. The names of the above-listed copyright holders may not be used
12662 + * to endorse or promote products derived from this software without
12663 + * specific prior written permission.
12664 + *
12665 + * ALTERNATIVELY, this software may be distributed under the terms of the
12666 + * GNU General Public License ("GPL") version 2, as published by the Free
12667 + * Software Foundation.
12668 + *
12669 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
12670 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
12671 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
12672 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
12673 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
12674 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
12675 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
12676 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
12677 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
12678 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
12679 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
12680 + */
12681 +
12682 +#include "vchiq_util.h"
12683 +
12684 +static inline int is_pow2(int i)
12685 +{
12686 + return i && !(i & (i - 1));
12687 +}
12688 +
12689 +int vchiu_queue_init(VCHIU_QUEUE_T *queue, int size)
12690 +{
12691 + WARN_ON(!is_pow2(size));
12692 +
12693 + queue->size = size;
12694 + queue->read = 0;
12695 + queue->write = 0;
12696 +
12697 + sema_init(&queue->pop, 0);
12698 + sema_init(&queue->push, 0);
12699 +
12700 + queue->storage = kzalloc(size * sizeof(VCHIQ_HEADER_T *), GFP_KERNEL);
12701 + if (queue->storage == NULL) {
12702 + vchiu_queue_delete(queue);
12703 + return 0;
12704 + }
12705 + return 1;
12706 +}
12707 +
12708 +void vchiu_queue_delete(VCHIU_QUEUE_T *queue)
12709 +{
12710 + if (queue->storage != NULL)
12711 + kfree(queue->storage);
12712 +}
12713 +
12714 +int vchiu_queue_is_empty(VCHIU_QUEUE_T *queue)
12715 +{
12716 + return queue->read == queue->write;
12717 +}
12718 +
12719 +int vchiu_queue_is_full(VCHIU_QUEUE_T *queue)
12720 +{
12721 + return queue->write == queue->read + queue->size;
12722 +}
12723 +
12724 +void vchiu_queue_push(VCHIU_QUEUE_T *queue, VCHIQ_HEADER_T *header)
12725 +{
12726 + while (queue->write == queue->read + queue->size) {
12727 + if (down_interruptible(&queue->pop) != 0) {
12728 + flush_signals(current);
12729 + }
12730 + }
12731 +
12732 + /*
12733 + * Write to queue->storage must be visible after read from
12734 + * queue->read
12735 + */
12736 + smp_mb();
12737 +
12738 + queue->storage[queue->write & (queue->size - 1)] = header;
12739 +
12740 + /*
12741 + * Write to queue->storage must be visible before write to
12742 + * queue->write
12743 + */
12744 + smp_wmb();
12745 +
12746 + queue->write++;
12747 +
12748 + up(&queue->push);
12749 +}
12750 +
12751 +VCHIQ_HEADER_T *vchiu_queue_peek(VCHIU_QUEUE_T *queue)
12752 +{
12753 + while (queue->write == queue->read) {
12754 + if (down_interruptible(&queue->push) != 0) {
12755 + flush_signals(current);
12756 + }
12757 + }
12758 +
12759 + up(&queue->push); // We haven't removed anything from the queue.
12760 +
12761 + /*
12762 + * Read from queue->storage must be visible after read from
12763 + * queue->write
12764 + */
12765 + smp_rmb();
12766 +
12767 + return queue->storage[queue->read & (queue->size - 1)];
12768 +}
12769 +
12770 +VCHIQ_HEADER_T *vchiu_queue_pop(VCHIU_QUEUE_T *queue)
12771 +{
12772 + VCHIQ_HEADER_T *header;
12773 +
12774 + while (queue->write == queue->read) {
12775 + if (down_interruptible(&queue->push) != 0) {
12776 + flush_signals(current);
12777 + }
12778 + }
12779 +
12780 + /*
12781 + * Read from queue->storage must be visible after read from
12782 + * queue->write
12783 + */
12784 + smp_rmb();
12785 +
12786 + header = queue->storage[queue->read & (queue->size - 1)];
12787 +
12788 + /*
12789 + * Read from queue->storage must be visible before write to
12790 + * queue->read
12791 + */
12792 + smp_mb();
12793 +
12794 + queue->read++;
12795 +
12796 + up(&queue->pop);
12797 +
12798 + return header;
12799 +}
12800 diff --git a/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.h b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.h
12801 new file mode 100644
12802 index 0000000..f4d0b66
12803 --- /dev/null
12804 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.h
12805 @@ -0,0 +1,81 @@
12806 +/**
12807 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
12808 + *
12809 + * Redistribution and use in source and binary forms, with or without
12810 + * modification, are permitted provided that the following conditions
12811 + * are met:
12812 + * 1. Redistributions of source code must retain the above copyright
12813 + * notice, this list of conditions, and the following disclaimer,
12814 + * without modification.
12815 + * 2. Redistributions in binary form must reproduce the above copyright
12816 + * notice, this list of conditions and the following disclaimer in the
12817 + * documentation and/or other materials provided with the distribution.
12818 + * 3. The names of the above-listed copyright holders may not be used
12819 + * to endorse or promote products derived from this software without
12820 + * specific prior written permission.
12821 + *
12822 + * ALTERNATIVELY, this software may be distributed under the terms of the
12823 + * GNU General Public License ("GPL") version 2, as published by the Free
12824 + * Software Foundation.
12825 + *
12826 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
12827 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
12828 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
12829 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
12830 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
12831 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
12832 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
12833 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
12834 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
12835 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
12836 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
12837 + */
12838 +
12839 +#ifndef VCHIQ_UTIL_H
12840 +#define VCHIQ_UTIL_H
12841 +
12842 +#include <linux/types.h>
12843 +#include <linux/semaphore.h>
12844 +#include <linux/mutex.h>
12845 +#include <linux/bitops.h>
12846 +#include <linux/kthread.h>
12847 +#include <linux/wait.h>
12848 +#include <linux/vmalloc.h>
12849 +#include <linux/jiffies.h>
12850 +#include <linux/delay.h>
12851 +#include <linux/string.h>
12852 +#include <linux/types.h>
12853 +#include <linux/interrupt.h>
12854 +#include <linux/random.h>
12855 +#include <linux/sched.h>
12856 +#include <linux/ctype.h>
12857 +#include <linux/uaccess.h>
12858 +#include <linux/time.h> /* for time_t */
12859 +#include <linux/slab.h>
12860 +#include <linux/vmalloc.h>
12861 +
12862 +#include "vchiq_if.h"
12863 +
12864 +typedef struct {
12865 + int size;
12866 + int read;
12867 + int write;
12868 +
12869 + struct semaphore pop;
12870 + struct semaphore push;
12871 +
12872 + VCHIQ_HEADER_T **storage;
12873 +} VCHIU_QUEUE_T;
12874 +
12875 +extern int vchiu_queue_init(VCHIU_QUEUE_T *queue, int size);
12876 +extern void vchiu_queue_delete(VCHIU_QUEUE_T *queue);
12877 +
12878 +extern int vchiu_queue_is_empty(VCHIU_QUEUE_T *queue);
12879 +extern int vchiu_queue_is_full(VCHIU_QUEUE_T *queue);
12880 +
12881 +extern void vchiu_queue_push(VCHIU_QUEUE_T *queue, VCHIQ_HEADER_T *header);
12882 +
12883 +extern VCHIQ_HEADER_T *vchiu_queue_peek(VCHIU_QUEUE_T *queue);
12884 +extern VCHIQ_HEADER_T *vchiu_queue_pop(VCHIU_QUEUE_T *queue);
12885 +
12886 +#endif
12887 diff --git a/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_version.c b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_version.c
12888 new file mode 100644
12889 index 0000000..b6bfa21
12890 --- /dev/null
12891 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_version.c
12892 @@ -0,0 +1,59 @@
12893 +/**
12894 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
12895 + *
12896 + * Redistribution and use in source and binary forms, with or without
12897 + * modification, are permitted provided that the following conditions
12898 + * are met:
12899 + * 1. Redistributions of source code must retain the above copyright
12900 + * notice, this list of conditions, and the following disclaimer,
12901 + * without modification.
12902 + * 2. Redistributions in binary form must reproduce the above copyright
12903 + * notice, this list of conditions and the following disclaimer in the
12904 + * documentation and/or other materials provided with the distribution.
12905 + * 3. The names of the above-listed copyright holders may not be used
12906 + * to endorse or promote products derived from this software without
12907 + * specific prior written permission.
12908 + *
12909 + * ALTERNATIVELY, this software may be distributed under the terms of the
12910 + * GNU General Public License ("GPL") version 2, as published by the Free
12911 + * Software Foundation.
12912 + *
12913 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
12914 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
12915 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
12916 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
12917 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
12918 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
12919 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
12920 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
12921 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
12922 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
12923 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
12924 + */
12925 +#include "vchiq_build_info.h"
12926 +#include <linux/broadcom/vc_debug_sym.h>
12927 +
12928 +VC_DEBUG_DECLARE_STRING_VAR( vchiq_build_hostname, "dc4-arm-01" );
12929 +VC_DEBUG_DECLARE_STRING_VAR( vchiq_build_version, "9245b4c35b99b3870e1f7dc598c5692b3c66a6f0 (tainted)" );
12930 +VC_DEBUG_DECLARE_STRING_VAR( vchiq_build_time, __TIME__ );
12931 +VC_DEBUG_DECLARE_STRING_VAR( vchiq_build_date, __DATE__ );
12932 +
12933 +const char *vchiq_get_build_hostname( void )
12934 +{
12935 + return vchiq_build_hostname;
12936 +}
12937 +
12938 +const char *vchiq_get_build_version( void )
12939 +{
12940 + return vchiq_build_version;
12941 +}
12942 +
12943 +const char *vchiq_get_build_date( void )
12944 +{
12945 + return vchiq_build_date;
12946 +}
12947 +
12948 +const char *vchiq_get_build_time( void )
12949 +{
12950 + return vchiq_build_time;
12951 +}
12952 --
12953 1.8.3.2
12954