1 From 8edd8c6dbdb6a5288c7116ea6e9dc59705b15a6a Mon Sep 17 00:00:00 2001
2 From: popcornmix <popcornmix@gmail.com>
3 Date: Tue, 2 Jul 2013 23:42:01 +0100
4 Subject: [PATCH 005/196] bcm2708 vchiq driver
5
6 Signed-off-by: popcornmix <popcornmix@gmail.com>
7 ---
8 drivers/misc/Kconfig | 2 +
9 drivers/misc/Makefile | 1 +
10 drivers/misc/vc04_services/Kconfig | 10 +
11 drivers/misc/vc04_services/Makefile | 18 +
12 .../interface/vchi/connections/connection.h | 328 ++
13 .../interface/vchi/message_drivers/message.h | 204 ++
14 drivers/misc/vc04_services/interface/vchi/vchi.h | 373 ++
15 .../misc/vc04_services/interface/vchi/vchi_cfg.h | 224 ++
16 .../interface/vchi/vchi_cfg_internal.h | 71 +
17 .../vc04_services/interface/vchi/vchi_common.h | 163 +
18 .../misc/vc04_services/interface/vchi/vchi_mh.h | 42 +
19 .../misc/vc04_services/interface/vchiq_arm/vchiq.h | 41 +
20 .../vc04_services/interface/vchiq_arm/vchiq_2835.h | 42 +
21 .../interface/vchiq_arm/vchiq_2835_arm.c | 538 +++
22 .../vc04_services/interface/vchiq_arm/vchiq_arm.c | 2813 ++++++++++++++
23 .../vc04_services/interface/vchiq_arm/vchiq_arm.h | 212 ++
24 .../interface/vchiq_arm/vchiq_build_info.h | 37 +
25 .../vc04_services/interface/vchiq_arm/vchiq_cfg.h | 60 +
26 .../interface/vchiq_arm/vchiq_connected.c | 119 +
27 .../interface/vchiq_arm/vchiq_connected.h | 51 +
28 .../vc04_services/interface/vchiq_arm/vchiq_core.c | 3824 ++++++++++++++++++++
29 .../vc04_services/interface/vchiq_arm/vchiq_core.h | 706 ++++
30 .../interface/vchiq_arm/vchiq_genversion | 89 +
31 .../vc04_services/interface/vchiq_arm/vchiq_if.h | 188 +
32 .../interface/vchiq_arm/vchiq_ioctl.h | 129 +
33 .../interface/vchiq_arm/vchiq_kern_lib.c | 456 +++
34 .../interface/vchiq_arm/vchiq_memdrv.h | 71 +
35 .../interface/vchiq_arm/vchiq_pagelist.h | 58 +
36 .../vc04_services/interface/vchiq_arm/vchiq_proc.c | 254 ++
37 .../vc04_services/interface/vchiq_arm/vchiq_shim.c | 815 +++++
38 .../vc04_services/interface/vchiq_arm/vchiq_util.c | 151 +
39 .../vc04_services/interface/vchiq_arm/vchiq_util.h | 82 +
40 .../interface/vchiq_arm/vchiq_version.c | 59 +
41 33 files changed, 12231 insertions(+)
42 create mode 100644 drivers/misc/vc04_services/Kconfig
43 create mode 100644 drivers/misc/vc04_services/Makefile
44 create mode 100644 drivers/misc/vc04_services/interface/vchi/connections/connection.h
45 create mode 100644 drivers/misc/vc04_services/interface/vchi/message_drivers/message.h
46 create mode 100644 drivers/misc/vc04_services/interface/vchi/vchi.h
47 create mode 100644 drivers/misc/vc04_services/interface/vchi/vchi_cfg.h
48 create mode 100644 drivers/misc/vc04_services/interface/vchi/vchi_cfg_internal.h
49 create mode 100644 drivers/misc/vc04_services/interface/vchi/vchi_common.h
50 create mode 100644 drivers/misc/vc04_services/interface/vchi/vchi_mh.h
51 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq.h
52 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835.h
53 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
54 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.c
55 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.h
56 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_build_info.h
57 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_cfg.h
58 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.c
59 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.h
60 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.c
61 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.h
62 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_genversion
63 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_if.h
64 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_ioctl.h
65 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_kern_lib.c
66 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_memdrv.h
67 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_pagelist.h
68 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_proc.c
69 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_shim.c
70 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.c
71 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.h
72 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_version.c
73
74 diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
75 index c002d86..84d3100 100644
76 --- a/drivers/misc/Kconfig
77 +++ b/drivers/misc/Kconfig
78 @@ -536,4 +536,6 @@ source "drivers/misc/carma/Kconfig"
79 source "drivers/misc/altera-stapl/Kconfig"
80 source "drivers/misc/mei/Kconfig"
81 source "drivers/misc/vmw_vmci/Kconfig"
82 +source "drivers/misc/vc04_services/Kconfig"
83 endmenu
84 +
85 diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
86 index c235d5b..d72aaf2 100644
87 --- a/drivers/misc/Makefile
88 +++ b/drivers/misc/Makefile
89 @@ -53,3 +53,4 @@ obj-$(CONFIG_INTEL_MEI) += mei/
90 obj-$(CONFIG_VMWARE_VMCI) += vmw_vmci/
91 obj-$(CONFIG_LATTICE_ECP3_CONFIG) += lattice-ecp3-config.o
92 obj-$(CONFIG_SRAM) += sram.o
93 +obj-y += vc04_services/
94 diff --git a/drivers/misc/vc04_services/Kconfig b/drivers/misc/vc04_services/Kconfig
95 new file mode 100644
96 index 0000000..b48a3f3
97 --- /dev/null
98 +++ b/drivers/misc/vc04_services/Kconfig
99 @@ -0,0 +1,10 @@
100 +config BCM2708_VCHIQ
101 + tristate "Videocore VCHIQ"
102 + depends on MACH_BCM2708
103 + default y
104 + help
105 + Kernel to VideoCore communication interface for the
106 + BCM2708 family of products.
107 + Defaults to Y when the Broadcom Videocore services
108 + are included in the build, N otherwise.
109 +
110 diff --git a/drivers/misc/vc04_services/Makefile b/drivers/misc/vc04_services/Makefile
111 new file mode 100644
112 index 0000000..1aeb20a
113 --- /dev/null
114 +++ b/drivers/misc/vc04_services/Makefile
115 @@ -0,0 +1,18 @@
116 +ifeq ($(CONFIG_MACH_BCM2708),y)
117 +
118 +obj-$(CONFIG_BCM2708_VCHIQ) += vchiq.o
119 +
120 +vchiq-objs := \
121 + interface/vchiq_arm/vchiq_core.o \
122 + interface/vchiq_arm/vchiq_arm.o \
123 + interface/vchiq_arm/vchiq_kern_lib.o \
124 + interface/vchiq_arm/vchiq_2835_arm.o \
125 + interface/vchiq_arm/vchiq_proc.o \
126 + interface/vchiq_arm/vchiq_shim.o \
127 + interface/vchiq_arm/vchiq_util.o \
128 + interface/vchiq_arm/vchiq_connected.o \
129 +
130 +EXTRA_CFLAGS += -DVCOS_VERIFY_BKPTS=1 -Idrivers/misc/vc04_services -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000
131 +
132 +endif
133 +
134 diff --git a/drivers/misc/vc04_services/interface/vchi/connections/connection.h b/drivers/misc/vc04_services/interface/vchi/connections/connection.h
135 new file mode 100644
136 index 0000000..fef6ac3
137 --- /dev/null
138 +++ b/drivers/misc/vc04_services/interface/vchi/connections/connection.h
139 @@ -0,0 +1,328 @@
140 +/**
141 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
142 + *
143 + * Redistribution and use in source and binary forms, with or without
144 + * modification, are permitted provided that the following conditions
145 + * are met:
146 + * 1. Redistributions of source code must retain the above copyright
147 + * notice, this list of conditions, and the following disclaimer,
148 + * without modification.
149 + * 2. Redistributions in binary form must reproduce the above copyright
150 + * notice, this list of conditions and the following disclaimer in the
151 + * documentation and/or other materials provided with the distribution.
152 + * 3. The names of the above-listed copyright holders may not be used
153 + * to endorse or promote products derived from this software without
154 + * specific prior written permission.
155 + *
156 + * ALTERNATIVELY, this software may be distributed under the terms of the
157 + * GNU General Public License ("GPL") version 2, as published by the Free
158 + * Software Foundation.
159 + *
160 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
161 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
162 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
163 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
164 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
165 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
166 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
167 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
168 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
169 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
170 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
171 + */
172 +
173 +#ifndef CONNECTION_H_
174 +#define CONNECTION_H_
175 +
176 +#include <linux/kernel.h>
177 +#include <linux/types.h>
178 +#include <linux/semaphore.h>
179 +
180 +#include "interface/vchi/vchi_cfg_internal.h"
181 +#include "interface/vchi/vchi_common.h"
182 +#include "interface/vchi/message_drivers/message.h"
183 +
184 +/******************************************************************************
185 + Global defs
186 + *****************************************************************************/
187 +
188 +// Opaque handle for a connection / service pair
189 +typedef struct opaque_vchi_connection_connected_service_handle_t *VCHI_CONNECTION_SERVICE_HANDLE_T;
190 +
191 +// opaque handle to the connection state information
192 +typedef struct opaque_vchi_connection_info_t VCHI_CONNECTION_STATE_T;
193 +
194 +typedef struct vchi_connection_t VCHI_CONNECTION_T;
195 +
196 +
197 +/******************************************************************************
198 + API
199 + *****************************************************************************/
200 +
201 +// Routine to init a connection with a particular low level driver
202 +typedef VCHI_CONNECTION_STATE_T * (*VCHI_CONNECTION_INIT_T)( struct vchi_connection_t * connection,
203 + const VCHI_MESSAGE_DRIVER_T * driver );
204 +
205 +// Routine to control CRC enabling at a connection level
206 +typedef int32_t (*VCHI_CONNECTION_CRC_CONTROL_T)( VCHI_CONNECTION_STATE_T *state_handle,
207 + VCHI_CRC_CONTROL_T control );
208 +
209 +// Routine to create a service
210 +typedef int32_t (*VCHI_CONNECTION_SERVICE_CONNECT_T)( VCHI_CONNECTION_STATE_T *state_handle,
211 + int32_t service_id,
212 + uint32_t rx_fifo_size,
213 + uint32_t tx_fifo_size,
214 + int server,
215 + VCHI_CALLBACK_T callback,
216 + void *callback_param,
217 + int32_t want_crc,
218 + int32_t want_unaligned_bulk_rx,
219 + int32_t want_unaligned_bulk_tx,
220 + VCHI_CONNECTION_SERVICE_HANDLE_T *service_handle );
221 +
222 +// Routine to close a service
223 +typedef int32_t (*VCHI_CONNECTION_SERVICE_DISCONNECT_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle );
224 +
225 +// Routine to queue a message
226 +typedef int32_t (*VCHI_CONNECTION_SERVICE_QUEUE_MESSAGE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
227 + const void *data,
228 + uint32_t data_size,
229 + VCHI_FLAGS_T flags,
230 + void *msg_handle );
231 +
232 +// scatter-gather (vector) message queueing
233 +typedef int32_t (*VCHI_CONNECTION_SERVICE_QUEUE_MESSAGEV_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
234 + VCHI_MSG_VECTOR_T *vector,
235 + uint32_t count,
236 + VCHI_FLAGS_T flags,
237 + void *msg_handle );
238 +
239 +// Routine to dequeue a message
240 +typedef int32_t (*VCHI_CONNECTION_SERVICE_DEQUEUE_MESSAGE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
241 + void *data,
242 + uint32_t max_data_size_to_read,
243 + uint32_t *actual_msg_size,
244 + VCHI_FLAGS_T flags );
245 +
246 +// Routine to peek at a message
247 +typedef int32_t (*VCHI_CONNECTION_SERVICE_PEEK_MESSAGE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
248 + void **data,
249 + uint32_t *msg_size,
250 + VCHI_FLAGS_T flags );
251 +
252 +// Routine to hold a message
253 +typedef int32_t (*VCHI_CONNECTION_SERVICE_HOLD_MESSAGE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
254 + void **data,
255 + uint32_t *msg_size,
256 + VCHI_FLAGS_T flags,
257 + void **message_handle );
258 +
259 +// Routine to initialise a received message iterator
260 +typedef int32_t (*VCHI_CONNECTION_SERVICE_LOOKAHEAD_MESSAGE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
261 + VCHI_MSG_ITER_T *iter,
262 + VCHI_FLAGS_T flags );
263 +
264 +// Routine to release a held message
265 +typedef int32_t (*VCHI_CONNECTION_HELD_MSG_RELEASE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
266 + void *message_handle );
267 +
268 +// Routine to get info on a held message
269 +typedef int32_t (*VCHI_CONNECTION_HELD_MSG_INFO_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
270 + void *message_handle,
271 + void **data,
272 + int32_t *msg_size,
273 + uint32_t *tx_timestamp,
274 + uint32_t *rx_timestamp );
275 +
276 +// Routine to check whether the iterator has a next message
277 +typedef int32_t (*VCHI_CONNECTION_MSG_ITER_HAS_NEXT_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service,
278 + const VCHI_MSG_ITER_T *iter );
279 +
280 +// Routine to advance the iterator
281 +typedef int32_t (*VCHI_CONNECTION_MSG_ITER_NEXT_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service,
282 + VCHI_MSG_ITER_T *iter,
283 + void **data,
284 + uint32_t *msg_size );
285 +
286 +// Routine to remove the last message returned by the iterator
287 +typedef int32_t (*VCHI_CONNECTION_MSG_ITER_REMOVE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service,
288 + VCHI_MSG_ITER_T *iter );
289 +
290 +// Routine to hold the last message returned by the iterator
291 +typedef int32_t (*VCHI_CONNECTION_MSG_ITER_HOLD_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service,
292 + VCHI_MSG_ITER_T *iter,
293 + void **msg_handle );
294 +
295 +// Routine to transmit bulk data
296 +typedef int32_t (*VCHI_CONNECTION_BULK_QUEUE_TRANSMIT_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
297 + const void *data_src,
298 + uint32_t data_size,
299 + VCHI_FLAGS_T flags,
300 + void *bulk_handle );
301 +
302 +// Routine to receive data
303 +typedef int32_t (*VCHI_CONNECTION_BULK_QUEUE_RECEIVE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
304 + void *data_dst,
305 + uint32_t data_size,
306 + VCHI_FLAGS_T flags,
307 + void *bulk_handle );
308 +
309 +// Routine to report if a server is available
310 +typedef int32_t (*VCHI_CONNECTION_SERVER_PRESENT)( VCHI_CONNECTION_STATE_T *state, int32_t service_id, int32_t peer_flags );
311 +
312 +// Routine to report the number of RX slots available
313 +typedef int (*VCHI_CONNECTION_RX_SLOTS_AVAILABLE)( const VCHI_CONNECTION_STATE_T *state );
314 +
315 +// Routine to report the RX slot size
316 +typedef uint32_t (*VCHI_CONNECTION_RX_SLOT_SIZE)( const VCHI_CONNECTION_STATE_T *state );
317 +
318 +// Callback to indicate that the other side has added a buffer to the rx bulk DMA FIFO
319 +typedef void (*VCHI_CONNECTION_RX_BULK_BUFFER_ADDED)(VCHI_CONNECTION_STATE_T *state,
320 + int32_t service,
321 + uint32_t length,
322 + MESSAGE_TX_CHANNEL_T channel,
323 + uint32_t channel_params,
324 + uint32_t data_length,
325 + uint32_t data_offset);
326 +
327 +// Callback to inform a service that an Xon or Xoff message has been received
328 +typedef void (*VCHI_CONNECTION_FLOW_CONTROL)(VCHI_CONNECTION_STATE_T *state, int32_t service_id, int32_t xoff);
329 +
330 +// Callback to inform a service that a server available reply message has been received
331 +typedef void (*VCHI_CONNECTION_SERVER_AVAILABLE_REPLY)(VCHI_CONNECTION_STATE_T *state, int32_t service_id, uint32_t flags);
332 +
333 +// Callback to indicate that bulk auxiliary messages have arrived
334 +typedef void (*VCHI_CONNECTION_BULK_AUX_RECEIVED)(VCHI_CONNECTION_STATE_T *state);
335 +
336 +// Callback to indicate that a bulk auxiliary message has been transmitted
337 +typedef void (*VCHI_CONNECTION_BULK_AUX_TRANSMITTED)(VCHI_CONNECTION_STATE_T *state, void *handle);
338 +
339 +// Callback with all the connection info you require
340 +typedef void (*VCHI_CONNECTION_INFO)(VCHI_CONNECTION_STATE_T *state, uint32_t protocol_version, uint32_t slot_size, uint32_t num_slots, uint32_t min_bulk_size);
341 +
342 +// Callback to inform of a disconnect
343 +typedef void (*VCHI_CONNECTION_DISCONNECT)(VCHI_CONNECTION_STATE_T *state, uint32_t flags);
344 +
345 +// Callback to inform of a power control request
346 +typedef void (*VCHI_CONNECTION_POWER_CONTROL)(VCHI_CONNECTION_STATE_T *state, MESSAGE_TX_CHANNEL_T channel, int32_t enable);
347 +
348 +// allocate memory suitably aligned for this connection
349 +typedef void * (*VCHI_BUFFER_ALLOCATE)(VCHI_CONNECTION_SERVICE_HANDLE_T service_handle, uint32_t * length);
350 +
351 +// free memory allocated by buffer_allocate
352 +typedef void (*VCHI_BUFFER_FREE)(VCHI_CONNECTION_SERVICE_HANDLE_T service_handle, void * address);
353 +
354 +
355 +/******************************************************************************
356 + System driver struct
357 + *****************************************************************************/
358 +
359 +struct opaque_vchi_connection_api_t
360 +{
361 + // Routine to init the connection
362 + VCHI_CONNECTION_INIT_T init;
363 +
364 + // Connection-level CRC control
365 + VCHI_CONNECTION_CRC_CONTROL_T crc_control;
366 +
367 + // Routine to connect to or create service
368 + VCHI_CONNECTION_SERVICE_CONNECT_T service_connect;
369 +
370 + // Routine to disconnect from a service
371 + VCHI_CONNECTION_SERVICE_DISCONNECT_T service_disconnect;
372 +
373 + // Routine to queue a message
374 + VCHI_CONNECTION_SERVICE_QUEUE_MESSAGE_T service_queue_msg;
375 +
376 + // scatter-gather (vector) message queue
377 + VCHI_CONNECTION_SERVICE_QUEUE_MESSAGEV_T service_queue_msgv;
378 +
379 + // Routine to dequeue a message
380 + VCHI_CONNECTION_SERVICE_DEQUEUE_MESSAGE_T service_dequeue_msg;
381 +
382 + // Routine to peek at a message
383 + VCHI_CONNECTION_SERVICE_PEEK_MESSAGE_T service_peek_msg;
384 +
385 + // Routine to hold a message
386 + VCHI_CONNECTION_SERVICE_HOLD_MESSAGE_T service_hold_msg;
387 +
388 + // Routine to initialise a received message iterator
389 + VCHI_CONNECTION_SERVICE_LOOKAHEAD_MESSAGE_T service_look_ahead_msg;
390 +
391 + // Routine to release a message
392 + VCHI_CONNECTION_HELD_MSG_RELEASE_T held_msg_release;
393 +
394 + // Routine to get information on a held message
395 + VCHI_CONNECTION_HELD_MSG_INFO_T held_msg_info;
396 +
397 + // Routine to check for next message on iterator
398 + VCHI_CONNECTION_MSG_ITER_HAS_NEXT_T msg_iter_has_next;
399 +
400 + // Routine to get next message on iterator
401 + VCHI_CONNECTION_MSG_ITER_NEXT_T msg_iter_next;
402 +
403 + // Routine to remove the last message returned by iterator
404 + VCHI_CONNECTION_MSG_ITER_REMOVE_T msg_iter_remove;
405 +
406 + // Routine to hold the last message returned by iterator
407 + VCHI_CONNECTION_MSG_ITER_HOLD_T msg_iter_hold;
408 +
409 + // Routine to transmit bulk data
410 + VCHI_CONNECTION_BULK_QUEUE_TRANSMIT_T bulk_queue_transmit;
411 +
412 + // Routine to receive data
413 + VCHI_CONNECTION_BULK_QUEUE_RECEIVE_T bulk_queue_receive;
414 +
415 + // Routine to report the available servers
416 + VCHI_CONNECTION_SERVER_PRESENT server_present;
417 +
418 + // Routine to report the number of RX slots available
419 + VCHI_CONNECTION_RX_SLOTS_AVAILABLE connection_rx_slots_available;
420 +
421 + // Routine to report the RX slot size
422 + VCHI_CONNECTION_RX_SLOT_SIZE connection_rx_slot_size;
423 +
424 + // Callback to indicate that the other side has added a buffer to the rx bulk DMA FIFO
425 + VCHI_CONNECTION_RX_BULK_BUFFER_ADDED rx_bulk_buffer_added;
426 +
427 +  // Callback to inform a service that an Xon or Xoff message has been received
428 + VCHI_CONNECTION_FLOW_CONTROL flow_control;
429 +
430 + // Callback to inform a service that a server available reply message has been received
431 + VCHI_CONNECTION_SERVER_AVAILABLE_REPLY server_available_reply;
432 +
433 + // Callback to indicate that bulk auxiliary messages have arrived
434 + VCHI_CONNECTION_BULK_AUX_RECEIVED bulk_aux_received;
435 +
436 + // Callback to indicate that a bulk auxiliary message has been transmitted
437 + VCHI_CONNECTION_BULK_AUX_TRANSMITTED bulk_aux_transmitted;
438 +
439 + // Callback to provide information about the connection
440 + VCHI_CONNECTION_INFO connection_info;
441 +
442 + // Callback to notify that peer has requested disconnect
443 + VCHI_CONNECTION_DISCONNECT disconnect;
444 +
445 + // Callback to notify that peer has requested power change
446 + VCHI_CONNECTION_POWER_CONTROL power_control;
447 +
448 + // allocate memory suitably aligned for this connection
449 + VCHI_BUFFER_ALLOCATE buffer_allocate;
450 +
451 + // free memory allocated by buffer_allocate
452 + VCHI_BUFFER_FREE buffer_free;
453 +
454 +};
455 +
456 +struct vchi_connection_t {
457 + const VCHI_CONNECTION_API_T *api;
458 + VCHI_CONNECTION_STATE_T *state;
459 +#ifdef VCHI_COARSE_LOCKING
460 + struct semaphore sem;
461 +#endif
462 +};
463 +
464 +
465 +#endif /* CONNECTION_H_ */
466 +
467 +/****************************** End of file **********************************/
468 diff --git a/drivers/misc/vc04_services/interface/vchi/message_drivers/message.h b/drivers/misc/vc04_services/interface/vchi/message_drivers/message.h
469 new file mode 100644
470 index 0000000..8b3f767
471 --- /dev/null
472 +++ b/drivers/misc/vc04_services/interface/vchi/message_drivers/message.h
473 @@ -0,0 +1,204 @@
474 +/**
475 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
476 + *
477 + * Redistribution and use in source and binary forms, with or without
478 + * modification, are permitted provided that the following conditions
479 + * are met:
480 + * 1. Redistributions of source code must retain the above copyright
481 + * notice, this list of conditions, and the following disclaimer,
482 + * without modification.
483 + * 2. Redistributions in binary form must reproduce the above copyright
484 + * notice, this list of conditions and the following disclaimer in the
485 + * documentation and/or other materials provided with the distribution.
486 + * 3. The names of the above-listed copyright holders may not be used
487 + * to endorse or promote products derived from this software without
488 + * specific prior written permission.
489 + *
490 + * ALTERNATIVELY, this software may be distributed under the terms of the
491 + * GNU General Public License ("GPL") version 2, as published by the Free
492 + * Software Foundation.
493 + *
494 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
495 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
496 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
497 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
498 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
499 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
500 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
501 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
502 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
503 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
504 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
505 + */
506 +
507 +#ifndef _VCHI_MESSAGE_H_
508 +#define _VCHI_MESSAGE_H_
509 +
510 +#include <linux/kernel.h>
511 +#include <linux/types.h>
512 +#include <linux/semaphore.h>
513 +
514 +#include "interface/vchi/vchi_cfg_internal.h"
515 +#include "interface/vchi/vchi_common.h"
516 +
517 +
518 +typedef enum message_event_type {
519 + MESSAGE_EVENT_NONE,
520 + MESSAGE_EVENT_NOP,
521 + MESSAGE_EVENT_MESSAGE,
522 + MESSAGE_EVENT_SLOT_COMPLETE,
523 + MESSAGE_EVENT_RX_BULK_PAUSED,
524 + MESSAGE_EVENT_RX_BULK_COMPLETE,
525 + MESSAGE_EVENT_TX_COMPLETE,
526 + MESSAGE_EVENT_MSG_DISCARDED
527 +} MESSAGE_EVENT_TYPE_T;
528 +
529 +typedef enum vchi_msg_flags
530 +{
531 + VCHI_MSG_FLAGS_NONE = 0x0,
532 + VCHI_MSG_FLAGS_TERMINATE_DMA = 0x1
533 +} VCHI_MSG_FLAGS_T;
534 +
535 +typedef enum message_tx_channel
536 +{
537 + MESSAGE_TX_CHANNEL_MESSAGE = 0,
538 + MESSAGE_TX_CHANNEL_BULK = 1 // drivers may provide multiple bulk channels, from 1 upwards
539 +} MESSAGE_TX_CHANNEL_T;
540 +
541 +// Macros used for cycling through bulk channels
542 +#define MESSAGE_TX_CHANNEL_BULK_PREV(c) (MESSAGE_TX_CHANNEL_BULK+((c)-MESSAGE_TX_CHANNEL_BULK+VCHI_MAX_BULK_TX_CHANNELS_PER_CONNECTION-1)%VCHI_MAX_BULK_TX_CHANNELS_PER_CONNECTION)
543 +#define MESSAGE_TX_CHANNEL_BULK_NEXT(c) (MESSAGE_TX_CHANNEL_BULK+((c)-MESSAGE_TX_CHANNEL_BULK+1)%VCHI_MAX_BULK_TX_CHANNELS_PER_CONNECTION)
544 +
545 +typedef enum message_rx_channel
546 +{
547 + MESSAGE_RX_CHANNEL_MESSAGE = 0,
548 + MESSAGE_RX_CHANNEL_BULK = 1 // drivers may provide multiple bulk channels, from 1 upwards
549 +} MESSAGE_RX_CHANNEL_T;
550 +
551 +// Message receive slot information
552 +typedef struct rx_msg_slot_info {
553 +
554 + struct rx_msg_slot_info *next;
555 + //struct slot_info *prev;
556 +#if !defined VCHI_COARSE_LOCKING
557 + struct semaphore sem;
558 +#endif
559 +
560 + uint8_t *addr; // base address of slot
561 + uint32_t len; // length of slot in bytes
562 +
563 + uint32_t write_ptr; // hardware causes this to advance
564 + uint32_t read_ptr; // this module does the reading
565 + int active; // is this slot in the hardware dma fifo?
566 + uint32_t msgs_parsed; // count how many messages are in this slot
567 + uint32_t msgs_released; // how many messages have been released
568 + void *state; // connection state information
569 + uint8_t ref_count[VCHI_MAX_SERVICES_PER_CONNECTION]; // reference count for slots held by services
570 +} RX_MSG_SLOTINFO_T;
571 +
572 +// The message driver no longer needs to know about the fields of RX_BULK_SLOTINFO_T - sort this out.
573 +// In particular, it mustn't use addr and len - they're the client buffer, but the message
574 +// driver will be tasked with sending the aligned core section.
575 +typedef struct rx_bulk_slotinfo_t {
576 + struct rx_bulk_slotinfo_t *next;
577 +
578 + struct semaphore *blocking;
579 +
580 + // needed by DMA
581 + void *addr;
582 + uint32_t len;
583 +
584 + // needed for the callback
585 + void *service;
586 + void *handle;
587 + VCHI_FLAGS_T flags;
588 +} RX_BULK_SLOTINFO_T;
589 +
590 +
591 +/* ----------------------------------------------------------------------
592 + * each connection driver will have a pool of the following struct.
593 + *
594 + * the pool will be managed by vchi_qman_*
595 + * this means there will be multiple queues (single linked lists)
596 + * a given struct message_info will be on exactly one of these queues
597 + * at any one time
598 + * -------------------------------------------------------------------- */
599 +typedef struct rx_message_info {
600 +
601 + struct message_info *next;
602 + //struct message_info *prev;
603 +
604 + uint8_t *addr;
605 + uint32_t len;
606 + RX_MSG_SLOTINFO_T *slot; // points to whichever slot contains this message
607 + uint32_t tx_timestamp;
608 + uint32_t rx_timestamp;
609 +
610 +} RX_MESSAGE_INFO_T;
611 +
612 +typedef struct {
613 + MESSAGE_EVENT_TYPE_T type;
614 +
615 + struct {
616 + // for messages
617 + void *addr; // address of message
618 + uint16_t slot_delta; // whether this message indicated slot delta
619 + uint32_t len; // length of message
620 + RX_MSG_SLOTINFO_T *slot; // slot this message is in
621 + int32_t service; // service id this message is destined for
622 + uint32_t tx_timestamp; // timestamp from the header
623 + uint32_t rx_timestamp; // timestamp when we parsed it
624 + } message;
625 +
626 + // FIXME: cleanup slot reporting...
627 + RX_MSG_SLOTINFO_T *rx_msg;
628 + RX_BULK_SLOTINFO_T *rx_bulk;
629 + void *tx_handle;
630 + MESSAGE_TX_CHANNEL_T tx_channel;
631 +
632 +} MESSAGE_EVENT_T;
633 +
634 +
635 +// callbacks
636 +typedef void VCHI_MESSAGE_DRIVER_EVENT_CALLBACK_T( void *state );
637 +
638 +typedef struct {
639 + VCHI_MESSAGE_DRIVER_EVENT_CALLBACK_T *event_callback;
640 +} VCHI_MESSAGE_DRIVER_OPEN_T;
641 +
642 +
643 +// handle to this instance of message driver (as returned by ->open)
644 +typedef struct opaque_mhandle_t *VCHI_MDRIVER_HANDLE_T;
645 +
646 +struct opaque_vchi_message_driver_t {
647 + VCHI_MDRIVER_HANDLE_T *(*open)( VCHI_MESSAGE_DRIVER_OPEN_T *params, void *state );
648 + int32_t (*suspending)( VCHI_MDRIVER_HANDLE_T *handle );
649 + int32_t (*resumed)( VCHI_MDRIVER_HANDLE_T *handle );
650 + int32_t (*power_control)( VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T, int32_t enable );
651 + int32_t (*add_msg_rx_slot)( VCHI_MDRIVER_HANDLE_T *handle, RX_MSG_SLOTINFO_T *slot ); // rx message
652 + int32_t (*add_bulk_rx)( VCHI_MDRIVER_HANDLE_T *handle, void *data, uint32_t len, RX_BULK_SLOTINFO_T *slot ); // rx data (bulk)
653 + int32_t (*send)( VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T channel, const void *data, uint32_t len, VCHI_MSG_FLAGS_T flags, void *send_handle ); // tx (message & bulk)
654 + void (*next_event)( VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_EVENT_T *event ); // get the next event from message_driver
655 + int32_t (*enable)( VCHI_MDRIVER_HANDLE_T *handle );
656 + int32_t (*form_message)( VCHI_MDRIVER_HANDLE_T *handle, int32_t service_id, VCHI_MSG_VECTOR_T *vector, uint32_t count, void
657 + *address, uint32_t length_avail, uint32_t max_total_length, int32_t pad_to_fill, int32_t allow_partial );
658 +
659 + int32_t (*update_message)( VCHI_MDRIVER_HANDLE_T *handle, void *dest, int16_t *slot_count );
660 + int32_t (*buffer_aligned)( VCHI_MDRIVER_HANDLE_T *handle, int tx, int uncached, const void *address, const uint32_t length );
661 + void * (*allocate_buffer)( VCHI_MDRIVER_HANDLE_T *handle, uint32_t *length );
662 + void (*free_buffer)( VCHI_MDRIVER_HANDLE_T *handle, void *address );
663 + int (*rx_slot_size)( VCHI_MDRIVER_HANDLE_T *handle, int msg_size );
664 + int (*tx_slot_size)( VCHI_MDRIVER_HANDLE_T *handle, int msg_size );
665 +
666 + int32_t (*tx_supports_terminate)( const VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T channel );
667 + uint32_t (*tx_bulk_chunk_size)( const VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T channel );
668 + int (*tx_alignment)( const VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T channel );
669 + int (*rx_alignment)( const VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_RX_CHANNEL_T channel );
670 + void (*form_bulk_aux)( VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T channel, const void *data, uint32_t len, uint32_t chunk_size, const void **aux_data, int32_t *aux_len );
671 + void (*debug)( VCHI_MDRIVER_HANDLE_T *handle );
672 +};
673 +
674 +
675 +#endif // _VCHI_MESSAGE_H_
676 +
677 +/****************************** End of file ***********************************/
678 diff --git a/drivers/misc/vc04_services/interface/vchi/vchi.h b/drivers/misc/vc04_services/interface/vchi/vchi.h
679 new file mode 100644
680 index 0000000..cee5291
681 --- /dev/null
682 +++ b/drivers/misc/vc04_services/interface/vchi/vchi.h
683 @@ -0,0 +1,373 @@
684 +/**
685 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
686 + *
687 + * Redistribution and use in source and binary forms, with or without
688 + * modification, are permitted provided that the following conditions
689 + * are met:
690 + * 1. Redistributions of source code must retain the above copyright
691 + * notice, this list of conditions, and the following disclaimer,
692 + * without modification.
693 + * 2. Redistributions in binary form must reproduce the above copyright
694 + * notice, this list of conditions and the following disclaimer in the
695 + * documentation and/or other materials provided with the distribution.
696 + * 3. The names of the above-listed copyright holders may not be used
697 + * to endorse or promote products derived from this software without
698 + * specific prior written permission.
699 + *
700 + * ALTERNATIVELY, this software may be distributed under the terms of the
701 + * GNU General Public License ("GPL") version 2, as published by the Free
702 + * Software Foundation.
703 + *
704 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
705 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
706 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
707 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
708 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
709 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
710 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
711 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
712 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
713 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
714 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
715 + */
716 +
717 +#ifndef VCHI_H_
718 +#define VCHI_H_
719 +
720 +#include "interface/vchi/vchi_cfg.h"
721 +#include "interface/vchi/vchi_common.h"
722 +#include "interface/vchi/connections/connection.h"
723 +#include "vchi_mh.h"
724 +
725 +
726 +/******************************************************************************
727 + Global defs
728 + *****************************************************************************/
729 +
730 +#define VCHI_BULK_ROUND_UP(x) ((((unsigned long)(x))+VCHI_BULK_ALIGN-1) & ~(VCHI_BULK_ALIGN-1))
731 +#define VCHI_BULK_ROUND_DOWN(x) (((unsigned long)(x)) & ~(VCHI_BULK_ALIGN-1))
732 +#define VCHI_BULK_ALIGN_NBYTES(x) (VCHI_BULK_ALIGNED(x) ? 0 : (VCHI_BULK_ALIGN - ((unsigned long)(x) & (VCHI_BULK_ALIGN-1))))
733 +
734 +#ifdef USE_VCHIQ_ARM
735 +#define VCHI_BULK_ALIGNED(x) 1
736 +#else
737 +#define VCHI_BULK_ALIGNED(x) (((unsigned long)(x) & (VCHI_BULK_ALIGN-1)) == 0)
738 +#endif
739 +
740 +struct vchi_version {
741 + uint32_t version;
742 + uint32_t version_min;
743 +};
744 +#define VCHI_VERSION(v_) { v_, v_ }
745 +#define VCHI_VERSION_EX(v_, m_) { v_, m_ }
746 +
747 +typedef enum
748 +{
749 + VCHI_VEC_POINTER,
750 + VCHI_VEC_HANDLE,
751 + VCHI_VEC_LIST
752 +} VCHI_MSG_VECTOR_TYPE_T;
753 +
754 +typedef struct vchi_msg_vector_ex {
755 +
756 + VCHI_MSG_VECTOR_TYPE_T type;
757 + union
758 + {
759 + // a memory handle
760 + struct
761 + {
762 + VCHI_MEM_HANDLE_T handle;
763 + uint32_t offset;
764 + int32_t vec_len;
765 + } handle;
766 +
767 + // an ordinary data pointer
768 + struct
769 + {
770 + const void *vec_base;
771 + int32_t vec_len;
772 + } ptr;
773 +
774 + // a nested vector list
775 + struct
776 + {
777 + struct vchi_msg_vector_ex *vec;
778 + uint32_t vec_len;
779 + } list;
780 + } u;
781 +} VCHI_MSG_VECTOR_EX_T;
782 +
783 +
784 +// Construct an entry in a msg vector for a pointer (p) of length (l)
785 +#define VCHI_VEC_POINTER(p,l) VCHI_VEC_POINTER, { { (VCHI_MEM_HANDLE_T)(p), (l) } }
786 +
787 +// Construct an entry in a msg vector for a message handle (h), starting at offset (o) of length (l)
788 +#define VCHI_VEC_HANDLE(h,o,l) VCHI_VEC_HANDLE, { { (h), (o), (l) } }
789 +
790 +// Macros to manipulate 'FOURCC' values
791 +#define MAKE_FOURCC(x) ((int32_t)( (x[0] << 24) | (x[1] << 16) | (x[2] << 8) | x[3] ))
792 +#define FOURCC_TO_CHAR(x) (x >> 24) & 0xFF,(x >> 16) & 0xFF,(x >> 8) & 0xFF, x & 0xFF
793 +
794 +
795 +// Opaque service information
796 +struct opaque_vchi_service_t;
797 +
798 +// Descriptor for a held message. Allocated by client, initialised by vchi_msg_hold,
799 +// vchi_msg_iter_hold or vchi_msg_iter_hold_next. Fields are for internal VCHI use only.
800 +typedef struct
801 +{
802 + struct opaque_vchi_service_t *service;
803 + void *message;
804 +} VCHI_HELD_MSG_T;
805 +
806 +
807 +
808 +// structure used to provide the information needed to open a server or a client
809 +typedef struct {
810 + struct vchi_version version;
811 + int32_t service_id;
812 + VCHI_CONNECTION_T *connection;
813 + uint32_t rx_fifo_size;
814 + uint32_t tx_fifo_size;
815 + VCHI_CALLBACK_T callback;
816 + void *callback_param;
817 + /* client intends to receive bulk transfers of
818 + odd lengths or into unaligned buffers */
819 + int32_t want_unaligned_bulk_rx;
820 + /* client intends to transmit bulk transfers of
821 + odd lengths or out of unaligned buffers */
822 + int32_t want_unaligned_bulk_tx;
823 + /* client wants to check CRCs on (bulk) xfers.
824 + Only needs to be set at 1 end - will do both directions. */
825 + int32_t want_crc;
826 +} SERVICE_CREATION_T;
827 +
828 +// Opaque handle for a VCHI instance
829 +typedef struct opaque_vchi_instance_handle_t *VCHI_INSTANCE_T;
830 +
831 +// Opaque handle for a server or client
832 +typedef struct opaque_vchi_service_handle_t *VCHI_SERVICE_HANDLE_T;
833 +
834 +// Service registration & startup
835 +typedef void (*VCHI_SERVICE_INIT)(VCHI_INSTANCE_T initialise_instance, VCHI_CONNECTION_T **connections, uint32_t num_connections);
836 +
837 +typedef struct service_info_tag {
838 + const char * const vll_filename; /* VLL to load to start this service. This is an empty string if VLL is "static" */
839 + VCHI_SERVICE_INIT init; /* Service initialisation function */
840 + void *vll_handle; /* VLL handle; NULL when unloaded or a "static VLL" in build */
841 +} SERVICE_INFO_T;
842 +
843 +/******************************************************************************
844 + Global funcs - implementation is specific to which side you are on (local / remote)
845 + *****************************************************************************/
846 +
847 +#ifdef __cplusplus
848 +extern "C" {
849 +#endif
850 +
851 +extern /*@observer@*/ VCHI_CONNECTION_T * vchi_create_connection( const VCHI_CONNECTION_API_T * function_table,
852 + const VCHI_MESSAGE_DRIVER_T * low_level);
853 +
854 +
855 +// Routine used to initialise the vchi on both local + remote connections
856 +extern int32_t vchi_initialise( VCHI_INSTANCE_T *instance_handle );
857 +
858 +extern int32_t vchi_exit( void );
859 +
860 +extern int32_t vchi_connect( VCHI_CONNECTION_T **connections,
861 + const uint32_t num_connections,
862 + VCHI_INSTANCE_T instance_handle );
863 +
864 +//When this is called, ensure that all services have no data pending.
865 +//Bulk transfers can remain 'queued'
866 +extern int32_t vchi_disconnect( VCHI_INSTANCE_T instance_handle );
867 +
868 +// Global control over bulk CRC checking
869 +extern int32_t vchi_crc_control( VCHI_CONNECTION_T *connection,
870 + VCHI_CRC_CONTROL_T control );
871 +
872 +// helper functions
873 +extern void * vchi_allocate_buffer(VCHI_SERVICE_HANDLE_T handle, uint32_t *length);
874 +extern void vchi_free_buffer(VCHI_SERVICE_HANDLE_T handle, void *address);
875 +extern uint32_t vchi_current_time(VCHI_INSTANCE_T instance_handle);
876 +
877 +
878 +/******************************************************************************
879 + Global service API
880 + *****************************************************************************/
881 +// Routine to create a named service
882 +extern int32_t vchi_service_create( VCHI_INSTANCE_T instance_handle,
883 + SERVICE_CREATION_T *setup,
884 + VCHI_SERVICE_HANDLE_T *handle );
885 +
886 +// Routine to destroy a service
887 +extern int32_t vchi_service_destroy( const VCHI_SERVICE_HANDLE_T handle );
888 +
889 +// Routine to open a named service
890 +extern int32_t vchi_service_open( VCHI_INSTANCE_T instance_handle,
891 + SERVICE_CREATION_T *setup,
892 + VCHI_SERVICE_HANDLE_T *handle);
893 +
894 +extern int32_t vchi_get_peer_version( const VCHI_SERVICE_HANDLE_T handle,
895 + short *peer_version );
896 +
897 +// Routine to close a named service
898 +extern int32_t vchi_service_close( const VCHI_SERVICE_HANDLE_T handle );
899 +
900 +// Routine to increment ref count on a named service
901 +extern int32_t vchi_service_use( const VCHI_SERVICE_HANDLE_T handle );
902 +
903 +// Routine to decrement ref count on a named service
904 +extern int32_t vchi_service_release( const VCHI_SERVICE_HANDLE_T handle );
905 +
906 +// Routine to send a message across a service
907 +extern int32_t vchi_msg_queue( VCHI_SERVICE_HANDLE_T handle,
908 + const void *data,
909 + uint32_t data_size,
910 + VCHI_FLAGS_T flags,
911 + void *msg_handle );
912 +
913 +// scatter-gather (vector) and send message
914 +int32_t vchi_msg_queuev_ex( VCHI_SERVICE_HANDLE_T handle,
915 + VCHI_MSG_VECTOR_EX_T *vector,
916 + uint32_t count,
917 + VCHI_FLAGS_T flags,
918 + void *msg_handle );
919 +
920 +// legacy scatter-gather (vector) and send message, only handles pointers
921 +int32_t vchi_msg_queuev( VCHI_SERVICE_HANDLE_T handle,
922 + VCHI_MSG_VECTOR_T *vector,
923 + uint32_t count,
924 + VCHI_FLAGS_T flags,
925 + void *msg_handle );
926 +
927 +// Routine to receive a msg from a service
928 +// Dequeue is equivalent to hold, copy into client buffer, release
929 +extern int32_t vchi_msg_dequeue( VCHI_SERVICE_HANDLE_T handle,
930 + void *data,
931 + uint32_t max_data_size_to_read,
932 + uint32_t *actual_msg_size,
933 + VCHI_FLAGS_T flags );
934 +
935 +// Routine to look at a message in place.
936 +// The message is not dequeued, so a subsequent call to peek or dequeue
937 +// will return the same message.
938 +extern int32_t vchi_msg_peek( VCHI_SERVICE_HANDLE_T handle,
939 + void **data,
940 + uint32_t *msg_size,
941 + VCHI_FLAGS_T flags );
942 +
943 +// Routine to remove a message after it has been read in place with peek
944 +// The first message on the queue is dequeued.
945 +extern int32_t vchi_msg_remove( VCHI_SERVICE_HANDLE_T handle );
946 +
947 +// Routine to look at a message in place.
948 +// The message is dequeued, so the caller is left holding it; the descriptor is
949 +// filled in and must be released when the user has finished with the message.
950 +extern int32_t vchi_msg_hold( VCHI_SERVICE_HANDLE_T handle,
951 + void **data, // } may be NULL, as info can be
952 + uint32_t *msg_size, // } obtained from HELD_MSG_T
953 + VCHI_FLAGS_T flags,
954 + VCHI_HELD_MSG_T *message_descriptor );
955 +
956 +// Initialise an iterator to look through messages in place
957 +extern int32_t vchi_msg_look_ahead( VCHI_SERVICE_HANDLE_T handle,
958 + VCHI_MSG_ITER_T *iter,
959 + VCHI_FLAGS_T flags );
960 +
961 +/******************************************************************************
962 + Global service support API - operations on held messages and message iterators
963 + *****************************************************************************/
964 +
965 +// Routine to get the address of a held message
966 +extern void *vchi_held_msg_ptr( const VCHI_HELD_MSG_T *message );
967 +
968 +// Routine to get the size of a held message
969 +extern int32_t vchi_held_msg_size( const VCHI_HELD_MSG_T *message );
970 +
971 +// Routine to get the transmit timestamp as written into the header by the peer
972 +extern uint32_t vchi_held_msg_tx_timestamp( const VCHI_HELD_MSG_T *message );
973 +
974 +// Routine to get the reception timestamp, written as we parsed the header
975 +extern uint32_t vchi_held_msg_rx_timestamp( const VCHI_HELD_MSG_T *message );
976 +
977 +// Routine to release a held message after it has been processed
978 +extern int32_t vchi_held_msg_release( VCHI_HELD_MSG_T *message );
979 +
980 +// Indicates whether the iterator has a next message.
981 +extern int32_t vchi_msg_iter_has_next( const VCHI_MSG_ITER_T *iter );
982 +
983 +// Return the pointer and length for the next message and advance the iterator.
984 +extern int32_t vchi_msg_iter_next( VCHI_MSG_ITER_T *iter,
985 + void **data,
986 + uint32_t *msg_size );
987 +
988 +// Remove the last message returned by vchi_msg_iter_next.
989 +// Can only be called once after each call to vchi_msg_iter_next.
990 +extern int32_t vchi_msg_iter_remove( VCHI_MSG_ITER_T *iter );
991 +
992 +// Hold the last message returned by vchi_msg_iter_next.
993 +// Can only be called once after each call to vchi_msg_iter_next.
994 +extern int32_t vchi_msg_iter_hold( VCHI_MSG_ITER_T *iter,
995 + VCHI_HELD_MSG_T *message );
996 +
997 +// Return information for the next message, and hold it, advancing the iterator.
998 +extern int32_t vchi_msg_iter_hold_next( VCHI_MSG_ITER_T *iter,
999 + void **data, // } may be NULL
1000 + uint32_t *msg_size, // }
1001 + VCHI_HELD_MSG_T *message );
1002 +
1003 +
1004 +/******************************************************************************
1005 + Global bulk API
1006 + *****************************************************************************/
1007 +
1008 +// Routine to prepare interface for a transfer from the other side
1009 +extern int32_t vchi_bulk_queue_receive( VCHI_SERVICE_HANDLE_T handle,
1010 + void *data_dst,
1011 + uint32_t data_size,
1012 + VCHI_FLAGS_T flags,
1013 + void *transfer_handle );
1014 +
1015 +
1016 +// Prepare interface for a transfer from the other side into relocatable memory.
1017 +int32_t vchi_bulk_queue_receive_reloc( const VCHI_SERVICE_HANDLE_T handle,
1018 + VCHI_MEM_HANDLE_T h_dst,
1019 + uint32_t offset,
1020 + uint32_t data_size,
1021 + const VCHI_FLAGS_T flags,
1022 + void * const bulk_handle );
1023 +
1024 +// Routine to queue up data ready for transfer to the other side (once it has signalled it is ready)
1025 +extern int32_t vchi_bulk_queue_transmit( VCHI_SERVICE_HANDLE_T handle,
1026 + const void *data_src,
1027 + uint32_t data_size,
1028 + VCHI_FLAGS_T flags,
1029 + void *transfer_handle );
1030 +
1031 +
1032 +/******************************************************************************
1033 + Configuration plumbing
1034 + *****************************************************************************/
1035 +
1036 +// function prototypes for the different mid layers (the state info gives the different physical connections)
1037 +extern const VCHI_CONNECTION_API_T *single_get_func_table( void );
1038 +//extern const VCHI_CONNECTION_API_T *local_server_get_func_table( void );
1039 +//extern const VCHI_CONNECTION_API_T *local_client_get_func_table( void );
1040 +
1041 +// declare all message drivers here
1042 +const VCHI_MESSAGE_DRIVER_T *vchi_mphi_message_driver_func_table( void );
1043 +
1044 +#ifdef __cplusplus
1045 +}
1046 +#endif
1047 +
1048 +extern int32_t vchi_bulk_queue_transmit_reloc( VCHI_SERVICE_HANDLE_T handle,
1049 + VCHI_MEM_HANDLE_T h_src,
1050 + uint32_t offset,
1051 + uint32_t data_size,
1052 + VCHI_FLAGS_T flags,
1053 + void *transfer_handle );
1054 +#endif /* VCHI_H_ */
1055 +
1056 +/****************************** End of file **********************************/
1057 diff --git a/drivers/misc/vc04_services/interface/vchi/vchi_cfg.h b/drivers/misc/vc04_services/interface/vchi/vchi_cfg.h
1058 new file mode 100644
1059 index 0000000..26bc2d3
1060 --- /dev/null
1061 +++ b/drivers/misc/vc04_services/interface/vchi/vchi_cfg.h
1062 @@ -0,0 +1,224 @@
1063 +/**
1064 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
1065 + *
1066 + * Redistribution and use in source and binary forms, with or without
1067 + * modification, are permitted provided that the following conditions
1068 + * are met:
1069 + * 1. Redistributions of source code must retain the above copyright
1070 + * notice, this list of conditions, and the following disclaimer,
1071 + * without modification.
1072 + * 2. Redistributions in binary form must reproduce the above copyright
1073 + * notice, this list of conditions and the following disclaimer in the
1074 + * documentation and/or other materials provided with the distribution.
1075 + * 3. The names of the above-listed copyright holders may not be used
1076 + * to endorse or promote products derived from this software without
1077 + * specific prior written permission.
1078 + *
1079 + * ALTERNATIVELY, this software may be distributed under the terms of the
1080 + * GNU General Public License ("GPL") version 2, as published by the Free
1081 + * Software Foundation.
1082 + *
1083 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
1084 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
1085 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
1086 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
1087 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
1088 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
1089 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
1090 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
1091 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
1092 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1093 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1094 + */
1095 +
1096 +#ifndef VCHI_CFG_H_
1097 +#define VCHI_CFG_H_
1098 +
1099 +/****************************************************************************************
1100 + * Defines in this first section are part of the VCHI API and may be examined by VCHI
1101 + * services.
1102 + ***************************************************************************************/
1103 +
1104 +/* Required alignment of base addresses for bulk transfer, if unaligned transfers are not enabled */
1105 +/* Really determined by the message driver, and should be available from a run-time call. */
1106 +#ifndef VCHI_BULK_ALIGN
1107 +# if __VCCOREVER__ >= 0x04000000
1108 +# define VCHI_BULK_ALIGN 32 // Allows for the need to do cache cleans
1109 +# else
1110 +# define VCHI_BULK_ALIGN 16
1111 +# endif
1112 +#endif
1113 +
1114 +/* Required length multiple for bulk transfers, if unaligned transfers are not enabled */
1115 +/* May be less than or greater than VCHI_BULK_ALIGN */
1116 +/* Really determined by the message driver, and should be available from a run-time call. */
1117 +#ifndef VCHI_BULK_GRANULARITY
1118 +# if __VCCOREVER__ >= 0x04000000
1119 +# define VCHI_BULK_GRANULARITY 32 // Allows for the need to do cache cleans
1120 +# else
1121 +# define VCHI_BULK_GRANULARITY 16
1122 +# endif
1123 +#endif
1124 +
1125 +/* The largest possible message to be queued with vchi_msg_queue. */
1126 +#ifndef VCHI_MAX_MSG_SIZE
1127 +# if defined VCHI_LOCAL_HOST_PORT
1128 +# define VCHI_MAX_MSG_SIZE 16384 // makes file transfers fast, but should they be using bulk?
1129 +# else
1130 +# define VCHI_MAX_MSG_SIZE 4096 // NOTE: THIS MUST BE LARGER THAN OR EQUAL TO THE SIZE OF THE KHRONOS MERGE BUFFER!!
1131 +# endif
1132 +#endif
1133 +
1134 +/******************************************************************************************
1135 + * Defines below are system configuration options, and should not be used by VCHI services.
1136 + *****************************************************************************************/
1137 +
1138 +/* How many connections can we support? A localhost implementation uses 2 connections,
1139 + * 1 for host-app, 1 for VMCS, and these are hooked together by a loopback MPHI VCFW
1140 + * driver. */
1141 +#ifndef VCHI_MAX_NUM_CONNECTIONS
1142 +# define VCHI_MAX_NUM_CONNECTIONS 3
1143 +#endif
1144 +
1145 +/* How many services can we open per connection? Extending this doesn't cost processing time, just a small
1146 + * amount of static memory. */
1147 +#ifndef VCHI_MAX_SERVICES_PER_CONNECTION
1148 +# define VCHI_MAX_SERVICES_PER_CONNECTION 36
1149 +#endif
1150 +
1151 +/* Adjust if using a message driver that supports more logical TX channels */
1152 +#ifndef VCHI_MAX_BULK_TX_CHANNELS_PER_CONNECTION
1153 +# define VCHI_MAX_BULK_TX_CHANNELS_PER_CONNECTION 9 // 1 MPHI + 8 CCP2 logical channels
1154 +#endif
1155 +
1156 +/* Adjust if using a message driver that supports more logical RX channels */
1157 +#ifndef VCHI_MAX_BULK_RX_CHANNELS_PER_CONNECTION
1158 +# define VCHI_MAX_BULK_RX_CHANNELS_PER_CONNECTION 1 // 1 MPHI
1159 +#endif
1160 +
1161 +/* How many receive slots do we use. This times VCHI_MAX_MSG_SIZE gives the effective
1162 + * receive queue space, less message headers. */
1163 +#ifndef VCHI_NUM_READ_SLOTS
1164 +# if defined(VCHI_LOCAL_HOST_PORT)
1165 +# define VCHI_NUM_READ_SLOTS 4
1166 +# else
1167 +# define VCHI_NUM_READ_SLOTS 48
1168 +# endif
1169 +#endif
1170 +
1171 +/* Do we utilise overrun facility for receive message slots? Can aid peer transmit
1172 + * performance. Only define on VideoCore end, talking to host.
1173 + */
1174 +//#define VCHI_MSG_RX_OVERRUN
1175 +
1176 +/* How many transmit slots do we use. Generally don't need many, as the hardware driver
1177 + * underneath VCHI will usually have its own buffering. */
1178 +#ifndef VCHI_NUM_WRITE_SLOTS
1179 +# define VCHI_NUM_WRITE_SLOTS 4
1180 +#endif
1181 +
1182 +/* If a service has held or queued received messages in VCHI_XOFF_THRESHOLD or more slots,
1183 + * then it's taking up too much buffer space, and the peer service will be told to stop
1184 + * transmitting with an XOFF message. For this to be effective, the VCHI_NUM_READ_SLOTS
1185 + * needs to be considerably bigger than VCHI_NUM_WRITE_SLOTS, or the transmit latency
1186 + * is too high. */
1187 +#ifndef VCHI_XOFF_THRESHOLD
1188 +# define VCHI_XOFF_THRESHOLD (VCHI_NUM_READ_SLOTS / 2)
1189 +#endif
1190 +
1191 +/* After we've sent an XOFF, the peer will be told to resume transmission once the local
1192 + * service has dequeued/released enough messages that it's now occupying
1193 + * VCHI_XON_THRESHOLD slots or fewer. */
1194 +#ifndef VCHI_XON_THRESHOLD
1195 +# define VCHI_XON_THRESHOLD (VCHI_NUM_READ_SLOTS / 4)
1196 +#endif
1197 +
1198 +/* A size below which a bulk transfer omits the handshake completely and always goes
1199 + * via the message channel, if bulk auxiliary is being sent on that service. (The user
1200 + * can guarantee this by enabling unaligned transmits).
1201 + * Not API. */
1202 +#ifndef VCHI_MIN_BULK_SIZE
1203 +# define VCHI_MIN_BULK_SIZE ( VCHI_MAX_MSG_SIZE / 2 < 4096 ? VCHI_MAX_MSG_SIZE / 2 : 4096 )
1204 +#endif
1205 +
1206 +/* Maximum size of bulk transmission chunks, for each interface type. A trade-off between
1207 + * speed and latency; the smaller the chunk size the better chance of messages and other
1208 + * bulk transmissions getting in when big bulk transfers are happening. Set to 0 to not
1209 + * break transmissions into chunks.
1210 + */
1211 +#ifndef VCHI_MAX_BULK_CHUNK_SIZE_MPHI
1212 +# define VCHI_MAX_BULK_CHUNK_SIZE_MPHI (16 * 1024)
1213 +#endif
1214 +
1215 +/* NB Chunked CCP2 transmissions violate the letter of the CCP2 spec by using "JPEG8" mode
1216 + * with multiple-line frames. Only use if the receiver can cope. */
1217 +#ifndef VCHI_MAX_BULK_CHUNK_SIZE_CCP2
1218 +# define VCHI_MAX_BULK_CHUNK_SIZE_CCP2 0
1219 +#endif
1220 +
1221 +/* How many TX messages can we have pending in our transmit slots. Once exhausted,
1222 + * vchi_msg_queue will be blocked. */
1223 +#ifndef VCHI_TX_MSG_QUEUE_SIZE
1224 +# define VCHI_TX_MSG_QUEUE_SIZE 256
1225 +#endif
1226 +
1227 +/* How many RX messages can we have parsed in the receive slots. Once exhausted, parsing
1228 + * will be suspended until older messages are dequeued/released. */
1229 +#ifndef VCHI_RX_MSG_QUEUE_SIZE
1230 +# define VCHI_RX_MSG_QUEUE_SIZE 256
1231 +#endif
1232 +
1233 +/* Really should be able to cope if we run out of received message descriptors, by
1234 + * suspending parsing as the comment above says, but we don't. This sweeps the issue
1235 + * under the carpet. */
1236 +#if VCHI_RX_MSG_QUEUE_SIZE < (VCHI_MAX_MSG_SIZE/16 + 1) * VCHI_NUM_READ_SLOTS
1237 +# undef VCHI_RX_MSG_QUEUE_SIZE
1238 +# define VCHI_RX_MSG_QUEUE_SIZE (VCHI_MAX_MSG_SIZE/16 + 1) * VCHI_NUM_READ_SLOTS
1239 +#endif
1240 +
1241 +/* How many bulk transmits can we have pending. Once exhausted, vchi_bulk_queue_transmit
1242 + * will be blocked. */
1243 +#ifndef VCHI_TX_BULK_QUEUE_SIZE
1244 +# define VCHI_TX_BULK_QUEUE_SIZE 64
1245 +#endif
1246 +
1247 +/* How many bulk receives can we have pending. Once exhausted, vchi_bulk_queue_receive
1248 + * will be blocked. */
1249 +#ifndef VCHI_RX_BULK_QUEUE_SIZE
1250 +# define VCHI_RX_BULK_QUEUE_SIZE 64
1251 +#endif
1252 +
1253 +/* A limit on how many outstanding bulk requests we expect the peer to give us. If
1254 + * the peer asks for more than this, VCHI will fail and assert. The number is determined
1255 + * by the peer's hardware - it's the number of outstanding requests that can be queued
1256 + * on all bulk channels. VC3's MPHI peripheral allows 16. */
1257 +#ifndef VCHI_MAX_PEER_BULK_REQUESTS
1258 +# define VCHI_MAX_PEER_BULK_REQUESTS 32
1259 +#endif
1260 +
1261 +/* Define VCHI_CCP2TX_MANUAL_POWER if the host tells us when to turn the CCP2
1262 + * transmitter on and off.
1263 + */
1264 +/*#define VCHI_CCP2TX_MANUAL_POWER*/
1265 +
1266 +#ifndef VCHI_CCP2TX_MANUAL_POWER
1267 +
1268 +/* Timeout (in milliseconds) for putting the CCP2TX interface into IDLE state. Set
1269 + * negative for no IDLE.
1270 + */
1271 +# ifndef VCHI_CCP2TX_IDLE_TIMEOUT
1272 +# define VCHI_CCP2TX_IDLE_TIMEOUT 5
1273 +# endif
1274 +
1275 +/* Timeout (in milliseconds) for putting the CCP2TX interface into OFF state. Set
1276 + * negative for no OFF.
1277 + */
1278 +# ifndef VCHI_CCP2TX_OFF_TIMEOUT
1279 +# define VCHI_CCP2TX_OFF_TIMEOUT 1000
1280 +# endif
1281 +
1282 +#endif /* VCHI_CCP2TX_MANUAL_POWER */
1283 +
1284 +#endif /* VCHI_CFG_H_ */
1285 +
1286 +/****************************** End of file **********************************/
1287 diff --git a/drivers/misc/vc04_services/interface/vchi/vchi_cfg_internal.h b/drivers/misc/vc04_services/interface/vchi/vchi_cfg_internal.h
1288 new file mode 100644
1289 index 0000000..35dcba4
1290 --- /dev/null
1291 +++ b/drivers/misc/vc04_services/interface/vchi/vchi_cfg_internal.h
1292 @@ -0,0 +1,71 @@
1293 +/**
1294 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
1295 + *
1296 + * Redistribution and use in source and binary forms, with or without
1297 + * modification, are permitted provided that the following conditions
1298 + * are met:
1299 + * 1. Redistributions of source code must retain the above copyright
1300 + * notice, this list of conditions, and the following disclaimer,
1301 + * without modification.
1302 + * 2. Redistributions in binary form must reproduce the above copyright
1303 + * notice, this list of conditions and the following disclaimer in the
1304 + * documentation and/or other materials provided with the distribution.
1305 + * 3. The names of the above-listed copyright holders may not be used
1306 + * to endorse or promote products derived from this software without
1307 + * specific prior written permission.
1308 + *
1309 + * ALTERNATIVELY, this software may be distributed under the terms of the
1310 + * GNU General Public License ("GPL") version 2, as published by the Free
1311 + * Software Foundation.
1312 + *
1313 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
1314 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
1315 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
1316 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
1317 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
1318 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
1319 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
1320 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
1321 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
1322 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1323 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1324 + */
1325 +
1326 +#ifndef VCHI_CFG_INTERNAL_H_
1327 +#define VCHI_CFG_INTERNAL_H_
1328 +
1329 +/****************************************************************************************
1330 + * Control optimisation attempts.
1331 + ***************************************************************************************/
1332 +
1333 +// Don't use lots of short-term locks - use great long ones, reducing the overall locks-per-second
1334 +#define VCHI_COARSE_LOCKING
1335 +
1336 +// Avoid lock then unlock on exit from blocking queue operations (msg tx, bulk rx/tx)
1337 +// (only relevant if VCHI_COARSE_LOCKING)
1338 +#define VCHI_ELIDE_BLOCK_EXIT_LOCK
1339 +
1340 +// Avoid lock on non-blocking peek
1341 +// (only relevant if VCHI_COARSE_LOCKING)
1342 +#define VCHI_AVOID_PEEK_LOCK
1343 +
1344 +// Use one slot-handler thread per connection, rather than 1 thread dealing with all connections in rotation.
1345 +#define VCHI_MULTIPLE_HANDLER_THREADS
1346 +
1347 +// Put free descriptors onto the head of the free queue, rather than the tail, so that we don't thrash
1348 +// our way through the pool of descriptors.
1349 +#define VCHI_PUSH_FREE_DESCRIPTORS_ONTO_HEAD
1350 +
1351 +// Don't issue a MSG_AVAILABLE callback for every single message. Possibly only safe if VCHI_COARSE_LOCKING.
1352 +#define VCHI_FEWER_MSG_AVAILABLE_CALLBACKS
1353 +
1354 +// Don't use message descriptors for TX messages that don't need them
1355 +#define VCHI_MINIMISE_TX_MSG_DESCRIPTORS
1356 +
1357 +// Nano-locks for multiqueue
1358 +//#define VCHI_MQUEUE_NANOLOCKS
1359 +
1360 +// Lock-free(er) dequeuing
1361 +//#define VCHI_RX_NANOLOCKS
1362 +
1363 +#endif /*VCHI_CFG_INTERNAL_H_*/
1364 diff --git a/drivers/misc/vc04_services/interface/vchi/vchi_common.h b/drivers/misc/vc04_services/interface/vchi/vchi_common.h
1365 new file mode 100644
1366 index 0000000..9e6c00e
1367 --- /dev/null
1368 +++ b/drivers/misc/vc04_services/interface/vchi/vchi_common.h
1369 @@ -0,0 +1,163 @@
1370 +/**
1371 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
1372 + *
1373 + * Redistribution and use in source and binary forms, with or without
1374 + * modification, are permitted provided that the following conditions
1375 + * are met:
1376 + * 1. Redistributions of source code must retain the above copyright
1377 + * notice, this list of conditions, and the following disclaimer,
1378 + * without modification.
1379 + * 2. Redistributions in binary form must reproduce the above copyright
1380 + * notice, this list of conditions and the following disclaimer in the
1381 + * documentation and/or other materials provided with the distribution.
1382 + * 3. The names of the above-listed copyright holders may not be used
1383 + * to endorse or promote products derived from this software without
1384 + * specific prior written permission.
1385 + *
1386 + * ALTERNATIVELY, this software may be distributed under the terms of the
1387 + * GNU General Public License ("GPL") version 2, as published by the Free
1388 + * Software Foundation.
1389 + *
1390 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
1391 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
1392 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
1393 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
1394 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
1395 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
1396 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
1397 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
1398 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
1399 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1400 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1401 + */
1402 +
1403 +#ifndef VCHI_COMMON_H_
1404 +#define VCHI_COMMON_H_
1405 +
1406 +
1407 +//flags used when sending messages (must be bitmapped)
1408 +typedef enum
1409 +{
1410 + VCHI_FLAGS_NONE = 0x0,
1411 + VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE = 0x1, // waits for message to be received, or sent (NB. not the same as being seen on other side)
1412 + VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE = 0x2, // run a callback when message sent
1413 + VCHI_FLAGS_BLOCK_UNTIL_QUEUED = 0x4, // return once the transfer is in a queue ready to go
1414 + VCHI_FLAGS_ALLOW_PARTIAL = 0x8,
1415 + VCHI_FLAGS_BLOCK_UNTIL_DATA_READ = 0x10,
1416 + VCHI_FLAGS_CALLBACK_WHEN_DATA_READ = 0x20,
1417 +
1418 + VCHI_FLAGS_ALIGN_SLOT = 0x000080, // internal use only
1419 + VCHI_FLAGS_BULK_AUX_QUEUED = 0x010000, // internal use only
1420 + VCHI_FLAGS_BULK_AUX_COMPLETE = 0x020000, // internal use only
1421 + VCHI_FLAGS_BULK_DATA_QUEUED = 0x040000, // internal use only
1422 + VCHI_FLAGS_BULK_DATA_COMPLETE = 0x080000, // internal use only
1423 + VCHI_FLAGS_INTERNAL = 0xFF0000
1424 +} VCHI_FLAGS_T;
1425 +
1426 +// constants for vchi_crc_control()
1427 +typedef enum {
1428 + VCHI_CRC_NOTHING = -1,
1429 + VCHI_CRC_PER_SERVICE = 0,
1430 + VCHI_CRC_EVERYTHING = 1,
1431 +} VCHI_CRC_CONTROL_T;
1432 +
1433 +//callback reasons when an event occurs on a service
1434 +typedef enum
1435 +{
1436 + VCHI_CALLBACK_REASON_MIN,
1437 +
1438 + //This indicates that there is data available
1439 + //handle is the msg id that was transmitted with the data
1440 + // When a message is received and there was no FULL message available previously, send callback
1441 +   //    Tasks get kicked by the callback, reset their event and try to read from the fifo until it fails
1442 + VCHI_CALLBACK_MSG_AVAILABLE,
1443 + VCHI_CALLBACK_MSG_SENT,
1444 + VCHI_CALLBACK_MSG_SPACE_AVAILABLE, // XXX not yet implemented
1445 +
1446 + // This indicates that a transfer from the other side has completed
1447 + VCHI_CALLBACK_BULK_RECEIVED,
1448 + //This indicates that data queued up to be sent has now gone
1449 + //handle is the msg id that was used when sending the data
1450 + VCHI_CALLBACK_BULK_SENT,
1451 + VCHI_CALLBACK_BULK_RX_SPACE_AVAILABLE, // XXX not yet implemented
1452 + VCHI_CALLBACK_BULK_TX_SPACE_AVAILABLE, // XXX not yet implemented
1453 +
1454 + VCHI_CALLBACK_SERVICE_CLOSED,
1455 +
1456 + // this side has sent XOFF to peer due to lack of data consumption by service
1457 + // (suggests the service may need to take some recovery action if it has
1458 + // been deliberately holding off consuming data)
1459 + VCHI_CALLBACK_SENT_XOFF,
1460 + VCHI_CALLBACK_SENT_XON,
1461 +
1462 + // indicates that a bulk transfer has finished reading the source buffer
1463 + VCHI_CALLBACK_BULK_DATA_READ,
1464 +
1465 + // power notification events (currently host side only)
1466 + VCHI_CALLBACK_PEER_OFF,
1467 + VCHI_CALLBACK_PEER_SUSPENDED,
1468 + VCHI_CALLBACK_PEER_ON,
1469 + VCHI_CALLBACK_PEER_RESUMED,
1470 + VCHI_CALLBACK_FORCED_POWER_OFF,
1471 +
1472 +#ifdef USE_VCHIQ_ARM
1473 + // some extra notifications provided by vchiq_arm
1474 + VCHI_CALLBACK_SERVICE_OPENED,
1475 + VCHI_CALLBACK_BULK_RECEIVE_ABORTED,
1476 + VCHI_CALLBACK_BULK_TRANSMIT_ABORTED,
1477 +#endif
1478 +
1479 + VCHI_CALLBACK_REASON_MAX
1480 +} VCHI_CALLBACK_REASON_T;
1481 +
1482 +//Callback used by all services / bulk transfers
1483 +typedef void (*VCHI_CALLBACK_T)( void *callback_param, //my service local param
1484 + VCHI_CALLBACK_REASON_T reason,
1485 + void *handle ); //for transmitting msg's only
1486 +
1487 +
1488 +
1489 +/*
1490 + * Define vector struct for scatter-gather (vector) operations
1491 + * Vectors can be nested - if a vector element has negative length, then
1492 + * the data pointer is treated as pointing to another vector array, with
1493 + * '-vec_len' elements. Thus to append a header onto an existing vector,
1494 + * you can do this:
1495 + *
1496 + * void foo(const VCHI_MSG_VECTOR_T *v, int n)
1497 + * {
1498 + * VCHI_MSG_VECTOR_T nv[2];
1499 + * nv[0].vec_base = my_header;
1500 + * nv[0].vec_len = sizeof my_header;
1501 + * nv[1].vec_base = v;
1502 + * nv[1].vec_len = -n;
1503 + * ...
1504 + *
1505 + */
1506 +typedef struct vchi_msg_vector {
1507 + const void *vec_base;
1508 + int32_t vec_len;
1509 +} VCHI_MSG_VECTOR_T;
1510 +
1511 +// Opaque type for a connection API
1512 +typedef struct opaque_vchi_connection_api_t VCHI_CONNECTION_API_T;
1513 +
1514 +// Opaque type for a message driver
1515 +typedef struct opaque_vchi_message_driver_t VCHI_MESSAGE_DRIVER_T;
1516 +
1517 +
1518 +// Iterator structure for reading ahead through the received message queue. Allocated by the client,
1519 +// initialised by vchi_msg_look_ahead. Fields are for internal VCHI use only.
1520 +// Iterates over the messages in the queue at the instant of the call to vchi_msg_look_ahead -
1521 +// will not proceed to messages received since. Behaviour is undefined if an iterator
1522 +// is used again after messages for that service are removed/dequeued by any
1523 +// means other than vchi_msg_iter_... calls on the iterator itself.
1524 +typedef struct {
1525 + struct opaque_vchi_service_t *service;
1526 + void *last;
1527 + void *next;
1528 + void *remove;
1529 +} VCHI_MSG_ITER_T;
1530 +
1531 +
1532 +#endif // VCHI_COMMON_H_
1533 diff --git a/drivers/misc/vc04_services/interface/vchi/vchi_mh.h b/drivers/misc/vc04_services/interface/vchi/vchi_mh.h
1534 new file mode 100644
1535 index 0000000..198bd07
1536 --- /dev/null
1537 +++ b/drivers/misc/vc04_services/interface/vchi/vchi_mh.h
1538 @@ -0,0 +1,42 @@
1539 +/**
1540 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
1541 + *
1542 + * Redistribution and use in source and binary forms, with or without
1543 + * modification, are permitted provided that the following conditions
1544 + * are met:
1545 + * 1. Redistributions of source code must retain the above copyright
1546 + * notice, this list of conditions, and the following disclaimer,
1547 + * without modification.
1548 + * 2. Redistributions in binary form must reproduce the above copyright
1549 + * notice, this list of conditions and the following disclaimer in the
1550 + * documentation and/or other materials provided with the distribution.
1551 + * 3. The names of the above-listed copyright holders may not be used
1552 + * to endorse or promote products derived from this software without
1553 + * specific prior written permission.
1554 + *
1555 + * ALTERNATIVELY, this software may be distributed under the terms of the
1556 + * GNU General Public License ("GPL") version 2, as published by the Free
1557 + * Software Foundation.
1558 + *
1559 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
1560 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
1561 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
1562 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
1563 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
1564 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
1565 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
1566 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
1567 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
1568 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1569 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1570 + */
1571 +
1572 +#ifndef VCHI_MH_H_
1573 +#define VCHI_MH_H_
1574 +
1575 +#include <linux/types.h>
1576 +
1577 +typedef int32_t VCHI_MEM_HANDLE_T;
1578 +#define VCHI_MEM_HANDLE_INVALID 0
1579 +
1580 +#endif
1581 diff --git a/drivers/misc/vc04_services/interface/vchiq_arm/vchiq.h b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq.h
1582 new file mode 100644
1583 index 0000000..f87dcbd
1584 --- /dev/null
1585 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq.h
1586 @@ -0,0 +1,41 @@
1587 +/**
1588 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
1589 + *
1590 + * Redistribution and use in source and binary forms, with or without
1591 + * modification, are permitted provided that the following conditions
1592 + * are met:
1593 + * 1. Redistributions of source code must retain the above copyright
1594 + * notice, this list of conditions, and the following disclaimer,
1595 + * without modification.
1596 + * 2. Redistributions in binary form must reproduce the above copyright
1597 + * notice, this list of conditions and the following disclaimer in the
1598 + * documentation and/or other materials provided with the distribution.
1599 + * 3. The names of the above-listed copyright holders may not be used
1600 + * to endorse or promote products derived from this software without
1601 + * specific prior written permission.
1602 + *
1603 + * ALTERNATIVELY, this software may be distributed under the terms of the
1604 + * GNU General Public License ("GPL") version 2, as published by the Free
1605 + * Software Foundation.
1606 + *
1607 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
1608 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
1609 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
1610 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
1611 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
1612 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
1613 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
1614 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
1615 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
1616 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1617 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1618 + */
1619 +
1620 +#ifndef VCHIQ_VCHIQ_H
1621 +#define VCHIQ_VCHIQ_H
1622 +
1623 +#include "vchiq_if.h"
1624 +#include "vchiq_util.h"
1625 +
1626 +#endif
1627 +
1628 diff --git a/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835.h b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835.h
1629 new file mode 100644
1630 index 0000000..7ea5c64
1631 --- /dev/null
1632 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835.h
1633 @@ -0,0 +1,42 @@
1634 +/**
1635 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
1636 + *
1637 + * Redistribution and use in source and binary forms, with or without
1638 + * modification, are permitted provided that the following conditions
1639 + * are met:
1640 + * 1. Redistributions of source code must retain the above copyright
1641 + * notice, this list of conditions, and the following disclaimer,
1642 + * without modification.
1643 + * 2. Redistributions in binary form must reproduce the above copyright
1644 + * notice, this list of conditions and the following disclaimer in the
1645 + * documentation and/or other materials provided with the distribution.
1646 + * 3. The names of the above-listed copyright holders may not be used
1647 + * to endorse or promote products derived from this software without
1648 + * specific prior written permission.
1649 + *
1650 + * ALTERNATIVELY, this software may be distributed under the terms of the
1651 + * GNU General Public License ("GPL") version 2, as published by the Free
1652 + * Software Foundation.
1653 + *
1654 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
1655 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
1656 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
1657 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
1658 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
1659 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
1660 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
1661 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
1662 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
1663 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1664 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1665 + */
1666 +
1667 +#ifndef VCHIQ_2835_H
1668 +#define VCHIQ_2835_H
1669 +
1670 +#include "vchiq_pagelist.h"
1671 +
1672 +#define VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX 0
1673 +#define VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX 1
1674 +
1675 +#endif /* VCHIQ_2835_H */
1676 diff --git a/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
1677 new file mode 100644
1678 index 0000000..2b5fa56
1679 --- /dev/null
1680 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
1681 @@ -0,0 +1,538 @@
1682 +/**
1683 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
1684 + *
1685 + * Redistribution and use in source and binary forms, with or without
1686 + * modification, are permitted provided that the following conditions
1687 + * are met:
1688 + * 1. Redistributions of source code must retain the above copyright
1689 + * notice, this list of conditions, and the following disclaimer,
1690 + * without modification.
1691 + * 2. Redistributions in binary form must reproduce the above copyright
1692 + * notice, this list of conditions and the following disclaimer in the
1693 + * documentation and/or other materials provided with the distribution.
1694 + * 3. The names of the above-listed copyright holders may not be used
1695 + * to endorse or promote products derived from this software without
1696 + * specific prior written permission.
1697 + *
1698 + * ALTERNATIVELY, this software may be distributed under the terms of the
1699 + * GNU General Public License ("GPL") version 2, as published by the Free
1700 + * Software Foundation.
1701 + *
1702 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
1703 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
1704 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
1705 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
1706 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
1707 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
1708 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
1709 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
1710 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
1711 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1712 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1713 + */
1714 +
1715 +#include <linux/kernel.h>
1716 +#include <linux/types.h>
1717 +#include <linux/errno.h>
1718 +#include <linux/interrupt.h>
1719 +#include <linux/irq.h>
1720 +#include <linux/pagemap.h>
1721 +#include <linux/dma-mapping.h>
1722 +#include <linux/version.h>
1723 +#include <linux/io.h>
1724 +#include <linux/uaccess.h>
1725 +#include <asm/pgtable.h>
1726 +
1727 +#include <mach/irqs.h>
1728 +
1729 +#include <mach/platform.h>
1730 +#include <mach/vcio.h>
1731 +
1732 +#define TOTAL_SLOTS (VCHIQ_SLOT_ZERO_SLOTS + 2 * 32)
1733 +
1734 +#define VCHIQ_DOORBELL_IRQ IRQ_ARM_DOORBELL_0
1735 +#define VCHIQ_ARM_ADDRESS(x) ((void *)__virt_to_bus((unsigned)x))
1736 +
1737 +#include "vchiq_arm.h"
1738 +#include "vchiq_2835.h"
1739 +#include "vchiq_connected.h"
1740 +
1741 +#define MAX_FRAGMENTS (VCHIQ_NUM_CURRENT_BULKS * 2)
1742 +
1743 +typedef struct vchiq_2835_state_struct {
1744 + int inited;
1745 + VCHIQ_ARM_STATE_T arm_state;
1746 +} VCHIQ_2835_ARM_STATE_T;
1747 +
1748 +static char *g_slot_mem;
1749 +static int g_slot_mem_size;
1750 +dma_addr_t g_slot_phys;
1751 +static FRAGMENTS_T *g_fragments_base;
1752 +static FRAGMENTS_T *g_free_fragments;
1753 +struct semaphore g_free_fragments_sema;
1754 +
1755 +extern int vchiq_arm_log_level;
1756 +
1757 +static DEFINE_SEMAPHORE(g_free_fragments_mutex);
1758 +
1759 +static irqreturn_t
1760 +vchiq_doorbell_irq(int irq, void *dev_id);
1761 +
1762 +static int
1763 +create_pagelist(char __user *buf, size_t count, unsigned short type,
1764 + struct task_struct *task, PAGELIST_T ** ppagelist);
1765 +
1766 +static void
1767 +free_pagelist(PAGELIST_T *pagelist, int actual);
1768 +
1769 +int __init
1770 +vchiq_platform_init(VCHIQ_STATE_T *state)
1771 +{
1772 + VCHIQ_SLOT_ZERO_T *vchiq_slot_zero;
1773 + int frag_mem_size;
1774 + int err;
1775 + int i;
1776 +
1777 + /* Allocate space for the channels in coherent memory */
1778 + g_slot_mem_size = PAGE_ALIGN(TOTAL_SLOTS * VCHIQ_SLOT_SIZE);
1779 + frag_mem_size = PAGE_ALIGN(sizeof(FRAGMENTS_T) * MAX_FRAGMENTS);
1780 +
1781 + g_slot_mem = dma_alloc_coherent(NULL, g_slot_mem_size + frag_mem_size,
1782 + &g_slot_phys, GFP_ATOMIC);
1783 +
1784 + if (!g_slot_mem) {
1785 + vchiq_log_error(vchiq_arm_log_level,
1786 + "Unable to allocate channel memory");
1787 + err = -ENOMEM;
1788 + goto failed_alloc;
1789 + }
1790 +
1791 + WARN_ON(((int)g_slot_mem & (PAGE_SIZE - 1)) != 0);
1792 +
1793 + vchiq_slot_zero = vchiq_init_slots(g_slot_mem, g_slot_mem_size);
1794 + if (!vchiq_slot_zero) {
1795 + err = -EINVAL;
1796 + goto failed_init_slots;
1797 + }
1798 +
1799 + vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX] =
1800 + (int)g_slot_phys + g_slot_mem_size;
1801 + vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX] =
1802 + MAX_FRAGMENTS;
1803 +
1804 + g_fragments_base = (FRAGMENTS_T *)(g_slot_mem + g_slot_mem_size);
1805 + g_slot_mem_size += frag_mem_size;
1806 +
1807 + g_free_fragments = g_fragments_base;
1808 + for (i = 0; i < (MAX_FRAGMENTS - 1); i++) {
1809 + *(FRAGMENTS_T **)&g_fragments_base[i] =
1810 + &g_fragments_base[i + 1];
1811 + }
1812 + *(FRAGMENTS_T **)&g_fragments_base[i] = NULL;
1813 + sema_init(&g_free_fragments_sema, MAX_FRAGMENTS);
1814 +
1815 + if (vchiq_init_state(state, vchiq_slot_zero, 0/*slave*/) !=
1816 + VCHIQ_SUCCESS) {
1817 + err = -EINVAL;
1818 + goto failed_vchiq_init;
1819 + }
1820 +
1821 + err = request_irq(VCHIQ_DOORBELL_IRQ, vchiq_doorbell_irq,
1822 + IRQF_IRQPOLL, "VCHIQ doorbell",
1823 + state);
1824 + if (err < 0) {
1825 + vchiq_log_error(vchiq_arm_log_level, "%s: failed to register "
1826 + "irq=%d err=%d", __func__,
1827 + VCHIQ_DOORBELL_IRQ, err);
1828 + goto failed_request_irq;
1829 + }
1830 +
1831 + /* Send the base address of the slots to VideoCore */
1832 +
1833 + dsb(); /* Ensure all writes have completed */
1834 +
1835 + bcm_mailbox_write(MBOX_CHAN_VCHIQ, (unsigned int)g_slot_phys);
1836 +
1837 + vchiq_log_info(vchiq_arm_log_level,
1838 + "vchiq_init - done (slots %x, phys %x)",
1839 + (unsigned int)vchiq_slot_zero, g_slot_phys);
1840 +
1841 + vchiq_call_connected_callbacks();
1842 +
1843 + return 0;
1844 +
1845 +failed_request_irq:
1846 +failed_vchiq_init:
1847 +failed_init_slots:
1848 + dma_free_coherent(NULL, g_slot_mem_size, g_slot_mem, g_slot_phys);
1849 +
1850 +failed_alloc:
1851 + return err;
1852 +}
1853 +
1854 +void __exit
1855 +vchiq_platform_exit(VCHIQ_STATE_T *state)
1856 +{
1857 + free_irq(VCHIQ_DOORBELL_IRQ, state);
1858 + dma_free_coherent(NULL, g_slot_mem_size,
1859 + g_slot_mem, g_slot_phys);
1860 +}
1861 +
1862 +
1863 +VCHIQ_STATUS_T
1864 +vchiq_platform_init_state(VCHIQ_STATE_T *state)
1865 +{
1866 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
1867 + state->platform_state = kzalloc(sizeof(VCHIQ_2835_ARM_STATE_T), GFP_KERNEL);
1868 + ((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->inited = 1;
1869 + status = vchiq_arm_init_state(state, &((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->arm_state);
1870 + if(status != VCHIQ_SUCCESS)
1871 + {
1872 + ((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->inited = 0;
1873 + }
1874 + return status;
1875 +}
1876 +
1877 +VCHIQ_ARM_STATE_T*
1878 +vchiq_platform_get_arm_state(VCHIQ_STATE_T *state)
1879 +{
1880 + if(!((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->inited)
1881 + {
1882 + BUG();
1883 + }
1884 + return &((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->arm_state;
1885 +}
1886 +
1887 +void
1888 +remote_event_signal(REMOTE_EVENT_T *event)
1889 +{
1890 + wmb();
1891 +
1892 + event->fired = 1;
1893 +
1894 + dsb(); /* data barrier operation */
1895 +
1896 + if (event->armed) {
1897 + /* trigger vc interrupt */
1898 +
1899 + writel(0, __io_address(ARM_0_BELL2));
1900 + }
1901 +}
1902 +
1903 +int
1904 +vchiq_copy_from_user(void *dst, const void *src, int size)
1905 +{
1906 + if ((uint32_t)src < TASK_SIZE) {
1907 + return copy_from_user(dst, src, size);
1908 + } else {
1909 + memcpy(dst, src, size);
1910 + return 0;
1911 + }
1912 +}
1913 +
1914 +VCHIQ_STATUS_T
1915 +vchiq_prepare_bulk_data(VCHIQ_BULK_T *bulk, VCHI_MEM_HANDLE_T memhandle,
1916 + void *offset, int size, int dir)
1917 +{
1918 + PAGELIST_T *pagelist;
1919 + int ret;
1920 +
1921 + WARN_ON(memhandle != VCHI_MEM_HANDLE_INVALID);
1922 +
1923 + ret = create_pagelist((char __user *)offset, size,
1924 + (dir == VCHIQ_BULK_RECEIVE)
1925 + ? PAGELIST_READ
1926 + : PAGELIST_WRITE,
1927 + current,
1928 + &pagelist);
1929 + if (ret != 0)
1930 + return VCHIQ_ERROR;
1931 +
1932 + bulk->handle = memhandle;
1933 + bulk->data = VCHIQ_ARM_ADDRESS(pagelist);
1934 +
1935 + /* Store the pagelist address in remote_data, which isn't used by the
1936 + slave. */
1937 + bulk->remote_data = pagelist;
1938 +
1939 + return VCHIQ_SUCCESS;
1940 +}
1941 +
1942 +void
1943 +vchiq_complete_bulk(VCHIQ_BULK_T *bulk)
1944 +{
1945 + if (bulk && bulk->remote_data && bulk->actual)
1946 + free_pagelist((PAGELIST_T *)bulk->remote_data, bulk->actual);
1947 +}
1948 +
1949 +void
1950 +vchiq_transfer_bulk(VCHIQ_BULK_T *bulk)
1951 +{
1952 + /*
1953 + * This should only be called on the master (VideoCore) side, but
1954 + * provide an implementation to avoid the need for ifdefery.
1955 + */
1956 + BUG();
1957 +}
1958 +
1959 +void
1960 +vchiq_dump_platform_state(void *dump_context)
1961 +{
1962 + char buf[80];
1963 + int len;
1964 + len = snprintf(buf, sizeof(buf),
1965 + " Platform: 2835 (VC master)");
1966 + vchiq_dump(dump_context, buf, len + 1);
1967 +}
1968 +
1969 +VCHIQ_STATUS_T
1970 +vchiq_platform_suspend(VCHIQ_STATE_T *state)
1971 +{
1972 + return VCHIQ_ERROR;
1973 +}
1974 +
1975 +VCHIQ_STATUS_T
1976 +vchiq_platform_resume(VCHIQ_STATE_T *state)
1977 +{
1978 + return VCHIQ_SUCCESS;
1979 +}
1980 +
1981 +void
1982 +vchiq_platform_paused(VCHIQ_STATE_T *state)
1983 +{
1984 +}
1985 +
1986 +void
1987 +vchiq_platform_resumed(VCHIQ_STATE_T *state)
1988 +{
1989 +}
1990 +
1991 +int
1992 +vchiq_platform_videocore_wanted(VCHIQ_STATE_T* state)
1993 +{
1994 + return 1; // autosuspend not supported - videocore always wanted
1995 +}
1996 +
1997 +int
1998 +vchiq_platform_use_suspend_timer(void)
1999 +{
2000 + return 0;
2001 +}
2002 +void
2003 +vchiq_dump_platform_use_state(VCHIQ_STATE_T *state)
2004 +{
2005 + vchiq_log_info((vchiq_arm_log_level>=VCHIQ_LOG_INFO),"Suspend timer not in use");
2006 +}
2007 +void
2008 +vchiq_platform_handle_timeout(VCHIQ_STATE_T *state)
2009 +{
2010 + (void)state;
2011 +}
2012 +/*
2013 + * Local functions
2014 + */
2015 +
2016 +static irqreturn_t
2017 +vchiq_doorbell_irq(int irq, void *dev_id)
2018 +{
2019 + VCHIQ_STATE_T *state = dev_id;
2020 + irqreturn_t ret = IRQ_NONE;
2021 + unsigned int status;
2022 +
2023 + /* Read (and clear) the doorbell */
2024 + status = readl(__io_address(ARM_0_BELL0));
2025 +
2026 + if (status & 0x4) { /* Was the doorbell rung? */
2027 + remote_event_pollall(state);
2028 + ret = IRQ_HANDLED;
2029 + }
2030 +
2031 + return ret;
2032 +}
2033 +
2034 +/* There is a potential problem with partial cache lines (pages?)
2035 +** at the ends of the block when reading. If the CPU accessed anything in
2036 +** the same line (page?) then it may have pulled old data into the cache,
2037 +** obscuring the new data underneath. We can solve this by transferring the
2038 +** partial cache lines separately, and allowing the ARM to copy into the
2039 +** cached area.
2040 +
2041 +** N.B. This implementation plays slightly fast and loose with the Linux
2042 +** driver programming rules, e.g. its use of __virt_to_bus instead of
2043 +** dma_map_single, but it isn't a multi-platform driver and it benefits
2044 +** from increased speed as a result.
2045 +*/
2046 +
2047 +static int
2048 +create_pagelist(char __user *buf, size_t count, unsigned short type,
2049 + struct task_struct *task, PAGELIST_T ** ppagelist)
2050 +{
2051 + PAGELIST_T *pagelist;
2052 + struct page **pages;
2053 + struct page *page;
2054 + unsigned long *addrs;
2055 + unsigned int num_pages, offset, i;
2056 + char *addr, *base_addr, *next_addr;
2057 + int run, addridx, actual_pages;
2058 +
2059 + offset = (unsigned int)buf & (PAGE_SIZE - 1);
2060 + num_pages = (count + offset + PAGE_SIZE - 1) / PAGE_SIZE;
2061 +
2062 + *ppagelist = NULL;
2063 +
2064 + /* Allocate enough storage to hold the page pointers and the page
2065 + ** list
2066 + */
2067 + pagelist = kmalloc(sizeof(PAGELIST_T) +
2068 + (num_pages * sizeof(unsigned long)) +
2069 + (num_pages * sizeof(pages[0])),
2070 + GFP_KERNEL);
2071 +
2072 + vchiq_log_trace(vchiq_arm_log_level,
2073 + "create_pagelist - %x", (unsigned int)pagelist);
2074 + if (!pagelist)
2075 + return -ENOMEM;
2076 +
2077 + addrs = pagelist->addrs;
2078 + pages = (struct page **)(addrs + num_pages);
2079 +
2080 + down_read(&task->mm->mmap_sem);
2081 + actual_pages = get_user_pages(task, task->mm,
2082 + (unsigned long)buf & ~(PAGE_SIZE - 1), num_pages,
2083 + (type == PAGELIST_READ) /*Write */ , 0 /*Force */ ,
2084 + pages, NULL /*vmas */);
2085 + up_read(&task->mm->mmap_sem);
2086 +
2087 + if (actual_pages != num_pages)
2088 + {
2089 + /* This is probably due to the process being killed */
2090 + while (actual_pages > 0)
2091 + {
2092 + actual_pages--;
2093 + page_cache_release(pages[actual_pages]);
2094 + }
2095 + kfree(pagelist);
2096 + if (actual_pages == 0)
2097 + actual_pages = -ENOMEM;
2098 + return actual_pages;
2099 + }
2100 +
2101 + pagelist->length = count;
2102 + pagelist->type = type;
2103 + pagelist->offset = offset;
2104 +
2105 + /* Group the pages into runs of contiguous pages */
2106 +
2107 + base_addr = VCHIQ_ARM_ADDRESS(page_address(pages[0]));
2108 + next_addr = base_addr + PAGE_SIZE;
2109 + addridx = 0;
2110 + run = 0;
2111 +
2112 + for (i = 1; i < num_pages; i++) {
2113 + addr = VCHIQ_ARM_ADDRESS(page_address(pages[i]));
2114 + if ((addr == next_addr) && (run < (PAGE_SIZE - 1))) {
2115 + next_addr += PAGE_SIZE;
2116 + run++;
2117 + } else {
2118 + addrs[addridx] = (unsigned long)base_addr + run;
2119 + addridx++;
2120 + base_addr = addr;
2121 + next_addr = addr + PAGE_SIZE;
2122 + run = 0;
2123 + }
2124 + }
2125 +
2126 + addrs[addridx] = (unsigned long)base_addr + run;
2127 + addridx++;
2128 +
2129 + /* Partial cache lines (fragments) require special measures */
2130 + if ((type == PAGELIST_READ) &&
2131 + ((pagelist->offset & (CACHE_LINE_SIZE - 1)) ||
2132 + ((pagelist->offset + pagelist->length) &
2133 + (CACHE_LINE_SIZE - 1)))) {
2134 + FRAGMENTS_T *fragments;
2135 +
2136 + if (down_interruptible(&g_free_fragments_sema) != 0) {
2137 + kfree(pagelist);
2138 + return -EINTR;
2139 + }
2140 +
2141 + WARN_ON(g_free_fragments == NULL);
2142 +
2143 + down(&g_free_fragments_mutex);
2144 + fragments = (FRAGMENTS_T *) g_free_fragments;
2145 + WARN_ON(fragments == NULL);
2146 + g_free_fragments = *(FRAGMENTS_T **) g_free_fragments;
2147 + up(&g_free_fragments_mutex);
2148 + pagelist->type =
2149 + PAGELIST_READ_WITH_FRAGMENTS + (fragments -
2150 + g_fragments_base);
2151 + }
2152 +
2153 + for (page = virt_to_page(pagelist);
2154 + page <= virt_to_page(addrs + num_pages - 1); page++) {
2155 + flush_dcache_page(page);
2156 + }
2157 +
2158 + *ppagelist = pagelist;
2159 +
2160 + return 0;
2161 +}
2162 +
2163 +static void
2164 +free_pagelist(PAGELIST_T *pagelist, int actual)
2165 +{
2166 + struct page **pages;
2167 + unsigned int num_pages, i;
2168 +
2169 + vchiq_log_trace(vchiq_arm_log_level,
2170 + "free_pagelist - %x, %d", (unsigned int)pagelist, actual);
2171 +
2172 + num_pages =
2173 + (pagelist->length + pagelist->offset + PAGE_SIZE - 1) /
2174 + PAGE_SIZE;
2175 +
2176 + pages = (struct page **)(pagelist->addrs + num_pages);
2177 +
2178 + /* Deal with any partial cache lines (fragments) */
2179 + if (pagelist->type >= PAGELIST_READ_WITH_FRAGMENTS) {
2180 + FRAGMENTS_T *fragments = g_fragments_base +
2181 + (pagelist->type - PAGELIST_READ_WITH_FRAGMENTS);
2182 + int head_bytes, tail_bytes;
2183 + head_bytes = (CACHE_LINE_SIZE - pagelist->offset) &
2184 + (CACHE_LINE_SIZE - 1);
2185 + tail_bytes = (pagelist->offset + actual) &
2186 + (CACHE_LINE_SIZE - 1);
2187 +
2188 + if ((actual >= 0) && (head_bytes != 0)) {
2189 + if (head_bytes > actual)
2190 + head_bytes = actual;
2191 +
2192 + memcpy((char *)page_address(pages[0]) +
2193 + pagelist->offset,
2194 + fragments->headbuf,
2195 + head_bytes);
2196 + }
2197 + if ((actual >= 0) && (head_bytes < actual) &&
2198 + (tail_bytes != 0)) {
2199 + memcpy((char *)page_address(pages[num_pages - 1]) +
2200 + ((pagelist->offset + actual) &
2201 + (PAGE_SIZE - 1) & ~(CACHE_LINE_SIZE - 1)),
2202 + fragments->tailbuf, tail_bytes);
2203 + }
2204 +
2205 + down(&g_free_fragments_mutex);
2206 + *(FRAGMENTS_T **) fragments = g_free_fragments;
2207 + g_free_fragments = fragments;
2208 + up(&g_free_fragments_mutex);
2209 + up(&g_free_fragments_sema);
2210 + }
2211 +
2212 + for (i = 0; i < num_pages; i++) {
2213 + if (pagelist->type != PAGELIST_WRITE)
2214 + set_page_dirty(pages[i]);
2215 + page_cache_release(pages[i]);
2216 + }
2217 +
2218 + kfree(pagelist);
2219 +}
2220 diff --git a/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.c
2221 new file mode 100644
2222 index 0000000..f44d4b4
2223 --- /dev/null
2224 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.c
2225 @@ -0,0 +1,2813 @@
2226 +/**
2227 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
2228 + *
2229 + * Redistribution and use in source and binary forms, with or without
2230 + * modification, are permitted provided that the following conditions
2231 + * are met:
2232 + * 1. Redistributions of source code must retain the above copyright
2233 + * notice, this list of conditions, and the following disclaimer,
2234 + * without modification.
2235 + * 2. Redistributions in binary form must reproduce the above copyright
2236 + * notice, this list of conditions and the following disclaimer in the
2237 + * documentation and/or other materials provided with the distribution.
2238 + * 3. The names of the above-listed copyright holders may not be used
2239 + * to endorse or promote products derived from this software without
2240 + * specific prior written permission.
2241 + *
2242 + * ALTERNATIVELY, this software may be distributed under the terms of the
2243 + * GNU General Public License ("GPL") version 2, as published by the Free
2244 + * Software Foundation.
2245 + *
2246 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
2247 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
2248 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
2249 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
2250 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
2251 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
2252 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
2253 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
2254 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
2255 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
2256 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
2257 + */
2258 +
2259 +#include <linux/kernel.h>
2260 +#include <linux/module.h>
2261 +#include <linux/types.h>
2262 +#include <linux/errno.h>
2263 +#include <linux/cdev.h>
2264 +#include <linux/fs.h>
2265 +#include <linux/device.h>
2266 +#include <linux/mm.h>
2267 +#include <linux/highmem.h>
2268 +#include <linux/pagemap.h>
2269 +#include <linux/bug.h>
2270 +#include <linux/semaphore.h>
2271 +#include <linux/list.h>
2272 +#include <linux/proc_fs.h>
2273 +
2274 +#include "vchiq_core.h"
2275 +#include "vchiq_ioctl.h"
2276 +#include "vchiq_arm.h"
2277 +
2278 +#define DEVICE_NAME "vchiq"
2279 +
2280 +/* Override the default prefix, which would be vchiq_arm (from the filename) */
2281 +#undef MODULE_PARAM_PREFIX
2282 +#define MODULE_PARAM_PREFIX DEVICE_NAME "."
2283 +
2284 +#define VCHIQ_MINOR 0
2285 +
2286 +/* Some per-instance constants */
2287 +#define MAX_COMPLETIONS 16
2288 +#define MAX_SERVICES 64
2289 +#define MAX_ELEMENTS 8
2290 +#define MSG_QUEUE_SIZE 64
2291 +
2292 +#define KEEPALIVE_VER 1
2293 +#define KEEPALIVE_VER_MIN KEEPALIVE_VER
2294 +
2295 +/* Run time control of log level, based on KERN_XXX level. */
2296 +int vchiq_arm_log_level = VCHIQ_LOG_DEFAULT;
2297 +int vchiq_susp_log_level = VCHIQ_LOG_ERROR;
2298 +
2299 +#define SUSPEND_TIMER_TIMEOUT_MS 100
2300 +#define SUSPEND_RETRY_TIMER_TIMEOUT_MS 1000
2301 +
2302 +#define VC_SUSPEND_NUM_OFFSET 3 /* number of values before idle which are -ve */
2303 +static const char *const suspend_state_names[] = {
2304 + "VC_SUSPEND_FORCE_CANCELED",
2305 + "VC_SUSPEND_REJECTED",
2306 + "VC_SUSPEND_FAILED",
2307 + "VC_SUSPEND_IDLE",
2308 + "VC_SUSPEND_REQUESTED",
2309 + "VC_SUSPEND_IN_PROGRESS",
2310 + "VC_SUSPEND_SUSPENDED"
2311 +};
2312 +#define VC_RESUME_NUM_OFFSET 1 /* number of values before idle which are -ve */
2313 +static const char *const resume_state_names[] = {
2314 + "VC_RESUME_FAILED",
2315 + "VC_RESUME_IDLE",
2316 + "VC_RESUME_REQUESTED",
2317 + "VC_RESUME_IN_PROGRESS",
2318 + "VC_RESUME_RESUMED"
2319 +};
2320 +/* The number of times we allow force suspend to timeout before actually
2321 +** _forcing_ suspend. This is to cater for SW which fails to release vchiq
2322 +** correctly - we don't want to prevent ARM suspend indefinitely in this case.
2323 +*/
2324 +#define FORCE_SUSPEND_FAIL_MAX 8
2325 +
2326 +/* The time in ms allowed for videocore to go idle when force suspend has been
2327 + * requested */
2328 +#define FORCE_SUSPEND_TIMEOUT_MS 200
2329 +
2330 +
2331 +static void suspend_timer_callback(unsigned long context);
2332 +static int vchiq_proc_add_instance(VCHIQ_INSTANCE_T instance);
2333 +static void vchiq_proc_remove_instance(VCHIQ_INSTANCE_T instance);
2334 +
2335 +
2336 +typedef struct user_service_struct {
2337 + VCHIQ_SERVICE_T *service;
2338 + void *userdata;
2339 + VCHIQ_INSTANCE_T instance;
2340 + int is_vchi;
2341 + int dequeue_pending;
2342 + int message_available_pos;
2343 + int msg_insert;
2344 + int msg_remove;
2345 + struct semaphore insert_event;
2346 + struct semaphore remove_event;
2347 + VCHIQ_HEADER_T * msg_queue[MSG_QUEUE_SIZE];
2348 +} USER_SERVICE_T;
2349 +
2350 +struct bulk_waiter_node {
2351 + struct bulk_waiter bulk_waiter;
2352 + int pid;
2353 + struct list_head list;
2354 +};
2355 +
2356 +struct vchiq_instance_struct {
2357 + VCHIQ_STATE_T *state;
2358 + VCHIQ_COMPLETION_DATA_T completions[MAX_COMPLETIONS];
2359 + int completion_insert;
2360 + int completion_remove;
2361 + struct semaphore insert_event;
2362 + struct semaphore remove_event;
2363 + struct mutex completion_mutex;
2364 +
2365 + int connected;
2366 + int closing;
2367 + int pid;
2368 + int mark;
2369 +
2370 + struct list_head bulk_waiter_list;
2371 + struct mutex bulk_waiter_list_mutex;
2372 +
2373 + struct proc_dir_entry *proc_entry;
2374 +};
2375 +
2376 +typedef struct dump_context_struct {
2377 + char __user *buf;
2378 + size_t actual;
2379 + size_t space;
2380 + loff_t offset;
2381 +} DUMP_CONTEXT_T;
2382 +
2383 +static struct cdev vchiq_cdev;
2384 +static dev_t vchiq_devid;
2385 +static VCHIQ_STATE_T g_state;
2386 +static struct class *vchiq_class;
2387 +static struct device *vchiq_dev;
2388 +static DEFINE_SPINLOCK(msg_queue_spinlock);
2389 +
2390 +static const char *const ioctl_names[] = {
2391 + "CONNECT",
2392 + "SHUTDOWN",
2393 + "CREATE_SERVICE",
2394 + "REMOVE_SERVICE",
2395 + "QUEUE_MESSAGE",
2396 + "QUEUE_BULK_TRANSMIT",
2397 + "QUEUE_BULK_RECEIVE",
2398 + "AWAIT_COMPLETION",
2399 + "DEQUEUE_MESSAGE",
2400 + "GET_CLIENT_ID",
2401 + "GET_CONFIG",
2402 + "CLOSE_SERVICE",
2403 + "USE_SERVICE",
2404 + "RELEASE_SERVICE",
2405 + "SET_SERVICE_OPTION",
2406 + "DUMP_PHYS_MEM"
2407 +};
2408 +
2409 +vchiq_static_assert((sizeof(ioctl_names)/sizeof(ioctl_names[0])) ==
2410 + (VCHIQ_IOC_MAX + 1));
2411 +
2412 +static void
2413 +dump_phys_mem(void *virt_addr, uint32_t num_bytes);
2414 +
2415 +/****************************************************************************
2416 +*
2417 +* add_completion
2418 +*
2419 +***************************************************************************/
2420 +
2421 +static VCHIQ_STATUS_T
2422 +add_completion(VCHIQ_INSTANCE_T instance, VCHIQ_REASON_T reason,
2423 + VCHIQ_HEADER_T *header, USER_SERVICE_T *user_service,
2424 + void *bulk_userdata)
2425 +{
2426 + VCHIQ_COMPLETION_DATA_T *completion;
2427 + DEBUG_INITIALISE(g_state.local)
2428 +
2429 + while (instance->completion_insert ==
2430 + (instance->completion_remove + MAX_COMPLETIONS)) {
2431 + /* Out of space - wait for the client */
2432 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2433 + vchiq_log_trace(vchiq_arm_log_level,
2434 + "add_completion - completion queue full");
2435 + DEBUG_COUNT(COMPLETION_QUEUE_FULL_COUNT);
2436 + if (down_interruptible(&instance->remove_event) != 0) {
2437 + vchiq_log_info(vchiq_arm_log_level,
2438 + "service_callback interrupted");
2439 + return VCHIQ_RETRY;
2440 + } else if (instance->closing) {
2441 + vchiq_log_info(vchiq_arm_log_level,
2442 + "service_callback closing");
2443 + return VCHIQ_ERROR;
2444 + }
2445 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2446 + }
2447 +
2448 + completion =
2449 + &instance->completions[instance->completion_insert &
2450 + (MAX_COMPLETIONS - 1)];
2451 +
2452 + completion->header = header;
2453 + completion->reason = reason;
2454 + /* N.B. service_userdata is updated while processing AWAIT_COMPLETION */
2455 + completion->service_userdata = user_service->service;
2456 + completion->bulk_userdata = bulk_userdata;
2457 +
2458 + if (reason == VCHIQ_SERVICE_CLOSED)
2459 + /* Take an extra reference, to be held until
2460 + this CLOSED notification is delivered. */
2461 + lock_service(user_service->service);
2462 +
2463 + /* A write barrier is needed here to ensure that the entire completion
2464 + record is written out before the insert point. */
2465 + wmb();
2466 +
2467 + if (reason == VCHIQ_MESSAGE_AVAILABLE)
2468 + user_service->message_available_pos =
2469 + instance->completion_insert;
2470 + instance->completion_insert++;
2471 +
2472 + up(&instance->insert_event);
2473 +
2474 + return VCHIQ_SUCCESS;
2475 +}
2476 +
2477 +/****************************************************************************
2478 +*
2479 +* service_callback
2480 +*
2481 +***************************************************************************/
2482 +
2483 +static VCHIQ_STATUS_T
2484 +service_callback(VCHIQ_REASON_T reason, VCHIQ_HEADER_T *header,
2485 + VCHIQ_SERVICE_HANDLE_T handle, void *bulk_userdata)
2486 +{
2487 + /* How do we ensure the callback goes to the right client?
2488 + ** The service's userdata points to a USER_SERVICE_T record containing
2489 + ** the original callback and the user state structure, which contains a
2490 + ** circular buffer for completion records.
2491 + */
2492 + USER_SERVICE_T *user_service;
2493 + VCHIQ_SERVICE_T *service;
2494 + VCHIQ_INSTANCE_T instance;
2495 + DEBUG_INITIALISE(g_state.local)
2496 +
2497 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2498 +
2499 + service = handle_to_service(handle);
2500 + BUG_ON(!service);
2501 + user_service = (USER_SERVICE_T *)service->base.userdata;
2502 + instance = user_service->instance;
2503 +
2504 + if (!instance || instance->closing)
2505 + return VCHIQ_SUCCESS;
2506 +
2507 + vchiq_log_trace(vchiq_arm_log_level,
2508 + "service_callback - service %lx(%d), reason %d, header %lx, "
2509 + "instance %lx, bulk_userdata %lx",
2510 + (unsigned long)user_service,
2511 + service->localport,
2512 + reason, (unsigned long)header,
2513 + (unsigned long)instance, (unsigned long)bulk_userdata);
2514 +
2515 + if (header && user_service->is_vchi) {
2516 + spin_lock(&msg_queue_spinlock);
2517 + while (user_service->msg_insert ==
2518 + (user_service->msg_remove + MSG_QUEUE_SIZE)) {
2519 + spin_unlock(&msg_queue_spinlock);
2520 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2521 + DEBUG_COUNT(MSG_QUEUE_FULL_COUNT);
2522 + vchiq_log_trace(vchiq_arm_log_level,
2523 + "service_callback - msg queue full");
2524 + /* If there is no MESSAGE_AVAILABLE in the completion
2525 + ** queue, add one
2526 + */
2527 + if ((user_service->message_available_pos -
2528 + instance->completion_remove) < 0) {
2529 + VCHIQ_STATUS_T status;
2530 + vchiq_log_info(vchiq_arm_log_level,
2531 + "Inserting extra MESSAGE_AVAILABLE");
2532 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2533 + status = add_completion(instance, reason,
2534 + NULL, user_service, bulk_userdata);
2535 + if (status != VCHIQ_SUCCESS) {
2536 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2537 + return status;
2538 + }
2539 + }
2540 +
2541 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2542 + if (down_interruptible(&user_service->remove_event)
2543 + != 0) {
2544 + vchiq_log_info(vchiq_arm_log_level,
2545 + "service_callback interrupted");
2546 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2547 + return VCHIQ_RETRY;
2548 + } else if (instance->closing) {
2549 + vchiq_log_info(vchiq_arm_log_level,
2550 + "service_callback closing");
2551 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2552 + return VCHIQ_ERROR;
2553 + }
2554 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2555 + spin_lock(&msg_queue_spinlock);
2556 + }
2557 +
2558 + user_service->msg_queue[user_service->msg_insert &
2559 + (MSG_QUEUE_SIZE - 1)] = header;
2560 + user_service->msg_insert++;
2561 + spin_unlock(&msg_queue_spinlock);
2562 +
2563 + up(&user_service->insert_event);
2564 +
2565 + /* If there is a thread waiting in DEQUEUE_MESSAGE, or if
2566 + ** there is a MESSAGE_AVAILABLE in the completion queue then
2567 + ** bypass the completion queue.
2568 + */
2569 + if (((user_service->message_available_pos -
2570 + instance->completion_remove) >= 0) ||
2571 + user_service->dequeue_pending) {
2572 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2573 + user_service->dequeue_pending = 0;
2574 + return VCHIQ_SUCCESS;
2575 + }
2576 +
2577 + header = NULL;
2578 + }
2579 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2580 +
2581 + return add_completion(instance, reason, header, user_service,
2582 + bulk_userdata);
2583 +}
2584 +
2585 +/****************************************************************************
2586 +*
2587 +* user_service_free
2588 +*
2589 +***************************************************************************/
2590 +static void
2591 +user_service_free(void *userdata)
2592 +{
2593 + kfree(userdata);
2594 +}
2595 +
2596 +/****************************************************************************
2597 +*
2598 +* vchiq_ioctl
2599 +*
2600 +***************************************************************************/
2601 +
2602 +static long
2603 +vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2604 +{
2605 + VCHIQ_INSTANCE_T instance = file->private_data;
2606 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
2607 + VCHIQ_SERVICE_T *service = NULL;
2608 + long ret = 0;
2609 + int i, rc;
2610 + DEBUG_INITIALISE(g_state.local)
2611 +
2612 + vchiq_log_trace(vchiq_arm_log_level,
2613 + "vchiq_ioctl - instance %x, cmd %s, arg %lx",
2614 + (unsigned int)instance,
2615 + ((_IOC_TYPE(cmd) == VCHIQ_IOC_MAGIC) &&
2616 + (_IOC_NR(cmd) <= VCHIQ_IOC_MAX)) ?
2617 + ioctl_names[_IOC_NR(cmd)] : "<invalid>", arg);
2618 +
2619 + switch (cmd) {
2620 + case VCHIQ_IOC_SHUTDOWN:
2621 + if (!instance->connected)
2622 + break;
2623 +
2624 + /* Remove all services */
2625 + i = 0;
2626 + while ((service = next_service_by_instance(instance->state,
2627 + instance, &i)) != NULL) {
2628 + status = vchiq_remove_service(service->handle);
2629 + unlock_service(service);
2630 + if (status != VCHIQ_SUCCESS)
2631 + break;
2632 + }
2633 + service = NULL;
2634 +
2635 + if (status == VCHIQ_SUCCESS) {
2636 + /* Wake the completion thread and ask it to exit */
2637 + instance->closing = 1;
2638 + up(&instance->insert_event);
2639 + }
2640 +
2641 + break;
2642 +
2643 + case VCHIQ_IOC_CONNECT:
2644 + if (instance->connected) {
2645 + ret = -EINVAL;
2646 + break;
2647 + }
2648 + rc = mutex_lock_interruptible(&instance->state->mutex);
2649 + if (rc != 0) {
2650 + vchiq_log_error(vchiq_arm_log_level,
2651 + "vchiq: connect: could not lock mutex for "
2652 + "state %d: %d",
2653 + instance->state->id, rc);
2654 + ret = -EINTR;
2655 + break;
2656 + }
2657 + status = vchiq_connect_internal(instance->state, instance);
2658 + mutex_unlock(&instance->state->mutex);
2659 +
2660 + if (status == VCHIQ_SUCCESS)
2661 + instance->connected = 1;
2662 + else
2663 + vchiq_log_error(vchiq_arm_log_level,
2664 + "vchiq: could not connect: %d", status);
2665 + break;
2666 +
2667 + case VCHIQ_IOC_CREATE_SERVICE: {
2668 + VCHIQ_CREATE_SERVICE_T args;
2669 + USER_SERVICE_T *user_service = NULL;
2670 + void *userdata;
2671 + int srvstate;
2672 +
2673 + if (copy_from_user
2674 + (&args, (const void __user *)arg,
2675 + sizeof(args)) != 0) {
2676 + ret = -EFAULT;
2677 + break;
2678 + }
2679 +
2680 + user_service = kmalloc(sizeof(USER_SERVICE_T), GFP_KERNEL);
2681 + if (!user_service) {
2682 + ret = -ENOMEM;
2683 + break;
2684 + }
2685 +
2686 + if (args.is_open) {
2687 + if (!instance->connected) {
2688 + ret = -ENOTCONN;
2689 + kfree(user_service);
2690 + break;
2691 + }
2692 + srvstate = VCHIQ_SRVSTATE_OPENING;
2693 + } else {
2694 + srvstate =
2695 + instance->connected ?
2696 + VCHIQ_SRVSTATE_LISTENING :
2697 + VCHIQ_SRVSTATE_HIDDEN;
2698 + }
2699 +
2700 + userdata = args.params.userdata;
2701 + args.params.callback = service_callback;
2702 + args.params.userdata = user_service;
2703 + service = vchiq_add_service_internal(
2704 + instance->state,
2705 + &args.params, srvstate,
2706 + instance, user_service_free);
2707 +
2708 + if (service != NULL) {
2709 + user_service->service = service;
2710 + user_service->userdata = userdata;
2711 + user_service->instance = instance;
2712 + user_service->is_vchi = args.is_vchi;
2713 + user_service->dequeue_pending = 0;
2714 + user_service->message_available_pos =
2715 + instance->completion_remove - 1;
2716 + user_service->msg_insert = 0;
2717 + user_service->msg_remove = 0;
2718 + sema_init(&user_service->insert_event, 0);
2719 + sema_init(&user_service->remove_event, 0);
2720 +
2721 + if (args.is_open) {
2722 + status = vchiq_open_service_internal
2723 + (service, instance->pid);
2724 + if (status != VCHIQ_SUCCESS) {
2725 + vchiq_remove_service(service->handle);
2726 + service = NULL;
2727 + ret = (status == VCHIQ_RETRY) ?
2728 + -EINTR : -EIO;
2729 + break;
2730 + }
2731 + }
2732 +
2733 + if (copy_to_user((void __user *)
2734 + &(((VCHIQ_CREATE_SERVICE_T __user *)
2735 + arg)->handle),
2736 + (const void *)&service->handle,
2737 + sizeof(service->handle)) != 0) {
2738 + ret = -EFAULT;
2739 + vchiq_remove_service(service->handle);
2740 + }
2741 +
2742 + service = NULL;
2743 + } else {
2744 + ret = -EEXIST;
2745 + kfree(user_service);
2746 + }
2747 + } break;
2748 +
2749 + case VCHIQ_IOC_CLOSE_SERVICE: {
2750 + VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
2751 +
2752 + service = find_service_for_instance(instance, handle);
2753 + if (service != NULL)
2754 + status = vchiq_close_service(service->handle);
2755 + else
2756 + ret = -EINVAL;
2757 + } break;
2758 +
2759 + case VCHIQ_IOC_REMOVE_SERVICE: {
2760 + VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
2761 +
2762 + service = find_service_for_instance(instance, handle);
2763 + if (service != NULL)
2764 + status = vchiq_remove_service(service->handle);
2765 + else
2766 + ret = -EINVAL;
2767 + } break;
2768 +
2769 + case VCHIQ_IOC_USE_SERVICE:
2770 + case VCHIQ_IOC_RELEASE_SERVICE: {
2771 + VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
2772 +
2773 + service = find_service_for_instance(instance, handle);
2774 + if (service != NULL) {
2775 + status = (cmd == VCHIQ_IOC_USE_SERVICE) ?
2776 + vchiq_use_service_internal(service) :
2777 + vchiq_release_service_internal(service);
2778 + if (status != VCHIQ_SUCCESS) {
2779 + vchiq_log_error(vchiq_susp_log_level,
2780 + "%s: cmd %s returned error %d for "
2781 + "service %c%c%c%c:%03d",
2782 + __func__,
2783 + (cmd == VCHIQ_IOC_USE_SERVICE) ?
2784 + "VCHIQ_IOC_USE_SERVICE" :
2785 + "VCHIQ_IOC_RELEASE_SERVICE",
2786 + status,
2787 + VCHIQ_FOURCC_AS_4CHARS(
2788 + service->base.fourcc),
2789 + service->client_id);
2790 + ret = -EINVAL;
2791 + }
2792 + } else
2793 + ret = -EINVAL;
2794 + } break;
2795 +
2796 + case VCHIQ_IOC_QUEUE_MESSAGE: {
2797 + VCHIQ_QUEUE_MESSAGE_T args;
2798 + if (copy_from_user
2799 + (&args, (const void __user *)arg,
2800 + sizeof(args)) != 0) {
2801 + ret = -EFAULT;
2802 + break;
2803 + }
2804 +
2805 + service = find_service_for_instance(instance, args.handle);
2806 +
2807 + if ((service != NULL) && (args.count <= MAX_ELEMENTS)) {
2808 + /* Copy elements into kernel space */
2809 + VCHIQ_ELEMENT_T elements[MAX_ELEMENTS];
2810 + if (copy_from_user(elements, args.elements,
2811 + args.count * sizeof(VCHIQ_ELEMENT_T)) == 0)
2812 + status = vchiq_queue_message
2813 + (args.handle,
2814 + elements, args.count);
2815 + else
2816 + ret = -EFAULT;
2817 + } else {
2818 + ret = -EINVAL;
2819 + }
2820 + } break;
2821 +
2822 + case VCHIQ_IOC_QUEUE_BULK_TRANSMIT:
2823 + case VCHIQ_IOC_QUEUE_BULK_RECEIVE: {
2824 + VCHIQ_QUEUE_BULK_TRANSFER_T args;
2825 + struct bulk_waiter_node *waiter = NULL;
2826 + VCHIQ_BULK_DIR_T dir =
2827 + (cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT) ?
2828 + VCHIQ_BULK_TRANSMIT : VCHIQ_BULK_RECEIVE;
2829 +
2830 + if (copy_from_user
2831 + (&args, (const void __user *)arg,
2832 + sizeof(args)) != 0) {
2833 + ret = -EFAULT;
2834 + break;
2835 + }
2836 +
2837 + service = find_service_for_instance(instance, args.handle);
2838 + if (!service) {
2839 + ret = -EINVAL;
2840 + break;
2841 + }
2842 +
2843 + if (args.mode == VCHIQ_BULK_MODE_BLOCKING) {
2844 + waiter = kzalloc(sizeof(struct bulk_waiter_node),
2845 + GFP_KERNEL);
2846 + if (!waiter) {
2847 + ret = -ENOMEM;
2848 + break;
2849 + }
2850 + args.userdata = &waiter->bulk_waiter;
2851 + } else if (args.mode == VCHIQ_BULK_MODE_WAITING) {
2852 + struct list_head *pos;
2853 + mutex_lock(&instance->bulk_waiter_list_mutex);
2854 + list_for_each(pos, &instance->bulk_waiter_list) {
2855 + if (list_entry(pos, struct bulk_waiter_node,
2856 + list)->pid == current->pid) {
2857 + waiter = list_entry(pos,
2858 + struct bulk_waiter_node,
2859 + list);
2860 + list_del(pos);
2861 + break;
2862 + }
2863 +
2864 + }
2865 + mutex_unlock(&instance->bulk_waiter_list_mutex);
2866 + if (!waiter) {
2867 + vchiq_log_error(vchiq_arm_log_level,
2868 + "no bulk_waiter found for pid %d",
2869 + current->pid);
2870 + ret = -ESRCH;
2871 + break;
2872 + }
2873 + vchiq_log_info(vchiq_arm_log_level,
2874 + "found bulk_waiter %x for pid %d",
2875 + (unsigned int)waiter, current->pid);
2876 + args.userdata = &waiter->bulk_waiter;
2877 + }
2878 + status = vchiq_bulk_transfer
2879 + (args.handle,
2880 + VCHI_MEM_HANDLE_INVALID,
2881 + args.data, args.size,
2882 + args.userdata, args.mode,
2883 + dir);
2884 + if (!waiter)
2885 + break;
2886 + if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) ||
2887 + !waiter->bulk_waiter.bulk) {
2888 + if (waiter->bulk_waiter.bulk) {
2889 + /* Cancel the signal when the transfer
2890 + ** completes. */
2891 + spin_lock(&bulk_waiter_spinlock);
2892 + waiter->bulk_waiter.bulk->userdata = NULL;
2893 + spin_unlock(&bulk_waiter_spinlock);
2894 + }
2895 + kfree(waiter);
2896 + } else {
2897 + const VCHIQ_BULK_MODE_T mode_waiting =
2898 + VCHIQ_BULK_MODE_WAITING;
2899 + waiter->pid = current->pid;
2900 + mutex_lock(&instance->bulk_waiter_list_mutex);
2901 + list_add(&waiter->list, &instance->bulk_waiter_list);
2902 + mutex_unlock(&instance->bulk_waiter_list_mutex);
2903 + vchiq_log_info(vchiq_arm_log_level,
2904 + "saved bulk_waiter %x for pid %d",
2905 + (unsigned int)waiter, current->pid);
2906 +
2907 + if (copy_to_user((void __user *)
2908 + &(((VCHIQ_QUEUE_BULK_TRANSFER_T __user *)
2909 + arg)->mode),
2910 + (const void *)&mode_waiting,
2911 + sizeof(mode_waiting)) != 0)
2912 + ret = -EFAULT;
2913 + }
2914 + } break;
2915 +
2916 + case VCHIQ_IOC_AWAIT_COMPLETION: {
2917 + VCHIQ_AWAIT_COMPLETION_T args;
2918 +
2919 + DEBUG_TRACE(AWAIT_COMPLETION_LINE);
2920 + if (!instance->connected) {
2921 + ret = -ENOTCONN;
2922 + break;
2923 + }
2924 +
2925 + if (copy_from_user(&args, (const void __user *)arg,
2926 + sizeof(args)) != 0) {
2927 + ret = -EFAULT;
2928 + break;
2929 + }
2930 +
2931 + mutex_lock(&instance->completion_mutex);
2932 +
2933 + DEBUG_TRACE(AWAIT_COMPLETION_LINE);
2934 + while ((instance->completion_remove ==
2935 + instance->completion_insert)
2936 + && !instance->closing) {
2937 + int rc;
2938 + DEBUG_TRACE(AWAIT_COMPLETION_LINE);
2939 + mutex_unlock(&instance->completion_mutex);
2940 + rc = down_interruptible(&instance->insert_event);
2941 + mutex_lock(&instance->completion_mutex);
2942 + if (rc != 0) {
2943 + DEBUG_TRACE(AWAIT_COMPLETION_LINE);
2944 + vchiq_log_info(vchiq_arm_log_level,
2945 + "AWAIT_COMPLETION interrupted");
2946 + ret = -EINTR;
2947 + break;
2948 + }
2949 + }
2950 + DEBUG_TRACE(AWAIT_COMPLETION_LINE);
2951 +
2952 + /* A read memory barrier is needed to stop prefetch of a stale
2953 + ** completion record
2954 + */
2955 + rmb();
2956 +
2957 + if (ret == 0) {
2958 + int msgbufcount = args.msgbufcount;
2959 + for (ret = 0; ret < args.count; ret++) {
2960 + VCHIQ_COMPLETION_DATA_T *completion;
2961 + VCHIQ_SERVICE_T *service;
2962 + USER_SERVICE_T *user_service;
2963 + VCHIQ_HEADER_T *header;
2964 + if (instance->completion_remove ==
2965 + instance->completion_insert)
2966 + break;
2967 + completion = &instance->completions[
2968 + instance->completion_remove &
2969 + (MAX_COMPLETIONS - 1)];
2970 +
2971 + service = completion->service_userdata;
2972 + user_service = service->base.userdata;
2973 + completion->service_userdata =
2974 + user_service->userdata;
2975 +
2976 + header = completion->header;
2977 + if (header) {
2978 + void __user *msgbuf;
2979 + int msglen;
2980 +
2981 + msglen = header->size +
2982 + sizeof(VCHIQ_HEADER_T);
2983 + /* This must be a VCHIQ-style service */
2984 + if (args.msgbufsize < msglen) {
2985 + vchiq_log_error(
2986 + vchiq_arm_log_level,
2987 + "header %x: msgbufsize"
2988 + " %x < msglen %x",
2989 + (unsigned int)header,
2990 + args.msgbufsize,
2991 + msglen);
2992 + WARN(1, "invalid message "
2993 + "size\n");
2994 + if (ret == 0)
2995 + ret = -EMSGSIZE;
2996 + break;
2997 + }
2998 + if (msgbufcount <= 0)
2999 + /* Stall here for lack of a
3000 + ** buffer for the message. */
3001 + break;
3002 + /* Get the pointer from user space */
3003 + msgbufcount--;
3004 + if (copy_from_user(&msgbuf,
3005 + (const void __user *)
3006 + &args.msgbufs[msgbufcount],
3007 + sizeof(msgbuf)) != 0) {
3008 + if (ret == 0)
3009 + ret = -EFAULT;
3010 + break;
3011 + }
3012 +
3013 + /* Copy the message to user space */
3014 + if (copy_to_user(msgbuf, header,
3015 + msglen) != 0) {
3016 + if (ret == 0)
3017 + ret = -EFAULT;
3018 + break;
3019 + }
3020 +
3021 + /* Now it has been copied, the message
3022 + ** can be released. */
3023 + vchiq_release_message(service->handle,
3024 + header);
3025 +
3026 + /* The completion must point to the
3027 + ** msgbuf. */
3028 + completion->header = msgbuf;
3029 + }
3030 +
3031 + if (completion->reason ==
3032 + VCHIQ_SERVICE_CLOSED)
3033 + unlock_service(service);
3034 +
3035 + if (copy_to_user((void __user *)(
3036 + (size_t)args.buf +
3037 + ret * sizeof(VCHIQ_COMPLETION_DATA_T)),
3038 + completion,
3039 + sizeof(VCHIQ_COMPLETION_DATA_T)) != 0) {
3040 + if (ret == 0)
3041 + ret = -EFAULT;
3042 + break;
3043 + }
3044 +
3045 + instance->completion_remove++;
3046 + }
3047 +
3048 + if (msgbufcount != args.msgbufcount) {
3049 + if (copy_to_user((void __user *)
3050 + &((VCHIQ_AWAIT_COMPLETION_T *)arg)->
3051 + msgbufcount,
3052 + &msgbufcount,
3053 + sizeof(msgbufcount)) != 0) {
3054 + ret = -EFAULT;
3055 + }
3056 + }
3057 + }
3058 +
3059 + if (ret != 0)
3060 + up(&instance->remove_event);
3061 + mutex_unlock(&instance->completion_mutex);
3062 + DEBUG_TRACE(AWAIT_COMPLETION_LINE);
3063 + } break;
3064 +
3065 + case VCHIQ_IOC_DEQUEUE_MESSAGE: {
3066 + VCHIQ_DEQUEUE_MESSAGE_T args;
3067 + USER_SERVICE_T *user_service;
3068 + VCHIQ_HEADER_T *header;
3069 +
3070 + DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
3071 + if (copy_from_user
3072 + (&args, (const void __user *)arg,
3073 + sizeof(args)) != 0) {
3074 + ret = -EFAULT;
3075 + break;
3076 + }
3077 + service = find_service_for_instance(instance, args.handle);
3078 + if (!service) {
3079 + ret = -EINVAL;
3080 + break;
3081 + }
3082 + user_service = (USER_SERVICE_T *)service->base.userdata;
3083 + if (user_service->is_vchi == 0) {
3084 + ret = -EINVAL;
3085 + break;
3086 + }
3087 +
3088 + spin_lock(&msg_queue_spinlock);
3089 + if (user_service->msg_remove == user_service->msg_insert) {
3090 + if (!args.blocking) {
3091 + spin_unlock(&msg_queue_spinlock);
3092 + DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
3093 + ret = -EWOULDBLOCK;
3094 + break;
3095 + }
3096 + user_service->dequeue_pending = 1;
3097 + do {
3098 + spin_unlock(&msg_queue_spinlock);
3099 + DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
3100 + if (down_interruptible(
3101 + &user_service->insert_event) != 0) {
3102 + vchiq_log_info(vchiq_arm_log_level,
3103 + "DEQUEUE_MESSAGE interrupted");
3104 + ret = -EINTR;
3105 + break;
3106 + }
3107 + spin_lock(&msg_queue_spinlock);
3108 + } while (user_service->msg_remove ==
3109 + user_service->msg_insert);
3110 +
3111 + if (ret)
3112 + break;
3113 + }
3114 +
3115 + BUG_ON((int)(user_service->msg_insert -
3116 + user_service->msg_remove) < 0);
3117 +
3118 + header = user_service->msg_queue[user_service->msg_remove &
3119 + (MSG_QUEUE_SIZE - 1)];
3120 + user_service->msg_remove++;
3121 + spin_unlock(&msg_queue_spinlock);
3122 +
3123 + up(&user_service->remove_event);
3124 + if (header == NULL)
3125 + ret = -ENOTCONN;
3126 + else if (header->size <= args.bufsize) {
3127 +		/* Copy to user space if args.buf is not NULL */
3128 + if ((args.buf == NULL) ||
3129 + (copy_to_user((void __user *)args.buf,
3130 + header->data,
3131 + header->size) == 0)) {
3132 + ret = header->size;
3133 + vchiq_release_message(
3134 + service->handle,
3135 + header);
3136 + } else
3137 + ret = -EFAULT;
3138 + } else {
3139 + vchiq_log_error(vchiq_arm_log_level,
3140 + "header %x: bufsize %x < size %x",
3141 + (unsigned int)header, args.bufsize,
3142 + header->size);
3143 + WARN(1, "invalid size\n");
3144 + ret = -EMSGSIZE;
3145 + }
3146 + DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
3147 + } break;
3148 +
3149 + case VCHIQ_IOC_GET_CLIENT_ID: {
3150 + VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
3151 +
3152 + ret = vchiq_get_client_id(handle);
3153 + } break;
3154 +
3155 + case VCHIQ_IOC_GET_CONFIG: {
3156 + VCHIQ_GET_CONFIG_T args;
3157 + VCHIQ_CONFIG_T config;
3158 +
3159 + if (copy_from_user(&args, (const void __user *)arg,
3160 + sizeof(args)) != 0) {
3161 + ret = -EFAULT;
3162 + break;
3163 + }
3164 + if (args.config_size > sizeof(config)) {
3165 + ret = -EINVAL;
3166 + break;
3167 + }
3168 + status = vchiq_get_config(instance, args.config_size, &config);
3169 + if (status == VCHIQ_SUCCESS) {
3170 + if (copy_to_user((void __user *)args.pconfig,
3171 + &config, args.config_size) != 0) {
3172 + ret = -EFAULT;
3173 + break;
3174 + }
3175 + }
3176 + } break;
3177 +
3178 + case VCHIQ_IOC_SET_SERVICE_OPTION: {
3179 + VCHIQ_SET_SERVICE_OPTION_T args;
3180 +
3181 + if (copy_from_user(
3182 + &args, (const void __user *)arg,
3183 + sizeof(args)) != 0) {
3184 + ret = -EFAULT;
3185 + break;
3186 + }
3187 +
3188 + service = find_service_for_instance(instance, args.handle);
3189 + if (!service) {
3190 + ret = -EINVAL;
3191 + break;
3192 + }
3193 +
3194 + status = vchiq_set_service_option(
3195 + args.handle, args.option, args.value);
3196 + } break;
3197 +
3198 + case VCHIQ_IOC_DUMP_PHYS_MEM: {
3199 + VCHIQ_DUMP_MEM_T args;
3200 +
3201 + if (copy_from_user
3202 + (&args, (const void __user *)arg,
3203 + sizeof(args)) != 0) {
3204 + ret = -EFAULT;
3205 + break;
3206 + }
3207 + dump_phys_mem(args.virt_addr, args.num_bytes);
3208 + } break;
3209 +
3210 + default:
3211 + ret = -ENOTTY;
3212 + break;
3213 + }
3214 +
3215 + if (service)
3216 + unlock_service(service);
3217 +
3218 + if (ret == 0) {
3219 + if (status == VCHIQ_ERROR)
3220 + ret = -EIO;
3221 + else if (status == VCHIQ_RETRY)
3222 + ret = -EINTR;
3223 + }
3224 +
3225 + if ((status == VCHIQ_SUCCESS) && (ret < 0) && (ret != -EINTR) &&
3226 + (ret != -EWOULDBLOCK))
3227 + vchiq_log_info(vchiq_arm_log_level,
3228 + " ioctl instance %lx, cmd %s -> status %d, %ld",
3229 + (unsigned long)instance,
3230 + (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
3231 + ioctl_names[_IOC_NR(cmd)] :
3232 + "<invalid>",
3233 + status, ret);
3234 + else
3235 + vchiq_log_trace(vchiq_arm_log_level,
3236 + " ioctl instance %lx, cmd %s -> status %d, %ld",
3237 + (unsigned long)instance,
3238 + (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
3239 + ioctl_names[_IOC_NR(cmd)] :
3240 + "<invalid>",
3241 + status, ret);
3242 +
3243 + return ret;
3244 +}
3245 +
3246 +/****************************************************************************
3247 +*
3248 +* vchiq_open
3249 +*
3250 +***************************************************************************/
3251 +
3252 +static int
3253 +vchiq_open(struct inode *inode, struct file *file)
3254 +{
3255 + int dev = iminor(inode) & 0x0f;
3256 + vchiq_log_info(vchiq_arm_log_level, "vchiq_open");
3257 + switch (dev) {
3258 + case VCHIQ_MINOR: {
3259 + int ret;
3260 + VCHIQ_STATE_T *state = vchiq_get_state();
3261 + VCHIQ_INSTANCE_T instance;
3262 +
3263 + if (!state) {
3264 + vchiq_log_error(vchiq_arm_log_level,
3265 + "vchiq has no connection to VideoCore");
3266 + return -ENOTCONN;
3267 + }
3268 +
3269 + instance = kzalloc(sizeof(*instance), GFP_KERNEL);
3270 + if (!instance)
3271 + return -ENOMEM;
3272 +
3273 + instance->state = state;
3274 + instance->pid = current->tgid;
3275 +
3276 + ret = vchiq_proc_add_instance(instance);
3277 + if (ret != 0) {
3278 + kfree(instance);
3279 + return ret;
3280 + }
3281 +
3282 + sema_init(&instance->insert_event, 0);
3283 + sema_init(&instance->remove_event, 0);
3284 + mutex_init(&instance->completion_mutex);
3285 + mutex_init(&instance->bulk_waiter_list_mutex);
3286 + INIT_LIST_HEAD(&instance->bulk_waiter_list);
3287 +
3288 + file->private_data = instance;
3289 + } break;
3290 +
3291 + default:
3292 + vchiq_log_error(vchiq_arm_log_level,
3293 + "Unknown minor device: %d", dev);
3294 + return -ENXIO;
3295 + }
3296 +
3297 + return 0;
3298 +}
3299 +
3300 +/****************************************************************************
3301 +*
3302 +* vchiq_release
3303 +*
3304 +***************************************************************************/
3305 +
3306 +static int
3307 +vchiq_release(struct inode *inode, struct file *file)
3308 +{
3309 + int dev = iminor(inode) & 0x0f;
3310 + int ret = 0;
3311 + switch (dev) {
3312 + case VCHIQ_MINOR: {
3313 + VCHIQ_INSTANCE_T instance = file->private_data;
3314 + VCHIQ_STATE_T *state = vchiq_get_state();
3315 + VCHIQ_SERVICE_T *service;
3316 + int i;
3317 +
3318 + vchiq_log_info(vchiq_arm_log_level,
3319 + "vchiq_release: instance=%lx",
3320 + (unsigned long)instance);
3321 +
3322 + if (!state) {
3323 + ret = -EPERM;
3324 + goto out;
3325 + }
3326 +
3327 + /* Ensure videocore is awake to allow termination. */
3328 + vchiq_use_internal(instance->state, NULL,
3329 + USE_TYPE_VCHIQ);
3330 +
3331 + mutex_lock(&instance->completion_mutex);
3332 +
3333 + /* Wake the completion thread and ask it to exit */
3334 + instance->closing = 1;
3335 + up(&instance->insert_event);
3336 +
3337 + mutex_unlock(&instance->completion_mutex);
3338 +
3339 + /* Wake the slot handler if the completion queue is full. */
3340 + up(&instance->remove_event);
3341 +
3342 + /* Mark all services for termination... */
3343 + i = 0;
3344 + while ((service = next_service_by_instance(state, instance,
3345 + &i)) != NULL) {
3346 + USER_SERVICE_T *user_service = service->base.userdata;
3347 +
3348 + /* Wake the slot handler if the msg queue is full. */
3349 + up(&user_service->remove_event);
3350 +
3351 + vchiq_terminate_service_internal(service);
3352 + unlock_service(service);
3353 + }
3354 +
3355 + /* ...and wait for them to die */
3356 + i = 0;
3357 + while ((service = next_service_by_instance(state, instance, &i))
3358 + != NULL) {
3359 + USER_SERVICE_T *user_service = service->base.userdata;
3360 +
3361 + down(&service->remove_event);
3362 +
3363 + BUG_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);
3364 +
3365 + spin_lock(&msg_queue_spinlock);
3366 +
3367 + while (user_service->msg_remove !=
3368 + user_service->msg_insert) {
3369 + VCHIQ_HEADER_T *header = user_service->
3370 + msg_queue[user_service->msg_remove &
3371 + (MSG_QUEUE_SIZE - 1)];
3372 + user_service->msg_remove++;
3373 + spin_unlock(&msg_queue_spinlock);
3374 +
3375 + if (header)
3376 + vchiq_release_message(
3377 + service->handle,
3378 + header);
3379 + spin_lock(&msg_queue_spinlock);
3380 + }
3381 +
3382 + spin_unlock(&msg_queue_spinlock);
3383 +
3384 + unlock_service(service);
3385 + }
3386 +
3387 + /* Release any closed services */
3388 + while (instance->completion_remove !=
3389 + instance->completion_insert) {
3390 + VCHIQ_COMPLETION_DATA_T *completion;
3391 + VCHIQ_SERVICE_T *service;
3392 + completion = &instance->completions[
3393 + instance->completion_remove &
3394 + (MAX_COMPLETIONS - 1)];
3395 + service = completion->service_userdata;
3396 + if (completion->reason == VCHIQ_SERVICE_CLOSED)
3397 + unlock_service(service);
3398 + instance->completion_remove++;
3399 + }
3400 +
3401 + /* Release the PEER service count. */
3402 + vchiq_release_internal(instance->state, NULL);
3403 +
3404 + {
3405 + struct list_head *pos, *next;
3406 + list_for_each_safe(pos, next,
3407 + &instance->bulk_waiter_list) {
3408 + struct bulk_waiter_node *waiter;
3409 + waiter = list_entry(pos,
3410 + struct bulk_waiter_node,
3411 + list);
3412 + list_del(pos);
3413 + vchiq_log_info(vchiq_arm_log_level,
3414 + "bulk_waiter - cleaned up %x "
3415 + "for pid %d",
3416 + (unsigned int)waiter, waiter->pid);
3417 + kfree(waiter);
3418 + }
3419 + }
3420 +
3421 + vchiq_proc_remove_instance(instance);
3422 +
3423 + kfree(instance);
3424 + file->private_data = NULL;
3425 + } break;
3426 +
3427 + default:
3428 + vchiq_log_error(vchiq_arm_log_level,
3429 + "Unknown minor device: %d", dev);
3430 + ret = -ENXIO;
3431 + }
3432 +
3433 +out:
3434 + return ret;
3435 +}
3436 +
3437 +/****************************************************************************
3438 +*
3439 +* vchiq_dump
3440 +*
3441 +***************************************************************************/
3442 +
3443 +void
3444 +vchiq_dump(void *dump_context, const char *str, int len)
3445 +{
3446 + DUMP_CONTEXT_T *context = (DUMP_CONTEXT_T *)dump_context;
3447 +
3448 + if (context->actual < context->space) {
3449 + int copy_bytes;
3450 + if (context->offset > 0) {
3451 + int skip_bytes = min(len, (int)context->offset);
3452 + str += skip_bytes;
3453 + len -= skip_bytes;
3454 + context->offset -= skip_bytes;
3455 + if (context->offset > 0)
3456 + return;
3457 + }
3458 + copy_bytes = min(len, (int)(context->space - context->actual));
3459 + if (copy_bytes == 0)
3460 + return;
3461 + if (copy_to_user(context->buf + context->actual, str,
3462 + copy_bytes))
3463 + context->actual = -EFAULT;
3464 + context->actual += copy_bytes;
3465 + len -= copy_bytes;
3466 +
3467 +		/* If the terminating NUL is included in the length, then it
3468 + ** marks the end of a line and should be replaced with a
3469 + ** carriage return. */
3470 + if ((len == 0) && (str[copy_bytes - 1] == '\0')) {
3471 + char cr = '\n';
3472 + if (copy_to_user(context->buf + context->actual - 1,
3473 + &cr, 1))
3474 + context->actual = -EFAULT;
3475 + }
3476 + }
3477 +}
3478 +
3479 +/****************************************************************************
3480 +*
3481 +*   vchiq_dump_platform_instances
3482 +*
3483 +***************************************************************************/
3484 +
3485 +void
3486 +vchiq_dump_platform_instances(void *dump_context)
3487 +{
3488 + VCHIQ_STATE_T *state = vchiq_get_state();
3489 + char buf[80];
3490 + int len;
3491 + int i;
3492 +
3493 + /* There is no list of instances, so instead scan all services,
3494 + marking those that have been dumped. */
3495 +
3496 + for (i = 0; i < state->unused_service; i++) {
3497 + VCHIQ_SERVICE_T *service = state->services[i];
3498 + VCHIQ_INSTANCE_T instance;
3499 +
3500 + if (service && (service->base.callback == service_callback)) {
3501 + instance = service->instance;
3502 + if (instance)
3503 + instance->mark = 0;
3504 + }
3505 + }
3506 +
3507 + for (i = 0; i < state->unused_service; i++) {
3508 + VCHIQ_SERVICE_T *service = state->services[i];
3509 + VCHIQ_INSTANCE_T instance;
3510 +
3511 + if (service && (service->base.callback == service_callback)) {
3512 + instance = service->instance;
3513 + if (instance && !instance->mark) {
3514 + len = snprintf(buf, sizeof(buf),
3515 + "Instance %x: pid %d,%s completions "
3516 + "%d/%d",
3517 + (unsigned int)instance, instance->pid,
3518 + instance->connected ? " connected, " :
3519 + "",
3520 + instance->completion_insert -
3521 + instance->completion_remove,
3522 + MAX_COMPLETIONS);
3523 +
3524 + vchiq_dump(dump_context, buf, len + 1);
3525 +
3526 + instance->mark = 1;
3527 + }
3528 + }
3529 + }
3530 +}
3531 +
3532 +/****************************************************************************
3533 +*
3534 +* vchiq_dump_platform_service_state
3535 +*
3536 +***************************************************************************/
3537 +
3538 +void
3539 +vchiq_dump_platform_service_state(void *dump_context, VCHIQ_SERVICE_T *service)
3540 +{
3541 + USER_SERVICE_T *user_service = (USER_SERVICE_T *)service->base.userdata;
3542 + char buf[80];
3543 + int len;
3544 +
3545 + len = snprintf(buf, sizeof(buf), " instance %x",
3546 + (unsigned int)service->instance);
3547 +
3548 + if ((service->base.callback == service_callback) &&
3549 + user_service->is_vchi) {
3550 + len += snprintf(buf + len, sizeof(buf) - len,
3551 + ", %d/%d messages",
3552 + user_service->msg_insert - user_service->msg_remove,
3553 + MSG_QUEUE_SIZE);
3554 +
3555 + if (user_service->dequeue_pending)
3556 + len += snprintf(buf + len, sizeof(buf) - len,
3557 + " (dequeue pending)");
3558 + }
3559 +
3560 + vchiq_dump(dump_context, buf, len + 1);
3561 +}
3562 +
3563 +/****************************************************************************
3564 +*
3565 +*   dump_phys_mem
3566 +*
3567 +***************************************************************************/
3568 +
3569 +static void
3570 +dump_phys_mem(void *virt_addr, uint32_t num_bytes)
3571 +{
3572 + int rc;
3573 + uint8_t *end_virt_addr = virt_addr + num_bytes;
3574 + int num_pages;
3575 + int offset;
3576 + int end_offset;
3577 + int page_idx;
3578 + int prev_idx;
3579 + struct page *page;
3580 + struct page **pages;
3581 + uint8_t *kmapped_virt_ptr;
3582 +
3583 +	/* Align virt_addr and end_virt_addr to 16-byte boundaries. */
3584 +
3585 + virt_addr = (void *)((unsigned long)virt_addr & ~0x0fuL);
3586 + end_virt_addr = (void *)(((unsigned long)end_virt_addr + 15uL) &
3587 + ~0x0fuL);
3588 +
3589 + offset = (int)(long)virt_addr & (PAGE_SIZE - 1);
3590 + end_offset = (int)(long)end_virt_addr & (PAGE_SIZE - 1);
3591 +
3592 + num_pages = (offset + num_bytes + PAGE_SIZE - 1) / PAGE_SIZE;
3593 +
3594 + pages = kmalloc(sizeof(struct page *) * num_pages, GFP_KERNEL);
3595 + if (pages == NULL) {
3596 + vchiq_log_error(vchiq_arm_log_level,
3597 +			"Unable to allocate memory for %d pages\n",
3598 + num_pages);
3599 + return;
3600 + }
3601 +
3602 + down_read(&current->mm->mmap_sem);
3603 + rc = get_user_pages(current, /* task */
3604 + current->mm, /* mm */
3605 + (unsigned long)virt_addr, /* start */
3606 + num_pages, /* len */
3607 + 0, /* write */
3608 + 0, /* force */
3609 + pages, /* pages (array of page pointers) */
3610 + NULL); /* vmas */
3611 + up_read(&current->mm->mmap_sem);
3612 +
3613 + prev_idx = -1;
3614 + page = NULL;
3615 +
3616 + while (offset < end_offset) {
3617 +
3618 + int page_offset = offset % PAGE_SIZE;
3619 + page_idx = offset / PAGE_SIZE;
3620 +
3621 + if (page_idx != prev_idx) {
3622 +
3623 + if (page != NULL)
3624 + kunmap(page);
3625 + page = pages[page_idx];
3626 + kmapped_virt_ptr = kmap(page);
3627 +
3628 + prev_idx = page_idx;
3629 + }
3630 +
3631 + if (vchiq_arm_log_level >= VCHIQ_LOG_TRACE)
3632 + vchiq_log_dump_mem("ph",
3633 + (uint32_t)(unsigned long)&kmapped_virt_ptr[
3634 + page_offset],
3635 + &kmapped_virt_ptr[page_offset], 16);
3636 +
3637 + offset += 16;
3638 + }
3639 + if (page != NULL)
3640 + kunmap(page);
3641 +
3642 + for (page_idx = 0; page_idx < num_pages; page_idx++)
3643 + page_cache_release(pages[page_idx]);
3644 +
3645 + kfree(pages);
3646 +}
3647 +
3648 +/****************************************************************************
3649 +*
3650 +* vchiq_read
3651 +*
3652 +***************************************************************************/
3653 +
3654 +static ssize_t
3655 +vchiq_read(struct file *file, char __user *buf,
3656 + size_t count, loff_t *ppos)
3657 +{
3658 + DUMP_CONTEXT_T context;
3659 + context.buf = buf;
3660 + context.actual = 0;
3661 + context.space = count;
3662 + context.offset = *ppos;
3663 +
3664 + vchiq_dump_state(&context, &g_state);
3665 +
3666 + *ppos += context.actual;
3667 +
3668 + return context.actual;
3669 +}
3670 +
3671 +VCHIQ_STATE_T *
3672 +vchiq_get_state(void)
3673 +{
3674 +
3675 + if (g_state.remote == NULL)
3676 + printk(KERN_ERR "%s: g_state.remote == NULL\n", __func__);
3677 + else if (g_state.remote->initialised != 1)
3678 + printk(KERN_NOTICE "%s: g_state.remote->initialised != 1 (%d)\n",
3679 + __func__, g_state.remote->initialised);
3680 +
3681 + return ((g_state.remote != NULL) &&
3682 + (g_state.remote->initialised == 1)) ? &g_state : NULL;
3683 +}
3684 +
3685 +static const struct file_operations
3686 +vchiq_fops = {
3687 + .owner = THIS_MODULE,
3688 + .unlocked_ioctl = vchiq_ioctl,
3689 + .open = vchiq_open,
3690 + .release = vchiq_release,
3691 + .read = vchiq_read
3692 +};
3693 +
3694 +/*
3695 + * Autosuspend related functionality
3696 + */
3697 +
3698 +int
3699 +vchiq_videocore_wanted(VCHIQ_STATE_T *state)
3700 +{
3701 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
3702 + if (!arm_state)
3703 + /* autosuspend not supported - always return wanted */
3704 + return 1;
3705 + else if (arm_state->blocked_count)
3706 + return 1;
3707 + else if (!arm_state->videocore_use_count)
3708 + /* usage count zero - check for override unless we're forcing */
3709 + if (arm_state->resume_blocked)
3710 + return 0;
3711 + else
3712 + return vchiq_platform_videocore_wanted(state);
3713 + else
3714 + /* non-zero usage count - videocore still required */
3715 + return 1;
3716 +}
3717 +
3718 +static VCHIQ_STATUS_T
3719 +vchiq_keepalive_vchiq_callback(VCHIQ_REASON_T reason,
3720 + VCHIQ_HEADER_T *header,
3721 + VCHIQ_SERVICE_HANDLE_T service_user,
3722 + void *bulk_user)
3723 +{
3724 + vchiq_log_error(vchiq_susp_log_level,
3725 + "%s callback reason %d", __func__, reason);
3726 + return 0;
3727 +}
3728 +
3729 +static int
3730 +vchiq_keepalive_thread_func(void *v)
3731 +{
3732 + VCHIQ_STATE_T *state = (VCHIQ_STATE_T *) v;
3733 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
3734 +
3735 + VCHIQ_STATUS_T status;
3736 + VCHIQ_INSTANCE_T instance;
3737 + VCHIQ_SERVICE_HANDLE_T ka_handle;
3738 +
3739 + VCHIQ_SERVICE_PARAMS_T params = {
3740 + .fourcc = VCHIQ_MAKE_FOURCC('K', 'E', 'E', 'P'),
3741 + .callback = vchiq_keepalive_vchiq_callback,
3742 + .version = KEEPALIVE_VER,
3743 + .version_min = KEEPALIVE_VER_MIN
3744 + };
3745 +
3746 + status = vchiq_initialise(&instance);
3747 + if (status != VCHIQ_SUCCESS) {
3748 + vchiq_log_error(vchiq_susp_log_level,
3749 + "%s vchiq_initialise failed %d", __func__, status);
3750 + goto exit;
3751 + }
3752 +
3753 + status = vchiq_connect(instance);
3754 + if (status != VCHIQ_SUCCESS) {
3755 + vchiq_log_error(vchiq_susp_log_level,
3756 + "%s vchiq_connect failed %d", __func__, status);
3757 + goto shutdown;
3758 + }
3759 +
3760 + status = vchiq_add_service(instance, &params, &ka_handle);
3761 + if (status != VCHIQ_SUCCESS) {
3762 + vchiq_log_error(vchiq_susp_log_level,
3763 + "%s vchiq_open_service failed %d", __func__, status);
3764 + goto shutdown;
3765 + }
3766 +
3767 + while (1) {
3768 + long rc = 0, uc = 0;
3769 + if (wait_for_completion_interruptible(&arm_state->ka_evt)
3770 + != 0) {
3771 + vchiq_log_error(vchiq_susp_log_level,
3772 + "%s interrupted", __func__);
3773 + flush_signals(current);
3774 + continue;
3775 + }
3776 +
3777 + /* read and clear counters. Do release_count then use_count to
3778 + * prevent getting more releases than uses */
3779 + rc = atomic_xchg(&arm_state->ka_release_count, 0);
3780 + uc = atomic_xchg(&arm_state->ka_use_count, 0);
3781 +
3782 + /* Call use/release service the requisite number of times.
3783 + * Process use before release so use counts don't go negative */
3784 + while (uc--) {
3785 + atomic_inc(&arm_state->ka_use_ack_count);
3786 + status = vchiq_use_service(ka_handle);
3787 + if (status != VCHIQ_SUCCESS) {
3788 + vchiq_log_error(vchiq_susp_log_level,
3789 + "%s vchiq_use_service error %d",
3790 + __func__, status);
3791 + }
3792 + }
3793 + while (rc--) {
3794 + status = vchiq_release_service(ka_handle);
3795 + if (status != VCHIQ_SUCCESS) {
3796 + vchiq_log_error(vchiq_susp_log_level,
3797 + "%s vchiq_release_service error %d",
3798 + __func__, status);
3799 + }
3800 + }
3801 + }
3802 +
3803 +shutdown:
3804 + vchiq_shutdown(instance);
3805 +exit:
3806 + return 0;
3807 +}
3808 +
3809 +
3810 +
3811 +VCHIQ_STATUS_T
3812 +vchiq_arm_init_state(VCHIQ_STATE_T *state, VCHIQ_ARM_STATE_T *arm_state)
3813 +{
3814 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
3815 +
3816 + if (arm_state) {
3817 + rwlock_init(&arm_state->susp_res_lock);
3818 +
3819 + init_completion(&arm_state->ka_evt);
3820 + atomic_set(&arm_state->ka_use_count, 0);
3821 + atomic_set(&arm_state->ka_use_ack_count, 0);
3822 + atomic_set(&arm_state->ka_release_count, 0);
3823 +
3824 + init_completion(&arm_state->vc_suspend_complete);
3825 +
3826 + init_completion(&arm_state->vc_resume_complete);
3827 + /* Initialise to 'done' state. We only want to block on resume
3828 + * completion while videocore is suspended. */
3829 + set_resume_state(arm_state, VC_RESUME_RESUMED);
3830 +
3831 + init_completion(&arm_state->resume_blocker);
3832 + /* Initialise to 'done' state. We only want to block on this
3833 + * completion while resume is blocked */
3834 + complete_all(&arm_state->resume_blocker);
3835 +
3836 + init_completion(&arm_state->blocked_blocker);
3837 + /* Initialise to 'done' state. We only want to block on this
3838 + * completion while things are waiting on the resume blocker */
3839 + complete_all(&arm_state->blocked_blocker);
3840 +
3841 + arm_state->suspend_timer_timeout = SUSPEND_TIMER_TIMEOUT_MS;
3842 + arm_state->suspend_timer_running = 0;
3843 + init_timer(&arm_state->suspend_timer);
3844 + arm_state->suspend_timer.data = (unsigned long)(state);
3845 + arm_state->suspend_timer.function = suspend_timer_callback;
3846 +
3847 + arm_state->first_connect = 0;
3848 +
3849 + }
3850 + return status;
3851 +}
3852 +
3853 +/*
3854 +** Functions to modify the state variables;
3855 +** set_suspend_state
3856 +** set_resume_state
3857 +**
3858 +** There are more state variables than we might like, so ensure they remain in
3859 +** step. Suspend and resume state are maintained separately, since most of
3860 +** these state machines can operate independently. However, there are a few
3861 +** states where state transitions in one state machine cause a reset to the
3862 +** other state machine. In addition, there are some completion events which
3863 +** need to occur on state machine reset and end-state(s), so these are also
3864 +** dealt with in these functions.
3865 +**
3866 +** In all states we set the state variable according to the input, but in some
3867 +** cases we perform additional steps outlined below;
3868 +**
3869 +** VC_SUSPEND_IDLE - Initialise the suspend completion at the same time.
3870 +** The suspend completion is completed after any suspend
3871 +** attempt. When we reset the state machine we also reset
3872 +** the completion. This reset occurs when videocore is
3873 +** resumed, and also if we initiate suspend after a suspend
3874 +** failure.
3875 +**
3876 +** VC_SUSPEND_IN_PROGRESS - This state is considered the point of no return for
3877 +** suspend - ie from this point on we must try to suspend
3878 +** before resuming can occur. We therefore also reset the
3879 +** resume state machine to VC_RESUME_IDLE in this state.
3880 +**
3881 +** VC_SUSPEND_SUSPENDED - Suspend has completed successfully. Also call
3882 +** complete_all on the suspend completion to notify
3883 +** anything waiting for suspend to happen.
3884 +**
3885 +** VC_SUSPEND_REJECTED - Videocore rejected suspend. Videocore will also
3886 +** initiate resume, so no need to alter resume state.
3887 +** We call complete_all on the suspend completion to notify
3888 +** of suspend rejection.
3889 +**
3890 +** VC_SUSPEND_FAILED - We failed to initiate videocore suspend. We notify the
3891 +** suspend completion and reset the resume state machine.
3892 +**
3893 +** VC_RESUME_IDLE - Initialise the resume completion at the same time. The
3894 +**			resume completion is in its 'done' state whenever
3895 +**			videocore is running.  Therefore, the VC_RESUME_IDLE state
3896 +** implies that videocore is suspended.
3897 +** Hence, any thread which needs to wait until videocore is
3898 +** running can wait on this completion - it will only block
3899 +** if videocore is suspended.
3900 +**
3901 +** VC_RESUME_RESUMED - Resume has completed successfully. Videocore is running.
3902 +** Call complete_all on the resume completion to unblock
3903 +** any threads waiting for resume. Also reset the suspend
3904 +**			state machine to its idle state.
3905 +**
3906 +** VC_RESUME_FAILED - Currently unused - no mechanism to fail resume exists.
3907 +*/
3908 +
3909 +inline void
3910 +set_suspend_state(VCHIQ_ARM_STATE_T *arm_state,
3911 + enum vc_suspend_status new_state)
3912 +{
3913 + /* set the state in all cases */
3914 + arm_state->vc_suspend_state = new_state;
3915 +
3916 + /* state specific additional actions */
3917 + switch (new_state) {
3918 + case VC_SUSPEND_FORCE_CANCELED:
3919 + complete_all(&arm_state->vc_suspend_complete);
3920 + break;
3921 + case VC_SUSPEND_REJECTED:
3922 + complete_all(&arm_state->vc_suspend_complete);
3923 + break;
3924 + case VC_SUSPEND_FAILED:
3925 + complete_all(&arm_state->vc_suspend_complete);
3926 + arm_state->vc_resume_state = VC_RESUME_RESUMED;
3927 + complete_all(&arm_state->vc_resume_complete);
3928 + break;
3929 + case VC_SUSPEND_IDLE:
3930 + INIT_COMPLETION(arm_state->vc_suspend_complete);
3931 + break;
3932 + case VC_SUSPEND_REQUESTED:
3933 + break;
3934 + case VC_SUSPEND_IN_PROGRESS:
3935 + set_resume_state(arm_state, VC_RESUME_IDLE);
3936 + break;
3937 + case VC_SUSPEND_SUSPENDED:
3938 + complete_all(&arm_state->vc_suspend_complete);
3939 + break;
3940 + default:
3941 + BUG();
3942 + break;
3943 + }
3944 +}
3945 +
3946 +inline void
3947 +set_resume_state(VCHIQ_ARM_STATE_T *arm_state,
3948 + enum vc_resume_status new_state)
3949 +{
3950 + /* set the state in all cases */
3951 + arm_state->vc_resume_state = new_state;
3952 +
3953 + /* state specific additional actions */
3954 + switch (new_state) {
3955 + case VC_RESUME_FAILED:
3956 + break;
3957 + case VC_RESUME_IDLE:
3958 + INIT_COMPLETION(arm_state->vc_resume_complete);
3959 + break;
3960 + case VC_RESUME_REQUESTED:
3961 + break;
3962 + case VC_RESUME_IN_PROGRESS:
3963 + break;
3964 + case VC_RESUME_RESUMED:
3965 + complete_all(&arm_state->vc_resume_complete);
3966 + set_suspend_state(arm_state, VC_SUSPEND_IDLE);
3967 + break;
3968 + default:
3969 + BUG();
3970 + break;
3971 + }
3972 +}
3973 +
3974 +
3975 +/* should be called with the write lock held */
3976 +inline void
3977 +start_suspend_timer(VCHIQ_ARM_STATE_T *arm_state)
3978 +{
3979 + del_timer(&arm_state->suspend_timer);
3980 + arm_state->suspend_timer.expires = jiffies +
3981 + msecs_to_jiffies(arm_state->
3982 + suspend_timer_timeout);
3983 + add_timer(&arm_state->suspend_timer);
3984 + arm_state->suspend_timer_running = 1;
3985 +}
3986 +
3987 +/* should be called with the write lock held */
3988 +static inline void
3989 +stop_suspend_timer(VCHIQ_ARM_STATE_T *arm_state)
3990 +{
3991 + if (arm_state->suspend_timer_running) {
3992 + del_timer(&arm_state->suspend_timer);
3993 + arm_state->suspend_timer_running = 0;
3994 + }
3995 +}
3996 +
3997 +static inline int
3998 +need_resume(VCHIQ_STATE_T *state)
3999 +{
4000 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4001 + return (arm_state->vc_suspend_state > VC_SUSPEND_IDLE) &&
4002 + (arm_state->vc_resume_state < VC_RESUME_REQUESTED) &&
4003 + vchiq_videocore_wanted(state);
4004 +}
4005 +
4006 +static int
4007 +block_resume(VCHIQ_ARM_STATE_T *arm_state)
4008 +{
4009 + int status = VCHIQ_SUCCESS;
4010 + const unsigned long timeout_val =
4011 + msecs_to_jiffies(FORCE_SUSPEND_TIMEOUT_MS);
4012 + int resume_count = 0;
4013 +
4014 + /* Allow any threads which were blocked by the last force suspend to
4015 + * complete if they haven't already. Only give this one shot; if
4016 + * blocked_count is incremented after blocked_blocker is completed
4017 + * (which only happens when blocked_count hits 0) then those threads
4018 + * will have to wait until next time around */
4019 + if (arm_state->blocked_count) {
4020 + INIT_COMPLETION(arm_state->blocked_blocker);
4021 + write_unlock_bh(&arm_state->susp_res_lock);
4022 + vchiq_log_info(vchiq_susp_log_level, "%s wait for previously "
4023 + "blocked clients", __func__);
4024 + if (wait_for_completion_interruptible_timeout(
4025 + &arm_state->blocked_blocker, timeout_val)
4026 + <= 0) {
4027 + vchiq_log_error(vchiq_susp_log_level, "%s wait for "
4028 + "previously blocked clients failed" , __func__);
4029 + status = VCHIQ_ERROR;
4030 + write_lock_bh(&arm_state->susp_res_lock);
4031 + goto out;
4032 + }
4033 + vchiq_log_info(vchiq_susp_log_level, "%s previously blocked "
4034 + "clients resumed", __func__);
4035 + write_lock_bh(&arm_state->susp_res_lock);
4036 + }
4037 +
4038 +	/* We need to wait for resume to complete if it's in progress */
4039 + while (arm_state->vc_resume_state != VC_RESUME_RESUMED &&
4040 + arm_state->vc_resume_state > VC_RESUME_IDLE) {
4041 + if (resume_count > 1) {
4042 + status = VCHIQ_ERROR;
4043 + vchiq_log_error(vchiq_susp_log_level, "%s waited too "
4044 + "many times for resume" , __func__);
4045 + goto out;
4046 + }
4047 + write_unlock_bh(&arm_state->susp_res_lock);
4048 + vchiq_log_info(vchiq_susp_log_level, "%s wait for resume",
4049 + __func__);
4050 + if (wait_for_completion_interruptible_timeout(
4051 + &arm_state->vc_resume_complete, timeout_val)
4052 + <= 0) {
4053 + vchiq_log_error(vchiq_susp_log_level, "%s wait for "
4054 + "resume failed (%s)", __func__,
4055 + resume_state_names[arm_state->vc_resume_state +
4056 + VC_RESUME_NUM_OFFSET]);
4057 + status = VCHIQ_ERROR;
4058 + write_lock_bh(&arm_state->susp_res_lock);
4059 + goto out;
4060 + }
4061 + vchiq_log_info(vchiq_susp_log_level, "%s resumed", __func__);
4062 + write_lock_bh(&arm_state->susp_res_lock);
4063 + resume_count++;
4064 + }
4065 + INIT_COMPLETION(arm_state->resume_blocker);
4066 + arm_state->resume_blocked = 1;
4067 +
4068 +out:
4069 + return status;
4070 +}
4071 +
4072 +static inline void
4073 +unblock_resume(VCHIQ_ARM_STATE_T *arm_state)
4074 +{
4075 + complete_all(&arm_state->resume_blocker);
4076 + arm_state->resume_blocked = 0;
4077 +}
4078 +
4079 +/* Initiate suspend via slot handler. Should be called with the write lock
4080 + * held */
4081 +VCHIQ_STATUS_T
4082 +vchiq_arm_vcsuspend(VCHIQ_STATE_T *state)
4083 +{
4084 + VCHIQ_STATUS_T status = VCHIQ_ERROR;
4085 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4086 +
4087 + if (!arm_state)
4088 + goto out;
4089 +
4090 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4091 + status = VCHIQ_SUCCESS;
4092 +
4093 +
4094 + switch (arm_state->vc_suspend_state) {
4095 + case VC_SUSPEND_REQUESTED:
4096 + vchiq_log_info(vchiq_susp_log_level, "%s: suspend already "
4097 + "requested", __func__);
4098 + break;
4099 + case VC_SUSPEND_IN_PROGRESS:
4100 + vchiq_log_info(vchiq_susp_log_level, "%s: suspend already in "
4101 + "progress", __func__);
4102 + break;
4103 +
4104 + default:
4105 + /* We don't expect to be in other states, so log but continue
4106 + * anyway */
4107 + vchiq_log_error(vchiq_susp_log_level,
4108 + "%s unexpected suspend state %s", __func__,
4109 + suspend_state_names[arm_state->vc_suspend_state +
4110 + VC_SUSPEND_NUM_OFFSET]);
4111 + /* fall through */
4112 + case VC_SUSPEND_REJECTED:
4113 + case VC_SUSPEND_FAILED:
4114 + /* Ensure any idle state actions have been run */
4115 + set_suspend_state(arm_state, VC_SUSPEND_IDLE);
4116 + /* fall through */
4117 + case VC_SUSPEND_IDLE:
4118 + vchiq_log_info(vchiq_susp_log_level,
4119 + "%s: suspending", __func__);
4120 + set_suspend_state(arm_state, VC_SUSPEND_REQUESTED);
4121 + /* kick the slot handler thread to initiate suspend */
4122 + request_poll(state, NULL, 0);
4123 + break;
4124 + }
4125 +
4126 +out:
4127 + vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, status);
4128 + return status;
4129 +}
4130 +
4131 +void
4132 +vchiq_platform_check_suspend(VCHIQ_STATE_T *state)
4133 +{
4134 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4135 + int susp = 0;
4136 +
4137 + if (!arm_state)
4138 + goto out;
4139 +
4140 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4141 +
4142 + write_lock_bh(&arm_state->susp_res_lock);
4143 + if (arm_state->vc_suspend_state == VC_SUSPEND_REQUESTED &&
4144 + arm_state->vc_resume_state == VC_RESUME_RESUMED) {
4145 + set_suspend_state(arm_state, VC_SUSPEND_IN_PROGRESS);
4146 + susp = 1;
4147 + }
4148 + write_unlock_bh(&arm_state->susp_res_lock);
4149 +
4150 + if (susp)
4151 + vchiq_platform_suspend(state);
4152 +
4153 +out:
4154 + vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
4155 + return;
4156 +}
4157 +
4158 +
4159 +static void
4160 +output_timeout_error(VCHIQ_STATE_T *state)
4161 +{
4162 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4163 + char service_err[50] = "";
4164 + int vc_use_count = arm_state->videocore_use_count;
4165 + int active_services = state->unused_service;
4166 + int i;
4167 +
4168 + if (!arm_state->videocore_use_count) {
4169 + snprintf(service_err, 50, " Videocore usecount is 0");
4170 + goto output_msg;
4171 + }
4172 + for (i = 0; i < active_services; i++) {
4173 + VCHIQ_SERVICE_T *service_ptr = state->services[i];
4174 + if (service_ptr && service_ptr->service_use_count &&
4175 + (service_ptr->srvstate != VCHIQ_SRVSTATE_FREE)) {
4176 + snprintf(service_err, 50, " %c%c%c%c(%d) service has "
4177 + "use count %d%s", VCHIQ_FOURCC_AS_4CHARS(
4178 + service_ptr->base.fourcc),
4179 + service_ptr->client_id,
4180 + service_ptr->service_use_count,
4181 + service_ptr->service_use_count ==
4182 + vc_use_count ? "" : " (+ more)");
4183 + break;
4184 + }
4185 + }
4186 +
4187 +output_msg:
4188 + vchiq_log_error(vchiq_susp_log_level,
4189 + "timed out waiting for vc suspend (%d).%s",
4190 + arm_state->autosuspend_override, service_err);
4191 +
4192 +}
4193 +
4194 +/* Try to get videocore into suspended state, regardless of autosuspend state.
4195 +** We don't actually force suspend, since videocore may get into a bad state
4196 +** if we force suspend at a bad time. Instead, we wait for autosuspend to
4197 +** determine a good point to suspend. If this doesn't happen within 100ms we
4198 +** report failure.
4199 +**
4200 +** Returns VCHIQ_SUCCESS if videocore suspended successfully, VCHIQ_RETRY if
4201 +** videocore failed to suspend in time or VCHIQ_ERROR if interrupted.
4202 +*/
4203 +VCHIQ_STATUS_T
4204 +vchiq_arm_force_suspend(VCHIQ_STATE_T *state)
4205 +{
4206 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4207 + VCHIQ_STATUS_T status = VCHIQ_ERROR;
4208 + long rc = 0;
4209 + int repeat = -1;
4210 +
4211 + if (!arm_state)
4212 + goto out;
4213 +
4214 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4215 +
4216 + write_lock_bh(&arm_state->susp_res_lock);
4217 +
4218 + status = block_resume(arm_state);
4219 + if (status != VCHIQ_SUCCESS)
4220 + goto unlock;
4221 + if (arm_state->vc_suspend_state == VC_SUSPEND_SUSPENDED) {
4222 + /* Already suspended - just block resume and exit */
4223 + vchiq_log_info(vchiq_susp_log_level, "%s already suspended",
4224 + __func__);
4225 + status = VCHIQ_SUCCESS;
4226 + goto unlock;
4227 + } else if (arm_state->vc_suspend_state <= VC_SUSPEND_IDLE) {
4228 + /* initiate suspend immediately in the case that we're waiting
4229 + * for the timeout */
4230 + stop_suspend_timer(arm_state);
4231 + if (!vchiq_videocore_wanted(state)) {
4232 + vchiq_log_info(vchiq_susp_log_level, "%s videocore "
4233 + "idle, initiating suspend", __func__);
4234 + status = vchiq_arm_vcsuspend(state);
4235 + } else if (arm_state->autosuspend_override <
4236 + FORCE_SUSPEND_FAIL_MAX) {
4237 + vchiq_log_info(vchiq_susp_log_level, "%s letting "
4238 + "videocore go idle", __func__);
4239 + status = VCHIQ_SUCCESS;
4240 + } else {
4241 + vchiq_log_warning(vchiq_susp_log_level, "%s failed too "
4242 + "many times - attempting suspend", __func__);
4243 + status = vchiq_arm_vcsuspend(state);
4244 + }
4245 + } else {
4246 + vchiq_log_info(vchiq_susp_log_level, "%s videocore suspend "
4247 + "in progress - wait for completion", __func__);
4248 + status = VCHIQ_SUCCESS;
4249 + }
4250 +
4251 + /* Wait for suspend to happen due to system idle (not forced..) */
4252 + if (status != VCHIQ_SUCCESS)
4253 + goto unblock_resume;
4254 +
4255 + do {
4256 + write_unlock_bh(&arm_state->susp_res_lock);
4257 +
4258 + rc = wait_for_completion_interruptible_timeout(
4259 + &arm_state->vc_suspend_complete,
4260 + msecs_to_jiffies(FORCE_SUSPEND_TIMEOUT_MS));
4261 +
4262 + write_lock_bh(&arm_state->susp_res_lock);
4263 + if (rc < 0) {
4264 + vchiq_log_warning(vchiq_susp_log_level, "%s "
4265 + "interrupted waiting for suspend", __func__);
4266 + status = VCHIQ_ERROR;
4267 + goto unblock_resume;
4268 + } else if (rc == 0) {
4269 + if (arm_state->vc_suspend_state > VC_SUSPEND_IDLE) {
4270 + /* Repeat timeout once if in progress */
4271 + if (repeat < 0) {
4272 + repeat = 1;
4273 + continue;
4274 + }
4275 + }
4276 + arm_state->autosuspend_override++;
4277 + output_timeout_error(state);
4278 +
4279 + status = VCHIQ_RETRY;
4280 + goto unblock_resume;
4281 + }
4282 + } while (0 < (repeat--));
4283 +
4284 + /* Check and report state in case we need to abort ARM suspend */
4285 + if (arm_state->vc_suspend_state != VC_SUSPEND_SUSPENDED) {
4286 + status = VCHIQ_RETRY;
4287 + vchiq_log_error(vchiq_susp_log_level,
4288 + "%s videocore suspend failed (state %s)", __func__,
4289 + suspend_state_names[arm_state->vc_suspend_state +
4290 + VC_SUSPEND_NUM_OFFSET]);
4291 + /* Reset the state only if it's still in an error state.
4292 + * Something could have already initiated another suspend. */
4293 + if (arm_state->vc_suspend_state < VC_SUSPEND_IDLE)
4294 + set_suspend_state(arm_state, VC_SUSPEND_IDLE);
4295 +
4296 + goto unblock_resume;
4297 + }
4298 +
4299 + /* successfully suspended - unlock and exit */
4300 + goto unlock;
4301 +
4302 +unblock_resume:
4303 + /* all error states need to unblock resume before exit */
4304 + unblock_resume(arm_state);
4305 +
4306 +unlock:
4307 + write_unlock_bh(&arm_state->susp_res_lock);
4308 +
4309 +out:
4310 + vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, status);
4311 + return status;
4312 +}
4313 +
4314 +void
4315 +vchiq_check_suspend(VCHIQ_STATE_T *state)
4316 +{
4317 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4318 +
4319 + if (!arm_state)
4320 + goto out;
4321 +
4322 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4323 +
4324 + write_lock_bh(&arm_state->susp_res_lock);
4325 + if (arm_state->vc_suspend_state != VC_SUSPEND_SUSPENDED &&
4326 + arm_state->first_connect &&
4327 + !vchiq_videocore_wanted(state)) {
4328 + vchiq_arm_vcsuspend(state);
4329 + }
4330 + write_unlock_bh(&arm_state->susp_res_lock);
4331 +
4332 +out:
4333 + vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
4334 + return;
4335 +}
4336 +
4337 +
4338 +int
4339 +vchiq_arm_allow_resume(VCHIQ_STATE_T *state)
4340 +{
4341 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4342 + int resume = 0;
4343 + int ret = -1;
4344 +
4345 + if (!arm_state)
4346 + goto out;
4347 +
4348 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4349 +
4350 + write_lock_bh(&arm_state->susp_res_lock);
4351 + unblock_resume(arm_state);
4352 + resume = vchiq_check_resume(state);
4353 + write_unlock_bh(&arm_state->susp_res_lock);
4354 +
4355 + if (resume) {
4356 + if (wait_for_completion_interruptible(
4357 + &arm_state->vc_resume_complete) < 0) {
4358 + vchiq_log_error(vchiq_susp_log_level,
4359 + "%s interrupted", __func__);
4360 + /* failed, cannot accurately derive suspend
4361 + * state, so exit early. */
4362 + goto out;
4363 + }
4364 + }
4365 +
4366 + read_lock_bh(&arm_state->susp_res_lock);
4367 + if (arm_state->vc_suspend_state == VC_SUSPEND_SUSPENDED) {
4368 + vchiq_log_info(vchiq_susp_log_level,
4369 + "%s: Videocore remains suspended", __func__);
4370 + } else {
4371 + vchiq_log_info(vchiq_susp_log_level,
4372 + "%s: Videocore resumed", __func__);
4373 + ret = 0;
4374 + }
4375 + read_unlock_bh(&arm_state->susp_res_lock);
4376 +out:
4377 + vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
4378 + return ret;
4379 +}
4380 +
4381 +/* This function should be called with the write lock held */
4382 +int
4383 +vchiq_check_resume(VCHIQ_STATE_T *state)
4384 +{
4385 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4386 + int resume = 0;
4387 +
4388 + if (!arm_state)
4389 + goto out;
4390 +
4391 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4392 +
4393 + if (need_resume(state)) {
4394 + set_resume_state(arm_state, VC_RESUME_REQUESTED);
4395 + request_poll(state, NULL, 0);
4396 + resume = 1;
4397 + }
4398 +
4399 +out:
4400 + vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
4401 + return resume;
4402 +}
4403 +
4404 +void
4405 +vchiq_platform_check_resume(VCHIQ_STATE_T *state)
4406 +{
4407 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4408 + int res = 0;
4409 +
4410 + if (!arm_state)
4411 + goto out;
4412 +
4413 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4414 +
4415 + write_lock_bh(&arm_state->susp_res_lock);
4416 + if (arm_state->wake_address == 0) {
4417 + vchiq_log_info(vchiq_susp_log_level,
4418 + "%s: already awake", __func__);
4419 + goto unlock;
4420 + }
4421 + if (arm_state->vc_resume_state == VC_RESUME_IN_PROGRESS) {
4422 + vchiq_log_info(vchiq_susp_log_level,
4423 + "%s: already resuming", __func__);
4424 + goto unlock;
4425 + }
4426 +
4427 + if (arm_state->vc_resume_state == VC_RESUME_REQUESTED) {
4428 + set_resume_state(arm_state, VC_RESUME_IN_PROGRESS);
4429 + res = 1;
4430 + } else
4431 + vchiq_log_trace(vchiq_susp_log_level,
4432 + "%s: not resuming (resume state %s)", __func__,
4433 + resume_state_names[arm_state->vc_resume_state +
4434 + VC_RESUME_NUM_OFFSET]);
4435 +
4436 +unlock:
4437 + write_unlock_bh(&arm_state->susp_res_lock);
4438 +
4439 + if (res)
4440 + vchiq_platform_resume(state);
4441 +
4442 +out:
4443 + vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
4444 + return;
4445 +
4446 +}
4447 +
4448 +
4449 +
4450 +VCHIQ_STATUS_T
4451 +vchiq_use_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
4452 + enum USE_TYPE_E use_type)
4453 +{
4454 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4455 + VCHIQ_STATUS_T ret = VCHIQ_SUCCESS;
4456 + char entity[16];
4457 + int *entity_uc;
4458 + int local_uc, local_entity_uc;
4459 +
4460 + if (!arm_state)
4461 + goto out;
4462 +
4463 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4464 +
4465 + if (use_type == USE_TYPE_VCHIQ) {
4466 + sprintf(entity, "VCHIQ: ");
4467 + entity_uc = &arm_state->peer_use_count;
4468 + } else if (service) {
4469 + sprintf(entity, "%c%c%c%c:%03d",
4470 + VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
4471 + service->client_id);
4472 + entity_uc = &service->service_use_count;
4473 + } else {
4474 + vchiq_log_error(vchiq_susp_log_level, "%s null service "
4475 + "ptr", __func__);
4476 + ret = VCHIQ_ERROR;
4477 + goto out;
4478 + }
4479 +
4480 + write_lock_bh(&arm_state->susp_res_lock);
4481 + while (arm_state->resume_blocked) {
4482 + /* If we call 'use' while force suspend is waiting for suspend,
4483 + * then we're about to block the thread which the force is
4484 + * waiting to complete, so we're bound to just time out. In this
4485 + * case, set the suspend state such that the wait will be
4486 + * canceled, so we can complete as quickly as possible. */
4487 + if (arm_state->resume_blocked && arm_state->vc_suspend_state ==
4488 + VC_SUSPEND_IDLE) {
4489 + set_suspend_state(arm_state, VC_SUSPEND_FORCE_CANCELED);
4490 + break;
4491 + }
4492 + /* If suspend is already in progress then we need to block */
4493 + if (!try_wait_for_completion(&arm_state->resume_blocker)) {
4494 + /* Indicate that there are threads waiting on the resume
4495 + * blocker. These need to be allowed to complete before
4496 + * a _second_ call to force suspend can complete,
4497 + * otherwise low priority threads might never actually
4498 + * continue */
4499 + arm_state->blocked_count++;
4500 + write_unlock_bh(&arm_state->susp_res_lock);
4501 + vchiq_log_info(vchiq_susp_log_level, "%s %s resume "
4502 + "blocked - waiting...", __func__, entity);
4503 + if (wait_for_completion_killable(
4504 + &arm_state->resume_blocker) != 0) {
4505 + vchiq_log_error(vchiq_susp_log_level, "%s %s "
4506 + "wait for resume blocker interrupted",
4507 + __func__, entity);
4508 + ret = VCHIQ_ERROR;
4509 + write_lock_bh(&arm_state->susp_res_lock);
4510 + arm_state->blocked_count--;
4511 + write_unlock_bh(&arm_state->susp_res_lock);
4512 + goto out;
4513 + }
4514 + vchiq_log_info(vchiq_susp_log_level, "%s %s resume "
4515 + "unblocked", __func__, entity);
4516 + write_lock_bh(&arm_state->susp_res_lock);
4517 + if (--arm_state->blocked_count == 0)
4518 + complete_all(&arm_state->blocked_blocker);
4519 + }
4520 + }
4521 +
4522 + stop_suspend_timer(arm_state);
4523 +
4524 + local_uc = ++arm_state->videocore_use_count;
4525 + local_entity_uc = ++(*entity_uc);
4526 +
4527 + /* If there's a pending request which hasn't yet been serviced then
4528 + * just clear it. If we're past VC_SUSPEND_REQUESTED state then
4529 + * vc_resume_complete will block until we either resume or fail to
4530 + * suspend */
4531 + if (arm_state->vc_suspend_state <= VC_SUSPEND_REQUESTED)
4532 + set_suspend_state(arm_state, VC_SUSPEND_IDLE);
4533 +
4534 + if ((use_type != USE_TYPE_SERVICE_NO_RESUME) && need_resume(state)) {
4535 + set_resume_state(arm_state, VC_RESUME_REQUESTED);
4536 + vchiq_log_info(vchiq_susp_log_level,
4537 + "%s %s count %d, state count %d",
4538 + __func__, entity, local_entity_uc, local_uc);
4539 + request_poll(state, NULL, 0);
4540 + } else
4541 + vchiq_log_trace(vchiq_susp_log_level,
4542 + "%s %s count %d, state count %d",
4543 + __func__, entity, *entity_uc, local_uc);
4544 +
4545 +
4546 + write_unlock_bh(&arm_state->susp_res_lock);
4547 +
4548 + /* Completion is in a done state when we're not suspended, so this won't
4549 + * block for the non-suspended case. */
4550 + if (!try_wait_for_completion(&arm_state->vc_resume_complete)) {
4551 + vchiq_log_info(vchiq_susp_log_level, "%s %s wait for resume",
4552 + __func__, entity);
4553 + if (wait_for_completion_killable(
4554 + &arm_state->vc_resume_complete) != 0) {
4555 + vchiq_log_error(vchiq_susp_log_level, "%s %s wait for "
4556 + "resume interrupted", __func__, entity);
4557 + ret = VCHIQ_ERROR;
4558 + goto out;
4559 + }
4560 + vchiq_log_info(vchiq_susp_log_level, "%s %s resumed", __func__,
4561 + entity);
4562 + }
4563 +
4564 + if (ret == VCHIQ_SUCCESS) {
4565 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
4566 + long ack_cnt = atomic_xchg(&arm_state->ka_use_ack_count, 0);
4567 + while (ack_cnt && (status == VCHIQ_SUCCESS)) {
4568 + /* Send the use notify to videocore */
4569 + status = vchiq_send_remote_use_active(state);
4570 + if (status == VCHIQ_SUCCESS)
4571 + ack_cnt--;
4572 + else
4573 + atomic_add(ack_cnt,
4574 + &arm_state->ka_use_ack_count);
4575 + }
4576 + }
4577 +
4578 +out:
4579 + vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
4580 + return ret;
4581 +}
4582 +
4583 +VCHIQ_STATUS_T
4584 +vchiq_release_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service)
4585 +{
4586 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4587 + VCHIQ_STATUS_T ret = VCHIQ_SUCCESS;
4588 + char entity[16];
4589 + int *entity_uc;
4590 + int local_uc, local_entity_uc;
4591 +
4592 + if (!arm_state)
4593 + goto out;
4594 +
4595 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4596 +
4597 + if (service) {
4598 + sprintf(entity, "%c%c%c%c:%03d",
4599 + VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
4600 + service->client_id);
4601 + entity_uc = &service->service_use_count;
4602 + } else {
4603 + sprintf(entity, "PEER: ");
4604 + entity_uc = &arm_state->peer_use_count;
4605 + }
4606 +
4607 + write_lock_bh(&arm_state->susp_res_lock);
4608 + if (!arm_state->videocore_use_count || !(*entity_uc)) {
4609 + /* Don't use BUG_ON - don't allow user thread to crash kernel */
4610 + WARN_ON(!arm_state->videocore_use_count);
4611 + WARN_ON(!(*entity_uc));
4612 + ret = VCHIQ_ERROR;
4613 + goto unlock;
4614 + }
4615 + local_uc = --arm_state->videocore_use_count;
4616 + local_entity_uc = --(*entity_uc);
4617 +
4618 + if (!vchiq_videocore_wanted(state)) {
4619 + if (vchiq_platform_use_suspend_timer() &&
4620 + !arm_state->resume_blocked) {
4621 + /* Only use the timer if we're not trying to force
4622 + * suspend (=> resume_blocked) */
4623 + start_suspend_timer(arm_state);
4624 + } else {
4625 + vchiq_log_info(vchiq_susp_log_level,
4626 + "%s %s count %d, state count %d - suspending",
4627 + __func__, entity, *entity_uc,
4628 + arm_state->videocore_use_count);
4629 + vchiq_arm_vcsuspend(state);
4630 + }
4631 + } else
4632 + vchiq_log_trace(vchiq_susp_log_level,
4633 + "%s %s count %d, state count %d",
4634 + __func__, entity, *entity_uc,
4635 + arm_state->videocore_use_count);
4636 +
4637 +unlock:
4638 + write_unlock_bh(&arm_state->susp_res_lock);
4639 +
4640 +out:
4641 + vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
4642 + return ret;
4643 +}
4644 +
4645 +void
4646 +vchiq_on_remote_use(VCHIQ_STATE_T *state)
4647 +{
4648 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4649 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4650 + atomic_inc(&arm_state->ka_use_count);
4651 + complete(&arm_state->ka_evt);
4652 +}
4653 +
4654 +void
4655 +vchiq_on_remote_release(VCHIQ_STATE_T *state)
4656 +{
4657 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4658 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4659 + atomic_inc(&arm_state->ka_release_count);
4660 + complete(&arm_state->ka_evt);
4661 +}
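vchiq_on_remote_use() and vchiq_on_remote_release() run in slot-handler context, so they do no more than bump an atomic counter and signal ka_evt; the "VCHIQka-<n>" keepalive thread created in vchiq_platform_conn_state_changed() further down is what services those counters and performs the matching use/release from a context that is allowed to sleep.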
4662 +
4663 +VCHIQ_STATUS_T
4664 +vchiq_use_service_internal(VCHIQ_SERVICE_T *service)
4665 +{
4666 + return vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
4667 +}
4668 +
4669 +VCHIQ_STATUS_T
4670 +vchiq_release_service_internal(VCHIQ_SERVICE_T *service)
4671 +{
4672 + return vchiq_release_internal(service->state, service);
4673 +}
4674 +
4675 +static void suspend_timer_callback(unsigned long context)
4676 +{
4677 + VCHIQ_STATE_T *state = (VCHIQ_STATE_T *)context;
4678 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4679 + if (!arm_state)
4680 + goto out;
4681 + vchiq_log_info(vchiq_susp_log_level,
4682 + "%s - suspend timer expired - check suspend", __func__);
4683 + vchiq_check_suspend(state);
4684 +out:
4685 + return;
4686 +}
4687 +
4688 +VCHIQ_STATUS_T
4689 +vchiq_use_service_no_resume(VCHIQ_SERVICE_HANDLE_T handle)
4690 +{
4691 + VCHIQ_STATUS_T ret = VCHIQ_ERROR;
4692 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
4693 + if (service) {
4694 + ret = vchiq_use_internal(service->state, service,
4695 + USE_TYPE_SERVICE_NO_RESUME);
4696 + unlock_service(service);
4697 + }
4698 + return ret;
4699 +}
4700 +
4701 +VCHIQ_STATUS_T
4702 +vchiq_use_service(VCHIQ_SERVICE_HANDLE_T handle)
4703 +{
4704 + VCHIQ_STATUS_T ret = VCHIQ_ERROR;
4705 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
4706 + if (service) {
4707 + ret = vchiq_use_internal(service->state, service,
4708 + USE_TYPE_SERVICE);
4709 + unlock_service(service);
4710 + }
4711 + return ret;
4712 +}
4713 +
4714 +VCHIQ_STATUS_T
4715 +vchiq_release_service(VCHIQ_SERVICE_HANDLE_T handle)
4716 +{
4717 + VCHIQ_STATUS_T ret = VCHIQ_ERROR;
4718 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
4719 + if (service) {
4720 + ret = vchiq_release_internal(service->state, service);
4721 + unlock_service(service);
4722 + }
4723 + return ret;
4724 +}
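vchiq_use_service() and vchiq_release_service() are the interface other kernel drivers use to hold VideoCore out of suspend while they have work in flight; every successful use must be balanced by a release, or videocore_use_count never returns to zero and the suspend path above is never taken. A minimal sketch of the expected calling pattern, assuming a service handle obtained elsewhere and a hypothetical do_transaction() helper (neither is part of this patch):

#include "vchiq_arm.h" /* declares vchiq_use_service()/vchiq_release_service() */

/* Hypothetical helper standing in for real work, e.g. queueing a message */
extern VCHIQ_STATUS_T do_transaction(VCHIQ_SERVICE_HANDLE_T handle);

static VCHIQ_STATUS_T send_with_use_count(VCHIQ_SERVICE_HANDLE_T handle)
{
	VCHIQ_STATUS_T status = vchiq_use_service(handle);

	if (status != VCHIQ_SUCCESS)
		return status; /* resume failed or the wait was interrupted */

	status = do_transaction(handle);

	/* Always drop the use count, even on error, so the suspend
	** timer can run once the overall count reaches zero. */
	vchiq_release_service(handle);

	return status;
}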
4725 +
4726 +void
4727 +vchiq_dump_service_use_state(VCHIQ_STATE_T *state)
4728 +{
4729 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4730 + int i, j = 0;
4731 + /* Only dump 64 services */
4732 + static const int local_max_services = 64;
4733 + /* If there are more than 64 services, only dump ones with
4734 + * non-zero counts */
4735 + int only_nonzero = 0;
4736 + static const char *nz = "<-- preventing suspend";
4737 +
4738 + enum vc_suspend_status vc_suspend_state;
4739 + enum vc_resume_status vc_resume_state;
4740 + int peer_count;
4741 + int vc_use_count;
4742 + int active_services;
4743 + struct service_data_struct {
4744 + int fourcc;
4745 + int clientid;
4746 + int use_count;
4747 + } service_data[local_max_services];
4748 +
4749 + if (!arm_state)
4750 + return;
4751 +
4752 + read_lock_bh(&arm_state->susp_res_lock);
4753 + vc_suspend_state = arm_state->vc_suspend_state;
4754 + vc_resume_state = arm_state->vc_resume_state;
4755 + peer_count = arm_state->peer_use_count;
4756 + vc_use_count = arm_state->videocore_use_count;
4757 + active_services = state->unused_service;
4758 + if (active_services > local_max_services)
4759 + only_nonzero = 1;
4760 +
4761 + for (i = 0; (i < active_services) && (j < local_max_services); i++) {
4762 + VCHIQ_SERVICE_T *service_ptr = state->services[i];
4763 + if (!service_ptr)
4764 + continue;
4765 +
4766 + if (only_nonzero && !service_ptr->service_use_count)
4767 + continue;
4768 +
4769 + if (service_ptr->srvstate != VCHIQ_SRVSTATE_FREE) {
4770 + service_data[j].fourcc = service_ptr->base.fourcc;
4771 + service_data[j].clientid = service_ptr->client_id;
4772 + service_data[j++].use_count = service_ptr->
4773 + service_use_count;
4774 + }
4775 + }
4776 +
4777 + read_unlock_bh(&arm_state->susp_res_lock);
4778 +
4779 + vchiq_log_warning(vchiq_susp_log_level,
4780 + "-- Videocore suspend state: %s --",
4781 + suspend_state_names[vc_suspend_state + VC_SUSPEND_NUM_OFFSET]);
4782 + vchiq_log_warning(vchiq_susp_log_level,
4783 + "-- Videocore resume state: %s --",
4784 + resume_state_names[vc_resume_state + VC_RESUME_NUM_OFFSET]);
4785 +
4786 + if (only_nonzero)
4787 + vchiq_log_warning(vchiq_susp_log_level, "Too many active "
4788 + "services (%d). Only dumping up to first %d services "
4789 + "with non-zero use-count", active_services,
4790 + local_max_services);
4791 +
4792 + for (i = 0; i < j; i++) {
4793 + vchiq_log_warning(vchiq_susp_log_level,
4794 + "----- %c%c%c%c:%d service count %d %s",
4795 + VCHIQ_FOURCC_AS_4CHARS(service_data[i].fourcc),
4796 + service_data[i].clientid,
4797 + service_data[i].use_count,
4798 + service_data[i].use_count ? nz : "");
4799 + }
4800 + vchiq_log_warning(vchiq_susp_log_level,
4801 + "----- VCHIQ use count %d", peer_count);
4802 + vchiq_log_warning(vchiq_susp_log_level,
4803 + "--- Overall vchiq instance use count %d", vc_use_count);
4804 +
4805 + vchiq_dump_platform_use_state(state);
4806 +}
4807 +
4808 +VCHIQ_STATUS_T
4809 +vchiq_check_service(VCHIQ_SERVICE_T *service)
4810 +{
4811 + VCHIQ_ARM_STATE_T *arm_state;
4812 + VCHIQ_STATUS_T ret = VCHIQ_ERROR;
4813 +
4814 + if (!service || !service->state)
4815 + goto out;
4816 +
4817 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4818 +
4819 + arm_state = vchiq_platform_get_arm_state(service->state);
4820 +
4821 + read_lock_bh(&arm_state->susp_res_lock);
4822 + if (service->service_use_count)
4823 + ret = VCHIQ_SUCCESS;
4824 + read_unlock_bh(&arm_state->susp_res_lock);
4825 +
4826 + if (ret == VCHIQ_ERROR) {
4827 + vchiq_log_error(vchiq_susp_log_level,
4828 + "%s ERROR - %c%c%c%c:%d service count %d, "
4829 + "state count %d, videocore suspend state %s", __func__,
4830 + VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
4831 + service->client_id, service->service_use_count,
4832 + arm_state->videocore_use_count,
4833 + suspend_state_names[arm_state->vc_suspend_state +
4834 + VC_SUSPEND_NUM_OFFSET]);
4835 + vchiq_dump_service_use_state(service->state);
4836 + }
4837 +out:
4838 + return ret;
4839 +}
4840 +
4841 +/* stub functions */
4842 +void vchiq_on_remote_use_active(VCHIQ_STATE_T *state)
4843 +{
4844 + (void)state;
4845 +}
4846 +
4847 +void vchiq_platform_conn_state_changed(VCHIQ_STATE_T *state,
4848 + VCHIQ_CONNSTATE_T oldstate, VCHIQ_CONNSTATE_T newstate)
4849 +{
4850 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4851 + vchiq_log_info(vchiq_susp_log_level, "%d: %s->%s", state->id,
4852 + get_conn_state_name(oldstate), get_conn_state_name(newstate));
4853 + if (state->conn_state == VCHIQ_CONNSTATE_CONNECTED) {
4854 + write_lock_bh(&arm_state->susp_res_lock);
4855 + if (!arm_state->first_connect) {
4856 + char threadname[10];
4857 + arm_state->first_connect = 1;
4858 + write_unlock_bh(&arm_state->susp_res_lock);
4859 + snprintf(threadname, sizeof(threadname), "VCHIQka-%d",
4860 + state->id);
4861 + arm_state->ka_thread = kthread_create(
4862 + &vchiq_keepalive_thread_func,
4863 + (void *)state,
4864 + threadname);
4865 + if (arm_state->ka_thread == NULL) {
4866 + vchiq_log_error(vchiq_susp_log_level,
4867 + "vchiq: FATAL: couldn't create thread %s",
4868 + threadname);
4869 + } else {
4870 + wake_up_process(arm_state->ka_thread);
4871 + }
4872 + } else
4873 + write_unlock_bh(&arm_state->susp_res_lock);
4874 + }
4875 +}
4876 +
4877 +
4878 +/****************************************************************************
4879 +*
4880 +* vchiq_init - called when the module is loaded.
4881 +*
4882 +***************************************************************************/
4883 +
4884 +static int __init
4885 +vchiq_init(void)
4886 +{
4887 + int err;
4888 + void *ptr_err;
4889 +
4890 + /* create proc entries */
4891 + err = vchiq_proc_init();
4892 + if (err != 0)
4893 + goto failed_proc_init;
4894 +
4895 + err = alloc_chrdev_region(&vchiq_devid, VCHIQ_MINOR, 1, DEVICE_NAME);
4896 + if (err != 0) {
4897 + vchiq_log_error(vchiq_arm_log_level,
4898 + "Unable to allocate device number");
4899 + goto failed_alloc_chrdev;
4900 + }
4901 + cdev_init(&vchiq_cdev, &vchiq_fops);
4902 + vchiq_cdev.owner = THIS_MODULE;
4903 + err = cdev_add(&vchiq_cdev, vchiq_devid, 1);
4904 + if (err != 0) {
4905 + vchiq_log_error(vchiq_arm_log_level,
4906 + "Unable to register device");
4907 + goto failed_cdev_add;
4908 + }
4909 +
4910 + /* create sysfs entries */
4911 + vchiq_class = class_create(THIS_MODULE, DEVICE_NAME);
4912 + ptr_err = vchiq_class;
4913 + if (IS_ERR(ptr_err))
4914 + goto failed_class_create;
4915 +
4916 + vchiq_dev = device_create(vchiq_class, NULL,
4917 + vchiq_devid, NULL, "vchiq");
4918 + ptr_err = vchiq_dev;
4919 + if (IS_ERR(ptr_err))
4920 + goto failed_device_create;
4921 +
4922 + err = vchiq_platform_init(&g_state);
4923 + if (err != 0)
4924 + goto failed_platform_init;
4925 +
4926 + vchiq_log_info(vchiq_arm_log_level,
4927 + "vchiq: initialised - version %d (min %d), device %d.%d",
4928 + VCHIQ_VERSION, VCHIQ_VERSION_MIN,
4929 + MAJOR(vchiq_devid), MINOR(vchiq_devid));
4930 +
4931 + return 0;
4932 +
4933 +failed_platform_init:
4934 + device_destroy(vchiq_class, vchiq_devid);
4935 +failed_device_create:
4936 + class_destroy(vchiq_class);
4937 +failed_class_create:
4938 + cdev_del(&vchiq_cdev);
4939 + err = PTR_ERR(ptr_err);
4940 +failed_cdev_add:
4941 + unregister_chrdev_region(vchiq_devid, 1);
4942 +failed_alloc_chrdev:
4943 + vchiq_proc_deinit();
4944 +failed_proc_init:
4945 + vchiq_log_warning(vchiq_arm_log_level, "could not load vchiq");
4946 + return err;
4947 +}
4948 +
4949 +static int vchiq_instance_get_use_count(VCHIQ_INSTANCE_T instance)
4950 +{
4951 + VCHIQ_SERVICE_T *service;
4952 + int use_count = 0, i;
4953 + i = 0;
4954 + while ((service = next_service_by_instance(instance->state,
4955 + instance, &i)) != NULL) {
4956 + use_count += service->service_use_count;
4957 + unlock_service(service);
4958 + }
4959 + return use_count;
4960 +}
4961 +
4962 +/* read the per-process use-count */
4963 +static int proc_read_use_count(char *page, char **start,
4964 + off_t off, int count,
4965 + int *eof, void *data)
4966 +{
4967 + VCHIQ_INSTANCE_T instance = data;
4968 + int len, use_count;
4969 +
4970 + use_count = vchiq_instance_get_use_count(instance);
4971 + len = snprintf(page+off, count, "%d\n", use_count);
4972 +
4973 + return len;
4974 +}
4975 +
4976 +/* add an instance (process) to the proc entries */
4977 +static int vchiq_proc_add_instance(VCHIQ_INSTANCE_T instance)
4978 +{
4979 +#if 1
4980 + return 0;
4981 +#else
4982 + char pidstr[32];
4983 + struct proc_dir_entry *top, *use_count;
4984 + struct proc_dir_entry *clients = vchiq_clients_top();
4985 + int pid = instance->pid;
4986 +
4987 + snprintf(pidstr, sizeof(pidstr), "%d", pid);
4988 + top = proc_mkdir(pidstr, clients);
4989 + if (!top)
4990 + goto fail_top;
4991 +
4992 + use_count = create_proc_read_entry("use_count",
4993 + 0444, top,
4994 + proc_read_use_count,
4995 + instance);
4996 + if (!use_count)
4997 + goto fail_use_count;
4998 +
4999 + instance->proc_entry = top;
5000 +
5001 + return 0;
5002 +
5003 +fail_use_count:
5004 + remove_proc_entry(top->name, clients);
5005 +fail_top:
5006 + return -ENOMEM;
5007 +#endif
5008 +}
5009 +
5010 +static void vchiq_proc_remove_instance(VCHIQ_INSTANCE_T instance)
5011 +{
5012 +#if 0
5013 + struct proc_dir_entry *clients = vchiq_clients_top();
5014 + remove_proc_entry("use_count", instance->proc_entry);
5015 + remove_proc_entry(instance->proc_entry->name, clients);
5016 +#endif
5017 +}
5018 +
5019 +/****************************************************************************
5020 +*
5021 +* vchiq_exit - called when the module is unloaded.
5022 +*
5023 +***************************************************************************/
5024 +
5025 +static void __exit
5026 +vchiq_exit(void)
5027 +{
5028 + vchiq_platform_exit(&g_state);
5029 + device_destroy(vchiq_class, vchiq_devid);
5030 + class_destroy(vchiq_class);
5031 + cdev_del(&vchiq_cdev);
5032 + unregister_chrdev_region(vchiq_devid, 1);
5033 +}
5034 +
5035 +module_init(vchiq_init);
5036 +module_exit(vchiq_exit);
5037 +MODULE_LICENSE("GPL");
5038 +MODULE_AUTHOR("Broadcom Corporation");
5039 diff --git a/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.h b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.h
5040 new file mode 100644
5041 index 0000000..75ad4c6
5042 --- /dev/null
5043 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.h
5044 @@ -0,0 +1,212 @@
5045 +/**
5046 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
5047 + *
5048 + * Redistribution and use in source and binary forms, with or without
5049 + * modification, are permitted provided that the following conditions
5050 + * are met:
5051 + * 1. Redistributions of source code must retain the above copyright
5052 + * notice, this list of conditions, and the following disclaimer,
5053 + * without modification.
5054 + * 2. Redistributions in binary form must reproduce the above copyright
5055 + * notice, this list of conditions and the following disclaimer in the
5056 + * documentation and/or other materials provided with the distribution.
5057 + * 3. The names of the above-listed copyright holders may not be used
5058 + * to endorse or promote products derived from this software without
5059 + * specific prior written permission.
5060 + *
5061 + * ALTERNATIVELY, this software may be distributed under the terms of the
5062 + * GNU General Public License ("GPL") version 2, as published by the Free
5063 + * Software Foundation.
5064 + *
5065 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
5066 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
5067 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
5068 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
5069 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
5070 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
5071 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
5072 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
5073 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
5074 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
5075 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5076 + */
5077 +
5078 +#ifndef VCHIQ_ARM_H
5079 +#define VCHIQ_ARM_H
5080 +
5081 +#include <linux/mutex.h>
5082 +#include <linux/semaphore.h>
5083 +#include <linux/atomic.h>
5084 +#include "vchiq_core.h"
5085 +
5086 +
5087 +enum vc_suspend_status {
5088 + VC_SUSPEND_FORCE_CANCELED = -3, /* Force suspend canceled, too busy */
5089 + VC_SUSPEND_REJECTED = -2, /* Videocore rejected suspend request */
5090 + VC_SUSPEND_FAILED = -1, /* Videocore suspend failed */
5091 + VC_SUSPEND_IDLE = 0, /* VC active, no suspend actions */
5092 + VC_SUSPEND_REQUESTED, /* User has requested suspend */
5093 + VC_SUSPEND_IN_PROGRESS, /* Slot handler has recvd suspend request */
5094 + VC_SUSPEND_SUSPENDED /* Videocore suspend succeeded */
5095 +};
5096 +
5097 +enum vc_resume_status {
5098 + VC_RESUME_FAILED = -1, /* Videocore resume failed */
5099 + VC_RESUME_IDLE = 0, /* VC suspended, no resume actions */
5100 + VC_RESUME_REQUESTED, /* User has requested resume */
5101 + VC_RESUME_IN_PROGRESS, /* Slot handler has received resume request */
5102 + VC_RESUME_RESUMED /* Videocore resumed successfully (active) */
5103 +};
5104 +
5105 +
5106 +enum USE_TYPE_E {
5107 + USE_TYPE_SERVICE,
5108 + USE_TYPE_SERVICE_NO_RESUME,
5109 + USE_TYPE_VCHIQ
5110 +};
5111 +
5112 +
5113 +
5114 +typedef struct vchiq_arm_state_struct {
5115 + /* Keepalive-related data */
5116 + struct task_struct *ka_thread;
5117 + struct completion ka_evt;
5118 + atomic_t ka_use_count;
5119 + atomic_t ka_use_ack_count;
5120 + atomic_t ka_release_count;
5121 +
5122 + struct completion vc_suspend_complete;
5123 + struct completion vc_resume_complete;
5124 +
5125 + rwlock_t susp_res_lock;
5126 + enum vc_suspend_status vc_suspend_state;
5127 + enum vc_resume_status vc_resume_state;
5128 +
5129 + unsigned int wake_address;
5130 +
5131 + struct timer_list suspend_timer;
5132 + int suspend_timer_timeout;
5133 + int suspend_timer_running;
5134 +
5135 + /* Global use count for videocore.
5136 + ** This is equal to the sum of the use counts for all services. When
5137 + ** this hits zero the videocore suspend procedure will be initiated.
5138 + */
5139 + int videocore_use_count;
5140 +
5141 + /* Use count to track requests from videocore peer.
5142 + ** This use count is not associated with a service, so needs to be
5143 + ** tracked separately with the state.
5144 + */
5145 + int peer_use_count;
5146 +
5147 + /* Flag to indicate whether resume is blocked. This happens when the
5148 + ** ARM is suspending
5149 + */
5150 + struct completion resume_blocker;
5151 + int resume_blocked;
5152 + struct completion blocked_blocker;
5153 + int blocked_count;
5154 +
5155 + int autosuspend_override;
5156 +
5157 + /* Flag to indicate that the first vchiq connect has made it through.
5158 + ** This means that both sides should be fully ready, and we should
5159 + ** be able to suspend after this point.
5160 + */
5161 + int first_connect;
5162 +
5163 + unsigned long long suspend_start_time;
5164 + unsigned long long sleep_start_time;
5165 + unsigned long long resume_start_time;
5166 + unsigned long long last_wake_time;
5167 +
5168 +} VCHIQ_ARM_STATE_T;
5169 +
5170 +extern int vchiq_arm_log_level;
5171 +extern int vchiq_susp_log_level;
5172 +
5173 +extern int __init
5174 +vchiq_platform_init(VCHIQ_STATE_T *state);
5175 +
5176 +extern void __exit
5177 +vchiq_platform_exit(VCHIQ_STATE_T *state);
5178 +
5179 +extern VCHIQ_STATE_T *
5180 +vchiq_get_state(void);
5181 +
5182 +extern VCHIQ_STATUS_T
5183 +vchiq_arm_vcsuspend(VCHIQ_STATE_T *state);
5184 +
5185 +extern VCHIQ_STATUS_T
5186 +vchiq_arm_force_suspend(VCHIQ_STATE_T *state);
5187 +
5188 +extern int
5189 +vchiq_arm_allow_resume(VCHIQ_STATE_T *state);
5190 +
5191 +extern VCHIQ_STATUS_T
5192 +vchiq_arm_vcresume(VCHIQ_STATE_T *state);
5193 +
5194 +extern VCHIQ_STATUS_T
5195 +vchiq_arm_init_state(VCHIQ_STATE_T *state, VCHIQ_ARM_STATE_T *arm_state);
5196 +
5197 +extern int
5198 +vchiq_check_resume(VCHIQ_STATE_T *state);
5199 +
5200 +extern void
5201 +vchiq_check_suspend(VCHIQ_STATE_T *state);
5202 +
5203 +extern VCHIQ_STATUS_T
5204 +vchiq_use_service(VCHIQ_SERVICE_HANDLE_T handle);
5205 +
5206 +extern VCHIQ_STATUS_T
5207 +vchiq_release_service(VCHIQ_SERVICE_HANDLE_T handle);
5208 +
5209 +extern VCHIQ_STATUS_T
5210 +vchiq_check_service(VCHIQ_SERVICE_T *service);
5211 +
5212 +extern VCHIQ_STATUS_T
5213 +vchiq_platform_suspend(VCHIQ_STATE_T *state);
5214 +
5215 +extern int
5216 +vchiq_platform_videocore_wanted(VCHIQ_STATE_T *state);
5217 +
5218 +extern int
5219 +vchiq_platform_use_suspend_timer(void);
5220 +
5221 +extern void
5222 +vchiq_dump_platform_use_state(VCHIQ_STATE_T *state);
5223 +
5224 +extern void
5225 +vchiq_dump_service_use_state(VCHIQ_STATE_T *state);
5226 +
5227 +extern VCHIQ_ARM_STATE_T*
5228 +vchiq_platform_get_arm_state(VCHIQ_STATE_T *state);
5229 +
5230 +extern int
5231 +vchiq_videocore_wanted(VCHIQ_STATE_T *state);
5232 +
5233 +extern VCHIQ_STATUS_T
5234 +vchiq_use_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
5235 + enum USE_TYPE_E use_type);
5236 +extern VCHIQ_STATUS_T
5237 +vchiq_release_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service);
5238 +
5239 +void
5240 +set_suspend_state(VCHIQ_ARM_STATE_T *arm_state,
5241 + enum vc_suspend_status new_state);
5242 +
5243 +void
5244 +set_resume_state(VCHIQ_ARM_STATE_T *arm_state,
5245 + enum vc_resume_status new_state);
5246 +
5247 +void
5248 +start_suspend_timer(VCHIQ_ARM_STATE_T *arm_state);
5249 +
5250 +extern int vchiq_proc_init(void);
5251 +extern void vchiq_proc_deinit(void);
5252 +extern struct proc_dir_entry *vchiq_proc_top(void);
5253 +extern struct proc_dir_entry *vchiq_clients_top(void);
5254 +
5255 +
5256 +#endif /* VCHIQ_ARM_H */
5257 diff --git a/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_build_info.h b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_build_info.h
5258 new file mode 100644
5259 index 0000000..df64581
5260 --- /dev/null
5261 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_build_info.h
5262 @@ -0,0 +1,37 @@
5263 +/**
5264 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
5265 + *
5266 + * Redistribution and use in source and binary forms, with or without
5267 + * modification, are permitted provided that the following conditions
5268 + * are met:
5269 + * 1. Redistributions of source code must retain the above copyright
5270 + * notice, this list of conditions, and the following disclaimer,
5271 + * without modification.
5272 + * 2. Redistributions in binary form must reproduce the above copyright
5273 + * notice, this list of conditions and the following disclaimer in the
5274 + * documentation and/or other materials provided with the distribution.
5275 + * 3. The names of the above-listed copyright holders may not be used
5276 + * to endorse or promote products derived from this software without
5277 + * specific prior written permission.
5278 + *
5279 + * ALTERNATIVELY, this software may be distributed under the terms of the
5280 + * GNU General Public License ("GPL") version 2, as published by the Free
5281 + * Software Foundation.
5282 + *
5283 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
5284 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
5285 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
5286 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
5287 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
5288 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
5289 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
5290 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
5291 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
5292 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
5293 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5294 + */
5295 +
5296 +const char *vchiq_get_build_hostname(void);
5297 +const char *vchiq_get_build_version(void);
5298 +const char *vchiq_get_build_time(void);
5299 +const char *vchiq_get_build_date(void);
5300 diff --git a/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_cfg.h b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_cfg.h
5301 new file mode 100644
5302 index 0000000..493c86c
5303 --- /dev/null
5304 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_cfg.h
5305 @@ -0,0 +1,60 @@
5306 +/**
5307 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
5308 + *
5309 + * Redistribution and use in source and binary forms, with or without
5310 + * modification, are permitted provided that the following conditions
5311 + * are met:
5312 + * 1. Redistributions of source code must retain the above copyright
5313 + * notice, this list of conditions, and the following disclaimer,
5314 + * without modification.
5315 + * 2. Redistributions in binary form must reproduce the above copyright
5316 + * notice, this list of conditions and the following disclaimer in the
5317 + * documentation and/or other materials provided with the distribution.
5318 + * 3. The names of the above-listed copyright holders may not be used
5319 + * to endorse or promote products derived from this software without
5320 + * specific prior written permission.
5321 + *
5322 + * ALTERNATIVELY, this software may be distributed under the terms of the
5323 + * GNU General Public License ("GPL") version 2, as published by the Free
5324 + * Software Foundation.
5325 + *
5326 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
5327 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
5328 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
5329 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
5330 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
5331 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
5332 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
5333 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
5334 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
5335 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
5336 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5337 + */
5338 +
5339 +#ifndef VCHIQ_CFG_H
5340 +#define VCHIQ_CFG_H
5341 +
5342 +#define VCHIQ_MAGIC VCHIQ_MAKE_FOURCC('V', 'C', 'H', 'I')
5343 +/* The version of VCHIQ - change with any non-trivial change */
5344 +#define VCHIQ_VERSION 6
5345 +/* The minimum compatible version - update to match VCHIQ_VERSION with any
5346 +** incompatible change */
5347 +#define VCHIQ_VERSION_MIN 3
5348 +
5349 +#define VCHIQ_MAX_STATES 1
5350 +#define VCHIQ_MAX_SERVICES 4096
5351 +#define VCHIQ_MAX_SLOTS 128
5352 +#define VCHIQ_MAX_SLOTS_PER_SIDE 64
5353 +
5354 +#define VCHIQ_NUM_CURRENT_BULKS 32
5355 +#define VCHIQ_NUM_SERVICE_BULKS 4
5356 +
5357 +#ifndef VCHIQ_ENABLE_DEBUG
5358 +#define VCHIQ_ENABLE_DEBUG 1
5359 +#endif
5360 +
5361 +#ifndef VCHIQ_ENABLE_STATS
5362 +#define VCHIQ_ENABLE_STATS 1
5363 +#endif
5364 +
5365 +#endif /* VCHIQ_CFG_H */
5366 diff --git a/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.c b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.c
5367 new file mode 100644
5368 index 0000000..65f4b52
5369 --- /dev/null
5370 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.c
5371 @@ -0,0 +1,119 @@
5372 +/**
5373 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
5374 + *
5375 + * Redistribution and use in source and binary forms, with or without
5376 + * modification, are permitted provided that the following conditions
5377 + * are met:
5378 + * 1. Redistributions of source code must retain the above copyright
5379 + * notice, this list of conditions, and the following disclaimer,
5380 + * without modification.
5381 + * 2. Redistributions in binary form must reproduce the above copyright
5382 + * notice, this list of conditions and the following disclaimer in the
5383 + * documentation and/or other materials provided with the distribution.
5384 + * 3. The names of the above-listed copyright holders may not be used
5385 + * to endorse or promote products derived from this software without
5386 + * specific prior written permission.
5387 + *
5388 + * ALTERNATIVELY, this software may be distributed under the terms of the
5389 + * GNU General Public License ("GPL") version 2, as published by the Free
5390 + * Software Foundation.
5391 + *
5392 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
5393 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
5394 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
5395 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
5396 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
5397 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
5398 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
5399 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
5400 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
5401 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
5402 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5403 + */
5404 +
5405 +#include "vchiq_connected.h"
5406 +#include "vchiq_core.h"
5407 +#include <linux/module.h>
5408 +#include <linux/mutex.h>
5409 +
5410 +#define MAX_CALLBACKS 10
5411 +
5412 +static int g_connected;
5413 +static int g_num_deferred_callbacks;
5414 +static VCHIQ_CONNECTED_CALLBACK_T g_deferred_callback[MAX_CALLBACKS];
5415 +static int g_once_init;
5416 +static struct mutex g_connected_mutex;
5417 +
5418 +/****************************************************************************
5419 +*
5420 +* Function to initialize our lock.
5421 +*
5422 +***************************************************************************/
5423 +
5424 +static void connected_init(void)
5425 +{
5426 + if (!g_once_init) {
5427 + mutex_init(&g_connected_mutex);
5428 + g_once_init = 1;
5429 + }
5430 +}
5431 +
5432 +/****************************************************************************
5433 +*
5434 +* This function is used to defer initialization until the vchiq stack is
5435 +* initialized. If the stack is already initialized, then the callback will
5436 +* be made immediately, otherwise it will be deferred until
5437 +* vchiq_call_connected_callbacks is called.
5438 +*
5439 +***************************************************************************/
5440 +
5441 +void vchiq_add_connected_callback(VCHIQ_CONNECTED_CALLBACK_T callback)
5442 +{
5443 + connected_init();
5444 +
5445 + if (mutex_lock_interruptible(&g_connected_mutex) != 0)
5446 + return;
5447 +
5448 + if (g_connected)
5449 + /* We're already connected. Call the callback immediately. */
5450 +
5451 + callback();
5452 + else {
5453 + if (g_num_deferred_callbacks >= MAX_CALLBACKS)
5454 + vchiq_log_error(vchiq_core_log_level,
5455 + "There are already %d callbacks registered - "
5456 + "please increase MAX_CALLBACKS",
5457 + g_num_deferred_callbacks);
5458 + else {
5459 + g_deferred_callback[g_num_deferred_callbacks] =
5460 + callback;
5461 + g_num_deferred_callbacks++;
5462 + }
5463 + }
5464 + mutex_unlock(&g_connected_mutex);
5465 +}
5466 +
5467 +/****************************************************************************
5468 +*
5469 +* This function is called by the vchiq stack once it has been connected to
5470 +* the videocore and clients can start to use the stack.
5471 +*
5472 +***************************************************************************/
5473 +
5474 +void vchiq_call_connected_callbacks(void)
5475 +{
5476 + int i;
5477 +
5478 + connected_init();
5479 +
5480 + if (mutex_lock_interruptible(&g_connected_mutex) != 0)
5481 + return;
5482 +
5483 + for (i = 0; i < g_num_deferred_callbacks; i++)
5484 + g_deferred_callback[i]();
5485 +
5486 + g_num_deferred_callbacks = 0;
5487 + g_connected = 1;
5488 + mutex_unlock(&g_connected_mutex);
5489 +}
5490 +EXPORT_SYMBOL(vchiq_add_connected_callback);
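A client module built on top of VCHIQ would normally register its start-up hook through vchiq_add_connected_callback() rather than using the stack directly at init time: the callback runs immediately if the CONNECT handshake has already completed, and is otherwise deferred until vchiq_call_connected_callbacks() fires. A sketch, with my_service_init() as a hypothetical client function:

#include <linux/module.h>
#include "vchiq_connected.h"

/* Hypothetical client start-up - only runs once VCHIQ is connected */
static void my_service_init(void)
{
	/* safe to open services and queue messages from here */
}

static int __init my_module_init(void)
{
	vchiq_add_connected_callback(my_service_init);
	return 0;
}
module_init(my_module_init);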
5491 diff --git a/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.h b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.h
5492 new file mode 100644
5493 index 0000000..e4cfdcc
5494 --- /dev/null
5495 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.h
5496 @@ -0,0 +1,51 @@
5497 +/**
5498 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
5499 + *
5500 + * Redistribution and use in source and binary forms, with or without
5501 + * modification, are permitted provided that the following conditions
5502 + * are met:
5503 + * 1. Redistributions of source code must retain the above copyright
5504 + * notice, this list of conditions, and the following disclaimer,
5505 + * without modification.
5506 + * 2. Redistributions in binary form must reproduce the above copyright
5507 + * notice, this list of conditions and the following disclaimer in the
5508 + * documentation and/or other materials provided with the distribution.
5509 + * 3. The names of the above-listed copyright holders may not be used
5510 + * to endorse or promote products derived from this software without
5511 + * specific prior written permission.
5512 + *
5513 + * ALTERNATIVELY, this software may be distributed under the terms of the
5514 + * GNU General Public License ("GPL") version 2, as published by the Free
5515 + * Software Foundation.
5516 + *
5517 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
5518 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
5519 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
5520 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
5521 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
5522 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
5523 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
5524 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
5525 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
5526 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
5527 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5528 + */
5529 +
5530 +#ifndef VCHIQ_CONNECTED_H
5531 +#define VCHIQ_CONNECTED_H
5532 +
5533 +/* ---- Include Files ----------------------------------------------------- */
5534 +
5535 +/* ---- Constants and Types ---------------------------------------------- */
5536 +
5537 +typedef void (*VCHIQ_CONNECTED_CALLBACK_T)(void);
5538 +
5539 +/* ---- Variable Externs ------------------------------------------------- */
5540 +
5541 +/* ---- Function Prototypes ---------------------------------------------- */
5542 +
5543 +void vchiq_add_connected_callback(VCHIQ_CONNECTED_CALLBACK_T callback);
5544 +void vchiq_call_connected_callbacks(void);
5545 +
5546 +#endif /* VCHIQ_CONNECTED_H */
5547 +
5548 diff --git a/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.c b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.c
5549 new file mode 100644
5550 index 0000000..f35ed4f
5551 --- /dev/null
5552 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.c
5553 @@ -0,0 +1,3824 @@
5554 +/**
5555 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
5556 + *
5557 + * Redistribution and use in source and binary forms, with or without
5558 + * modification, are permitted provided that the following conditions
5559 + * are met:
5560 + * 1. Redistributions of source code must retain the above copyright
5561 + * notice, this list of conditions, and the following disclaimer,
5562 + * without modification.
5563 + * 2. Redistributions in binary form must reproduce the above copyright
5564 + * notice, this list of conditions and the following disclaimer in the
5565 + * documentation and/or other materials provided with the distribution.
5566 + * 3. The names of the above-listed copyright holders may not be used
5567 + * to endorse or promote products derived from this software without
5568 + * specific prior written permission.
5569 + *
5570 + * ALTERNATIVELY, this software may be distributed under the terms of the
5571 + * GNU General Public License ("GPL") version 2, as published by the Free
5572 + * Software Foundation.
5573 + *
5574 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
5575 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
5576 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
5577 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
5578 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
5579 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
5580 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
5581 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
5582 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
5583 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
5584 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5585 + */
5586 +
5587 +#include "vchiq_core.h"
5588 +
5589 +#define VCHIQ_SLOT_HANDLER_STACK 8192
5590 +
5591 +#define HANDLE_STATE_SHIFT 12
5592 +
5593 +#define SLOT_INFO_FROM_INDEX(state, index) (state->slot_info + (index))
5594 +#define SLOT_DATA_FROM_INDEX(state, index) (state->slot_data + (index))
5595 +#define SLOT_INDEX_FROM_DATA(state, data) \
5596 + (((unsigned int)((char *)data - (char *)state->slot_data)) / \
5597 + VCHIQ_SLOT_SIZE)
5598 +#define SLOT_INDEX_FROM_INFO(state, info) \
5599 + ((unsigned int)(info - state->slot_info))
5600 +#define SLOT_QUEUE_INDEX_FROM_POS(pos) \
5601 + ((int)((unsigned int)(pos) / VCHIQ_SLOT_SIZE))
5602 +
5603 +
5604 +#define BULK_INDEX(x) (x & (VCHIQ_NUM_SERVICE_BULKS - 1))
5605 +
5606 +
5607 +struct vchiq_open_payload {
5608 + int fourcc;
5609 + int client_id;
5610 + short version;
5611 + short version_min;
5612 +};
5613 +
5614 +struct vchiq_openack_payload {
5615 + short version;
5616 +};
5617 +
5618 +/* we require this for consistency between endpoints */
5619 +vchiq_static_assert(sizeof(VCHIQ_HEADER_T) == 8);
5620 +vchiq_static_assert(IS_POW2(sizeof(VCHIQ_HEADER_T)));
5621 +vchiq_static_assert(IS_POW2(VCHIQ_NUM_CURRENT_BULKS));
5622 +vchiq_static_assert(IS_POW2(VCHIQ_NUM_SERVICE_BULKS));
5623 +vchiq_static_assert(IS_POW2(VCHIQ_MAX_SERVICES));
5624 +vchiq_static_assert(VCHIQ_VERSION >= VCHIQ_VERSION_MIN);
5625 +
5626 +/* Run time control of log level, based on KERN_XXX level. */
5627 +int vchiq_core_log_level = VCHIQ_LOG_DEFAULT;
5628 +int vchiq_core_msg_log_level = VCHIQ_LOG_DEFAULT;
5629 +int vchiq_sync_log_level = VCHIQ_LOG_DEFAULT;
5630 +
5631 +static atomic_t pause_bulks_count = ATOMIC_INIT(0);
5632 +
5633 +static DEFINE_SPINLOCK(service_spinlock);
5634 +DEFINE_SPINLOCK(bulk_waiter_spinlock);
5635 +DEFINE_SPINLOCK(quota_spinlock);
5636 +
5637 +VCHIQ_STATE_T *vchiq_states[VCHIQ_MAX_STATES];
5638 +static unsigned int handle_seq;
5639 +
5640 +static const char *const srvstate_names[] = {
5641 + "FREE",
5642 + "HIDDEN",
5643 + "LISTENING",
5644 + "OPENING",
5645 + "OPEN",
5646 + "OPENSYNC",
5647 + "CLOSESENT",
5648 + "CLOSERECVD",
5649 + "CLOSEWAIT",
5650 + "CLOSED"
5651 +};
5652 +
5653 +static const char *const reason_names[] = {
5654 + "SERVICE_OPENED",
5655 + "SERVICE_CLOSED",
5656 + "MESSAGE_AVAILABLE",
5657 + "BULK_TRANSMIT_DONE",
5658 + "BULK_RECEIVE_DONE",
5659 + "BULK_TRANSMIT_ABORTED",
5660 + "BULK_RECEIVE_ABORTED"
5661 +};
5662 +
5663 +static const char *const conn_state_names[] = {
5664 + "DISCONNECTED",
5665 + "CONNECTING",
5666 + "CONNECTED",
5667 + "PAUSING",
5668 + "PAUSE_SENT",
5669 + "PAUSED",
5670 + "RESUMING",
5671 + "PAUSE_TIMEOUT",
5672 + "RESUME_TIMEOUT"
5673 +};
5674 +
5675 +
5676 +static void
5677 +release_message_sync(VCHIQ_STATE_T *state, VCHIQ_HEADER_T *header);
5678 +
5679 +static const char *msg_type_str(unsigned int msg_type)
5680 +{
5681 + switch (msg_type) {
5682 + case VCHIQ_MSG_PADDING: return "PADDING";
5683 + case VCHIQ_MSG_CONNECT: return "CONNECT";
5684 + case VCHIQ_MSG_OPEN: return "OPEN";
5685 + case VCHIQ_MSG_OPENACK: return "OPENACK";
5686 + case VCHIQ_MSG_CLOSE: return "CLOSE";
5687 + case VCHIQ_MSG_DATA: return "DATA";
5688 + case VCHIQ_MSG_BULK_RX: return "BULK_RX";
5689 + case VCHIQ_MSG_BULK_TX: return "BULK_TX";
5690 + case VCHIQ_MSG_BULK_RX_DONE: return "BULK_RX_DONE";
5691 + case VCHIQ_MSG_BULK_TX_DONE: return "BULK_TX_DONE";
5692 + case VCHIQ_MSG_PAUSE: return "PAUSE";
5693 + case VCHIQ_MSG_RESUME: return "RESUME";
5694 + case VCHIQ_MSG_REMOTE_USE: return "REMOTE_USE";
5695 + case VCHIQ_MSG_REMOTE_RELEASE: return "REMOTE_RELEASE";
5696 + case VCHIQ_MSG_REMOTE_USE_ACTIVE: return "REMOTE_USE_ACTIVE";
5697 + }
5698 + return "???";
5699 +}
5700 +
5701 +static inline void
5702 +vchiq_set_service_state(VCHIQ_SERVICE_T *service, int newstate)
5703 +{
5704 + vchiq_log_info(vchiq_core_log_level, "%d: srv:%d %s->%s",
5705 + service->state->id, service->localport,
5706 + srvstate_names[service->srvstate],
5707 + srvstate_names[newstate]);
5708 + service->srvstate = newstate;
5709 +}
5710 +
5711 +VCHIQ_SERVICE_T *
5712 +find_service_by_handle(VCHIQ_SERVICE_HANDLE_T handle)
5713 +{
5714 + VCHIQ_SERVICE_T *service;
5715 +
5716 + spin_lock(&service_spinlock);
5717 + service = handle_to_service(handle);
5718 + if (service && (service->srvstate != VCHIQ_SRVSTATE_FREE) &&
5719 + (service->handle == handle)) {
5720 + BUG_ON(service->ref_count == 0);
5721 + service->ref_count++;
5722 + } else
5723 + service = NULL;
5724 + spin_unlock(&service_spinlock);
5725 +
5726 + if (!service)
5727 + vchiq_log_info(vchiq_core_log_level,
5728 + "Invalid service handle 0x%x", handle);
5729 +
5730 + return service;
5731 +}
5732 +
5733 +VCHIQ_SERVICE_T *
5734 +find_service_by_port(VCHIQ_STATE_T *state, int localport)
5735 +{
5736 + VCHIQ_SERVICE_T *service = NULL;
5737 + if ((unsigned int)localport <= VCHIQ_PORT_MAX) {
5738 + spin_lock(&service_spinlock);
5739 + service = state->services[localport];
5740 + if (service && (service->srvstate != VCHIQ_SRVSTATE_FREE)) {
5741 + BUG_ON(service->ref_count == 0);
5742 + service->ref_count++;
5743 + } else
5744 + service = NULL;
5745 + spin_unlock(&service_spinlock);
5746 + }
5747 +
5748 + if (!service)
5749 + vchiq_log_info(vchiq_core_log_level,
5750 + "Invalid port %d", localport);
5751 +
5752 + return service;
5753 +}
5754 +
5755 +VCHIQ_SERVICE_T *
5756 +find_service_for_instance(VCHIQ_INSTANCE_T instance,
5757 + VCHIQ_SERVICE_HANDLE_T handle) {
5758 + VCHIQ_SERVICE_T *service;
5759 +
5760 + spin_lock(&service_spinlock);
5761 + service = handle_to_service(handle);
5762 + if (service && (service->srvstate != VCHIQ_SRVSTATE_FREE) &&
5763 + (service->handle == handle) &&
5764 + (service->instance == instance)) {
5765 + BUG_ON(service->ref_count == 0);
5766 + service->ref_count++;
5767 + } else
5768 + service = NULL;
5769 + spin_unlock(&service_spinlock);
5770 +
5771 + if (!service)
5772 + vchiq_log_info(vchiq_core_log_level,
5773 + "Invalid service handle 0x%x", handle);
5774 +
5775 + return service;
5776 +}
5777 +
5778 +VCHIQ_SERVICE_T *
5779 +next_service_by_instance(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance,
5780 + int *pidx)
5781 +{
5782 + VCHIQ_SERVICE_T *service = NULL;
5783 + int idx = *pidx;
5784 +
5785 + spin_lock(&service_spinlock);
5786 + while (idx < state->unused_service) {
5787 + VCHIQ_SERVICE_T *srv = state->services[idx++];
5788 + if (srv && (srv->srvstate != VCHIQ_SRVSTATE_FREE) &&
5789 + (srv->instance == instance)) {
5790 + service = srv;
5791 + BUG_ON(service->ref_count == 0);
5792 + service->ref_count++;
5793 + break;
5794 + }
5795 + }
5796 + spin_unlock(&service_spinlock);
5797 +
5798 + *pidx = idx;
5799 +
5800 + return service;
5801 +}
5802 +
5803 +void
5804 +lock_service(VCHIQ_SERVICE_T *service)
5805 +{
5806 + spin_lock(&service_spinlock);
5807 + BUG_ON(!service || (service->ref_count == 0));
5808 + if (service)
5809 + service->ref_count++;
5810 + spin_unlock(&service_spinlock);
5811 +}
5812 +
5813 +void
5814 +unlock_service(VCHIQ_SERVICE_T *service)
5815 +{
5816 + VCHIQ_STATE_T *state = service->state;
5817 + spin_lock(&service_spinlock);
5818 + BUG_ON(!service || (service->ref_count == 0));
5819 + if (service && service->ref_count) {
5820 + service->ref_count--;
5821 + if (!service->ref_count) {
5822 + BUG_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);
5823 + state->services[service->localport] = NULL;
5824 + } else
5825 + service = NULL;
5826 + }
5827 + spin_unlock(&service_spinlock);
5828 +
5829 + if (service && service->userdata_term)
5830 + service->userdata_term(service->base.userdata);
5831 +
5832 + kfree(service);
5833 +}
5834 +
5835 +int
5836 +vchiq_get_client_id(VCHIQ_SERVICE_HANDLE_T handle)
5837 +{
5838 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
5839 + int id;
5840 +
5841 + id = service ? service->client_id : 0;
5842 + if (service)
5843 + unlock_service(service);
5844 +
5845 + return id;
5846 +}
5847 +
5848 +void *
5849 +vchiq_get_service_userdata(VCHIQ_SERVICE_HANDLE_T handle)
5850 +{
5851 + VCHIQ_SERVICE_T *service = handle_to_service(handle);
5852 +
5853 + return service ? service->base.userdata : NULL;
5854 +}
5855 +
5856 +int
5857 +vchiq_get_service_fourcc(VCHIQ_SERVICE_HANDLE_T handle)
5858 +{
5859 + VCHIQ_SERVICE_T *service = handle_to_service(handle);
5860 +
5861 + return service ? service->base.fourcc : 0;
5862 +}
5863 +
5864 +static void
5865 +mark_service_closing_internal(VCHIQ_SERVICE_T *service, int sh_thread)
5866 +{
5867 + VCHIQ_STATE_T *state = service->state;
5868 + VCHIQ_SERVICE_QUOTA_T *service_quota;
5869 +
5870 + service->closing = 1;
5871 +
5872 + /* Synchronise with other threads. */
5873 + mutex_lock(&state->recycle_mutex);
5874 + mutex_unlock(&state->recycle_mutex);
5875 + if (!sh_thread || (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT)) {
5876 + /* If we're pausing then the slot_mutex is held until resume
5877 + * by the slot handler. Therefore don't try to acquire this
5878 + * mutex if we're the slot handler and in the pause sent state.
5879 + * We don't need to in this case anyway. */
5880 + mutex_lock(&state->slot_mutex);
5881 + mutex_unlock(&state->slot_mutex);
5882 + }
5883 +
5884 + /* Unblock any sending thread. */
5885 + service_quota = &state->service_quotas[service->localport];
5886 + up(&service_quota->quota_event);
5887 +}
5888 +
5889 +static void
5890 +mark_service_closing(VCHIQ_SERVICE_T *service)
5891 +{
5892 + mark_service_closing_internal(service, 0);
5893 +}
5894 +
5895 +static inline VCHIQ_STATUS_T
5896 +make_service_callback(VCHIQ_SERVICE_T *service, VCHIQ_REASON_T reason,
5897 + VCHIQ_HEADER_T *header, void *bulk_userdata)
5898 +{
5899 + VCHIQ_STATUS_T status;
5900 + vchiq_log_trace(vchiq_core_log_level, "%d: callback:%d (%s, %x, %x)",
5901 + service->state->id, service->localport, reason_names[reason],
5902 + (unsigned int)header, (unsigned int)bulk_userdata);
5903 + status = service->base.callback(reason, header, service->handle,
5904 + bulk_userdata);
5905 + if (status == VCHIQ_ERROR) {
5906 + vchiq_log_warning(vchiq_core_log_level,
5907 + "%d: ignoring ERROR from callback to service %x",
5908 + service->state->id, service->handle);
5909 + status = VCHIQ_SUCCESS;
5910 + }
5911 + return status;
5912 +}
5913 +
5914 +inline void
5915 +vchiq_set_conn_state(VCHIQ_STATE_T *state, VCHIQ_CONNSTATE_T newstate)
5916 +{
5917 + VCHIQ_CONNSTATE_T oldstate = state->conn_state;
5918 + vchiq_log_info(vchiq_core_log_level, "%d: %s->%s", state->id,
5919 + conn_state_names[oldstate],
5920 + conn_state_names[newstate]);
5921 + state->conn_state = newstate;
5922 + vchiq_platform_conn_state_changed(state, oldstate, newstate);
5923 +}
5924 +
5925 +static inline void
5926 +remote_event_create(REMOTE_EVENT_T *event)
5927 +{
5928 + event->armed = 0;
5929 + /* Don't clear the 'fired' flag because it may already have been set
5930 + ** by the other side. */
5931 + sema_init(event->event, 0);
5932 +}
5933 +
5934 +static inline void
5935 +remote_event_destroy(REMOTE_EVENT_T *event)
5936 +{
5937 + (void)event;
5938 +}
5939 +
5940 +static inline int
5941 +remote_event_wait(REMOTE_EVENT_T *event)
5942 +{
5943 + if (!event->fired) {
5944 + event->armed = 1;
5945 + dsb();
5946 + if (!event->fired) {
5947 + if (down_interruptible(event->event) != 0) {
5948 + event->armed = 0;
5949 + return 0;
5950 + }
5951 + }
5952 + event->armed = 0;
5953 + wmb();
5954 + }
5955 +
5956 + event->fired = 0;
5957 + return 1;
5958 +}
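The armed/fired pair is a simple doorbell: the waiter publishes armed = 1, issues a dsb() so that store is visible before it re-reads fired (closing the race with a signal that arrives between the first check and arming), and only then sleeps on the semaphore; remote_event_poll() below wakes it via remote_event_signal_local() once the other side has set fired while the event is still armed.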
5959 +
5960 +static inline void
5961 +remote_event_signal_local(REMOTE_EVENT_T *event)
5962 +{
5963 + event->armed = 0;
5964 + up(event->event);
5965 +}
5966 +
5967 +static inline void
5968 +remote_event_poll(REMOTE_EVENT_T *event)
5969 +{
5970 + if (event->fired && event->armed)
5971 + remote_event_signal_local(event);
5972 +}
5973 +
5974 +void
5975 +remote_event_pollall(VCHIQ_STATE_T *state)
5976 +{
5977 + remote_event_poll(&state->local->sync_trigger);
5978 + remote_event_poll(&state->local->sync_release);
5979 + remote_event_poll(&state->local->trigger);
5980 + remote_event_poll(&state->local->recycle);
5981 +}
5982 +
5983 +/* Round up message sizes so that any space at the end of a slot is always big
5984 +** enough for a header. This relies on header size being a power of two, which
5985 +** has been verified earlier by a static assertion. */
5986 +
5987 +static inline unsigned int
5988 +calc_stride(unsigned int size)
5989 +{
5990 + /* Allow room for the header */
5991 + size += sizeof(VCHIQ_HEADER_T);
5992 +
5993 + /* Round up */
5994 + return (size + sizeof(VCHIQ_HEADER_T) - 1) & ~(sizeof(VCHIQ_HEADER_T)
5995 + - 1);
5996 +}
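With sizeof(VCHIQ_HEADER_T) asserted to be 8 above, calc_stride() rounds header-plus-payload up to the next multiple of 8, which is what guarantees the tail of every slot can always hold at least a padding header. For example:

	calc_stride(0)  == 8	(a bare header)
	calc_stride(5)  == 16	(8-byte header + 5 payload bytes, rounded up)
	calc_stride(24) == 32	(already a multiple of 8 once the header is added)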
5997 +
5998 +/* Called by the slot handler thread */
5999 +static VCHIQ_SERVICE_T *
6000 +get_listening_service(VCHIQ_STATE_T *state, int fourcc)
6001 +{
6002 + int i;
6003 +
6004 + WARN_ON(fourcc == VCHIQ_FOURCC_INVALID);
6005 +
6006 + for (i = 0; i < state->unused_service; i++) {
6007 + VCHIQ_SERVICE_T *service = state->services[i];
6008 + if (service &&
6009 + (service->public_fourcc == fourcc) &&
6010 + ((service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
6011 + ((service->srvstate == VCHIQ_SRVSTATE_OPEN) &&
6012 + (service->remoteport == VCHIQ_PORT_FREE)))) {
6013 + lock_service(service);
6014 + return service;
6015 + }
6016 + }
6017 +
6018 + return NULL;
6019 +}
6020 +
6021 +/* Called by the slot handler thread */
6022 +static VCHIQ_SERVICE_T *
6023 +get_connected_service(VCHIQ_STATE_T *state, unsigned int port)
6024 +{
6025 + int i;
6026 + for (i = 0; i < state->unused_service; i++) {
6027 + VCHIQ_SERVICE_T *service = state->services[i];
6028 + if (service && (service->srvstate == VCHIQ_SRVSTATE_OPEN)
6029 + && (service->remoteport == port)) {
6030 + lock_service(service);
6031 + return service;
6032 + }
6033 + }
6034 + return NULL;
6035 +}
6036 +
6037 +inline void
6038 +request_poll(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service, int poll_type)
6039 +{
6040 + uint32_t value;
6041 +
6042 + if (service) {
6043 + do {
6044 + value = atomic_read(&service->poll_flags);
6045 + } while (atomic_cmpxchg(&service->poll_flags, value,
6046 + value | (1 << poll_type)) != value);
6047 +
6048 + do {
6049 + value = atomic_read(&state->poll_services[
6050 + service->localport>>5]);
6051 + } while (atomic_cmpxchg(
6052 + &state->poll_services[service->localport>>5],
6053 + value, value | (1 << (service->localport & 0x1f)))
6054 + != value);
6055 + }
6056 +
6057 + state->poll_needed = 1;
6058 + wmb();
6059 +
6060 + /* ... and ensure the slot handler runs. */
6061 + remote_event_signal_local(&state->local->trigger);
6062 +}
6063 +
6064 +/* Called from queue_message, by the slot handler and application threads,
6065 +** with slot_mutex held */
6066 +static VCHIQ_HEADER_T *
6067 +reserve_space(VCHIQ_STATE_T *state, int space, int is_blocking)
6068 +{
6069 + VCHIQ_SHARED_STATE_T *local = state->local;
6070 + int tx_pos = state->local_tx_pos;
6071 + int slot_space = VCHIQ_SLOT_SIZE - (tx_pos & VCHIQ_SLOT_MASK);
6072 +
6073 + if (space > slot_space) {
6074 + VCHIQ_HEADER_T *header;
6075 + /* Fill the remaining space with padding */
6076 + WARN_ON(state->tx_data == NULL);
6077 + header = (VCHIQ_HEADER_T *)
6078 + (state->tx_data + (tx_pos & VCHIQ_SLOT_MASK));
6079 + header->msgid = VCHIQ_MSGID_PADDING;
6080 + header->size = slot_space - sizeof(VCHIQ_HEADER_T);
6081 +
6082 + tx_pos += slot_space;
6083 + }
6084 +
6085 + /* If necessary, get the next slot. */
6086 + if ((tx_pos & VCHIQ_SLOT_MASK) == 0) {
6087 + int slot_index;
6088 +
6089 + /* If there is no free slot... */
6090 +
6091 + if (down_trylock(&state->slot_available_event) != 0) {
6092 + /* ...wait for one. */
6093 +
6094 + VCHIQ_STATS_INC(state, slot_stalls);
6095 +
6096 + /* But first, flush through the last slot. */
6097 + state->local_tx_pos = tx_pos;
6098 + local->tx_pos = tx_pos;
6099 + remote_event_signal(&state->remote->trigger);
6100 +
6101 + if (!is_blocking ||
6102 + (down_interruptible(
6103 + &state->slot_available_event) != 0))
6104 + return NULL; /* No space available */
6105 + }
6106 +
6107 + BUG_ON(tx_pos ==
6108 + (state->slot_queue_available * VCHIQ_SLOT_SIZE));
6109 +
6110 + slot_index = local->slot_queue[
6111 + SLOT_QUEUE_INDEX_FROM_POS(tx_pos) &
6112 + VCHIQ_SLOT_QUEUE_MASK];
6113 + state->tx_data =
6114 + (char *)SLOT_DATA_FROM_INDEX(state, slot_index);
6115 + }
6116 +
6117 + state->local_tx_pos = tx_pos + space;
6118 +
6119 + return (VCHIQ_HEADER_T *)(state->tx_data + (tx_pos & VCHIQ_SLOT_MASK));
6120 +}
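Worked example (illustrative sizes, since VCHIQ_SLOT_SIZE is defined elsewhere in the patch): if VCHIQ_SLOT_SIZE were 4096 and local_tx_pos stood at 4088, a request for 24 bytes would not fit in the 8 bytes left in the current slot; those 8 bytes are consumed by a zero-payload VCHIQ_MSGID_PADDING header, tx_pos advances to the 4096 boundary, the next slot index is taken from the local slot queue (waiting on slot_available_event if none is free), and the 24-byte message starts at offset 0 of that slot, leaving local_tx_pos at 4120.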
6121 +
6122 +/* Called by the recycle thread. */
6123 +static void
6124 +process_free_queue(VCHIQ_STATE_T *state)
6125 +{
6126 + VCHIQ_SHARED_STATE_T *local = state->local;
6127 + BITSET_T service_found[BITSET_SIZE(VCHIQ_MAX_SERVICES)];
6128 + int slot_queue_available;
6129 +
6130 + /* Use a read memory barrier to ensure that any state that may have
6131 + ** been modified by another thread is not masked by stale prefetched
6132 + ** values. */
6133 + rmb();
6134 +
6135 + /* Find slots which have been freed by the other side, and return them
6136 + ** to the available queue. */
6137 + slot_queue_available = state->slot_queue_available;
6138 +
6139 + while (slot_queue_available != local->slot_queue_recycle) {
6140 + unsigned int pos;
6141 + int slot_index = local->slot_queue[slot_queue_available++ &
6142 + VCHIQ_SLOT_QUEUE_MASK];
6143 + char *data = (char *)SLOT_DATA_FROM_INDEX(state, slot_index);
6144 + int data_found = 0;
6145 +
6146 + vchiq_log_trace(vchiq_core_log_level, "%d: pfq %d=%x %x %x",
6147 + state->id, slot_index, (unsigned int)data,
6148 + local->slot_queue_recycle, slot_queue_available);
6149 +
6150 + /* Initialise the bitmask for services which have used this
6151 + ** slot */
6152 + BITSET_ZERO(service_found);
6153 +
6154 + pos = 0;
6155 +
6156 + while (pos < VCHIQ_SLOT_SIZE) {
6157 + VCHIQ_HEADER_T *header =
6158 + (VCHIQ_HEADER_T *)(data + pos);
6159 + int msgid = header->msgid;
6160 + if (VCHIQ_MSG_TYPE(msgid) == VCHIQ_MSG_DATA) {
6161 + int port = VCHIQ_MSG_SRCPORT(msgid);
6162 + VCHIQ_SERVICE_QUOTA_T *service_quota =
6163 + &state->service_quotas[port];
6164 + int count;
6165 + spin_lock(&quota_spinlock);
6166 + count = service_quota->message_use_count;
6167 + if (count > 0)
6168 + service_quota->message_use_count =
6169 + count - 1;
6170 + spin_unlock(&quota_spinlock);
6171 +
6172 + if (count == service_quota->message_quota)
6173 + /* Signal the service that it
6174 + ** has dropped below its quota
6175 + */
6176 + up(&service_quota->quota_event);
6177 + else if (count == 0) {
6178 + vchiq_log_error(vchiq_core_log_level,
6179 + "service %d "
6180 + "message_use_count=%d "
6181 + "(header %x, msgid %x, "
6182 + "header->msgid %x, "
6183 + "header->size %x)",
6184 + port,
6185 + service_quota->
6186 + message_use_count,
6187 + (unsigned int)header, msgid,
6188 + header->msgid,
6189 + header->size);
6190 + WARN(1, "invalid message use count\n");
6191 + }
6192 + if (!BITSET_IS_SET(service_found, port)) {
6193 + /* Set the found bit for this service */
6194 + BITSET_SET(service_found, port);
6195 +
6196 + spin_lock(&quota_spinlock);
6197 + count = service_quota->slot_use_count;
6198 + if (count > 0)
6199 + service_quota->slot_use_count =
6200 + count - 1;
6201 + spin_unlock(&quota_spinlock);
6202 +
6203 + if (count > 0) {
6204 + /* Signal the service in case
6205 + ** it has dropped below its
6206 + ** quota */
6207 + up(&service_quota->quota_event);
6208 + vchiq_log_trace(
6209 + vchiq_core_log_level,
6210 + "%d: pfq:%d %x@%x - "
6211 + "slot_use->%d",
6212 + state->id, port,
6213 + header->size,
6214 + (unsigned int)header,
6215 + count - 1);
6216 + } else {
6217 + vchiq_log_error(
6218 + vchiq_core_log_level,
6219 + "service %d "
6220 + "slot_use_count"
6221 + "=%d (header %x"
6222 + ", msgid %x, "
6223 + "header->msgid"
6224 + " %x, header->"
6225 + "size %x)",
6226 + port, count,
6227 + (unsigned int)header,
6228 + msgid,
6229 + header->msgid,
6230 + header->size);
6231 + WARN(1, "bad slot use count\n");
6232 + }
6233 + }
6234 +
6235 + data_found = 1;
6236 + }
6237 +
6238 + pos += calc_stride(header->size);
6239 + if (pos > VCHIQ_SLOT_SIZE) {
6240 + vchiq_log_error(vchiq_core_log_level,
6241 + "pfq - pos %x: header %x, msgid %x, "
6242 + "header->msgid %x, header->size %x",
6243 + pos, (unsigned int)header, msgid,
6244 + header->msgid, header->size);
6245 + WARN(1, "invalid slot position\n");
6246 + }
6247 + }
6248 +
6249 + if (data_found) {
6250 + int count;
6251 + spin_lock(&quota_spinlock);
6252 + count = state->data_use_count;
6253 + if (count > 0)
6254 + state->data_use_count =
6255 + count - 1;
6256 + spin_unlock(&quota_spinlock);
6257 + if (count == state->data_quota)
6258 + up(&state->data_quota_event);
6259 + }
6260 +
6261 + state->slot_queue_available = slot_queue_available;
6262 + up(&state->slot_available_event);
6263 + }
6264 +}
6265 +
6266 +/* Called by the slot handler and application threads */
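+/* Builds a message in the next free space of the local transmit slots.
+** DATA messages are throttled against the global data quota and the
+** per-service message/slot quotas, waiting (interruptibly) until space
+** is available. The payload is gathered from the element array, the
+** header is published, and the peer's trigger event is signalled. */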
6267 +static VCHIQ_STATUS_T
6268 +queue_message(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
6269 + int msgid, const VCHIQ_ELEMENT_T *elements,
6270 + int count, int size, int is_blocking)
6271 +{
6272 + VCHIQ_SHARED_STATE_T *local;
6273 + VCHIQ_SERVICE_QUOTA_T *service_quota = NULL;
6274 + VCHIQ_HEADER_T *header;
6275 + int type = VCHIQ_MSG_TYPE(msgid);
6276 +
6277 + unsigned int stride;
6278 +
6279 + local = state->local;
6280 +
6281 + stride = calc_stride(size);
6282 +
6283 + WARN_ON(!(stride <= VCHIQ_SLOT_SIZE));
6284 +
6285 + if ((type != VCHIQ_MSG_RESUME) &&
6286 + (mutex_lock_interruptible(&state->slot_mutex) != 0))
6287 + return VCHIQ_RETRY;
6288 +
6289 + if (type == VCHIQ_MSG_DATA) {
6290 + int tx_end_index;
6291 +
6292 + BUG_ON(!service);
6293 +
6294 + if (service->closing) {
6295 + /* The service has been closed */
6296 + mutex_unlock(&state->slot_mutex);
6297 + return VCHIQ_ERROR;
6298 + }
6299 +
6300 + service_quota = &state->service_quotas[service->localport];
6301 +
6302 + spin_lock(&quota_spinlock);
6303 +
6304 + /* Ensure this service doesn't use more than its quota of
6305 + ** messages or slots */
6306 + tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
6307 + state->local_tx_pos + stride - 1);
6308 +
6309 + /* Ensure data messages don't use more than their quota of
6310 + ** slots */
6311 + while ((tx_end_index != state->previous_data_index) &&
6312 + (state->data_use_count == state->data_quota)) {
6313 + VCHIQ_STATS_INC(state, data_stalls);
6314 + spin_unlock(&quota_spinlock);
6315 + mutex_unlock(&state->slot_mutex);
6316 +
6317 + if (down_interruptible(&state->data_quota_event)
6318 + != 0)
6319 + return VCHIQ_RETRY;
6320 +
6321 + mutex_lock(&state->slot_mutex);
6322 + spin_lock(&quota_spinlock);
6323 + tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
6324 + state->local_tx_pos + stride - 1);
6325 + if ((tx_end_index == state->previous_data_index) ||
6326 + (state->data_use_count < state->data_quota)) {
6327 + /* Pass the signal on to other waiters */
6328 + up(&state->data_quota_event);
6329 + break;
6330 + }
6331 + }
6332 +
6333 + while ((service_quota->message_use_count ==
6334 + service_quota->message_quota) ||
6335 + ((tx_end_index != service_quota->previous_tx_index) &&
6336 + (service_quota->slot_use_count ==
6337 + service_quota->slot_quota))) {
6338 + spin_unlock(&quota_spinlock);
6339 + vchiq_log_trace(vchiq_core_log_level,
6340 + "%d: qm:%d %s,%x - quota stall "
6341 + "(msg %d, slot %d)",
6342 + state->id, service->localport,
6343 + msg_type_str(type), size,
6344 + service_quota->message_use_count,
6345 + service_quota->slot_use_count);
6346 + VCHIQ_SERVICE_STATS_INC(service, quota_stalls);
6347 + mutex_unlock(&state->slot_mutex);
6348 + if (down_interruptible(&service_quota->quota_event)
6349 + != 0)
6350 + return VCHIQ_RETRY;
6351 + if (service->closing)
6352 + return VCHIQ_ERROR;
6353 + if (mutex_lock_interruptible(&state->slot_mutex) != 0)
6354 + return VCHIQ_RETRY;
6355 + if (service->srvstate != VCHIQ_SRVSTATE_OPEN) {
6356 + /* The service has been closed */
6357 + mutex_unlock(&state->slot_mutex);
6358 + return VCHIQ_ERROR;
6359 + }
6360 + spin_lock(&quota_spinlock);
6361 + tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
6362 + state->local_tx_pos + stride - 1);
6363 + }
6364 +
6365 + spin_unlock(&quota_spinlock);
6366 + }
6367 +
6368 + header = reserve_space(state, stride, is_blocking);
6369 +
6370 + if (!header) {
6371 + if (service)
6372 + VCHIQ_SERVICE_STATS_INC(service, slot_stalls);
6373 + mutex_unlock(&state->slot_mutex);
6374 + return VCHIQ_RETRY;
6375 + }
6376 +
6377 + if (type == VCHIQ_MSG_DATA) {
6378 + int i, pos;
6379 + int tx_end_index;
6380 + int slot_use_count;
6381 +
6382 + vchiq_log_info(vchiq_core_log_level,
6383 + "%d: qm %s@%x,%x (%d->%d)",
6384 + state->id,
6385 + msg_type_str(VCHIQ_MSG_TYPE(msgid)),
6386 + (unsigned int)header, size,
6387 + VCHIQ_MSG_SRCPORT(msgid),
6388 + VCHIQ_MSG_DSTPORT(msgid));
6389 +
6390 + BUG_ON(!service);
6391 +
6392 + for (i = 0, pos = 0; i < (unsigned int)count;
6393 + pos += elements[i++].size)
6394 + if (elements[i].size) {
6395 + if (vchiq_copy_from_user
6396 + (header->data + pos, elements[i].data,
6397 + (size_t) elements[i].size) !=
6398 + VCHIQ_SUCCESS) {
6399 + mutex_unlock(&state->slot_mutex);
6400 + VCHIQ_SERVICE_STATS_INC(service,
6401 + error_count);
6402 + return VCHIQ_ERROR;
6403 + }
6404 + if (i == 0) {
6405 + if (vchiq_core_msg_log_level >=
6406 + VCHIQ_LOG_INFO)
6407 + vchiq_log_dump_mem("Sent", 0,
6408 + header->data + pos,
6409 + min(64u,
6410 + elements[0].size));
6411 + }
6412 + }
6413 +
6414 + spin_lock(&quota_spinlock);
6415 + service_quota->message_use_count++;
6416 +
6417 + tx_end_index =
6418 + SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos - 1);
6419 +
6420 + /* If this transmission can't fit in the last slot used by any
6421 + ** service, the data_use_count must be increased. */
6422 + if (tx_end_index != state->previous_data_index) {
6423 + state->previous_data_index = tx_end_index;
6424 + state->data_use_count++;
6425 + }
6426 +
6427 + /* If this isn't the same slot last used by this service,
6428 + ** the service's slot_use_count must be increased. */
6429 + if (tx_end_index != service_quota->previous_tx_index) {
6430 + service_quota->previous_tx_index = tx_end_index;
6431 + slot_use_count = ++service_quota->slot_use_count;
6432 + } else {
6433 + slot_use_count = 0;
6434 + }
6435 +
6436 + spin_unlock(&quota_spinlock);
6437 +
6438 + if (slot_use_count)
6439 + vchiq_log_trace(vchiq_core_log_level,
6440 + "%d: qm:%d %s,%x - slot_use->%d (hdr %p)",
6441 + state->id, service->localport,
6442 + msg_type_str(VCHIQ_MSG_TYPE(msgid)), size,
6443 + slot_use_count, header);
6444 +
6445 + VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
6446 + VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
6447 + } else {
6448 + vchiq_log_info(vchiq_core_log_level,
6449 + "%d: qm %s@%x,%x (%d->%d)", state->id,
6450 + msg_type_str(VCHIQ_MSG_TYPE(msgid)),
6451 + (unsigned int)header, size,
6452 + VCHIQ_MSG_SRCPORT(msgid),
6453 + VCHIQ_MSG_DSTPORT(msgid));
6454 + if (size != 0) {
6455 + WARN_ON(!((count == 1) && (size == elements[0].size)));
6456 + memcpy(header->data, elements[0].data,
6457 + elements[0].size);
6458 + }
6459 + VCHIQ_STATS_INC(state, ctrl_tx_count);
6460 + }
6461 +
6462 + header->msgid = msgid;
6463 + header->size = size;
6464 +
6465 + {
6466 + int svc_fourcc;
6467 +
6468 + svc_fourcc = service
6469 + ? service->base.fourcc
6470 + : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
6471 +
6472 + vchiq_log_info(vchiq_core_msg_log_level,
6473 + "Sent Msg %s(%u) to %c%c%c%c s:%u d:%d len:%d",
6474 + msg_type_str(VCHIQ_MSG_TYPE(msgid)),
6475 + VCHIQ_MSG_TYPE(msgid),
6476 + VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
6477 + VCHIQ_MSG_SRCPORT(msgid),
6478 + VCHIQ_MSG_DSTPORT(msgid),
6479 + size);
6480 + }
6481 +
6482 + /* Make sure the new header is visible to the peer. */
6483 + wmb();
6484 +
6485 + /* Make the new tx_pos visible to the peer. */
6486 + local->tx_pos = state->local_tx_pos;
6487 + wmb();
6488 +
6489 + if (service && (type == VCHIQ_MSG_CLOSE))
6490 + vchiq_set_service_state(service, VCHIQ_SRVSTATE_CLOSESENT);
6491 +
6492 + if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_PAUSE)
6493 + mutex_unlock(&state->slot_mutex);
6494 +
6495 + remote_event_signal(&state->remote->trigger);
6496 +
6497 + return VCHIQ_SUCCESS;
6498 +}
6499 +
6500 +/* Called by the slot handler and application threads */
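+/* Sends a message through the dedicated synchronous slot: waits for the
+** peer to release the slot (sync_release), writes the header and
+** payload, and signals the peer's sync_trigger event. */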
6501 +static VCHIQ_STATUS_T
6502 +queue_message_sync(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
6503 + int msgid, const VCHIQ_ELEMENT_T *elements,
6504 + int count, int size, int is_blocking)
6505 +{
6506 + VCHIQ_SHARED_STATE_T *local;
6507 + VCHIQ_HEADER_T *header;
6508 +
6509 + local = state->local;
6510 +
6511 + if ((VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_RESUME) &&
6512 + (mutex_lock_interruptible(&state->sync_mutex) != 0))
6513 + return VCHIQ_RETRY;
6514 +
6515 + remote_event_wait(&local->sync_release);
6516 +
6517 + rmb();
6518 +
6519 + header = (VCHIQ_HEADER_T *)SLOT_DATA_FROM_INDEX(state,
6520 + local->slot_sync);
6521 +
6522 + {
6523 + int oldmsgid = header->msgid;
6524 + if (oldmsgid != VCHIQ_MSGID_PADDING)
6525 + vchiq_log_error(vchiq_core_log_level,
6526 + "%d: qms - msgid %x, not PADDING",
6527 + state->id, oldmsgid);
6528 + }
6529 +
6530 + if (service) {
6531 + int i, pos;
6532 +
6533 + vchiq_log_info(vchiq_sync_log_level,
6534 + "%d: qms %s@%x,%x (%d->%d)", state->id,
6535 + msg_type_str(VCHIQ_MSG_TYPE(msgid)),
6536 + (unsigned int)header, size,
6537 + VCHIQ_MSG_SRCPORT(msgid),
6538 + VCHIQ_MSG_DSTPORT(msgid));
6539 +
6540 + for (i = 0, pos = 0; i < (unsigned int)count;
6541 + pos += elements[i++].size)
6542 + if (elements[i].size) {
6543 + if (vchiq_copy_from_user
6544 + (header->data + pos, elements[i].data,
6545 + (size_t) elements[i].size) !=
6546 + VCHIQ_SUCCESS) {
6547 + mutex_unlock(&state->sync_mutex);
6548 + VCHIQ_SERVICE_STATS_INC(service,
6549 + error_count);
6550 + return VCHIQ_ERROR;
6551 + }
6552 + if (i == 0) {
6553 + if (vchiq_sync_log_level >=
6554 + VCHIQ_LOG_TRACE)
6555 + vchiq_log_dump_mem("Sent Sync",
6556 + 0, header->data + pos,
6557 + min(64u,
6558 + elements[0].size));
6559 + }
6560 + }
6561 +
6562 + VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
6563 + VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
6564 + } else {
6565 + vchiq_log_info(vchiq_sync_log_level,
6566 + "%d: qms %s@%x,%x (%d->%d)", state->id,
6567 + msg_type_str(VCHIQ_MSG_TYPE(msgid)),
6568 + (unsigned int)header, size,
6569 + VCHIQ_MSG_SRCPORT(msgid),
6570 + VCHIQ_MSG_DSTPORT(msgid));
6571 + if (size != 0) {
6572 + WARN_ON(!((count == 1) && (size == elements[0].size)));
6573 + memcpy(header->data, elements[0].data,
6574 + elements[0].size);
6575 + }
6576 + VCHIQ_STATS_INC(state, ctrl_tx_count);
6577 + }
6578 +
6579 + header->size = size;
6580 + header->msgid = msgid;
6581 +
6582 + if (vchiq_sync_log_level >= VCHIQ_LOG_TRACE) {
6583 + int svc_fourcc;
6584 +
6585 + svc_fourcc = service
6586 + ? service->base.fourcc
6587 + : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
6588 +
6589 + vchiq_log_trace(vchiq_sync_log_level,
6590 + "Sent Sync Msg %s(%u) to %c%c%c%c s:%u d:%d len:%d",
6591 + msg_type_str(VCHIQ_MSG_TYPE(msgid)),
6592 + VCHIQ_MSG_TYPE(msgid),
6593 + VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
6594 + VCHIQ_MSG_SRCPORT(msgid),
6595 + VCHIQ_MSG_DSTPORT(msgid),
6596 + size);
6597 + }
6598 +
6599 + /* Make sure the new header is visible to the peer. */
6600 + wmb();
6601 +
6602 + remote_event_signal(&state->remote->sync_trigger);
6603 +
6604 + if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_PAUSE)
6605 + mutex_unlock(&state->sync_mutex);
6606 +
6607 + return VCHIQ_SUCCESS;
6608 +}
6609 +
6610 +static inline void
6611 +claim_slot(VCHIQ_SLOT_INFO_T *slot)
6612 +{
6613 + slot->use_count++;
6614 +}
6615 +
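+/* Drops a reference on a received slot. Once the release count catches
+** up with the use count, the slot index is appended to the remote
+** recycle queue and the peer's recycle event is signalled so the slot
+** can be reclaimed. */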
6616 +static void
6617 +release_slot(VCHIQ_STATE_T *state, VCHIQ_SLOT_INFO_T *slot_info,
6618 + VCHIQ_HEADER_T *header, VCHIQ_SERVICE_T *service)
6619 +{
6620 + int release_count;
6621 +
6622 + mutex_lock(&state->recycle_mutex);
6623 +
6624 + if (header) {
6625 + int msgid = header->msgid;
6626 + if (((msgid & VCHIQ_MSGID_CLAIMED) == 0) ||
6627 + (service && service->closing)) {
6628 + mutex_unlock(&state->recycle_mutex);
6629 + return;
6630 + }
6631 +
6632 + /* Rewrite the message header to prevent a double
6633 + ** release */
6634 + header->msgid = msgid & ~VCHIQ_MSGID_CLAIMED;
6635 + }
6636 +
6637 + release_count = slot_info->release_count;
6638 + slot_info->release_count = ++release_count;
6639 +
6640 + if (release_count == slot_info->use_count) {
6641 + int slot_queue_recycle;
6642 + /* Add to the freed queue */
6643 +
6644 + /* A read barrier is necessary here to prevent speculative
6645 + ** fetches of remote->slot_queue_recycle from overtaking the
6646 + ** mutex. */
6647 + rmb();
6648 +
6649 + slot_queue_recycle = state->remote->slot_queue_recycle;
6650 + state->remote->slot_queue[slot_queue_recycle &
6651 + VCHIQ_SLOT_QUEUE_MASK] =
6652 + SLOT_INDEX_FROM_INFO(state, slot_info);
6653 + state->remote->slot_queue_recycle = slot_queue_recycle + 1;
6654 + vchiq_log_info(vchiq_core_log_level,
6655 + "%d: release_slot %d - recycle->%x",
6656 + state->id, SLOT_INDEX_FROM_INFO(state, slot_info),
6657 + state->remote->slot_queue_recycle);
6658 +
6659 + /* A write barrier is necessary, but remote_event_signal
6660 + ** contains one. */
6661 + remote_event_signal(&state->remote->recycle);
6662 + }
6663 +
6664 + mutex_unlock(&state->recycle_mutex);
6665 +}
6666 +
6667 +/* Called by the slot handler - don't hold the bulk mutex */
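+/* On the master side, sends a BULK_RX_DONE/BULK_TX_DONE message for
+** each completed transfer; then, on both sides, wakes blocking waiters
+** or issues completion callbacks for each bulk removed from the queue. */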
6668 +static VCHIQ_STATUS_T
6669 +notify_bulks(VCHIQ_SERVICE_T *service, VCHIQ_BULK_QUEUE_T *queue,
6670 + int retry_poll)
6671 +{
6672 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
6673 +
6674 + vchiq_log_trace(vchiq_core_log_level,
6675 + "%d: nb:%d %cx - p=%x rn=%x r=%x",
6676 + service->state->id, service->localport,
6677 + (queue == &service->bulk_tx) ? 't' : 'r',
6678 + queue->process, queue->remote_notify, queue->remove);
6679 +
6680 + if (service->state->is_master) {
6681 + while (queue->remote_notify != queue->process) {
6682 + VCHIQ_BULK_T *bulk =
6683 + &queue->bulks[BULK_INDEX(queue->remote_notify)];
6684 + int msgtype = (bulk->dir == VCHIQ_BULK_TRANSMIT) ?
6685 + VCHIQ_MSG_BULK_RX_DONE : VCHIQ_MSG_BULK_TX_DONE;
6686 + int msgid = VCHIQ_MAKE_MSG(msgtype, service->localport,
6687 + service->remoteport);
6688 + VCHIQ_ELEMENT_T element = { &bulk->actual, 4 };
6689 + /* Only reply to non-dummy bulk requests */
6690 + if (bulk->remote_data) {
6691 + status = queue_message(service->state, NULL,
6692 + msgid, &element, 1, 4, 0);
6693 + if (status != VCHIQ_SUCCESS)
6694 + break;
6695 + }
6696 + queue->remote_notify++;
6697 + }
6698 + } else {
6699 + queue->remote_notify = queue->process;
6700 + }
6701 +
6702 + if (status == VCHIQ_SUCCESS) {
6703 + while (queue->remove != queue->remote_notify) {
6704 + VCHIQ_BULK_T *bulk =
6705 + &queue->bulks[BULK_INDEX(queue->remove)];
6706 +
6707 + /* Only generate callbacks for non-dummy bulk
6708 + ** requests, and non-terminated services */
6709 + if (bulk->data && service->instance) {
6710 + if (bulk->actual != VCHIQ_BULK_ACTUAL_ABORTED) {
6711 + if (bulk->dir == VCHIQ_BULK_TRANSMIT) {
6712 + VCHIQ_SERVICE_STATS_INC(service,
6713 + bulk_tx_count);
6714 + VCHIQ_SERVICE_STATS_ADD(service,
6715 + bulk_tx_bytes,
6716 + bulk->actual);
6717 + } else {
6718 + VCHIQ_SERVICE_STATS_INC(service,
6719 + bulk_rx_count);
6720 + VCHIQ_SERVICE_STATS_ADD(service,
6721 + bulk_rx_bytes,
6722 + bulk->actual);
6723 + }
6724 + } else {
6725 + VCHIQ_SERVICE_STATS_INC(service,
6726 + bulk_aborted_count);
6727 + }
6728 + if (bulk->mode == VCHIQ_BULK_MODE_BLOCKING) {
6729 + struct bulk_waiter *waiter;
6730 + spin_lock(&bulk_waiter_spinlock);
6731 + waiter = bulk->userdata;
6732 + if (waiter) {
6733 + waiter->actual = bulk->actual;
6734 + up(&waiter->event);
6735 + }
6736 + spin_unlock(&bulk_waiter_spinlock);
6737 + } else if (bulk->mode ==
6738 + VCHIQ_BULK_MODE_CALLBACK) {
6739 + VCHIQ_REASON_T reason = (bulk->dir ==
6740 + VCHIQ_BULK_TRANSMIT) ?
6741 + ((bulk->actual ==
6742 + VCHIQ_BULK_ACTUAL_ABORTED) ?
6743 + VCHIQ_BULK_TRANSMIT_ABORTED :
6744 + VCHIQ_BULK_TRANSMIT_DONE) :
6745 + ((bulk->actual ==
6746 + VCHIQ_BULK_ACTUAL_ABORTED) ?
6747 + VCHIQ_BULK_RECEIVE_ABORTED :
6748 + VCHIQ_BULK_RECEIVE_DONE);
6749 + status = make_service_callback(service,
6750 + reason, NULL, bulk->userdata);
6751 + if (status == VCHIQ_RETRY)
6752 + break;
6753 + }
6754 + }
6755 +
6756 + queue->remove++;
6757 + up(&service->bulk_remove_event);
6758 + }
6759 + if (!retry_poll)
6760 + status = VCHIQ_SUCCESS;
6761 + }
6762 +
6763 + if (status == VCHIQ_RETRY)
6764 + request_poll(service->state, service,
6765 + (queue == &service->bulk_tx) ?
6766 + VCHIQ_POLL_TXNOTIFY : VCHIQ_POLL_RXNOTIFY);
6767 +
6768 + return status;
6769 +}
6770 +
6771 +/* Called by the slot handler thread */
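+/* Processes the per-service poll flags set by request_poll(), closing
+** or terminating services as requested and delivering any pending bulk
+** transmit/receive notifications. */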
6772 +static void
6773 +poll_services(VCHIQ_STATE_T *state)
6774 +{
6775 + int group, i;
6776 +
6777 + for (group = 0; group < BITSET_SIZE(state->unused_service); group++) {
6778 + uint32_t flags;
6779 + flags = atomic_xchg(&state->poll_services[group], 0);
6780 + for (i = 0; flags; i++) {
6781 + if (flags & (1 << i)) {
6782 + VCHIQ_SERVICE_T *service =
6783 + find_service_by_port(state,
6784 + (group<<5) + i);
6785 + uint32_t service_flags;
6786 + flags &= ~(1 << i);
6787 + if (!service)
6788 + continue;
6789 + service_flags =
6790 + atomic_xchg(&service->poll_flags, 0);
6791 + if (service_flags &
6792 + (1 << VCHIQ_POLL_REMOVE)) {
6793 + vchiq_log_info(vchiq_core_log_level,
6794 + "%d: ps - remove %d<->%d",
6795 + state->id, service->localport,
6796 + service->remoteport);
6797 +
6798 + /* Make it look like a client, because
6799 + it must be removed and not left in
6800 + the LISTENING state. */
6801 + service->public_fourcc =
6802 + VCHIQ_FOURCC_INVALID;
6803 +
6804 + if (vchiq_close_service_internal(
6805 + service, 0/*!close_recvd*/) !=
6806 + VCHIQ_SUCCESS)
6807 + request_poll(state, service,
6808 + VCHIQ_POLL_REMOVE);
6809 + } else if (service_flags &
6810 + (1 << VCHIQ_POLL_TERMINATE)) {
6811 + vchiq_log_info(vchiq_core_log_level,
6812 + "%d: ps - terminate %d<->%d",
6813 + state->id, service->localport,
6814 + service->remoteport);
6815 + if (vchiq_close_service_internal(
6816 + service, 0/*!close_recvd*/) !=
6817 + VCHIQ_SUCCESS)
6818 + request_poll(state, service,
6819 + VCHIQ_POLL_TERMINATE);
6820 + }
6821 + if (service_flags & (1 << VCHIQ_POLL_TXNOTIFY))
6822 + notify_bulks(service,
6823 + &service->bulk_tx,
6824 + 1/*retry_poll*/);
6825 + if (service_flags & (1 << VCHIQ_POLL_RXNOTIFY))
6826 + notify_bulks(service,
6827 + &service->bulk_rx,
6828 + 1/*retry_poll*/);
6829 + unlock_service(service);
6830 + }
6831 + }
6832 + }
6833 +}
6834 +
6835 +/* Called by the slot handler or application threads, holding the bulk mutex. */
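+/* Matches bulk requests queued locally with those queued by the peer
+** and performs the corresponding transfers, returning the number of
+** bulks resolved. */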
6836 +static int
6837 +resolve_bulks(VCHIQ_SERVICE_T *service, VCHIQ_BULK_QUEUE_T *queue)
6838 +{
6839 + VCHIQ_STATE_T *state = service->state;
6840 + int resolved = 0;
6841 + int rc;
6842 +
6843 + while ((queue->process != queue->local_insert) &&
6844 + (queue->process != queue->remote_insert)) {
6845 + VCHIQ_BULK_T *bulk = &queue->bulks[BULK_INDEX(queue->process)];
6846 +
6847 + vchiq_log_trace(vchiq_core_log_level,
6848 + "%d: rb:%d %cx - li=%x ri=%x p=%x",
6849 + state->id, service->localport,
6850 + (queue == &service->bulk_tx) ? 't' : 'r',
6851 + queue->local_insert, queue->remote_insert,
6852 + queue->process);
6853 +
6854 + WARN_ON(!((int)(queue->local_insert - queue->process) > 0));
6855 + WARN_ON(!((int)(queue->remote_insert - queue->process) > 0));
6856 +
6857 + rc = mutex_lock_interruptible(&state->bulk_transfer_mutex);
6858 + if (rc != 0)
6859 + break;
6860 +
6861 + vchiq_transfer_bulk(bulk);
6862 + mutex_unlock(&state->bulk_transfer_mutex);
6863 +
6864 + if (vchiq_core_msg_log_level >= VCHIQ_LOG_INFO) {
6865 + const char *header = (queue == &service->bulk_tx) ?
6866 + "Send Bulk to" : "Recv Bulk from";
6867 + if (bulk->actual != VCHIQ_BULK_ACTUAL_ABORTED)
6868 + vchiq_log_info(vchiq_core_msg_log_level,
6869 + "%s %c%c%c%c d:%d len:%d %x<->%x",
6870 + header,
6871 + VCHIQ_FOURCC_AS_4CHARS(
6872 + service->base.fourcc),
6873 + service->remoteport,
6874 + bulk->size,
6875 + (unsigned int)bulk->data,
6876 + (unsigned int)bulk->remote_data);
6877 + else
6878 + vchiq_log_info(vchiq_core_msg_log_level,
6879 + "%s %c%c%c%c d:%d ABORTED - tx len:%d,"
6880 + " rx len:%d %x<->%x",
6881 + header,
6882 + VCHIQ_FOURCC_AS_4CHARS(
6883 + service->base.fourcc),
6884 + service->remoteport,
6885 + bulk->size,
6886 + bulk->remote_size,
6887 + (unsigned int)bulk->data,
6888 + (unsigned int)bulk->remote_data);
6889 + }
6890 +
6891 + vchiq_complete_bulk(bulk);
6892 + queue->process++;
6893 + resolved++;
6894 + }
6895 + return resolved;
6896 +}
6897 +
6898 +/* Called with the bulk_mutex held */
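+/* Marks every outstanding bulk on the queue as aborted, fabricating
+** dummy local or remote halves where necessary so that each entry can
+** be completed and reported to the client. */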
6899 +static void
6900 +abort_outstanding_bulks(VCHIQ_SERVICE_T *service, VCHIQ_BULK_QUEUE_T *queue)
6901 +{
6902 + int is_tx = (queue == &service->bulk_tx);
6903 + vchiq_log_trace(vchiq_core_log_level,
6904 + "%d: aob:%d %cx - li=%x ri=%x p=%x",
6905 + service->state->id, service->localport, is_tx ? 't' : 'r',
6906 + queue->local_insert, queue->remote_insert, queue->process);
6907 +
6908 + WARN_ON(!((int)(queue->local_insert - queue->process) >= 0));
6909 + WARN_ON(!((int)(queue->remote_insert - queue->process) >= 0));
6910 +
6911 + while ((queue->process != queue->local_insert) ||
6912 + (queue->process != queue->remote_insert)) {
6913 + VCHIQ_BULK_T *bulk = &queue->bulks[BULK_INDEX(queue->process)];
6914 +
6915 + if (queue->process == queue->remote_insert) {
6916 + /* fabricate a matching dummy bulk */
6917 + bulk->remote_data = NULL;
6918 + bulk->remote_size = 0;
6919 + queue->remote_insert++;
6920 + }
6921 +
6922 + if (queue->process != queue->local_insert) {
6923 + vchiq_complete_bulk(bulk);
6924 +
6925 + vchiq_log_info(vchiq_core_msg_log_level,
6926 + "%s %c%c%c%c d:%d ABORTED - tx len:%d, "
6927 + "rx len:%d",
6928 + is_tx ? "Send Bulk to" : "Recv Bulk from",
6929 + VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
6930 + service->remoteport,
6931 + bulk->size,
6932 + bulk->remote_size);
6933 + } else {
6934 + /* fabricate a matching dummy bulk */
6935 + bulk->data = NULL;
6936 + bulk->size = 0;
6937 + bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
6938 + bulk->dir = is_tx ? VCHIQ_BULK_TRANSMIT :
6939 + VCHIQ_BULK_RECEIVE;
6940 + queue->local_insert++;
6941 + }
6942 +
6943 + queue->process++;
6944 + }
6945 +}
6946 +
6947 +/* Called from the slot handler thread */
6948 +static void
6949 +pause_bulks(VCHIQ_STATE_T *state)
6950 +{
6951 + if (unlikely(atomic_inc_return(&pause_bulks_count) != 1)) {
6952 + WARN_ON_ONCE(1);
6953 + atomic_set(&pause_bulks_count, 1);
6954 + return;
6955 + }
6956 +
6957 + /* Block bulk transfers from all services */
6958 + mutex_lock(&state->bulk_transfer_mutex);
6959 +}
6960 +
6961 +/* Called from the slot handler thread */
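+/* Re-enables bulk transfers after a pause and resolves any transfers
+** that were deferred while the connection was paused. */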
6962 +static void
6963 +resume_bulks(VCHIQ_STATE_T *state)
6964 +{
6965 + int i;
6966 + if (unlikely(atomic_dec_return(&pause_bulks_count) != 0)) {
6967 + WARN_ON_ONCE(1);
6968 + atomic_set(&pause_bulks_count, 0);
6969 + return;
6970 + }
6971 +
6972 + /* Allow bulk transfers from all services */
6973 + mutex_unlock(&state->bulk_transfer_mutex);
6974 +
6975 + if (state->deferred_bulks == 0)
6976 + return;
6977 +
6978 + /* Deal with any bulks which had to be deferred because the
6979 + * connection was paused. Don't try to match the number of deferred
6980 + * bulks, in case something has come along and closed the service
6981 + * in the interim - just process all bulk queues for all services */
6982 + vchiq_log_info(vchiq_core_log_level, "%s: processing %d deferred bulks",
6983 + __func__, state->deferred_bulks);
6984 +
6985 + for (i = 0; i < state->unused_service; i++) {
6986 + VCHIQ_SERVICE_T *service = state->services[i];
6987 + int resolved_rx = 0;
6988 + int resolved_tx = 0;
6989 + if (!service || (service->srvstate != VCHIQ_SRVSTATE_OPEN))
6990 + continue;
6991 +
6992 + mutex_lock(&service->bulk_mutex);
6993 + resolved_rx = resolve_bulks(service, &service->bulk_rx);
6994 + resolved_tx = resolve_bulks(service, &service->bulk_tx);
6995 + mutex_unlock(&service->bulk_mutex);
6996 + if (resolved_rx)
6997 + notify_bulks(service, &service->bulk_rx, 1);
6998 + if (resolved_tx)
6999 + notify_bulks(service, &service->bulk_tx, 1);
7000 + }
7001 + state->deferred_bulks = 0;
7002 +}
7003 +
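+/* Handles an incoming OPEN request: looks for a listening service with
+** the requested fourcc, checks version compatibility, replies with an
+** OPENACK (or a CLOSE on failure) and moves the service to the OPEN or
+** OPENSYNC state. Returns 0 if the message must be retried later. */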
7004 +static int
7005 +parse_open(VCHIQ_STATE_T *state, VCHIQ_HEADER_T *header)
7006 +{
7007 + VCHIQ_SERVICE_T *service = NULL;
7008 + int msgid, size;
7009 + int type;
7010 + unsigned int localport, remoteport;
7011 +
7012 + msgid = header->msgid;
7013 + size = header->size;
7014 + type = VCHIQ_MSG_TYPE(msgid);
7015 + localport = VCHIQ_MSG_DSTPORT(msgid);
7016 + remoteport = VCHIQ_MSG_SRCPORT(msgid);
7017 + if (size >= sizeof(struct vchiq_open_payload)) {
7018 + const struct vchiq_open_payload *payload =
7019 + (struct vchiq_open_payload *)header->data;
7020 + unsigned int fourcc;
7021 +
7022 + fourcc = payload->fourcc;
7023 + vchiq_log_info(vchiq_core_log_level,
7024 + "%d: prs OPEN@%x (%d->'%c%c%c%c')",
7025 + state->id, (unsigned int)header,
7026 + localport,
7027 + VCHIQ_FOURCC_AS_4CHARS(fourcc));
7028 +
7029 + service = get_listening_service(state, fourcc);
7030 +
7031 + if (service) {
7032 + /* A matching service exists */
7033 + short version = payload->version;
7034 + short version_min = payload->version_min;
7035 + if ((service->version < version_min) ||
7036 + (version < service->version_min)) {
7037 + /* Version mismatch */
7038 + vchiq_loud_error_header();
7039 + vchiq_loud_error("%d: service %d (%c%c%c%c) "
7040 + "version mismatch - local (%d, min %d)"
7041 + " vs. remote (%d, min %d)",
7042 + state->id, service->localport,
7043 + VCHIQ_FOURCC_AS_4CHARS(fourcc),
7044 + service->version, service->version_min,
7045 + version, version_min);
7046 + vchiq_loud_error_footer();
7047 + unlock_service(service);
7048 + service = NULL;
7049 + goto fail_open;
7050 + }
7051 + service->peer_version = version;
7052 +
7053 + if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
7054 + struct vchiq_openack_payload ack_payload = {
7055 + service->version
7056 + };
7057 + VCHIQ_ELEMENT_T body = {
7058 + &ack_payload,
7059 + sizeof(ack_payload)
7060 + };
7061 +
7062 + /* Acknowledge the OPEN */
7063 + if (service->sync) {
7064 + if (queue_message_sync(state, NULL,
7065 + VCHIQ_MAKE_MSG(
7066 + VCHIQ_MSG_OPENACK,
7067 + service->localport,
7068 + remoteport),
7069 + &body, 1, sizeof(ack_payload),
7070 + 0) == VCHIQ_RETRY)
7071 + goto bail_not_ready;
7072 + } else {
7073 + if (queue_message(state, NULL,
7074 + VCHIQ_MAKE_MSG(
7075 + VCHIQ_MSG_OPENACK,
7076 + service->localport,
7077 + remoteport),
7078 + &body, 1, sizeof(ack_payload),
7079 + 0) == VCHIQ_RETRY)
7080 + goto bail_not_ready;
7081 + }
7082 +
7083 + /* The service is now open */
7084 + vchiq_set_service_state(service,
7085 + service->sync ? VCHIQ_SRVSTATE_OPENSYNC
7086 + : VCHIQ_SRVSTATE_OPEN);
7087 + }
7088 +
7089 + service->remoteport = remoteport;
7090 + service->client_id = ((int *)header->data)[1];
7091 + if (make_service_callback(service, VCHIQ_SERVICE_OPENED,
7092 + NULL, NULL) == VCHIQ_RETRY) {
7093 + /* Bail out if not ready */
7094 + service->remoteport = VCHIQ_PORT_FREE;
7095 + goto bail_not_ready;
7096 + }
7097 +
7098 + /* Success - the message has been dealt with */
7099 + unlock_service(service);
7100 + return 1;
7101 + }
7102 + }
7103 +
7104 +fail_open:
7105 + /* No available service, or an invalid request - send a CLOSE */
7106 + if (queue_message(state, NULL,
7107 + VCHIQ_MAKE_MSG(VCHIQ_MSG_CLOSE, 0, VCHIQ_MSG_SRCPORT(msgid)),
7108 + NULL, 0, 0, 0) == VCHIQ_RETRY)
7109 + goto bail_not_ready;
7110 +
7111 + return 1;
7112 +
7113 +bail_not_ready:
7114 + if (service)
7115 + unlock_service(service);
7116 +
7117 + return 0;
7118 +}
7119 +
7120 +/* Called by the slot handler thread */
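+/* Parses messages from the received slots, up to the position last
+** published by the peer in tx_pos, dispatching each message according
+** to its type and releasing each slot once it is fully consumed. */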
7121 +static void
7122 +parse_rx_slots(VCHIQ_STATE_T *state)
7123 +{
7124 + VCHIQ_SHARED_STATE_T *remote = state->remote;
7125 + VCHIQ_SERVICE_T *service = NULL;
7126 + int tx_pos;
7127 + DEBUG_INITIALISE(state->local)
7128 +
7129 + tx_pos = remote->tx_pos;
7130 +
7131 + while (state->rx_pos != tx_pos) {
7132 + VCHIQ_HEADER_T *header;
7133 + int msgid, size;
7134 + int type;
7135 + unsigned int localport, remoteport;
7136 +
7137 + DEBUG_TRACE(PARSE_LINE);
7138 + if (!state->rx_data) {
7139 + int rx_index;
7140 + WARN_ON(!((state->rx_pos & VCHIQ_SLOT_MASK) == 0));
7141 + rx_index = remote->slot_queue[
7142 + SLOT_QUEUE_INDEX_FROM_POS(state->rx_pos) &
7143 + VCHIQ_SLOT_QUEUE_MASK];
7144 + state->rx_data = (char *)SLOT_DATA_FROM_INDEX(state,
7145 + rx_index);
7146 + state->rx_info = SLOT_INFO_FROM_INDEX(state, rx_index);
7147 +
7148 + /* Initialise use_count to one, and increment
7149 + ** release_count at the end of the slot to avoid
7150 + ** releasing the slot prematurely. */
7151 + state->rx_info->use_count = 1;
7152 + state->rx_info->release_count = 0;
7153 + }
7154 +
7155 + header = (VCHIQ_HEADER_T *)(state->rx_data +
7156 + (state->rx_pos & VCHIQ_SLOT_MASK));
7157 + DEBUG_VALUE(PARSE_HEADER, (int)header);
7158 + msgid = header->msgid;
7159 + DEBUG_VALUE(PARSE_MSGID, msgid);
7160 + size = header->size;
7161 + type = VCHIQ_MSG_TYPE(msgid);
7162 + localport = VCHIQ_MSG_DSTPORT(msgid);
7163 + remoteport = VCHIQ_MSG_SRCPORT(msgid);
7164 +
7165 + if (type != VCHIQ_MSG_DATA)
7166 + VCHIQ_STATS_INC(state, ctrl_rx_count);
7167 +
7168 + switch (type) {
7169 + case VCHIQ_MSG_OPENACK:
7170 + case VCHIQ_MSG_CLOSE:
7171 + case VCHIQ_MSG_DATA:
7172 + case VCHIQ_MSG_BULK_RX:
7173 + case VCHIQ_MSG_BULK_TX:
7174 + case VCHIQ_MSG_BULK_RX_DONE:
7175 + case VCHIQ_MSG_BULK_TX_DONE:
7176 + service = find_service_by_port(state, localport);
7177 + if ((!service || service->remoteport != remoteport) &&
7178 + (localport == 0) &&
7179 + (type == VCHIQ_MSG_CLOSE)) {
7180 + /* This could be a CLOSE from a client which
7181 + hadn't yet received the OPENACK - look for
7182 + the connected service */
7183 + if (service)
7184 + unlock_service(service);
7185 + service = get_connected_service(state,
7186 + remoteport);
7187 + if (service)
7188 + vchiq_log_warning(vchiq_core_log_level,
7189 + "%d: prs %s@%x (%d->%d) - "
7190 + "found connected service %d",
7191 + state->id, msg_type_str(type),
7192 + (unsigned int)header,
7193 + remoteport, localport,
7194 + service->localport);
7195 + }
7196 +
7197 + if (!service) {
7198 + vchiq_log_error(vchiq_core_log_level,
7199 + "%d: prs %s@%x (%d->%d) - "
7200 + "invalid/closed service %d",
7201 + state->id, msg_type_str(type),
7202 + (unsigned int)header,
7203 + remoteport, localport, localport);
7204 + goto skip_message;
7205 + }
7206 + break;
7207 + default:
7208 + break;
7209 + }
7210 +
7211 + if (vchiq_core_msg_log_level >= VCHIQ_LOG_INFO) {
7212 + int svc_fourcc;
7213 +
7214 + svc_fourcc = service
7215 + ? service->base.fourcc
7216 + : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
7217 + vchiq_log_info(vchiq_core_msg_log_level,
7218 + "Rcvd Msg %s(%u) from %c%c%c%c s:%d d:%d "
7219 + "len:%d",
7220 + msg_type_str(type), type,
7221 + VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
7222 + remoteport, localport, size);
7223 + if (size > 0)
7224 + vchiq_log_dump_mem("Rcvd", 0, header->data,
7225 + min(64, size));
7226 + }
7227 +
7228 + if (((unsigned int)header & VCHIQ_SLOT_MASK) + calc_stride(size)
7229 + > VCHIQ_SLOT_SIZE) {
7230 + vchiq_log_error(vchiq_core_log_level,
7231 + "header %x (msgid %x) - size %x too big for "
7232 + "slot",
7233 + (unsigned int)header, (unsigned int)msgid,
7234 + (unsigned int)size);
7235 + WARN(1, "oversized for slot\n");
7236 + }
7237 +
7238 + switch (type) {
7239 + case VCHIQ_MSG_OPEN:
7240 + WARN_ON(!(VCHIQ_MSG_DSTPORT(msgid) == 0));
7241 + if (!parse_open(state, header))
7242 + goto bail_not_ready;
7243 + break;
7244 + case VCHIQ_MSG_OPENACK:
7245 + if (size >= sizeof(struct vchiq_openack_payload)) {
7246 + const struct vchiq_openack_payload *payload =
7247 + (struct vchiq_openack_payload *)
7248 + header->data;
7249 + service->peer_version = payload->version;
7250 + }
7251 + vchiq_log_info(vchiq_core_log_level,
7252 + "%d: prs OPENACK@%x,%x (%d->%d) v:%d",
7253 + state->id, (unsigned int)header, size,
7254 + remoteport, localport, service->peer_version);
7255 + if (service->srvstate ==
7256 + VCHIQ_SRVSTATE_OPENING) {
7257 + service->remoteport = remoteport;
7258 + vchiq_set_service_state(service,
7259 + VCHIQ_SRVSTATE_OPEN);
7260 + up(&service->remove_event);
7261 + } else
7262 + vchiq_log_error(vchiq_core_log_level,
7263 + "OPENACK received in state %s",
7264 + srvstate_names[service->srvstate]);
7265 + break;
7266 + case VCHIQ_MSG_CLOSE:
7267 + WARN_ON(size != 0); /* There should be no data */
7268 +
7269 + vchiq_log_info(vchiq_core_log_level,
7270 + "%d: prs CLOSE@%x (%d->%d)",
7271 + state->id, (unsigned int)header,
7272 + remoteport, localport);
7273 +
7274 + mark_service_closing_internal(service, 1);
7275 +
7276 + if (vchiq_close_service_internal(service,
7277 + 1/*close_recvd*/) == VCHIQ_RETRY)
7278 + goto bail_not_ready;
7279 +
7280 + vchiq_log_info(vchiq_core_log_level,
7281 + "Close Service %c%c%c%c s:%u d:%d",
7282 + VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
7283 + service->localport,
7284 + service->remoteport);
7285 + break;
7286 + case VCHIQ_MSG_DATA:
7287 + vchiq_log_trace(vchiq_core_log_level,
7288 + "%d: prs DATA@%x,%x (%d->%d)",
7289 + state->id, (unsigned int)header, size,
7290 + remoteport, localport);
7291 +
7292 + if ((service->remoteport == remoteport)
7293 + && (service->srvstate ==
7294 + VCHIQ_SRVSTATE_OPEN)) {
7295 + header->msgid = msgid | VCHIQ_MSGID_CLAIMED;
7296 + claim_slot(state->rx_info);
7297 + DEBUG_TRACE(PARSE_LINE);
7298 + if (make_service_callback(service,
7299 + VCHIQ_MESSAGE_AVAILABLE, header,
7300 + NULL) == VCHIQ_RETRY) {
7301 + DEBUG_TRACE(PARSE_LINE);
7302 + goto bail_not_ready;
7303 + }
7304 + VCHIQ_SERVICE_STATS_INC(service, ctrl_rx_count);
7305 + VCHIQ_SERVICE_STATS_ADD(service, ctrl_rx_bytes,
7306 + size);
7307 + } else {
7308 + VCHIQ_STATS_INC(state, error_count);
7309 + }
7310 + break;
7311 + case VCHIQ_MSG_CONNECT:
7312 + vchiq_log_info(vchiq_core_log_level,
7313 + "%d: prs CONNECT@%x",
7314 + state->id, (unsigned int)header);
7315 + up(&state->connect);
7316 + break;
7317 + case VCHIQ_MSG_BULK_RX:
7318 + case VCHIQ_MSG_BULK_TX: {
7319 + VCHIQ_BULK_QUEUE_T *queue;
7320 + WARN_ON(!state->is_master);
7321 + queue = (type == VCHIQ_MSG_BULK_RX) ?
7322 + &service->bulk_tx : &service->bulk_rx;
7323 + if ((service->remoteport == remoteport)
7324 + && (service->srvstate ==
7325 + VCHIQ_SRVSTATE_OPEN)) {
7326 + VCHIQ_BULK_T *bulk;
7327 + int resolved = 0;
7328 +
7329 + DEBUG_TRACE(PARSE_LINE);
7330 + if (mutex_lock_interruptible(
7331 + &service->bulk_mutex) != 0) {
7332 + DEBUG_TRACE(PARSE_LINE);
7333 + goto bail_not_ready;
7334 + }
7335 +
7336 + WARN_ON(!(queue->remote_insert < queue->remove +
7337 + VCHIQ_NUM_SERVICE_BULKS));
7338 + bulk = &queue->bulks[
7339 + BULK_INDEX(queue->remote_insert)];
7340 + bulk->remote_data =
7341 + (void *)((int *)header->data)[0];
7342 + bulk->remote_size = ((int *)header->data)[1];
7343 + wmb();
7344 +
7345 + vchiq_log_info(vchiq_core_log_level,
7346 + "%d: prs %s@%x (%d->%d) %x@%x",
7347 + state->id, msg_type_str(type),
7348 + (unsigned int)header,
7349 + remoteport, localport,
7350 + bulk->remote_size,
7351 + (unsigned int)bulk->remote_data);
7352 +
7353 + queue->remote_insert++;
7354 +
7355 + if (atomic_read(&pause_bulks_count)) {
7356 + state->deferred_bulks++;
7357 + vchiq_log_info(vchiq_core_log_level,
7358 + "%s: deferring bulk (%d)",
7359 + __func__,
7360 + state->deferred_bulks);
7361 + if (state->conn_state !=
7362 + VCHIQ_CONNSTATE_PAUSE_SENT)
7363 + vchiq_log_error(
7364 + vchiq_core_log_level,
7365 + "%s: bulks paused in "
7366 + "unexpected state %s",
7367 + __func__,
7368 + conn_state_names[
7369 + state->conn_state]);
7370 + } else if (state->conn_state ==
7371 + VCHIQ_CONNSTATE_CONNECTED) {
7372 + DEBUG_TRACE(PARSE_LINE);
7373 + resolved = resolve_bulks(service,
7374 + queue);
7375 + }
7376 +
7377 + mutex_unlock(&service->bulk_mutex);
7378 + if (resolved)
7379 + notify_bulks(service, queue,
7380 + 1/*retry_poll*/);
7381 + }
7382 + } break;
7383 + case VCHIQ_MSG_BULK_RX_DONE:
7384 + case VCHIQ_MSG_BULK_TX_DONE:
7385 + WARN_ON(state->is_master);
7386 + if ((service->remoteport == remoteport)
7387 + && (service->srvstate !=
7388 + VCHIQ_SRVSTATE_FREE)) {
7389 + VCHIQ_BULK_QUEUE_T *queue;
7390 + VCHIQ_BULK_T *bulk;
7391 +
7392 + queue = (type == VCHIQ_MSG_BULK_RX_DONE) ?
7393 + &service->bulk_rx : &service->bulk_tx;
7394 +
7395 + DEBUG_TRACE(PARSE_LINE);
7396 + if (mutex_lock_interruptible(
7397 + &service->bulk_mutex) != 0) {
7398 + DEBUG_TRACE(PARSE_LINE);
7399 + goto bail_not_ready;
7400 + }
7401 + if ((int)(queue->remote_insert -
7402 + queue->local_insert) >= 0) {
7403 + vchiq_log_error(vchiq_core_log_level,
7404 + "%d: prs %s@%x (%d->%d) "
7405 + "unexpected (ri=%d,li=%d)",
7406 + state->id, msg_type_str(type),
7407 + (unsigned int)header,
7408 + remoteport, localport,
7409 + queue->remote_insert,
7410 + queue->local_insert);
7411 + mutex_unlock(&service->bulk_mutex);
7412 + break;
7413 + }
7414 +
7415 + BUG_ON(queue->process == queue->local_insert);
7416 + BUG_ON(queue->process != queue->remote_insert);
7417 +
7418 + bulk = &queue->bulks[
7419 + BULK_INDEX(queue->remote_insert)];
7420 + bulk->actual = *(int *)header->data;
7421 + queue->remote_insert++;
7422 +
7423 + vchiq_log_info(vchiq_core_log_level,
7424 + "%d: prs %s@%x (%d->%d) %x@%x",
7425 + state->id, msg_type_str(type),
7426 + (unsigned int)header,
7427 + remoteport, localport,
7428 + bulk->actual, (unsigned int)bulk->data);
7429 +
7430 + vchiq_log_trace(vchiq_core_log_level,
7431 + "%d: prs:%d %cx li=%x ri=%x p=%x",
7432 + state->id, localport,
7433 + (type == VCHIQ_MSG_BULK_RX_DONE) ?
7434 + 'r' : 't',
7435 + queue->local_insert,
7436 + queue->remote_insert, queue->process);
7437 +
7438 + DEBUG_TRACE(PARSE_LINE);
7439 + WARN_ON(queue->process == queue->local_insert);
7440 + vchiq_complete_bulk(bulk);
7441 + queue->process++;
7442 + mutex_unlock(&service->bulk_mutex);
7443 + DEBUG_TRACE(PARSE_LINE);
7444 + notify_bulks(service, queue, 1/*retry_poll*/);
7445 + DEBUG_TRACE(PARSE_LINE);
7446 + }
7447 + break;
7448 + case VCHIQ_MSG_PADDING:
7449 + vchiq_log_trace(vchiq_core_log_level,
7450 + "%d: prs PADDING@%x,%x",
7451 + state->id, (unsigned int)header, size);
7452 + break;
7453 + case VCHIQ_MSG_PAUSE:
7454 + /* If initiated, signal the application thread */
7455 + vchiq_log_trace(vchiq_core_log_level,
7456 + "%d: prs PAUSE@%x,%x",
7457 + state->id, (unsigned int)header, size);
7458 + if (state->conn_state == VCHIQ_CONNSTATE_PAUSED) {
7459 + vchiq_log_error(vchiq_core_log_level,
7460 + "%d: PAUSE received in state PAUSED",
7461 + state->id);
7462 + break;
7463 + }
7464 + if (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT) {
7465 + /* Send a PAUSE in response */
7466 + if (queue_message(state, NULL,
7467 + VCHIQ_MAKE_MSG(VCHIQ_MSG_PAUSE, 0, 0),
7468 + NULL, 0, 0, 0) == VCHIQ_RETRY)
7469 + goto bail_not_ready;
7470 + if (state->is_master)
7471 + pause_bulks(state);
7472 + }
7473 + /* At this point slot_mutex is held */
7474 + vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSED);
7475 + vchiq_platform_paused(state);
7476 + break;
7477 + case VCHIQ_MSG_RESUME:
7478 + vchiq_log_trace(vchiq_core_log_level,
7479 + "%d: prs RESUME@%x,%x",
7480 + state->id, (unsigned int)header, size);
7481 + /* Release the slot mutex */
7482 + mutex_unlock(&state->slot_mutex);
7483 + if (state->is_master)
7484 + resume_bulks(state);
7485 + vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
7486 + vchiq_platform_resumed(state);
7487 + break;
7488 +
7489 + case VCHIQ_MSG_REMOTE_USE:
7490 + vchiq_on_remote_use(state);
7491 + break;
7492 + case VCHIQ_MSG_REMOTE_RELEASE:
7493 + vchiq_on_remote_release(state);
7494 + break;
7495 + case VCHIQ_MSG_REMOTE_USE_ACTIVE:
7496 + vchiq_on_remote_use_active(state);
7497 + break;
7498 +
7499 + default:
7500 + vchiq_log_error(vchiq_core_log_level,
7501 + "%d: prs invalid msgid %x@%x,%x",
7502 + state->id, msgid, (unsigned int)header, size);
7503 + WARN(1, "invalid message\n");
7504 + break;
7505 + }
7506 +
7507 +skip_message:
7508 + if (service) {
7509 + unlock_service(service);
7510 + service = NULL;
7511 + }
7512 +
7513 + state->rx_pos += calc_stride(size);
7514 +
7515 + DEBUG_TRACE(PARSE_LINE);
7516 + /* Perform some housekeeping when the end of the slot is
7517 + ** reached. */
7518 + if ((state->rx_pos & VCHIQ_SLOT_MASK) == 0) {
7519 + /* Remove the extra reference count. */
7520 + release_slot(state, state->rx_info, NULL, NULL);
7521 + state->rx_data = NULL;
7522 + }
7523 + }
7524 +
7525 +bail_not_ready:
7526 + if (service)
7527 + unlock_service(service);
7528 +}
7529 +
7530 +/* Called by the slot handler thread */
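+/* Main loop of the slot handler thread: waits on the trigger event,
+** handles any pending polls and pause/resume state transitions, then
+** parses newly arrived messages. */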
7531 +static int
7532 +slot_handler_func(void *v)
7533 +{
7534 + VCHIQ_STATE_T *state = (VCHIQ_STATE_T *) v;
7535 + VCHIQ_SHARED_STATE_T *local = state->local;
7536 + DEBUG_INITIALISE(local)
7537 +
7538 + while (1) {
7539 + DEBUG_COUNT(SLOT_HANDLER_COUNT);
7540 + DEBUG_TRACE(SLOT_HANDLER_LINE);
7541 + remote_event_wait(&local->trigger);
7542 +
7543 + rmb();
7544 +
7545 + DEBUG_TRACE(SLOT_HANDLER_LINE);
7546 + if (state->poll_needed) {
7547 + /* Check if we need to suspend - may change our
7548 + * conn_state */
7549 + vchiq_platform_check_suspend(state);
7550 +
7551 + state->poll_needed = 0;
7552 +
7553 + /* Handle service polling and other rare conditions here
7554 + ** out of the mainline code */
7555 + switch (state->conn_state) {
7556 + case VCHIQ_CONNSTATE_CONNECTED:
7557 + /* Poll the services as requested */
7558 + poll_services(state);
7559 + break;
7560 +
7561 + case VCHIQ_CONNSTATE_PAUSING:
7562 + if (state->is_master)
7563 + pause_bulks(state);
7564 + if (queue_message(state, NULL,
7565 + VCHIQ_MAKE_MSG(VCHIQ_MSG_PAUSE, 0, 0),
7566 + NULL, 0, 0, 0) != VCHIQ_RETRY) {
7567 + vchiq_set_conn_state(state,
7568 + VCHIQ_CONNSTATE_PAUSE_SENT);
7569 + } else {
7570 + if (state->is_master)
7571 + resume_bulks(state);
7572 + /* Retry later */
7573 + state->poll_needed = 1;
7574 + }
7575 + break;
7576 +
7577 + case VCHIQ_CONNSTATE_PAUSED:
7578 + vchiq_platform_resume(state);
7579 + break;
7580 +
7581 + case VCHIQ_CONNSTATE_RESUMING:
7582 + if (queue_message(state, NULL,
7583 + VCHIQ_MAKE_MSG(VCHIQ_MSG_RESUME, 0, 0),
7584 + NULL, 0, 0, 0) != VCHIQ_RETRY) {
7585 + if (state->is_master)
7586 + resume_bulks(state);
7587 + vchiq_set_conn_state(state,
7588 + VCHIQ_CONNSTATE_CONNECTED);
7589 + vchiq_platform_resumed(state);
7590 + } else {
7591 + /* This should really be impossible,
7592 + ** since the PAUSE should have flushed
7593 + ** through outstanding messages. */
7594 + vchiq_log_error(vchiq_core_log_level,
7595 + "Failed to send RESUME "
7596 + "message");
7597 + BUG();
7598 + }
7599 + break;
7600 +
7601 + case VCHIQ_CONNSTATE_PAUSE_TIMEOUT:
7602 + case VCHIQ_CONNSTATE_RESUME_TIMEOUT:
7603 + vchiq_platform_handle_timeout(state);
7604 + break;
7605 + default:
7606 + break;
7607 + }
7608 +
7609 +
7610 + }
7611 +
7612 + DEBUG_TRACE(SLOT_HANDLER_LINE);
7613 + parse_rx_slots(state);
7614 + }
7615 + return 0;
7616 +}
7617 +
7618 +
7619 +/* Called by the recycle thread */
7620 +static int
7621 +recycle_func(void *v)
7622 +{
7623 + VCHIQ_STATE_T *state = (VCHIQ_STATE_T *) v;
7624 + VCHIQ_SHARED_STATE_T *local = state->local;
7625 +
7626 + while (1) {
7627 + remote_event_wait(&local->recycle);
7628 +
7629 + process_free_queue(state);
7630 + }
7631 + return 0;
7632 +}
7633 +
7634 +
7635 +/* Called by the sync thread */
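+/* Main loop of the sync thread: waits for the peer to signal
+** sync_trigger, then delivers the single message held in the remote
+** synchronous slot (an OPENACK, or DATA for an OPENSYNC service). */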
7636 +static int
7637 +sync_func(void *v)
7638 +{
7639 + VCHIQ_STATE_T *state = (VCHIQ_STATE_T *) v;
7640 + VCHIQ_SHARED_STATE_T *local = state->local;
7641 + VCHIQ_HEADER_T *header = (VCHIQ_HEADER_T *)SLOT_DATA_FROM_INDEX(state,
7642 + state->remote->slot_sync);
7643 +
7644 + while (1) {
7645 + VCHIQ_SERVICE_T *service;
7646 + int msgid, size;
7647 + int type;
7648 + unsigned int localport, remoteport;
7649 +
7650 + remote_event_wait(&local->sync_trigger);
7651 +
7652 + rmb();
7653 +
7654 + msgid = header->msgid;
7655 + size = header->size;
7656 + type = VCHIQ_MSG_TYPE(msgid);
7657 + localport = VCHIQ_MSG_DSTPORT(msgid);
7658 + remoteport = VCHIQ_MSG_SRCPORT(msgid);
7659 +
7660 + service = find_service_by_port(state, localport);
7661 +
7662 + if (!service) {
7663 + vchiq_log_error(vchiq_sync_log_level,
7664 + "%d: sf %s@%x (%d->%d) - "
7665 + "invalid/closed service %d",
7666 + state->id, msg_type_str(type),
7667 + (unsigned int)header,
7668 + remoteport, localport, localport);
7669 + release_message_sync(state, header);
7670 + continue;
7671 + }
7672 +
7673 + if (vchiq_sync_log_level >= VCHIQ_LOG_TRACE) {
7674 + int svc_fourcc;
7675 +
7676 + svc_fourcc = service
7677 + ? service->base.fourcc
7678 + : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
7679 + vchiq_log_trace(vchiq_sync_log_level,
7680 + "Rcvd Msg %s from %c%c%c%c s:%d d:%d len:%d",
7681 + msg_type_str(type),
7682 + VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
7683 + remoteport, localport, size);
7684 + if (size > 0)
7685 + vchiq_log_dump_mem("Rcvd", 0, header->data,
7686 + min(64, size));
7687 + }
7688 +
7689 + switch (type) {
7690 + case VCHIQ_MSG_OPENACK:
7691 + if (size >= sizeof(struct vchiq_openack_payload)) {
7692 + const struct vchiq_openack_payload *payload =
7693 + (struct vchiq_openack_payload *)
7694 + header->data;
7695 + service->peer_version = payload->version;
7696 + }
7697 + vchiq_log_info(vchiq_sync_log_level,
7698 + "%d: sf OPENACK@%x,%x (%d->%d) v:%d",
7699 + state->id, (unsigned int)header, size,
7700 + remoteport, localport, service->peer_version);
7701 + if (service->srvstate == VCHIQ_SRVSTATE_OPENING) {
7702 + service->remoteport = remoteport;
7703 + vchiq_set_service_state(service,
7704 + VCHIQ_SRVSTATE_OPENSYNC);
7705 + up(&service->remove_event);
7706 + }
7707 + release_message_sync(state, header);
7708 + break;
7709 +
7710 + case VCHIQ_MSG_DATA:
7711 + vchiq_log_trace(vchiq_sync_log_level,
7712 + "%d: sf DATA@%x,%x (%d->%d)",
7713 + state->id, (unsigned int)header, size,
7714 + remoteport, localport);
7715 +
7716 + if ((service->remoteport == remoteport) &&
7717 + (service->srvstate ==
7718 + VCHIQ_SRVSTATE_OPENSYNC)) {
7719 + if (make_service_callback(service,
7720 + VCHIQ_MESSAGE_AVAILABLE, header,
7721 + NULL) == VCHIQ_RETRY)
7722 + vchiq_log_error(vchiq_sync_log_level,
7723 + "synchronous callback to "
7724 + "service %d returns "
7725 + "VCHIQ_RETRY",
7726 + localport);
7727 + }
7728 + break;
7729 +
7730 + default:
7731 + vchiq_log_error(vchiq_sync_log_level,
7732 + "%d: sf unexpected msgid %x@%x,%x",
7733 + state->id, msgid, (unsigned int)header, size);
7734 + release_message_sync(state, header);
7735 + break;
7736 + }
7737 +
7738 + unlock_service(service);
7739 + }
7740 +
7741 + return 0;
7742 +}
7743 +
7744 +
7745 +static void
7746 +init_bulk_queue(VCHIQ_BULK_QUEUE_T *queue)
7747 +{
7748 + queue->local_insert = 0;
7749 + queue->remote_insert = 0;
7750 + queue->process = 0;
7751 + queue->remote_notify = 0;
7752 + queue->remove = 0;
7753 +}
7754 +
7755 +
7756 +inline const char *
7757 +get_conn_state_name(VCHIQ_CONNSTATE_T conn_state)
7758 +{
7759 + return conn_state_names[conn_state];
7760 +}
7761 +
7762 +
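+/* Lays out the shared memory region: places the slot-zero control block
+** at the first slot-aligned address, then divides the remaining slots
+** between the master and slave sides, reserving one synchronous slot
+** for each. */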
7763 +VCHIQ_SLOT_ZERO_T *
7764 +vchiq_init_slots(void *mem_base, int mem_size)
7765 +{
7766 + int mem_align = (VCHIQ_SLOT_SIZE - (int)mem_base) & VCHIQ_SLOT_MASK;
7767 + VCHIQ_SLOT_ZERO_T *slot_zero =
7768 + (VCHIQ_SLOT_ZERO_T *)((char *)mem_base + mem_align);
7769 + int num_slots = (mem_size - mem_align)/VCHIQ_SLOT_SIZE;
7770 + int first_data_slot = VCHIQ_SLOT_ZERO_SLOTS;
7771 +
7772 + /* Ensure there is enough memory to run an absolute minimum system */
7773 + num_slots -= first_data_slot;
7774 +
7775 + if (num_slots < 4) {
7776 + vchiq_log_error(vchiq_core_log_level,
7777 + "vchiq_init_slots - insufficient memory %x bytes",
7778 + mem_size);
7779 + return NULL;
7780 + }
7781 +
7782 + memset(slot_zero, 0, sizeof(VCHIQ_SLOT_ZERO_T));
7783 +
7784 + slot_zero->magic = VCHIQ_MAGIC;
7785 + slot_zero->version = VCHIQ_VERSION;
7786 + slot_zero->version_min = VCHIQ_VERSION_MIN;
7787 + slot_zero->slot_zero_size = sizeof(VCHIQ_SLOT_ZERO_T);
7788 + slot_zero->slot_size = VCHIQ_SLOT_SIZE;
7789 + slot_zero->max_slots = VCHIQ_MAX_SLOTS;
7790 + slot_zero->max_slots_per_side = VCHIQ_MAX_SLOTS_PER_SIDE;
7791 +
7792 + slot_zero->master.slot_sync = first_data_slot;
7793 + slot_zero->master.slot_first = first_data_slot + 1;
7794 + slot_zero->master.slot_last = first_data_slot + (num_slots/2) - 1;
7795 + slot_zero->slave.slot_sync = first_data_slot + (num_slots/2);
7796 + slot_zero->slave.slot_first = first_data_slot + (num_slots/2) + 1;
7797 + slot_zero->slave.slot_last = first_data_slot + num_slots - 1;
7798 +
7799 + return slot_zero;
7800 +}
7801 +
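+/* Initialises the local connection state from a slot-zero block:
+** validates the magic value, versions and layout constants, sets up the
+** events, mutexes and quotas, and starts the slot handler, recycle and
+** sync threads. */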
7802 +VCHIQ_STATUS_T
7803 +vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero,
7804 + int is_master)
7805 +{
7806 + VCHIQ_SHARED_STATE_T *local;
7807 + VCHIQ_SHARED_STATE_T *remote;
7808 + VCHIQ_STATUS_T status;
7809 + char threadname[10];
7810 + static int id;
7811 + int i;
7812 +
7813 + vchiq_log_warning(vchiq_core_log_level,
7814 + "%s: slot_zero = 0x%08lx, is_master = %d",
7815 + __func__, (unsigned long)slot_zero, is_master);
7816 +
7817 + /* Check the input configuration */
7818 +
7819 + if (slot_zero->magic != VCHIQ_MAGIC) {
7820 + vchiq_loud_error_header();
7821 + vchiq_loud_error("Invalid VCHIQ magic value found.");
7822 + vchiq_loud_error("slot_zero=%x: magic=%x (expected %x)",
7823 + (unsigned int)slot_zero, slot_zero->magic, VCHIQ_MAGIC);
7824 + vchiq_loud_error_footer();
7825 + return VCHIQ_ERROR;
7826 + }
7827 +
7828 + if (slot_zero->version < VCHIQ_VERSION_MIN) {
7829 + vchiq_loud_error_header();
7830 + vchiq_loud_error("Incompatible VCHIQ versions found.");
7831 + vchiq_loud_error("slot_zero=%x: VideoCore version=%d "
7832 + "(minimum %d)",
7833 + (unsigned int)slot_zero, slot_zero->version,
7834 + VCHIQ_VERSION_MIN);
7835 + vchiq_loud_error("Restart with a newer VideoCore image.");
7836 + vchiq_loud_error_footer();
7837 + return VCHIQ_ERROR;
7838 + }
7839 +
7840 + if (VCHIQ_VERSION < slot_zero->version_min) {
7841 + vchiq_loud_error_header();
7842 + vchiq_loud_error("Incompatible VCHIQ versions found.");
7843 + vchiq_loud_error("slot_zero=%x: version=%d (VideoCore "
7844 + "minimum %d)",
7845 + (unsigned int)slot_zero, VCHIQ_VERSION,
7846 + slot_zero->version_min);
7847 + vchiq_loud_error("Restart with a newer kernel.");
7848 + vchiq_loud_error_footer();
7849 + return VCHIQ_ERROR;
7850 + }
7851 +
7852 + if ((slot_zero->slot_zero_size != sizeof(VCHIQ_SLOT_ZERO_T)) ||
7853 + (slot_zero->slot_size != VCHIQ_SLOT_SIZE) ||
7854 + (slot_zero->max_slots != VCHIQ_MAX_SLOTS) ||
7855 + (slot_zero->max_slots_per_side != VCHIQ_MAX_SLOTS_PER_SIDE)) {
7856 + vchiq_loud_error_header();
7857 + if (slot_zero->slot_zero_size != sizeof(VCHIQ_SLOT_ZERO_T))
7858 + vchiq_loud_error("slot_zero=%x: slot_zero_size=%x "
7859 + "(expected %x)",
7860 + (unsigned int)slot_zero,
7861 + slot_zero->slot_zero_size,
7862 + sizeof(VCHIQ_SLOT_ZERO_T));
7863 + if (slot_zero->slot_size != VCHIQ_SLOT_SIZE)
7864 + vchiq_loud_error("slot_zero=%x: slot_size=%d "
7865 + "(expected %d)",
7866 + (unsigned int)slot_zero, slot_zero->slot_size,
7867 + VCHIQ_SLOT_SIZE);
7868 + if (slot_zero->max_slots != VCHIQ_MAX_SLOTS)
7869 + vchiq_loud_error("slot_zero=%x: max_slots=%d "
7870 + "(expected %d)",
7871 + (unsigned int)slot_zero, slot_zero->max_slots,
7872 + VCHIQ_MAX_SLOTS);
7873 + if (slot_zero->max_slots_per_side != VCHIQ_MAX_SLOTS_PER_SIDE)
7874 + vchiq_loud_error("slot_zero=%x: max_slots_per_side=%d "
7875 + "(expected %d)",
7876 + (unsigned int)slot_zero,
7877 + slot_zero->max_slots_per_side,
7878 + VCHIQ_MAX_SLOTS_PER_SIDE);
7879 + vchiq_loud_error_footer();
7880 + return VCHIQ_ERROR;
7881 + }
7882 +
7883 + if (is_master) {
7884 + local = &slot_zero->master;
7885 + remote = &slot_zero->slave;
7886 + } else {
7887 + local = &slot_zero->slave;
7888 + remote = &slot_zero->master;
7889 + }
7890 +
7891 + if (local->initialised) {
7892 + vchiq_loud_error_header();
7893 + if (remote->initialised)
7894 + vchiq_loud_error("local state has already been "
7895 + "initialised");
7896 + else
7897 + vchiq_loud_error("master/slave mismatch - two %ss",
7898 + is_master ? "master" : "slave");
7899 + vchiq_loud_error_footer();
7900 + return VCHIQ_ERROR;
7901 + }
7902 +
7903 + memset(state, 0, sizeof(VCHIQ_STATE_T));
7904 +
7905 + state->id = id++;
7906 + state->is_master = is_master;
7907 +
7908 + /*
7909 + initialize shared state pointers
7910 + */
7911 +
7912 + state->local = local;
7913 + state->remote = remote;
7914 + state->slot_data = (VCHIQ_SLOT_T *)slot_zero;
7915 +
7916 + /*
7917 + initialize events and mutexes
7918 + */
7919 +
7920 + sema_init(&state->connect, 0);
7921 + mutex_init(&state->mutex);
7922 + sema_init(&state->trigger_event, 0);
7923 + sema_init(&state->recycle_event, 0);
7924 + sema_init(&state->sync_trigger_event, 0);
7925 + sema_init(&state->sync_release_event, 0);
7926 +
7927 + mutex_init(&state->slot_mutex);
7928 + mutex_init(&state->recycle_mutex);
7929 + mutex_init(&state->sync_mutex);
7930 + mutex_init(&state->bulk_transfer_mutex);
7931 +
7932 + sema_init(&state->slot_available_event, 0);
7933 + sema_init(&state->slot_remove_event, 0);
7934 + sema_init(&state->data_quota_event, 0);
7935 +
7936 + state->slot_queue_available = 0;
7937 +
7938 + for (i = 0; i < VCHIQ_MAX_SERVICES; i++) {
7939 + VCHIQ_SERVICE_QUOTA_T *service_quota =
7940 + &state->service_quotas[i];
7941 + sema_init(&service_quota->quota_event, 0);
7942 + }
7943 +
7944 + for (i = local->slot_first; i <= local->slot_last; i++) {
7945 + local->slot_queue[state->slot_queue_available++] = i;
7946 + up(&state->slot_available_event);
7947 + }
7948 +
7949 + state->default_slot_quota = state->slot_queue_available/2;
7950 + state->default_message_quota =
7951 + min((unsigned short)(state->default_slot_quota * 256),
7952 + (unsigned short)~0);
7953 +
7954 + state->previous_data_index = -1;
7955 + state->data_use_count = 0;
7956 + state->data_quota = state->slot_queue_available - 1;
7957 +
7958 + local->trigger.event = &state->trigger_event;
7959 + remote_event_create(&local->trigger);
7960 + local->tx_pos = 0;
7961 +
7962 + local->recycle.event = &state->recycle_event;
7963 + remote_event_create(&local->recycle);
7964 + local->slot_queue_recycle = state->slot_queue_available;
7965 +
7966 + local->sync_trigger.event = &state->sync_trigger_event;
7967 + remote_event_create(&local->sync_trigger);
7968 +
7969 + local->sync_release.event = &state->sync_release_event;
7970 + remote_event_create(&local->sync_release);
7971 +
7972 + /* At start-of-day, the slot is empty and available */
7973 + ((VCHIQ_HEADER_T *)SLOT_DATA_FROM_INDEX(state, local->slot_sync))->msgid
7974 + = VCHIQ_MSGID_PADDING;
7975 + remote_event_signal_local(&local->sync_release);
7976 +
7977 + local->debug[DEBUG_ENTRIES] = DEBUG_MAX;
7978 +
7979 + status = vchiq_platform_init_state(state);
7980 +
7981 + /*
7982 + bring up slot handler thread
7983 + */
7984 + snprintf(threadname, sizeof(threadname), "VCHIQ-%d", state->id);
7985 + state->slot_handler_thread = kthread_create(&slot_handler_func,
7986 + (void *)state,
7987 + threadname);
7988 +
7989 + if (state->slot_handler_thread == NULL) {
7990 + vchiq_loud_error_header();
7991 + vchiq_loud_error("couldn't create thread %s", threadname);
7992 + vchiq_loud_error_footer();
7993 + return VCHIQ_ERROR;
7994 + }
7995 + set_user_nice(state->slot_handler_thread, -19);
7996 + wake_up_process(state->slot_handler_thread);
7997 +
7998 + snprintf(threadname, sizeof(threadname), "VCHIQr-%d", state->id);
7999 + state->recycle_thread = kthread_create(&recycle_func,
8000 + (void *)state,
8001 + threadname);
8002 + if (state->recycle_thread == NULL) {
8003 + vchiq_loud_error_header();
8004 + vchiq_loud_error("couldn't create thread %s", threadname);
8005 + vchiq_loud_error_footer();
8006 + return VCHIQ_ERROR;
8007 + }
8008 + set_user_nice(state->recycle_thread, -19);
8009 + wake_up_process(state->recycle_thread);
8010 +
8011 + snprintf(threadname, sizeof(threadname), "VCHIQs-%d", state->id);
8012 + state->sync_thread = kthread_create(&sync_func,
8013 + (void *)state,
8014 + threadname);
8015 + if (state->sync_thread == NULL) {
8016 + vchiq_loud_error_header();
8017 + vchiq_loud_error("couldn't create thread %s", threadname);
8018 + vchiq_loud_error_footer();
8019 + return VCHIQ_ERROR;
8020 + }
8021 + set_user_nice(state->sync_thread, -20);
8022 + wake_up_process(state->sync_thread);
8023 +
8024 + BUG_ON(state->id >= VCHIQ_MAX_STATES);
8025 + vchiq_states[state->id] = state;
8026 +
8027 + /* Indicate readiness to the other side */
8028 + local->initialised = 1;
8029 +
8030 + return status;
8031 +}
8032 +
8033 +/* Called from application thread when a client or server service is created. */
8034 +VCHIQ_SERVICE_T *
8035 +vchiq_add_service_internal(VCHIQ_STATE_T *state,
8036 + const VCHIQ_SERVICE_PARAMS_T *params, int srvstate,
8037 + VCHIQ_INSTANCE_T instance, VCHIQ_USERDATA_TERM_T userdata_term)
8038 +{
8039 + VCHIQ_SERVICE_T *service;
8040 +
8041 + service = kmalloc(sizeof(VCHIQ_SERVICE_T), GFP_KERNEL);
8042 + if (service) {
8043 + service->base.fourcc = params->fourcc;
8044 + service->base.callback = params->callback;
8045 + service->base.userdata = params->userdata;
8046 + service->handle = VCHIQ_SERVICE_HANDLE_INVALID;
8047 + service->ref_count = 1;
8048 + service->srvstate = VCHIQ_SRVSTATE_FREE;
8049 + service->userdata_term = userdata_term;
8050 + service->localport = VCHIQ_PORT_FREE;
8051 + service->remoteport = VCHIQ_PORT_FREE;
8052 +
8053 + service->public_fourcc = (srvstate == VCHIQ_SRVSTATE_OPENING) ?
8054 + VCHIQ_FOURCC_INVALID : params->fourcc;
8055 + service->client_id = 0;
8056 + service->auto_close = 1;
8057 + service->sync = 0;
8058 + service->closing = 0;
8059 + atomic_set(&service->poll_flags, 0);
8060 + service->version = params->version;
8061 + service->version_min = params->version_min;
8062 + service->state = state;
8063 + service->instance = instance;
8064 + service->service_use_count = 0;
8065 + init_bulk_queue(&service->bulk_tx);
8066 + init_bulk_queue(&service->bulk_rx);
8067 + sema_init(&service->remove_event, 0);
8068 + sema_init(&service->bulk_remove_event, 0);
8069 + mutex_init(&service->bulk_mutex);
8070 + memset(&service->stats, 0, sizeof(service->stats));
8071 + } else {
8072 + vchiq_log_error(vchiq_core_log_level,
8073 + "Out of memory");
8074 + }
8075 +
8076 + if (service) {
8077 + VCHIQ_SERVICE_T **pservice = NULL;
8078 + int i;
8079 +
8080 + /* Although it is perfectly possible to use service_spinlock
8081 + ** to protect the creation of services, it is overkill as it
8082 + ** disables interrupts while the array is searched.
8083 + ** The only danger is of another thread trying to create a
8084 + ** service - service deletion is safe.
8085 + ** Therefore it is preferable to use state->mutex which,
8086 + ** although slower to claim, doesn't block interrupts while
8087 + ** it is held.
8088 + */
8089 +
8090 + mutex_lock(&state->mutex);
8091 +
8092 + /* Prepare to use a previously unused service */
8093 + if (state->unused_service < VCHIQ_MAX_SERVICES)
8094 + pservice = &state->services[state->unused_service];
8095 +
8096 + if (srvstate == VCHIQ_SRVSTATE_OPENING) {
8097 + for (i = 0; i < state->unused_service; i++) {
8098 + VCHIQ_SERVICE_T *srv = state->services[i];
8099 + if (!srv) {
8100 + pservice = &state->services[i];
8101 + break;
8102 + }
8103 + }
8104 + } else {
8105 + for (i = (state->unused_service - 1); i >= 0; i--) {
8106 + VCHIQ_SERVICE_T *srv = state->services[i];
8107 + if (!srv)
8108 + pservice = &state->services[i];
8109 + else if ((srv->public_fourcc == params->fourcc)
8110 + && ((srv->instance != instance) ||
8111 + (srv->base.callback !=
8112 + params->callback))) {
8113 + /* There is another server using this
8114 + ** fourcc which doesn't match. */
8115 + pservice = NULL;
8116 + break;
8117 + }
8118 + }
8119 + }
8120 +
8121 + if (pservice) {
8122 + service->localport = (pservice - state->services);
8123 + if (!handle_seq)
8124 + handle_seq = VCHIQ_MAX_STATES *
8125 + VCHIQ_MAX_SERVICES;
8126 + service->handle = handle_seq |
8127 + (state->id * VCHIQ_MAX_SERVICES) |
8128 + service->localport;
8129 + handle_seq += VCHIQ_MAX_STATES * VCHIQ_MAX_SERVICES;
8130 + *pservice = service;
8131 + if (pservice == &state->services[state->unused_service])
8132 + state->unused_service++;
8133 + }
8134 +
8135 + mutex_unlock(&state->mutex);
8136 +
8137 + if (!pservice) {
8138 + kfree(service);
8139 + service = NULL;
8140 + }
8141 + }
8142 +
8143 + if (service) {
8144 + VCHIQ_SERVICE_QUOTA_T *service_quota =
8145 + &state->service_quotas[service->localport];
8146 + service_quota->slot_quota = state->default_slot_quota;
8147 + service_quota->message_quota = state->default_message_quota;
8148 + if (service_quota->slot_use_count == 0)
8149 + service_quota->previous_tx_index =
8150 + SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos)
8151 + - 1;
8152 +
8153 + /* Bring this service online */
8154 + vchiq_set_service_state(service, srvstate);
8155 +
8156 + vchiq_log_info(vchiq_core_msg_log_level,
8157 + "%s Service %c%c%c%c SrcPort:%d",
8158 + (srvstate == VCHIQ_SRVSTATE_OPENING)
8159 + ? "Open" : "Add",
8160 + VCHIQ_FOURCC_AS_4CHARS(params->fourcc),
8161 + service->localport);
8162 + }
8163 +
8164 + /* Don't unlock the service - leave it with a ref_count of 1. */
8165 +
8166 + return service;
8167 +}
8168 +
8169 +VCHIQ_STATUS_T
8170 +vchiq_open_service_internal(VCHIQ_SERVICE_T *service, int client_id)
8171 +{
8172 + struct vchiq_open_payload payload = {
8173 + service->base.fourcc,
8174 + client_id,
8175 + service->version,
8176 + service->version_min
8177 + };
8178 + VCHIQ_ELEMENT_T body = { &payload, sizeof(payload) };
8179 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
8180 +
8181 + service->client_id = client_id;
8182 + vchiq_use_service_internal(service);
8183 + status = queue_message(service->state, NULL,
8184 + VCHIQ_MAKE_MSG(VCHIQ_MSG_OPEN, service->localport, 0),
8185 + &body, 1, sizeof(payload), 1);
8186 + if (status == VCHIQ_SUCCESS) {
8187 + if (down_interruptible(&service->remove_event) != 0) {
8188 + status = VCHIQ_RETRY;
8189 + vchiq_release_service_internal(service);
8190 + } else if ((service->srvstate != VCHIQ_SRVSTATE_OPEN) &&
8191 + (service->srvstate != VCHIQ_SRVSTATE_OPENSYNC)) {
8192 + if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT)
8193 + vchiq_log_error(vchiq_core_log_level,
8194 + "%d: osi - srvstate = %s (ref %d)",
8195 + service->state->id,
8196 + srvstate_names[service->srvstate],
8197 + service->ref_count);
8198 + status = VCHIQ_ERROR;
8199 + VCHIQ_SERVICE_STATS_INC(service, error_count);
8200 + vchiq_release_service_internal(service);
8201 + }
8202 + }
8203 + return status;
8204 +}
8205 +
8206 +static void
8207 +release_service_messages(VCHIQ_SERVICE_T *service)
8208 +{
8209 + VCHIQ_STATE_T *state = service->state;
8210 + int slot_last = state->remote->slot_last;
8211 + int i;
8212 +
8213 + /* Release any claimed messages */
8214 + for (i = state->remote->slot_first; i <= slot_last; i++) {
8215 + VCHIQ_SLOT_INFO_T *slot_info =
8216 + SLOT_INFO_FROM_INDEX(state, i);
8217 + if (slot_info->release_count != slot_info->use_count) {
8218 + char *data =
8219 + (char *)SLOT_DATA_FROM_INDEX(state, i);
8220 + unsigned int pos, end;
8221 +
8222 + end = VCHIQ_SLOT_SIZE;
8223 + if (data == state->rx_data)
8224 + /* This buffer is still being read from - stop
8225 + ** at the current read position */
8226 + end = state->rx_pos & VCHIQ_SLOT_MASK;
8227 +
8228 + pos = 0;
8229 +
8230 + while (pos < end) {
8231 + VCHIQ_HEADER_T *header =
8232 + (VCHIQ_HEADER_T *)(data + pos);
8233 + int msgid = header->msgid;
8234 + int port = VCHIQ_MSG_DSTPORT(msgid);
8235 + if ((port == service->localport) &&
8236 + (msgid & VCHIQ_MSGID_CLAIMED)) {
8237 + vchiq_log_info(vchiq_core_log_level,
8238 + " fsi - hdr %x",
8239 + (unsigned int)header);
8240 + release_slot(state, slot_info, header,
8241 + NULL);
8242 + }
8243 + pos += calc_stride(header->size);
8244 + if (pos > VCHIQ_SLOT_SIZE) {
8245 + vchiq_log_error(vchiq_core_log_level,
8246 + "fsi - pos %x: header %x, "
8247 + "msgid %x, header->msgid %x, "
8248 + "header->size %x",
8249 + pos, (unsigned int)header,
8250 + msgid, header->msgid,
8251 + header->size);
8252 + WARN(1, "invalid slot position\n");
8253 + }
8254 + }
8255 + }
8256 + }
8257 +}
8258 +
8259 +static int
8260 +do_abort_bulks(VCHIQ_SERVICE_T *service)
8261 +{
8262 + VCHIQ_STATUS_T status;
8263 +
8264 + /* Abort any outstanding bulk transfers */
8265 + if (mutex_lock_interruptible(&service->bulk_mutex) != 0)
8266 + return 0;
8267 + abort_outstanding_bulks(service, &service->bulk_tx);
8268 + abort_outstanding_bulks(service, &service->bulk_rx);
8269 + mutex_unlock(&service->bulk_mutex);
8270 +
8271 + status = notify_bulks(service, &service->bulk_tx, 0/*!retry_poll*/);
8272 + if (status == VCHIQ_SUCCESS)
8273 + status = notify_bulks(service, &service->bulk_rx,
8274 + 0/*!retry_poll*/);
8275 + return (status == VCHIQ_SUCCESS);
8276 +}
8277 +
8278 +static VCHIQ_STATUS_T
8279 +close_service_complete(VCHIQ_SERVICE_T *service, int failstate)
8280 +{
8281 + VCHIQ_STATUS_T status;
8282 + int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
8283 + int newstate;
8284 +
8285 + switch (service->srvstate) {
8286 + case VCHIQ_SRVSTATE_OPEN:
8287 + case VCHIQ_SRVSTATE_CLOSESENT:
8288 + case VCHIQ_SRVSTATE_CLOSERECVD:
8289 + if (is_server) {
8290 + if (service->auto_close) {
8291 + service->client_id = 0;
8292 + service->remoteport = VCHIQ_PORT_FREE;
8293 + newstate = VCHIQ_SRVSTATE_LISTENING;
8294 + } else
8295 + newstate = VCHIQ_SRVSTATE_CLOSEWAIT;
8296 + } else
8297 + newstate = VCHIQ_SRVSTATE_CLOSED;
8298 + vchiq_set_service_state(service, newstate);
8299 + break;
8300 + case VCHIQ_SRVSTATE_LISTENING:
8301 + break;
8302 + default:
8303 + vchiq_log_error(vchiq_core_log_level,
8304 + "close_service_complete(%x) called in state %s",
8305 + service->handle, srvstate_names[service->srvstate]);
8306 + WARN(1, "close_service_complete in unexpected state\n");
8307 + return VCHIQ_ERROR;
8308 + }
8309 +
8310 + status = make_service_callback(service,
8311 + VCHIQ_SERVICE_CLOSED, NULL, NULL);
8312 +
8313 + if (status != VCHIQ_RETRY) {
8314 + int uc = service->service_use_count;
8315 + int i;
8316 + /* Complete the close process */
8317 + for (i = 0; i < uc; i++)
8318 + /* cater for cases where close is forced and the
8319 +			** client may not close all its handles */
8320 + vchiq_release_service_internal(service);
8321 +
8322 + service->client_id = 0;
8323 + service->remoteport = VCHIQ_PORT_FREE;
8324 +
8325 + if (service->srvstate == VCHIQ_SRVSTATE_CLOSED)
8326 + vchiq_free_service_internal(service);
8327 + else if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT) {
8328 + if (is_server)
8329 + service->closing = 0;
8330 +
8331 + up(&service->remove_event);
8332 + }
8333 + } else
8334 + vchiq_set_service_state(service, failstate);
8335 +
8336 + return status;
8337 +}
8338 +
8339 +/* Called by the slot handler */
8340 +VCHIQ_STATUS_T
8341 +vchiq_close_service_internal(VCHIQ_SERVICE_T *service, int close_recvd)
8342 +{
8343 + VCHIQ_STATE_T *state = service->state;
8344 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
8345 + int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
8346 +
8347 + vchiq_log_info(vchiq_core_log_level, "%d: csi:%d,%d (%s)",
8348 + service->state->id, service->localport, close_recvd,
8349 + srvstate_names[service->srvstate]);
8350 +
8351 + switch (service->srvstate) {
8352 + case VCHIQ_SRVSTATE_CLOSED:
8353 + case VCHIQ_SRVSTATE_HIDDEN:
8354 + case VCHIQ_SRVSTATE_LISTENING:
8355 + case VCHIQ_SRVSTATE_CLOSEWAIT:
8356 + if (close_recvd)
8357 + vchiq_log_error(vchiq_core_log_level,
8358 + "vchiq_close_service_internal(1) called "
8359 + "in state %s",
8360 + srvstate_names[service->srvstate]);
8361 + else if (is_server) {
8362 + if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
8363 + status = VCHIQ_ERROR;
8364 + } else {
8365 + service->client_id = 0;
8366 + service->remoteport = VCHIQ_PORT_FREE;
8367 + if (service->srvstate ==
8368 + VCHIQ_SRVSTATE_CLOSEWAIT)
8369 + vchiq_set_service_state(service,
8370 + VCHIQ_SRVSTATE_LISTENING);
8371 + }
8372 + up(&service->remove_event);
8373 + } else
8374 + vchiq_free_service_internal(service);
8375 + break;
8376 + case VCHIQ_SRVSTATE_OPENING:
8377 + if (close_recvd) {
8378 + /* The open was rejected - tell the user */
8379 + vchiq_set_service_state(service,
8380 + VCHIQ_SRVSTATE_CLOSEWAIT);
8381 + up(&service->remove_event);
8382 + } else {
8383 + /* Shutdown mid-open - let the other side know */
8384 + status = queue_message(state, service,
8385 + VCHIQ_MAKE_MSG
8386 + (VCHIQ_MSG_CLOSE,
8387 + service->localport,
8388 + VCHIQ_MSG_DSTPORT(service->remoteport)),
8389 + NULL, 0, 0, 0);
8390 + }
8391 + break;
8392 +
8393 + case VCHIQ_SRVSTATE_OPENSYNC:
8394 + mutex_lock(&state->sync_mutex);
8395 + /* Drop through */
8396 +
8397 + case VCHIQ_SRVSTATE_OPEN:
8398 + if (state->is_master || close_recvd) {
8399 + if (!do_abort_bulks(service))
8400 + status = VCHIQ_RETRY;
8401 + }
8402 +
8403 + release_service_messages(service);
8404 +
8405 + if (status == VCHIQ_SUCCESS)
8406 + status = queue_message(state, service,
8407 + VCHIQ_MAKE_MSG
8408 + (VCHIQ_MSG_CLOSE,
8409 + service->localport,
8410 + VCHIQ_MSG_DSTPORT(service->remoteport)),
8411 + NULL, 0, 0, 0);
8412 +
8413 + if (status == VCHIQ_SUCCESS) {
8414 + if (!close_recvd)
8415 + break;
8416 + } else if (service->srvstate == VCHIQ_SRVSTATE_OPENSYNC) {
8417 + mutex_unlock(&state->sync_mutex);
8418 + break;
8419 + } else
8420 + break;
8421 +
8422 + status = close_service_complete(service,
8423 + VCHIQ_SRVSTATE_CLOSERECVD);
8424 + break;
8425 +
8426 + case VCHIQ_SRVSTATE_CLOSESENT:
8427 + if (!close_recvd)
8428 + /* This happens when a process is killed mid-close */
8429 + break;
8430 +
8431 + if (!state->is_master) {
8432 + if (!do_abort_bulks(service)) {
8433 + status = VCHIQ_RETRY;
8434 + break;
8435 + }
8436 + }
8437 +
8438 + if (status == VCHIQ_SUCCESS)
8439 + status = close_service_complete(service,
8440 + VCHIQ_SRVSTATE_CLOSERECVD);
8441 + break;
8442 +
8443 + case VCHIQ_SRVSTATE_CLOSERECVD:
8444 + if (!close_recvd && is_server)
8445 + /* Force into LISTENING mode */
8446 + vchiq_set_service_state(service,
8447 + VCHIQ_SRVSTATE_LISTENING);
8448 + status = close_service_complete(service,
8449 + VCHIQ_SRVSTATE_CLOSERECVD);
8450 + break;
8451 +
8452 + default:
8453 + vchiq_log_error(vchiq_core_log_level,
8454 + "vchiq_close_service_internal(%d) called in state %s",
8455 + close_recvd, srvstate_names[service->srvstate]);
8456 + break;
8457 + }
8458 +
8459 + return status;
8460 +}
8461 +
8462 +/* Called from the application process upon process death */
8463 +void
8464 +vchiq_terminate_service_internal(VCHIQ_SERVICE_T *service)
8465 +{
8466 + VCHIQ_STATE_T *state = service->state;
8467 +
8468 + vchiq_log_info(vchiq_core_log_level, "%d: tsi - (%d<->%d)",
8469 + state->id, service->localport, service->remoteport);
8470 +
8471 + mark_service_closing(service);
8472 +
8473 + /* Mark the service for removal by the slot handler */
8474 + request_poll(state, service, VCHIQ_POLL_REMOVE);
8475 +}
8476 +
8477 +/* Called from the slot handler */
8478 +void
8479 +vchiq_free_service_internal(VCHIQ_SERVICE_T *service)
8480 +{
8481 + VCHIQ_STATE_T *state = service->state;
8482 +
8483 + vchiq_log_info(vchiq_core_log_level, "%d: fsi - (%d)",
8484 + state->id, service->localport);
8485 +
8486 + switch (service->srvstate) {
8487 + case VCHIQ_SRVSTATE_OPENING:
8488 + case VCHIQ_SRVSTATE_CLOSED:
8489 + case VCHIQ_SRVSTATE_HIDDEN:
8490 + case VCHIQ_SRVSTATE_LISTENING:
8491 + case VCHIQ_SRVSTATE_CLOSEWAIT:
8492 + break;
8493 + default:
8494 + vchiq_log_error(vchiq_core_log_level,
8495 + "%d: fsi - (%d) in state %s",
8496 + state->id, service->localport,
8497 + srvstate_names[service->srvstate]);
8498 + return;
8499 + }
8500 +
8501 + vchiq_set_service_state(service, VCHIQ_SRVSTATE_FREE);
8502 +
8503 + up(&service->remove_event);
8504 +
8505 + /* Release the initial lock */
8506 + unlock_service(service);
8507 +}
8508 +
8509 +VCHIQ_STATUS_T
8510 +vchiq_connect_internal(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance)
8511 +{
8512 + VCHIQ_SERVICE_T *service;
8513 + int i;
8514 +
8515 + /* Find all services registered to this client and enable them. */
8516 + i = 0;
8517 + while ((service = next_service_by_instance(state, instance,
8518 + &i)) != NULL) {
8519 + if (service->srvstate == VCHIQ_SRVSTATE_HIDDEN)
8520 + vchiq_set_service_state(service,
8521 + VCHIQ_SRVSTATE_LISTENING);
8522 + unlock_service(service);
8523 + }
8524 +
8525 + if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED) {
8526 + if (queue_message(state, NULL,
8527 + VCHIQ_MAKE_MSG(VCHIQ_MSG_CONNECT, 0, 0), NULL, 0,
8528 + 0, 1) == VCHIQ_RETRY)
8529 + return VCHIQ_RETRY;
8530 +
8531 + vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTING);
8532 + }
8533 +
8534 + if (state->conn_state == VCHIQ_CONNSTATE_CONNECTING) {
8535 + if (down_interruptible(&state->connect) != 0)
8536 + return VCHIQ_RETRY;
8537 +
8538 + vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
8539 + up(&state->connect);
8540 + }
8541 +
8542 + return VCHIQ_SUCCESS;
8543 +}
8544 +
8545 +VCHIQ_STATUS_T
8546 +vchiq_shutdown_internal(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance)
8547 +{
8548 + VCHIQ_SERVICE_T *service;
8549 + int i;
8550 +
8551 +	/* Find all services registered to this client and remove them. */
8552 + i = 0;
8553 + while ((service = next_service_by_instance(state, instance,
8554 + &i)) != NULL) {
8555 + (void)vchiq_remove_service(service->handle);
8556 + unlock_service(service);
8557 + }
8558 +
8559 + return VCHIQ_SUCCESS;
8560 +}
8561 +
8562 +VCHIQ_STATUS_T
8563 +vchiq_pause_internal(VCHIQ_STATE_T *state)
8564 +{
8565 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
8566 +
8567 + switch (state->conn_state) {
8568 + case VCHIQ_CONNSTATE_CONNECTED:
8569 + /* Request a pause */
8570 + vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSING);
8571 + request_poll(state, NULL, 0);
8572 + break;
8573 + default:
8574 + vchiq_log_error(vchiq_core_log_level,
8575 + "vchiq_pause_internal in state %s\n",
8576 + conn_state_names[state->conn_state]);
8577 + status = VCHIQ_ERROR;
8578 + VCHIQ_STATS_INC(state, error_count);
8579 + break;
8580 + }
8581 +
8582 + return status;
8583 +}
8584 +
8585 +VCHIQ_STATUS_T
8586 +vchiq_resume_internal(VCHIQ_STATE_T *state)
8587 +{
8588 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
8589 +
8590 + if (state->conn_state == VCHIQ_CONNSTATE_PAUSED) {
8591 + vchiq_set_conn_state(state, VCHIQ_CONNSTATE_RESUMING);
8592 + request_poll(state, NULL, 0);
8593 + } else {
8594 + status = VCHIQ_ERROR;
8595 + VCHIQ_STATS_INC(state, error_count);
8596 + }
8597 +
8598 + return status;
8599 +}
8600 +
8601 +VCHIQ_STATUS_T
8602 +vchiq_close_service(VCHIQ_SERVICE_HANDLE_T handle)
8603 +{
8604 + /* Unregister the service */
8605 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
8606 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
8607 +
8608 + if (!service)
8609 + return VCHIQ_ERROR;
8610 +
8611 + vchiq_log_info(vchiq_core_log_level,
8612 + "%d: close_service:%d",
8613 + service->state->id, service->localport);
8614 +
8615 + if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
8616 + (service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
8617 + (service->srvstate == VCHIQ_SRVSTATE_HIDDEN)) {
8618 + unlock_service(service);
8619 + return VCHIQ_ERROR;
8620 + }
8621 +
8622 + mark_service_closing(service);
8623 +
8624 + if (current == service->state->slot_handler_thread) {
8625 + status = vchiq_close_service_internal(service,
8626 + 0/*!close_recvd*/);
8627 + BUG_ON(status == VCHIQ_RETRY);
8628 + } else {
8629 + /* Mark the service for termination by the slot handler */
8630 + request_poll(service->state, service, VCHIQ_POLL_TERMINATE);
8631 + }
8632 +
8633 + while (1) {
8634 + if (down_interruptible(&service->remove_event) != 0) {
8635 + status = VCHIQ_RETRY;
8636 + break;
8637 + }
8638 +
8639 + if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
8640 + (service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
8641 + (service->srvstate == VCHIQ_SRVSTATE_OPEN))
8642 + break;
8643 +
8644 + vchiq_log_warning(vchiq_core_log_level,
8645 + "%d: close_service:%d - waiting in state %s",
8646 + service->state->id, service->localport,
8647 + srvstate_names[service->srvstate]);
8648 + }
8649 +
8650 + if ((status == VCHIQ_SUCCESS) &&
8651 + (service->srvstate != VCHIQ_SRVSTATE_FREE) &&
8652 + (service->srvstate != VCHIQ_SRVSTATE_LISTENING))
8653 + status = VCHIQ_ERROR;
8654 +
8655 + unlock_service(service);
8656 +
8657 + return status;
8658 +}
8659 +
8660 +VCHIQ_STATUS_T
8661 +vchiq_remove_service(VCHIQ_SERVICE_HANDLE_T handle)
8662 +{
8663 + /* Unregister the service */
8664 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
8665 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
8666 +
8667 + if (!service)
8668 + return VCHIQ_ERROR;
8669 +
8670 + vchiq_log_info(vchiq_core_log_level,
8671 + "%d: remove_service:%d",
8672 + service->state->id, service->localport);
8673 +
8674 + if (service->srvstate == VCHIQ_SRVSTATE_FREE) {
8675 + unlock_service(service);
8676 + return VCHIQ_ERROR;
8677 + }
8678 +
8679 + mark_service_closing(service);
8680 +
8681 + if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) ||
8682 + (current == service->state->slot_handler_thread)) {
8683 + /* Make it look like a client, because it must be removed and
8684 + not left in the LISTENING state. */
8685 + service->public_fourcc = VCHIQ_FOURCC_INVALID;
8686 +
8687 + status = vchiq_close_service_internal(service,
8688 + 0/*!close_recvd*/);
8689 + BUG_ON(status == VCHIQ_RETRY);
8690 + } else {
8691 + /* Mark the service for removal by the slot handler */
8692 + request_poll(service->state, service, VCHIQ_POLL_REMOVE);
8693 + }
8694 + while (1) {
8695 + if (down_interruptible(&service->remove_event) != 0) {
8696 + status = VCHIQ_RETRY;
8697 + break;
8698 + }
8699 +
8700 + if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
8701 + (service->srvstate == VCHIQ_SRVSTATE_OPEN))
8702 + break;
8703 +
8704 + vchiq_log_warning(vchiq_core_log_level,
8705 + "%d: remove_service:%d - waiting in state %s",
8706 + service->state->id, service->localport,
8707 + srvstate_names[service->srvstate]);
8708 + }
8709 +
8710 + if ((status == VCHIQ_SUCCESS) &&
8711 + (service->srvstate != VCHIQ_SRVSTATE_FREE))
8712 + status = VCHIQ_ERROR;
8713 +
8714 + unlock_service(service);
8715 +
8716 + return status;
8717 +}
8718 +
8719 +
8720 +/* This function may be called by kernel threads or user threads.
8721 + * User threads may receive VCHIQ_RETRY to indicate that a signal has been
8722 + * received and the call should be retried after being returned to user
8723 + * context.
8724 + * When called in blocking mode, the userdata field points to a bulk_waiter
8725 + * structure.
8726 + */
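+/*
+ * Blocking-mode flow, as summarised from the code below: the caller's
+ * bulk_waiter has its event initialised here, the bulk is queued, and the
+ * call then sleeps on that event. Once woken it returns VCHIQ_ERROR if the
+ * recorded actual length is VCHIQ_BULK_ACTUAL_ABORTED, VCHIQ_SUCCESS
+ * otherwise, or VCHIQ_RETRY if the wait was interrupted by a signal.
+ */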
8727 +VCHIQ_STATUS_T
8728 +vchiq_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle,
8729 + VCHI_MEM_HANDLE_T memhandle, void *offset, int size, void *userdata,
8730 + VCHIQ_BULK_MODE_T mode, VCHIQ_BULK_DIR_T dir)
8731 +{
8732 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
8733 + VCHIQ_BULK_QUEUE_T *queue;
8734 + VCHIQ_BULK_T *bulk;
8735 + VCHIQ_STATE_T *state;
8736 + struct bulk_waiter *bulk_waiter = NULL;
8737 + const char dir_char = (dir == VCHIQ_BULK_TRANSMIT) ? 't' : 'r';
8738 + const int dir_msgtype = (dir == VCHIQ_BULK_TRANSMIT) ?
8739 + VCHIQ_MSG_BULK_TX : VCHIQ_MSG_BULK_RX;
8740 + VCHIQ_STATUS_T status = VCHIQ_ERROR;
8741 +
8742 + if (!service ||
8743 + (service->srvstate != VCHIQ_SRVSTATE_OPEN) ||
8744 + ((memhandle == VCHI_MEM_HANDLE_INVALID) && (offset == NULL)) ||
8745 + (vchiq_check_service(service) != VCHIQ_SUCCESS))
8746 + goto error_exit;
8747 +
8748 + switch (mode) {
8749 + case VCHIQ_BULK_MODE_NOCALLBACK:
8750 + case VCHIQ_BULK_MODE_CALLBACK:
8751 + break;
8752 + case VCHIQ_BULK_MODE_BLOCKING:
8753 + bulk_waiter = (struct bulk_waiter *)userdata;
8754 + sema_init(&bulk_waiter->event, 0);
8755 + bulk_waiter->actual = 0;
8756 + bulk_waiter->bulk = NULL;
8757 + break;
8758 + case VCHIQ_BULK_MODE_WAITING:
8759 + bulk_waiter = (struct bulk_waiter *)userdata;
8760 + bulk = bulk_waiter->bulk;
8761 + goto waiting;
8762 + default:
8763 + goto error_exit;
8764 + }
8765 +
8766 + state = service->state;
8767 +
8768 + queue = (dir == VCHIQ_BULK_TRANSMIT) ?
8769 + &service->bulk_tx : &service->bulk_rx;
8770 +
8771 + if (mutex_lock_interruptible(&service->bulk_mutex) != 0) {
8772 + status = VCHIQ_RETRY;
8773 + goto error_exit;
8774 + }
8775 +
8776 + if (queue->local_insert == queue->remove + VCHIQ_NUM_SERVICE_BULKS) {
8777 + VCHIQ_SERVICE_STATS_INC(service, bulk_stalls);
8778 + do {
8779 + mutex_unlock(&service->bulk_mutex);
8780 + if (down_interruptible(&service->bulk_remove_event)
8781 + != 0) {
8782 + status = VCHIQ_RETRY;
8783 + goto error_exit;
8784 + }
8785 + if (mutex_lock_interruptible(&service->bulk_mutex)
8786 + != 0) {
8787 + status = VCHIQ_RETRY;
8788 + goto error_exit;
8789 + }
8790 + } while (queue->local_insert == queue->remove +
8791 + VCHIQ_NUM_SERVICE_BULKS);
8792 + }
8793 +
8794 + bulk = &queue->bulks[BULK_INDEX(queue->local_insert)];
8795 +
8796 + bulk->mode = mode;
8797 + bulk->dir = dir;
8798 + bulk->userdata = userdata;
8799 + bulk->size = size;
8800 + bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
8801 +
8802 + if (vchiq_prepare_bulk_data(bulk, memhandle, offset, size, dir) !=
8803 + VCHIQ_SUCCESS)
8804 + goto unlock_error_exit;
8805 +
8806 + wmb();
8807 +
8808 + vchiq_log_info(vchiq_core_log_level,
8809 + "%d: bt (%d->%d) %cx %x@%x %x",
8810 + state->id,
8811 + service->localport, service->remoteport, dir_char,
8812 + size, (unsigned int)bulk->data, (unsigned int)userdata);
8813 +
8814 + if (state->is_master) {
8815 + queue->local_insert++;
8816 + if (resolve_bulks(service, queue))
8817 + request_poll(state, service,
8818 + (dir == VCHIQ_BULK_TRANSMIT) ?
8819 + VCHIQ_POLL_TXNOTIFY : VCHIQ_POLL_RXNOTIFY);
8820 + } else {
8821 + int payload[2] = { (int)bulk->data, bulk->size };
8822 + VCHIQ_ELEMENT_T element = { payload, sizeof(payload) };
8823 +
8824 + status = queue_message(state, NULL,
8825 + VCHIQ_MAKE_MSG(dir_msgtype,
8826 + service->localport, service->remoteport),
8827 + &element, 1, sizeof(payload), 1);
8828 + if (status != VCHIQ_SUCCESS) {
8829 + vchiq_complete_bulk(bulk);
8830 + goto unlock_error_exit;
8831 + }
8832 + queue->local_insert++;
8833 + }
8834 +
8835 + mutex_unlock(&service->bulk_mutex);
8836 +
8837 + vchiq_log_trace(vchiq_core_log_level,
8838 + "%d: bt:%d %cx li=%x ri=%x p=%x",
8839 + state->id,
8840 + service->localport, dir_char,
8841 + queue->local_insert, queue->remote_insert, queue->process);
8842 +
8843 +waiting:
8844 + unlock_service(service);
8845 +
8846 + status = VCHIQ_SUCCESS;
8847 +
8848 + if (bulk_waiter) {
8849 + bulk_waiter->bulk = bulk;
8850 + if (down_interruptible(&bulk_waiter->event) != 0)
8851 + status = VCHIQ_RETRY;
8852 + else if (bulk_waiter->actual == VCHIQ_BULK_ACTUAL_ABORTED)
8853 + status = VCHIQ_ERROR;
8854 + }
8855 +
8856 + return status;
8857 +
8858 +unlock_error_exit:
8859 + mutex_unlock(&service->bulk_mutex);
8860 +
8861 +error_exit:
8862 + if (service)
8863 + unlock_service(service);
8864 + return status;
8865 +}
8866 +
8867 +VCHIQ_STATUS_T
8868 +vchiq_queue_message(VCHIQ_SERVICE_HANDLE_T handle,
8869 + const VCHIQ_ELEMENT_T *elements, unsigned int count)
8870 +{
8871 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
8872 + VCHIQ_STATUS_T status = VCHIQ_ERROR;
8873 +
8874 + unsigned int size = 0;
8875 + unsigned int i;
8876 +
8877 + if (!service ||
8878 + (vchiq_check_service(service) != VCHIQ_SUCCESS))
8879 + goto error_exit;
8880 +
8881 + for (i = 0; i < (unsigned int)count; i++) {
8882 + if (elements[i].size) {
8883 + if (elements[i].data == NULL) {
8884 + VCHIQ_SERVICE_STATS_INC(service, error_count);
8885 + goto error_exit;
8886 + }
8887 + size += elements[i].size;
8888 + }
8889 + }
8890 +
8891 + if (size > VCHIQ_MAX_MSG_SIZE) {
8892 + VCHIQ_SERVICE_STATS_INC(service, error_count);
8893 + goto error_exit;
8894 + }
8895 +
8896 + switch (service->srvstate) {
8897 + case VCHIQ_SRVSTATE_OPEN:
8898 + status = queue_message(service->state, service,
8899 + VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA,
8900 + service->localport,
8901 + service->remoteport),
8902 + elements, count, size, 1);
8903 + break;
8904 + case VCHIQ_SRVSTATE_OPENSYNC:
8905 + status = queue_message_sync(service->state, service,
8906 + VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA,
8907 + service->localport,
8908 + service->remoteport),
8909 + elements, count, size, 1);
8910 + break;
8911 + default:
8912 + status = VCHIQ_ERROR;
8913 + break;
8914 + }
8915 +
8916 +error_exit:
8917 + if (service)
8918 + unlock_service(service);
8919 +
8920 + return status;
8921 +}
8922 +
8923 +void
8924 +vchiq_release_message(VCHIQ_SERVICE_HANDLE_T handle, VCHIQ_HEADER_T *header)
8925 +{
8926 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
8927 + VCHIQ_SHARED_STATE_T *remote;
8928 + VCHIQ_STATE_T *state;
8929 + int slot_index;
8930 +
8931 + if (!service)
8932 + return;
8933 +
8934 + state = service->state;
8935 + remote = state->remote;
8936 +
8937 + slot_index = SLOT_INDEX_FROM_DATA(state, (void *)header);
8938 +
8939 + if ((slot_index >= remote->slot_first) &&
8940 + (slot_index <= remote->slot_last)) {
8941 + int msgid = header->msgid;
8942 + if (msgid & VCHIQ_MSGID_CLAIMED) {
8943 + VCHIQ_SLOT_INFO_T *slot_info =
8944 + SLOT_INFO_FROM_INDEX(state, slot_index);
8945 +
8946 + release_slot(state, slot_info, header, service);
8947 + }
8948 + } else if (slot_index == remote->slot_sync)
8949 + release_message_sync(state, header);
8950 +
8951 + unlock_service(service);
8952 +}
8953 +
8954 +static void
8955 +release_message_sync(VCHIQ_STATE_T *state, VCHIQ_HEADER_T *header)
8956 +{
8957 + header->msgid = VCHIQ_MSGID_PADDING;
8958 + wmb();
8959 + remote_event_signal(&state->remote->sync_release);
8960 +}
8961 +
8962 +VCHIQ_STATUS_T
8963 +vchiq_get_peer_version(VCHIQ_SERVICE_HANDLE_T handle, short *peer_version)
8964 +{
8965 + VCHIQ_STATUS_T status = VCHIQ_ERROR;
8966 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
8967 +
8968 + if (!service ||
8969 + (vchiq_check_service(service) != VCHIQ_SUCCESS) ||
8970 + !peer_version)
8971 + goto exit;
8972 + *peer_version = service->peer_version;
8973 + status = VCHIQ_SUCCESS;
8974 +
8975 +exit:
8976 + if (service)
8977 + unlock_service(service);
8978 + return status;
8979 +}
8980 +
8981 +VCHIQ_STATUS_T
8982 +vchiq_get_config(VCHIQ_INSTANCE_T instance,
8983 + int config_size, VCHIQ_CONFIG_T *pconfig)
8984 +{
8985 + VCHIQ_CONFIG_T config;
8986 +
8987 + (void)instance;
8988 +
8989 + config.max_msg_size = VCHIQ_MAX_MSG_SIZE;
8990 + config.bulk_threshold = VCHIQ_MAX_MSG_SIZE;
8991 + config.max_outstanding_bulks = VCHIQ_NUM_SERVICE_BULKS;
8992 + config.max_services = VCHIQ_MAX_SERVICES;
8993 + config.version = VCHIQ_VERSION;
8994 + config.version_min = VCHIQ_VERSION_MIN;
8995 +
8996 + if (config_size > sizeof(VCHIQ_CONFIG_T))
8997 + return VCHIQ_ERROR;
8998 +
8999 + memcpy(pconfig, &config,
9000 + min(config_size, (int)(sizeof(VCHIQ_CONFIG_T))));
9001 +
9002 + return VCHIQ_SUCCESS;
9003 +}
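+
+/*
+ * A minimal usage sketch (hypothetical caller): config_size allows older
+ * callers to pass a smaller structure, e.g.
+ *
+ *     VCHIQ_CONFIG_T config;
+ *     if (vchiq_get_config(instance, sizeof(config), &config) ==
+ *             VCHIQ_SUCCESS)
+ *         use_max_msg_size(config.max_msg_size);
+ *
+ * where use_max_msg_size() is a placeholder for the caller's own code.
+ */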
9004 +
9005 +VCHIQ_STATUS_T
9006 +vchiq_set_service_option(VCHIQ_SERVICE_HANDLE_T handle,
9007 + VCHIQ_SERVICE_OPTION_T option, int value)
9008 +{
9009 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
9010 + VCHIQ_STATUS_T status = VCHIQ_ERROR;
9011 +
9012 + if (service) {
9013 + switch (option) {
9014 + case VCHIQ_SERVICE_OPTION_AUTOCLOSE:
9015 + service->auto_close = value;
9016 + status = VCHIQ_SUCCESS;
9017 + break;
9018 +
9019 + case VCHIQ_SERVICE_OPTION_SLOT_QUOTA: {
9020 + VCHIQ_SERVICE_QUOTA_T *service_quota =
9021 + &service->state->service_quotas[
9022 + service->localport];
9023 + if (value == 0)
9024 + value = service->state->default_slot_quota;
9025 + if ((value >= service_quota->slot_use_count) &&
9026 + (value < (unsigned short)~0)) {
9027 + service_quota->slot_quota = value;
9028 + if ((value >= service_quota->slot_use_count) &&
9029 + (service_quota->message_quota >=
9030 + service_quota->message_use_count)) {
9031 + /* Signal the service that it may have
9032 + ** dropped below its quota */
9033 + up(&service_quota->quota_event);
9034 + }
9035 + status = VCHIQ_SUCCESS;
9036 + }
9037 + } break;
9038 +
9039 + case VCHIQ_SERVICE_OPTION_MESSAGE_QUOTA: {
9040 + VCHIQ_SERVICE_QUOTA_T *service_quota =
9041 + &service->state->service_quotas[
9042 + service->localport];
9043 + if (value == 0)
9044 + value = service->state->default_message_quota;
9045 + if ((value >= service_quota->message_use_count) &&
9046 + (value < (unsigned short)~0)) {
9047 + service_quota->message_quota = value;
9048 + if ((value >=
9049 + service_quota->message_use_count) &&
9050 + (service_quota->slot_quota >=
9051 + service_quota->slot_use_count))
9052 + /* Signal the service that it may have
9053 + ** dropped below its quota */
9054 + up(&service_quota->quota_event);
9055 + status = VCHIQ_SUCCESS;
9056 + }
9057 + } break;
9058 +
9059 + case VCHIQ_SERVICE_OPTION_SYNCHRONOUS:
9060 + if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) ||
9061 + (service->srvstate ==
9062 + VCHIQ_SRVSTATE_LISTENING)) {
9063 + service->sync = value;
9064 + status = VCHIQ_SUCCESS;
9065 + }
9066 + break;
9067 +
9068 + default:
9069 + break;
9070 + }
9071 + unlock_service(service);
9072 + }
9073 +
9074 + return status;
9075 +}
9076 +
9077 +void
9078 +vchiq_dump_shared_state(void *dump_context, VCHIQ_STATE_T *state,
9079 + VCHIQ_SHARED_STATE_T *shared, const char *label)
9080 +{
9081 + static const char *const debug_names[] = {
9082 + "<entries>",
9083 + "SLOT_HANDLER_COUNT",
9084 + "SLOT_HANDLER_LINE",
9085 + "PARSE_LINE",
9086 + "PARSE_HEADER",
9087 + "PARSE_MSGID",
9088 + "AWAIT_COMPLETION_LINE",
9089 + "DEQUEUE_MESSAGE_LINE",
9090 + "SERVICE_CALLBACK_LINE",
9091 + "MSG_QUEUE_FULL_COUNT",
9092 + "COMPLETION_QUEUE_FULL_COUNT"
9093 + };
9094 + int i;
9095 +
9096 + char buf[80];
9097 + int len;
9098 + len = snprintf(buf, sizeof(buf),
9099 + " %s: slots %d-%d tx_pos=%x recycle=%x",
9100 + label, shared->slot_first, shared->slot_last,
9101 + shared->tx_pos, shared->slot_queue_recycle);
9102 + vchiq_dump(dump_context, buf, len + 1);
9103 +
9104 + len = snprintf(buf, sizeof(buf),
9105 + " Slots claimed:");
9106 + vchiq_dump(dump_context, buf, len + 1);
9107 +
9108 + for (i = shared->slot_first; i <= shared->slot_last; i++) {
9109 + VCHIQ_SLOT_INFO_T slot_info = *SLOT_INFO_FROM_INDEX(state, i);
9110 + if (slot_info.use_count != slot_info.release_count) {
9111 + len = snprintf(buf, sizeof(buf),
9112 + " %d: %d/%d", i, slot_info.use_count,
9113 + slot_info.release_count);
9114 + vchiq_dump(dump_context, buf, len + 1);
9115 + }
9116 + }
9117 +
9118 + for (i = 1; i < shared->debug[DEBUG_ENTRIES]; i++) {
9119 + len = snprintf(buf, sizeof(buf), " DEBUG: %s = %d(%x)",
9120 + debug_names[i], shared->debug[i], shared->debug[i]);
9121 + vchiq_dump(dump_context, buf, len + 1);
9122 + }
9123 +}
9124 +
9125 +void
9126 +vchiq_dump_state(void *dump_context, VCHIQ_STATE_T *state)
9127 +{
9128 + char buf[80];
9129 + int len;
9130 + int i;
9131 +
9132 + len = snprintf(buf, sizeof(buf), "State %d: %s", state->id,
9133 + conn_state_names[state->conn_state]);
9134 + vchiq_dump(dump_context, buf, len + 1);
9135 +
9136 + len = snprintf(buf, sizeof(buf),
9137 + " tx_pos=%x(@%x), rx_pos=%x(@%x)",
9138 + state->local->tx_pos,
9139 + (uint32_t)state->tx_data +
9140 + (state->local_tx_pos & VCHIQ_SLOT_MASK),
9141 + state->rx_pos,
9142 + (uint32_t)state->rx_data +
9143 + (state->rx_pos & VCHIQ_SLOT_MASK));
9144 + vchiq_dump(dump_context, buf, len + 1);
9145 +
9146 + len = snprintf(buf, sizeof(buf),
9147 + " Version: %d (min %d)",
9148 + VCHIQ_VERSION, VCHIQ_VERSION_MIN);
9149 + vchiq_dump(dump_context, buf, len + 1);
9150 +
9151 + if (VCHIQ_ENABLE_STATS) {
9152 + len = snprintf(buf, sizeof(buf),
9153 + " Stats: ctrl_tx_count=%d, ctrl_rx_count=%d, "
9154 + "error_count=%d",
9155 + state->stats.ctrl_tx_count, state->stats.ctrl_rx_count,
9156 + state->stats.error_count);
9157 + vchiq_dump(dump_context, buf, len + 1);
9158 + }
9159 +
9160 + len = snprintf(buf, sizeof(buf),
9161 + " Slots: %d available (%d data), %d recyclable, %d stalls "
9162 + "(%d data)",
9163 + ((state->slot_queue_available * VCHIQ_SLOT_SIZE) -
9164 + state->local_tx_pos) / VCHIQ_SLOT_SIZE,
9165 + state->data_quota - state->data_use_count,
9166 + state->local->slot_queue_recycle - state->slot_queue_available,
9167 + state->stats.slot_stalls, state->stats.data_stalls);
9168 + vchiq_dump(dump_context, buf, len + 1);
9169 +
9170 + vchiq_dump_platform_state(dump_context);
9171 +
9172 + vchiq_dump_shared_state(dump_context, state, state->local, "Local");
9173 + vchiq_dump_shared_state(dump_context, state, state->remote, "Remote");
9174 +
9175 + vchiq_dump_platform_instances(dump_context);
9176 +
9177 + for (i = 0; i < state->unused_service; i++) {
9178 + VCHIQ_SERVICE_T *service = find_service_by_port(state, i);
9179 +
9180 + if (service) {
9181 + vchiq_dump_service_state(dump_context, service);
9182 + unlock_service(service);
9183 + }
9184 + }
9185 +}
9186 +
9187 +void
9188 +vchiq_dump_service_state(void *dump_context, VCHIQ_SERVICE_T *service)
9189 +{
9190 + char buf[80];
9191 + int len;
9192 +
9193 + len = snprintf(buf, sizeof(buf), "Service %d: %s (ref %u)",
9194 + service->localport, srvstate_names[service->srvstate],
9195 + service->ref_count - 1); /*Don't include the lock just taken*/
9196 +
9197 + if (service->srvstate != VCHIQ_SRVSTATE_FREE) {
9198 + char remoteport[30];
9199 + VCHIQ_SERVICE_QUOTA_T *service_quota =
9200 + &service->state->service_quotas[service->localport];
9201 + int fourcc = service->base.fourcc;
9202 + int tx_pending, rx_pending;
9203 + if (service->remoteport != VCHIQ_PORT_FREE) {
9204 + int len2 = snprintf(remoteport, sizeof(remoteport),
9205 + "%d", service->remoteport);
9206 + if (service->public_fourcc != VCHIQ_FOURCC_INVALID)
9207 + snprintf(remoteport + len2,
9208 + sizeof(remoteport) - len2,
9209 + " (client %x)", service->client_id);
9210 + } else
9211 + strcpy(remoteport, "n/a");
9212 +
9213 + len += snprintf(buf + len, sizeof(buf) - len,
9214 + " '%c%c%c%c' remote %s (msg use %d/%d, slot use %d/%d)",
9215 + VCHIQ_FOURCC_AS_4CHARS(fourcc),
9216 + remoteport,
9217 + service_quota->message_use_count,
9218 + service_quota->message_quota,
9219 + service_quota->slot_use_count,
9220 + service_quota->slot_quota);
9221 +
9222 + vchiq_dump(dump_context, buf, len + 1);
9223 +
9224 + tx_pending = service->bulk_tx.local_insert -
9225 + service->bulk_tx.remote_insert;
9226 +
9227 + rx_pending = service->bulk_rx.local_insert -
9228 + service->bulk_rx.remote_insert;
9229 +
9230 + len = snprintf(buf, sizeof(buf),
9231 + " Bulk: tx_pending=%d (size %d),"
9232 + " rx_pending=%d (size %d)",
9233 + tx_pending,
9234 + tx_pending ? service->bulk_tx.bulks[
9235 + BULK_INDEX(service->bulk_tx.remove)].size : 0,
9236 + rx_pending,
9237 + rx_pending ? service->bulk_rx.bulks[
9238 + BULK_INDEX(service->bulk_rx.remove)].size : 0);
9239 +
9240 + if (VCHIQ_ENABLE_STATS) {
9241 + vchiq_dump(dump_context, buf, len + 1);
9242 +
9243 + len = snprintf(buf, sizeof(buf),
9244 + " Ctrl: tx_count=%d, tx_bytes=%llu, "
9245 + "rx_count=%d, rx_bytes=%llu",
9246 + service->stats.ctrl_tx_count,
9247 + service->stats.ctrl_tx_bytes,
9248 + service->stats.ctrl_rx_count,
9249 + service->stats.ctrl_rx_bytes);
9250 + vchiq_dump(dump_context, buf, len + 1);
9251 +
9252 + len = snprintf(buf, sizeof(buf),
9253 + " Bulk: tx_count=%d, tx_bytes=%llu, "
9254 + "rx_count=%d, rx_bytes=%llu",
9255 + service->stats.bulk_tx_count,
9256 + service->stats.bulk_tx_bytes,
9257 + service->stats.bulk_rx_count,
9258 + service->stats.bulk_rx_bytes);
9259 + vchiq_dump(dump_context, buf, len + 1);
9260 +
9261 + len = snprintf(buf, sizeof(buf),
9262 + " %d quota stalls, %d slot stalls, "
9263 + "%d bulk stalls, %d aborted, %d errors",
9264 + service->stats.quota_stalls,
9265 + service->stats.slot_stalls,
9266 + service->stats.bulk_stalls,
9267 + service->stats.bulk_aborted_count,
9268 + service->stats.error_count);
9269 + }
9270 + }
9271 +
9272 + vchiq_dump(dump_context, buf, len + 1);
9273 +
9274 + if (service->srvstate != VCHIQ_SRVSTATE_FREE)
9275 + vchiq_dump_platform_service_state(dump_context, service);
9276 +}
9277 +
9278 +
9279 +void
9280 +vchiq_loud_error_header(void)
9281 +{
9282 + vchiq_log_error(vchiq_core_log_level,
9283 + "============================================================"
9284 + "================");
9285 + vchiq_log_error(vchiq_core_log_level,
9286 + "============================================================"
9287 + "================");
9288 + vchiq_log_error(vchiq_core_log_level, "=====");
9289 +}
9290 +
9291 +void
9292 +vchiq_loud_error_footer(void)
9293 +{
9294 + vchiq_log_error(vchiq_core_log_level, "=====");
9295 + vchiq_log_error(vchiq_core_log_level,
9296 + "============================================================"
9297 + "================");
9298 + vchiq_log_error(vchiq_core_log_level,
9299 + "============================================================"
9300 + "================");
9301 +}
9302 +
9303 +
9304 +VCHIQ_STATUS_T vchiq_send_remote_use(VCHIQ_STATE_T *state)
9305 +{
9306 + VCHIQ_STATUS_T status = VCHIQ_RETRY;
9307 + if (state->conn_state != VCHIQ_CONNSTATE_DISCONNECTED)
9308 + status = queue_message(state, NULL,
9309 + VCHIQ_MAKE_MSG(VCHIQ_MSG_REMOTE_USE, 0, 0),
9310 + NULL, 0, 0, 0);
9311 + return status;
9312 +}
9313 +
9314 +VCHIQ_STATUS_T vchiq_send_remote_release(VCHIQ_STATE_T *state)
9315 +{
9316 + VCHIQ_STATUS_T status = VCHIQ_RETRY;
9317 + if (state->conn_state != VCHIQ_CONNSTATE_DISCONNECTED)
9318 + status = queue_message(state, NULL,
9319 + VCHIQ_MAKE_MSG(VCHIQ_MSG_REMOTE_RELEASE, 0, 0),
9320 + NULL, 0, 0, 0);
9321 + return status;
9322 +}
9323 +
9324 +VCHIQ_STATUS_T vchiq_send_remote_use_active(VCHIQ_STATE_T *state)
9325 +{
9326 + VCHIQ_STATUS_T status = VCHIQ_RETRY;
9327 + if (state->conn_state != VCHIQ_CONNSTATE_DISCONNECTED)
9328 + status = queue_message(state, NULL,
9329 + VCHIQ_MAKE_MSG(VCHIQ_MSG_REMOTE_USE_ACTIVE, 0, 0),
9330 + NULL, 0, 0, 0);
9331 + return status;
9332 +}
9333 +
9334 +void vchiq_log_dump_mem(const char *label, uint32_t addr, const void *voidMem,
9335 + size_t numBytes)
9336 +{
9337 + const uint8_t *mem = (const uint8_t *)voidMem;
9338 + size_t offset;
9339 + char lineBuf[100];
9340 + char *s;
9341 +
9342 + while (numBytes > 0) {
9343 + s = lineBuf;
9344 +
9345 + for (offset = 0; offset < 16; offset++) {
9346 + if (offset < numBytes)
9347 + s += snprintf(s, 4, "%02x ", mem[offset]);
9348 + else
9349 + s += snprintf(s, 4, " ");
9350 + }
9351 +
9352 + for (offset = 0; offset < 16; offset++) {
9353 + if (offset < numBytes) {
9354 + uint8_t ch = mem[offset];
9355 +
9356 + if ((ch < ' ') || (ch > '~'))
9357 + ch = '.';
9358 + *s++ = (char)ch;
9359 + }
9360 + }
9361 + *s++ = '\0';
9362 +
9363 + if ((label != NULL) && (*label != '\0'))
9364 + vchiq_log_trace(VCHIQ_LOG_TRACE,
9365 + "%s: %08x: %s", label, addr, lineBuf);
9366 + else
9367 + vchiq_log_trace(VCHIQ_LOG_TRACE,
9368 + "%08x: %s", addr, lineBuf);
9369 +
9370 + addr += 16;
9371 + mem += 16;
9372 + if (numBytes > 16)
9373 + numBytes -= 16;
9374 + else
9375 + numBytes = 0;
9376 + }
9377 +}
9378 diff --git a/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.h b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.h
9379 new file mode 100644
9380 index 0000000..47cdf27
9381 --- /dev/null
9382 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.h
9383 @@ -0,0 +1,706 @@
9384 +/**
9385 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
9386 + *
9387 + * Redistribution and use in source and binary forms, with or without
9388 + * modification, are permitted provided that the following conditions
9389 + * are met:
9390 + * 1. Redistributions of source code must retain the above copyright
9391 + * notice, this list of conditions, and the following disclaimer,
9392 + * without modification.
9393 + * 2. Redistributions in binary form must reproduce the above copyright
9394 + * notice, this list of conditions and the following disclaimer in the
9395 + * documentation and/or other materials provided with the distribution.
9396 + * 3. The names of the above-listed copyright holders may not be used
9397 + * to endorse or promote products derived from this software without
9398 + * specific prior written permission.
9399 + *
9400 + * ALTERNATIVELY, this software may be distributed under the terms of the
9401 + * GNU General Public License ("GPL") version 2, as published by the Free
9402 + * Software Foundation.
9403 + *
9404 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
9405 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
9406 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
9407 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
9408 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
9409 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
9410 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
9411 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
9412 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
9413 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
9414 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
9415 + */
9416 +
9417 +#ifndef VCHIQ_CORE_H
9418 +#define VCHIQ_CORE_H
9419 +
9420 +#include <linux/mutex.h>
9421 +#include <linux/semaphore.h>
9422 +#include <linux/kthread.h>
9423 +
9424 +#include "vchiq_cfg.h"
9425 +
9426 +#include "vchiq.h"
9427 +
9428 +/* Run time control of log level, based on KERN_XXX level. */
9429 +#define VCHIQ_LOG_DEFAULT 4
9430 +#define VCHIQ_LOG_ERROR 3
9431 +#define VCHIQ_LOG_WARNING 4
9432 +#define VCHIQ_LOG_INFO 6
9433 +#define VCHIQ_LOG_TRACE 7
9434 +
9435 +#define VCHIQ_LOG_PREFIX KERN_INFO "vchiq: "
9436 +
9437 +#ifndef vchiq_log_error
9438 +#define vchiq_log_error(cat, fmt, ...) \
9439 + do { if (cat >= VCHIQ_LOG_ERROR) \
9440 + printk(VCHIQ_LOG_PREFIX fmt "\n", ##__VA_ARGS__); } while (0)
9441 +#endif
9442 +#ifndef vchiq_log_warning
9443 +#define vchiq_log_warning(cat, fmt, ...) \
9444 + do { if (cat >= VCHIQ_LOG_WARNING) \
9445 + printk(VCHIQ_LOG_PREFIX fmt "\n", ##__VA_ARGS__); } while (0)
9446 +#endif
9447 +#ifndef vchiq_log_info
9448 +#define vchiq_log_info(cat, fmt, ...) \
9449 + do { if (cat >= VCHIQ_LOG_INFO) \
9450 + printk(VCHIQ_LOG_PREFIX fmt "\n", ##__VA_ARGS__); } while (0)
9451 +#endif
9452 +#ifndef vchiq_log_trace
9453 +#define vchiq_log_trace(cat, fmt, ...) \
9454 + do { if (cat >= VCHIQ_LOG_TRACE) \
9455 + printk(VCHIQ_LOG_PREFIX fmt "\n", ##__VA_ARGS__); } while (0)
9456 +#endif
9457 +
9458 +#define vchiq_loud_error(...) \
9459 + vchiq_log_error(vchiq_core_log_level, "===== " __VA_ARGS__)
9460 +
9461 +#ifndef vchiq_static_assert
9462 +#define vchiq_static_assert(cond) __attribute__((unused)) \
9463 + extern int vchiq_static_assert[(cond) ? 1 : -1]
9464 +#endif
9465 +
9466 +#define IS_POW2(x) (x && ((x & (x - 1)) == 0))
9467 +
9468 +/* Ensure that the slot size and maximum number of slots are powers of 2 */
9469 +vchiq_static_assert(IS_POW2(VCHIQ_SLOT_SIZE));
9470 +vchiq_static_assert(IS_POW2(VCHIQ_MAX_SLOTS));
9471 +vchiq_static_assert(IS_POW2(VCHIQ_MAX_SLOTS_PER_SIDE));
9472 +
9473 +#define VCHIQ_SLOT_MASK (VCHIQ_SLOT_SIZE - 1)
9474 +#define VCHIQ_SLOT_QUEUE_MASK (VCHIQ_MAX_SLOTS_PER_SIDE - 1)
9475 +#define VCHIQ_SLOT_ZERO_SLOTS ((sizeof(VCHIQ_SLOT_ZERO_T) + \
9476 + VCHIQ_SLOT_SIZE - 1) / VCHIQ_SLOT_SIZE)
9477 +
9478 +#define VCHIQ_MSG_PADDING 0 /* - */
9479 +#define VCHIQ_MSG_CONNECT 1 /* - */
9480 +#define VCHIQ_MSG_OPEN 2 /* + (srcport, -), fourcc, client_id */
9481 +#define VCHIQ_MSG_OPENACK 3 /* + (srcport, dstport) */
9482 +#define VCHIQ_MSG_CLOSE 4 /* + (srcport, dstport) */
9483 +#define VCHIQ_MSG_DATA 5 /* + (srcport, dstport) */
9484 +#define VCHIQ_MSG_BULK_RX 6 /* + (srcport, dstport), data, size */
9485 +#define VCHIQ_MSG_BULK_TX 7 /* + (srcport, dstport), data, size */
9486 +#define VCHIQ_MSG_BULK_RX_DONE 8 /* + (srcport, dstport), actual */
9487 +#define VCHIQ_MSG_BULK_TX_DONE 9 /* + (srcport, dstport), actual */
9488 +#define VCHIQ_MSG_PAUSE 10 /* - */
9489 +#define VCHIQ_MSG_RESUME 11 /* - */
9490 +#define VCHIQ_MSG_REMOTE_USE 12 /* - */
9491 +#define VCHIQ_MSG_REMOTE_RELEASE 13 /* - */
9492 +#define VCHIQ_MSG_REMOTE_USE_ACTIVE 14 /* - */
9493 +
9494 +#define VCHIQ_PORT_MAX (VCHIQ_MAX_SERVICES - 1)
9495 +#define VCHIQ_PORT_FREE 0x1000
9496 +#define VCHIQ_PORT_IS_VALID(port) (port < VCHIQ_PORT_FREE)
9497 +#define VCHIQ_MAKE_MSG(type, srcport, dstport) \
9498 + ((type<<24) | (srcport<<12) | (dstport<<0))
9499 +#define VCHIQ_MSG_TYPE(msgid) ((unsigned int)msgid >> 24)
9500 +#define VCHIQ_MSG_SRCPORT(msgid) \
9501 + (unsigned short)(((unsigned int)msgid >> 12) & 0xfff)
9502 +#define VCHIQ_MSG_DSTPORT(msgid) \
9503 + ((unsigned short)msgid & 0xfff)
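+/* Worked example of the layout above: a msgid packs type:8 | srcport:12 |
+ * dstport:12, so VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA, 3, 7) is 0x05003007, and
+ * VCHIQ_MSG_TYPE, VCHIQ_MSG_SRCPORT and VCHIQ_MSG_DSTPORT recover 5, 3
+ * and 7 respectively. */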
9504 +
9505 +#define VCHIQ_FOURCC_AS_4CHARS(fourcc) \
9506 + ((fourcc) >> 24) & 0xff, \
9507 + ((fourcc) >> 16) & 0xff, \
9508 + ((fourcc) >> 8) & 0xff, \
9509 + (fourcc) & 0xff
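+/* Expands to four comma-separated byte values (most significant first), for
+ * use with "%c%c%c%c" format strings such as the service logging in
+ * vchiq_core.c. */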
9510 +
9511 +/* Ensure the fields are wide enough */
9512 +vchiq_static_assert(VCHIQ_MSG_SRCPORT(VCHIQ_MAKE_MSG(0, 0, VCHIQ_PORT_MAX))
9513 + == 0);
9514 +vchiq_static_assert(VCHIQ_MSG_TYPE(VCHIQ_MAKE_MSG(0, VCHIQ_PORT_MAX, 0)) == 0);
9515 +vchiq_static_assert((unsigned int)VCHIQ_PORT_MAX <
9516 + (unsigned int)VCHIQ_PORT_FREE);
9517 +
9518 +#define VCHIQ_MSGID_PADDING VCHIQ_MAKE_MSG(VCHIQ_MSG_PADDING, 0, 0)
9519 +#define VCHIQ_MSGID_CLAIMED 0x40000000
9520 +
9521 +#define VCHIQ_FOURCC_INVALID 0x00000000
9522 +#define VCHIQ_FOURCC_IS_LEGAL(fourcc) (fourcc != VCHIQ_FOURCC_INVALID)
9523 +
9524 +#define VCHIQ_BULK_ACTUAL_ABORTED -1
9525 +
9526 +typedef uint32_t BITSET_T;
9527 +
9528 +vchiq_static_assert((sizeof(BITSET_T) * 8) == 32);
9529 +
9530 +#define BITSET_SIZE(b) ((b + 31) >> 5)
9531 +#define BITSET_WORD(b) (b >> 5)
9532 +#define BITSET_BIT(b) (1 << (b & 31))
9533 +#define BITSET_ZERO(bs) memset(bs, 0, sizeof(bs))
9534 +#define BITSET_IS_SET(bs, b) (bs[BITSET_WORD(b)] & BITSET_BIT(b))
9535 +#define BITSET_SET(bs, b) (bs[BITSET_WORD(b)] |= BITSET_BIT(b))
9536 +#define BITSET_CLR(bs, b) (bs[BITSET_WORD(b)] &= ~BITSET_BIT(b))
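+/* For example, BITSET_SET(bs, 37) sets bit 5 of word 1 (37 >> 5 == 1,
+ * 37 & 31 == 5), and BITSET_SIZE(VCHIQ_MAX_SERVICES) is the number of
+ * 32-bit words needed to hold one flag per service. */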
9537 +
9538 +#if VCHIQ_ENABLE_STATS
9539 +#define VCHIQ_STATS_INC(state, stat) (state->stats. stat++)
9540 +#define VCHIQ_SERVICE_STATS_INC(service, stat) (service->stats. stat++)
9541 +#define VCHIQ_SERVICE_STATS_ADD(service, stat, addend) \
9542 + (service->stats. stat += addend)
9543 +#else
9544 +#define VCHIQ_STATS_INC(state, stat) ((void)0)
9545 +#define VCHIQ_SERVICE_STATS_INC(service, stat) ((void)0)
9546 +#define VCHIQ_SERVICE_STATS_ADD(service, stat, addend) ((void)0)
9547 +#endif
9548 +
9549 +enum {
9550 + DEBUG_ENTRIES,
9551 +#if VCHIQ_ENABLE_DEBUG
9552 + DEBUG_SLOT_HANDLER_COUNT,
9553 + DEBUG_SLOT_HANDLER_LINE,
9554 + DEBUG_PARSE_LINE,
9555 + DEBUG_PARSE_HEADER,
9556 + DEBUG_PARSE_MSGID,
9557 + DEBUG_AWAIT_COMPLETION_LINE,
9558 + DEBUG_DEQUEUE_MESSAGE_LINE,
9559 + DEBUG_SERVICE_CALLBACK_LINE,
9560 + DEBUG_MSG_QUEUE_FULL_COUNT,
9561 + DEBUG_COMPLETION_QUEUE_FULL_COUNT,
9562 +#endif
9563 + DEBUG_MAX
9564 +};
9565 +
9566 +#if VCHIQ_ENABLE_DEBUG
9567 +
9568 +#define DEBUG_INITIALISE(local) int *debug_ptr = (local)->debug;
9569 +#define DEBUG_TRACE(d) \
9570 + do { debug_ptr[DEBUG_ ## d] = __LINE__; dsb(); } while (0)
9571 +#define DEBUG_VALUE(d, v) \
9572 + do { debug_ptr[DEBUG_ ## d] = (v); dsb(); } while (0)
9573 +#define DEBUG_COUNT(d) \
9574 + do { debug_ptr[DEBUG_ ## d]++; dsb(); } while (0)
9575 +
9576 +#else /* VCHIQ_ENABLE_DEBUG */
9577 +
9578 +#define DEBUG_INITIALISE(local)
9579 +#define DEBUG_TRACE(d)
9580 +#define DEBUG_VALUE(d, v)
9581 +#define DEBUG_COUNT(d)
9582 +
9583 +#endif /* VCHIQ_ENABLE_DEBUG */
9584 +
9585 +typedef enum {
9586 + VCHIQ_CONNSTATE_DISCONNECTED,
9587 + VCHIQ_CONNSTATE_CONNECTING,
9588 + VCHIQ_CONNSTATE_CONNECTED,
9589 + VCHIQ_CONNSTATE_PAUSING,
9590 + VCHIQ_CONNSTATE_PAUSE_SENT,
9591 + VCHIQ_CONNSTATE_PAUSED,
9592 + VCHIQ_CONNSTATE_RESUMING,
9593 + VCHIQ_CONNSTATE_PAUSE_TIMEOUT,
9594 + VCHIQ_CONNSTATE_RESUME_TIMEOUT
9595 +} VCHIQ_CONNSTATE_T;
9596 +
9597 +enum {
9598 + VCHIQ_SRVSTATE_FREE,
9599 + VCHIQ_SRVSTATE_HIDDEN,
9600 + VCHIQ_SRVSTATE_LISTENING,
9601 + VCHIQ_SRVSTATE_OPENING,
9602 + VCHIQ_SRVSTATE_OPEN,
9603 + VCHIQ_SRVSTATE_OPENSYNC,
9604 + VCHIQ_SRVSTATE_CLOSESENT,
9605 + VCHIQ_SRVSTATE_CLOSERECVD,
9606 + VCHIQ_SRVSTATE_CLOSEWAIT,
9607 + VCHIQ_SRVSTATE_CLOSED
9608 +};
9609 +
9610 +enum {
9611 + VCHIQ_POLL_TERMINATE,
9612 + VCHIQ_POLL_REMOVE,
9613 + VCHIQ_POLL_TXNOTIFY,
9614 + VCHIQ_POLL_RXNOTIFY,
9615 + VCHIQ_POLL_COUNT
9616 +};
9617 +
9618 +typedef enum {
9619 + VCHIQ_BULK_TRANSMIT,
9620 + VCHIQ_BULK_RECEIVE
9621 +} VCHIQ_BULK_DIR_T;
9622 +
9623 +typedef void (*VCHIQ_USERDATA_TERM_T)(void *userdata);
9624 +
9625 +typedef struct vchiq_bulk_struct {
9626 + short mode;
9627 + short dir;
9628 + void *userdata;
9629 + VCHI_MEM_HANDLE_T handle;
9630 + void *data;
9631 + int size;
9632 + void *remote_data;
9633 + int remote_size;
9634 + int actual;
9635 +} VCHIQ_BULK_T;
9636 +
9637 +typedef struct vchiq_bulk_queue_struct {
9638 + int local_insert; /* Where to insert the next local bulk */
9639 + int remote_insert; /* Where to insert the next remote bulk (master) */
9640 + int process; /* Bulk to transfer next */
9641 + int remote_notify; /* Bulk to notify the remote client of next (mstr) */
9642 + int remove; /* Bulk to notify the local client of, and remove,
9643 + ** next */
9644 + VCHIQ_BULK_T bulks[VCHIQ_NUM_SERVICE_BULKS];
9645 +} VCHIQ_BULK_QUEUE_T;
9646 +
9647 +typedef struct remote_event_struct {
9648 + int armed;
9649 + int fired;
9650 + struct semaphore *event;
9651 +} REMOTE_EVENT_T;
9652 +
9653 +typedef struct opaque_platform_state_t *VCHIQ_PLATFORM_STATE_T;
9654 +
9655 +typedef struct vchiq_state_struct VCHIQ_STATE_T;
9656 +
9657 +typedef struct vchiq_slot_struct {
9658 + char data[VCHIQ_SLOT_SIZE];
9659 +} VCHIQ_SLOT_T;
9660 +
9661 +typedef struct vchiq_slot_info_struct {
9662 + /* Use two counters rather than one to avoid the need for a mutex. */
9663 + short use_count;
9664 + short release_count;
9665 +} VCHIQ_SLOT_INFO_T;
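+/* A slot is wholly released once release_count catches up with use_count;
+ * the cleanup and dump code compares the two counters for exactly this. */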
9666 +
9667 +typedef struct vchiq_service_struct {
9668 + VCHIQ_SERVICE_BASE_T base;
9669 + VCHIQ_SERVICE_HANDLE_T handle;
9670 + unsigned int ref_count;
9671 + int srvstate;
9672 + VCHIQ_USERDATA_TERM_T userdata_term;
9673 + unsigned int localport;
9674 + unsigned int remoteport;
9675 + int public_fourcc;
9676 + int client_id;
9677 + char auto_close;
9678 + char sync;
9679 + char closing;
9680 + atomic_t poll_flags;
9681 + short version;
9682 + short version_min;
9683 + short peer_version;
9684 +
9685 + VCHIQ_STATE_T *state;
9686 + VCHIQ_INSTANCE_T instance;
9687 +
9688 + int service_use_count;
9689 +
9690 + VCHIQ_BULK_QUEUE_T bulk_tx;
9691 + VCHIQ_BULK_QUEUE_T bulk_rx;
9692 +
9693 + struct semaphore remove_event;
9694 + struct semaphore bulk_remove_event;
9695 + struct mutex bulk_mutex;
9696 +
9697 + struct service_stats_struct {
9698 + int quota_stalls;
9699 + int slot_stalls;
9700 + int bulk_stalls;
9701 + int error_count;
9702 + int ctrl_tx_count;
9703 + int ctrl_rx_count;
9704 + int bulk_tx_count;
9705 + int bulk_rx_count;
9706 + int bulk_aborted_count;
9707 + uint64_t ctrl_tx_bytes;
9708 + uint64_t ctrl_rx_bytes;
9709 + uint64_t bulk_tx_bytes;
9710 + uint64_t bulk_rx_bytes;
9711 + } stats;
9712 +} VCHIQ_SERVICE_T;
9713 +
9714 +/* The quota information is outside VCHIQ_SERVICE_T so that it can be
9715 + statically allocated, since for accounting reasons a service's slot
9716 + usage is carried over between users of the same port number.
9717 + */
9718 +typedef struct vchiq_service_quota_struct {
9719 + unsigned short slot_quota;
9720 + unsigned short slot_use_count;
9721 + unsigned short message_quota;
9722 + unsigned short message_use_count;
9723 + struct semaphore quota_event;
9724 + int previous_tx_index;
9725 +} VCHIQ_SERVICE_QUOTA_T;
9726 +
9727 +typedef struct vchiq_shared_state_struct {
9728 +
9729 + /* A non-zero value here indicates that the content is valid. */
9730 + int initialised;
9731 +
9732 + /* The first and last (inclusive) slots allocated to the owner. */
9733 + int slot_first;
9734 + int slot_last;
9735 +
9736 + /* The slot allocated to synchronous messages from the owner. */
9737 + int slot_sync;
9738 +
9739 +	/* Signalling this event indicates that the owner's slot handler thread
9740 + ** should run. */
9741 + REMOTE_EVENT_T trigger;
9742 +
9743 + /* Indicates the byte position within the stream where the next message
9744 + ** will be written. The least significant bits are an index into the
9745 + ** slot. The next bits are the index of the slot in slot_queue. */
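+	/* (Decode sketch: the offset within the current slot is
+	** tx_pos & VCHIQ_SLOT_MASK, and the slot_queue position follows
+	** from tx_pos / VCHIQ_SLOT_SIZE.) */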
9746 + int tx_pos;
9747 +
9748 + /* This event should be signalled when a slot is recycled. */
9749 + REMOTE_EVENT_T recycle;
9750 +
9751 + /* The slot_queue index where the next recycled slot will be written. */
9752 + int slot_queue_recycle;
9753 +
9754 + /* This event should be signalled when a synchronous message is sent. */
9755 + REMOTE_EVENT_T sync_trigger;
9756 +
9757 + /* This event should be signalled when a synchronous message has been
9758 + ** released. */
9759 + REMOTE_EVENT_T sync_release;
9760 +
9761 + /* A circular buffer of slot indexes. */
9762 + int slot_queue[VCHIQ_MAX_SLOTS_PER_SIDE];
9763 +
9764 + /* Debugging state */
9765 + int debug[DEBUG_MAX];
9766 +} VCHIQ_SHARED_STATE_T;
9767 +
9768 +typedef struct vchiq_slot_zero_struct {
9769 + int magic;
9770 + short version;
9771 + short version_min;
9772 + int slot_zero_size;
9773 + int slot_size;
9774 + int max_slots;
9775 + int max_slots_per_side;
9776 + int platform_data[2];
9777 + VCHIQ_SHARED_STATE_T master;
9778 + VCHIQ_SHARED_STATE_T slave;
9779 + VCHIQ_SLOT_INFO_T slots[VCHIQ_MAX_SLOTS];
9780 +} VCHIQ_SLOT_ZERO_T;
9781 +
9782 +struct vchiq_state_struct {
9783 + int id;
9784 + int initialised;
9785 + VCHIQ_CONNSTATE_T conn_state;
9786 + int is_master;
9787 +
9788 + VCHIQ_SHARED_STATE_T *local;
9789 + VCHIQ_SHARED_STATE_T *remote;
9790 + VCHIQ_SLOT_T *slot_data;
9791 +
9792 + unsigned short default_slot_quota;
9793 + unsigned short default_message_quota;
9794 +
9795 + /* Event indicating connect message received */
9796 + struct semaphore connect;
9797 +
9798 + /* Mutex protecting services */
9799 + struct mutex mutex;
9800 + VCHIQ_INSTANCE_T *instance;
9801 +
9802 + /* Processes incoming messages */
9803 + struct task_struct *slot_handler_thread;
9804 +
9805 + /* Processes recycled slots */
9806 + struct task_struct *recycle_thread;
9807 +
9808 + /* Processes synchronous messages */
9809 + struct task_struct *sync_thread;
9810 +
9811 + /* Local implementation of the trigger remote event */
9812 + struct semaphore trigger_event;
9813 +
9814 + /* Local implementation of the recycle remote event */
9815 + struct semaphore recycle_event;
9816 +
9817 + /* Local implementation of the sync trigger remote event */
9818 + struct semaphore sync_trigger_event;
9819 +
9820 + /* Local implementation of the sync release remote event */
9821 + struct semaphore sync_release_event;
9822 +
9823 + char *tx_data;
9824 + char *rx_data;
9825 + VCHIQ_SLOT_INFO_T *rx_info;
9826 +
9827 + struct mutex slot_mutex;
9828 +
9829 + struct mutex recycle_mutex;
9830 +
9831 + struct mutex sync_mutex;
9832 +
9833 + struct mutex bulk_transfer_mutex;
9834 +
9835 + /* Indicates the byte position within the stream from where the next
9836 + ** message will be read. The least significant bits are an index into
9837 + ** the slot. The next bits are the index of the slot in
9838 + ** remote->slot_queue. */
9839 + int rx_pos;
9840 +
9841 + /* A cached copy of local->tx_pos. Only write to local->tx_pos, and read
9842 + from remote->tx_pos. */
9843 + int local_tx_pos;
9844 +
9845 + /* The slot_queue index of the slot to become available next. */
9846 + int slot_queue_available;
9847 +
9848 + /* A flag to indicate if any poll has been requested */
9849 + int poll_needed;
9850 +
9851 + /* The index of the previous slot used for data messages. */
9852 + int previous_data_index;
9853 +
9854 + /* The number of slots occupied by data messages. */
9855 + unsigned short data_use_count;
9856 +
9857 + /* The maximum number of slots to be occupied by data messages. */
9858 + unsigned short data_quota;
9859 +
9860 + /* An array of bit sets indicating which services must be polled. */
9861 + atomic_t poll_services[BITSET_SIZE(VCHIQ_MAX_SERVICES)];
9862 +
9863 + /* The number of the first unused service */
9864 + int unused_service;
9865 +
9866 + /* Signalled when a free slot becomes available. */
9867 + struct semaphore slot_available_event;
9868 +
9869 + struct semaphore slot_remove_event;
9870 +
9871 + /* Signalled when a free data slot becomes available. */
9872 + struct semaphore data_quota_event;
9873 +
9874 + /* Incremented when there are bulk transfers which cannot be processed
9875 + * whilst paused and must be processed on resume */
9876 + int deferred_bulks;
9877 +
9878 + struct state_stats_struct {
9879 + int slot_stalls;
9880 + int data_stalls;
9881 + int ctrl_tx_count;
9882 + int ctrl_rx_count;
9883 + int error_count;
9884 + } stats;
9885 +
9886 + VCHIQ_SERVICE_T *services[VCHIQ_MAX_SERVICES];
9887 + VCHIQ_SERVICE_QUOTA_T service_quotas[VCHIQ_MAX_SERVICES];
9888 + VCHIQ_SLOT_INFO_T slot_info[VCHIQ_MAX_SLOTS];
9889 +
9890 + VCHIQ_PLATFORM_STATE_T platform_state;
9891 +};
9892 +
9893 +struct bulk_waiter {
9894 + VCHIQ_BULK_T *bulk;
9895 + struct semaphore event;
9896 + int actual;
9897 +};
9898 +
9899 +extern spinlock_t bulk_waiter_spinlock;
9900 +
9901 +extern int vchiq_core_log_level;
9902 +extern int vchiq_core_msg_log_level;
9903 +extern int vchiq_sync_log_level;
9904 +
9905 +extern VCHIQ_STATE_T *vchiq_states[VCHIQ_MAX_STATES];
9906 +
9907 +extern const char *
9908 +get_conn_state_name(VCHIQ_CONNSTATE_T conn_state);
9909 +
9910 +extern VCHIQ_SLOT_ZERO_T *
9911 +vchiq_init_slots(void *mem_base, int mem_size);
9912 +
9913 +extern VCHIQ_STATUS_T
9914 +vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero,
9915 + int is_master);
9916 +
9917 +extern VCHIQ_STATUS_T
9918 +vchiq_connect_internal(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance);
9919 +
9920 +extern VCHIQ_SERVICE_T *
9921 +vchiq_add_service_internal(VCHIQ_STATE_T *state,
9922 + const VCHIQ_SERVICE_PARAMS_T *params, int srvstate,
9923 + VCHIQ_INSTANCE_T instance, VCHIQ_USERDATA_TERM_T userdata_term);
9924 +
9925 +extern VCHIQ_STATUS_T
9926 +vchiq_open_service_internal(VCHIQ_SERVICE_T *service, int client_id);
9927 +
9928 +extern VCHIQ_STATUS_T
9929 +vchiq_close_service_internal(VCHIQ_SERVICE_T *service, int close_recvd);
9930 +
9931 +extern void
9932 +vchiq_terminate_service_internal(VCHIQ_SERVICE_T *service);
9933 +
9934 +extern void
9935 +vchiq_free_service_internal(VCHIQ_SERVICE_T *service);
9936 +
9937 +extern VCHIQ_STATUS_T
9938 +vchiq_shutdown_internal(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance);
9939 +
9940 +extern VCHIQ_STATUS_T
9941 +vchiq_pause_internal(VCHIQ_STATE_T *state);
9942 +
9943 +extern VCHIQ_STATUS_T
9944 +vchiq_resume_internal(VCHIQ_STATE_T *state);
9945 +
9946 +extern void
9947 +remote_event_pollall(VCHIQ_STATE_T *state);
9948 +
9949 +extern VCHIQ_STATUS_T
9950 +vchiq_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle,
9951 + VCHI_MEM_HANDLE_T memhandle, void *offset, int size, void *userdata,
9952 + VCHIQ_BULK_MODE_T mode, VCHIQ_BULK_DIR_T dir);
9953 +
9954 +extern void
9955 +vchiq_dump_state(void *dump_context, VCHIQ_STATE_T *state);
9956 +
9957 +extern void
9958 +vchiq_dump_service_state(void *dump_context, VCHIQ_SERVICE_T *service);
9959 +
9960 +extern void
9961 +vchiq_loud_error_header(void);
9962 +
9963 +extern void
9964 +vchiq_loud_error_footer(void);
9965 +
9966 +extern void
9967 +request_poll(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service, int poll_type);
9968 +
9969 +static inline VCHIQ_SERVICE_T *
9970 +handle_to_service(VCHIQ_SERVICE_HANDLE_T handle)
9971 +{
9972 + VCHIQ_STATE_T *state = vchiq_states[(handle / VCHIQ_MAX_SERVICES) &
9973 + (VCHIQ_MAX_STATES - 1)];
9974 + if (!state)
9975 + return NULL;
9976 +
9977 + return state->services[handle & (VCHIQ_MAX_SERVICES - 1)];
9978 +}
9979 +
9980 +extern VCHIQ_SERVICE_T *
9981 +find_service_by_handle(VCHIQ_SERVICE_HANDLE_T handle);
9982 +
9983 +extern VCHIQ_SERVICE_T *
9984 +find_service_by_port(VCHIQ_STATE_T *state, int localport);
9985 +
9986 +extern VCHIQ_SERVICE_T *
9987 +find_service_for_instance(VCHIQ_INSTANCE_T instance,
9988 + VCHIQ_SERVICE_HANDLE_T handle);
9989 +
9990 +extern VCHIQ_SERVICE_T *
9991 +next_service_by_instance(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance,
9992 + int *pidx);
9993 +
9994 +extern void
9995 +lock_service(VCHIQ_SERVICE_T *service);
9996 +
9997 +extern void
9998 +unlock_service(VCHIQ_SERVICE_T *service);
9999 +
10000 +/* The following functions are called from vchiq_core, and external
10001 +** implementations must be provided. */
10002 +
10003 +extern VCHIQ_STATUS_T
10004 +vchiq_prepare_bulk_data(VCHIQ_BULK_T *bulk,
10005 + VCHI_MEM_HANDLE_T memhandle, void *offset, int size, int dir);
10006 +
10007 +extern void
10008 +vchiq_transfer_bulk(VCHIQ_BULK_T *bulk);
10009 +
10010 +extern void
10011 +vchiq_complete_bulk(VCHIQ_BULK_T *bulk);
10012 +
10013 +extern VCHIQ_STATUS_T
10014 +vchiq_copy_from_user(void *dst, const void *src, int size);
10015 +
10016 +extern void
10017 +remote_event_signal(REMOTE_EVENT_T *event);
10018 +
10019 +void
10020 +vchiq_platform_check_suspend(VCHIQ_STATE_T *state);
10021 +
10022 +extern void
10023 +vchiq_platform_paused(VCHIQ_STATE_T *state);
10024 +
10025 +extern VCHIQ_STATUS_T
10026 +vchiq_platform_resume(VCHIQ_STATE_T *state);
10027 +
10028 +extern void
10029 +vchiq_platform_resumed(VCHIQ_STATE_T *state);
10030 +
10031 +extern void
10032 +vchiq_dump(void *dump_context, const char *str, int len);
10033 +
10034 +extern void
10035 +vchiq_dump_platform_state(void *dump_context);
10036 +
10037 +extern void
10038 +vchiq_dump_platform_instances(void *dump_context);
10039 +
10040 +extern void
10041 +vchiq_dump_platform_service_state(void *dump_context,
10042 + VCHIQ_SERVICE_T *service);
10043 +
10044 +extern VCHIQ_STATUS_T
10045 +vchiq_use_service_internal(VCHIQ_SERVICE_T *service);
10046 +
10047 +extern VCHIQ_STATUS_T
10048 +vchiq_release_service_internal(VCHIQ_SERVICE_T *service);
10049 +
10050 +extern void
10051 +vchiq_on_remote_use(VCHIQ_STATE_T *state);
10052 +
10053 +extern void
10054 +vchiq_on_remote_release(VCHIQ_STATE_T *state);
10055 +
10056 +extern VCHIQ_STATUS_T
10057 +vchiq_platform_init_state(VCHIQ_STATE_T *state);
10058 +
10059 +extern VCHIQ_STATUS_T
10060 +vchiq_check_service(VCHIQ_SERVICE_T *service);
10061 +
10062 +extern void
10063 +vchiq_on_remote_use_active(VCHIQ_STATE_T *state);
10064 +
10065 +extern VCHIQ_STATUS_T
10066 +vchiq_send_remote_use(VCHIQ_STATE_T *state);
10067 +
10068 +extern VCHIQ_STATUS_T
10069 +vchiq_send_remote_release(VCHIQ_STATE_T *state);
10070 +
10071 +extern VCHIQ_STATUS_T
10072 +vchiq_send_remote_use_active(VCHIQ_STATE_T *state);
10073 +
10074 +extern void
10075 +vchiq_platform_conn_state_changed(VCHIQ_STATE_T *state,
10076 + VCHIQ_CONNSTATE_T oldstate, VCHIQ_CONNSTATE_T newstate);
10077 +
10078 +extern void
10079 +vchiq_platform_handle_timeout(VCHIQ_STATE_T *state);
10080 +
10081 +extern void
10082 +vchiq_set_conn_state(VCHIQ_STATE_T *state, VCHIQ_CONNSTATE_T newstate);
10083 +
10084 +
10085 +extern void
10086 +vchiq_log_dump_mem(const char *label, uint32_t addr, const void *voidMem,
10087 + size_t numBytes);
10088 +
10089 +#endif
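
The inline handle_to_service() above fixes the layout of a service handle: the low bits index state->services[] (the local port slot) and the next bits pick the owning VCHIQ_STATE_T out of vchiq_states[]. A minimal sketch of that decomposition, assuming only that VCHIQ_MAX_SERVICES and VCHIQ_MAX_STATES are powers of two, which the masking already requires; the helper below is illustrative and not part of the patch:

/* Hypothetical helper mirroring the arithmetic of handle_to_service(). */
static inline void decode_service_handle(VCHIQ_SERVICE_HANDLE_T handle,
	unsigned int *state_idx, unsigned int *service_idx)
{
	/* Low bits: slot in state->services[] (the local port). */
	*service_idx = handle & (VCHIQ_MAX_SERVICES - 1);
	/* Next bits: which VCHIQ_STATE_T in vchiq_states[]. */
	*state_idx = (handle / VCHIQ_MAX_SERVICES) & (VCHIQ_MAX_STATES - 1);
}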
10090 diff --git a/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_genversion b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_genversion
10091 new file mode 100644
10092 index 0000000..77dd613
10093 --- /dev/null
10094 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_genversion
10095 @@ -0,0 +1,89 @@
10096 +#!/usr/bin/perl -w
10097 +
10098 +use strict;
10099 +
10100 +#
10101 +# Generate a version from available information
10102 +#
10103 +
10104 +my $prefix = shift @ARGV;
10105 +my $root = shift @ARGV;
10106 +
10107 +
10108 +if ( not defined $root ) {
10109 + die "usage: $0 prefix root-dir\n";
10110 +}
10111 +
10112 +if ( ! -d $root ) {
10113 + die "root directory $root not found\n";
10114 +}
10115 +
10116 +my $version = "unknown";
10117 +my $tainted = "";
10118 +
10119 +if ( -d "$root/.git" ) {
10120 + # attempt to work out git version. only do so
10121 + # on a linux build host, as cygwin builds are
10122 + # already slow enough
10123 +
10124 + if ( -f "/usr/bin/git" || -f "/usr/local/bin/git" ) {
10125 + if (not open(F, "git --git-dir $root/.git rev-parse --verify HEAD|")) {
10126 + $version = "no git version";
10127 + }
10128 + else {
10129 + $version = <F>;
10130 + $version =~ s/[ \r\n]*$//; # chomp may not be enough (cygwin).
10131 + $version =~ s/^[ \r\n]*//; # chomp may not be enough (cygwin).
10132 + }
10133 +
10134 + if (open(G, "git --git-dir $root/.git status --porcelain|")) {
10135 + $tainted = <G> || ""; # guard against empty 'git status' output
10136 + $tainted =~ s/[ \r\n]*$//; # chomp may not be enough (cygwin).
10137 + $tainted =~ s/^[ \r\n]*//; # chomp may not be enough (cygwin).
10138 + if (length $tainted) {
10139 + $version = join ' ', $version, "(tainted)";
10140 + }
10141 + else {
10142 + $version = join ' ', $version, "(clean)";
10143 + }
10144 + }
10145 + }
10146 +}
10147 +
10148 +my $hostname = `hostname`;
10149 +$hostname =~ s/[ \r\n]*$//; # chomp may not be enough (cygwin).
10150 +$hostname =~ s/^[ \r\n]*//; # chomp may not be enough (cygwin).
10151 +
10152 +
10153 +print STDERR "Version $version\n";
10154 +print <<EOF;
10155 +#include "${prefix}_build_info.h"
10156 +#include <linux/broadcom/vc_debug_sym.h>
10157 +
10158 +VC_DEBUG_DECLARE_STRING_VAR( ${prefix}_build_hostname, "$hostname" );
10159 +VC_DEBUG_DECLARE_STRING_VAR( ${prefix}_build_version, "$version" );
10160 +VC_DEBUG_DECLARE_STRING_VAR( ${prefix}_build_time, __TIME__ );
10161 +VC_DEBUG_DECLARE_STRING_VAR( ${prefix}_build_date, __DATE__ );
10162 +
10163 +const char *vchiq_get_build_hostname( void )
10164 +{
10165 + return vchiq_build_hostname;
10166 +}
10167 +
10168 +const char *vchiq_get_build_version( void )
10169 +{
10170 + return vchiq_build_version;
10171 +}
10172 +
10173 +const char *vchiq_get_build_date( void )
10174 +{
10175 + return vchiq_build_date;
10176 +}
10177 +
10178 +const char *vchiq_get_build_time( void )
10179 +{
10180 + return vchiq_build_time;
10181 +}
10182 +EOF
10183 +
10184 +
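
For context, the script above takes a symbol prefix and a source root (per its usage message) and writes the generated C file to stdout, so the build system is expected to redirect it. An illustrative invocation, with the output file name assumed rather than taken from the patch:

	perl vchiq_genversion vchiq . > vchiq_version.c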
10185 diff --git a/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_if.h b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_if.h
10186 new file mode 100644
10187 index 0000000..50359b0
10188 --- /dev/null
10189 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_if.h
10190 @@ -0,0 +1,188 @@
10191 +/**
10192 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
10193 + *
10194 + * Redistribution and use in source and binary forms, with or without
10195 + * modification, are permitted provided that the following conditions
10196 + * are met:
10197 + * 1. Redistributions of source code must retain the above copyright
10198 + * notice, this list of conditions, and the following disclaimer,
10199 + * without modification.
10200 + * 2. Redistributions in binary form must reproduce the above copyright
10201 + * notice, this list of conditions and the following disclaimer in the
10202 + * documentation and/or other materials provided with the distribution.
10203 + * 3. The names of the above-listed copyright holders may not be used
10204 + * to endorse or promote products derived from this software without
10205 + * specific prior written permission.
10206 + *
10207 + * ALTERNATIVELY, this software may be distributed under the terms of the
10208 + * GNU General Public License ("GPL") version 2, as published by the Free
10209 + * Software Foundation.
10210 + *
10211 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
10212 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
10213 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
10214 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
10215 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
10216 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
10217 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
10218 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
10219 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
10220 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
10221 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
10222 + */
10223 +
10224 +#ifndef VCHIQ_IF_H
10225 +#define VCHIQ_IF_H
10226 +
10227 +#include "interface/vchi/vchi_mh.h"
10228 +
10229 +#define VCHIQ_SERVICE_HANDLE_INVALID 0
10230 +
10231 +#define VCHIQ_SLOT_SIZE 4096
10232 +#define VCHIQ_MAX_MSG_SIZE (VCHIQ_SLOT_SIZE - sizeof(VCHIQ_HEADER_T))
10233 +#define VCHIQ_CHANNEL_SIZE VCHIQ_MAX_MSG_SIZE /* For backwards compatibility */
10234 +
10235 +#define VCHIQ_MAKE_FOURCC(x0, x1, x2, x3) \
10236 + (((x0) << 24) | ((x1) << 16) | ((x2) << 8) | (x3))
10237 +#define VCHIQ_GET_SERVICE_USERDATA(service) vchiq_get_service_userdata(service)
10238 +#define VCHIQ_GET_SERVICE_FOURCC(service) vchiq_get_service_fourcc(service)
10239 +
10240 +typedef enum {
10241 + VCHIQ_SERVICE_OPENED, /* service, -, - */
10242 + VCHIQ_SERVICE_CLOSED, /* service, -, - */
10243 + VCHIQ_MESSAGE_AVAILABLE, /* service, header, - */
10244 + VCHIQ_BULK_TRANSMIT_DONE, /* service, -, bulk_userdata */
10245 + VCHIQ_BULK_RECEIVE_DONE, /* service, -, bulk_userdata */
10246 + VCHIQ_BULK_TRANSMIT_ABORTED, /* service, -, bulk_userdata */
10247 + VCHIQ_BULK_RECEIVE_ABORTED /* service, -, bulk_userdata */
10248 +} VCHIQ_REASON_T;
10249 +
10250 +typedef enum {
10251 + VCHIQ_ERROR = -1,
10252 + VCHIQ_SUCCESS = 0,
10253 + VCHIQ_RETRY = 1
10254 +} VCHIQ_STATUS_T;
10255 +
10256 +typedef enum {
10257 + VCHIQ_BULK_MODE_CALLBACK,
10258 + VCHIQ_BULK_MODE_BLOCKING,
10259 + VCHIQ_BULK_MODE_NOCALLBACK,
10260 + VCHIQ_BULK_MODE_WAITING /* Reserved for internal use */
10261 +} VCHIQ_BULK_MODE_T;
10262 +
10263 +typedef enum {
10264 + VCHIQ_SERVICE_OPTION_AUTOCLOSE,
10265 + VCHIQ_SERVICE_OPTION_SLOT_QUOTA,
10266 + VCHIQ_SERVICE_OPTION_MESSAGE_QUOTA,
10267 + VCHIQ_SERVICE_OPTION_SYNCHRONOUS
10268 +} VCHIQ_SERVICE_OPTION_T;
10269 +
10270 +typedef struct vchiq_header_struct {
10271 + /* The message identifier - opaque to applications. */
10272 + int msgid;
10273 +
10274 + /* Size of message data. */
10275 + unsigned int size;
10276 +
10277 + char data[0]; /* message */
10278 +} VCHIQ_HEADER_T;
10279 +
10280 +typedef struct {
10281 + const void *data;
10282 + unsigned int size;
10283 +} VCHIQ_ELEMENT_T;
10284 +
10285 +typedef unsigned int VCHIQ_SERVICE_HANDLE_T;
10286 +
10287 +typedef VCHIQ_STATUS_T (*VCHIQ_CALLBACK_T)(VCHIQ_REASON_T, VCHIQ_HEADER_T *,
10288 + VCHIQ_SERVICE_HANDLE_T, void *);
10289 +
10290 +typedef struct vchiq_service_base_struct {
10291 + int fourcc;
10292 + VCHIQ_CALLBACK_T callback;
10293 + void *userdata;
10294 +} VCHIQ_SERVICE_BASE_T;
10295 +
10296 +typedef struct vchiq_service_params_struct {
10297 + int fourcc;
10298 + VCHIQ_CALLBACK_T callback;
10299 + void *userdata;
10300 + short version; /* Increment for non-trivial changes */
10301 + short version_min; /* Update for incompatible changes */
10302 +} VCHIQ_SERVICE_PARAMS_T;
10303 +
10304 +typedef struct vchiq_config_struct {
10305 + unsigned int max_msg_size;
10306 + unsigned int bulk_threshold; /* The message size above which it
10307 + is better to use a bulk transfer
10308 + (<= max_msg_size) */
10309 + unsigned int max_outstanding_bulks;
10310 + unsigned int max_services;
10311 + short version; /* The version of VCHIQ */
10312 + short version_min; /* The minimum compatible version of VCHIQ */
10313 +} VCHIQ_CONFIG_T;
10314 +
10315 +typedef struct vchiq_instance_struct *VCHIQ_INSTANCE_T;
10316 +typedef void (*VCHIQ_REMOTE_USE_CALLBACK_T)(void *cb_arg);
10317 +
10318 +extern VCHIQ_STATUS_T vchiq_initialise(VCHIQ_INSTANCE_T *pinstance);
10319 +extern VCHIQ_STATUS_T vchiq_shutdown(VCHIQ_INSTANCE_T instance);
10320 +extern VCHIQ_STATUS_T vchiq_connect(VCHIQ_INSTANCE_T instance);
10321 +extern VCHIQ_STATUS_T vchiq_add_service(VCHIQ_INSTANCE_T instance,
10322 + const VCHIQ_SERVICE_PARAMS_T *params,
10323 + VCHIQ_SERVICE_HANDLE_T *pservice);
10324 +extern VCHIQ_STATUS_T vchiq_open_service(VCHIQ_INSTANCE_T instance,
10325 + const VCHIQ_SERVICE_PARAMS_T *params,
10326 + VCHIQ_SERVICE_HANDLE_T *pservice);
10327 +extern VCHIQ_STATUS_T vchiq_close_service(VCHIQ_SERVICE_HANDLE_T service);
10328 +extern VCHIQ_STATUS_T vchiq_remove_service(VCHIQ_SERVICE_HANDLE_T service);
10329 +extern VCHIQ_STATUS_T vchiq_use_service(VCHIQ_SERVICE_HANDLE_T service);
10330 +extern VCHIQ_STATUS_T vchiq_use_service_no_resume(
10331 + VCHIQ_SERVICE_HANDLE_T service);
10332 +extern VCHIQ_STATUS_T vchiq_release_service(VCHIQ_SERVICE_HANDLE_T service);
10333 +
10334 +extern VCHIQ_STATUS_T vchiq_queue_message(VCHIQ_SERVICE_HANDLE_T service,
10335 + const VCHIQ_ELEMENT_T *elements, unsigned int count);
10336 +extern void vchiq_release_message(VCHIQ_SERVICE_HANDLE_T service,
10337 + VCHIQ_HEADER_T *header);
10338 +extern VCHIQ_STATUS_T vchiq_queue_bulk_transmit(VCHIQ_SERVICE_HANDLE_T service,
10339 + const void *data, unsigned int size, void *userdata);
10340 +extern VCHIQ_STATUS_T vchiq_queue_bulk_receive(VCHIQ_SERVICE_HANDLE_T service,
10341 + void *data, unsigned int size, void *userdata);
10342 +extern VCHIQ_STATUS_T vchiq_queue_bulk_transmit_handle(
10343 + VCHIQ_SERVICE_HANDLE_T service, VCHI_MEM_HANDLE_T handle,
10344 + const void *offset, unsigned int size, void *userdata);
10345 +extern VCHIQ_STATUS_T vchiq_queue_bulk_receive_handle(
10346 + VCHIQ_SERVICE_HANDLE_T service, VCHI_MEM_HANDLE_T handle,
10347 + void *offset, unsigned int size, void *userdata);
10348 +extern VCHIQ_STATUS_T vchiq_bulk_transmit(VCHIQ_SERVICE_HANDLE_T service,
10349 + const void *data, unsigned int size, void *userdata,
10350 + VCHIQ_BULK_MODE_T mode);
10351 +extern VCHIQ_STATUS_T vchiq_bulk_receive(VCHIQ_SERVICE_HANDLE_T service,
10352 + void *data, unsigned int size, void *userdata,
10353 + VCHIQ_BULK_MODE_T mode);
10354 +extern VCHIQ_STATUS_T vchiq_bulk_transmit_handle(VCHIQ_SERVICE_HANDLE_T service,
10355 + VCHI_MEM_HANDLE_T handle, const void *offset, unsigned int size,
10356 + void *userdata, VCHIQ_BULK_MODE_T mode);
10357 +extern VCHIQ_STATUS_T vchiq_bulk_receive_handle(VCHIQ_SERVICE_HANDLE_T service,
10358 + VCHI_MEM_HANDLE_T handle, void *offset, unsigned int size,
10359 + void *userdata, VCHIQ_BULK_MODE_T mode);
10360 +extern int vchiq_get_client_id(VCHIQ_SERVICE_HANDLE_T service);
10361 +extern void *vchiq_get_service_userdata(VCHIQ_SERVICE_HANDLE_T service);
10362 +extern int vchiq_get_service_fourcc(VCHIQ_SERVICE_HANDLE_T service);
10363 +extern VCHIQ_STATUS_T vchiq_get_config(VCHIQ_INSTANCE_T instance,
10364 + int config_size, VCHIQ_CONFIG_T *pconfig);
10365 +extern VCHIQ_STATUS_T vchiq_set_service_option(VCHIQ_SERVICE_HANDLE_T service,
10366 + VCHIQ_SERVICE_OPTION_T option, int value);
10367 +
10368 +extern VCHIQ_STATUS_T vchiq_remote_use(VCHIQ_INSTANCE_T instance,
10369 + VCHIQ_REMOTE_USE_CALLBACK_T callback, void *cb_arg);
10370 +extern VCHIQ_STATUS_T vchiq_remote_release(VCHIQ_INSTANCE_T instance);
10371 +
10372 +extern VCHIQ_STATUS_T vchiq_dump_phys_mem(VCHIQ_SERVICE_HANDLE_T service,
10373 + void *ptr, size_t num_bytes);
10374 +
10375 +extern VCHIQ_STATUS_T vchiq_get_peer_version(VCHIQ_SERVICE_HANDLE_T handle,
10376 + short *peer_version);
10377 +
10378 +#endif /* VCHIQ_IF_H */
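
The header above is the complete kernel-side client API added by this patch. As a reading aid, here is a minimal sketch of a client built only from the declarations above; the 'ECHO' fourcc, the payload and the version numbers are invented for illustration, and error handling is trimmed to early exits:

#include "vchiq_if.h"

/* Illustrative callback: consume incoming messages and recycle the slot
 * space; other reason codes are ignored for brevity. */
static VCHIQ_STATUS_T echo_callback(VCHIQ_REASON_T reason, VCHIQ_HEADER_T *header,
	VCHIQ_SERVICE_HANDLE_T service, void *userdata)
{
	if (reason == VCHIQ_MESSAGE_AVAILABLE)
		vchiq_release_message(service, header);
	return VCHIQ_SUCCESS;
}

static VCHIQ_STATUS_T echo_client_example(void)
{
	VCHIQ_INSTANCE_T instance;
	VCHIQ_SERVICE_HANDLE_T handle;
	VCHIQ_SERVICE_PARAMS_T params = {
		.fourcc      = VCHIQ_MAKE_FOURCC('E', 'C', 'H', 'O'), /* hypothetical */
		.callback    = echo_callback,
		.userdata    = NULL,
		.version     = 1,
		.version_min = 1,
	};
	VCHIQ_ELEMENT_T element = { "hello", 5 }; /* made-up payload */
	VCHIQ_STATUS_T status;

	status = vchiq_initialise(&instance);
	if (status != VCHIQ_SUCCESS)
		return status;

	status = vchiq_connect(instance); /* must be connected before opening */
	if (status == VCHIQ_SUCCESS)
		status = vchiq_open_service(instance, &params, &handle);

	if (status == VCHIQ_SUCCESS) {
		status = vchiq_queue_message(handle, &element, 1);
		vchiq_close_service(handle);
	}

	vchiq_shutdown(instance);
	return status;
}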
10379 diff --git a/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_ioctl.h b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_ioctl.h
10380 new file mode 100644
10381 index 0000000..e248037
10382 --- /dev/null
10383 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_ioctl.h
10384 @@ -0,0 +1,129 @@
10385 +/**
10386 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
10387 + *
10388 + * Redistribution and use in source and binary forms, with or without
10389 + * modification, are permitted provided that the following conditions
10390 + * are met:
10391 + * 1. Redistributions of source code must retain the above copyright
10392 + * notice, this list of conditions, and the following disclaimer,
10393 + * without modification.
10394 + * 2. Redistributions in binary form must reproduce the above copyright
10395 + * notice, this list of conditions and the following disclaimer in the
10396 + * documentation and/or other materials provided with the distribution.
10397 + * 3. The names of the above-listed copyright holders may not be used
10398 + * to endorse or promote products derived from this software without
10399 + * specific prior written permission.
10400 + *
10401 + * ALTERNATIVELY, this software may be distributed under the terms of the
10402 + * GNU General Public License ("GPL") version 2, as published by the Free
10403 + * Software Foundation.
10404 + *
10405 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
10406 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
10407 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
10408 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
10409 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
10410 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
10411 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
10412 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
10413 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
10414 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
10415 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
10416 + */
10417 +
10418 +#ifndef VCHIQ_IOCTLS_H
10419 +#define VCHIQ_IOCTLS_H
10420 +
10421 +#include <linux/ioctl.h>
10422 +#include "vchiq_if.h"
10423 +
10424 +#define VCHIQ_IOC_MAGIC 0xc4
10425 +#define VCHIQ_INVALID_HANDLE (~0)
10426 +
10427 +typedef struct {
10428 + VCHIQ_SERVICE_PARAMS_T params;
10429 + int is_open;
10430 + int is_vchi;
10431 + unsigned int handle; /* OUT */
10432 +} VCHIQ_CREATE_SERVICE_T;
10433 +
10434 +typedef struct {
10435 + unsigned int handle;
10436 + unsigned int count;
10437 + const VCHIQ_ELEMENT_T *elements;
10438 +} VCHIQ_QUEUE_MESSAGE_T;
10439 +
10440 +typedef struct {
10441 + unsigned int handle;
10442 + void *data;
10443 + unsigned int size;
10444 + void *userdata;
10445 + VCHIQ_BULK_MODE_T mode;
10446 +} VCHIQ_QUEUE_BULK_TRANSFER_T;
10447 +
10448 +typedef struct {
10449 + VCHIQ_REASON_T reason;
10450 + VCHIQ_HEADER_T *header;
10451 + void *service_userdata;
10452 + void *bulk_userdata;
10453 +} VCHIQ_COMPLETION_DATA_T;
10454 +
10455 +typedef struct {
10456 + unsigned int count;
10457 + VCHIQ_COMPLETION_DATA_T *buf;
10458 + unsigned int msgbufsize;
10459 + unsigned int msgbufcount; /* IN/OUT */
10460 + void **msgbufs;
10461 +} VCHIQ_AWAIT_COMPLETION_T;
10462 +
10463 +typedef struct {
10464 + unsigned int handle;
10465 + int blocking;
10466 + unsigned int bufsize;
10467 + void *buf;
10468 +} VCHIQ_DEQUEUE_MESSAGE_T;
10469 +
10470 +typedef struct {
10471 + unsigned int config_size;
10472 + VCHIQ_CONFIG_T *pconfig;
10473 +} VCHIQ_GET_CONFIG_T;
10474 +
10475 +typedef struct {
10476 + unsigned int handle;
10477 + VCHIQ_SERVICE_OPTION_T option;
10478 + int value;
10479 +} VCHIQ_SET_SERVICE_OPTION_T;
10480 +
10481 +typedef struct {
10482 + void *virt_addr;
10483 + size_t num_bytes;
10484 +} VCHIQ_DUMP_MEM_T;
10485 +
10486 +#define VCHIQ_IOC_CONNECT _IO(VCHIQ_IOC_MAGIC, 0)
10487 +#define VCHIQ_IOC_SHUTDOWN _IO(VCHIQ_IOC_MAGIC, 1)
10488 +#define VCHIQ_IOC_CREATE_SERVICE \
10489 + _IOWR(VCHIQ_IOC_MAGIC, 2, VCHIQ_CREATE_SERVICE_T)
10490 +#define VCHIQ_IOC_REMOVE_SERVICE _IO(VCHIQ_IOC_MAGIC, 3)
10491 +#define VCHIQ_IOC_QUEUE_MESSAGE \
10492 + _IOW(VCHIQ_IOC_MAGIC, 4, VCHIQ_QUEUE_MESSAGE_T)
10493 +#define VCHIQ_IOC_QUEUE_BULK_TRANSMIT \
10494 + _IOWR(VCHIQ_IOC_MAGIC, 5, VCHIQ_QUEUE_BULK_TRANSFER_T)
10495 +#define VCHIQ_IOC_QUEUE_BULK_RECEIVE \
10496 + _IOWR(VCHIQ_IOC_MAGIC, 6, VCHIQ_QUEUE_BULK_TRANSFER_T)
10497 +#define VCHIQ_IOC_AWAIT_COMPLETION \
10498 + _IOWR(VCHIQ_IOC_MAGIC, 7, VCHIQ_AWAIT_COMPLETION_T)
10499 +#define VCHIQ_IOC_DEQUEUE_MESSAGE \
10500 + _IOWR(VCHIQ_IOC_MAGIC, 8, VCHIQ_DEQUEUE_MESSAGE_T)
10501 +#define VCHIQ_IOC_GET_CLIENT_ID _IO(VCHIQ_IOC_MAGIC, 9)
10502 +#define VCHIQ_IOC_GET_CONFIG \
10503 + _IOWR(VCHIQ_IOC_MAGIC, 10, VCHIQ_GET_CONFIG_T)
10504 +#define VCHIQ_IOC_CLOSE_SERVICE _IO(VCHIQ_IOC_MAGIC, 11)
10505 +#define VCHIQ_IOC_USE_SERVICE _IO(VCHIQ_IOC_MAGIC, 12)
10506 +#define VCHIQ_IOC_RELEASE_SERVICE _IO(VCHIQ_IOC_MAGIC, 13)
10507 +#define VCHIQ_IOC_SET_SERVICE_OPTION \
10508 + _IOW(VCHIQ_IOC_MAGIC, 14, VCHIQ_SET_SERVICE_OPTION_T)
10509 +#define VCHIQ_IOC_DUMP_PHYS_MEM \
10510 + _IOW(VCHIQ_IOC_MAGIC, 15, VCHIQ_DUMP_MEM_T)
10511 +#define VCHIQ_IOC_MAX 15
10512 +
10513 +#endif
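
These ioctls are issued against the driver's character device by the user-side library. A hedged user-space sketch follows; the /dev/vchiq node name is assumed here rather than defined in this header, the 'ECHO' service is again invented, and real clients would normally go through the vchi/vchiq user libraries instead of raw ioctls:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "vchiq_ioctl.h"

/* Illustrative raw-ioctl sequence: connect, then create an open service. */
int open_echo_service(void)
{
	VCHIQ_CREATE_SERVICE_T args = { 0 };
	int fd = open("/dev/vchiq", O_RDWR); /* device node name assumed */

	if (fd < 0)
		return -1;
	if (ioctl(fd, VCHIQ_IOC_CONNECT, 0) != 0) /* connect to VideoCore first */
		goto fail;

	args.params.fourcc = VCHIQ_MAKE_FOURCC('E', 'C', 'H', 'O'); /* hypothetical */
	args.params.version = 1;
	args.params.version_min = 1;
	args.is_open = 1; /* request an open (client) service */
	args.is_vchi = 0;
	if (ioctl(fd, VCHIQ_IOC_CREATE_SERVICE, &args) != 0)
		goto fail;

	printf("service handle %u\n", args.handle); /* OUT field filled by the driver */
	return fd;

fail:
	close(fd);
	return -1;
}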
10514 diff --git a/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_kern_lib.c b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_kern_lib.c
10515 new file mode 100644
10516 index 0000000..be9735f
10517 --- /dev/null
10518 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_kern_lib.c
10519 @@ -0,0 +1,456 @@
10520 +/**
10521 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
10522 + *
10523 + * Redistribution and use in source and binary forms, with or without
10524 + * modification, are permitted provided that the following conditions
10525 + * are met:
10526 + * 1. Redistributions of source code must retain the above copyright
10527 + * notice, this list of conditions, and the following disclaimer,
10528 + * without modification.
10529 + * 2. Redistributions in binary form must reproduce the above copyright
10530 + * notice, this list of conditions and the following disclaimer in the
10531 + * documentation and/or other materials provided with the distribution.
10532 + * 3. The names of the above-listed copyright holders may not be used
10533 + * to endorse or promote products derived from this software without
10534 + * specific prior written permission.
10535 + *
10536 + * ALTERNATIVELY, this software may be distributed under the terms of the
10537 + * GNU General Public License ("GPL") version 2, as published by the Free
10538 + * Software Foundation.
10539 + *
10540 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
10541 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
10542 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
10543 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
10544 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
10545 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
10546 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
10547 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
10548 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
10549 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
10550 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
10551 + */
10552 +
10553 +/* ---- Include Files ---------------------------------------------------- */
10554 +
10555 +#include <linux/kernel.h>
10556 +#include <linux/module.h>
10557 +#include <linux/mutex.h>
10558 +
10559 +#include "vchiq_core.h"
10560 +#include "vchiq_arm.h"
10561 +
10562 +/* ---- Public Variables ------------------------------------------------- */
10563 +
10564 +/* ---- Private Constants and Types -------------------------------------- */
10565 +
10566 +struct bulk_waiter_node {
10567 + struct bulk_waiter bulk_waiter;
10568 + int pid;
10569 + struct list_head list;
10570 +};
10571 +
10572 +struct vchiq_instance_struct {
10573 + VCHIQ_STATE_T *state;
10574 +
10575 + int connected;
10576 +
10577 + struct list_head bulk_waiter_list;
10578 + struct mutex bulk_waiter_list_mutex;
10579 +};
10580 +
10581 +static VCHIQ_STATUS_T
10582 +vchiq_blocking_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle, void *data,
10583 + unsigned int size, VCHIQ_BULK_DIR_T dir);
10584 +
10585 +/****************************************************************************
10586 +*
10587 +* vchiq_initialise
10588 +*
10589 +***************************************************************************/
10590 +#define VCHIQ_INIT_RETRIES 10
10591 +VCHIQ_STATUS_T vchiq_initialise(VCHIQ_INSTANCE_T *instanceOut)
10592 +{
10593 + VCHIQ_STATUS_T status = VCHIQ_ERROR;
10594 + VCHIQ_STATE_T *state;
10595 + VCHIQ_INSTANCE_T instance = NULL;
10596 + int i;
10597 +
10598 + vchiq_log_trace(vchiq_core_log_level, "%s called", __func__);
10599 +
10600 + /* VideoCore may not be ready due to boot up timing.
10601 + It may never be ready if kernel and firmware are mismatched, so don't block forever. */
10602 + for (i = 0; i < VCHIQ_INIT_RETRIES; i++) {
10603 + state = vchiq_get_state();
10604 + if (state)
10605 + break;
10606 + udelay(500);
10607 + }
10608 + if (i == VCHIQ_INIT_RETRIES) {
10609 + vchiq_log_error(vchiq_core_log_level,
10610 + "%s: videocore not initialized\n", __func__);
10611 + goto failed;
10612 + } else if (i > 0) {
10613 + vchiq_log_warning(vchiq_core_log_level,
10614 + "%s: videocore initialized after %d retries\n", __func__, i);
10615 + }
10616 +
10617 + instance = kzalloc(sizeof(*instance), GFP_KERNEL);
10618 + if (!instance) {
10619 + vchiq_log_error(vchiq_core_log_level,
10620 + "%s: error allocating vchiq instance\n", __func__);
10621 + goto failed;
10622 + }
10623 +
10624 + instance->connected = 0;
10625 + instance->state = state;
10626 + mutex_init(&instance->bulk_waiter_list_mutex);
10627 + INIT_LIST_HEAD(&instance->bulk_waiter_list);
10628 +
10629 + *instanceOut = instance;
10630 +
10631 + status = VCHIQ_SUCCESS;
10632 +
10633 +failed:
10634 + vchiq_log_trace(vchiq_core_log_level,
10635 + "%s(%p): returning %d", __func__, instance, status);
10636 +
10637 + return status;
10638 +}
10639 +EXPORT_SYMBOL(vchiq_initialise);
10640 +
10641 +/****************************************************************************
10642 +*
10643 +* vchiq_shutdown
10644 +*
10645 +***************************************************************************/
10646 +
10647 +VCHIQ_STATUS_T vchiq_shutdown(VCHIQ_INSTANCE_T instance)
10648 +{
10649 + VCHIQ_STATUS_T status;
10650 + VCHIQ_STATE_T *state = instance->state;
10651 +
10652 + vchiq_log_trace(vchiq_core_log_level,
10653 + "%s(%p) called", __func__, instance);
10654 +
10655 + if (mutex_lock_interruptible(&state->mutex) != 0)
10656 + return VCHIQ_RETRY;
10657 +
10658 + /* Remove all services */
10659 + status = vchiq_shutdown_internal(state, instance);
10660 +
10661 + mutex_unlock(&state->mutex);
10662 +
10663 + vchiq_log_trace(vchiq_core_log_level,
10664 + "%s(%p): returning %d", __func__, instance, status);
10665 +
10666 + if (status == VCHIQ_SUCCESS) {
10667 + struct list_head *pos, *next;
10668 + list_for_each_safe(pos, next,
10669 + &instance->bulk_waiter_list) {
10670 + struct bulk_waiter_node *waiter;
10671 + waiter = list_entry(pos,
10672 + struct bulk_waiter_node,
10673 + list);
10674 + list_del(pos);
10675 + vchiq_log_info(vchiq_arm_log_level,
10676 + "bulk_waiter - cleaned up %x "
10677 + "for pid %d",
10678 + (unsigned int)waiter, waiter->pid);
10679 + kfree(waiter);
10680 + }
10681 + kfree(instance);
10682 + }
10683 +
10684 + return status;
10685 +}
10686 +EXPORT_SYMBOL(vchiq_shutdown);
10687 +
10688 +/****************************************************************************
10689 +*
10690 +* vchiq_is_connected
10691 +*
10692 +***************************************************************************/
10693 +
10694 +int vchiq_is_connected(VCHIQ_INSTANCE_T instance)
10695 +{
10696 + return instance->connected;
10697 +}
10698 +
10699 +/****************************************************************************
10700 +*
10701 +* vchiq_connect
10702 +*
10703 +***************************************************************************/
10704 +
10705 +VCHIQ_STATUS_T vchiq_connect(VCHIQ_INSTANCE_T instance)
10706 +{
10707 + VCHIQ_STATUS_T status;
10708 + VCHIQ_STATE_T *state = instance->state;
10709 +
10710 + vchiq_log_trace(vchiq_core_log_level,
10711 + "%s(%p) called", __func__, instance);
10712 +
10713 + if (mutex_lock_interruptible(&state->mutex) != 0) {
10714 + vchiq_log_trace(vchiq_core_log_level,
10715 + "%s: call to mutex_lock failed", __func__);
10716 + status = VCHIQ_RETRY;
10717 + goto failed;
10718 + }
10719 + status = vchiq_connect_internal(state, instance);
10720 +
10721 + if (status == VCHIQ_SUCCESS)
10722 + instance->connected = 1;
10723 +
10724 + mutex_unlock(&state->mutex);
10725 +
10726 +failed:
10727 + vchiq_log_trace(vchiq_core_log_level,
10728 + "%s(%p): returning %d", __func__, instance, status);
10729 +
10730 + return status;
10731 +}
10732 +EXPORT_SYMBOL(vchiq_connect);
10733 +
10734 +/****************************************************************************
10735 +*
10736 +* vchiq_add_service
10737 +*
10738 +***************************************************************************/
10739 +
10740 +VCHIQ_STATUS_T vchiq_add_service(
10741 + VCHIQ_INSTANCE_T instance,
10742 + const VCHIQ_SERVICE_PARAMS_T *params,
10743 + VCHIQ_SERVICE_HANDLE_T *phandle)
10744 +{
10745 + VCHIQ_STATUS_T status;
10746 + VCHIQ_STATE_T *state = instance->state;
10747 + VCHIQ_SERVICE_T *service = NULL;
10748 + int srvstate;
10749 +
10750 + vchiq_log_trace(vchiq_core_log_level,
10751 + "%s(%p) called", __func__, instance);
10752 +
10753 + *phandle = VCHIQ_SERVICE_HANDLE_INVALID;
10754 +
10755 + srvstate = vchiq_is_connected(instance)
10756 + ? VCHIQ_SRVSTATE_LISTENING
10757 + : VCHIQ_SRVSTATE_HIDDEN;
10758 +
10759 + service = vchiq_add_service_internal(
10760 + state,
10761 + params,
10762 + srvstate,
10763 + instance,
10764 + NULL);
10765 +
10766 + if (service) {
10767 + *phandle = service->handle;
10768 + status = VCHIQ_SUCCESS;
10769 + } else
10770 + status = VCHIQ_ERROR;
10771 +
10772 + vchiq_log_trace(vchiq_core_log_level,
10773 + "%s(%p): returning %d", __func__, instance, status);
10774 +
10775 + return status;
10776 +}
10777 +EXPORT_SYMBOL(vchiq_add_service);
10778 +
10779 +/****************************************************************************
10780 +*
10781 +* vchiq_open_service
10782 +*
10783 +***************************************************************************/
10784 +
10785 +VCHIQ_STATUS_T vchiq_open_service(
10786 + VCHIQ_INSTANCE_T instance,
10787 + const VCHIQ_SERVICE_PARAMS_T *params,
10788 + VCHIQ_SERVICE_HANDLE_T *phandle)
10789 +{
10790 + VCHIQ_STATUS_T status = VCHIQ_ERROR;
10791 + VCHIQ_STATE_T *state = instance->state;
10792 + VCHIQ_SERVICE_T *service = NULL;
10793 +
10794 + vchiq_log_trace(vchiq_core_log_level,
10795 + "%s(%p) called", __func__, instance);
10796 +
10797 + *phandle = VCHIQ_SERVICE_HANDLE_INVALID;
10798 +
10799 + if (!vchiq_is_connected(instance))
10800 + goto failed;
10801 +
10802 + service = vchiq_add_service_internal(state,
10803 + params,
10804 + VCHIQ_SRVSTATE_OPENING,
10805 + instance,
10806 + NULL);
10807 +
10808 + if (service) {
10809 + status = vchiq_open_service_internal(service, current->pid);
10810 + if (status == VCHIQ_SUCCESS)
10811 + *phandle = service->handle;
10812 + else
10813 + vchiq_remove_service(service->handle);
10814 + }
10815 +
10816 +failed:
10817 + vchiq_log_trace(vchiq_core_log_level,
10818 + "%s(%p): returning %d", __func__, instance, status);
10819 +
10820 + return status;
10821 +}
10822 +EXPORT_SYMBOL(vchiq_open_service);
10823 +
10824 +VCHIQ_STATUS_T
10825 +vchiq_queue_bulk_transmit(VCHIQ_SERVICE_HANDLE_T handle,
10826 + const void *data, unsigned int size, void *userdata)
10827 +{
10828 + return vchiq_bulk_transfer(handle,
10829 + VCHI_MEM_HANDLE_INVALID, (void *)data, size, userdata,
10830 + VCHIQ_BULK_MODE_CALLBACK, VCHIQ_BULK_TRANSMIT);
10831 +}
10832 +EXPORT_SYMBOL(vchiq_queue_bulk_transmit);
10833 +
10834 +VCHIQ_STATUS_T
10835 +vchiq_queue_bulk_receive(VCHIQ_SERVICE_HANDLE_T handle, void *data,
10836 + unsigned int size, void *userdata)
10837 +{
10838 + return vchiq_bulk_transfer(handle,
10839 + VCHI_MEM_HANDLE_INVALID, data, size, userdata,
10840 + VCHIQ_BULK_MODE_CALLBACK, VCHIQ_BULK_RECEIVE);
10841 +}
10842 +EXPORT_SYMBOL(vchiq_queue_bulk_receive);
10843 +
10844 +VCHIQ_STATUS_T
10845 +vchiq_bulk_transmit(VCHIQ_SERVICE_HANDLE_T handle, const void *data,
10846 + unsigned int size, void *userdata, VCHIQ_BULK_MODE_T mode)
10847 +{
10848 + VCHIQ_STATUS_T status;
10849 +
10850 + switch (mode) {
10851 + case VCHIQ_BULK_MODE_NOCALLBACK:
10852 + case VCHIQ_BULK_MODE_CALLBACK:
10853 + status = vchiq_bulk_transfer(handle,
10854 + VCHI_MEM_HANDLE_INVALID, (void *)data, size, userdata,
10855 + mode, VCHIQ_BULK_TRANSMIT);
10856 + break;
10857 + case VCHIQ_BULK_MODE_BLOCKING:
10858 + status = vchiq_blocking_bulk_transfer(handle,
10859 + (void *)data, size, VCHIQ_BULK_TRANSMIT);
10860 + break;
10861 + default:
10862 + return VCHIQ_ERROR;
10863 + }
10864 +
10865 + return status;
10866 +}
10867 +EXPORT_SYMBOL(vchiq_bulk_transmit);
10868 +
10869 +VCHIQ_STATUS_T
10870 +vchiq_bulk_receive(VCHIQ_SERVICE_HANDLE_T handle, void *data,
10871 + unsigned int size, void *userdata, VCHIQ_BULK_MODE_T mode)
10872 +{
10873 + VCHIQ_STATUS_T status;
10874 +
10875 + switch (mode) {
10876 + case VCHIQ_BULK_MODE_NOCALLBACK:
10877 + case VCHIQ_BULK_MODE_CALLBACK:
10878 + status = vchiq_bulk_transfer(handle,
10879 + VCHI_MEM_HANDLE_INVALID, data, size, userdata,
10880 + mode, VCHIQ_BULK_RECEIVE);
10881 + break;
10882 + case VCHIQ_BULK_MODE_BLOCKING:
10883 + status = vchiq_blocking_bulk_transfer(handle,
10884 + (void *)data, size, VCHIQ_BULK_RECEIVE);
10885 + break;
10886 + default:
10887 + return VCHIQ_ERROR;
10888 + }
10889 +
10890 + return status;
10891 +}
10892 +EXPORT_SYMBOL(vchiq_bulk_receive);
10893 +
10894 +static VCHIQ_STATUS_T
10895 +vchiq_blocking_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle, void *data,
10896 + unsigned int size, VCHIQ_BULK_DIR_T dir)
10897 +{
10898 + VCHIQ_INSTANCE_T instance;
10899 + VCHIQ_SERVICE_T *service;
10900 + VCHIQ_STATUS_T status;
10901 + struct bulk_waiter_node *waiter = NULL;
10902 + struct list_head *pos;
10903 +
10904 + service = find_service_by_handle(handle);
10905 + if (!service)
10906 + return VCHIQ_ERROR;
10907 +
10908 + instance = service->instance;
10909 +
10910 + unlock_service(service);
10911 +
10912 + mutex_lock(&instance->bulk_waiter_list_mutex);
10913 + list_for_each(pos, &instance->bulk_waiter_list) {
10914 + if (list_entry(pos, struct bulk_waiter_node,
10915 + list)->pid == current->pid) {
10916 + waiter = list_entry(pos,
10917 + struct bulk_waiter_node,
10918 + list);
10919 + list_del(pos);
10920 + break;
10921 + }
10922 + }
10923 + mutex_unlock(&instance->bulk_waiter_list_mutex);
10924 +
10925 + if (waiter) {
10926 + VCHIQ_BULK_T *bulk = waiter->bulk_waiter.bulk;
10927 + if (bulk) {
10928 + /* This thread has an outstanding bulk transfer. */
10929 + if ((bulk->data != data) ||
10930 + (bulk->size != size)) {
10931 + /* This is not a retry of the previous one.
10932 + ** Cancel the signal when the transfer
10933 + ** completes. */
10934 + spin_lock(&bulk_waiter_spinlock);
10935 + bulk->userdata = NULL;
10936 + spin_unlock(&bulk_waiter_spinlock);
10937 + }
10938 + }
10939 + }
10940 +
10941 + if (!waiter) {
10942 + waiter = kzalloc(sizeof(struct bulk_waiter_node), GFP_KERNEL);
10943 + if (!waiter) {
10944 + vchiq_log_error(vchiq_core_log_level,
10945 + "%s - out of memory", __func__);
10946 + return VCHIQ_ERROR;
10947 + }
10948 + }
10949 +
10950 + status = vchiq_bulk_transfer(handle, VCHI_MEM_HANDLE_INVALID,
10951 + data, size, &waiter->bulk_waiter, VCHIQ_BULK_MODE_BLOCKING,
10952 + dir);
10953 + if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) ||
10954 + !waiter->bulk_waiter.bulk) {
10955 + VCHIQ_BULK_T *bulk = waiter->bulk_waiter.bulk;
10956 + if (bulk) {
10957 + /* Cancel the signal when the transfer
10958 + ** completes. */
10959 + spin_lock(&bulk_waiter_spinlock);
10960 + bulk->userdata = NULL;
10961 + spin_unlock(&bulk_waiter_spinlock);
10962 + }
10963 + kfree(waiter);
10964 + } else {
10965 + waiter->pid = current->pid;
10966 + mutex_lock(&instance->bulk_waiter_list_mutex);
10967 + list_add(&waiter->list, &instance->bulk_waiter_list);
10968 + mutex_unlock(&instance->bulk_waiter_list_mutex);
10969 + vchiq_log_info(vchiq_arm_log_level,
10970 + "saved bulk_waiter %x for pid %d",
10971 + (unsigned int)waiter, current->pid);
10972 + }
10973 +
10974 + return status;
10975 +}
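
vchiq_blocking_bulk_transfer() above keeps one bulk_waiter_node per calling pid so that a transfer interrupted by a non-fatal signal can be resumed: callers are expected to repeat the same call with the same handle, data and size while VCHIQ_RETRY is returned. A minimal caller-side sketch, assuming an already-open service handle and a kernel buffer; the helper name is invented:

#include <linux/sched.h>	/* current, fatal_signal_pending() */
#include "vchiq_if.h"

/* Illustrative only: repeat an interrupted blocking bulk transmit, matching
 * the retry contract of vchiq_blocking_bulk_transfer() above. */
static VCHIQ_STATUS_T send_bulk_blocking(VCHIQ_SERVICE_HANDLE_T handle,
	const void *buf, unsigned int len)
{
	VCHIQ_STATUS_T status;

	do {
		status = vchiq_bulk_transmit(handle, buf, len, NULL,
			VCHIQ_BULK_MODE_BLOCKING);
	} while ((status == VCHIQ_RETRY) && !fatal_signal_pending(current));

	return status;
}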
10976 diff --git a/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_memdrv.h b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_memdrv.h
10977 new file mode 100644
10978 index 0000000..d02e776
10979 --- /dev/null
10980 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_memdrv.h
10981 @@ -0,0 +1,71 @@
10982 +/**
10983 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
10984 + *
10985 + * Redistribution and use in source and binary forms, with or without
10986 + * modification, are permitted provided that the following conditions
10987 + * are met:
10988 + * 1. Redistributions of source code must retain the above copyright
10989 + * notice, this list of conditions, and the following disclaimer,
10990 + * without modification.
10991 + * 2. Redistributions in binary form must reproduce the above copyright
10992 + * notice, this list of conditions and the following disclaimer in the
10993 + * documentation and/or other materials provided with the distribution.
10994 + * 3. The names of the above-listed copyright holders may not be used
10995 + * to endorse or promote products derived from this software without
10996 + * specific prior written permission.
10997 + *
10998 + * ALTERNATIVELY, this software may be distributed under the terms of the
10999 + * GNU General Public License ("GPL") version 2, as published by the Free
11000 + * Software Foundation.
11001 + *
11002 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
11003 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
11004 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
11005 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
11006 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
11007 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
11008 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
11009 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
11010 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
11011 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
11012 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
11013 + */
11014 +
11015 +#ifndef VCHIQ_MEMDRV_H
11016 +#define VCHIQ_MEMDRV_H
11017 +
11018 +/* ---- Include Files ----------------------------------------------------- */
11019 +
11020 +#include <linux/kernel.h>
11021 +#include "vchiq_if.h"
11022 +
11023 +/* ---- Constants and Types ---------------------------------------------- */
11024 +
11025 +typedef struct {
11026 + void *armSharedMemVirt;
11027 + dma_addr_t armSharedMemPhys;
11028 + size_t armSharedMemSize;
11029 +
11030 + void *vcSharedMemVirt;
11031 + dma_addr_t vcSharedMemPhys;
11032 + size_t vcSharedMemSize;
11033 +} VCHIQ_SHARED_MEM_INFO_T;
11034 +
11035 +/* ---- Variable Externs ------------------------------------------------- */
11036 +
11037 +/* ---- Function Prototypes ---------------------------------------------- */
11038 +
11039 +void vchiq_get_shared_mem_info(VCHIQ_SHARED_MEM_INFO_T *info);
11040 +
11041 +VCHIQ_STATUS_T vchiq_memdrv_initialise(void);
11042 +
11043 +VCHIQ_STATUS_T vchiq_userdrv_create_instance(
11044 + const VCHIQ_PLATFORM_DATA_T * platform_data);
11045 +
11046 +VCHIQ_STATUS_T vchiq_userdrv_suspend(
11047 + const VCHIQ_PLATFORM_DATA_T * platform_data);
11048 +
11049 +VCHIQ_STATUS_T vchiq_userdrv_resume(
11050 + const VCHIQ_PLATFORM_DATA_T * platform_data);
11051 +
11052 +#endif
11053 diff --git a/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_pagelist.h b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_pagelist.h
11054 new file mode 100644
11055 index 0000000..54a3ece
11056 --- /dev/null
11057 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_pagelist.h
11058 @@ -0,0 +1,58 @@
11059 +/**
11060 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
11061 + *
11062 + * Redistribution and use in source and binary forms, with or without
11063 + * modification, are permitted provided that the following conditions
11064 + * are met:
11065 + * 1. Redistributions of source code must retain the above copyright
11066 + * notice, this list of conditions, and the following disclaimer,
11067 + * without modification.
11068 + * 2. Redistributions in binary form must reproduce the above copyright
11069 + * notice, this list of conditions and the following disclaimer in the
11070 + * documentation and/or other materials provided with the distribution.
11071 + * 3. The names of the above-listed copyright holders may not be used
11072 + * to endorse or promote products derived from this software without
11073 + * specific prior written permission.
11074 + *
11075 + * ALTERNATIVELY, this software may be distributed under the terms of the
11076 + * GNU General Public License ("GPL") version 2, as published by the Free
11077 + * Software Foundation.
11078 + *
11079 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
11080 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
11081 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
11082 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
11083 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
11084 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
11085 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
11086 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
11087 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
11088 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
11089 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
11090 + */
11091 +
11092 +#ifndef VCHIQ_PAGELIST_H
11093 +#define VCHIQ_PAGELIST_H
11094 +
11095 +#ifndef PAGE_SIZE
11096 +#define PAGE_SIZE 4096
11097 +#endif
11098 +#define CACHE_LINE_SIZE 32
11099 +#define PAGELIST_WRITE 0
11100 +#define PAGELIST_READ 1
11101 +#define PAGELIST_READ_WITH_FRAGMENTS 2
11102 +
11103 +typedef struct pagelist_struct {
11104 + unsigned long length;
11105 + unsigned short type;
11106 + unsigned short offset;
11107 + unsigned long addrs[1]; /* N.B. 12 LSBs hold the number of following
11108 + pages at consecutive addresses. */
11109 +} PAGELIST_T;
11110 +
11111 +typedef struct fragments_struct {
11112 + char headbuf[CACHE_LINE_SIZE];
11113 + char tailbuf[CACHE_LINE_SIZE];
11114 +} FRAGMENTS_T;
11115 +
11116 +#endif /* VCHIQ_PAGELIST_H */
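
Each PAGELIST_T addrs[] entry above packs a page-aligned address together with a run length in its 12 least-significant bits, i.e. the number of further pages that follow at consecutive addresses. A small illustrative decode, assuming the 4096-byte PAGE_SIZE fallback defined above; the helper is not a function from this patch:

/* Split one addrs[] entry into its page-aligned base and the count of
 * additional consecutive pages encoded in the low 12 bits. */
static inline void pagelist_entry_decode(unsigned long entry,
	unsigned long *base, unsigned int *following_pages)
{
	*base = entry & ~((unsigned long)PAGE_SIZE - 1);
	*following_pages = entry & (PAGE_SIZE - 1);
}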
11117 diff --git a/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_proc.c b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_proc.c
11118 new file mode 100644
11119 index 0000000..dc3bdda
11120 --- /dev/null
11121 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_proc.c
11122 @@ -0,0 +1,254 @@
11123 +/**
11124 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
11125 + *
11126 + * Redistribution and use in source and binary forms, with or without
11127 + * modification, are permitted provided that the following conditions
11128 + * are met:
11129 + * 1. Redistributions of source code must retain the above copyright
11130 + * notice, this list of conditions, and the following disclaimer,
11131 + * without modification.
11132 + * 2. Redistributions in binary form must reproduce the above copyright
11133 + * notice, this list of conditions and the following disclaimer in the
11134 + * documentation and/or other materials provided with the distribution.
11135 + * 3. The names of the above-listed copyright holders may not be used
11136 + * to endorse or promote products derived from this software without
11137 + * specific prior written permission.
11138 + *
11139 + * ALTERNATIVELY, this software may be distributed under the terms of the
11140 + * GNU General Public License ("GPL") version 2, as published by the Free
11141 + * Software Foundation.
11142 + *
11143 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
11144 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
11145 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
11146 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
11147 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
11148 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
11149 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
11150 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
11151 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
11152 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
11153 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
11154 + */
11155 +
11156 +
11157 +#include <linux/proc_fs.h>
11158 +#include "vchiq_core.h"
11159 +#include "vchiq_arm.h"
11160 +
11161 +#if 1
11162 +
11163 +int vchiq_proc_init(void)
11164 +{
11165 + return 0;
11166 +}
11167 +
11168 +void vchiq_proc_deinit(void)
11169 +{
11170 +}
11171 +
11172 +#else
11173 +
11174 +struct vchiq_proc_info {
11175 + /* Global 'vc' proc entry used by all instances */
11176 + struct proc_dir_entry *vc_cfg_dir;
11177 +
11178 + /* one entry per client process */
11179 + struct proc_dir_entry *clients;
11180 +
11181 + /* log categories */
11182 + struct proc_dir_entry *log_categories;
11183 +};
11184 +
11185 +static struct vchiq_proc_info proc_info;
11186 +
11187 +struct proc_dir_entry *vchiq_proc_top(void)
11188 +{
11189 + BUG_ON(proc_info.vc_cfg_dir == NULL);
11190 + return proc_info.vc_cfg_dir;
11191 +}
11192 +
11193 +/****************************************************************************
11194 +*
11195 +* log category entries
11196 +*
11197 +***************************************************************************/
11198 +#define PROC_WRITE_BUF_SIZE 256
11199 +
11200 +#define VCHIQ_LOG_ERROR_STR "error"
11201 +#define VCHIQ_LOG_WARNING_STR "warning"
11202 +#define VCHIQ_LOG_INFO_STR "info"
11203 +#define VCHIQ_LOG_TRACE_STR "trace"
11204 +
11205 +static int log_cfg_read(char *buffer,
11206 + char **start,
11207 + off_t off,
11208 + int count,
11209 + int *eof,
11210 + void *data)
11211 +{
11212 + int len = 0;
11213 + char *log_value = NULL;
11214 +
11215 + switch (*((int *)data)) {
11216 + case VCHIQ_LOG_ERROR:
11217 + log_value = VCHIQ_LOG_ERROR_STR;
11218 + break;
11219 + case VCHIQ_LOG_WARNING:
11220 + log_value = VCHIQ_LOG_WARNING_STR;
11221 + break;
11222 + case VCHIQ_LOG_INFO:
11223 + log_value = VCHIQ_LOG_INFO_STR;
11224 + break;
11225 + case VCHIQ_LOG_TRACE:
11226 + log_value = VCHIQ_LOG_TRACE_STR;
11227 + break;
11228 + default:
11229 + break;
11230 + }
11231 +
11232 + len += sprintf(buffer + len,
11233 + "%s\n",
11234 + log_value ? log_value : "(null)");
11235 +
11236 + return len;
11237 +}
11238 +
11239 +
11240 +static int log_cfg_write(struct file *file,
11241 + const char __user *buffer,
11242 + unsigned long count,
11243 + void *data)
11244 +{
11245 + int *log_module = data;
11246 + char kbuf[PROC_WRITE_BUF_SIZE + 1];
11247 +
11248 + (void)file;
11249 +
11250 + memset(kbuf, 0, PROC_WRITE_BUF_SIZE + 1);
11251 + if (count >= PROC_WRITE_BUF_SIZE)
11252 + count = PROC_WRITE_BUF_SIZE;
11253 +
11254 + if (copy_from_user(kbuf,
11255 + buffer,
11256 + count) != 0)
11257 + return -EFAULT;
11258 + kbuf[count - 1] = 0;
11259 +
11260 + if (strncmp("error", kbuf, strlen("error")) == 0)
11261 + *log_module = VCHIQ_LOG_ERROR;
11262 + else if (strncmp("warning", kbuf, strlen("warning")) == 0)
11263 + *log_module = VCHIQ_LOG_WARNING;
11264 + else if (strncmp("info", kbuf, strlen("info")) == 0)
11265 + *log_module = VCHIQ_LOG_INFO;
11266 + else if (strncmp("trace", kbuf, strlen("trace")) == 0)
11267 + *log_module = VCHIQ_LOG_TRACE;
11268 + else
11269 + *log_module = VCHIQ_LOG_DEFAULT;
11270 +
11271 + return count;
11272 +}
11273 +
11274 +/* Log category proc entries */
11275 +struct vchiq_proc_log_entry {
11276 + const char *name;
11277 + int *plevel;
11278 + struct proc_dir_entry *dir;
11279 +};
11280 +
11281 +static struct vchiq_proc_log_entry vchiq_proc_log_entries[] = {
11282 + { "core", &vchiq_core_log_level },
11283 + { "msg", &vchiq_core_msg_log_level },
11284 + { "sync", &vchiq_sync_log_level },
11285 + { "susp", &vchiq_susp_log_level },
11286 + { "arm", &vchiq_arm_log_level },
11287 +};
11288 +static int n_log_entries =
11289 + sizeof(vchiq_proc_log_entries)/sizeof(vchiq_proc_log_entries[0]);
11290 +
11291 +/* create an entry under /proc/vc/log for each log category */
11292 +static int vchiq_proc_create_log_entries(struct proc_dir_entry *top)
11293 +{
11294 + struct proc_dir_entry *dir;
11295 + size_t i;
11296 + int ret = 0;
11297 + dir = proc_mkdir("log", proc_info.vc_cfg_dir);
11298 + if (!dir)
11299 + return -ENOMEM;
11300 + proc_info.log_categories = dir;
11301 +
11302 + for (i = 0; i < n_log_entries; i++) {
11303 + dir = create_proc_entry(vchiq_proc_log_entries[i].name,
11304 + 0644,
11305 + proc_info.log_categories);
11306 + if (!dir) {
11307 + ret = -ENOMEM;
11308 + break;
11309 + }
11310 +
11311 + dir->read_proc = &log_cfg_read;
11312 + dir->write_proc = &log_cfg_write;
11313 + dir->data = (void *)vchiq_proc_log_entries[i].plevel;
11314 +
11315 + vchiq_proc_log_entries[i].dir = dir;
11316 + }
11317 + return ret;
11318 +}
11319 +
11320 +
11321 +int vchiq_proc_init(void)
11322 +{
11323 + BUG_ON(proc_info.vc_cfg_dir != NULL);
11324 +
11325 + proc_info.vc_cfg_dir = proc_mkdir("vc", NULL);
11326 + if (proc_info.vc_cfg_dir == NULL)
11327 + goto fail;
11328 +
11329 + proc_info.clients = proc_mkdir("clients",
11330 + proc_info.vc_cfg_dir);
11331 + if (!proc_info.clients)
11332 + goto fail;
11333 +
11334 + if (vchiq_proc_create_log_entries(proc_info.vc_cfg_dir) != 0)
11335 + goto fail;
11336 +
11337 + return 0;
11338 +
11339 +fail:
11340 + vchiq_proc_deinit();
11341 + vchiq_log_error(vchiq_arm_log_level,
11342 + "%s: failed to create proc directory",
11343 + __func__);
11344 +
11345 + return -ENOMEM;
11346 +}
11347 +
11348 +/* remove all the proc entries */
11349 +void vchiq_proc_deinit(void)
11350 +{
11351 + /* log category entries */
11352 + if (proc_info.log_categories) {
11353 + size_t i;
11354 + for (i = 0; i < n_log_entries; i++)
11355 + if (vchiq_proc_log_entries[i].dir)
11356 + remove_proc_entry(
11357 + vchiq_proc_log_entries[i].name,
11358 + proc_info.log_categories);
11359 +
11360 + remove_proc_entry(proc_info.log_categories->name,
11361 + proc_info.vc_cfg_dir);
11362 + }
11363 + if (proc_info.clients)
11364 + remove_proc_entry(proc_info.clients->name,
11365 + proc_info.vc_cfg_dir);
11366 + if (proc_info.vc_cfg_dir)
11367 + remove_proc_entry(proc_info.vc_cfg_dir->name, NULL);
11368 +}
11369 +
11370 +struct proc_dir_entry *vchiq_clients_top(void)
11371 +{
11372 + return proc_info.clients;
11373 +}
11374 +
11375 +#endif
11376 +
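As a usage note for the proc interface created above: log_cfg_write() accepts the level names "error", "warning", "info" and "trace" (anything else falls back to VCHIQ_LOG_DEFAULT), and log_cfg_read() echoes the current level back, so each category can be tuned from userspace under /proc/vc/log/. A minimal userspace sketch follows; it is illustrative only, not part of the patch, and assumes procfs is mounted at /proc.

/* Illustrative userspace sketch (not part of the patch): set the "core"
 * category to "trace" and read it back through the entries created by
 * vchiq_proc_create_log_entries() above. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char buf[32] = "";
	int fd = open("/proc/vc/log/core", O_WRONLY);

	if (fd < 0)
		return 1;
	if (write(fd, "trace\n", strlen("trace\n")) < 0)
		perror("write");
	close(fd);

	fd = open("/proc/vc/log/core", O_RDONLY);
	if (fd < 0)
		return 1;
	if (read(fd, buf, sizeof(buf) - 1) < 0)
		perror("read");
	close(fd);
	printf("core log level: %s", buf);	/* expect "trace" */
	return 0;
}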
11377 diff --git a/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_shim.c b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_shim.c
11378 new file mode 100644
11379 index 0000000..f752f8d
11380 --- /dev/null
11381 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_shim.c
11382 @@ -0,0 +1,815 @@
11383 +/**
11384 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
11385 + *
11386 + * Redistribution and use in source and binary forms, with or without
11387 + * modification, are permitted provided that the following conditions
11388 + * are met:
11389 + * 1. Redistributions of source code must retain the above copyright
11390 + * notice, this list of conditions, and the following disclaimer,
11391 + * without modification.
11392 + * 2. Redistributions in binary form must reproduce the above copyright
11393 + * notice, this list of conditions and the following disclaimer in the
11394 + * documentation and/or other materials provided with the distribution.
11395 + * 3. The names of the above-listed copyright holders may not be used
11396 + * to endorse or promote products derived from this software without
11397 + * specific prior written permission.
11398 + *
11399 + * ALTERNATIVELY, this software may be distributed under the terms of the
11400 + * GNU General Public License ("GPL") version 2, as published by the Free
11401 + * Software Foundation.
11402 + *
11403 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
11404 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
11405 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
11406 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
11407 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
11408 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
11409 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
11410 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
11411 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
11412 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
11413 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
11414 + */
11415 +#include <linux/module.h>
11416 +#include <linux/types.h>
11417 +
11418 +#include "interface/vchi/vchi.h"
11419 +#include "vchiq.h"
11420 +#include "vchiq_core.h"
11421 +
11422 +#include "vchiq_util.h"
11423 +
11424 +#include <linux/stddef.h>
11425 +
11426 +#define vchiq_status_to_vchi(status) ((int32_t)status)
11427 +
11428 +typedef struct {
11429 + VCHIQ_SERVICE_HANDLE_T handle;
11430 +
11431 + VCHIU_QUEUE_T queue;
11432 +
11433 + VCHI_CALLBACK_T callback;
11434 + void *callback_param;
11435 +} SHIM_SERVICE_T;
11436 +
11437 +/* ----------------------------------------------------------------------
11438 + * return pointer to the mphi message driver function table
11439 + * -------------------------------------------------------------------- */
11440 +const VCHI_MESSAGE_DRIVER_T *
11441 +vchi_mphi_message_driver_func_table(void)
11442 +{
11443 + return NULL;
11444 +}
11445 +
11446 +/* ----------------------------------------------------------------------
11447 + * return a pointer to the 'single' connection driver fops
11448 + * -------------------------------------------------------------------- */
11449 +const VCHI_CONNECTION_API_T *
11450 +single_get_func_table(void)
11451 +{
11452 + return NULL;
11453 +}
11454 +
11455 +VCHI_CONNECTION_T *vchi_create_connection(
11456 + const VCHI_CONNECTION_API_T *function_table,
11457 + const VCHI_MESSAGE_DRIVER_T *low_level)
11458 +{
11459 + (void)function_table;
11460 + (void)low_level;
11461 + return NULL;
11462 +}
11463 +
11464 +/***********************************************************
11465 + * Name: vchi_msg_peek
11466 + *
11467 + * Arguments: const VCHI_SERVICE_HANDLE_T handle,
11468 + * void **data,
11469 + * uint32_t *msg_size,
11470 + *
11471 + *
11472 + * VCHI_FLAGS_T flags
11473 + *
11474 + * Description: Routine to return a pointer to the current message (to allow
11475 + * in-place processing). The message can be removed using
11476 + * vchi_msg_remove when you're finished.
11477 + *
11478 + * Returns: int32_t - success == 0
11479 + *
11480 + ***********************************************************/
11481 +int32_t vchi_msg_peek(VCHI_SERVICE_HANDLE_T handle,
11482 + void **data,
11483 + uint32_t *msg_size,
11484 + VCHI_FLAGS_T flags)
11485 +{
11486 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
11487 + VCHIQ_HEADER_T *header;
11488 +
11489 + WARN_ON((flags != VCHI_FLAGS_NONE) &&
11490 + (flags != VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE));
11491 +
11492 + if (flags == VCHI_FLAGS_NONE)
11493 + if (vchiu_queue_is_empty(&service->queue))
11494 + return -1;
11495 +
11496 + header = vchiu_queue_peek(&service->queue);
11497 +
11498 + *data = header->data;
11499 + *msg_size = header->size;
11500 +
11501 + return 0;
11502 +}
11503 +EXPORT_SYMBOL(vchi_msg_peek);
11504 +
11505 +/***********************************************************
11506 + * Name: vchi_msg_remove
11507 + *
11508 + * Arguments: const VCHI_SERVICE_HANDLE_T handle,
11509 + *
11510 + * Description: Routine to remove a message (after it has been read with
11511 + * vchi_msg_peek)
11512 + *
11513 + * Returns: int32_t - success == 0
11514 + *
11515 + ***********************************************************/
11516 +int32_t vchi_msg_remove(VCHI_SERVICE_HANDLE_T handle)
11517 +{
11518 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
11519 + VCHIQ_HEADER_T *header;
11520 +
11521 + header = vchiu_queue_pop(&service->queue);
11522 +
11523 + vchiq_release_message(service->handle, header);
11524 +
11525 + return 0;
11526 +}
11527 +EXPORT_SYMBOL(vchi_msg_remove);
11528 +
11529 +/***********************************************************
11530 + * Name: vchi_msg_queue
11531 + *
11532 + * Arguments: VCHI_SERVICE_HANDLE_T handle,
11533 + * const void *data,
11534 + * uint32_t data_size,
11535 + * VCHI_FLAGS_T flags,
11536 + * void *msg_handle,
11537 + *
11538 + * Description: Thin wrapper to queue a message onto a connection
11539 + *
11540 + * Returns: int32_t - success == 0
11541 + *
11542 + ***********************************************************/
11543 +int32_t vchi_msg_queue(VCHI_SERVICE_HANDLE_T handle,
11544 + const void *data,
11545 + uint32_t data_size,
11546 + VCHI_FLAGS_T flags,
11547 + void *msg_handle)
11548 +{
11549 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
11550 + VCHIQ_ELEMENT_T element = {data, data_size};
11551 + VCHIQ_STATUS_T status;
11552 +
11553 + (void)msg_handle;
11554 +
11555 + WARN_ON(flags != VCHI_FLAGS_BLOCK_UNTIL_QUEUED);
11556 +
11557 + status = vchiq_queue_message(service->handle, &element, 1);
11558 +
11559 + /* vchiq_queue_message() may return VCHIQ_RETRY, so we need to
11560 + ** implement a retry mechanism since this function is supposed
11561 + ** to block until queued
11562 + */
11563 + while (status == VCHIQ_RETRY) {
11564 + msleep(1);
11565 + status = vchiq_queue_message(service->handle, &element, 1);
11566 + }
11567 +
11568 + return vchiq_status_to_vchi(status);
11569 +}
11570 +EXPORT_SYMBOL(vchi_msg_queue);
11571 +
11572 +/***********************************************************
11573 + * Name: vchi_bulk_queue_receive
11574 + *
11575 + * Arguments: VCHI_SERVICE_HANDLE_T handle,
11576 + * void *data_dst,
11577 + * uint32_t data_size,
11578 + * VCHI_FLAGS_T flags,
11579 + * void *bulk_handle
11580 + *
11581 + * Description: Routine to set up a receive buffer
11582 + *
11583 + * Returns: int32_t - success == 0
11584 + *
11585 + ***********************************************************/
11586 +int32_t vchi_bulk_queue_receive(VCHI_SERVICE_HANDLE_T handle,
11587 + void *data_dst,
11588 + uint32_t data_size,
11589 + VCHI_FLAGS_T flags,
11590 + void *bulk_handle)
11591 +{
11592 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
11593 + VCHIQ_BULK_MODE_T mode;
11594 + VCHIQ_STATUS_T status;
11595 +
11596 + switch ((int)flags) {
11597 + case VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE
11598 + | VCHI_FLAGS_BLOCK_UNTIL_QUEUED:
11599 + WARN_ON(!service->callback);
11600 + mode = VCHIQ_BULK_MODE_CALLBACK;
11601 + break;
11602 + case VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE:
11603 + mode = VCHIQ_BULK_MODE_BLOCKING;
11604 + break;
11605 + case VCHI_FLAGS_BLOCK_UNTIL_QUEUED:
11606 + case VCHI_FLAGS_NONE:
11607 + mode = VCHIQ_BULK_MODE_NOCALLBACK;
11608 + break;
11609 + default:
11610 + WARN(1, "unsupported message\n");
11611 + return vchiq_status_to_vchi(VCHIQ_ERROR);
11612 + }
11613 +
11614 + status = vchiq_bulk_receive(service->handle, data_dst, data_size,
11615 + bulk_handle, mode);
11616 +
11617 + /* vchiq_bulk_receive() may return VCHIQ_RETRY, so we need to
11618 + ** implement a retry mechanism since this function is supposed
11619 + ** to block until queued
11620 + */
11621 + while (status == VCHIQ_RETRY) {
11622 + msleep(1);
11623 + status = vchiq_bulk_receive(service->handle, data_dst,
11624 + data_size, bulk_handle, mode);
11625 + }
11626 +
11627 + return vchiq_status_to_vchi(status);
11628 +}
11629 +EXPORT_SYMBOL(vchi_bulk_queue_receive);
11630 +
11631 +/***********************************************************
11632 + * Name: vchi_bulk_queue_transmit
11633 + *
11634 + * Arguments: VCHI_SERVICE_HANDLE_T handle,
11635 + * const void *data_src,
11636 + * uint32_t data_size,
11637 + * VCHI_FLAGS_T flags,
11638 + * void *bulk_handle
11639 + *
11640 + * Description: Routine to transmit some data
11641 + *
11642 + * Returns: int32_t - success == 0
11643 + *
11644 + ***********************************************************/
11645 +int32_t vchi_bulk_queue_transmit(VCHI_SERVICE_HANDLE_T handle,
11646 + const void *data_src,
11647 + uint32_t data_size,
11648 + VCHI_FLAGS_T flags,
11649 + void *bulk_handle)
11650 +{
11651 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
11652 + VCHIQ_BULK_MODE_T mode;
11653 + VCHIQ_STATUS_T status;
11654 +
11655 + switch ((int)flags) {
11656 + case VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE
11657 + | VCHI_FLAGS_BLOCK_UNTIL_QUEUED:
11658 + WARN_ON(!service->callback);
11659 + mode = VCHIQ_BULK_MODE_CALLBACK;
11660 + break;
11661 + case VCHI_FLAGS_BLOCK_UNTIL_DATA_READ:
11662 + case VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE:
11663 + mode = VCHIQ_BULK_MODE_BLOCKING;
11664 + break;
11665 + case VCHI_FLAGS_BLOCK_UNTIL_QUEUED:
11666 + case VCHI_FLAGS_NONE:
11667 + mode = VCHIQ_BULK_MODE_NOCALLBACK;
11668 + break;
11669 + default:
11670 + WARN(1, "unsupported message\n");
11671 + return vchiq_status_to_vchi(VCHIQ_ERROR);
11672 + }
11673 +
11674 + status = vchiq_bulk_transmit(service->handle, data_src, data_size,
11675 + bulk_handle, mode);
11676 +
11677 + /* vchiq_bulk_transmit() may return VCHIQ_RETRY, so we need to
11678 + ** implement a retry mechanism since this function is supposed
11679 + ** to block until queued
11680 + */
11681 + while (status == VCHIQ_RETRY) {
11682 + msleep(1);
11683 + status = vchiq_bulk_transmit(service->handle, data_src,
11684 + data_size, bulk_handle, mode);
11685 + }
11686 +
11687 + return vchiq_status_to_vchi(status);
11688 +}
11689 +EXPORT_SYMBOL(vchi_bulk_queue_transmit);
11690 +
11691 +/***********************************************************
11692 + * Name: vchi_msg_dequeue
11693 + *
11694 + * Arguments: VCHI_SERVICE_HANDLE_T handle,
11695 + * void *data,
11696 + * uint32_t max_data_size_to_read,
11697 + * uint32_t *actual_msg_size
11698 + * VCHI_FLAGS_T flags
11699 + *
11700 + * Description: Routine to dequeue a message into the supplied buffer
11701 + *
11702 + * Returns: int32_t - success == 0
11703 + *
11704 + ***********************************************************/
11705 +int32_t vchi_msg_dequeue(VCHI_SERVICE_HANDLE_T handle,
11706 + void *data,
11707 + uint32_t max_data_size_to_read,
11708 + uint32_t *actual_msg_size,
11709 + VCHI_FLAGS_T flags)
11710 +{
11711 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
11712 + VCHIQ_HEADER_T *header;
11713 +
11714 + WARN_ON((flags != VCHI_FLAGS_NONE) &&
11715 + (flags != VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE));
11716 +
11717 + if (flags == VCHI_FLAGS_NONE)
11718 + if (vchiu_queue_is_empty(&service->queue))
11719 + return -1;
11720 +
11721 + header = vchiu_queue_pop(&service->queue);
11722 +
11723 + memcpy(data, header->data, header->size < max_data_size_to_read ?
11724 + header->size : max_data_size_to_read);
11725 +
11726 + *actual_msg_size = header->size;
11727 +
11728 + vchiq_release_message(service->handle, header);
11729 +
11730 + return 0;
11731 +}
11732 +EXPORT_SYMBOL(vchi_msg_dequeue);
11733 +
11734 +/***********************************************************
11735 + * Name: vchi_msg_queuev
11736 + *
11737 + * Arguments: VCHI_SERVICE_HANDLE_T handle,
11738 + * VCHI_MSG_VECTOR_T *vector,
11739 + * uint32_t count,
11740 + * VCHI_FLAGS_T flags,
11741 + * void *msg_handle
11742 + *
11743 + * Description: Thin wrapper to queue a message onto a connection
11744 + *
11745 + * Returns: int32_t - success == 0
11746 + *
11747 + ***********************************************************/
11748 +
11749 +vchiq_static_assert(sizeof(VCHI_MSG_VECTOR_T) == sizeof(VCHIQ_ELEMENT_T));
11750 +vchiq_static_assert(offsetof(VCHI_MSG_VECTOR_T, vec_base) ==
11751 + offsetof(VCHIQ_ELEMENT_T, data));
11752 +vchiq_static_assert(offsetof(VCHI_MSG_VECTOR_T, vec_len) ==
11753 + offsetof(VCHIQ_ELEMENT_T, size));
11754 +
11755 +int32_t vchi_msg_queuev(VCHI_SERVICE_HANDLE_T handle,
11756 + VCHI_MSG_VECTOR_T *vector,
11757 + uint32_t count,
11758 + VCHI_FLAGS_T flags,
11759 + void *msg_handle)
11760 +{
11761 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
11762 +
11763 + (void)msg_handle;
11764 +
11765 + WARN_ON(flags != VCHI_FLAGS_BLOCK_UNTIL_QUEUED);
11766 +
11767 + return vchiq_status_to_vchi(vchiq_queue_message(service->handle,
11768 + (const VCHIQ_ELEMENT_T *)vector, count));
11769 +}
11770 +EXPORT_SYMBOL(vchi_msg_queuev);
11771 +
11772 +/***********************************************************
11773 + * Name: vchi_held_msg_release
11774 + *
11775 + * Arguments: VCHI_HELD_MSG_T *message
11776 + *
11777 + * Description: Routine to release a held message (after it has been read with
11778 + * vchi_msg_hold)
11779 + *
11780 + * Returns: int32_t - success == 0
11781 + *
11782 + ***********************************************************/
11783 +int32_t vchi_held_msg_release(VCHI_HELD_MSG_T *message)
11784 +{
11785 + vchiq_release_message((VCHIQ_SERVICE_HANDLE_T)message->service,
11786 + (VCHIQ_HEADER_T *)message->message);
11787 +
11788 + return 0;
11789 +}
11790 +
11791 +/***********************************************************
11792 + * Name: vchi_msg_hold
11793 + *
11794 + * Arguments: VCHI_SERVICE_HANDLE_T handle,
11795 + * void **data,
11796 + * uint32_t *msg_size,
11797 + * VCHI_FLAGS_T flags,
11798 + * VCHI_HELD_MSG_T *message_handle
11799 + *
11800 + * Description: Routine to return a pointer to the current message (to allow
11801 + * in-place processing). The message is dequeued; don't forget
11802 + * to release it using vchi_held_msg_release when you're
11803 + * finished.
11804 + *
11805 + * Returns: int32_t - success == 0
11806 + *
11807 + ***********************************************************/
11808 +int32_t vchi_msg_hold(VCHI_SERVICE_HANDLE_T handle,
11809 + void **data,
11810 + uint32_t *msg_size,
11811 + VCHI_FLAGS_T flags,
11812 + VCHI_HELD_MSG_T *message_handle)
11813 +{
11814 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
11815 + VCHIQ_HEADER_T *header;
11816 +
11817 + WARN_ON((flags != VCHI_FLAGS_NONE) &&
11818 + (flags != VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE));
11819 +
11820 + if (flags == VCHI_FLAGS_NONE)
11821 + if (vchiu_queue_is_empty(&service->queue))
11822 + return -1;
11823 +
11824 + header = vchiu_queue_pop(&service->queue);
11825 +
11826 + *data = header->data;
11827 + *msg_size = header->size;
11828 +
11829 + message_handle->service =
11830 + (struct opaque_vchi_service_t *)service->handle;
11831 + message_handle->message = header;
11832 +
11833 + return 0;
11834 +}
11835 +
11836 +/***********************************************************
11837 + * Name: vchi_initialise
11838 + *
11839 + * Arguments: VCHI_INSTANCE_T *instance_handle
11840 + * VCHI_CONNECTION_T **connections
11841 + * const uint32_t num_connections
11842 + *
11843 + * Description: Initialises the hardware but does not transmit anything.
11844 + * When run as a Host App this will be called twice, hence the
11845 + * need to malloc the state information.
11846 + *
11847 + * Returns: 0 if successful, failure otherwise
11848 + *
11849 + ***********************************************************/
11850 +
11851 +int32_t vchi_initialise(VCHI_INSTANCE_T *instance_handle)
11852 +{
11853 + VCHIQ_INSTANCE_T instance;
11854 + VCHIQ_STATUS_T status;
11855 +
11856 + status = vchiq_initialise(&instance);
11857 +
11858 + *instance_handle = (VCHI_INSTANCE_T)instance;
11859 +
11860 + return vchiq_status_to_vchi(status);
11861 +}
11862 +EXPORT_SYMBOL(vchi_initialise);
11863 +
11864 +/***********************************************************
11865 + * Name: vchi_connect
11866 + *
11867 + * Arguments: VCHI_CONNECTION_T **connections
11868 + * const uint32_t num_connections
11869 + * VCHI_INSTANCE_T instance_handle
11870 + *
11871 + * Description: Starts the command service on each connection,
11872 + * causing INIT messages to be pinged back and forth
11873 + *
11874 + * Returns: 0 if successful, failure otherwise
11875 + *
11876 + ***********************************************************/
11877 +int32_t vchi_connect(VCHI_CONNECTION_T **connections,
11878 + const uint32_t num_connections,
11879 + VCHI_INSTANCE_T instance_handle)
11880 +{
11881 + VCHIQ_INSTANCE_T instance = (VCHIQ_INSTANCE_T)instance_handle;
11882 +
11883 + (void)connections;
11884 + (void)num_connections;
11885 +
11886 + return vchiq_connect(instance);
11887 +}
11888 +EXPORT_SYMBOL(vchi_connect);
11889 +
11890 +
11891 +/***********************************************************
11892 + * Name: vchi_disconnect
11893 + *
11894 + * Arguments: VCHI_INSTANCE_T instance_handle
11895 + *
11896 + * Description: Stops the command service on each connection,
11897 + * causing DE-INIT messages to be pinged back and forth
11898 + *
11899 + * Returns: 0 if successful, failure otherwise
11900 + *
11901 + ***********************************************************/
11902 +int32_t vchi_disconnect(VCHI_INSTANCE_T instance_handle)
11903 +{
11904 + VCHIQ_INSTANCE_T instance = (VCHIQ_INSTANCE_T)instance_handle;
11905 + return vchiq_status_to_vchi(vchiq_shutdown(instance));
11906 +}
11907 +EXPORT_SYMBOL(vchi_disconnect);
11908 +
11909 +
11910 +/***********************************************************
11911 + * Name: vchi_service_open
11912 + * Name: vchi_service_create
11913 + *
11914 + * Arguments: VCHI_INSTANCE_T instance_handle,
11915 + * SERVICE_CREATION_T *setup,
11916 + * VCHI_SERVICE_HANDLE_T *handle
11917 + *
11918 + * Description: Routine to open a service
11919 + *
11920 + * Returns: int32_t - success == 0
11921 + *
11922 + ***********************************************************/
11923 +
11924 +static VCHIQ_STATUS_T shim_callback(VCHIQ_REASON_T reason,
11925 + VCHIQ_HEADER_T *header, VCHIQ_SERVICE_HANDLE_T handle, void *bulk_user)
11926 +{
11927 + SHIM_SERVICE_T *service =
11928 + (SHIM_SERVICE_T *)VCHIQ_GET_SERVICE_USERDATA(handle);
11929 +
11930 + switch (reason) {
11931 + case VCHIQ_MESSAGE_AVAILABLE:
11932 + vchiu_queue_push(&service->queue, header);
11933 +
11934 + if (service->callback)
11935 + service->callback(service->callback_param,
11936 + VCHI_CALLBACK_MSG_AVAILABLE, NULL);
11937 + break;
11938 + case VCHIQ_BULK_TRANSMIT_DONE:
11939 + if (service->callback)
11940 + service->callback(service->callback_param,
11941 + VCHI_CALLBACK_BULK_SENT, bulk_user);
11942 + break;
11943 + case VCHIQ_BULK_RECEIVE_DONE:
11944 + if (service->callback)
11945 + service->callback(service->callback_param,
11946 + VCHI_CALLBACK_BULK_RECEIVED, bulk_user);
11947 + break;
11948 + case VCHIQ_SERVICE_CLOSED:
11949 + if (service->callback)
11950 + service->callback(service->callback_param,
11951 + VCHI_CALLBACK_SERVICE_CLOSED, NULL);
11952 + break;
11953 + case VCHIQ_SERVICE_OPENED:
11954 + /* No equivalent VCHI reason */
11955 + break;
11956 + case VCHIQ_BULK_TRANSMIT_ABORTED:
11957 + if (service->callback)
11958 + service->callback(service->callback_param,
11959 + VCHI_CALLBACK_BULK_TRANSMIT_ABORTED, bulk_user);
11960 + break;
11961 + case VCHIQ_BULK_RECEIVE_ABORTED:
11962 + if (service->callback)
11963 + service->callback(service->callback_param,
11964 + VCHI_CALLBACK_BULK_RECEIVE_ABORTED, bulk_user);
11965 + break;
11966 + default:
11967 + WARN(1, "not supported\n");
11968 + break;
11969 + }
11970 +
11971 + return VCHIQ_SUCCESS;
11972 +}
11973 +
11974 +static SHIM_SERVICE_T *service_alloc(VCHIQ_INSTANCE_T instance,
11975 + SERVICE_CREATION_T *setup)
11976 +{
11977 + SHIM_SERVICE_T *service = kzalloc(sizeof(SHIM_SERVICE_T), GFP_KERNEL);
11978 +
11979 + (void)instance;
11980 +
11981 + if (service) {
11982 + if (vchiu_queue_init(&service->queue, 64)) {
11983 + service->callback = setup->callback;
11984 + service->callback_param = setup->callback_param;
11985 + } else {
11986 + kfree(service);
11987 + service = NULL;
11988 + }
11989 + }
11990 +
11991 + return service;
11992 +}
11993 +
11994 +static void service_free(SHIM_SERVICE_T *service)
11995 +{
11996 + if (service) {
11997 + vchiu_queue_delete(&service->queue);
11998 + kfree(service);
11999 + }
12000 +}
12001 +
12002 +int32_t vchi_service_open(VCHI_INSTANCE_T instance_handle,
12003 + SERVICE_CREATION_T *setup,
12004 + VCHI_SERVICE_HANDLE_T *handle)
12005 +{
12006 + VCHIQ_INSTANCE_T instance = (VCHIQ_INSTANCE_T)instance_handle;
12007 + SHIM_SERVICE_T *service = service_alloc(instance, setup);
12008 + if (service) {
12009 + VCHIQ_SERVICE_PARAMS_T params;
12010 + VCHIQ_STATUS_T status;
12011 +
12012 + memset(&params, 0, sizeof(params));
12013 + params.fourcc = setup->service_id;
12014 + params.callback = shim_callback;
12015 + params.userdata = service;
12016 + params.version = setup->version.version;
12017 + params.version_min = setup->version.version_min;
12018 +
12019 + status = vchiq_open_service(instance, &params,
12020 + &service->handle);
12021 + if (status != VCHIQ_SUCCESS) {
12022 + service_free(service);
12023 + service = NULL;
12024 + }
12025 + }
12026 +
12027 + *handle = (VCHI_SERVICE_HANDLE_T)service;
12028 +
12029 + return (service != NULL) ? 0 : -1;
12030 +}
12031 +EXPORT_SYMBOL(vchi_service_open);
12032 +
12033 +int32_t vchi_service_create(VCHI_INSTANCE_T instance_handle,
12034 + SERVICE_CREATION_T *setup,
12035 + VCHI_SERVICE_HANDLE_T *handle)
12036 +{
12037 + VCHIQ_INSTANCE_T instance = (VCHIQ_INSTANCE_T)instance_handle;
12038 + SHIM_SERVICE_T *service = service_alloc(instance, setup);
12039 + if (service) {
12040 + VCHIQ_SERVICE_PARAMS_T params;
12041 + VCHIQ_STATUS_T status;
12042 +
12043 + memset(&params, 0, sizeof(params));
12044 + params.fourcc = setup->service_id;
12045 + params.callback = shim_callback;
12046 + params.userdata = service;
12047 + params.version = setup->version.version;
12048 + params.version_min = setup->version.version_min;
12049 + status = vchiq_add_service(instance, &params, &service->handle);
12050 +
12051 + if (status != VCHIQ_SUCCESS) {
12052 + service_free(service);
12053 + service = NULL;
12054 + }
12055 + }
12056 +
12057 + *handle = (VCHI_SERVICE_HANDLE_T)service;
12058 +
12059 + return (service != NULL) ? 0 : -1;
12060 +}
12061 +EXPORT_SYMBOL(vchi_service_create);
12062 +
12063 +int32_t vchi_service_close(const VCHI_SERVICE_HANDLE_T handle)
12064 +{
12065 + int32_t ret = -1;
12066 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
12067 + if (service) {
12068 + VCHIQ_STATUS_T status = vchiq_close_service(service->handle);
12069 + if (status == VCHIQ_SUCCESS) {
12070 + service_free(service);
12071 + service = NULL;
12072 + }
12073 +
12074 + ret = vchiq_status_to_vchi(status);
12075 + }
12076 + return ret;
12077 +}
12078 +EXPORT_SYMBOL(vchi_service_close);
12079 +
12080 +int32_t vchi_service_destroy(const VCHI_SERVICE_HANDLE_T handle)
12081 +{
12082 + int32_t ret = -1;
12083 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
12084 + if (service) {
12085 + VCHIQ_STATUS_T status = vchiq_remove_service(service->handle);
12086 + if (status == VCHIQ_SUCCESS) {
12087 + service_free(service);
12088 + service = NULL;
12089 + }
12090 +
12091 + ret = vchiq_status_to_vchi(status);
12092 + }
12093 + return ret;
12094 +}
12095 +EXPORT_SYMBOL(vchi_service_destroy);
12096 +
12097 +int32_t vchi_get_peer_version(const VCHI_SERVICE_HANDLE_T handle, short *peer_version)
12098 +{
12099 + int32_t ret = -1;
12100 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
12101 +
12102 + if (service) {
12103 + VCHIQ_STATUS_T status = vchiq_get_peer_version(service->handle, peer_version);
12104 + ret = vchiq_status_to_vchi(status);
12105 + }
12106 + return ret;
12107 +}
12108 +EXPORT_SYMBOL(vchi_get_peer_version);
12109 +
12110 +/* ----------------------------------------------------------------------
12111 + * read a uint32_t from buffer.
12112 + * network format is defined to be little endian
12113 + * -------------------------------------------------------------------- */
12114 +uint32_t
12115 +vchi_readbuf_uint32(const void *_ptr)
12116 +{
12117 + const unsigned char *ptr = _ptr;
12118 + return ptr[0] | (ptr[1] << 8) | (ptr[2] << 16) | (ptr[3] << 24);
12119 +}
12120 +
12121 +/* ----------------------------------------------------------------------
12122 + * write a uint32_t to buffer.
12123 + * network format is defined to be little endian
12124 + * -------------------------------------------------------------------- */
12125 +void
12126 +vchi_writebuf_uint32(void *_ptr, uint32_t value)
12127 +{
12128 + unsigned char *ptr = _ptr;
12129 + ptr[0] = (unsigned char)((value >> 0) & 0xFF);
12130 + ptr[1] = (unsigned char)((value >> 8) & 0xFF);
12131 + ptr[2] = (unsigned char)((value >> 16) & 0xFF);
12132 + ptr[3] = (unsigned char)((value >> 24) & 0xFF);
12133 +}
12134 +
12135 +/* ----------------------------------------------------------------------
12136 + * read a uint16_t from buffer.
12137 + * network format is defined to be little endian
12138 + * -------------------------------------------------------------------- */
12139 +uint16_t
12140 +vchi_readbuf_uint16(const void *_ptr)
12141 +{
12142 + const unsigned char *ptr = _ptr;
12143 + return ptr[0] | (ptr[1] << 8);
12144 +}
12145 +
12146 +/* ----------------------------------------------------------------------
12147 + * write a uint16_t into the buffer.
12148 + * network format is defined to be little endian
12149 + * -------------------------------------------------------------------- */
12150 +void
12151 +vchi_writebuf_uint16(void *_ptr, uint16_t value)
12152 +{
12153 + unsigned char *ptr = _ptr;
12154 + ptr[0] = (value >> 0) & 0xFF;
12155 + ptr[1] = (value >> 8) & 0xFF;
12156 +}
12157 +
12158 +/***********************************************************
12159 + * Name: vchi_service_use
12160 + *
12161 + * Arguments: const VCHI_SERVICE_HANDLE_T handle
12162 + *
12163 + * Description: Routine to increment refcount on a service
12164 + *
12165 + * Returns: int32_t - success == 0
12166 + *
12167 + ***********************************************************/
12168 +int32_t vchi_service_use(const VCHI_SERVICE_HANDLE_T handle)
12169 +{
12170 + int32_t ret = -1;
12171 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
12172 + if (service)
12173 + ret = vchiq_status_to_vchi(vchiq_use_service(service->handle));
12174 + return ret;
12175 +}
12176 +EXPORT_SYMBOL(vchi_service_use);
12177 +
12178 +/***********************************************************
12179 + * Name: vchi_service_release
12180 + *
12181 + * Arguments: const VCHI_SERVICE_HANDLE_T handle
12182 + *
12183 + * Description: Routine to decrement refcount on a service
12184 + *
12185 + * Returns: int32_t - success == 0
12186 + *
12187 + ***********************************************************/
12188 +int32_t vchi_service_release(const VCHI_SERVICE_HANDLE_T handle)
12189 +{
12190 + int32_t ret = -1;
12191 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
12192 + if (service)
12193 + ret = vchiq_status_to_vchi(
12194 + vchiq_release_service(service->handle));
12195 + return ret;
12196 +}
12197 +EXPORT_SYMBOL(vchi_service_release);
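To show how the shim above fits together, here is a minimal kernel-side client sketch. It is illustrative only and not part of the patch: it touches only the SERVICE_CREATION_T fields the shim actually reads (service_id, callback, callback_param and the version pair), zero-initialises the rest, the callback prototype is an assumption inferred from how shim_callback() invokes service->callback, and the "example_*" names and 'EXMP' fourcc are hypothetical.

/* Illustrative sketch, not part of the patch. Assumes the VCHI types from
 * interface/vchi/vchi.h; names marked below are hypothetical. */
#include <linux/string.h>

#include "interface/vchi/vchi.h"

/* Prototype inferred from shim_callback(): (param, reason, bulk handle). */
static void example_callback(void *param, const VCHI_CALLBACK_REASON_T reason,
	void *handle)
{
	if (reason == VCHI_CALLBACK_MSG_AVAILABLE)
		; /* a message is queued; fetch it with vchi_msg_dequeue() */
}

static int example_client(void)
{
	VCHI_INSTANCE_T instance;
	VCHI_SERVICE_HANDLE_T handle;
	SERVICE_CREATION_T setup;
	char tx[] = "hello";
	char rx[64];
	uint32_t actual = 0;

	if (vchi_initialise(&instance) != 0)
		return -1;
	/* The shim ignores the connection list, so NULL/0 is enough. */
	if (vchi_connect(NULL, 0, instance) != 0)
		return -1;

	memset(&setup, 0, sizeof(setup));
	setup.service_id = 0x45584d50;		/* 'EXMP', hypothetical */
	setup.callback = example_callback;
	setup.callback_param = NULL;		/* version pair left at 0/0 */

	if (vchi_service_open(instance, &setup, &handle) != 0)
		return -1;

	vchi_msg_queue(handle, tx, sizeof(tx),
		VCHI_FLAGS_BLOCK_UNTIL_QUEUED, NULL);
	vchi_msg_dequeue(handle, rx, sizeof(rx), &actual,
		VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE);

	vchi_service_close(handle);
	return vchi_disconnect(instance);
}

Note that vchi_msg_queue() and the bulk calls retry internally on VCHIQ_RETRY, so a caller only ever sees the simple blocking semantics documented in the headers above.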
12198 diff --git a/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.c b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.c
12199 new file mode 100644
12200 index 0000000..c2eefef
12201 --- /dev/null
12202 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.c
12203 @@ -0,0 +1,151 @@
12204 +/**
12205 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
12206 + *
12207 + * Redistribution and use in source and binary forms, with or without
12208 + * modification, are permitted provided that the following conditions
12209 + * are met:
12210 + * 1. Redistributions of source code must retain the above copyright
12211 + * notice, this list of conditions, and the following disclaimer,
12212 + * without modification.
12213 + * 2. Redistributions in binary form must reproduce the above copyright
12214 + * notice, this list of conditions and the following disclaimer in the
12215 + * documentation and/or other materials provided with the distribution.
12216 + * 3. The names of the above-listed copyright holders may not be used
12217 + * to endorse or promote products derived from this software without
12218 + * specific prior written permission.
12219 + *
12220 + * ALTERNATIVELY, this software may be distributed under the terms of the
12221 + * GNU General Public License ("GPL") version 2, as published by the Free
12222 + * Software Foundation.
12223 + *
12224 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
12225 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
12226 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
12227 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
12228 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
12229 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
12230 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
12231 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
12232 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
12233 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
12234 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
12235 + */
12236 +
12237 +#include "vchiq_util.h"
12238 +
12239 +static inline int is_pow2(int i)
12240 +{
12241 + return i && !(i & (i - 1));
12242 +}
12243 +
12244 +int vchiu_queue_init(VCHIU_QUEUE_T *queue, int size)
12245 +{
12246 + WARN_ON(!is_pow2(size));
12247 +
12248 + queue->size = size;
12249 + queue->read = 0;
12250 + queue->write = 0;
12251 +
12252 + sema_init(&queue->pop, 0);
12253 + sema_init(&queue->push, 0);
12254 +
12255 + queue->storage = kzalloc(size * sizeof(VCHIQ_HEADER_T *), GFP_KERNEL);
12256 + if (queue->storage == NULL) {
12257 + vchiu_queue_delete(queue);
12258 + return 0;
12259 + }
12260 + return 1;
12261 +}
12262 +
12263 +void vchiu_queue_delete(VCHIU_QUEUE_T *queue)
12264 +{
12265 + if (queue->storage != NULL)
12266 + kfree(queue->storage);
12267 +}
12268 +
12269 +int vchiu_queue_is_empty(VCHIU_QUEUE_T *queue)
12270 +{
12271 + return queue->read == queue->write;
12272 +}
12273 +
12274 +int vchiu_queue_is_full(VCHIU_QUEUE_T *queue)
12275 +{
12276 + return queue->write == queue->read + queue->size;
12277 +}
12278 +
12279 +void vchiu_queue_push(VCHIU_QUEUE_T *queue, VCHIQ_HEADER_T *header)
12280 +{
12281 + while (queue->write == queue->read + queue->size) {
12282 + if (down_interruptible(&queue->pop) != 0) {
12283 + flush_signals(current);
12284 + }
12285 + }
12286 +
12287 + /*
12288 + * Write to queue->storage must be visible after read from
12289 + * queue->read
12290 + */
12291 + smp_mb();
12292 +
12293 + queue->storage[queue->write & (queue->size - 1)] = header;
12294 +
12295 + /*
12296 + * Write to queue->storage must be visible before write to
12297 + * queue->write
12298 + */
12299 + smp_wmb();
12300 +
12301 + queue->write++;
12302 +
12303 + up(&queue->push);
12304 +}
12305 +
12306 +VCHIQ_HEADER_T *vchiu_queue_peek(VCHIU_QUEUE_T *queue)
12307 +{
12308 + while (queue->write == queue->read) {
12309 + if (down_interruptible(&queue->push) != 0) {
12310 + flush_signals(current);
12311 + }
12312 + }
12313 +
12314 + up(&queue->push); /* We haven't removed anything from the queue. */
12315 +
12316 + /*
12317 + * Read from queue->storage must be visible after read from
12318 + * queue->write
12319 + */
12320 + smp_rmb();
12321 +
12322 + return queue->storage[queue->read & (queue->size - 1)];
12323 +}
12324 +
12325 +VCHIQ_HEADER_T *vchiu_queue_pop(VCHIU_QUEUE_T *queue)
12326 +{
12327 + VCHIQ_HEADER_T *header;
12328 +
12329 + while (queue->write == queue->read) {
12330 + if (down_interruptible(&queue->push) != 0) {
12331 + flush_signals(current);
12332 + }
12333 + }
12334 +
12335 + /*
12336 + * Read from queue->storage must be visible after read from
12337 + * queue->write
12338 + */
12339 + smp_rmb();
12340 +
12341 + header = queue->storage[queue->read & (queue->size - 1)];
12342 +
12343 + /*
12344 + * Read from queue->storage must be visible before write to
12345 + * queue->read
12346 + */
12347 + smp_mb();
12348 +
12349 + queue->read++;
12350 +
12351 + up(&queue->pop);
12352 +
12353 + return header;
12354 +}
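The queue above is a bounded ring guarded by the push/pop semaphores: read and write are free-running counters, a slot is selected by masking with size - 1 (which is why vchiu_queue_init() insists on a power-of-two size via is_pow2()), and the queue is full exactly when write == read + size. A standalone sketch of just that index arithmetic, with hypothetical names and no kernel dependencies, not part of the patch:

/* Standalone illustration (not part of the patch) of the index scheme used
 * by VCHIU_QUEUE_T: free-running counters, slots chosen by masking, full
 * when write == read + QSIZE. */
#include <assert.h>
#include <stdio.h>

#define QSIZE 4			/* must be a power of two */

int main(void)
{
	int storage[QSIZE];
	unsigned int read = 0, write = 0;
	int i;

	for (i = 0; i < 10; i++) {
		if (write == read + QSIZE) {		/* full: consume one */
			printf("pop %d\n", storage[read & (QSIZE - 1)]);
			read++;
		}
		storage[write & (QSIZE - 1)] = i;	/* push */
		write++;
	}
	while (read != write) {				/* drain */
		printf("pop %d\n", storage[read & (QSIZE - 1)]);
		read++;
	}
	assert(read == write);				/* empty again */
	return 0;
}

Because the counters are only masked when a slot is accessed, they never need to be wrapped back to zero, and empty/full can be tested with plain equality on the raw counters.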
12355 diff --git a/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.h b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.h
12356 new file mode 100644
12357 index 0000000..7c4bf7c
12358 --- /dev/null
12359 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.h
12360 @@ -0,0 +1,82 @@
12361 +/**
12362 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
12363 + *
12364 + * Redistribution and use in source and binary forms, with or without
12365 + * modification, are permitted provided that the following conditions
12366 + * are met:
12367 + * 1. Redistributions of source code must retain the above copyright
12368 + * notice, this list of conditions, and the following disclaimer,
12369 + * without modification.
12370 + * 2. Redistributions in binary form must reproduce the above copyright
12371 + * notice, this list of conditions and the following disclaimer in the
12372 + * documentation and/or other materials provided with the distribution.
12373 + * 3. The names of the above-listed copyright holders may not be used
12374 + * to endorse or promote products derived from this software without
12375 + * specific prior written permission.
12376 + *
12377 + * ALTERNATIVELY, this software may be distributed under the terms of the
12378 + * GNU General Public License ("GPL") version 2, as published by the Free
12379 + * Software Foundation.
12380 + *
12381 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
12382 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
12383 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
12384 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
12385 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
12386 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
12387 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
12388 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
12389 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
12390 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
12391 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
12392 + */
12393 +
12394 +#ifndef VCHIQ_UTIL_H
12395 +#define VCHIQ_UTIL_H
12396 +
12397 +#include <linux/types.h>
12398 +#include <linux/semaphore.h>
12399 +#include <linux/mutex.h>
12400 +#include <linux/bitops.h>
12401 +#include <linux/kthread.h>
12402 +#include <linux/wait.h>
12403 +#include <linux/vmalloc.h>
12404 +#include <linux/jiffies.h>
12405 +#include <linux/delay.h>
12406 +#include <linux/string.h>
12407 +#include <linux/types.h>
12408 +#include <linux/interrupt.h>
12409 +#include <linux/random.h>
12410 +#include <linux/sched.h>
12411 +#include <linux/ctype.h>
12412 +#include <linux/uaccess.h>
12413 +#include <linux/time.h> /* for time_t */
12414 +#include <linux/slab.h>
12415 +#include <linux/vmalloc.h>
12416 +
12417 +#include "vchiq_if.h"
12418 +
12419 +typedef struct {
12420 + int size;
12421 + int read;
12422 + int write;
12423 +
12424 + struct semaphore pop;
12425 + struct semaphore push;
12426 +
12427 + VCHIQ_HEADER_T **storage;
12428 +} VCHIU_QUEUE_T;
12429 +
12430 +extern int vchiu_queue_init(VCHIU_QUEUE_T *queue, int size);
12431 +extern void vchiu_queue_delete(VCHIU_QUEUE_T *queue);
12432 +
12433 +extern int vchiu_queue_is_empty(VCHIU_QUEUE_T *queue);
12434 +extern int vchiu_queue_is_full(VCHIU_QUEUE_T *queue);
12435 +
12436 +extern void vchiu_queue_push(VCHIU_QUEUE_T *queue, VCHIQ_HEADER_T *header);
12437 +
12438 +extern VCHIQ_HEADER_T *vchiu_queue_peek(VCHIU_QUEUE_T *queue);
12439 +extern VCHIQ_HEADER_T *vchiu_queue_pop(VCHIU_QUEUE_T *queue);
12440 +
12441 +#endif
12442 +
12443 diff --git a/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_version.c b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_version.c
12444 new file mode 100644
12445 index 0000000..b6bfa21
12446 --- /dev/null
12447 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_version.c
12448 @@ -0,0 +1,59 @@
12449 +/**
12450 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
12451 + *
12452 + * Redistribution and use in source and binary forms, with or without
12453 + * modification, are permitted provided that the following conditions
12454 + * are met:
12455 + * 1. Redistributions of source code must retain the above copyright
12456 + * notice, this list of conditions, and the following disclaimer,
12457 + * without modification.
12458 + * 2. Redistributions in binary form must reproduce the above copyright
12459 + * notice, this list of conditions and the following disclaimer in the
12460 + * documentation and/or other materials provided with the distribution.
12461 + * 3. The names of the above-listed copyright holders may not be used
12462 + * to endorse or promote products derived from this software without
12463 + * specific prior written permission.
12464 + *
12465 + * ALTERNATIVELY, this software may be distributed under the terms of the
12466 + * GNU General Public License ("GPL") version 2, as published by the Free
12467 + * Software Foundation.
12468 + *
12469 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
12470 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
12471 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
12472 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
12473 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
12474 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
12475 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
12476 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
12477 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
12478 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
12479 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
12480 + */
12481 +#include "vchiq_build_info.h"
12482 +#include <linux/broadcom/vc_debug_sym.h>
12483 +
12484 +VC_DEBUG_DECLARE_STRING_VAR( vchiq_build_hostname, "dc4-arm-01" );
12485 +VC_DEBUG_DECLARE_STRING_VAR( vchiq_build_version, "9245b4c35b99b3870e1f7dc598c5692b3c66a6f0 (tainted)" );
12486 +VC_DEBUG_DECLARE_STRING_VAR( vchiq_build_time, __TIME__ );
12487 +VC_DEBUG_DECLARE_STRING_VAR( vchiq_build_date, __DATE__ );
12488 +
12489 +const char *vchiq_get_build_hostname( void )
12490 +{
12491 + return vchiq_build_hostname;
12492 +}
12493 +
12494 +const char *vchiq_get_build_version( void )
12495 +{
12496 + return vchiq_build_version;
12497 +}
12498 +
12499 +const char *vchiq_get_build_date( void )
12500 +{
12501 + return vchiq_build_date;
12502 +}
12503 +
12504 +const char *vchiq_get_build_time( void )
12505 +{
12506 + return vchiq_build_time;
12507 +}
12508 --
12509 1.9.1
12510