1 From 2fdd1c3ab61a7a58a3ef3bc5b3d1504202bbc108 Mon Sep 17 00:00:00 2001
2 From: popcornmix <popcornmix@gmail.com>
3 Date: Tue, 2 Jul 2013 23:42:01 +0100
4 Subject: [PATCH 005/174] bcm2708 vchiq driver
6 Signed-off-by: popcornmix <popcornmix@gmail.com>
8 drivers/misc/Kconfig | 2 +
9 drivers/misc/Makefile | 1 +
10 drivers/misc/vc04_services/Kconfig | 10 +
11 drivers/misc/vc04_services/Makefile | 18 +
12 .../interface/vchi/connections/connection.h | 328 ++
13 .../interface/vchi/message_drivers/message.h | 204 ++
14 drivers/misc/vc04_services/interface/vchi/vchi.h | 373 ++
15 .../misc/vc04_services/interface/vchi/vchi_cfg.h | 224 ++
16 .../interface/vchi/vchi_cfg_internal.h | 71 +
17 .../vc04_services/interface/vchi/vchi_common.h | 163 +
18 .../misc/vc04_services/interface/vchi/vchi_mh.h | 42 +
19 .../misc/vc04_services/interface/vchiq_arm/vchiq.h | 41 +
20 .../vc04_services/interface/vchiq_arm/vchiq_2835.h | 42 +
21 .../interface/vchiq_arm/vchiq_2835_arm.c | 538 +++
22 .../vc04_services/interface/vchiq_arm/vchiq_arm.c | 2813 ++++++++++++++
23 .../vc04_services/interface/vchiq_arm/vchiq_arm.h | 212 ++
24 .../interface/vchiq_arm/vchiq_build_info.h | 37 +
25 .../vc04_services/interface/vchiq_arm/vchiq_cfg.h | 60 +
26 .../interface/vchiq_arm/vchiq_connected.c | 119 +
27 .../interface/vchiq_arm/vchiq_connected.h | 51 +
28 .../vc04_services/interface/vchiq_arm/vchiq_core.c | 3824 ++++++++++++++++++++
29 .../vc04_services/interface/vchiq_arm/vchiq_core.h | 706 ++++
30 .../interface/vchiq_arm/vchiq_genversion | 89 +
31 .../vc04_services/interface/vchiq_arm/vchiq_if.h | 188 +
32 .../interface/vchiq_arm/vchiq_ioctl.h | 129 +
33 .../interface/vchiq_arm/vchiq_kern_lib.c | 456 +++
34 .../interface/vchiq_arm/vchiq_memdrv.h | 71 +
35 .../interface/vchiq_arm/vchiq_pagelist.h | 58 +
36 .../vc04_services/interface/vchiq_arm/vchiq_proc.c | 254 ++
37 .../vc04_services/interface/vchiq_arm/vchiq_shim.c | 815 +++++
38 .../vc04_services/interface/vchiq_arm/vchiq_util.c | 151 +
39 .../vc04_services/interface/vchiq_arm/vchiq_util.h | 82 +
40 .../interface/vchiq_arm/vchiq_version.c | 59 +
41 33 files changed, 12231 insertions(+)
42 create mode 100644 drivers/misc/vc04_services/Kconfig
43 create mode 100644 drivers/misc/vc04_services/Makefile
44 create mode 100644 drivers/misc/vc04_services/interface/vchi/connections/connection.h
45 create mode 100644 drivers/misc/vc04_services/interface/vchi/message_drivers/message.h
46 create mode 100644 drivers/misc/vc04_services/interface/vchi/vchi.h
47 create mode 100644 drivers/misc/vc04_services/interface/vchi/vchi_cfg.h
48 create mode 100644 drivers/misc/vc04_services/interface/vchi/vchi_cfg_internal.h
49 create mode 100644 drivers/misc/vc04_services/interface/vchi/vchi_common.h
50 create mode 100644 drivers/misc/vc04_services/interface/vchi/vchi_mh.h
51 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq.h
52 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835.h
53 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
54 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.c
55 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.h
56 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_build_info.h
57 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_cfg.h
58 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.c
59 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.h
60 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.c
61 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.h
62 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_genversion
63 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_if.h
64 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_ioctl.h
65 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_kern_lib.c
66 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_memdrv.h
67 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_pagelist.h
68 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_proc.c
69 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_shim.c
70 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.c
71 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.h
72 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_version.c
74 --- a/drivers/misc/Kconfig
75 +++ b/drivers/misc/Kconfig
76 @@ -536,4 +536,6 @@ source "drivers/misc/carma/Kconfig"
77 source "drivers/misc/altera-stapl/Kconfig"
78 source "drivers/misc/mei/Kconfig"
79 source "drivers/misc/vmw_vmci/Kconfig"
80 +source "drivers/misc/vc04_services/Kconfig"
83 --- a/drivers/misc/Makefile
84 +++ b/drivers/misc/Makefile
85 @@ -53,3 +53,4 @@ obj-$(CONFIG_INTEL_MEI) += mei/
86 obj-$(CONFIG_VMWARE_VMCI) += vmw_vmci/
87 obj-$(CONFIG_LATTICE_ECP3_CONFIG) += lattice-ecp3-config.o
88 obj-$(CONFIG_SRAM) += sram.o
89 +obj-y += vc04_services/
91 +++ b/drivers/misc/vc04_services/Kconfig
94 + tristate "Videocore VCHIQ"
95 + depends on MACH_BCM2708
98 + Kernel to VideoCore communication interface for the
99 + BCM2708 family of products.
100 + Defaults to Y when the Broadcom Videocore services
101 + are included in the build, N otherwise.
104 +++ b/drivers/misc/vc04_services/Makefile
106 +ifeq ($(CONFIG_MACH_BCM2708),y)
108 +obj-$(CONFIG_BCM2708_VCHIQ) += vchiq.o
111 + interface/vchiq_arm/vchiq_core.o \
112 + interface/vchiq_arm/vchiq_arm.o \
113 + interface/vchiq_arm/vchiq_kern_lib.o \
114 + interface/vchiq_arm/vchiq_2835_arm.o \
115 + interface/vchiq_arm/vchiq_proc.o \
116 + interface/vchiq_arm/vchiq_shim.o \
117 + interface/vchiq_arm/vchiq_util.o \
118 + interface/vchiq_arm/vchiq_connected.o \
120 +EXTRA_CFLAGS += -DVCOS_VERIFY_BKPTS=1 -Idrivers/misc/vc04_services -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000
125 +++ b/drivers/misc/vc04_services/interface/vchi/connections/connection.h
128 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
130 + * Redistribution and use in source and binary forms, with or without
131 + * modification, are permitted provided that the following conditions
133 + * 1. Redistributions of source code must retain the above copyright
134 + * notice, this list of conditions, and the following disclaimer,
135 + * without modification.
136 + * 2. Redistributions in binary form must reproduce the above copyright
137 + * notice, this list of conditions and the following disclaimer in the
138 + * documentation and/or other materials provided with the distribution.
139 + * 3. The names of the above-listed copyright holders may not be used
140 + * to endorse or promote products derived from this software without
141 + * specific prior written permission.
143 + * ALTERNATIVELY, this software may be distributed under the terms of the
144 + * GNU General Public License ("GPL") version 2, as published by the Free
145 + * Software Foundation.
147 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
148 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
149 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
150 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
151 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
152 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
153 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
154 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
155 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
156 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
157 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
160 +#ifndef CONNECTION_H_
161 +#define CONNECTION_H_
163 +#include <linux/kernel.h>
164 +#include <linux/types.h>
165 +#include <linux/semaphore.h>
167 +#include "interface/vchi/vchi_cfg_internal.h"
168 +#include "interface/vchi/vchi_common.h"
169 +#include "interface/vchi/message_drivers/message.h"
171 +/******************************************************************************
173 + *****************************************************************************/
175 +// Opaque handle for a connection / service pair
176 +typedef struct opaque_vchi_connection_connected_service_handle_t *VCHI_CONNECTION_SERVICE_HANDLE_T;
178 +// opaque handle to the connection state information
179 +typedef struct opaque_vchi_connection_info_t VCHI_CONNECTION_STATE_T;
181 +typedef struct vchi_connection_t VCHI_CONNECTION_T;
184 +/******************************************************************************
186 + *****************************************************************************/
188 +// Routine to init a connection with a particular low level driver
189 +typedef VCHI_CONNECTION_STATE_T * (*VCHI_CONNECTION_INIT_T)( struct vchi_connection_t * connection,
190 + const VCHI_MESSAGE_DRIVER_T * driver );
192 +// Routine to control CRC enabling at a connection level
193 +typedef int32_t (*VCHI_CONNECTION_CRC_CONTROL_T)( VCHI_CONNECTION_STATE_T *state_handle,
194 + VCHI_CRC_CONTROL_T control );
196 +// Routine to create a service
197 +typedef int32_t (*VCHI_CONNECTION_SERVICE_CONNECT_T)( VCHI_CONNECTION_STATE_T *state_handle,
198 + int32_t service_id,
199 + uint32_t rx_fifo_size,
200 + uint32_t tx_fifo_size,
202 + VCHI_CALLBACK_T callback,
203 + void *callback_param,
205 + int32_t want_unaligned_bulk_rx,
206 + int32_t want_unaligned_bulk_tx,
207 + VCHI_CONNECTION_SERVICE_HANDLE_T *service_handle );
209 +// Routine to close a service
210 +typedef int32_t (*VCHI_CONNECTION_SERVICE_DISCONNECT_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle );
212 +// Routine to queue a message
213 +typedef int32_t (*VCHI_CONNECTION_SERVICE_QUEUE_MESSAGE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
215 + uint32_t data_size,
216 + VCHI_FLAGS_T flags,
217 + void *msg_handle );
219 +// scatter-gather (vector) message queueing
220 +typedef int32_t (*VCHI_CONNECTION_SERVICE_QUEUE_MESSAGEV_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
221 + VCHI_MSG_VECTOR_T *vector,
223 + VCHI_FLAGS_T flags,
224 + void *msg_handle );
226 +// Routine to dequeue a message
227 +typedef int32_t (*VCHI_CONNECTION_SERVICE_DEQUEUE_MESSAGE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
229 + uint32_t max_data_size_to_read,
230 + uint32_t *actual_msg_size,
231 + VCHI_FLAGS_T flags );
233 +// Routine to peek at a message
234 +typedef int32_t (*VCHI_CONNECTION_SERVICE_PEEK_MESSAGE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
236 + uint32_t *msg_size,
237 + VCHI_FLAGS_T flags );
239 +// Routine to hold a message
240 +typedef int32_t (*VCHI_CONNECTION_SERVICE_HOLD_MESSAGE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
242 + uint32_t *msg_size,
243 + VCHI_FLAGS_T flags,
244 + void **message_handle );
246 +// Routine to initialise a received message iterator
247 +typedef int32_t (*VCHI_CONNECTION_SERVICE_LOOKAHEAD_MESSAGE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
248 + VCHI_MSG_ITER_T *iter,
249 + VCHI_FLAGS_T flags );
251 +// Routine to release a held message
252 +typedef int32_t (*VCHI_CONNECTION_HELD_MSG_RELEASE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
253 + void *message_handle );
255 +// Routine to get info on a held message
256 +typedef int32_t (*VCHI_CONNECTION_HELD_MSG_INFO_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
257 + void *message_handle,
260 + uint32_t *tx_timestamp,
261 + uint32_t *rx_timestamp );
263 +// Routine to check whether the iterator has a next message
264 +typedef int32_t (*VCHI_CONNECTION_MSG_ITER_HAS_NEXT_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service,
265 + const VCHI_MSG_ITER_T *iter );
267 +// Routine to advance the iterator
268 +typedef int32_t (*VCHI_CONNECTION_MSG_ITER_NEXT_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service,
269 + VCHI_MSG_ITER_T *iter,
271 + uint32_t *msg_size );
273 +// Routine to remove the last message returned by the iterator
274 +typedef int32_t (*VCHI_CONNECTION_MSG_ITER_REMOVE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service,
275 + VCHI_MSG_ITER_T *iter );
277 +// Routine to hold the last message returned by the iterator
278 +typedef int32_t (*VCHI_CONNECTION_MSG_ITER_HOLD_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service,
279 + VCHI_MSG_ITER_T *iter,
280 + void **msg_handle );
282 +// Routine to transmit bulk data
283 +typedef int32_t (*VCHI_CONNECTION_BULK_QUEUE_TRANSMIT_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
284 + const void *data_src,
285 + uint32_t data_size,
286 + VCHI_FLAGS_T flags,
287 + void *bulk_handle );
289 +// Routine to receive data
290 +typedef int32_t (*VCHI_CONNECTION_BULK_QUEUE_RECEIVE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
292 + uint32_t data_size,
293 + VCHI_FLAGS_T flags,
294 + void *bulk_handle );
296 +// Routine to report if a server is available
297 +typedef int32_t (*VCHI_CONNECTION_SERVER_PRESENT)( VCHI_CONNECTION_STATE_T *state, int32_t service_id, int32_t peer_flags );
299 +// Routine to report the number of RX slots available
300 +typedef int (*VCHI_CONNECTION_RX_SLOTS_AVAILABLE)( const VCHI_CONNECTION_STATE_T *state );
302 +// Routine to report the RX slot size
303 +typedef uint32_t (*VCHI_CONNECTION_RX_SLOT_SIZE)( const VCHI_CONNECTION_STATE_T *state );
305 +// Callback to indicate that the other side has added a buffer to the rx bulk DMA FIFO
306 +typedef void (*VCHI_CONNECTION_RX_BULK_BUFFER_ADDED)(VCHI_CONNECTION_STATE_T *state,
309 + MESSAGE_TX_CHANNEL_T channel,
310 + uint32_t channel_params,
311 + uint32_t data_length,
312 + uint32_t data_offset);
314 +// Callback to inform a service that a Xon or Xoff message has been received
315 +typedef void (*VCHI_CONNECTION_FLOW_CONTROL)(VCHI_CONNECTION_STATE_T *state, int32_t service_id, int32_t xoff);
317 +// Callback to inform a service that a server available reply message has been received
318 +typedef void (*VCHI_CONNECTION_SERVER_AVAILABLE_REPLY)(VCHI_CONNECTION_STATE_T *state, int32_t service_id, uint32_t flags);
320 +// Callback to indicate that bulk auxiliary messages have arrived
321 +typedef void (*VCHI_CONNECTION_BULK_AUX_RECEIVED)(VCHI_CONNECTION_STATE_T *state);
324 +// Callback to indicate that a bulk auxiliary message has been transmitted
324 +typedef void (*VCHI_CONNECTION_BULK_AUX_TRANSMITTED)(VCHI_CONNECTION_STATE_T *state, void *handle);
326 +// Callback with all the connection info you require
327 +typedef void (*VCHI_CONNECTION_INFO)(VCHI_CONNECTION_STATE_T *state, uint32_t protocol_version, uint32_t slot_size, uint32_t num_slots, uint32_t min_bulk_size);
329 +// Callback to inform of a disconnect
330 +typedef void (*VCHI_CONNECTION_DISCONNECT)(VCHI_CONNECTION_STATE_T *state, uint32_t flags);
332 +// Callback to inform of a power control request
333 +typedef void (*VCHI_CONNECTION_POWER_CONTROL)(VCHI_CONNECTION_STATE_T *state, MESSAGE_TX_CHANNEL_T channel, int32_t enable);
335 +// allocate memory suitably aligned for this connection
336 +typedef void * (*VCHI_BUFFER_ALLOCATE)(VCHI_CONNECTION_SERVICE_HANDLE_T service_handle, uint32_t * length);
338 +// free memory allocated by buffer_allocate
339 +typedef void (*VCHI_BUFFER_FREE)(VCHI_CONNECTION_SERVICE_HANDLE_T service_handle, void * address);
342 +/******************************************************************************
343 + System driver struct
344 + *****************************************************************************/
346 +struct opaque_vchi_connection_api_t
348 + // Routine to init the connection
349 + VCHI_CONNECTION_INIT_T init;
351 + // Connection-level CRC control
352 + VCHI_CONNECTION_CRC_CONTROL_T crc_control;
354 + // Routine to connect to or create service
355 + VCHI_CONNECTION_SERVICE_CONNECT_T service_connect;
357 + // Routine to disconnect from a service
358 + VCHI_CONNECTION_SERVICE_DISCONNECT_T service_disconnect;
360 + // Routine to queue a message
361 + VCHI_CONNECTION_SERVICE_QUEUE_MESSAGE_T service_queue_msg;
363 + // scatter-gather (vector) message queue
364 + VCHI_CONNECTION_SERVICE_QUEUE_MESSAGEV_T service_queue_msgv;
366 + // Routine to dequeue a message
367 + VCHI_CONNECTION_SERVICE_DEQUEUE_MESSAGE_T service_dequeue_msg;
369 + // Routine to peek at a message
370 + VCHI_CONNECTION_SERVICE_PEEK_MESSAGE_T service_peek_msg;
372 + // Routine to hold a message
373 + VCHI_CONNECTION_SERVICE_HOLD_MESSAGE_T service_hold_msg;
375 + // Routine to initialise a received message iterator
376 + VCHI_CONNECTION_SERVICE_LOOKAHEAD_MESSAGE_T service_look_ahead_msg;
378 + // Routine to release a message
379 + VCHI_CONNECTION_HELD_MSG_RELEASE_T held_msg_release;
381 + // Routine to get information on a held message
382 + VCHI_CONNECTION_HELD_MSG_INFO_T held_msg_info;
384 + // Routine to check for next message on iterator
385 + VCHI_CONNECTION_MSG_ITER_HAS_NEXT_T msg_iter_has_next;
387 + // Routine to get next message on iterator
388 + VCHI_CONNECTION_MSG_ITER_NEXT_T msg_iter_next;
390 + // Routine to remove the last message returned by iterator
391 + VCHI_CONNECTION_MSG_ITER_REMOVE_T msg_iter_remove;
393 + // Routine to hold the last message returned by iterator
394 + VCHI_CONNECTION_MSG_ITER_HOLD_T msg_iter_hold;
396 + // Routine to transmit bulk data
397 + VCHI_CONNECTION_BULK_QUEUE_TRANSMIT_T bulk_queue_transmit;
399 + // Routine to receive data
400 + VCHI_CONNECTION_BULK_QUEUE_RECEIVE_T bulk_queue_receive;
402 + // Routine to report the available servers
403 + VCHI_CONNECTION_SERVER_PRESENT server_present;
405 + // Routine to report the number of RX slots available
406 + VCHI_CONNECTION_RX_SLOTS_AVAILABLE connection_rx_slots_available;
408 + // Routine to report the RX slot size
409 + VCHI_CONNECTION_RX_SLOT_SIZE connection_rx_slot_size;
411 + // Callback to indicate that the other side has added a buffer to the rx bulk DMA FIFO
412 + VCHI_CONNECTION_RX_BULK_BUFFER_ADDED rx_bulk_buffer_added;
414 + // Callback to inform a service that a Xon or Xoff message has been received
415 + VCHI_CONNECTION_FLOW_CONTROL flow_control;
417 + // Callback to inform a service that a server available reply message has been received
418 + VCHI_CONNECTION_SERVER_AVAILABLE_REPLY server_available_reply;
420 + // Callback to indicate that bulk auxiliary messages have arrived
421 + VCHI_CONNECTION_BULK_AUX_RECEIVED bulk_aux_received;
423 + // Callback to indicate that a bulk auxiliary message has been transmitted
424 + VCHI_CONNECTION_BULK_AUX_TRANSMITTED bulk_aux_transmitted;
426 + // Callback to provide information about the connection
427 + VCHI_CONNECTION_INFO connection_info;
429 + // Callback to notify that peer has requested disconnect
430 + VCHI_CONNECTION_DISCONNECT disconnect;
432 + // Callback to notify that peer has requested power change
433 + VCHI_CONNECTION_POWER_CONTROL power_control;
435 + // allocate memory suitably aligned for this connection
436 + VCHI_BUFFER_ALLOCATE buffer_allocate;
438 + // free memory allocated by buffer_allocate
439 + VCHI_BUFFER_FREE buffer_free;
443 +struct vchi_connection_t {
444 + const VCHI_CONNECTION_API_T *api;
445 + VCHI_CONNECTION_STATE_T *state;
446 +#ifdef VCHI_COARSE_LOCKING
447 + struct semaphore sem;
452 +#endif /* CONNECTION_H_ */
454 +/****************************** End of file **********************************/
456 +++ b/drivers/misc/vc04_services/interface/vchi/message_drivers/message.h
459 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
461 + * Redistribution and use in source and binary forms, with or without
462 + * modification, are permitted provided that the following conditions
464 + * 1. Redistributions of source code must retain the above copyright
465 + * notice, this list of conditions, and the following disclaimer,
466 + * without modification.
467 + * 2. Redistributions in binary form must reproduce the above copyright
468 + * notice, this list of conditions and the following disclaimer in the
469 + * documentation and/or other materials provided with the distribution.
470 + * 3. The names of the above-listed copyright holders may not be used
471 + * to endorse or promote products derived from this software without
472 + * specific prior written permission.
474 + * ALTERNATIVELY, this software may be distributed under the terms of the
475 + * GNU General Public License ("GPL") version 2, as published by the Free
476 + * Software Foundation.
478 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
479 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
480 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
481 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
482 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
483 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
484 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
485 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
486 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
487 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
488 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
491 +#ifndef _VCHI_MESSAGE_H_
492 +#define _VCHI_MESSAGE_H_
494 +#include <linux/kernel.h>
495 +#include <linux/types.h>
496 +#include <linux/semaphore.h>
498 +#include "interface/vchi/vchi_cfg_internal.h"
499 +#include "interface/vchi/vchi_common.h"
502 +typedef enum message_event_type {
503 + MESSAGE_EVENT_NONE,
505 + MESSAGE_EVENT_MESSAGE,
506 + MESSAGE_EVENT_SLOT_COMPLETE,
507 + MESSAGE_EVENT_RX_BULK_PAUSED,
508 + MESSAGE_EVENT_RX_BULK_COMPLETE,
509 + MESSAGE_EVENT_TX_COMPLETE,
510 + MESSAGE_EVENT_MSG_DISCARDED
511 +} MESSAGE_EVENT_TYPE_T;
513 +typedef enum vchi_msg_flags
515 + VCHI_MSG_FLAGS_NONE = 0x0,
516 + VCHI_MSG_FLAGS_TERMINATE_DMA = 0x1
519 +typedef enum message_tx_channel
521 + MESSAGE_TX_CHANNEL_MESSAGE = 0,
522 + MESSAGE_TX_CHANNEL_BULK = 1 // drivers may provide multiple bulk channels, from 1 upwards
523 +} MESSAGE_TX_CHANNEL_T;
525 +// Macros used for cycling through bulk channels
526 +#define MESSAGE_TX_CHANNEL_BULK_PREV(c) (MESSAGE_TX_CHANNEL_BULK+((c)-MESSAGE_TX_CHANNEL_BULK+VCHI_MAX_BULK_TX_CHANNELS_PER_CONNECTION-1)%VCHI_MAX_BULK_TX_CHANNELS_PER_CONNECTION)
527 +#define MESSAGE_TX_CHANNEL_BULK_NEXT(c) (MESSAGE_TX_CHANNEL_BULK+((c)-MESSAGE_TX_CHANNEL_BULK+1)%VCHI_MAX_BULK_TX_CHANNELS_PER_CONNECTION)
529 +typedef enum message_rx_channel
531 + MESSAGE_RX_CHANNEL_MESSAGE = 0,
532 + MESSAGE_RX_CHANNEL_BULK = 1 // drivers may provide multiple bulk channels, from 1 upwards
533 +} MESSAGE_RX_CHANNEL_T;
535 +// Message receive slot information
536 +typedef struct rx_msg_slot_info {
538 + struct rx_msg_slot_info *next;
539 + //struct slot_info *prev;
540 +#if !defined VCHI_COARSE_LOCKING
541 + struct semaphore sem;
544 + uint8_t *addr; // base address of slot
545 + uint32_t len; // length of slot in bytes
547 + uint32_t write_ptr; // hardware causes this to advance
548 + uint32_t read_ptr; // this module does the reading
549 + int active; // is this slot in the hardware dma fifo?
550 + uint32_t msgs_parsed; // count how many messages are in this slot
551 + uint32_t msgs_released; // how many messages have been released
552 + void *state; // connection state information
553 + uint8_t ref_count[VCHI_MAX_SERVICES_PER_CONNECTION]; // reference count for slots held by services
554 +} RX_MSG_SLOTINFO_T;
556 +// The message driver no longer needs to know about the fields of RX_BULK_SLOTINFO_T - sort this out.
557 +// In particular, it mustn't use addr and len - they're the client buffer, but the message
558 +// driver will be tasked with sending the aligned core section.
559 +typedef struct rx_bulk_slotinfo_t {
560 + struct rx_bulk_slotinfo_t *next;
562 + struct semaphore *blocking;
568 + // needed for the callback
571 + VCHI_FLAGS_T flags;
572 +} RX_BULK_SLOTINFO_T;
575 +/* ----------------------------------------------------------------------
576 + * each connection driver will have a pool of the following struct.
578 + * the pool will be managed by vchi_qman_*
579 + * this means there will be multiple queues (single linked lists)
580 + * a given struct message_info will be on exactly one of these queues
582 + * -------------------------------------------------------------------- */
583 +typedef struct rx_message_info {
585 + struct message_info *next;
586 + //struct message_info *prev;
590 + RX_MSG_SLOTINFO_T *slot; // points to whichever slot contains this message
591 + uint32_t tx_timestamp;
592 + uint32_t rx_timestamp;
594 +} RX_MESSAGE_INFO_T;
597 + MESSAGE_EVENT_TYPE_T type;
601 + void *addr; // address of message
602 + uint16_t slot_delta; // whether this message indicated slot delta
603 + uint32_t len; // length of message
604 + RX_MSG_SLOTINFO_T *slot; // slot this message is in
605 + int32_t service; // service id this message is destined for
606 + uint32_t tx_timestamp; // timestamp from the header
607 + uint32_t rx_timestamp; // timestamp when we parsed it
610 + // FIXME: cleanup slot reporting...
611 + RX_MSG_SLOTINFO_T *rx_msg;
612 + RX_BULK_SLOTINFO_T *rx_bulk;
614 + MESSAGE_TX_CHANNEL_T tx_channel;
620 +typedef void VCHI_MESSAGE_DRIVER_EVENT_CALLBACK_T( void *state );
623 + VCHI_MESSAGE_DRIVER_EVENT_CALLBACK_T *event_callback;
624 +} VCHI_MESSAGE_DRIVER_OPEN_T;
627 +// handle to this instance of message driver (as returned by ->open)
628 +typedef struct opaque_mhandle_t *VCHI_MDRIVER_HANDLE_T;
630 +struct opaque_vchi_message_driver_t {
631 + VCHI_MDRIVER_HANDLE_T *(*open)( VCHI_MESSAGE_DRIVER_OPEN_T *params, void *state );
632 + int32_t (*suspending)( VCHI_MDRIVER_HANDLE_T *handle );
633 + int32_t (*resumed)( VCHI_MDRIVER_HANDLE_T *handle );
634 + int32_t (*power_control)( VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T, int32_t enable );
635 + int32_t (*add_msg_rx_slot)( VCHI_MDRIVER_HANDLE_T *handle, RX_MSG_SLOTINFO_T *slot ); // rx message
636 + int32_t (*add_bulk_rx)( VCHI_MDRIVER_HANDLE_T *handle, void *data, uint32_t len, RX_BULK_SLOTINFO_T *slot ); // rx data (bulk)
637 + int32_t (*send)( VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T channel, const void *data, uint32_t len, VCHI_MSG_FLAGS_T flags, void *send_handle ); // tx (message & bulk)
638 + void (*next_event)( VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_EVENT_T *event ); // get the next event from message_driver
639 + int32_t (*enable)( VCHI_MDRIVER_HANDLE_T *handle );
640 + int32_t (*form_message)( VCHI_MDRIVER_HANDLE_T *handle, int32_t service_id, VCHI_MSG_VECTOR_T *vector, uint32_t count, void
641 + *address, uint32_t length_avail, uint32_t max_total_length, int32_t pad_to_fill, int32_t allow_partial );
643 + int32_t (*update_message)( VCHI_MDRIVER_HANDLE_T *handle, void *dest, int16_t *slot_count );
644 + int32_t (*buffer_aligned)( VCHI_MDRIVER_HANDLE_T *handle, int tx, int uncached, const void *address, const uint32_t length );
645 + void * (*allocate_buffer)( VCHI_MDRIVER_HANDLE_T *handle, uint32_t *length );
646 + void (*free_buffer)( VCHI_MDRIVER_HANDLE_T *handle, void *address );
647 + int (*rx_slot_size)( VCHI_MDRIVER_HANDLE_T *handle, int msg_size );
648 + int (*tx_slot_size)( VCHI_MDRIVER_HANDLE_T *handle, int msg_size );
650 + int32_t (*tx_supports_terminate)( const VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T channel );
651 + uint32_t (*tx_bulk_chunk_size)( const VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T channel );
652 + int (*tx_alignment)( const VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T channel );
653 + int (*rx_alignment)( const VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_RX_CHANNEL_T channel );
654 + void (*form_bulk_aux)( VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T channel, const void *data, uint32_t len, uint32_t chunk_size, const void **aux_data, int32_t *aux_len );
655 + void (*debug)( VCHI_MDRIVER_HANDLE_T *handle );
659 +#endif // _VCHI_MESSAGE_H_
661 +/****************************** End of file ***********************************/
663 +++ b/drivers/misc/vc04_services/interface/vchi/vchi.h
666 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
668 + * Redistribution and use in source and binary forms, with or without
669 + * modification, are permitted provided that the following conditions
671 + * 1. Redistributions of source code must retain the above copyright
672 + * notice, this list of conditions, and the following disclaimer,
673 + * without modification.
674 + * 2. Redistributions in binary form must reproduce the above copyright
675 + * notice, this list of conditions and the following disclaimer in the
676 + * documentation and/or other materials provided with the distribution.
677 + * 3. The names of the above-listed copyright holders may not be used
678 + * to endorse or promote products derived from this software without
679 + * specific prior written permission.
681 + * ALTERNATIVELY, this software may be distributed under the terms of the
682 + * GNU General Public License ("GPL") version 2, as published by the Free
683 + * Software Foundation.
685 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
686 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
687 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
688 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
689 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
690 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
691 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
692 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
693 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
694 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
695 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
701 +#include "interface/vchi/vchi_cfg.h"
702 +#include "interface/vchi/vchi_common.h"
703 +#include "interface/vchi/connections/connection.h"
704 +#include "vchi_mh.h"
707 +/******************************************************************************
709 + *****************************************************************************/
711 +#define VCHI_BULK_ROUND_UP(x) ((((unsigned long)(x))+VCHI_BULK_ALIGN-1) & ~(VCHI_BULK_ALIGN-1))
712 +#define VCHI_BULK_ROUND_DOWN(x) (((unsigned long)(x)) & ~(VCHI_BULK_ALIGN-1))
713 +#define VCHI_BULK_ALIGN_NBYTES(x) (VCHI_BULK_ALIGNED(x) ? 0 : (VCHI_BULK_ALIGN - ((unsigned long)(x) & (VCHI_BULK_ALIGN-1))))
715 +#ifdef USE_VCHIQ_ARM
716 +#define VCHI_BULK_ALIGNED(x) 1
718 +#define VCHI_BULK_ALIGNED(x) (((unsigned long)(x) & (VCHI_BULK_ALIGN-1)) == 0)
721 +struct vchi_version {
723 + uint32_t version_min;
725 +#define VCHI_VERSION(v_) { v_, v_ }
726 +#define VCHI_VERSION_EX(v_, m_) { v_, m_ }
733 +} VCHI_MSG_VECTOR_TYPE_T;
735 +typedef struct vchi_msg_vector_ex {
737 + VCHI_MSG_VECTOR_TYPE_T type;
743 + VCHI_MEM_HANDLE_T handle;
748 + // an ordinary data pointer
751 + const void *vec_base;
755 + // a nested vector list
758 + struct vchi_msg_vector_ex *vec;
762 +} VCHI_MSG_VECTOR_EX_T;
765 +// Construct an entry in a msg vector for a pointer (p) of length (l)
766 +#define VCHI_VEC_POINTER(p,l) VCHI_VEC_POINTER, { { (VCHI_MEM_HANDLE_T)(p), (l) } }
768 +// Construct an entry in a msg vector for a message handle (h), starting at offset (o) of length (l)
769 +#define VCHI_VEC_HANDLE(h,o,l) VCHI_VEC_HANDLE, { { (h), (o), (l) } }
771 +// Macros to manipulate 'FOURCC' values
772 +#define MAKE_FOURCC(x) ((int32_t)( (x[0] << 24) | (x[1] << 16) | (x[2] << 8) | x[3] ))
773 +#define FOURCC_TO_CHAR(x) (x >> 24) & 0xFF,(x >> 16) & 0xFF,(x >> 8) & 0xFF, x & 0xFF
776 +// Opaque service information
777 +struct opaque_vchi_service_t;
779 +// Descriptor for a held message. Allocated by client, initialised by vchi_msg_hold,
780 +// vchi_msg_iter_hold or vchi_msg_iter_hold_next. Fields are for internal VCHI use only.
783 + struct opaque_vchi_service_t *service;
789 +// structure used to provide the information needed to open a server or a client
791 + struct vchi_version version;
792 + int32_t service_id;
793 + VCHI_CONNECTION_T *connection;
794 + uint32_t rx_fifo_size;
795 + uint32_t tx_fifo_size;
796 + VCHI_CALLBACK_T callback;
797 + void *callback_param;
798 + /* client intends to receive bulk transfers of
799 + odd lengths or into unaligned buffers */
800 + int32_t want_unaligned_bulk_rx;
801 + /* client intends to transmit bulk transfers of
802 + odd lengths or out of unaligned buffers */
803 + int32_t want_unaligned_bulk_tx;
804 + /* client wants to check CRCs on (bulk) xfers.
805 + Only needs to be set at 1 end - will do both directions. */
807 +} SERVICE_CREATION_T;
809 +// Opaque handle for a VCHI instance
810 +typedef struct opaque_vchi_instance_handle_t *VCHI_INSTANCE_T;
812 +// Opaque handle for a server or client
813 +typedef struct opaque_vchi_service_handle_t *VCHI_SERVICE_HANDLE_T;
815 +// Service registration & startup
816 +typedef void (*VCHI_SERVICE_INIT)(VCHI_INSTANCE_T initialise_instance, VCHI_CONNECTION_T **connections, uint32_t num_connections);
818 +typedef struct service_info_tag {
819 + const char * const vll_filename; /* VLL to load to start this service. This is an empty string if VLL is "static" */
820 + VCHI_SERVICE_INIT init; /* Service initialisation function */
821 + void *vll_handle; /* VLL handle; NULL when unloaded or a "static VLL" in build */
824 +/******************************************************************************
825 + Global funcs - implementation is specific to which side you are on (local / remote)
826 + *****************************************************************************/
832 +extern /*@observer@*/ VCHI_CONNECTION_T * vchi_create_connection( const VCHI_CONNECTION_API_T * function_table,
833 + const VCHI_MESSAGE_DRIVER_T * low_level);
836 +// Routine used to initialise the vchi on both local + remote connections
837 +extern int32_t vchi_initialise( VCHI_INSTANCE_T *instance_handle );
839 +extern int32_t vchi_exit( void );
841 +extern int32_t vchi_connect( VCHI_CONNECTION_T **connections,
842 + const uint32_t num_connections,
843 + VCHI_INSTANCE_T instance_handle );
845 +//When this is called, ensure that all services have no data pending.
846 +//Bulk transfers can remain 'queued'
847 +extern int32_t vchi_disconnect( VCHI_INSTANCE_T instance_handle );
849 +// Global control over bulk CRC checking
850 +extern int32_t vchi_crc_control( VCHI_CONNECTION_T *connection,
851 + VCHI_CRC_CONTROL_T control );
854 +extern void * vchi_allocate_buffer(VCHI_SERVICE_HANDLE_T handle, uint32_t *length);
855 +extern void vchi_free_buffer(VCHI_SERVICE_HANDLE_T handle, void *address);
856 +extern uint32_t vchi_current_time(VCHI_INSTANCE_T instance_handle);
859 +/******************************************************************************
861 + *****************************************************************************/
862 +// Routine to create a named service
863 +extern int32_t vchi_service_create( VCHI_INSTANCE_T instance_handle,
864 + SERVICE_CREATION_T *setup,
865 + VCHI_SERVICE_HANDLE_T *handle );
867 +// Routine to destroy a service
868 +extern int32_t vchi_service_destroy( const VCHI_SERVICE_HANDLE_T handle );
870 +// Routine to open a named service
871 +extern int32_t vchi_service_open( VCHI_INSTANCE_T instance_handle,
872 + SERVICE_CREATION_T *setup,
873 + VCHI_SERVICE_HANDLE_T *handle);
875 +extern int32_t vchi_get_peer_version( const VCHI_SERVICE_HANDLE_T handle,
876 + short *peer_version );
878 +// Routine to close a named service
879 +extern int32_t vchi_service_close( const VCHI_SERVICE_HANDLE_T handle );
881 +// Routine to increment ref count on a named service
882 +extern int32_t vchi_service_use( const VCHI_SERVICE_HANDLE_T handle );
884 +// Routine to decrement ref count on a named service
885 +extern int32_t vchi_service_release( const VCHI_SERVICE_HANDLE_T handle );
887 +// Routine to send a message across a service
888 +extern int32_t vchi_msg_queue( VCHI_SERVICE_HANDLE_T handle,
890 + uint32_t data_size,
891 + VCHI_FLAGS_T flags,
892 + void *msg_handle );
894 +// scatter-gather (vector) and send message
895 +int32_t vchi_msg_queuev_ex( VCHI_SERVICE_HANDLE_T handle,
896 + VCHI_MSG_VECTOR_EX_T *vector,
898 + VCHI_FLAGS_T flags,
899 + void *msg_handle );
901 +// legacy scatter-gather (vector) and send message, only handles pointers
902 +int32_t vchi_msg_queuev( VCHI_SERVICE_HANDLE_T handle,
903 + VCHI_MSG_VECTOR_T *vector,
905 + VCHI_FLAGS_T flags,
906 + void *msg_handle );
908 +// Routine to receive a msg from a service
909 +// Dequeue is equivalent to hold, copy into client buffer, release
910 +extern int32_t vchi_msg_dequeue( VCHI_SERVICE_HANDLE_T handle,
912 + uint32_t max_data_size_to_read,
913 + uint32_t *actual_msg_size,
914 + VCHI_FLAGS_T flags );
916 +// Routine to look at a message in place.
917 +// The message is not dequeued, so a subsequent call to peek or dequeue
918 +// will return the same message.
919 +extern int32_t vchi_msg_peek( VCHI_SERVICE_HANDLE_T handle,
921 + uint32_t *msg_size,
922 + VCHI_FLAGS_T flags );
924 +// Routine to remove a message after it has been read in place with peek
925 +// The first message on the queue is dequeued.
926 +extern int32_t vchi_msg_remove( VCHI_SERVICE_HANDLE_T handle );
928 +// Routine to look at a message in place.
929 +// The message is dequeued, so the caller is left holding it; the descriptor is
930 +// filled in and must be released when the user has finished with the message.
931 +extern int32_t vchi_msg_hold( VCHI_SERVICE_HANDLE_T handle,
932 + void **data, // } may be NULL, as info can be
933 + uint32_t *msg_size, // } obtained from HELD_MSG_T
934 + VCHI_FLAGS_T flags,
935 + VCHI_HELD_MSG_T *message_descriptor );
937 +// Initialise an iterator to look through messages in place
938 +extern int32_t vchi_msg_look_ahead( VCHI_SERVICE_HANDLE_T handle,
939 + VCHI_MSG_ITER_T *iter,
940 + VCHI_FLAGS_T flags );
942 +/******************************************************************************
943 + Global service support API - operations on held messages and message iterators
944 + *****************************************************************************/
946 +// Routine to get the address of a held message
947 +extern void *vchi_held_msg_ptr( const VCHI_HELD_MSG_T *message );
949 +// Routine to get the size of a held message
950 +extern int32_t vchi_held_msg_size( const VCHI_HELD_MSG_T *message );
952 +// Routine to get the transmit timestamp as written into the header by the peer
953 +extern uint32_t vchi_held_msg_tx_timestamp( const VCHI_HELD_MSG_T *message );
955 +// Routine to get the reception timestamp, written as we parsed the header
956 +extern uint32_t vchi_held_msg_rx_timestamp( const VCHI_HELD_MSG_T *message );
958 +// Routine to release a held message after it has been processed
959 +extern int32_t vchi_held_msg_release( VCHI_HELD_MSG_T *message );
961 +// Indicates whether the iterator has a next message.
962 +extern int32_t vchi_msg_iter_has_next( const VCHI_MSG_ITER_T *iter );
964 +// Return the pointer and length for the next message and advance the iterator.
965 +extern int32_t vchi_msg_iter_next( VCHI_MSG_ITER_T *iter,
967 + uint32_t *msg_size );
969 +// Remove the last message returned by vchi_msg_iter_next.
970 +// Can only be called once after each call to vchi_msg_iter_next.
971 +extern int32_t vchi_msg_iter_remove( VCHI_MSG_ITER_T *iter );
973 +// Hold the last message returned by vchi_msg_iter_next.
974 +// Can only be called once after each call to vchi_msg_iter_next.
975 +extern int32_t vchi_msg_iter_hold( VCHI_MSG_ITER_T *iter,
976 + VCHI_HELD_MSG_T *message );
978 +// Return information for the next message, and hold it, advancing the iterator.
979 +extern int32_t vchi_msg_iter_hold_next( VCHI_MSG_ITER_T *iter,
980 + void **data, // } may be NULL
981 + uint32_t *msg_size, // }
982 + VCHI_HELD_MSG_T *message );
985 +/******************************************************************************
987 + *****************************************************************************/
989 +// Routine to prepare interface for a transfer from the other side
990 +extern int32_t vchi_bulk_queue_receive( VCHI_SERVICE_HANDLE_T handle,
992 + uint32_t data_size,
993 + VCHI_FLAGS_T flags,
994 + void *transfer_handle );
997 +// Prepare interface for a transfer from the other side into relocatable memory.
998 +int32_t vchi_bulk_queue_receive_reloc( const VCHI_SERVICE_HANDLE_T handle,
999 + VCHI_MEM_HANDLE_T h_dst,
1001 + uint32_t data_size,
1002 + const VCHI_FLAGS_T flags,
1003 + void * const bulk_handle );
1005 +// Routine to queue up data ready for transfer to the other side (once they have signalled they are ready)
1006 +extern int32_t vchi_bulk_queue_transmit( VCHI_SERVICE_HANDLE_T handle,
1007 + const void *data_src,
1008 + uint32_t data_size,
1009 + VCHI_FLAGS_T flags,
1010 + void *transfer_handle );
1013 +/******************************************************************************
1014 + Configuration plumbing
1015 + *****************************************************************************/
1017 +// function prototypes for the different mid layers (the state info gives the different physical connections)
1018 +extern const VCHI_CONNECTION_API_T *single_get_func_table( void );
1019 +//extern const VCHI_CONNECTION_API_T *local_server_get_func_table( void );
1020 +//extern const VCHI_CONNECTION_API_T *local_client_get_func_table( void );
1022 +// declare all message drivers here
1023 +const VCHI_MESSAGE_DRIVER_T *vchi_mphi_message_driver_func_table( void );
1029 +extern int32_t vchi_bulk_queue_transmit_reloc( VCHI_SERVICE_HANDLE_T handle,
1030 + VCHI_MEM_HANDLE_T h_src,
1032 + uint32_t data_size,
1033 + VCHI_FLAGS_T flags,
1034 + void *transfer_handle );
1035 +#endif /* VCHI_H_ */
1037 +/****************************** End of file **********************************/
1039 +++ b/drivers/misc/vc04_services/interface/vchi/vchi_cfg.h
1042 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
1044 + * Redistribution and use in source and binary forms, with or without
1045 + * modification, are permitted provided that the following conditions
1047 + * 1. Redistributions of source code must retain the above copyright
1048 + * notice, this list of conditions, and the following disclaimer,
1049 + * without modification.
1050 + * 2. Redistributions in binary form must reproduce the above copyright
1051 + * notice, this list of conditions and the following disclaimer in the
1052 + * documentation and/or other materials provided with the distribution.
1053 + * 3. The names of the above-listed copyright holders may not be used
1054 + * to endorse or promote products derived from this software without
1055 + * specific prior written permission.
1057 + * ALTERNATIVELY, this software may be distributed under the terms of the
1058 + * GNU General Public License ("GPL") version 2, as published by the Free
1059 + * Software Foundation.
1061 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
1062 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
1063 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
1064 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
1065 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
1066 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
1067 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
1068 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
1069 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
1070 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1071 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1074 +#ifndef VCHI_CFG_H_
1075 +#define VCHI_CFG_H_
1077 +/****************************************************************************************
1078 + * Defines in this first section are part of the VCHI API and may be examined by VCHI
1080 + ***************************************************************************************/
1082 +/* Required alignment of base addresses for bulk transfer, if unaligned transfers are not enabled */
1083 +/* Really determined by the message driver, and should be available from a run-time call. */
1084 +#ifndef VCHI_BULK_ALIGN
1085 +# if __VCCOREVER__ >= 0x04000000
1086 +# define VCHI_BULK_ALIGN 32 // Allows for the need to do cache cleans
1088 +# define VCHI_BULK_ALIGN 16
1092 +/* Required length multiple for bulk transfers, if unaligned transfers are not enabled */
1093 +/* May be less than or greater than VCHI_BULK_ALIGN */
1094 +/* Really determined by the message driver, and should be available from a run-time call. */
1095 +#ifndef VCHI_BULK_GRANULARITY
1096 +# if __VCCOREVER__ >= 0x04000000
1097 +# define VCHI_BULK_GRANULARITY 32 // Allows for the need to do cache cleans
1099 +# define VCHI_BULK_GRANULARITY 16
1103 +/* The largest possible message to be queued with vchi_msg_queue. */
1104 +#ifndef VCHI_MAX_MSG_SIZE
1105 +# if defined VCHI_LOCAL_HOST_PORT
1106 +# define VCHI_MAX_MSG_SIZE 16384 // makes file transfers fast, but should they be using bulk?
1108 +# define VCHI_MAX_MSG_SIZE 4096 // NOTE: THIS MUST BE LARGER THAN OR EQUAL TO THE SIZE OF THE KHRONOS MERGE BUFFER!!
1112 +/******************************************************************************************
1113 + * Defines below are system configuration options, and should not be used by VCHI services.
1114 + *****************************************************************************************/
1116 +/* How many connections can we support? A localhost implementation uses 2 connections,
1117 + * 1 for host-app, 1 for VMCS, and these are hooked together by a loopback MPHI VCFW
1119 +#ifndef VCHI_MAX_NUM_CONNECTIONS
1120 +# define VCHI_MAX_NUM_CONNECTIONS 3
1123 +/* How many services can we open per connection? Extending this doesn't cost processing time, just a small
1124 + * amount of static memory. */
1125 +#ifndef VCHI_MAX_SERVICES_PER_CONNECTION
1126 +# define VCHI_MAX_SERVICES_PER_CONNECTION 36
1129 +/* Adjust if using a message driver that supports more logical TX channels */
1130 +#ifndef VCHI_MAX_BULK_TX_CHANNELS_PER_CONNECTION
1131 +# define VCHI_MAX_BULK_TX_CHANNELS_PER_CONNECTION 9 // 1 MPHI + 8 CCP2 logical channels
1134 +/* Adjust if using a message driver that supports more logical RX channels */
1135 +#ifndef VCHI_MAX_BULK_RX_CHANNELS_PER_CONNECTION
1136 +# define VCHI_MAX_BULK_RX_CHANNELS_PER_CONNECTION 1 // 1 MPHI
1139 +/* How many receive slots do we use. This times VCHI_MAX_MSG_SIZE gives the effective
1140 + * receive queue space, less message headers. */
1141 +#ifndef VCHI_NUM_READ_SLOTS
1142 +# if defined(VCHI_LOCAL_HOST_PORT)
1143 +# define VCHI_NUM_READ_SLOTS 4
1145 +# define VCHI_NUM_READ_SLOTS 48
1149 +/* Do we utilise overrun facility for receive message slots? Can aid peer transmit
1150 + * performance. Only define on VideoCore end, talking to host.
1152 +//#define VCHI_MSG_RX_OVERRUN
1154 +/* How many transmit slots do we use. Generally don't need many, as the hardware driver
1155 + * underneath VCHI will usually have its own buffering. */
1156 +#ifndef VCHI_NUM_WRITE_SLOTS
1157 +# define VCHI_NUM_WRITE_SLOTS 4
1160 +/* If a service has held or queued received messages in VCHI_XOFF_THRESHOLD or more slots,
1161 + * then it's taking up too much buffer space, and the peer service will be told to stop
1162 + * transmitting with an XOFF message. For this to be effective, the VCHI_NUM_READ_SLOTS
1163 + * needs to be considerably bigger than VCHI_NUM_WRITE_SLOTS, or the transmit latency
1165 +#ifndef VCHI_XOFF_THRESHOLD
1166 +# define VCHI_XOFF_THRESHOLD (VCHI_NUM_READ_SLOTS / 2)
1169 +/* After we've sent an XOFF, the peer will be told to resume transmission once the local
1170 + * service has dequeued/released enough messages that it's now occupying
1171 + * VCHI_XON_THRESHOLD slots or fewer. */
1172 +#ifndef VCHI_XON_THRESHOLD
1173 +# define VCHI_XON_THRESHOLD (VCHI_NUM_READ_SLOTS / 4)
1176 +/* A size below which a bulk transfer omits the handshake completely and always goes
1177 + * via the message channel, if bulk auxiliary is being sent on that service. (The user
1178 + * can guarantee this by enabling unaligned transmits).
1180 +#ifndef VCHI_MIN_BULK_SIZE
1181 +# define VCHI_MIN_BULK_SIZE ( VCHI_MAX_MSG_SIZE / 2 < 4096 ? VCHI_MAX_MSG_SIZE / 2 : 4096 )
1184 +/* Maximum size of bulk transmission chunks, for each interface type. A trade-off between
1185 + * speed and latency; the smaller the chunk size the better chance of messages and other
1186 + * bulk transmissions getting in when big bulk transfers are happening. Set to 0 to not
1187 + * break transmissions into chunks.
1189 +#ifndef VCHI_MAX_BULK_CHUNK_SIZE_MPHI
1190 +# define VCHI_MAX_BULK_CHUNK_SIZE_MPHI (16 * 1024)
1193 +/* NB Chunked CCP2 transmissions violate the letter of the CCP2 spec by using "JPEG8" mode
1194 + * with multiple-line frames. Only use if the receiver can cope. */
1195 +#ifndef VCHI_MAX_BULK_CHUNK_SIZE_CCP2
1196 +# define VCHI_MAX_BULK_CHUNK_SIZE_CCP2 0
1199 +/* How many TX messages can we have pending in our transmit slots. Once exhausted,
1200 + * vchi_msg_queue will be blocked. */
1201 +#ifndef VCHI_TX_MSG_QUEUE_SIZE
1202 +# define VCHI_TX_MSG_QUEUE_SIZE 256
1205 +/* How many RX messages can we have parsed in the receive slots. Once exhausted, parsing
1206 + * will be suspended until older messages are dequeued/released. */
1207 +#ifndef VCHI_RX_MSG_QUEUE_SIZE
1208 +# define VCHI_RX_MSG_QUEUE_SIZE 256
1211 +/* Really should be able to cope if we run out of received message descriptors, by
1212 + * suspending parsing as the comment above says, but we don't. This sweeps the issue
1213 + * under the carpet. */
1214 +#if VCHI_RX_MSG_QUEUE_SIZE < (VCHI_MAX_MSG_SIZE/16 + 1) * VCHI_NUM_READ_SLOTS
1215 +# undef VCHI_RX_MSG_QUEUE_SIZE
1216 +# define VCHI_RX_MSG_QUEUE_SIZE (VCHI_MAX_MSG_SIZE/16 + 1) * VCHI_NUM_READ_SLOTS
1219 +/* How many bulk transmits can we have pending. Once exhausted, vchi_bulk_queue_transmit
1220 + * will be blocked. */
1221 +#ifndef VCHI_TX_BULK_QUEUE_SIZE
1222 +# define VCHI_TX_BULK_QUEUE_SIZE 64
1225 +/* How many bulk receives can we have pending. Once exhausted, vchi_bulk_queue_receive
1226 + * will be blocked. */
1227 +#ifndef VCHI_RX_BULK_QUEUE_SIZE
1228 +# define VCHI_RX_BULK_QUEUE_SIZE 64
1231 +/* A limit on how many outstanding bulk requests we expect the peer to give us. If
1232 + * the peer asks for more than this, VCHI will fail and assert. The number is determined
1233 + * by the peer's hardware - it's the number of outstanding requests that can be queued
1234 + * on all bulk channels. VC3's MPHI peripheral allows 16. */
1235 +#ifndef VCHI_MAX_PEER_BULK_REQUESTS
1236 +# define VCHI_MAX_PEER_BULK_REQUESTS 32
1239 +/* Define VCHI_CCP2TX_MANUAL_POWER if the host tells us when to turn the CCP2
1240 + * transmitter on and off.
1242 +/*#define VCHI_CCP2TX_MANUAL_POWER*/
1244 +#ifndef VCHI_CCP2TX_MANUAL_POWER
1246 +/* Timeout (in milliseconds) for putting the CCP2TX interface into IDLE state. Set
1247 + * negative for no IDLE.
1249 +# ifndef VCHI_CCP2TX_IDLE_TIMEOUT
1250 +# define VCHI_CCP2TX_IDLE_TIMEOUT 5
1253 +/* Timeout (in milliseconds) for putting the CCP2TX interface into OFF state. Set
1254 + * negative for no OFF.
1256 +# ifndef VCHI_CCP2TX_OFF_TIMEOUT
1257 +# define VCHI_CCP2TX_OFF_TIMEOUT 1000
1260 +#endif /* VCHI_CCP2TX_MANUAL_POWER */
1262 +#endif /* VCHI_CFG_H_ */
1264 +/****************************** End of file **********************************/
1266 +++ b/drivers/misc/vc04_services/interface/vchi/vchi_cfg_internal.h
1269 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
1271 + * Redistribution and use in source and binary forms, with or without
1272 + * modification, are permitted provided that the following conditions
1274 + * 1. Redistributions of source code must retain the above copyright
1275 + * notice, this list of conditions, and the following disclaimer,
1276 + * without modification.
1277 + * 2. Redistributions in binary form must reproduce the above copyright
1278 + * notice, this list of conditions and the following disclaimer in the
1279 + * documentation and/or other materials provided with the distribution.
1280 + * 3. The names of the above-listed copyright holders may not be used
1281 + * to endorse or promote products derived from this software without
1282 + * specific prior written permission.
1284 + * ALTERNATIVELY, this software may be distributed under the terms of the
1285 + * GNU General Public License ("GPL") version 2, as published by the Free
1286 + * Software Foundation.
1288 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
1289 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
1290 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
1291 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
1292 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
1293 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
1294 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
1295 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
1296 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
1297 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1298 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1301 +#ifndef VCHI_CFG_INTERNAL_H_
1302 +#define VCHI_CFG_INTERNAL_H_
1304 +/****************************************************************************************
1305 + * Control optimisation attempts.
1306 + ***************************************************************************************/
1308 +// Don't use lots of short-term locks - use great long ones, reducing the overall locks-per-second
1309 +#define VCHI_COARSE_LOCKING
1311 +// Avoid lock then unlock on exit from blocking queue operations (msg tx, bulk rx/tx)
1312 +// (only relevant if VCHI_COARSE_LOCKING)
1313 +#define VCHI_ELIDE_BLOCK_EXIT_LOCK
1315 +// Avoid lock on non-blocking peek
1316 +// (only relevant if VCHI_COARSE_LOCKING)
1317 +#define VCHI_AVOID_PEEK_LOCK
1319 +// Use one slot-handler thread per connection, rather than 1 thread dealing with all connections in rotation.
1320 +#define VCHI_MULTIPLE_HANDLER_THREADS
1322 +// Put free descriptors onto the head of the free queue, rather than the tail, so that we don't thrash
1323 +// our way through the pool of descriptors.
1324 +#define VCHI_PUSH_FREE_DESCRIPTORS_ONTO_HEAD
1326 +// Don't issue a MSG_AVAILABLE callback for every single message. Possibly only safe if VCHI_COARSE_LOCKING.
1327 +#define VCHI_FEWER_MSG_AVAILABLE_CALLBACKS
1329 +// Don't use message descriptors for TX messages that don't need them
1330 +#define VCHI_MINIMISE_TX_MSG_DESCRIPTORS
1332 +// Nano-locks for multiqueue
1333 +//#define VCHI_MQUEUE_NANOLOCKS
1335 +// Lock-free(er) dequeuing
1336 +//#define VCHI_RX_NANOLOCKS
1338 +#endif /*VCHI_CFG_INTERNAL_H_*/
1340 +++ b/drivers/misc/vc04_services/interface/vchi/vchi_common.h
1343 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
1345 + * Redistribution and use in source and binary forms, with or without
1346 + * modification, are permitted provided that the following conditions
1348 + * 1. Redistributions of source code must retain the above copyright
1349 + * notice, this list of conditions, and the following disclaimer,
1350 + * without modification.
1351 + * 2. Redistributions in binary form must reproduce the above copyright
1352 + * notice, this list of conditions and the following disclaimer in the
1353 + * documentation and/or other materials provided with the distribution.
1354 + * 3. The names of the above-listed copyright holders may not be used
1355 + * to endorse or promote products derived from this software without
1356 + * specific prior written permission.
1358 + * ALTERNATIVELY, this software may be distributed under the terms of the
1359 + * GNU General Public License ("GPL") version 2, as published by the Free
1360 + * Software Foundation.
1362 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
1363 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
1364 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
1365 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
1366 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
1367 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
1368 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
1369 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
1370 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
1371 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1372 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1375 +#ifndef VCHI_COMMON_H_
1376 +#define VCHI_COMMON_H_
1379 +//flags used when sending messages (must be bitmapped)
1382 + VCHI_FLAGS_NONE = 0x0,
1383 + VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE = 0x1, // waits for message to be received, or sent (NB. not the same as being seen on other side)
1384 + VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE = 0x2, // run a callback when message sent
1385 + VCHI_FLAGS_BLOCK_UNTIL_QUEUED = 0x4, // return once the transfer is in a queue ready to go
1386 + VCHI_FLAGS_ALLOW_PARTIAL = 0x8,
1387 + VCHI_FLAGS_BLOCK_UNTIL_DATA_READ = 0x10,
1388 + VCHI_FLAGS_CALLBACK_WHEN_DATA_READ = 0x20,
1390 + VCHI_FLAGS_ALIGN_SLOT = 0x000080, // internal use only
1391 + VCHI_FLAGS_BULK_AUX_QUEUED = 0x010000, // internal use only
1392 + VCHI_FLAGS_BULK_AUX_COMPLETE = 0x020000, // internal use only
1393 + VCHI_FLAGS_BULK_DATA_QUEUED = 0x040000, // internal use only
1394 + VCHI_FLAGS_BULK_DATA_COMPLETE = 0x080000, // internal use only
1395 + VCHI_FLAGS_INTERNAL = 0xFF0000
1398 +// constants for vchi_crc_control()
1400 + VCHI_CRC_NOTHING = -1,
1401 + VCHI_CRC_PER_SERVICE = 0,
1402 + VCHI_CRC_EVERYTHING = 1,
1403 +} VCHI_CRC_CONTROL_T;
1405 +//callback reasons when an event occurs on a service
1408 + VCHI_CALLBACK_REASON_MIN,
1410 + //This indicates that there is data available
1411 + //handle is the msg id that was transmitted with the data
1412 + // When a message is received and there was no FULL message available previously, send callback
1413 + // Tasks get kicked by the callback, reset their event and try and read from the fifo until it fails
1414 + VCHI_CALLBACK_MSG_AVAILABLE,
1415 + VCHI_CALLBACK_MSG_SENT,
1416 + VCHI_CALLBACK_MSG_SPACE_AVAILABLE, // XXX not yet implemented
1418 + // This indicates that a transfer from the other side has completed
1419 + VCHI_CALLBACK_BULK_RECEIVED,
1420 + //This indicates that data queued up to be sent has now gone
1421 + //handle is the msg id that was used when sending the data
1422 + VCHI_CALLBACK_BULK_SENT,
1423 + VCHI_CALLBACK_BULK_RX_SPACE_AVAILABLE, // XXX not yet implemented
1424 + VCHI_CALLBACK_BULK_TX_SPACE_AVAILABLE, // XXX not yet implemented
1426 + VCHI_CALLBACK_SERVICE_CLOSED,
1428 + // this side has sent XOFF to peer due to lack of data consumption by service
1429 + // (suggests the service may need to take some recovery action if it has
1430 + // been deliberately holding off consuming data)
1431 + VCHI_CALLBACK_SENT_XOFF,
1432 + VCHI_CALLBACK_SENT_XON,
1434 + // indicates that a bulk transfer has finished reading the source buffer
1435 + VCHI_CALLBACK_BULK_DATA_READ,
1437 + // power notification events (currently host side only)
1438 + VCHI_CALLBACK_PEER_OFF,
1439 + VCHI_CALLBACK_PEER_SUSPENDED,
1440 + VCHI_CALLBACK_PEER_ON,
1441 + VCHI_CALLBACK_PEER_RESUMED,
1442 + VCHI_CALLBACK_FORCED_POWER_OFF,
1444 +#ifdef USE_VCHIQ_ARM
1445 + // some extra notifications provided by vchiq_arm
1446 + VCHI_CALLBACK_SERVICE_OPENED,
1447 + VCHI_CALLBACK_BULK_RECEIVE_ABORTED,
1448 + VCHI_CALLBACK_BULK_TRANSMIT_ABORTED,
1451 + VCHI_CALLBACK_REASON_MAX
1452 +} VCHI_CALLBACK_REASON_T;
1454 +//Callback used by all services / bulk transfers
1455 +typedef void (*VCHI_CALLBACK_T)( void *callback_param, //my service local param
1456 + VCHI_CALLBACK_REASON_T reason,
1457 + void *handle ); //for transmitting msg's only
1462 + * Define vector struct for scatter-gather (vector) operations
1463 + * Vectors can be nested - if a vector element has negative length, then
1464 + * the data pointer is treated as pointing to another vector array, with
1465 + * '-vec_len' elements. Thus to append a header onto an existing vector,
1466 + * you can do this:
1468 + * void foo(const VCHI_MSG_VECTOR_T *v, int n)
1470 + * VCHI_MSG_VECTOR_T nv[2];
1471 + * nv[0].vec_base = my_header;
1472 + * nv[0].vec_len = sizeof my_header;
1473 + * nv[1].vec_base = v;
1474 + * nv[1].vec_len = -n;
1478 +typedef struct vchi_msg_vector {
1479 + const void *vec_base;
1481 +} VCHI_MSG_VECTOR_T;
1483 +// Opaque type for a connection API
1484 +typedef struct opaque_vchi_connection_api_t VCHI_CONNECTION_API_T;
1486 +// Opaque type for a message driver
1487 +typedef struct opaque_vchi_message_driver_t VCHI_MESSAGE_DRIVER_T;
1490 +// Iterator structure for reading ahead through received message queue. Allocated by client,
1491 +// initialised by vchi_msg_look_ahead. Fields are for internal VCHI use only.
1492 +// Iterates over messages in queue at the instant of the call to vchi_msg_look_ahead -
1493 +// will not proceed to messages received since. Behaviour is undefined if an iterator
1494 +// is used again after messages for that service are removed/dequeued by any
1495 +// means other than vchi_msg_iter_... calls on the iterator itself.
1497 + struct opaque_vchi_service_t *service;
1504 +#endif // VCHI_COMMON_H_
1506 +++ b/drivers/misc/vc04_services/interface/vchi/vchi_mh.h
1509 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
1511 + * Redistribution and use in source and binary forms, with or without
1512 + * modification, are permitted provided that the following conditions
1514 + * 1. Redistributions of source code must retain the above copyright
1515 + * notice, this list of conditions, and the following disclaimer,
1516 + * without modification.
1517 + * 2. Redistributions in binary form must reproduce the above copyright
1518 + * notice, this list of conditions and the following disclaimer in the
1519 + * documentation and/or other materials provided with the distribution.
1520 + * 3. The names of the above-listed copyright holders may not be used
1521 + * to endorse or promote products derived from this software without
1522 + * specific prior written permission.
1524 + * ALTERNATIVELY, this software may be distributed under the terms of the
1525 + * GNU General Public License ("GPL") version 2, as published by the Free
1526 + * Software Foundation.
1528 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
1529 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
1530 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
1531 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
1532 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
1533 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
1534 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
1535 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
1536 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
1537 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1538 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1544 +#include <linux/types.h>
1546 +typedef int32_t VCHI_MEM_HANDLE_T;
1547 +#define VCHI_MEM_HANDLE_INVALID 0
1551 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq.h
1554 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
1556 + * Redistribution and use in source and binary forms, with or without
1557 + * modification, are permitted provided that the following conditions
1559 + * 1. Redistributions of source code must retain the above copyright
1560 + * notice, this list of conditions, and the following disclaimer,
1561 + * without modification.
1562 + * 2. Redistributions in binary form must reproduce the above copyright
1563 + * notice, this list of conditions and the following disclaimer in the
1564 + * documentation and/or other materials provided with the distribution.
1565 + * 3. The names of the above-listed copyright holders may not be used
1566 + * to endorse or promote products derived from this software without
1567 + * specific prior written permission.
1569 + * ALTERNATIVELY, this software may be distributed under the terms of the
1570 + * GNU General Public License ("GPL") version 2, as published by the Free
1571 + * Software Foundation.
1573 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
1574 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
1575 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
1576 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
1577 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
1578 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
1579 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
1580 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
1581 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
1582 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1583 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1586 +#ifndef VCHIQ_VCHIQ_H
1587 +#define VCHIQ_VCHIQ_H
1589 +#include "vchiq_if.h"
1590 +#include "vchiq_util.h"
1595 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835.h
1598 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
1600 + * Redistribution and use in source and binary forms, with or without
1601 + * modification, are permitted provided that the following conditions
1603 + * 1. Redistributions of source code must retain the above copyright
1604 + * notice, this list of conditions, and the following disclaimer,
1605 + * without modification.
1606 + * 2. Redistributions in binary form must reproduce the above copyright
1607 + * notice, this list of conditions and the following disclaimer in the
1608 + * documentation and/or other materials provided with the distribution.
1609 + * 3. The names of the above-listed copyright holders may not be used
1610 + * to endorse or promote products derived from this software without
1611 + * specific prior written permission.
1613 + * ALTERNATIVELY, this software may be distributed under the terms of the
1614 + * GNU General Public License ("GPL") version 2, as published by the Free
1615 + * Software Foundation.
1617 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
1618 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
1619 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
1620 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
1621 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
1622 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
1623 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
1624 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
1625 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
1626 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1627 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1630 +#ifndef VCHIQ_2835_H
1631 +#define VCHIQ_2835_H
1633 +#include "vchiq_pagelist.h"
1635 +#define VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX 0
1636 +#define VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX 1
1638 +#endif /* VCHIQ_2835_H */
1640 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
1643 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
1645 + * Redistribution and use in source and binary forms, with or without
1646 + * modification, are permitted provided that the following conditions
1648 + * 1. Redistributions of source code must retain the above copyright
1649 + * notice, this list of conditions, and the following disclaimer,
1650 + * without modification.
1651 + * 2. Redistributions in binary form must reproduce the above copyright
1652 + * notice, this list of conditions and the following disclaimer in the
1653 + * documentation and/or other materials provided with the distribution.
1654 + * 3. The names of the above-listed copyright holders may not be used
1655 + * to endorse or promote products derived from this software without
1656 + * specific prior written permission.
1658 + * ALTERNATIVELY, this software may be distributed under the terms of the
1659 + * GNU General Public License ("GPL") version 2, as published by the Free
1660 + * Software Foundation.
1662 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
1663 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
1664 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
1665 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
1666 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
1667 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
1668 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
1669 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
1670 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
1671 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1672 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1675 +#include <linux/kernel.h>
1676 +#include <linux/types.h>
1677 +#include <linux/errno.h>
1678 +#include <linux/interrupt.h>
1679 +#include <linux/irq.h>
1680 +#include <linux/pagemap.h>
1681 +#include <linux/dma-mapping.h>
1682 +#include <linux/version.h>
1683 +#include <linux/io.h>
1684 +#include <linux/uaccess.h>
1685 +#include <asm/pgtable.h>
1687 +#include <mach/irqs.h>
1689 +#include <mach/platform.h>
1690 +#include <mach/vcio.h>
1692 +#define TOTAL_SLOTS (VCHIQ_SLOT_ZERO_SLOTS + 2 * 32)
1694 +#define VCHIQ_DOORBELL_IRQ IRQ_ARM_DOORBELL_0
1695 +#define VCHIQ_ARM_ADDRESS(x) ((void *)__virt_to_bus((unsigned)x))
1697 +#include "vchiq_arm.h"
1698 +#include "vchiq_2835.h"
1699 +#include "vchiq_connected.h"
1701 +#define MAX_FRAGMENTS (VCHIQ_NUM_CURRENT_BULKS * 2)
1703 +typedef struct vchiq_2835_state_struct {
1705 + VCHIQ_ARM_STATE_T arm_state;
1706 +} VCHIQ_2835_ARM_STATE_T;
1708 +static char *g_slot_mem;
1709 +static int g_slot_mem_size;
1710 +dma_addr_t g_slot_phys;
1711 +static FRAGMENTS_T *g_fragments_base;
1712 +static FRAGMENTS_T *g_free_fragments;
1713 +struct semaphore g_free_fragments_sema;
1715 +extern int vchiq_arm_log_level;
1717 +static DEFINE_SEMAPHORE(g_free_fragments_mutex);
1720 +vchiq_doorbell_irq(int irq, void *dev_id);
1723 +create_pagelist(char __user *buf, size_t count, unsigned short type,
1724 + struct task_struct *task, PAGELIST_T ** ppagelist);
1727 +free_pagelist(PAGELIST_T *pagelist, int actual);
1730 +vchiq_platform_init(VCHIQ_STATE_T *state)
1732 + VCHIQ_SLOT_ZERO_T *vchiq_slot_zero;
1733 + int frag_mem_size;
1737 + /* Allocate space for the channels in coherent memory */
1738 + g_slot_mem_size = PAGE_ALIGN(TOTAL_SLOTS * VCHIQ_SLOT_SIZE);
1739 + frag_mem_size = PAGE_ALIGN(sizeof(FRAGMENTS_T) * MAX_FRAGMENTS);
1741 + g_slot_mem = dma_alloc_coherent(NULL, g_slot_mem_size + frag_mem_size,
1742 + &g_slot_phys, GFP_ATOMIC);
1744 + if (!g_slot_mem) {
1745 + vchiq_log_error(vchiq_arm_log_level,
1746 + "Unable to allocate channel memory");
1748 + goto failed_alloc;
1751 + WARN_ON(((int)g_slot_mem & (PAGE_SIZE - 1)) != 0);
1753 + vchiq_slot_zero = vchiq_init_slots(g_slot_mem, g_slot_mem_size);
1754 + if (!vchiq_slot_zero) {
1756 + goto failed_init_slots;
1759 + vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX] =
1760 + (int)g_slot_phys + g_slot_mem_size;
1761 + vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX] =
1764 + g_fragments_base = (FRAGMENTS_T *)(g_slot_mem + g_slot_mem_size);
1765 + g_slot_mem_size += frag_mem_size;
1767 + g_free_fragments = g_fragments_base;
1768 + for (i = 0; i < (MAX_FRAGMENTS - 1); i++) {
1769 + *(FRAGMENTS_T **)&g_fragments_base[i] =
1770 + &g_fragments_base[i + 1];
1772 + *(FRAGMENTS_T **)&g_fragments_base[i] = NULL;
1773 + sema_init(&g_free_fragments_sema, MAX_FRAGMENTS);
1775 + if (vchiq_init_state(state, vchiq_slot_zero, 0/*slave*/) !=
1778 + goto failed_vchiq_init;
1781 + err = request_irq(VCHIQ_DOORBELL_IRQ, vchiq_doorbell_irq,
1782 + IRQF_IRQPOLL, "VCHIQ doorbell",
1785 + vchiq_log_error(vchiq_arm_log_level, "%s: failed to register "
1786 + "irq=%d err=%d", __func__,
1787 + VCHIQ_DOORBELL_IRQ, err);
1788 + goto failed_request_irq;
1791 + /* Send the base address of the slots to VideoCore */
1793 + dsb(); /* Ensure all writes have completed */
1795 + bcm_mailbox_write(MBOX_CHAN_VCHIQ, (unsigned int)g_slot_phys);
1797 + vchiq_log_info(vchiq_arm_log_level,
1798 + "vchiq_init - done (slots %x, phys %x)",
1799 + (unsigned int)vchiq_slot_zero, g_slot_phys);
1801 + vchiq_call_connected_callbacks();
1805 +failed_request_irq:
1808 + dma_free_coherent(NULL, g_slot_mem_size, g_slot_mem, g_slot_phys);
1815 +vchiq_platform_exit(VCHIQ_STATE_T *state)
1817 + free_irq(VCHIQ_DOORBELL_IRQ, state);
1818 + dma_free_coherent(NULL, g_slot_mem_size,
1819 + g_slot_mem, g_slot_phys);
1824 +vchiq_platform_init_state(VCHIQ_STATE_T *state)
1826 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
1827 + state->platform_state = kzalloc(sizeof(VCHIQ_2835_ARM_STATE_T), GFP_KERNEL);
1828 + ((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->inited = 1;
1829 + status = vchiq_arm_init_state(state, &((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->arm_state);
1830 + if(status != VCHIQ_SUCCESS)
1832 + ((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->inited = 0;
1838 +vchiq_platform_get_arm_state(VCHIQ_STATE_T *state)
1840 + if(!((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->inited)
1844 + return &((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->arm_state;
1848 +remote_event_signal(REMOTE_EVENT_T *event)
1854 + dsb(); /* data barrier operation */
1856 + if (event->armed) {
1857 + /* trigger vc interrupt */
1859 + writel(0, __io_address(ARM_0_BELL2));
1864 +vchiq_copy_from_user(void *dst, const void *src, int size)
1866 + if ((uint32_t)src < TASK_SIZE) {
1867 + return copy_from_user(dst, src, size);
1869 + memcpy(dst, src, size);
1875 +vchiq_prepare_bulk_data(VCHIQ_BULK_T *bulk, VCHI_MEM_HANDLE_T memhandle,
1876 + void *offset, int size, int dir)
1878 + PAGELIST_T *pagelist;
1881 + WARN_ON(memhandle != VCHI_MEM_HANDLE_INVALID);
1883 + ret = create_pagelist((char __user *)offset, size,
1884 + (dir == VCHIQ_BULK_RECEIVE)
1890 + return VCHIQ_ERROR;
1892 + bulk->handle = memhandle;
1893 + bulk->data = VCHIQ_ARM_ADDRESS(pagelist);
1895 + /* Store the pagelist address in remote_data, which isn't used by the
1897 + bulk->remote_data = pagelist;
1899 + return VCHIQ_SUCCESS;
1903 +vchiq_complete_bulk(VCHIQ_BULK_T *bulk)
1905 + if (bulk && bulk->remote_data && bulk->actual)
1906 + free_pagelist((PAGELIST_T *)bulk->remote_data, bulk->actual);
1910 +vchiq_transfer_bulk(VCHIQ_BULK_T *bulk)
1913 + * This should only be called on the master (VideoCore) side, but
1914 + * provide an implementation to avoid the need for ifdefery.
1920 +vchiq_dump_platform_state(void *dump_context)
1924 + len = snprintf(buf, sizeof(buf),
1925 + " Platform: 2835 (VC master)");
1926 + vchiq_dump(dump_context, buf, len + 1);
1930 +vchiq_platform_suspend(VCHIQ_STATE_T *state)
1932 + return VCHIQ_ERROR;
1936 +vchiq_platform_resume(VCHIQ_STATE_T *state)
1938 + return VCHIQ_SUCCESS;
1942 +vchiq_platform_paused(VCHIQ_STATE_T *state)
1947 +vchiq_platform_resumed(VCHIQ_STATE_T *state)
1952 +vchiq_platform_videocore_wanted(VCHIQ_STATE_T* state)
1954 + return 1; // autosuspend not supported - videocore always wanted
1958 +vchiq_platform_use_suspend_timer(void)
1963 +vchiq_dump_platform_use_state(VCHIQ_STATE_T *state)
1965 + vchiq_log_info((vchiq_arm_log_level>=VCHIQ_LOG_INFO),"Suspend timer not in use");
1968 +vchiq_platform_handle_timeout(VCHIQ_STATE_T *state)
1977 +vchiq_doorbell_irq(int irq, void *dev_id)
1979 + VCHIQ_STATE_T *state = dev_id;
1980 + irqreturn_t ret = IRQ_NONE;
1981 + unsigned int status;
1983 + /* Read (and clear) the doorbell */
1984 + status = readl(__io_address(ARM_0_BELL0));
1986 + if (status & 0x4) { /* Was the doorbell rung? */
1987 + remote_event_pollall(state);
1988 + ret = IRQ_HANDLED;
1994 +/* There is a potential problem with partial cache lines (pages?)
1995 +** at the ends of the block when reading. If the CPU accessed anything in
1996 +** the same line (page?) then it may have pulled old data into the cache,
1997 +** obscuring the new data underneath. We can solve this by transferring the
1998 +** partial cache lines separately, and allowing the ARM to copy into the
2001 +** N.B. This implementation plays slightly fast and loose with the Linux
2002 +** driver programming rules, e.g. its use of __virt_to_bus instead of
2003 +** dma_map_single, but it isn't a multi-platform driver and it benefits
2004 +** from increased speed as a result.
2008 +create_pagelist(char __user *buf, size_t count, unsigned short type,
2009 + struct task_struct *task, PAGELIST_T ** ppagelist)
2011 + PAGELIST_T *pagelist;
2012 + struct page **pages;
2013 + struct page *page;
2014 + unsigned long *addrs;
2015 + unsigned int num_pages, offset, i;
2016 + char *addr, *base_addr, *next_addr;
2017 + int run, addridx, actual_pages;
2019 + offset = (unsigned int)buf & (PAGE_SIZE - 1);
2020 + num_pages = (count + offset + PAGE_SIZE - 1) / PAGE_SIZE;
2022 + *ppagelist = NULL;
2024 + /* Allocate enough storage to hold the page pointers and the page
2027 + pagelist = kmalloc(sizeof(PAGELIST_T) +
2028 + (num_pages * sizeof(unsigned long)) +
2029 + (num_pages * sizeof(pages[0])),
2032 + vchiq_log_trace(vchiq_arm_log_level,
2033 + "create_pagelist - %x", (unsigned int)pagelist);
2037 + addrs = pagelist->addrs;
2038 + pages = (struct page **)(addrs + num_pages);
2040 + down_read(&task->mm->mmap_sem);
2041 + actual_pages = get_user_pages(task, task->mm,
2042 + (unsigned long)buf & ~(PAGE_SIZE - 1), num_pages,
2043 + (type == PAGELIST_READ) /*Write */ , 0 /*Force */ ,
2044 + pages, NULL /*vmas */);
2045 + up_read(&task->mm->mmap_sem);
2047 + if (actual_pages != num_pages)
2049 + /* This is probably due to the process being killed */
2050 + while (actual_pages > 0)
2053 + page_cache_release(pages[actual_pages]);
2056 + if (actual_pages == 0)
2057 + actual_pages = -ENOMEM;
2058 + return actual_pages;
2061 + pagelist->length = count;
2062 + pagelist->type = type;
2063 + pagelist->offset = offset;
2065 + /* Group the pages into runs of contiguous pages */
2067 + base_addr = VCHIQ_ARM_ADDRESS(page_address(pages[0]));
2068 + next_addr = base_addr + PAGE_SIZE;
2072 + for (i = 1; i < num_pages; i++) {
2073 + addr = VCHIQ_ARM_ADDRESS(page_address(pages[i]));
2074 + if ((addr == next_addr) && (run < (PAGE_SIZE - 1))) {
2075 + next_addr += PAGE_SIZE;
2078 + addrs[addridx] = (unsigned long)base_addr + run;
2081 + next_addr = addr + PAGE_SIZE;
2086 + addrs[addridx] = (unsigned long)base_addr + run;
2089 + /* Partial cache lines (fragments) require special measures */
2090 + if ((type == PAGELIST_READ) &&
2091 + ((pagelist->offset & (CACHE_LINE_SIZE - 1)) ||
2092 + ((pagelist->offset + pagelist->length) &
2093 + (CACHE_LINE_SIZE - 1)))) {
2094 + FRAGMENTS_T *fragments;
2096 + if (down_interruptible(&g_free_fragments_sema) != 0) {
2101 + WARN_ON(g_free_fragments == NULL);
2103 + down(&g_free_fragments_mutex);
2104 + fragments = (FRAGMENTS_T *) g_free_fragments;
2105 + WARN_ON(fragments == NULL);
2106 + g_free_fragments = *(FRAGMENTS_T **) g_free_fragments;
2107 + up(&g_free_fragments_mutex);
2109 + PAGELIST_READ_WITH_FRAGMENTS + (fragments -
2110 + g_fragments_base);
2113 + for (page = virt_to_page(pagelist);
2114 + page <= virt_to_page(addrs + num_pages - 1); page++) {
2115 + flush_dcache_page(page);
2118 + *ppagelist = pagelist;
2124 +free_pagelist(PAGELIST_T *pagelist, int actual)
2126 + struct page **pages;
2127 + unsigned int num_pages, i;
2129 + vchiq_log_trace(vchiq_arm_log_level,
2130 + "free_pagelist - %x, %d", (unsigned int)pagelist, actual);
2133 + (pagelist->length + pagelist->offset + PAGE_SIZE - 1) /
2136 + pages = (struct page **)(pagelist->addrs + num_pages);
2138 + /* Deal with any partial cache lines (fragments) */
2139 + if (pagelist->type >= PAGELIST_READ_WITH_FRAGMENTS) {
2140 + FRAGMENTS_T *fragments = g_fragments_base +
2141 + (pagelist->type - PAGELIST_READ_WITH_FRAGMENTS);
2142 + int head_bytes, tail_bytes;
2143 + head_bytes = (CACHE_LINE_SIZE - pagelist->offset) &
2144 + (CACHE_LINE_SIZE - 1);
2145 + tail_bytes = (pagelist->offset + actual) &
2146 + (CACHE_LINE_SIZE - 1);
2148 + if ((actual >= 0) && (head_bytes != 0)) {
2149 + if (head_bytes > actual)
2150 + head_bytes = actual;
2152 + memcpy((char *)page_address(pages[0]) +
2154 + fragments->headbuf,
2157 + if ((actual >= 0) && (head_bytes < actual) &&
2158 + (tail_bytes != 0)) {
2159 + memcpy((char *)page_address(pages[num_pages - 1]) +
2160 + ((pagelist->offset + actual) &
2161 + (PAGE_SIZE - 1) & ~(CACHE_LINE_SIZE - 1)),
2162 + fragments->tailbuf, tail_bytes);
2165 + down(&g_free_fragments_mutex);
2166 + *(FRAGMENTS_T **) fragments = g_free_fragments;
2167 + g_free_fragments = fragments;
2168 + up(&g_free_fragments_mutex);
2169 + up(&g_free_fragments_sema);
2172 + for (i = 0; i < num_pages; i++) {
2173 + if (pagelist->type != PAGELIST_WRITE)
2174 + set_page_dirty(pages[i]);
2175 + page_cache_release(pages[i]);
2181 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.c
2184 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
2186 + * Redistribution and use in source and binary forms, with or without
2187 + * modification, are permitted provided that the following conditions
2189 + * 1. Redistributions of source code must retain the above copyright
2190 + * notice, this list of conditions, and the following disclaimer,
2191 + * without modification.
2192 + * 2. Redistributions in binary form must reproduce the above copyright
2193 + * notice, this list of conditions and the following disclaimer in the
2194 + * documentation and/or other materials provided with the distribution.
2195 + * 3. The names of the above-listed copyright holders may not be used
2196 + * to endorse or promote products derived from this software without
2197 + * specific prior written permission.
2199 + * ALTERNATIVELY, this software may be distributed under the terms of the
2200 + * GNU General Public License ("GPL") version 2, as published by the Free
2201 + * Software Foundation.
2203 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
2204 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
2205 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
2206 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
2207 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
2208 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
2209 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
2210 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
2211 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
2212 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
2213 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
2216 +#include <linux/kernel.h>
2217 +#include <linux/module.h>
2218 +#include <linux/types.h>
2219 +#include <linux/errno.h>
2220 +#include <linux/cdev.h>
2221 +#include <linux/fs.h>
2222 +#include <linux/device.h>
2223 +#include <linux/mm.h>
2224 +#include <linux/highmem.h>
2225 +#include <linux/pagemap.h>
2226 +#include <linux/bug.h>
2227 +#include <linux/semaphore.h>
2228 +#include <linux/list.h>
2229 +#include <linux/proc_fs.h>
2231 +#include "vchiq_core.h"
2232 +#include "vchiq_ioctl.h"
2233 +#include "vchiq_arm.h"
2235 +#define DEVICE_NAME "vchiq"
2237 +/* Override the default prefix, which would be vchiq_arm (from the filename) */
2238 +#undef MODULE_PARAM_PREFIX
2239 +#define MODULE_PARAM_PREFIX DEVICE_NAME "."
2241 +#define VCHIQ_MINOR 0
2243 +/* Some per-instance constants */
2244 +#define MAX_COMPLETIONS 16
2245 +#define MAX_SERVICES 64
2246 +#define MAX_ELEMENTS 8
2247 +#define MSG_QUEUE_SIZE 64
2249 +#define KEEPALIVE_VER 1
2250 +#define KEEPALIVE_VER_MIN KEEPALIVE_VER
2252 +/* Run time control of log level, based on KERN_XXX level. */
2253 +int vchiq_arm_log_level = VCHIQ_LOG_DEFAULT;
2254 +int vchiq_susp_log_level = VCHIQ_LOG_ERROR;
2256 +#define SUSPEND_TIMER_TIMEOUT_MS 100
2257 +#define SUSPEND_RETRY_TIMER_TIMEOUT_MS 1000
2259 +#define VC_SUSPEND_NUM_OFFSET 3 /* number of values before idle which are -ve */
2260 +static const char *const suspend_state_names[] = {
2261 + "VC_SUSPEND_FORCE_CANCELED",
2262 + "VC_SUSPEND_REJECTED",
2263 + "VC_SUSPEND_FAILED",
2264 + "VC_SUSPEND_IDLE",
2265 + "VC_SUSPEND_REQUESTED",
2266 + "VC_SUSPEND_IN_PROGRESS",
2267 + "VC_SUSPEND_SUSPENDED"
2269 +#define VC_RESUME_NUM_OFFSET 1 /* number of values before idle which are -ve */
2270 +static const char *const resume_state_names[] = {
2271 + "VC_RESUME_FAILED",
2273 + "VC_RESUME_REQUESTED",
2274 + "VC_RESUME_IN_PROGRESS",
2275 + "VC_RESUME_RESUMED"
2277 +/* The number of times we allow force suspend to timeout before actually
2278 +** _forcing_ suspend. This is to cater for SW which fails to release vchiq
2279 +** correctly - we don't want to prevent ARM suspend indefinitely in this case.
2281 +#define FORCE_SUSPEND_FAIL_MAX 8
2283 +/* The time in ms allowed for videocore to go idle when force suspend has been
2285 +#define FORCE_SUSPEND_TIMEOUT_MS 200
2288 +static void suspend_timer_callback(unsigned long context);
2289 +static int vchiq_proc_add_instance(VCHIQ_INSTANCE_T instance);
2290 +static void vchiq_proc_remove_instance(VCHIQ_INSTANCE_T instance);
2293 +typedef struct user_service_struct {
2294 + VCHIQ_SERVICE_T *service;
2296 + VCHIQ_INSTANCE_T instance;
2298 + int dequeue_pending;
2299 + int message_available_pos;
2302 + struct semaphore insert_event;
2303 + struct semaphore remove_event;
2304 + VCHIQ_HEADER_T * msg_queue[MSG_QUEUE_SIZE];
2307 +struct bulk_waiter_node {
2308 + struct bulk_waiter bulk_waiter;
2310 + struct list_head list;
2313 +struct vchiq_instance_struct {
2314 + VCHIQ_STATE_T *state;
2315 + VCHIQ_COMPLETION_DATA_T completions[MAX_COMPLETIONS];
2316 + int completion_insert;
2317 + int completion_remove;
2318 + struct semaphore insert_event;
2319 + struct semaphore remove_event;
2320 + struct mutex completion_mutex;
2327 + struct list_head bulk_waiter_list;
2328 + struct mutex bulk_waiter_list_mutex;
2330 + struct proc_dir_entry *proc_entry;
2333 +typedef struct dump_context_struct {
2340 +static struct cdev vchiq_cdev;
2341 +static dev_t vchiq_devid;
2342 +static VCHIQ_STATE_T g_state;
2343 +static struct class *vchiq_class;
2344 +static struct device *vchiq_dev;
2345 +static DEFINE_SPINLOCK(msg_queue_spinlock);
2347 +static const char *const ioctl_names[] = {
2353 + "QUEUE_BULK_TRANSMIT",
2354 + "QUEUE_BULK_RECEIVE",
2355 + "AWAIT_COMPLETION",
2356 + "DEQUEUE_MESSAGE",
2361 + "RELEASE_SERVICE",
2362 + "SET_SERVICE_OPTION",
2366 +vchiq_static_assert((sizeof(ioctl_names)/sizeof(ioctl_names[0])) ==
2367 + (VCHIQ_IOC_MAX + 1));
2370 +dump_phys_mem(void *virt_addr, uint32_t num_bytes);
2372 +/****************************************************************************
2376 +***************************************************************************/
2378 +static VCHIQ_STATUS_T
2379 +add_completion(VCHIQ_INSTANCE_T instance, VCHIQ_REASON_T reason,
2380 + VCHIQ_HEADER_T *header, USER_SERVICE_T *user_service,
2381 + void *bulk_userdata)
2383 + VCHIQ_COMPLETION_DATA_T *completion;
2384 + DEBUG_INITIALISE(g_state.local)
2386 + while (instance->completion_insert ==
2387 + (instance->completion_remove + MAX_COMPLETIONS)) {
2388 + /* Out of space - wait for the client */
2389 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2390 + vchiq_log_trace(vchiq_arm_log_level,
2391 + "add_completion - completion queue full");
2392 + DEBUG_COUNT(COMPLETION_QUEUE_FULL_COUNT);
2393 + if (down_interruptible(&instance->remove_event) != 0) {
2394 + vchiq_log_info(vchiq_arm_log_level,
2395 + "service_callback interrupted");
2396 + return VCHIQ_RETRY;
2397 + } else if (instance->closing) {
2398 + vchiq_log_info(vchiq_arm_log_level,
2399 + "service_callback closing");
2400 + return VCHIQ_ERROR;
2402 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2406 + &instance->completions[instance->completion_insert &
2407 + (MAX_COMPLETIONS - 1)];
2409 + completion->header = header;
2410 + completion->reason = reason;
2411 + /* N.B. service_userdata is updated while processing AWAIT_COMPLETION */
2412 + completion->service_userdata = user_service->service;
2413 + completion->bulk_userdata = bulk_userdata;
2415 + if (reason == VCHIQ_SERVICE_CLOSED)
2416 + /* Take an extra reference, to be held until
2417 + this CLOSED notification is delivered. */
2418 + lock_service(user_service->service);
2420 + /* A write barrier is needed here to ensure that the entire completion
2421 + record is written out before the insert point. */
2424 + if (reason == VCHIQ_MESSAGE_AVAILABLE)
2425 + user_service->message_available_pos =
2426 + instance->completion_insert;
2427 + instance->completion_insert++;
2429 + up(&instance->insert_event);
2431 + return VCHIQ_SUCCESS;
2434 +/****************************************************************************
2438 +***************************************************************************/
2440 +static VCHIQ_STATUS_T
2441 +service_callback(VCHIQ_REASON_T reason, VCHIQ_HEADER_T *header,
2442 + VCHIQ_SERVICE_HANDLE_T handle, void *bulk_userdata)
2444 + /* How do we ensure the callback goes to the right client?
2445 + ** The service_user data points to a USER_SERVICE_T record containing
2446 + ** the original callback and the user state structure, which contains a
2447 + ** circular buffer for completion records.
2449 + USER_SERVICE_T *user_service;
2450 + VCHIQ_SERVICE_T *service;
2451 + VCHIQ_INSTANCE_T instance;
2452 + DEBUG_INITIALISE(g_state.local)
2454 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2456 + service = handle_to_service(handle);
2458 + user_service = (USER_SERVICE_T *)service->base.userdata;
2459 + instance = user_service->instance;
2461 + if (!instance || instance->closing)
2462 + return VCHIQ_SUCCESS;
2464 + vchiq_log_trace(vchiq_arm_log_level,
2465 + "service_callback - service %lx(%d), reason %d, header %lx, "
2466 + "instance %lx, bulk_userdata %lx",
2467 + (unsigned long)user_service,
2468 + service->localport,
2469 + reason, (unsigned long)header,
2470 + (unsigned long)instance, (unsigned long)bulk_userdata);
2472 + if (header && user_service->is_vchi) {
2473 + spin_lock(&msg_queue_spinlock);
2474 + while (user_service->msg_insert ==
2475 + (user_service->msg_remove + MSG_QUEUE_SIZE)) {
2476 + spin_unlock(&msg_queue_spinlock);
2477 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2478 + DEBUG_COUNT(MSG_QUEUE_FULL_COUNT);
2479 + vchiq_log_trace(vchiq_arm_log_level,
2480 + "service_callback - msg queue full");
2481 + /* If there is no MESSAGE_AVAILABLE in the completion
2484 + if ((user_service->message_available_pos -
2485 + instance->completion_remove) < 0) {
2486 + VCHIQ_STATUS_T status;
2487 + vchiq_log_info(vchiq_arm_log_level,
2488 + "Inserting extra MESSAGE_AVAILABLE");
2489 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2490 + status = add_completion(instance, reason,
2491 + NULL, user_service, bulk_userdata);
2492 + if (status != VCHIQ_SUCCESS) {
2493 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2498 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2499 + if (down_interruptible(&user_service->remove_event)
2501 + vchiq_log_info(vchiq_arm_log_level,
2502 + "service_callback interrupted");
2503 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2504 + return VCHIQ_RETRY;
2505 + } else if (instance->closing) {
2506 + vchiq_log_info(vchiq_arm_log_level,
2507 + "service_callback closing");
2508 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2509 + return VCHIQ_ERROR;
2511 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2512 + spin_lock(&msg_queue_spinlock);
2515 + user_service->msg_queue[user_service->msg_insert &
2516 + (MSG_QUEUE_SIZE - 1)] = header;
2517 + user_service->msg_insert++;
2518 + spin_unlock(&msg_queue_spinlock);
2520 + up(&user_service->insert_event);
2522 + /* If there is a thread waiting in DEQUEUE_MESSAGE, or if
2523 + ** there is a MESSAGE_AVAILABLE in the completion queue then
2524 + ** bypass the completion queue.
2526 + if (((user_service->message_available_pos -
2527 + instance->completion_remove) >= 0) ||
2528 + user_service->dequeue_pending) {
2529 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2530 + user_service->dequeue_pending = 0;
2531 + return VCHIQ_SUCCESS;
2536 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2538 + return add_completion(instance, reason, header, user_service,
2542 +/****************************************************************************
2544 +* user_service_free
2546 +***************************************************************************/
2548 +user_service_free(void *userdata)
2553 +/****************************************************************************
2557 +***************************************************************************/
2560 +vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2562 + VCHIQ_INSTANCE_T instance = file->private_data;
2563 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
2564 + VCHIQ_SERVICE_T *service = NULL;
2567 + DEBUG_INITIALISE(g_state.local)
2569 + vchiq_log_trace(vchiq_arm_log_level,
2570 + "vchiq_ioctl - instance %x, cmd %s, arg %lx",
2571 + (unsigned int)instance,
2572 + ((_IOC_TYPE(cmd) == VCHIQ_IOC_MAGIC) &&
2573 + (_IOC_NR(cmd) <= VCHIQ_IOC_MAX)) ?
2574 + ioctl_names[_IOC_NR(cmd)] : "<invalid>", arg);
2577 + case VCHIQ_IOC_SHUTDOWN:
2578 + if (!instance->connected)
2581 + /* Remove all services */
2583 + while ((service = next_service_by_instance(instance->state,
2584 + instance, &i)) != NULL) {
2585 + status = vchiq_remove_service(service->handle);
2586 + unlock_service(service);
2587 + if (status != VCHIQ_SUCCESS)
2592 + if (status == VCHIQ_SUCCESS) {
2593 + /* Wake the completion thread and ask it to exit */
2594 + instance->closing = 1;
2595 + up(&instance->insert_event);
2600 + case VCHIQ_IOC_CONNECT:
2601 + if (instance->connected) {
2605 + rc = mutex_lock_interruptible(&instance->state->mutex);
2607 + vchiq_log_error(vchiq_arm_log_level,
2608 + "vchiq: connect: could not lock mutex for "
2610 + instance->state->id, rc);
2614 + status = vchiq_connect_internal(instance->state, instance);
2615 + mutex_unlock(&instance->state->mutex);
2617 + if (status == VCHIQ_SUCCESS)
2618 + instance->connected = 1;
2620 + vchiq_log_error(vchiq_arm_log_level,
2621 + "vchiq: could not connect: %d", status);
2624 + case VCHIQ_IOC_CREATE_SERVICE: {
2625 + VCHIQ_CREATE_SERVICE_T args;
2626 + USER_SERVICE_T *user_service = NULL;
2630 + if (copy_from_user
2631 + (&args, (const void __user *)arg,
2632 + sizeof(args)) != 0) {
2637 + user_service = kmalloc(sizeof(USER_SERVICE_T), GFP_KERNEL);
2638 + if (!user_service) {
2643 + if (args.is_open) {
2644 + if (!instance->connected) {
2646 + kfree(user_service);
2649 + srvstate = VCHIQ_SRVSTATE_OPENING;
2652 + instance->connected ?
2653 + VCHIQ_SRVSTATE_LISTENING :
2654 + VCHIQ_SRVSTATE_HIDDEN;
2657 + userdata = args.params.userdata;
2658 + args.params.callback = service_callback;
2659 + args.params.userdata = user_service;
2660 + service = vchiq_add_service_internal(
2662 + &args.params, srvstate,
2663 + instance, user_service_free);
2665 + if (service != NULL) {
2666 + user_service->service = service;
2667 + user_service->userdata = userdata;
2668 + user_service->instance = instance;
2669 + user_service->is_vchi = args.is_vchi;
2670 + user_service->dequeue_pending = 0;
2671 + user_service->message_available_pos =
2672 + instance->completion_remove - 1;
2673 + user_service->msg_insert = 0;
2674 + user_service->msg_remove = 0;
2675 + sema_init(&user_service->insert_event, 0);
2676 + sema_init(&user_service->remove_event, 0);
2678 + if (args.is_open) {
2679 + status = vchiq_open_service_internal
2680 + (service, instance->pid);
2681 + if (status != VCHIQ_SUCCESS) {
2682 + vchiq_remove_service(service->handle);
2684 + ret = (status == VCHIQ_RETRY) ?
2690 + if (copy_to_user((void __user *)
2691 + &(((VCHIQ_CREATE_SERVICE_T __user *)
2693 + (const void *)&service->handle,
2694 + sizeof(service->handle)) != 0) {
2696 + vchiq_remove_service(service->handle);
2702 + kfree(user_service);
2706 + case VCHIQ_IOC_CLOSE_SERVICE: {
2707 + VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
2709 + service = find_service_for_instance(instance, handle);
2710 + if (service != NULL)
2711 + status = vchiq_close_service(service->handle);
2716 + case VCHIQ_IOC_REMOVE_SERVICE: {
2717 + VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
2719 + service = find_service_for_instance(instance, handle);
2720 + if (service != NULL)
2721 + status = vchiq_remove_service(service->handle);
2726 + case VCHIQ_IOC_USE_SERVICE:
2727 + case VCHIQ_IOC_RELEASE_SERVICE: {
2728 + VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
2730 + service = find_service_for_instance(instance, handle);
2731 + if (service != NULL) {
2732 + status = (cmd == VCHIQ_IOC_USE_SERVICE) ?
2733 + vchiq_use_service_internal(service) :
2734 + vchiq_release_service_internal(service);
2735 + if (status != VCHIQ_SUCCESS) {
2736 + vchiq_log_error(vchiq_susp_log_level,
2737 + "%s: cmd %s returned error %d for "
2738 + "service %c%c%c%c:%03d",
2740 + (cmd == VCHIQ_IOC_USE_SERVICE) ?
2741 + "VCHIQ_IOC_USE_SERVICE" :
2742 + "VCHIQ_IOC_RELEASE_SERVICE",
2744 + VCHIQ_FOURCC_AS_4CHARS(
2745 + service->base.fourcc),
2746 + service->client_id);
2753 + case VCHIQ_IOC_QUEUE_MESSAGE: {
2754 + VCHIQ_QUEUE_MESSAGE_T args;
2755 + if (copy_from_user
2756 + (&args, (const void __user *)arg,
2757 + sizeof(args)) != 0) {
2762 + service = find_service_for_instance(instance, args.handle);
2764 + if ((service != NULL) && (args.count <= MAX_ELEMENTS)) {
2765 + /* Copy elements into kernel space */
2766 + VCHIQ_ELEMENT_T elements[MAX_ELEMENTS];
2767 + if (copy_from_user(elements, args.elements,
2768 + args.count * sizeof(VCHIQ_ELEMENT_T)) == 0)
2769 + status = vchiq_queue_message
2771 + elements, args.count);
2779 + case VCHIQ_IOC_QUEUE_BULK_TRANSMIT:
2780 + case VCHIQ_IOC_QUEUE_BULK_RECEIVE: {
2781 + VCHIQ_QUEUE_BULK_TRANSFER_T args;
2782 + struct bulk_waiter_node *waiter = NULL;
2783 + VCHIQ_BULK_DIR_T dir =
2784 + (cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT) ?
2785 + VCHIQ_BULK_TRANSMIT : VCHIQ_BULK_RECEIVE;
2787 + if (copy_from_user
2788 + (&args, (const void __user *)arg,
2789 + sizeof(args)) != 0) {
2794 + service = find_service_for_instance(instance, args.handle);
2800 + if (args.mode == VCHIQ_BULK_MODE_BLOCKING) {
2801 + waiter = kzalloc(sizeof(struct bulk_waiter_node),
2807 + args.userdata = &waiter->bulk_waiter;
2808 + } else if (args.mode == VCHIQ_BULK_MODE_WAITING) {
2809 + struct list_head *pos;
2810 + mutex_lock(&instance->bulk_waiter_list_mutex);
2811 + list_for_each(pos, &instance->bulk_waiter_list) {
2812 + if (list_entry(pos, struct bulk_waiter_node,
2813 + list)->pid == current->pid) {
2814 + waiter = list_entry(pos,
2815 + struct bulk_waiter_node,
2822 + mutex_unlock(&instance->bulk_waiter_list_mutex);
2824 + vchiq_log_error(vchiq_arm_log_level,
2825 + "no bulk_waiter found for pid %d",
2830 + vchiq_log_info(vchiq_arm_log_level,
2831 + "found bulk_waiter %x for pid %d",
2832 + (unsigned int)waiter, current->pid);
2833 + args.userdata = &waiter->bulk_waiter;
2835 + status = vchiq_bulk_transfer
2837 + VCHI_MEM_HANDLE_INVALID,
2838 + args.data, args.size,
2839 + args.userdata, args.mode,
2843 + if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) ||
2844 + !waiter->bulk_waiter.bulk) {
2845 + if (waiter->bulk_waiter.bulk) {
2846 + /* Cancel the signal when the transfer
2848 + spin_lock(&bulk_waiter_spinlock);
2849 + waiter->bulk_waiter.bulk->userdata = NULL;
2850 + spin_unlock(&bulk_waiter_spinlock);
2854 + const VCHIQ_BULK_MODE_T mode_waiting =
2855 + VCHIQ_BULK_MODE_WAITING;
2856 + waiter->pid = current->pid;
2857 + mutex_lock(&instance->bulk_waiter_list_mutex);
2858 + list_add(&waiter->list, &instance->bulk_waiter_list);
2859 + mutex_unlock(&instance->bulk_waiter_list_mutex);
2860 + vchiq_log_info(vchiq_arm_log_level,
2861 + "saved bulk_waiter %x for pid %d",
2862 + (unsigned int)waiter, current->pid);
2864 + if (copy_to_user((void __user *)
2865 + &(((VCHIQ_QUEUE_BULK_TRANSFER_T __user *)
2867 + (const void *)&mode_waiting,
2868 + sizeof(mode_waiting)) != 0)
2873 + case VCHIQ_IOC_AWAIT_COMPLETION: {
2874 + VCHIQ_AWAIT_COMPLETION_T args;
2876 + DEBUG_TRACE(AWAIT_COMPLETION_LINE);
2877 + if (!instance->connected) {
2882 + if (copy_from_user(&args, (const void __user *)arg,
2883 + sizeof(args)) != 0) {
2888 + mutex_lock(&instance->completion_mutex);
2890 + DEBUG_TRACE(AWAIT_COMPLETION_LINE);
2891 + while ((instance->completion_remove ==
2892 + instance->completion_insert)
2893 + && !instance->closing) {
2895 + DEBUG_TRACE(AWAIT_COMPLETION_LINE);
2896 + mutex_unlock(&instance->completion_mutex);
2897 + rc = down_interruptible(&instance->insert_event);
2898 + mutex_lock(&instance->completion_mutex);
2900 + DEBUG_TRACE(AWAIT_COMPLETION_LINE);
2901 + vchiq_log_info(vchiq_arm_log_level,
2902 + "AWAIT_COMPLETION interrupted");
2907 + DEBUG_TRACE(AWAIT_COMPLETION_LINE);
2909 + /* A read memory barrier is needed to stop prefetch of a stale
2910 + ** completion record
2915 + int msgbufcount = args.msgbufcount;
2916 + for (ret = 0; ret < args.count; ret++) {
2917 + VCHIQ_COMPLETION_DATA_T *completion;
2918 + VCHIQ_SERVICE_T *service;
2919 + USER_SERVICE_T *user_service;
2920 + VCHIQ_HEADER_T *header;
2921 + if (instance->completion_remove ==
2922 + instance->completion_insert)
2924 + completion = &instance->completions[
2925 + instance->completion_remove &
2926 + (MAX_COMPLETIONS - 1)];
2928 + service = completion->service_userdata;
2929 + user_service = service->base.userdata;
2930 + completion->service_userdata =
2931 + user_service->userdata;
2933 + header = completion->header;
2935 + void __user *msgbuf;
2938 + msglen = header->size +
2939 + sizeof(VCHIQ_HEADER_T);
2940 + /* This must be a VCHIQ-style service */
2941 + if (args.msgbufsize < msglen) {
2943 + vchiq_arm_log_level,
2944 + "header %x: msgbufsize"
2945 + " %x < msglen %x",
2946 + (unsigned int)header,
2949 + WARN(1, "invalid message "
2955 + if (msgbufcount <= 0)
2956 + /* Stall here for lack of a
2957 + ** buffer for the message. */
2959 + /* Get the pointer from user space */
2961 + if (copy_from_user(&msgbuf,
2962 + (const void __user *)
2963 + &args.msgbufs[msgbufcount],
2964 + sizeof(msgbuf)) != 0) {
2970 + /* Copy the message to user space */
2971 + if (copy_to_user(msgbuf, header,
2978 + /* Now it has been copied, the message
2979 + ** can be released. */
2980 + vchiq_release_message(service->handle,
2983 + /* The completion must point to the
2985 + completion->header = msgbuf;
2988 + if (completion->reason ==
2989 + VCHIQ_SERVICE_CLOSED)
2990 + unlock_service(service);
2992 + if (copy_to_user((void __user *)(
2993 + (size_t)args.buf +
2994 + ret * sizeof(VCHIQ_COMPLETION_DATA_T)),
2996 + sizeof(VCHIQ_COMPLETION_DATA_T)) != 0) {
3002 + instance->completion_remove++;
3005 + if (msgbufcount != args.msgbufcount) {
3006 + if (copy_to_user((void __user *)
3007 + &((VCHIQ_AWAIT_COMPLETION_T *)arg)->
3010 + sizeof(msgbufcount)) != 0) {
3017 + up(&instance->remove_event);
3018 + mutex_unlock(&instance->completion_mutex);
3019 + DEBUG_TRACE(AWAIT_COMPLETION_LINE);
3022 + case VCHIQ_IOC_DEQUEUE_MESSAGE: {
3023 + VCHIQ_DEQUEUE_MESSAGE_T args;
3024 + USER_SERVICE_T *user_service;
3025 + VCHIQ_HEADER_T *header;
3027 + DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
3028 + if (copy_from_user
3029 + (&args, (const void __user *)arg,
3030 + sizeof(args)) != 0) {
3034 + service = find_service_for_instance(instance, args.handle);
3039 + user_service = (USER_SERVICE_T *)service->base.userdata;
3040 + if (user_service->is_vchi == 0) {
3045 + spin_lock(&msg_queue_spinlock);
3046 + if (user_service->msg_remove == user_service->msg_insert) {
3047 + if (!args.blocking) {
3048 + spin_unlock(&msg_queue_spinlock);
3049 + DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
3050 + ret = -EWOULDBLOCK;
3053 + user_service->dequeue_pending = 1;
3055 + spin_unlock(&msg_queue_spinlock);
3056 + DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
3057 + if (down_interruptible(
3058 + &user_service->insert_event) != 0) {
3059 + vchiq_log_info(vchiq_arm_log_level,
3060 + "DEQUEUE_MESSAGE interrupted");
3064 + spin_lock(&msg_queue_spinlock);
3065 + } while (user_service->msg_remove ==
3066 + user_service->msg_insert);
3072 + BUG_ON((int)(user_service->msg_insert -
3073 + user_service->msg_remove) < 0);
3075 + header = user_service->msg_queue[user_service->msg_remove &
3076 + (MSG_QUEUE_SIZE - 1)];
3077 + user_service->msg_remove++;
3078 + spin_unlock(&msg_queue_spinlock);
3080 + up(&user_service->remove_event);
3081 + if (header == NULL)
3083 + else if (header->size <= args.bufsize) {
3084 + /* Copy to user space if msgbuf is not NULL */
3085 + if ((args.buf == NULL) ||
3086 + (copy_to_user((void __user *)args.buf,
3088 + header->size) == 0)) {
3089 + ret = header->size;
3090 + vchiq_release_message(
3096 + vchiq_log_error(vchiq_arm_log_level,
3097 + "header %x: bufsize %x < size %x",
3098 + (unsigned int)header, args.bufsize,
3100 + WARN(1, "invalid size\n");
3103 + DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
3106 + case VCHIQ_IOC_GET_CLIENT_ID: {
3107 + VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
3109 + ret = vchiq_get_client_id(handle);
3112 + case VCHIQ_IOC_GET_CONFIG: {
3113 + VCHIQ_GET_CONFIG_T args;
3114 + VCHIQ_CONFIG_T config;
3116 + if (copy_from_user(&args, (const void __user *)arg,
3117 + sizeof(args)) != 0) {
3121 + if (args.config_size > sizeof(config)) {
3125 + status = vchiq_get_config(instance, args.config_size, &config);
3126 + if (status == VCHIQ_SUCCESS) {
3127 + if (copy_to_user((void __user *)args.pconfig,
3128 + &config, args.config_size) != 0) {
3135 + case VCHIQ_IOC_SET_SERVICE_OPTION: {
3136 + VCHIQ_SET_SERVICE_OPTION_T args;
3138 + if (copy_from_user(
3139 + &args, (const void __user *)arg,
3140 + sizeof(args)) != 0) {
3145 + service = find_service_for_instance(instance, args.handle);
3151 + status = vchiq_set_service_option(
3152 + args.handle, args.option, args.value);
3155 + case VCHIQ_IOC_DUMP_PHYS_MEM: {
3156 + VCHIQ_DUMP_MEM_T args;
3158 + if (copy_from_user
3159 + (&args, (const void __user *)arg,
3160 + sizeof(args)) != 0) {
3164 + dump_phys_mem(args.virt_addr, args.num_bytes);
3173 + unlock_service(service);
3176 + if (status == VCHIQ_ERROR)
3178 + else if (status == VCHIQ_RETRY)
3182 + if ((status == VCHIQ_SUCCESS) && (ret < 0) && (ret != -EINTR) &&
3183 + (ret != -EWOULDBLOCK))
3184 + vchiq_log_info(vchiq_arm_log_level,
3185 + " ioctl instance %lx, cmd %s -> status %d, %ld",
3186 + (unsigned long)instance,
3187 + (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
3188 + ioctl_names[_IOC_NR(cmd)] :
3192 + vchiq_log_trace(vchiq_arm_log_level,
3193 + " ioctl instance %lx, cmd %s -> status %d, %ld",
3194 + (unsigned long)instance,
3195 + (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
3196 + ioctl_names[_IOC_NR(cmd)] :
3203 +/****************************************************************************
3207 +***************************************************************************/
3210 +vchiq_open(struct inode *inode, struct file *file)
3212 + int dev = iminor(inode) & 0x0f;
3213 + vchiq_log_info(vchiq_arm_log_level, "vchiq_open");
3215 + case VCHIQ_MINOR: {
3217 + VCHIQ_STATE_T *state = vchiq_get_state();
3218 + VCHIQ_INSTANCE_T instance;
3221 + vchiq_log_error(vchiq_arm_log_level,
3222 + "vchiq has no connection to VideoCore");
3226 + instance = kzalloc(sizeof(*instance), GFP_KERNEL);
3230 + instance->state = state;
3231 + instance->pid = current->tgid;
3233 + ret = vchiq_proc_add_instance(instance);
3239 + sema_init(&instance->insert_event, 0);
3240 + sema_init(&instance->remove_event, 0);
3241 + mutex_init(&instance->completion_mutex);
3242 + mutex_init(&instance->bulk_waiter_list_mutex);
3243 + INIT_LIST_HEAD(&instance->bulk_waiter_list);
3245 + file->private_data = instance;
3249 + vchiq_log_error(vchiq_arm_log_level,
3250 + "Unknown minor device: %d", dev);
3257 +/****************************************************************************
3261 +***************************************************************************/
3264 +vchiq_release(struct inode *inode, struct file *file)
3266 + int dev = iminor(inode) & 0x0f;
3269 + case VCHIQ_MINOR: {
3270 + VCHIQ_INSTANCE_T instance = file->private_data;
3271 + VCHIQ_STATE_T *state = vchiq_get_state();
3272 + VCHIQ_SERVICE_T *service;
3275 + vchiq_log_info(vchiq_arm_log_level,
3276 + "vchiq_release: instance=%lx",
3277 + (unsigned long)instance);
3284 + /* Ensure videocore is awake to allow termination. */
3285 + vchiq_use_internal(instance->state, NULL,
3288 + mutex_lock(&instance->completion_mutex);
3290 + /* Wake the completion thread and ask it to exit */
3291 + instance->closing = 1;
3292 + up(&instance->insert_event);
3294 + mutex_unlock(&instance->completion_mutex);
3296 + /* Wake the slot handler if the completion queue is full. */
3297 + up(&instance->remove_event);
3299 + /* Mark all services for termination... */
3301 + while ((service = next_service_by_instance(state, instance,
3303 + USER_SERVICE_T *user_service = service->base.userdata;
3305 + /* Wake the slot handler if the msg queue is full. */
3306 + up(&user_service->remove_event);
3308 + vchiq_terminate_service_internal(service);
3309 + unlock_service(service);
3312 + /* ...and wait for them to die */
3314 + while ((service = next_service_by_instance(state, instance, &i))
3316 + USER_SERVICE_T *user_service = service->base.userdata;
3318 + down(&service->remove_event);
3320 + BUG_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);
3322 + spin_lock(&msg_queue_spinlock);
3324 + while (user_service->msg_remove !=
3325 + user_service->msg_insert) {
3326 + VCHIQ_HEADER_T *header = user_service->
3327 + msg_queue[user_service->msg_remove &
3328 + (MSG_QUEUE_SIZE - 1)];
3329 + user_service->msg_remove++;
3330 + spin_unlock(&msg_queue_spinlock);
3333 + vchiq_release_message(
3336 + spin_lock(&msg_queue_spinlock);
3339 + spin_unlock(&msg_queue_spinlock);
3341 + unlock_service(service);
3344 + /* Release any closed services */
3345 + while (instance->completion_remove !=
3346 + instance->completion_insert) {
3347 + VCHIQ_COMPLETION_DATA_T *completion;
3348 + VCHIQ_SERVICE_T *service;
3349 + completion = &instance->completions[
3350 + instance->completion_remove &
3351 + (MAX_COMPLETIONS - 1)];
3352 + service = completion->service_userdata;
3353 + if (completion->reason == VCHIQ_SERVICE_CLOSED)
3354 + unlock_service(service);
3355 + instance->completion_remove++;
3358 + /* Release the PEER service count. */
3359 + vchiq_release_internal(instance->state, NULL);
3362 + struct list_head *pos, *next;
3363 + list_for_each_safe(pos, next,
3364 + &instance->bulk_waiter_list) {
3365 + struct bulk_waiter_node *waiter;
3366 + waiter = list_entry(pos,
3367 + struct bulk_waiter_node,
3370 + vchiq_log_info(vchiq_arm_log_level,
3371 + "bulk_waiter - cleaned up %x "
3373 + (unsigned int)waiter, waiter->pid);
3378 + vchiq_proc_remove_instance(instance);
3381 + file->private_data = NULL;
3385 + vchiq_log_error(vchiq_arm_log_level,
3386 + "Unknown minor device: %d", dev);
3394 +/****************************************************************************
3398 +***************************************************************************/
3401 +vchiq_dump(void *dump_context, const char *str, int len)
3403 + DUMP_CONTEXT_T *context = (DUMP_CONTEXT_T *)dump_context;
3405 + if (context->actual < context->space) {
3407 + if (context->offset > 0) {
3408 + int skip_bytes = min(len, (int)context->offset);
3409 + str += skip_bytes;
3410 + len -= skip_bytes;
3411 + context->offset -= skip_bytes;
3412 + if (context->offset > 0)
3415 + copy_bytes = min(len, (int)(context->space - context->actual));
3416 + if (copy_bytes == 0)
3418 + if (copy_to_user(context->buf + context->actual, str,
3420 + context->actual = -EFAULT;
3421 + context->actual += copy_bytes;
3422 + len -= copy_bytes;
3424 + /* If tne terminating NUL is included in the length, then it
3425 + ** marks the end of a line and should be replaced with a
3426 + ** carriage return. */
3427 + if ((len == 0) && (str[copy_bytes - 1] == '\0')) {
3429 + if (copy_to_user(context->buf + context->actual - 1,
3431 + context->actual = -EFAULT;
3436 +/****************************************************************************
3438 +* vchiq_dump_platform_instance_state
3440 +***************************************************************************/
3443 +vchiq_dump_platform_instances(void *dump_context)
3445 + VCHIQ_STATE_T *state = vchiq_get_state();
3450 + /* There is no list of instances, so instead scan all services,
3451 + marking those that have been dumped. */
3453 + for (i = 0; i < state->unused_service; i++) {
3454 + VCHIQ_SERVICE_T *service = state->services[i];
3455 + VCHIQ_INSTANCE_T instance;
3457 + if (service && (service->base.callback == service_callback)) {
3458 + instance = service->instance;
3460 + instance->mark = 0;
3464 + for (i = 0; i < state->unused_service; i++) {
3465 + VCHIQ_SERVICE_T *service = state->services[i];
3466 + VCHIQ_INSTANCE_T instance;
3468 + if (service && (service->base.callback == service_callback)) {
3469 + instance = service->instance;
3470 + if (instance && !instance->mark) {
3471 + len = snprintf(buf, sizeof(buf),
3472 + "Instance %x: pid %d,%s completions "
3474 + (unsigned int)instance, instance->pid,
3475 + instance->connected ? " connected, " :
3477 + instance->completion_insert -
3478 + instance->completion_remove,
3481 + vchiq_dump(dump_context, buf, len + 1);
3483 + instance->mark = 1;
3489 +/****************************************************************************
3491 +* vchiq_dump_platform_service_state
3493 +***************************************************************************/
3496 +vchiq_dump_platform_service_state(void *dump_context, VCHIQ_SERVICE_T *service)
3498 + USER_SERVICE_T *user_service = (USER_SERVICE_T *)service->base.userdata;
3502 + len = snprintf(buf, sizeof(buf), " instance %x",
3503 + (unsigned int)service->instance);
3505 + if ((service->base.callback == service_callback) &&
3506 + user_service->is_vchi) {
3507 + len += snprintf(buf + len, sizeof(buf) - len,
3508 + ", %d/%d messages",
3509 + user_service->msg_insert - user_service->msg_remove,
3512 + if (user_service->dequeue_pending)
3513 + len += snprintf(buf + len, sizeof(buf) - len,
3514 + " (dequeue pending)");
3517 + vchiq_dump(dump_context, buf, len + 1);
3520 +/****************************************************************************
3524 +***************************************************************************/
3527 +dump_phys_mem(void *virt_addr, uint32_t num_bytes)
3530 + uint8_t *end_virt_addr = virt_addr + num_bytes;
3536 + struct page *page;
3537 + struct page **pages;
3538 + uint8_t *kmapped_virt_ptr;
3540 + /* Align virtAddr and endVirtAddr to 16 byte boundaries. */
3542 + virt_addr = (void *)((unsigned long)virt_addr & ~0x0fuL);
3543 + end_virt_addr = (void *)(((unsigned long)end_virt_addr + 15uL) &
3546 + offset = (int)(long)virt_addr & (PAGE_SIZE - 1);
3547 + end_offset = (int)(long)end_virt_addr & (PAGE_SIZE - 1);
3549 + num_pages = (offset + num_bytes + PAGE_SIZE - 1) / PAGE_SIZE;
3551 + pages = kmalloc(sizeof(struct page *) * num_pages, GFP_KERNEL);
3552 + if (pages == NULL) {
3553 + vchiq_log_error(vchiq_arm_log_level,
3554 + "Unable to allocation memory for %d pages\n",
3559 + down_read(¤t->mm->mmap_sem);
3560 + rc = get_user_pages(current, /* task */
3561 + current->mm, /* mm */
3562 + (unsigned long)virt_addr, /* start */
3563 + num_pages, /* len */
3566 + pages, /* pages (array of page pointers) */
3568 + up_read(¤t->mm->mmap_sem);
3573 + while (offset < end_offset) {
3575 + int page_offset = offset % PAGE_SIZE;
3576 + page_idx = offset / PAGE_SIZE;
3578 + if (page_idx != prev_idx) {
3582 + page = pages[page_idx];
3583 + kmapped_virt_ptr = kmap(page);
3585 + prev_idx = page_idx;
3588 + if (vchiq_arm_log_level >= VCHIQ_LOG_TRACE)
3589 + vchiq_log_dump_mem("ph",
3590 + (uint32_t)(unsigned long)&kmapped_virt_ptr[
3592 + &kmapped_virt_ptr[page_offset], 16);
3599 + for (page_idx = 0; page_idx < num_pages; page_idx++)
3600 + page_cache_release(pages[page_idx]);
3605 +/****************************************************************************
3609 +***************************************************************************/
3612 +vchiq_read(struct file *file, char __user *buf,
3613 + size_t count, loff_t *ppos)
3615 + DUMP_CONTEXT_T context;
3616 + context.buf = buf;
3617 + context.actual = 0;
3618 + context.space = count;
3619 + context.offset = *ppos;
3621 + vchiq_dump_state(&context, &g_state);
3623 + *ppos += context.actual;
3625 + return context.actual;
3629 +vchiq_get_state(void)
3632 + if (g_state.remote == NULL)
3633 + printk(KERN_ERR "%s: g_state.remote == NULL\n", __func__);
3634 + else if (g_state.remote->initialised != 1)
3635 + printk(KERN_NOTICE "%s: g_state.remote->initialised != 1 (%d)\n",
3636 + __func__, g_state.remote->initialised);
3638 + return ((g_state.remote != NULL) &&
3639 + (g_state.remote->initialised == 1)) ? &g_state : NULL;
3642 +static const struct file_operations
3644 + .owner = THIS_MODULE,
3645 + .unlocked_ioctl = vchiq_ioctl,
3646 + .open = vchiq_open,
3647 + .release = vchiq_release,
3648 + .read = vchiq_read
3652 + * Autosuspend related functionality
3656 +vchiq_videocore_wanted(VCHIQ_STATE_T *state)
3658 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
3660 + /* autosuspend not supported - always return wanted */
3662 + else if (arm_state->blocked_count)
3664 + else if (!arm_state->videocore_use_count)
3665 + /* usage count zero - check for override unless we're forcing */
3666 + if (arm_state->resume_blocked)
3669 + return vchiq_platform_videocore_wanted(state);
3671 + /* non-zero usage count - videocore still required */
3675 +static VCHIQ_STATUS_T
3676 +vchiq_keepalive_vchiq_callback(VCHIQ_REASON_T reason,
3677 + VCHIQ_HEADER_T *header,
3678 + VCHIQ_SERVICE_HANDLE_T service_user,
3681 + vchiq_log_error(vchiq_susp_log_level,
3682 + "%s callback reason %d", __func__, reason);
3687 +vchiq_keepalive_thread_func(void *v)
3689 + VCHIQ_STATE_T *state = (VCHIQ_STATE_T *) v;
3690 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
3692 + VCHIQ_STATUS_T status;
3693 + VCHIQ_INSTANCE_T instance;
3694 + VCHIQ_SERVICE_HANDLE_T ka_handle;
3696 + VCHIQ_SERVICE_PARAMS_T params = {
3697 + .fourcc = VCHIQ_MAKE_FOURCC('K', 'E', 'E', 'P'),
3698 + .callback = vchiq_keepalive_vchiq_callback,
3699 + .version = KEEPALIVE_VER,
3700 + .version_min = KEEPALIVE_VER_MIN
3703 + status = vchiq_initialise(&instance);
3704 + if (status != VCHIQ_SUCCESS) {
3705 + vchiq_log_error(vchiq_susp_log_level,
3706 + "%s vchiq_initialise failed %d", __func__, status);
3710 + status = vchiq_connect(instance);
3711 + if (status != VCHIQ_SUCCESS) {
3712 + vchiq_log_error(vchiq_susp_log_level,
3713 + "%s vchiq_connect failed %d", __func__, status);
3717 + status = vchiq_add_service(instance, ¶ms, &ka_handle);
3718 + if (status != VCHIQ_SUCCESS) {
3719 + vchiq_log_error(vchiq_susp_log_level,
3720 + "%s vchiq_open_service failed %d", __func__, status);
3725 + long rc = 0, uc = 0;
3726 + if (wait_for_completion_interruptible(&arm_state->ka_evt)
3728 + vchiq_log_error(vchiq_susp_log_level,
3729 + "%s interrupted", __func__);
3730 + flush_signals(current);
3734 + /* read and clear counters. Do release_count then use_count to
3735 + * prevent getting more releases than uses */
3736 + rc = atomic_xchg(&arm_state->ka_release_count, 0);
3737 + uc = atomic_xchg(&arm_state->ka_use_count, 0);
3739 + /* Call use/release service the requisite number of times.
3740 + * Process use before release so use counts don't go negative */
3742 + atomic_inc(&arm_state->ka_use_ack_count);
3743 + status = vchiq_use_service(ka_handle);
3744 + if (status != VCHIQ_SUCCESS) {
3745 + vchiq_log_error(vchiq_susp_log_level,
3746 + "%s vchiq_use_service error %d",
3747 + __func__, status);
3751 + status = vchiq_release_service(ka_handle);
3752 + if (status != VCHIQ_SUCCESS) {
3753 + vchiq_log_error(vchiq_susp_log_level,
3754 + "%s vchiq_release_service error %d",
3755 + __func__, status);
3761 + vchiq_shutdown(instance);
3769 +vchiq_arm_init_state(VCHIQ_STATE_T *state, VCHIQ_ARM_STATE_T *arm_state)
3771 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
3774 + rwlock_init(&arm_state->susp_res_lock);
3776 + init_completion(&arm_state->ka_evt);
3777 + atomic_set(&arm_state->ka_use_count, 0);
3778 + atomic_set(&arm_state->ka_use_ack_count, 0);
3779 + atomic_set(&arm_state->ka_release_count, 0);
3781 + init_completion(&arm_state->vc_suspend_complete);
3783 + init_completion(&arm_state->vc_resume_complete);
3784 + /* Initialise to 'done' state. We only want to block on resume
3785 + * completion while videocore is suspended. */
3786 + set_resume_state(arm_state, VC_RESUME_RESUMED);
3788 + init_completion(&arm_state->resume_blocker);
3789 + /* Initialise to 'done' state. We only want to block on this
3790 + * completion while resume is blocked */
3791 + complete_all(&arm_state->resume_blocker);
3793 + init_completion(&arm_state->blocked_blocker);
3794 + /* Initialise to 'done' state. We only want to block on this
3795 + * completion while things are waiting on the resume blocker */
3796 + complete_all(&arm_state->blocked_blocker);
3798 + arm_state->suspend_timer_timeout = SUSPEND_TIMER_TIMEOUT_MS;
3799 + arm_state->suspend_timer_running = 0;
3800 + init_timer(&arm_state->suspend_timer);
3801 + arm_state->suspend_timer.data = (unsigned long)(state);
3802 + arm_state->suspend_timer.function = suspend_timer_callback;
3804 + arm_state->first_connect = 0;
3811 +** Functions to modify the state variables;
3812 +** set_suspend_state
3813 +** set_resume_state
3815 +** There are more state variables than we might like, so ensure they remain in
3816 +** step. Suspend and resume state are maintained separately, since most of
3817 +** these state machines can operate independently. However, there are a few
3818 +** states where state transitions in one state machine cause a reset to the
3819 +** other state machine. In addition, there are some completion events which
3820 +** need to occur on state machine reset and end-state(s), so these are also
3821 +** dealt with in these functions.
3823 +** In all states we set the state variable according to the input, but in some
3824 +** cases we perform additional steps outlined below;
3826 +** VC_SUSPEND_IDLE - Initialise the suspend completion at the same time.
3827 +** The suspend completion is completed after any suspend
3828 +** attempt. When we reset the state machine we also reset
3829 +** the completion. This reset occurs when videocore is
3830 +** resumed, and also if we initiate suspend after a suspend
3833 +** VC_SUSPEND_IN_PROGRESS - This state is considered the point of no return for
3834 +** suspend - ie from this point on we must try to suspend
3835 +** before resuming can occur. We therefore also reset the
3836 +** resume state machine to VC_RESUME_IDLE in this state.
3838 +** VC_SUSPEND_SUSPENDED - Suspend has completed successfully. Also call
3839 +** complete_all on the suspend completion to notify
3840 +** anything waiting for suspend to happen.
3842 +** VC_SUSPEND_REJECTED - Videocore rejected suspend. Videocore will also
3843 +** initiate resume, so no need to alter resume state.
3844 +** We call complete_all on the suspend completion to notify
3845 +** of suspend rejection.
3847 +** VC_SUSPEND_FAILED - We failed to initiate videocore suspend. We notify the
3848 +** suspend completion and reset the resume state machine.
3850 +** VC_RESUME_IDLE - Initialise the resume completion at the same time. The
3851 +** resume completion is in it's 'done' state whenever
3852 +** videcore is running. Therfore, the VC_RESUME_IDLE state
3853 +** implies that videocore is suspended.
3854 +** Hence, any thread which needs to wait until videocore is
3855 +** running can wait on this completion - it will only block
3856 +** if videocore is suspended.
3858 +** VC_RESUME_RESUMED - Resume has completed successfully. Videocore is running.
3859 +** Call complete_all on the resume completion to unblock
3860 +** any threads waiting for resume. Also reset the suspend
3861 +** state machine to it's idle state.
3863 +** VC_RESUME_FAILED - Currently unused - no mechanism to fail resume exists.
3867 +set_suspend_state(VCHIQ_ARM_STATE_T *arm_state,
3868 + enum vc_suspend_status new_state)
3870 + /* set the state in all cases */
3871 + arm_state->vc_suspend_state = new_state;
3873 + /* state specific additional actions */
3874 + switch (new_state) {
3875 + case VC_SUSPEND_FORCE_CANCELED:
3876 + complete_all(&arm_state->vc_suspend_complete);
3878 + case VC_SUSPEND_REJECTED:
3879 + complete_all(&arm_state->vc_suspend_complete);
3881 + case VC_SUSPEND_FAILED:
3882 + complete_all(&arm_state->vc_suspend_complete);
3883 + arm_state->vc_resume_state = VC_RESUME_RESUMED;
3884 + complete_all(&arm_state->vc_resume_complete);
3886 + case VC_SUSPEND_IDLE:
3887 + INIT_COMPLETION(arm_state->vc_suspend_complete);
3889 + case VC_SUSPEND_REQUESTED:
3891 + case VC_SUSPEND_IN_PROGRESS:
3892 + set_resume_state(arm_state, VC_RESUME_IDLE);
3894 + case VC_SUSPEND_SUSPENDED:
3895 + complete_all(&arm_state->vc_suspend_complete);
3904 +set_resume_state(VCHIQ_ARM_STATE_T *arm_state,
3905 + enum vc_resume_status new_state)
3907 + /* set the state in all cases */
3908 + arm_state->vc_resume_state = new_state;
3910 + /* state specific additional actions */
3911 + switch (new_state) {
3912 + case VC_RESUME_FAILED:
3914 + case VC_RESUME_IDLE:
3915 + INIT_COMPLETION(arm_state->vc_resume_complete);
3917 + case VC_RESUME_REQUESTED:
3919 + case VC_RESUME_IN_PROGRESS:
3921 + case VC_RESUME_RESUMED:
3922 + complete_all(&arm_state->vc_resume_complete);
3923 + set_suspend_state(arm_state, VC_SUSPEND_IDLE);
3932 +/* should be called with the write lock held */
3934 +start_suspend_timer(VCHIQ_ARM_STATE_T *arm_state)
3936 + del_timer(&arm_state->suspend_timer);
3937 + arm_state->suspend_timer.expires = jiffies +
3938 + msecs_to_jiffies(arm_state->
3939 + suspend_timer_timeout);
3940 + add_timer(&arm_state->suspend_timer);
3941 + arm_state->suspend_timer_running = 1;
3944 +/* should be called with the write lock held */
3946 +stop_suspend_timer(VCHIQ_ARM_STATE_T *arm_state)
3948 + if (arm_state->suspend_timer_running) {
3949 + del_timer(&arm_state->suspend_timer);
3950 + arm_state->suspend_timer_running = 0;
3955 +need_resume(VCHIQ_STATE_T *state)
3957 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
3958 + return (arm_state->vc_suspend_state > VC_SUSPEND_IDLE) &&
3959 + (arm_state->vc_resume_state < VC_RESUME_REQUESTED) &&
3960 + vchiq_videocore_wanted(state);
3964 +block_resume(VCHIQ_ARM_STATE_T *arm_state)
3966 + int status = VCHIQ_SUCCESS;
3967 + const unsigned long timeout_val =
3968 + msecs_to_jiffies(FORCE_SUSPEND_TIMEOUT_MS);
3969 + int resume_count = 0;
3971 + /* Allow any threads which were blocked by the last force suspend to
3972 + * complete if they haven't already. Only give this one shot; if
3973 + * blocked_count is incremented after blocked_blocker is completed
3974 + * (which only happens when blocked_count hits 0) then those threads
3975 + * will have to wait until next time around */
3976 + if (arm_state->blocked_count) {
3977 + INIT_COMPLETION(arm_state->blocked_blocker);
3978 + write_unlock_bh(&arm_state->susp_res_lock);
3979 + vchiq_log_info(vchiq_susp_log_level, "%s wait for previously "
3980 + "blocked clients", __func__);
3981 + if (wait_for_completion_interruptible_timeout(
3982 + &arm_state->blocked_blocker, timeout_val)
3984 + vchiq_log_error(vchiq_susp_log_level, "%s wait for "
3985 + "previously blocked clients failed" , __func__);
3986 + status = VCHIQ_ERROR;
3987 + write_lock_bh(&arm_state->susp_res_lock);
3990 + vchiq_log_info(vchiq_susp_log_level, "%s previously blocked "
3991 + "clients resumed", __func__);
3992 + write_lock_bh(&arm_state->susp_res_lock);
3995 + /* We need to wait for resume to complete if it's in process */
3996 + while (arm_state->vc_resume_state != VC_RESUME_RESUMED &&
3997 + arm_state->vc_resume_state > VC_RESUME_IDLE) {
3998 + if (resume_count > 1) {
3999 + status = VCHIQ_ERROR;
4000 + vchiq_log_error(vchiq_susp_log_level, "%s waited too "
4001 + "many times for resume" , __func__);
4004 + write_unlock_bh(&arm_state->susp_res_lock);
4005 + vchiq_log_info(vchiq_susp_log_level, "%s wait for resume",
4007 + if (wait_for_completion_interruptible_timeout(
4008 + &arm_state->vc_resume_complete, timeout_val)
4010 + vchiq_log_error(vchiq_susp_log_level, "%s wait for "
4011 + "resume failed (%s)", __func__,
4012 + resume_state_names[arm_state->vc_resume_state +
4013 + VC_RESUME_NUM_OFFSET]);
4014 + status = VCHIQ_ERROR;
4015 + write_lock_bh(&arm_state->susp_res_lock);
4018 + vchiq_log_info(vchiq_susp_log_level, "%s resumed", __func__);
4019 + write_lock_bh(&arm_state->susp_res_lock);
4022 + INIT_COMPLETION(arm_state->resume_blocker);
4023 + arm_state->resume_blocked = 1;
4030 +unblock_resume(VCHIQ_ARM_STATE_T *arm_state)
4032 + complete_all(&arm_state->resume_blocker);
4033 + arm_state->resume_blocked = 0;
4036 +/* Initiate suspend via slot handler. Should be called with the write lock
4039 +vchiq_arm_vcsuspend(VCHIQ_STATE_T *state)
4041 + VCHIQ_STATUS_T status = VCHIQ_ERROR;
4042 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4047 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4048 + status = VCHIQ_SUCCESS;
4051 + switch (arm_state->vc_suspend_state) {
4052 + case VC_SUSPEND_REQUESTED:
4053 + vchiq_log_info(vchiq_susp_log_level, "%s: suspend already "
4054 + "requested", __func__);
4056 + case VC_SUSPEND_IN_PROGRESS:
4057 + vchiq_log_info(vchiq_susp_log_level, "%s: suspend already in "
4058 + "progress", __func__);
4062 + /* We don't expect to be in other states, so log but continue
4064 + vchiq_log_error(vchiq_susp_log_level,
4065 + "%s unexpected suspend state %s", __func__,
4066 + suspend_state_names[arm_state->vc_suspend_state +
4067 + VC_SUSPEND_NUM_OFFSET]);
4068 + /* fall through */
4069 + case VC_SUSPEND_REJECTED:
4070 + case VC_SUSPEND_FAILED:
4071 + /* Ensure any idle state actions have been run */
4072 + set_suspend_state(arm_state, VC_SUSPEND_IDLE);
4073 + /* fall through */
4074 + case VC_SUSPEND_IDLE:
4075 + vchiq_log_info(vchiq_susp_log_level,
4076 + "%s: suspending", __func__);
4077 + set_suspend_state(arm_state, VC_SUSPEND_REQUESTED);
4078 + /* kick the slot handler thread to initiate suspend */
4079 + request_poll(state, NULL, 0);
4084 + vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, status);
4089 +vchiq_platform_check_suspend(VCHIQ_STATE_T *state)
4091 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4097 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4099 + write_lock_bh(&arm_state->susp_res_lock);
4100 + if (arm_state->vc_suspend_state == VC_SUSPEND_REQUESTED &&
4101 + arm_state->vc_resume_state == VC_RESUME_RESUMED) {
4102 + set_suspend_state(arm_state, VC_SUSPEND_IN_PROGRESS);
4105 + write_unlock_bh(&arm_state->susp_res_lock);
4108 + vchiq_platform_suspend(state);
4111 + vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
4117 +output_timeout_error(VCHIQ_STATE_T *state)
4119 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4120 + char service_err[50] = "";
4121 + int vc_use_count = arm_state->videocore_use_count;
4122 + int active_services = state->unused_service;
4125 + if (!arm_state->videocore_use_count) {
4126 + snprintf(service_err, 50, " Videocore usecount is 0");
4129 + for (i = 0; i < active_services; i++) {
4130 + VCHIQ_SERVICE_T *service_ptr = state->services[i];
4131 + if (service_ptr && service_ptr->service_use_count &&
4132 + (service_ptr->srvstate != VCHIQ_SRVSTATE_FREE)) {
4133 + snprintf(service_err, 50, " %c%c%c%c(%d) service has "
4134 + "use count %d%s", VCHIQ_FOURCC_AS_4CHARS(
4135 + service_ptr->base.fourcc),
4136 + service_ptr->client_id,
4137 + service_ptr->service_use_count,
4138 + service_ptr->service_use_count ==
4139 + vc_use_count ? "" : " (+ more)");
4145 + vchiq_log_error(vchiq_susp_log_level,
4146 + "timed out waiting for vc suspend (%d).%s",
4147 + arm_state->autosuspend_override, service_err);
4151 +/* Try to get videocore into suspended state, regardless of autosuspend state.
4152 +** We don't actually force suspend, since videocore may get into a bad state
4153 +** if we force suspend at a bad time. Instead, we wait for autosuspend to
4154 +** determine a good point to suspend. If this doesn't happen within 100ms we
4157 +** Returns VCHIQ_SUCCESS if videocore suspended successfully, VCHIQ_RETRY if
4158 +** videocore failed to suspend in time or VCHIQ_ERROR if interrupted.
4161 +vchiq_arm_force_suspend(VCHIQ_STATE_T *state)
4163 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4164 + VCHIQ_STATUS_T status = VCHIQ_ERROR;
4171 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4173 + write_lock_bh(&arm_state->susp_res_lock);
4175 + status = block_resume(arm_state);
4176 + if (status != VCHIQ_SUCCESS)
4178 + if (arm_state->vc_suspend_state == VC_SUSPEND_SUSPENDED) {
4179 + /* Already suspended - just block resume and exit */
4180 + vchiq_log_info(vchiq_susp_log_level, "%s already suspended",
4182 + status = VCHIQ_SUCCESS;
4184 + } else if (arm_state->vc_suspend_state <= VC_SUSPEND_IDLE) {
4185 + /* initiate suspend immediately in the case that we're waiting
4186 + * for the timeout */
4187 + stop_suspend_timer(arm_state);
4188 + if (!vchiq_videocore_wanted(state)) {
4189 + vchiq_log_info(vchiq_susp_log_level, "%s videocore "
4190 + "idle, initiating suspend", __func__);
4191 + status = vchiq_arm_vcsuspend(state);
4192 + } else if (arm_state->autosuspend_override <
4193 + FORCE_SUSPEND_FAIL_MAX) {
4194 + vchiq_log_info(vchiq_susp_log_level, "%s letting "
4195 + "videocore go idle", __func__);
4196 + status = VCHIQ_SUCCESS;
4198 + vchiq_log_warning(vchiq_susp_log_level, "%s failed too "
4199 + "many times - attempting suspend", __func__);
4200 + status = vchiq_arm_vcsuspend(state);
4203 + vchiq_log_info(vchiq_susp_log_level, "%s videocore suspend "
4204 + "in progress - wait for completion", __func__);
4205 + status = VCHIQ_SUCCESS;
4208 + /* Wait for suspend to happen due to system idle (not forced..) */
4209 + if (status != VCHIQ_SUCCESS)
4210 + goto unblock_resume;
4213 + write_unlock_bh(&arm_state->susp_res_lock);
4215 + rc = wait_for_completion_interruptible_timeout(
4216 + &arm_state->vc_suspend_complete,
4217 + msecs_to_jiffies(FORCE_SUSPEND_TIMEOUT_MS));
4219 + write_lock_bh(&arm_state->susp_res_lock);
4221 + vchiq_log_warning(vchiq_susp_log_level, "%s "
4222 + "interrupted waiting for suspend", __func__);
4223 + status = VCHIQ_ERROR;
4224 + goto unblock_resume;
4225 + } else if (rc == 0) {
4226 + if (arm_state->vc_suspend_state > VC_SUSPEND_IDLE) {
4227 + /* Repeat timeout once if in progress */
4233 + arm_state->autosuspend_override++;
4234 + output_timeout_error(state);
4236 + status = VCHIQ_RETRY;
4237 + goto unblock_resume;
4239 + } while (0 < (repeat--));
4241 + /* Check and report state in case we need to abort ARM suspend */
4242 + if (arm_state->vc_suspend_state != VC_SUSPEND_SUSPENDED) {
4243 + status = VCHIQ_RETRY;
4244 + vchiq_log_error(vchiq_susp_log_level,
4245 + "%s videocore suspend failed (state %s)", __func__,
4246 + suspend_state_names[arm_state->vc_suspend_state +
4247 + VC_SUSPEND_NUM_OFFSET]);
4248 + /* Reset the state only if it's still in an error state.
4249 + * Something could have already initiated another suspend. */
4250 + if (arm_state->vc_suspend_state < VC_SUSPEND_IDLE)
4251 + set_suspend_state(arm_state, VC_SUSPEND_IDLE);
4253 + goto unblock_resume;
4256 + /* successfully suspended - unlock and exit */
4260 + /* all error states need to unblock resume before exit */
4261 + unblock_resume(arm_state);
4264 + write_unlock_bh(&arm_state->susp_res_lock);
4267 + vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, status);
4272 +vchiq_check_suspend(VCHIQ_STATE_T *state)
4274 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4279 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4281 + write_lock_bh(&arm_state->susp_res_lock);
4282 + if (arm_state->vc_suspend_state != VC_SUSPEND_SUSPENDED &&
4283 + arm_state->first_connect &&
4284 + !vchiq_videocore_wanted(state)) {
4285 + vchiq_arm_vcsuspend(state);
4287 + write_unlock_bh(&arm_state->susp_res_lock);
4290 + vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
4296 +vchiq_arm_allow_resume(VCHIQ_STATE_T *state)
4298 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4305 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4307 + write_lock_bh(&arm_state->susp_res_lock);
4308 + unblock_resume(arm_state);
4309 + resume = vchiq_check_resume(state);
4310 + write_unlock_bh(&arm_state->susp_res_lock);
4313 + if (wait_for_completion_interruptible(
4314 + &arm_state->vc_resume_complete) < 0) {
4315 + vchiq_log_error(vchiq_susp_log_level,
4316 + "%s interrupted", __func__);
4317 + /* failed, cannot accurately derive suspend
4318 + * state, so exit early. */
4323 + read_lock_bh(&arm_state->susp_res_lock);
4324 + if (arm_state->vc_suspend_state == VC_SUSPEND_SUSPENDED) {
4325 + vchiq_log_info(vchiq_susp_log_level,
4326 + "%s: Videocore remains suspended", __func__);
4328 + vchiq_log_info(vchiq_susp_log_level,
4329 + "%s: Videocore resumed", __func__);
4332 + read_unlock_bh(&arm_state->susp_res_lock);
4334 + vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
4338 +/* This function should be called with the write lock held */
4340 +vchiq_check_resume(VCHIQ_STATE_T *state)
4342 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4348 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4350 + if (need_resume(state)) {
4351 + set_resume_state(arm_state, VC_RESUME_REQUESTED);
4352 + request_poll(state, NULL, 0);
4357 + vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
4362 +vchiq_platform_check_resume(VCHIQ_STATE_T *state)
4364 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4370 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4372 + write_lock_bh(&arm_state->susp_res_lock);
4373 + if (arm_state->wake_address == 0) {
4374 + vchiq_log_info(vchiq_susp_log_level,
4375 + "%s: already awake", __func__);
4378 + if (arm_state->vc_resume_state == VC_RESUME_IN_PROGRESS) {
4379 + vchiq_log_info(vchiq_susp_log_level,
4380 + "%s: already resuming", __func__);
4384 + if (arm_state->vc_resume_state == VC_RESUME_REQUESTED) {
4385 + set_resume_state(arm_state, VC_RESUME_IN_PROGRESS);
4388 + vchiq_log_trace(vchiq_susp_log_level,
4389 + "%s: not resuming (resume state %s)", __func__,
4390 + resume_state_names[arm_state->vc_resume_state +
4391 + VC_RESUME_NUM_OFFSET]);
4394 + write_unlock_bh(&arm_state->susp_res_lock);
4397 + vchiq_platform_resume(state);
4400 + vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
4408 +vchiq_use_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
4409 + enum USE_TYPE_E use_type)
4411 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4412 + VCHIQ_STATUS_T ret = VCHIQ_SUCCESS;
4415 + int local_uc, local_entity_uc;
4420 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4422 + if (use_type == USE_TYPE_VCHIQ) {
4423 + sprintf(entity, "VCHIQ: ");
4424 + entity_uc = &arm_state->peer_use_count;
4425 + } else if (service) {
4426 + sprintf(entity, "%c%c%c%c:%03d",
4427 + VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
4428 + service->client_id);
4429 + entity_uc = &service->service_use_count;
4431 + vchiq_log_error(vchiq_susp_log_level, "%s null service "
4433 + ret = VCHIQ_ERROR;
4437 + write_lock_bh(&arm_state->susp_res_lock);
4438 + while (arm_state->resume_blocked) {
4439 + /* If we call 'use' while force suspend is waiting for suspend,
4440 + * then we're about to block the thread which the force is
4441 + * waiting to complete, so we're bound to just time out. In this
4442 + * case, set the suspend state such that the wait will be
4443 + * canceled, so we can complete as quickly as possible. */
4444 + if (arm_state->resume_blocked && arm_state->vc_suspend_state ==
4445 + VC_SUSPEND_IDLE) {
4446 + set_suspend_state(arm_state, VC_SUSPEND_FORCE_CANCELED);
4449 + /* If suspend is already in progress then we need to block */
4450 + if (!try_wait_for_completion(&arm_state->resume_blocker)) {
4451 + /* Indicate that there are threads waiting on the resume
4452 + * blocker. These need to be allowed to complete before
4453 + * a _second_ call to force suspend can complete,
4454 + * otherwise low priority threads might never actually
4456 + arm_state->blocked_count++;
4457 + write_unlock_bh(&arm_state->susp_res_lock);
4458 + vchiq_log_info(vchiq_susp_log_level, "%s %s resume "
4459 + "blocked - waiting...", __func__, entity);
4460 + if (wait_for_completion_killable(
4461 + &arm_state->resume_blocker) != 0) {
4462 + vchiq_log_error(vchiq_susp_log_level, "%s %s "
4463 + "wait for resume blocker interrupted",
4464 + __func__, entity);
4465 + ret = VCHIQ_ERROR;
4466 + write_lock_bh(&arm_state->susp_res_lock);
4467 + arm_state->blocked_count--;
4468 + write_unlock_bh(&arm_state->susp_res_lock);
4471 + vchiq_log_info(vchiq_susp_log_level, "%s %s resume "
4472 + "unblocked", __func__, entity);
4473 + write_lock_bh(&arm_state->susp_res_lock);
4474 + if (--arm_state->blocked_count == 0)
4475 + complete_all(&arm_state->blocked_blocker);
4479 + stop_suspend_timer(arm_state);
4481 + local_uc = ++arm_state->videocore_use_count;
4482 + local_entity_uc = ++(*entity_uc);
4484 + /* If there's a pending request which hasn't yet been serviced then
4485 + * just clear it. If we're past VC_SUSPEND_REQUESTED state then
4486 + * vc_resume_complete will block until we either resume or fail to
4488 + if (arm_state->vc_suspend_state <= VC_SUSPEND_REQUESTED)
4489 + set_suspend_state(arm_state, VC_SUSPEND_IDLE);
4491 + if ((use_type != USE_TYPE_SERVICE_NO_RESUME) && need_resume(state)) {
4492 + set_resume_state(arm_state, VC_RESUME_REQUESTED);
4493 + vchiq_log_info(vchiq_susp_log_level,
4494 + "%s %s count %d, state count %d",
4495 + __func__, entity, local_entity_uc, local_uc);
4496 + request_poll(state, NULL, 0);
4498 + vchiq_log_trace(vchiq_susp_log_level,
4499 + "%s %s count %d, state count %d",
4500 + __func__, entity, *entity_uc, local_uc);
4503 + write_unlock_bh(&arm_state->susp_res_lock);
4505 + /* Completion is in a done state when we're not suspended, so this won't
4506 + * block for the non-suspended case. */
4507 + if (!try_wait_for_completion(&arm_state->vc_resume_complete)) {
4508 + vchiq_log_info(vchiq_susp_log_level, "%s %s wait for resume",
4509 + __func__, entity);
4510 + if (wait_for_completion_killable(
4511 + &arm_state->vc_resume_complete) != 0) {
4512 + vchiq_log_error(vchiq_susp_log_level, "%s %s wait for "
4513 + "resume interrupted", __func__, entity);
4514 + ret = VCHIQ_ERROR;
4517 + vchiq_log_info(vchiq_susp_log_level, "%s %s resumed", __func__,
4521 + if (ret == VCHIQ_SUCCESS) {
4522 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
4523 + long ack_cnt = atomic_xchg(&arm_state->ka_use_ack_count, 0);
4524 + while (ack_cnt && (status == VCHIQ_SUCCESS)) {
4525 + /* Send the use notify to videocore */
4526 + status = vchiq_send_remote_use_active(state);
4527 + if (status == VCHIQ_SUCCESS)
4530 + atomic_add(ack_cnt,
4531 + &arm_state->ka_use_ack_count);
4536 + vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
4541 +vchiq_release_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service)
4543 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4544 + VCHIQ_STATUS_T ret = VCHIQ_SUCCESS;
4547 + int local_uc, local_entity_uc;
4552 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4555 + sprintf(entity, "%c%c%c%c:%03d",
4556 + VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
4557 + service->client_id);
4558 + entity_uc = &service->service_use_count;
4560 + sprintf(entity, "PEER: ");
4561 + entity_uc = &arm_state->peer_use_count;
4564 + write_lock_bh(&arm_state->susp_res_lock);
4565 + if (!arm_state->videocore_use_count || !(*entity_uc)) {
4566 + /* Don't use BUG_ON - don't allow user thread to crash kernel */
4567 + WARN_ON(!arm_state->videocore_use_count);
4568 + WARN_ON(!(*entity_uc));
4569 + ret = VCHIQ_ERROR;
4572 + local_uc = --arm_state->videocore_use_count;
4573 + local_entity_uc = --(*entity_uc);
4575 + if (!vchiq_videocore_wanted(state)) {
4576 + if (vchiq_platform_use_suspend_timer() &&
4577 + !arm_state->resume_blocked) {
4578 + /* Only use the timer if we're not trying to force
4579 + * suspend (=> resume_blocked) */
4580 + start_suspend_timer(arm_state);
4582 + vchiq_log_info(vchiq_susp_log_level,
4583 + "%s %s count %d, state count %d - suspending",
4584 + __func__, entity, *entity_uc,
4585 + arm_state->videocore_use_count);
4586 + vchiq_arm_vcsuspend(state);
4589 + vchiq_log_trace(vchiq_susp_log_level,
4590 + "%s %s count %d, state count %d",
4591 + __func__, entity, *entity_uc,
4592 + arm_state->videocore_use_count);
4595 + write_unlock_bh(&arm_state->susp_res_lock);
4598 + vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
4603 +vchiq_on_remote_use(VCHIQ_STATE_T *state)
4605 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4606 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4607 + atomic_inc(&arm_state->ka_use_count);
4608 + complete(&arm_state->ka_evt);
4612 +vchiq_on_remote_release(VCHIQ_STATE_T *state)
4614 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4615 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4616 + atomic_inc(&arm_state->ka_release_count);
4617 + complete(&arm_state->ka_evt);
4621 +vchiq_use_service_internal(VCHIQ_SERVICE_T *service)
4623 + return vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
4627 +vchiq_release_service_internal(VCHIQ_SERVICE_T *service)
4629 + return vchiq_release_internal(service->state, service);
4632 +static void suspend_timer_callback(unsigned long context)
4634 + VCHIQ_STATE_T *state = (VCHIQ_STATE_T *)context;
4635 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4638 + vchiq_log_info(vchiq_susp_log_level,
4639 + "%s - suspend timer expired - check suspend", __func__);
4640 + vchiq_check_suspend(state);
4646 +vchiq_use_service_no_resume(VCHIQ_SERVICE_HANDLE_T handle)
4648 + VCHIQ_STATUS_T ret = VCHIQ_ERROR;
4649 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
4651 + ret = vchiq_use_internal(service->state, service,
4652 + USE_TYPE_SERVICE_NO_RESUME);
4653 + unlock_service(service);
4659 +vchiq_use_service(VCHIQ_SERVICE_HANDLE_T handle)
4661 + VCHIQ_STATUS_T ret = VCHIQ_ERROR;
4662 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
4664 + ret = vchiq_use_internal(service->state, service,
4665 + USE_TYPE_SERVICE);
4666 + unlock_service(service);
4672 +vchiq_release_service(VCHIQ_SERVICE_HANDLE_T handle)
4674 + VCHIQ_STATUS_T ret = VCHIQ_ERROR;
4675 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
4677 + ret = vchiq_release_internal(service->state, service);
4678 + unlock_service(service);
4684 +vchiq_dump_service_use_state(VCHIQ_STATE_T *state)
4686 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4688 + /* Only dump 64 services */
4689 + static const int local_max_services = 64;
4690 + /* If there's more than 64 services, only dump ones with
4691 + * non-zero counts */
4692 + int only_nonzero = 0;
4693 + static const char *nz = "<-- preventing suspend";
4695 + enum vc_suspend_status vc_suspend_state;
4696 + enum vc_resume_status vc_resume_state;
4699 + int active_services;
4700 + struct service_data_struct {
4704 + } service_data[local_max_services];
4709 + read_lock_bh(&arm_state->susp_res_lock);
4710 + vc_suspend_state = arm_state->vc_suspend_state;
4711 + vc_resume_state = arm_state->vc_resume_state;
4712 + peer_count = arm_state->peer_use_count;
4713 + vc_use_count = arm_state->videocore_use_count;
4714 + active_services = state->unused_service;
4715 + if (active_services > local_max_services)
4718 + for (i = 0; (i < active_services) && (j < local_max_services); i++) {
4719 + VCHIQ_SERVICE_T *service_ptr = state->services[i];
4723 + if (only_nonzero && !service_ptr->service_use_count)
4726 + if (service_ptr->srvstate != VCHIQ_SRVSTATE_FREE) {
4727 + service_data[j].fourcc = service_ptr->base.fourcc;
4728 + service_data[j].clientid = service_ptr->client_id;
4729 + service_data[j++].use_count = service_ptr->
4730 + service_use_count;
4734 + read_unlock_bh(&arm_state->susp_res_lock);
4736 + vchiq_log_warning(vchiq_susp_log_level,
4737 + "-- Videcore suspend state: %s --",
4738 + suspend_state_names[vc_suspend_state + VC_SUSPEND_NUM_OFFSET]);
4739 + vchiq_log_warning(vchiq_susp_log_level,
4740 + "-- Videcore resume state: %s --",
4741 + resume_state_names[vc_resume_state + VC_RESUME_NUM_OFFSET]);
4744 + vchiq_log_warning(vchiq_susp_log_level, "Too many active "
4745 + "services (%d). Only dumping up to first %d services "
4746 + "with non-zero use-count", active_services,
4747 + local_max_services);
4749 + for (i = 0; i < j; i++) {
4750 + vchiq_log_warning(vchiq_susp_log_level,
4751 + "----- %c%c%c%c:%d service count %d %s",
4752 + VCHIQ_FOURCC_AS_4CHARS(service_data[i].fourcc),
4753 + service_data[i].clientid,
4754 + service_data[i].use_count,
4755 + service_data[i].use_count ? nz : "");
4757 + vchiq_log_warning(vchiq_susp_log_level,
4758 + "----- VCHIQ use count count %d", peer_count);
4759 + vchiq_log_warning(vchiq_susp_log_level,
4760 + "--- Overall vchiq instance use count %d", vc_use_count);
4762 + vchiq_dump_platform_use_state(state);
4766 +vchiq_check_service(VCHIQ_SERVICE_T *service)
4768 + VCHIQ_ARM_STATE_T *arm_state;
4769 + VCHIQ_STATUS_T ret = VCHIQ_ERROR;
4771 + if (!service || !service->state)
4774 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4776 + arm_state = vchiq_platform_get_arm_state(service->state);
4778 + read_lock_bh(&arm_state->susp_res_lock);
4779 + if (service->service_use_count)
4780 + ret = VCHIQ_SUCCESS;
4781 + read_unlock_bh(&arm_state->susp_res_lock);
4783 + if (ret == VCHIQ_ERROR) {
4784 + vchiq_log_error(vchiq_susp_log_level,
4785 + "%s ERROR - %c%c%c%c:%d service count %d, "
4786 + "state count %d, videocore suspend state %s", __func__,
4787 + VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
4788 + service->client_id, service->service_use_count,
4789 + arm_state->videocore_use_count,
4790 + suspend_state_names[arm_state->vc_suspend_state +
4791 + VC_SUSPEND_NUM_OFFSET]);
4792 + vchiq_dump_service_use_state(service->state);
4798 +/* stub functions */
4799 +void vchiq_on_remote_use_active(VCHIQ_STATE_T *state)
4804 +void vchiq_platform_conn_state_changed(VCHIQ_STATE_T *state,
4805 + VCHIQ_CONNSTATE_T oldstate, VCHIQ_CONNSTATE_T newstate)
4807 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4808 + vchiq_log_info(vchiq_susp_log_level, "%d: %s->%s", state->id,
4809 + get_conn_state_name(oldstate), get_conn_state_name(newstate));
4810 + if (state->conn_state == VCHIQ_CONNSTATE_CONNECTED) {
4811 + write_lock_bh(&arm_state->susp_res_lock);
4812 + if (!arm_state->first_connect) {
4813 + char threadname[10];
4814 + arm_state->first_connect = 1;
4815 + write_unlock_bh(&arm_state->susp_res_lock);
4816 + snprintf(threadname, sizeof(threadname), "VCHIQka-%d",
4818 + arm_state->ka_thread = kthread_create(
4819 + &vchiq_keepalive_thread_func,
4822 + if (arm_state->ka_thread == NULL) {
4823 + vchiq_log_error(vchiq_susp_log_level,
4824 + "vchiq: FATAL: couldn't create thread %s",
4827 + wake_up_process(arm_state->ka_thread);
4830 + write_unlock_bh(&arm_state->susp_res_lock);
4835 +/****************************************************************************
4837 +* vchiq_init - called when the module is loaded.
4839 +***************************************************************************/
4847 + /* create proc entries */
4848 + err = vchiq_proc_init();
4850 + goto failed_proc_init;
4852 + err = alloc_chrdev_region(&vchiq_devid, VCHIQ_MINOR, 1, DEVICE_NAME);
4854 + vchiq_log_error(vchiq_arm_log_level,
4855 + "Unable to allocate device number");
4856 + goto failed_alloc_chrdev;
4858 + cdev_init(&vchiq_cdev, &vchiq_fops);
4859 + vchiq_cdev.owner = THIS_MODULE;
4860 + err = cdev_add(&vchiq_cdev, vchiq_devid, 1);
4862 + vchiq_log_error(vchiq_arm_log_level,
4863 + "Unable to register device");
4864 + goto failed_cdev_add;
4867 + /* create sysfs entries */
4868 + vchiq_class = class_create(THIS_MODULE, DEVICE_NAME);
4869 + ptr_err = vchiq_class;
4870 + if (IS_ERR(ptr_err))
4871 + goto failed_class_create;
4873 + vchiq_dev = device_create(vchiq_class, NULL,
4874 + vchiq_devid, NULL, "vchiq");
4875 + ptr_err = vchiq_dev;
4876 + if (IS_ERR(ptr_err))
4877 + goto failed_device_create;
4879 + err = vchiq_platform_init(&g_state);
4881 + goto failed_platform_init;
4883 + vchiq_log_info(vchiq_arm_log_level,
4884 + "vchiq: initialised - version %d (min %d), device %d.%d",
4885 + VCHIQ_VERSION, VCHIQ_VERSION_MIN,
4886 + MAJOR(vchiq_devid), MINOR(vchiq_devid));
4890 +failed_platform_init:
4891 + device_destroy(vchiq_class, vchiq_devid);
4892 +failed_device_create:
4893 + class_destroy(vchiq_class);
4894 +failed_class_create:
4895 + cdev_del(&vchiq_cdev);
4896 + err = PTR_ERR(ptr_err);
4898 + unregister_chrdev_region(vchiq_devid, 1);
4899 +failed_alloc_chrdev:
4900 + vchiq_proc_deinit();
4902 + vchiq_log_warning(vchiq_arm_log_level, "could not load vchiq");
4906 +static int vchiq_instance_get_use_count(VCHIQ_INSTANCE_T instance)
4908 + VCHIQ_SERVICE_T *service;
4909 + int use_count = 0, i;
4911 + while ((service = next_service_by_instance(instance->state,
4912 + instance, &i)) != NULL) {
4913 + use_count += service->service_use_count;
4914 + unlock_service(service);
4919 +/* read the per-process use-count */
4920 +static int proc_read_use_count(char *page, char **start,
4921 + off_t off, int count,
4922 + int *eof, void *data)
4924 + VCHIQ_INSTANCE_T instance = data;
4925 + int len, use_count;
4927 + use_count = vchiq_instance_get_use_count(instance);
4928 + len = snprintf(page+off, count, "%d\n", use_count);
4933 +/* add an instance (process) to the proc entries */
4934 +static int vchiq_proc_add_instance(VCHIQ_INSTANCE_T instance)
4940 + struct proc_dir_entry *top, *use_count;
4941 + struct proc_dir_entry *clients = vchiq_clients_top();
4942 + int pid = instance->pid;
4944 + snprintf(pidstr, sizeof(pidstr), "%d", pid);
4945 + top = proc_mkdir(pidstr, clients);
4949 + use_count = create_proc_read_entry("use_count",
4951 + proc_read_use_count,
4954 + goto fail_use_count;
4956 + instance->proc_entry = top;
4961 + remove_proc_entry(top->name, clients);
4967 +static void vchiq_proc_remove_instance(VCHIQ_INSTANCE_T instance)
4970 + struct proc_dir_entry *clients = vchiq_clients_top();
4971 + remove_proc_entry("use_count", instance->proc_entry);
4972 + remove_proc_entry(instance->proc_entry->name, clients);
4976 +/****************************************************************************
4978 +* vchiq_exit - called when the module is unloaded.
4980 +***************************************************************************/
4985 + vchiq_platform_exit(&g_state);
4986 + device_destroy(vchiq_class, vchiq_devid);
4987 + class_destroy(vchiq_class);
4988 + cdev_del(&vchiq_cdev);
4989 + unregister_chrdev_region(vchiq_devid, 1);
4992 +module_init(vchiq_init);
4993 +module_exit(vchiq_exit);
4994 +MODULE_LICENSE("GPL");
4995 +MODULE_AUTHOR("Broadcom Corporation");
4997 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.h
5000 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
5002 + * Redistribution and use in source and binary forms, with or without
5003 + * modification, are permitted provided that the following conditions
5005 + * 1. Redistributions of source code must retain the above copyright
5006 + * notice, this list of conditions, and the following disclaimer,
5007 + * without modification.
5008 + * 2. Redistributions in binary form must reproduce the above copyright
5009 + * notice, this list of conditions and the following disclaimer in the
5010 + * documentation and/or other materials provided with the distribution.
5011 + * 3. The names of the above-listed copyright holders may not be used
5012 + * to endorse or promote products derived from this software without
5013 + * specific prior written permission.
5015 + * ALTERNATIVELY, this software may be distributed under the terms of the
5016 + * GNU General Public License ("GPL") version 2, as published by the Free
5017 + * Software Foundation.
5019 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
5020 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
5021 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
5022 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
5023 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
5024 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
5025 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
5026 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
5027 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
5028 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
5029 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5032 +#ifndef VCHIQ_ARM_H
5033 +#define VCHIQ_ARM_H
5035 +#include <linux/mutex.h>
5036 +#include <linux/semaphore.h>
5037 +#include <linux/atomic.h>
5038 +#include "vchiq_core.h"
5041 +enum vc_suspend_status {
5042 + VC_SUSPEND_FORCE_CANCELED = -3, /* Force suspend canceled, too busy */
5043 + VC_SUSPEND_REJECTED = -2, /* Videocore rejected suspend request */
5044 + VC_SUSPEND_FAILED = -1, /* Videocore suspend failed */
5045 + VC_SUSPEND_IDLE = 0, /* VC active, no suspend actions */
5046 + VC_SUSPEND_REQUESTED, /* User has requested suspend */
5047 + VC_SUSPEND_IN_PROGRESS, /* Slot handler has recvd suspend request */
5048 + VC_SUSPEND_SUSPENDED /* Videocore suspend succeeded */
5051 +enum vc_resume_status {
5052 + VC_RESUME_FAILED = -1, /* Videocore resume failed */
5053 + VC_RESUME_IDLE = 0, /* VC suspended, no resume actions */
5054 + VC_RESUME_REQUESTED, /* User has requested resume */
5055 + VC_RESUME_IN_PROGRESS, /* Slot handler has received resume request */
5056 + VC_RESUME_RESUMED /* Videocore resumed successfully (active) */
5062 + USE_TYPE_SERVICE_NO_RESUME,
5068 +typedef struct vchiq_arm_state_struct {
5069 + /* Keepalive-related data */
5070 + struct task_struct *ka_thread;
5071 + struct completion ka_evt;
5072 + atomic_t ka_use_count;
5073 + atomic_t ka_use_ack_count;
5074 + atomic_t ka_release_count;
5076 + struct completion vc_suspend_complete;
5077 + struct completion vc_resume_complete;
5079 + rwlock_t susp_res_lock;
5080 + enum vc_suspend_status vc_suspend_state;
5081 + enum vc_resume_status vc_resume_state;
5083 + unsigned int wake_address;
5085 + struct timer_list suspend_timer;
5086 + int suspend_timer_timeout;
5087 + int suspend_timer_running;
5089 + /* Global use count for videocore.
5090 + ** This is equal to the sum of the use counts for all services. When
5091 + ** this hits zero the videocore suspend procedure will be initiated.
5093 + int videocore_use_count;
5095 + /* Use count to track requests from videocore peer.
5096 + ** This use count is not associated with a service, so needs to be
5097 + ** tracked separately with the state.
5099 + int peer_use_count;
5101 + /* Flag to indicate whether resume is blocked. This happens when the
5102 + ** ARM is suspending
5104 + struct completion resume_blocker;
5105 + int resume_blocked;
5106 + struct completion blocked_blocker;
5107 + int blocked_count;
5109 + int autosuspend_override;
5111 + /* Flag to indicate that the first vchiq connect has made it through.
5112 + ** This means that both sides should be fully ready, and we should
5113 + ** be able to suspend after this point.
5115 + int first_connect;
5117 + unsigned long long suspend_start_time;
5118 + unsigned long long sleep_start_time;
5119 + unsigned long long resume_start_time;
5120 + unsigned long long last_wake_time;
5122 +} VCHIQ_ARM_STATE_T;
5124 +extern int vchiq_arm_log_level;
5125 +extern int vchiq_susp_log_level;
5128 +vchiq_platform_init(VCHIQ_STATE_T *state);
5131 +vchiq_platform_exit(VCHIQ_STATE_T *state);
5133 +extern VCHIQ_STATE_T *
5134 +vchiq_get_state(void);
5136 +extern VCHIQ_STATUS_T
5137 +vchiq_arm_vcsuspend(VCHIQ_STATE_T *state);
5139 +extern VCHIQ_STATUS_T
5140 +vchiq_arm_force_suspend(VCHIQ_STATE_T *state);
5143 +vchiq_arm_allow_resume(VCHIQ_STATE_T *state);
5145 +extern VCHIQ_STATUS_T
5146 +vchiq_arm_vcresume(VCHIQ_STATE_T *state);
5148 +extern VCHIQ_STATUS_T
5149 +vchiq_arm_init_state(VCHIQ_STATE_T *state, VCHIQ_ARM_STATE_T *arm_state);
5152 +vchiq_check_resume(VCHIQ_STATE_T *state);
5155 +vchiq_check_suspend(VCHIQ_STATE_T *state);
5157 +extern VCHIQ_STATUS_T
5158 +vchiq_use_service(VCHIQ_SERVICE_HANDLE_T handle);
5160 +extern VCHIQ_STATUS_T
5161 +vchiq_release_service(VCHIQ_SERVICE_HANDLE_T handle);
5163 +extern VCHIQ_STATUS_T
5164 +vchiq_check_service(VCHIQ_SERVICE_T *service);
5166 +extern VCHIQ_STATUS_T
5167 +vchiq_platform_suspend(VCHIQ_STATE_T *state);
5170 +vchiq_platform_videocore_wanted(VCHIQ_STATE_T *state);
5173 +vchiq_platform_use_suspend_timer(void);
5176 +vchiq_dump_platform_use_state(VCHIQ_STATE_T *state);
5179 +vchiq_dump_service_use_state(VCHIQ_STATE_T *state);
5181 +extern VCHIQ_ARM_STATE_T*
5182 +vchiq_platform_get_arm_state(VCHIQ_STATE_T *state);
5185 +vchiq_videocore_wanted(VCHIQ_STATE_T *state);
5187 +extern VCHIQ_STATUS_T
5188 +vchiq_use_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
5189 + enum USE_TYPE_E use_type);
5190 +extern VCHIQ_STATUS_T
5191 +vchiq_release_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service);
5194 +set_suspend_state(VCHIQ_ARM_STATE_T *arm_state,
5195 + enum vc_suspend_status new_state);
5198 +set_resume_state(VCHIQ_ARM_STATE_T *arm_state,
5199 + enum vc_resume_status new_state);
5202 +start_suspend_timer(VCHIQ_ARM_STATE_T *arm_state);
5204 +extern int vchiq_proc_init(void);
5205 +extern void vchiq_proc_deinit(void);
5206 +extern struct proc_dir_entry *vchiq_proc_top(void);
5207 +extern struct proc_dir_entry *vchiq_clients_top(void);
5210 +#endif /* VCHIQ_ARM_H */
5212 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_build_info.h
5215 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
5217 + * Redistribution and use in source and binary forms, with or without
5218 + * modification, are permitted provided that the following conditions
5220 + * 1. Redistributions of source code must retain the above copyright
5221 + * notice, this list of conditions, and the following disclaimer,
5222 + * without modification.
5223 + * 2. Redistributions in binary form must reproduce the above copyright
5224 + * notice, this list of conditions and the following disclaimer in the
5225 + * documentation and/or other materials provided with the distribution.
5226 + * 3. The names of the above-listed copyright holders may not be used
5227 + * to endorse or promote products derived from this software without
5228 + * specific prior written permission.
5230 + * ALTERNATIVELY, this software may be distributed under the terms of the
5231 + * GNU General Public License ("GPL") version 2, as published by the Free
5232 + * Software Foundation.
5234 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
5235 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
5236 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
5237 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
5238 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
5239 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
5240 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
5241 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
5242 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
5243 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
5244 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5247 +const char *vchiq_get_build_hostname(void);
5248 +const char *vchiq_get_build_version(void);
5249 +const char *vchiq_get_build_time(void);
5250 +const char *vchiq_get_build_date(void);
5252 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_cfg.h
5255 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
5257 + * Redistribution and use in source and binary forms, with or without
5258 + * modification, are permitted provided that the following conditions
5260 + * 1. Redistributions of source code must retain the above copyright
5261 + * notice, this list of conditions, and the following disclaimer,
5262 + * without modification.
5263 + * 2. Redistributions in binary form must reproduce the above copyright
5264 + * notice, this list of conditions and the following disclaimer in the
5265 + * documentation and/or other materials provided with the distribution.
5266 + * 3. The names of the above-listed copyright holders may not be used
5267 + * to endorse or promote products derived from this software without
5268 + * specific prior written permission.
5270 + * ALTERNATIVELY, this software may be distributed under the terms of the
5271 + * GNU General Public License ("GPL") version 2, as published by the Free
5272 + * Software Foundation.
5274 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
5275 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
5276 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
5277 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
5278 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
5279 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
5280 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
5281 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
5282 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
5283 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
5284 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5287 +#ifndef VCHIQ_CFG_H
5288 +#define VCHIQ_CFG_H
5290 +#define VCHIQ_MAGIC VCHIQ_MAKE_FOURCC('V', 'C', 'H', 'I')
5291 +/* The version of VCHIQ - change with any non-trivial change */
5292 +#define VCHIQ_VERSION 6
5293 +/* The minimum compatible version - update to match VCHIQ_VERSION with any
5294 +** incompatible change */
5295 +#define VCHIQ_VERSION_MIN 3
5297 +#define VCHIQ_MAX_STATES 1
5298 +#define VCHIQ_MAX_SERVICES 4096
5299 +#define VCHIQ_MAX_SLOTS 128
5300 +#define VCHIQ_MAX_SLOTS_PER_SIDE 64
5302 +#define VCHIQ_NUM_CURRENT_BULKS 32
5303 +#define VCHIQ_NUM_SERVICE_BULKS 4
5305 +#ifndef VCHIQ_ENABLE_DEBUG
5306 +#define VCHIQ_ENABLE_DEBUG 1
5309 +#ifndef VCHIQ_ENABLE_STATS
5310 +#define VCHIQ_ENABLE_STATS 1
5313 +#endif /* VCHIQ_CFG_H */
5315 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.c
5318 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
5320 + * Redistribution and use in source and binary forms, with or without
5321 + * modification, are permitted provided that the following conditions
5323 + * 1. Redistributions of source code must retain the above copyright
5324 + * notice, this list of conditions, and the following disclaimer,
5325 + * without modification.
5326 + * 2. Redistributions in binary form must reproduce the above copyright
5327 + * notice, this list of conditions and the following disclaimer in the
5328 + * documentation and/or other materials provided with the distribution.
5329 + * 3. The names of the above-listed copyright holders may not be used
5330 + * to endorse or promote products derived from this software without
5331 + * specific prior written permission.
5333 + * ALTERNATIVELY, this software may be distributed under the terms of the
5334 + * GNU General Public License ("GPL") version 2, as published by the Free
5335 + * Software Foundation.
5337 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
5338 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
5339 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
5340 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
5341 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
5342 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
5343 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
5344 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
5345 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
5346 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
5347 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5350 +#include "vchiq_connected.h"
5351 +#include "vchiq_core.h"
5352 +#include <linux/module.h>
5353 +#include <linux/mutex.h>
5355 +#define MAX_CALLBACKS 10
5357 +static int g_connected;
5358 +static int g_num_deferred_callbacks;
5359 +static VCHIQ_CONNECTED_CALLBACK_T g_deferred_callback[MAX_CALLBACKS];
5360 +static int g_once_init;
5361 +static struct mutex g_connected_mutex;
5363 +/****************************************************************************
5365 +* Function to initialize our lock.
5367 +***************************************************************************/
5369 +static void connected_init(void)
5371 + if (!g_once_init) {
5372 + mutex_init(&g_connected_mutex);
5377 +/****************************************************************************
5379 +* This function is used to defer initialization until the vchiq stack is
5380 +* initialized. If the stack is already initialized, then the callback will
5381 +* be made immediately, otherwise it will be deferred until
5382 +* vchiq_call_connected_callbacks is called.
5384 +***************************************************************************/
5386 +void vchiq_add_connected_callback(VCHIQ_CONNECTED_CALLBACK_T callback)
5390 + if (mutex_lock_interruptible(&g_connected_mutex) != 0)
5394 + /* We're already connected. Call the callback immediately. */
5398 + if (g_num_deferred_callbacks >= MAX_CALLBACKS)
5399 + vchiq_log_error(vchiq_core_log_level,
5400 + "There already %d callback registered - "
5401 + "please increase MAX_CALLBACKS",
5402 + g_num_deferred_callbacks);
5404 + g_deferred_callback[g_num_deferred_callbacks] =
5406 + g_num_deferred_callbacks++;
5409 + mutex_unlock(&g_connected_mutex);
5412 +/****************************************************************************
5414 +* This function is called by the vchiq stack once it has been connected to
5415 +* the videocore and clients can start to use the stack.
5417 +***************************************************************************/
5419 +void vchiq_call_connected_callbacks(void)
5425 + if (mutex_lock_interruptible(&g_connected_mutex) != 0)
5428 + for (i = 0; i < g_num_deferred_callbacks; i++)
5429 + g_deferred_callback[i]();
5431 + g_num_deferred_callbacks = 0;
5433 + mutex_unlock(&g_connected_mutex);
5435 +EXPORT_SYMBOL(vchiq_add_connected_callback);
5437 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.h
5440 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
5442 + * Redistribution and use in source and binary forms, with or without
5443 + * modification, are permitted provided that the following conditions
5445 + * 1. Redistributions of source code must retain the above copyright
5446 + * notice, this list of conditions, and the following disclaimer,
5447 + * without modification.
5448 + * 2. Redistributions in binary form must reproduce the above copyright
5449 + * notice, this list of conditions and the following disclaimer in the
5450 + * documentation and/or other materials provided with the distribution.
5451 + * 3. The names of the above-listed copyright holders may not be used
5452 + * to endorse or promote products derived from this software without
5453 + * specific prior written permission.
5455 + * ALTERNATIVELY, this software may be distributed under the terms of the
5456 + * GNU General Public License ("GPL") version 2, as published by the Free
5457 + * Software Foundation.
5459 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
5460 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
5461 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
5462 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
5463 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
5464 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
5465 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
5466 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
5467 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
5468 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
5469 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5472 +#ifndef VCHIQ_CONNECTED_H
5473 +#define VCHIQ_CONNECTED_H
5475 +/* ---- Include Files ----------------------------------------------------- */
5477 +/* ---- Constants and Types ---------------------------------------------- */
5479 +typedef void (*VCHIQ_CONNECTED_CALLBACK_T)(void);
5481 +/* ---- Variable Externs ------------------------------------------------- */
5483 +/* ---- Function Prototypes ---------------------------------------------- */
5485 +void vchiq_add_connected_callback(VCHIQ_CONNECTED_CALLBACK_T callback);
5486 +void vchiq_call_connected_callbacks(void);
5488 +#endif /* VCHIQ_CONNECTED_H */
5491 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.c
5494 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
5496 + * Redistribution and use in source and binary forms, with or without
5497 + * modification, are permitted provided that the following conditions
5499 + * 1. Redistributions of source code must retain the above copyright
5500 + * notice, this list of conditions, and the following disclaimer,
5501 + * without modification.
5502 + * 2. Redistributions in binary form must reproduce the above copyright
5503 + * notice, this list of conditions and the following disclaimer in the
5504 + * documentation and/or other materials provided with the distribution.
5505 + * 3. The names of the above-listed copyright holders may not be used
5506 + * to endorse or promote products derived from this software without
5507 + * specific prior written permission.
5509 + * ALTERNATIVELY, this software may be distributed under the terms of the
5510 + * GNU General Public License ("GPL") version 2, as published by the Free
5511 + * Software Foundation.
5513 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
5514 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
5515 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
5516 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
5517 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
5518 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
5519 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
5520 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
5521 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
5522 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
5523 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5526 +#include "vchiq_core.h"
5528 +#define VCHIQ_SLOT_HANDLER_STACK 8192
5530 +#define HANDLE_STATE_SHIFT 12
5532 +#define SLOT_INFO_FROM_INDEX(state, index) (state->slot_info + (index))
5533 +#define SLOT_DATA_FROM_INDEX(state, index) (state->slot_data + (index))
5534 +#define SLOT_INDEX_FROM_DATA(state, data) \
5535 + (((unsigned int)((char *)data - (char *)state->slot_data)) / \
5537 +#define SLOT_INDEX_FROM_INFO(state, info) \
5538 + ((unsigned int)(info - state->slot_info))
5539 +#define SLOT_QUEUE_INDEX_FROM_POS(pos) \
5540 + ((int)((unsigned int)(pos) / VCHIQ_SLOT_SIZE))
5543 +#define BULK_INDEX(x) (x & (VCHIQ_NUM_SERVICE_BULKS - 1))
5546 +struct vchiq_open_payload {
5550 + short version_min;
5553 +struct vchiq_openack_payload {
5557 +/* we require this for consistency between endpoints */
5558 +vchiq_static_assert(sizeof(VCHIQ_HEADER_T) == 8);
5559 +vchiq_static_assert(IS_POW2(sizeof(VCHIQ_HEADER_T)));
5560 +vchiq_static_assert(IS_POW2(VCHIQ_NUM_CURRENT_BULKS));
5561 +vchiq_static_assert(IS_POW2(VCHIQ_NUM_SERVICE_BULKS));
5562 +vchiq_static_assert(IS_POW2(VCHIQ_MAX_SERVICES));
5563 +vchiq_static_assert(VCHIQ_VERSION >= VCHIQ_VERSION_MIN);
5565 +/* Run time control of log level, based on KERN_XXX level. */
5566 +int vchiq_core_log_level = VCHIQ_LOG_DEFAULT;
5567 +int vchiq_core_msg_log_level = VCHIQ_LOG_DEFAULT;
5568 +int vchiq_sync_log_level = VCHIQ_LOG_DEFAULT;
5570 +static atomic_t pause_bulks_count = ATOMIC_INIT(0);
5572 +static DEFINE_SPINLOCK(service_spinlock);
5573 +DEFINE_SPINLOCK(bulk_waiter_spinlock);
5574 +DEFINE_SPINLOCK(quota_spinlock);
5576 +VCHIQ_STATE_T *vchiq_states[VCHIQ_MAX_STATES];
5577 +static unsigned int handle_seq;
5579 +static const char *const srvstate_names[] = {
5592 +static const char *const reason_names[] = {
5595 + "MESSAGE_AVAILABLE",
5596 + "BULK_TRANSMIT_DONE",
5597 + "BULK_RECEIVE_DONE",
5598 + "BULK_TRANSMIT_ABORTED",
5599 + "BULK_RECEIVE_ABORTED"
5602 +static const char *const conn_state_names[] = {
5616 +release_message_sync(VCHIQ_STATE_T *state, VCHIQ_HEADER_T *header);
5618 +static const char *msg_type_str(unsigned int msg_type)
5620 + switch (msg_type) {
5621 + case VCHIQ_MSG_PADDING: return "PADDING";
5622 + case VCHIQ_MSG_CONNECT: return "CONNECT";
5623 + case VCHIQ_MSG_OPEN: return "OPEN";
5624 + case VCHIQ_MSG_OPENACK: return "OPENACK";
5625 + case VCHIQ_MSG_CLOSE: return "CLOSE";
5626 + case VCHIQ_MSG_DATA: return "DATA";
5627 + case VCHIQ_MSG_BULK_RX: return "BULK_RX";
5628 + case VCHIQ_MSG_BULK_TX: return "BULK_TX";
5629 + case VCHIQ_MSG_BULK_RX_DONE: return "BULK_RX_DONE";
5630 + case VCHIQ_MSG_BULK_TX_DONE: return "BULK_TX_DONE";
5631 + case VCHIQ_MSG_PAUSE: return "PAUSE";
5632 + case VCHIQ_MSG_RESUME: return "RESUME";
5633 + case VCHIQ_MSG_REMOTE_USE: return "REMOTE_USE";
5634 + case VCHIQ_MSG_REMOTE_RELEASE: return "REMOTE_RELEASE";
5635 + case VCHIQ_MSG_REMOTE_USE_ACTIVE: return "REMOTE_USE_ACTIVE";
5641 +vchiq_set_service_state(VCHIQ_SERVICE_T *service, int newstate)
5643 + vchiq_log_info(vchiq_core_log_level, "%d: srv:%d %s->%s",
5644 + service->state->id, service->localport,
5645 + srvstate_names[service->srvstate],
5646 + srvstate_names[newstate]);
5647 + service->srvstate = newstate;
5651 +find_service_by_handle(VCHIQ_SERVICE_HANDLE_T handle)
5653 + VCHIQ_SERVICE_T *service;
5655 + spin_lock(&service_spinlock);
5656 + service = handle_to_service(handle);
5657 + if (service && (service->srvstate != VCHIQ_SRVSTATE_FREE) &&
5658 + (service->handle == handle)) {
5659 + BUG_ON(service->ref_count == 0);
5660 + service->ref_count++;
5663 + spin_unlock(&service_spinlock);
5666 + vchiq_log_info(vchiq_core_log_level,
5667 + "Invalid service handle 0x%x", handle);
5673 +find_service_by_port(VCHIQ_STATE_T *state, int localport)
5675 + VCHIQ_SERVICE_T *service = NULL;
5676 + if ((unsigned int)localport <= VCHIQ_PORT_MAX) {
5677 + spin_lock(&service_spinlock);
5678 + service = state->services[localport];
5679 + if (service && (service->srvstate != VCHIQ_SRVSTATE_FREE)) {
5680 + BUG_ON(service->ref_count == 0);
5681 + service->ref_count++;
5684 + spin_unlock(&service_spinlock);
5688 + vchiq_log_info(vchiq_core_log_level,
5689 + "Invalid port %d", localport);
5695 +find_service_for_instance(VCHIQ_INSTANCE_T instance,
5696 + VCHIQ_SERVICE_HANDLE_T handle) {
5697 + VCHIQ_SERVICE_T *service;
5699 + spin_lock(&service_spinlock);
5700 + service = handle_to_service(handle);
5701 + if (service && (service->srvstate != VCHIQ_SRVSTATE_FREE) &&
5702 + (service->handle == handle) &&
5703 + (service->instance == instance)) {
5704 + BUG_ON(service->ref_count == 0);
5705 + service->ref_count++;
5708 + spin_unlock(&service_spinlock);
5711 + vchiq_log_info(vchiq_core_log_level,
5712 + "Invalid service handle 0x%x", handle);
5718 +next_service_by_instance(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance,
5721 + VCHIQ_SERVICE_T *service = NULL;
5724 + spin_lock(&service_spinlock);
5725 + while (idx < state->unused_service) {
5726 + VCHIQ_SERVICE_T *srv = state->services[idx++];
5727 + if (srv && (srv->srvstate != VCHIQ_SRVSTATE_FREE) &&
5728 + (srv->instance == instance)) {
5730 + BUG_ON(service->ref_count == 0);
5731 + service->ref_count++;
5735 + spin_unlock(&service_spinlock);
5743 +lock_service(VCHIQ_SERVICE_T *service)
5745 + spin_lock(&service_spinlock);
5746 + BUG_ON(!service || (service->ref_count == 0));
5748 + service->ref_count++;
5749 + spin_unlock(&service_spinlock);
5753 +unlock_service(VCHIQ_SERVICE_T *service)
5755 + VCHIQ_STATE_T *state = service->state;
5756 + spin_lock(&service_spinlock);
5757 + BUG_ON(!service || (service->ref_count == 0));
5758 + if (service && service->ref_count) {
5759 + service->ref_count--;
5760 + if (!service->ref_count) {
5761 + BUG_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);
5762 + state->services[service->localport] = NULL;
5766 + spin_unlock(&service_spinlock);
5768 + if (service && service->userdata_term)
5769 + service->userdata_term(service->base.userdata);
5775 +vchiq_get_client_id(VCHIQ_SERVICE_HANDLE_T handle)
5777 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
5780 + id = service ? service->client_id : 0;
5782 + unlock_service(service);
5788 +vchiq_get_service_userdata(VCHIQ_SERVICE_HANDLE_T handle)
5790 + VCHIQ_SERVICE_T *service = handle_to_service(handle);
5792 + return service ? service->base.userdata : NULL;
5796 +vchiq_get_service_fourcc(VCHIQ_SERVICE_HANDLE_T handle)
5798 + VCHIQ_SERVICE_T *service = handle_to_service(handle);
5800 + return service ? service->base.fourcc : 0;
5804 +mark_service_closing_internal(VCHIQ_SERVICE_T *service, int sh_thread)
5806 + VCHIQ_STATE_T *state = service->state;
5807 + VCHIQ_SERVICE_QUOTA_T *service_quota;
5809 + service->closing = 1;
5811 + /* Synchronise with other threads. */
5812 + mutex_lock(&state->recycle_mutex);
5813 + mutex_unlock(&state->recycle_mutex);
5814 + if (!sh_thread || (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT)) {
5815 + /* If we're pausing then the slot_mutex is held until resume
5816 + * by the slot handler. Therefore don't try to acquire this
5817 + * mutex if we're the slot handler and in the pause sent state.
5818 + * We don't need to in this case anyway. */
5819 + mutex_lock(&state->slot_mutex);
5820 + mutex_unlock(&state->slot_mutex);
5823 + /* Unblock any sending thread. */
5824 + service_quota = &state->service_quotas[service->localport];
5825 + up(&service_quota->quota_event);
5829 +mark_service_closing(VCHIQ_SERVICE_T *service)
5831 + mark_service_closing_internal(service, 0);
5834 +static inline VCHIQ_STATUS_T
5835 +make_service_callback(VCHIQ_SERVICE_T *service, VCHIQ_REASON_T reason,
5836 + VCHIQ_HEADER_T *header, void *bulk_userdata)
5838 + VCHIQ_STATUS_T status;
5839 + vchiq_log_trace(vchiq_core_log_level, "%d: callback:%d (%s, %x, %x)",
5840 + service->state->id, service->localport, reason_names[reason],
5841 + (unsigned int)header, (unsigned int)bulk_userdata);
5842 + status = service->base.callback(reason, header, service->handle,
5844 + if (status == VCHIQ_ERROR) {
5845 + vchiq_log_warning(vchiq_core_log_level,
5846 + "%d: ignoring ERROR from callback to service %x",
5847 + service->state->id, service->handle);
5848 + status = VCHIQ_SUCCESS;
5854 +vchiq_set_conn_state(VCHIQ_STATE_T *state, VCHIQ_CONNSTATE_T newstate)
5856 + VCHIQ_CONNSTATE_T oldstate = state->conn_state;
5857 + vchiq_log_info(vchiq_core_log_level, "%d: %s->%s", state->id,
5858 + conn_state_names[oldstate],
5859 + conn_state_names[newstate]);
5860 + state->conn_state = newstate;
5861 + vchiq_platform_conn_state_changed(state, oldstate, newstate);
5865 +remote_event_create(REMOTE_EVENT_T *event)
5868 + /* Don't clear the 'fired' flag because it may already have been set
5869 + ** by the other side. */
5870 + sema_init(event->event, 0);
5874 +remote_event_destroy(REMOTE_EVENT_T *event)
5880 +remote_event_wait(REMOTE_EVENT_T *event)
5882 + if (!event->fired) {
5885 + if (!event->fired) {
5886 + if (down_interruptible(event->event) != 0) {
5900 +remote_event_signal_local(REMOTE_EVENT_T *event)
5907 +remote_event_poll(REMOTE_EVENT_T *event)
5909 + if (event->fired && event->armed)
5910 + remote_event_signal_local(event);
5914 +remote_event_pollall(VCHIQ_STATE_T *state)
5916 + remote_event_poll(&state->local->sync_trigger);
5917 + remote_event_poll(&state->local->sync_release);
5918 + remote_event_poll(&state->local->trigger);
5919 + remote_event_poll(&state->local->recycle);
5922 +/* Round up message sizes so that any space at the end of a slot is always big
5923 +** enough for a header. This relies on header size being a power of two, which
5924 +** has been verified earlier by a static assertion. */
5926 +static inline unsigned int
5927 +calc_stride(unsigned int size)
5929 + /* Allow room for the header */
5930 + size += sizeof(VCHIQ_HEADER_T);
5933 + return (size + sizeof(VCHIQ_HEADER_T) - 1) & ~(sizeof(VCHIQ_HEADER_T)
5937 +/* Called by the slot handler thread */
5938 +static VCHIQ_SERVICE_T *
5939 +get_listening_service(VCHIQ_STATE_T *state, int fourcc)
5943 + WARN_ON(fourcc == VCHIQ_FOURCC_INVALID);
5945 + for (i = 0; i < state->unused_service; i++) {
5946 + VCHIQ_SERVICE_T *service = state->services[i];
5948 + (service->public_fourcc == fourcc) &&
5949 + ((service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
5950 + ((service->srvstate == VCHIQ_SRVSTATE_OPEN) &&
5951 + (service->remoteport == VCHIQ_PORT_FREE)))) {
5952 + lock_service(service);
5960 +/* Called by the slot handler thread */
5961 +static VCHIQ_SERVICE_T *
5962 +get_connected_service(VCHIQ_STATE_T *state, unsigned int port)
5965 + for (i = 0; i < state->unused_service; i++) {
5966 + VCHIQ_SERVICE_T *service = state->services[i];
5967 + if (service && (service->srvstate == VCHIQ_SRVSTATE_OPEN)
5968 + && (service->remoteport == port)) {
5969 + lock_service(service);
5977 +request_poll(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service, int poll_type)
5983 + value = atomic_read(&service->poll_flags);
5984 + } while (atomic_cmpxchg(&service->poll_flags, value,
5985 + value | (1 << poll_type)) != value);
5988 + value = atomic_read(&state->poll_services[
5989 + service->localport>>5]);
5990 + } while (atomic_cmpxchg(
5991 + &state->poll_services[service->localport>>5],
5992 + value, value | (1 << (service->localport & 0x1f)))
5996 + state->poll_needed = 1;
5999 + /* ... and ensure the slot handler runs. */
6000 + remote_event_signal_local(&state->local->trigger);
6003 +/* Called from queue_message, by the slot handler and application threads,
6004 +** with slot_mutex held */
6005 +static VCHIQ_HEADER_T *
6006 +reserve_space(VCHIQ_STATE_T *state, int space, int is_blocking)
6008 + VCHIQ_SHARED_STATE_T *local = state->local;
6009 + int tx_pos = state->local_tx_pos;
6010 + int slot_space = VCHIQ_SLOT_SIZE - (tx_pos & VCHIQ_SLOT_MASK);
6012 + if (space > slot_space) {
6013 + VCHIQ_HEADER_T *header;
6014 + /* Fill the remaining space with padding */
6015 + WARN_ON(state->tx_data == NULL);
6016 + header = (VCHIQ_HEADER_T *)
6017 + (state->tx_data + (tx_pos & VCHIQ_SLOT_MASK));
6018 + header->msgid = VCHIQ_MSGID_PADDING;
6019 + header->size = slot_space - sizeof(VCHIQ_HEADER_T);
6021 + tx_pos += slot_space;
6024 + /* If necessary, get the next slot. */
6025 + if ((tx_pos & VCHIQ_SLOT_MASK) == 0) {
6028 + /* If there is no free slot... */
6030 + if (down_trylock(&state->slot_available_event) != 0) {
6031 + /* ...wait for one. */
6033 + VCHIQ_STATS_INC(state, slot_stalls);
6035 + /* But first, flush through the last slot. */
6036 + state->local_tx_pos = tx_pos;
6037 + local->tx_pos = tx_pos;
6038 + remote_event_signal(&state->remote->trigger);
6040 + if (!is_blocking ||
6041 + (down_interruptible(
6042 + &state->slot_available_event) != 0))
6043 + return NULL; /* No space available */
6047 + (state->slot_queue_available * VCHIQ_SLOT_SIZE));
6049 + slot_index = local->slot_queue[
6050 + SLOT_QUEUE_INDEX_FROM_POS(tx_pos) &
6051 + VCHIQ_SLOT_QUEUE_MASK];
6053 + (char *)SLOT_DATA_FROM_INDEX(state, slot_index);
6056 + state->local_tx_pos = tx_pos + space;
6058 + return (VCHIQ_HEADER_T *)(state->tx_data + (tx_pos & VCHIQ_SLOT_MASK));
6061 +/* Called by the recycle thread. */
6063 +process_free_queue(VCHIQ_STATE_T *state)
6065 + VCHIQ_SHARED_STATE_T *local = state->local;
6066 + BITSET_T service_found[BITSET_SIZE(VCHIQ_MAX_SERVICES)];
6067 + int slot_queue_available;
6069 + /* Use a read memory barrier to ensure that any state that may have
6070 + ** been modified by another thread is not masked by stale prefetched
6074 + /* Find slots which have been freed by the other side, and return them
6075 + ** to the available queue. */
6076 + slot_queue_available = state->slot_queue_available;
6078 + while (slot_queue_available != local->slot_queue_recycle) {
6080 + int slot_index = local->slot_queue[slot_queue_available++ &
6081 + VCHIQ_SLOT_QUEUE_MASK];
6082 + char *data = (char *)SLOT_DATA_FROM_INDEX(state, slot_index);
6083 + int data_found = 0;
6085 + vchiq_log_trace(vchiq_core_log_level, "%d: pfq %d=%x %x %x",
6086 + state->id, slot_index, (unsigned int)data,
6087 + local->slot_queue_recycle, slot_queue_available);
6089 + /* Initialise the bitmask for services which have used this
6091 + BITSET_ZERO(service_found);
6095 + while (pos < VCHIQ_SLOT_SIZE) {
6096 + VCHIQ_HEADER_T *header =
6097 + (VCHIQ_HEADER_T *)(data + pos);
6098 + int msgid = header->msgid;
6099 + if (VCHIQ_MSG_TYPE(msgid) == VCHIQ_MSG_DATA) {
6100 + int port = VCHIQ_MSG_SRCPORT(msgid);
6101 + VCHIQ_SERVICE_QUOTA_T *service_quota =
6102 + &state->service_quotas[port];
6104 + spin_lock("a_spinlock);
6105 + count = service_quota->message_use_count;
6107 + service_quota->message_use_count =
6109 + spin_unlock("a_spinlock);
6111 + if (count == service_quota->message_quota)
6112 + /* Signal the service that it
6113 + ** has dropped below its quota
6115 + up(&service_quota->quota_event);
6116 + else if (count == 0) {
6117 + vchiq_log_error(vchiq_core_log_level,
6119 + "message_use_count=%d "
6120 + "(header %x, msgid %x, "
6121 + "header->msgid %x, "
6122 + "header->size %x)",
6125 + message_use_count,
6126 + (unsigned int)header, msgid,
6129 + WARN(1, "invalid message use count\n");
6131 + if (!BITSET_IS_SET(service_found, port)) {
6132 + /* Set the found bit for this service */
6133 + BITSET_SET(service_found, port);
6135 + spin_lock("a_spinlock);
6136 + count = service_quota->slot_use_count;
6138 + service_quota->slot_use_count =
6140 + spin_unlock("a_spinlock);
6143 + /* Signal the service in case
6144 + ** it has dropped below its
6146 + up(&service_quota->quota_event);
6148 + vchiq_core_log_level,
6149 + "%d: pfq:%d %x@%x - "
6153 + (unsigned int)header,
6157 + vchiq_core_log_level,
6166 + (unsigned int)header,
6170 + WARN(1, "bad slot use count\n");
6177 + pos += calc_stride(header->size);
6178 + if (pos > VCHIQ_SLOT_SIZE) {
6179 + vchiq_log_error(vchiq_core_log_level,
6180 + "pfq - pos %x: header %x, msgid %x, "
6181 + "header->msgid %x, header->size %x",
6182 + pos, (unsigned int)header, msgid,
6183 + header->msgid, header->size);
6184 + WARN(1, "invalid slot position\n");
6190 + spin_lock("a_spinlock);
6191 + count = state->data_use_count;
6193 + state->data_use_count =
6195 + spin_unlock("a_spinlock);
6196 + if (count == state->data_quota)
6197 + up(&state->data_quota_event);
6200 + state->slot_queue_available = slot_queue_available;
6201 + up(&state->slot_available_event);
6205 +/* Called by the slot handler and application threads */
6206 +static VCHIQ_STATUS_T
6207 +queue_message(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
6208 + int msgid, const VCHIQ_ELEMENT_T *elements,
6209 + int count, int size, int is_blocking)
6211 + VCHIQ_SHARED_STATE_T *local;
6212 + VCHIQ_SERVICE_QUOTA_T *service_quota = NULL;
6213 + VCHIQ_HEADER_T *header;
6214 + int type = VCHIQ_MSG_TYPE(msgid);
6216 + unsigned int stride;
6218 + local = state->local;
6220 + stride = calc_stride(size);
6222 + WARN_ON(!(stride <= VCHIQ_SLOT_SIZE));
6224 + if ((type != VCHIQ_MSG_RESUME) &&
6225 + (mutex_lock_interruptible(&state->slot_mutex) != 0))
6226 + return VCHIQ_RETRY;
6228 + if (type == VCHIQ_MSG_DATA) {
6233 + if (service->closing) {
6234 + /* The service has been closed */
6235 + mutex_unlock(&state->slot_mutex);
6236 + return VCHIQ_ERROR;
6239 + service_quota = &state->service_quotas[service->localport];
6241 + spin_lock("a_spinlock);
6243 + /* Ensure this service doesn't use more than its quota of
6244 + ** messages or slots */
6245 + tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
6246 + state->local_tx_pos + stride - 1);
6248 + /* Ensure data messages don't use more than their quota of
6250 + while ((tx_end_index != state->previous_data_index) &&
6251 + (state->data_use_count == state->data_quota)) {
6252 + VCHIQ_STATS_INC(state, data_stalls);
6253 + spin_unlock("a_spinlock);
6254 + mutex_unlock(&state->slot_mutex);
6256 + if (down_interruptible(&state->data_quota_event)
6258 + return VCHIQ_RETRY;
6260 + mutex_lock(&state->slot_mutex);
6261 + spin_lock("a_spinlock);
6262 + tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
6263 + state->local_tx_pos + stride - 1);
6264 + if ((tx_end_index == state->previous_data_index) ||
6265 + (state->data_use_count < state->data_quota)) {
6266 + /* Pass the signal on to other waiters */
6267 + up(&state->data_quota_event);
6272 + while ((service_quota->message_use_count ==
6273 + service_quota->message_quota) ||
6274 + ((tx_end_index != service_quota->previous_tx_index) &&
6275 + (service_quota->slot_use_count ==
6276 + service_quota->slot_quota))) {
6277 + spin_unlock("a_spinlock);
6278 + vchiq_log_trace(vchiq_core_log_level,
6279 + "%d: qm:%d %s,%x - quota stall "
6280 + "(msg %d, slot %d)",
6281 + state->id, service->localport,
6282 + msg_type_str(type), size,
6283 + service_quota->message_use_count,
6284 + service_quota->slot_use_count);
6285 + VCHIQ_SERVICE_STATS_INC(service, quota_stalls);
6286 + mutex_unlock(&state->slot_mutex);
6287 + if (down_interruptible(&service_quota->quota_event)
6289 + return VCHIQ_RETRY;
6290 + if (service->closing)
6291 + return VCHIQ_ERROR;
6292 + if (mutex_lock_interruptible(&state->slot_mutex) != 0)
6293 + return VCHIQ_RETRY;
6294 + if (service->srvstate != VCHIQ_SRVSTATE_OPEN) {
6295 + /* The service has been closed */
6296 + mutex_unlock(&state->slot_mutex);
6297 + return VCHIQ_ERROR;
6299 + spin_lock("a_spinlock);
6300 + tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
6301 + state->local_tx_pos + stride - 1);
6304 + spin_unlock("a_spinlock);
6307 + header = reserve_space(state, stride, is_blocking);
6311 + VCHIQ_SERVICE_STATS_INC(service, slot_stalls);
6312 + mutex_unlock(&state->slot_mutex);
6313 + return VCHIQ_RETRY;
6316 + if (type == VCHIQ_MSG_DATA) {
6319 + int slot_use_count;
6321 + vchiq_log_info(vchiq_core_log_level,
6322 + "%d: qm %s@%x,%x (%d->%d)",
6324 + msg_type_str(VCHIQ_MSG_TYPE(msgid)),
6325 + (unsigned int)header, size,
6326 + VCHIQ_MSG_SRCPORT(msgid),
6327 + VCHIQ_MSG_DSTPORT(msgid));
6331 + for (i = 0, pos = 0; i < (unsigned int)count;
6332 + pos += elements[i++].size)
6333 + if (elements[i].size) {
6334 + if (vchiq_copy_from_user
6335 + (header->data + pos, elements[i].data,
6336 + (size_t) elements[i].size) !=
6338 + mutex_unlock(&state->slot_mutex);
6339 + VCHIQ_SERVICE_STATS_INC(service,
6341 + return VCHIQ_ERROR;
6344 + if (vchiq_core_msg_log_level >=
6346 + vchiq_log_dump_mem("Sent", 0,
6347 + header->data + pos,
6349 + elements[0].size));
6353 + spin_lock("a_spinlock);
6354 + service_quota->message_use_count++;
6357 + SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos - 1);
6359 + /* If this transmission can't fit in the last slot used by any
6360 + ** service, the data_use_count must be increased. */
6361 + if (tx_end_index != state->previous_data_index) {
6362 + state->previous_data_index = tx_end_index;
6363 + state->data_use_count++;
6366 + /* If this isn't the same slot last used by this service,
6367 + ** the service's slot_use_count must be increased. */
6368 + if (tx_end_index != service_quota->previous_tx_index) {
6369 + service_quota->previous_tx_index = tx_end_index;
6370 + slot_use_count = ++service_quota->slot_use_count;
6372 + slot_use_count = 0;
6375 + spin_unlock("a_spinlock);
6377 + if (slot_use_count)
6378 + vchiq_log_trace(vchiq_core_log_level,
6379 + "%d: qm:%d %s,%x - slot_use->%d (hdr %p)",
6380 + state->id, service->localport,
6381 + msg_type_str(VCHIQ_MSG_TYPE(msgid)), size,
6382 + slot_use_count, header);
6384 + VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
6385 + VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
6387 + vchiq_log_info(vchiq_core_log_level,
6388 + "%d: qm %s@%x,%x (%d->%d)", state->id,
6389 + msg_type_str(VCHIQ_MSG_TYPE(msgid)),
6390 + (unsigned int)header, size,
6391 + VCHIQ_MSG_SRCPORT(msgid),
6392 + VCHIQ_MSG_DSTPORT(msgid));
6394 + WARN_ON(!((count == 1) && (size == elements[0].size)));
6395 + memcpy(header->data, elements[0].data,
6396 + elements[0].size);
6398 + VCHIQ_STATS_INC(state, ctrl_tx_count);
6401 + header->msgid = msgid;
6402 + header->size = size;
6407 + svc_fourcc = service
6408 + ? service->base.fourcc
6409 + : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
6411 + vchiq_log_info(vchiq_core_msg_log_level,
6412 + "Sent Msg %s(%u) to %c%c%c%c s:%u d:%d len:%d",
6413 + msg_type_str(VCHIQ_MSG_TYPE(msgid)),
6414 + VCHIQ_MSG_TYPE(msgid),
6415 + VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
6416 + VCHIQ_MSG_SRCPORT(msgid),
6417 + VCHIQ_MSG_DSTPORT(msgid),
6421 + /* Make sure the new header is visible to the peer. */
6424 + /* Make the new tx_pos visible to the peer. */
6425 + local->tx_pos = state->local_tx_pos;
6428 + if (service && (type == VCHIQ_MSG_CLOSE))
6429 + vchiq_set_service_state(service, VCHIQ_SRVSTATE_CLOSESENT);
6431 + if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_PAUSE)
6432 + mutex_unlock(&state->slot_mutex);
6434 + remote_event_signal(&state->remote->trigger);
6436 + return VCHIQ_SUCCESS;
6439 +/* Called by the slot handler and application threads */
6440 +static VCHIQ_STATUS_T
6441 +queue_message_sync(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
6442 + int msgid, const VCHIQ_ELEMENT_T *elements,
6443 + int count, int size, int is_blocking)
6445 + VCHIQ_SHARED_STATE_T *local;
6446 + VCHIQ_HEADER_T *header;
6448 + local = state->local;
6450 + if ((VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_RESUME) &&
6451 + (mutex_lock_interruptible(&state->sync_mutex) != 0))
6452 + return VCHIQ_RETRY;
6454 + remote_event_wait(&local->sync_release);
6458 + header = (VCHIQ_HEADER_T *)SLOT_DATA_FROM_INDEX(state,
6459 + local->slot_sync);
6462 + int oldmsgid = header->msgid;
6463 + if (oldmsgid != VCHIQ_MSGID_PADDING)
6464 + vchiq_log_error(vchiq_core_log_level,
6465 + "%d: qms - msgid %x, not PADDING",
6466 + state->id, oldmsgid);
6472 + vchiq_log_info(vchiq_sync_log_level,
6473 + "%d: qms %s@%x,%x (%d->%d)", state->id,
6474 + msg_type_str(VCHIQ_MSG_TYPE(msgid)),
6475 + (unsigned int)header, size,
6476 + VCHIQ_MSG_SRCPORT(msgid),
6477 + VCHIQ_MSG_DSTPORT(msgid));
6479 + for (i = 0, pos = 0; i < (unsigned int)count;
6480 + pos += elements[i++].size)
6481 + if (elements[i].size) {
6482 + if (vchiq_copy_from_user
6483 + (header->data + pos, elements[i].data,
6484 + (size_t) elements[i].size) !=
6486 + mutex_unlock(&state->sync_mutex);
6487 + VCHIQ_SERVICE_STATS_INC(service,
6489 + return VCHIQ_ERROR;
6492 + if (vchiq_sync_log_level >=
6494 + vchiq_log_dump_mem("Sent Sync",
6495 + 0, header->data + pos,
6497 + elements[0].size));
6501 + VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
6502 + VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
6504 + vchiq_log_info(vchiq_sync_log_level,
6505 + "%d: qms %s@%x,%x (%d->%d)", state->id,
6506 + msg_type_str(VCHIQ_MSG_TYPE(msgid)),
6507 + (unsigned int)header, size,
6508 + VCHIQ_MSG_SRCPORT(msgid),
6509 + VCHIQ_MSG_DSTPORT(msgid));
6511 + WARN_ON(!((count == 1) && (size == elements[0].size)));
6512 + memcpy(header->data, elements[0].data,
6513 + elements[0].size);
6515 + VCHIQ_STATS_INC(state, ctrl_tx_count);
6518 + header->size = size;
6519 + header->msgid = msgid;
6521 + if (vchiq_sync_log_level >= VCHIQ_LOG_TRACE) {
6524 + svc_fourcc = service
6525 + ? service->base.fourcc
6526 + : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
6528 + vchiq_log_trace(vchiq_sync_log_level,
6529 + "Sent Sync Msg %s(%u) to %c%c%c%c s:%u d:%d len:%d",
6530 + msg_type_str(VCHIQ_MSG_TYPE(msgid)),
6531 + VCHIQ_MSG_TYPE(msgid),
6532 + VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
6533 + VCHIQ_MSG_SRCPORT(msgid),
6534 + VCHIQ_MSG_DSTPORT(msgid),
6538 + /* Make sure the new header is visible to the peer. */
6541 + remote_event_signal(&state->remote->sync_trigger);
6543 + if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_PAUSE)
6544 + mutex_unlock(&state->sync_mutex);
6546 + return VCHIQ_SUCCESS;
6550 +claim_slot(VCHIQ_SLOT_INFO_T *slot)
6552 + slot->use_count++;
6556 +release_slot(VCHIQ_STATE_T *state, VCHIQ_SLOT_INFO_T *slot_info,
6557 + VCHIQ_HEADER_T *header, VCHIQ_SERVICE_T *service)
6559 + int release_count;
6561 + mutex_lock(&state->recycle_mutex);
6564 + int msgid = header->msgid;
6565 + if (((msgid & VCHIQ_MSGID_CLAIMED) == 0) ||
6566 + (service && service->closing)) {
6567 + mutex_unlock(&state->recycle_mutex);
6571 + /* Rewrite the message header to prevent a double
6573 + header->msgid = msgid & ~VCHIQ_MSGID_CLAIMED;
6576 + release_count = slot_info->release_count;
6577 + slot_info->release_count = ++release_count;
6579 + if (release_count == slot_info->use_count) {
6580 + int slot_queue_recycle;
6581 + /* Add to the freed queue */
6583 + /* A read barrier is necessary here to prevent speculative
6584 + ** fetches of remote->slot_queue_recycle from overtaking the
6588 + slot_queue_recycle = state->remote->slot_queue_recycle;
6589 + state->remote->slot_queue[slot_queue_recycle &
6590 + VCHIQ_SLOT_QUEUE_MASK] =
6591 + SLOT_INDEX_FROM_INFO(state, slot_info);
6592 + state->remote->slot_queue_recycle = slot_queue_recycle + 1;
6593 + vchiq_log_info(vchiq_core_log_level,
6594 + "%d: release_slot %d - recycle->%x",
6595 + state->id, SLOT_INDEX_FROM_INFO(state, slot_info),
6596 + state->remote->slot_queue_recycle);
6598 + /* A write barrier is necessary, but remote_event_signal
6599 + ** contains one. */
6600 + remote_event_signal(&state->remote->recycle);
6603 + mutex_unlock(&state->recycle_mutex);
6606 +/* Called by the slot handler - don't hold the bulk mutex */
6607 +static VCHIQ_STATUS_T
6608 +notify_bulks(VCHIQ_SERVICE_T *service, VCHIQ_BULK_QUEUE_T *queue,
6611 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
6613 + vchiq_log_trace(vchiq_core_log_level,
6614 + "%d: nb:%d %cx - p=%x rn=%x r=%x",
6615 + service->state->id, service->localport,
6616 + (queue == &service->bulk_tx) ? 't' : 'r',
6617 + queue->process, queue->remote_notify, queue->remove);
6619 + if (service->state->is_master) {
6620 + while (queue->remote_notify != queue->process) {
6621 + VCHIQ_BULK_T *bulk =
6622 + &queue->bulks[BULK_INDEX(queue->remote_notify)];
6623 + int msgtype = (bulk->dir == VCHIQ_BULK_TRANSMIT) ?
6624 + VCHIQ_MSG_BULK_RX_DONE : VCHIQ_MSG_BULK_TX_DONE;
6625 + int msgid = VCHIQ_MAKE_MSG(msgtype, service->localport,
6626 + service->remoteport);
6627 + VCHIQ_ELEMENT_T element = { &bulk->actual, 4 };
6628 + /* Only reply to non-dummy bulk requests */
6629 + if (bulk->remote_data) {
6630 + status = queue_message(service->state, NULL,
6631 + msgid, &element, 1, 4, 0);
6632 + if (status != VCHIQ_SUCCESS)
6635 + queue->remote_notify++;
6638 + queue->remote_notify = queue->process;
6641 + if (status == VCHIQ_SUCCESS) {
6642 + while (queue->remove != queue->remote_notify) {
6643 + VCHIQ_BULK_T *bulk =
6644 + &queue->bulks[BULK_INDEX(queue->remove)];
6646 + /* Only generate callbacks for non-dummy bulk
6647 + ** requests, and non-terminated services */
6648 + if (bulk->data && service->instance) {
6649 + if (bulk->actual != VCHIQ_BULK_ACTUAL_ABORTED) {
6650 + if (bulk->dir == VCHIQ_BULK_TRANSMIT) {
6651 + VCHIQ_SERVICE_STATS_INC(service,
6653 + VCHIQ_SERVICE_STATS_ADD(service,
6657 + VCHIQ_SERVICE_STATS_INC(service,
6659 + VCHIQ_SERVICE_STATS_ADD(service,
6664 + VCHIQ_SERVICE_STATS_INC(service,
6665 + bulk_aborted_count);
6667 + if (bulk->mode == VCHIQ_BULK_MODE_BLOCKING) {
6668 + struct bulk_waiter *waiter;
6669 + spin_lock(&bulk_waiter_spinlock);
6670 + waiter = bulk->userdata;
6672 + waiter->actual = bulk->actual;
6673 + up(&waiter->event);
6675 + spin_unlock(&bulk_waiter_spinlock);
6676 + } else if (bulk->mode ==
6677 + VCHIQ_BULK_MODE_CALLBACK) {
6678 + VCHIQ_REASON_T reason = (bulk->dir ==
6679 + VCHIQ_BULK_TRANSMIT) ?
6681 + VCHIQ_BULK_ACTUAL_ABORTED) ?
6682 + VCHIQ_BULK_TRANSMIT_ABORTED :
6683 + VCHIQ_BULK_TRANSMIT_DONE) :
6685 + VCHIQ_BULK_ACTUAL_ABORTED) ?
6686 + VCHIQ_BULK_RECEIVE_ABORTED :
6687 + VCHIQ_BULK_RECEIVE_DONE);
6688 + status = make_service_callback(service,
6689 + reason, NULL, bulk->userdata);
6690 + if (status == VCHIQ_RETRY)
6696 + up(&service->bulk_remove_event);
6699 + status = VCHIQ_SUCCESS;
6702 + if (status == VCHIQ_RETRY)
6703 + request_poll(service->state, service,
6704 + (queue == &service->bulk_tx) ?
6705 + VCHIQ_POLL_TXNOTIFY : VCHIQ_POLL_RXNOTIFY);
6710 +/* Called by the slot handler thread */
6712 +poll_services(VCHIQ_STATE_T *state)
6716 + for (group = 0; group < BITSET_SIZE(state->unused_service); group++) {
6718 + flags = atomic_xchg(&state->poll_services[group], 0);
6719 + for (i = 0; flags; i++) {
6720 + if (flags & (1 << i)) {
6721 + VCHIQ_SERVICE_T *service =
6722 + find_service_by_port(state,
6724 + uint32_t service_flags;
6725 + flags &= ~(1 << i);
6729 + atomic_xchg(&service->poll_flags, 0);
6730 + if (service_flags &
6731 + (1 << VCHIQ_POLL_REMOVE)) {
6732 + vchiq_log_info(vchiq_core_log_level,
6733 + "%d: ps - remove %d<->%d",
6734 + state->id, service->localport,
6735 + service->remoteport);
6737 + /* Make it look like a client, because
6738 + it must be removed and not left in
6739 + the LISTENING state. */
6740 + service->public_fourcc =
6741 + VCHIQ_FOURCC_INVALID;
6743 + if (vchiq_close_service_internal(
6744 + service, 0/*!close_recvd*/) !=
6746 + request_poll(state, service,
6747 + VCHIQ_POLL_REMOVE);
6748 + } else if (service_flags &
6749 + (1 << VCHIQ_POLL_TERMINATE)) {
6750 + vchiq_log_info(vchiq_core_log_level,
6751 + "%d: ps - terminate %d<->%d",
6752 + state->id, service->localport,
6753 + service->remoteport);
6754 + if (vchiq_close_service_internal(
6755 + service, 0/*!close_recvd*/) !=
6757 + request_poll(state, service,
6758 + VCHIQ_POLL_TERMINATE);
6760 + if (service_flags & (1 << VCHIQ_POLL_TXNOTIFY))
6761 + notify_bulks(service,
6762 + &service->bulk_tx,
6764 + if (service_flags & (1 << VCHIQ_POLL_RXNOTIFY))
6765 + notify_bulks(service,
6766 + &service->bulk_rx,
6768 + unlock_service(service);
6774 +/* Called by the slot handler or application threads, holding the bulk mutex. */
6776 +resolve_bulks(VCHIQ_SERVICE_T *service, VCHIQ_BULK_QUEUE_T *queue)
6778 + VCHIQ_STATE_T *state = service->state;
6782 + while ((queue->process != queue->local_insert) &&
6783 + (queue->process != queue->remote_insert)) {
6784 + VCHIQ_BULK_T *bulk = &queue->bulks[BULK_INDEX(queue->process)];
6786 + vchiq_log_trace(vchiq_core_log_level,
6787 + "%d: rb:%d %cx - li=%x ri=%x p=%x",
6788 + state->id, service->localport,
6789 + (queue == &service->bulk_tx) ? 't' : 'r',
6790 + queue->local_insert, queue->remote_insert,
6793 + WARN_ON(!((int)(queue->local_insert - queue->process) > 0));
6794 + WARN_ON(!((int)(queue->remote_insert - queue->process) > 0));
6796 + rc = mutex_lock_interruptible(&state->bulk_transfer_mutex);
6800 + vchiq_transfer_bulk(bulk);
6801 + mutex_unlock(&state->bulk_transfer_mutex);
6803 + if (vchiq_core_msg_log_level >= VCHIQ_LOG_INFO) {
6804 + const char *header = (queue == &service->bulk_tx) ?
6805 + "Send Bulk to" : "Recv Bulk from";
6806 + if (bulk->actual != VCHIQ_BULK_ACTUAL_ABORTED)
6807 + vchiq_log_info(vchiq_core_msg_log_level,
6808 + "%s %c%c%c%c d:%d len:%d %x<->%x",
6810 + VCHIQ_FOURCC_AS_4CHARS(
6811 + service->base.fourcc),
6812 + service->remoteport,
6814 + (unsigned int)bulk->data,
6815 + (unsigned int)bulk->remote_data);
6817 + vchiq_log_info(vchiq_core_msg_log_level,
6818 + "%s %c%c%c%c d:%d ABORTED - tx len:%d,"
6819 + " rx len:%d %x<->%x",
6821 + VCHIQ_FOURCC_AS_4CHARS(
6822 + service->base.fourcc),
6823 + service->remoteport,
6825 + bulk->remote_size,
6826 + (unsigned int)bulk->data,
6827 + (unsigned int)bulk->remote_data);
6830 + vchiq_complete_bulk(bulk);
6837 +/* Called with the bulk_mutex held */
6839 +abort_outstanding_bulks(VCHIQ_SERVICE_T *service, VCHIQ_BULK_QUEUE_T *queue)
6841 + int is_tx = (queue == &service->bulk_tx);
6842 + vchiq_log_trace(vchiq_core_log_level,
6843 + "%d: aob:%d %cx - li=%x ri=%x p=%x",
6844 + service->state->id, service->localport, is_tx ? 't' : 'r',
6845 + queue->local_insert, queue->remote_insert, queue->process);
6847 + WARN_ON(!((int)(queue->local_insert - queue->process) >= 0));
6848 + WARN_ON(!((int)(queue->remote_insert - queue->process) >= 0));
6850 + while ((queue->process != queue->local_insert) ||
6851 + (queue->process != queue->remote_insert)) {
6852 + VCHIQ_BULK_T *bulk = &queue->bulks[BULK_INDEX(queue->process)];
6854 + if (queue->process == queue->remote_insert) {
6855 + /* fabricate a matching dummy bulk */
6856 + bulk->remote_data = NULL;
6857 + bulk->remote_size = 0;
6858 + queue->remote_insert++;
6861 + if (queue->process != queue->local_insert) {
6862 + vchiq_complete_bulk(bulk);
6864 + vchiq_log_info(vchiq_core_msg_log_level,
6865 + "%s %c%c%c%c d:%d ABORTED - tx len:%d, "
6867 + is_tx ? "Send Bulk to" : "Recv Bulk from",
6868 + VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
6869 + service->remoteport,
6871 + bulk->remote_size);
6873 + /* fabricate a matching dummy bulk */
6874 + bulk->data = NULL;
6876 + bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
6877 + bulk->dir = is_tx ? VCHIQ_BULK_TRANSMIT :
6878 + VCHIQ_BULK_RECEIVE;
6879 + queue->local_insert++;
6886 +/* Called from the slot handler thread */
6888 +pause_bulks(VCHIQ_STATE_T *state)
6890 + if (unlikely(atomic_inc_return(&pause_bulks_count) != 1)) {
6892 + atomic_set(&pause_bulks_count, 1);
6896 + /* Block bulk transfers from all services */
6897 + mutex_lock(&state->bulk_transfer_mutex);
6900 +/* Called from the slot handler thread */
6902 +resume_bulks(VCHIQ_STATE_T *state)
6905 + if (unlikely(atomic_dec_return(&pause_bulks_count) != 0)) {
6907 + atomic_set(&pause_bulks_count, 0);
6911 + /* Allow bulk transfers from all services */
6912 + mutex_unlock(&state->bulk_transfer_mutex);
6914 + if (state->deferred_bulks == 0)
6917 + /* Deal with any bulks which had to be deferred due to being in
6918 + * paused state. Don't try to match up to number of deferred bulks
6919 + * in case we've had something come and close the service in the
6920 + * interim - just process all bulk queues for all services */
6921 + vchiq_log_info(vchiq_core_log_level, "%s: processing %d deferred bulks",
6922 + __func__, state->deferred_bulks);
6924 + for (i = 0; i < state->unused_service; i++) {
6925 + VCHIQ_SERVICE_T *service = state->services[i];
6926 + int resolved_rx = 0;
6927 + int resolved_tx = 0;
6928 + if (!service || (service->srvstate != VCHIQ_SRVSTATE_OPEN))
6931 + mutex_lock(&service->bulk_mutex);
6932 + resolved_rx = resolve_bulks(service, &service->bulk_rx);
6933 + resolved_tx = resolve_bulks(service, &service->bulk_tx);
6934 + mutex_unlock(&service->bulk_mutex);
6936 + notify_bulks(service, &service->bulk_rx, 1);
6938 + notify_bulks(service, &service->bulk_tx, 1);
6940 + state->deferred_bulks = 0;
6944 +parse_open(VCHIQ_STATE_T *state, VCHIQ_HEADER_T *header)
6946 + VCHIQ_SERVICE_T *service = NULL;
6949 + unsigned int localport, remoteport;
6951 + msgid = header->msgid;
6952 + size = header->size;
6953 + type = VCHIQ_MSG_TYPE(msgid);
6954 + localport = VCHIQ_MSG_DSTPORT(msgid);
6955 + remoteport = VCHIQ_MSG_SRCPORT(msgid);
6956 + if (size >= sizeof(struct vchiq_open_payload)) {
6957 + const struct vchiq_open_payload *payload =
6958 + (struct vchiq_open_payload *)header->data;
6959 + unsigned int fourcc;
6961 + fourcc = payload->fourcc;
6962 + vchiq_log_info(vchiq_core_log_level,
6963 + "%d: prs OPEN@%x (%d->'%c%c%c%c')",
6964 + state->id, (unsigned int)header,
6966 + VCHIQ_FOURCC_AS_4CHARS(fourcc));
6968 + service = get_listening_service(state, fourcc);
6971 + /* A matching service exists */
6972 + short version = payload->version;
6973 + short version_min = payload->version_min;
6974 + if ((service->version < version_min) ||
6975 + (version < service->version_min)) {
6976 + /* Version mismatch */
6977 + vchiq_loud_error_header();
6978 + vchiq_loud_error("%d: service %d (%c%c%c%c) "
6979 + "version mismatch - local (%d, min %d)"
6980 + " vs. remote (%d, min %d)",
6981 + state->id, service->localport,
6982 + VCHIQ_FOURCC_AS_4CHARS(fourcc),
6983 + service->version, service->version_min,
6984 + version, version_min);
6985 + vchiq_loud_error_footer();
6986 + unlock_service(service);
6990 + service->peer_version = version;
6992 + if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
6993 + struct vchiq_openack_payload ack_payload = {
6996 + VCHIQ_ELEMENT_T body = {
6998 + sizeof(ack_payload)
7001 + /* Acknowledge the OPEN */
7002 + if (service->sync) {
7003 + if (queue_message_sync(state, NULL,
7005 + VCHIQ_MSG_OPENACK,
7006 + service->localport,
7008 + &body, 1, sizeof(ack_payload),
7009 + 0) == VCHIQ_RETRY)
7010 + goto bail_not_ready;
7012 + if (queue_message(state, NULL,
7014 + VCHIQ_MSG_OPENACK,
7015 + service->localport,
7017 + &body, 1, sizeof(ack_payload),
7018 + 0) == VCHIQ_RETRY)
7019 + goto bail_not_ready;
7022 + /* The service is now open */
7023 + vchiq_set_service_state(service,
7024 + service->sync ? VCHIQ_SRVSTATE_OPENSYNC
7025 + : VCHIQ_SRVSTATE_OPEN);
7028 + service->remoteport = remoteport;
7029 + service->client_id = ((int *)header->data)[1];
7030 + if (make_service_callback(service, VCHIQ_SERVICE_OPENED,
7031 + NULL, NULL) == VCHIQ_RETRY) {
7032 + /* Bail out if not ready */
7033 + service->remoteport = VCHIQ_PORT_FREE;
7034 + goto bail_not_ready;
7037 + /* Success - the message has been dealt with */
7038 + unlock_service(service);
7044 + /* No available service, or an invalid request - send a CLOSE */
7045 + if (queue_message(state, NULL,
7046 + VCHIQ_MAKE_MSG(VCHIQ_MSG_CLOSE, 0, VCHIQ_MSG_SRCPORT(msgid)),
7047 + NULL, 0, 0, 0) == VCHIQ_RETRY)
7048 + goto bail_not_ready;
7054 + unlock_service(service);
7059 +/* Called by the slot handler thread */
7061 +parse_rx_slots(VCHIQ_STATE_T *state)
7063 + VCHIQ_SHARED_STATE_T *remote = state->remote;
7064 + VCHIQ_SERVICE_T *service = NULL;
7066 + DEBUG_INITIALISE(state->local)
7068 + tx_pos = remote->tx_pos;
7070 + while (state->rx_pos != tx_pos) {
7071 + VCHIQ_HEADER_T *header;
7074 + unsigned int localport, remoteport;
7076 + DEBUG_TRACE(PARSE_LINE);
7077 + if (!state->rx_data) {
7079 + WARN_ON(!((state->rx_pos & VCHIQ_SLOT_MASK) == 0));
7080 + rx_index = remote->slot_queue[
7081 + SLOT_QUEUE_INDEX_FROM_POS(state->rx_pos) &
7082 + VCHIQ_SLOT_QUEUE_MASK];
7083 + state->rx_data = (char *)SLOT_DATA_FROM_INDEX(state,
7085 + state->rx_info = SLOT_INFO_FROM_INDEX(state, rx_index);
7087 + /* Initialise use_count to one, and increment
7088 + ** release_count at the end of the slot to avoid
7089 + ** releasing the slot prematurely. */
7090 + state->rx_info->use_count = 1;
7091 + state->rx_info->release_count = 0;
7094 + header = (VCHIQ_HEADER_T *)(state->rx_data +
7095 + (state->rx_pos & VCHIQ_SLOT_MASK));
7096 + DEBUG_VALUE(PARSE_HEADER, (int)header);
7097 + msgid = header->msgid;
7098 + DEBUG_VALUE(PARSE_MSGID, msgid);
7099 + size = header->size;
7100 + type = VCHIQ_MSG_TYPE(msgid);
7101 + localport = VCHIQ_MSG_DSTPORT(msgid);
7102 + remoteport = VCHIQ_MSG_SRCPORT(msgid);
7104 + if (type != VCHIQ_MSG_DATA)
7105 + VCHIQ_STATS_INC(state, ctrl_rx_count);
7108 + case VCHIQ_MSG_OPENACK:
7109 + case VCHIQ_MSG_CLOSE:
7110 + case VCHIQ_MSG_DATA:
7111 + case VCHIQ_MSG_BULK_RX:
7112 + case VCHIQ_MSG_BULK_TX:
7113 + case VCHIQ_MSG_BULK_RX_DONE:
7114 + case VCHIQ_MSG_BULK_TX_DONE:
7115 + service = find_service_by_port(state, localport);
7116 + if ((!service || service->remoteport != remoteport) &&
7117 + (localport == 0) &&
7118 + (type == VCHIQ_MSG_CLOSE)) {
7119 + /* This could be a CLOSE from a client which
7120 + hadn't yet received the OPENACK - look for
7121 + the connected service */
7123 + unlock_service(service);
7124 + service = get_connected_service(state,
7127 + vchiq_log_warning(vchiq_core_log_level,
7128 + "%d: prs %s@%x (%d->%d) - "
7129 + "found connected service %d",
7130 + state->id, msg_type_str(type),
7131 + (unsigned int)header,
7132 + remoteport, localport,
7133 + service->localport);
7137 + vchiq_log_error(vchiq_core_log_level,
7138 + "%d: prs %s@%x (%d->%d) - "
7139 + "invalid/closed service %d",
7140 + state->id, msg_type_str(type),
7141 + (unsigned int)header,
7142 + remoteport, localport, localport);
7143 + goto skip_message;
7150 + if (vchiq_core_msg_log_level >= VCHIQ_LOG_INFO) {
7153 + svc_fourcc = service
7154 + ? service->base.fourcc
7155 + : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
7156 + vchiq_log_info(vchiq_core_msg_log_level,
7157 + "Rcvd Msg %s(%u) from %c%c%c%c s:%d d:%d "
7159 + msg_type_str(type), type,
7160 + VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
7161 + remoteport, localport, size);
7163 + vchiq_log_dump_mem("Rcvd", 0, header->data,
7167 + if (((unsigned int)header & VCHIQ_SLOT_MASK) + calc_stride(size)
7168 + > VCHIQ_SLOT_SIZE) {
7169 + vchiq_log_error(vchiq_core_log_level,
7170 + "header %x (msgid %x) - size %x too big for "
7172 + (unsigned int)header, (unsigned int)msgid,
7173 + (unsigned int)size);
7174 + WARN(1, "oversized for slot\n");
7178 + case VCHIQ_MSG_OPEN:
7179 + WARN_ON(!(VCHIQ_MSG_DSTPORT(msgid) == 0));
7180 + if (!parse_open(state, header))
7181 + goto bail_not_ready;
7183 + case VCHIQ_MSG_OPENACK:
7184 + if (size >= sizeof(struct vchiq_openack_payload)) {
7185 + const struct vchiq_openack_payload *payload =
7186 + (struct vchiq_openack_payload *)
7188 + service->peer_version = payload->version;
7190 + vchiq_log_info(vchiq_core_log_level,
7191 + "%d: prs OPENACK@%x,%x (%d->%d) v:%d",
7192 + state->id, (unsigned int)header, size,
7193 + remoteport, localport, service->peer_version);
7194 + if (service->srvstate ==
7195 + VCHIQ_SRVSTATE_OPENING) {
7196 + service->remoteport = remoteport;
7197 + vchiq_set_service_state(service,
7198 + VCHIQ_SRVSTATE_OPEN);
7199 + up(&service->remove_event);
7201 + vchiq_log_error(vchiq_core_log_level,
7202 + "OPENACK received in state %s",
7203 + srvstate_names[service->srvstate]);
7205 + case VCHIQ_MSG_CLOSE:
7206 + WARN_ON(size != 0); /* There should be no data */
7208 + vchiq_log_info(vchiq_core_log_level,
7209 + "%d: prs CLOSE@%x (%d->%d)",
7210 + state->id, (unsigned int)header,
7211 + remoteport, localport);
7213 + mark_service_closing_internal(service, 1);
7215 + if (vchiq_close_service_internal(service,
7216 + 1/*close_recvd*/) == VCHIQ_RETRY)
7217 + goto bail_not_ready;
7219 + vchiq_log_info(vchiq_core_log_level,
7220 + "Close Service %c%c%c%c s:%u d:%d",
7221 + VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
7222 + service->localport,
7223 + service->remoteport);
7225 + case VCHIQ_MSG_DATA:
7226 + vchiq_log_trace(vchiq_core_log_level,
7227 + "%d: prs DATA@%x,%x (%d->%d)",
7228 + state->id, (unsigned int)header, size,
7229 + remoteport, localport);
7231 + if ((service->remoteport == remoteport)
7232 + && (service->srvstate ==
7233 + VCHIQ_SRVSTATE_OPEN)) {
7234 + header->msgid = msgid | VCHIQ_MSGID_CLAIMED;
7235 + claim_slot(state->rx_info);
7236 + DEBUG_TRACE(PARSE_LINE);
7237 + if (make_service_callback(service,
7238 + VCHIQ_MESSAGE_AVAILABLE, header,
7239 + NULL) == VCHIQ_RETRY) {
7240 + DEBUG_TRACE(PARSE_LINE);
7241 + goto bail_not_ready;
7243 + VCHIQ_SERVICE_STATS_INC(service, ctrl_rx_count);
7244 + VCHIQ_SERVICE_STATS_ADD(service, ctrl_rx_bytes,
7247 + VCHIQ_STATS_INC(state, error_count);
7250 + case VCHIQ_MSG_CONNECT:
7251 + vchiq_log_info(vchiq_core_log_level,
7252 + "%d: prs CONNECT@%x",
7253 + state->id, (unsigned int)header);
7254 + up(&state->connect);
7256 + case VCHIQ_MSG_BULK_RX:
7257 + case VCHIQ_MSG_BULK_TX: {
7258 + VCHIQ_BULK_QUEUE_T *queue;
7259 + WARN_ON(!state->is_master);
7260 + queue = (type == VCHIQ_MSG_BULK_RX) ?
7261 + &service->bulk_tx : &service->bulk_rx;
7262 + if ((service->remoteport == remoteport)
7263 + && (service->srvstate ==
7264 + VCHIQ_SRVSTATE_OPEN)) {
7265 + VCHIQ_BULK_T *bulk;
7268 + DEBUG_TRACE(PARSE_LINE);
7269 + if (mutex_lock_interruptible(
7270 + &service->bulk_mutex) != 0) {
7271 + DEBUG_TRACE(PARSE_LINE);
7272 + goto bail_not_ready;
7275 + WARN_ON(!(queue->remote_insert < queue->remove +
7276 + VCHIQ_NUM_SERVICE_BULKS));
7277 + bulk = &queue->bulks[
7278 + BULK_INDEX(queue->remote_insert)];
7279 + bulk->remote_data =
7280 + (void *)((int *)header->data)[0];
7281 + bulk->remote_size = ((int *)header->data)[1];
7284 + vchiq_log_info(vchiq_core_log_level,
7285 + "%d: prs %s@%x (%d->%d) %x@%x",
7286 + state->id, msg_type_str(type),
7287 + (unsigned int)header,
7288 + remoteport, localport,
7289 + bulk->remote_size,
7290 + (unsigned int)bulk->remote_data);
7292 + queue->remote_insert++;
7294 + if (atomic_read(&pause_bulks_count)) {
7295 + state->deferred_bulks++;
7296 + vchiq_log_info(vchiq_core_log_level,
7297 + "%s: deferring bulk (%d)",
7299 + state->deferred_bulks);
7300 + if (state->conn_state !=
7301 + VCHIQ_CONNSTATE_PAUSE_SENT)
7303 + vchiq_core_log_level,
7304 + "%s: bulks paused in "
7305 + "unexpected state %s",
7308 + state->conn_state]);
7309 + } else if (state->conn_state ==
7310 + VCHIQ_CONNSTATE_CONNECTED) {
7311 + DEBUG_TRACE(PARSE_LINE);
7312 + resolved = resolve_bulks(service,
7316 + mutex_unlock(&service->bulk_mutex);
7318 + notify_bulks(service, queue,
7322 + case VCHIQ_MSG_BULK_RX_DONE:
7323 + case VCHIQ_MSG_BULK_TX_DONE:
7324 + WARN_ON(state->is_master);
7325 + if ((service->remoteport == remoteport)
7326 + && (service->srvstate !=
7327 + VCHIQ_SRVSTATE_FREE)) {
7328 + VCHIQ_BULK_QUEUE_T *queue;
7329 + VCHIQ_BULK_T *bulk;
7331 + queue = (type == VCHIQ_MSG_BULK_RX_DONE) ?
7332 + &service->bulk_rx : &service->bulk_tx;
7334 + DEBUG_TRACE(PARSE_LINE);
7335 + if (mutex_lock_interruptible(
7336 + &service->bulk_mutex) != 0) {
7337 + DEBUG_TRACE(PARSE_LINE);
7338 + goto bail_not_ready;
7340 + if ((int)(queue->remote_insert -
7341 + queue->local_insert) >= 0) {
7342 + vchiq_log_error(vchiq_core_log_level,
7343 + "%d: prs %s@%x (%d->%d) "
7344 + "unexpected (ri=%d,li=%d)",
7345 + state->id, msg_type_str(type),
7346 + (unsigned int)header,
7347 + remoteport, localport,
7348 + queue->remote_insert,
7349 + queue->local_insert);
7350 + mutex_unlock(&service->bulk_mutex);
7354 + BUG_ON(queue->process == queue->local_insert);
7355 + BUG_ON(queue->process != queue->remote_insert);
7357 + bulk = &queue->bulks[
7358 + BULK_INDEX(queue->remote_insert)];
7359 + bulk->actual = *(int *)header->data;
7360 + queue->remote_insert++;
7362 + vchiq_log_info(vchiq_core_log_level,
7363 + "%d: prs %s@%x (%d->%d) %x@%x",
7364 + state->id, msg_type_str(type),
7365 + (unsigned int)header,
7366 + remoteport, localport,
7367 + bulk->actual, (unsigned int)bulk->data);
7369 + vchiq_log_trace(vchiq_core_log_level,
7370 + "%d: prs:%d %cx li=%x ri=%x p=%x",
7371 + state->id, localport,
7372 + (type == VCHIQ_MSG_BULK_RX_DONE) ?
7374 + queue->local_insert,
7375 + queue->remote_insert, queue->process);
7377 + DEBUG_TRACE(PARSE_LINE);
7378 + WARN_ON(queue->process == queue->local_insert);
7379 + vchiq_complete_bulk(bulk);
7381 + mutex_unlock(&service->bulk_mutex);
7382 + DEBUG_TRACE(PARSE_LINE);
7383 + notify_bulks(service, queue, 1/*retry_poll*/);
7384 + DEBUG_TRACE(PARSE_LINE);
7387 + case VCHIQ_MSG_PADDING:
7388 + vchiq_log_trace(vchiq_core_log_level,
7389 + "%d: prs PADDING@%x,%x",
7390 + state->id, (unsigned int)header, size);
7392 + case VCHIQ_MSG_PAUSE:
7393 + /* If initiated, signal the application thread */
7394 + vchiq_log_trace(vchiq_core_log_level,
7395 + "%d: prs PAUSE@%x,%x",
7396 + state->id, (unsigned int)header, size);
7397 + if (state->conn_state == VCHIQ_CONNSTATE_PAUSED) {
7398 + vchiq_log_error(vchiq_core_log_level,
7399 + "%d: PAUSE received in state PAUSED",
7403 + if (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT) {
7404 + /* Send a PAUSE in response */
7405 + if (queue_message(state, NULL,
7406 + VCHIQ_MAKE_MSG(VCHIQ_MSG_PAUSE, 0, 0),
7407 + NULL, 0, 0, 0) == VCHIQ_RETRY)
7408 + goto bail_not_ready;
7409 + if (state->is_master)
7410 + pause_bulks(state);
7412 + /* At this point slot_mutex is held */
7413 + vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSED);
7414 + vchiq_platform_paused(state);
7416 + case VCHIQ_MSG_RESUME:
7417 + vchiq_log_trace(vchiq_core_log_level,
7418 + "%d: prs RESUME@%x,%x",
7419 + state->id, (unsigned int)header, size);
7420 + /* Release the slot mutex */
7421 + mutex_unlock(&state->slot_mutex);
7422 + if (state->is_master)
7423 + resume_bulks(state);
7424 + vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
7425 + vchiq_platform_resumed(state);
7428 + case VCHIQ_MSG_REMOTE_USE:
7429 + vchiq_on_remote_use(state);
7431 + case VCHIQ_MSG_REMOTE_RELEASE:
7432 + vchiq_on_remote_release(state);
7434 + case VCHIQ_MSG_REMOTE_USE_ACTIVE:
7435 + vchiq_on_remote_use_active(state);
7439 + vchiq_log_error(vchiq_core_log_level,
7440 + "%d: prs invalid msgid %x@%x,%x",
7441 + state->id, msgid, (unsigned int)header, size);
7442 + WARN(1, "invalid message\n");
7448 + unlock_service(service);
7452 + state->rx_pos += calc_stride(size);
7454 + DEBUG_TRACE(PARSE_LINE);
7455 + /* Perform some housekeeping when the end of the slot is
7457 + if ((state->rx_pos & VCHIQ_SLOT_MASK) == 0) {
7458 + /* Remove the extra reference count. */
7459 + release_slot(state, state->rx_info, NULL, NULL);
7460 + state->rx_data = NULL;
7466 + unlock_service(service);
7469 +/* Called by the slot handler thread */
7471 +slot_handler_func(void *v)
7473 + VCHIQ_STATE_T *state = (VCHIQ_STATE_T *) v;
7474 + VCHIQ_SHARED_STATE_T *local = state->local;
7475 + DEBUG_INITIALISE(local)
7478 + DEBUG_COUNT(SLOT_HANDLER_COUNT);
7479 + DEBUG_TRACE(SLOT_HANDLER_LINE);
7480 + remote_event_wait(&local->trigger);
7484 + DEBUG_TRACE(SLOT_HANDLER_LINE);
7485 + if (state->poll_needed) {
7486 + /* Check if we need to suspend - may change our
7488 + vchiq_platform_check_suspend(state);
7490 + state->poll_needed = 0;
7492 + /* Handle service polling and other rare conditions here
7493 + ** out of the mainline code */
7494 + switch (state->conn_state) {
7495 + case VCHIQ_CONNSTATE_CONNECTED:
7496 + /* Poll the services as requested */
7497 + poll_services(state);
7500 + case VCHIQ_CONNSTATE_PAUSING:
7501 + if (state->is_master)
7502 + pause_bulks(state);
7503 + if (queue_message(state, NULL,
7504 + VCHIQ_MAKE_MSG(VCHIQ_MSG_PAUSE, 0, 0),
7505 + NULL, 0, 0, 0) != VCHIQ_RETRY) {
7506 + vchiq_set_conn_state(state,
7507 + VCHIQ_CONNSTATE_PAUSE_SENT);
7509 + if (state->is_master)
7510 + resume_bulks(state);
7512 + state->poll_needed = 1;
7516 + case VCHIQ_CONNSTATE_PAUSED:
7517 + vchiq_platform_resume(state);
7520 + case VCHIQ_CONNSTATE_RESUMING:
7521 + if (queue_message(state, NULL,
7522 + VCHIQ_MAKE_MSG(VCHIQ_MSG_RESUME, 0, 0),
7523 + NULL, 0, 0, 0) != VCHIQ_RETRY) {
7524 + if (state->is_master)
7525 + resume_bulks(state);
7526 + vchiq_set_conn_state(state,
7527 + VCHIQ_CONNSTATE_CONNECTED);
7528 + vchiq_platform_resumed(state);
7530 + /* This should really be impossible,
7531 + ** since the PAUSE should have flushed
7532 + ** through outstanding messages. */
7533 + vchiq_log_error(vchiq_core_log_level,
7534 + "Failed to send RESUME "
7540 + case VCHIQ_CONNSTATE_PAUSE_TIMEOUT:
7541 + case VCHIQ_CONNSTATE_RESUME_TIMEOUT:
7542 + vchiq_platform_handle_timeout(state);
7551 + DEBUG_TRACE(SLOT_HANDLER_LINE);
7552 + parse_rx_slots(state);
7558 +/* Called by the recycle thread */
7560 +recycle_func(void *v)
7562 + VCHIQ_STATE_T *state = (VCHIQ_STATE_T *) v;
7563 + VCHIQ_SHARED_STATE_T *local = state->local;
7566 + remote_event_wait(&local->recycle);
7568 + process_free_queue(state);
7574 +/* Called by the sync thread */
7578 + VCHIQ_STATE_T *state = (VCHIQ_STATE_T *) v;
7579 + VCHIQ_SHARED_STATE_T *local = state->local;
7580 + VCHIQ_HEADER_T *header = (VCHIQ_HEADER_T *)SLOT_DATA_FROM_INDEX(state,
7581 + state->remote->slot_sync);
7584 + VCHIQ_SERVICE_T *service;
7587 + unsigned int localport, remoteport;
7589 + remote_event_wait(&local->sync_trigger);
7593 + msgid = header->msgid;
7594 + size = header->size;
7595 + type = VCHIQ_MSG_TYPE(msgid);
7596 + localport = VCHIQ_MSG_DSTPORT(msgid);
7597 + remoteport = VCHIQ_MSG_SRCPORT(msgid);
7599 + service = find_service_by_port(state, localport);
7602 + vchiq_log_error(vchiq_sync_log_level,
7603 + "%d: sf %s@%x (%d->%d) - "
7604 + "invalid/closed service %d",
7605 + state->id, msg_type_str(type),
7606 + (unsigned int)header,
7607 + remoteport, localport, localport);
7608 + release_message_sync(state, header);
7612 + if (vchiq_sync_log_level >= VCHIQ_LOG_TRACE) {
7615 + svc_fourcc = service
7616 + ? service->base.fourcc
7617 + : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
7618 + vchiq_log_trace(vchiq_sync_log_level,
7619 + "Rcvd Msg %s from %c%c%c%c s:%d d:%d len:%d",
7620 + msg_type_str(type),
7621 + VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
7622 + remoteport, localport, size);
7624 + vchiq_log_dump_mem("Rcvd", 0, header->data,
7629 + case VCHIQ_MSG_OPENACK:
7630 + if (size >= sizeof(struct vchiq_openack_payload)) {
7631 + const struct vchiq_openack_payload *payload =
7632 + (struct vchiq_openack_payload *)
7634 + service->peer_version = payload->version;
7636 + vchiq_log_info(vchiq_sync_log_level,
7637 + "%d: sf OPENACK@%x,%x (%d->%d) v:%d",
7638 + state->id, (unsigned int)header, size,
7639 + remoteport, localport, service->peer_version);
7640 + if (service->srvstate == VCHIQ_SRVSTATE_OPENING) {
7641 + service->remoteport = remoteport;
7642 + vchiq_set_service_state(service,
7643 + VCHIQ_SRVSTATE_OPENSYNC);
7644 + up(&service->remove_event);
7646 + release_message_sync(state, header);
7649 + case VCHIQ_MSG_DATA:
7650 + vchiq_log_trace(vchiq_sync_log_level,
7651 + "%d: sf DATA@%x,%x (%d->%d)",
7652 + state->id, (unsigned int)header, size,
7653 + remoteport, localport);
7655 + if ((service->remoteport == remoteport) &&
7656 + (service->srvstate ==
7657 + VCHIQ_SRVSTATE_OPENSYNC)) {
7658 + if (make_service_callback(service,
7659 + VCHIQ_MESSAGE_AVAILABLE, header,
7660 + NULL) == VCHIQ_RETRY)
7661 + vchiq_log_error(vchiq_sync_log_level,
7662 + "synchronous callback to "
7663 + "service %d returns "
7670 + vchiq_log_error(vchiq_sync_log_level,
7671 + "%d: sf unexpected msgid %x@%x,%x",
7672 + state->id, msgid, (unsigned int)header, size);
7673 + release_message_sync(state, header);
7677 + unlock_service(service);
7685 +init_bulk_queue(VCHIQ_BULK_QUEUE_T *queue)
7687 + queue->local_insert = 0;
7688 + queue->remote_insert = 0;
7689 + queue->process = 0;
7690 + queue->remote_notify = 0;
7691 + queue->remove = 0;
7695 +inline const char *
7696 +get_conn_state_name(VCHIQ_CONNSTATE_T conn_state)
7698 + return conn_state_names[conn_state];
7702 +VCHIQ_SLOT_ZERO_T *
7703 +vchiq_init_slots(void *mem_base, int mem_size)
7705 + int mem_align = (VCHIQ_SLOT_SIZE - (int)mem_base) & VCHIQ_SLOT_MASK;
7706 + VCHIQ_SLOT_ZERO_T *slot_zero =
7707 + (VCHIQ_SLOT_ZERO_T *)((char *)mem_base + mem_align);
7708 + int num_slots = (mem_size - mem_align)/VCHIQ_SLOT_SIZE;
7709 + int first_data_slot = VCHIQ_SLOT_ZERO_SLOTS;
7711 + /* Ensure there is enough memory to run an absolutely minimum system */
7712 + num_slots -= first_data_slot;
7714 + if (num_slots < 4) {
7715 + vchiq_log_error(vchiq_core_log_level,
7716 + "vchiq_init_slots - insufficient memory %x bytes",
7721 + memset(slot_zero, 0, sizeof(VCHIQ_SLOT_ZERO_T));
7723 + slot_zero->magic = VCHIQ_MAGIC;
7724 + slot_zero->version = VCHIQ_VERSION;
7725 + slot_zero->version_min = VCHIQ_VERSION_MIN;
7726 + slot_zero->slot_zero_size = sizeof(VCHIQ_SLOT_ZERO_T);
7727 + slot_zero->slot_size = VCHIQ_SLOT_SIZE;
7728 + slot_zero->max_slots = VCHIQ_MAX_SLOTS;
7729 + slot_zero->max_slots_per_side = VCHIQ_MAX_SLOTS_PER_SIDE;
7731 + slot_zero->master.slot_sync = first_data_slot;
7732 + slot_zero->master.slot_first = first_data_slot + 1;
7733 + slot_zero->master.slot_last = first_data_slot + (num_slots/2) - 1;
7734 + slot_zero->slave.slot_sync = first_data_slot + (num_slots/2);
7735 + slot_zero->slave.slot_first = first_data_slot + (num_slots/2) + 1;
7736 + slot_zero->slave.slot_last = first_data_slot + num_slots - 1;
7742 +vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero,
7745 + VCHIQ_SHARED_STATE_T *local;
7746 + VCHIQ_SHARED_STATE_T *remote;
7747 + VCHIQ_STATUS_T status;
7748 + char threadname[10];
7752 + vchiq_log_warning(vchiq_core_log_level,
7753 + "%s: slot_zero = 0x%08lx, is_master = %d",
7754 + __func__, (unsigned long)slot_zero, is_master);
7756 + /* Check the input configuration */
7758 + if (slot_zero->magic != VCHIQ_MAGIC) {
7759 + vchiq_loud_error_header();
7760 + vchiq_loud_error("Invalid VCHIQ magic value found.");
7761 + vchiq_loud_error("slot_zero=%x: magic=%x (expected %x)",
7762 + (unsigned int)slot_zero, slot_zero->magic, VCHIQ_MAGIC);
7763 + vchiq_loud_error_footer();
7764 + return VCHIQ_ERROR;
7767 + if (slot_zero->version < VCHIQ_VERSION_MIN) {
7768 + vchiq_loud_error_header();
7769 + vchiq_loud_error("Incompatible VCHIQ versions found.");
7770 + vchiq_loud_error("slot_zero=%x: VideoCore version=%d "
7772 + (unsigned int)slot_zero, slot_zero->version,
7773 + VCHIQ_VERSION_MIN);
7774 + vchiq_loud_error("Restart with a newer VideoCore image.");
7775 + vchiq_loud_error_footer();
7776 + return VCHIQ_ERROR;
7779 + if (VCHIQ_VERSION < slot_zero->version_min) {
7780 + vchiq_loud_error_header();
7781 + vchiq_loud_error("Incompatible VCHIQ versions found.");
7782 + vchiq_loud_error("slot_zero=%x: version=%d (VideoCore "
7784 + (unsigned int)slot_zero, VCHIQ_VERSION,
7785 + slot_zero->version_min);
7786 + vchiq_loud_error("Restart with a newer kernel.");
7787 + vchiq_loud_error_footer();
7788 + return VCHIQ_ERROR;
7791 + if ((slot_zero->slot_zero_size != sizeof(VCHIQ_SLOT_ZERO_T)) ||
7792 + (slot_zero->slot_size != VCHIQ_SLOT_SIZE) ||
7793 + (slot_zero->max_slots != VCHIQ_MAX_SLOTS) ||
7794 + (slot_zero->max_slots_per_side != VCHIQ_MAX_SLOTS_PER_SIDE)) {
7795 + vchiq_loud_error_header();
7796 + if (slot_zero->slot_zero_size != sizeof(VCHIQ_SLOT_ZERO_T))
7797 + vchiq_loud_error("slot_zero=%x: slot_zero_size=%x "
7799 + (unsigned int)slot_zero,
7800 + slot_zero->slot_zero_size,
7801 + sizeof(VCHIQ_SLOT_ZERO_T));
7802 + if (slot_zero->slot_size != VCHIQ_SLOT_SIZE)
7803 + vchiq_loud_error("slot_zero=%x: slot_size=%d "
7805 + (unsigned int)slot_zero, slot_zero->slot_size,
7807 + if (slot_zero->max_slots != VCHIQ_MAX_SLOTS)
7808 + vchiq_loud_error("slot_zero=%x: max_slots=%d "
7810 + (unsigned int)slot_zero, slot_zero->max_slots,
7812 + if (slot_zero->max_slots_per_side != VCHIQ_MAX_SLOTS_PER_SIDE)
7813 + vchiq_loud_error("slot_zero=%x: max_slots_per_side=%d "
7815 + (unsigned int)slot_zero,
7816 + slot_zero->max_slots_per_side,
7817 + VCHIQ_MAX_SLOTS_PER_SIDE);
7818 + vchiq_loud_error_footer();
7819 + return VCHIQ_ERROR;
7823 + local = &slot_zero->master;
7824 + remote = &slot_zero->slave;
7826 + local = &slot_zero->slave;
7827 + remote = &slot_zero->master;
7830 + if (local->initialised) {
7831 + vchiq_loud_error_header();
7832 + if (remote->initialised)
7833 + vchiq_loud_error("local state has already been "
7836 + vchiq_loud_error("master/slave mismatch - two %ss",
7837 + is_master ? "master" : "slave");
7838 + vchiq_loud_error_footer();
7839 + return VCHIQ_ERROR;
7842 + memset(state, 0, sizeof(VCHIQ_STATE_T));
7845 + state->is_master = is_master;
7848 + initialize shared state pointers
7851 + state->local = local;
7852 + state->remote = remote;
7853 + state->slot_data = (VCHIQ_SLOT_T *)slot_zero;
7856 + initialize events and mutexes
7859 + sema_init(&state->connect, 0);
7860 + mutex_init(&state->mutex);
7861 + sema_init(&state->trigger_event, 0);
7862 + sema_init(&state->recycle_event, 0);
7863 + sema_init(&state->sync_trigger_event, 0);
7864 + sema_init(&state->sync_release_event, 0);
7866 + mutex_init(&state->slot_mutex);
7867 + mutex_init(&state->recycle_mutex);
7868 + mutex_init(&state->sync_mutex);
7869 + mutex_init(&state->bulk_transfer_mutex);
7871 + sema_init(&state->slot_available_event, 0);
7872 + sema_init(&state->slot_remove_event, 0);
7873 + sema_init(&state->data_quota_event, 0);
7875 + state->slot_queue_available = 0;
7877 + for (i = 0; i < VCHIQ_MAX_SERVICES; i++) {
7878 + VCHIQ_SERVICE_QUOTA_T *service_quota =
7879 + &state->service_quotas[i];
7880 + sema_init(&service_quota->quota_event, 0);
7883 + for (i = local->slot_first; i <= local->slot_last; i++) {
7884 + local->slot_queue[state->slot_queue_available++] = i;
7885 + up(&state->slot_available_event);
7888 + state->default_slot_quota = state->slot_queue_available/2;
7889 + state->default_message_quota =
7890 + min((unsigned short)(state->default_slot_quota * 256),
7891 + (unsigned short)~0);
7893 + state->previous_data_index = -1;
7894 + state->data_use_count = 0;
7895 + state->data_quota = state->slot_queue_available - 1;
7897 + local->trigger.event = &state->trigger_event;
7898 + remote_event_create(&local->trigger);
7899 + local->tx_pos = 0;
7901 + local->recycle.event = &state->recycle_event;
7902 + remote_event_create(&local->recycle);
7903 + local->slot_queue_recycle = state->slot_queue_available;
7905 + local->sync_trigger.event = &state->sync_trigger_event;
7906 + remote_event_create(&local->sync_trigger);
7908 + local->sync_release.event = &state->sync_release_event;
7909 + remote_event_create(&local->sync_release);
7911 + /* At start-of-day, the slot is empty and available */
7912 + ((VCHIQ_HEADER_T *)SLOT_DATA_FROM_INDEX(state, local->slot_sync))->msgid
7913 + = VCHIQ_MSGID_PADDING;
7914 + remote_event_signal_local(&local->sync_release);
7916 + local->debug[DEBUG_ENTRIES] = DEBUG_MAX;
7918 + status = vchiq_platform_init_state(state);
7921 + bring up slot handler thread
7923 + snprintf(threadname, sizeof(threadname), "VCHIQ-%d", state->id);
7924 + state->slot_handler_thread = kthread_create(&slot_handler_func,
7928 + if (state->slot_handler_thread == NULL) {
7929 + vchiq_loud_error_header();
7930 + vchiq_loud_error("couldn't create thread %s", threadname);
7931 + vchiq_loud_error_footer();
7932 + return VCHIQ_ERROR;
7934 + set_user_nice(state->slot_handler_thread, -19);
7935 + wake_up_process(state->slot_handler_thread);
7937 + snprintf(threadname, sizeof(threadname), "VCHIQr-%d", state->id);
7938 + state->recycle_thread = kthread_create(&recycle_func,
7941 + if (state->recycle_thread == NULL) {
7942 + vchiq_loud_error_header();
7943 + vchiq_loud_error("couldn't create thread %s", threadname);
7944 + vchiq_loud_error_footer();
7945 + return VCHIQ_ERROR;
7947 + set_user_nice(state->recycle_thread, -19);
7948 + wake_up_process(state->recycle_thread);
7950 + snprintf(threadname, sizeof(threadname), "VCHIQs-%d", state->id);
7951 + state->sync_thread = kthread_create(&sync_func,
7954 + if (state->sync_thread == NULL) {
7955 + vchiq_loud_error_header();
7956 + vchiq_loud_error("couldn't create thread %s", threadname);
7957 + vchiq_loud_error_footer();
7958 + return VCHIQ_ERROR;
7960 + set_user_nice(state->sync_thread, -20);
7961 + wake_up_process(state->sync_thread);
7963 + BUG_ON(state->id >= VCHIQ_MAX_STATES);
7964 + vchiq_states[state->id] = state;
7966 + /* Indicate readiness to the other side */
7967 + local->initialised = 1;
7972 +/* Called from application thread when a client or server service is created. */
7974 +vchiq_add_service_internal(VCHIQ_STATE_T *state,
7975 + const VCHIQ_SERVICE_PARAMS_T *params, int srvstate,
7976 + VCHIQ_INSTANCE_T instance, VCHIQ_USERDATA_TERM_T userdata_term)
7978 + VCHIQ_SERVICE_T *service;
7980 + service = kmalloc(sizeof(VCHIQ_SERVICE_T), GFP_KERNEL);
7982 + service->base.fourcc = params->fourcc;
7983 + service->base.callback = params->callback;
7984 + service->base.userdata = params->userdata;
7985 + service->handle = VCHIQ_SERVICE_HANDLE_INVALID;
7986 + service->ref_count = 1;
7987 + service->srvstate = VCHIQ_SRVSTATE_FREE;
7988 + service->userdata_term = userdata_term;
7989 + service->localport = VCHIQ_PORT_FREE;
7990 + service->remoteport = VCHIQ_PORT_FREE;
7992 + service->public_fourcc = (srvstate == VCHIQ_SRVSTATE_OPENING) ?
7993 + VCHIQ_FOURCC_INVALID : params->fourcc;
7994 + service->client_id = 0;
7995 + service->auto_close = 1;
7996 + service->sync = 0;
7997 + service->closing = 0;
7998 + atomic_set(&service->poll_flags, 0);
7999 + service->version = params->version;
8000 + service->version_min = params->version_min;
8001 + service->state = state;
8002 + service->instance = instance;
8003 + service->service_use_count = 0;
8004 + init_bulk_queue(&service->bulk_tx);
8005 + init_bulk_queue(&service->bulk_rx);
8006 + sema_init(&service->remove_event, 0);
8007 + sema_init(&service->bulk_remove_event, 0);
8008 + mutex_init(&service->bulk_mutex);
8009 + memset(&service->stats, 0, sizeof(service->stats));
8011 + vchiq_log_error(vchiq_core_log_level,
8016 + VCHIQ_SERVICE_T **pservice = NULL;
8019 + /* Although it is perfectly possible to use service_spinlock
8020 + ** to protect the creation of services, it is overkill as it
8021 + ** disables interrupts while the array is searched.
8022 + ** The only danger is of another thread trying to create a
8023 + ** service - service deletion is safe.
8024 + ** Therefore it is preferable to use state->mutex which,
8025 + ** although slower to claim, doesn't block interrupts while
8029 + mutex_lock(&state->mutex);
8031 + /* Prepare to use a previously unused service */
8032 + if (state->unused_service < VCHIQ_MAX_SERVICES)
8033 + pservice = &state->services[state->unused_service];
8035 + if (srvstate == VCHIQ_SRVSTATE_OPENING) {
8036 + for (i = 0; i < state->unused_service; i++) {
8037 + VCHIQ_SERVICE_T *srv = state->services[i];
8039 + pservice = &state->services[i];
8044 + for (i = (state->unused_service - 1); i >= 0; i--) {
8045 + VCHIQ_SERVICE_T *srv = state->services[i];
8047 + pservice = &state->services[i];
8048 + else if ((srv->public_fourcc == params->fourcc)
8049 + && ((srv->instance != instance) ||
8050 + (srv->base.callback !=
8051 + params->callback))) {
8052 + /* There is another server using this
8053 + ** fourcc which doesn't match. */
8061 + service->localport = (pservice - state->services);
8063 + handle_seq = VCHIQ_MAX_STATES *
8064 + VCHIQ_MAX_SERVICES;
8065 + service->handle = handle_seq |
8066 + (state->id * VCHIQ_MAX_SERVICES) |
8067 + service->localport;
8068 + handle_seq += VCHIQ_MAX_STATES * VCHIQ_MAX_SERVICES;
8069 + *pservice = service;
8070 + if (pservice == &state->services[state->unused_service])
8071 + state->unused_service++;
8074 + mutex_unlock(&state->mutex);
8083 + VCHIQ_SERVICE_QUOTA_T *service_quota =
8084 + &state->service_quotas[service->localport];
8085 + service_quota->slot_quota = state->default_slot_quota;
8086 + service_quota->message_quota = state->default_message_quota;
8087 + if (service_quota->slot_use_count == 0)
8088 + service_quota->previous_tx_index =
8089 + SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos)
8092 + /* Bring this service online */
8093 + vchiq_set_service_state(service, srvstate);
8095 + vchiq_log_info(vchiq_core_msg_log_level,
8096 + "%s Service %c%c%c%c SrcPort:%d",
8097 + (srvstate == VCHIQ_SRVSTATE_OPENING)
8099 + VCHIQ_FOURCC_AS_4CHARS(params->fourcc),
8100 + service->localport);
8103 + /* Don't unlock the service - leave it with a ref_count of 1. */
8109 +vchiq_open_service_internal(VCHIQ_SERVICE_T *service, int client_id)
8111 + struct vchiq_open_payload payload = {
8112 + service->base.fourcc,
8115 + service->version_min
8117 + VCHIQ_ELEMENT_T body = { &payload, sizeof(payload) };
8118 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
8120 + service->client_id = client_id;
8121 + vchiq_use_service_internal(service);
8122 + status = queue_message(service->state, NULL,
8123 + VCHIQ_MAKE_MSG(VCHIQ_MSG_OPEN, service->localport, 0),
8124 + &body, 1, sizeof(payload), 1);
8125 + if (status == VCHIQ_SUCCESS) {
8126 + if (down_interruptible(&service->remove_event) != 0) {
8127 + status = VCHIQ_RETRY;
8128 + vchiq_release_service_internal(service);
8129 + } else if ((service->srvstate != VCHIQ_SRVSTATE_OPEN) &&
8130 + (service->srvstate != VCHIQ_SRVSTATE_OPENSYNC)) {
8131 + if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT)
8132 + vchiq_log_error(vchiq_core_log_level,
8133 + "%d: osi - srvstate = %s (ref %d)",
8134 + service->state->id,
8135 + srvstate_names[service->srvstate],
8136 + service->ref_count);
8137 + status = VCHIQ_ERROR;
8138 + VCHIQ_SERVICE_STATS_INC(service, error_count);
8139 + vchiq_release_service_internal(service);
8146 +release_service_messages(VCHIQ_SERVICE_T *service)
8148 + VCHIQ_STATE_T *state = service->state;
8149 + int slot_last = state->remote->slot_last;
8152 + /* Release any claimed messages */
8153 + for (i = state->remote->slot_first; i <= slot_last; i++) {
8154 + VCHIQ_SLOT_INFO_T *slot_info =
8155 + SLOT_INFO_FROM_INDEX(state, i);
8156 + if (slot_info->release_count != slot_info->use_count) {
8158 + (char *)SLOT_DATA_FROM_INDEX(state, i);
8159 + unsigned int pos, end;
8161 + end = VCHIQ_SLOT_SIZE;
8162 + if (data == state->rx_data)
8163 + /* This buffer is still being read from - stop
8164 + ** at the current read position */
8165 + end = state->rx_pos & VCHIQ_SLOT_MASK;
8169 + while (pos < end) {
8170 + VCHIQ_HEADER_T *header =
8171 + (VCHIQ_HEADER_T *)(data + pos);
8172 + int msgid = header->msgid;
8173 + int port = VCHIQ_MSG_DSTPORT(msgid);
8174 + if ((port == service->localport) &&
8175 + (msgid & VCHIQ_MSGID_CLAIMED)) {
8176 + vchiq_log_info(vchiq_core_log_level,
8178 + (unsigned int)header);
8179 + release_slot(state, slot_info, header,
8182 + pos += calc_stride(header->size);
8183 + if (pos > VCHIQ_SLOT_SIZE) {
8184 + vchiq_log_error(vchiq_core_log_level,
8185 + "fsi - pos %x: header %x, "
8186 + "msgid %x, header->msgid %x, "
8187 + "header->size %x",
8188 + pos, (unsigned int)header,
8189 + msgid, header->msgid,
8191 + WARN(1, "invalid slot position\n");
8199 +do_abort_bulks(VCHIQ_SERVICE_T *service)
8201 + VCHIQ_STATUS_T status;
8203 + /* Abort any outstanding bulk transfers */
8204 + if (mutex_lock_interruptible(&service->bulk_mutex) != 0)
8206 + abort_outstanding_bulks(service, &service->bulk_tx);
8207 + abort_outstanding_bulks(service, &service->bulk_rx);
8208 + mutex_unlock(&service->bulk_mutex);
8210 + status = notify_bulks(service, &service->bulk_tx, 0/*!retry_poll*/);
8211 + if (status == VCHIQ_SUCCESS)
8212 + status = notify_bulks(service, &service->bulk_rx,
8213 + 0/*!retry_poll*/);
8214 + return (status == VCHIQ_SUCCESS);
8217 +static VCHIQ_STATUS_T
8218 +close_service_complete(VCHIQ_SERVICE_T *service, int failstate)
8220 + VCHIQ_STATUS_T status;
8221 + int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
8224 + switch (service->srvstate) {
8225 + case VCHIQ_SRVSTATE_OPEN:
8226 + case VCHIQ_SRVSTATE_CLOSESENT:
8227 + case VCHIQ_SRVSTATE_CLOSERECVD:
8229 + if (service->auto_close) {
8230 + service->client_id = 0;
8231 + service->remoteport = VCHIQ_PORT_FREE;
8232 + newstate = VCHIQ_SRVSTATE_LISTENING;
8234 + newstate = VCHIQ_SRVSTATE_CLOSEWAIT;
8236 + newstate = VCHIQ_SRVSTATE_CLOSED;
8237 + vchiq_set_service_state(service, newstate);
8239 + case VCHIQ_SRVSTATE_LISTENING:
8242 + vchiq_log_error(vchiq_core_log_level,
8243 + "close_service_complete(%x) called in state %s",
8244 + service->handle, srvstate_names[service->srvstate]);
8245 + WARN(1, "close_service_complete in unexpected state\n");
8246 + return VCHIQ_ERROR;
8249 + status = make_service_callback(service,
8250 + VCHIQ_SERVICE_CLOSED, NULL, NULL);
8252 + if (status != VCHIQ_RETRY) {
8253 + int uc = service->service_use_count;
8255 + /* Complete the close process */
8256 + for (i = 0; i < uc; i++)
8257 + /* cater for cases where close is forced and the
8258 + ** client may not close all it's handles */
8259 + vchiq_release_service_internal(service);
8261 + service->client_id = 0;
8262 + service->remoteport = VCHIQ_PORT_FREE;
8264 + if (service->srvstate == VCHIQ_SRVSTATE_CLOSED)
8265 + vchiq_free_service_internal(service);
8266 + else if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT) {
8268 + service->closing = 0;
8270 + up(&service->remove_event);
8273 + vchiq_set_service_state(service, failstate);
8278 +/* Called by the slot handler */
8280 +vchiq_close_service_internal(VCHIQ_SERVICE_T *service, int close_recvd)
8282 + VCHIQ_STATE_T *state = service->state;
8283 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
8284 + int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
8286 + vchiq_log_info(vchiq_core_log_level, "%d: csi:%d,%d (%s)",
8287 + service->state->id, service->localport, close_recvd,
8288 + srvstate_names[service->srvstate]);
8290 + switch (service->srvstate) {
8291 + case VCHIQ_SRVSTATE_CLOSED:
8292 + case VCHIQ_SRVSTATE_HIDDEN:
8293 + case VCHIQ_SRVSTATE_LISTENING:
8294 + case VCHIQ_SRVSTATE_CLOSEWAIT:
8296 + vchiq_log_error(vchiq_core_log_level,
8297 + "vchiq_close_service_internal(1) called "
8299 + srvstate_names[service->srvstate]);
8300 + else if (is_server) {
8301 + if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
8302 + status = VCHIQ_ERROR;
8304 + service->client_id = 0;
8305 + service->remoteport = VCHIQ_PORT_FREE;
8306 + if (service->srvstate ==
8307 + VCHIQ_SRVSTATE_CLOSEWAIT)
8308 + vchiq_set_service_state(service,
8309 + VCHIQ_SRVSTATE_LISTENING);
8311 + up(&service->remove_event);
8313 + vchiq_free_service_internal(service);
8315 + case VCHIQ_SRVSTATE_OPENING:
8316 + if (close_recvd) {
8317 + /* The open was rejected - tell the user */
8318 + vchiq_set_service_state(service,
8319 + VCHIQ_SRVSTATE_CLOSEWAIT);
8320 + up(&service->remove_event);
8322 + /* Shutdown mid-open - let the other side know */
8323 + status = queue_message(state, service,
8326 + service->localport,
8327 + VCHIQ_MSG_DSTPORT(service->remoteport)),
8332 + case VCHIQ_SRVSTATE_OPENSYNC:
8333 + mutex_lock(&state->sync_mutex);
8334 + /* Drop through */
8336 + case VCHIQ_SRVSTATE_OPEN:
8337 + if (state->is_master || close_recvd) {
8338 + if (!do_abort_bulks(service))
8339 + status = VCHIQ_RETRY;
8342 + release_service_messages(service);
8344 + if (status == VCHIQ_SUCCESS)
8345 + status = queue_message(state, service,
8348 + service->localport,
8349 + VCHIQ_MSG_DSTPORT(service->remoteport)),
8352 + if (status == VCHIQ_SUCCESS) {
8355 + } else if (service->srvstate == VCHIQ_SRVSTATE_OPENSYNC) {
8356 + mutex_unlock(&state->sync_mutex);
8361 + status = close_service_complete(service,
8362 + VCHIQ_SRVSTATE_CLOSERECVD);
8365 + case VCHIQ_SRVSTATE_CLOSESENT:
8367 + /* This happens when a process is killed mid-close */
8370 + if (!state->is_master) {
8371 + if (!do_abort_bulks(service)) {
8372 + status = VCHIQ_RETRY;
8377 + if (status == VCHIQ_SUCCESS)
8378 + status = close_service_complete(service,
8379 + VCHIQ_SRVSTATE_CLOSERECVD);
8382 + case VCHIQ_SRVSTATE_CLOSERECVD:
8383 + if (!close_recvd && is_server)
8384 + /* Force into LISTENING mode */
8385 + vchiq_set_service_state(service,
8386 + VCHIQ_SRVSTATE_LISTENING);
8387 + status = close_service_complete(service,
8388 + VCHIQ_SRVSTATE_CLOSERECVD);
8392 + vchiq_log_error(vchiq_core_log_level,
8393 + "vchiq_close_service_internal(%d) called in state %s",
8394 + close_recvd, srvstate_names[service->srvstate]);
8401 +/* Called from the application process upon process death */
8403 +vchiq_terminate_service_internal(VCHIQ_SERVICE_T *service)
8405 + VCHIQ_STATE_T *state = service->state;
8407 + vchiq_log_info(vchiq_core_log_level, "%d: tsi - (%d<->%d)",
8408 + state->id, service->localport, service->remoteport);
8410 + mark_service_closing(service);
8412 + /* Mark the service for removal by the slot handler */
8413 + request_poll(state, service, VCHIQ_POLL_REMOVE);
8416 +/* Called from the slot handler */
8418 +vchiq_free_service_internal(VCHIQ_SERVICE_T *service)
8420 + VCHIQ_STATE_T *state = service->state;
8422 + vchiq_log_info(vchiq_core_log_level, "%d: fsi - (%d)",
8423 + state->id, service->localport);
8425 + switch (service->srvstate) {
8426 + case VCHIQ_SRVSTATE_OPENING:
8427 + case VCHIQ_SRVSTATE_CLOSED:
8428 + case VCHIQ_SRVSTATE_HIDDEN:
8429 + case VCHIQ_SRVSTATE_LISTENING:
8430 + case VCHIQ_SRVSTATE_CLOSEWAIT:
8433 + vchiq_log_error(vchiq_core_log_level,
8434 + "%d: fsi - (%d) in state %s",
8435 + state->id, service->localport,
8436 + srvstate_names[service->srvstate]);
8440 + vchiq_set_service_state(service, VCHIQ_SRVSTATE_FREE);
8442 + up(&service->remove_event);
8444 + /* Release the initial lock */
8445 + unlock_service(service);
8449 +vchiq_connect_internal(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance)
8451 + VCHIQ_SERVICE_T *service;
8454 + /* Find all services registered to this client and enable them. */
8456 + while ((service = next_service_by_instance(state, instance,
8458 + if (service->srvstate == VCHIQ_SRVSTATE_HIDDEN)
8459 + vchiq_set_service_state(service,
8460 + VCHIQ_SRVSTATE_LISTENING);
8461 + unlock_service(service);
8464 + if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED) {
8465 + if (queue_message(state, NULL,
8466 + VCHIQ_MAKE_MSG(VCHIQ_MSG_CONNECT, 0, 0), NULL, 0,
8467 + 0, 1) == VCHIQ_RETRY)
8468 + return VCHIQ_RETRY;
8470 + vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTING);
8473 + if (state->conn_state == VCHIQ_CONNSTATE_CONNECTING) {
8474 + if (down_interruptible(&state->connect) != 0)
8475 + return VCHIQ_RETRY;
8477 + vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
8478 + up(&state->connect);
8481 + return VCHIQ_SUCCESS;
8485 +vchiq_shutdown_internal(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance)
8487 + VCHIQ_SERVICE_T *service;
8490 + /* Find all services registered to this client and enable them. */
8492 + while ((service = next_service_by_instance(state, instance,
8494 + (void)vchiq_remove_service(service->handle);
8495 + unlock_service(service);
8498 + return VCHIQ_SUCCESS;
8502 +vchiq_pause_internal(VCHIQ_STATE_T *state)
8504 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
8506 + switch (state->conn_state) {
8507 + case VCHIQ_CONNSTATE_CONNECTED:
8508 + /* Request a pause */
8509 + vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSING);
8510 + request_poll(state, NULL, 0);
8513 + vchiq_log_error(vchiq_core_log_level,
8514 + "vchiq_pause_internal in state %s\n",
8515 + conn_state_names[state->conn_state]);
8516 + status = VCHIQ_ERROR;
8517 + VCHIQ_STATS_INC(state, error_count);
8525 +vchiq_resume_internal(VCHIQ_STATE_T *state)
8527 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
8529 + if (state->conn_state == VCHIQ_CONNSTATE_PAUSED) {
8530 + vchiq_set_conn_state(state, VCHIQ_CONNSTATE_RESUMING);
8531 + request_poll(state, NULL, 0);
8533 + status = VCHIQ_ERROR;
8534 + VCHIQ_STATS_INC(state, error_count);
8541 +vchiq_close_service(VCHIQ_SERVICE_HANDLE_T handle)
8543 + /* Unregister the service */
8544 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
8545 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
8548 + return VCHIQ_ERROR;
8550 + vchiq_log_info(vchiq_core_log_level,
8551 + "%d: close_service:%d",
8552 + service->state->id, service->localport);
8554 + if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
8555 + (service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
8556 + (service->srvstate == VCHIQ_SRVSTATE_HIDDEN)) {
8557 + unlock_service(service);
8558 + return VCHIQ_ERROR;
8561 + mark_service_closing(service);
8563 + if (current == service->state->slot_handler_thread) {
8564 + status = vchiq_close_service_internal(service,
8565 + 0/*!close_recvd*/);
8566 + BUG_ON(status == VCHIQ_RETRY);
8568 + /* Mark the service for termination by the slot handler */
8569 + request_poll(service->state, service, VCHIQ_POLL_TERMINATE);
8573 + if (down_interruptible(&service->remove_event) != 0) {
8574 + status = VCHIQ_RETRY;
8578 + if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
8579 + (service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
8580 + (service->srvstate == VCHIQ_SRVSTATE_OPEN))
8583 + vchiq_log_warning(vchiq_core_log_level,
8584 + "%d: close_service:%d - waiting in state %s",
8585 + service->state->id, service->localport,
8586 + srvstate_names[service->srvstate]);
8589 + if ((status == VCHIQ_SUCCESS) &&
8590 + (service->srvstate != VCHIQ_SRVSTATE_FREE) &&
8591 + (service->srvstate != VCHIQ_SRVSTATE_LISTENING))
8592 + status = VCHIQ_ERROR;
8594 + unlock_service(service);
8600 +vchiq_remove_service(VCHIQ_SERVICE_HANDLE_T handle)
8602 + /* Unregister the service */
8603 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
8604 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
8607 + return VCHIQ_ERROR;
8609 + vchiq_log_info(vchiq_core_log_level,
8610 + "%d: remove_service:%d",
8611 + service->state->id, service->localport);
8613 + if (service->srvstate == VCHIQ_SRVSTATE_FREE) {
8614 + unlock_service(service);
8615 + return VCHIQ_ERROR;
8618 + mark_service_closing(service);
8620 + if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) ||
8621 + (current == service->state->slot_handler_thread)) {
8622 + /* Make it look like a client, because it must be removed and
8623 + not left in the LISTENING state. */
8624 + service->public_fourcc = VCHIQ_FOURCC_INVALID;
8626 + status = vchiq_close_service_internal(service,
8627 + 0/*!close_recvd*/);
8628 + BUG_ON(status == VCHIQ_RETRY);
8630 + /* Mark the service for removal by the slot handler */
8631 + request_poll(service->state, service, VCHIQ_POLL_REMOVE);
8634 + if (down_interruptible(&service->remove_event) != 0) {
8635 + status = VCHIQ_RETRY;
8639 + if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
8640 + (service->srvstate == VCHIQ_SRVSTATE_OPEN))
8643 + vchiq_log_warning(vchiq_core_log_level,
8644 + "%d: remove_service:%d - waiting in state %s",
8645 + service->state->id, service->localport,
8646 + srvstate_names[service->srvstate]);
8649 + if ((status == VCHIQ_SUCCESS) &&
8650 + (service->srvstate != VCHIQ_SRVSTATE_FREE))
8651 + status = VCHIQ_ERROR;
8653 + unlock_service(service);
8659 +/* This function may be called by kernel threads or user threads.
8660 + * User threads may receive VCHIQ_RETRY to indicate that a signal has been
8661 + * received and the call should be retried after being returned to user
8663 + * When called in blocking mode, the userdata field points to a bulk_waiter
8667 +vchiq_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle,
8668 + VCHI_MEM_HANDLE_T memhandle, void *offset, int size, void *userdata,
8669 + VCHIQ_BULK_MODE_T mode, VCHIQ_BULK_DIR_T dir)
8671 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
8672 + VCHIQ_BULK_QUEUE_T *queue;
8673 + VCHIQ_BULK_T *bulk;
8674 + VCHIQ_STATE_T *state;
8675 + struct bulk_waiter *bulk_waiter = NULL;
8676 + const char dir_char = (dir == VCHIQ_BULK_TRANSMIT) ? 't' : 'r';
8677 + const int dir_msgtype = (dir == VCHIQ_BULK_TRANSMIT) ?
8678 + VCHIQ_MSG_BULK_TX : VCHIQ_MSG_BULK_RX;
8679 + VCHIQ_STATUS_T status = VCHIQ_ERROR;
8682 + (service->srvstate != VCHIQ_SRVSTATE_OPEN) ||
8683 + ((memhandle == VCHI_MEM_HANDLE_INVALID) && (offset == NULL)) ||
8684 + (vchiq_check_service(service) != VCHIQ_SUCCESS))
8688 + case VCHIQ_BULK_MODE_NOCALLBACK:
8689 + case VCHIQ_BULK_MODE_CALLBACK:
8691 + case VCHIQ_BULK_MODE_BLOCKING:
8692 + bulk_waiter = (struct bulk_waiter *)userdata;
8693 + sema_init(&bulk_waiter->event, 0);
8694 + bulk_waiter->actual = 0;
8695 + bulk_waiter->bulk = NULL;
8697 + case VCHIQ_BULK_MODE_WAITING:
8698 + bulk_waiter = (struct bulk_waiter *)userdata;
8699 + bulk = bulk_waiter->bulk;
8705 + state = service->state;
8707 + queue = (dir == VCHIQ_BULK_TRANSMIT) ?
8708 + &service->bulk_tx : &service->bulk_rx;
8710 + if (mutex_lock_interruptible(&service->bulk_mutex) != 0) {
8711 + status = VCHIQ_RETRY;
8715 + if (queue->local_insert == queue->remove + VCHIQ_NUM_SERVICE_BULKS) {
8716 + VCHIQ_SERVICE_STATS_INC(service, bulk_stalls);
8718 + mutex_unlock(&service->bulk_mutex);
8719 + if (down_interruptible(&service->bulk_remove_event)
8721 + status = VCHIQ_RETRY;
8724 + if (mutex_lock_interruptible(&service->bulk_mutex)
8726 + status = VCHIQ_RETRY;
8729 + } while (queue->local_insert == queue->remove +
8730 + VCHIQ_NUM_SERVICE_BULKS);
8733 + bulk = &queue->bulks[BULK_INDEX(queue->local_insert)];
8735 + bulk->mode = mode;
8737 + bulk->userdata = userdata;
8738 + bulk->size = size;
8739 + bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
8741 + if (vchiq_prepare_bulk_data(bulk, memhandle, offset, size, dir) !=
8743 + goto unlock_error_exit;
8747 + vchiq_log_info(vchiq_core_log_level,
8748 + "%d: bt (%d->%d) %cx %x@%x %x",
8750 + service->localport, service->remoteport, dir_char,
8751 + size, (unsigned int)bulk->data, (unsigned int)userdata);
8753 + if (state->is_master) {
8754 + queue->local_insert++;
8755 + if (resolve_bulks(service, queue))
8756 + request_poll(state, service,
8757 + (dir == VCHIQ_BULK_TRANSMIT) ?
8758 + VCHIQ_POLL_TXNOTIFY : VCHIQ_POLL_RXNOTIFY);
8760 + int payload[2] = { (int)bulk->data, bulk->size };
8761 + VCHIQ_ELEMENT_T element = { payload, sizeof(payload) };
8763 + status = queue_message(state, NULL,
8764 + VCHIQ_MAKE_MSG(dir_msgtype,
8765 + service->localport, service->remoteport),
8766 + &element, 1, sizeof(payload), 1);
8767 + if (status != VCHIQ_SUCCESS) {
8768 + vchiq_complete_bulk(bulk);
8769 + goto unlock_error_exit;
8771 + queue->local_insert++;
8774 + mutex_unlock(&service->bulk_mutex);
8776 + vchiq_log_trace(vchiq_core_log_level,
8777 + "%d: bt:%d %cx li=%x ri=%x p=%x",
8779 + service->localport, dir_char,
8780 + queue->local_insert, queue->remote_insert, queue->process);
8783 + unlock_service(service);
8785 + status = VCHIQ_SUCCESS;
8787 + if (bulk_waiter) {
8788 + bulk_waiter->bulk = bulk;
8789 + if (down_interruptible(&bulk_waiter->event) != 0)
8790 + status = VCHIQ_RETRY;
8791 + else if (bulk_waiter->actual == VCHIQ_BULK_ACTUAL_ABORTED)
8792 + status = VCHIQ_ERROR;
8798 + mutex_unlock(&service->bulk_mutex);
8802 + unlock_service(service);
8807 +vchiq_queue_message(VCHIQ_SERVICE_HANDLE_T handle,
8808 + const VCHIQ_ELEMENT_T *elements, unsigned int count)
8810 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
8811 + VCHIQ_STATUS_T status = VCHIQ_ERROR;
8813 + unsigned int size = 0;
8817 + (vchiq_check_service(service) != VCHIQ_SUCCESS))
8820 + for (i = 0; i < (unsigned int)count; i++) {
8821 + if (elements[i].size) {
8822 + if (elements[i].data == NULL) {
8823 + VCHIQ_SERVICE_STATS_INC(service, error_count);
8826 + size += elements[i].size;
8830 + if (size > VCHIQ_MAX_MSG_SIZE) {
8831 + VCHIQ_SERVICE_STATS_INC(service, error_count);
8835 + switch (service->srvstate) {
8836 + case VCHIQ_SRVSTATE_OPEN:
8837 + status = queue_message(service->state, service,
8838 + VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA,
8839 + service->localport,
8840 + service->remoteport),
8841 + elements, count, size, 1);
8843 + case VCHIQ_SRVSTATE_OPENSYNC:
8844 + status = queue_message_sync(service->state, service,
8845 + VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA,
8846 + service->localport,
8847 + service->remoteport),
8848 + elements, count, size, 1);
8851 + status = VCHIQ_ERROR;
8857 + unlock_service(service);
8863 +vchiq_release_message(VCHIQ_SERVICE_HANDLE_T handle, VCHIQ_HEADER_T *header)
8865 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
8866 + VCHIQ_SHARED_STATE_T *remote;
8867 + VCHIQ_STATE_T *state;
8873 + state = service->state;
8874 + remote = state->remote;
8876 + slot_index = SLOT_INDEX_FROM_DATA(state, (void *)header);
8878 + if ((slot_index >= remote->slot_first) &&
8879 + (slot_index <= remote->slot_last)) {
8880 + int msgid = header->msgid;
8881 + if (msgid & VCHIQ_MSGID_CLAIMED) {
8882 + VCHIQ_SLOT_INFO_T *slot_info =
8883 + SLOT_INFO_FROM_INDEX(state, slot_index);
8885 + release_slot(state, slot_info, header, service);
8887 + } else if (slot_index == remote->slot_sync)
8888 + release_message_sync(state, header);
8890 + unlock_service(service);
8894 +release_message_sync(VCHIQ_STATE_T *state, VCHIQ_HEADER_T *header)
8896 + header->msgid = VCHIQ_MSGID_PADDING;
8898 + remote_event_signal(&state->remote->sync_release);
8902 +vchiq_get_peer_version(VCHIQ_SERVICE_HANDLE_T handle, short *peer_version)
8904 + VCHIQ_STATUS_T status = VCHIQ_ERROR;
8905 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
8908 + (vchiq_check_service(service) != VCHIQ_SUCCESS) ||
8911 + *peer_version = service->peer_version;
8912 + status = VCHIQ_SUCCESS;
8916 + unlock_service(service);
8921 +vchiq_get_config(VCHIQ_INSTANCE_T instance,
8922 + int config_size, VCHIQ_CONFIG_T *pconfig)
8924 + VCHIQ_CONFIG_T config;
8928 + config.max_msg_size = VCHIQ_MAX_MSG_SIZE;
8929 + config.bulk_threshold = VCHIQ_MAX_MSG_SIZE;
8930 + config.max_outstanding_bulks = VCHIQ_NUM_SERVICE_BULKS;
8931 + config.max_services = VCHIQ_MAX_SERVICES;
8932 + config.version = VCHIQ_VERSION;
8933 + config.version_min = VCHIQ_VERSION_MIN;
8935 + if (config_size > sizeof(VCHIQ_CONFIG_T))
8936 + return VCHIQ_ERROR;
8938 + memcpy(pconfig, &config,
8939 + min(config_size, (int)(sizeof(VCHIQ_CONFIG_T))));
8941 + return VCHIQ_SUCCESS;
8945 +vchiq_set_service_option(VCHIQ_SERVICE_HANDLE_T handle,
8946 + VCHIQ_SERVICE_OPTION_T option, int value)
8948 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
8949 + VCHIQ_STATUS_T status = VCHIQ_ERROR;
8953 + case VCHIQ_SERVICE_OPTION_AUTOCLOSE:
8954 + service->auto_close = value;
8955 + status = VCHIQ_SUCCESS;
8958 + case VCHIQ_SERVICE_OPTION_SLOT_QUOTA: {
8959 + VCHIQ_SERVICE_QUOTA_T *service_quota =
8960 + &service->state->service_quotas[
8961 + service->localport];
8963 + value = service->state->default_slot_quota;
8964 + if ((value >= service_quota->slot_use_count) &&
8965 + (value < (unsigned short)~0)) {
8966 + service_quota->slot_quota = value;
8967 + if ((value >= service_quota->slot_use_count) &&
8968 + (service_quota->message_quota >=
8969 + service_quota->message_use_count)) {
8970 + /* Signal the service that it may have
8971 + ** dropped below its quota */
8972 + up(&service_quota->quota_event);
8974 + status = VCHIQ_SUCCESS;
8978 + case VCHIQ_SERVICE_OPTION_MESSAGE_QUOTA: {
8979 + VCHIQ_SERVICE_QUOTA_T *service_quota =
8980 + &service->state->service_quotas[
8981 + service->localport];
8983 + value = service->state->default_message_quota;
8984 + if ((value >= service_quota->message_use_count) &&
8985 + (value < (unsigned short)~0)) {
8986 + service_quota->message_quota = value;
8988 + service_quota->message_use_count) &&
8989 + (service_quota->slot_quota >=
8990 + service_quota->slot_use_count))
8991 + /* Signal the service that it may have
8992 + ** dropped below its quota */
8993 + up(&service_quota->quota_event);
8994 + status = VCHIQ_SUCCESS;
8998 + case VCHIQ_SERVICE_OPTION_SYNCHRONOUS:
8999 + if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) ||
9000 + (service->srvstate ==
9001 + VCHIQ_SRVSTATE_LISTENING)) {
9002 + service->sync = value;
9003 + status = VCHIQ_SUCCESS;
9010 + unlock_service(service);
9017 +vchiq_dump_shared_state(void *dump_context, VCHIQ_STATE_T *state,
9018 + VCHIQ_SHARED_STATE_T *shared, const char *label)
9020 + static const char *const debug_names[] = {
9022 + "SLOT_HANDLER_COUNT",
9023 + "SLOT_HANDLER_LINE",
9027 + "AWAIT_COMPLETION_LINE",
9028 + "DEQUEUE_MESSAGE_LINE",
9029 + "SERVICE_CALLBACK_LINE",
9030 + "MSG_QUEUE_FULL_COUNT",
9031 + "COMPLETION_QUEUE_FULL_COUNT"
9037 + len = snprintf(buf, sizeof(buf),
9038 + " %s: slots %d-%d tx_pos=%x recycle=%x",
9039 + label, shared->slot_first, shared->slot_last,
9040 + shared->tx_pos, shared->slot_queue_recycle);
9041 + vchiq_dump(dump_context, buf, len + 1);
9043 + len = snprintf(buf, sizeof(buf),
9044 + " Slots claimed:");
9045 + vchiq_dump(dump_context, buf, len + 1);
9047 + for (i = shared->slot_first; i <= shared->slot_last; i++) {
9048 + VCHIQ_SLOT_INFO_T slot_info = *SLOT_INFO_FROM_INDEX(state, i);
9049 + if (slot_info.use_count != slot_info.release_count) {
9050 + len = snprintf(buf, sizeof(buf),
9051 + " %d: %d/%d", i, slot_info.use_count,
9052 + slot_info.release_count);
9053 + vchiq_dump(dump_context, buf, len + 1);
9057 + for (i = 1; i < shared->debug[DEBUG_ENTRIES]; i++) {
9058 + len = snprintf(buf, sizeof(buf), " DEBUG: %s = %d(%x)",
9059 + debug_names[i], shared->debug[i], shared->debug[i]);
9060 + vchiq_dump(dump_context, buf, len + 1);
9065 +vchiq_dump_state(void *dump_context, VCHIQ_STATE_T *state)
9071 + len = snprintf(buf, sizeof(buf), "State %d: %s", state->id,
9072 + conn_state_names[state->conn_state]);
9073 + vchiq_dump(dump_context, buf, len + 1);
9075 + len = snprintf(buf, sizeof(buf),
9076 + " tx_pos=%x(@%x), rx_pos=%x(@%x)",
9077 + state->local->tx_pos,
9078 + (uint32_t)state->tx_data +
9079 + (state->local_tx_pos & VCHIQ_SLOT_MASK),
9081 + (uint32_t)state->rx_data +
9082 + (state->rx_pos & VCHIQ_SLOT_MASK));
9083 + vchiq_dump(dump_context, buf, len + 1);
9085 + len = snprintf(buf, sizeof(buf),
9086 + " Version: %d (min %d)",
9087 + VCHIQ_VERSION, VCHIQ_VERSION_MIN);
9088 + vchiq_dump(dump_context, buf, len + 1);
9090 + if (VCHIQ_ENABLE_STATS) {
9091 + len = snprintf(buf, sizeof(buf),
9092 + " Stats: ctrl_tx_count=%d, ctrl_rx_count=%d, "
9094 + state->stats.ctrl_tx_count, state->stats.ctrl_rx_count,
9095 + state->stats.error_count);
9096 + vchiq_dump(dump_context, buf, len + 1);
9099 + len = snprintf(buf, sizeof(buf),
9100 + " Slots: %d available (%d data), %d recyclable, %d stalls "
9102 + ((state->slot_queue_available * VCHIQ_SLOT_SIZE) -
9103 + state->local_tx_pos) / VCHIQ_SLOT_SIZE,
9104 + state->data_quota - state->data_use_count,
9105 + state->local->slot_queue_recycle - state->slot_queue_available,
9106 + state->stats.slot_stalls, state->stats.data_stalls);
9107 + vchiq_dump(dump_context, buf, len + 1);
9109 + vchiq_dump_platform_state(dump_context);
9111 + vchiq_dump_shared_state(dump_context, state, state->local, "Local");
9112 + vchiq_dump_shared_state(dump_context, state, state->remote, "Remote");
9114 + vchiq_dump_platform_instances(dump_context);
9116 + for (i = 0; i < state->unused_service; i++) {
9117 + VCHIQ_SERVICE_T *service = find_service_by_port(state, i);
9120 + vchiq_dump_service_state(dump_context, service);
9121 + unlock_service(service);
9127 +vchiq_dump_service_state(void *dump_context, VCHIQ_SERVICE_T *service)
9132 + len = snprintf(buf, sizeof(buf), "Service %d: %s (ref %u)",
9133 + service->localport, srvstate_names[service->srvstate],
9134 + service->ref_count - 1); /*Don't include the lock just taken*/
9136 + if (service->srvstate != VCHIQ_SRVSTATE_FREE) {
9137 + char remoteport[30];
9138 + VCHIQ_SERVICE_QUOTA_T *service_quota =
9139 + &service->state->service_quotas[service->localport];
9140 + int fourcc = service->base.fourcc;
9141 + int tx_pending, rx_pending;
9142 + if (service->remoteport != VCHIQ_PORT_FREE) {
9143 + int len2 = snprintf(remoteport, sizeof(remoteport),
9144 + "%d", service->remoteport);
9145 + if (service->public_fourcc != VCHIQ_FOURCC_INVALID)
9146 + snprintf(remoteport + len2,
9147 + sizeof(remoteport) - len2,
9148 + " (client %x)", service->client_id);
9150 + strcpy(remoteport, "n/a");
9152 + len += snprintf(buf + len, sizeof(buf) - len,
9153 + " '%c%c%c%c' remote %s (msg use %d/%d, slot use %d/%d)",
9154 + VCHIQ_FOURCC_AS_4CHARS(fourcc),
9156 + service_quota->message_use_count,
9157 + service_quota->message_quota,
9158 + service_quota->slot_use_count,
9159 + service_quota->slot_quota);
9161 + vchiq_dump(dump_context, buf, len + 1);
9163 + tx_pending = service->bulk_tx.local_insert -
9164 + service->bulk_tx.remote_insert;
9166 + rx_pending = service->bulk_rx.local_insert -
9167 + service->bulk_rx.remote_insert;
9169 + len = snprintf(buf, sizeof(buf),
9170 + " Bulk: tx_pending=%d (size %d),"
9171 + " rx_pending=%d (size %d)",
9173 + tx_pending ? service->bulk_tx.bulks[
9174 + BULK_INDEX(service->bulk_tx.remove)].size : 0,
9176 + rx_pending ? service->bulk_rx.bulks[
9177 + BULK_INDEX(service->bulk_rx.remove)].size : 0);
9179 + if (VCHIQ_ENABLE_STATS) {
9180 + vchiq_dump(dump_context, buf, len + 1);
9182 + len = snprintf(buf, sizeof(buf),
9183 + " Ctrl: tx_count=%d, tx_bytes=%llu, "
9184 + "rx_count=%d, rx_bytes=%llu",
9185 + service->stats.ctrl_tx_count,
9186 + service->stats.ctrl_tx_bytes,
9187 + service->stats.ctrl_rx_count,
9188 + service->stats.ctrl_rx_bytes);
9189 + vchiq_dump(dump_context, buf, len + 1);
9191 + len = snprintf(buf, sizeof(buf),
9192 + " Bulk: tx_count=%d, tx_bytes=%llu, "
9193 + "rx_count=%d, rx_bytes=%llu",
9194 + service->stats.bulk_tx_count,
9195 + service->stats.bulk_tx_bytes,
9196 + service->stats.bulk_rx_count,
9197 + service->stats.bulk_rx_bytes);
9198 + vchiq_dump(dump_context, buf, len + 1);
9200 + len = snprintf(buf, sizeof(buf),
9201 + " %d quota stalls, %d slot stalls, "
9202 + "%d bulk stalls, %d aborted, %d errors",
9203 + service->stats.quota_stalls,
9204 + service->stats.slot_stalls,
9205 + service->stats.bulk_stalls,
9206 + service->stats.bulk_aborted_count,
9207 + service->stats.error_count);
9211 + vchiq_dump(dump_context, buf, len + 1);
9213 + if (service->srvstate != VCHIQ_SRVSTATE_FREE)
9214 + vchiq_dump_platform_service_state(dump_context, service);
9219 +vchiq_loud_error_header(void)
9221 + vchiq_log_error(vchiq_core_log_level,
9222 + "============================================================"
9223 + "================");
9224 + vchiq_log_error(vchiq_core_log_level,
9225 + "============================================================"
9226 + "================");
9227 + vchiq_log_error(vchiq_core_log_level, "=====");
9231 +vchiq_loud_error_footer(void)
9233 + vchiq_log_error(vchiq_core_log_level, "=====");
9234 + vchiq_log_error(vchiq_core_log_level,
9235 + "============================================================"
9236 + "================");
9237 + vchiq_log_error(vchiq_core_log_level,
9238 + "============================================================"
9239 + "================");
9243 +VCHIQ_STATUS_T vchiq_send_remote_use(VCHIQ_STATE_T *state)
9245 + VCHIQ_STATUS_T status = VCHIQ_RETRY;
9246 + if (state->conn_state != VCHIQ_CONNSTATE_DISCONNECTED)
9247 + status = queue_message(state, NULL,
9248 + VCHIQ_MAKE_MSG(VCHIQ_MSG_REMOTE_USE, 0, 0),
9253 +VCHIQ_STATUS_T vchiq_send_remote_release(VCHIQ_STATE_T *state)
9255 + VCHIQ_STATUS_T status = VCHIQ_RETRY;
9256 + if (state->conn_state != VCHIQ_CONNSTATE_DISCONNECTED)
9257 + status = queue_message(state, NULL,
9258 + VCHIQ_MAKE_MSG(VCHIQ_MSG_REMOTE_RELEASE, 0, 0),
9263 +VCHIQ_STATUS_T vchiq_send_remote_use_active(VCHIQ_STATE_T *state)
9265 + VCHIQ_STATUS_T status = VCHIQ_RETRY;
9266 + if (state->conn_state != VCHIQ_CONNSTATE_DISCONNECTED)
9267 + status = queue_message(state, NULL,
9268 + VCHIQ_MAKE_MSG(VCHIQ_MSG_REMOTE_USE_ACTIVE, 0, 0),
9273 +void vchiq_log_dump_mem(const char *label, uint32_t addr, const void *voidMem,
9276 + const uint8_t *mem = (const uint8_t *)voidMem;
9278 + char lineBuf[100];
9281 + while (numBytes > 0) {
9284 + for (offset = 0; offset < 16; offset++) {
9285 + if (offset < numBytes)
9286 + s += snprintf(s, 4, "%02x ", mem[offset]);
9288 + s += snprintf(s, 4, " ");
9291 + for (offset = 0; offset < 16; offset++) {
9292 + if (offset < numBytes) {
9293 + uint8_t ch = mem[offset];
9295 + if ((ch < ' ') || (ch > '~'))
9302 + if ((label != NULL) && (*label != '\0'))
9303 + vchiq_log_trace(VCHIQ_LOG_TRACE,
9304 + "%s: %08x: %s", label, addr, lineBuf);
9306 + vchiq_log_trace(VCHIQ_LOG_TRACE,
9307 + "%08x: %s", addr, lineBuf);
9311 + if (numBytes > 16)
9318 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.h
9321 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
9323 + * Redistribution and use in source and binary forms, with or without
9324 + * modification, are permitted provided that the following conditions
9326 + * 1. Redistributions of source code must retain the above copyright
9327 + * notice, this list of conditions, and the following disclaimer,
9328 + * without modification.
9329 + * 2. Redistributions in binary form must reproduce the above copyright
9330 + * notice, this list of conditions and the following disclaimer in the
9331 + * documentation and/or other materials provided with the distribution.
9332 + * 3. The names of the above-listed copyright holders may not be used
9333 + * to endorse or promote products derived from this software without
9334 + * specific prior written permission.
9336 + * ALTERNATIVELY, this software may be distributed under the terms of the
9337 + * GNU General Public License ("GPL") version 2, as published by the Free
9338 + * Software Foundation.
9340 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
9341 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
9342 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
9343 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
9344 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
9345 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
9346 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
9347 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
9348 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
9349 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
9350 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
9353 +#ifndef VCHIQ_CORE_H
9354 +#define VCHIQ_CORE_H
9356 +#include <linux/mutex.h>
9357 +#include <linux/semaphore.h>
9358 +#include <linux/kthread.h>
9360 +#include "vchiq_cfg.h"
9364 +/* Run time control of log level, based on KERN_XXX level. */
9365 +#define VCHIQ_LOG_DEFAULT 4
9366 +#define VCHIQ_LOG_ERROR 3
9367 +#define VCHIQ_LOG_WARNING 4
9368 +#define VCHIQ_LOG_INFO 6
9369 +#define VCHIQ_LOG_TRACE 7
9371 +#define VCHIQ_LOG_PREFIX KERN_INFO "vchiq: "
9373 +#ifndef vchiq_log_error
9374 +#define vchiq_log_error(cat, fmt, ...) \
9375 + do { if (cat >= VCHIQ_LOG_ERROR) \
9376 + printk(VCHIQ_LOG_PREFIX fmt "\n", ##__VA_ARGS__); } while (0)
9378 +#ifndef vchiq_log_warning
9379 +#define vchiq_log_warning(cat, fmt, ...) \
9380 + do { if (cat >= VCHIQ_LOG_WARNING) \
9381 + printk(VCHIQ_LOG_PREFIX fmt "\n", ##__VA_ARGS__); } while (0)
9383 +#ifndef vchiq_log_info
9384 +#define vchiq_log_info(cat, fmt, ...) \
9385 + do { if (cat >= VCHIQ_LOG_INFO) \
9386 + printk(VCHIQ_LOG_PREFIX fmt "\n", ##__VA_ARGS__); } while (0)
9388 +#ifndef vchiq_log_trace
9389 +#define vchiq_log_trace(cat, fmt, ...) \
9390 + do { if (cat >= VCHIQ_LOG_TRACE) \
9391 + printk(VCHIQ_LOG_PREFIX fmt "\n", ##__VA_ARGS__); } while (0)
9394 +#define vchiq_loud_error(...) \
9395 + vchiq_log_error(vchiq_core_log_level, "===== " __VA_ARGS__)
9397 +#ifndef vchiq_static_assert
9398 +#define vchiq_static_assert(cond) __attribute__((unused)) \
9399 + extern int vchiq_static_assert[(cond) ? 1 : -1]
9402 +#define IS_POW2(x) (x && ((x & (x - 1)) == 0))
9404 +/* Ensure that the slot size and maximum number of slots are powers of 2 */
9405 +vchiq_static_assert(IS_POW2(VCHIQ_SLOT_SIZE));
9406 +vchiq_static_assert(IS_POW2(VCHIQ_MAX_SLOTS));
9407 +vchiq_static_assert(IS_POW2(VCHIQ_MAX_SLOTS_PER_SIDE));
9409 +#define VCHIQ_SLOT_MASK (VCHIQ_SLOT_SIZE - 1)
9410 +#define VCHIQ_SLOT_QUEUE_MASK (VCHIQ_MAX_SLOTS_PER_SIDE - 1)
9411 +#define VCHIQ_SLOT_ZERO_SLOTS ((sizeof(VCHIQ_SLOT_ZERO_T) + \
9412 + VCHIQ_SLOT_SIZE - 1) / VCHIQ_SLOT_SIZE)
9414 +#define VCHIQ_MSG_PADDING 0 /* - */
9415 +#define VCHIQ_MSG_CONNECT 1 /* - */
9416 +#define VCHIQ_MSG_OPEN 2 /* + (srcport, -), fourcc, client_id */
9417 +#define VCHIQ_MSG_OPENACK 3 /* + (srcport, dstport) */
9418 +#define VCHIQ_MSG_CLOSE 4 /* + (srcport, dstport) */
9419 +#define VCHIQ_MSG_DATA 5 /* + (srcport, dstport) */
9420 +#define VCHIQ_MSG_BULK_RX 6 /* + (srcport, dstport), data, size */
9421 +#define VCHIQ_MSG_BULK_TX 7 /* + (srcport, dstport), data, size */
9422 +#define VCHIQ_MSG_BULK_RX_DONE 8 /* + (srcport, dstport), actual */
9423 +#define VCHIQ_MSG_BULK_TX_DONE 9 /* + (srcport, dstport), actual */
9424 +#define VCHIQ_MSG_PAUSE 10 /* - */
9425 +#define VCHIQ_MSG_RESUME 11 /* - */
9426 +#define VCHIQ_MSG_REMOTE_USE 12 /* - */
9427 +#define VCHIQ_MSG_REMOTE_RELEASE 13 /* - */
9428 +#define VCHIQ_MSG_REMOTE_USE_ACTIVE 14 /* - */
9430 +#define VCHIQ_PORT_MAX (VCHIQ_MAX_SERVICES - 1)
9431 +#define VCHIQ_PORT_FREE 0x1000
9432 +#define VCHIQ_PORT_IS_VALID(port) (port < VCHIQ_PORT_FREE)
9433 +#define VCHIQ_MAKE_MSG(type, srcport, dstport) \
9434 + ((type<<24) | (srcport<<12) | (dstport<<0))
9435 +#define VCHIQ_MSG_TYPE(msgid) ((unsigned int)msgid >> 24)
9436 +#define VCHIQ_MSG_SRCPORT(msgid) \
9437 + (unsigned short)(((unsigned int)msgid >> 12) & 0xfff)
9438 +#define VCHIQ_MSG_DSTPORT(msgid) \
9439 + ((unsigned short)msgid & 0xfff)
9441 +#define VCHIQ_FOURCC_AS_4CHARS(fourcc) \
9442 + ((fourcc) >> 24) & 0xff, \
9443 + ((fourcc) >> 16) & 0xff, \
9444 + ((fourcc) >> 8) & 0xff, \
9447 +/* Ensure the fields are wide enough */
9448 +vchiq_static_assert(VCHIQ_MSG_SRCPORT(VCHIQ_MAKE_MSG(0, 0, VCHIQ_PORT_MAX))
9450 +vchiq_static_assert(VCHIQ_MSG_TYPE(VCHIQ_MAKE_MSG(0, VCHIQ_PORT_MAX, 0)) == 0);
9451 +vchiq_static_assert((unsigned int)VCHIQ_PORT_MAX <
9452 + (unsigned int)VCHIQ_PORT_FREE);
9454 +#define VCHIQ_MSGID_PADDING VCHIQ_MAKE_MSG(VCHIQ_MSG_PADDING, 0, 0)
9455 +#define VCHIQ_MSGID_CLAIMED 0x40000000
9457 +#define VCHIQ_FOURCC_INVALID 0x00000000
9458 +#define VCHIQ_FOURCC_IS_LEGAL(fourcc) (fourcc != VCHIQ_FOURCC_INVALID)
9460 +#define VCHIQ_BULK_ACTUAL_ABORTED -1
9462 +typedef uint32_t BITSET_T;
9464 +vchiq_static_assert((sizeof(BITSET_T) * 8) == 32);
9466 +#define BITSET_SIZE(b) ((b + 31) >> 5)
9467 +#define BITSET_WORD(b) (b >> 5)
9468 +#define BITSET_BIT(b) (1 << (b & 31))
9469 +#define BITSET_ZERO(bs) memset(bs, 0, sizeof(bs))
9470 +#define BITSET_IS_SET(bs, b) (bs[BITSET_WORD(b)] & BITSET_BIT(b))
9471 +#define BITSET_SET(bs, b) (bs[BITSET_WORD(b)] |= BITSET_BIT(b))
9472 +#define BITSET_CLR(bs, b) (bs[BITSET_WORD(b)] &= ~BITSET_BIT(b))
9474 +#if VCHIQ_ENABLE_STATS
9475 +#define VCHIQ_STATS_INC(state, stat) (state->stats. stat++)
9476 +#define VCHIQ_SERVICE_STATS_INC(service, stat) (service->stats. stat++)
9477 +#define VCHIQ_SERVICE_STATS_ADD(service, stat, addend) \
9478 + (service->stats. stat += addend)
9480 +#define VCHIQ_STATS_INC(state, stat) ((void)0)
9481 +#define VCHIQ_SERVICE_STATS_INC(service, stat) ((void)0)
9482 +#define VCHIQ_SERVICE_STATS_ADD(service, stat, addend) ((void)0)
9487 +#if VCHIQ_ENABLE_DEBUG
9488 + DEBUG_SLOT_HANDLER_COUNT,
9489 + DEBUG_SLOT_HANDLER_LINE,
9491 + DEBUG_PARSE_HEADER,
9492 + DEBUG_PARSE_MSGID,
9493 + DEBUG_AWAIT_COMPLETION_LINE,
9494 + DEBUG_DEQUEUE_MESSAGE_LINE,
9495 + DEBUG_SERVICE_CALLBACK_LINE,
9496 + DEBUG_MSG_QUEUE_FULL_COUNT,
9497 + DEBUG_COMPLETION_QUEUE_FULL_COUNT,
9502 +#if VCHIQ_ENABLE_DEBUG
9504 +#define DEBUG_INITIALISE(local) int *debug_ptr = (local)->debug;
9505 +#define DEBUG_TRACE(d) \
9506 + do { debug_ptr[DEBUG_ ## d] = __LINE__; dsb(); } while (0)
9507 +#define DEBUG_VALUE(d, v) \
9508 + do { debug_ptr[DEBUG_ ## d] = (v); dsb(); } while (0)
9509 +#define DEBUG_COUNT(d) \
9510 + do { debug_ptr[DEBUG_ ## d]++; dsb(); } while (0)
9512 +#else /* VCHIQ_ENABLE_DEBUG */
9514 +#define DEBUG_INITIALISE(local)
9515 +#define DEBUG_TRACE(d)
9516 +#define DEBUG_VALUE(d, v)
9517 +#define DEBUG_COUNT(d)
9519 +#endif /* VCHIQ_ENABLE_DEBUG */
9522 + VCHIQ_CONNSTATE_DISCONNECTED,
9523 + VCHIQ_CONNSTATE_CONNECTING,
9524 + VCHIQ_CONNSTATE_CONNECTED,
9525 + VCHIQ_CONNSTATE_PAUSING,
9526 + VCHIQ_CONNSTATE_PAUSE_SENT,
9527 + VCHIQ_CONNSTATE_PAUSED,
9528 + VCHIQ_CONNSTATE_RESUMING,
9529 + VCHIQ_CONNSTATE_PAUSE_TIMEOUT,
9530 + VCHIQ_CONNSTATE_RESUME_TIMEOUT
9531 +} VCHIQ_CONNSTATE_T;
9534 + VCHIQ_SRVSTATE_FREE,
9535 + VCHIQ_SRVSTATE_HIDDEN,
9536 + VCHIQ_SRVSTATE_LISTENING,
9537 + VCHIQ_SRVSTATE_OPENING,
9538 + VCHIQ_SRVSTATE_OPEN,
9539 + VCHIQ_SRVSTATE_OPENSYNC,
9540 + VCHIQ_SRVSTATE_CLOSESENT,
9541 + VCHIQ_SRVSTATE_CLOSERECVD,
9542 + VCHIQ_SRVSTATE_CLOSEWAIT,
9543 + VCHIQ_SRVSTATE_CLOSED
9547 + VCHIQ_POLL_TERMINATE,
9548 + VCHIQ_POLL_REMOVE,
9549 + VCHIQ_POLL_TXNOTIFY,
9550 + VCHIQ_POLL_RXNOTIFY,
9555 + VCHIQ_BULK_TRANSMIT,
9556 + VCHIQ_BULK_RECEIVE
9557 +} VCHIQ_BULK_DIR_T;
9559 +typedef void (*VCHIQ_USERDATA_TERM_T)(void *userdata);
9561 +typedef struct vchiq_bulk_struct {
9565 + VCHI_MEM_HANDLE_T handle;
9568 + void *remote_data;
9573 +typedef struct vchiq_bulk_queue_struct {
9574 + int local_insert; /* Where to insert the next local bulk */
9575 + int remote_insert; /* Where to insert the next remote bulk (master) */
9576 + int process; /* Bulk to transfer next */
9577 + int remote_notify; /* Bulk to notify the remote client of next (mstr) */
9578 + int remove; /* Bulk to notify the local client of, and remove,
9580 + VCHIQ_BULK_T bulks[VCHIQ_NUM_SERVICE_BULKS];
9581 +} VCHIQ_BULK_QUEUE_T;
9583 +typedef struct remote_event_struct {
9586 + struct semaphore *event;
9589 +typedef struct opaque_platform_state_t *VCHIQ_PLATFORM_STATE_T;
9591 +typedef struct vchiq_state_struct VCHIQ_STATE_T;
9593 +typedef struct vchiq_slot_struct {
9594 + char data[VCHIQ_SLOT_SIZE];
9597 +typedef struct vchiq_slot_info_struct {
9598 + /* Use two counters rather than one to avoid the need for a mutex. */
9600 + short release_count;
9601 +} VCHIQ_SLOT_INFO_T;
9603 +typedef struct vchiq_service_struct {
9604 + VCHIQ_SERVICE_BASE_T base;
9605 + VCHIQ_SERVICE_HANDLE_T handle;
9606 + unsigned int ref_count;
9608 + VCHIQ_USERDATA_TERM_T userdata_term;
9609 + unsigned int localport;
9610 + unsigned int remoteport;
9611 + int public_fourcc;
9616 + atomic_t poll_flags;
9618 + short version_min;
9619 + short peer_version;
9621 + VCHIQ_STATE_T *state;
9622 + VCHIQ_INSTANCE_T instance;
9624 + int service_use_count;
9626 + VCHIQ_BULK_QUEUE_T bulk_tx;
9627 + VCHIQ_BULK_QUEUE_T bulk_rx;
9629 + struct semaphore remove_event;
9630 + struct semaphore bulk_remove_event;
9631 + struct mutex bulk_mutex;
9633 + struct service_stats_struct {
9638 + int ctrl_tx_count;
9639 + int ctrl_rx_count;
9640 + int bulk_tx_count;
9641 + int bulk_rx_count;
9642 + int bulk_aborted_count;
9643 + uint64_t ctrl_tx_bytes;
9644 + uint64_t ctrl_rx_bytes;
9645 + uint64_t bulk_tx_bytes;
9646 + uint64_t bulk_rx_bytes;
9650 +/* The quota information is outside VCHIQ_SERVICE_T so that it can be
9651 + statically allocated, since for accounting reasons a service's slot
9652 + usage is carried over between users of the same port number.
9654 +typedef struct vchiq_service_quota_struct {
9655 + unsigned short slot_quota;
9656 + unsigned short slot_use_count;
9657 + unsigned short message_quota;
9658 + unsigned short message_use_count;
9659 + struct semaphore quota_event;
9660 + int previous_tx_index;
9661 +} VCHIQ_SERVICE_QUOTA_T;
9663 +typedef struct vchiq_shared_state_struct {
9665 + /* A non-zero value here indicates that the content is valid. */
9668 + /* The first and last (inclusive) slots allocated to the owner. */
9672 + /* The slot allocated to synchronous messages from the owner. */
9675 + /* Signalling this event indicates that owner's slot handler thread
9677 + REMOTE_EVENT_T trigger;
9679 + /* Indicates the byte position within the stream where the next message
9680 + ** will be written. The least significant bits are an index into the
9681 + ** slot. The next bits are the index of the slot in slot_queue. */
9684 + /* This event should be signalled when a slot is recycled. */
9685 + REMOTE_EVENT_T recycle;
9687 + /* The slot_queue index where the next recycled slot will be written. */
9688 + int slot_queue_recycle;
9690 + /* This event should be signalled when a synchronous message is sent. */
9691 + REMOTE_EVENT_T sync_trigger;
9693 + /* This event should be signalled when a synchronous message has been
9695 + REMOTE_EVENT_T sync_release;
9697 + /* A circular buffer of slot indexes. */
9698 + int slot_queue[VCHIQ_MAX_SLOTS_PER_SIDE];
9700 + /* Debugging state */
9701 + int debug[DEBUG_MAX];
9702 +} VCHIQ_SHARED_STATE_T;
9704 +typedef struct vchiq_slot_zero_struct {
9707 + short version_min;
9708 + int slot_zero_size;
9711 + int max_slots_per_side;
9712 + int platform_data[2];
9713 + VCHIQ_SHARED_STATE_T master;
9714 + VCHIQ_SHARED_STATE_T slave;
9715 + VCHIQ_SLOT_INFO_T slots[VCHIQ_MAX_SLOTS];
9716 +} VCHIQ_SLOT_ZERO_T;
9718 +struct vchiq_state_struct {
9721 + VCHIQ_CONNSTATE_T conn_state;
9724 + VCHIQ_SHARED_STATE_T *local;
9725 + VCHIQ_SHARED_STATE_T *remote;
9726 + VCHIQ_SLOT_T *slot_data;
9728 + unsigned short default_slot_quota;
9729 + unsigned short default_message_quota;
9731 + /* Event indicating connect message received */
9732 + struct semaphore connect;
9734 + /* Mutex protecting services */
9735 + struct mutex mutex;
9736 + VCHIQ_INSTANCE_T *instance;
9738 + /* Processes incoming messages */
9739 + struct task_struct *slot_handler_thread;
9741 + /* Processes recycled slots */
9742 + struct task_struct *recycle_thread;
9744 + /* Processes synchronous messages */
9745 + struct task_struct *sync_thread;
9747 + /* Local implementation of the trigger remote event */
9748 + struct semaphore trigger_event;
9750 + /* Local implementation of the recycle remote event */
9751 + struct semaphore recycle_event;
9753 + /* Local implementation of the sync trigger remote event */
9754 + struct semaphore sync_trigger_event;
9756 + /* Local implementation of the sync release remote event */
9757 + struct semaphore sync_release_event;
9761 + VCHIQ_SLOT_INFO_T *rx_info;
9763 + struct mutex slot_mutex;
9765 + struct mutex recycle_mutex;
9767 + struct mutex sync_mutex;
9769 + struct mutex bulk_transfer_mutex;
9771 + /* Indicates the byte position within the stream from where the next
9772 + ** message will be read. The least significant bits are an index into
9773 +	** the slot. The next bits are the index of the slot in
9774 + ** remote->slot_queue. */
9777 + /* A cached copy of local->tx_pos. Only write to local->tx_pos, and read
9778 + from remote->tx_pos. */
9781 + /* The slot_queue index of the slot to become available next. */
9782 + int slot_queue_available;
9784 + /* A flag to indicate if any poll has been requested */
9787 +	/* The index of the previous slot used for data messages. */
9788 + int previous_data_index;
9790 + /* The number of slots occupied by data messages. */
9791 + unsigned short data_use_count;
9793 + /* The maximum number of slots to be occupied by data messages. */
9794 + unsigned short data_quota;
9796 + /* An array of bit sets indicating which services must be polled. */
9797 + atomic_t poll_services[BITSET_SIZE(VCHIQ_MAX_SERVICES)];
9799 + /* The number of the first unused service */
9800 + int unused_service;
9802 + /* Signalled when a free slot becomes available. */
9803 + struct semaphore slot_available_event;
9805 + struct semaphore slot_remove_event;
9807 + /* Signalled when a free data slot becomes available. */
9808 + struct semaphore data_quota_event;
9810 + /* Incremented when there are bulk transfers which cannot be processed
9811 + * whilst paused and must be processed on resume */
9812 + int deferred_bulks;
9814 + struct state_stats_struct {
9817 + int ctrl_tx_count;
9818 + int ctrl_rx_count;
9822 + VCHIQ_SERVICE_T * services[VCHIQ_MAX_SERVICES];
9823 + VCHIQ_SERVICE_QUOTA_T service_quotas[VCHIQ_MAX_SERVICES];
9824 + VCHIQ_SLOT_INFO_T slot_info[VCHIQ_MAX_SLOTS];
9826 + VCHIQ_PLATFORM_STATE_T platform_state;
9829 +struct bulk_waiter {
9830 + VCHIQ_BULK_T *bulk;
9831 + struct semaphore event;
9835 +extern spinlock_t bulk_waiter_spinlock;
9837 +extern int vchiq_core_log_level;
9838 +extern int vchiq_core_msg_log_level;
9839 +extern int vchiq_sync_log_level;
9841 +extern VCHIQ_STATE_T *vchiq_states[VCHIQ_MAX_STATES];
9843 +extern const char *
9844 +get_conn_state_name(VCHIQ_CONNSTATE_T conn_state);
9846 +extern VCHIQ_SLOT_ZERO_T *
9847 +vchiq_init_slots(void *mem_base, int mem_size);
9849 +extern VCHIQ_STATUS_T
9850 +vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero,
9853 +extern VCHIQ_STATUS_T
9854 +vchiq_connect_internal(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance);
9856 +extern VCHIQ_SERVICE_T *
9857 +vchiq_add_service_internal(VCHIQ_STATE_T *state,
9858 + const VCHIQ_SERVICE_PARAMS_T *params, int srvstate,
9859 + VCHIQ_INSTANCE_T instance, VCHIQ_USERDATA_TERM_T userdata_term);
9861 +extern VCHIQ_STATUS_T
9862 +vchiq_open_service_internal(VCHIQ_SERVICE_T *service, int client_id);
9864 +extern VCHIQ_STATUS_T
9865 +vchiq_close_service_internal(VCHIQ_SERVICE_T *service, int close_recvd);
9868 +vchiq_terminate_service_internal(VCHIQ_SERVICE_T *service);
9871 +vchiq_free_service_internal(VCHIQ_SERVICE_T *service);
9873 +extern VCHIQ_STATUS_T
9874 +vchiq_shutdown_internal(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance);
9876 +extern VCHIQ_STATUS_T
9877 +vchiq_pause_internal(VCHIQ_STATE_T *state);
9879 +extern VCHIQ_STATUS_T
9880 +vchiq_resume_internal(VCHIQ_STATE_T *state);
9883 +remote_event_pollall(VCHIQ_STATE_T *state);
9885 +extern VCHIQ_STATUS_T
9886 +vchiq_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle,
9887 + VCHI_MEM_HANDLE_T memhandle, void *offset, int size, void *userdata,
9888 + VCHIQ_BULK_MODE_T mode, VCHIQ_BULK_DIR_T dir);
9891 +vchiq_dump_state(void *dump_context, VCHIQ_STATE_T *state);
9894 +vchiq_dump_service_state(void *dump_context, VCHIQ_SERVICE_T *service);
9897 +vchiq_loud_error_header(void);
9900 +vchiq_loud_error_footer(void);
9903 +request_poll(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service, int poll_type);
9905 +static inline VCHIQ_SERVICE_T *
9906 +handle_to_service(VCHIQ_SERVICE_HANDLE_T handle)
9908 + VCHIQ_STATE_T *state = vchiq_states[(handle / VCHIQ_MAX_SERVICES) &
9909 + (VCHIQ_MAX_STATES - 1)];
9913 + return state->services[handle & (VCHIQ_MAX_SERVICES - 1)];
9916 +extern VCHIQ_SERVICE_T *
9917 +find_service_by_handle(VCHIQ_SERVICE_HANDLE_T handle);
9919 +extern VCHIQ_SERVICE_T *
9920 +find_service_by_port(VCHIQ_STATE_T *state, int localport);
9922 +extern VCHIQ_SERVICE_T *
9923 +find_service_for_instance(VCHIQ_INSTANCE_T instance,
9924 + VCHIQ_SERVICE_HANDLE_T handle);
9926 +extern VCHIQ_SERVICE_T *
9927 +next_service_by_instance(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance,
9931 +lock_service(VCHIQ_SERVICE_T *service);
9934 +unlock_service(VCHIQ_SERVICE_T *service);
9936 +/* The following functions are called from vchiq_core, and external
9937 +** implementations must be provided. */
9939 +extern VCHIQ_STATUS_T
9940 +vchiq_prepare_bulk_data(VCHIQ_BULK_T *bulk,
9941 + VCHI_MEM_HANDLE_T memhandle, void *offset, int size, int dir);
9944 +vchiq_transfer_bulk(VCHIQ_BULK_T *bulk);
9947 +vchiq_complete_bulk(VCHIQ_BULK_T *bulk);
9949 +extern VCHIQ_STATUS_T
9950 +vchiq_copy_from_user(void *dst, const void *src, int size);
9953 +remote_event_signal(REMOTE_EVENT_T *event);
9956 +vchiq_platform_check_suspend(VCHIQ_STATE_T *state);
9959 +vchiq_platform_paused(VCHIQ_STATE_T *state);
9961 +extern VCHIQ_STATUS_T
9962 +vchiq_platform_resume(VCHIQ_STATE_T *state);
9965 +vchiq_platform_resumed(VCHIQ_STATE_T *state);
9968 +vchiq_dump(void *dump_context, const char *str, int len);
9971 +vchiq_dump_platform_state(void *dump_context);
9974 +vchiq_dump_platform_instances(void *dump_context);
9977 +vchiq_dump_platform_service_state(void *dump_context,
9978 + VCHIQ_SERVICE_T *service);
9980 +extern VCHIQ_STATUS_T
9981 +vchiq_use_service_internal(VCHIQ_SERVICE_T *service);
9983 +extern VCHIQ_STATUS_T
9984 +vchiq_release_service_internal(VCHIQ_SERVICE_T *service);
9987 +vchiq_on_remote_use(VCHIQ_STATE_T *state);
9990 +vchiq_on_remote_release(VCHIQ_STATE_T *state);
9992 +extern VCHIQ_STATUS_T
9993 +vchiq_platform_init_state(VCHIQ_STATE_T *state);
9995 +extern VCHIQ_STATUS_T
9996 +vchiq_check_service(VCHIQ_SERVICE_T *service);
9999 +vchiq_on_remote_use_active(VCHIQ_STATE_T *state);
10001 +extern VCHIQ_STATUS_T
10002 +vchiq_send_remote_use(VCHIQ_STATE_T *state);
10004 +extern VCHIQ_STATUS_T
10005 +vchiq_send_remote_release(VCHIQ_STATE_T *state);
10007 +extern VCHIQ_STATUS_T
10008 +vchiq_send_remote_use_active(VCHIQ_STATE_T *state);
10011 +vchiq_platform_conn_state_changed(VCHIQ_STATE_T *state,
10012 + VCHIQ_CONNSTATE_T oldstate, VCHIQ_CONNSTATE_T newstate);
10015 +vchiq_platform_handle_timeout(VCHIQ_STATE_T *state);
10018 +vchiq_set_conn_state(VCHIQ_STATE_T *state, VCHIQ_CONNSTATE_T newstate);
10022 +vchiq_log_dump_mem(const char *label, uint32_t addr, const void *voidMem,
10023 + size_t numBytes);
10027 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_genversion
10029 +#!/usr/bin/perl -w
10034 +# Generate a version from available information
10037 +my $prefix = shift @ARGV;
10038 +my $root = shift @ARGV;
10041 +if ( not defined $root ) {
10042 + die "usage: $0 prefix root-dir\n";
10045 +if ( ! -d $root ) {
10046 + die "root directory $root not found\n";
10049 +my $version = "unknown";
10052 +if ( -d "$root/.git" ) {
10053 + # attempt to work out git version. only do so
10054 + # on a linux build host, as cygwin builds are
10055 + # already slow enough
10057 + if ( -f "/usr/bin/git" || -f "/usr/local/bin/git" ) {
10058 + if (not open(F, "git --git-dir $root/.git rev-parse --verify HEAD|")) {
10059 + $version = "no git version";
10063 + $version =~ s/[ \r\n]*$//; # chomp may not be enough (cygwin).
10064 + $version =~ s/^[ \r\n]*//; # chomp may not be enough (cygwin).
10067 + if (open(G, "git --git-dir $root/.git status --porcelain|")) {
10069 + $tainted =~ s/[ \r\n]*$//; # chomp may not be enough (cygwin).
10070 + $tainted =~ s/^[ \r\n]*//; # chomp may not be enough (cygwin).
10071 + if (length $tainted) {
10072 + $version = join ' ', $version, "(tainted)";
10075 + $version = join ' ', $version, "(clean)";
10081 +my $hostname = `hostname`;
10082 +$hostname =~ s/[ \r\n]*$//; # chomp may not be enough (cygwin).
10083 +$hostname =~ s/^[ \r\n]*//; # chomp may not be enough (cygwin).
10086 +print STDERR "Version $version\n";
10088 +#include "${prefix}_build_info.h"
10089 +#include <linux/broadcom/vc_debug_sym.h>
10091 +VC_DEBUG_DECLARE_STRING_VAR( ${prefix}_build_hostname, "$hostname" );
10092 +VC_DEBUG_DECLARE_STRING_VAR( ${prefix}_build_version, "$version" );
10093 +VC_DEBUG_DECLARE_STRING_VAR( ${prefix}_build_time, __TIME__ );
10094 +VC_DEBUG_DECLARE_STRING_VAR( ${prefix}_build_date, __DATE__ );
10096 +const char *vchiq_get_build_hostname( void )
10098 + return vchiq_build_hostname;
10101 +const char *vchiq_get_build_version( void )
10103 + return vchiq_build_version;
10106 +const char *vchiq_get_build_date( void )
10108 + return vchiq_build_date;
10111 +const char *vchiq_get_build_time( void )
10113 + return vchiq_build_time;
10119 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_if.h
10122 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
10124 + * Redistribution and use in source and binary forms, with or without
10125 + * modification, are permitted provided that the following conditions
10127 + * 1. Redistributions of source code must retain the above copyright
10128 + * notice, this list of conditions, and the following disclaimer,
10129 + * without modification.
10130 + * 2. Redistributions in binary form must reproduce the above copyright
10131 + * notice, this list of conditions and the following disclaimer in the
10132 + * documentation and/or other materials provided with the distribution.
10133 + * 3. The names of the above-listed copyright holders may not be used
10134 + * to endorse or promote products derived from this software without
10135 + * specific prior written permission.
10137 + * ALTERNATIVELY, this software may be distributed under the terms of the
10138 + * GNU General Public License ("GPL") version 2, as published by the Free
10139 + * Software Foundation.
10141 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
10142 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
10143 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
10144 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
10145 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
10146 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
10147 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
10148 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
10149 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
10150 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
10151 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
10154 +#ifndef VCHIQ_IF_H
10155 +#define VCHIQ_IF_H
10157 +#include "interface/vchi/vchi_mh.h"
10159 +#define VCHIQ_SERVICE_HANDLE_INVALID 0
10161 +#define VCHIQ_SLOT_SIZE 4096
10162 +#define VCHIQ_MAX_MSG_SIZE (VCHIQ_SLOT_SIZE - sizeof(VCHIQ_HEADER_T))
10163 +#define VCHIQ_CHANNEL_SIZE VCHIQ_MAX_MSG_SIZE /* For backwards compatibility */
10165 +#define VCHIQ_MAKE_FOURCC(x0, x1, x2, x3) \
10166 + (((x0) << 24) | ((x1) << 16) | ((x2) << 8) | (x3))
10167 +#define VCHIQ_GET_SERVICE_USERDATA(service) vchiq_get_service_userdata(service)
10168 +#define VCHIQ_GET_SERVICE_FOURCC(service) vchiq_get_service_fourcc(service)
10171 + VCHIQ_SERVICE_OPENED, /* service, -, - */
10172 + VCHIQ_SERVICE_CLOSED, /* service, -, - */
10173 + VCHIQ_MESSAGE_AVAILABLE, /* service, header, - */
10174 + VCHIQ_BULK_TRANSMIT_DONE, /* service, -, bulk_userdata */
10175 + VCHIQ_BULK_RECEIVE_DONE, /* service, -, bulk_userdata */
10176 + VCHIQ_BULK_TRANSMIT_ABORTED, /* service, -, bulk_userdata */
10177 + VCHIQ_BULK_RECEIVE_ABORTED /* service, -, bulk_userdata */
10181 + VCHIQ_ERROR = -1,
10182 + VCHIQ_SUCCESS = 0,
10187 + VCHIQ_BULK_MODE_CALLBACK,
10188 + VCHIQ_BULK_MODE_BLOCKING,
10189 + VCHIQ_BULK_MODE_NOCALLBACK,
10190 + VCHIQ_BULK_MODE_WAITING /* Reserved for internal use */
10191 +} VCHIQ_BULK_MODE_T;
10194 + VCHIQ_SERVICE_OPTION_AUTOCLOSE,
10195 + VCHIQ_SERVICE_OPTION_SLOT_QUOTA,
10196 + VCHIQ_SERVICE_OPTION_MESSAGE_QUOTA,
10197 + VCHIQ_SERVICE_OPTION_SYNCHRONOUS
10198 +} VCHIQ_SERVICE_OPTION_T;
10200 +typedef struct vchiq_header_struct {
10201 + /* The message identifier - opaque to applications. */
10204 + /* Size of message data. */
10205 + unsigned int size;
10207 + char data[0]; /* message */
10211 + const void *data;
10212 + unsigned int size;
10213 +} VCHIQ_ELEMENT_T;
10215 +typedef unsigned int VCHIQ_SERVICE_HANDLE_T;
10217 +typedef VCHIQ_STATUS_T (*VCHIQ_CALLBACK_T)(VCHIQ_REASON_T, VCHIQ_HEADER_T *,
10218 + VCHIQ_SERVICE_HANDLE_T, void *);
10220 +typedef struct vchiq_service_base_struct {
10222 + VCHIQ_CALLBACK_T callback;
10224 +} VCHIQ_SERVICE_BASE_T;
10226 +typedef struct vchiq_service_params_struct {
10228 + VCHIQ_CALLBACK_T callback;
10230 + short version; /* Increment for non-trivial changes */
10231 + short version_min; /* Update for incompatible changes */
10232 +} VCHIQ_SERVICE_PARAMS_T;
10234 +typedef struct vchiq_config_struct {
10235 + unsigned int max_msg_size;
10236 + unsigned int bulk_threshold; /* The message size above which it
10237 + is better to use a bulk transfer
10238 + (<= max_msg_size) */
10239 + unsigned int max_outstanding_bulks;
10240 + unsigned int max_services;
10241 + short version; /* The version of VCHIQ */
10242 + short version_min; /* The minimum compatible version of VCHIQ */
10245 +typedef struct vchiq_instance_struct *VCHIQ_INSTANCE_T;
10246 +typedef void (*VCHIQ_REMOTE_USE_CALLBACK_T)(void *cb_arg);
10248 +extern VCHIQ_STATUS_T vchiq_initialise(VCHIQ_INSTANCE_T *pinstance);
10249 +extern VCHIQ_STATUS_T vchiq_shutdown(VCHIQ_INSTANCE_T instance);
10250 +extern VCHIQ_STATUS_T vchiq_connect(VCHIQ_INSTANCE_T instance);
10251 +extern VCHIQ_STATUS_T vchiq_add_service(VCHIQ_INSTANCE_T instance,
10252 + const VCHIQ_SERVICE_PARAMS_T *params,
10253 + VCHIQ_SERVICE_HANDLE_T *pservice);
10254 +extern VCHIQ_STATUS_T vchiq_open_service(VCHIQ_INSTANCE_T instance,
10255 + const VCHIQ_SERVICE_PARAMS_T *params,
10256 + VCHIQ_SERVICE_HANDLE_T *pservice);
10257 +extern VCHIQ_STATUS_T vchiq_close_service(VCHIQ_SERVICE_HANDLE_T service);
10258 +extern VCHIQ_STATUS_T vchiq_remove_service(VCHIQ_SERVICE_HANDLE_T service);
10259 +extern VCHIQ_STATUS_T vchiq_use_service(VCHIQ_SERVICE_HANDLE_T service);
10260 +extern VCHIQ_STATUS_T vchiq_use_service_no_resume(
10261 + VCHIQ_SERVICE_HANDLE_T service);
10262 +extern VCHIQ_STATUS_T vchiq_release_service(VCHIQ_SERVICE_HANDLE_T service);
10264 +extern VCHIQ_STATUS_T vchiq_queue_message(VCHIQ_SERVICE_HANDLE_T service,
10265 + const VCHIQ_ELEMENT_T *elements, unsigned int count);
10266 +extern void vchiq_release_message(VCHIQ_SERVICE_HANDLE_T service,
10267 + VCHIQ_HEADER_T *header);
10268 +extern VCHIQ_STATUS_T vchiq_queue_bulk_transmit(VCHIQ_SERVICE_HANDLE_T service,
10269 + const void *data, unsigned int size, void *userdata);
10270 +extern VCHIQ_STATUS_T vchiq_queue_bulk_receive(VCHIQ_SERVICE_HANDLE_T service,
10271 + void *data, unsigned int size, void *userdata);
10272 +extern VCHIQ_STATUS_T vchiq_queue_bulk_transmit_handle(
10273 + VCHIQ_SERVICE_HANDLE_T service, VCHI_MEM_HANDLE_T handle,
10274 + const void *offset, unsigned int size, void *userdata);
10275 +extern VCHIQ_STATUS_T vchiq_queue_bulk_receive_handle(
10276 + VCHIQ_SERVICE_HANDLE_T service, VCHI_MEM_HANDLE_T handle,
10277 + void *offset, unsigned int size, void *userdata);
10278 +extern VCHIQ_STATUS_T vchiq_bulk_transmit(VCHIQ_SERVICE_HANDLE_T service,
10279 + const void *data, unsigned int size, void *userdata,
10280 + VCHIQ_BULK_MODE_T mode);
10281 +extern VCHIQ_STATUS_T vchiq_bulk_receive(VCHIQ_SERVICE_HANDLE_T service,
10282 + void *data, unsigned int size, void *userdata,
10283 + VCHIQ_BULK_MODE_T mode);
10284 +extern VCHIQ_STATUS_T vchiq_bulk_transmit_handle(VCHIQ_SERVICE_HANDLE_T service,
10285 + VCHI_MEM_HANDLE_T handle, const void *offset, unsigned int size,
10286 + void *userdata, VCHIQ_BULK_MODE_T mode);
10287 +extern VCHIQ_STATUS_T vchiq_bulk_receive_handle(VCHIQ_SERVICE_HANDLE_T service,
10288 + VCHI_MEM_HANDLE_T handle, void *offset, unsigned int size,
10289 + void *userdata, VCHIQ_BULK_MODE_T mode);
10290 +extern int vchiq_get_client_id(VCHIQ_SERVICE_HANDLE_T service);
10291 +extern void *vchiq_get_service_userdata(VCHIQ_SERVICE_HANDLE_T service);
10292 +extern int vchiq_get_service_fourcc(VCHIQ_SERVICE_HANDLE_T service);
10293 +extern VCHIQ_STATUS_T vchiq_get_config(VCHIQ_INSTANCE_T instance,
10294 + int config_size, VCHIQ_CONFIG_T *pconfig);
10295 +extern VCHIQ_STATUS_T vchiq_set_service_option(VCHIQ_SERVICE_HANDLE_T service,
10296 + VCHIQ_SERVICE_OPTION_T option, int value);
10298 +extern VCHIQ_STATUS_T vchiq_remote_use(VCHIQ_INSTANCE_T instance,
10299 + VCHIQ_REMOTE_USE_CALLBACK_T callback, void *cb_arg);
10300 +extern VCHIQ_STATUS_T vchiq_remote_release(VCHIQ_INSTANCE_T instance);
10302 +extern VCHIQ_STATUS_T vchiq_dump_phys_mem(VCHIQ_SERVICE_HANDLE_T service,
10303 + void *ptr, size_t num_bytes);
10305 +extern VCHIQ_STATUS_T vchiq_get_peer_version(VCHIQ_SERVICE_HANDLE_T handle,
10306 + short *peer_version);
10308 +#endif /* VCHIQ_IF_H */
10310 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_ioctl.h
10313 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
10315 + * Redistribution and use in source and binary forms, with or without
10316 + * modification, are permitted provided that the following conditions
10318 + * 1. Redistributions of source code must retain the above copyright
10319 + * notice, this list of conditions, and the following disclaimer,
10320 + * without modification.
10321 + * 2. Redistributions in binary form must reproduce the above copyright
10322 + * notice, this list of conditions and the following disclaimer in the
10323 + * documentation and/or other materials provided with the distribution.
10324 + * 3. The names of the above-listed copyright holders may not be used
10325 + * to endorse or promote products derived from this software without
10326 + * specific prior written permission.
10328 + * ALTERNATIVELY, this software may be distributed under the terms of the
10329 + * GNU General Public License ("GPL") version 2, as published by the Free
10330 + * Software Foundation.
10332 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
10333 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
10334 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
10335 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
10336 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
10337 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
10338 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
10339 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
10340 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
10341 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
10342 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
10345 +#ifndef VCHIQ_IOCTLS_H
10346 +#define VCHIQ_IOCTLS_H
10348 +#include <linux/ioctl.h>
10349 +#include "vchiq_if.h"
10351 +#define VCHIQ_IOC_MAGIC 0xc4
10352 +#define VCHIQ_INVALID_HANDLE (~0)
10355 + VCHIQ_SERVICE_PARAMS_T params;
10358 + unsigned int handle; /* OUT */
10359 +} VCHIQ_CREATE_SERVICE_T;
10362 + unsigned int handle;
10363 + unsigned int count;
10364 + const VCHIQ_ELEMENT_T *elements;
10365 +} VCHIQ_QUEUE_MESSAGE_T;
10368 + unsigned int handle;
10370 + unsigned int size;
10372 + VCHIQ_BULK_MODE_T mode;
10373 +} VCHIQ_QUEUE_BULK_TRANSFER_T;
10376 + VCHIQ_REASON_T reason;
10377 + VCHIQ_HEADER_T *header;
10378 + void *service_userdata;
10379 + void *bulk_userdata;
10380 +} VCHIQ_COMPLETION_DATA_T;
10383 + unsigned int count;
10384 + VCHIQ_COMPLETION_DATA_T *buf;
10385 + unsigned int msgbufsize;
10386 + unsigned int msgbufcount; /* IN/OUT */
10388 +} VCHIQ_AWAIT_COMPLETION_T;
10391 + unsigned int handle;
10393 + unsigned int bufsize;
10395 +} VCHIQ_DEQUEUE_MESSAGE_T;
10398 + unsigned int config_size;
10399 + VCHIQ_CONFIG_T *pconfig;
10400 +} VCHIQ_GET_CONFIG_T;
10403 + unsigned int handle;
10404 + VCHIQ_SERVICE_OPTION_T option;
10406 +} VCHIQ_SET_SERVICE_OPTION_T;
10410 + size_t num_bytes;
10411 +} VCHIQ_DUMP_MEM_T;
10413 +#define VCHIQ_IOC_CONNECT _IO(VCHIQ_IOC_MAGIC, 0)
10414 +#define VCHIQ_IOC_SHUTDOWN _IO(VCHIQ_IOC_MAGIC, 1)
10415 +#define VCHIQ_IOC_CREATE_SERVICE \
10416 + _IOWR(VCHIQ_IOC_MAGIC, 2, VCHIQ_CREATE_SERVICE_T)
10417 +#define VCHIQ_IOC_REMOVE_SERVICE _IO(VCHIQ_IOC_MAGIC, 3)
10418 +#define VCHIQ_IOC_QUEUE_MESSAGE \
10419 + _IOW(VCHIQ_IOC_MAGIC, 4, VCHIQ_QUEUE_MESSAGE_T)
10420 +#define VCHIQ_IOC_QUEUE_BULK_TRANSMIT \
10421 + _IOWR(VCHIQ_IOC_MAGIC, 5, VCHIQ_QUEUE_BULK_TRANSFER_T)
10422 +#define VCHIQ_IOC_QUEUE_BULK_RECEIVE \
10423 + _IOWR(VCHIQ_IOC_MAGIC, 6, VCHIQ_QUEUE_BULK_TRANSFER_T)
10424 +#define VCHIQ_IOC_AWAIT_COMPLETION \
10425 + _IOWR(VCHIQ_IOC_MAGIC, 7, VCHIQ_AWAIT_COMPLETION_T)
10426 +#define VCHIQ_IOC_DEQUEUE_MESSAGE \
10427 + _IOWR(VCHIQ_IOC_MAGIC, 8, VCHIQ_DEQUEUE_MESSAGE_T)
10428 +#define VCHIQ_IOC_GET_CLIENT_ID _IO(VCHIQ_IOC_MAGIC, 9)
10429 +#define VCHIQ_IOC_GET_CONFIG \
10430 + _IOWR(VCHIQ_IOC_MAGIC, 10, VCHIQ_GET_CONFIG_T)
10431 +#define VCHIQ_IOC_CLOSE_SERVICE _IO(VCHIQ_IOC_MAGIC, 11)
10432 +#define VCHIQ_IOC_USE_SERVICE _IO(VCHIQ_IOC_MAGIC, 12)
10433 +#define VCHIQ_IOC_RELEASE_SERVICE _IO(VCHIQ_IOC_MAGIC, 13)
10434 +#define VCHIQ_IOC_SET_SERVICE_OPTION \
10435 + _IOW(VCHIQ_IOC_MAGIC, 14, VCHIQ_SET_SERVICE_OPTION_T)
10436 +#define VCHIQ_IOC_DUMP_PHYS_MEM \
10437 + _IOW(VCHIQ_IOC_MAGIC, 15, VCHIQ_DUMP_MEM_T)
10438 +#define VCHIQ_IOC_MAX 15
10442 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_kern_lib.c
10445 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
10447 + * Redistribution and use in source and binary forms, with or without
10448 + * modification, are permitted provided that the following conditions
10450 + * 1. Redistributions of source code must retain the above copyright
10451 + * notice, this list of conditions, and the following disclaimer,
10452 + * without modification.
10453 + * 2. Redistributions in binary form must reproduce the above copyright
10454 + * notice, this list of conditions and the following disclaimer in the
10455 + * documentation and/or other materials provided with the distribution.
10456 + * 3. The names of the above-listed copyright holders may not be used
10457 + * to endorse or promote products derived from this software without
10458 + * specific prior written permission.
10460 + * ALTERNATIVELY, this software may be distributed under the terms of the
10461 + * GNU General Public License ("GPL") version 2, as published by the Free
10462 + * Software Foundation.
10464 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
10465 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
10466 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
10467 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
10468 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
10469 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
10470 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
10471 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
10472 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
10473 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
10474 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
10477 +/* ---- Include Files ---------------------------------------------------- */
10479 +#include <linux/kernel.h>
10480 +#include <linux/module.h>
10481 +#include <linux/mutex.h>
10483 +#include "vchiq_core.h"
10484 +#include "vchiq_arm.h"
10486 +/* ---- Public Variables ------------------------------------------------- */
10488 +/* ---- Private Constants and Types -------------------------------------- */
10490 +struct bulk_waiter_node {
10491 + struct bulk_waiter bulk_waiter;
10493 + struct list_head list;
10496 +struct vchiq_instance_struct {
10497 + VCHIQ_STATE_T *state;
10501 + struct list_head bulk_waiter_list;
10502 + struct mutex bulk_waiter_list_mutex;
10505 +static VCHIQ_STATUS_T
10506 +vchiq_blocking_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle, void *data,
10507 + unsigned int size, VCHIQ_BULK_DIR_T dir);
10509 +/****************************************************************************
10511 +* vchiq_initialise
10513 +***************************************************************************/
10514 +#define VCHIQ_INIT_RETRIES 10
10515 +VCHIQ_STATUS_T vchiq_initialise(VCHIQ_INSTANCE_T *instanceOut)
10517 + VCHIQ_STATUS_T status = VCHIQ_ERROR;
10518 + VCHIQ_STATE_T *state;
10519 + VCHIQ_INSTANCE_T instance = NULL;
10522 + vchiq_log_trace(vchiq_core_log_level, "%s called", __func__);
10524 + /* VideoCore may not be ready due to boot up timing.
10525 + It may never be ready if kernel and firmware are mismatched, so don't block forever. */
10526 + for (i=0; i<VCHIQ_INIT_RETRIES; i++) {
10527 + state = vchiq_get_state();
10532 + if (i==VCHIQ_INIT_RETRIES) {
10533 + vchiq_log_error(vchiq_core_log_level,
10534 + "%s: videocore not initialized\n", __func__);
10536 + } else if (i>0) {
10537 + vchiq_log_warning(vchiq_core_log_level,
10538 + "%s: videocore initialized after %d retries\n", __func__, i);
10541 + instance = kzalloc(sizeof(*instance), GFP_KERNEL);
10543 + vchiq_log_error(vchiq_core_log_level,
10544 + "%s: error allocating vchiq instance\n", __func__);
10548 + instance->connected = 0;
10549 + instance->state = state;
10550 + mutex_init(&instance->bulk_waiter_list_mutex);
10551 + INIT_LIST_HEAD(&instance->bulk_waiter_list);
10553 + *instanceOut = instance;
10555 + status = VCHIQ_SUCCESS;
10558 + vchiq_log_trace(vchiq_core_log_level,
10559 + "%s(%p): returning %d", __func__, instance, status);
10563 +EXPORT_SYMBOL(vchiq_initialise);
10565 +/****************************************************************************
10569 +***************************************************************************/
10571 +VCHIQ_STATUS_T vchiq_shutdown(VCHIQ_INSTANCE_T instance)
10573 + VCHIQ_STATUS_T status;
10574 + VCHIQ_STATE_T *state = instance->state;
10576 + vchiq_log_trace(vchiq_core_log_level,
10577 + "%s(%p) called", __func__, instance);
10579 + if (mutex_lock_interruptible(&state->mutex) != 0)
10580 + return VCHIQ_RETRY;
10582 + /* Remove all services */
10583 + status = vchiq_shutdown_internal(state, instance);
10585 + mutex_unlock(&state->mutex);
10587 + vchiq_log_trace(vchiq_core_log_level,
10588 + "%s(%p): returning %d", __func__, instance, status);
10590 + if (status == VCHIQ_SUCCESS) {
10591 + struct list_head *pos, *next;
10592 + list_for_each_safe(pos, next,
10593 + &instance->bulk_waiter_list) {
10594 + struct bulk_waiter_node *waiter;
10595 + waiter = list_entry(pos,
10596 + struct bulk_waiter_node,
10599 + vchiq_log_info(vchiq_arm_log_level,
10600 + "bulk_waiter - cleaned up %x "
10602 + (unsigned int)waiter, waiter->pid);
10610 +EXPORT_SYMBOL(vchiq_shutdown);
10612 +/****************************************************************************
10614 +* vchiq_is_connected
10616 +***************************************************************************/
10618 +int vchiq_is_connected(VCHIQ_INSTANCE_T instance)
10620 + return instance->connected;
10623 +/****************************************************************************
10627 +***************************************************************************/
10629 +VCHIQ_STATUS_T vchiq_connect(VCHIQ_INSTANCE_T instance)
10631 + VCHIQ_STATUS_T status;
10632 + VCHIQ_STATE_T *state = instance->state;
10634 + vchiq_log_trace(vchiq_core_log_level,
10635 + "%s(%p) called", __func__, instance);
10637 + if (mutex_lock_interruptible(&state->mutex) != 0) {
10638 + vchiq_log_trace(vchiq_core_log_level,
10639 + "%s: call to mutex_lock failed", __func__);
10640 + status = VCHIQ_RETRY;
10643 + status = vchiq_connect_internal(state, instance);
10645 + if (status == VCHIQ_SUCCESS)
10646 + instance->connected = 1;
10648 + mutex_unlock(&state->mutex);
10651 + vchiq_log_trace(vchiq_core_log_level,
10652 + "%s(%p): returning %d", __func__, instance, status);
10656 +EXPORT_SYMBOL(vchiq_connect);
10658 +/****************************************************************************
10660 +* vchiq_add_service
10662 +***************************************************************************/
10664 +VCHIQ_STATUS_T vchiq_add_service(
10665 + VCHIQ_INSTANCE_T instance,
10666 + const VCHIQ_SERVICE_PARAMS_T *params,
10667 + VCHIQ_SERVICE_HANDLE_T *phandle)
10669 + VCHIQ_STATUS_T status;
10670 + VCHIQ_STATE_T *state = instance->state;
10671 + VCHIQ_SERVICE_T *service = NULL;
10674 + vchiq_log_trace(vchiq_core_log_level,
10675 + "%s(%p) called", __func__, instance);
10677 + *phandle = VCHIQ_SERVICE_HANDLE_INVALID;
10679 + srvstate = vchiq_is_connected(instance)
10680 + ? VCHIQ_SRVSTATE_LISTENING
10681 + : VCHIQ_SRVSTATE_HIDDEN;
10683 + service = vchiq_add_service_internal(
10691 + *phandle = service->handle;
10692 + status = VCHIQ_SUCCESS;
10694 + status = VCHIQ_ERROR;
10696 + vchiq_log_trace(vchiq_core_log_level,
10697 + "%s(%p): returning %d", __func__, instance, status);
10701 +EXPORT_SYMBOL(vchiq_add_service);
10703 +/****************************************************************************
10705 +* vchiq_open_service
10707 +***************************************************************************/
10709 +VCHIQ_STATUS_T vchiq_open_service(
10710 + VCHIQ_INSTANCE_T instance,
10711 + const VCHIQ_SERVICE_PARAMS_T *params,
10712 + VCHIQ_SERVICE_HANDLE_T *phandle)
10714 + VCHIQ_STATUS_T status = VCHIQ_ERROR;
10715 + VCHIQ_STATE_T *state = instance->state;
10716 + VCHIQ_SERVICE_T *service = NULL;
10718 + vchiq_log_trace(vchiq_core_log_level,
10719 + "%s(%p) called", __func__, instance);
10721 + *phandle = VCHIQ_SERVICE_HANDLE_INVALID;
10723 + if (!vchiq_is_connected(instance))
10726 + service = vchiq_add_service_internal(state,
10728 + VCHIQ_SRVSTATE_OPENING,
10733 + status = vchiq_open_service_internal(service, current->pid);
10734 + if (status == VCHIQ_SUCCESS)
10735 + *phandle = service->handle;
10737 + vchiq_remove_service(service->handle);
10741 + vchiq_log_trace(vchiq_core_log_level,
10742 + "%s(%p): returning %d", __func__, instance, status);
10746 +EXPORT_SYMBOL(vchiq_open_service);
10749 +vchiq_queue_bulk_transmit(VCHIQ_SERVICE_HANDLE_T handle,
10750 + const void *data, unsigned int size, void *userdata)
10752 + return vchiq_bulk_transfer(handle,
10753 + VCHI_MEM_HANDLE_INVALID, (void *)data, size, userdata,
10754 + VCHIQ_BULK_MODE_CALLBACK, VCHIQ_BULK_TRANSMIT);
10756 +EXPORT_SYMBOL(vchiq_queue_bulk_transmit);
10759 +vchiq_queue_bulk_receive(VCHIQ_SERVICE_HANDLE_T handle, void *data,
10760 + unsigned int size, void *userdata)
10762 + return vchiq_bulk_transfer(handle,
10763 + VCHI_MEM_HANDLE_INVALID, data, size, userdata,
10764 + VCHIQ_BULK_MODE_CALLBACK, VCHIQ_BULK_RECEIVE);
10766 +EXPORT_SYMBOL(vchiq_queue_bulk_receive);
10769 +vchiq_bulk_transmit(VCHIQ_SERVICE_HANDLE_T handle, const void *data,
10770 + unsigned int size, void *userdata, VCHIQ_BULK_MODE_T mode)
10772 + VCHIQ_STATUS_T status;
10775 + case VCHIQ_BULK_MODE_NOCALLBACK:
10776 + case VCHIQ_BULK_MODE_CALLBACK:
10777 + status = vchiq_bulk_transfer(handle,
10778 + VCHI_MEM_HANDLE_INVALID, (void *)data, size, userdata,
10779 + mode, VCHIQ_BULK_TRANSMIT);
10781 + case VCHIQ_BULK_MODE_BLOCKING:
10782 + status = vchiq_blocking_bulk_transfer(handle,
10783 + (void *)data, size, VCHIQ_BULK_TRANSMIT);
10786 + return VCHIQ_ERROR;
10791 +EXPORT_SYMBOL(vchiq_bulk_transmit);
10794 +vchiq_bulk_receive(VCHIQ_SERVICE_HANDLE_T handle, void *data,
10795 + unsigned int size, void *userdata, VCHIQ_BULK_MODE_T mode)
10797 + VCHIQ_STATUS_T status;
10800 + case VCHIQ_BULK_MODE_NOCALLBACK:
10801 + case VCHIQ_BULK_MODE_CALLBACK:
10802 + status = vchiq_bulk_transfer(handle,
10803 + VCHI_MEM_HANDLE_INVALID, data, size, userdata,
10804 + mode, VCHIQ_BULK_RECEIVE);
10806 + case VCHIQ_BULK_MODE_BLOCKING:
10807 + status = vchiq_blocking_bulk_transfer(handle,
10808 + (void *)data, size, VCHIQ_BULK_RECEIVE);
10811 + return VCHIQ_ERROR;
10816 +EXPORT_SYMBOL(vchiq_bulk_receive);
10818 +static VCHIQ_STATUS_T
10819 +vchiq_blocking_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle, void *data,
10820 + unsigned int size, VCHIQ_BULK_DIR_T dir)
10822 + VCHIQ_INSTANCE_T instance;
10823 + VCHIQ_SERVICE_T *service;
10824 + VCHIQ_STATUS_T status;
10825 + struct bulk_waiter_node *waiter = NULL;
10826 + struct list_head *pos;
10828 + service = find_service_by_handle(handle);
10830 + return VCHIQ_ERROR;
10832 + instance = service->instance;
10834 + unlock_service(service);
10836 + mutex_lock(&instance->bulk_waiter_list_mutex);
10837 + list_for_each(pos, &instance->bulk_waiter_list) {
10838 + if (list_entry(pos, struct bulk_waiter_node,
10839 + list)->pid == current->pid) {
10840 + waiter = list_entry(pos,
10841 + struct bulk_waiter_node,
10847 + mutex_unlock(&instance->bulk_waiter_list_mutex);
10850 + VCHIQ_BULK_T *bulk = waiter->bulk_waiter.bulk;
10852 + /* This thread has an outstanding bulk transfer. */
10853 + if ((bulk->data != data) ||
10854 + (bulk->size != size)) {
10855 + /* This is not a retry of the previous one.
10856 + ** Cancel the signal when the transfer
10858 + spin_lock(&bulk_waiter_spinlock);
10859 + bulk->userdata = NULL;
10860 + spin_unlock(&bulk_waiter_spinlock);
10866 + waiter = kzalloc(sizeof(struct bulk_waiter_node), GFP_KERNEL);
10868 + vchiq_log_error(vchiq_core_log_level,
10869 + "%s - out of memory", __func__);
10870 + return VCHIQ_ERROR;
10874 + status = vchiq_bulk_transfer(handle, VCHI_MEM_HANDLE_INVALID,
10875 + data, size, &waiter->bulk_waiter, VCHIQ_BULK_MODE_BLOCKING,
10877 + if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) ||
10878 + !waiter->bulk_waiter.bulk) {
10879 + VCHIQ_BULK_T *bulk = waiter->bulk_waiter.bulk;
10881 + /* Cancel the signal when the transfer
10883 + spin_lock(&bulk_waiter_spinlock);
10884 + bulk->userdata = NULL;
10885 + spin_unlock(&bulk_waiter_spinlock);
10889 + waiter->pid = current->pid;
10890 + mutex_lock(&instance->bulk_waiter_list_mutex);
10891 + list_add(&waiter->list, &instance->bulk_waiter_list);
10892 + mutex_unlock(&instance->bulk_waiter_list_mutex);
10893 + vchiq_log_info(vchiq_arm_log_level,
10894 + "saved bulk_waiter %x for pid %d",
10895 + (unsigned int)waiter, current->pid);
10901 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_memdrv.h
10904 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
10906 + * Redistribution and use in source and binary forms, with or without
10907 + * modification, are permitted provided that the following conditions
10909 + * 1. Redistributions of source code must retain the above copyright
10910 + * notice, this list of conditions, and the following disclaimer,
10911 + * without modification.
10912 + * 2. Redistributions in binary form must reproduce the above copyright
10913 + * notice, this list of conditions and the following disclaimer in the
10914 + * documentation and/or other materials provided with the distribution.
10915 + * 3. The names of the above-listed copyright holders may not be used
10916 + * to endorse or promote products derived from this software without
10917 + * specific prior written permission.
10919 + * ALTERNATIVELY, this software may be distributed under the terms of the
10920 + * GNU General Public License ("GPL") version 2, as published by the Free
10921 + * Software Foundation.
10923 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
10924 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
10925 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
10926 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
10927 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
10928 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
10929 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
10930 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
10931 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
10932 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
10933 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
10936 +#ifndef VCHIQ_MEMDRV_H
10937 +#define VCHIQ_MEMDRV_H
10939 +/* ---- Include Files ----------------------------------------------------- */
10941 +#include <linux/kernel.h>
10942 +#include "vchiq_if.h"
10944 +/* ---- Constants and Types ---------------------------------------------- */
10947 + void *armSharedMemVirt;
10948 + dma_addr_t armSharedMemPhys;
10949 + size_t armSharedMemSize;
10951 + void *vcSharedMemVirt;
10952 + dma_addr_t vcSharedMemPhys;
10953 + size_t vcSharedMemSize;
10954 +} VCHIQ_SHARED_MEM_INFO_T;
10956 +/* ---- Variable Externs ------------------------------------------------- */
10958 +/* ---- Function Prototypes ---------------------------------------------- */
10960 +void vchiq_get_shared_mem_info(VCHIQ_SHARED_MEM_INFO_T *info);
10962 +VCHIQ_STATUS_T vchiq_memdrv_initialise(void);
10964 +VCHIQ_STATUS_T vchiq_userdrv_create_instance(
10965 + const VCHIQ_PLATFORM_DATA_T * platform_data);
10967 +VCHIQ_STATUS_T vchiq_userdrv_suspend(
10968 + const VCHIQ_PLATFORM_DATA_T * platform_data);
10970 +VCHIQ_STATUS_T vchiq_userdrv_resume(
10971 + const VCHIQ_PLATFORM_DATA_T * platform_data);
10975 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_pagelist.h
10978 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
10980 + * Redistribution and use in source and binary forms, with or without
10981 + * modification, are permitted provided that the following conditions
10983 + * 1. Redistributions of source code must retain the above copyright
10984 + * notice, this list of conditions, and the following disclaimer,
10985 + * without modification.
10986 + * 2. Redistributions in binary form must reproduce the above copyright
10987 + * notice, this list of conditions and the following disclaimer in the
10988 + * documentation and/or other materials provided with the distribution.
10989 + * 3. The names of the above-listed copyright holders may not be used
10990 + * to endorse or promote products derived from this software without
10991 + * specific prior written permission.
10993 + * ALTERNATIVELY, this software may be distributed under the terms of the
10994 + * GNU General Public License ("GPL") version 2, as published by the Free
10995 + * Software Foundation.
10997 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
10998 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
10999 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
11000 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
11001 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
11002 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
11003 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
11004 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
11005 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
11006 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
11007 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
11010 +#ifndef VCHIQ_PAGELIST_H
11011 +#define VCHIQ_PAGELIST_H
11014 +#define PAGE_SIZE 4096
11016 +#define CACHE_LINE_SIZE 32
11017 +#define PAGELIST_WRITE 0
11018 +#define PAGELIST_READ 1
11019 +#define PAGELIST_READ_WITH_FRAGMENTS 2
11021 +typedef struct pagelist_struct {
11022 + unsigned long length;
11023 + unsigned short type;
11024 + unsigned short offset;
11025 + unsigned long addrs[1]; /* N.B. 12 LSBs hold the number of following
11026 + pages at consecutive addresses. */
11029 +typedef struct fragments_struct {
11030 + char headbuf[CACHE_LINE_SIZE];
11031 + char tailbuf[CACHE_LINE_SIZE];
11034 +#endif /* VCHIQ_PAGELIST_H */
11036 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_proc.c
11039 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
11041 + * Redistribution and use in source and binary forms, with or without
11042 + * modification, are permitted provided that the following conditions
11044 + * 1. Redistributions of source code must retain the above copyright
11045 + * notice, this list of conditions, and the following disclaimer,
11046 + * without modification.
11047 + * 2. Redistributions in binary form must reproduce the above copyright
11048 + * notice, this list of conditions and the following disclaimer in the
11049 + * documentation and/or other materials provided with the distribution.
11050 + * 3. The names of the above-listed copyright holders may not be used
11051 + * to endorse or promote products derived from this software without
11052 + * specific prior written permission.
11054 + * ALTERNATIVELY, this software may be distributed under the terms of the
11055 + * GNU General Public License ("GPL") version 2, as published by the Free
11056 + * Software Foundation.
11058 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
11059 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
11060 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
11061 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
11062 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
11063 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
11064 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
11065 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
11066 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
11067 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
11068 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
11072 +#include <linux/proc_fs.h>
11073 +#include "vchiq_core.h"
11074 +#include "vchiq_arm.h"
11078 +int vchiq_proc_init(void)
11083 +void vchiq_proc_deinit(void)
11089 +struct vchiq_proc_info {
11090 + /* Global 'vc' proc entry used by all instances */
11091 + struct proc_dir_entry *vc_cfg_dir;
11093 + /* one entry per client process */
11094 + struct proc_dir_entry *clients;
11096 + /* log categories */
11097 + struct proc_dir_entry *log_categories;
11100 +static struct vchiq_proc_info proc_info;
11102 +struct proc_dir_entry *vchiq_proc_top(void)
11104 + BUG_ON(proc_info.vc_cfg_dir == NULL);
11105 + return proc_info.vc_cfg_dir;
11108 +/****************************************************************************
11110 +* log category entries
11112 +***************************************************************************/
11113 +#define PROC_WRITE_BUF_SIZE 256
11115 +#define VCHIQ_LOG_ERROR_STR "error"
11116 +#define VCHIQ_LOG_WARNING_STR "warning"
11117 +#define VCHIQ_LOG_INFO_STR "info"
11118 +#define VCHIQ_LOG_TRACE_STR "trace"
11120 +static int log_cfg_read(char *buffer,
11128 + char *log_value = NULL;
11130 + switch (*((int *)data)) {
11131 + case VCHIQ_LOG_ERROR:
11132 + log_value = VCHIQ_LOG_ERROR_STR;
11134 + case VCHIQ_LOG_WARNING:
11135 + log_value = VCHIQ_LOG_WARNING_STR;
11137 + case VCHIQ_LOG_INFO:
11138 + log_value = VCHIQ_LOG_INFO_STR;
11140 + case VCHIQ_LOG_TRACE:
11141 + log_value = VCHIQ_LOG_TRACE_STR;
11147 + len += sprintf(buffer + len,
11149 + log_value ? log_value : "(null)");
11155 +static int log_cfg_write(struct file *file,
11156 + const char __user *buffer,
11157 + unsigned long count,
11160 + int *log_module = data;
11161 + char kbuf[PROC_WRITE_BUF_SIZE + 1];
11165 + memset(kbuf, 0, PROC_WRITE_BUF_SIZE + 1);
11166 + if (count >= PROC_WRITE_BUF_SIZE)
11167 + count = PROC_WRITE_BUF_SIZE;
11169 + if (copy_from_user(kbuf,
11173 + kbuf[count - 1] = 0;
11175 + if (strncmp("error", kbuf, strlen("error")) == 0)
11176 + *log_module = VCHIQ_LOG_ERROR;
11177 + else if (strncmp("warning", kbuf, strlen("warning")) == 0)
11178 + *log_module = VCHIQ_LOG_WARNING;
11179 + else if (strncmp("info", kbuf, strlen("info")) == 0)
11180 + *log_module = VCHIQ_LOG_INFO;
11181 + else if (strncmp("trace", kbuf, strlen("trace")) == 0)
11182 + *log_module = VCHIQ_LOG_TRACE;
11184 + *log_module = VCHIQ_LOG_DEFAULT;
11189 +/* Log category proc entries */
11190 +struct vchiq_proc_log_entry {
11191 + const char *name;
11193 + struct proc_dir_entry *dir;
11196 +static struct vchiq_proc_log_entry vchiq_proc_log_entries[] = {
11197 + { "core", &vchiq_core_log_level },
11198 + { "msg", &vchiq_core_msg_log_level },
11199 + { "sync", &vchiq_sync_log_level },
11200 + { "susp", &vchiq_susp_log_level },
11201 + { "arm", &vchiq_arm_log_level },
11203 +static int n_log_entries =
11204 + sizeof(vchiq_proc_log_entries)/sizeof(vchiq_proc_log_entries[0]);
11206 +/* create an entry under /proc/vc/log for each log category */
11207 +static int vchiq_proc_create_log_entries(struct proc_dir_entry *top)
11209 + struct proc_dir_entry *dir;
11212 + dir = proc_mkdir("log", proc_info.vc_cfg_dir);
11215 + proc_info.log_categories = dir;
11217 + for (i = 0; i < n_log_entries; i++) {
11218 + dir = create_proc_entry(vchiq_proc_log_entries[i].name,
11220 + proc_info.log_categories);
11226 + dir->read_proc = &log_cfg_read;
11227 + dir->write_proc = &log_cfg_write;
11228 + dir->data = (void *)vchiq_proc_log_entries[i].plevel;
11230 + vchiq_proc_log_entries[i].dir = dir;
11236 +int vchiq_proc_init(void)
11238 + BUG_ON(proc_info.vc_cfg_dir != NULL);
11240 + proc_info.vc_cfg_dir = proc_mkdir("vc", NULL);
11241 + if (proc_info.vc_cfg_dir == NULL)
11244 + proc_info.clients = proc_mkdir("clients",
11245 + proc_info.vc_cfg_dir);
11246 + if (!proc_info.clients)
11249 + if (vchiq_proc_create_log_entries(proc_info.vc_cfg_dir) != 0)
11255 + vchiq_proc_deinit();
11256 + vchiq_log_error(vchiq_arm_log_level,
11257 + "%s: failed to create proc directory",
11263 +/* remove all the proc entries */
11264 +void vchiq_proc_deinit(void)
11266 + /* log category entries */
11267 + if (proc_info.log_categories) {
11269 + for (i = 0; i < n_log_entries; i++)
11270 + if (vchiq_proc_log_entries[i].dir)
11271 + remove_proc_entry(
11272 + vchiq_proc_log_entries[i].name,
11273 + proc_info.log_categories);
11275 + remove_proc_entry(proc_info.log_categories->name,
11276 + proc_info.vc_cfg_dir);
11278 + if (proc_info.clients)
11279 + remove_proc_entry(proc_info.clients->name,
11280 + proc_info.vc_cfg_dir);
11281 + if (proc_info.vc_cfg_dir)
11282 + remove_proc_entry(proc_info.vc_cfg_dir->name, NULL);
11285 +struct proc_dir_entry *vchiq_clients_top(void)
11287 + return proc_info.clients;
11293 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_shim.c
11296 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
11298 + * Redistribution and use in source and binary forms, with or without
11299 + * modification, are permitted provided that the following conditions
11301 + * 1. Redistributions of source code must retain the above copyright
11302 + * notice, this list of conditions, and the following disclaimer,
11303 + * without modification.
11304 + * 2. Redistributions in binary form must reproduce the above copyright
11305 + * notice, this list of conditions and the following disclaimer in the
11306 + * documentation and/or other materials provided with the distribution.
11307 + * 3. The names of the above-listed copyright holders may not be used
11308 + * to endorse or promote products derived from this software without
11309 + * specific prior written permission.
11311 + * ALTERNATIVELY, this software may be distributed under the terms of the
11312 + * GNU General Public License ("GPL") version 2, as published by the Free
11313 + * Software Foundation.
11315 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
11316 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
11317 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
11318 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
11319 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
11320 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
11321 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
11322 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
11323 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
11324 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
11325 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
11327 +#include <linux/module.h>
11328 +#include <linux/types.h>
11330 +#include "interface/vchi/vchi.h"
11331 +#include "vchiq.h"
11332 +#include "vchiq_core.h"
11334 +#include "vchiq_util.h"
11336 +#include <stddef.h>
11338 +#define vchiq_status_to_vchi(status) ((int32_t)status)
11341 + VCHIQ_SERVICE_HANDLE_T handle;
11343 + VCHIU_QUEUE_T queue;
11345 + VCHI_CALLBACK_T callback;
11346 + void *callback_param;
11349 +/* ----------------------------------------------------------------------
11350 + * return pointer to the mphi message driver function table
11351 + * -------------------------------------------------------------------- */
11352 +const VCHI_MESSAGE_DRIVER_T *
11353 +vchi_mphi_message_driver_func_table(void)
11358 +/* ----------------------------------------------------------------------
11359 + * return a pointer to the 'single' connection driver fops
11360 + * -------------------------------------------------------------------- */
11361 +const VCHI_CONNECTION_API_T *
11362 +single_get_func_table(void)
11367 +VCHI_CONNECTION_T *vchi_create_connection(
11368 + const VCHI_CONNECTION_API_T *function_table,
11369 + const VCHI_MESSAGE_DRIVER_T *low_level)
11371 + (void)function_table;
11376 +/***********************************************************
11377 + * Name: vchi_msg_peek
11379 + * Arguments: const VCHI_SERVICE_HANDLE_T handle,
11381 + * uint32_t *msg_size,
11384 + * VCHI_FLAGS_T flags
11386 + * Description: Routine to return a pointer to the current message (to allow in
11387 + * place processing). The message can be removed using
11388 + * vchi_msg_remove when you're finished
11390 + * Returns: int32_t - success == 0
11392 + ***********************************************************/
11393 +int32_t vchi_msg_peek(VCHI_SERVICE_HANDLE_T handle,
11395 + uint32_t *msg_size,
11396 + VCHI_FLAGS_T flags)
11398 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
11399 + VCHIQ_HEADER_T *header;
11401 + WARN_ON((flags != VCHI_FLAGS_NONE) &&
11402 + (flags != VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE));
11404 + if (flags == VCHI_FLAGS_NONE)
11405 + if (vchiu_queue_is_empty(&service->queue))
11408 + header = vchiu_queue_peek(&service->queue);
11410 + *data = header->data;
11411 + *msg_size = header->size;
11415 +EXPORT_SYMBOL(vchi_msg_peek);
11417 +/***********************************************************
11418 + * Name: vchi_msg_remove
11420 + * Arguments: const VCHI_SERVICE_HANDLE_T handle,
11422 + * Description: Routine to remove a message (after it has been read with
11425 + * Returns: int32_t - success == 0
11427 + ***********************************************************/
11428 +int32_t vchi_msg_remove(VCHI_SERVICE_HANDLE_T handle)
11430 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
11431 + VCHIQ_HEADER_T *header;
11433 + header = vchiu_queue_pop(&service->queue);
11435 + vchiq_release_message(service->handle, header);
11439 +EXPORT_SYMBOL(vchi_msg_remove);
11441 +/***********************************************************
11442 + * Name: vchi_msg_queue
11444 + * Arguments: VCHI_SERVICE_HANDLE_T handle,
11445 + * const void *data,
11446 + * uint32_t data_size,
11447 + * VCHI_FLAGS_T flags,
11448 + * void *msg_handle,
11450 + * Description: Thin wrapper to queue a message onto a connection
11452 + * Returns: int32_t - success == 0
11454 + ***********************************************************/
11455 +int32_t vchi_msg_queue(VCHI_SERVICE_HANDLE_T handle,
11456 + const void *data,
11457 + uint32_t data_size,
11458 + VCHI_FLAGS_T flags,
11459 + void *msg_handle)
11461 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
11462 + VCHIQ_ELEMENT_T element = {data, data_size};
11463 + VCHIQ_STATUS_T status;
11465 + (void)msg_handle;
11467 + WARN_ON(flags != VCHI_FLAGS_BLOCK_UNTIL_QUEUED);
11469 + status = vchiq_queue_message(service->handle, &element, 1);
11471 + /* vchiq_queue_message() may return VCHIQ_RETRY, so we need to
11472 + ** implement a retry mechanism since this function is supposed
11473 + ** to block until queued
11475 + while (status == VCHIQ_RETRY) {
11477 + status = vchiq_queue_message(service->handle, &element, 1);
11480 + return vchiq_status_to_vchi(status);
11482 +EXPORT_SYMBOL(vchi_msg_queue);
11484 +/***********************************************************
11485 + * Name: vchi_bulk_queue_receive
11487 + * Arguments: VCHI_BULK_HANDLE_T handle,
11488 + * void *data_dst,
11489 + * const uint32_t data_size,
11490 + * VCHI_FLAGS_T flags
11491 + * void *bulk_handle
11493 + * Description: Routine to setup a rcv buffer
11495 + * Returns: int32_t - success == 0
11497 + ***********************************************************/
11498 +int32_t vchi_bulk_queue_receive(VCHI_SERVICE_HANDLE_T handle,
11500 + uint32_t data_size,
11501 + VCHI_FLAGS_T flags,
11502 + void *bulk_handle)
11504 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
11505 + VCHIQ_BULK_MODE_T mode;
11506 + VCHIQ_STATUS_T status;
11508 + switch ((int)flags) {
11509 + case VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE
11510 + | VCHI_FLAGS_BLOCK_UNTIL_QUEUED:
11511 + WARN_ON(!service->callback);
11512 + mode = VCHIQ_BULK_MODE_CALLBACK;
11514 + case VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE:
11515 + mode = VCHIQ_BULK_MODE_BLOCKING;
11517 + case VCHI_FLAGS_BLOCK_UNTIL_QUEUED:
11518 + case VCHI_FLAGS_NONE:
11519 + mode = VCHIQ_BULK_MODE_NOCALLBACK;
11522 + WARN(1, "unsupported message\n");
11523 + return vchiq_status_to_vchi(VCHIQ_ERROR);
11526 + status = vchiq_bulk_receive(service->handle, data_dst, data_size,
11527 + bulk_handle, mode);
11529 + /* vchiq_bulk_receive() may return VCHIQ_RETRY, so we need to
11530 + ** implement a retry mechanism since this function is supposed
11531 + ** to block until queued
11533 + while (status == VCHIQ_RETRY) {
11535 + status = vchiq_bulk_receive(service->handle, data_dst,
11536 + data_size, bulk_handle, mode);
11539 + return vchiq_status_to_vchi(status);
11541 +EXPORT_SYMBOL(vchi_bulk_queue_receive);
11543 +/***********************************************************
11544 + * Name: vchi_bulk_queue_transmit
11546 + * Arguments: VCHI_BULK_HANDLE_T handle,
11547 + * const void *data_src,
11548 + * uint32_t data_size,
11549 + * VCHI_FLAGS_T flags,
11550 + * void *bulk_handle
11552 + * Description: Routine to transmit some data
11554 + * Returns: int32_t - success == 0
11556 + ***********************************************************/
11557 +int32_t vchi_bulk_queue_transmit(VCHI_SERVICE_HANDLE_T handle,
11558 + const void *data_src,
11559 + uint32_t data_size,
11560 + VCHI_FLAGS_T flags,
11561 + void *bulk_handle)
11563 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
11564 + VCHIQ_BULK_MODE_T mode;
11565 + VCHIQ_STATUS_T status;
11567 + switch ((int)flags) {
11568 + case VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE
11569 + | VCHI_FLAGS_BLOCK_UNTIL_QUEUED:
11570 + WARN_ON(!service->callback);
11571 + mode = VCHIQ_BULK_MODE_CALLBACK;
11573 + case VCHI_FLAGS_BLOCK_UNTIL_DATA_READ:
11574 + case VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE:
11575 + mode = VCHIQ_BULK_MODE_BLOCKING;
11577 + case VCHI_FLAGS_BLOCK_UNTIL_QUEUED:
11578 + case VCHI_FLAGS_NONE:
11579 + mode = VCHIQ_BULK_MODE_NOCALLBACK;
11582 + WARN(1, "unsupported message\n");
11583 + return vchiq_status_to_vchi(VCHIQ_ERROR);
11586 + status = vchiq_bulk_transmit(service->handle, data_src, data_size,
11587 + bulk_handle, mode);
11589 + /* vchiq_bulk_transmit() may return VCHIQ_RETRY, so we need to
11590 + ** implement a retry mechanism since this function is supposed
11591 + ** to block until queued
11593 + while (status == VCHIQ_RETRY) {
11595 + status = vchiq_bulk_transmit(service->handle, data_src,
11596 + data_size, bulk_handle, mode);
11599 + return vchiq_status_to_vchi(status);
11601 +EXPORT_SYMBOL(vchi_bulk_queue_transmit);
11603 +/***********************************************************
11604 + * Name: vchi_msg_dequeue
11606 + * Arguments: VCHI_SERVICE_HANDLE_T handle,
11608 + * uint32_t max_data_size_to_read,
11609 + * uint32_t *actual_msg_size
11610 + * VCHI_FLAGS_T flags
11612 + * Description: Routine to dequeue a message into the supplied buffer
11614 + * Returns: int32_t - success == 0
11616 + ***********************************************************/
11617 +int32_t vchi_msg_dequeue(VCHI_SERVICE_HANDLE_T handle,
11619 + uint32_t max_data_size_to_read,
11620 + uint32_t *actual_msg_size,
11621 + VCHI_FLAGS_T flags)
11623 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
11624 + VCHIQ_HEADER_T *header;
11626 + WARN_ON((flags != VCHI_FLAGS_NONE) &&
11627 + (flags != VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE));
11629 + if (flags == VCHI_FLAGS_NONE)
11630 + if (vchiu_queue_is_empty(&service->queue))
11633 + header = vchiu_queue_pop(&service->queue);
11635 + memcpy(data, header->data, header->size < max_data_size_to_read ?
11636 + header->size : max_data_size_to_read);
11638 + *actual_msg_size = header->size;
11640 + vchiq_release_message(service->handle, header);
11644 +EXPORT_SYMBOL(vchi_msg_dequeue);
11646 +/***********************************************************
11647 + * Name: vchi_msg_queuev
11649 + * Arguments: VCHI_SERVICE_HANDLE_T handle,
11650 + * VCHI_MSG_VECTOR_T *vector,
11651 + * uint32_t count,
11652 + * VCHI_FLAGS_T flags,
11653 + * void *msg_handle
11655 + * Description: Thin wrapper to queue a message onto a connection
11657 + * Returns: int32_t - success == 0
11659 + ***********************************************************/
11661 +vchiq_static_assert(sizeof(VCHI_MSG_VECTOR_T) == sizeof(VCHIQ_ELEMENT_T));
11662 +vchiq_static_assert(offsetof(VCHI_MSG_VECTOR_T, vec_base) ==
11663 + offsetof(VCHIQ_ELEMENT_T, data));
11664 +vchiq_static_assert(offsetof(VCHI_MSG_VECTOR_T, vec_len) ==
11665 + offsetof(VCHIQ_ELEMENT_T, size));
11667 +int32_t vchi_msg_queuev(VCHI_SERVICE_HANDLE_T handle,
11668 + VCHI_MSG_VECTOR_T *vector,
11670 + VCHI_FLAGS_T flags,
11671 + void *msg_handle)
11673 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
11675 + (void)msg_handle;
11677 + WARN_ON(flags != VCHI_FLAGS_BLOCK_UNTIL_QUEUED);
11679 + return vchiq_status_to_vchi(vchiq_queue_message(service->handle,
11680 + (const VCHIQ_ELEMENT_T *)vector, count));
11682 +EXPORT_SYMBOL(vchi_msg_queuev);
11684 +/***********************************************************
11685 + * Name: vchi_held_msg_release
11687 + * Arguments: VCHI_HELD_MSG_T *message
11689 + * Description: Routine to release a held message (after it has been read with
11692 + * Returns: int32_t - success == 0
11694 + ***********************************************************/
11695 +int32_t vchi_held_msg_release(VCHI_HELD_MSG_T *message)
11697 + vchiq_release_message((VCHIQ_SERVICE_HANDLE_T)message->service,
11698 + (VCHIQ_HEADER_T *)message->message);
11703 +/***********************************************************
11704 + * Name: vchi_msg_hold
11706 + * Arguments: VCHI_SERVICE_HANDLE_T handle,
11708 + * uint32_t *msg_size,
11709 + * VCHI_FLAGS_T flags,
11710 + * VCHI_HELD_MSG_T *message_handle
11712 + * Description: Routine to return a pointer to the current message (to allow
11713 + * in place processing). The message is dequeued - don't forget
11714 + * to release the message using vchi_held_msg_release when you're
11717 + * Returns: int32_t - success == 0
11719 + ***********************************************************/
11720 +int32_t vchi_msg_hold(VCHI_SERVICE_HANDLE_T handle,
11722 + uint32_t *msg_size,
11723 + VCHI_FLAGS_T flags,
11724 + VCHI_HELD_MSG_T *message_handle)
11726 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
11727 + VCHIQ_HEADER_T *header;
11729 + WARN_ON((flags != VCHI_FLAGS_NONE) &&
11730 + (flags != VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE));
11732 + if (flags == VCHI_FLAGS_NONE)
11733 + if (vchiu_queue_is_empty(&service->queue))
11736 + header = vchiu_queue_pop(&service->queue);
11738 + *data = header->data;
11739 + *msg_size = header->size;
11741 + message_handle->service =
11742 + (struct opaque_vchi_service_t *)service->handle;
11743 + message_handle->message = header;
11748 +/***********************************************************
11749 + * Name: vchi_initialise
11751 + * Arguments: VCHI_INSTANCE_T *instance_handle
11752 + * VCHI_CONNECTION_T **connections
11753 + * const uint32_t num_connections
11755 + * Description: Initialises the hardware but does not transmit anything
11756 + * When run as a Host App this will be called twice hence the need
11757 + * to malloc the state information
11759 + * Returns: 0 if successful, failure otherwise
11761 + ***********************************************************/
11763 +int32_t vchi_initialise(VCHI_INSTANCE_T *instance_handle)
11765 + VCHIQ_INSTANCE_T instance;
11766 + VCHIQ_STATUS_T status;
11768 + status = vchiq_initialise(&instance);
11770 + *instance_handle = (VCHI_INSTANCE_T)instance;
11772 + return vchiq_status_to_vchi(status);
11774 +EXPORT_SYMBOL(vchi_initialise);
11776 +/***********************************************************
11777 + * Name: vchi_connect
11779 + * Arguments: VCHI_CONNECTION_T **connections
11780 + * const uint32_t num_connections
11781 + * VCHI_INSTANCE_T instance_handle)
11783 + * Description: Starts the command service on each connection,
11784 + * causing INIT messages to be pinged back and forth
11786 + * Returns: 0 if successful, failure otherwise
11788 + ***********************************************************/
11789 +int32_t vchi_connect(VCHI_CONNECTION_T **connections,
11790 + const uint32_t num_connections,
11791 + VCHI_INSTANCE_T instance_handle)
11793 + VCHIQ_INSTANCE_T instance = (VCHIQ_INSTANCE_T)instance_handle;
11795 + (void)connections;
11796 + (void)num_connections;
11798 + return vchiq_connect(instance);
11800 +EXPORT_SYMBOL(vchi_connect);
11803 +/***********************************************************
11804 + * Name: vchi_disconnect
11806 + * Arguments: VCHI_INSTANCE_T instance_handle
11808 + * Description: Stops the command service on each connection,
11809 + * causing DE-INIT messages to be pinged back and forth
11811 + * Returns: 0 if successful, failure otherwise
11813 + ***********************************************************/
11814 +int32_t vchi_disconnect(VCHI_INSTANCE_T instance_handle)
11816 + VCHIQ_INSTANCE_T instance = (VCHIQ_INSTANCE_T)instance_handle;
11817 + return vchiq_status_to_vchi(vchiq_shutdown(instance));
11819 +EXPORT_SYMBOL(vchi_disconnect);
11822 +/***********************************************************
11823 + * Name: vchi_service_open
11824 + * Name: vchi_service_create
11826 + * Arguments: VCHI_INSTANCE_T *instance_handle
11827 + * SERVICE_CREATION_T *setup,
11828 + * VCHI_SERVICE_HANDLE_T *handle
11830 + * Description: Routine to open a service
11832 + * Returns: int32_t - success == 0
11834 + ***********************************************************/
11836 +static VCHIQ_STATUS_T shim_callback(VCHIQ_REASON_T reason,
11837 + VCHIQ_HEADER_T *header, VCHIQ_SERVICE_HANDLE_T handle, void *bulk_user)
11839 + SHIM_SERVICE_T *service =
11840 + (SHIM_SERVICE_T *)VCHIQ_GET_SERVICE_USERDATA(handle);
11842 + switch (reason) {
11843 + case VCHIQ_MESSAGE_AVAILABLE:
11844 + vchiu_queue_push(&service->queue, header);
11846 + if (service->callback)
11847 + service->callback(service->callback_param,
11848 + VCHI_CALLBACK_MSG_AVAILABLE, NULL);
11850 + case VCHIQ_BULK_TRANSMIT_DONE:
11851 + if (service->callback)
11852 + service->callback(service->callback_param,
11853 + VCHI_CALLBACK_BULK_SENT, bulk_user);
11855 + case VCHIQ_BULK_RECEIVE_DONE:
11856 + if (service->callback)
11857 + service->callback(service->callback_param,
11858 + VCHI_CALLBACK_BULK_RECEIVED, bulk_user);
11860 + case VCHIQ_SERVICE_CLOSED:
11861 + if (service->callback)
11862 + service->callback(service->callback_param,
11863 + VCHI_CALLBACK_SERVICE_CLOSED, NULL);
11865 + case VCHIQ_SERVICE_OPENED:
11866 + /* No equivalent VCHI reason */
11868 + case VCHIQ_BULK_TRANSMIT_ABORTED:
11869 + if (service->callback)
11870 + service->callback(service->callback_param,
11871 + VCHI_CALLBACK_BULK_TRANSMIT_ABORTED, bulk_user);
11873 + case VCHIQ_BULK_RECEIVE_ABORTED:
11874 + if (service->callback)
11875 + service->callback(service->callback_param,
11876 + VCHI_CALLBACK_BULK_RECEIVE_ABORTED, bulk_user);
11879 + WARN(1, "not supported\n");
11883 + return VCHIQ_SUCCESS;
11886 +static SHIM_SERVICE_T *service_alloc(VCHIQ_INSTANCE_T instance,
11887 + SERVICE_CREATION_T *setup)
11889 + SHIM_SERVICE_T *service = kzalloc(sizeof(SHIM_SERVICE_T), GFP_KERNEL);
11894 + if (vchiu_queue_init(&service->queue, 64)) {
11895 + service->callback = setup->callback;
11896 + service->callback_param = setup->callback_param;
11906 +static void service_free(SHIM_SERVICE_T *service)
11909 + vchiu_queue_delete(&service->queue);
11914 +int32_t vchi_service_open(VCHI_INSTANCE_T instance_handle,
11915 + SERVICE_CREATION_T *setup,
11916 + VCHI_SERVICE_HANDLE_T *handle)
11918 + VCHIQ_INSTANCE_T instance = (VCHIQ_INSTANCE_T)instance_handle;
11919 + SHIM_SERVICE_T *service = service_alloc(instance, setup);
11921 + VCHIQ_SERVICE_PARAMS_T params;
11922 + VCHIQ_STATUS_T status;
11924 + memset(&params, 0, sizeof(params));
11925 + params.fourcc = setup->service_id;
11926 + params.callback = shim_callback;
11927 + params.userdata = service;
11928 + params.version = setup->version.version;
11929 + params.version_min = setup->version.version_min;
11931 + status = vchiq_open_service(instance, &params,
11932 + &service->handle);
11933 + if (status != VCHIQ_SUCCESS) {
11934 + service_free(service);
11939 + *handle = (VCHI_SERVICE_HANDLE_T)service;
11941 + return (service != NULL) ? 0 : -1;
11943 +EXPORT_SYMBOL(vchi_service_open);
11945 +int32_t vchi_service_create(VCHI_INSTANCE_T instance_handle,
11946 + SERVICE_CREATION_T *setup,
11947 + VCHI_SERVICE_HANDLE_T *handle)
11949 + VCHIQ_INSTANCE_T instance = (VCHIQ_INSTANCE_T)instance_handle;
11950 + SHIM_SERVICE_T *service = service_alloc(instance, setup);
11952 + VCHIQ_SERVICE_PARAMS_T params;
11953 + VCHIQ_STATUS_T status;
11955 + memset(&params, 0, sizeof(params));
11956 + params.fourcc = setup->service_id;
11957 + params.callback = shim_callback;
11958 + params.userdata = service;
11959 + params.version = setup->version.version;
11960 + params.version_min = setup->version.version_min;
11961 + status = vchiq_add_service(instance, &params, &service->handle);
11963 + if (status != VCHIQ_SUCCESS) {
11964 + service_free(service);
11969 + *handle = (VCHI_SERVICE_HANDLE_T)service;
11971 + return (service != NULL) ? 0 : -1;
11973 +EXPORT_SYMBOL(vchi_service_create);
11975 +int32_t vchi_service_close(const VCHI_SERVICE_HANDLE_T handle)
11977 + int32_t ret = -1;
11978 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
11980 + VCHIQ_STATUS_T status = vchiq_close_service(service->handle);
11981 + if (status == VCHIQ_SUCCESS) {
11982 + service_free(service);
11986 + ret = vchiq_status_to_vchi(status);
11990 +EXPORT_SYMBOL(vchi_service_close);
11992 +int32_t vchi_service_destroy(const VCHI_SERVICE_HANDLE_T handle)
11994 + int32_t ret = -1;
11995 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
11997 + VCHIQ_STATUS_T status = vchiq_remove_service(service->handle);
11998 + if (status == VCHIQ_SUCCESS) {
11999 + service_free(service);
12003 + ret = vchiq_status_to_vchi(status);
12007 +EXPORT_SYMBOL(vchi_service_destroy);
12009 +int32_t vchi_get_peer_version( const VCHI_SERVICE_HANDLE_T handle, short *peer_version )
12011 + int32_t ret = -1;
12012 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
12015 + VCHIQ_STATUS_T status = vchiq_get_peer_version(service->handle, peer_version);
12016 + ret = vchiq_status_to_vchi( status );
12020 +EXPORT_SYMBOL(vchi_get_peer_version);
12022 +/* ----------------------------------------------------------------------
12023 + * read a uint32_t from buffer.
12024 + * network format is defined to be little endian
12025 + * -------------------------------------------------------------------- */
12027 +vchi_readbuf_uint32(const void *_ptr)
12029 + const unsigned char *ptr = _ptr;
12030 + return ptr[0] | (ptr[1] << 8) | (ptr[2] << 16) | (ptr[3] << 24);
12033 +/* ----------------------------------------------------------------------
12034 + * write a uint32_t to buffer.
12035 + * network format is defined to be little endian
12036 + * -------------------------------------------------------------------- */
12038 +vchi_writebuf_uint32(void *_ptr, uint32_t value)
12040 + unsigned char *ptr = _ptr;
12041 + ptr[0] = (unsigned char)((value >> 0) & 0xFF);
12042 + ptr[1] = (unsigned char)((value >> 8) & 0xFF);
12043 + ptr[2] = (unsigned char)((value >> 16) & 0xFF);
12044 + ptr[3] = (unsigned char)((value >> 24) & 0xFF);
12047 +/* ----------------------------------------------------------------------
12048 + * read a uint16_t from buffer.
12049 + * network format is defined to be little endian
12050 + * -------------------------------------------------------------------- */
12052 +vchi_readbuf_uint16(const void *_ptr)
12054 + const unsigned char *ptr = _ptr;
12055 + return ptr[0] | (ptr[1] << 8);
12058 +/* ----------------------------------------------------------------------
12059 + * write a uint16_t into the buffer.
12060 + * network format is defined to be little endian
12061 + * -------------------------------------------------------------------- */
12063 +vchi_writebuf_uint16(void *_ptr, uint16_t value)
12065 + unsigned char *ptr = _ptr;
12066 + ptr[0] = (value >> 0) & 0xFF;
12067 + ptr[1] = (value >> 8) & 0xFF;
12070 +/***********************************************************
12071 + * Name: vchi_service_use
12073 + * Arguments: const VCHI_SERVICE_HANDLE_T handle
12075 + * Description: Routine to increment refcount on a service
12079 + ***********************************************************/
12080 +int32_t vchi_service_use(const VCHI_SERVICE_HANDLE_T handle)
12082 + int32_t ret = -1;
12083 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
12085 + ret = vchiq_status_to_vchi(vchiq_use_service(service->handle));
12088 +EXPORT_SYMBOL(vchi_service_use);
12090 +/***********************************************************
12091 + * Name: vchi_service_release
12093 + * Arguments: const VCHI_SERVICE_HANDLE_T handle
12095 + * Description: Routine to decrement refcount on a service
12099 + ***********************************************************/
12100 +int32_t vchi_service_release(const VCHI_SERVICE_HANDLE_T handle)
12102 + int32_t ret = -1;
12103 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
12105 + ret = vchiq_status_to_vchi(
12106 + vchiq_release_service(service->handle));
12109 +EXPORT_SYMBOL(vchi_service_release);
12111 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.c
12114 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
12116 + * Redistribution and use in source and binary forms, with or without
12117 + * modification, are permitted provided that the following conditions
12119 + * 1. Redistributions of source code must retain the above copyright
12120 + * notice, this list of conditions, and the following disclaimer,
12121 + * without modification.
12122 + * 2. Redistributions in binary form must reproduce the above copyright
12123 + * notice, this list of conditions and the following disclaimer in the
12124 + * documentation and/or other materials provided with the distribution.
12125 + * 3. The names of the above-listed copyright holders may not be used
12126 + * to endorse or promote products derived from this software without
12127 + * specific prior written permission.
12129 + * ALTERNATIVELY, this software may be distributed under the terms of the
12130 + * GNU General Public License ("GPL") version 2, as published by the Free
12131 + * Software Foundation.
12133 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
12134 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
12135 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
12136 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
12137 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
12138 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
12139 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
12140 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
12141 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
12142 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
12143 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
12146 +#include "vchiq_util.h"
12148 +static inline int is_pow2(int i)
12150 + return i && !(i & (i - 1));
12153 +int vchiu_queue_init(VCHIU_QUEUE_T *queue, int size)
12155 + WARN_ON(!is_pow2(size));
12157 + queue->size = size;
12159 + queue->write = 0;
12161 + sema_init(&queue->pop, 0);
12162 + sema_init(&queue->push, 0);
12164 + queue->storage = kzalloc(size * sizeof(VCHIQ_HEADER_T *), GFP_KERNEL);
12165 + if (queue->storage == NULL) {
12166 + vchiu_queue_delete(queue);
12172 +void vchiu_queue_delete(VCHIU_QUEUE_T *queue)
12174 + if (queue->storage != NULL)
12175 + kfree(queue->storage);
12178 +int vchiu_queue_is_empty(VCHIU_QUEUE_T *queue)
12180 + return queue->read == queue->write;
12183 +int vchiu_queue_is_full(VCHIU_QUEUE_T *queue)
12185 + return queue->write == queue->read + queue->size;
12188 +void vchiu_queue_push(VCHIU_QUEUE_T *queue, VCHIQ_HEADER_T *header)
12190 + while (queue->write == queue->read + queue->size) {
12191 + if (down_interruptible(&queue->pop) != 0) {
12192 + flush_signals(current);
12197 + * Write to queue->storage must be visible after read from
12202 + queue->storage[queue->write & (queue->size - 1)] = header;
12205 + * Write to queue->storage must be visible before write to
12212 + up(&queue->push);
12215 +VCHIQ_HEADER_T *vchiu_queue_peek(VCHIU_QUEUE_T *queue)
12217 + while (queue->write == queue->read) {
12218 + if (down_interruptible(&queue->push) != 0) {
12219 + flush_signals(current);
12223 + up(&queue->push); // We haven't removed anything from the queue.
12226 + * Read from queue->storage must be visible after read from
12231 + return queue->storage[queue->read & (queue->size - 1)];
12234 +VCHIQ_HEADER_T *vchiu_queue_pop(VCHIU_QUEUE_T *queue)
12236 + VCHIQ_HEADER_T *header;
12238 + while (queue->write == queue->read) {
12239 + if (down_interruptible(&queue->push) != 0) {
12240 + flush_signals(current);
12245 + * Read from queue->storage must be visible after read from
12250 + header = queue->storage[queue->read & (queue->size - 1)];
12253 + * Read from queue->storage must be visible before write to
12265 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.h
12268 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
12270 + * Redistribution and use in source and binary forms, with or without
12271 + * modification, are permitted provided that the following conditions
12273 + * 1. Redistributions of source code must retain the above copyright
12274 + * notice, this list of conditions, and the following disclaimer,
12275 + * without modification.
12276 + * 2. Redistributions in binary form must reproduce the above copyright
12277 + * notice, this list of conditions and the following disclaimer in the
12278 + * documentation and/or other materials provided with the distribution.
12279 + * 3. The names of the above-listed copyright holders may not be used
12280 + * to endorse or promote products derived from this software without
12281 + * specific prior written permission.
12283 + * ALTERNATIVELY, this software may be distributed under the terms of the
12284 + * GNU General Public License ("GPL") version 2, as published by the Free
12285 + * Software Foundation.
12287 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
12288 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
12289 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
12290 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
12291 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
12292 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
12293 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
12294 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
12295 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
12296 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
12297 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
12300 +#ifndef VCHIQ_UTIL_H
12301 +#define VCHIQ_UTIL_H
12303 +#include <linux/types.h>
12304 +#include <linux/semaphore.h>
12305 +#include <linux/mutex.h>
12306 +#include <linux/bitops.h>
12307 +#include <linux/kthread.h>
12308 +#include <linux/wait.h>
12309 +#include <linux/vmalloc.h>
12310 +#include <linux/jiffies.h>
12311 +#include <linux/delay.h>
12312 +#include <linux/string.h>
12313 +#include <linux/types.h>
12314 +#include <linux/interrupt.h>
12315 +#include <linux/random.h>
12316 +#include <linux/sched.h>
12317 +#include <linux/ctype.h>
12318 +#include <linux/uaccess.h>
12319 +#include <linux/time.h> /* for time_t */
12320 +#include <linux/slab.h>
12321 +#include <linux/vmalloc.h>
12323 +#include "vchiq_if.h"
12330 + struct semaphore pop;
12331 + struct semaphore push;
12333 + VCHIQ_HEADER_T **storage;
12336 +extern int vchiu_queue_init(VCHIU_QUEUE_T *queue, int size);
12337 +extern void vchiu_queue_delete(VCHIU_QUEUE_T *queue);
12339 +extern int vchiu_queue_is_empty(VCHIU_QUEUE_T *queue);
12340 +extern int vchiu_queue_is_full(VCHIU_QUEUE_T *queue);
12342 +extern void vchiu_queue_push(VCHIU_QUEUE_T *queue, VCHIQ_HEADER_T *header);
12344 +extern VCHIQ_HEADER_T *vchiu_queue_peek(VCHIU_QUEUE_T *queue);
12345 +extern VCHIQ_HEADER_T *vchiu_queue_pop(VCHIU_QUEUE_T *queue);
12350 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_version.c
12353 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
12355 + * Redistribution and use in source and binary forms, with or without
12356 + * modification, are permitted provided that the following conditions
12358 + * 1. Redistributions of source code must retain the above copyright
12359 + * notice, this list of conditions, and the following disclaimer,
12360 + * without modification.
12361 + * 2. Redistributions in binary form must reproduce the above copyright
12362 + * notice, this list of conditions and the following disclaimer in the
12363 + * documentation and/or other materials provided with the distribution.
12364 + * 3. The names of the above-listed copyright holders may not be used
12365 + * to endorse or promote products derived from this software without
12366 + * specific prior written permission.
12368 + * ALTERNATIVELY, this software may be distributed under the terms of the
12369 + * GNU General Public License ("GPL") version 2, as published by the Free
12370 + * Software Foundation.
12372 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
12373 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
12374 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
12375 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
12376 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
12377 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
12378 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
12379 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
12380 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
12381 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
12382 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
12384 +#include "vchiq_build_info.h"
12385 +#include <linux/broadcom/vc_debug_sym.h>
12387 +VC_DEBUG_DECLARE_STRING_VAR( vchiq_build_hostname, "dc4-arm-01" );
12388 +VC_DEBUG_DECLARE_STRING_VAR( vchiq_build_version, "9245b4c35b99b3870e1f7dc598c5692b3c66a6f0 (tainted)" );
12389 +VC_DEBUG_DECLARE_STRING_VAR( vchiq_build_time, __TIME__ );
12390 +VC_DEBUG_DECLARE_STRING_VAR( vchiq_build_date, __DATE__ );
12392 +const char *vchiq_get_build_hostname( void )
12394 + return vchiq_build_hostname;
12397 +const char *vchiq_get_build_version( void )
12399 + return vchiq_build_version;
12402 +const char *vchiq_get_build_date( void )
12404 + return vchiq_build_date;
12407 +const char *vchiq_get_build_time( void )
12409 + return vchiq_build_time;