openwrt/openwrt.git: target/linux/brcm2708/patches-3.14/0006-bcm2708-vchiq-driver.patch
1 From c9e2d1daa32fd2267d3a61ae3afc2f429746a01f Mon Sep 17 00:00:00 2001
2 From: popcornmix <popcornmix@gmail.com>
3 Date: Tue, 2 Jul 2013 23:42:01 +0100
4 Subject: [PATCH 06/54] bcm2708 vchiq driver
5
6 Signed-off-by: popcornmix <popcornmix@gmail.com>
7
8 vchiq: create_pagelist copes with vmalloc memory
9
10 Signed-off-by: Daniel Stone <daniels@collabora.com>
11
12 vchiq: fix the shim message release
13
14 Signed-off-by: Daniel Stone <daniels@collabora.com>
15
16 vchiq: export additional symbols
17
18 Signed-off-by: Daniel Stone <daniels@collabora.com>
19 ---
20 drivers/misc/Kconfig | 1 +
21 drivers/misc/Makefile | 1 +
22 drivers/misc/vc04_services/Kconfig | 9 +
23 drivers/misc/vc04_services/Makefile | 17 +
24 .../interface/vchi/connections/connection.h | 328 ++
25 .../interface/vchi/message_drivers/message.h | 204 ++
26 drivers/misc/vc04_services/interface/vchi/vchi.h | 373 ++
27 .../misc/vc04_services/interface/vchi/vchi_cfg.h | 224 ++
28 .../interface/vchi/vchi_cfg_internal.h | 71 +
29 .../vc04_services/interface/vchi/vchi_common.h | 163 +
30 .../misc/vc04_services/interface/vchi/vchi_mh.h | 42 +
31 .../misc/vc04_services/interface/vchiq_arm/vchiq.h | 40 +
32 .../vc04_services/interface/vchiq_arm/vchiq_2835.h | 42 +
33 .../interface/vchiq_arm/vchiq_2835_arm.c | 561 +++
34 .../vc04_services/interface/vchiq_arm/vchiq_arm.c | 2813 ++++++++++++++
35 .../vc04_services/interface/vchiq_arm/vchiq_arm.h | 212 ++
36 .../interface/vchiq_arm/vchiq_build_info.h | 37 +
37 .../vc04_services/interface/vchiq_arm/vchiq_cfg.h | 60 +
38 .../interface/vchiq_arm/vchiq_connected.c | 119 +
39 .../interface/vchiq_arm/vchiq_connected.h | 50 +
40 .../vc04_services/interface/vchiq_arm/vchiq_core.c | 3824 ++++++++++++++++++++
41 .../vc04_services/interface/vchiq_arm/vchiq_core.h | 706 ++++
42 .../interface/vchiq_arm/vchiq_genversion | 87 +
43 .../vc04_services/interface/vchiq_arm/vchiq_if.h | 188 +
44 .../interface/vchiq_arm/vchiq_ioctl.h | 129 +
45 .../interface/vchiq_arm/vchiq_kern_lib.c | 456 +++
46 .../interface/vchiq_arm/vchiq_memdrv.h | 71 +
47 .../interface/vchiq_arm/vchiq_pagelist.h | 58 +
48 .../vc04_services/interface/vchiq_arm/vchiq_proc.c | 253 ++
49 .../vc04_services/interface/vchiq_arm/vchiq_shim.c | 828 +++++
50 .../vc04_services/interface/vchiq_arm/vchiq_util.c | 151 +
51 .../vc04_services/interface/vchiq_arm/vchiq_util.h | 81 +
52 .../interface/vchiq_arm/vchiq_version.c | 59 +
53 33 files changed, 12258 insertions(+)
54 create mode 100644 drivers/misc/vc04_services/Kconfig
55 create mode 100644 drivers/misc/vc04_services/Makefile
56 create mode 100644 drivers/misc/vc04_services/interface/vchi/connections/connection.h
57 create mode 100644 drivers/misc/vc04_services/interface/vchi/message_drivers/message.h
58 create mode 100644 drivers/misc/vc04_services/interface/vchi/vchi.h
59 create mode 100644 drivers/misc/vc04_services/interface/vchi/vchi_cfg.h
60 create mode 100644 drivers/misc/vc04_services/interface/vchi/vchi_cfg_internal.h
61 create mode 100644 drivers/misc/vc04_services/interface/vchi/vchi_common.h
62 create mode 100644 drivers/misc/vc04_services/interface/vchi/vchi_mh.h
63 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq.h
64 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835.h
65 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
66 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.c
67 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.h
68 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_build_info.h
69 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_cfg.h
70 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.c
71 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.h
72 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.c
73 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.h
74 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_genversion
75 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_if.h
76 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_ioctl.h
77 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_kern_lib.c
78 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_memdrv.h
79 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_pagelist.h
80 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_proc.c
81 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_shim.c
82 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.c
83 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.h
84 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_version.c
85
86 diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
87 index 6cb388e..56867fa 100644
88 --- a/drivers/misc/Kconfig
89 +++ b/drivers/misc/Kconfig
90 @@ -524,6 +524,7 @@ source "drivers/misc/carma/Kconfig"
91 source "drivers/misc/altera-stapl/Kconfig"
92 source "drivers/misc/mei/Kconfig"
93 source "drivers/misc/vmw_vmci/Kconfig"
94 +source "drivers/misc/vc04_services/Kconfig"
95 source "drivers/misc/mic/Kconfig"
96 source "drivers/misc/genwqe/Kconfig"
97 endmenu
98 diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
99 index 99b9424..ad85616 100644
100 --- a/drivers/misc/Makefile
101 +++ b/drivers/misc/Makefile
102 @@ -52,5 +52,6 @@ obj-$(CONFIG_INTEL_MEI) += mei/
103 obj-$(CONFIG_VMWARE_VMCI) += vmw_vmci/
104 obj-$(CONFIG_LATTICE_ECP3_CONFIG) += lattice-ecp3-config.o
105 obj-$(CONFIG_SRAM) += sram.o
106 +obj-y += vc04_services/
107 obj-y += mic/
108 obj-$(CONFIG_GENWQE) += genwqe/
109 diff --git a/drivers/misc/vc04_services/Kconfig b/drivers/misc/vc04_services/Kconfig
110 new file mode 100644
111 index 0000000..2663933
112 --- /dev/null
113 +++ b/drivers/misc/vc04_services/Kconfig
114 @@ -0,0 +1,9 @@
115 +config BCM2708_VCHIQ
116 + tristate "Videocore VCHIQ"
117 + depends on MACH_BCM2708
118 + default y
119 + help
120 + Kernel to VideoCore communication interface for the
121 + BCM2708 family of products.
122 + Defaults to Y when the Broadcom Videocore services
123 + are included in the build, N otherwise.
124 diff --git a/drivers/misc/vc04_services/Makefile b/drivers/misc/vc04_services/Makefile
125 new file mode 100644
126 index 0000000..4224f58
127 --- /dev/null
128 +++ b/drivers/misc/vc04_services/Makefile
129 @@ -0,0 +1,17 @@
130 +ifeq ($(CONFIG_MACH_BCM2708),y)
131 +
132 +obj-$(CONFIG_BCM2708_VCHIQ) += vchiq.o
133 +
134 +vchiq-objs := \
135 + interface/vchiq_arm/vchiq_core.o \
136 + interface/vchiq_arm/vchiq_arm.o \
137 + interface/vchiq_arm/vchiq_kern_lib.o \
138 + interface/vchiq_arm/vchiq_2835_arm.o \
139 + interface/vchiq_arm/vchiq_proc.o \
140 + interface/vchiq_arm/vchiq_shim.o \
141 + interface/vchiq_arm/vchiq_util.o \
142 + interface/vchiq_arm/vchiq_connected.o \
143 +
144 +ccflags-y += -DVCOS_VERIFY_BKPTS=1 -Idrivers/misc/vc04_services -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000
145 +
146 +endif
147 diff --git a/drivers/misc/vc04_services/interface/vchi/connections/connection.h b/drivers/misc/vc04_services/interface/vchi/connections/connection.h
148 new file mode 100644
149 index 0000000..fef6ac3
150 --- /dev/null
151 +++ b/drivers/misc/vc04_services/interface/vchi/connections/connection.h
152 @@ -0,0 +1,328 @@
153 +/**
154 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
155 + *
156 + * Redistribution and use in source and binary forms, with or without
157 + * modification, are permitted provided that the following conditions
158 + * are met:
159 + * 1. Redistributions of source code must retain the above copyright
160 + * notice, this list of conditions, and the following disclaimer,
161 + * without modification.
162 + * 2. Redistributions in binary form must reproduce the above copyright
163 + * notice, this list of conditions and the following disclaimer in the
164 + * documentation and/or other materials provided with the distribution.
165 + * 3. The names of the above-listed copyright holders may not be used
166 + * to endorse or promote products derived from this software without
167 + * specific prior written permission.
168 + *
169 + * ALTERNATIVELY, this software may be distributed under the terms of the
170 + * GNU General Public License ("GPL") version 2, as published by the Free
171 + * Software Foundation.
172 + *
173 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
174 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
175 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
176 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
177 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
178 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
179 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
180 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
181 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
182 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
183 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
184 + */
185 +
186 +#ifndef CONNECTION_H_
187 +#define CONNECTION_H_
188 +
189 +#include <linux/kernel.h>
190 +#include <linux/types.h>
191 +#include <linux/semaphore.h>
192 +
193 +#include "interface/vchi/vchi_cfg_internal.h"
194 +#include "interface/vchi/vchi_common.h"
195 +#include "interface/vchi/message_drivers/message.h"
196 +
197 +/******************************************************************************
198 + Global defs
199 + *****************************************************************************/
200 +
201 +// Opaque handle for a connection / service pair
202 +typedef struct opaque_vchi_connection_connected_service_handle_t *VCHI_CONNECTION_SERVICE_HANDLE_T;
203 +
204 +// opaque handle to the connection state information
205 +typedef struct opaque_vchi_connection_info_t VCHI_CONNECTION_STATE_T;
206 +
207 +typedef struct vchi_connection_t VCHI_CONNECTION_T;
208 +
209 +
210 +/******************************************************************************
211 + API
212 + *****************************************************************************/
213 +
214 +// Routine to init a connection with a particular low level driver
215 +typedef VCHI_CONNECTION_STATE_T * (*VCHI_CONNECTION_INIT_T)( struct vchi_connection_t * connection,
216 + const VCHI_MESSAGE_DRIVER_T * driver );
217 +
218 +// Routine to control CRC enabling at a connection level
219 +typedef int32_t (*VCHI_CONNECTION_CRC_CONTROL_T)( VCHI_CONNECTION_STATE_T *state_handle,
220 + VCHI_CRC_CONTROL_T control );
221 +
222 +// Routine to create a service
223 +typedef int32_t (*VCHI_CONNECTION_SERVICE_CONNECT_T)( VCHI_CONNECTION_STATE_T *state_handle,
224 + int32_t service_id,
225 + uint32_t rx_fifo_size,
226 + uint32_t tx_fifo_size,
227 + int server,
228 + VCHI_CALLBACK_T callback,
229 + void *callback_param,
230 + int32_t want_crc,
231 + int32_t want_unaligned_bulk_rx,
232 + int32_t want_unaligned_bulk_tx,
233 + VCHI_CONNECTION_SERVICE_HANDLE_T *service_handle );
234 +
235 +// Routine to close a service
236 +typedef int32_t (*VCHI_CONNECTION_SERVICE_DISCONNECT_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle );
237 +
238 +// Routine to queue a message
239 +typedef int32_t (*VCHI_CONNECTION_SERVICE_QUEUE_MESSAGE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
240 + const void *data,
241 + uint32_t data_size,
242 + VCHI_FLAGS_T flags,
243 + void *msg_handle );
244 +
245 +// scatter-gather (vector) message queueing
246 +typedef int32_t (*VCHI_CONNECTION_SERVICE_QUEUE_MESSAGEV_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
247 + VCHI_MSG_VECTOR_T *vector,
248 + uint32_t count,
249 + VCHI_FLAGS_T flags,
250 + void *msg_handle );
251 +
252 +// Routine to dequeue a message
253 +typedef int32_t (*VCHI_CONNECTION_SERVICE_DEQUEUE_MESSAGE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
254 + void *data,
255 + uint32_t max_data_size_to_read,
256 + uint32_t *actual_msg_size,
257 + VCHI_FLAGS_T flags );
258 +
259 +// Routine to peek at a message
260 +typedef int32_t (*VCHI_CONNECTION_SERVICE_PEEK_MESSAGE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
261 + void **data,
262 + uint32_t *msg_size,
263 + VCHI_FLAGS_T flags );
264 +
265 +// Routine to hold a message
266 +typedef int32_t (*VCHI_CONNECTION_SERVICE_HOLD_MESSAGE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
267 + void **data,
268 + uint32_t *msg_size,
269 + VCHI_FLAGS_T flags,
270 + void **message_handle );
271 +
272 +// Routine to initialise a received message iterator
273 +typedef int32_t (*VCHI_CONNECTION_SERVICE_LOOKAHEAD_MESSAGE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
274 + VCHI_MSG_ITER_T *iter,
275 + VCHI_FLAGS_T flags );
276 +
277 +// Routine to release a held message
278 +typedef int32_t (*VCHI_CONNECTION_HELD_MSG_RELEASE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
279 + void *message_handle );
280 +
281 +// Routine to get info on a held message
282 +typedef int32_t (*VCHI_CONNECTION_HELD_MSG_INFO_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
283 + void *message_handle,
284 + void **data,
285 + int32_t *msg_size,
286 + uint32_t *tx_timestamp,
287 + uint32_t *rx_timestamp );
288 +
289 +// Routine to check whether the iterator has a next message
290 +typedef int32_t (*VCHI_CONNECTION_MSG_ITER_HAS_NEXT_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service,
291 + const VCHI_MSG_ITER_T *iter );
292 +
293 +// Routine to advance the iterator
294 +typedef int32_t (*VCHI_CONNECTION_MSG_ITER_NEXT_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service,
295 + VCHI_MSG_ITER_T *iter,
296 + void **data,
297 + uint32_t *msg_size );
298 +
299 +// Routine to remove the last message returned by the iterator
300 +typedef int32_t (*VCHI_CONNECTION_MSG_ITER_REMOVE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service,
301 + VCHI_MSG_ITER_T *iter );
302 +
303 +// Routine to hold the last message returned by the iterator
304 +typedef int32_t (*VCHI_CONNECTION_MSG_ITER_HOLD_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service,
305 + VCHI_MSG_ITER_T *iter,
306 + void **msg_handle );
307 +
308 +// Routine to transmit bulk data
309 +typedef int32_t (*VCHI_CONNECTION_BULK_QUEUE_TRANSMIT_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
310 + const void *data_src,
311 + uint32_t data_size,
312 + VCHI_FLAGS_T flags,
313 + void *bulk_handle );
314 +
315 +// Routine to receive data
316 +typedef int32_t (*VCHI_CONNECTION_BULK_QUEUE_RECEIVE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
317 + void *data_dst,
318 + uint32_t data_size,
319 + VCHI_FLAGS_T flags,
320 + void *bulk_handle );
321 +
322 +// Routine to report if a server is available
323 +typedef int32_t (*VCHI_CONNECTION_SERVER_PRESENT)( VCHI_CONNECTION_STATE_T *state, int32_t service_id, int32_t peer_flags );
324 +
325 +// Routine to report the number of RX slots available
326 +typedef int (*VCHI_CONNECTION_RX_SLOTS_AVAILABLE)( const VCHI_CONNECTION_STATE_T *state );
327 +
328 +// Routine to report the RX slot size
329 +typedef uint32_t (*VCHI_CONNECTION_RX_SLOT_SIZE)( const VCHI_CONNECTION_STATE_T *state );
330 +
331 +// Callback to indicate that the other side has added a buffer to the rx bulk DMA FIFO
332 +typedef void (*VCHI_CONNECTION_RX_BULK_BUFFER_ADDED)(VCHI_CONNECTION_STATE_T *state,
333 + int32_t service,
334 + uint32_t length,
335 + MESSAGE_TX_CHANNEL_T channel,
336 + uint32_t channel_params,
337 + uint32_t data_length,
338 + uint32_t data_offset);
339 +
340 +// Callback to inform a service that a Xon or Xoff message has been received
341 +typedef void (*VCHI_CONNECTION_FLOW_CONTROL)(VCHI_CONNECTION_STATE_T *state, int32_t service_id, int32_t xoff);
342 +
343 +// Callback to inform a service that a server available reply message has been received
344 +typedef void (*VCHI_CONNECTION_SERVER_AVAILABLE_REPLY)(VCHI_CONNECTION_STATE_T *state, int32_t service_id, uint32_t flags);
345 +
346 +// Callback to indicate that bulk auxiliary messages have arrived
347 +typedef void (*VCHI_CONNECTION_BULK_AUX_RECEIVED)(VCHI_CONNECTION_STATE_T *state);
348 +
349 +// Callback to indicate that bulk auxiliary messages have arrived
350 +typedef void (*VCHI_CONNECTION_BULK_AUX_TRANSMITTED)(VCHI_CONNECTION_STATE_T *state, void *handle);
351 +
352 +// Callback with all the connection info you require
353 +typedef void (*VCHI_CONNECTION_INFO)(VCHI_CONNECTION_STATE_T *state, uint32_t protocol_version, uint32_t slot_size, uint32_t num_slots, uint32_t min_bulk_size);
354 +
355 +// Callback to inform of a disconnect
356 +typedef void (*VCHI_CONNECTION_DISCONNECT)(VCHI_CONNECTION_STATE_T *state, uint32_t flags);
357 +
358 +// Callback to inform of a power control request
359 +typedef void (*VCHI_CONNECTION_POWER_CONTROL)(VCHI_CONNECTION_STATE_T *state, MESSAGE_TX_CHANNEL_T channel, int32_t enable);
360 +
361 +// allocate memory suitably aligned for this connection
362 +typedef void * (*VCHI_BUFFER_ALLOCATE)(VCHI_CONNECTION_SERVICE_HANDLE_T service_handle, uint32_t * length);
363 +
364 +// free memory allocated by buffer_allocate
365 +typedef void (*VCHI_BUFFER_FREE)(VCHI_CONNECTION_SERVICE_HANDLE_T service_handle, void * address);
366 +
367 +
368 +/******************************************************************************
369 + System driver struct
370 + *****************************************************************************/
371 +
372 +struct opaque_vchi_connection_api_t
373 +{
374 + // Routine to init the connection
375 + VCHI_CONNECTION_INIT_T init;
376 +
377 + // Connection-level CRC control
378 + VCHI_CONNECTION_CRC_CONTROL_T crc_control;
379 +
380 + // Routine to connect to or create service
381 + VCHI_CONNECTION_SERVICE_CONNECT_T service_connect;
382 +
383 + // Routine to disconnect from a service
384 + VCHI_CONNECTION_SERVICE_DISCONNECT_T service_disconnect;
385 +
386 + // Routine to queue a message
387 + VCHI_CONNECTION_SERVICE_QUEUE_MESSAGE_T service_queue_msg;
388 +
389 + // scatter-gather (vector) message queue
390 + VCHI_CONNECTION_SERVICE_QUEUE_MESSAGEV_T service_queue_msgv;
391 +
392 + // Routine to dequeue a message
393 + VCHI_CONNECTION_SERVICE_DEQUEUE_MESSAGE_T service_dequeue_msg;
394 +
395 + // Routine to peek at a message
396 + VCHI_CONNECTION_SERVICE_PEEK_MESSAGE_T service_peek_msg;
397 +
398 + // Routine to hold a message
399 + VCHI_CONNECTION_SERVICE_HOLD_MESSAGE_T service_hold_msg;
400 +
401 + // Routine to initialise a received message iterator
402 + VCHI_CONNECTION_SERVICE_LOOKAHEAD_MESSAGE_T service_look_ahead_msg;
403 +
404 + // Routine to release a message
405 + VCHI_CONNECTION_HELD_MSG_RELEASE_T held_msg_release;
406 +
407 + // Routine to get information on a held message
408 + VCHI_CONNECTION_HELD_MSG_INFO_T held_msg_info;
409 +
410 + // Routine to check for next message on iterator
411 + VCHI_CONNECTION_MSG_ITER_HAS_NEXT_T msg_iter_has_next;
412 +
413 + // Routine to get next message on iterator
414 + VCHI_CONNECTION_MSG_ITER_NEXT_T msg_iter_next;
415 +
416 + // Routine to remove the last message returned by iterator
417 + VCHI_CONNECTION_MSG_ITER_REMOVE_T msg_iter_remove;
418 +
419 + // Routine to hold the last message returned by iterator
420 + VCHI_CONNECTION_MSG_ITER_HOLD_T msg_iter_hold;
421 +
422 + // Routine to transmit bulk data
423 + VCHI_CONNECTION_BULK_QUEUE_TRANSMIT_T bulk_queue_transmit;
424 +
425 + // Routine to receive data
426 + VCHI_CONNECTION_BULK_QUEUE_RECEIVE_T bulk_queue_receive;
427 +
428 + // Routine to report the available servers
429 + VCHI_CONNECTION_SERVER_PRESENT server_present;
430 +
431 + // Routine to report the number of RX slots available
432 + VCHI_CONNECTION_RX_SLOTS_AVAILABLE connection_rx_slots_available;
433 +
434 + // Routine to report the RX slot size
435 + VCHI_CONNECTION_RX_SLOT_SIZE connection_rx_slot_size;
436 +
437 + // Callback to indicate that the other side has added a buffer to the rx bulk DMA FIFO
438 + VCHI_CONNECTION_RX_BULK_BUFFER_ADDED rx_bulk_buffer_added;
439 +
440 + // Callback to inform a service that a Xon or Xoff message has been received
441 + VCHI_CONNECTION_FLOW_CONTROL flow_control;
442 +
443 + // Callback to inform a service that a server available reply message has been received
444 + VCHI_CONNECTION_SERVER_AVAILABLE_REPLY server_available_reply;
445 +
446 + // Callback to indicate that bulk auxiliary messages have arrived
447 + VCHI_CONNECTION_BULK_AUX_RECEIVED bulk_aux_received;
448 +
449 + // Callback to indicate that a bulk auxiliary message has been transmitted
450 + VCHI_CONNECTION_BULK_AUX_TRANSMITTED bulk_aux_transmitted;
451 +
452 + // Callback to provide information about the connection
453 + VCHI_CONNECTION_INFO connection_info;
454 +
455 + // Callback to notify that peer has requested disconnect
456 + VCHI_CONNECTION_DISCONNECT disconnect;
457 +
458 + // Callback to notify that peer has requested power change
459 + VCHI_CONNECTION_POWER_CONTROL power_control;
460 +
461 + // allocate memory suitably aligned for this connection
462 + VCHI_BUFFER_ALLOCATE buffer_allocate;
463 +
464 + // free memory allocated by buffer_allocate
465 + VCHI_BUFFER_FREE buffer_free;
466 +
467 +};
468 +
469 +struct vchi_connection_t {
470 + const VCHI_CONNECTION_API_T *api;
471 + VCHI_CONNECTION_STATE_T *state;
472 +#ifdef VCHI_COARSE_LOCKING
473 + struct semaphore sem;
474 +#endif
475 +};
476 +
477 +
478 +#endif /* CONNECTION_H_ */
479 +
480 +/****************************** End of file **********************************/
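The header above is purely a table of hooks: a concrete connection driver implements functions matching the typedefs and exposes them through struct opaque_vchi_connection_api_t. The short sketch below (not part of the patch) illustrates that wiring; the example_* names are invented, and VCHI_CRC_CONTROL_T / VCHI_MESSAGE_DRIVER_T come from vchi_common.h and message.h rather than this file.

#include "interface/vchi/connections/connection.h"

/* Invented stand-ins for a real connection driver's hooks; they only show
 * that the implementations must match the typedefs declared above. */
static VCHI_CONNECTION_STATE_T *example_init(struct vchi_connection_t *connection,
                                             const VCHI_MESSAGE_DRIVER_T *driver)
{
        /* A real driver would allocate its state here and keep 'driver'. */
        return NULL;
}

static int32_t example_crc_control(VCHI_CONNECTION_STATE_T *state_handle,
                                   VCHI_CRC_CONTROL_T control)
{
        return 0;
}

/* Members left out of the initialiser stay NULL; a full driver fills in
 * every hook the core may call. */
static const struct opaque_vchi_connection_api_t example_connection_api = {
        .init        = example_init,
        .crc_control = example_crc_control,
};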
481 diff --git a/drivers/misc/vc04_services/interface/vchi/message_drivers/message.h b/drivers/misc/vc04_services/interface/vchi/message_drivers/message.h
482 new file mode 100644
483 index 0000000..8b3f767
484 --- /dev/null
485 +++ b/drivers/misc/vc04_services/interface/vchi/message_drivers/message.h
486 @@ -0,0 +1,204 @@
487 +/**
488 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
489 + *
490 + * Redistribution and use in source and binary forms, with or without
491 + * modification, are permitted provided that the following conditions
492 + * are met:
493 + * 1. Redistributions of source code must retain the above copyright
494 + * notice, this list of conditions, and the following disclaimer,
495 + * without modification.
496 + * 2. Redistributions in binary form must reproduce the above copyright
497 + * notice, this list of conditions and the following disclaimer in the
498 + * documentation and/or other materials provided with the distribution.
499 + * 3. The names of the above-listed copyright holders may not be used
500 + * to endorse or promote products derived from this software without
501 + * specific prior written permission.
502 + *
503 + * ALTERNATIVELY, this software may be distributed under the terms of the
504 + * GNU General Public License ("GPL") version 2, as published by the Free
505 + * Software Foundation.
506 + *
507 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
508 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
509 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
510 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
511 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
512 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
513 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
514 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
515 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
516 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
517 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
518 + */
519 +
520 +#ifndef _VCHI_MESSAGE_H_
521 +#define _VCHI_MESSAGE_H_
522 +
523 +#include <linux/kernel.h>
524 +#include <linux/types.h>
525 +#include <linux/semaphore.h>
526 +
527 +#include "interface/vchi/vchi_cfg_internal.h"
528 +#include "interface/vchi/vchi_common.h"
529 +
530 +
531 +typedef enum message_event_type {
532 + MESSAGE_EVENT_NONE,
533 + MESSAGE_EVENT_NOP,
534 + MESSAGE_EVENT_MESSAGE,
535 + MESSAGE_EVENT_SLOT_COMPLETE,
536 + MESSAGE_EVENT_RX_BULK_PAUSED,
537 + MESSAGE_EVENT_RX_BULK_COMPLETE,
538 + MESSAGE_EVENT_TX_COMPLETE,
539 + MESSAGE_EVENT_MSG_DISCARDED
540 +} MESSAGE_EVENT_TYPE_T;
541 +
542 +typedef enum vchi_msg_flags
543 +{
544 + VCHI_MSG_FLAGS_NONE = 0x0,
545 + VCHI_MSG_FLAGS_TERMINATE_DMA = 0x1
546 +} VCHI_MSG_FLAGS_T;
547 +
548 +typedef enum message_tx_channel
549 +{
550 + MESSAGE_TX_CHANNEL_MESSAGE = 0,
551 + MESSAGE_TX_CHANNEL_BULK = 1 // drivers may provide multiple bulk channels, from 1 upwards
552 +} MESSAGE_TX_CHANNEL_T;
553 +
554 +// Macros used for cycling through bulk channels
555 +#define MESSAGE_TX_CHANNEL_BULK_PREV(c) (MESSAGE_TX_CHANNEL_BULK+((c)-MESSAGE_TX_CHANNEL_BULK+VCHI_MAX_BULK_TX_CHANNELS_PER_CONNECTION-1)%VCHI_MAX_BULK_TX_CHANNELS_PER_CONNECTION)
556 +#define MESSAGE_TX_CHANNEL_BULK_NEXT(c) (MESSAGE_TX_CHANNEL_BULK+((c)-MESSAGE_TX_CHANNEL_BULK+1)%VCHI_MAX_BULK_TX_CHANNELS_PER_CONNECTION)
557 +
558 +typedef enum message_rx_channel
559 +{
560 + MESSAGE_RX_CHANNEL_MESSAGE = 0,
561 + MESSAGE_RX_CHANNEL_BULK = 1 // drivers may provide multiple bulk channels, from 1 upwards
562 +} MESSAGE_RX_CHANNEL_T;
563 +
564 +// Message receive slot information
565 +typedef struct rx_msg_slot_info {
566 +
567 + struct rx_msg_slot_info *next;
568 + //struct slot_info *prev;
569 +#if !defined VCHI_COARSE_LOCKING
570 + struct semaphore sem;
571 +#endif
572 +
573 + uint8_t *addr; // base address of slot
574 + uint32_t len; // length of slot in bytes
575 +
576 + uint32_t write_ptr; // hardware causes this to advance
577 + uint32_t read_ptr; // this module does the reading
578 + int active; // is this slot in the hardware dma fifo?
579 + uint32_t msgs_parsed; // count how many messages are in this slot
580 + uint32_t msgs_released; // how many messages have been released
581 + void *state; // connection state information
582 + uint8_t ref_count[VCHI_MAX_SERVICES_PER_CONNECTION]; // reference count for slots held by services
583 +} RX_MSG_SLOTINFO_T;
584 +
585 +// The message driver no longer needs to know about the fields of RX_BULK_SLOTINFO_T - sort this out.
586 +// In particular, it mustn't use addr and len - they're the client buffer, but the message
587 +// driver will be tasked with sending the aligned core section.
588 +typedef struct rx_bulk_slotinfo_t {
589 + struct rx_bulk_slotinfo_t *next;
590 +
591 + struct semaphore *blocking;
592 +
593 + // needed by DMA
594 + void *addr;
595 + uint32_t len;
596 +
597 + // needed for the callback
598 + void *service;
599 + void *handle;
600 + VCHI_FLAGS_T flags;
601 +} RX_BULK_SLOTINFO_T;
602 +
603 +
604 +/* ----------------------------------------------------------------------
605 + * each connection driver will have a pool of the following struct.
606 + *
607 + * the pool will be managed by vchi_qman_*
608 + * this means there will be multiple queues (single linked lists)
609 + * a given struct message_info will be on exactly one of these queues
610 + * at any one time
611 + * -------------------------------------------------------------------- */
612 +typedef struct rx_message_info {
613 +
614 + struct message_info *next;
615 + //struct message_info *prev;
616 +
617 + uint8_t *addr;
618 + uint32_t len;
619 + RX_MSG_SLOTINFO_T *slot; // points to whichever slot contains this message
620 + uint32_t tx_timestamp;
621 + uint32_t rx_timestamp;
622 +
623 +} RX_MESSAGE_INFO_T;
624 +
625 +typedef struct {
626 + MESSAGE_EVENT_TYPE_T type;
627 +
628 + struct {
629 + // for messages
630 + void *addr; // address of message
631 + uint16_t slot_delta; // whether this message indicated slot delta
632 + uint32_t len; // length of message
633 + RX_MSG_SLOTINFO_T *slot; // slot this message is in
634 + int32_t service; // service id this message is destined for
635 + uint32_t tx_timestamp; // timestamp from the header
636 + uint32_t rx_timestamp; // timestamp when we parsed it
637 + } message;
638 +
639 + // FIXME: cleanup slot reporting...
640 + RX_MSG_SLOTINFO_T *rx_msg;
641 + RX_BULK_SLOTINFO_T *rx_bulk;
642 + void *tx_handle;
643 + MESSAGE_TX_CHANNEL_T tx_channel;
644 +
645 +} MESSAGE_EVENT_T;
646 +
647 +
648 +// callbacks
649 +typedef void VCHI_MESSAGE_DRIVER_EVENT_CALLBACK_T( void *state );
650 +
651 +typedef struct {
652 + VCHI_MESSAGE_DRIVER_EVENT_CALLBACK_T *event_callback;
653 +} VCHI_MESSAGE_DRIVER_OPEN_T;
654 +
655 +
656 +// handle to this instance of message driver (as returned by ->open)
657 +typedef struct opaque_mhandle_t *VCHI_MDRIVER_HANDLE_T;
658 +
659 +struct opaque_vchi_message_driver_t {
660 + VCHI_MDRIVER_HANDLE_T *(*open)( VCHI_MESSAGE_DRIVER_OPEN_T *params, void *state );
661 + int32_t (*suspending)( VCHI_MDRIVER_HANDLE_T *handle );
662 + int32_t (*resumed)( VCHI_MDRIVER_HANDLE_T *handle );
663 + int32_t (*power_control)( VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T, int32_t enable );
664 + int32_t (*add_msg_rx_slot)( VCHI_MDRIVER_HANDLE_T *handle, RX_MSG_SLOTINFO_T *slot ); // rx message
665 + int32_t (*add_bulk_rx)( VCHI_MDRIVER_HANDLE_T *handle, void *data, uint32_t len, RX_BULK_SLOTINFO_T *slot ); // rx data (bulk)
666 + int32_t (*send)( VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T channel, const void *data, uint32_t len, VCHI_MSG_FLAGS_T flags, void *send_handle ); // tx (message & bulk)
667 + void (*next_event)( VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_EVENT_T *event ); // get the next event from message_driver
668 + int32_t (*enable)( VCHI_MDRIVER_HANDLE_T *handle );
669 + int32_t (*form_message)( VCHI_MDRIVER_HANDLE_T *handle, int32_t service_id, VCHI_MSG_VECTOR_T *vector, uint32_t count, void
670 + *address, uint32_t length_avail, uint32_t max_total_length, int32_t pad_to_fill, int32_t allow_partial );
671 +
672 + int32_t (*update_message)( VCHI_MDRIVER_HANDLE_T *handle, void *dest, int16_t *slot_count );
673 + int32_t (*buffer_aligned)( VCHI_MDRIVER_HANDLE_T *handle, int tx, int uncached, const void *address, const uint32_t length );
674 + void * (*allocate_buffer)( VCHI_MDRIVER_HANDLE_T *handle, uint32_t *length );
675 + void (*free_buffer)( VCHI_MDRIVER_HANDLE_T *handle, void *address );
676 + int (*rx_slot_size)( VCHI_MDRIVER_HANDLE_T *handle, int msg_size );
677 + int (*tx_slot_size)( VCHI_MDRIVER_HANDLE_T *handle, int msg_size );
678 +
679 + int32_t (*tx_supports_terminate)( const VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T channel );
680 + uint32_t (*tx_bulk_chunk_size)( const VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T channel );
681 + int (*tx_alignment)( const VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T channel );
682 + int (*rx_alignment)( const VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_RX_CHANNEL_T channel );
683 + void (*form_bulk_aux)( VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T channel, const void *data, uint32_t len, uint32_t chunk_size, const void **aux_data, int32_t *aux_len );
684 + void (*debug)( VCHI_MDRIVER_HANDLE_T *handle );
685 +};
686 +
687 +
688 +#endif // _VCHI_MESSAGE_H_
689 +
690 +/****************************** End of file ***********************************/
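The driver operations above are event driven: ->next_event() fills in a MESSAGE_EVENT_T and the caller dispatches on its type until MESSAGE_EVENT_NONE comes back. A hedged sketch of that loop follows (not part of the patch); drain_events is an invented name, and the handle is assumed to come from an earlier ->open() call.

#include "interface/vchi/message_drivers/message.h"

/* Illustrative only: pull events out of a message driver until its queue
 * is empty. 'md' and 'handle' are assumed to come from md->open(). */
static void drain_events(const struct opaque_vchi_message_driver_t *md,
                         VCHI_MDRIVER_HANDLE_T *handle)
{
        MESSAGE_EVENT_T event;

        for (;;) {
                md->next_event(handle, &event);

                switch (event.type) {
                case MESSAGE_EVENT_NONE:
                        return;         /* nothing left to process */
                case MESSAGE_EVENT_MESSAGE:
                        /* event.message.addr/len/service describe the payload */
                        break;
                case MESSAGE_EVENT_TX_COMPLETE:
                        /* event.tx_handle identifies the finished transmit */
                        break;
                default:
                        break;          /* remaining event types elided */
                }
        }
}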
691 diff --git a/drivers/misc/vc04_services/interface/vchi/vchi.h b/drivers/misc/vc04_services/interface/vchi/vchi.h
692 new file mode 100644
693 index 0000000..cee5291
694 --- /dev/null
695 +++ b/drivers/misc/vc04_services/interface/vchi/vchi.h
696 @@ -0,0 +1,373 @@
697 +/**
698 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
699 + *
700 + * Redistribution and use in source and binary forms, with or without
701 + * modification, are permitted provided that the following conditions
702 + * are met:
703 + * 1. Redistributions of source code must retain the above copyright
704 + * notice, this list of conditions, and the following disclaimer,
705 + * without modification.
706 + * 2. Redistributions in binary form must reproduce the above copyright
707 + * notice, this list of conditions and the following disclaimer in the
708 + * documentation and/or other materials provided with the distribution.
709 + * 3. The names of the above-listed copyright holders may not be used
710 + * to endorse or promote products derived from this software without
711 + * specific prior written permission.
712 + *
713 + * ALTERNATIVELY, this software may be distributed under the terms of the
714 + * GNU General Public License ("GPL") version 2, as published by the Free
715 + * Software Foundation.
716 + *
717 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
718 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
719 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
720 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
721 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
722 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
723 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
724 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
725 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
726 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
727 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
728 + */
729 +
730 +#ifndef VCHI_H_
731 +#define VCHI_H_
732 +
733 +#include "interface/vchi/vchi_cfg.h"
734 +#include "interface/vchi/vchi_common.h"
735 +#include "interface/vchi/connections/connection.h"
736 +#include "vchi_mh.h"
737 +
738 +
739 +/******************************************************************************
740 + Global defs
741 + *****************************************************************************/
742 +
743 +#define VCHI_BULK_ROUND_UP(x) ((((unsigned long)(x))+VCHI_BULK_ALIGN-1) & ~(VCHI_BULK_ALIGN-1))
744 +#define VCHI_BULK_ROUND_DOWN(x) (((unsigned long)(x)) & ~(VCHI_BULK_ALIGN-1))
745 +#define VCHI_BULK_ALIGN_NBYTES(x) (VCHI_BULK_ALIGNED(x) ? 0 : (VCHI_BULK_ALIGN - ((unsigned long)(x) & (VCHI_BULK_ALIGN-1))))
746 +
747 +#ifdef USE_VCHIQ_ARM
748 +#define VCHI_BULK_ALIGNED(x) 1
749 +#else
750 +#define VCHI_BULK_ALIGNED(x) (((unsigned long)(x) & (VCHI_BULK_ALIGN-1)) == 0)
751 +#endif
752 +
753 +struct vchi_version {
754 + uint32_t version;
755 + uint32_t version_min;
756 +};
757 +#define VCHI_VERSION(v_) { v_, v_ }
758 +#define VCHI_VERSION_EX(v_, m_) { v_, m_ }
759 +
760 +typedef enum
761 +{
762 + VCHI_VEC_POINTER,
763 + VCHI_VEC_HANDLE,
764 + VCHI_VEC_LIST
765 +} VCHI_MSG_VECTOR_TYPE_T;
766 +
767 +typedef struct vchi_msg_vector_ex {
768 +
769 + VCHI_MSG_VECTOR_TYPE_T type;
770 + union
771 + {
772 + // a memory handle
773 + struct
774 + {
775 + VCHI_MEM_HANDLE_T handle;
776 + uint32_t offset;
777 + int32_t vec_len;
778 + } handle;
779 +
780 + // an ordinary data pointer
781 + struct
782 + {
783 + const void *vec_base;
784 + int32_t vec_len;
785 + } ptr;
786 +
787 + // a nested vector list
788 + struct
789 + {
790 + struct vchi_msg_vector_ex *vec;
791 + uint32_t vec_len;
792 + } list;
793 + } u;
794 +} VCHI_MSG_VECTOR_EX_T;
795 +
796 +
797 +// Construct an entry in a msg vector for a pointer (p) of length (l)
798 +#define VCHI_VEC_POINTER(p,l) VCHI_VEC_POINTER, { { (VCHI_MEM_HANDLE_T)(p), (l) } }
799 +
800 +// Construct an entry in a msg vector for a message handle (h), starting at offset (o) of length (l)
801 +#define VCHI_VEC_HANDLE(h,o,l) VCHI_VEC_HANDLE, { { (h), (o), (l) } }
802 +
803 +// Macros to manipulate 'FOURCC' values
804 +#define MAKE_FOURCC(x) ((int32_t)( (x[0] << 24) | (x[1] << 16) | (x[2] << 8) | x[3] ))
805 +#define FOURCC_TO_CHAR(x) (x >> 24) & 0xFF,(x >> 16) & 0xFF,(x >> 8) & 0xFF, x & 0xFF
806 +
807 +
808 +// Opaque service information
809 +struct opaque_vchi_service_t;
810 +
811 +// Descriptor for a held message. Allocated by client, initialised by vchi_msg_hold,
812 +// vchi_msg_iter_hold or vchi_msg_iter_hold_next. Fields are for internal VCHI use only.
813 +typedef struct
814 +{
815 + struct opaque_vchi_service_t *service;
816 + void *message;
817 +} VCHI_HELD_MSG_T;
818 +
819 +
820 +
821 +// structure used to provide the information needed to open a server or a client
822 +typedef struct {
823 + struct vchi_version version;
824 + int32_t service_id;
825 + VCHI_CONNECTION_T *connection;
826 + uint32_t rx_fifo_size;
827 + uint32_t tx_fifo_size;
828 + VCHI_CALLBACK_T callback;
829 + void *callback_param;
830 + /* client intends to receive bulk transfers of
831 + odd lengths or into unaligned buffers */
832 + int32_t want_unaligned_bulk_rx;
833 + /* client intends to transmit bulk transfers of
834 + odd lengths or out of unaligned buffers */
835 + int32_t want_unaligned_bulk_tx;
836 + /* client wants to check CRCs on (bulk) xfers.
837 + Only needs to be set at 1 end - will do both directions. */
838 + int32_t want_crc;
839 +} SERVICE_CREATION_T;
840 +
841 +// Opaque handle for a VCHI instance
842 +typedef struct opaque_vchi_instance_handle_t *VCHI_INSTANCE_T;
843 +
844 +// Opaque handle for a server or client
845 +typedef struct opaque_vchi_service_handle_t *VCHI_SERVICE_HANDLE_T;
846 +
847 +// Service registration & startup
848 +typedef void (*VCHI_SERVICE_INIT)(VCHI_INSTANCE_T initialise_instance, VCHI_CONNECTION_T **connections, uint32_t num_connections);
849 +
850 +typedef struct service_info_tag {
851 + const char * const vll_filename; /* VLL to load to start this service. This is an empty string if VLL is "static" */
852 + VCHI_SERVICE_INIT init; /* Service initialisation function */
853 + void *vll_handle; /* VLL handle; NULL when unloaded or a "static VLL" in build */
854 +} SERVICE_INFO_T;
855 +
856 +/******************************************************************************
857 + Global funcs - implementation is specific to which side you are on (local / remote)
858 + *****************************************************************************/
859 +
860 +#ifdef __cplusplus
861 +extern "C" {
862 +#endif
863 +
864 +extern /*@observer@*/ VCHI_CONNECTION_T * vchi_create_connection( const VCHI_CONNECTION_API_T * function_table,
865 + const VCHI_MESSAGE_DRIVER_T * low_level);
866 +
867 +
868 +// Routine used to initialise the vchi on both local + remote connections
869 +extern int32_t vchi_initialise( VCHI_INSTANCE_T *instance_handle );
870 +
871 +extern int32_t vchi_exit( void );
872 +
873 +extern int32_t vchi_connect( VCHI_CONNECTION_T **connections,
874 + const uint32_t num_connections,
875 + VCHI_INSTANCE_T instance_handle );
876 +
877 +//When this is called, ensure that all services have no data pending.
878 +//Bulk transfers can remain 'queued'
879 +extern int32_t vchi_disconnect( VCHI_INSTANCE_T instance_handle );
880 +
881 +// Global control over bulk CRC checking
882 +extern int32_t vchi_crc_control( VCHI_CONNECTION_T *connection,
883 + VCHI_CRC_CONTROL_T control );
884 +
885 +// helper functions
886 +extern void * vchi_allocate_buffer(VCHI_SERVICE_HANDLE_T handle, uint32_t *length);
887 +extern void vchi_free_buffer(VCHI_SERVICE_HANDLE_T handle, void *address);
888 +extern uint32_t vchi_current_time(VCHI_INSTANCE_T instance_handle);
889 +
890 +
891 +/******************************************************************************
892 + Global service API
893 + *****************************************************************************/
894 +// Routine to create a named service
895 +extern int32_t vchi_service_create( VCHI_INSTANCE_T instance_handle,
896 + SERVICE_CREATION_T *setup,
897 + VCHI_SERVICE_HANDLE_T *handle );
898 +
899 +// Routine to destroy a service
900 +extern int32_t vchi_service_destroy( const VCHI_SERVICE_HANDLE_T handle );
901 +
902 +// Routine to open a named service
903 +extern int32_t vchi_service_open( VCHI_INSTANCE_T instance_handle,
904 + SERVICE_CREATION_T *setup,
905 + VCHI_SERVICE_HANDLE_T *handle);
906 +
907 +extern int32_t vchi_get_peer_version( const VCHI_SERVICE_HANDLE_T handle,
908 + short *peer_version );
909 +
910 +// Routine to close a named service
911 +extern int32_t vchi_service_close( const VCHI_SERVICE_HANDLE_T handle );
912 +
913 +// Routine to increment ref count on a named service
914 +extern int32_t vchi_service_use( const VCHI_SERVICE_HANDLE_T handle );
915 +
916 +// Routine to decrement ref count on a named service
917 +extern int32_t vchi_service_release( const VCHI_SERVICE_HANDLE_T handle );
918 +
919 +// Routine to send a message across a service
920 +extern int32_t vchi_msg_queue( VCHI_SERVICE_HANDLE_T handle,
921 + const void *data,
922 + uint32_t data_size,
923 + VCHI_FLAGS_T flags,
924 + void *msg_handle );
925 +
926 +// scatter-gather (vector) and send message
927 +int32_t vchi_msg_queuev_ex( VCHI_SERVICE_HANDLE_T handle,
928 + VCHI_MSG_VECTOR_EX_T *vector,
929 + uint32_t count,
930 + VCHI_FLAGS_T flags,
931 + void *msg_handle );
932 +
933 +// legacy scatter-gather (vector) and send message, only handles pointers
934 +int32_t vchi_msg_queuev( VCHI_SERVICE_HANDLE_T handle,
935 + VCHI_MSG_VECTOR_T *vector,
936 + uint32_t count,
937 + VCHI_FLAGS_T flags,
938 + void *msg_handle );
939 +
940 +// Routine to receive a msg from a service
941 +// Dequeue is equivalent to hold, copy into client buffer, release
942 +extern int32_t vchi_msg_dequeue( VCHI_SERVICE_HANDLE_T handle,
943 + void *data,
944 + uint32_t max_data_size_to_read,
945 + uint32_t *actual_msg_size,
946 + VCHI_FLAGS_T flags );
947 +
948 +// Routine to look at a message in place.
949 +// The message is not dequeued, so a subsequent call to peek or dequeue
950 +// will return the same message.
951 +extern int32_t vchi_msg_peek( VCHI_SERVICE_HANDLE_T handle,
952 + void **data,
953 + uint32_t *msg_size,
954 + VCHI_FLAGS_T flags );
955 +
956 +// Routine to remove a message after it has been read in place with peek
957 +// The first message on the queue is dequeued.
958 +extern int32_t vchi_msg_remove( VCHI_SERVICE_HANDLE_T handle );
959 +
960 +// Routine to look at a message in place.
961 +// The message is dequeued, so the caller is left holding it; the descriptor is
962 +// filled in and must be released when the user has finished with the message.
963 +extern int32_t vchi_msg_hold( VCHI_SERVICE_HANDLE_T handle,
964 + void **data, // } may be NULL, as info can be
965 + uint32_t *msg_size, // } obtained from HELD_MSG_T
966 + VCHI_FLAGS_T flags,
967 + VCHI_HELD_MSG_T *message_descriptor );
968 +
969 +// Initialise an iterator to look through messages in place
970 +extern int32_t vchi_msg_look_ahead( VCHI_SERVICE_HANDLE_T handle,
971 + VCHI_MSG_ITER_T *iter,
972 + VCHI_FLAGS_T flags );
973 +
974 +/******************************************************************************
975 + Global service support API - operations on held messages and message iterators
976 + *****************************************************************************/
977 +
978 +// Routine to get the address of a held message
979 +extern void *vchi_held_msg_ptr( const VCHI_HELD_MSG_T *message );
980 +
981 +// Routine to get the size of a held message
982 +extern int32_t vchi_held_msg_size( const VCHI_HELD_MSG_T *message );
983 +
984 +// Routine to get the transmit timestamp as written into the header by the peer
985 +extern uint32_t vchi_held_msg_tx_timestamp( const VCHI_HELD_MSG_T *message );
986 +
987 +// Routine to get the reception timestamp, written as we parsed the header
988 +extern uint32_t vchi_held_msg_rx_timestamp( const VCHI_HELD_MSG_T *message );
989 +
990 +// Routine to release a held message after it has been processed
991 +extern int32_t vchi_held_msg_release( VCHI_HELD_MSG_T *message );
992 +
993 +// Indicates whether the iterator has a next message.
994 +extern int32_t vchi_msg_iter_has_next( const VCHI_MSG_ITER_T *iter );
995 +
996 +// Return the pointer and length for the next message and advance the iterator.
997 +extern int32_t vchi_msg_iter_next( VCHI_MSG_ITER_T *iter,
998 + void **data,
999 + uint32_t *msg_size );
1000 +
1001 +// Remove the last message returned by vchi_msg_iter_next.
1002 +// Can only be called once after each call to vchi_msg_iter_next.
1003 +extern int32_t vchi_msg_iter_remove( VCHI_MSG_ITER_T *iter );
1004 +
1005 +// Hold the last message returned by vchi_msg_iter_next.
1006 +// Can only be called once after each call to vchi_msg_iter_next.
1007 +extern int32_t vchi_msg_iter_hold( VCHI_MSG_ITER_T *iter,
1008 + VCHI_HELD_MSG_T *message );
1009 +
1010 +// Return information for the next message, and hold it, advancing the iterator.
1011 +extern int32_t vchi_msg_iter_hold_next( VCHI_MSG_ITER_T *iter,
1012 + void **data, // } may be NULL
1013 + uint32_t *msg_size, // }
1014 + VCHI_HELD_MSG_T *message );
1015 +
1016 +
1017 +/******************************************************************************
1018 + Global bulk API
1019 + *****************************************************************************/
1020 +
1021 +// Routine to prepare interface for a transfer from the other side
1022 +extern int32_t vchi_bulk_queue_receive( VCHI_SERVICE_HANDLE_T handle,
1023 + void *data_dst,
1024 + uint32_t data_size,
1025 + VCHI_FLAGS_T flags,
1026 + void *transfer_handle );
1027 +
1028 +
1029 +// Prepare interface for a transfer from the other side into relocatable memory.
1030 +int32_t vchi_bulk_queue_receive_reloc( const VCHI_SERVICE_HANDLE_T handle,
1031 + VCHI_MEM_HANDLE_T h_dst,
1032 + uint32_t offset,
1033 + uint32_t data_size,
1034 + const VCHI_FLAGS_T flags,
1035 + void * const bulk_handle );
1036 +
1037 +// Routine to queue up data ready for transfer to the other side (once they have signalled they are ready)
1038 +extern int32_t vchi_bulk_queue_transmit( VCHI_SERVICE_HANDLE_T handle,
1039 + const void *data_src,
1040 + uint32_t data_size,
1041 + VCHI_FLAGS_T flags,
1042 + void *transfer_handle );
1043 +
1044 +
1045 +/******************************************************************************
1046 + Configuration plumbing
1047 + *****************************************************************************/
1048 +
1049 +// function prototypes for the different mid layers (the state info gives the different physical connections)
1050 +extern const VCHI_CONNECTION_API_T *single_get_func_table( void );
1051 +//extern const VCHI_CONNECTION_API_T *local_server_get_func_table( void );
1052 +//extern const VCHI_CONNECTION_API_T *local_client_get_func_table( void );
1053 +
1054 +// declare all message drivers here
1055 +const VCHI_MESSAGE_DRIVER_T *vchi_mphi_message_driver_func_table( void );
1056 +
1057 +#ifdef __cplusplus
1058 +}
1059 +#endif
1060 +
1061 +extern int32_t vchi_bulk_queue_transmit_reloc( VCHI_SERVICE_HANDLE_T handle,
1062 + VCHI_MEM_HANDLE_T h_src,
1063 + uint32_t offset,
1064 + uint32_t data_size,
1065 + VCHI_FLAGS_T flags,
1066 + void *transfer_handle );
1067 +#endif /* VCHI_H_ */
1068 +
1069 +/****************************** End of file **********************************/
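vchi.h is the client-facing side of the stack, and in-kernel services generally use it in the same order: initialise, connect, open a service by fourcc, queue messages, close. The sketch below strings those declarations together; it is not part of the patch, the "DEMO" fourcc is invented, passing NULL/0 to vchi_connect() is assumed to select the default connection, and VCHI_FLAGS_BLOCK_UNTIL_QUEUED is assumed to be defined in vchi_common.h (not shown in this excerpt).

#include "interface/vchi/vchi.h"

/* Illustrative only: open an invented "DEMO" service and send one message. */
static int32_t demo_send_hello(void)
{
        VCHI_INSTANCE_T instance;
        VCHI_SERVICE_HANDLE_T service;
        SERVICE_CREATION_T setup = {
                .version    = VCHI_VERSION(1),
                .service_id = MAKE_FOURCC("DEMO"),
                .callback   = NULL,     /* a real client installs a VCHI_CALLBACK_T */
        };
        static const char hello[] = "hello";
        int32_t ret;

        ret = vchi_initialise(&instance);
        if (ret != 0)
                return ret;

        ret = vchi_connect(NULL, 0, instance);  /* assumed: default connection */
        if (ret != 0)
                return ret;

        ret = vchi_service_open(instance, &setup, &service);
        if (ret != 0)
                return ret;

        ret = vchi_msg_queue(service, hello, sizeof(hello),
                             VCHI_FLAGS_BLOCK_UNTIL_QUEUED, NULL);

        vchi_service_close(service);
        return ret;
}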
1070 diff --git a/drivers/misc/vc04_services/interface/vchi/vchi_cfg.h b/drivers/misc/vc04_services/interface/vchi/vchi_cfg.h
1071 new file mode 100644
1072 index 0000000..26bc2d3
1073 --- /dev/null
1074 +++ b/drivers/misc/vc04_services/interface/vchi/vchi_cfg.h
1075 @@ -0,0 +1,224 @@
1076 +/**
1077 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
1078 + *
1079 + * Redistribution and use in source and binary forms, with or without
1080 + * modification, are permitted provided that the following conditions
1081 + * are met:
1082 + * 1. Redistributions of source code must retain the above copyright
1083 + * notice, this list of conditions, and the following disclaimer,
1084 + * without modification.
1085 + * 2. Redistributions in binary form must reproduce the above copyright
1086 + * notice, this list of conditions and the following disclaimer in the
1087 + * documentation and/or other materials provided with the distribution.
1088 + * 3. The names of the above-listed copyright holders may not be used
1089 + * to endorse or promote products derived from this software without
1090 + * specific prior written permission.
1091 + *
1092 + * ALTERNATIVELY, this software may be distributed under the terms of the
1093 + * GNU General Public License ("GPL") version 2, as published by the Free
1094 + * Software Foundation.
1095 + *
1096 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
1097 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
1098 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
1099 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
1100 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
1101 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
1102 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
1103 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
1104 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
1105 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1106 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1107 + */
1108 +
1109 +#ifndef VCHI_CFG_H_
1110 +#define VCHI_CFG_H_
1111 +
1112 +/****************************************************************************************
1113 + * Defines in this first section are part of the VCHI API and may be examined by VCHI
1114 + * services.
1115 + ***************************************************************************************/
1116 +
1117 +/* Required alignment of base addresses for bulk transfer, if unaligned transfers are not enabled */
1118 +/* Really determined by the message driver, and should be available from a run-time call. */
1119 +#ifndef VCHI_BULK_ALIGN
1120 +# if __VCCOREVER__ >= 0x04000000
1121 +# define VCHI_BULK_ALIGN 32 // Allows for the need to do cache cleans
1122 +# else
1123 +# define VCHI_BULK_ALIGN 16
1124 +# endif
1125 +#endif
1126 +
1127 +/* Required length multiple for bulk transfers, if unaligned transfers are not enabled */
1128 +/* May be less than or greater than VCHI_BULK_ALIGN */
1129 +/* Really determined by the message driver, and should be available from a run-time call. */
1130 +#ifndef VCHI_BULK_GRANULARITY
1131 +# if __VCCOREVER__ >= 0x04000000
1132 +# define VCHI_BULK_GRANULARITY 32 // Allows for the need to do cache cleans
1133 +# else
1134 +# define VCHI_BULK_GRANULARITY 16
1135 +# endif
1136 +#endif
1137 +
1138 +/* The largest possible message to be queued with vchi_msg_queue. */
1139 +#ifndef VCHI_MAX_MSG_SIZE
1140 +# if defined VCHI_LOCAL_HOST_PORT
1141 +# define VCHI_MAX_MSG_SIZE 16384 // makes file transfers fast, but should they be using bulk?
1142 +# else
1143 +# define VCHI_MAX_MSG_SIZE 4096 // NOTE: THIS MUST BE LARGER THAN OR EQUAL TO THE SIZE OF THE KHRONOS MERGE BUFFER!!
1144 +# endif
1145 +#endif
1146 +
1147 +/******************************************************************************************
1148 + * Defines below are system configuration options, and should not be used by VCHI services.
1149 + *****************************************************************************************/
1150 +
1151 +/* How many connections can we support? A localhost implementation uses 2 connections,
1152 + * 1 for host-app, 1 for VMCS, and these are hooked together by a loopback MPHI VCFW
1153 + * driver. */
1154 +#ifndef VCHI_MAX_NUM_CONNECTIONS
1155 +# define VCHI_MAX_NUM_CONNECTIONS 3
1156 +#endif
1157 +
1158 +/* How many services can we open per connection? Extending this doesn't cost processing time, just a small
1159 + * amount of static memory. */
1160 +#ifndef VCHI_MAX_SERVICES_PER_CONNECTION
1161 +# define VCHI_MAX_SERVICES_PER_CONNECTION 36
1162 +#endif
1163 +
1164 +/* Adjust if using a message driver that supports more logical TX channels */
1165 +#ifndef VCHI_MAX_BULK_TX_CHANNELS_PER_CONNECTION
1166 +# define VCHI_MAX_BULK_TX_CHANNELS_PER_CONNECTION 9 // 1 MPHI + 8 CCP2 logical channels
1167 +#endif
1168 +
1169 +/* Adjust if using a message driver that supports more logical RX channels */
1170 +#ifndef VCHI_MAX_BULK_RX_CHANNELS_PER_CONNECTION
1171 +# define VCHI_MAX_BULK_RX_CHANNELS_PER_CONNECTION 1 // 1 MPHI
1172 +#endif
1173 +
1174 +/* How many receive slots do we use. This times VCHI_MAX_MSG_SIZE gives the effective
1175 + * receive queue space, less message headers. */
1176 +#ifndef VCHI_NUM_READ_SLOTS
1177 +# if defined(VCHI_LOCAL_HOST_PORT)
1178 +# define VCHI_NUM_READ_SLOTS 4
1179 +# else
1180 +# define VCHI_NUM_READ_SLOTS 48
1181 +# endif
1182 +#endif
1183 +
1184 +/* Do we utilise overrun facility for receive message slots? Can aid peer transmit
1185 + * performance. Only define on VideoCore end, talking to host.
1186 + */
1187 +//#define VCHI_MSG_RX_OVERRUN
1188 +
1189 +/* How many transmit slots do we use. Generally don't need many, as the hardware driver
1190 + * underneath VCHI will usually have its own buffering. */
1191 +#ifndef VCHI_NUM_WRITE_SLOTS
1192 +# define VCHI_NUM_WRITE_SLOTS 4
1193 +#endif
1194 +
1195 +/* If a service has held or queued received messages in VCHI_XOFF_THRESHOLD or more slots,
1196 + * then it's taking up too much buffer space, and the peer service will be told to stop
1197 + * transmitting with an XOFF message. For this to be effective, the VCHI_NUM_READ_SLOTS
1198 + * needs to be considerably bigger than VCHI_NUM_WRITE_SLOTS, or the transmit latency
1199 + * is too high. */
1200 +#ifndef VCHI_XOFF_THRESHOLD
1201 +# define VCHI_XOFF_THRESHOLD (VCHI_NUM_READ_SLOTS / 2)
1202 +#endif
1203 +
1204 +/* After we've sent an XOFF, the peer will be told to resume transmission once the local
1205 + * service has dequeued/released enough messages that it's now occupying
1206 + * VCHI_XON_THRESHOLD slots or fewer. */
1207 +#ifndef VCHI_XON_THRESHOLD
1208 +# define VCHI_XON_THRESHOLD (VCHI_NUM_READ_SLOTS / 4)
1209 +#endif
1210 +
1211 +/* A size below which a bulk transfer omits the handshake completely and always goes
1212 + * via the message channel, if bulk auxiliary is being sent on that service. (The user
1213 + * can guarantee this by enabling unaligned transmits).
1214 + * Not API. */
1215 +#ifndef VCHI_MIN_BULK_SIZE
1216 +# define VCHI_MIN_BULK_SIZE ( VCHI_MAX_MSG_SIZE / 2 < 4096 ? VCHI_MAX_MSG_SIZE / 2 : 4096 )
1217 +#endif
1218 +
1219 +/* Maximum size of bulk transmission chunks, for each interface type. A trade-off between
1220 + * speed and latency; the smaller the chunk size the better chance of messages and other
1221 + * bulk transmissions getting in when big bulk transfers are happening. Set to 0 to not
1222 + * break transmissions into chunks.
1223 + */
1224 +#ifndef VCHI_MAX_BULK_CHUNK_SIZE_MPHI
1225 +# define VCHI_MAX_BULK_CHUNK_SIZE_MPHI (16 * 1024)
1226 +#endif
1227 +
1228 +/* NB Chunked CCP2 transmissions violate the letter of the CCP2 spec by using "JPEG8" mode
1229 + * with multiple-line frames. Only use if the receiver can cope. */
1230 +#ifndef VCHI_MAX_BULK_CHUNK_SIZE_CCP2
1231 +# define VCHI_MAX_BULK_CHUNK_SIZE_CCP2 0
1232 +#endif
1233 +
1234 +/* How many TX messages can we have pending in our transmit slots. Once exhausted,
1235 + * vchi_msg_queue will be blocked. */
1236 +#ifndef VCHI_TX_MSG_QUEUE_SIZE
1237 +# define VCHI_TX_MSG_QUEUE_SIZE 256
1238 +#endif
1239 +
1240 +/* How many RX messages can we have parsed in the receive slots. Once exhausted, parsing
1241 + * will be suspended until older messages are dequeued/released. */
1242 +#ifndef VCHI_RX_MSG_QUEUE_SIZE
1243 +# define VCHI_RX_MSG_QUEUE_SIZE 256
1244 +#endif
1245 +
1246 +/* Really should be able to cope if we run out of received message descriptors, by
1247 + * suspending parsing as the comment above says, but we don't. This sweeps the issue
1248 + * under the carpet. */
1249 +#if VCHI_RX_MSG_QUEUE_SIZE < (VCHI_MAX_MSG_SIZE/16 + 1) * VCHI_NUM_READ_SLOTS
1250 +# undef VCHI_RX_MSG_QUEUE_SIZE
1251 +# define VCHI_RX_MSG_QUEUE_SIZE (VCHI_MAX_MSG_SIZE/16 + 1) * VCHI_NUM_READ_SLOTS
1252 +#endif
1253 +
1254 +/* How many bulk transmits can we have pending. Once exhausted, vchi_bulk_queue_transmit
1255 + * will be blocked. */
1256 +#ifndef VCHI_TX_BULK_QUEUE_SIZE
1257 +# define VCHI_TX_BULK_QUEUE_SIZE 64
1258 +#endif
1259 +
1260 +/* How many bulk receives can we have pending. Once exhausted, vchi_bulk_queue_receive
1261 + * will be blocked. */
1262 +#ifndef VCHI_RX_BULK_QUEUE_SIZE
1263 +# define VCHI_RX_BULK_QUEUE_SIZE 64
1264 +#endif
1265 +
1266 +/* A limit on how many outstanding bulk requests we expect the peer to give us. If
1267 + * the peer asks for more than this, VCHI will fail and assert. The number is determined
1268 + * by the peer's hardware - it's the number of outstanding requests that can be queued
1269 + * on all bulk channels. VC3's MPHI peripheral allows 16. */
1270 +#ifndef VCHI_MAX_PEER_BULK_REQUESTS
1271 +# define VCHI_MAX_PEER_BULK_REQUESTS 32
1272 +#endif
1273 +
1274 +/* Define VCHI_CCP2TX_MANUAL_POWER if the host tells us when to turn the CCP2
1275 + * transmitter on and off.
1276 + */
1277 +/*#define VCHI_CCP2TX_MANUAL_POWER*/
1278 +
1279 +#ifndef VCHI_CCP2TX_MANUAL_POWER
1280 +
1281 +/* Timeout (in milliseconds) for putting the CCP2TX interface into IDLE state. Set
1282 + * negative for no IDLE.
1283 + */
1284 +# ifndef VCHI_CCP2TX_IDLE_TIMEOUT
1285 +# define VCHI_CCP2TX_IDLE_TIMEOUT 5
1286 +# endif
1287 +
1288 +/* Timeout (in milliseconds) for putting the CCP2TX interface into OFF state. Set
1289 + * negative for no OFF.
1290 + */
1291 +# ifndef VCHI_CCP2TX_OFF_TIMEOUT
1292 +# define VCHI_CCP2TX_OFF_TIMEOUT 1000
1293 +# endif
1294 +
1295 +#endif /* VCHI_CCP2TX_MANUAL_POWER */
1296 +
1297 +#endif /* VCHI_CFG_H_ */
1298 +
1299 +/****************************** End of file **********************************/
1300 diff --git a/drivers/misc/vc04_services/interface/vchi/vchi_cfg_internal.h b/drivers/misc/vc04_services/interface/vchi/vchi_cfg_internal.h
1301 new file mode 100644
1302 index 0000000..35dcba4
1303 --- /dev/null
1304 +++ b/drivers/misc/vc04_services/interface/vchi/vchi_cfg_internal.h
1305 @@ -0,0 +1,71 @@
1306 +/**
1307 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
1308 + *
1309 + * Redistribution and use in source and binary forms, with or without
1310 + * modification, are permitted provided that the following conditions
1311 + * are met:
1312 + * 1. Redistributions of source code must retain the above copyright
1313 + * notice, this list of conditions, and the following disclaimer,
1314 + * without modification.
1315 + * 2. Redistributions in binary form must reproduce the above copyright
1316 + * notice, this list of conditions and the following disclaimer in the
1317 + * documentation and/or other materials provided with the distribution.
1318 + * 3. The names of the above-listed copyright holders may not be used
1319 + * to endorse or promote products derived from this software without
1320 + * specific prior written permission.
1321 + *
1322 + * ALTERNATIVELY, this software may be distributed under the terms of the
1323 + * GNU General Public License ("GPL") version 2, as published by the Free
1324 + * Software Foundation.
1325 + *
1326 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
1327 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
1328 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
1329 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
1330 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
1331 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
1332 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
1333 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
1334 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
1335 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1336 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1337 + */
1338 +
1339 +#ifndef VCHI_CFG_INTERNAL_H_
1340 +#define VCHI_CFG_INTERNAL_H_
1341 +
1342 +/****************************************************************************************
1343 + * Control optimisation attempts.
1344 + ***************************************************************************************/
1345 +
1346 +// Don't use lots of short-term locks - use great long ones, reducing the overall locks-per-second
1347 +#define VCHI_COARSE_LOCKING
1348 +
1349 +// Avoid lock then unlock on exit from blocking queue operations (msg tx, bulk rx/tx)
1350 +// (only relevant if VCHI_COARSE_LOCKING)
1351 +#define VCHI_ELIDE_BLOCK_EXIT_LOCK
1352 +
1353 +// Avoid lock on non-blocking peek
1354 +// (only relevant if VCHI_COARSE_LOCKING)
1355 +#define VCHI_AVOID_PEEK_LOCK
1356 +
1357 +// Use one slot-handler thread per connection, rather than 1 thread dealing with all connections in rotation.
1358 +#define VCHI_MULTIPLE_HANDLER_THREADS
1359 +
1360 +// Put free descriptors onto the head of the free queue, rather than the tail, so that we don't thrash
1361 +// our way through the pool of descriptors.
1362 +#define VCHI_PUSH_FREE_DESCRIPTORS_ONTO_HEAD
1363 +
1364 +// Don't issue a MSG_AVAILABLE callback for every single message. Possibly only safe if VCHI_COARSE_LOCKING.
1365 +#define VCHI_FEWER_MSG_AVAILABLE_CALLBACKS
1366 +
1367 +// Don't use message descriptors for TX messages that don't need them
1368 +#define VCHI_MINIMISE_TX_MSG_DESCRIPTORS
1369 +
1370 +// Nano-locks for multiqueue
1371 +//#define VCHI_MQUEUE_NANOLOCKS
1372 +
1373 +// Lock-free(er) dequeuing
1374 +//#define VCHI_RX_NANOLOCKS
1375 +
1376 +#endif /*VCHI_CFG_INTERNAL_H_*/
1377 diff --git a/drivers/misc/vc04_services/interface/vchi/vchi_common.h b/drivers/misc/vc04_services/interface/vchi/vchi_common.h
1378 new file mode 100644
1379 index 0000000..9e6c00e
1380 --- /dev/null
1381 +++ b/drivers/misc/vc04_services/interface/vchi/vchi_common.h
1382 @@ -0,0 +1,163 @@
1383 +/**
1384 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
1385 + *
1386 + * Redistribution and use in source and binary forms, with or without
1387 + * modification, are permitted provided that the following conditions
1388 + * are met:
1389 + * 1. Redistributions of source code must retain the above copyright
1390 + * notice, this list of conditions, and the following disclaimer,
1391 + * without modification.
1392 + * 2. Redistributions in binary form must reproduce the above copyright
1393 + * notice, this list of conditions and the following disclaimer in the
1394 + * documentation and/or other materials provided with the distribution.
1395 + * 3. The names of the above-listed copyright holders may not be used
1396 + * to endorse or promote products derived from this software without
1397 + * specific prior written permission.
1398 + *
1399 + * ALTERNATIVELY, this software may be distributed under the terms of the
1400 + * GNU General Public License ("GPL") version 2, as published by the Free
1401 + * Software Foundation.
1402 + *
1403 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
1404 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
1405 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
1406 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
1407 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
1408 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
1409 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
1410 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
1411 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
1412 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1413 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1414 + */
1415 +
1416 +#ifndef VCHI_COMMON_H_
1417 +#define VCHI_COMMON_H_
1418 +
1419 +
1420 +//flags used when sending messages (must be bitmapped)
1421 +typedef enum
1422 +{
1423 + VCHI_FLAGS_NONE = 0x0,
1424 + VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE = 0x1, // waits for message to be received, or sent (NB. not the same as being seen on other side)
1425 + VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE = 0x2, // run a callback when message sent
1426 + VCHI_FLAGS_BLOCK_UNTIL_QUEUED = 0x4, // return once the transfer is in a queue ready to go
1427 + VCHI_FLAGS_ALLOW_PARTIAL = 0x8,
1428 + VCHI_FLAGS_BLOCK_UNTIL_DATA_READ = 0x10,
1429 + VCHI_FLAGS_CALLBACK_WHEN_DATA_READ = 0x20,
1430 +
1431 + VCHI_FLAGS_ALIGN_SLOT = 0x000080, // internal use only
1432 + VCHI_FLAGS_BULK_AUX_QUEUED = 0x010000, // internal use only
1433 + VCHI_FLAGS_BULK_AUX_COMPLETE = 0x020000, // internal use only
1434 + VCHI_FLAGS_BULK_DATA_QUEUED = 0x040000, // internal use only
1435 + VCHI_FLAGS_BULK_DATA_COMPLETE = 0x080000, // internal use only
1436 + VCHI_FLAGS_INTERNAL = 0xFF0000
1437 +} VCHI_FLAGS_T;
1438 +
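/* These are bit flags: callers combine them with '|' and implementations
 * test them with '&'. A small illustration (the helper name below is not
 * part of this header): */
static inline int vchi_flags_want_blocking(VCHI_FLAGS_T flags)
{
	/* non-zero if the caller asked to block at any stage of the operation */
	return (flags & (VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE |
			 VCHI_FLAGS_BLOCK_UNTIL_QUEUED |
			 VCHI_FLAGS_BLOCK_UNTIL_DATA_READ)) != 0;
}
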
1439 +// constants for vchi_crc_control()
1440 +typedef enum {
1441 + VCHI_CRC_NOTHING = -1,
1442 + VCHI_CRC_PER_SERVICE = 0,
1443 + VCHI_CRC_EVERYTHING = 1,
1444 +} VCHI_CRC_CONTROL_T;
1445 +
1446 +//callback reasons when an event occurs on a service
1447 +typedef enum
1448 +{
1449 + VCHI_CALLBACK_REASON_MIN,
1450 +
1451 + //This indicates that there is data available
1452 + //handle is the msg id that was transmitted with the data
1453 + // When a message is received and there was no FULL message available previously, send callback
1454 + // Tasks get kicked by the callback, reset their event and try and read from the fifo until it fails
1455 + VCHI_CALLBACK_MSG_AVAILABLE,
1456 + VCHI_CALLBACK_MSG_SENT,
1457 + VCHI_CALLBACK_MSG_SPACE_AVAILABLE, // XXX not yet implemented
1458 +
1459 + // This indicates that a transfer from the other side has completed
1460 + VCHI_CALLBACK_BULK_RECEIVED,
1461 + //This indicates that data queued up to be sent has now gone
1462 + //handle is the msg id that was used when sending the data
1463 + VCHI_CALLBACK_BULK_SENT,
1464 + VCHI_CALLBACK_BULK_RX_SPACE_AVAILABLE, // XXX not yet implemented
1465 + VCHI_CALLBACK_BULK_TX_SPACE_AVAILABLE, // XXX not yet implemented
1466 +
1467 + VCHI_CALLBACK_SERVICE_CLOSED,
1468 +
1469 + // this side has sent XOFF to peer due to lack of data consumption by service
1470 + // (suggests the service may need to take some recovery action if it has
1471 + // been deliberately holding off consuming data)
1472 + VCHI_CALLBACK_SENT_XOFF,
1473 + VCHI_CALLBACK_SENT_XON,
1474 +
1475 + // indicates that a bulk transfer has finished reading the source buffer
1476 + VCHI_CALLBACK_BULK_DATA_READ,
1477 +
1478 + // power notification events (currently host side only)
1479 + VCHI_CALLBACK_PEER_OFF,
1480 + VCHI_CALLBACK_PEER_SUSPENDED,
1481 + VCHI_CALLBACK_PEER_ON,
1482 + VCHI_CALLBACK_PEER_RESUMED,
1483 + VCHI_CALLBACK_FORCED_POWER_OFF,
1484 +
1485 +#ifdef USE_VCHIQ_ARM
1486 + // some extra notifications provided by vchiq_arm
1487 + VCHI_CALLBACK_SERVICE_OPENED,
1488 + VCHI_CALLBACK_BULK_RECEIVE_ABORTED,
1489 + VCHI_CALLBACK_BULK_TRANSMIT_ABORTED,
1490 +#endif
1491 +
1492 + VCHI_CALLBACK_REASON_MAX
1493 +} VCHI_CALLBACK_REASON_T;
1494 +
1495 +//Callback used by all services / bulk transfers
1496 +typedef void (*VCHI_CALLBACK_T)( void *callback_param, //my service local param
1497 + VCHI_CALLBACK_REASON_T reason,
1498 + void *handle ); //for transmitting msg's only
1499 +
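/* A skeletal callback matching the typedef above - an illustrative sketch,
 * not part of this header. callback_param is the per-service context the
 * client registered; handle carries the msg id for transmit completions. */
static void example_service_callback(void *callback_param,
                                     VCHI_CALLBACK_REASON_T reason,
                                     void *handle)
{
	(void)callback_param;
	(void)handle;

	switch (reason) {
	case VCHI_CALLBACK_MSG_AVAILABLE:
		/* kick the owning task to read from the fifo until it fails */
		break;
	case VCHI_CALLBACK_MSG_SENT:
	case VCHI_CALLBACK_BULK_SENT:
		/* handle is the msg id that was used when the data was queued */
		break;
	default:
		break;
	}
}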
1500 +
1501 +
1502 +/*
1503 + * Define vector struct for scatter-gather (vector) operations
1504 + * Vectors can be nested - if a vector element has negative length, then
1505 + * the data pointer is treated as pointing to another vector array, with
1506 + * '-vec_len' elements. Thus to append a header onto an existing vector,
1507 + * you can do this:
1508 + *
1509 + * void foo(const VCHI_MSG_VECTOR_T *v, int n)
1510 + * {
1511 + * VCHI_MSG_VECTOR_T nv[2];
1512 + * nv[0].vec_base = my_header;
1513 + * nv[0].vec_len = sizeof my_header;
1514 + * nv[1].vec_base = v;
1515 + * nv[1].vec_len = -n;
1516 + * ...
1517 + *
1518 + */
1519 +typedef struct vchi_msg_vector {
1520 + const void *vec_base;
1521 + int32_t vec_len;
1522 +} VCHI_MSG_VECTOR_T;
1523 +
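/* A compilable version of the header-prepend sketch in the comment above.
 * 'my_header' and 'example_prepend_header' are illustrative names; a real
 * caller would hand 'nv' (2 elements) to a vector-aware send routine. */
static const char my_header[4] = { 'h', 'd', 'r', '0' };

static void example_prepend_header(const VCHI_MSG_VECTOR_T *v, int n)
{
	VCHI_MSG_VECTOR_T nv[2];

	nv[0].vec_base = my_header;
	nv[0].vec_len  = sizeof my_header;
	nv[1].vec_base = v;	/* nested vector array...               */
	nv[1].vec_len  = -n;	/* ...of 'n' elements (negative length) */

	(void)nv;
}
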
1524 +// Opaque type for a connection API
1525 +typedef struct opaque_vchi_connection_api_t VCHI_CONNECTION_API_T;
1526 +
1527 +// Opaque type for a message driver
1528 +typedef struct opaque_vchi_message_driver_t VCHI_MESSAGE_DRIVER_T;
1529 +
1530 +
1531 +// Iterator structure for reading ahead through received message queue. Allocated by client,
1532 +// initialised by vchi_msg_look_ahead. Fields are for internal VCHI use only.
1533 +// Iterates over messages in queue at the instant of the call to vchi_msg_look_ahead -
1534 +// will not proceed to messages received since. Behaviour is undefined if an iterator
1535 +// is used again after messages for that service are removed/dequeued by any
1536 +// means other than vchi_msg_iter_... calls on the iterator itself.
1537 +typedef struct {
1538 + struct opaque_vchi_service_t *service;
1539 + void *last;
1540 + void *next;
1541 + void *remove;
1542 +} VCHI_MSG_ITER_T;
1543 +
1544 +
1545 +#endif // VCHI_COMMON_H_
1546 diff --git a/drivers/misc/vc04_services/interface/vchi/vchi_mh.h b/drivers/misc/vc04_services/interface/vchi/vchi_mh.h
1547 new file mode 100644
1548 index 0000000..198bd07
1549 --- /dev/null
1550 +++ b/drivers/misc/vc04_services/interface/vchi/vchi_mh.h
1551 @@ -0,0 +1,42 @@
1552 +/**
1553 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
1554 + *
1555 + * Redistribution and use in source and binary forms, with or without
1556 + * modification, are permitted provided that the following conditions
1557 + * are met:
1558 + * 1. Redistributions of source code must retain the above copyright
1559 + * notice, this list of conditions, and the following disclaimer,
1560 + * without modification.
1561 + * 2. Redistributions in binary form must reproduce the above copyright
1562 + * notice, this list of conditions and the following disclaimer in the
1563 + * documentation and/or other materials provided with the distribution.
1564 + * 3. The names of the above-listed copyright holders may not be used
1565 + * to endorse or promote products derived from this software without
1566 + * specific prior written permission.
1567 + *
1568 + * ALTERNATIVELY, this software may be distributed under the terms of the
1569 + * GNU General Public License ("GPL") version 2, as published by the Free
1570 + * Software Foundation.
1571 + *
1572 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
1573 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
1574 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
1575 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
1576 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
1577 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
1578 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
1579 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
1580 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
1581 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1582 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1583 + */
1584 +
1585 +#ifndef VCHI_MH_H_
1586 +#define VCHI_MH_H_
1587 +
1588 +#include <linux/types.h>
1589 +
1590 +typedef int32_t VCHI_MEM_HANDLE_T;
1591 +#define VCHI_MEM_HANDLE_INVALID 0
1592 +
1593 +#endif
1594 diff --git a/drivers/misc/vc04_services/interface/vchiq_arm/vchiq.h b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq.h
1595 new file mode 100644
1596 index 0000000..ad398ba
1597 --- /dev/null
1598 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq.h
1599 @@ -0,0 +1,40 @@
1600 +/**
1601 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
1602 + *
1603 + * Redistribution and use in source and binary forms, with or without
1604 + * modification, are permitted provided that the following conditions
1605 + * are met:
1606 + * 1. Redistributions of source code must retain the above copyright
1607 + * notice, this list of conditions, and the following disclaimer,
1608 + * without modification.
1609 + * 2. Redistributions in binary form must reproduce the above copyright
1610 + * notice, this list of conditions and the following disclaimer in the
1611 + * documentation and/or other materials provided with the distribution.
1612 + * 3. The names of the above-listed copyright holders may not be used
1613 + * to endorse or promote products derived from this software without
1614 + * specific prior written permission.
1615 + *
1616 + * ALTERNATIVELY, this software may be distributed under the terms of the
1617 + * GNU General Public License ("GPL") version 2, as published by the Free
1618 + * Software Foundation.
1619 + *
1620 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
1621 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
1622 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
1623 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
1624 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
1625 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
1626 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
1627 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
1628 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
1629 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1630 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1631 + */
1632 +
1633 +#ifndef VCHIQ_VCHIQ_H
1634 +#define VCHIQ_VCHIQ_H
1635 +
1636 +#include "vchiq_if.h"
1637 +#include "vchiq_util.h"
1638 +
1639 +#endif
1640 diff --git a/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835.h b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835.h
1641 new file mode 100644
1642 index 0000000..7ea5c64
1643 --- /dev/null
1644 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835.h
1645 @@ -0,0 +1,42 @@
1646 +/**
1647 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
1648 + *
1649 + * Redistribution and use in source and binary forms, with or without
1650 + * modification, are permitted provided that the following conditions
1651 + * are met:
1652 + * 1. Redistributions of source code must retain the above copyright
1653 + * notice, this list of conditions, and the following disclaimer,
1654 + * without modification.
1655 + * 2. Redistributions in binary form must reproduce the above copyright
1656 + * notice, this list of conditions and the following disclaimer in the
1657 + * documentation and/or other materials provided with the distribution.
1658 + * 3. The names of the above-listed copyright holders may not be used
1659 + * to endorse or promote products derived from this software without
1660 + * specific prior written permission.
1661 + *
1662 + * ALTERNATIVELY, this software may be distributed under the terms of the
1663 + * GNU General Public License ("GPL") version 2, as published by the Free
1664 + * Software Foundation.
1665 + *
1666 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
1667 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
1668 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
1669 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
1670 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
1671 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
1672 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
1673 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
1674 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
1675 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1676 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1677 + */
1678 +
1679 +#ifndef VCHIQ_2835_H
1680 +#define VCHIQ_2835_H
1681 +
1682 +#include "vchiq_pagelist.h"
1683 +
1684 +#define VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX 0
1685 +#define VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX 1
1686 +
1687 +#endif /* VCHIQ_2835_H */
1688 diff --git a/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
1689 new file mode 100644
1690 index 0000000..b3bdaa2
1691 --- /dev/null
1692 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
1693 @@ -0,0 +1,561 @@
1694 +/**
1695 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
1696 + *
1697 + * Redistribution and use in source and binary forms, with or without
1698 + * modification, are permitted provided that the following conditions
1699 + * are met:
1700 + * 1. Redistributions of source code must retain the above copyright
1701 + * notice, this list of conditions, and the following disclaimer,
1702 + * without modification.
1703 + * 2. Redistributions in binary form must reproduce the above copyright
1704 + * notice, this list of conditions and the following disclaimer in the
1705 + * documentation and/or other materials provided with the distribution.
1706 + * 3. The names of the above-listed copyright holders may not be used
1707 + * to endorse or promote products derived from this software without
1708 + * specific prior written permission.
1709 + *
1710 + * ALTERNATIVELY, this software may be distributed under the terms of the
1711 + * GNU General Public License ("GPL") version 2, as published by the Free
1712 + * Software Foundation.
1713 + *
1714 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
1715 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
1716 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
1717 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
1718 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
1719 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
1720 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
1721 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
1722 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
1723 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1724 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1725 + */
1726 +
1727 +#include <linux/kernel.h>
1728 +#include <linux/types.h>
1729 +#include <linux/errno.h>
1730 +#include <linux/interrupt.h>
1731 +#include <linux/irq.h>
1732 +#include <linux/pagemap.h>
1733 +#include <linux/dma-mapping.h>
1734 +#include <linux/version.h>
1735 +#include <linux/io.h>
1736 +#include <linux/uaccess.h>
1737 +#include <asm/pgtable.h>
1738 +
1739 +#include <mach/irqs.h>
1740 +
1741 +#include <mach/platform.h>
1742 +#include <mach/vcio.h>
1743 +
1744 +#define TOTAL_SLOTS (VCHIQ_SLOT_ZERO_SLOTS + 2 * 32)
1745 +
1746 +#define VCHIQ_DOORBELL_IRQ IRQ_ARM_DOORBELL_0
1747 +#define VCHIQ_ARM_ADDRESS(x) ((void *)__virt_to_bus((unsigned)x))
1748 +
1749 +#include "vchiq_arm.h"
1750 +#include "vchiq_2835.h"
1751 +#include "vchiq_connected.h"
1752 +
1753 +#define MAX_FRAGMENTS (VCHIQ_NUM_CURRENT_BULKS * 2)
1754 +
1755 +typedef struct vchiq_2835_state_struct {
1756 + int inited;
1757 + VCHIQ_ARM_STATE_T arm_state;
1758 +} VCHIQ_2835_ARM_STATE_T;
1759 +
1760 +static char *g_slot_mem;
1761 +static int g_slot_mem_size;
1762 +dma_addr_t g_slot_phys;
1763 +static FRAGMENTS_T *g_fragments_base;
1764 +static FRAGMENTS_T *g_free_fragments;
1765 +struct semaphore g_free_fragments_sema;
1766 +
1767 +extern int vchiq_arm_log_level;
1768 +
1769 +static DEFINE_SEMAPHORE(g_free_fragments_mutex);
1770 +
1771 +static irqreturn_t
1772 +vchiq_doorbell_irq(int irq, void *dev_id);
1773 +
1774 +static int
1775 +create_pagelist(char __user *buf, size_t count, unsigned short type,
1776 + struct task_struct *task, PAGELIST_T ** ppagelist);
1777 +
1778 +static void
1779 +free_pagelist(PAGELIST_T *pagelist, int actual);
1780 +
1781 +int __init
1782 +vchiq_platform_init(VCHIQ_STATE_T *state)
1783 +{
1784 + VCHIQ_SLOT_ZERO_T *vchiq_slot_zero;
1785 + int frag_mem_size;
1786 + int err;
1787 + int i;
1788 +
1789 + /* Allocate space for the channels in coherent memory */
1790 + g_slot_mem_size = PAGE_ALIGN(TOTAL_SLOTS * VCHIQ_SLOT_SIZE);
1791 + frag_mem_size = PAGE_ALIGN(sizeof(FRAGMENTS_T) * MAX_FRAGMENTS);
1792 +
1793 + g_slot_mem = dma_alloc_coherent(NULL, g_slot_mem_size + frag_mem_size,
1794 + &g_slot_phys, GFP_ATOMIC);
1795 +
1796 + if (!g_slot_mem) {
1797 + vchiq_log_error(vchiq_arm_log_level,
1798 + "Unable to allocate channel memory");
1799 + err = -ENOMEM;
1800 + goto failed_alloc;
1801 + }
1802 +
1803 + WARN_ON(((int)g_slot_mem & (PAGE_SIZE - 1)) != 0);
1804 +
1805 + vchiq_slot_zero = vchiq_init_slots(g_slot_mem, g_slot_mem_size);
1806 + if (!vchiq_slot_zero) {
1807 + err = -EINVAL;
1808 + goto failed_init_slots;
1809 + }
1810 +
1811 + vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX] =
1812 + (int)g_slot_phys + g_slot_mem_size;
1813 + vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX] =
1814 + MAX_FRAGMENTS;
1815 +
1816 + g_fragments_base = (FRAGMENTS_T *)(g_slot_mem + g_slot_mem_size);
1817 + g_slot_mem_size += frag_mem_size;
1818 +
1819 + g_free_fragments = g_fragments_base;
1820 + for (i = 0; i < (MAX_FRAGMENTS - 1); i++) {
1821 + *(FRAGMENTS_T **)&g_fragments_base[i] =
1822 + &g_fragments_base[i + 1];
1823 + }
1824 + *(FRAGMENTS_T **)&g_fragments_base[i] = NULL;
1825 + sema_init(&g_free_fragments_sema, MAX_FRAGMENTS);
1826 +
1827 + if (vchiq_init_state(state, vchiq_slot_zero, 0/*slave*/) !=
1828 + VCHIQ_SUCCESS) {
1829 + err = -EINVAL;
1830 + goto failed_vchiq_init;
1831 + }
1832 +
1833 + err = request_irq(VCHIQ_DOORBELL_IRQ, vchiq_doorbell_irq,
1834 + IRQF_IRQPOLL, "VCHIQ doorbell",
1835 + state);
1836 + if (err < 0) {
1837 + vchiq_log_error(vchiq_arm_log_level, "%s: failed to register "
1838 + "irq=%d err=%d", __func__,
1839 + VCHIQ_DOORBELL_IRQ, err);
1840 + goto failed_request_irq;
1841 + }
1842 +
1843 + /* Send the base address of the slots to VideoCore */
1844 +
1845 + dsb(); /* Ensure all writes have completed */
1846 +
1847 + bcm_mailbox_write(MBOX_CHAN_VCHIQ, (unsigned int)g_slot_phys);
1848 +
1849 + vchiq_log_info(vchiq_arm_log_level,
1850 + "vchiq_init - done (slots %x, phys %x)",
1851 + (unsigned int)vchiq_slot_zero, g_slot_phys);
1852 +
1853 + vchiq_call_connected_callbacks();
1854 +
1855 + return 0;
1856 +
1857 +failed_request_irq:
1858 +failed_vchiq_init:
1859 +failed_init_slots:
1860 + dma_free_coherent(NULL, g_slot_mem_size, g_slot_mem, g_slot_phys);
1861 +
1862 +failed_alloc:
1863 + return err;
1864 +}
1865 +
1866 +void __exit
1867 +vchiq_platform_exit(VCHIQ_STATE_T *state)
1868 +{
1869 + free_irq(VCHIQ_DOORBELL_IRQ, state);
1870 + dma_free_coherent(NULL, g_slot_mem_size,
1871 + g_slot_mem, g_slot_phys);
1872 +}
1873 +
1874 +
1875 +VCHIQ_STATUS_T
1876 +vchiq_platform_init_state(VCHIQ_STATE_T *state)
1877 +{
1878 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
1879 + state->platform_state = kzalloc(sizeof(VCHIQ_2835_ARM_STATE_T), GFP_KERNEL);
1880 + ((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->inited = 1;
1881 + status = vchiq_arm_init_state(state, &((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->arm_state);
1882 + if(status != VCHIQ_SUCCESS)
1883 + {
1884 + ((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->inited = 0;
1885 + }
1886 + return status;
1887 +}
1888 +
1889 +VCHIQ_ARM_STATE_T*
1890 +vchiq_platform_get_arm_state(VCHIQ_STATE_T *state)
1891 +{
1892 + if(!((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->inited)
1893 + {
1894 + BUG();
1895 + }
1896 + return &((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->arm_state;
1897 +}
1898 +
1899 +void
1900 +remote_event_signal(REMOTE_EVENT_T *event)
1901 +{
1902 + wmb();
1903 +
1904 + event->fired = 1;
1905 +
1906 + dsb(); /* data barrier operation */
1907 +
1908 + if (event->armed) {
1909 + /* trigger vc interrupt */
1910 +
1911 + writel(0, __io_address(ARM_0_BELL2));
1912 + }
1913 +}
1914 +
1915 +int
1916 +vchiq_copy_from_user(void *dst, const void *src, int size)
1917 +{
1918 + if ((uint32_t)src < TASK_SIZE) {
1919 + return copy_from_user(dst, src, size);
1920 + } else {
1921 + memcpy(dst, src, size);
1922 + return 0;
1923 + }
1924 +}
1925 +
1926 +VCHIQ_STATUS_T
1927 +vchiq_prepare_bulk_data(VCHIQ_BULK_T *bulk, VCHI_MEM_HANDLE_T memhandle,
1928 + void *offset, int size, int dir)
1929 +{
1930 + PAGELIST_T *pagelist;
1931 + int ret;
1932 +
1933 + WARN_ON(memhandle != VCHI_MEM_HANDLE_INVALID);
1934 +
1935 + ret = create_pagelist((char __user *)offset, size,
1936 + (dir == VCHIQ_BULK_RECEIVE)
1937 + ? PAGELIST_READ
1938 + : PAGELIST_WRITE,
1939 + current,
1940 + &pagelist);
1941 + if (ret != 0)
1942 + return VCHIQ_ERROR;
1943 +
1944 + bulk->handle = memhandle;
1945 + bulk->data = VCHIQ_ARM_ADDRESS(pagelist);
1946 +
1947 + /* Store the pagelist address in remote_data, which isn't used by the
1948 + slave. */
1949 + bulk->remote_data = pagelist;
1950 +
1951 + return VCHIQ_SUCCESS;
1952 +}
1953 +
1954 +void
1955 +vchiq_complete_bulk(VCHIQ_BULK_T *bulk)
1956 +{
1957 + if (bulk && bulk->remote_data && bulk->actual)
1958 + free_pagelist((PAGELIST_T *)bulk->remote_data, bulk->actual);
1959 +}
1960 +
1961 +void
1962 +vchiq_transfer_bulk(VCHIQ_BULK_T *bulk)
1963 +{
1964 + /*
1965 + * This should only be called on the master (VideoCore) side, but
1966 + * provide an implementation to avoid the need for ifdefery.
1967 + */
1968 + BUG();
1969 +}
1970 +
1971 +void
1972 +vchiq_dump_platform_state(void *dump_context)
1973 +{
1974 + char buf[80];
1975 + int len;
1976 + len = snprintf(buf, sizeof(buf),
1977 + " Platform: 2835 (VC master)");
1978 + vchiq_dump(dump_context, buf, len + 1);
1979 +}
1980 +
1981 +VCHIQ_STATUS_T
1982 +vchiq_platform_suspend(VCHIQ_STATE_T *state)
1983 +{
1984 + return VCHIQ_ERROR;
1985 +}
1986 +
1987 +VCHIQ_STATUS_T
1988 +vchiq_platform_resume(VCHIQ_STATE_T *state)
1989 +{
1990 + return VCHIQ_SUCCESS;
1991 +}
1992 +
1993 +void
1994 +vchiq_platform_paused(VCHIQ_STATE_T *state)
1995 +{
1996 +}
1997 +
1998 +void
1999 +vchiq_platform_resumed(VCHIQ_STATE_T *state)
2000 +{
2001 +}
2002 +
2003 +int
2004 +vchiq_platform_videocore_wanted(VCHIQ_STATE_T* state)
2005 +{
2006 + return 1; // autosuspend not supported - videocore always wanted
2007 +}
2008 +
2009 +int
2010 +vchiq_platform_use_suspend_timer(void)
2011 +{
2012 + return 0;
2013 +}
2014 +void
2015 +vchiq_dump_platform_use_state(VCHIQ_STATE_T *state)
2016 +{
2017 +	vchiq_log_info(vchiq_arm_log_level, "Suspend timer not in use");
2018 +}
2019 +void
2020 +vchiq_platform_handle_timeout(VCHIQ_STATE_T *state)
2021 +{
2022 + (void)state;
2023 +}
2024 +/*
2025 + * Local functions
2026 + */
2027 +
2028 +static irqreturn_t
2029 +vchiq_doorbell_irq(int irq, void *dev_id)
2030 +{
2031 + VCHIQ_STATE_T *state = dev_id;
2032 + irqreturn_t ret = IRQ_NONE;
2033 + unsigned int status;
2034 +
2035 + /* Read (and clear) the doorbell */
2036 + status = readl(__io_address(ARM_0_BELL0));
2037 +
2038 + if (status & 0x4) { /* Was the doorbell rung? */
2039 + remote_event_pollall(state);
2040 + ret = IRQ_HANDLED;
2041 + }
2042 +
2043 + return ret;
2044 +}
2045 +
2046 +/* There is a potential problem with partial cache lines (pages?)
2047 +** at the ends of the block when reading. If the CPU accessed anything in
2048 +** the same line (page?) then it may have pulled old data into the cache,
2049 +** obscuring the new data underneath. We can solve this by transferring the
2050 +** partial cache lines separately, and allowing the ARM to copy into the
2051 +** cached area.
2052 +
2053 +** N.B. This implementation plays slightly fast and loose with the Linux
2054 +** driver programming rules, e.g. its use of __virt_to_bus instead of
2055 +** dma_map_single, but it isn't a multi-platform driver and it benefits
2056 +** from increased speed as a result.
2057 +*/
2058 +
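/* Sketch of how the partial cache-line ("fragment") sizes fall out of an
** offset/length pair - the same arithmetic appears in free_pagelist below.
** The helper is illustrative and not used by the driver; it assumes the
** cache line size is a power of two, as CACHE_LINE_SIZE is.
*/
static inline void
fragment_sizes_sketch(unsigned int offset, unsigned int length,
	unsigned int cache_line, unsigned int *head_bytes,
	unsigned int *tail_bytes)
{
	/* bytes from 'offset' up to the first cache-line boundary */
	*head_bytes = (cache_line - offset) & (cache_line - 1);
	/* bytes spilling past the last cache-line boundary */
	*tail_bytes = (offset + length) & (cache_line - 1);
}
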
2059 +static int
2060 +create_pagelist(char __user *buf, size_t count, unsigned short type,
2061 + struct task_struct *task, PAGELIST_T ** ppagelist)
2062 +{
2063 + PAGELIST_T *pagelist;
2064 + struct page **pages;
2065 + struct page *page;
2066 + unsigned long *addrs;
2067 + unsigned int num_pages, offset, i;
2068 + char *addr, *base_addr, *next_addr;
2069 + int run, addridx, actual_pages;
2070 + unsigned long *need_release;
2071 +
2072 + offset = (unsigned int)buf & (PAGE_SIZE - 1);
2073 + num_pages = (count + offset + PAGE_SIZE - 1) / PAGE_SIZE;
2074 +
2075 + *ppagelist = NULL;
2076 +
2077 + /* Allocate enough storage to hold the page pointers and the page
2078 + ** list
2079 + */
2080 + pagelist = kmalloc(sizeof(PAGELIST_T) +
2081 + (num_pages * sizeof(unsigned long)) +
2082 + sizeof(unsigned long) +
2083 + (num_pages * sizeof(pages[0])),
2084 + GFP_KERNEL);
2085 +
2086 + vchiq_log_trace(vchiq_arm_log_level,
2087 + "create_pagelist - %x", (unsigned int)pagelist);
2088 + if (!pagelist)
2089 + return -ENOMEM;
2090 +
2091 + addrs = pagelist->addrs;
2092 + need_release = (unsigned long *)(addrs + num_pages);
2093 + pages = (struct page **)(addrs + num_pages + 1);
2094 +
2095 + if (is_vmalloc_addr(buf)) {
2096 + for (actual_pages = 0; actual_pages < num_pages; actual_pages++) {
2097 + pages[actual_pages] = vmalloc_to_page(buf + (actual_pages * PAGE_SIZE));
2098 + }
2099 + *need_release = 0; /* do not try and release vmalloc pages */
2100 + } else {
2101 + down_read(&task->mm->mmap_sem);
2102 + actual_pages = get_user_pages(task, task->mm,
2103 + (unsigned long)buf & ~(PAGE_SIZE - 1),
2104 + num_pages,
2105 + (type == PAGELIST_READ) /*Write */ ,
2106 + 0 /*Force */ ,
2107 + pages,
2108 + NULL /*vmas */);
2109 + up_read(&task->mm->mmap_sem);
2110 +
2111 + if (actual_pages != num_pages) {
2112 + vchiq_log_info(vchiq_arm_log_level,
2113 + "create_pagelist - only %d/%d pages locked",
2114 + actual_pages,
2115 + num_pages);
2116 +
2117 + /* This is probably due to the process being killed */
2118 + while (actual_pages > 0)
2119 + {
2120 + actual_pages--;
2121 + page_cache_release(pages[actual_pages]);
2122 + }
2123 + kfree(pagelist);
2124 + if (actual_pages == 0)
2125 + actual_pages = -ENOMEM;
2126 + return actual_pages;
2127 + }
2128 + *need_release = 1; /* release user pages */
2129 + }
2130 +
2131 + pagelist->length = count;
2132 + pagelist->type = type;
2133 + pagelist->offset = offset;
2134 +
2135 + /* Group the pages into runs of contiguous pages */
2136 +
2137 + base_addr = VCHIQ_ARM_ADDRESS(page_address(pages[0]));
2138 + next_addr = base_addr + PAGE_SIZE;
2139 + addridx = 0;
2140 + run = 0;
2141 +
2142 + for (i = 1; i < num_pages; i++) {
2143 + addr = VCHIQ_ARM_ADDRESS(page_address(pages[i]));
2144 + if ((addr == next_addr) && (run < (PAGE_SIZE - 1))) {
2145 + next_addr += PAGE_SIZE;
2146 + run++;
2147 + } else {
2148 + addrs[addridx] = (unsigned long)base_addr + run;
2149 + addridx++;
2150 + base_addr = addr;
2151 + next_addr = addr + PAGE_SIZE;
2152 + run = 0;
2153 + }
2154 + }
2155 +
2156 + addrs[addridx] = (unsigned long)base_addr + run;
2157 + addridx++;
2158 +
2159 + /* Partial cache lines (fragments) require special measures */
2160 + if ((type == PAGELIST_READ) &&
2161 + ((pagelist->offset & (CACHE_LINE_SIZE - 1)) ||
2162 + ((pagelist->offset + pagelist->length) &
2163 + (CACHE_LINE_SIZE - 1)))) {
2164 + FRAGMENTS_T *fragments;
2165 +
2166 + if (down_interruptible(&g_free_fragments_sema) != 0) {
2167 + kfree(pagelist);
2168 + return -EINTR;
2169 + }
2170 +
2171 + WARN_ON(g_free_fragments == NULL);
2172 +
2173 + down(&g_free_fragments_mutex);
2174 + fragments = (FRAGMENTS_T *) g_free_fragments;
2175 + WARN_ON(fragments == NULL);
2176 + g_free_fragments = *(FRAGMENTS_T **) g_free_fragments;
2177 + up(&g_free_fragments_mutex);
2178 + pagelist->type =
2179 + PAGELIST_READ_WITH_FRAGMENTS + (fragments -
2180 + g_fragments_base);
2181 + }
2182 +
2183 + for (page = virt_to_page(pagelist);
2184 + page <= virt_to_page(addrs + num_pages - 1); page++) {
2185 + flush_dcache_page(page);
2186 + }
2187 +
2188 + *ppagelist = pagelist;
2189 +
2190 + return 0;
2191 +}
2192 +
2193 +static void
2194 +free_pagelist(PAGELIST_T *pagelist, int actual)
2195 +{
2196 + unsigned long *need_release;
2197 + struct page **pages;
2198 + unsigned int num_pages, i;
2199 +
2200 + vchiq_log_trace(vchiq_arm_log_level,
2201 + "free_pagelist - %x, %d", (unsigned int)pagelist, actual);
2202 +
2203 + num_pages =
2204 + (pagelist->length + pagelist->offset + PAGE_SIZE - 1) /
2205 + PAGE_SIZE;
2206 +
2207 + need_release = (unsigned long *)(pagelist->addrs + num_pages);
2208 + pages = (struct page **)(pagelist->addrs + num_pages + 1);
2209 +
2210 + /* Deal with any partial cache lines (fragments) */
2211 + if (pagelist->type >= PAGELIST_READ_WITH_FRAGMENTS) {
2212 + FRAGMENTS_T *fragments = g_fragments_base +
2213 + (pagelist->type - PAGELIST_READ_WITH_FRAGMENTS);
2214 + int head_bytes, tail_bytes;
2215 + head_bytes = (CACHE_LINE_SIZE - pagelist->offset) &
2216 + (CACHE_LINE_SIZE - 1);
2217 + tail_bytes = (pagelist->offset + actual) &
2218 + (CACHE_LINE_SIZE - 1);
2219 +
2220 + if ((actual >= 0) && (head_bytes != 0)) {
2221 + if (head_bytes > actual)
2222 + head_bytes = actual;
2223 +
2224 + memcpy((char *)page_address(pages[0]) +
2225 + pagelist->offset,
2226 + fragments->headbuf,
2227 + head_bytes);
2228 + }
2229 + if ((actual >= 0) && (head_bytes < actual) &&
2230 + (tail_bytes != 0)) {
2231 + memcpy((char *)page_address(pages[num_pages - 1]) +
2232 + ((pagelist->offset + actual) &
2233 + (PAGE_SIZE - 1) & ~(CACHE_LINE_SIZE - 1)),
2234 + fragments->tailbuf, tail_bytes);
2235 + }
2236 +
2237 + down(&g_free_fragments_mutex);
2238 + *(FRAGMENTS_T **) fragments = g_free_fragments;
2239 + g_free_fragments = fragments;
2240 + up(&g_free_fragments_mutex);
2241 + up(&g_free_fragments_sema);
2242 + }
2243 +
2244 + if (*need_release) {
2245 + for (i = 0; i < num_pages; i++) {
2246 + if (pagelist->type != PAGELIST_WRITE)
2247 + set_page_dirty(pages[i]);
2248 +
2249 + page_cache_release(pages[i]);
2250 + }
2251 + }
2252 +
2253 + kfree(pagelist);
2254 +}
2255 diff --git a/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.c
2256 new file mode 100644
2257 index 0000000..c1fb8c3
2258 --- /dev/null
2259 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.c
2260 @@ -0,0 +1,2813 @@
2261 +/**
2262 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
2263 + *
2264 + * Redistribution and use in source and binary forms, with or without
2265 + * modification, are permitted provided that the following conditions
2266 + * are met:
2267 + * 1. Redistributions of source code must retain the above copyright
2268 + * notice, this list of conditions, and the following disclaimer,
2269 + * without modification.
2270 + * 2. Redistributions in binary form must reproduce the above copyright
2271 + * notice, this list of conditions and the following disclaimer in the
2272 + * documentation and/or other materials provided with the distribution.
2273 + * 3. The names of the above-listed copyright holders may not be used
2274 + * to endorse or promote products derived from this software without
2275 + * specific prior written permission.
2276 + *
2277 + * ALTERNATIVELY, this software may be distributed under the terms of the
2278 + * GNU General Public License ("GPL") version 2, as published by the Free
2279 + * Software Foundation.
2280 + *
2281 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
2282 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
2283 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
2284 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
2285 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
2286 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
2287 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
2288 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
2289 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
2290 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
2291 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
2292 + */
2293 +
2294 +#include <linux/kernel.h>
2295 +#include <linux/module.h>
2296 +#include <linux/types.h>
2297 +#include <linux/errno.h>
2298 +#include <linux/cdev.h>
2299 +#include <linux/fs.h>
2300 +#include <linux/device.h>
2301 +#include <linux/mm.h>
2302 +#include <linux/highmem.h>
2303 +#include <linux/pagemap.h>
2304 +#include <linux/bug.h>
2305 +#include <linux/semaphore.h>
2306 +#include <linux/list.h>
2307 +#include <linux/proc_fs.h>
2308 +
2309 +#include "vchiq_core.h"
2310 +#include "vchiq_ioctl.h"
2311 +#include "vchiq_arm.h"
2312 +
2313 +#define DEVICE_NAME "vchiq"
2314 +
2315 +/* Override the default prefix, which would be vchiq_arm (from the filename) */
2316 +#undef MODULE_PARAM_PREFIX
2317 +#define MODULE_PARAM_PREFIX DEVICE_NAME "."
2318 +
2319 +#define VCHIQ_MINOR 0
2320 +
2321 +/* Some per-instance constants */
2322 +#define MAX_COMPLETIONS 16
2323 +#define MAX_SERVICES 64
2324 +#define MAX_ELEMENTS 8
2325 +#define MSG_QUEUE_SIZE 64
2326 +
2327 +#define KEEPALIVE_VER 1
2328 +#define KEEPALIVE_VER_MIN KEEPALIVE_VER
2329 +
2330 +/* Run time control of log level, based on KERN_XXX level. */
2331 +int vchiq_arm_log_level = VCHIQ_LOG_DEFAULT;
2332 +int vchiq_susp_log_level = VCHIQ_LOG_ERROR;
2333 +
2334 +#define SUSPEND_TIMER_TIMEOUT_MS 100
2335 +#define SUSPEND_RETRY_TIMER_TIMEOUT_MS 1000
2336 +
2337 +#define VC_SUSPEND_NUM_OFFSET 3 /* number of values before idle which are -ve */
2338 +static const char *const suspend_state_names[] = {
2339 + "VC_SUSPEND_FORCE_CANCELED",
2340 + "VC_SUSPEND_REJECTED",
2341 + "VC_SUSPEND_FAILED",
2342 + "VC_SUSPEND_IDLE",
2343 + "VC_SUSPEND_REQUESTED",
2344 + "VC_SUSPEND_IN_PROGRESS",
2345 + "VC_SUSPEND_SUSPENDED"
2346 +};
2347 +#define VC_RESUME_NUM_OFFSET 1 /* number of values before idle which are -ve */
2348 +static const char *const resume_state_names[] = {
2349 + "VC_RESUME_FAILED",
2350 + "VC_RESUME_IDLE",
2351 + "VC_RESUME_REQUESTED",
2352 + "VC_RESUME_IN_PROGRESS",
2353 + "VC_RESUME_RESUMED"
2354 +};
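
/* The *_NUM_OFFSET values above let the (partly negative) suspend/resume
** state enums index straight into these name tables. A sketch of the
** intended lookup (helper names are illustrative only):
*/
static inline const char *suspend_state_name_sketch(int vc_suspend_state)
{
	return suspend_state_names[vc_suspend_state + VC_SUSPEND_NUM_OFFSET];
}
static inline const char *resume_state_name_sketch(int vc_resume_state)
{
	return resume_state_names[vc_resume_state + VC_RESUME_NUM_OFFSET];
}
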
2355 +/* The number of times we allow force suspend to timeout before actually
2356 +** _forcing_ suspend. This is to cater for SW which fails to release vchiq
2357 +** correctly - we don't want to prevent ARM suspend indefinitely in this case.
2358 +*/
2359 +#define FORCE_SUSPEND_FAIL_MAX 8
2360 +
2361 +/* The time in ms allowed for videocore to go idle when force suspend has been
2362 + * requested */
2363 +#define FORCE_SUSPEND_TIMEOUT_MS 200
2364 +
2365 +
2366 +static void suspend_timer_callback(unsigned long context);
2367 +static int vchiq_proc_add_instance(VCHIQ_INSTANCE_T instance);
2368 +static void vchiq_proc_remove_instance(VCHIQ_INSTANCE_T instance);
2369 +
2370 +
2371 +typedef struct user_service_struct {
2372 + VCHIQ_SERVICE_T *service;
2373 + void *userdata;
2374 + VCHIQ_INSTANCE_T instance;
2375 + int is_vchi;
2376 + int dequeue_pending;
2377 + int message_available_pos;
2378 + int msg_insert;
2379 + int msg_remove;
2380 + struct semaphore insert_event;
2381 + struct semaphore remove_event;
2382 + VCHIQ_HEADER_T * msg_queue[MSG_QUEUE_SIZE];
2383 +} USER_SERVICE_T;
2384 +
2385 +struct bulk_waiter_node {
2386 + struct bulk_waiter bulk_waiter;
2387 + int pid;
2388 + struct list_head list;
2389 +};
2390 +
2391 +struct vchiq_instance_struct {
2392 + VCHIQ_STATE_T *state;
2393 + VCHIQ_COMPLETION_DATA_T completions[MAX_COMPLETIONS];
2394 + int completion_insert;
2395 + int completion_remove;
2396 + struct semaphore insert_event;
2397 + struct semaphore remove_event;
2398 + struct mutex completion_mutex;
2399 +
2400 + int connected;
2401 + int closing;
2402 + int pid;
2403 + int mark;
2404 +
2405 + struct list_head bulk_waiter_list;
2406 + struct mutex bulk_waiter_list_mutex;
2407 +
2408 + struct proc_dir_entry *proc_entry;
2409 +};
2410 +
2411 +typedef struct dump_context_struct {
2412 + char __user *buf;
2413 + size_t actual;
2414 + size_t space;
2415 + loff_t offset;
2416 +} DUMP_CONTEXT_T;
2417 +
2418 +static struct cdev vchiq_cdev;
2419 +static dev_t vchiq_devid;
2420 +static VCHIQ_STATE_T g_state;
2421 +static struct class *vchiq_class;
2422 +static struct device *vchiq_dev;
2423 +static DEFINE_SPINLOCK(msg_queue_spinlock);
2424 +
2425 +static const char *const ioctl_names[] = {
2426 + "CONNECT",
2427 + "SHUTDOWN",
2428 + "CREATE_SERVICE",
2429 + "REMOVE_SERVICE",
2430 + "QUEUE_MESSAGE",
2431 + "QUEUE_BULK_TRANSMIT",
2432 + "QUEUE_BULK_RECEIVE",
2433 + "AWAIT_COMPLETION",
2434 + "DEQUEUE_MESSAGE",
2435 + "GET_CLIENT_ID",
2436 + "GET_CONFIG",
2437 + "CLOSE_SERVICE",
2438 + "USE_SERVICE",
2439 + "RELEASE_SERVICE",
2440 + "SET_SERVICE_OPTION",
2441 + "DUMP_PHYS_MEM"
2442 +};
2443 +
2444 +vchiq_static_assert((sizeof(ioctl_names)/sizeof(ioctl_names[0])) ==
2445 + (VCHIQ_IOC_MAX + 1));
2446 +
2447 +static void
2448 +dump_phys_mem(void *virt_addr, uint32_t num_bytes);
2449 +
2450 +/****************************************************************************
2451 +*
2452 +* add_completion
2453 +*
2454 +***************************************************************************/
2455 +
2456 +static VCHIQ_STATUS_T
2457 +add_completion(VCHIQ_INSTANCE_T instance, VCHIQ_REASON_T reason,
2458 + VCHIQ_HEADER_T *header, USER_SERVICE_T *user_service,
2459 + void *bulk_userdata)
2460 +{
2461 + VCHIQ_COMPLETION_DATA_T *completion;
2462 + DEBUG_INITIALISE(g_state.local)
2463 +
2464 + while (instance->completion_insert ==
2465 + (instance->completion_remove + MAX_COMPLETIONS)) {
2466 + /* Out of space - wait for the client */
2467 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2468 + vchiq_log_trace(vchiq_arm_log_level,
2469 + "add_completion - completion queue full");
2470 + DEBUG_COUNT(COMPLETION_QUEUE_FULL_COUNT);
2471 + if (down_interruptible(&instance->remove_event) != 0) {
2472 + vchiq_log_info(vchiq_arm_log_level,
2473 + "service_callback interrupted");
2474 + return VCHIQ_RETRY;
2475 + } else if (instance->closing) {
2476 + vchiq_log_info(vchiq_arm_log_level,
2477 + "service_callback closing");
2478 + return VCHIQ_ERROR;
2479 + }
2480 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2481 + }
2482 +
2483 + completion =
2484 + &instance->completions[instance->completion_insert &
2485 + (MAX_COMPLETIONS - 1)];
2486 +
2487 + completion->header = header;
2488 + completion->reason = reason;
2489 + /* N.B. service_userdata is updated while processing AWAIT_COMPLETION */
2490 + completion->service_userdata = user_service->service;
2491 + completion->bulk_userdata = bulk_userdata;
2492 +
2493 + if (reason == VCHIQ_SERVICE_CLOSED)
2494 + /* Take an extra reference, to be held until
2495 + this CLOSED notification is delivered. */
2496 + lock_service(user_service->service);
2497 +
2498 + /* A write barrier is needed here to ensure that the entire completion
2499 + record is written out before the insert point. */
2500 + wmb();
2501 +
2502 + if (reason == VCHIQ_MESSAGE_AVAILABLE)
2503 + user_service->message_available_pos =
2504 + instance->completion_insert;
2505 + instance->completion_insert++;
2506 +
2507 + up(&instance->insert_event);
2508 +
2509 + return VCHIQ_SUCCESS;
2510 +}
2511 +
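/* add_completion treats instance->completions[] as a ring buffer indexed by
** two free-running counters, relying on MAX_COMPLETIONS being a power of
** two. A minimal model of that pattern (illustrative names only):
*/
struct completion_ring_sketch {
	int insert;	/* advanced by the producer (add_completion) */
	int remove;	/* advanced by the consumer (AWAIT_COMPLETION handling) */
};

static inline int ring_full_sketch(const struct completion_ring_sketch *r)
{
	return r->insert == (r->remove + MAX_COMPLETIONS);
}

static inline int ring_slot_sketch(const struct completion_ring_sketch *r)
{
	return r->insert & (MAX_COMPLETIONS - 1);	/* wrap without a modulo */
}
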
2512 +/****************************************************************************
2513 +*
2514 +* service_callback
2515 +*
2516 +***************************************************************************/
2517 +
2518 +static VCHIQ_STATUS_T
2519 +service_callback(VCHIQ_REASON_T reason, VCHIQ_HEADER_T *header,
2520 + VCHIQ_SERVICE_HANDLE_T handle, void *bulk_userdata)
2521 +{
2522 + /* How do we ensure the callback goes to the right client?
2523 + ** The service_user data points to a USER_SERVICE_T record containing
2524 + ** the original callback and the user state structure, which contains a
2525 + ** circular buffer for completion records.
2526 + */
2527 + USER_SERVICE_T *user_service;
2528 + VCHIQ_SERVICE_T *service;
2529 + VCHIQ_INSTANCE_T instance;
2530 + DEBUG_INITIALISE(g_state.local)
2531 +
2532 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2533 +
2534 + service = handle_to_service(handle);
2535 + BUG_ON(!service);
2536 + user_service = (USER_SERVICE_T *)service->base.userdata;
2537 + instance = user_service->instance;
2538 +
2539 + if (!instance || instance->closing)
2540 + return VCHIQ_SUCCESS;
2541 +
2542 + vchiq_log_trace(vchiq_arm_log_level,
2543 + "service_callback - service %lx(%d), reason %d, header %lx, "
2544 + "instance %lx, bulk_userdata %lx",
2545 + (unsigned long)user_service,
2546 + service->localport,
2547 + reason, (unsigned long)header,
2548 + (unsigned long)instance, (unsigned long)bulk_userdata);
2549 +
2550 + if (header && user_service->is_vchi) {
2551 + spin_lock(&msg_queue_spinlock);
2552 + while (user_service->msg_insert ==
2553 + (user_service->msg_remove + MSG_QUEUE_SIZE)) {
2554 + spin_unlock(&msg_queue_spinlock);
2555 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2556 + DEBUG_COUNT(MSG_QUEUE_FULL_COUNT);
2557 + vchiq_log_trace(vchiq_arm_log_level,
2558 + "service_callback - msg queue full");
2559 + /* If there is no MESSAGE_AVAILABLE in the completion
2560 + ** queue, add one
2561 + */
2562 + if ((user_service->message_available_pos -
2563 + instance->completion_remove) < 0) {
2564 + VCHIQ_STATUS_T status;
2565 + vchiq_log_info(vchiq_arm_log_level,
2566 + "Inserting extra MESSAGE_AVAILABLE");
2567 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2568 + status = add_completion(instance, reason,
2569 + NULL, user_service, bulk_userdata);
2570 + if (status != VCHIQ_SUCCESS) {
2571 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2572 + return status;
2573 + }
2574 + }
2575 +
2576 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2577 + if (down_interruptible(&user_service->remove_event)
2578 + != 0) {
2579 + vchiq_log_info(vchiq_arm_log_level,
2580 + "service_callback interrupted");
2581 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2582 + return VCHIQ_RETRY;
2583 + } else if (instance->closing) {
2584 + vchiq_log_info(vchiq_arm_log_level,
2585 + "service_callback closing");
2586 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2587 + return VCHIQ_ERROR;
2588 + }
2589 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2590 + spin_lock(&msg_queue_spinlock);
2591 + }
2592 +
2593 + user_service->msg_queue[user_service->msg_insert &
2594 + (MSG_QUEUE_SIZE - 1)] = header;
2595 + user_service->msg_insert++;
2596 + spin_unlock(&msg_queue_spinlock);
2597 +
2598 + up(&user_service->insert_event);
2599 +
2600 + /* If there is a thread waiting in DEQUEUE_MESSAGE, or if
2601 + ** there is a MESSAGE_AVAILABLE in the completion queue then
2602 + ** bypass the completion queue.
2603 + */
2604 + if (((user_service->message_available_pos -
2605 + instance->completion_remove) >= 0) ||
2606 + user_service->dequeue_pending) {
2607 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2608 + user_service->dequeue_pending = 0;
2609 + return VCHIQ_SUCCESS;
2610 + }
2611 +
2612 + header = NULL;
2613 + }
2614 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2615 +
2616 + return add_completion(instance, reason, header, user_service,
2617 + bulk_userdata);
2618 +}
2619 +
2620 +/****************************************************************************
2621 +*
2622 +* user_service_free
2623 +*
2624 +***************************************************************************/
2625 +static void
2626 +user_service_free(void *userdata)
2627 +{
2628 + kfree(userdata);
2629 +}
2630 +
2631 +/****************************************************************************
2632 +*
2633 +* vchiq_ioctl
2634 +*
2635 +***************************************************************************/
2636 +
2637 +static long
2638 +vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2639 +{
2640 + VCHIQ_INSTANCE_T instance = file->private_data;
2641 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
2642 + VCHIQ_SERVICE_T *service = NULL;
2643 + long ret = 0;
2644 + int i, rc;
2645 + DEBUG_INITIALISE(g_state.local)
2646 +
2647 + vchiq_log_trace(vchiq_arm_log_level,
2648 + "vchiq_ioctl - instance %x, cmd %s, arg %lx",
2649 + (unsigned int)instance,
2650 + ((_IOC_TYPE(cmd) == VCHIQ_IOC_MAGIC) &&
2651 + (_IOC_NR(cmd) <= VCHIQ_IOC_MAX)) ?
2652 + ioctl_names[_IOC_NR(cmd)] : "<invalid>", arg);
2653 +
2654 + switch (cmd) {
2655 + case VCHIQ_IOC_SHUTDOWN:
2656 + if (!instance->connected)
2657 + break;
2658 +
2659 + /* Remove all services */
2660 + i = 0;
2661 + while ((service = next_service_by_instance(instance->state,
2662 + instance, &i)) != NULL) {
2663 + status = vchiq_remove_service(service->handle);
2664 + unlock_service(service);
2665 + if (status != VCHIQ_SUCCESS)
2666 + break;
2667 + }
2668 + service = NULL;
2669 +
2670 + if (status == VCHIQ_SUCCESS) {
2671 + /* Wake the completion thread and ask it to exit */
2672 + instance->closing = 1;
2673 + up(&instance->insert_event);
2674 + }
2675 +
2676 + break;
2677 +
2678 + case VCHIQ_IOC_CONNECT:
2679 + if (instance->connected) {
2680 + ret = -EINVAL;
2681 + break;
2682 + }
2683 + rc = mutex_lock_interruptible(&instance->state->mutex);
2684 + if (rc != 0) {
2685 + vchiq_log_error(vchiq_arm_log_level,
2686 + "vchiq: connect: could not lock mutex for "
2687 + "state %d: %d",
2688 + instance->state->id, rc);
2689 + ret = -EINTR;
2690 + break;
2691 + }
2692 + status = vchiq_connect_internal(instance->state, instance);
2693 + mutex_unlock(&instance->state->mutex);
2694 +
2695 + if (status == VCHIQ_SUCCESS)
2696 + instance->connected = 1;
2697 + else
2698 + vchiq_log_error(vchiq_arm_log_level,
2699 + "vchiq: could not connect: %d", status);
2700 + break;
2701 +
2702 + case VCHIQ_IOC_CREATE_SERVICE: {
2703 + VCHIQ_CREATE_SERVICE_T args;
2704 + USER_SERVICE_T *user_service = NULL;
2705 + void *userdata;
2706 + int srvstate;
2707 +
2708 + if (copy_from_user
2709 + (&args, (const void __user *)arg,
2710 + sizeof(args)) != 0) {
2711 + ret = -EFAULT;
2712 + break;
2713 + }
2714 +
2715 + user_service = kmalloc(sizeof(USER_SERVICE_T), GFP_KERNEL);
2716 + if (!user_service) {
2717 + ret = -ENOMEM;
2718 + break;
2719 + }
2720 +
2721 + if (args.is_open) {
2722 + if (!instance->connected) {
2723 + ret = -ENOTCONN;
2724 + kfree(user_service);
2725 + break;
2726 + }
2727 + srvstate = VCHIQ_SRVSTATE_OPENING;
2728 + } else {
2729 + srvstate =
2730 + instance->connected ?
2731 + VCHIQ_SRVSTATE_LISTENING :
2732 + VCHIQ_SRVSTATE_HIDDEN;
2733 + }
2734 +
2735 + userdata = args.params.userdata;
2736 + args.params.callback = service_callback;
2737 + args.params.userdata = user_service;
2738 + service = vchiq_add_service_internal(
2739 + instance->state,
2740 + &args.params, srvstate,
2741 + instance, user_service_free);
2742 +
2743 + if (service != NULL) {
2744 + user_service->service = service;
2745 + user_service->userdata = userdata;
2746 + user_service->instance = instance;
2747 + user_service->is_vchi = args.is_vchi;
2748 + user_service->dequeue_pending = 0;
2749 + user_service->message_available_pos =
2750 + instance->completion_remove - 1;
2751 + user_service->msg_insert = 0;
2752 + user_service->msg_remove = 0;
2753 + sema_init(&user_service->insert_event, 0);
2754 + sema_init(&user_service->remove_event, 0);
2755 +
2756 + if (args.is_open) {
2757 + status = vchiq_open_service_internal
2758 + (service, instance->pid);
2759 + if (status != VCHIQ_SUCCESS) {
2760 + vchiq_remove_service(service->handle);
2761 + service = NULL;
2762 + ret = (status == VCHIQ_RETRY) ?
2763 + -EINTR : -EIO;
2764 + break;
2765 + }
2766 + }
2767 +
2768 + if (copy_to_user((void __user *)
2769 + &(((VCHIQ_CREATE_SERVICE_T __user *)
2770 + arg)->handle),
2771 + (const void *)&service->handle,
2772 + sizeof(service->handle)) != 0) {
2773 + ret = -EFAULT;
2774 + vchiq_remove_service(service->handle);
2775 + }
2776 +
2777 + service = NULL;
2778 + } else {
2779 + ret = -EEXIST;
2780 + kfree(user_service);
2781 + }
2782 + } break;
2783 +
2784 + case VCHIQ_IOC_CLOSE_SERVICE: {
2785 + VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
2786 +
2787 + service = find_service_for_instance(instance, handle);
2788 + if (service != NULL)
2789 + status = vchiq_close_service(service->handle);
2790 + else
2791 + ret = -EINVAL;
2792 + } break;
2793 +
2794 + case VCHIQ_IOC_REMOVE_SERVICE: {
2795 + VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
2796 +
2797 + service = find_service_for_instance(instance, handle);
2798 + if (service != NULL)
2799 + status = vchiq_remove_service(service->handle);
2800 + else
2801 + ret = -EINVAL;
2802 + } break;
2803 +
2804 + case VCHIQ_IOC_USE_SERVICE:
2805 + case VCHIQ_IOC_RELEASE_SERVICE: {
2806 + VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
2807 +
2808 + service = find_service_for_instance(instance, handle);
2809 + if (service != NULL) {
2810 + status = (cmd == VCHIQ_IOC_USE_SERVICE) ?
2811 + vchiq_use_service_internal(service) :
2812 + vchiq_release_service_internal(service);
2813 + if (status != VCHIQ_SUCCESS) {
2814 + vchiq_log_error(vchiq_susp_log_level,
2815 + "%s: cmd %s returned error %d for "
2816 + "service %c%c%c%c:%03d",
2817 + __func__,
2818 + (cmd == VCHIQ_IOC_USE_SERVICE) ?
2819 + "VCHIQ_IOC_USE_SERVICE" :
2820 + "VCHIQ_IOC_RELEASE_SERVICE",
2821 + status,
2822 + VCHIQ_FOURCC_AS_4CHARS(
2823 + service->base.fourcc),
2824 + service->client_id);
2825 + ret = -EINVAL;
2826 + }
2827 + } else
2828 + ret = -EINVAL;
2829 + } break;
2830 +
2831 + case VCHIQ_IOC_QUEUE_MESSAGE: {
2832 + VCHIQ_QUEUE_MESSAGE_T args;
2833 + if (copy_from_user
2834 + (&args, (const void __user *)arg,
2835 + sizeof(args)) != 0) {
2836 + ret = -EFAULT;
2837 + break;
2838 + }
2839 +
2840 + service = find_service_for_instance(instance, args.handle);
2841 +
2842 + if ((service != NULL) && (args.count <= MAX_ELEMENTS)) {
2843 + /* Copy elements into kernel space */
2844 + VCHIQ_ELEMENT_T elements[MAX_ELEMENTS];
2845 + if (copy_from_user(elements, args.elements,
2846 + args.count * sizeof(VCHIQ_ELEMENT_T)) == 0)
2847 + status = vchiq_queue_message
2848 + (args.handle,
2849 + elements, args.count);
2850 + else
2851 + ret = -EFAULT;
2852 + } else {
2853 + ret = -EINVAL;
2854 + }
2855 + } break;
2856 +
2857 + case VCHIQ_IOC_QUEUE_BULK_TRANSMIT:
2858 + case VCHIQ_IOC_QUEUE_BULK_RECEIVE: {
2859 + VCHIQ_QUEUE_BULK_TRANSFER_T args;
2860 + struct bulk_waiter_node *waiter = NULL;
2861 + VCHIQ_BULK_DIR_T dir =
2862 + (cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT) ?
2863 + VCHIQ_BULK_TRANSMIT : VCHIQ_BULK_RECEIVE;
2864 +
2865 + if (copy_from_user
2866 + (&args, (const void __user *)arg,
2867 + sizeof(args)) != 0) {
2868 + ret = -EFAULT;
2869 + break;
2870 + }
2871 +
2872 + service = find_service_for_instance(instance, args.handle);
2873 + if (!service) {
2874 + ret = -EINVAL;
2875 + break;
2876 + }
2877 +
2878 + if (args.mode == VCHIQ_BULK_MODE_BLOCKING) {
2879 + waiter = kzalloc(sizeof(struct bulk_waiter_node),
2880 + GFP_KERNEL);
2881 + if (!waiter) {
2882 + ret = -ENOMEM;
2883 + break;
2884 + }
2885 + args.userdata = &waiter->bulk_waiter;
2886 + } else if (args.mode == VCHIQ_BULK_MODE_WAITING) {
2887 + struct list_head *pos;
2888 + mutex_lock(&instance->bulk_waiter_list_mutex);
2889 + list_for_each(pos, &instance->bulk_waiter_list) {
2890 + if (list_entry(pos, struct bulk_waiter_node,
2891 + list)->pid == current->pid) {
2892 + waiter = list_entry(pos,
2893 + struct bulk_waiter_node,
2894 + list);
2895 + list_del(pos);
2896 + break;
2897 + }
2898 +
2899 + }
2900 + mutex_unlock(&instance->bulk_waiter_list_mutex);
2901 + if (!waiter) {
2902 + vchiq_log_error(vchiq_arm_log_level,
2903 + "no bulk_waiter found for pid %d",
2904 + current->pid);
2905 + ret = -ESRCH;
2906 + break;
2907 + }
2908 + vchiq_log_info(vchiq_arm_log_level,
2909 + "found bulk_waiter %x for pid %d",
2910 + (unsigned int)waiter, current->pid);
2911 + args.userdata = &waiter->bulk_waiter;
2912 + }
2913 + status = vchiq_bulk_transfer
2914 + (args.handle,
2915 + VCHI_MEM_HANDLE_INVALID,
2916 + args.data, args.size,
2917 + args.userdata, args.mode,
2918 + dir);
2919 + if (!waiter)
2920 + break;
2921 + if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) ||
2922 + !waiter->bulk_waiter.bulk) {
2923 + if (waiter->bulk_waiter.bulk) {
2924 + /* Cancel the signal when the transfer
2925 + ** completes. */
2926 + spin_lock(&bulk_waiter_spinlock);
2927 + waiter->bulk_waiter.bulk->userdata = NULL;
2928 + spin_unlock(&bulk_waiter_spinlock);
2929 + }
2930 + kfree(waiter);
2931 + } else {
2932 + const VCHIQ_BULK_MODE_T mode_waiting =
2933 + VCHIQ_BULK_MODE_WAITING;
2934 + waiter->pid = current->pid;
2935 + mutex_lock(&instance->bulk_waiter_list_mutex);
2936 + list_add(&waiter->list, &instance->bulk_waiter_list);
2937 + mutex_unlock(&instance->bulk_waiter_list_mutex);
2938 + vchiq_log_info(vchiq_arm_log_level,
2939 + "saved bulk_waiter %x for pid %d",
2940 + (unsigned int)waiter, current->pid);
2941 +
2942 + if (copy_to_user((void __user *)
2943 + &(((VCHIQ_QUEUE_BULK_TRANSFER_T __user *)
2944 + arg)->mode),
2945 + (const void *)&mode_waiting,
2946 + sizeof(mode_waiting)) != 0)
2947 + ret = -EFAULT;
2948 + }
2949 + } break;
2950 +
2951 + case VCHIQ_IOC_AWAIT_COMPLETION: {
2952 + VCHIQ_AWAIT_COMPLETION_T args;
2953 +
2954 + DEBUG_TRACE(AWAIT_COMPLETION_LINE);
2955 + if (!instance->connected) {
2956 + ret = -ENOTCONN;
2957 + break;
2958 + }
2959 +
2960 + if (copy_from_user(&args, (const void __user *)arg,
2961 + sizeof(args)) != 0) {
2962 + ret = -EFAULT;
2963 + break;
2964 + }
2965 +
2966 + mutex_lock(&instance->completion_mutex);
2967 +
2968 + DEBUG_TRACE(AWAIT_COMPLETION_LINE);
2969 + while ((instance->completion_remove ==
2970 + instance->completion_insert)
2971 + && !instance->closing) {
2972 + int rc;
2973 + DEBUG_TRACE(AWAIT_COMPLETION_LINE);
2974 + mutex_unlock(&instance->completion_mutex);
2975 + rc = down_interruptible(&instance->insert_event);
2976 + mutex_lock(&instance->completion_mutex);
2977 + if (rc != 0) {
2978 + DEBUG_TRACE(AWAIT_COMPLETION_LINE);
2979 + vchiq_log_info(vchiq_arm_log_level,
2980 + "AWAIT_COMPLETION interrupted");
2981 + ret = -EINTR;
2982 + break;
2983 + }
2984 + }
2985 + DEBUG_TRACE(AWAIT_COMPLETION_LINE);
2986 +
2987 + /* A read memory barrier is needed to stop prefetch of a stale
2988 + ** completion record
2989 + */
2990 + rmb();
2991 +
2992 + if (ret == 0) {
2993 + int msgbufcount = args.msgbufcount;
2994 + for (ret = 0; ret < args.count; ret++) {
2995 + VCHIQ_COMPLETION_DATA_T *completion;
2996 + VCHIQ_SERVICE_T *service;
2997 + USER_SERVICE_T *user_service;
2998 + VCHIQ_HEADER_T *header;
2999 + if (instance->completion_remove ==
3000 + instance->completion_insert)
3001 + break;
3002 + completion = &instance->completions[
3003 + instance->completion_remove &
3004 + (MAX_COMPLETIONS - 1)];
3005 +
3006 + service = completion->service_userdata;
3007 + user_service = service->base.userdata;
3008 + completion->service_userdata =
3009 + user_service->userdata;
3010 +
3011 + header = completion->header;
3012 + if (header) {
3013 + void __user *msgbuf;
3014 + int msglen;
3015 +
3016 + msglen = header->size +
3017 + sizeof(VCHIQ_HEADER_T);
3018 + /* This must be a VCHIQ-style service */
3019 + if (args.msgbufsize < msglen) {
3020 + vchiq_log_error(
3021 + vchiq_arm_log_level,
3022 + "header %x: msgbufsize"
3023 + " %x < msglen %x",
3024 + (unsigned int)header,
3025 + args.msgbufsize,
3026 + msglen);
3027 + WARN(1, "invalid message "
3028 + "size\n");
3029 + if (ret == 0)
3030 + ret = -EMSGSIZE;
3031 + break;
3032 + }
3033 + if (msgbufcount <= 0)
3034 + /* Stall here for lack of a
3035 + ** buffer for the message. */
3036 + break;
3037 + /* Get the pointer from user space */
3038 + msgbufcount--;
3039 + if (copy_from_user(&msgbuf,
3040 + (const void __user *)
3041 + &args.msgbufs[msgbufcount],
3042 + sizeof(msgbuf)) != 0) {
3043 + if (ret == 0)
3044 + ret = -EFAULT;
3045 + break;
3046 + }
3047 +
3048 + /* Copy the message to user space */
3049 + if (copy_to_user(msgbuf, header,
3050 + msglen) != 0) {
3051 + if (ret == 0)
3052 + ret = -EFAULT;
3053 + break;
3054 + }
3055 +
3056 + /* Now it has been copied, the message
3057 + ** can be released. */
3058 + vchiq_release_message(service->handle,
3059 + header);
3060 +
3061 + /* The completion must point to the
3062 + ** msgbuf. */
3063 + completion->header = msgbuf;
3064 + }
3065 +
3066 + if (completion->reason ==
3067 + VCHIQ_SERVICE_CLOSED)
3068 + unlock_service(service);
3069 +
3070 + if (copy_to_user((void __user *)(
3071 + (size_t)args.buf +
3072 + ret * sizeof(VCHIQ_COMPLETION_DATA_T)),
3073 + completion,
3074 + sizeof(VCHIQ_COMPLETION_DATA_T)) != 0) {
3075 + if (ret == 0)
3076 + ret = -EFAULT;
3077 + break;
3078 + }
3079 +
3080 + instance->completion_remove++;
3081 + }
3082 +
3083 + if (msgbufcount != args.msgbufcount) {
3084 + if (copy_to_user((void __user *)
3085 + &((VCHIQ_AWAIT_COMPLETION_T *)arg)->
3086 + msgbufcount,
3087 + &msgbufcount,
3088 + sizeof(msgbufcount)) != 0) {
3089 + ret = -EFAULT;
3090 + }
3091 + }
3092 + }
3093 +
3094 + if (ret != 0)
3095 + up(&instance->remove_event);
3096 + mutex_unlock(&instance->completion_mutex);
3097 + DEBUG_TRACE(AWAIT_COMPLETION_LINE);
3098 + } break;
3099 +
3100 + case VCHIQ_IOC_DEQUEUE_MESSAGE: {
3101 + VCHIQ_DEQUEUE_MESSAGE_T args;
3102 + USER_SERVICE_T *user_service;
3103 + VCHIQ_HEADER_T *header;
3104 +
3105 + DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
3106 + if (copy_from_user
3107 + (&args, (const void __user *)arg,
3108 + sizeof(args)) != 0) {
3109 + ret = -EFAULT;
3110 + break;
3111 + }
3112 + service = find_service_for_instance(instance, args.handle);
3113 + if (!service) {
3114 + ret = -EINVAL;
3115 + break;
3116 + }
3117 + user_service = (USER_SERVICE_T *)service->base.userdata;
3118 + if (user_service->is_vchi == 0) {
3119 + ret = -EINVAL;
3120 + break;
3121 + }
3122 +
3123 + spin_lock(&msg_queue_spinlock);
3124 + if (user_service->msg_remove == user_service->msg_insert) {
3125 + if (!args.blocking) {
3126 + spin_unlock(&msg_queue_spinlock);
3127 + DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
3128 + ret = -EWOULDBLOCK;
3129 + break;
3130 + }
3131 + user_service->dequeue_pending = 1;
3132 + do {
3133 + spin_unlock(&msg_queue_spinlock);
3134 + DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
3135 + if (down_interruptible(
3136 + &user_service->insert_event) != 0) {
3137 + vchiq_log_info(vchiq_arm_log_level,
3138 + "DEQUEUE_MESSAGE interrupted");
3139 + ret = -EINTR;
3140 + break;
3141 + }
3142 + spin_lock(&msg_queue_spinlock);
3143 + } while (user_service->msg_remove ==
3144 + user_service->msg_insert);
3145 +
3146 + if (ret)
3147 + break;
3148 + }
3149 +
3150 + BUG_ON((int)(user_service->msg_insert -
3151 + user_service->msg_remove) < 0);
3152 +
3153 + header = user_service->msg_queue[user_service->msg_remove &
3154 + (MSG_QUEUE_SIZE - 1)];
3155 + user_service->msg_remove++;
3156 + spin_unlock(&msg_queue_spinlock);
3157 +
3158 + up(&user_service->remove_event);
3159 + if (header == NULL)
3160 + ret = -ENOTCONN;
3161 + else if (header->size <= args.bufsize) {
3162 + /* Copy to user space if args.buf is not NULL */
3163 + if ((args.buf == NULL) ||
3164 + (copy_to_user((void __user *)args.buf,
3165 + header->data,
3166 + header->size) == 0)) {
3167 + ret = header->size;
3168 + vchiq_release_message(
3169 + service->handle,
3170 + header);
3171 + } else
3172 + ret = -EFAULT;
3173 + } else {
3174 + vchiq_log_error(vchiq_arm_log_level,
3175 + "header %x: bufsize %x < size %x",
3176 + (unsigned int)header, args.bufsize,
3177 + header->size);
3178 + WARN(1, "invalid size\n");
3179 + ret = -EMSGSIZE;
3180 + }
3181 + DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
3182 + } break;
3183 +
3184 + case VCHIQ_IOC_GET_CLIENT_ID: {
3185 + VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
3186 +
3187 + ret = vchiq_get_client_id(handle);
3188 + } break;
3189 +
3190 + case VCHIQ_IOC_GET_CONFIG: {
3191 + VCHIQ_GET_CONFIG_T args;
3192 + VCHIQ_CONFIG_T config;
3193 +
3194 + if (copy_from_user(&args, (const void __user *)arg,
3195 + sizeof(args)) != 0) {
3196 + ret = -EFAULT;
3197 + break;
3198 + }
3199 + if (args.config_size > sizeof(config)) {
3200 + ret = -EINVAL;
3201 + break;
3202 + }
3203 + status = vchiq_get_config(instance, args.config_size, &config);
3204 + if (status == VCHIQ_SUCCESS) {
3205 + if (copy_to_user((void __user *)args.pconfig,
3206 + &config, args.config_size) != 0) {
3207 + ret = -EFAULT;
3208 + break;
3209 + }
3210 + }
3211 + } break;
3212 +
3213 + case VCHIQ_IOC_SET_SERVICE_OPTION: {
3214 + VCHIQ_SET_SERVICE_OPTION_T args;
3215 +
3216 + if (copy_from_user(
3217 + &args, (const void __user *)arg,
3218 + sizeof(args)) != 0) {
3219 + ret = -EFAULT;
3220 + break;
3221 + }
3222 +
3223 + service = find_service_for_instance(instance, args.handle);
3224 + if (!service) {
3225 + ret = -EINVAL;
3226 + break;
3227 + }
3228 +
3229 + status = vchiq_set_service_option(
3230 + args.handle, args.option, args.value);
3231 + } break;
3232 +
3233 + case VCHIQ_IOC_DUMP_PHYS_MEM: {
3234 + VCHIQ_DUMP_MEM_T args;
3235 +
3236 + if (copy_from_user
3237 + (&args, (const void __user *)arg,
3238 + sizeof(args)) != 0) {
3239 + ret = -EFAULT;
3240 + break;
3241 + }
3242 + dump_phys_mem(args.virt_addr, args.num_bytes);
3243 + } break;
3244 +
3245 + default:
3246 + ret = -ENOTTY;
3247 + break;
3248 + }
3249 +
3250 + if (service)
3251 + unlock_service(service);
3252 +
3253 + if (ret == 0) {
3254 + if (status == VCHIQ_ERROR)
3255 + ret = -EIO;
3256 + else if (status == VCHIQ_RETRY)
3257 + ret = -EINTR;
3258 + }
3259 +
3260 + if ((status == VCHIQ_SUCCESS) && (ret < 0) && (ret != -EINTR) &&
3261 + (ret != -EWOULDBLOCK))
3262 + vchiq_log_info(vchiq_arm_log_level,
3263 + " ioctl instance %lx, cmd %s -> status %d, %ld",
3264 + (unsigned long)instance,
3265 + (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
3266 + ioctl_names[_IOC_NR(cmd)] :
3267 + "<invalid>",
3268 + status, ret);
3269 + else
3270 + vchiq_log_trace(vchiq_arm_log_level,
3271 + " ioctl instance %lx, cmd %s -> status %d, %ld",
3272 + (unsigned long)instance,
3273 + (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
3274 + ioctl_names[_IOC_NR(cmd)] :
3275 + "<invalid>",
3276 + status, ret);
3277 +
3278 + return ret;
3279 +}
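For orientation, here is a minimal user-space sketch of how the ioctl handler above might be exercised. It is not part of the patch: the /dev/vchiq node name, the "vchiq_ioctl.h" include and the exact member layout of VCHIQ_ELEMENT_T are assumptions; only the ioctl names, the VCHIQ_CREATE_SERVICE_T/VCHIQ_QUEUE_MESSAGE_T members used here and VCHIQ_MAKE_FOURCC are taken from the code itself.

/* Hypothetical user-space usage sketch (not part of the patch).
 * Assumes the shared ioctl header is visible to user space and that the
 * driver's character device is exposed as /dev/vchiq. */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "vchiq_ioctl.h"                /* assumed name of the ABI header */

static int demo_send(const char *msg)
{
        VCHIQ_CREATE_SERVICE_T create = { 0 };
        VCHIQ_QUEUE_MESSAGE_T queue = { 0 };
        VCHIQ_ELEMENT_T element;        /* data/size member names assumed */
        int fd = open("/dev/vchiq", O_RDWR);    /* assumed node name */

        if (fd < 0)
                return -1;

        create.params.fourcc = VCHIQ_MAKE_FOURCC('d', 'e', 'm', 'o');
        create.is_open = 1;     /* requires a connected instance (else -ENOTCONN) */
        create.is_vchi = 1;     /* messages are queued for DEQUEUE_MESSAGE */
        if (ioctl(fd, VCHIQ_IOC_CREATE_SERVICE, &create) != 0)
                goto fail;

        element.data = msg;
        element.size = strlen(msg);
        queue.handle = create.handle;   /* handle written back by the driver */
        queue.elements = &element;
        queue.count = 1;
        if (ioctl(fd, VCHIQ_IOC_QUEUE_MESSAGE, &queue) != 0)
                goto fail;

        close(fd);
        return 0;
fail:
        close(fd);
        return -1;
}

Closing the file descriptor releases the instance, which terminates any services created through it, as the vchiq_release handler below shows.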
3280 +
3281 +/****************************************************************************
3282 +*
3283 +* vchiq_open
3284 +*
3285 +***************************************************************************/
3286 +
3287 +static int
3288 +vchiq_open(struct inode *inode, struct file *file)
3289 +{
3290 + int dev = iminor(inode) & 0x0f;
3291 + vchiq_log_info(vchiq_arm_log_level, "vchiq_open");
3292 + switch (dev) {
3293 + case VCHIQ_MINOR: {
3294 + int ret;
3295 + VCHIQ_STATE_T *state = vchiq_get_state();
3296 + VCHIQ_INSTANCE_T instance;
3297 +
3298 + if (!state) {
3299 + vchiq_log_error(vchiq_arm_log_level,
3300 + "vchiq has no connection to VideoCore");
3301 + return -ENOTCONN;
3302 + }
3303 +
3304 + instance = kzalloc(sizeof(*instance), GFP_KERNEL);
3305 + if (!instance)
3306 + return -ENOMEM;
3307 +
3308 + instance->state = state;
3309 + instance->pid = current->tgid;
3310 +
3311 + ret = vchiq_proc_add_instance(instance);
3312 + if (ret != 0) {
3313 + kfree(instance);
3314 + return ret;
3315 + }
3316 +
3317 + sema_init(&instance->insert_event, 0);
3318 + sema_init(&instance->remove_event, 0);
3319 + mutex_init(&instance->completion_mutex);
3320 + mutex_init(&instance->bulk_waiter_list_mutex);
3321 + INIT_LIST_HEAD(&instance->bulk_waiter_list);
3322 +
3323 + file->private_data = instance;
3324 + } break;
3325 +
3326 + default:
3327 + vchiq_log_error(vchiq_arm_log_level,
3328 + "Unknown minor device: %d", dev);
3329 + return -ENXIO;
3330 + }
3331 +
3332 + return 0;
3333 +}
3334 +
3335 +/****************************************************************************
3336 +*
3337 +* vchiq_release
3338 +*
3339 +***************************************************************************/
3340 +
3341 +static int
3342 +vchiq_release(struct inode *inode, struct file *file)
3343 +{
3344 + int dev = iminor(inode) & 0x0f;
3345 + int ret = 0;
3346 + switch (dev) {
3347 + case VCHIQ_MINOR: {
3348 + VCHIQ_INSTANCE_T instance = file->private_data;
3349 + VCHIQ_STATE_T *state = vchiq_get_state();
3350 + VCHIQ_SERVICE_T *service;
3351 + int i;
3352 +
3353 + vchiq_log_info(vchiq_arm_log_level,
3354 + "vchiq_release: instance=%lx",
3355 + (unsigned long)instance);
3356 +
3357 + if (!state) {
3358 + ret = -EPERM;
3359 + goto out;
3360 + }
3361 +
3362 + /* Ensure videocore is awake to allow termination. */
3363 + vchiq_use_internal(instance->state, NULL,
3364 + USE_TYPE_VCHIQ);
3365 +
3366 + mutex_lock(&instance->completion_mutex);
3367 +
3368 + /* Wake the completion thread and ask it to exit */
3369 + instance->closing = 1;
3370 + up(&instance->insert_event);
3371 +
3372 + mutex_unlock(&instance->completion_mutex);
3373 +
3374 + /* Wake the slot handler if the completion queue is full. */
3375 + up(&instance->remove_event);
3376 +
3377 + /* Mark all services for termination... */
3378 + i = 0;
3379 + while ((service = next_service_by_instance(state, instance,
3380 + &i)) != NULL) {
3381 + USER_SERVICE_T *user_service = service->base.userdata;
3382 +
3383 + /* Wake the slot handler if the msg queue is full. */
3384 + up(&user_service->remove_event);
3385 +
3386 + vchiq_terminate_service_internal(service);
3387 + unlock_service(service);
3388 + }
3389 +
3390 + /* ...and wait for them to die */
3391 + i = 0;
3392 + while ((service = next_service_by_instance(state, instance, &i))
3393 + != NULL) {
3394 + USER_SERVICE_T *user_service = service->base.userdata;
3395 +
3396 + down(&service->remove_event);
3397 +
3398 + BUG_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);
3399 +
3400 + spin_lock(&msg_queue_spinlock);
3401 +
3402 + while (user_service->msg_remove !=
3403 + user_service->msg_insert) {
3404 + VCHIQ_HEADER_T *header = user_service->
3405 + msg_queue[user_service->msg_remove &
3406 + (MSG_QUEUE_SIZE - 1)];
3407 + user_service->msg_remove++;
3408 + spin_unlock(&msg_queue_spinlock);
3409 +
3410 + if (header)
3411 + vchiq_release_message(
3412 + service->handle,
3413 + header);
3414 + spin_lock(&msg_queue_spinlock);
3415 + }
3416 +
3417 + spin_unlock(&msg_queue_spinlock);
3418 +
3419 + unlock_service(service);
3420 + }
3421 +
3422 + /* Release any closed services */
3423 + while (instance->completion_remove !=
3424 + instance->completion_insert) {
3425 + VCHIQ_COMPLETION_DATA_T *completion;
3426 + VCHIQ_SERVICE_T *service;
3427 + completion = &instance->completions[
3428 + instance->completion_remove &
3429 + (MAX_COMPLETIONS - 1)];
3430 + service = completion->service_userdata;
3431 + if (completion->reason == VCHIQ_SERVICE_CLOSED)
3432 + unlock_service(service);
3433 + instance->completion_remove++;
3434 + }
3435 +
3436 + /* Release the PEER service count. */
3437 + vchiq_release_internal(instance->state, NULL);
3438 +
3439 + {
3440 + struct list_head *pos, *next;
3441 + list_for_each_safe(pos, next,
3442 + &instance->bulk_waiter_list) {
3443 + struct bulk_waiter_node *waiter;
3444 + waiter = list_entry(pos,
3445 + struct bulk_waiter_node,
3446 + list);
3447 + list_del(pos);
3448 + vchiq_log_info(vchiq_arm_log_level,
3449 + "bulk_waiter - cleaned up %x "
3450 + "for pid %d",
3451 + (unsigned int)waiter, waiter->pid);
3452 + kfree(waiter);
3453 + }
3454 + }
3455 +
3456 + vchiq_proc_remove_instance(instance);
3457 +
3458 + kfree(instance);
3459 + file->private_data = NULL;
3460 + } break;
3461 +
3462 + default:
3463 + vchiq_log_error(vchiq_arm_log_level,
3464 + "Unknown minor device: %d", dev);
3465 + ret = -ENXIO;
3466 + }
3467 +
3468 +out:
3469 + return ret;
3470 +}
3471 +
3472 +/****************************************************************************
3473 +*
3474 +* vchiq_dump
3475 +*
3476 +***************************************************************************/
3477 +
3478 +void
3479 +vchiq_dump(void *dump_context, const char *str, int len)
3480 +{
3481 + DUMP_CONTEXT_T *context = (DUMP_CONTEXT_T *)dump_context;
3482 +
3483 + if (context->actual < context->space) {
3484 + int copy_bytes;
3485 + if (context->offset > 0) {
3486 + int skip_bytes = min(len, (int)context->offset);
3487 + str += skip_bytes;
3488 + len -= skip_bytes;
3489 + context->offset -= skip_bytes;
3490 + if (context->offset > 0)
3491 + return;
3492 + }
3493 + copy_bytes = min(len, (int)(context->space - context->actual));
3494 + if (copy_bytes == 0)
3495 + return;
3496 + if (copy_to_user(context->buf + context->actual, str,
3497 + copy_bytes))
3498 + context->actual = -EFAULT;
3499 + context->actual += copy_bytes;
3500 + len -= copy_bytes;
3501 +
3502 + /* If the terminating NUL is included in the length, then it
3503 + ** marks the end of a line and should be replaced with a
3504 + ** newline. */
3505 + if ((len == 0) && (str[copy_bytes - 1] == '\0')) {
3506 + char cr = '\n';
3507 + if (copy_to_user(context->buf + context->actual - 1,
3508 + &cr, 1))
3509 + context->actual = -EFAULT;
3510 + }
3511 + }
3512 +}
3513 +
3514 +/****************************************************************************
3515 +*
3516 +* vchiq_dump_platform_instances
3517 +*
3518 +***************************************************************************/
3519 +
3520 +void
3521 +vchiq_dump_platform_instances(void *dump_context)
3522 +{
3523 + VCHIQ_STATE_T *state = vchiq_get_state();
3524 + char buf[80];
3525 + int len;
3526 + int i;
3527 +
3528 + /* There is no list of instances, so instead scan all services,
3529 + marking those that have been dumped. */
3530 +
3531 + for (i = 0; i < state->unused_service; i++) {
3532 + VCHIQ_SERVICE_T *service = state->services[i];
3533 + VCHIQ_INSTANCE_T instance;
3534 +
3535 + if (service && (service->base.callback == service_callback)) {
3536 + instance = service->instance;
3537 + if (instance)
3538 + instance->mark = 0;
3539 + }
3540 + }
3541 +
3542 + for (i = 0; i < state->unused_service; i++) {
3543 + VCHIQ_SERVICE_T *service = state->services[i];
3544 + VCHIQ_INSTANCE_T instance;
3545 +
3546 + if (service && (service->base.callback == service_callback)) {
3547 + instance = service->instance;
3548 + if (instance && !instance->mark) {
3549 + len = snprintf(buf, sizeof(buf),
3550 + "Instance %x: pid %d,%s completions "
3551 + "%d/%d",
3552 + (unsigned int)instance, instance->pid,
3553 + instance->connected ? " connected, " :
3554 + "",
3555 + instance->completion_insert -
3556 + instance->completion_remove,
3557 + MAX_COMPLETIONS);
3558 +
3559 + vchiq_dump(dump_context, buf, len + 1);
3560 +
3561 + instance->mark = 1;
3562 + }
3563 + }
3564 + }
3565 +}
3566 +
3567 +/****************************************************************************
3568 +*
3569 +* vchiq_dump_platform_service_state
3570 +*
3571 +***************************************************************************/
3572 +
3573 +void
3574 +vchiq_dump_platform_service_state(void *dump_context, VCHIQ_SERVICE_T *service)
3575 +{
3576 + USER_SERVICE_T *user_service = (USER_SERVICE_T *)service->base.userdata;
3577 + char buf[80];
3578 + int len;
3579 +
3580 + len = snprintf(buf, sizeof(buf), " instance %x",
3581 + (unsigned int)service->instance);
3582 +
3583 + if ((service->base.callback == service_callback) &&
3584 + user_service->is_vchi) {
3585 + len += snprintf(buf + len, sizeof(buf) - len,
3586 + ", %d/%d messages",
3587 + user_service->msg_insert - user_service->msg_remove,
3588 + MSG_QUEUE_SIZE);
3589 +
3590 + if (user_service->dequeue_pending)
3591 + len += snprintf(buf + len, sizeof(buf) - len,
3592 + " (dequeue pending)");
3593 + }
3594 +
3595 + vchiq_dump(dump_context, buf, len + 1);
3596 +}
3597 +
3598 +/****************************************************************************
3599 +*
3600 +* dump_phys_mem
3601 +*
3602 +***************************************************************************/
3603 +
3604 +static void
3605 +dump_phys_mem(void *virt_addr, uint32_t num_bytes)
3606 +{
3607 + int rc;
3608 + uint8_t *end_virt_addr = virt_addr + num_bytes;
3609 + int num_pages;
3610 + int offset;
3611 + int end_offset;
3612 + int page_idx;
3613 + int prev_idx;
3614 + struct page *page;
3615 + struct page **pages;
3616 + uint8_t *kmapped_virt_ptr;
3617 +
3618 + /* Align virt_addr and end_virt_addr to 16-byte boundaries. */
3619 +
3620 + virt_addr = (void *)((unsigned long)virt_addr & ~0x0fuL);
3621 + end_virt_addr = (void *)(((unsigned long)end_virt_addr + 15uL) &
3622 + ~0x0fuL);
3623 +
3624 + offset = (int)(long)virt_addr & (PAGE_SIZE - 1);
3625 + end_offset = (int)(long)end_virt_addr & (PAGE_SIZE - 1);
3626 +
3627 + num_pages = (offset + num_bytes + PAGE_SIZE - 1) / PAGE_SIZE;
3628 +
3629 + pages = kmalloc(sizeof(struct page *) * num_pages, GFP_KERNEL);
3630 + if (pages == NULL) {
3631 + vchiq_log_error(vchiq_arm_log_level,
3632 + "Unable to allocation memory for %d pages\n",
3633 + num_pages);
3634 + return;
3635 + }
3636 +
3637 + down_read(&current->mm->mmap_sem);
3638 + rc = get_user_pages(current, /* task */
3639 + current->mm, /* mm */
3640 + (unsigned long)virt_addr, /* start */
3641 + num_pages, /* len */
3642 + 0, /* write */
3643 + 0, /* force */
3644 + pages, /* pages (array of page pointers) */
3645 + NULL); /* vmas */
3646 + up_read(&current->mm->mmap_sem);
3647 +
3648 + prev_idx = -1;
3649 + page = NULL;
3650 +
3651 + while (offset < end_offset) {
3652 +
3653 + int page_offset = offset % PAGE_SIZE;
3654 + page_idx = offset / PAGE_SIZE;
3655 +
3656 + if (page_idx != prev_idx) {
3657 +
3658 + if (page != NULL)
3659 + kunmap(page);
3660 + page = pages[page_idx];
3661 + kmapped_virt_ptr = kmap(page);
3662 +
3663 + prev_idx = page_idx;
3664 + }
3665 +
3666 + if (vchiq_arm_log_level >= VCHIQ_LOG_TRACE)
3667 + vchiq_log_dump_mem("ph",
3668 + (uint32_t)(unsigned long)&kmapped_virt_ptr[
3669 + page_offset],
3670 + &kmapped_virt_ptr[page_offset], 16);
3671 +
3672 + offset += 16;
3673 + }
3674 + if (page != NULL)
3675 + kunmap(page);
3676 +
3677 + for (page_idx = 0; page_idx < num_pages; page_idx++)
3678 + page_cache_release(pages[page_idx]);
3679 +
3680 + kfree(pages);
3681 +}
3682 +
3683 +/****************************************************************************
3684 +*
3685 +* vchiq_read
3686 +*
3687 +***************************************************************************/
3688 +
3689 +static ssize_t
3690 +vchiq_read(struct file *file, char __user *buf,
3691 + size_t count, loff_t *ppos)
3692 +{
3693 + DUMP_CONTEXT_T context;
3694 + context.buf = buf;
3695 + context.actual = 0;
3696 + context.space = count;
3697 + context.offset = *ppos;
3698 +
3699 + vchiq_dump_state(&context, &g_state);
3700 +
3701 + *ppos += context.actual;
3702 +
3703 + return context.actual;
3704 +}
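As a usage note (the node name is an assumption, since it is not defined in this hunk), the read handler above simply streams the textual state dump into the caller's buffer and advances *ppos by the amount produced, so a plain read loop is enough to retrieve it:

/* Minimal sketch: print the vchiq state dump (assumes a /dev/vchiq node). */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        char buf[4096];
        ssize_t n;
        int fd = open("/dev/vchiq", O_RDONLY);

        if (fd < 0)
                return 1;
        /* Each read resumes where the previous one stopped, because the
         * handler adds context.actual to *ppos and vchiq_dump skips
         * context.offset bytes of the regenerated dump. */
        while ((n = read(fd, buf, sizeof(buf))) > 0)
                fwrite(buf, 1, (size_t)n, stdout);
        close(fd);
        return 0;
}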
3705 +
3706 +VCHIQ_STATE_T *
3707 +vchiq_get_state(void)
3708 +{
3709 +
3710 + if (g_state.remote == NULL)
3711 + printk(KERN_ERR "%s: g_state.remote == NULL\n", __func__);
3712 + else if (g_state.remote->initialised != 1)
3713 + printk(KERN_NOTICE "%s: g_state.remote->initialised != 1 (%d)\n",
3714 + __func__, g_state.remote->initialised);
3715 +
3716 + return ((g_state.remote != NULL) &&
3717 + (g_state.remote->initialised == 1)) ? &g_state : NULL;
3718 +}
3719 +
3720 +static const struct file_operations
3721 +vchiq_fops = {
3722 + .owner = THIS_MODULE,
3723 + .unlocked_ioctl = vchiq_ioctl,
3724 + .open = vchiq_open,
3725 + .release = vchiq_release,
3726 + .read = vchiq_read
3727 +};
3728 +
3729 +/*
3730 + * Autosuspend related functionality
3731 + */
3732 +
3733 +int
3734 +vchiq_videocore_wanted(VCHIQ_STATE_T *state)
3735 +{
3736 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
3737 + if (!arm_state)
3738 + /* autosuspend not supported - always return wanted */
3739 + return 1;
3740 + else if (arm_state->blocked_count)
3741 + return 1;
3742 + else if (!arm_state->videocore_use_count) {
3743 + /* usage count zero - check for override unless we're forcing */
3744 + if (arm_state->resume_blocked)
3745 + return 0;
3746 + else
3747 + return vchiq_platform_videocore_wanted(state);
3748 + } else
3749 + /* non-zero usage count - videocore still required */
3750 + return 1;
3751 +}
3752 +
3753 +static VCHIQ_STATUS_T
3754 +vchiq_keepalive_vchiq_callback(VCHIQ_REASON_T reason,
3755 + VCHIQ_HEADER_T *header,
3756 + VCHIQ_SERVICE_HANDLE_T service_user,
3757 + void *bulk_user)
3758 +{
3759 + vchiq_log_error(vchiq_susp_log_level,
3760 + "%s callback reason %d", __func__, reason);
3761 + return 0;
3762 +}
3763 +
3764 +static int
3765 +vchiq_keepalive_thread_func(void *v)
3766 +{
3767 + VCHIQ_STATE_T *state = (VCHIQ_STATE_T *) v;
3768 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
3769 +
3770 + VCHIQ_STATUS_T status;
3771 + VCHIQ_INSTANCE_T instance;
3772 + VCHIQ_SERVICE_HANDLE_T ka_handle;
3773 +
3774 + VCHIQ_SERVICE_PARAMS_T params = {
3775 + .fourcc = VCHIQ_MAKE_FOURCC('K', 'E', 'E', 'P'),
3776 + .callback = vchiq_keepalive_vchiq_callback,
3777 + .version = KEEPALIVE_VER,
3778 + .version_min = KEEPALIVE_VER_MIN
3779 + };
3780 +
3781 + status = vchiq_initialise(&instance);
3782 + if (status != VCHIQ_SUCCESS) {
3783 + vchiq_log_error(vchiq_susp_log_level,
3784 + "%s vchiq_initialise failed %d", __func__, status);
3785 + goto exit;
3786 + }
3787 +
3788 + status = vchiq_connect(instance);
3789 + if (status != VCHIQ_SUCCESS) {
3790 + vchiq_log_error(vchiq_susp_log_level,
3791 + "%s vchiq_connect failed %d", __func__, status);
3792 + goto shutdown;
3793 + }
3794 +
3795 + status = vchiq_add_service(instance, &params, &ka_handle);
3796 + if (status != VCHIQ_SUCCESS) {
3797 + vchiq_log_error(vchiq_susp_log_level,
3798 + "%s vchiq_open_service failed %d", __func__, status);
3799 + goto shutdown;
3800 + }
3801 +
3802 + while (1) {
3803 + long rc = 0, uc = 0;
3804 + if (wait_for_completion_interruptible(&arm_state->ka_evt)
3805 + != 0) {
3806 + vchiq_log_error(vchiq_susp_log_level,
3807 + "%s interrupted", __func__);
3808 + flush_signals(current);
3809 + continue;
3810 + }
3811 +
3812 + /* read and clear counters. Do release_count then use_count to
3813 + * prevent getting more releases than uses */
3814 + rc = atomic_xchg(&arm_state->ka_release_count, 0);
3815 + uc = atomic_xchg(&arm_state->ka_use_count, 0);
3816 +
3817 + /* Call use/release service the requisite number of times.
3818 + * Process use before release so use counts don't go negative */
3819 + while (uc--) {
3820 + atomic_inc(&arm_state->ka_use_ack_count);
3821 + status = vchiq_use_service(ka_handle);
3822 + if (status != VCHIQ_SUCCESS) {
3823 + vchiq_log_error(vchiq_susp_log_level,
3824 + "%s vchiq_use_service error %d",
3825 + __func__, status);
3826 + }
3827 + }
3828 + while (rc--) {
3829 + status = vchiq_release_service(ka_handle);
3830 + if (status != VCHIQ_SUCCESS) {
3831 + vchiq_log_error(vchiq_susp_log_level,
3832 + "%s vchiq_release_service error %d",
3833 + __func__, status);
3834 + }
3835 + }
3836 + }
3837 +
3838 +shutdown:
3839 + vchiq_shutdown(instance);
3840 +exit:
3841 + return 0;
3842 +}
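The ordering described in the comments inside the loop above (sample release_count before use_count, then replay uses before releases) can be made concrete with a worked interleaving; this is purely illustrative and not driver code:

/* Illustrative interleaving (not driver code):
 *
 *   1. videocore: remote use(A)
 *   2. videocore: remote release(A)
 *   3. keepalive: rc = atomic_xchg(&ka_release_count, 0) = 1
 *   4. videocore: remote use(B)
 *   5. keepalive: uc = atomic_xchg(&ka_use_count, 0)     = 2
 *
 * Because ka_release_count is sampled first (step 3), any release it sees
 * (A) already had its matching use counted by the time ka_use_count is
 * read (step 5), and the uses are then replayed before the releases, so
 * vchiq_release_service() is never called more often than
 * vchiq_use_service(). Sampling use_count first could let a use/release
 * pair land between the two reads and be seen as a release without a
 * matching use.
 */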
3843 +
3844 +
3845 +
3846 +VCHIQ_STATUS_T
3847 +vchiq_arm_init_state(VCHIQ_STATE_T *state, VCHIQ_ARM_STATE_T *arm_state)
3848 +{
3849 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
3850 +
3851 + if (arm_state) {
3852 + rwlock_init(&arm_state->susp_res_lock);
3853 +
3854 + init_completion(&arm_state->ka_evt);
3855 + atomic_set(&arm_state->ka_use_count, 0);
3856 + atomic_set(&arm_state->ka_use_ack_count, 0);
3857 + atomic_set(&arm_state->ka_release_count, 0);
3858 +
3859 + init_completion(&arm_state->vc_suspend_complete);
3860 +
3861 + init_completion(&arm_state->vc_resume_complete);
3862 + /* Initialise to 'done' state. We only want to block on resume
3863 + * completion while videocore is suspended. */
3864 + set_resume_state(arm_state, VC_RESUME_RESUMED);
3865 +
3866 + init_completion(&arm_state->resume_blocker);
3867 + /* Initialise to 'done' state. We only want to block on this
3868 + * completion while resume is blocked */
3869 + complete_all(&arm_state->resume_blocker);
3870 +
3871 + init_completion(&arm_state->blocked_blocker);
3872 + /* Initialise to 'done' state. We only want to block on this
3873 + * completion while things are waiting on the resume blocker */
3874 + complete_all(&arm_state->blocked_blocker);
3875 +
3876 + arm_state->suspend_timer_timeout = SUSPEND_TIMER_TIMEOUT_MS;
3877 + arm_state->suspend_timer_running = 0;
3878 + init_timer(&arm_state->suspend_timer);
3879 + arm_state->suspend_timer.data = (unsigned long)(state);
3880 + arm_state->suspend_timer.function = suspend_timer_callback;
3881 +
3882 + arm_state->first_connect = 0;
3883 +
3884 + }
3885 + return status;
3886 +}
3887 +
3888 +/*
3889 +** Functions to modify the state variables:
3890 +** set_suspend_state
3891 +** set_resume_state
3892 +**
3893 +** There are more state variables than we might like, so ensure they remain in
3894 +** step. Suspend and resume state are maintained separately, since most of
3895 +** these state machines can operate independently. However, there are a few
3896 +** states where state transitions in one state machine cause a reset to the
3897 +** other state machine. In addition, there are some completion events which
3898 +** need to occur on state machine reset and end-state(s), so these are also
3899 +** dealt with in these functions.
3900 +**
3901 +** In all states we set the state variable according to the input, but in some
3902 +** cases we perform additional steps outlined below:
3903 +**
3904 +** VC_SUSPEND_IDLE - Initialise the suspend completion at the same time.
3905 +** The suspend completion is completed after any suspend
3906 +** attempt. When we reset the state machine we also reset
3907 +** the completion. This reset occurs when videocore is
3908 +** resumed, and also if we initiate suspend after a suspend
3909 +** failure.
3910 +**
3911 +** VC_SUSPEND_IN_PROGRESS - This state is considered the point of no return for
3912 +** suspend - ie from this point on we must try to suspend
3913 +** before resuming can occur. We therefore also reset the
3914 +** resume state machine to VC_RESUME_IDLE in this state.
3915 +**
3916 +** VC_SUSPEND_SUSPENDED - Suspend has completed successfully. Also call
3917 +** complete_all on the suspend completion to notify
3918 +** anything waiting for suspend to happen.
3919 +**
3920 +** VC_SUSPEND_REJECTED - Videocore rejected suspend. Videocore will also
3921 +** initiate resume, so no need to alter resume state.
3922 +** We call complete_all on the suspend completion to notify
3923 +** of suspend rejection.
3924 +**
3925 +** VC_SUSPEND_FAILED - We failed to initiate videocore suspend. We notify the
3926 +** suspend completion and reset the resume state machine.
3927 +**
3928 +** VC_RESUME_IDLE - Initialise the resume completion at the same time. The
3929 +** resume completion is in its 'done' state whenever
3930 +** videocore is running. Therefore, the VC_RESUME_IDLE state
3931 +** implies that videocore is suspended.
3932 +** Hence, any thread which needs to wait until videocore is
3933 +** running can wait on this completion - it will only block
3934 +** if videocore is suspended.
3935 +**
3936 +** VC_RESUME_RESUMED - Resume has completed successfully. Videocore is running.
3937 +** Call complete_all on the resume completion to unblock
3938 +** any threads waiting for resume. Also reset the suspend
3939 +** state machine to its idle state.
3940 +**
3941 +** VC_RESUME_FAILED - Currently unused - no mechanism to fail resume exists.
3942 +*/
3943 +
3944 +inline void
3945 +set_suspend_state(VCHIQ_ARM_STATE_T *arm_state,
3946 + enum vc_suspend_status new_state)
3947 +{
3948 + /* set the state in all cases */
3949 + arm_state->vc_suspend_state = new_state;
3950 +
3951 + /* state specific additional actions */
3952 + switch (new_state) {
3953 + case VC_SUSPEND_FORCE_CANCELED:
3954 + complete_all(&arm_state->vc_suspend_complete);
3955 + break;
3956 + case VC_SUSPEND_REJECTED:
3957 + complete_all(&arm_state->vc_suspend_complete);
3958 + break;
3959 + case VC_SUSPEND_FAILED:
3960 + complete_all(&arm_state->vc_suspend_complete);
3961 + arm_state->vc_resume_state = VC_RESUME_RESUMED;
3962 + complete_all(&arm_state->vc_resume_complete);
3963 + break;
3964 + case VC_SUSPEND_IDLE:
3965 + reinit_completion(&arm_state->vc_suspend_complete);
3966 + break;
3967 + case VC_SUSPEND_REQUESTED:
3968 + break;
3969 + case VC_SUSPEND_IN_PROGRESS:
3970 + set_resume_state(arm_state, VC_RESUME_IDLE);
3971 + break;
3972 + case VC_SUSPEND_SUSPENDED:
3973 + complete_all(&arm_state->vc_suspend_complete);
3974 + break;
3975 + default:
3976 + BUG();
3977 + break;
3978 + }
3979 +}
3980 +
3981 +inline void
3982 +set_resume_state(VCHIQ_ARM_STATE_T *arm_state,
3983 + enum vc_resume_status new_state)
3984 +{
3985 + /* set the state in all cases */
3986 + arm_state->vc_resume_state = new_state;
3987 +
3988 + /* state specific additional actions */
3989 + switch (new_state) {
3990 + case VC_RESUME_FAILED:
3991 + break;
3992 + case VC_RESUME_IDLE:
3993 + reinit_completion(&arm_state->vc_resume_complete);
3994 + break;
3995 + case VC_RESUME_REQUESTED:
3996 + break;
3997 + case VC_RESUME_IN_PROGRESS:
3998 + break;
3999 + case VC_RESUME_RESUMED:
4000 + complete_all(&arm_state->vc_resume_complete);
4001 + set_suspend_state(arm_state, VC_SUSPEND_IDLE);
4002 + break;
4003 + default:
4004 + BUG();
4005 + break;
4006 + }
4007 +}
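To tie the two setters back to the block comment above them, here is an illustrative call sequence for one suspend/resume cycle. It is not driver code and not a real caller; it only assumes a VCHIQ_ARM_STATE_T as used in this file, and mirrors the locking the real callers apply.

/* Illustrative only: one suspend/resume cycle showing the cross-machine
 * resets performed by set_suspend_state()/set_resume_state(). */
static void example_suspend_resume_cycle(VCHIQ_ARM_STATE_T *arm_state)
{
        write_lock_bh(&arm_state->susp_res_lock);

        set_suspend_state(arm_state, VC_SUSPEND_REQUESTED);
        set_suspend_state(arm_state, VC_SUSPEND_IN_PROGRESS);
        /* ...also drives set_resume_state(VC_RESUME_IDLE), re-arming
         * vc_resume_complete so users of the service now block. */
        set_suspend_state(arm_state, VC_SUSPEND_SUSPENDED);
        /* complete_all(vc_suspend_complete) wakes force-suspend waiters. */

        /* ...later, when videocore is wanted again... */
        set_resume_state(arm_state, VC_RESUME_REQUESTED);
        set_resume_state(arm_state, VC_RESUME_IN_PROGRESS);
        set_resume_state(arm_state, VC_RESUME_RESUMED);
        /* complete_all(vc_resume_complete) unblocks waiting users and
         * resets the suspend machine to VC_SUSPEND_IDLE. */

        write_unlock_bh(&arm_state->susp_res_lock);
}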
4008 +
4009 +
4010 +/* should be called with the write lock held */
4011 +inline void
4012 +start_suspend_timer(VCHIQ_ARM_STATE_T *arm_state)
4013 +{
4014 + del_timer(&arm_state->suspend_timer);
4015 + arm_state->suspend_timer.expires = jiffies +
4016 + msecs_to_jiffies(arm_state->
4017 + suspend_timer_timeout);
4018 + add_timer(&arm_state->suspend_timer);
4019 + arm_state->suspend_timer_running = 1;
4020 +}
4021 +
4022 +/* should be called with the write lock held */
4023 +static inline void
4024 +stop_suspend_timer(VCHIQ_ARM_STATE_T *arm_state)
4025 +{
4026 + if (arm_state->suspend_timer_running) {
4027 + del_timer(&arm_state->suspend_timer);
4028 + arm_state->suspend_timer_running = 0;
4029 + }
4030 +}
4031 +
4032 +static inline int
4033 +need_resume(VCHIQ_STATE_T *state)
4034 +{
4035 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4036 + return (arm_state->vc_suspend_state > VC_SUSPEND_IDLE) &&
4037 + (arm_state->vc_resume_state < VC_RESUME_REQUESTED) &&
4038 + vchiq_videocore_wanted(state);
4039 +}
4040 +
4041 +static int
4042 +block_resume(VCHIQ_ARM_STATE_T *arm_state)
4043 +{
4044 + int status = VCHIQ_SUCCESS;
4045 + const unsigned long timeout_val =
4046 + msecs_to_jiffies(FORCE_SUSPEND_TIMEOUT_MS);
4047 + int resume_count = 0;
4048 +
4049 + /* Allow any threads which were blocked by the last force suspend to
4050 + * complete if they haven't already. Only give this one shot; if
4051 + * blocked_count is incremented after blocked_blocker is completed
4052 + * (which only happens when blocked_count hits 0) then those threads
4053 + * will have to wait until next time around */
4054 + if (arm_state->blocked_count) {
4055 + reinit_completion(&arm_state->blocked_blocker);
4056 + write_unlock_bh(&arm_state->susp_res_lock);
4057 + vchiq_log_info(vchiq_susp_log_level, "%s wait for previously "
4058 + "blocked clients", __func__);
4059 + if (wait_for_completion_interruptible_timeout(
4060 + &arm_state->blocked_blocker, timeout_val)
4061 + <= 0) {
4062 + vchiq_log_error(vchiq_susp_log_level, "%s wait for "
4063 + "previously blocked clients failed" , __func__);
4064 + status = VCHIQ_ERROR;
4065 + write_lock_bh(&arm_state->susp_res_lock);
4066 + goto out;
4067 + }
4068 + vchiq_log_info(vchiq_susp_log_level, "%s previously blocked "
4069 + "clients resumed", __func__);
4070 + write_lock_bh(&arm_state->susp_res_lock);
4071 + }
4072 +
4073 + /* We need to wait for resume to complete if it's in process */
4074 + while (arm_state->vc_resume_state != VC_RESUME_RESUMED &&
4075 + arm_state->vc_resume_state > VC_RESUME_IDLE) {
4076 + if (resume_count > 1) {
4077 + status = VCHIQ_ERROR;
4078 + vchiq_log_error(vchiq_susp_log_level, "%s waited too "
4079 + "many times for resume" , __func__);
4080 + goto out;
4081 + }
4082 + write_unlock_bh(&arm_state->susp_res_lock);
4083 + vchiq_log_info(vchiq_susp_log_level, "%s wait for resume",
4084 + __func__);
4085 + if (wait_for_completion_interruptible_timeout(
4086 + &arm_state->vc_resume_complete, timeout_val)
4087 + <= 0) {
4088 + vchiq_log_error(vchiq_susp_log_level, "%s wait for "
4089 + "resume failed (%s)", __func__,
4090 + resume_state_names[arm_state->vc_resume_state +
4091 + VC_RESUME_NUM_OFFSET]);
4092 + status = VCHIQ_ERROR;
4093 + write_lock_bh(&arm_state->susp_res_lock);
4094 + goto out;
4095 + }
4096 + vchiq_log_info(vchiq_susp_log_level, "%s resumed", __func__);
4097 + write_lock_bh(&arm_state->susp_res_lock);
4098 + resume_count++;
4099 + }
4100 + reinit_completion(&arm_state->resume_blocker);
4101 + arm_state->resume_blocked = 1;
4102 +
4103 +out:
4104 + return status;
4105 +}
4106 +
4107 +static inline void
4108 +unblock_resume(VCHIQ_ARM_STATE_T *arm_state)
4109 +{
4110 + complete_all(&arm_state->resume_blocker);
4111 + arm_state->resume_blocked = 0;
4112 +}
4113 +
4114 +/* Initiate suspend via slot handler. Should be called with the write lock
4115 + * held */
4116 +VCHIQ_STATUS_T
4117 +vchiq_arm_vcsuspend(VCHIQ_STATE_T *state)
4118 +{
4119 + VCHIQ_STATUS_T status = VCHIQ_ERROR;
4120 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4121 +
4122 + if (!arm_state)
4123 + goto out;
4124 +
4125 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4126 + status = VCHIQ_SUCCESS;
4127 +
4128 +
4129 + switch (arm_state->vc_suspend_state) {
4130 + case VC_SUSPEND_REQUESTED:
4131 + vchiq_log_info(vchiq_susp_log_level, "%s: suspend already "
4132 + "requested", __func__);
4133 + break;
4134 + case VC_SUSPEND_IN_PROGRESS:
4135 + vchiq_log_info(vchiq_susp_log_level, "%s: suspend already in "
4136 + "progress", __func__);
4137 + break;
4138 +
4139 + default:
4140 + /* We don't expect to be in other states, so log but continue
4141 + * anyway */
4142 + vchiq_log_error(vchiq_susp_log_level,
4143 + "%s unexpected suspend state %s", __func__,
4144 + suspend_state_names[arm_state->vc_suspend_state +
4145 + VC_SUSPEND_NUM_OFFSET]);
4146 + /* fall through */
4147 + case VC_SUSPEND_REJECTED:
4148 + case VC_SUSPEND_FAILED:
4149 + /* Ensure any idle state actions have been run */
4150 + set_suspend_state(arm_state, VC_SUSPEND_IDLE);
4151 + /* fall through */
4152 + case VC_SUSPEND_IDLE:
4153 + vchiq_log_info(vchiq_susp_log_level,
4154 + "%s: suspending", __func__);
4155 + set_suspend_state(arm_state, VC_SUSPEND_REQUESTED);
4156 + /* kick the slot handler thread to initiate suspend */
4157 + request_poll(state, NULL, 0);
4158 + break;
4159 + }
4160 +
4161 +out:
4162 + vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, status);
4163 + return status;
4164 +}
4165 +
4166 +void
4167 +vchiq_platform_check_suspend(VCHIQ_STATE_T *state)
4168 +{
4169 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4170 + int susp = 0;
4171 +
4172 + if (!arm_state)
4173 + goto out;
4174 +
4175 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4176 +
4177 + write_lock_bh(&arm_state->susp_res_lock);
4178 + if (arm_state->vc_suspend_state == VC_SUSPEND_REQUESTED &&
4179 + arm_state->vc_resume_state == VC_RESUME_RESUMED) {
4180 + set_suspend_state(arm_state, VC_SUSPEND_IN_PROGRESS);
4181 + susp = 1;
4182 + }
4183 + write_unlock_bh(&arm_state->susp_res_lock);
4184 +
4185 + if (susp)
4186 + vchiq_platform_suspend(state);
4187 +
4188 +out:
4189 + vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
4190 + return;
4191 +}
4192 +
4193 +
4194 +static void
4195 +output_timeout_error(VCHIQ_STATE_T *state)
4196 +{
4197 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4198 + char service_err[50] = "";
4199 + int vc_use_count = arm_state->videocore_use_count;
4200 + int active_services = state->unused_service;
4201 + int i;
4202 +
4203 + if (!arm_state->videocore_use_count) {
4204 + snprintf(service_err, 50, " Videocore usecount is 0");
4205 + goto output_msg;
4206 + }
4207 + for (i = 0; i < active_services; i++) {
4208 + VCHIQ_SERVICE_T *service_ptr = state->services[i];
4209 + if (service_ptr && service_ptr->service_use_count &&
4210 + (service_ptr->srvstate != VCHIQ_SRVSTATE_FREE)) {
4211 + snprintf(service_err, 50, " %c%c%c%c(%d) service has "
4212 + "use count %d%s", VCHIQ_FOURCC_AS_4CHARS(
4213 + service_ptr->base.fourcc),
4214 + service_ptr->client_id,
4215 + service_ptr->service_use_count,
4216 + service_ptr->service_use_count ==
4217 + vc_use_count ? "" : " (+ more)");
4218 + break;
4219 + }
4220 + }
4221 +
4222 +output_msg:
4223 + vchiq_log_error(vchiq_susp_log_level,
4224 + "timed out waiting for vc suspend (%d).%s",
4225 + arm_state->autosuspend_override, service_err);
4226 +
4227 +}
4228 +
4229 +/* Try to get videocore into suspended state, regardless of autosuspend state.
4230 +** We don't actually force suspend, since videocore may get into a bad state
4231 +** if we force suspend at a bad time. Instead, we wait for autosuspend to
4232 +** determine a good point to suspend. If this doesn't happen within 100ms we
4233 +** report failure.
4234 +**
4235 +** Returns VCHIQ_SUCCESS if videocore suspended successfully, VCHIQ_RETRY if
4236 +** videocore failed to suspend in time or VCHIQ_ERROR if interrupted.
4237 +*/
4238 +VCHIQ_STATUS_T
4239 +vchiq_arm_force_suspend(VCHIQ_STATE_T *state)
4240 +{
4241 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4242 + VCHIQ_STATUS_T status = VCHIQ_ERROR;
4243 + long rc = 0;
4244 + int repeat = -1;
4245 +
4246 + if (!arm_state)
4247 + goto out;
4248 +
4249 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4250 +
4251 + write_lock_bh(&arm_state->susp_res_lock);
4252 +
4253 + status = block_resume(arm_state);
4254 + if (status != VCHIQ_SUCCESS)
4255 + goto unlock;
4256 + if (arm_state->vc_suspend_state == VC_SUSPEND_SUSPENDED) {
4257 + /* Already suspended - just block resume and exit */
4258 + vchiq_log_info(vchiq_susp_log_level, "%s already suspended",
4259 + __func__);
4260 + status = VCHIQ_SUCCESS;
4261 + goto unlock;
4262 + } else if (arm_state->vc_suspend_state <= VC_SUSPEND_IDLE) {
4263 + /* initiate suspend immediately in the case that we're waiting
4264 + * for the timeout */
4265 + stop_suspend_timer(arm_state);
4266 + if (!vchiq_videocore_wanted(state)) {
4267 + vchiq_log_info(vchiq_susp_log_level, "%s videocore "
4268 + "idle, initiating suspend", __func__);
4269 + status = vchiq_arm_vcsuspend(state);
4270 + } else if (arm_state->autosuspend_override <
4271 + FORCE_SUSPEND_FAIL_MAX) {
4272 + vchiq_log_info(vchiq_susp_log_level, "%s letting "
4273 + "videocore go idle", __func__);
4274 + status = VCHIQ_SUCCESS;
4275 + } else {
4276 + vchiq_log_warning(vchiq_susp_log_level, "%s failed too "
4277 + "many times - attempting suspend", __func__);
4278 + status = vchiq_arm_vcsuspend(state);
4279 + }
4280 + } else {
4281 + vchiq_log_info(vchiq_susp_log_level, "%s videocore suspend "
4282 + "in progress - wait for completion", __func__);
4283 + status = VCHIQ_SUCCESS;
4284 + }
4285 +
4286 + /* Wait for suspend to happen due to system idle (not forced..) */
4287 + if (status != VCHIQ_SUCCESS)
4288 + goto unblock_resume;
4289 +
4290 + do {
4291 + write_unlock_bh(&arm_state->susp_res_lock);
4292 +
4293 + rc = wait_for_completion_interruptible_timeout(
4294 + &arm_state->vc_suspend_complete,
4295 + msecs_to_jiffies(FORCE_SUSPEND_TIMEOUT_MS));
4296 +
4297 + write_lock_bh(&arm_state->susp_res_lock);
4298 + if (rc < 0) {
4299 + vchiq_log_warning(vchiq_susp_log_level, "%s "
4300 + "interrupted waiting for suspend", __func__);
4301 + status = VCHIQ_ERROR;
4302 + goto unblock_resume;
4303 + } else if (rc == 0) {
4304 + if (arm_state->vc_suspend_state > VC_SUSPEND_IDLE) {
4305 + /* Repeat timeout once if in progress */
4306 + if (repeat < 0) {
4307 + repeat = 1;
4308 + continue;
4309 + }
4310 + }
4311 + arm_state->autosuspend_override++;
4312 + output_timeout_error(state);
4313 +
4314 + status = VCHIQ_RETRY;
4315 + goto unblock_resume;
4316 + }
4317 + } while (0 < (repeat--));
4318 +
4319 + /* Check and report state in case we need to abort ARM suspend */
4320 + if (arm_state->vc_suspend_state != VC_SUSPEND_SUSPENDED) {
4321 + status = VCHIQ_RETRY;
4322 + vchiq_log_error(vchiq_susp_log_level,
4323 + "%s videocore suspend failed (state %s)", __func__,
4324 + suspend_state_names[arm_state->vc_suspend_state +
4325 + VC_SUSPEND_NUM_OFFSET]);
4326 + /* Reset the state only if it's still in an error state.
4327 + * Something could have already initiated another suspend. */
4328 + if (arm_state->vc_suspend_state < VC_SUSPEND_IDLE)
4329 + set_suspend_state(arm_state, VC_SUSPEND_IDLE);
4330 +
4331 + goto unblock_resume;
4332 + }
4333 +
4334 + /* successfully suspended - unlock and exit */
4335 + goto unlock;
4336 +
4337 +unblock_resume:
4338 + /* all error states need to unblock resume before exit */
4339 + unblock_resume(arm_state);
4340 +
4341 +unlock:
4342 + write_unlock_bh(&arm_state->susp_res_lock);
4343 +
4344 +out:
4345 + vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, status);
4346 + return status;
4347 +}
4348 +
4349 +void
4350 +vchiq_check_suspend(VCHIQ_STATE_T *state)
4351 +{
4352 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4353 +
4354 + if (!arm_state)
4355 + goto out;
4356 +
4357 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4358 +
4359 + write_lock_bh(&arm_state->susp_res_lock);
4360 + if (arm_state->vc_suspend_state != VC_SUSPEND_SUSPENDED &&
4361 + arm_state->first_connect &&
4362 + !vchiq_videocore_wanted(state)) {
4363 + vchiq_arm_vcsuspend(state);
4364 + }
4365 + write_unlock_bh(&arm_state->susp_res_lock);
4366 +
4367 +out:
4368 + vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
4369 + return;
4370 +}
4371 +
4372 +
4373 +int
4374 +vchiq_arm_allow_resume(VCHIQ_STATE_T *state)
4375 +{
4376 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4377 + int resume = 0;
4378 + int ret = -1;
4379 +
4380 + if (!arm_state)
4381 + goto out;
4382 +
4383 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4384 +
4385 + write_lock_bh(&arm_state->susp_res_lock);
4386 + unblock_resume(arm_state);
4387 + resume = vchiq_check_resume(state);
4388 + write_unlock_bh(&arm_state->susp_res_lock);
4389 +
4390 + if (resume) {
4391 + if (wait_for_completion_interruptible(
4392 + &arm_state->vc_resume_complete) < 0) {
4393 + vchiq_log_error(vchiq_susp_log_level,
4394 + "%s interrupted", __func__);
4395 + /* failed, cannot accurately derive suspend
4396 + * state, so exit early. */
4397 + goto out;
4398 + }
4399 + }
4400 +
4401 + read_lock_bh(&arm_state->susp_res_lock);
4402 + if (arm_state->vc_suspend_state == VC_SUSPEND_SUSPENDED) {
4403 + vchiq_log_info(vchiq_susp_log_level,
4404 + "%s: Videocore remains suspended", __func__);
4405 + } else {
4406 + vchiq_log_info(vchiq_susp_log_level,
4407 + "%s: Videocore resumed", __func__);
4408 + ret = 0;
4409 + }
4410 + read_unlock_bh(&arm_state->susp_res_lock);
4411 +out:
4412 + vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
4413 + return ret;
4414 +}
4415 +
4416 +/* This function should be called with the write lock held */
4417 +int
4418 +vchiq_check_resume(VCHIQ_STATE_T *state)
4419 +{
4420 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4421 + int resume = 0;
4422 +
4423 + if (!arm_state)
4424 + goto out;
4425 +
4426 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4427 +
4428 + if (need_resume(state)) {
4429 + set_resume_state(arm_state, VC_RESUME_REQUESTED);
4430 + request_poll(state, NULL, 0);
4431 + resume = 1;
4432 + }
4433 +
4434 +out:
4435 + vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
4436 + return resume;
4437 +}
4438 +
4439 +void
4440 +vchiq_platform_check_resume(VCHIQ_STATE_T *state)
4441 +{
4442 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4443 + int res = 0;
4444 +
4445 + if (!arm_state)
4446 + goto out;
4447 +
4448 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4449 +
4450 + write_lock_bh(&arm_state->susp_res_lock);
4451 + if (arm_state->wake_address == 0) {
4452 + vchiq_log_info(vchiq_susp_log_level,
4453 + "%s: already awake", __func__);
4454 + goto unlock;
4455 + }
4456 + if (arm_state->vc_resume_state == VC_RESUME_IN_PROGRESS) {
4457 + vchiq_log_info(vchiq_susp_log_level,
4458 + "%s: already resuming", __func__);
4459 + goto unlock;
4460 + }
4461 +
4462 + if (arm_state->vc_resume_state == VC_RESUME_REQUESTED) {
4463 + set_resume_state(arm_state, VC_RESUME_IN_PROGRESS);
4464 + res = 1;
4465 + } else
4466 + vchiq_log_trace(vchiq_susp_log_level,
4467 + "%s: not resuming (resume state %s)", __func__,
4468 + resume_state_names[arm_state->vc_resume_state +
4469 + VC_RESUME_NUM_OFFSET]);
4470 +
4471 +unlock:
4472 + write_unlock_bh(&arm_state->susp_res_lock);
4473 +
4474 + if (res)
4475 + vchiq_platform_resume(state);
4476 +
4477 +out:
4478 + vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
4479 + return;
4480 +
4481 +}
4482 +
4483 +
4484 +
4485 +VCHIQ_STATUS_T
4486 +vchiq_use_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
4487 + enum USE_TYPE_E use_type)
4488 +{
4489 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4490 + VCHIQ_STATUS_T ret = VCHIQ_SUCCESS;
4491 + char entity[16];
4492 + int *entity_uc;
4493 + int local_uc, local_entity_uc;
4494 +
4495 + if (!arm_state)
4496 + goto out;
4497 +
4498 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4499 +
4500 + if (use_type == USE_TYPE_VCHIQ) {
4501 + sprintf(entity, "VCHIQ: ");
4502 + entity_uc = &arm_state->peer_use_count;
4503 + } else if (service) {
4504 + sprintf(entity, "%c%c%c%c:%03d",
4505 + VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
4506 + service->client_id);
4507 + entity_uc = &service->service_use_count;
4508 + } else {
4509 + vchiq_log_error(vchiq_susp_log_level, "%s null service "
4510 + "ptr", __func__);
4511 + ret = VCHIQ_ERROR;
4512 + goto out;
4513 + }
4514 +
4515 + write_lock_bh(&arm_state->susp_res_lock);
4516 + while (arm_state->resume_blocked) {
4517 + /* If we call 'use' while force suspend is waiting for suspend,
4518 + * then we're about to block the thread which the force is
4519 + * waiting to complete, so we're bound to just time out. In this
4520 + * case, set the suspend state such that the wait will be
4521 + * canceled, so we can complete as quickly as possible. */
4522 + if (arm_state->resume_blocked && arm_state->vc_suspend_state ==
4523 + VC_SUSPEND_IDLE) {
4524 + set_suspend_state(arm_state, VC_SUSPEND_FORCE_CANCELED);
4525 + break;
4526 + }
4527 + /* If suspend is already in progress then we need to block */
4528 + if (!try_wait_for_completion(&arm_state->resume_blocker)) {
4529 + /* Indicate that there are threads waiting on the resume
4530 + * blocker. These need to be allowed to complete before
4531 + * a _second_ call to force suspend can complete,
4532 + * otherwise low priority threads might never actually
4533 + * continue */
4534 + arm_state->blocked_count++;
4535 + write_unlock_bh(&arm_state->susp_res_lock);
4536 + vchiq_log_info(vchiq_susp_log_level, "%s %s resume "
4537 + "blocked - waiting...", __func__, entity);
4538 + if (wait_for_completion_killable(
4539 + &arm_state->resume_blocker) != 0) {
4540 + vchiq_log_error(vchiq_susp_log_level, "%s %s "
4541 + "wait for resume blocker interrupted",
4542 + __func__, entity);
4543 + ret = VCHIQ_ERROR;
4544 + write_lock_bh(&arm_state->susp_res_lock);
4545 + arm_state->blocked_count--;
4546 + write_unlock_bh(&arm_state->susp_res_lock);
4547 + goto out;
4548 + }
4549 + vchiq_log_info(vchiq_susp_log_level, "%s %s resume "
4550 + "unblocked", __func__, entity);
4551 + write_lock_bh(&arm_state->susp_res_lock);
4552 + if (--arm_state->blocked_count == 0)
4553 + complete_all(&arm_state->blocked_blocker);
4554 + }
4555 + }
4556 +
4557 + stop_suspend_timer(arm_state);
4558 +
4559 + local_uc = ++arm_state->videocore_use_count;
4560 + local_entity_uc = ++(*entity_uc);
4561 +
4562 + /* If there's a pending request which hasn't yet been serviced then
4563 + * just clear it. If we're past VC_SUSPEND_REQUESTED state then
4564 + * vc_resume_complete will block until we either resume or fail to
4565 + * suspend */
4566 + if (arm_state->vc_suspend_state <= VC_SUSPEND_REQUESTED)
4567 + set_suspend_state(arm_state, VC_SUSPEND_IDLE);
4568 +
4569 + if ((use_type != USE_TYPE_SERVICE_NO_RESUME) && need_resume(state)) {
4570 + set_resume_state(arm_state, VC_RESUME_REQUESTED);
4571 + vchiq_log_info(vchiq_susp_log_level,
4572 + "%s %s count %d, state count %d",
4573 + __func__, entity, local_entity_uc, local_uc);
4574 + request_poll(state, NULL, 0);
4575 + } else
4576 + vchiq_log_trace(vchiq_susp_log_level,
4577 + "%s %s count %d, state count %d",
4578 + __func__, entity, *entity_uc, local_uc);
4579 +
4580 +
4581 + write_unlock_bh(&arm_state->susp_res_lock);
4582 +
4583 + /* Completion is in a done state when we're not suspended, so this won't
4584 + * block for the non-suspended case. */
4585 + if (!try_wait_for_completion(&arm_state->vc_resume_complete)) {
4586 + vchiq_log_info(vchiq_susp_log_level, "%s %s wait for resume",
4587 + __func__, entity);
4588 + if (wait_for_completion_killable(
4589 + &arm_state->vc_resume_complete) != 0) {
4590 + vchiq_log_error(vchiq_susp_log_level, "%s %s wait for "
4591 + "resume interrupted", __func__, entity);
4592 + ret = VCHIQ_ERROR;
4593 + goto out;
4594 + }
4595 + vchiq_log_info(vchiq_susp_log_level, "%s %s resumed", __func__,
4596 + entity);
4597 + }
4598 +
4599 + if (ret == VCHIQ_SUCCESS) {
4600 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
4601 + long ack_cnt = atomic_xchg(&arm_state->ka_use_ack_count, 0);
4602 + while (ack_cnt && (status == VCHIQ_SUCCESS)) {
4603 + /* Send the use notify to videocore */
4604 + status = vchiq_send_remote_use_active(state);
4605 + if (status == VCHIQ_SUCCESS)
4606 + ack_cnt--;
4607 + else
4608 + atomic_add(ack_cnt,
4609 + &arm_state->ka_use_ack_count);
4610 + }
4611 + }
4612 +
4613 +out:
4614 + vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
4615 + return ret;
4616 +}
4617 +
4618 +VCHIQ_STATUS_T
4619 +vchiq_release_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service)
4620 +{
4621 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4622 + VCHIQ_STATUS_T ret = VCHIQ_SUCCESS;
4623 + char entity[16];
4624 + int *entity_uc;
4625 + int local_uc, local_entity_uc;
4626 +
4627 + if (!arm_state)
4628 + goto out;
4629 +
4630 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4631 +
4632 + if (service) {
4633 + sprintf(entity, "%c%c%c%c:%03d",
4634 + VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
4635 + service->client_id);
4636 + entity_uc = &service->service_use_count;
4637 + } else {
4638 + sprintf(entity, "PEER: ");
4639 + entity_uc = &arm_state->peer_use_count;
4640 + }
4641 +
4642 + write_lock_bh(&arm_state->susp_res_lock);
4643 + if (!arm_state->videocore_use_count || !(*entity_uc)) {
4644 + /* Don't use BUG_ON - don't allow user thread to crash kernel */
4645 + WARN_ON(!arm_state->videocore_use_count);
4646 + WARN_ON(!(*entity_uc));
4647 + ret = VCHIQ_ERROR;
4648 + goto unlock;
4649 + }
4650 + local_uc = --arm_state->videocore_use_count;
4651 + local_entity_uc = --(*entity_uc);
4652 +
4653 + if (!vchiq_videocore_wanted(state)) {
4654 + if (vchiq_platform_use_suspend_timer() &&
4655 + !arm_state->resume_blocked) {
4656 + /* Only use the timer if we're not trying to force
4657 + * suspend (=> resume_blocked) */
4658 + start_suspend_timer(arm_state);
4659 + } else {
4660 + vchiq_log_info(vchiq_susp_log_level,
4661 + "%s %s count %d, state count %d - suspending",
4662 + __func__, entity, *entity_uc,
4663 + arm_state->videocore_use_count);
4664 + vchiq_arm_vcsuspend(state);
4665 + }
4666 + } else
4667 + vchiq_log_trace(vchiq_susp_log_level,
4668 + "%s %s count %d, state count %d",
4669 + __func__, entity, *entity_uc,
4670 + arm_state->videocore_use_count);
4671 +
4672 +unlock:
4673 + write_unlock_bh(&arm_state->susp_res_lock);
4674 +
4675 +out:
4676 + vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
4677 + return ret;
4678 +}
4679 +
4680 +void
4681 +vchiq_on_remote_use(VCHIQ_STATE_T *state)
4682 +{
4683 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4684 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4685 + atomic_inc(&arm_state->ka_use_count);
4686 + complete(&arm_state->ka_evt);
4687 +}
4688 +
4689 +void
4690 +vchiq_on_remote_release(VCHIQ_STATE_T *state)
4691 +{
4692 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4693 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4694 + atomic_inc(&arm_state->ka_release_count);
4695 + complete(&arm_state->ka_evt);
4696 +}
4697 +
4698 +VCHIQ_STATUS_T
4699 +vchiq_use_service_internal(VCHIQ_SERVICE_T *service)
4700 +{
4701 + return vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
4702 +}
4703 +
4704 +VCHIQ_STATUS_T
4705 +vchiq_release_service_internal(VCHIQ_SERVICE_T *service)
4706 +{
4707 + return vchiq_release_internal(service->state, service);
4708 +}
4709 +
4710 +static void suspend_timer_callback(unsigned long context)
4711 +{
4712 + VCHIQ_STATE_T *state = (VCHIQ_STATE_T *)context;
4713 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4714 + if (!arm_state)
4715 + goto out;
4716 + vchiq_log_info(vchiq_susp_log_level,
4717 + "%s - suspend timer expired - check suspend", __func__);
4718 + vchiq_check_suspend(state);
4719 +out:
4720 + return;
4721 +}
4722 +
4723 +VCHIQ_STATUS_T
4724 +vchiq_use_service_no_resume(VCHIQ_SERVICE_HANDLE_T handle)
4725 +{
4726 + VCHIQ_STATUS_T ret = VCHIQ_ERROR;
4727 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
4728 + if (service) {
4729 + ret = vchiq_use_internal(service->state, service,
4730 + USE_TYPE_SERVICE_NO_RESUME);
4731 + unlock_service(service);
4732 + }
4733 + return ret;
4734 +}
4735 +
4736 +VCHIQ_STATUS_T
4737 +vchiq_use_service(VCHIQ_SERVICE_HANDLE_T handle)
4738 +{
4739 + VCHIQ_STATUS_T ret = VCHIQ_ERROR;
4740 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
4741 + if (service) {
4742 + ret = vchiq_use_internal(service->state, service,
4743 + USE_TYPE_SERVICE);
4744 + unlock_service(service);
4745 + }
4746 + return ret;
4747 +}
4748 +
4749 +VCHIQ_STATUS_T
4750 +vchiq_release_service(VCHIQ_SERVICE_HANDLE_T handle)
4751 +{
4752 + VCHIQ_STATUS_T ret = VCHIQ_ERROR;
4753 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
4754 + if (service) {
4755 + ret = vchiq_release_internal(service->state, service);
4756 + unlock_service(service);
4757 + }
4758 + return ret;
4759 +}
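
vchiq_use_service() and vchiq_release_service() are meant to be called in balanced pairs around any activity that must keep VideoCore out of suspend; the counts they maintain are what vchiq_videocore_wanted() and the suspend timer consult. A hedged sketch of a hypothetical kernel-side caller (the helper and its arguments are placeholders; vchiq_queue_message() is the kernel API declared in vchiq_if.h):

/* Illustrative only: bracket one message so VideoCore cannot suspend
 * while it is outstanding. */
static VCHIQ_STATUS_T example_send_keeping_vc_awake(
	VCHIQ_SERVICE_HANDLE_T handle,
	const VCHIQ_ELEMENT_T *elements, int count)
{
	VCHIQ_STATUS_T status = vchiq_use_service(handle);

	if (status != VCHIQ_SUCCESS)
		return status;	/* resume was refused or interrupted */

	status = vchiq_queue_message(handle, elements, count);

	/* Always drop the use count, even on failure, so the per-service
	 * and global counts stay balanced and suspend can happen later. */
	vchiq_release_service(handle);

	return status;
}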
4760 +
4761 +void
4762 +vchiq_dump_service_use_state(VCHIQ_STATE_T *state)
4763 +{
4764 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4765 + int i, j = 0;
4766 + /* Only dump 64 services */
4767 + static const int local_max_services = 64;
4768 + /* If there's more than 64 services, only dump ones with
4769 + * non-zero counts */
4770 + int only_nonzero = 0;
4771 + static const char *nz = "<-- preventing suspend";
4772 +
4773 + enum vc_suspend_status vc_suspend_state;
4774 + enum vc_resume_status vc_resume_state;
4775 + int peer_count;
4776 + int vc_use_count;
4777 + int active_services;
4778 + struct service_data_struct {
4779 + int fourcc;
4780 + int clientid;
4781 + int use_count;
4782 + } service_data[local_max_services];
4783 +
4784 + if (!arm_state)
4785 + return;
4786 +
4787 + read_lock_bh(&arm_state->susp_res_lock);
4788 + vc_suspend_state = arm_state->vc_suspend_state;
4789 + vc_resume_state = arm_state->vc_resume_state;
4790 + peer_count = arm_state->peer_use_count;
4791 + vc_use_count = arm_state->videocore_use_count;
4792 + active_services = state->unused_service;
4793 + if (active_services > local_max_services)
4794 + only_nonzero = 1;
4795 +
4796 + for (i = 0; (i < active_services) && (j < local_max_services); i++) {
4797 + VCHIQ_SERVICE_T *service_ptr = state->services[i];
4798 + if (!service_ptr)
4799 + continue;
4800 +
4801 + if (only_nonzero && !service_ptr->service_use_count)
4802 + continue;
4803 +
4804 + if (service_ptr->srvstate != VCHIQ_SRVSTATE_FREE) {
4805 + service_data[j].fourcc = service_ptr->base.fourcc;
4806 + service_data[j].clientid = service_ptr->client_id;
4807 + service_data[j++].use_count = service_ptr->
4808 + service_use_count;
4809 + }
4810 + }
4811 +
4812 + read_unlock_bh(&arm_state->susp_res_lock);
4813 +
4814 + vchiq_log_warning(vchiq_susp_log_level,
4815 +		"-- Videocore suspend state: %s --",
4816 + suspend_state_names[vc_suspend_state + VC_SUSPEND_NUM_OFFSET]);
4817 + vchiq_log_warning(vchiq_susp_log_level,
4818 +		"-- Videocore resume state: %s --",
4819 + resume_state_names[vc_resume_state + VC_RESUME_NUM_OFFSET]);
4820 +
4821 + if (only_nonzero)
4822 + vchiq_log_warning(vchiq_susp_log_level, "Too many active "
4823 + "services (%d). Only dumping up to first %d services "
4824 + "with non-zero use-count", active_services,
4825 + local_max_services);
4826 +
4827 + for (i = 0; i < j; i++) {
4828 + vchiq_log_warning(vchiq_susp_log_level,
4829 + "----- %c%c%c%c:%d service count %d %s",
4830 + VCHIQ_FOURCC_AS_4CHARS(service_data[i].fourcc),
4831 + service_data[i].clientid,
4832 + service_data[i].use_count,
4833 + service_data[i].use_count ? nz : "");
4834 + }
4835 + vchiq_log_warning(vchiq_susp_log_level,
4836 +		"----- VCHIQ use count %d", peer_count);
4837 + vchiq_log_warning(vchiq_susp_log_level,
4838 + "--- Overall vchiq instance use count %d", vc_use_count);
4839 +
4840 + vchiq_dump_platform_use_state(state);
4841 +}
4842 +
4843 +VCHIQ_STATUS_T
4844 +vchiq_check_service(VCHIQ_SERVICE_T *service)
4845 +{
4846 + VCHIQ_ARM_STATE_T *arm_state;
4847 + VCHIQ_STATUS_T ret = VCHIQ_ERROR;
4848 +
4849 + if (!service || !service->state)
4850 + goto out;
4851 +
4852 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4853 +
4854 + arm_state = vchiq_platform_get_arm_state(service->state);
4855 +
4856 + read_lock_bh(&arm_state->susp_res_lock);
4857 + if (service->service_use_count)
4858 + ret = VCHIQ_SUCCESS;
4859 + read_unlock_bh(&arm_state->susp_res_lock);
4860 +
4861 + if (ret == VCHIQ_ERROR) {
4862 + vchiq_log_error(vchiq_susp_log_level,
4863 + "%s ERROR - %c%c%c%c:%d service count %d, "
4864 + "state count %d, videocore suspend state %s", __func__,
4865 + VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
4866 + service->client_id, service->service_use_count,
4867 + arm_state->videocore_use_count,
4868 + suspend_state_names[arm_state->vc_suspend_state +
4869 + VC_SUSPEND_NUM_OFFSET]);
4870 + vchiq_dump_service_use_state(service->state);
4871 + }
4872 +out:
4873 + return ret;
4874 +}
4875 +
4876 +/* stub functions */
4877 +void vchiq_on_remote_use_active(VCHIQ_STATE_T *state)
4878 +{
4879 + (void)state;
4880 +}
4881 +
4882 +void vchiq_platform_conn_state_changed(VCHIQ_STATE_T *state,
4883 + VCHIQ_CONNSTATE_T oldstate, VCHIQ_CONNSTATE_T newstate)
4884 +{
4885 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4886 + vchiq_log_info(vchiq_susp_log_level, "%d: %s->%s", state->id,
4887 + get_conn_state_name(oldstate), get_conn_state_name(newstate));
4888 + if (state->conn_state == VCHIQ_CONNSTATE_CONNECTED) {
4889 + write_lock_bh(&arm_state->susp_res_lock);
4890 + if (!arm_state->first_connect) {
4891 + char threadname[10];
4892 + arm_state->first_connect = 1;
4893 + write_unlock_bh(&arm_state->susp_res_lock);
4894 + snprintf(threadname, sizeof(threadname), "VCHIQka-%d",
4895 + state->id);
4896 + arm_state->ka_thread = kthread_create(
4897 + &vchiq_keepalive_thread_func,
4898 + (void *)state,
4899 + threadname);
4900 +			if (IS_ERR(arm_state->ka_thread)) {
4901 + vchiq_log_error(vchiq_susp_log_level,
4902 + "vchiq: FATAL: couldn't create thread %s",
4903 + threadname);
4904 + } else {
4905 + wake_up_process(arm_state->ka_thread);
4906 + }
4907 + } else
4908 + write_unlock_bh(&arm_state->susp_res_lock);
4909 + }
4910 +}
4911 +
4912 +
4913 +/****************************************************************************
4914 +*
4915 +* vchiq_init - called when the module is loaded.
4916 +*
4917 +***************************************************************************/
4918 +
4919 +static int __init
4920 +vchiq_init(void)
4921 +{
4922 + int err;
4923 + void *ptr_err;
4924 +
4925 + /* create proc entries */
4926 + err = vchiq_proc_init();
4927 + if (err != 0)
4928 + goto failed_proc_init;
4929 +
4930 + err = alloc_chrdev_region(&vchiq_devid, VCHIQ_MINOR, 1, DEVICE_NAME);
4931 + if (err != 0) {
4932 + vchiq_log_error(vchiq_arm_log_level,
4933 + "Unable to allocate device number");
4934 + goto failed_alloc_chrdev;
4935 + }
4936 + cdev_init(&vchiq_cdev, &vchiq_fops);
4937 + vchiq_cdev.owner = THIS_MODULE;
4938 + err = cdev_add(&vchiq_cdev, vchiq_devid, 1);
4939 + if (err != 0) {
4940 + vchiq_log_error(vchiq_arm_log_level,
4941 + "Unable to register device");
4942 + goto failed_cdev_add;
4943 + }
4944 +
4945 + /* create sysfs entries */
4946 + vchiq_class = class_create(THIS_MODULE, DEVICE_NAME);
4947 + ptr_err = vchiq_class;
4948 + if (IS_ERR(ptr_err))
4949 + goto failed_class_create;
4950 +
4951 + vchiq_dev = device_create(vchiq_class, NULL,
4952 + vchiq_devid, NULL, "vchiq");
4953 + ptr_err = vchiq_dev;
4954 + if (IS_ERR(ptr_err))
4955 + goto failed_device_create;
4956 +
4957 + err = vchiq_platform_init(&g_state);
4958 + if (err != 0)
4959 + goto failed_platform_init;
4960 +
4961 + vchiq_log_info(vchiq_arm_log_level,
4962 + "vchiq: initialised - version %d (min %d), device %d.%d",
4963 + VCHIQ_VERSION, VCHIQ_VERSION_MIN,
4964 + MAJOR(vchiq_devid), MINOR(vchiq_devid));
4965 +
4966 + return 0;
4967 +
4968 +failed_platform_init:
4969 + device_destroy(vchiq_class, vchiq_devid);
4970 +failed_device_create:
4971 + class_destroy(vchiq_class);
4972 +failed_class_create:
4973 + cdev_del(&vchiq_cdev);
4974 + err = PTR_ERR(ptr_err);
4975 +failed_cdev_add:
4976 + unregister_chrdev_region(vchiq_devid, 1);
4977 +failed_alloc_chrdev:
4978 + vchiq_proc_deinit();
4979 +failed_proc_init:
4980 + vchiq_log_warning(vchiq_arm_log_level, "could not load vchiq");
4981 + return err;
4982 +}
4983 +
4984 +static int vchiq_instance_get_use_count(VCHIQ_INSTANCE_T instance)
4985 +{
4986 + VCHIQ_SERVICE_T *service;
4987 + int use_count = 0, i;
4988 + i = 0;
4989 + while ((service = next_service_by_instance(instance->state,
4990 + instance, &i)) != NULL) {
4991 + use_count += service->service_use_count;
4992 + unlock_service(service);
4993 + }
4994 + return use_count;
4995 +}
4996 +
4997 +/* read the per-process use-count */
4998 +static int proc_read_use_count(char *page, char **start,
4999 + off_t off, int count,
5000 + int *eof, void *data)
5001 +{
5002 + VCHIQ_INSTANCE_T instance = data;
5003 + int len, use_count;
5004 +
5005 + use_count = vchiq_instance_get_use_count(instance);
5006 + len = snprintf(page+off, count, "%d\n", use_count);
5007 +
5008 + return len;
5009 +}
5010 +
5011 +/* add an instance (process) to the proc entries */
5012 +static int vchiq_proc_add_instance(VCHIQ_INSTANCE_T instance)
5013 +{
5014 +#if 1
5015 + return 0;
5016 +#else
5017 + char pidstr[32];
5018 + struct proc_dir_entry *top, *use_count;
5019 + struct proc_dir_entry *clients = vchiq_clients_top();
5020 + int pid = instance->pid;
5021 +
5022 + snprintf(pidstr, sizeof(pidstr), "%d", pid);
5023 + top = proc_mkdir(pidstr, clients);
5024 + if (!top)
5025 + goto fail_top;
5026 +
5027 + use_count = create_proc_read_entry("use_count",
5028 + 0444, top,
5029 + proc_read_use_count,
5030 + instance);
5031 + if (!use_count)
5032 + goto fail_use_count;
5033 +
5034 + instance->proc_entry = top;
5035 +
5036 + return 0;
5037 +
5038 +fail_use_count:
5039 + remove_proc_entry(top->name, clients);
5040 +fail_top:
5041 + return -ENOMEM;
5042 +#endif
5043 +}
5044 +
5045 +static void vchiq_proc_remove_instance(VCHIQ_INSTANCE_T instance)
5046 +{
5047 +#if 0
5048 + struct proc_dir_entry *clients = vchiq_clients_top();
5049 + remove_proc_entry("use_count", instance->proc_entry);
5050 + remove_proc_entry(instance->proc_entry->name, clients);
5051 +#endif
5052 +}
5053 +
5054 +/****************************************************************************
5055 +*
5056 +* vchiq_exit - called when the module is unloaded.
5057 +*
5058 +***************************************************************************/
5059 +
5060 +static void __exit
5061 +vchiq_exit(void)
5062 +{
5063 + vchiq_platform_exit(&g_state);
5064 + device_destroy(vchiq_class, vchiq_devid);
5065 + class_destroy(vchiq_class);
5066 + cdev_del(&vchiq_cdev);
5067 + unregister_chrdev_region(vchiq_devid, 1);
5068 +}
5069 +
5070 +module_init(vchiq_init);
5071 +module_exit(vchiq_exit);
5072 +MODULE_LICENSE("GPL");
5073 +MODULE_AUTHOR("Broadcom Corporation");
5074 diff --git a/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.h b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.h
5075 new file mode 100644
5076 index 0000000..75ad4c6
5077 --- /dev/null
5078 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.h
5079 @@ -0,0 +1,212 @@
5080 +/**
5081 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
5082 + *
5083 + * Redistribution and use in source and binary forms, with or without
5084 + * modification, are permitted provided that the following conditions
5085 + * are met:
5086 + * 1. Redistributions of source code must retain the above copyright
5087 + * notice, this list of conditions, and the following disclaimer,
5088 + * without modification.
5089 + * 2. Redistributions in binary form must reproduce the above copyright
5090 + * notice, this list of conditions and the following disclaimer in the
5091 + * documentation and/or other materials provided with the distribution.
5092 + * 3. The names of the above-listed copyright holders may not be used
5093 + * to endorse or promote products derived from this software without
5094 + * specific prior written permission.
5095 + *
5096 + * ALTERNATIVELY, this software may be distributed under the terms of the
5097 + * GNU General Public License ("GPL") version 2, as published by the Free
5098 + * Software Foundation.
5099 + *
5100 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
5101 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
5102 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
5103 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
5104 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
5105 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
5106 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
5107 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
5108 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
5109 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
5110 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5111 + */
5112 +
5113 +#ifndef VCHIQ_ARM_H
5114 +#define VCHIQ_ARM_H
5115 +
5116 +#include <linux/mutex.h>
5117 +#include <linux/semaphore.h>
5118 +#include <linux/atomic.h>
5119 +#include "vchiq_core.h"
5120 +
5121 +
5122 +enum vc_suspend_status {
5123 + VC_SUSPEND_FORCE_CANCELED = -3, /* Force suspend canceled, too busy */
5124 + VC_SUSPEND_REJECTED = -2, /* Videocore rejected suspend request */
5125 + VC_SUSPEND_FAILED = -1, /* Videocore suspend failed */
5126 + VC_SUSPEND_IDLE = 0, /* VC active, no suspend actions */
5127 + VC_SUSPEND_REQUESTED, /* User has requested suspend */
5128 + VC_SUSPEND_IN_PROGRESS, /* Slot handler has recvd suspend request */
5129 + VC_SUSPEND_SUSPENDED /* Videocore suspend succeeded */
5130 +};
5131 +
5132 +enum vc_resume_status {
5133 + VC_RESUME_FAILED = -1, /* Videocore resume failed */
5134 + VC_RESUME_IDLE = 0, /* VC suspended, no resume actions */
5135 + VC_RESUME_REQUESTED, /* User has requested resume */
5136 + VC_RESUME_IN_PROGRESS, /* Slot handler has received resume request */
5137 + VC_RESUME_RESUMED /* Videocore resumed successfully (active) */
5138 +};
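
Taken together, the two enums give the nominal round trip driven through set_suspend_state()/set_resume_state(): a suspend walks VC_SUSPEND_IDLE -> VC_SUSPEND_REQUESTED -> VC_SUSPEND_IN_PROGRESS -> VC_SUSPEND_SUSPENDED, and the matching resume walks VC_RESUME_IDLE -> VC_RESUME_REQUESTED -> VC_RESUME_IN_PROGRESS -> VC_RESUME_RESUMED, with the negative suspend values recording the ways a (force) suspend can fail or be cancelled.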
5139 +
5140 +
5141 +enum USE_TYPE_E {
5142 + USE_TYPE_SERVICE,
5143 + USE_TYPE_SERVICE_NO_RESUME,
5144 + USE_TYPE_VCHIQ
5145 +};
5146 +
5147 +
5148 +
5149 +typedef struct vchiq_arm_state_struct {
5150 + /* Keepalive-related data */
5151 + struct task_struct *ka_thread;
5152 + struct completion ka_evt;
5153 + atomic_t ka_use_count;
5154 + atomic_t ka_use_ack_count;
5155 + atomic_t ka_release_count;
5156 +
5157 + struct completion vc_suspend_complete;
5158 + struct completion vc_resume_complete;
5159 +
5160 + rwlock_t susp_res_lock;
5161 + enum vc_suspend_status vc_suspend_state;
5162 + enum vc_resume_status vc_resume_state;
5163 +
5164 + unsigned int wake_address;
5165 +
5166 + struct timer_list suspend_timer;
5167 + int suspend_timer_timeout;
5168 + int suspend_timer_running;
5169 +
5170 + /* Global use count for videocore.
5171 + ** This is equal to the sum of the use counts for all services. When
5172 + ** this hits zero the videocore suspend procedure will be initiated.
5173 + */
5174 + int videocore_use_count;
5175 +
5176 + /* Use count to track requests from videocore peer.
5177 + ** This use count is not associated with a service, so needs to be
5178 + ** tracked separately with the state.
5179 + */
5180 + int peer_use_count;
5181 +
5182 + /* Flag to indicate whether resume is blocked. This happens when the
5183 + ** ARM is suspending
5184 + */
5185 + struct completion resume_blocker;
5186 + int resume_blocked;
5187 + struct completion blocked_blocker;
5188 + int blocked_count;
5189 +
5190 + int autosuspend_override;
5191 +
5192 + /* Flag to indicate that the first vchiq connect has made it through.
5193 + ** This means that both sides should be fully ready, and we should
5194 + ** be able to suspend after this point.
5195 + */
5196 + int first_connect;
5197 +
5198 + unsigned long long suspend_start_time;
5199 + unsigned long long sleep_start_time;
5200 + unsigned long long resume_start_time;
5201 + unsigned long long last_wake_time;
5202 +
5203 +} VCHIQ_ARM_STATE_T;
5204 +
5205 +extern int vchiq_arm_log_level;
5206 +extern int vchiq_susp_log_level;
5207 +
5208 +extern int __init
5209 +vchiq_platform_init(VCHIQ_STATE_T *state);
5210 +
5211 +extern void __exit
5212 +vchiq_platform_exit(VCHIQ_STATE_T *state);
5213 +
5214 +extern VCHIQ_STATE_T *
5215 +vchiq_get_state(void);
5216 +
5217 +extern VCHIQ_STATUS_T
5218 +vchiq_arm_vcsuspend(VCHIQ_STATE_T *state);
5219 +
5220 +extern VCHIQ_STATUS_T
5221 +vchiq_arm_force_suspend(VCHIQ_STATE_T *state);
5222 +
5223 +extern int
5224 +vchiq_arm_allow_resume(VCHIQ_STATE_T *state);
5225 +
5226 +extern VCHIQ_STATUS_T
5227 +vchiq_arm_vcresume(VCHIQ_STATE_T *state);
5228 +
5229 +extern VCHIQ_STATUS_T
5230 +vchiq_arm_init_state(VCHIQ_STATE_T *state, VCHIQ_ARM_STATE_T *arm_state);
5231 +
5232 +extern int
5233 +vchiq_check_resume(VCHIQ_STATE_T *state);
5234 +
5235 +extern void
5236 +vchiq_check_suspend(VCHIQ_STATE_T *state);
5237 +
5238 +extern VCHIQ_STATUS_T
5239 +vchiq_use_service(VCHIQ_SERVICE_HANDLE_T handle);
5240 +
5241 +extern VCHIQ_STATUS_T
5242 +vchiq_release_service(VCHIQ_SERVICE_HANDLE_T handle);
5243 +
5244 +extern VCHIQ_STATUS_T
5245 +vchiq_check_service(VCHIQ_SERVICE_T *service);
5246 +
5247 +extern VCHIQ_STATUS_T
5248 +vchiq_platform_suspend(VCHIQ_STATE_T *state);
5249 +
5250 +extern int
5251 +vchiq_platform_videocore_wanted(VCHIQ_STATE_T *state);
5252 +
5253 +extern int
5254 +vchiq_platform_use_suspend_timer(void);
5255 +
5256 +extern void
5257 +vchiq_dump_platform_use_state(VCHIQ_STATE_T *state);
5258 +
5259 +extern void
5260 +vchiq_dump_service_use_state(VCHIQ_STATE_T *state);
5261 +
5262 +extern VCHIQ_ARM_STATE_T*
5263 +vchiq_platform_get_arm_state(VCHIQ_STATE_T *state);
5264 +
5265 +extern int
5266 +vchiq_videocore_wanted(VCHIQ_STATE_T *state);
5267 +
5268 +extern VCHIQ_STATUS_T
5269 +vchiq_use_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
5270 + enum USE_TYPE_E use_type);
5271 +extern VCHIQ_STATUS_T
5272 +vchiq_release_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service);
5273 +
5274 +void
5275 +set_suspend_state(VCHIQ_ARM_STATE_T *arm_state,
5276 + enum vc_suspend_status new_state);
5277 +
5278 +void
5279 +set_resume_state(VCHIQ_ARM_STATE_T *arm_state,
5280 + enum vc_resume_status new_state);
5281 +
5282 +void
5283 +start_suspend_timer(VCHIQ_ARM_STATE_T *arm_state);
5284 +
5285 +extern int vchiq_proc_init(void);
5286 +extern void vchiq_proc_deinit(void);
5287 +extern struct proc_dir_entry *vchiq_proc_top(void);
5288 +extern struct proc_dir_entry *vchiq_clients_top(void);
5289 +
5290 +
5291 +#endif /* VCHIQ_ARM_H */
5292 diff --git a/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_build_info.h b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_build_info.h
5293 new file mode 100644
5294 index 0000000..df64581
5295 --- /dev/null
5296 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_build_info.h
5297 @@ -0,0 +1,37 @@
5298 +/**
5299 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
5300 + *
5301 + * Redistribution and use in source and binary forms, with or without
5302 + * modification, are permitted provided that the following conditions
5303 + * are met:
5304 + * 1. Redistributions of source code must retain the above copyright
5305 + * notice, this list of conditions, and the following disclaimer,
5306 + * without modification.
5307 + * 2. Redistributions in binary form must reproduce the above copyright
5308 + * notice, this list of conditions and the following disclaimer in the
5309 + * documentation and/or other materials provided with the distribution.
5310 + * 3. The names of the above-listed copyright holders may not be used
5311 + * to endorse or promote products derived from this software without
5312 + * specific prior written permission.
5313 + *
5314 + * ALTERNATIVELY, this software may be distributed under the terms of the
5315 + * GNU General Public License ("GPL") version 2, as published by the Free
5316 + * Software Foundation.
5317 + *
5318 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
5319 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
5320 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
5321 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
5322 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
5323 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
5324 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
5325 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
5326 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
5327 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
5328 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5329 + */
5330 +
5331 +const char *vchiq_get_build_hostname(void);
5332 +const char *vchiq_get_build_version(void);
5333 +const char *vchiq_get_build_time(void);
5334 +const char *vchiq_get_build_date(void);
5335 diff --git a/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_cfg.h b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_cfg.h
5336 new file mode 100644
5337 index 0000000..493c86c
5338 --- /dev/null
5339 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_cfg.h
5340 @@ -0,0 +1,60 @@
5341 +/**
5342 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
5343 + *
5344 + * Redistribution and use in source and binary forms, with or without
5345 + * modification, are permitted provided that the following conditions
5346 + * are met:
5347 + * 1. Redistributions of source code must retain the above copyright
5348 + * notice, this list of conditions, and the following disclaimer,
5349 + * without modification.
5350 + * 2. Redistributions in binary form must reproduce the above copyright
5351 + * notice, this list of conditions and the following disclaimer in the
5352 + * documentation and/or other materials provided with the distribution.
5353 + * 3. The names of the above-listed copyright holders may not be used
5354 + * to endorse or promote products derived from this software without
5355 + * specific prior written permission.
5356 + *
5357 + * ALTERNATIVELY, this software may be distributed under the terms of the
5358 + * GNU General Public License ("GPL") version 2, as published by the Free
5359 + * Software Foundation.
5360 + *
5361 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
5362 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
5363 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
5364 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
5365 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
5366 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
5367 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
5368 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
5369 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
5370 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
5371 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5372 + */
5373 +
5374 +#ifndef VCHIQ_CFG_H
5375 +#define VCHIQ_CFG_H
5376 +
5377 +#define VCHIQ_MAGIC VCHIQ_MAKE_FOURCC('V', 'C', 'H', 'I')
5378 +/* The version of VCHIQ - change with any non-trivial change */
5379 +#define VCHIQ_VERSION 6
5380 +/* The minimum compatible version - update to match VCHIQ_VERSION with any
5381 +** incompatible change */
5382 +#define VCHIQ_VERSION_MIN 3
5383 +
5384 +#define VCHIQ_MAX_STATES 1
5385 +#define VCHIQ_MAX_SERVICES 4096
5386 +#define VCHIQ_MAX_SLOTS 128
5387 +#define VCHIQ_MAX_SLOTS_PER_SIDE 64
5388 +
5389 +#define VCHIQ_NUM_CURRENT_BULKS 32
5390 +#define VCHIQ_NUM_SERVICE_BULKS 4
5391 +
5392 +#ifndef VCHIQ_ENABLE_DEBUG
5393 +#define VCHIQ_ENABLE_DEBUG 1
5394 +#endif
5395 +
5396 +#ifndef VCHIQ_ENABLE_STATS
5397 +#define VCHIQ_ENABLE_STATS 1
5398 +#endif
5399 +
5400 +#endif /* VCHIQ_CFG_H */
5401 diff --git a/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.c b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.c
5402 new file mode 100644
5403 index 0000000..65f4b52
5404 --- /dev/null
5405 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.c
5406 @@ -0,0 +1,119 @@
5407 +/**
5408 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
5409 + *
5410 + * Redistribution and use in source and binary forms, with or without
5411 + * modification, are permitted provided that the following conditions
5412 + * are met:
5413 + * 1. Redistributions of source code must retain the above copyright
5414 + * notice, this list of conditions, and the following disclaimer,
5415 + * without modification.
5416 + * 2. Redistributions in binary form must reproduce the above copyright
5417 + * notice, this list of conditions and the following disclaimer in the
5418 + * documentation and/or other materials provided with the distribution.
5419 + * 3. The names of the above-listed copyright holders may not be used
5420 + * to endorse or promote products derived from this software without
5421 + * specific prior written permission.
5422 + *
5423 + * ALTERNATIVELY, this software may be distributed under the terms of the
5424 + * GNU General Public License ("GPL") version 2, as published by the Free
5425 + * Software Foundation.
5426 + *
5427 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
5428 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
5429 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
5430 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
5431 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
5432 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
5433 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
5434 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
5435 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
5436 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
5437 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5438 + */
5439 +
5440 +#include "vchiq_connected.h"
5441 +#include "vchiq_core.h"
5442 +#include <linux/module.h>
5443 +#include <linux/mutex.h>
5444 +
5445 +#define MAX_CALLBACKS 10
5446 +
5447 +static int g_connected;
5448 +static int g_num_deferred_callbacks;
5449 +static VCHIQ_CONNECTED_CALLBACK_T g_deferred_callback[MAX_CALLBACKS];
5450 +static int g_once_init;
5451 +static struct mutex g_connected_mutex;
5452 +
5453 +/****************************************************************************
5454 +*
5455 +* Function to initialize our lock.
5456 +*
5457 +***************************************************************************/
5458 +
5459 +static void connected_init(void)
5460 +{
5461 + if (!g_once_init) {
5462 + mutex_init(&g_connected_mutex);
5463 + g_once_init = 1;
5464 + }
5465 +}
5466 +
5467 +/****************************************************************************
5468 +*
5469 +* This function is used to defer initialization until the vchiq stack is
5470 +* initialized. If the stack is already initialized, then the callback will
5471 +* be made immediately, otherwise it will be deferred until
5472 +* vchiq_call_connected_callbacks is called.
5473 +*
5474 +***************************************************************************/
5475 +
5476 +void vchiq_add_connected_callback(VCHIQ_CONNECTED_CALLBACK_T callback)
5477 +{
5478 + connected_init();
5479 +
5480 + if (mutex_lock_interruptible(&g_connected_mutex) != 0)
5481 + return;
5482 +
5483 + if (g_connected)
5484 + /* We're already connected. Call the callback immediately. */
5485 +
5486 + callback();
5487 + else {
5488 + if (g_num_deferred_callbacks >= MAX_CALLBACKS)
5489 + vchiq_log_error(vchiq_core_log_level,
5490 +				"There are already %d callbacks registered - "
5491 + "please increase MAX_CALLBACKS",
5492 + g_num_deferred_callbacks);
5493 + else {
5494 + g_deferred_callback[g_num_deferred_callbacks] =
5495 + callback;
5496 + g_num_deferred_callbacks++;
5497 + }
5498 + }
5499 + mutex_unlock(&g_connected_mutex);
5500 +}
5501 +
5502 +/****************************************************************************
5503 +*
5504 +* This function is called by the vchiq stack once it has been connected to
5505 +* the videocore and clients can start to use the stack.
5506 +*
5507 +***************************************************************************/
5508 +
5509 +void vchiq_call_connected_callbacks(void)
5510 +{
5511 + int i;
5512 +
5513 + connected_init();
5514 +
5515 + if (mutex_lock_interruptible(&g_connected_mutex) != 0)
5516 + return;
5517 +
5518 + for (i = 0; i < g_num_deferred_callbacks; i++)
5519 + g_deferred_callback[i]();
5520 +
5521 + g_num_deferred_callbacks = 0;
5522 + g_connected = 1;
5523 + mutex_unlock(&g_connected_mutex);
5524 +}
5525 +EXPORT_SYMBOL(vchiq_add_connected_callback);
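
The deferred-callback mechanism above is how dependent drivers postpone their VideoCore-facing initialisation until the first vchiq connect has happened. A minimal sketch of a hypothetical client module (names are illustrative and not part of this patch):

#include <linux/module.h>
#include <linux/kernel.h>
#include "vchiq_connected.h"

/* Illustrative only: runs immediately if vchiq is already connected,
 * otherwise it is queued until vchiq_call_connected_callbacks(). */
static void example_on_vchiq_connected(void)
{
	pr_info("example: vchiq connected, safe to open services now\n");
}

static int __init example_init(void)
{
	vchiq_add_connected_callback(example_on_vchiq_connected);
	return 0;
}
module_init(example_init);
MODULE_LICENSE("GPL");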
5526 diff --git a/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.h b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.h
5527 new file mode 100644
5528 index 0000000..863b3e3
5529 --- /dev/null
5530 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.h
5531 @@ -0,0 +1,50 @@
5532 +/**
5533 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
5534 + *
5535 + * Redistribution and use in source and binary forms, with or without
5536 + * modification, are permitted provided that the following conditions
5537 + * are met:
5538 + * 1. Redistributions of source code must retain the above copyright
5539 + * notice, this list of conditions, and the following disclaimer,
5540 + * without modification.
5541 + * 2. Redistributions in binary form must reproduce the above copyright
5542 + * notice, this list of conditions and the following disclaimer in the
5543 + * documentation and/or other materials provided with the distribution.
5544 + * 3. The names of the above-listed copyright holders may not be used
5545 + * to endorse or promote products derived from this software without
5546 + * specific prior written permission.
5547 + *
5548 + * ALTERNATIVELY, this software may be distributed under the terms of the
5549 + * GNU General Public License ("GPL") version 2, as published by the Free
5550 + * Software Foundation.
5551 + *
5552 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
5553 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
5554 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
5555 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
5556 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
5557 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
5558 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
5559 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
5560 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
5561 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
5562 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5563 + */
5564 +
5565 +#ifndef VCHIQ_CONNECTED_H
5566 +#define VCHIQ_CONNECTED_H
5567 +
5568 +/* ---- Include Files ----------------------------------------------------- */
5569 +
5570 +/* ---- Constants and Types ---------------------------------------------- */
5571 +
5572 +typedef void (*VCHIQ_CONNECTED_CALLBACK_T)(void);
5573 +
5574 +/* ---- Variable Externs ------------------------------------------------- */
5575 +
5576 +/* ---- Function Prototypes ---------------------------------------------- */
5577 +
5578 +void vchiq_add_connected_callback(VCHIQ_CONNECTED_CALLBACK_T callback);
5579 +void vchiq_call_connected_callbacks(void);
5580 +
5581 +#endif /* VCHIQ_CONNECTED_H */
5582 diff --git a/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.c b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.c
5583 new file mode 100644
5584 index 0000000..f35ed4f
5585 --- /dev/null
5586 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.c
5587 @@ -0,0 +1,3824 @@
5588 +/**
5589 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
5590 + *
5591 + * Redistribution and use in source and binary forms, with or without
5592 + * modification, are permitted provided that the following conditions
5593 + * are met:
5594 + * 1. Redistributions of source code must retain the above copyright
5595 + * notice, this list of conditions, and the following disclaimer,
5596 + * without modification.
5597 + * 2. Redistributions in binary form must reproduce the above copyright
5598 + * notice, this list of conditions and the following disclaimer in the
5599 + * documentation and/or other materials provided with the distribution.
5600 + * 3. The names of the above-listed copyright holders may not be used
5601 + * to endorse or promote products derived from this software without
5602 + * specific prior written permission.
5603 + *
5604 + * ALTERNATIVELY, this software may be distributed under the terms of the
5605 + * GNU General Public License ("GPL") version 2, as published by the Free
5606 + * Software Foundation.
5607 + *
5608 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
5609 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
5610 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
5611 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
5612 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
5613 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
5614 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
5615 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
5616 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
5617 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
5618 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5619 + */
5620 +
5621 +#include "vchiq_core.h"
5622 +
5623 +#define VCHIQ_SLOT_HANDLER_STACK 8192
5624 +
5625 +#define HANDLE_STATE_SHIFT 12
5626 +
5627 +#define SLOT_INFO_FROM_INDEX(state, index) (state->slot_info + (index))
5628 +#define SLOT_DATA_FROM_INDEX(state, index) (state->slot_data + (index))
5629 +#define SLOT_INDEX_FROM_DATA(state, data) \
5630 + (((unsigned int)((char *)data - (char *)state->slot_data)) / \
5631 + VCHIQ_SLOT_SIZE)
5632 +#define SLOT_INDEX_FROM_INFO(state, info) \
5633 + ((unsigned int)(info - state->slot_info))
5634 +#define SLOT_QUEUE_INDEX_FROM_POS(pos) \
5635 + ((int)((unsigned int)(pos) / VCHIQ_SLOT_SIZE))
5636 +
5637 +
5638 +#define BULK_INDEX(x) (x & (VCHIQ_NUM_SERVICE_BULKS - 1))
5639 +
5640 +
5641 +struct vchiq_open_payload {
5642 + int fourcc;
5643 + int client_id;
5644 + short version;
5645 + short version_min;
5646 +};
5647 +
5648 +struct vchiq_openack_payload {
5649 + short version;
5650 +};
5651 +
5652 +/* we require this for consistency between endpoints */
5653 +vchiq_static_assert(sizeof(VCHIQ_HEADER_T) == 8);
5654 +vchiq_static_assert(IS_POW2(sizeof(VCHIQ_HEADER_T)));
5655 +vchiq_static_assert(IS_POW2(VCHIQ_NUM_CURRENT_BULKS));
5656 +vchiq_static_assert(IS_POW2(VCHIQ_NUM_SERVICE_BULKS));
5657 +vchiq_static_assert(IS_POW2(VCHIQ_MAX_SERVICES));
5658 +vchiq_static_assert(VCHIQ_VERSION >= VCHIQ_VERSION_MIN);
5659 +
5660 +/* Run time control of log level, based on KERN_XXX level. */
5661 +int vchiq_core_log_level = VCHIQ_LOG_DEFAULT;
5662 +int vchiq_core_msg_log_level = VCHIQ_LOG_DEFAULT;
5663 +int vchiq_sync_log_level = VCHIQ_LOG_DEFAULT;
5664 +
5665 +static atomic_t pause_bulks_count = ATOMIC_INIT(0);
5666 +
5667 +static DEFINE_SPINLOCK(service_spinlock);
5668 +DEFINE_SPINLOCK(bulk_waiter_spinlock);
5669 +DEFINE_SPINLOCK(quota_spinlock);
5670 +
5671 +VCHIQ_STATE_T *vchiq_states[VCHIQ_MAX_STATES];
5672 +static unsigned int handle_seq;
5673 +
5674 +static const char *const srvstate_names[] = {
5675 + "FREE",
5676 + "HIDDEN",
5677 + "LISTENING",
5678 + "OPENING",
5679 + "OPEN",
5680 + "OPENSYNC",
5681 + "CLOSESENT",
5682 + "CLOSERECVD",
5683 + "CLOSEWAIT",
5684 + "CLOSED"
5685 +};
5686 +
5687 +static const char *const reason_names[] = {
5688 + "SERVICE_OPENED",
5689 + "SERVICE_CLOSED",
5690 + "MESSAGE_AVAILABLE",
5691 + "BULK_TRANSMIT_DONE",
5692 + "BULK_RECEIVE_DONE",
5693 + "BULK_TRANSMIT_ABORTED",
5694 + "BULK_RECEIVE_ABORTED"
5695 +};
5696 +
5697 +static const char *const conn_state_names[] = {
5698 + "DISCONNECTED",
5699 + "CONNECTING",
5700 + "CONNECTED",
5701 + "PAUSING",
5702 + "PAUSE_SENT",
5703 + "PAUSED",
5704 + "RESUMING",
5705 + "PAUSE_TIMEOUT",
5706 + "RESUME_TIMEOUT"
5707 +};
5708 +
5709 +
5710 +static void
5711 +release_message_sync(VCHIQ_STATE_T *state, VCHIQ_HEADER_T *header);
5712 +
5713 +static const char *msg_type_str(unsigned int msg_type)
5714 +{
5715 + switch (msg_type) {
5716 + case VCHIQ_MSG_PADDING: return "PADDING";
5717 + case VCHIQ_MSG_CONNECT: return "CONNECT";
5718 + case VCHIQ_MSG_OPEN: return "OPEN";
5719 + case VCHIQ_MSG_OPENACK: return "OPENACK";
5720 + case VCHIQ_MSG_CLOSE: return "CLOSE";
5721 + case VCHIQ_MSG_DATA: return "DATA";
5722 + case VCHIQ_MSG_BULK_RX: return "BULK_RX";
5723 + case VCHIQ_MSG_BULK_TX: return "BULK_TX";
5724 + case VCHIQ_MSG_BULK_RX_DONE: return "BULK_RX_DONE";
5725 + case VCHIQ_MSG_BULK_TX_DONE: return "BULK_TX_DONE";
5726 + case VCHIQ_MSG_PAUSE: return "PAUSE";
5727 + case VCHIQ_MSG_RESUME: return "RESUME";
5728 + case VCHIQ_MSG_REMOTE_USE: return "REMOTE_USE";
5729 + case VCHIQ_MSG_REMOTE_RELEASE: return "REMOTE_RELEASE";
5730 + case VCHIQ_MSG_REMOTE_USE_ACTIVE: return "REMOTE_USE_ACTIVE";
5731 + }
5732 + return "???";
5733 +}
5734 +
5735 +static inline void
5736 +vchiq_set_service_state(VCHIQ_SERVICE_T *service, int newstate)
5737 +{
5738 + vchiq_log_info(vchiq_core_log_level, "%d: srv:%d %s->%s",
5739 + service->state->id, service->localport,
5740 + srvstate_names[service->srvstate],
5741 + srvstate_names[newstate]);
5742 + service->srvstate = newstate;
5743 +}
5744 +
5745 +VCHIQ_SERVICE_T *
5746 +find_service_by_handle(VCHIQ_SERVICE_HANDLE_T handle)
5747 +{
5748 + VCHIQ_SERVICE_T *service;
5749 +
5750 + spin_lock(&service_spinlock);
5751 + service = handle_to_service(handle);
5752 + if (service && (service->srvstate != VCHIQ_SRVSTATE_FREE) &&
5753 + (service->handle == handle)) {
5754 + BUG_ON(service->ref_count == 0);
5755 + service->ref_count++;
5756 + } else
5757 + service = NULL;
5758 + spin_unlock(&service_spinlock);
5759 +
5760 + if (!service)
5761 + vchiq_log_info(vchiq_core_log_level,
5762 + "Invalid service handle 0x%x", handle);
5763 +
5764 + return service;
5765 +}
5766 +
5767 +VCHIQ_SERVICE_T *
5768 +find_service_by_port(VCHIQ_STATE_T *state, int localport)
5769 +{
5770 + VCHIQ_SERVICE_T *service = NULL;
5771 + if ((unsigned int)localport <= VCHIQ_PORT_MAX) {
5772 + spin_lock(&service_spinlock);
5773 + service = state->services[localport];
5774 + if (service && (service->srvstate != VCHIQ_SRVSTATE_FREE)) {
5775 + BUG_ON(service->ref_count == 0);
5776 + service->ref_count++;
5777 + } else
5778 + service = NULL;
5779 + spin_unlock(&service_spinlock);
5780 + }
5781 +
5782 + if (!service)
5783 + vchiq_log_info(vchiq_core_log_level,
5784 + "Invalid port %d", localport);
5785 +
5786 + return service;
5787 +}
5788 +
5789 +VCHIQ_SERVICE_T *
5790 +find_service_for_instance(VCHIQ_INSTANCE_T instance,
5791 + VCHIQ_SERVICE_HANDLE_T handle) {
5792 + VCHIQ_SERVICE_T *service;
5793 +
5794 + spin_lock(&service_spinlock);
5795 + service = handle_to_service(handle);
5796 + if (service && (service->srvstate != VCHIQ_SRVSTATE_FREE) &&
5797 + (service->handle == handle) &&
5798 + (service->instance == instance)) {
5799 + BUG_ON(service->ref_count == 0);
5800 + service->ref_count++;
5801 + } else
5802 + service = NULL;
5803 + spin_unlock(&service_spinlock);
5804 +
5805 + if (!service)
5806 + vchiq_log_info(vchiq_core_log_level,
5807 + "Invalid service handle 0x%x", handle);
5808 +
5809 + return service;
5810 +}
5811 +
5812 +VCHIQ_SERVICE_T *
5813 +next_service_by_instance(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance,
5814 + int *pidx)
5815 +{
5816 + VCHIQ_SERVICE_T *service = NULL;
5817 + int idx = *pidx;
5818 +
5819 + spin_lock(&service_spinlock);
5820 + while (idx < state->unused_service) {
5821 + VCHIQ_SERVICE_T *srv = state->services[idx++];
5822 + if (srv && (srv->srvstate != VCHIQ_SRVSTATE_FREE) &&
5823 + (srv->instance == instance)) {
5824 + service = srv;
5825 + BUG_ON(service->ref_count == 0);
5826 + service->ref_count++;
5827 + break;
5828 + }
5829 + }
5830 + spin_unlock(&service_spinlock);
5831 +
5832 + *pidx = idx;
5833 +
5834 + return service;
5835 +}
5836 +
5837 +void
5838 +lock_service(VCHIQ_SERVICE_T *service)
5839 +{
5840 + spin_lock(&service_spinlock);
5841 + BUG_ON(!service || (service->ref_count == 0));
5842 + if (service)
5843 + service->ref_count++;
5844 + spin_unlock(&service_spinlock);
5845 +}
5846 +
5847 +void
5848 +unlock_service(VCHIQ_SERVICE_T *service)
5849 +{
5850 + VCHIQ_STATE_T *state = service->state;
5851 + spin_lock(&service_spinlock);
5852 + BUG_ON(!service || (service->ref_count == 0));
5853 + if (service && service->ref_count) {
5854 + service->ref_count--;
5855 + if (!service->ref_count) {
5856 + BUG_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);
5857 + state->services[service->localport] = NULL;
5858 + } else
5859 + service = NULL;
5860 + }
5861 + spin_unlock(&service_spinlock);
5862 +
5863 + if (service && service->userdata_term)
5864 + service->userdata_term(service->base.userdata);
5865 +
5866 + kfree(service);
5867 +}
5868 +
5869 +int
5870 +vchiq_get_client_id(VCHIQ_SERVICE_HANDLE_T handle)
5871 +{
5872 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
5873 + int id;
5874 +
5875 + id = service ? service->client_id : 0;
5876 + if (service)
5877 + unlock_service(service);
5878 +
5879 + return id;
5880 +}
5881 +
5882 +void *
5883 +vchiq_get_service_userdata(VCHIQ_SERVICE_HANDLE_T handle)
5884 +{
5885 + VCHIQ_SERVICE_T *service = handle_to_service(handle);
5886 +
5887 + return service ? service->base.userdata : NULL;
5888 +}
5889 +
5890 +int
5891 +vchiq_get_service_fourcc(VCHIQ_SERVICE_HANDLE_T handle)
5892 +{
5893 + VCHIQ_SERVICE_T *service = handle_to_service(handle);
5894 +
5895 + return service ? service->base.fourcc : 0;
5896 +}
5897 +
5898 +static void
5899 +mark_service_closing_internal(VCHIQ_SERVICE_T *service, int sh_thread)
5900 +{
5901 + VCHIQ_STATE_T *state = service->state;
5902 + VCHIQ_SERVICE_QUOTA_T *service_quota;
5903 +
5904 + service->closing = 1;
5905 +
5906 + /* Synchronise with other threads. */
5907 + mutex_lock(&state->recycle_mutex);
5908 + mutex_unlock(&state->recycle_mutex);
5909 + if (!sh_thread || (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT)) {
5910 + /* If we're pausing then the slot_mutex is held until resume
5911 + * by the slot handler. Therefore don't try to acquire this
5912 + * mutex if we're the slot handler and in the pause sent state.
5913 + * We don't need to in this case anyway. */
5914 + mutex_lock(&state->slot_mutex);
5915 + mutex_unlock(&state->slot_mutex);
5916 + }
5917 +
5918 + /* Unblock any sending thread. */
5919 + service_quota = &state->service_quotas[service->localport];
5920 + up(&service_quota->quota_event);
5921 +}
5922 +
5923 +static void
5924 +mark_service_closing(VCHIQ_SERVICE_T *service)
5925 +{
5926 + mark_service_closing_internal(service, 0);
5927 +}
5928 +
5929 +static inline VCHIQ_STATUS_T
5930 +make_service_callback(VCHIQ_SERVICE_T *service, VCHIQ_REASON_T reason,
5931 + VCHIQ_HEADER_T *header, void *bulk_userdata)
5932 +{
5933 + VCHIQ_STATUS_T status;
5934 + vchiq_log_trace(vchiq_core_log_level, "%d: callback:%d (%s, %x, %x)",
5935 + service->state->id, service->localport, reason_names[reason],
5936 + (unsigned int)header, (unsigned int)bulk_userdata);
5937 + status = service->base.callback(reason, header, service->handle,
5938 + bulk_userdata);
5939 + if (status == VCHIQ_ERROR) {
5940 + vchiq_log_warning(vchiq_core_log_level,
5941 + "%d: ignoring ERROR from callback to service %x",
5942 + service->state->id, service->handle);
5943 + status = VCHIQ_SUCCESS;
5944 + }
5945 + return status;
5946 +}
5947 +
5948 +inline void
5949 +vchiq_set_conn_state(VCHIQ_STATE_T *state, VCHIQ_CONNSTATE_T newstate)
5950 +{
5951 + VCHIQ_CONNSTATE_T oldstate = state->conn_state;
5952 + vchiq_log_info(vchiq_core_log_level, "%d: %s->%s", state->id,
5953 + conn_state_names[oldstate],
5954 + conn_state_names[newstate]);
5955 + state->conn_state = newstate;
5956 + vchiq_platform_conn_state_changed(state, oldstate, newstate);
5957 +}
5958 +
5959 +static inline void
5960 +remote_event_create(REMOTE_EVENT_T *event)
5961 +{
5962 + event->armed = 0;
5963 + /* Don't clear the 'fired' flag because it may already have been set
5964 + ** by the other side. */
5965 + sema_init(event->event, 0);
5966 +}
5967 +
5968 +static inline void
5969 +remote_event_destroy(REMOTE_EVENT_T *event)
5970 +{
5971 + (void)event;
5972 +}
5973 +
5974 +static inline int
5975 +remote_event_wait(REMOTE_EVENT_T *event)
5976 +{
5977 + if (!event->fired) {
5978 + event->armed = 1;
5979 + dsb();
5980 + if (!event->fired) {
5981 + if (down_interruptible(event->event) != 0) {
5982 + event->armed = 0;
5983 + return 0;
5984 + }
5985 + }
5986 + event->armed = 0;
5987 + wmb();
5988 + }
5989 +
5990 + event->fired = 0;
5991 + return 1;
5992 +}
5993 +
5994 +static inline void
5995 +remote_event_signal_local(REMOTE_EVENT_T *event)
5996 +{
5997 + event->armed = 0;
5998 + up(event->event);
5999 +}
6000 +
6001 +static inline void
6002 +remote_event_poll(REMOTE_EVENT_T *event)
6003 +{
6004 + if (event->fired && event->armed)
6005 + remote_event_signal_local(event);
6006 +}
6007 +
6008 +void
6009 +remote_event_pollall(VCHIQ_STATE_T *state)
6010 +{
6011 + remote_event_poll(&state->local->sync_trigger);
6012 + remote_event_poll(&state->local->sync_release);
6013 + remote_event_poll(&state->local->trigger);
6014 + remote_event_poll(&state->local->recycle);
6015 +}
6016 +
6017 +/* Round up message sizes so that any space at the end of a slot is always big
6018 +** enough for a header. This relies on header size being a power of two, which
6019 +** has been verified earlier by a static assertion. */
6020 +
6021 +static inline unsigned int
6022 +calc_stride(unsigned int size)
6023 +{
6024 + /* Allow room for the header */
6025 + size += sizeof(VCHIQ_HEADER_T);
6026 +
6027 + /* Round up */
6028 + return (size + sizeof(VCHIQ_HEADER_T) - 1) & ~(sizeof(VCHIQ_HEADER_T)
6029 + - 1);
6030 +}
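
Concretely, with sizeof(VCHIQ_HEADER_T) == 8 (asserted near the top of this file), calc_stride(5) = (5 + 8 + 7) & ~7 = 16 and calc_stride(8) = (8 + 8 + 7) & ~7 = 16, so every message occupies a multiple of the header size and whatever space remains at the end of a slot is always large enough for the padding header written by reserve_space().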
6031 +
6032 +/* Called by the slot handler thread */
6033 +static VCHIQ_SERVICE_T *
6034 +get_listening_service(VCHIQ_STATE_T *state, int fourcc)
6035 +{
6036 + int i;
6037 +
6038 + WARN_ON(fourcc == VCHIQ_FOURCC_INVALID);
6039 +
6040 + for (i = 0; i < state->unused_service; i++) {
6041 + VCHIQ_SERVICE_T *service = state->services[i];
6042 + if (service &&
6043 + (service->public_fourcc == fourcc) &&
6044 + ((service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
6045 + ((service->srvstate == VCHIQ_SRVSTATE_OPEN) &&
6046 + (service->remoteport == VCHIQ_PORT_FREE)))) {
6047 + lock_service(service);
6048 + return service;
6049 + }
6050 + }
6051 +
6052 + return NULL;
6053 +}
6054 +
6055 +/* Called by the slot handler thread */
6056 +static VCHIQ_SERVICE_T *
6057 +get_connected_service(VCHIQ_STATE_T *state, unsigned int port)
6058 +{
6059 + int i;
6060 + for (i = 0; i < state->unused_service; i++) {
6061 + VCHIQ_SERVICE_T *service = state->services[i];
6062 + if (service && (service->srvstate == VCHIQ_SRVSTATE_OPEN)
6063 + && (service->remoteport == port)) {
6064 + lock_service(service);
6065 + return service;
6066 + }
6067 + }
6068 + return NULL;
6069 +}
6070 +
6071 +inline void
6072 +request_poll(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service, int poll_type)
6073 +{
6074 + uint32_t value;
6075 +
6076 + if (service) {
6077 + do {
6078 + value = atomic_read(&service->poll_flags);
6079 + } while (atomic_cmpxchg(&service->poll_flags, value,
6080 + value | (1 << poll_type)) != value);
6081 +
6082 + do {
6083 + value = atomic_read(&state->poll_services[
6084 + service->localport>>5]);
6085 + } while (atomic_cmpxchg(
6086 + &state->poll_services[service->localport>>5],
6087 + value, value | (1 << (service->localport & 0x1f)))
6088 + != value);
6089 + }
6090 +
6091 + state->poll_needed = 1;
6092 + wmb();
6093 +
6094 + /* ... and ensure the slot handler runs. */
6095 + remote_event_signal_local(&state->local->trigger);
6096 +}
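
Both loops above are the standard lock-free read-modify-write: re-read the word, OR in the wanted bit, and retry the atomic_cmpxchg() until no other CPU has changed the word in between. A generic sketch of the idiom (illustrative only, not the driver's own helper):

#include <linux/atomic.h>

/* Illustrative only: atomically set bit 'bit' in an atomic_t bitmask. */
static void example_atomic_set_bit(atomic_t *flags, unsigned int bit)
{
	unsigned int old;

	do {
		old = atomic_read(flags);
		/* atomic_cmpxchg() returns the previous value; anything
		 * other than 'old' means another thread raced us, so retry. */
	} while (atomic_cmpxchg(flags, old, old | (1u << bit)) != old);
}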
6097 +
6098 +/* Called from queue_message, by the slot handler and application threads,
6099 +** with slot_mutex held */
6100 +static VCHIQ_HEADER_T *
6101 +reserve_space(VCHIQ_STATE_T *state, int space, int is_blocking)
6102 +{
6103 + VCHIQ_SHARED_STATE_T *local = state->local;
6104 + int tx_pos = state->local_tx_pos;
6105 + int slot_space = VCHIQ_SLOT_SIZE - (tx_pos & VCHIQ_SLOT_MASK);
6106 +
6107 + if (space > slot_space) {
6108 + VCHIQ_HEADER_T *header;
6109 + /* Fill the remaining space with padding */
6110 + WARN_ON(state->tx_data == NULL);
6111 + header = (VCHIQ_HEADER_T *)
6112 + (state->tx_data + (tx_pos & VCHIQ_SLOT_MASK));
6113 + header->msgid = VCHIQ_MSGID_PADDING;
6114 + header->size = slot_space - sizeof(VCHIQ_HEADER_T);
6115 +
6116 + tx_pos += slot_space;
6117 + }
6118 +
6119 + /* If necessary, get the next slot. */
6120 + if ((tx_pos & VCHIQ_SLOT_MASK) == 0) {
6121 + int slot_index;
6122 +
6123 + /* If there is no free slot... */
6124 +
6125 + if (down_trylock(&state->slot_available_event) != 0) {
6126 + /* ...wait for one. */
6127 +
6128 + VCHIQ_STATS_INC(state, slot_stalls);
6129 +
6130 + /* But first, flush through the last slot. */
6131 + state->local_tx_pos = tx_pos;
6132 + local->tx_pos = tx_pos;
6133 + remote_event_signal(&state->remote->trigger);
6134 +
6135 + if (!is_blocking ||
6136 + (down_interruptible(
6137 + &state->slot_available_event) != 0))
6138 + return NULL; /* No space available */
6139 + }
6140 +
6141 + BUG_ON(tx_pos ==
6142 + (state->slot_queue_available * VCHIQ_SLOT_SIZE));
6143 +
6144 + slot_index = local->slot_queue[
6145 + SLOT_QUEUE_INDEX_FROM_POS(tx_pos) &
6146 + VCHIQ_SLOT_QUEUE_MASK];
6147 + state->tx_data =
6148 + (char *)SLOT_DATA_FROM_INDEX(state, slot_index);
6149 + }
6150 +
6151 + state->local_tx_pos = tx_pos + space;
6152 +
6153 + return (VCHIQ_HEADER_T *)(state->tx_data + (tx_pos & VCHIQ_SLOT_MASK));
6154 +}
6155 +
6156 +/* Called by the recycle thread. */
6157 +static void
6158 +process_free_queue(VCHIQ_STATE_T *state)
6159 +{
6160 + VCHIQ_SHARED_STATE_T *local = state->local;
6161 + BITSET_T service_found[BITSET_SIZE(VCHIQ_MAX_SERVICES)];
6162 + int slot_queue_available;
6163 +
6164 + /* Use a read memory barrier to ensure that any state that may have
6165 + ** been modified by another thread is not masked by stale prefetched
6166 + ** values. */
6167 + rmb();
6168 +
6169 + /* Find slots which have been freed by the other side, and return them
6170 + ** to the available queue. */
6171 + slot_queue_available = state->slot_queue_available;
6172 +
6173 + while (slot_queue_available != local->slot_queue_recycle) {
6174 + unsigned int pos;
6175 + int slot_index = local->slot_queue[slot_queue_available++ &
6176 + VCHIQ_SLOT_QUEUE_MASK];
6177 + char *data = (char *)SLOT_DATA_FROM_INDEX(state, slot_index);
6178 + int data_found = 0;
6179 +
6180 + vchiq_log_trace(vchiq_core_log_level, "%d: pfq %d=%x %x %x",
6181 + state->id, slot_index, (unsigned int)data,
6182 + local->slot_queue_recycle, slot_queue_available);
6183 +
6184 + /* Initialise the bitmask for services which have used this
6185 + ** slot */
6186 + BITSET_ZERO(service_found);
6187 +
6188 + pos = 0;
6189 +
6190 + while (pos < VCHIQ_SLOT_SIZE) {
6191 + VCHIQ_HEADER_T *header =
6192 + (VCHIQ_HEADER_T *)(data + pos);
6193 + int msgid = header->msgid;
6194 + if (VCHIQ_MSG_TYPE(msgid) == VCHIQ_MSG_DATA) {
6195 + int port = VCHIQ_MSG_SRCPORT(msgid);
6196 + VCHIQ_SERVICE_QUOTA_T *service_quota =
6197 + &state->service_quotas[port];
6198 + int count;
6199 + spin_lock(&quota_spinlock);
6200 + count = service_quota->message_use_count;
6201 + if (count > 0)
6202 + service_quota->message_use_count =
6203 + count - 1;
6204 + spin_unlock(&quota_spinlock);
6205 +
6206 + if (count == service_quota->message_quota)
6207 + /* Signal the service that it
6208 + ** has dropped below its quota
6209 + */
6210 + up(&service_quota->quota_event);
6211 + else if (count == 0) {
6212 + vchiq_log_error(vchiq_core_log_level,
6213 + "service %d "
6214 + "message_use_count=%d "
6215 + "(header %x, msgid %x, "
6216 + "header->msgid %x, "
6217 + "header->size %x)",
6218 + port,
6219 + service_quota->
6220 + message_use_count,
6221 + (unsigned int)header, msgid,
6222 + header->msgid,
6223 + header->size);
6224 + WARN(1, "invalid message use count\n");
6225 + }
6226 + if (!BITSET_IS_SET(service_found, port)) {
6227 + /* Set the found bit for this service */
6228 + BITSET_SET(service_found, port);
6229 +
6230 + spin_lock(&quota_spinlock);
6231 + count = service_quota->slot_use_count;
6232 + if (count > 0)
6233 + service_quota->slot_use_count =
6234 + count - 1;
6235 + spin_unlock(&quota_spinlock);
6236 +
6237 + if (count > 0) {
6238 + /* Signal the service in case
6239 + ** it has dropped below its
6240 + ** quota */
6241 + up(&service_quota->quota_event);
6242 + vchiq_log_trace(
6243 + vchiq_core_log_level,
6244 + "%d: pfq:%d %x@%x - "
6245 + "slot_use->%d",
6246 + state->id, port,
6247 + header->size,
6248 + (unsigned int)header,
6249 + count - 1);
6250 + } else {
6251 + vchiq_log_error(
6252 + vchiq_core_log_level,
6253 + "service %d "
6254 + "slot_use_count"
6255 + "=%d (header %x"
6256 + ", msgid %x, "
6257 + "header->msgid"
6258 + " %x, header->"
6259 + "size %x)",
6260 + port, count,
6261 + (unsigned int)header,
6262 + msgid,
6263 + header->msgid,
6264 + header->size);
6265 + WARN(1, "bad slot use count\n");
6266 + }
6267 + }
6268 +
6269 + data_found = 1;
6270 + }
6271 +
6272 + pos += calc_stride(header->size);
6273 + if (pos > VCHIQ_SLOT_SIZE) {
6274 + vchiq_log_error(vchiq_core_log_level,
6275 + "pfq - pos %x: header %x, msgid %x, "
6276 + "header->msgid %x, header->size %x",
6277 + pos, (unsigned int)header, msgid,
6278 + header->msgid, header->size);
6279 + WARN(1, "invalid slot position\n");
6280 + }
6281 + }
6282 +
6283 + if (data_found) {
6284 + int count;
6285 + spin_lock(&quota_spinlock);
6286 + count = state->data_use_count;
6287 + if (count > 0)
6288 + state->data_use_count =
6289 + count - 1;
6290 + spin_unlock(&quota_spinlock);
6291 + if (count == state->data_quota)
6292 + up(&state->data_quota_event);
6293 + }
6294 +
6295 + state->slot_queue_available = slot_queue_available;
6296 + up(&state->slot_available_event);
6297 + }
6298 +}
6299 +
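Note how process_free_queue() compares the pre-decrement count against the quota, so a blocked sender is woken exactly once, at the moment the service drops back under its limit. A hedged sketch of that pattern using illustrative names (not the driver's types or locking):

#include <stdio.h>

struct quota {
    int use_count;   /* messages currently charged to the service */
    int limit;       /* the service's message_quota */
};

/* returns 1 when a waiter should be woken (the count was exactly at quota) */
static int release_one(struct quota *q)
{
    int count = q->use_count;
    if (count > 0)
        q->use_count = count - 1;
    return count == q->limit;
}

int main(void)
{
    struct quota q = { .use_count = 4, .limit = 4 };
    for (int i = 0; i < 5; i++)
        printf("release %d -> wake=%d use_count=%d\n",
               i, release_one(&q), q.use_count);
    return 0;
}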
6300 +/* Called by the slot handler and application threads */
6301 +static VCHIQ_STATUS_T
6302 +queue_message(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
6303 + int msgid, const VCHIQ_ELEMENT_T *elements,
6304 + int count, int size, int is_blocking)
6305 +{
6306 + VCHIQ_SHARED_STATE_T *local;
6307 + VCHIQ_SERVICE_QUOTA_T *service_quota = NULL;
6308 + VCHIQ_HEADER_T *header;
6309 + int type = VCHIQ_MSG_TYPE(msgid);
6310 +
6311 + unsigned int stride;
6312 +
6313 + local = state->local;
6314 +
6315 + stride = calc_stride(size);
6316 +
6317 + WARN_ON(!(stride <= VCHIQ_SLOT_SIZE));
6318 +
6319 + if ((type != VCHIQ_MSG_RESUME) &&
6320 + (mutex_lock_interruptible(&state->slot_mutex) != 0))
6321 + return VCHIQ_RETRY;
6322 +
6323 + if (type == VCHIQ_MSG_DATA) {
6324 + int tx_end_index;
6325 +
6326 + BUG_ON(!service);
6327 +
6328 + if (service->closing) {
6329 + /* The service has been closed */
6330 + mutex_unlock(&state->slot_mutex);
6331 + return VCHIQ_ERROR;
6332 + }
6333 +
6334 + service_quota = &state->service_quotas[service->localport];
6335 +
6336 + spin_lock(&quota_spinlock);
6337 +
6338 + /* Ensure this service doesn't use more than its quota of
6339 + ** messages or slots */
6340 + tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
6341 + state->local_tx_pos + stride - 1);
6342 +
6343 + /* Ensure data messages don't use more than their quota of
6344 + ** slots */
6345 + while ((tx_end_index != state->previous_data_index) &&
6346 + (state->data_use_count == state->data_quota)) {
6347 + VCHIQ_STATS_INC(state, data_stalls);
6348 + spin_unlock(&quota_spinlock);
6349 + mutex_unlock(&state->slot_mutex);
6350 +
6351 + if (down_interruptible(&state->data_quota_event)
6352 + != 0)
6353 + return VCHIQ_RETRY;
6354 +
6355 + mutex_lock(&state->slot_mutex);
6356 + spin_lock(&quota_spinlock);
6357 + tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
6358 + state->local_tx_pos + stride - 1);
6359 + if ((tx_end_index == state->previous_data_index) ||
6360 + (state->data_use_count < state->data_quota)) {
6361 + /* Pass the signal on to other waiters */
6362 + up(&state->data_quota_event);
6363 + break;
6364 + }
6365 + }
6366 +
6367 + while ((service_quota->message_use_count ==
6368 + service_quota->message_quota) ||
6369 + ((tx_end_index != service_quota->previous_tx_index) &&
6370 + (service_quota->slot_use_count ==
6371 + service_quota->slot_quota))) {
6372 + spin_unlock(&quota_spinlock);
6373 + vchiq_log_trace(vchiq_core_log_level,
6374 + "%d: qm:%d %s,%x - quota stall "
6375 + "(msg %d, slot %d)",
6376 + state->id, service->localport,
6377 + msg_type_str(type), size,
6378 + service_quota->message_use_count,
6379 + service_quota->slot_use_count);
6380 + VCHIQ_SERVICE_STATS_INC(service, quota_stalls);
6381 + mutex_unlock(&state->slot_mutex);
6382 + if (down_interruptible(&service_quota->quota_event)
6383 + != 0)
6384 + return VCHIQ_RETRY;
6385 + if (service->closing)
6386 + return VCHIQ_ERROR;
6387 + if (mutex_lock_interruptible(&state->slot_mutex) != 0)
6388 + return VCHIQ_RETRY;
6389 + if (service->srvstate != VCHIQ_SRVSTATE_OPEN) {
6390 + /* The service has been closed */
6391 + mutex_unlock(&state->slot_mutex);
6392 + return VCHIQ_ERROR;
6393 + }
6394 + spin_lock(&quota_spinlock);
6395 + tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
6396 + state->local_tx_pos + stride - 1);
6397 + }
6398 +
6399 + spin_unlock(&quota_spinlock);
6400 + }
6401 +
6402 + header = reserve_space(state, stride, is_blocking);
6403 +
6404 + if (!header) {
6405 + if (service)
6406 + VCHIQ_SERVICE_STATS_INC(service, slot_stalls);
6407 + mutex_unlock(&state->slot_mutex);
6408 + return VCHIQ_RETRY;
6409 + }
6410 +
6411 + if (type == VCHIQ_MSG_DATA) {
6412 + int i, pos;
6413 + int tx_end_index;
6414 + int slot_use_count;
6415 +
6416 + vchiq_log_info(vchiq_core_log_level,
6417 + "%d: qm %s@%x,%x (%d->%d)",
6418 + state->id,
6419 + msg_type_str(VCHIQ_MSG_TYPE(msgid)),
6420 + (unsigned int)header, size,
6421 + VCHIQ_MSG_SRCPORT(msgid),
6422 + VCHIQ_MSG_DSTPORT(msgid));
6423 +
6424 + BUG_ON(!service);
6425 +
6426 + for (i = 0, pos = 0; i < (unsigned int)count;
6427 + pos += elements[i++].size)
6428 + if (elements[i].size) {
6429 + if (vchiq_copy_from_user
6430 + (header->data + pos, elements[i].data,
6431 + (size_t) elements[i].size) !=
6432 + VCHIQ_SUCCESS) {
6433 + mutex_unlock(&state->slot_mutex);
6434 + VCHIQ_SERVICE_STATS_INC(service,
6435 + error_count);
6436 + return VCHIQ_ERROR;
6437 + }
6438 + if (i == 0) {
6439 + if (vchiq_core_msg_log_level >=
6440 + VCHIQ_LOG_INFO)
6441 + vchiq_log_dump_mem("Sent", 0,
6442 + header->data + pos,
6443 + min(64u,
6444 + elements[0].size));
6445 + }
6446 + }
6447 +
6448 + spin_lock(&quota_spinlock);
6449 + service_quota->message_use_count++;
6450 +
6451 + tx_end_index =
6452 + SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos - 1);
6453 +
6454 + /* If this transmission can't fit in the last slot used by any
6455 + ** service, the data_use_count must be increased. */
6456 + if (tx_end_index != state->previous_data_index) {
6457 + state->previous_data_index = tx_end_index;
6458 + state->data_use_count++;
6459 + }
6460 +
6461 + /* If this isn't the same slot last used by this service,
6462 + ** the service's slot_use_count must be increased. */
6463 + if (tx_end_index != service_quota->previous_tx_index) {
6464 + service_quota->previous_tx_index = tx_end_index;
6465 + slot_use_count = ++service_quota->slot_use_count;
6466 + } else {
6467 + slot_use_count = 0;
6468 + }
6469 +
6470 + spin_unlock(&quota_spinlock);
6471 +
6472 + if (slot_use_count)
6473 + vchiq_log_trace(vchiq_core_log_level,
6474 + "%d: qm:%d %s,%x - slot_use->%d (hdr %p)",
6475 + state->id, service->localport,
6476 + msg_type_str(VCHIQ_MSG_TYPE(msgid)), size,
6477 + slot_use_count, header);
6478 +
6479 + VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
6480 + VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
6481 + } else {
6482 + vchiq_log_info(vchiq_core_log_level,
6483 + "%d: qm %s@%x,%x (%d->%d)", state->id,
6484 + msg_type_str(VCHIQ_MSG_TYPE(msgid)),
6485 + (unsigned int)header, size,
6486 + VCHIQ_MSG_SRCPORT(msgid),
6487 + VCHIQ_MSG_DSTPORT(msgid));
6488 + if (size != 0) {
6489 + WARN_ON(!((count == 1) && (size == elements[0].size)));
6490 + memcpy(header->data, elements[0].data,
6491 + elements[0].size);
6492 + }
6493 + VCHIQ_STATS_INC(state, ctrl_tx_count);
6494 + }
6495 +
6496 + header->msgid = msgid;
6497 + header->size = size;
6498 +
6499 + {
6500 + int svc_fourcc;
6501 +
6502 + svc_fourcc = service
6503 + ? service->base.fourcc
6504 + : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
6505 +
6506 + vchiq_log_info(vchiq_core_msg_log_level,
6507 + "Sent Msg %s(%u) to %c%c%c%c s:%u d:%d len:%d",
6508 + msg_type_str(VCHIQ_MSG_TYPE(msgid)),
6509 + VCHIQ_MSG_TYPE(msgid),
6510 + VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
6511 + VCHIQ_MSG_SRCPORT(msgid),
6512 + VCHIQ_MSG_DSTPORT(msgid),
6513 + size);
6514 + }
6515 +
6516 + /* Make sure the new header is visible to the peer. */
6517 + wmb();
6518 +
6519 + /* Make the new tx_pos visible to the peer. */
6520 + local->tx_pos = state->local_tx_pos;
6521 + wmb();
6522 +
6523 + if (service && (type == VCHIQ_MSG_CLOSE))
6524 + vchiq_set_service_state(service, VCHIQ_SRVSTATE_CLOSESENT);
6525 +
6526 + if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_PAUSE)
6527 + mutex_unlock(&state->slot_mutex);
6528 +
6529 + remote_event_signal(&state->remote->trigger);
6530 +
6531 + return VCHIQ_SUCCESS;
6532 +}
6533 +
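queue_message() receives a pre-packed msgid and only ever inspects it through the VCHIQ_MSG_TYPE/SRCPORT/DSTPORT accessors. The sketch below shows the kind of bit packing those macros perform; the field widths here are assumptions for illustration only, and the authoritative definitions are in vchiq_core.h:

#include <stdio.h>

#define MAKE_MSG(type, src, dst)  (((type) << 24) | ((src) << 12) | (dst))
#define MSG_TYPE(msgid)           ((unsigned int)(msgid) >> 24)
#define MSG_SRCPORT(msgid)        (((msgid) >> 12) & 0xfff)
#define MSG_DSTPORT(msgid)        ((msgid) & 0xfff)

int main(void)
{
    unsigned int msgid = MAKE_MSG(5 /* e.g. a DATA type */, 3, 7);

    printf("type=%u src=%u dst=%u\n",
           MSG_TYPE(msgid), MSG_SRCPORT(msgid), MSG_DSTPORT(msgid));
    return 0;
}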
6534 +/* Called by the slot handler and application threads */
6535 +static VCHIQ_STATUS_T
6536 +queue_message_sync(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
6537 + int msgid, const VCHIQ_ELEMENT_T *elements,
6538 + int count, int size, int is_blocking)
6539 +{
6540 + VCHIQ_SHARED_STATE_T *local;
6541 + VCHIQ_HEADER_T *header;
6542 +
6543 + local = state->local;
6544 +
6545 + if ((VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_RESUME) &&
6546 + (mutex_lock_interruptible(&state->sync_mutex) != 0))
6547 + return VCHIQ_RETRY;
6548 +
6549 + remote_event_wait(&local->sync_release);
6550 +
6551 + rmb();
6552 +
6553 + header = (VCHIQ_HEADER_T *)SLOT_DATA_FROM_INDEX(state,
6554 + local->slot_sync);
6555 +
6556 + {
6557 + int oldmsgid = header->msgid;
6558 + if (oldmsgid != VCHIQ_MSGID_PADDING)
6559 + vchiq_log_error(vchiq_core_log_level,
6560 + "%d: qms - msgid %x, not PADDING",
6561 + state->id, oldmsgid);
6562 + }
6563 +
6564 + if (service) {
6565 + int i, pos;
6566 +
6567 + vchiq_log_info(vchiq_sync_log_level,
6568 + "%d: qms %s@%x,%x (%d->%d)", state->id,
6569 + msg_type_str(VCHIQ_MSG_TYPE(msgid)),
6570 + (unsigned int)header, size,
6571 + VCHIQ_MSG_SRCPORT(msgid),
6572 + VCHIQ_MSG_DSTPORT(msgid));
6573 +
6574 + for (i = 0, pos = 0; i < (unsigned int)count;
6575 + pos += elements[i++].size)
6576 + if (elements[i].size) {
6577 + if (vchiq_copy_from_user
6578 + (header->data + pos, elements[i].data,
6579 + (size_t) elements[i].size) !=
6580 + VCHIQ_SUCCESS) {
6581 + mutex_unlock(&state->sync_mutex);
6582 + VCHIQ_SERVICE_STATS_INC(service,
6583 + error_count);
6584 + return VCHIQ_ERROR;
6585 + }
6586 + if (i == 0) {
6587 + if (vchiq_sync_log_level >=
6588 + VCHIQ_LOG_TRACE)
6589 + vchiq_log_dump_mem("Sent Sync",
6590 + 0, header->data + pos,
6591 + min(64u,
6592 + elements[0].size));
6593 + }
6594 + }
6595 +
6596 + VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
6597 + VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
6598 + } else {
6599 + vchiq_log_info(vchiq_sync_log_level,
6600 + "%d: qms %s@%x,%x (%d->%d)", state->id,
6601 + msg_type_str(VCHIQ_MSG_TYPE(msgid)),
6602 + (unsigned int)header, size,
6603 + VCHIQ_MSG_SRCPORT(msgid),
6604 + VCHIQ_MSG_DSTPORT(msgid));
6605 + if (size != 0) {
6606 + WARN_ON(!((count == 1) && (size == elements[0].size)));
6607 + memcpy(header->data, elements[0].data,
6608 + elements[0].size);
6609 + }
6610 + VCHIQ_STATS_INC(state, ctrl_tx_count);
6611 + }
6612 +
6613 + header->size = size;
6614 + header->msgid = msgid;
6615 +
6616 + if (vchiq_sync_log_level >= VCHIQ_LOG_TRACE) {
6617 + int svc_fourcc;
6618 +
6619 + svc_fourcc = service
6620 + ? service->base.fourcc
6621 + : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
6622 +
6623 + vchiq_log_trace(vchiq_sync_log_level,
6624 + "Sent Sync Msg %s(%u) to %c%c%c%c s:%u d:%d len:%d",
6625 + msg_type_str(VCHIQ_MSG_TYPE(msgid)),
6626 + VCHIQ_MSG_TYPE(msgid),
6627 + VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
6628 + VCHIQ_MSG_SRCPORT(msgid),
6629 + VCHIQ_MSG_DSTPORT(msgid),
6630 + size);
6631 + }
6632 +
6633 + /* Make sure the new header is visible to the peer. */
6634 + wmb();
6635 +
6636 + remote_event_signal(&state->remote->sync_trigger);
6637 +
6638 + if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_PAUSE)
6639 + mutex_unlock(&state->sync_mutex);
6640 +
6641 + return VCHIQ_SUCCESS;
6642 +}
6643 +
6644 +static inline void
6645 +claim_slot(VCHIQ_SLOT_INFO_T *slot)
6646 +{
6647 + slot->use_count++;
6648 +}
6649 +
6650 +static void
6651 +release_slot(VCHIQ_STATE_T *state, VCHIQ_SLOT_INFO_T *slot_info,
6652 + VCHIQ_HEADER_T *header, VCHIQ_SERVICE_T *service)
6653 +{
6654 + int release_count;
6655 +
6656 + mutex_lock(&state->recycle_mutex);
6657 +
6658 + if (header) {
6659 + int msgid = header->msgid;
6660 + if (((msgid & VCHIQ_MSGID_CLAIMED) == 0) ||
6661 + (service && service->closing)) {
6662 + mutex_unlock(&state->recycle_mutex);
6663 + return;
6664 + }
6665 +
6666 + /* Rewrite the message header to prevent a double
6667 + ** release */
6668 + header->msgid = msgid & ~VCHIQ_MSGID_CLAIMED;
6669 + }
6670 +
6671 + release_count = slot_info->release_count;
6672 + slot_info->release_count = ++release_count;
6673 +
6674 + if (release_count == slot_info->use_count) {
6675 + int slot_queue_recycle;
6676 + /* Add to the freed queue */
6677 +
6678 + /* A read barrier is necessary here to prevent speculative
6679 + ** fetches of remote->slot_queue_recycle from overtaking the
6680 + ** mutex. */
6681 + rmb();
6682 +
6683 + slot_queue_recycle = state->remote->slot_queue_recycle;
6684 + state->remote->slot_queue[slot_queue_recycle &
6685 + VCHIQ_SLOT_QUEUE_MASK] =
6686 + SLOT_INDEX_FROM_INFO(state, slot_info);
6687 + state->remote->slot_queue_recycle = slot_queue_recycle + 1;
6688 + vchiq_log_info(vchiq_core_log_level,
6689 + "%d: release_slot %d - recycle->%x",
6690 + state->id, SLOT_INDEX_FROM_INFO(state, slot_info),
6691 + state->remote->slot_queue_recycle);
6692 +
6693 + /* A write barrier is necessary, but remote_event_signal
6694 + ** contains one. */
6695 + remote_event_signal(&state->remote->recycle);
6696 + }
6697 +
6698 + mutex_unlock(&state->recycle_mutex);
6699 +}
6700 +
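release_slot() only hands a slot back for recycling once its release_count catches up with the use_count accumulated by claim_slot() while the slot was being parsed, i.e. once every claimed message in the slot has been released. A small sketch of that completion check (illustrative types, not the driver's):

#include <stdio.h>

struct slot_info {
    int use_count;      /* claims taken while parsing the slot */
    int release_count;  /* releases seen so far */
};

/* returns 1 when the final release arrives and the slot may be recycled */
static int release(struct slot_info *s)
{
    s->release_count++;
    return s->release_count == s->use_count;
}

int main(void)
{
    struct slot_info s = { .use_count = 3, .release_count = 0 };
    for (int i = 0; i < 3; i++)
        printf("release %d -> recycle=%d\n", i, release(&s));
    return 0;
}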
6701 +/* Called by the slot handler - don't hold the bulk mutex */
6702 +static VCHIQ_STATUS_T
6703 +notify_bulks(VCHIQ_SERVICE_T *service, VCHIQ_BULK_QUEUE_T *queue,
6704 + int retry_poll)
6705 +{
6706 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
6707 +
6708 + vchiq_log_trace(vchiq_core_log_level,
6709 + "%d: nb:%d %cx - p=%x rn=%x r=%x",
6710 + service->state->id, service->localport,
6711 + (queue == &service->bulk_tx) ? 't' : 'r',
6712 + queue->process, queue->remote_notify, queue->remove);
6713 +
6714 + if (service->state->is_master) {
6715 + while (queue->remote_notify != queue->process) {
6716 + VCHIQ_BULK_T *bulk =
6717 + &queue->bulks[BULK_INDEX(queue->remote_notify)];
6718 + int msgtype = (bulk->dir == VCHIQ_BULK_TRANSMIT) ?
6719 + VCHIQ_MSG_BULK_RX_DONE : VCHIQ_MSG_BULK_TX_DONE;
6720 + int msgid = VCHIQ_MAKE_MSG(msgtype, service->localport,
6721 + service->remoteport);
6722 + VCHIQ_ELEMENT_T element = { &bulk->actual, 4 };
6723 + /* Only reply to non-dummy bulk requests */
6724 + if (bulk->remote_data) {
6725 + status = queue_message(service->state, NULL,
6726 + msgid, &element, 1, 4, 0);
6727 + if (status != VCHIQ_SUCCESS)
6728 + break;
6729 + }
6730 + queue->remote_notify++;
6731 + }
6732 + } else {
6733 + queue->remote_notify = queue->process;
6734 + }
6735 +
6736 + if (status == VCHIQ_SUCCESS) {
6737 + while (queue->remove != queue->remote_notify) {
6738 + VCHIQ_BULK_T *bulk =
6739 + &queue->bulks[BULK_INDEX(queue->remove)];
6740 +
6741 + /* Only generate callbacks for non-dummy bulk
6742 + ** requests, and non-terminated services */
6743 + if (bulk->data && service->instance) {
6744 + if (bulk->actual != VCHIQ_BULK_ACTUAL_ABORTED) {
6745 + if (bulk->dir == VCHIQ_BULK_TRANSMIT) {
6746 + VCHIQ_SERVICE_STATS_INC(service,
6747 + bulk_tx_count);
6748 + VCHIQ_SERVICE_STATS_ADD(service,
6749 + bulk_tx_bytes,
6750 + bulk->actual);
6751 + } else {
6752 + VCHIQ_SERVICE_STATS_INC(service,
6753 + bulk_rx_count);
6754 + VCHIQ_SERVICE_STATS_ADD(service,
6755 + bulk_rx_bytes,
6756 + bulk->actual);
6757 + }
6758 + } else {
6759 + VCHIQ_SERVICE_STATS_INC(service,
6760 + bulk_aborted_count);
6761 + }
6762 + if (bulk->mode == VCHIQ_BULK_MODE_BLOCKING) {
6763 + struct bulk_waiter *waiter;
6764 + spin_lock(&bulk_waiter_spinlock);
6765 + waiter = bulk->userdata;
6766 + if (waiter) {
6767 + waiter->actual = bulk->actual;
6768 + up(&waiter->event);
6769 + }
6770 + spin_unlock(&bulk_waiter_spinlock);
6771 + } else if (bulk->mode ==
6772 + VCHIQ_BULK_MODE_CALLBACK) {
6773 + VCHIQ_REASON_T reason = (bulk->dir ==
6774 + VCHIQ_BULK_TRANSMIT) ?
6775 + ((bulk->actual ==
6776 + VCHIQ_BULK_ACTUAL_ABORTED) ?
6777 + VCHIQ_BULK_TRANSMIT_ABORTED :
6778 + VCHIQ_BULK_TRANSMIT_DONE) :
6779 + ((bulk->actual ==
6780 + VCHIQ_BULK_ACTUAL_ABORTED) ?
6781 + VCHIQ_BULK_RECEIVE_ABORTED :
6782 + VCHIQ_BULK_RECEIVE_DONE);
6783 + status = make_service_callback(service,
6784 + reason, NULL, bulk->userdata);
6785 + if (status == VCHIQ_RETRY)
6786 + break;
6787 + }
6788 + }
6789 +
6790 + queue->remove++;
6791 + up(&service->bulk_remove_event);
6792 + }
6793 + if (!retry_poll)
6794 + status = VCHIQ_SUCCESS;
6795 + }
6796 +
6797 + if (status == VCHIQ_RETRY)
6798 + request_poll(service->state, service,
6799 + (queue == &service->bulk_tx) ?
6800 + VCHIQ_POLL_TXNOTIFY : VCHIQ_POLL_RXNOTIFY);
6801 +
6802 + return status;
6803 +}
6804 +
6805 +/* Called by the slot handler thread */
6806 +static void
6807 +poll_services(VCHIQ_STATE_T *state)
6808 +{
6809 + int group, i;
6810 +
6811 + for (group = 0; group < BITSET_SIZE(state->unused_service); group++) {
6812 + uint32_t flags;
6813 + flags = atomic_xchg(&state->poll_services[group], 0);
6814 + for (i = 0; flags; i++) {
6815 + if (flags & (1 << i)) {
6816 + VCHIQ_SERVICE_T *service =
6817 + find_service_by_port(state,
6818 + (group<<5) + i);
6819 + uint32_t service_flags;
6820 + flags &= ~(1 << i);
6821 + if (!service)
6822 + continue;
6823 + service_flags =
6824 + atomic_xchg(&service->poll_flags, 0);
6825 + if (service_flags &
6826 + (1 << VCHIQ_POLL_REMOVE)) {
6827 + vchiq_log_info(vchiq_core_log_level,
6828 + "%d: ps - remove %d<->%d",
6829 + state->id, service->localport,
6830 + service->remoteport);
6831 +
6832 + /* Make it look like a client, because
6833 + it must be removed and not left in
6834 + the LISTENING state. */
6835 + service->public_fourcc =
6836 + VCHIQ_FOURCC_INVALID;
6837 +
6838 + if (vchiq_close_service_internal(
6839 + service, 0/*!close_recvd*/) !=
6840 + VCHIQ_SUCCESS)
6841 + request_poll(state, service,
6842 + VCHIQ_POLL_REMOVE);
6843 + } else if (service_flags &
6844 + (1 << VCHIQ_POLL_TERMINATE)) {
6845 + vchiq_log_info(vchiq_core_log_level,
6846 + "%d: ps - terminate %d<->%d",
6847 + state->id, service->localport,
6848 + service->remoteport);
6849 + if (vchiq_close_service_internal(
6850 + service, 0/*!close_recvd*/) !=
6851 + VCHIQ_SUCCESS)
6852 + request_poll(state, service,
6853 + VCHIQ_POLL_TERMINATE);
6854 + }
6855 + if (service_flags & (1 << VCHIQ_POLL_TXNOTIFY))
6856 + notify_bulks(service,
6857 + &service->bulk_tx,
6858 + 1/*retry_poll*/);
6859 + if (service_flags & (1 << VCHIQ_POLL_RXNOTIFY))
6860 + notify_bulks(service,
6861 + &service->bulk_rx,
6862 + 1/*retry_poll*/);
6863 + unlock_service(service);
6864 + }
6865 + }
6866 + }
6867 +}
6868 +
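poll_services() packs one pending-poll bit per local port into 32-bit group words, so a port number decomposes as (group << 5) + bit, and atomic_xchg() grabs and clears a whole group in a single step. A sketch of the same walk over plain integers (the atomic exchange is omitted, and the flag values are hypothetical):

#include <stdio.h>

int main(void)
{
    unsigned int groups[2] = { 0x00000009u, 0x80000000u }; /* hypothetical flags */

    for (int group = 0; group < 2; group++) {
        unsigned int flags = groups[group];   /* atomic_xchg(..., 0) in the driver */
        for (int bit = 0; flags; bit++) {
            if (flags & (1u << bit)) {
                flags &= ~(1u << bit);
                printf("service port %d needs polling\n", (group << 5) + bit);
            }
        }
    }
    return 0;
}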
6869 +/* Called by the slot handler or application threads, holding the bulk mutex. */
6870 +static int
6871 +resolve_bulks(VCHIQ_SERVICE_T *service, VCHIQ_BULK_QUEUE_T *queue)
6872 +{
6873 + VCHIQ_STATE_T *state = service->state;
6874 + int resolved = 0;
6875 + int rc;
6876 +
6877 + while ((queue->process != queue->local_insert) &&
6878 + (queue->process != queue->remote_insert)) {
6879 + VCHIQ_BULK_T *bulk = &queue->bulks[BULK_INDEX(queue->process)];
6880 +
6881 + vchiq_log_trace(vchiq_core_log_level,
6882 + "%d: rb:%d %cx - li=%x ri=%x p=%x",
6883 + state->id, service->localport,
6884 + (queue == &service->bulk_tx) ? 't' : 'r',
6885 + queue->local_insert, queue->remote_insert,
6886 + queue->process);
6887 +
6888 + WARN_ON(!((int)(queue->local_insert - queue->process) > 0));
6889 + WARN_ON(!((int)(queue->remote_insert - queue->process) > 0));
6890 +
6891 + rc = mutex_lock_interruptible(&state->bulk_transfer_mutex);
6892 + if (rc != 0)
6893 + break;
6894 +
6895 + vchiq_transfer_bulk(bulk);
6896 + mutex_unlock(&state->bulk_transfer_mutex);
6897 +
6898 + if (vchiq_core_msg_log_level >= VCHIQ_LOG_INFO) {
6899 + const char *header = (queue == &service->bulk_tx) ?
6900 + "Send Bulk to" : "Recv Bulk from";
6901 + if (bulk->actual != VCHIQ_BULK_ACTUAL_ABORTED)
6902 + vchiq_log_info(vchiq_core_msg_log_level,
6903 + "%s %c%c%c%c d:%d len:%d %x<->%x",
6904 + header,
6905 + VCHIQ_FOURCC_AS_4CHARS(
6906 + service->base.fourcc),
6907 + service->remoteport,
6908 + bulk->size,
6909 + (unsigned int)bulk->data,
6910 + (unsigned int)bulk->remote_data);
6911 + else
6912 + vchiq_log_info(vchiq_core_msg_log_level,
6913 + "%s %c%c%c%c d:%d ABORTED - tx len:%d,"
6914 + " rx len:%d %x<->%x",
6915 + header,
6916 + VCHIQ_FOURCC_AS_4CHARS(
6917 + service->base.fourcc),
6918 + service->remoteport,
6919 + bulk->size,
6920 + bulk->remote_size,
6921 + (unsigned int)bulk->data,
6922 + (unsigned int)bulk->remote_data);
6923 + }
6924 +
6925 + vchiq_complete_bulk(bulk);
6926 + queue->process++;
6927 + resolved++;
6928 + }
6929 + return resolved;
6930 +}
6931 +
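The bulk queues keep several free-running cursors (local_insert, remote_insert, process, remote_notify, remove) that are never reset; BULK_INDEX() masks them into the fixed-size bulks[] array, and signed differences such as (int)(queue->local_insert - queue->process) stay valid across wrap-around. A standalone sketch of that free-running-cursor ring, using an illustrative depth in place of VCHIQ_NUM_SERVICE_BULKS:

#include <stdio.h>

#define NUM_BULKS 4u                           /* illustrative queue depth */
#define BULK_INDEX(x) ((x) & (NUM_BULKS - 1u))

int main(void)
{
    int bulks[NUM_BULKS];
    unsigned int insert = 0, process = 0;      /* cursors only ever increase */

    for (int i = 0; i < 10; i++) {
        bulks[BULK_INDEX(insert)] = i;         /* producer side */
        insert++;

        int pending = (int)(insert - process); /* wrap-safe comparison */
        printf("consume %d (pending before consume: %d)\n",
               bulks[BULK_INDEX(process)], pending);
        process++;                             /* consumer side */
    }
    return 0;
}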
6932 +/* Called with the bulk_mutex held */
6933 +static void
6934 +abort_outstanding_bulks(VCHIQ_SERVICE_T *service, VCHIQ_BULK_QUEUE_T *queue)
6935 +{
6936 + int is_tx = (queue == &service->bulk_tx);
6937 + vchiq_log_trace(vchiq_core_log_level,
6938 + "%d: aob:%d %cx - li=%x ri=%x p=%x",
6939 + service->state->id, service->localport, is_tx ? 't' : 'r',
6940 + queue->local_insert, queue->remote_insert, queue->process);
6941 +
6942 + WARN_ON(!((int)(queue->local_insert - queue->process) >= 0));
6943 + WARN_ON(!((int)(queue->remote_insert - queue->process) >= 0));
6944 +
6945 + while ((queue->process != queue->local_insert) ||
6946 + (queue->process != queue->remote_insert)) {
6947 + VCHIQ_BULK_T *bulk = &queue->bulks[BULK_INDEX(queue->process)];
6948 +
6949 + if (queue->process == queue->remote_insert) {
6950 + /* fabricate a matching dummy bulk */
6951 + bulk->remote_data = NULL;
6952 + bulk->remote_size = 0;
6953 + queue->remote_insert++;
6954 + }
6955 +
6956 + if (queue->process != queue->local_insert) {
6957 + vchiq_complete_bulk(bulk);
6958 +
6959 + vchiq_log_info(vchiq_core_msg_log_level,
6960 + "%s %c%c%c%c d:%d ABORTED - tx len:%d, "
6961 + "rx len:%d",
6962 + is_tx ? "Send Bulk to" : "Recv Bulk from",
6963 + VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
6964 + service->remoteport,
6965 + bulk->size,
6966 + bulk->remote_size);
6967 + } else {
6968 + /* fabricate a matching dummy bulk */
6969 + bulk->data = NULL;
6970 + bulk->size = 0;
6971 + bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
6972 + bulk->dir = is_tx ? VCHIQ_BULK_TRANSMIT :
6973 + VCHIQ_BULK_RECEIVE;
6974 + queue->local_insert++;
6975 + }
6976 +
6977 + queue->process++;
6978 + }
6979 +}
6980 +
6981 +/* Called from the slot handler thread */
6982 +static void
6983 +pause_bulks(VCHIQ_STATE_T *state)
6984 +{
6985 + if (unlikely(atomic_inc_return(&pause_bulks_count) != 1)) {
6986 + WARN_ON_ONCE(1);
6987 + atomic_set(&pause_bulks_count, 1);
6988 + return;
6989 + }
6990 +
6991 + /* Block bulk transfers from all services */
6992 + mutex_lock(&state->bulk_transfer_mutex);
6993 +}
6994 +
6995 +/* Called from the slot handler thread */
6996 +static void
6997 +resume_bulks(VCHIQ_STATE_T *state)
6998 +{
6999 + int i;
7000 + if (unlikely(atomic_dec_return(&pause_bulks_count) != 0)) {
7001 + WARN_ON_ONCE(1);
7002 + atomic_set(&pause_bulks_count, 0);
7003 + return;
7004 + }
7005 +
7006 + /* Allow bulk transfers from all services */
7007 + mutex_unlock(&state->bulk_transfer_mutex);
7008 +
7009 + if (state->deferred_bulks == 0)
7010 + return;
7011 +
7012 + /* Deal with any bulks which had to be deferred due to being in
7013 + * paused state. Don't try to match up to number of deferred bulks
7014 + * in case we've had something come and close the service in the
7015 + * interim - just process all bulk queues for all services */
7016 + vchiq_log_info(vchiq_core_log_level, "%s: processing %d deferred bulks",
7017 + __func__, state->deferred_bulks);
7018 +
7019 + for (i = 0; i < state->unused_service; i++) {
7020 + VCHIQ_SERVICE_T *service = state->services[i];
7021 + int resolved_rx = 0;
7022 + int resolved_tx = 0;
7023 + if (!service || (service->srvstate != VCHIQ_SRVSTATE_OPEN))
7024 + continue;
7025 +
7026 + mutex_lock(&service->bulk_mutex);
7027 + resolved_rx = resolve_bulks(service, &service->bulk_rx);
7028 + resolved_tx = resolve_bulks(service, &service->bulk_tx);
7029 + mutex_unlock(&service->bulk_mutex);
7030 + if (resolved_rx)
7031 + notify_bulks(service, &service->bulk_rx, 1);
7032 + if (resolved_tx)
7033 + notify_bulks(service, &service->bulk_tx, 1);
7034 + }
7035 + state->deferred_bulks = 0;
7036 +}
7037 +
7038 +static int
7039 +parse_open(VCHIQ_STATE_T *state, VCHIQ_HEADER_T *header)
7040 +{
7041 + VCHIQ_SERVICE_T *service = NULL;
7042 + int msgid, size;
7043 + int type;
7044 + unsigned int localport, remoteport;
7045 +
7046 + msgid = header->msgid;
7047 + size = header->size;
7048 + type = VCHIQ_MSG_TYPE(msgid);
7049 + localport = VCHIQ_MSG_DSTPORT(msgid);
7050 + remoteport = VCHIQ_MSG_SRCPORT(msgid);
7051 + if (size >= sizeof(struct vchiq_open_payload)) {
7052 + const struct vchiq_open_payload *payload =
7053 + (struct vchiq_open_payload *)header->data;
7054 + unsigned int fourcc;
7055 +
7056 + fourcc = payload->fourcc;
7057 + vchiq_log_info(vchiq_core_log_level,
7058 + "%d: prs OPEN@%x (%d->'%c%c%c%c')",
7059 + state->id, (unsigned int)header,
7060 + localport,
7061 + VCHIQ_FOURCC_AS_4CHARS(fourcc));
7062 +
7063 + service = get_listening_service(state, fourcc);
7064 +
7065 + if (service) {
7066 + /* A matching service exists */
7067 + short version = payload->version;
7068 + short version_min = payload->version_min;
7069 + if ((service->version < version_min) ||
7070 + (version < service->version_min)) {
7071 + /* Version mismatch */
7072 + vchiq_loud_error_header();
7073 + vchiq_loud_error("%d: service %d (%c%c%c%c) "
7074 + "version mismatch - local (%d, min %d)"
7075 + " vs. remote (%d, min %d)",
7076 + state->id, service->localport,
7077 + VCHIQ_FOURCC_AS_4CHARS(fourcc),
7078 + service->version, service->version_min,
7079 + version, version_min);
7080 + vchiq_loud_error_footer();
7081 + unlock_service(service);
7082 + service = NULL;
7083 + goto fail_open;
7084 + }
7085 + service->peer_version = version;
7086 +
7087 + if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
7088 + struct vchiq_openack_payload ack_payload = {
7089 + service->version
7090 + };
7091 + VCHIQ_ELEMENT_T body = {
7092 + &ack_payload,
7093 + sizeof(ack_payload)
7094 + };
7095 +
7096 + /* Acknowledge the OPEN */
7097 + if (service->sync) {
7098 + if (queue_message_sync(state, NULL,
7099 + VCHIQ_MAKE_MSG(
7100 + VCHIQ_MSG_OPENACK,
7101 + service->localport,
7102 + remoteport),
7103 + &body, 1, sizeof(ack_payload),
7104 + 0) == VCHIQ_RETRY)
7105 + goto bail_not_ready;
7106 + } else {
7107 + if (queue_message(state, NULL,
7108 + VCHIQ_MAKE_MSG(
7109 + VCHIQ_MSG_OPENACK,
7110 + service->localport,
7111 + remoteport),
7112 + &body, 1, sizeof(ack_payload),
7113 + 0) == VCHIQ_RETRY)
7114 + goto bail_not_ready;
7115 + }
7116 +
7117 + /* The service is now open */
7118 + vchiq_set_service_state(service,
7119 + service->sync ? VCHIQ_SRVSTATE_OPENSYNC
7120 + : VCHIQ_SRVSTATE_OPEN);
7121 + }
7122 +
7123 + service->remoteport = remoteport;
7124 + service->client_id = ((int *)header->data)[1];
7125 + if (make_service_callback(service, VCHIQ_SERVICE_OPENED,
7126 + NULL, NULL) == VCHIQ_RETRY) {
7127 + /* Bail out if not ready */
7128 + service->remoteport = VCHIQ_PORT_FREE;
7129 + goto bail_not_ready;
7130 + }
7131 +
7132 + /* Success - the message has been dealt with */
7133 + unlock_service(service);
7134 + return 1;
7135 + }
7136 + }
7137 +
7138 +fail_open:
7139 + /* No available service, or an invalid request - send a CLOSE */
7140 + if (queue_message(state, NULL,
7141 + VCHIQ_MAKE_MSG(VCHIQ_MSG_CLOSE, 0, VCHIQ_MSG_SRCPORT(msgid)),
7142 + NULL, 0, 0, 0) == VCHIQ_RETRY)
7143 + goto bail_not_ready;
7144 +
7145 + return 1;
7146 +
7147 +bail_not_ready:
7148 + if (service)
7149 + unlock_service(service);
7150 +
7151 + return 0;
7152 +}
7153 +
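parse_open() rejects the OPEN whenever either side's version falls below the other side's advertised minimum; otherwise it records the peer version and acknowledges. A compact sketch of that compatibility rule (the helper name is illustrative):

#include <stdio.h>

/* compatible only if each side's version meets the other side's minimum */
static int versions_compatible(short local_ver, short local_min,
                               short remote_ver, short remote_min)
{
    return (local_ver >= remote_min) && (remote_ver >= local_min);
}

int main(void)
{
    printf("%d\n", versions_compatible(3, 2, 4, 3));  /* 1: compatible */
    printf("%d\n", versions_compatible(2, 2, 4, 3));  /* 0: local side too old */
    return 0;
}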
7154 +/* Called by the slot handler thread */
7155 +static void
7156 +parse_rx_slots(VCHIQ_STATE_T *state)
7157 +{
7158 + VCHIQ_SHARED_STATE_T *remote = state->remote;
7159 + VCHIQ_SERVICE_T *service = NULL;
7160 + int tx_pos;
7161 + DEBUG_INITIALISE(state->local)
7162 +
7163 + tx_pos = remote->tx_pos;
7164 +
7165 + while (state->rx_pos != tx_pos) {
7166 + VCHIQ_HEADER_T *header;
7167 + int msgid, size;
7168 + int type;
7169 + unsigned int localport, remoteport;
7170 +
7171 + DEBUG_TRACE(PARSE_LINE);
7172 + if (!state->rx_data) {
7173 + int rx_index;
7174 + WARN_ON(!((state->rx_pos & VCHIQ_SLOT_MASK) == 0));
7175 + rx_index = remote->slot_queue[
7176 + SLOT_QUEUE_INDEX_FROM_POS(state->rx_pos) &
7177 + VCHIQ_SLOT_QUEUE_MASK];
7178 + state->rx_data = (char *)SLOT_DATA_FROM_INDEX(state,
7179 + rx_index);
7180 + state->rx_info = SLOT_INFO_FROM_INDEX(state, rx_index);
7181 +
7182 + /* Initialise use_count to one, and increment
7183 + ** release_count at the end of the slot to avoid
7184 + ** releasing the slot prematurely. */
7185 + state->rx_info->use_count = 1;
7186 + state->rx_info->release_count = 0;
7187 + }
7188 +
7189 + header = (VCHIQ_HEADER_T *)(state->rx_data +
7190 + (state->rx_pos & VCHIQ_SLOT_MASK));
7191 + DEBUG_VALUE(PARSE_HEADER, (int)header);
7192 + msgid = header->msgid;
7193 + DEBUG_VALUE(PARSE_MSGID, msgid);
7194 + size = header->size;
7195 + type = VCHIQ_MSG_TYPE(msgid);
7196 + localport = VCHIQ_MSG_DSTPORT(msgid);
7197 + remoteport = VCHIQ_MSG_SRCPORT(msgid);
7198 +
7199 + if (type != VCHIQ_MSG_DATA)
7200 + VCHIQ_STATS_INC(state, ctrl_rx_count);
7201 +
7202 + switch (type) {
7203 + case VCHIQ_MSG_OPENACK:
7204 + case VCHIQ_MSG_CLOSE:
7205 + case VCHIQ_MSG_DATA:
7206 + case VCHIQ_MSG_BULK_RX:
7207 + case VCHIQ_MSG_BULK_TX:
7208 + case VCHIQ_MSG_BULK_RX_DONE:
7209 + case VCHIQ_MSG_BULK_TX_DONE:
7210 + service = find_service_by_port(state, localport);
7211 + if ((!service || service->remoteport != remoteport) &&
7212 + (localport == 0) &&
7213 + (type == VCHIQ_MSG_CLOSE)) {
7214 + /* This could be a CLOSE from a client which
7215 + hadn't yet received the OPENACK - look for
7216 + the connected service */
7217 + if (service)
7218 + unlock_service(service);
7219 + service = get_connected_service(state,
7220 + remoteport);
7221 + if (service)
7222 + vchiq_log_warning(vchiq_core_log_level,
7223 + "%d: prs %s@%x (%d->%d) - "
7224 + "found connected service %d",
7225 + state->id, msg_type_str(type),
7226 + (unsigned int)header,
7227 + remoteport, localport,
7228 + service->localport);
7229 + }
7230 +
7231 + if (!service) {
7232 + vchiq_log_error(vchiq_core_log_level,
7233 + "%d: prs %s@%x (%d->%d) - "
7234 + "invalid/closed service %d",
7235 + state->id, msg_type_str(type),
7236 + (unsigned int)header,
7237 + remoteport, localport, localport);
7238 + goto skip_message;
7239 + }
7240 + break;
7241 + default:
7242 + break;
7243 + }
7244 +
7245 + if (vchiq_core_msg_log_level >= VCHIQ_LOG_INFO) {
7246 + int svc_fourcc;
7247 +
7248 + svc_fourcc = service
7249 + ? service->base.fourcc
7250 + : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
7251 + vchiq_log_info(vchiq_core_msg_log_level,
7252 + "Rcvd Msg %s(%u) from %c%c%c%c s:%d d:%d "
7253 + "len:%d",
7254 + msg_type_str(type), type,
7255 + VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
7256 + remoteport, localport, size);
7257 + if (size > 0)
7258 + vchiq_log_dump_mem("Rcvd", 0, header->data,
7259 + min(64, size));
7260 + }
7261 +
7262 + if (((unsigned int)header & VCHIQ_SLOT_MASK) + calc_stride(size)
7263 + > VCHIQ_SLOT_SIZE) {
7264 + vchiq_log_error(vchiq_core_log_level,
7265 + "header %x (msgid %x) - size %x too big for "
7266 + "slot",
7267 + (unsigned int)header, (unsigned int)msgid,
7268 + (unsigned int)size);
7269 + WARN(1, "oversized for slot\n");
7270 + }
7271 +
7272 + switch (type) {
7273 + case VCHIQ_MSG_OPEN:
7274 + WARN_ON(!(VCHIQ_MSG_DSTPORT(msgid) == 0));
7275 + if (!parse_open(state, header))
7276 + goto bail_not_ready;
7277 + break;
7278 + case VCHIQ_MSG_OPENACK:
7279 + if (size >= sizeof(struct vchiq_openack_payload)) {
7280 + const struct vchiq_openack_payload *payload =
7281 + (struct vchiq_openack_payload *)
7282 + header->data;
7283 + service->peer_version = payload->version;
7284 + }
7285 + vchiq_log_info(vchiq_core_log_level,
7286 + "%d: prs OPENACK@%x,%x (%d->%d) v:%d",
7287 + state->id, (unsigned int)header, size,
7288 + remoteport, localport, service->peer_version);
7289 + if (service->srvstate ==
7290 + VCHIQ_SRVSTATE_OPENING) {
7291 + service->remoteport = remoteport;
7292 + vchiq_set_service_state(service,
7293 + VCHIQ_SRVSTATE_OPEN);
7294 + up(&service->remove_event);
7295 + } else
7296 + vchiq_log_error(vchiq_core_log_level,
7297 + "OPENACK received in state %s",
7298 + srvstate_names[service->srvstate]);
7299 + break;
7300 + case VCHIQ_MSG_CLOSE:
7301 + WARN_ON(size != 0); /* There should be no data */
7302 +
7303 + vchiq_log_info(vchiq_core_log_level,
7304 + "%d: prs CLOSE@%x (%d->%d)",
7305 + state->id, (unsigned int)header,
7306 + remoteport, localport);
7307 +
7308 + mark_service_closing_internal(service, 1);
7309 +
7310 + if (vchiq_close_service_internal(service,
7311 + 1/*close_recvd*/) == VCHIQ_RETRY)
7312 + goto bail_not_ready;
7313 +
7314 + vchiq_log_info(vchiq_core_log_level,
7315 + "Close Service %c%c%c%c s:%u d:%d",
7316 + VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
7317 + service->localport,
7318 + service->remoteport);
7319 + break;
7320 + case VCHIQ_MSG_DATA:
7321 + vchiq_log_trace(vchiq_core_log_level,
7322 + "%d: prs DATA@%x,%x (%d->%d)",
7323 + state->id, (unsigned int)header, size,
7324 + remoteport, localport);
7325 +
7326 + if ((service->remoteport == remoteport)
7327 + && (service->srvstate ==
7328 + VCHIQ_SRVSTATE_OPEN)) {
7329 + header->msgid = msgid | VCHIQ_MSGID_CLAIMED;
7330 + claim_slot(state->rx_info);
7331 + DEBUG_TRACE(PARSE_LINE);
7332 + if (make_service_callback(service,
7333 + VCHIQ_MESSAGE_AVAILABLE, header,
7334 + NULL) == VCHIQ_RETRY) {
7335 + DEBUG_TRACE(PARSE_LINE);
7336 + goto bail_not_ready;
7337 + }
7338 + VCHIQ_SERVICE_STATS_INC(service, ctrl_rx_count);
7339 + VCHIQ_SERVICE_STATS_ADD(service, ctrl_rx_bytes,
7340 + size);
7341 + } else {
7342 + VCHIQ_STATS_INC(state, error_count);
7343 + }
7344 + break;
7345 + case VCHIQ_MSG_CONNECT:
7346 + vchiq_log_info(vchiq_core_log_level,
7347 + "%d: prs CONNECT@%x",
7348 + state->id, (unsigned int)header);
7349 + up(&state->connect);
7350 + break;
7351 + case VCHIQ_MSG_BULK_RX:
7352 + case VCHIQ_MSG_BULK_TX: {
7353 + VCHIQ_BULK_QUEUE_T *queue;
7354 + WARN_ON(!state->is_master);
7355 + queue = (type == VCHIQ_MSG_BULK_RX) ?
7356 + &service->bulk_tx : &service->bulk_rx;
7357 + if ((service->remoteport == remoteport)
7358 + && (service->srvstate ==
7359 + VCHIQ_SRVSTATE_OPEN)) {
7360 + VCHIQ_BULK_T *bulk;
7361 + int resolved = 0;
7362 +
7363 + DEBUG_TRACE(PARSE_LINE);
7364 + if (mutex_lock_interruptible(
7365 + &service->bulk_mutex) != 0) {
7366 + DEBUG_TRACE(PARSE_LINE);
7367 + goto bail_not_ready;
7368 + }
7369 +
7370 + WARN_ON(!(queue->remote_insert < queue->remove +
7371 + VCHIQ_NUM_SERVICE_BULKS));
7372 + bulk = &queue->bulks[
7373 + BULK_INDEX(queue->remote_insert)];
7374 + bulk->remote_data =
7375 + (void *)((int *)header->data)[0];
7376 + bulk->remote_size = ((int *)header->data)[1];
7377 + wmb();
7378 +
7379 + vchiq_log_info(vchiq_core_log_level,
7380 + "%d: prs %s@%x (%d->%d) %x@%x",
7381 + state->id, msg_type_str(type),
7382 + (unsigned int)header,
7383 + remoteport, localport,
7384 + bulk->remote_size,
7385 + (unsigned int)bulk->remote_data);
7386 +
7387 + queue->remote_insert++;
7388 +
7389 + if (atomic_read(&pause_bulks_count)) {
7390 + state->deferred_bulks++;
7391 + vchiq_log_info(vchiq_core_log_level,
7392 + "%s: deferring bulk (%d)",
7393 + __func__,
7394 + state->deferred_bulks);
7395 + if (state->conn_state !=
7396 + VCHIQ_CONNSTATE_PAUSE_SENT)
7397 + vchiq_log_error(
7398 + vchiq_core_log_level,
7399 + "%s: bulks paused in "
7400 + "unexpected state %s",
7401 + __func__,
7402 + conn_state_names[
7403 + state->conn_state]);
7404 + } else if (state->conn_state ==
7405 + VCHIQ_CONNSTATE_CONNECTED) {
7406 + DEBUG_TRACE(PARSE_LINE);
7407 + resolved = resolve_bulks(service,
7408 + queue);
7409 + }
7410 +
7411 + mutex_unlock(&service->bulk_mutex);
7412 + if (resolved)
7413 + notify_bulks(service, queue,
7414 + 1/*retry_poll*/);
7415 + }
7416 + } break;
7417 + case VCHIQ_MSG_BULK_RX_DONE:
7418 + case VCHIQ_MSG_BULK_TX_DONE:
7419 + WARN_ON(state->is_master);
7420 + if ((service->remoteport == remoteport)
7421 + && (service->srvstate !=
7422 + VCHIQ_SRVSTATE_FREE)) {
7423 + VCHIQ_BULK_QUEUE_T *queue;
7424 + VCHIQ_BULK_T *bulk;
7425 +
7426 + queue = (type == VCHIQ_MSG_BULK_RX_DONE) ?
7427 + &service->bulk_rx : &service->bulk_tx;
7428 +
7429 + DEBUG_TRACE(PARSE_LINE);
7430 + if (mutex_lock_interruptible(
7431 + &service->bulk_mutex) != 0) {
7432 + DEBUG_TRACE(PARSE_LINE);
7433 + goto bail_not_ready;
7434 + }
7435 + if ((int)(queue->remote_insert -
7436 + queue->local_insert) >= 0) {
7437 + vchiq_log_error(vchiq_core_log_level,
7438 + "%d: prs %s@%x (%d->%d) "
7439 + "unexpected (ri=%d,li=%d)",
7440 + state->id, msg_type_str(type),
7441 + (unsigned int)header,
7442 + remoteport, localport,
7443 + queue->remote_insert,
7444 + queue->local_insert);
7445 + mutex_unlock(&service->bulk_mutex);
7446 + break;
7447 + }
7448 +
7449 + BUG_ON(queue->process == queue->local_insert);
7450 + BUG_ON(queue->process != queue->remote_insert);
7451 +
7452 + bulk = &queue->bulks[
7453 + BULK_INDEX(queue->remote_insert)];
7454 + bulk->actual = *(int *)header->data;
7455 + queue->remote_insert++;
7456 +
7457 + vchiq_log_info(vchiq_core_log_level,
7458 + "%d: prs %s@%x (%d->%d) %x@%x",
7459 + state->id, msg_type_str(type),
7460 + (unsigned int)header,
7461 + remoteport, localport,
7462 + bulk->actual, (unsigned int)bulk->data);
7463 +
7464 + vchiq_log_trace(vchiq_core_log_level,
7465 + "%d: prs:%d %cx li=%x ri=%x p=%x",
7466 + state->id, localport,
7467 + (type == VCHIQ_MSG_BULK_RX_DONE) ?
7468 + 'r' : 't',
7469 + queue->local_insert,
7470 + queue->remote_insert, queue->process);
7471 +
7472 + DEBUG_TRACE(PARSE_LINE);
7473 + WARN_ON(queue->process == queue->local_insert);
7474 + vchiq_complete_bulk(bulk);
7475 + queue->process++;
7476 + mutex_unlock(&service->bulk_mutex);
7477 + DEBUG_TRACE(PARSE_LINE);
7478 + notify_bulks(service, queue, 1/*retry_poll*/);
7479 + DEBUG_TRACE(PARSE_LINE);
7480 + }
7481 + break;
7482 + case VCHIQ_MSG_PADDING:
7483 + vchiq_log_trace(vchiq_core_log_level,
7484 + "%d: prs PADDING@%x,%x",
7485 + state->id, (unsigned int)header, size);
7486 + break;
7487 + case VCHIQ_MSG_PAUSE:
7488 + /* If initiated, signal the application thread */
7489 + vchiq_log_trace(vchiq_core_log_level,
7490 + "%d: prs PAUSE@%x,%x",
7491 + state->id, (unsigned int)header, size);
7492 + if (state->conn_state == VCHIQ_CONNSTATE_PAUSED) {
7493 + vchiq_log_error(vchiq_core_log_level,
7494 + "%d: PAUSE received in state PAUSED",
7495 + state->id);
7496 + break;
7497 + }
7498 + if (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT) {
7499 + /* Send a PAUSE in response */
7500 + if (queue_message(state, NULL,
7501 + VCHIQ_MAKE_MSG(VCHIQ_MSG_PAUSE, 0, 0),
7502 + NULL, 0, 0, 0) == VCHIQ_RETRY)
7503 + goto bail_not_ready;
7504 + if (state->is_master)
7505 + pause_bulks(state);
7506 + }
7507 + /* At this point slot_mutex is held */
7508 + vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSED);
7509 + vchiq_platform_paused(state);
7510 + break;
7511 + case VCHIQ_MSG_RESUME:
7512 + vchiq_log_trace(vchiq_core_log_level,
7513 + "%d: prs RESUME@%x,%x",
7514 + state->id, (unsigned int)header, size);
7515 + /* Release the slot mutex */
7516 + mutex_unlock(&state->slot_mutex);
7517 + if (state->is_master)
7518 + resume_bulks(state);
7519 + vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
7520 + vchiq_platform_resumed(state);
7521 + break;
7522 +
7523 + case VCHIQ_MSG_REMOTE_USE:
7524 + vchiq_on_remote_use(state);
7525 + break;
7526 + case VCHIQ_MSG_REMOTE_RELEASE:
7527 + vchiq_on_remote_release(state);
7528 + break;
7529 + case VCHIQ_MSG_REMOTE_USE_ACTIVE:
7530 + vchiq_on_remote_use_active(state);
7531 + break;
7532 +
7533 + default:
7534 + vchiq_log_error(vchiq_core_log_level,
7535 + "%d: prs invalid msgid %x@%x,%x",
7536 + state->id, msgid, (unsigned int)header, size);
7537 + WARN(1, "invalid message\n");
7538 + break;
7539 + }
7540 +
7541 +skip_message:
7542 + if (service) {
7543 + unlock_service(service);
7544 + service = NULL;
7545 + }
7546 +
7547 + state->rx_pos += calc_stride(size);
7548 +
7549 + DEBUG_TRACE(PARSE_LINE);
7550 + /* Perform some housekeeping when the end of the slot is
7551 + ** reached. */
7552 + if ((state->rx_pos & VCHIQ_SLOT_MASK) == 0) {
7553 + /* Remove the extra reference count. */
7554 + release_slot(state, state->rx_info, NULL, NULL);
7555 + state->rx_data = NULL;
7556 + }
7557 + }
7558 +
7559 +bail_not_ready:
7560 + if (service)
7561 + unlock_service(service);
7562 +}
7563 +
7564 +/* Called by the slot handler thread */
7565 +static int
7566 +slot_handler_func(void *v)
7567 +{
7568 + VCHIQ_STATE_T *state = (VCHIQ_STATE_T *) v;
7569 + VCHIQ_SHARED_STATE_T *local = state->local;
7570 + DEBUG_INITIALISE(local)
7571 +
7572 + while (1) {
7573 + DEBUG_COUNT(SLOT_HANDLER_COUNT);
7574 + DEBUG_TRACE(SLOT_HANDLER_LINE);
7575 + remote_event_wait(&local->trigger);
7576 +
7577 + rmb();
7578 +
7579 + DEBUG_TRACE(SLOT_HANDLER_LINE);
7580 + if (state->poll_needed) {
7581 + /* Check if we need to suspend - may change our
7582 + * conn_state */
7583 + vchiq_platform_check_suspend(state);
7584 +
7585 + state->poll_needed = 0;
7586 +
7587 + /* Handle service polling and other rare conditions here
7588 + ** out of the mainline code */
7589 + switch (state->conn_state) {
7590 + case VCHIQ_CONNSTATE_CONNECTED:
7591 + /* Poll the services as requested */
7592 + poll_services(state);
7593 + break;
7594 +
7595 + case VCHIQ_CONNSTATE_PAUSING:
7596 + if (state->is_master)
7597 + pause_bulks(state);
7598 + if (queue_message(state, NULL,
7599 + VCHIQ_MAKE_MSG(VCHIQ_MSG_PAUSE, 0, 0),
7600 + NULL, 0, 0, 0) != VCHIQ_RETRY) {
7601 + vchiq_set_conn_state(state,
7602 + VCHIQ_CONNSTATE_PAUSE_SENT);
7603 + } else {
7604 + if (state->is_master)
7605 + resume_bulks(state);
7606 + /* Retry later */
7607 + state->poll_needed = 1;
7608 + }
7609 + break;
7610 +
7611 + case VCHIQ_CONNSTATE_PAUSED:
7612 + vchiq_platform_resume(state);
7613 + break;
7614 +
7615 + case VCHIQ_CONNSTATE_RESUMING:
7616 + if (queue_message(state, NULL,
7617 + VCHIQ_MAKE_MSG(VCHIQ_MSG_RESUME, 0, 0),
7618 + NULL, 0, 0, 0) != VCHIQ_RETRY) {
7619 + if (state->is_master)
7620 + resume_bulks(state);
7621 + vchiq_set_conn_state(state,
7622 + VCHIQ_CONNSTATE_CONNECTED);
7623 + vchiq_platform_resumed(state);
7624 + } else {
7625 + /* This should really be impossible,
7626 + ** since the PAUSE should have flushed
7627 + ** through outstanding messages. */
7628 + vchiq_log_error(vchiq_core_log_level,
7629 + "Failed to send RESUME "
7630 + "message");
7631 + BUG();
7632 + }
7633 + break;
7634 +
7635 + case VCHIQ_CONNSTATE_PAUSE_TIMEOUT:
7636 + case VCHIQ_CONNSTATE_RESUME_TIMEOUT:
7637 + vchiq_platform_handle_timeout(state);
7638 + break;
7639 + default:
7640 + break;
7641 + }
7642 +
7643 +
7644 + }
7645 +
7646 + DEBUG_TRACE(SLOT_HANDLER_LINE);
7647 + parse_rx_slots(state);
7648 + }
7649 + return 0;
7650 +}
7651 +
7652 +
7653 +/* Called by the recycle thread */
7654 +static int
7655 +recycle_func(void *v)
7656 +{
7657 + VCHIQ_STATE_T *state = (VCHIQ_STATE_T *) v;
7658 + VCHIQ_SHARED_STATE_T *local = state->local;
7659 +
7660 + while (1) {
7661 + remote_event_wait(&local->recycle);
7662 +
7663 + process_free_queue(state);
7664 + }
7665 + return 0;
7666 +}
7667 +
7668 +
7669 +/* Called by the sync thread */
7670 +static int
7671 +sync_func(void *v)
7672 +{
7673 + VCHIQ_STATE_T *state = (VCHIQ_STATE_T *) v;
7674 + VCHIQ_SHARED_STATE_T *local = state->local;
7675 + VCHIQ_HEADER_T *header = (VCHIQ_HEADER_T *)SLOT_DATA_FROM_INDEX(state,
7676 + state->remote->slot_sync);
7677 +
7678 + while (1) {
7679 + VCHIQ_SERVICE_T *service;
7680 + int msgid, size;
7681 + int type;
7682 + unsigned int localport, remoteport;
7683 +
7684 + remote_event_wait(&local->sync_trigger);
7685 +
7686 + rmb();
7687 +
7688 + msgid = header->msgid;
7689 + size = header->size;
7690 + type = VCHIQ_MSG_TYPE(msgid);
7691 + localport = VCHIQ_MSG_DSTPORT(msgid);
7692 + remoteport = VCHIQ_MSG_SRCPORT(msgid);
7693 +
7694 + service = find_service_by_port(state, localport);
7695 +
7696 + if (!service) {
7697 + vchiq_log_error(vchiq_sync_log_level,
7698 + "%d: sf %s@%x (%d->%d) - "
7699 + "invalid/closed service %d",
7700 + state->id, msg_type_str(type),
7701 + (unsigned int)header,
7702 + remoteport, localport, localport);
7703 + release_message_sync(state, header);
7704 + continue;
7705 + }
7706 +
7707 + if (vchiq_sync_log_level >= VCHIQ_LOG_TRACE) {
7708 + int svc_fourcc;
7709 +
7710 + svc_fourcc = service
7711 + ? service->base.fourcc
7712 + : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
7713 + vchiq_log_trace(vchiq_sync_log_level,
7714 + "Rcvd Msg %s from %c%c%c%c s:%d d:%d len:%d",
7715 + msg_type_str(type),
7716 + VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
7717 + remoteport, localport, size);
7718 + if (size > 0)
7719 + vchiq_log_dump_mem("Rcvd", 0, header->data,
7720 + min(64, size));
7721 + }
7722 +
7723 + switch (type) {
7724 + case VCHIQ_MSG_OPENACK:
7725 + if (size >= sizeof(struct vchiq_openack_payload)) {
7726 + const struct vchiq_openack_payload *payload =
7727 + (struct vchiq_openack_payload *)
7728 + header->data;
7729 + service->peer_version = payload->version;
7730 + }
7731 + vchiq_log_info(vchiq_sync_log_level,
7732 + "%d: sf OPENACK@%x,%x (%d->%d) v:%d",
7733 + state->id, (unsigned int)header, size,
7734 + remoteport, localport, service->peer_version);
7735 + if (service->srvstate == VCHIQ_SRVSTATE_OPENING) {
7736 + service->remoteport = remoteport;
7737 + vchiq_set_service_state(service,
7738 + VCHIQ_SRVSTATE_OPENSYNC);
7739 + up(&service->remove_event);
7740 + }
7741 + release_message_sync(state, header);
7742 + break;
7743 +
7744 + case VCHIQ_MSG_DATA:
7745 + vchiq_log_trace(vchiq_sync_log_level,
7746 + "%d: sf DATA@%x,%x (%d->%d)",
7747 + state->id, (unsigned int)header, size,
7748 + remoteport, localport);
7749 +
7750 + if ((service->remoteport == remoteport) &&
7751 + (service->srvstate ==
7752 + VCHIQ_SRVSTATE_OPENSYNC)) {
7753 + if (make_service_callback(service,
7754 + VCHIQ_MESSAGE_AVAILABLE, header,
7755 + NULL) == VCHIQ_RETRY)
7756 + vchiq_log_error(vchiq_sync_log_level,
7757 + "synchronous callback to "
7758 + "service %d returns "
7759 + "VCHIQ_RETRY",
7760 + localport);
7761 + }
7762 + break;
7763 +
7764 + default:
7765 + vchiq_log_error(vchiq_sync_log_level,
7766 + "%d: sf unexpected msgid %x@%x,%x",
7767 + state->id, msgid, (unsigned int)header, size);
7768 + release_message_sync(state, header);
7769 + break;
7770 + }
7771 +
7772 + unlock_service(service);
7773 + }
7774 +
7775 + return 0;
7776 +}
7777 +
7778 +
7779 +static void
7780 +init_bulk_queue(VCHIQ_BULK_QUEUE_T *queue)
7781 +{
7782 + queue->local_insert = 0;
7783 + queue->remote_insert = 0;
7784 + queue->process = 0;
7785 + queue->remote_notify = 0;
7786 + queue->remove = 0;
7787 +}
7788 +
7789 +
7790 +inline const char *
7791 +get_conn_state_name(VCHIQ_CONNSTATE_T conn_state)
7792 +{
7793 + return conn_state_names[conn_state];
7794 +}
7795 +
7796 +
7797 +VCHIQ_SLOT_ZERO_T *
7798 +vchiq_init_slots(void *mem_base, int mem_size)
7799 +{
7800 + int mem_align = (VCHIQ_SLOT_SIZE - (int)mem_base) & VCHIQ_SLOT_MASK;
7801 + VCHIQ_SLOT_ZERO_T *slot_zero =
7802 + (VCHIQ_SLOT_ZERO_T *)((char *)mem_base + mem_align);
7803 + int num_slots = (mem_size - mem_align)/VCHIQ_SLOT_SIZE;
7804 + int first_data_slot = VCHIQ_SLOT_ZERO_SLOTS;
7805 +
7806 + /* Ensure there is enough memory to run an absolutely minimum system */
7807 + num_slots -= first_data_slot;
7808 +
7809 + if (num_slots < 4) {
7810 + vchiq_log_error(vchiq_core_log_level,
7811 + "vchiq_init_slots - insufficient memory %x bytes",
7812 + mem_size);
7813 + return NULL;
7814 + }
7815 +
7816 + memset(slot_zero, 0, sizeof(VCHIQ_SLOT_ZERO_T));
7817 +
7818 + slot_zero->magic = VCHIQ_MAGIC;
7819 + slot_zero->version = VCHIQ_VERSION;
7820 + slot_zero->version_min = VCHIQ_VERSION_MIN;
7821 + slot_zero->slot_zero_size = sizeof(VCHIQ_SLOT_ZERO_T);
7822 + slot_zero->slot_size = VCHIQ_SLOT_SIZE;
7823 + slot_zero->max_slots = VCHIQ_MAX_SLOTS;
7824 + slot_zero->max_slots_per_side = VCHIQ_MAX_SLOTS_PER_SIDE;
7825 +
7826 + slot_zero->master.slot_sync = first_data_slot;
7827 + slot_zero->master.slot_first = first_data_slot + 1;
7828 + slot_zero->master.slot_last = first_data_slot + (num_slots/2) - 1;
7829 + slot_zero->slave.slot_sync = first_data_slot + (num_slots/2);
7830 + slot_zero->slave.slot_first = first_data_slot + (num_slots/2) + 1;
7831 + slot_zero->slave.slot_last = first_data_slot + num_slots - 1;
7832 +
7833 + return slot_zero;
7834 +}
7835 +
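vchiq_init_slots() aligns the region, reserves the leading slots for the shared VCHIQ_SLOT_ZERO_T, then splits the remaining data slots evenly between master and slave, with the first slot of each half serving as that side's sync slot. A worked example with assumed values (4 KiB slots, two slots consumed by slot zero; the real constants are in vchiq_core.h):

#include <stdio.h>

int main(void)
{
    int slot_size = 4096;                      /* assumed VCHIQ_SLOT_SIZE */
    int mem_size = 64 * 1024;                  /* hypothetical shared region */
    int first_data_slot = 2;                   /* assumed VCHIQ_SLOT_ZERO_SLOTS */
    int num_slots = mem_size / slot_size - first_data_slot;

    printf("master sync slot %d, data slots %d..%d\n",
           first_data_slot, first_data_slot + 1,
           first_data_slot + num_slots / 2 - 1);
    printf("slave  sync slot %d, data slots %d..%d\n",
           first_data_slot + num_slots / 2,
           first_data_slot + num_slots / 2 + 1,
           first_data_slot + num_slots - 1);
    return 0;
}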
7836 +VCHIQ_STATUS_T
7837 +vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero,
7838 + int is_master)
7839 +{
7840 + VCHIQ_SHARED_STATE_T *local;
7841 + VCHIQ_SHARED_STATE_T *remote;
7842 + VCHIQ_STATUS_T status;
7843 + char threadname[10];
7844 + static int id;
7845 + int i;
7846 +
7847 + vchiq_log_warning(vchiq_core_log_level,
7848 + "%s: slot_zero = 0x%08lx, is_master = %d",
7849 + __func__, (unsigned long)slot_zero, is_master);
7850 +
7851 + /* Check the input configuration */
7852 +
7853 + if (slot_zero->magic != VCHIQ_MAGIC) {
7854 + vchiq_loud_error_header();
7855 + vchiq_loud_error("Invalid VCHIQ magic value found.");
7856 + vchiq_loud_error("slot_zero=%x: magic=%x (expected %x)",
7857 + (unsigned int)slot_zero, slot_zero->magic, VCHIQ_MAGIC);
7858 + vchiq_loud_error_footer();
7859 + return VCHIQ_ERROR;
7860 + }
7861 +
7862 + if (slot_zero->version < VCHIQ_VERSION_MIN) {
7863 + vchiq_loud_error_header();
7864 + vchiq_loud_error("Incompatible VCHIQ versions found.");
7865 + vchiq_loud_error("slot_zero=%x: VideoCore version=%d "
7866 + "(minimum %d)",
7867 + (unsigned int)slot_zero, slot_zero->version,
7868 + VCHIQ_VERSION_MIN);
7869 + vchiq_loud_error("Restart with a newer VideoCore image.");
7870 + vchiq_loud_error_footer();
7871 + return VCHIQ_ERROR;
7872 + }
7873 +
7874 + if (VCHIQ_VERSION < slot_zero->version_min) {
7875 + vchiq_loud_error_header();
7876 + vchiq_loud_error("Incompatible VCHIQ versions found.");
7877 + vchiq_loud_error("slot_zero=%x: version=%d (VideoCore "
7878 + "minimum %d)",
7879 + (unsigned int)slot_zero, VCHIQ_VERSION,
7880 + slot_zero->version_min);
7881 + vchiq_loud_error("Restart with a newer kernel.");
7882 + vchiq_loud_error_footer();
7883 + return VCHIQ_ERROR;
7884 + }
7885 +
7886 + if ((slot_zero->slot_zero_size != sizeof(VCHIQ_SLOT_ZERO_T)) ||
7887 + (slot_zero->slot_size != VCHIQ_SLOT_SIZE) ||
7888 + (slot_zero->max_slots != VCHIQ_MAX_SLOTS) ||
7889 + (slot_zero->max_slots_per_side != VCHIQ_MAX_SLOTS_PER_SIDE)) {
7890 + vchiq_loud_error_header();
7891 + if (slot_zero->slot_zero_size != sizeof(VCHIQ_SLOT_ZERO_T))
7892 + vchiq_loud_error("slot_zero=%x: slot_zero_size=%x "
7893 + "(expected %x)",
7894 + (unsigned int)slot_zero,
7895 + slot_zero->slot_zero_size,
7896 + sizeof(VCHIQ_SLOT_ZERO_T));
7897 + if (slot_zero->slot_size != VCHIQ_SLOT_SIZE)
7898 + vchiq_loud_error("slot_zero=%x: slot_size=%d "
7899 + "(expected %d)",
7900 + (unsigned int)slot_zero, slot_zero->slot_size,
7901 + VCHIQ_SLOT_SIZE);
7902 + if (slot_zero->max_slots != VCHIQ_MAX_SLOTS)
7903 + vchiq_loud_error("slot_zero=%x: max_slots=%d "
7904 + "(expected %d)",
7905 + (unsigned int)slot_zero, slot_zero->max_slots,
7906 + VCHIQ_MAX_SLOTS);
7907 + if (slot_zero->max_slots_per_side != VCHIQ_MAX_SLOTS_PER_SIDE)
7908 + vchiq_loud_error("slot_zero=%x: max_slots_per_side=%d "
7909 + "(expected %d)",
7910 + (unsigned int)slot_zero,
7911 + slot_zero->max_slots_per_side,
7912 + VCHIQ_MAX_SLOTS_PER_SIDE);
7913 + vchiq_loud_error_footer();
7914 + return VCHIQ_ERROR;
7915 + }
7916 +
7917 + if (is_master) {
7918 + local = &slot_zero->master;
7919 + remote = &slot_zero->slave;
7920 + } else {
7921 + local = &slot_zero->slave;
7922 + remote = &slot_zero->master;
7923 + }
7924 +
7925 + if (local->initialised) {
7926 + vchiq_loud_error_header();
7927 + if (remote->initialised)
7928 + vchiq_loud_error("local state has already been "
7929 + "initialised");
7930 + else
7931 + vchiq_loud_error("master/slave mismatch - two %ss",
7932 + is_master ? "master" : "slave");
7933 + vchiq_loud_error_footer();
7934 + return VCHIQ_ERROR;
7935 + }
7936 +
7937 + memset(state, 0, sizeof(VCHIQ_STATE_T));
7938 +
7939 + state->id = id++;
7940 + state->is_master = is_master;
7941 +
7942 + /*
7943 + initialize shared state pointers
7944 + */
7945 +
7946 + state->local = local;
7947 + state->remote = remote;
7948 + state->slot_data = (VCHIQ_SLOT_T *)slot_zero;
7949 +
7950 + /*
7951 + initialize events and mutexes
7952 + */
7953 +
7954 + sema_init(&state->connect, 0);
7955 + mutex_init(&state->mutex);
7956 + sema_init(&state->trigger_event, 0);
7957 + sema_init(&state->recycle_event, 0);
7958 + sema_init(&state->sync_trigger_event, 0);
7959 + sema_init(&state->sync_release_event, 0);
7960 +
7961 + mutex_init(&state->slot_mutex);
7962 + mutex_init(&state->recycle_mutex);
7963 + mutex_init(&state->sync_mutex);
7964 + mutex_init(&state->bulk_transfer_mutex);
7965 +
7966 + sema_init(&state->slot_available_event, 0);
7967 + sema_init(&state->slot_remove_event, 0);
7968 + sema_init(&state->data_quota_event, 0);
7969 +
7970 + state->slot_queue_available = 0;
7971 +
7972 + for (i = 0; i < VCHIQ_MAX_SERVICES; i++) {
7973 + VCHIQ_SERVICE_QUOTA_T *service_quota =
7974 + &state->service_quotas[i];
7975 + sema_init(&service_quota->quota_event, 0);
7976 + }
7977 +
7978 + for (i = local->slot_first; i <= local->slot_last; i++) {
7979 + local->slot_queue[state->slot_queue_available++] = i;
7980 + up(&state->slot_available_event);
7981 + }
7982 +
7983 + state->default_slot_quota = state->slot_queue_available/2;
7984 + state->default_message_quota =
7985 + min((unsigned short)(state->default_slot_quota * 256),
7986 + (unsigned short)~0);
7987 +
7988 + state->previous_data_index = -1;
7989 + state->data_use_count = 0;
7990 + state->data_quota = state->slot_queue_available - 1;
7991 +
7992 + local->trigger.event = &state->trigger_event;
7993 + remote_event_create(&local->trigger);
7994 + local->tx_pos = 0;
7995 +
7996 + local->recycle.event = &state->recycle_event;
7997 + remote_event_create(&local->recycle);
7998 + local->slot_queue_recycle = state->slot_queue_available;
7999 +
8000 + local->sync_trigger.event = &state->sync_trigger_event;
8001 + remote_event_create(&local->sync_trigger);
8002 +
8003 + local->sync_release.event = &state->sync_release_event;
8004 + remote_event_create(&local->sync_release);
8005 +
8006 + /* At start-of-day, the slot is empty and available */
8007 + ((VCHIQ_HEADER_T *)SLOT_DATA_FROM_INDEX(state, local->slot_sync))->msgid
8008 + = VCHIQ_MSGID_PADDING;
8009 + remote_event_signal_local(&local->sync_release);
8010 +
8011 + local->debug[DEBUG_ENTRIES] = DEBUG_MAX;
8012 +
8013 + status = vchiq_platform_init_state(state);
8014 +
8015 + /*
8016 + bring up slot handler thread
8017 + */
8018 + snprintf(threadname, sizeof(threadname), "VCHIQ-%d", state->id);
8019 + state->slot_handler_thread = kthread_create(&slot_handler_func,
8020 + (void *)state,
8021 + threadname);
8022 +
8023 + if (state->slot_handler_thread == NULL) {
8024 + vchiq_loud_error_header();
8025 + vchiq_loud_error("couldn't create thread %s", threadname);
8026 + vchiq_loud_error_footer();
8027 + return VCHIQ_ERROR;
8028 + }
8029 + set_user_nice(state->slot_handler_thread, -19);
8030 + wake_up_process(state->slot_handler_thread);
8031 +
8032 + snprintf(threadname, sizeof(threadname), "VCHIQr-%d", state->id);
8033 + state->recycle_thread = kthread_create(&recycle_func,
8034 + (void *)state,
8035 + threadname);
8036 + if (state->recycle_thread == NULL) {
8037 + vchiq_loud_error_header();
8038 + vchiq_loud_error("couldn't create thread %s", threadname);
8039 + vchiq_loud_error_footer();
8040 + return VCHIQ_ERROR;
8041 + }
8042 + set_user_nice(state->recycle_thread, -19);
8043 + wake_up_process(state->recycle_thread);
8044 +
8045 + snprintf(threadname, sizeof(threadname), "VCHIQs-%d", state->id);
8046 + state->sync_thread = kthread_create(&sync_func,
8047 + (void *)state,
8048 + threadname);
8049 + if (state->sync_thread == NULL) {
8050 + vchiq_loud_error_header();
8051 + vchiq_loud_error("couldn't create thread %s", threadname);
8052 + vchiq_loud_error_footer();
8053 + return VCHIQ_ERROR;
8054 + }
8055 + set_user_nice(state->sync_thread, -20);
8056 + wake_up_process(state->sync_thread);
8057 +
8058 + BUG_ON(state->id >= VCHIQ_MAX_STATES);
8059 + vchiq_states[state->id] = state;
8060 +
8061 + /* Indicate readiness to the other side */
8062 + local->initialised = 1;
8063 +
8064 + return status;
8065 +}
8066 +
8067 +/* Called from application thread when a client or server service is created. */
8068 +VCHIQ_SERVICE_T *
8069 +vchiq_add_service_internal(VCHIQ_STATE_T *state,
8070 + const VCHIQ_SERVICE_PARAMS_T *params, int srvstate,
8071 + VCHIQ_INSTANCE_T instance, VCHIQ_USERDATA_TERM_T userdata_term)
8072 +{
8073 + VCHIQ_SERVICE_T *service;
8074 +
8075 + service = kmalloc(sizeof(VCHIQ_SERVICE_T), GFP_KERNEL);
8076 + if (service) {
8077 + service->base.fourcc = params->fourcc;
8078 + service->base.callback = params->callback;
8079 + service->base.userdata = params->userdata;
8080 + service->handle = VCHIQ_SERVICE_HANDLE_INVALID;
8081 + service->ref_count = 1;
8082 + service->srvstate = VCHIQ_SRVSTATE_FREE;
8083 + service->userdata_term = userdata_term;
8084 + service->localport = VCHIQ_PORT_FREE;
8085 + service->remoteport = VCHIQ_PORT_FREE;
8086 +
8087 + service->public_fourcc = (srvstate == VCHIQ_SRVSTATE_OPENING) ?
8088 + VCHIQ_FOURCC_INVALID : params->fourcc;
8089 + service->client_id = 0;
8090 + service->auto_close = 1;
8091 + service->sync = 0;
8092 + service->closing = 0;
8093 + atomic_set(&service->poll_flags, 0);
8094 + service->version = params->version;
8095 + service->version_min = params->version_min;
8096 + service->state = state;
8097 + service->instance = instance;
8098 + service->service_use_count = 0;
8099 + init_bulk_queue(&service->bulk_tx);
8100 + init_bulk_queue(&service->bulk_rx);
8101 + sema_init(&service->remove_event, 0);
8102 + sema_init(&service->bulk_remove_event, 0);
8103 + mutex_init(&service->bulk_mutex);
8104 + memset(&service->stats, 0, sizeof(service->stats));
8105 + } else {
8106 + vchiq_log_error(vchiq_core_log_level,
8107 + "Out of memory");
8108 + }
8109 +
8110 + if (service) {
8111 + VCHIQ_SERVICE_T **pservice = NULL;
8112 + int i;
8113 +
8114 + /* Although it is perfectly possible to use service_spinlock
8115 + ** to protect the creation of services, it is overkill as it
8116 + ** disables interrupts while the array is searched.
8117 + ** The only danger is of another thread trying to create a
8118 + ** service - service deletion is safe.
8119 + ** Therefore it is preferable to use state->mutex which,
8120 + ** although slower to claim, doesn't block interrupts while
8121 + ** it is held.
8122 + */
8123 +
8124 + mutex_lock(&state->mutex);
8125 +
8126 + /* Prepare to use a previously unused service */
8127 + if (state->unused_service < VCHIQ_MAX_SERVICES)
8128 + pservice = &state->services[state->unused_service];
8129 +
8130 + if (srvstate == VCHIQ_SRVSTATE_OPENING) {
8131 + for (i = 0; i < state->unused_service; i++) {
8132 + VCHIQ_SERVICE_T *srv = state->services[i];
8133 + if (!srv) {
8134 + pservice = &state->services[i];
8135 + break;
8136 + }
8137 + }
8138 + } else {
8139 + for (i = (state->unused_service - 1); i >= 0; i--) {
8140 + VCHIQ_SERVICE_T *srv = state->services[i];
8141 + if (!srv)
8142 + pservice = &state->services[i];
8143 + else if ((srv->public_fourcc == params->fourcc)
8144 + && ((srv->instance != instance) ||
8145 + (srv->base.callback !=
8146 + params->callback))) {
8147 +					/* Another server is using this fourcc
8148 +					** with a different instance/callback. */
8149 + pservice = NULL;
8150 + break;
8151 + }
8152 + }
8153 + }
8154 +
8155 + if (pservice) {
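+			/* Build a unique handle: the low bits hold the local
+			** port, the next bits hold the state id (scaled by
+			** VCHIQ_MAX_SERVICES), and handle_seq adds an ever-
+			** increasing upper part so a stale handle to a reused
+			** port can be told apart from the current one. */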
8156 + service->localport = (pservice - state->services);
8157 + if (!handle_seq)
8158 + handle_seq = VCHIQ_MAX_STATES *
8159 + VCHIQ_MAX_SERVICES;
8160 + service->handle = handle_seq |
8161 + (state->id * VCHIQ_MAX_SERVICES) |
8162 + service->localport;
8163 + handle_seq += VCHIQ_MAX_STATES * VCHIQ_MAX_SERVICES;
8164 + *pservice = service;
8165 + if (pservice == &state->services[state->unused_service])
8166 + state->unused_service++;
8167 + }
8168 +
8169 + mutex_unlock(&state->mutex);
8170 +
8171 + if (!pservice) {
8172 + kfree(service);
8173 + service = NULL;
8174 + }
8175 + }
8176 +
8177 + if (service) {
8178 + VCHIQ_SERVICE_QUOTA_T *service_quota =
8179 + &state->service_quotas[service->localport];
8180 + service_quota->slot_quota = state->default_slot_quota;
8181 + service_quota->message_quota = state->default_message_quota;
8182 + if (service_quota->slot_use_count == 0)
8183 + service_quota->previous_tx_index =
8184 + SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos)
8185 + - 1;
8186 +
8187 + /* Bring this service online */
8188 + vchiq_set_service_state(service, srvstate);
8189 +
8190 + vchiq_log_info(vchiq_core_msg_log_level,
8191 + "%s Service %c%c%c%c SrcPort:%d",
8192 + (srvstate == VCHIQ_SRVSTATE_OPENING)
8193 + ? "Open" : "Add",
8194 + VCHIQ_FOURCC_AS_4CHARS(params->fourcc),
8195 + service->localport);
8196 + }
8197 +
8198 + /* Don't unlock the service - leave it with a ref_count of 1. */
8199 +
8200 + return service;
8201 +}
8202 +
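+/* Sends a VCHIQ_MSG_OPEN carrying the fourcc, client id and version range,
+** then waits on remove_event until the peer accepts (srvstate becomes OPEN
+** or OPENSYNC) or rejects the open. Returns VCHIQ_RETRY if interrupted. */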
8203 +VCHIQ_STATUS_T
8204 +vchiq_open_service_internal(VCHIQ_SERVICE_T *service, int client_id)
8205 +{
8206 + struct vchiq_open_payload payload = {
8207 + service->base.fourcc,
8208 + client_id,
8209 + service->version,
8210 + service->version_min
8211 + };
8212 + VCHIQ_ELEMENT_T body = { &payload, sizeof(payload) };
8213 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
8214 +
8215 + service->client_id = client_id;
8216 + vchiq_use_service_internal(service);
8217 + status = queue_message(service->state, NULL,
8218 + VCHIQ_MAKE_MSG(VCHIQ_MSG_OPEN, service->localport, 0),
8219 + &body, 1, sizeof(payload), 1);
8220 + if (status == VCHIQ_SUCCESS) {
8221 + if (down_interruptible(&service->remove_event) != 0) {
8222 + status = VCHIQ_RETRY;
8223 + vchiq_release_service_internal(service);
8224 + } else if ((service->srvstate != VCHIQ_SRVSTATE_OPEN) &&
8225 + (service->srvstate != VCHIQ_SRVSTATE_OPENSYNC)) {
8226 + if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT)
8227 + vchiq_log_error(vchiq_core_log_level,
8228 + "%d: osi - srvstate = %s (ref %d)",
8229 + service->state->id,
8230 + srvstate_names[service->srvstate],
8231 + service->ref_count);
8232 + status = VCHIQ_ERROR;
8233 + VCHIQ_SERVICE_STATS_INC(service, error_count);
8234 + vchiq_release_service_internal(service);
8235 + }
8236 + }
8237 + return status;
8238 +}
8239 +
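+/* Releases any messages still claimed by this service in the remote slots,
+** so that those slots can eventually be recycled. */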
8240 +static void
8241 +release_service_messages(VCHIQ_SERVICE_T *service)
8242 +{
8243 + VCHIQ_STATE_T *state = service->state;
8244 + int slot_last = state->remote->slot_last;
8245 + int i;
8246 +
8247 + /* Release any claimed messages */
8248 + for (i = state->remote->slot_first; i <= slot_last; i++) {
8249 + VCHIQ_SLOT_INFO_T *slot_info =
8250 + SLOT_INFO_FROM_INDEX(state, i);
8251 + if (slot_info->release_count != slot_info->use_count) {
8252 + char *data =
8253 + (char *)SLOT_DATA_FROM_INDEX(state, i);
8254 + unsigned int pos, end;
8255 +
8256 + end = VCHIQ_SLOT_SIZE;
8257 + if (data == state->rx_data)
8258 + /* This buffer is still being read from - stop
8259 + ** at the current read position */
8260 + end = state->rx_pos & VCHIQ_SLOT_MASK;
8261 +
8262 + pos = 0;
8263 +
8264 + while (pos < end) {
8265 + VCHIQ_HEADER_T *header =
8266 + (VCHIQ_HEADER_T *)(data + pos);
8267 + int msgid = header->msgid;
8268 + int port = VCHIQ_MSG_DSTPORT(msgid);
8269 + if ((port == service->localport) &&
8270 + (msgid & VCHIQ_MSGID_CLAIMED)) {
8271 + vchiq_log_info(vchiq_core_log_level,
8272 + " fsi - hdr %x",
8273 + (unsigned int)header);
8274 + release_slot(state, slot_info, header,
8275 + NULL);
8276 + }
8277 + pos += calc_stride(header->size);
8278 + if (pos > VCHIQ_SLOT_SIZE) {
8279 + vchiq_log_error(vchiq_core_log_level,
8280 + "fsi - pos %x: header %x, "
8281 + "msgid %x, header->msgid %x, "
8282 + "header->size %x",
8283 + pos, (unsigned int)header,
8284 + msgid, header->msgid,
8285 + header->size);
8286 + WARN(1, "invalid slot position\n");
8287 + }
8288 + }
8289 + }
8290 + }
8291 +}
8292 +
8293 +static int
8294 +do_abort_bulks(VCHIQ_SERVICE_T *service)
8295 +{
8296 + VCHIQ_STATUS_T status;
8297 +
8298 + /* Abort any outstanding bulk transfers */
8299 + if (mutex_lock_interruptible(&service->bulk_mutex) != 0)
8300 + return 0;
8301 + abort_outstanding_bulks(service, &service->bulk_tx);
8302 + abort_outstanding_bulks(service, &service->bulk_rx);
8303 + mutex_unlock(&service->bulk_mutex);
8304 +
8305 + status = notify_bulks(service, &service->bulk_tx, 0/*!retry_poll*/);
8306 + if (status == VCHIQ_SUCCESS)
8307 + status = notify_bulks(service, &service->bulk_rx,
8308 + 0/*!retry_poll*/);
8309 + return (status == VCHIQ_SUCCESS);
8310 +}
8311 +
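+/* Completes the close handshake: servers drop back to LISTENING (or
+** CLOSEWAIT when auto_close is disabled), clients move to CLOSED, the
+** VCHIQ_SERVICE_CLOSED callback is delivered and remaining use counts are
+** released. If the callback returns VCHIQ_RETRY, the service is left in
+** the caller-supplied failstate instead. */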
8312 +static VCHIQ_STATUS_T
8313 +close_service_complete(VCHIQ_SERVICE_T *service, int failstate)
8314 +{
8315 + VCHIQ_STATUS_T status;
8316 + int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
8317 + int newstate;
8318 +
8319 + switch (service->srvstate) {
8320 + case VCHIQ_SRVSTATE_OPEN:
8321 + case VCHIQ_SRVSTATE_CLOSESENT:
8322 + case VCHIQ_SRVSTATE_CLOSERECVD:
8323 + if (is_server) {
8324 + if (service->auto_close) {
8325 + service->client_id = 0;
8326 + service->remoteport = VCHIQ_PORT_FREE;
8327 + newstate = VCHIQ_SRVSTATE_LISTENING;
8328 + } else
8329 + newstate = VCHIQ_SRVSTATE_CLOSEWAIT;
8330 + } else
8331 + newstate = VCHIQ_SRVSTATE_CLOSED;
8332 + vchiq_set_service_state(service, newstate);
8333 + break;
8334 + case VCHIQ_SRVSTATE_LISTENING:
8335 + break;
8336 + default:
8337 + vchiq_log_error(vchiq_core_log_level,
8338 + "close_service_complete(%x) called in state %s",
8339 + service->handle, srvstate_names[service->srvstate]);
8340 + WARN(1, "close_service_complete in unexpected state\n");
8341 + return VCHIQ_ERROR;
8342 + }
8343 +
8344 + status = make_service_callback(service,
8345 + VCHIQ_SERVICE_CLOSED, NULL, NULL);
8346 +
8347 + if (status != VCHIQ_RETRY) {
8348 + int uc = service->service_use_count;
8349 + int i;
8350 + /* Complete the close process */
8351 + for (i = 0; i < uc; i++)
8352 +			/* cater for cases where close is forced and the
8353 +			** client may not close all its handles */
8354 + vchiq_release_service_internal(service);
8355 +
8356 + service->client_id = 0;
8357 + service->remoteport = VCHIQ_PORT_FREE;
8358 +
8359 + if (service->srvstate == VCHIQ_SRVSTATE_CLOSED)
8360 + vchiq_free_service_internal(service);
8361 + else if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT) {
8362 + if (is_server)
8363 + service->closing = 0;
8364 +
8365 + up(&service->remove_event);
8366 + }
8367 + } else
8368 + vchiq_set_service_state(service, failstate);
8369 +
8370 + return status;
8371 +}
8372 +
8373 +/* Called by the slot handler */
8374 +VCHIQ_STATUS_T
8375 +vchiq_close_service_internal(VCHIQ_SERVICE_T *service, int close_recvd)
8376 +{
8377 + VCHIQ_STATE_T *state = service->state;
8378 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
8379 + int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
8380 +
8381 + vchiq_log_info(vchiq_core_log_level, "%d: csi:%d,%d (%s)",
8382 + service->state->id, service->localport, close_recvd,
8383 + srvstate_names[service->srvstate]);
8384 +
8385 + switch (service->srvstate) {
8386 + case VCHIQ_SRVSTATE_CLOSED:
8387 + case VCHIQ_SRVSTATE_HIDDEN:
8388 + case VCHIQ_SRVSTATE_LISTENING:
8389 + case VCHIQ_SRVSTATE_CLOSEWAIT:
8390 + if (close_recvd)
8391 + vchiq_log_error(vchiq_core_log_level,
8392 + "vchiq_close_service_internal(1) called "
8393 + "in state %s",
8394 + srvstate_names[service->srvstate]);
8395 + else if (is_server) {
8396 + if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
8397 + status = VCHIQ_ERROR;
8398 + } else {
8399 + service->client_id = 0;
8400 + service->remoteport = VCHIQ_PORT_FREE;
8401 + if (service->srvstate ==
8402 + VCHIQ_SRVSTATE_CLOSEWAIT)
8403 + vchiq_set_service_state(service,
8404 + VCHIQ_SRVSTATE_LISTENING);
8405 + }
8406 + up(&service->remove_event);
8407 + } else
8408 + vchiq_free_service_internal(service);
8409 + break;
8410 + case VCHIQ_SRVSTATE_OPENING:
8411 + if (close_recvd) {
8412 + /* The open was rejected - tell the user */
8413 + vchiq_set_service_state(service,
8414 + VCHIQ_SRVSTATE_CLOSEWAIT);
8415 + up(&service->remove_event);
8416 + } else {
8417 + /* Shutdown mid-open - let the other side know */
8418 + status = queue_message(state, service,
8419 + VCHIQ_MAKE_MSG
8420 + (VCHIQ_MSG_CLOSE,
8421 + service->localport,
8422 + VCHIQ_MSG_DSTPORT(service->remoteport)),
8423 + NULL, 0, 0, 0);
8424 + }
8425 + break;
8426 +
8427 + case VCHIQ_SRVSTATE_OPENSYNC:
8428 + mutex_lock(&state->sync_mutex);
8429 + /* Drop through */
8430 +
8431 + case VCHIQ_SRVSTATE_OPEN:
8432 + if (state->is_master || close_recvd) {
8433 + if (!do_abort_bulks(service))
8434 + status = VCHIQ_RETRY;
8435 + }
8436 +
8437 + release_service_messages(service);
8438 +
8439 + if (status == VCHIQ_SUCCESS)
8440 + status = queue_message(state, service,
8441 + VCHIQ_MAKE_MSG
8442 + (VCHIQ_MSG_CLOSE,
8443 + service->localport,
8444 + VCHIQ_MSG_DSTPORT(service->remoteport)),
8445 + NULL, 0, 0, 0);
8446 +
8447 + if (status == VCHIQ_SUCCESS) {
8448 + if (!close_recvd)
8449 + break;
8450 + } else if (service->srvstate == VCHIQ_SRVSTATE_OPENSYNC) {
8451 + mutex_unlock(&state->sync_mutex);
8452 + break;
8453 + } else
8454 + break;
8455 +
8456 + status = close_service_complete(service,
8457 + VCHIQ_SRVSTATE_CLOSERECVD);
8458 + break;
8459 +
8460 + case VCHIQ_SRVSTATE_CLOSESENT:
8461 + if (!close_recvd)
8462 + /* This happens when a process is killed mid-close */
8463 + break;
8464 +
8465 + if (!state->is_master) {
8466 + if (!do_abort_bulks(service)) {
8467 + status = VCHIQ_RETRY;
8468 + break;
8469 + }
8470 + }
8471 +
8472 + if (status == VCHIQ_SUCCESS)
8473 + status = close_service_complete(service,
8474 + VCHIQ_SRVSTATE_CLOSERECVD);
8475 + break;
8476 +
8477 + case VCHIQ_SRVSTATE_CLOSERECVD:
8478 + if (!close_recvd && is_server)
8479 + /* Force into LISTENING mode */
8480 + vchiq_set_service_state(service,
8481 + VCHIQ_SRVSTATE_LISTENING);
8482 + status = close_service_complete(service,
8483 + VCHIQ_SRVSTATE_CLOSERECVD);
8484 + break;
8485 +
8486 + default:
8487 + vchiq_log_error(vchiq_core_log_level,
8488 + "vchiq_close_service_internal(%d) called in state %s",
8489 + close_recvd, srvstate_names[service->srvstate]);
8490 + break;
8491 + }
8492 +
8493 + return status;
8494 +}
8495 +
8496 +/* Called from the application process upon process death */
8497 +void
8498 +vchiq_terminate_service_internal(VCHIQ_SERVICE_T *service)
8499 +{
8500 + VCHIQ_STATE_T *state = service->state;
8501 +
8502 + vchiq_log_info(vchiq_core_log_level, "%d: tsi - (%d<->%d)",
8503 + state->id, service->localport, service->remoteport);
8504 +
8505 + mark_service_closing(service);
8506 +
8507 + /* Mark the service for removal by the slot handler */
8508 + request_poll(state, service, VCHIQ_POLL_REMOVE);
8509 +}
8510 +
8511 +/* Called from the slot handler */
8512 +void
8513 +vchiq_free_service_internal(VCHIQ_SERVICE_T *service)
8514 +{
8515 + VCHIQ_STATE_T *state = service->state;
8516 +
8517 + vchiq_log_info(vchiq_core_log_level, "%d: fsi - (%d)",
8518 + state->id, service->localport);
8519 +
8520 + switch (service->srvstate) {
8521 + case VCHIQ_SRVSTATE_OPENING:
8522 + case VCHIQ_SRVSTATE_CLOSED:
8523 + case VCHIQ_SRVSTATE_HIDDEN:
8524 + case VCHIQ_SRVSTATE_LISTENING:
8525 + case VCHIQ_SRVSTATE_CLOSEWAIT:
8526 + break;
8527 + default:
8528 + vchiq_log_error(vchiq_core_log_level,
8529 + "%d: fsi - (%d) in state %s",
8530 + state->id, service->localport,
8531 + srvstate_names[service->srvstate]);
8532 + return;
8533 + }
8534 +
8535 + vchiq_set_service_state(service, VCHIQ_SRVSTATE_FREE);
8536 +
8537 + up(&service->remove_event);
8538 +
8539 + /* Release the initial lock */
8540 + unlock_service(service);
8541 +}
8542 +
8543 +VCHIQ_STATUS_T
8544 +vchiq_connect_internal(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance)
8545 +{
8546 + VCHIQ_SERVICE_T *service;
8547 + int i;
8548 +
8549 + /* Find all services registered to this client and enable them. */
8550 + i = 0;
8551 + while ((service = next_service_by_instance(state, instance,
8552 + &i)) != NULL) {
8553 + if (service->srvstate == VCHIQ_SRVSTATE_HIDDEN)
8554 + vchiq_set_service_state(service,
8555 + VCHIQ_SRVSTATE_LISTENING);
8556 + unlock_service(service);
8557 + }
8558 +
8559 + if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED) {
8560 + if (queue_message(state, NULL,
8561 + VCHIQ_MAKE_MSG(VCHIQ_MSG_CONNECT, 0, 0), NULL, 0,
8562 + 0, 1) == VCHIQ_RETRY)
8563 + return VCHIQ_RETRY;
8564 +
8565 + vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTING);
8566 + }
8567 +
8568 + if (state->conn_state == VCHIQ_CONNSTATE_CONNECTING) {
8569 + if (down_interruptible(&state->connect) != 0)
8570 + return VCHIQ_RETRY;
8571 +
8572 + vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
8573 + up(&state->connect);
8574 + }
8575 +
8576 + return VCHIQ_SUCCESS;
8577 +}
8578 +
8579 +VCHIQ_STATUS_T
8580 +vchiq_shutdown_internal(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance)
8581 +{
8582 + VCHIQ_SERVICE_T *service;
8583 + int i;
8584 +
8585 +	/* Find all services registered to this client and remove them. */
8586 + i = 0;
8587 + while ((service = next_service_by_instance(state, instance,
8588 + &i)) != NULL) {
8589 + (void)vchiq_remove_service(service->handle);
8590 + unlock_service(service);
8591 + }
8592 +
8593 + return VCHIQ_SUCCESS;
8594 +}
8595 +
8596 +VCHIQ_STATUS_T
8597 +vchiq_pause_internal(VCHIQ_STATE_T *state)
8598 +{
8599 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
8600 +
8601 + switch (state->conn_state) {
8602 + case VCHIQ_CONNSTATE_CONNECTED:
8603 + /* Request a pause */
8604 + vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSING);
8605 + request_poll(state, NULL, 0);
8606 + break;
8607 + default:
8608 + vchiq_log_error(vchiq_core_log_level,
8609 + "vchiq_pause_internal in state %s\n",
8610 + conn_state_names[state->conn_state]);
8611 + status = VCHIQ_ERROR;
8612 + VCHIQ_STATS_INC(state, error_count);
8613 + break;
8614 + }
8615 +
8616 + return status;
8617 +}
8618 +
8619 +VCHIQ_STATUS_T
8620 +vchiq_resume_internal(VCHIQ_STATE_T *state)
8621 +{
8622 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
8623 +
8624 + if (state->conn_state == VCHIQ_CONNSTATE_PAUSED) {
8625 + vchiq_set_conn_state(state, VCHIQ_CONNSTATE_RESUMING);
8626 + request_poll(state, NULL, 0);
8627 + } else {
8628 + status = VCHIQ_ERROR;
8629 + VCHIQ_STATS_INC(state, error_count);
8630 + }
8631 +
8632 + return status;
8633 +}
8634 +
8635 +VCHIQ_STATUS_T
8636 +vchiq_close_service(VCHIQ_SERVICE_HANDLE_T handle)
8637 +{
8638 + /* Unregister the service */
8639 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
8640 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
8641 +
8642 + if (!service)
8643 + return VCHIQ_ERROR;
8644 +
8645 + vchiq_log_info(vchiq_core_log_level,
8646 + "%d: close_service:%d",
8647 + service->state->id, service->localport);
8648 +
8649 + if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
8650 + (service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
8651 + (service->srvstate == VCHIQ_SRVSTATE_HIDDEN)) {
8652 + unlock_service(service);
8653 + return VCHIQ_ERROR;
8654 + }
8655 +
8656 + mark_service_closing(service);
8657 +
8658 + if (current == service->state->slot_handler_thread) {
8659 + status = vchiq_close_service_internal(service,
8660 + 0/*!close_recvd*/);
8661 + BUG_ON(status == VCHIQ_RETRY);
8662 + } else {
8663 + /* Mark the service for termination by the slot handler */
8664 + request_poll(service->state, service, VCHIQ_POLL_TERMINATE);
8665 + }
8666 +
8667 + while (1) {
8668 + if (down_interruptible(&service->remove_event) != 0) {
8669 + status = VCHIQ_RETRY;
8670 + break;
8671 + }
8672 +
8673 + if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
8674 + (service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
8675 + (service->srvstate == VCHIQ_SRVSTATE_OPEN))
8676 + break;
8677 +
8678 + vchiq_log_warning(vchiq_core_log_level,
8679 + "%d: close_service:%d - waiting in state %s",
8680 + service->state->id, service->localport,
8681 + srvstate_names[service->srvstate]);
8682 + }
8683 +
8684 + if ((status == VCHIQ_SUCCESS) &&
8685 + (service->srvstate != VCHIQ_SRVSTATE_FREE) &&
8686 + (service->srvstate != VCHIQ_SRVSTATE_LISTENING))
8687 + status = VCHIQ_ERROR;
8688 +
8689 + unlock_service(service);
8690 +
8691 + return status;
8692 +}
8693 +
8694 +VCHIQ_STATUS_T
8695 +vchiq_remove_service(VCHIQ_SERVICE_HANDLE_T handle)
8696 +{
8697 + /* Unregister the service */
8698 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
8699 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
8700 +
8701 + if (!service)
8702 + return VCHIQ_ERROR;
8703 +
8704 + vchiq_log_info(vchiq_core_log_level,
8705 + "%d: remove_service:%d",
8706 + service->state->id, service->localport);
8707 +
8708 + if (service->srvstate == VCHIQ_SRVSTATE_FREE) {
8709 + unlock_service(service);
8710 + return VCHIQ_ERROR;
8711 + }
8712 +
8713 + mark_service_closing(service);
8714 +
8715 + if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) ||
8716 + (current == service->state->slot_handler_thread)) {
8717 + /* Make it look like a client, because it must be removed and
8718 + not left in the LISTENING state. */
8719 + service->public_fourcc = VCHIQ_FOURCC_INVALID;
8720 +
8721 + status = vchiq_close_service_internal(service,
8722 + 0/*!close_recvd*/);
8723 + BUG_ON(status == VCHIQ_RETRY);
8724 + } else {
8725 + /* Mark the service for removal by the slot handler */
8726 + request_poll(service->state, service, VCHIQ_POLL_REMOVE);
8727 + }
8728 + while (1) {
8729 + if (down_interruptible(&service->remove_event) != 0) {
8730 + status = VCHIQ_RETRY;
8731 + break;
8732 + }
8733 +
8734 + if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
8735 + (service->srvstate == VCHIQ_SRVSTATE_OPEN))
8736 + break;
8737 +
8738 + vchiq_log_warning(vchiq_core_log_level,
8739 + "%d: remove_service:%d - waiting in state %s",
8740 + service->state->id, service->localport,
8741 + srvstate_names[service->srvstate]);
8742 + }
8743 +
8744 + if ((status == VCHIQ_SUCCESS) &&
8745 + (service->srvstate != VCHIQ_SRVSTATE_FREE))
8746 + status = VCHIQ_ERROR;
8747 +
8748 + unlock_service(service);
8749 +
8750 + return status;
8751 +}
8752 +
8753 +
8754 +/* This function may be called by kernel threads or user threads.
8755 + * User threads may receive VCHIQ_RETRY to indicate that a signal has been
8756 + * received and the call should be retried after being returned to user
8757 + * context.
8758 + * When called in blocking mode, the userdata field points to a bulk_waiter
8759 + * structure.
8760 + */
8761 +VCHIQ_STATUS_T
8762 +vchiq_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle,
8763 + VCHI_MEM_HANDLE_T memhandle, void *offset, int size, void *userdata,
8764 + VCHIQ_BULK_MODE_T mode, VCHIQ_BULK_DIR_T dir)
8765 +{
8766 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
8767 + VCHIQ_BULK_QUEUE_T *queue;
8768 + VCHIQ_BULK_T *bulk;
8769 + VCHIQ_STATE_T *state;
8770 + struct bulk_waiter *bulk_waiter = NULL;
8771 + const char dir_char = (dir == VCHIQ_BULK_TRANSMIT) ? 't' : 'r';
8772 + const int dir_msgtype = (dir == VCHIQ_BULK_TRANSMIT) ?
8773 + VCHIQ_MSG_BULK_TX : VCHIQ_MSG_BULK_RX;
8774 + VCHIQ_STATUS_T status = VCHIQ_ERROR;
8775 +
8776 + if (!service ||
8777 + (service->srvstate != VCHIQ_SRVSTATE_OPEN) ||
8778 + ((memhandle == VCHI_MEM_HANDLE_INVALID) && (offset == NULL)) ||
8779 + (vchiq_check_service(service) != VCHIQ_SUCCESS))
8780 + goto error_exit;
8781 +
8782 + switch (mode) {
8783 + case VCHIQ_BULK_MODE_NOCALLBACK:
8784 + case VCHIQ_BULK_MODE_CALLBACK:
8785 + break;
8786 + case VCHIQ_BULK_MODE_BLOCKING:
8787 + bulk_waiter = (struct bulk_waiter *)userdata;
8788 + sema_init(&bulk_waiter->event, 0);
8789 + bulk_waiter->actual = 0;
8790 + bulk_waiter->bulk = NULL;
8791 + break;
8792 + case VCHIQ_BULK_MODE_WAITING:
8793 + bulk_waiter = (struct bulk_waiter *)userdata;
8794 + bulk = bulk_waiter->bulk;
8795 + goto waiting;
8796 + default:
8797 + goto error_exit;
8798 + }
8799 +
8800 + state = service->state;
8801 +
8802 + queue = (dir == VCHIQ_BULK_TRANSMIT) ?
8803 + &service->bulk_tx : &service->bulk_rx;
8804 +
8805 + if (mutex_lock_interruptible(&service->bulk_mutex) != 0) {
8806 + status = VCHIQ_RETRY;
8807 + goto error_exit;
8808 + }
8809 +
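+	/* If the bulk queue is full (local_insert is a whole queue length
+	** ahead of remove), drop the mutex and wait for the remote side to
+	** retire an entry before retrying. */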
8810 + if (queue->local_insert == queue->remove + VCHIQ_NUM_SERVICE_BULKS) {
8811 + VCHIQ_SERVICE_STATS_INC(service, bulk_stalls);
8812 + do {
8813 + mutex_unlock(&service->bulk_mutex);
8814 + if (down_interruptible(&service->bulk_remove_event)
8815 + != 0) {
8816 + status = VCHIQ_RETRY;
8817 + goto error_exit;
8818 + }
8819 + if (mutex_lock_interruptible(&service->bulk_mutex)
8820 + != 0) {
8821 + status = VCHIQ_RETRY;
8822 + goto error_exit;
8823 + }
8824 + } while (queue->local_insert == queue->remove +
8825 + VCHIQ_NUM_SERVICE_BULKS);
8826 + }
8827 +
8828 + bulk = &queue->bulks[BULK_INDEX(queue->local_insert)];
8829 +
8830 + bulk->mode = mode;
8831 + bulk->dir = dir;
8832 + bulk->userdata = userdata;
8833 + bulk->size = size;
8834 + bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
8835 +
8836 + if (vchiq_prepare_bulk_data(bulk, memhandle, offset, size, dir) !=
8837 + VCHIQ_SUCCESS)
8838 + goto unlock_error_exit;
8839 +
8840 + wmb();
8841 +
8842 + vchiq_log_info(vchiq_core_log_level,
8843 + "%d: bt (%d->%d) %cx %x@%x %x",
8844 + state->id,
8845 + service->localport, service->remoteport, dir_char,
8846 + size, (unsigned int)bulk->data, (unsigned int)userdata);
8847 +
8848 + if (state->is_master) {
8849 + queue->local_insert++;
8850 + if (resolve_bulks(service, queue))
8851 + request_poll(state, service,
8852 + (dir == VCHIQ_BULK_TRANSMIT) ?
8853 + VCHIQ_POLL_TXNOTIFY : VCHIQ_POLL_RXNOTIFY);
8854 + } else {
8855 + int payload[2] = { (int)bulk->data, bulk->size };
8856 + VCHIQ_ELEMENT_T element = { payload, sizeof(payload) };
8857 +
8858 + status = queue_message(state, NULL,
8859 + VCHIQ_MAKE_MSG(dir_msgtype,
8860 + service->localport, service->remoteport),
8861 + &element, 1, sizeof(payload), 1);
8862 + if (status != VCHIQ_SUCCESS) {
8863 + vchiq_complete_bulk(bulk);
8864 + goto unlock_error_exit;
8865 + }
8866 + queue->local_insert++;
8867 + }
8868 +
8869 + mutex_unlock(&service->bulk_mutex);
8870 +
8871 + vchiq_log_trace(vchiq_core_log_level,
8872 + "%d: bt:%d %cx li=%x ri=%x p=%x",
8873 + state->id,
8874 + service->localport, dir_char,
8875 + queue->local_insert, queue->remote_insert, queue->process);
8876 +
8877 +waiting:
8878 + unlock_service(service);
8879 +
8880 + status = VCHIQ_SUCCESS;
8881 +
8882 + if (bulk_waiter) {
8883 + bulk_waiter->bulk = bulk;
8884 + if (down_interruptible(&bulk_waiter->event) != 0)
8885 + status = VCHIQ_RETRY;
8886 + else if (bulk_waiter->actual == VCHIQ_BULK_ACTUAL_ABORTED)
8887 + status = VCHIQ_ERROR;
8888 + }
8889 +
8890 + return status;
8891 +
8892 +unlock_error_exit:
8893 + mutex_unlock(&service->bulk_mutex);
8894 +
8895 +error_exit:
8896 + if (service)
8897 + unlock_service(service);
8898 + return status;
8899 +}
8900 +
8901 +VCHIQ_STATUS_T
8902 +vchiq_queue_message(VCHIQ_SERVICE_HANDLE_T handle,
8903 + const VCHIQ_ELEMENT_T *elements, unsigned int count)
8904 +{
8905 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
8906 + VCHIQ_STATUS_T status = VCHIQ_ERROR;
8907 +
8908 + unsigned int size = 0;
8909 + unsigned int i;
8910 +
8911 + if (!service ||
8912 + (vchiq_check_service(service) != VCHIQ_SUCCESS))
8913 + goto error_exit;
8914 +
8915 + for (i = 0; i < (unsigned int)count; i++) {
8916 + if (elements[i].size) {
8917 + if (elements[i].data == NULL) {
8918 + VCHIQ_SERVICE_STATS_INC(service, error_count);
8919 + goto error_exit;
8920 + }
8921 + size += elements[i].size;
8922 + }
8923 + }
8924 +
8925 + if (size > VCHIQ_MAX_MSG_SIZE) {
8926 + VCHIQ_SERVICE_STATS_INC(service, error_count);
8927 + goto error_exit;
8928 + }
8929 +
8930 + switch (service->srvstate) {
8931 + case VCHIQ_SRVSTATE_OPEN:
8932 + status = queue_message(service->state, service,
8933 + VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA,
8934 + service->localport,
8935 + service->remoteport),
8936 + elements, count, size, 1);
8937 + break;
8938 + case VCHIQ_SRVSTATE_OPENSYNC:
8939 + status = queue_message_sync(service->state, service,
8940 + VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA,
8941 + service->localport,
8942 + service->remoteport),
8943 + elements, count, size, 1);
8944 + break;
8945 + default:
8946 + status = VCHIQ_ERROR;
8947 + break;
8948 + }
8949 +
8950 +error_exit:
8951 + if (service)
8952 + unlock_service(service);
8953 +
8954 + return status;
8955 +}
8956 +
8957 +void
8958 +vchiq_release_message(VCHIQ_SERVICE_HANDLE_T handle, VCHIQ_HEADER_T *header)
8959 +{
8960 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
8961 + VCHIQ_SHARED_STATE_T *remote;
8962 + VCHIQ_STATE_T *state;
8963 + int slot_index;
8964 +
8965 + if (!service)
8966 + return;
8967 +
8968 + state = service->state;
8969 + remote = state->remote;
8970 +
8971 + slot_index = SLOT_INDEX_FROM_DATA(state, (void *)header);
8972 +
8973 + if ((slot_index >= remote->slot_first) &&
8974 + (slot_index <= remote->slot_last)) {
8975 + int msgid = header->msgid;
8976 + if (msgid & VCHIQ_MSGID_CLAIMED) {
8977 + VCHIQ_SLOT_INFO_T *slot_info =
8978 + SLOT_INFO_FROM_INDEX(state, slot_index);
8979 +
8980 + release_slot(state, slot_info, header, service);
8981 + }
8982 + } else if (slot_index == remote->slot_sync)
8983 + release_message_sync(state, header);
8984 +
8985 + unlock_service(service);
8986 +}
8987 +
8988 +static void
8989 +release_message_sync(VCHIQ_STATE_T *state, VCHIQ_HEADER_T *header)
8990 +{
8991 + header->msgid = VCHIQ_MSGID_PADDING;
8992 + wmb();
8993 + remote_event_signal(&state->remote->sync_release);
8994 +}
8995 +
8996 +VCHIQ_STATUS_T
8997 +vchiq_get_peer_version(VCHIQ_SERVICE_HANDLE_T handle, short *peer_version)
8998 +{
8999 + VCHIQ_STATUS_T status = VCHIQ_ERROR;
9000 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
9001 +
9002 + if (!service ||
9003 + (vchiq_check_service(service) != VCHIQ_SUCCESS) ||
9004 + !peer_version)
9005 + goto exit;
9006 + *peer_version = service->peer_version;
9007 + status = VCHIQ_SUCCESS;
9008 +
9009 +exit:
9010 + if (service)
9011 + unlock_service(service);
9012 + return status;
9013 +}
9014 +
9015 +VCHIQ_STATUS_T
9016 +vchiq_get_config(VCHIQ_INSTANCE_T instance,
9017 + int config_size, VCHIQ_CONFIG_T *pconfig)
9018 +{
9019 + VCHIQ_CONFIG_T config;
9020 +
9021 + (void)instance;
9022 +
9023 + config.max_msg_size = VCHIQ_MAX_MSG_SIZE;
9024 + config.bulk_threshold = VCHIQ_MAX_MSG_SIZE;
9025 + config.max_outstanding_bulks = VCHIQ_NUM_SERVICE_BULKS;
9026 + config.max_services = VCHIQ_MAX_SERVICES;
9027 + config.version = VCHIQ_VERSION;
9028 + config.version_min = VCHIQ_VERSION_MIN;
9029 +
9030 + if (config_size > sizeof(VCHIQ_CONFIG_T))
9031 + return VCHIQ_ERROR;
9032 +
9033 + memcpy(pconfig, &config,
9034 + min(config_size, (int)(sizeof(VCHIQ_CONFIG_T))));
9035 +
9036 + return VCHIQ_SUCCESS;
9037 +}
9038 +
9039 +VCHIQ_STATUS_T
9040 +vchiq_set_service_option(VCHIQ_SERVICE_HANDLE_T handle,
9041 + VCHIQ_SERVICE_OPTION_T option, int value)
9042 +{
9043 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
9044 + VCHIQ_STATUS_T status = VCHIQ_ERROR;
9045 +
9046 + if (service) {
9047 + switch (option) {
9048 + case VCHIQ_SERVICE_OPTION_AUTOCLOSE:
9049 + service->auto_close = value;
9050 + status = VCHIQ_SUCCESS;
9051 + break;
9052 +
9053 + case VCHIQ_SERVICE_OPTION_SLOT_QUOTA: {
9054 + VCHIQ_SERVICE_QUOTA_T *service_quota =
9055 + &service->state->service_quotas[
9056 + service->localport];
9057 + if (value == 0)
9058 + value = service->state->default_slot_quota;
9059 + if ((value >= service_quota->slot_use_count) &&
9060 + (value < (unsigned short)~0)) {
9061 + service_quota->slot_quota = value;
9062 + if ((value >= service_quota->slot_use_count) &&
9063 + (service_quota->message_quota >=
9064 + service_quota->message_use_count)) {
9065 + /* Signal the service that it may have
9066 + ** dropped below its quota */
9067 + up(&service_quota->quota_event);
9068 + }
9069 + status = VCHIQ_SUCCESS;
9070 + }
9071 + } break;
9072 +
9073 + case VCHIQ_SERVICE_OPTION_MESSAGE_QUOTA: {
9074 + VCHIQ_SERVICE_QUOTA_T *service_quota =
9075 + &service->state->service_quotas[
9076 + service->localport];
9077 + if (value == 0)
9078 + value = service->state->default_message_quota;
9079 + if ((value >= service_quota->message_use_count) &&
9080 + (value < (unsigned short)~0)) {
9081 + service_quota->message_quota = value;
9082 + if ((value >=
9083 + service_quota->message_use_count) &&
9084 + (service_quota->slot_quota >=
9085 + service_quota->slot_use_count))
9086 + /* Signal the service that it may have
9087 + ** dropped below its quota */
9088 + up(&service_quota->quota_event);
9089 + status = VCHIQ_SUCCESS;
9090 + }
9091 + } break;
9092 +
9093 + case VCHIQ_SERVICE_OPTION_SYNCHRONOUS:
9094 + if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) ||
9095 + (service->srvstate ==
9096 + VCHIQ_SRVSTATE_LISTENING)) {
9097 + service->sync = value;
9098 + status = VCHIQ_SUCCESS;
9099 + }
9100 + break;
9101 +
9102 + default:
9103 + break;
9104 + }
9105 + unlock_service(service);
9106 + }
9107 +
9108 + return status;
9109 +}
9110 +
9111 +void
9112 +vchiq_dump_shared_state(void *dump_context, VCHIQ_STATE_T *state,
9113 + VCHIQ_SHARED_STATE_T *shared, const char *label)
9114 +{
9115 + static const char *const debug_names[] = {
9116 + "<entries>",
9117 + "SLOT_HANDLER_COUNT",
9118 + "SLOT_HANDLER_LINE",
9119 + "PARSE_LINE",
9120 + "PARSE_HEADER",
9121 + "PARSE_MSGID",
9122 + "AWAIT_COMPLETION_LINE",
9123 + "DEQUEUE_MESSAGE_LINE",
9124 + "SERVICE_CALLBACK_LINE",
9125 + "MSG_QUEUE_FULL_COUNT",
9126 + "COMPLETION_QUEUE_FULL_COUNT"
9127 + };
9128 + int i;
9129 +
9130 + char buf[80];
9131 + int len;
9132 + len = snprintf(buf, sizeof(buf),
9133 + " %s: slots %d-%d tx_pos=%x recycle=%x",
9134 + label, shared->slot_first, shared->slot_last,
9135 + shared->tx_pos, shared->slot_queue_recycle);
9136 + vchiq_dump(dump_context, buf, len + 1);
9137 +
9138 + len = snprintf(buf, sizeof(buf),
9139 + " Slots claimed:");
9140 + vchiq_dump(dump_context, buf, len + 1);
9141 +
9142 + for (i = shared->slot_first; i <= shared->slot_last; i++) {
9143 + VCHIQ_SLOT_INFO_T slot_info = *SLOT_INFO_FROM_INDEX(state, i);
9144 + if (slot_info.use_count != slot_info.release_count) {
9145 + len = snprintf(buf, sizeof(buf),
9146 + " %d: %d/%d", i, slot_info.use_count,
9147 + slot_info.release_count);
9148 + vchiq_dump(dump_context, buf, len + 1);
9149 + }
9150 + }
9151 +
9152 + for (i = 1; i < shared->debug[DEBUG_ENTRIES]; i++) {
9153 + len = snprintf(buf, sizeof(buf), " DEBUG: %s = %d(%x)",
9154 + debug_names[i], shared->debug[i], shared->debug[i]);
9155 + vchiq_dump(dump_context, buf, len + 1);
9156 + }
9157 +}
9158 +
9159 +void
9160 +vchiq_dump_state(void *dump_context, VCHIQ_STATE_T *state)
9161 +{
9162 + char buf[80];
9163 + int len;
9164 + int i;
9165 +
9166 + len = snprintf(buf, sizeof(buf), "State %d: %s", state->id,
9167 + conn_state_names[state->conn_state]);
9168 + vchiq_dump(dump_context, buf, len + 1);
9169 +
9170 + len = snprintf(buf, sizeof(buf),
9171 + " tx_pos=%x(@%x), rx_pos=%x(@%x)",
9172 + state->local->tx_pos,
9173 + (uint32_t)state->tx_data +
9174 + (state->local_tx_pos & VCHIQ_SLOT_MASK),
9175 + state->rx_pos,
9176 + (uint32_t)state->rx_data +
9177 + (state->rx_pos & VCHIQ_SLOT_MASK));
9178 + vchiq_dump(dump_context, buf, len + 1);
9179 +
9180 + len = snprintf(buf, sizeof(buf),
9181 + " Version: %d (min %d)",
9182 + VCHIQ_VERSION, VCHIQ_VERSION_MIN);
9183 + vchiq_dump(dump_context, buf, len + 1);
9184 +
9185 + if (VCHIQ_ENABLE_STATS) {
9186 + len = snprintf(buf, sizeof(buf),
9187 + " Stats: ctrl_tx_count=%d, ctrl_rx_count=%d, "
9188 + "error_count=%d",
9189 + state->stats.ctrl_tx_count, state->stats.ctrl_rx_count,
9190 + state->stats.error_count);
9191 + vchiq_dump(dump_context, buf, len + 1);
9192 + }
9193 +
9194 + len = snprintf(buf, sizeof(buf),
9195 + " Slots: %d available (%d data), %d recyclable, %d stalls "
9196 + "(%d data)",
9197 + ((state->slot_queue_available * VCHIQ_SLOT_SIZE) -
9198 + state->local_tx_pos) / VCHIQ_SLOT_SIZE,
9199 + state->data_quota - state->data_use_count,
9200 + state->local->slot_queue_recycle - state->slot_queue_available,
9201 + state->stats.slot_stalls, state->stats.data_stalls);
9202 + vchiq_dump(dump_context, buf, len + 1);
9203 +
9204 + vchiq_dump_platform_state(dump_context);
9205 +
9206 + vchiq_dump_shared_state(dump_context, state, state->local, "Local");
9207 + vchiq_dump_shared_state(dump_context, state, state->remote, "Remote");
9208 +
9209 + vchiq_dump_platform_instances(dump_context);
9210 +
9211 + for (i = 0; i < state->unused_service; i++) {
9212 + VCHIQ_SERVICE_T *service = find_service_by_port(state, i);
9213 +
9214 + if (service) {
9215 + vchiq_dump_service_state(dump_context, service);
9216 + unlock_service(service);
9217 + }
9218 + }
9219 +}
9220 +
9221 +void
9222 +vchiq_dump_service_state(void *dump_context, VCHIQ_SERVICE_T *service)
9223 +{
9224 + char buf[80];
9225 + int len;
9226 +
9227 + len = snprintf(buf, sizeof(buf), "Service %d: %s (ref %u)",
9228 + service->localport, srvstate_names[service->srvstate],
9229 +		service->ref_count - 1); /* Don't include the lock just taken */
9230 +
9231 + if (service->srvstate != VCHIQ_SRVSTATE_FREE) {
9232 + char remoteport[30];
9233 + VCHIQ_SERVICE_QUOTA_T *service_quota =
9234 + &service->state->service_quotas[service->localport];
9235 + int fourcc = service->base.fourcc;
9236 + int tx_pending, rx_pending;
9237 + if (service->remoteport != VCHIQ_PORT_FREE) {
9238 + int len2 = snprintf(remoteport, sizeof(remoteport),
9239 + "%d", service->remoteport);
9240 + if (service->public_fourcc != VCHIQ_FOURCC_INVALID)
9241 + snprintf(remoteport + len2,
9242 + sizeof(remoteport) - len2,
9243 + " (client %x)", service->client_id);
9244 + } else
9245 + strcpy(remoteport, "n/a");
9246 +
9247 + len += snprintf(buf + len, sizeof(buf) - len,
9248 + " '%c%c%c%c' remote %s (msg use %d/%d, slot use %d/%d)",
9249 + VCHIQ_FOURCC_AS_4CHARS(fourcc),
9250 + remoteport,
9251 + service_quota->message_use_count,
9252 + service_quota->message_quota,
9253 + service_quota->slot_use_count,
9254 + service_quota->slot_quota);
9255 +
9256 + vchiq_dump(dump_context, buf, len + 1);
9257 +
9258 + tx_pending = service->bulk_tx.local_insert -
9259 + service->bulk_tx.remote_insert;
9260 +
9261 + rx_pending = service->bulk_rx.local_insert -
9262 + service->bulk_rx.remote_insert;
9263 +
9264 + len = snprintf(buf, sizeof(buf),
9265 + " Bulk: tx_pending=%d (size %d),"
9266 + " rx_pending=%d (size %d)",
9267 + tx_pending,
9268 + tx_pending ? service->bulk_tx.bulks[
9269 + BULK_INDEX(service->bulk_tx.remove)].size : 0,
9270 + rx_pending,
9271 + rx_pending ? service->bulk_rx.bulks[
9272 + BULK_INDEX(service->bulk_rx.remove)].size : 0);
9273 +
9274 + if (VCHIQ_ENABLE_STATS) {
9275 + vchiq_dump(dump_context, buf, len + 1);
9276 +
9277 + len = snprintf(buf, sizeof(buf),
9278 + " Ctrl: tx_count=%d, tx_bytes=%llu, "
9279 + "rx_count=%d, rx_bytes=%llu",
9280 + service->stats.ctrl_tx_count,
9281 + service->stats.ctrl_tx_bytes,
9282 + service->stats.ctrl_rx_count,
9283 + service->stats.ctrl_rx_bytes);
9284 + vchiq_dump(dump_context, buf, len + 1);
9285 +
9286 + len = snprintf(buf, sizeof(buf),
9287 + " Bulk: tx_count=%d, tx_bytes=%llu, "
9288 + "rx_count=%d, rx_bytes=%llu",
9289 + service->stats.bulk_tx_count,
9290 + service->stats.bulk_tx_bytes,
9291 + service->stats.bulk_rx_count,
9292 + service->stats.bulk_rx_bytes);
9293 + vchiq_dump(dump_context, buf, len + 1);
9294 +
9295 + len = snprintf(buf, sizeof(buf),
9296 + " %d quota stalls, %d slot stalls, "
9297 + "%d bulk stalls, %d aborted, %d errors",
9298 + service->stats.quota_stalls,
9299 + service->stats.slot_stalls,
9300 + service->stats.bulk_stalls,
9301 + service->stats.bulk_aborted_count,
9302 + service->stats.error_count);
9303 + }
9304 + }
9305 +
9306 + vchiq_dump(dump_context, buf, len + 1);
9307 +
9308 + if (service->srvstate != VCHIQ_SRVSTATE_FREE)
9309 + vchiq_dump_platform_service_state(dump_context, service);
9310 +}
9311 +
9312 +
9313 +void
9314 +vchiq_loud_error_header(void)
9315 +{
9316 + vchiq_log_error(vchiq_core_log_level,
9317 + "============================================================"
9318 + "================");
9319 + vchiq_log_error(vchiq_core_log_level,
9320 + "============================================================"
9321 + "================");
9322 + vchiq_log_error(vchiq_core_log_level, "=====");
9323 +}
9324 +
9325 +void
9326 +vchiq_loud_error_footer(void)
9327 +{
9328 + vchiq_log_error(vchiq_core_log_level, "=====");
9329 + vchiq_log_error(vchiq_core_log_level,
9330 + "============================================================"
9331 + "================");
9332 + vchiq_log_error(vchiq_core_log_level,
9333 + "============================================================"
9334 + "================");
9335 +}
9336 +
9337 +
9338 +VCHIQ_STATUS_T vchiq_send_remote_use(VCHIQ_STATE_T *state)
9339 +{
9340 + VCHIQ_STATUS_T status = VCHIQ_RETRY;
9341 + if (state->conn_state != VCHIQ_CONNSTATE_DISCONNECTED)
9342 + status = queue_message(state, NULL,
9343 + VCHIQ_MAKE_MSG(VCHIQ_MSG_REMOTE_USE, 0, 0),
9344 + NULL, 0, 0, 0);
9345 + return status;
9346 +}
9347 +
9348 +VCHIQ_STATUS_T vchiq_send_remote_release(VCHIQ_STATE_T *state)
9349 +{
9350 + VCHIQ_STATUS_T status = VCHIQ_RETRY;
9351 + if (state->conn_state != VCHIQ_CONNSTATE_DISCONNECTED)
9352 + status = queue_message(state, NULL,
9353 + VCHIQ_MAKE_MSG(VCHIQ_MSG_REMOTE_RELEASE, 0, 0),
9354 + NULL, 0, 0, 0);
9355 + return status;
9356 +}
9357 +
9358 +VCHIQ_STATUS_T vchiq_send_remote_use_active(VCHIQ_STATE_T *state)
9359 +{
9360 + VCHIQ_STATUS_T status = VCHIQ_RETRY;
9361 + if (state->conn_state != VCHIQ_CONNSTATE_DISCONNECTED)
9362 + status = queue_message(state, NULL,
9363 + VCHIQ_MAKE_MSG(VCHIQ_MSG_REMOTE_USE_ACTIVE, 0, 0),
9364 + NULL, 0, 0, 0);
9365 + return status;
9366 +}
9367 +
9368 +void vchiq_log_dump_mem(const char *label, uint32_t addr, const void *voidMem,
9369 + size_t numBytes)
9370 +{
9371 + const uint8_t *mem = (const uint8_t *)voidMem;
9372 + size_t offset;
9373 + char lineBuf[100];
9374 + char *s;
9375 +
9376 + while (numBytes > 0) {
9377 + s = lineBuf;
9378 +
9379 + for (offset = 0; offset < 16; offset++) {
9380 + if (offset < numBytes)
9381 + s += snprintf(s, 4, "%02x ", mem[offset]);
9382 + else
9383 + s += snprintf(s, 4, " ");
9384 + }
9385 +
9386 + for (offset = 0; offset < 16; offset++) {
9387 + if (offset < numBytes) {
9388 + uint8_t ch = mem[offset];
9389 +
9390 + if ((ch < ' ') || (ch > '~'))
9391 + ch = '.';
9392 + *s++ = (char)ch;
9393 + }
9394 + }
9395 + *s++ = '\0';
9396 +
9397 + if ((label != NULL) && (*label != '\0'))
9398 + vchiq_log_trace(VCHIQ_LOG_TRACE,
9399 + "%s: %08x: %s", label, addr, lineBuf);
9400 + else
9401 + vchiq_log_trace(VCHIQ_LOG_TRACE,
9402 + "%08x: %s", addr, lineBuf);
9403 +
9404 + addr += 16;
9405 + mem += 16;
9406 + if (numBytes > 16)
9407 + numBytes -= 16;
9408 + else
9409 + numBytes = 0;
9410 + }
9411 +}
9412 diff --git a/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.h b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.h
9413 new file mode 100644
9414 index 0000000..47cdf27
9415 --- /dev/null
9416 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.h
9417 @@ -0,0 +1,706 @@
9418 +/**
9419 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
9420 + *
9421 + * Redistribution and use in source and binary forms, with or without
9422 + * modification, are permitted provided that the following conditions
9423 + * are met:
9424 + * 1. Redistributions of source code must retain the above copyright
9425 + * notice, this list of conditions, and the following disclaimer,
9426 + * without modification.
9427 + * 2. Redistributions in binary form must reproduce the above copyright
9428 + * notice, this list of conditions and the following disclaimer in the
9429 + * documentation and/or other materials provided with the distribution.
9430 + * 3. The names of the above-listed copyright holders may not be used
9431 + * to endorse or promote products derived from this software without
9432 + * specific prior written permission.
9433 + *
9434 + * ALTERNATIVELY, this software may be distributed under the terms of the
9435 + * GNU General Public License ("GPL") version 2, as published by the Free
9436 + * Software Foundation.
9437 + *
9438 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
9439 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
9440 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
9441 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
9442 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
9443 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
9444 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
9445 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
9446 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
9447 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
9448 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
9449 + */
9450 +
9451 +#ifndef VCHIQ_CORE_H
9452 +#define VCHIQ_CORE_H
9453 +
9454 +#include <linux/mutex.h>
9455 +#include <linux/semaphore.h>
9456 +#include <linux/kthread.h>
9457 +
9458 +#include "vchiq_cfg.h"
9459 +
9460 +#include "vchiq.h"
9461 +
9462 +/* Run time control of log level, based on KERN_XXX level. */
9463 +#define VCHIQ_LOG_DEFAULT 4
9464 +#define VCHIQ_LOG_ERROR 3
9465 +#define VCHIQ_LOG_WARNING 4
9466 +#define VCHIQ_LOG_INFO 6
9467 +#define VCHIQ_LOG_TRACE 7
9468 +
9469 +#define VCHIQ_LOG_PREFIX KERN_INFO "vchiq: "
9470 +
9471 +#ifndef vchiq_log_error
9472 +#define vchiq_log_error(cat, fmt, ...) \
9473 + do { if (cat >= VCHIQ_LOG_ERROR) \
9474 + printk(VCHIQ_LOG_PREFIX fmt "\n", ##__VA_ARGS__); } while (0)
9475 +#endif
9476 +#ifndef vchiq_log_warning
9477 +#define vchiq_log_warning(cat, fmt, ...) \
9478 + do { if (cat >= VCHIQ_LOG_WARNING) \
9479 + printk(VCHIQ_LOG_PREFIX fmt "\n", ##__VA_ARGS__); } while (0)
9480 +#endif
9481 +#ifndef vchiq_log_info
9482 +#define vchiq_log_info(cat, fmt, ...) \
9483 + do { if (cat >= VCHIQ_LOG_INFO) \
9484 + printk(VCHIQ_LOG_PREFIX fmt "\n", ##__VA_ARGS__); } while (0)
9485 +#endif
9486 +#ifndef vchiq_log_trace
9487 +#define vchiq_log_trace(cat, fmt, ...) \
9488 + do { if (cat >= VCHIQ_LOG_TRACE) \
9489 + printk(VCHIQ_LOG_PREFIX fmt "\n", ##__VA_ARGS__); } while (0)
9490 +#endif
9491 +
9492 +#define vchiq_loud_error(...) \
9493 + vchiq_log_error(vchiq_core_log_level, "===== " __VA_ARGS__)
9494 +
9495 +#ifndef vchiq_static_assert
9496 +#define vchiq_static_assert(cond) __attribute__((unused)) \
9497 + extern int vchiq_static_assert[(cond) ? 1 : -1]
9498 +#endif
9499 +
9500 +#define IS_POW2(x) (x && ((x & (x - 1)) == 0))
9501 +
9502 +/* Ensure that the slot size and maximum number of slots are powers of 2 */
9503 +vchiq_static_assert(IS_POW2(VCHIQ_SLOT_SIZE));
9504 +vchiq_static_assert(IS_POW2(VCHIQ_MAX_SLOTS));
9505 +vchiq_static_assert(IS_POW2(VCHIQ_MAX_SLOTS_PER_SIDE));
9506 +
9507 +#define VCHIQ_SLOT_MASK (VCHIQ_SLOT_SIZE - 1)
9508 +#define VCHIQ_SLOT_QUEUE_MASK (VCHIQ_MAX_SLOTS_PER_SIDE - 1)
9509 +#define VCHIQ_SLOT_ZERO_SLOTS ((sizeof(VCHIQ_SLOT_ZERO_T) + \
9510 + VCHIQ_SLOT_SIZE - 1) / VCHIQ_SLOT_SIZE)
9511 +
9512 +#define VCHIQ_MSG_PADDING 0 /* - */
9513 +#define VCHIQ_MSG_CONNECT 1 /* - */
9514 +#define VCHIQ_MSG_OPEN 2 /* + (srcport, -), fourcc, client_id */
9515 +#define VCHIQ_MSG_OPENACK 3 /* + (srcport, dstport) */
9516 +#define VCHIQ_MSG_CLOSE 4 /* + (srcport, dstport) */
9517 +#define VCHIQ_MSG_DATA 5 /* + (srcport, dstport) */
9518 +#define VCHIQ_MSG_BULK_RX 6 /* + (srcport, dstport), data, size */
9519 +#define VCHIQ_MSG_BULK_TX 7 /* + (srcport, dstport), data, size */
9520 +#define VCHIQ_MSG_BULK_RX_DONE 8 /* + (srcport, dstport), actual */
9521 +#define VCHIQ_MSG_BULK_TX_DONE 9 /* + (srcport, dstport), actual */
9522 +#define VCHIQ_MSG_PAUSE 10 /* - */
9523 +#define VCHIQ_MSG_RESUME 11 /* - */
9524 +#define VCHIQ_MSG_REMOTE_USE 12 /* - */
9525 +#define VCHIQ_MSG_REMOTE_RELEASE 13 /* - */
9526 +#define VCHIQ_MSG_REMOTE_USE_ACTIVE 14 /* - */
9527 +
9528 +#define VCHIQ_PORT_MAX (VCHIQ_MAX_SERVICES - 1)
9529 +#define VCHIQ_PORT_FREE 0x1000
9530 +#define VCHIQ_PORT_IS_VALID(port) (port < VCHIQ_PORT_FREE)
9531 +#define VCHIQ_MAKE_MSG(type, srcport, dstport) \
9532 + ((type<<24) | (srcport<<12) | (dstport<<0))
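+/* e.g. VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA, 3, 7) == (5<<24)|(3<<12)|(7<<0)
+** == 0x05003007, from which VCHIQ_MSG_SRCPORT() recovers 3 and
+** VCHIQ_MSG_DSTPORT() recovers 7. */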
9533 +#define VCHIQ_MSG_TYPE(msgid) ((unsigned int)msgid >> 24)
9534 +#define VCHIQ_MSG_SRCPORT(msgid) \
9535 + (unsigned short)(((unsigned int)msgid >> 12) & 0xfff)
9536 +#define VCHIQ_MSG_DSTPORT(msgid) \
9537 + ((unsigned short)msgid & 0xfff)
9538 +
9539 +#define VCHIQ_FOURCC_AS_4CHARS(fourcc) \
9540 + ((fourcc) >> 24) & 0xff, \
9541 + ((fourcc) >> 16) & 0xff, \
9542 + ((fourcc) >> 8) & 0xff, \
9543 + (fourcc) & 0xff
9544 +
9545 +/* Ensure the fields are wide enough */
9546 +vchiq_static_assert(VCHIQ_MSG_SRCPORT(VCHIQ_MAKE_MSG(0, 0, VCHIQ_PORT_MAX))
9547 + == 0);
9548 +vchiq_static_assert(VCHIQ_MSG_TYPE(VCHIQ_MAKE_MSG(0, VCHIQ_PORT_MAX, 0)) == 0);
9549 +vchiq_static_assert((unsigned int)VCHIQ_PORT_MAX <
9550 + (unsigned int)VCHIQ_PORT_FREE);
9551 +
9552 +#define VCHIQ_MSGID_PADDING VCHIQ_MAKE_MSG(VCHIQ_MSG_PADDING, 0, 0)
9553 +#define VCHIQ_MSGID_CLAIMED 0x40000000
9554 +
9555 +#define VCHIQ_FOURCC_INVALID 0x00000000
9556 +#define VCHIQ_FOURCC_IS_LEGAL(fourcc) (fourcc != VCHIQ_FOURCC_INVALID)
9557 +
9558 +#define VCHIQ_BULK_ACTUAL_ABORTED -1
9559 +
9560 +typedef uint32_t BITSET_T;
9561 +
9562 +vchiq_static_assert((sizeof(BITSET_T) * 8) == 32);
9563 +
9564 +#define BITSET_SIZE(b) ((b + 31) >> 5)
9565 +#define BITSET_WORD(b) (b >> 5)
9566 +#define BITSET_BIT(b) (1 << (b & 31))
9567 +#define BITSET_ZERO(bs) memset(bs, 0, sizeof(bs))
9568 +#define BITSET_IS_SET(bs, b) (bs[BITSET_WORD(b)] & BITSET_BIT(b))
9569 +#define BITSET_SET(bs, b) (bs[BITSET_WORD(b)] |= BITSET_BIT(b))
9570 +#define BITSET_CLR(bs, b) (bs[BITSET_WORD(b)] &= ~BITSET_BIT(b))
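+/* e.g. bit 40 lives in word BITSET_WORD(40) == 1 under mask
+** BITSET_BIT(40) == 1 << 8. */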
9571 +
9572 +#if VCHIQ_ENABLE_STATS
9573 +#define VCHIQ_STATS_INC(state, stat) (state->stats. stat++)
9574 +#define VCHIQ_SERVICE_STATS_INC(service, stat) (service->stats. stat++)
9575 +#define VCHIQ_SERVICE_STATS_ADD(service, stat, addend) \
9576 + (service->stats. stat += addend)
9577 +#else
9578 +#define VCHIQ_STATS_INC(state, stat) ((void)0)
9579 +#define VCHIQ_SERVICE_STATS_INC(service, stat) ((void)0)
9580 +#define VCHIQ_SERVICE_STATS_ADD(service, stat, addend) ((void)0)
9581 +#endif
9582 +
9583 +enum {
9584 + DEBUG_ENTRIES,
9585 +#if VCHIQ_ENABLE_DEBUG
9586 + DEBUG_SLOT_HANDLER_COUNT,
9587 + DEBUG_SLOT_HANDLER_LINE,
9588 + DEBUG_PARSE_LINE,
9589 + DEBUG_PARSE_HEADER,
9590 + DEBUG_PARSE_MSGID,
9591 + DEBUG_AWAIT_COMPLETION_LINE,
9592 + DEBUG_DEQUEUE_MESSAGE_LINE,
9593 + DEBUG_SERVICE_CALLBACK_LINE,
9594 + DEBUG_MSG_QUEUE_FULL_COUNT,
9595 + DEBUG_COMPLETION_QUEUE_FULL_COUNT,
9596 +#endif
9597 + DEBUG_MAX
9598 +};
9599 +
9600 +#if VCHIQ_ENABLE_DEBUG
9601 +
9602 +#define DEBUG_INITIALISE(local) int *debug_ptr = (local)->debug;
9603 +#define DEBUG_TRACE(d) \
9604 + do { debug_ptr[DEBUG_ ## d] = __LINE__; dsb(); } while (0)
9605 +#define DEBUG_VALUE(d, v) \
9606 + do { debug_ptr[DEBUG_ ## d] = (v); dsb(); } while (0)
9607 +#define DEBUG_COUNT(d) \
9608 + do { debug_ptr[DEBUG_ ## d]++; dsb(); } while (0)
9609 +
9610 +#else /* VCHIQ_ENABLE_DEBUG */
9611 +
9612 +#define DEBUG_INITIALISE(local)
9613 +#define DEBUG_TRACE(d)
9614 +#define DEBUG_VALUE(d, v)
9615 +#define DEBUG_COUNT(d)
9616 +
9617 +#endif /* VCHIQ_ENABLE_DEBUG */
9618 +
9619 +typedef enum {
9620 + VCHIQ_CONNSTATE_DISCONNECTED,
9621 + VCHIQ_CONNSTATE_CONNECTING,
9622 + VCHIQ_CONNSTATE_CONNECTED,
9623 + VCHIQ_CONNSTATE_PAUSING,
9624 + VCHIQ_CONNSTATE_PAUSE_SENT,
9625 + VCHIQ_CONNSTATE_PAUSED,
9626 + VCHIQ_CONNSTATE_RESUMING,
9627 + VCHIQ_CONNSTATE_PAUSE_TIMEOUT,
9628 + VCHIQ_CONNSTATE_RESUME_TIMEOUT
9629 +} VCHIQ_CONNSTATE_T;
9630 +
9631 +enum {
9632 + VCHIQ_SRVSTATE_FREE,
9633 + VCHIQ_SRVSTATE_HIDDEN,
9634 + VCHIQ_SRVSTATE_LISTENING,
9635 + VCHIQ_SRVSTATE_OPENING,
9636 + VCHIQ_SRVSTATE_OPEN,
9637 + VCHIQ_SRVSTATE_OPENSYNC,
9638 + VCHIQ_SRVSTATE_CLOSESENT,
9639 + VCHIQ_SRVSTATE_CLOSERECVD,
9640 + VCHIQ_SRVSTATE_CLOSEWAIT,
9641 + VCHIQ_SRVSTATE_CLOSED
9642 +};
9643 +
9644 +enum {
9645 + VCHIQ_POLL_TERMINATE,
9646 + VCHIQ_POLL_REMOVE,
9647 + VCHIQ_POLL_TXNOTIFY,
9648 + VCHIQ_POLL_RXNOTIFY,
9649 + VCHIQ_POLL_COUNT
9650 +};
9651 +
9652 +typedef enum {
9653 + VCHIQ_BULK_TRANSMIT,
9654 + VCHIQ_BULK_RECEIVE
9655 +} VCHIQ_BULK_DIR_T;
9656 +
9657 +typedef void (*VCHIQ_USERDATA_TERM_T)(void *userdata);
9658 +
9659 +typedef struct vchiq_bulk_struct {
9660 + short mode;
9661 + short dir;
9662 + void *userdata;
9663 + VCHI_MEM_HANDLE_T handle;
9664 + void *data;
9665 + int size;
9666 + void *remote_data;
9667 + int remote_size;
9668 + int actual;
9669 +} VCHIQ_BULK_T;
9670 +
9671 +typedef struct vchiq_bulk_queue_struct {
9672 + int local_insert; /* Where to insert the next local bulk */
9673 + int remote_insert; /* Where to insert the next remote bulk (master) */
9674 + int process; /* Bulk to transfer next */
9675 + int remote_notify; /* Bulk to notify the remote client of next (mstr) */
9676 + int remove; /* Bulk to notify the local client of, and remove,
9677 + ** next */
9678 + VCHIQ_BULK_T bulks[VCHIQ_NUM_SERVICE_BULKS];
9679 +} VCHIQ_BULK_QUEUE_T;
9680 +
9681 +typedef struct remote_event_struct {
9682 + int armed;
9683 + int fired;
9684 + struct semaphore *event;
9685 +} REMOTE_EVENT_T;
9686 +
9687 +typedef struct opaque_platform_state_t *VCHIQ_PLATFORM_STATE_T;
9688 +
9689 +typedef struct vchiq_state_struct VCHIQ_STATE_T;
9690 +
9691 +typedef struct vchiq_slot_struct {
9692 + char data[VCHIQ_SLOT_SIZE];
9693 +} VCHIQ_SLOT_T;
9694 +
9695 +typedef struct vchiq_slot_info_struct {
9696 + /* Use two counters rather than one to avoid the need for a mutex. */
9697 + short use_count;
9698 + short release_count;
9699 +} VCHIQ_SLOT_INFO_T;
9700 +
9701 +typedef struct vchiq_service_struct {
9702 + VCHIQ_SERVICE_BASE_T base;
9703 + VCHIQ_SERVICE_HANDLE_T handle;
9704 + unsigned int ref_count;
9705 + int srvstate;
9706 + VCHIQ_USERDATA_TERM_T userdata_term;
9707 + unsigned int localport;
9708 + unsigned int remoteport;
9709 + int public_fourcc;
9710 + int client_id;
9711 + char auto_close;
9712 + char sync;
9713 + char closing;
9714 + atomic_t poll_flags;
9715 + short version;
9716 + short version_min;
9717 + short peer_version;
9718 +
9719 + VCHIQ_STATE_T *state;
9720 + VCHIQ_INSTANCE_T instance;
9721 +
9722 + int service_use_count;
9723 +
9724 + VCHIQ_BULK_QUEUE_T bulk_tx;
9725 + VCHIQ_BULK_QUEUE_T bulk_rx;
9726 +
9727 + struct semaphore remove_event;
9728 + struct semaphore bulk_remove_event;
9729 + struct mutex bulk_mutex;
9730 +
9731 + struct service_stats_struct {
9732 + int quota_stalls;
9733 + int slot_stalls;
9734 + int bulk_stalls;
9735 + int error_count;
9736 + int ctrl_tx_count;
9737 + int ctrl_rx_count;
9738 + int bulk_tx_count;
9739 + int bulk_rx_count;
9740 + int bulk_aborted_count;
9741 + uint64_t ctrl_tx_bytes;
9742 + uint64_t ctrl_rx_bytes;
9743 + uint64_t bulk_tx_bytes;
9744 + uint64_t bulk_rx_bytes;
9745 + } stats;
9746 +} VCHIQ_SERVICE_T;
9747 +
9748 +/* The quota information is outside VCHIQ_SERVICE_T so that it can be
9749 + statically allocated, since for accounting reasons a service's slot
9750 + usage is carried over between users of the same port number.
9751 + */
9752 +typedef struct vchiq_service_quota_struct {
9753 + unsigned short slot_quota;
9754 + unsigned short slot_use_count;
9755 + unsigned short message_quota;
9756 + unsigned short message_use_count;
9757 + struct semaphore quota_event;
9758 + int previous_tx_index;
9759 +} VCHIQ_SERVICE_QUOTA_T;
9760 +
9761 +typedef struct vchiq_shared_state_struct {
9762 +
9763 + /* A non-zero value here indicates that the content is valid. */
9764 + int initialised;
9765 +
9766 + /* The first and last (inclusive) slots allocated to the owner. */
9767 + int slot_first;
9768 + int slot_last;
9769 +
9770 + /* The slot allocated to synchronous messages from the owner. */
9771 + int slot_sync;
9772 +
9773 + /* Signalling this event indicates that the owner's slot handler
9774 + ** thread should run. */
9775 + REMOTE_EVENT_T trigger;
9776 +
9777 + /* Indicates the byte position within the stream where the next message
9778 + ** will be written. The least significant bits are an index into the
9779 + ** slot. The next bits are the index of the slot in slot_queue. */
9780 + int tx_pos;
9781 +
9782 + /* This event should be signalled when a slot is recycled. */
9783 + REMOTE_EVENT_T recycle;
9784 +
9785 + /* The slot_queue index where the next recycled slot will be written. */
9786 + int slot_queue_recycle;
9787 +
9788 + /* This event should be signalled when a synchronous message is sent. */
9789 + REMOTE_EVENT_T sync_trigger;
9790 +
9791 + /* This event should be signalled when a synchronous message has been
9792 + ** released. */
9793 + REMOTE_EVENT_T sync_release;
9794 +
9795 + /* A circular buffer of slot indexes. */
9796 + int slot_queue[VCHIQ_MAX_SLOTS_PER_SIDE];
9797 +
9798 + /* Debugging state */
9799 + int debug[DEBUG_MAX];
9800 +} VCHIQ_SHARED_STATE_T;
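+/* Editorial example, not part of the original header: following the tx_pos
+ * comment above, and given that VCHIQ_SLOT_SIZE (4096 in vchiq_if.h) is a
+ * power of two, a stream position splits as
+ *
+ *        offset_in_slot   = pos & (VCHIQ_SLOT_SIZE - 1);
+ *        slot_queue_index = pos / VCHIQ_SLOT_SIZE;   (possibly masked by the
+ *                                                     queue length when used)
+ *
+ * so, illustratively, pos == 0x3050 refers to byte 0x50 of the slot recorded
+ * at slot_queue index 3.
+ */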
9801 +
9802 +typedef struct vchiq_slot_zero_struct {
9803 + int magic;
9804 + short version;
9805 + short version_min;
9806 + int slot_zero_size;
9807 + int slot_size;
9808 + int max_slots;
9809 + int max_slots_per_side;
9810 + int platform_data[2];
9811 + VCHIQ_SHARED_STATE_T master;
9812 + VCHIQ_SHARED_STATE_T slave;
9813 + VCHIQ_SLOT_INFO_T slots[VCHIQ_MAX_SLOTS];
9814 +} VCHIQ_SLOT_ZERO_T;
9815 +
9816 +struct vchiq_state_struct {
9817 + int id;
9818 + int initialised;
9819 + VCHIQ_CONNSTATE_T conn_state;
9820 + int is_master;
9821 +
9822 + VCHIQ_SHARED_STATE_T *local;
9823 + VCHIQ_SHARED_STATE_T *remote;
9824 + VCHIQ_SLOT_T *slot_data;
9825 +
9826 + unsigned short default_slot_quota;
9827 + unsigned short default_message_quota;
9828 +
9829 + /* Event indicating connect message received */
9830 + struct semaphore connect;
9831 +
9832 + /* Mutex protecting services */
9833 + struct mutex mutex;
9834 + VCHIQ_INSTANCE_T *instance;
9835 +
9836 + /* Processes incoming messages */
9837 + struct task_struct *slot_handler_thread;
9838 +
9839 + /* Processes recycled slots */
9840 + struct task_struct *recycle_thread;
9841 +
9842 + /* Processes synchronous messages */
9843 + struct task_struct *sync_thread;
9844 +
9845 + /* Local implementation of the trigger remote event */
9846 + struct semaphore trigger_event;
9847 +
9848 + /* Local implementation of the recycle remote event */
9849 + struct semaphore recycle_event;
9850 +
9851 + /* Local implementation of the sync trigger remote event */
9852 + struct semaphore sync_trigger_event;
9853 +
9854 + /* Local implementation of the sync release remote event */
9855 + struct semaphore sync_release_event;
9856 +
9857 + char *tx_data;
9858 + char *rx_data;
9859 + VCHIQ_SLOT_INFO_T *rx_info;
9860 +
9861 + struct mutex slot_mutex;
9862 +
9863 + struct mutex recycle_mutex;
9864 +
9865 + struct mutex sync_mutex;
9866 +
9867 + struct mutex bulk_transfer_mutex;
9868 +
9869 + /* Indicates the byte position within the stream from where the next
9870 + ** message will be read. The least significant bits are an index into
9871 + ** the slot. The next bits are the index of the slot in
9872 + ** remote->slot_queue. */
9873 + int rx_pos;
9874 +
9875 + /* A cached copy of local->tx_pos. Only write to local->tx_pos, and read
9876 + from remote->tx_pos. */
9877 + int local_tx_pos;
9878 +
9879 + /* The slot_queue index of the slot to become available next. */
9880 + int slot_queue_available;
9881 +
9882 + /* A flag to indicate if any poll has been requested */
9883 + int poll_needed;
9884 +
9885 + /* The index of the previous slot used for data messages. */
9886 + int previous_data_index;
9887 +
9888 + /* The number of slots occupied by data messages. */
9889 + unsigned short data_use_count;
9890 +
9891 + /* The maximum number of slots to be occupied by data messages. */
9892 + unsigned short data_quota;
9893 +
9894 + /* An array of bit sets indicating which services must be polled. */
9895 + atomic_t poll_services[BITSET_SIZE(VCHIQ_MAX_SERVICES)];
9896 +
9897 + /* The number of the first unused service */
9898 + int unused_service;
9899 +
9900 + /* Signalled when a free slot becomes available. */
9901 + struct semaphore slot_available_event;
9902 +
9903 + struct semaphore slot_remove_event;
9904 +
9905 + /* Signalled when a free data slot becomes available. */
9906 + struct semaphore data_quota_event;
9907 +
9908 + /* Incremented when there are bulk transfers which cannot be processed
9909 + * whilst paused and must be processed on resume */
9910 + int deferred_bulks;
9911 +
9912 + struct state_stats_struct {
9913 + int slot_stalls;
9914 + int data_stalls;
9915 + int ctrl_tx_count;
9916 + int ctrl_rx_count;
9917 + int error_count;
9918 + } stats;
9919 +
9920 + VCHIQ_SERVICE_T * services[VCHIQ_MAX_SERVICES];
9921 + VCHIQ_SERVICE_QUOTA_T service_quotas[VCHIQ_MAX_SERVICES];
9922 + VCHIQ_SLOT_INFO_T slot_info[VCHIQ_MAX_SLOTS];
9923 +
9924 + VCHIQ_PLATFORM_STATE_T platform_state;
9925 +};
9926 +
9927 +struct bulk_waiter {
9928 + VCHIQ_BULK_T *bulk;
9929 + struct semaphore event;
9930 + int actual;
9931 +};
9932 +
9933 +extern spinlock_t bulk_waiter_spinlock;
9934 +
9935 +extern int vchiq_core_log_level;
9936 +extern int vchiq_core_msg_log_level;
9937 +extern int vchiq_sync_log_level;
9938 +
9939 +extern VCHIQ_STATE_T *vchiq_states[VCHIQ_MAX_STATES];
9940 +
9941 +extern const char *
9942 +get_conn_state_name(VCHIQ_CONNSTATE_T conn_state);
9943 +
9944 +extern VCHIQ_SLOT_ZERO_T *
9945 +vchiq_init_slots(void *mem_base, int mem_size);
9946 +
9947 +extern VCHIQ_STATUS_T
9948 +vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero,
9949 + int is_master);
9950 +
9951 +extern VCHIQ_STATUS_T
9952 +vchiq_connect_internal(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance);
9953 +
9954 +extern VCHIQ_SERVICE_T *
9955 +vchiq_add_service_internal(VCHIQ_STATE_T *state,
9956 + const VCHIQ_SERVICE_PARAMS_T *params, int srvstate,
9957 + VCHIQ_INSTANCE_T instance, VCHIQ_USERDATA_TERM_T userdata_term);
9958 +
9959 +extern VCHIQ_STATUS_T
9960 +vchiq_open_service_internal(VCHIQ_SERVICE_T *service, int client_id);
9961 +
9962 +extern VCHIQ_STATUS_T
9963 +vchiq_close_service_internal(VCHIQ_SERVICE_T *service, int close_recvd);
9964 +
9965 +extern void
9966 +vchiq_terminate_service_internal(VCHIQ_SERVICE_T *service);
9967 +
9968 +extern void
9969 +vchiq_free_service_internal(VCHIQ_SERVICE_T *service);
9970 +
9971 +extern VCHIQ_STATUS_T
9972 +vchiq_shutdown_internal(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance);
9973 +
9974 +extern VCHIQ_STATUS_T
9975 +vchiq_pause_internal(VCHIQ_STATE_T *state);
9976 +
9977 +extern VCHIQ_STATUS_T
9978 +vchiq_resume_internal(VCHIQ_STATE_T *state);
9979 +
9980 +extern void
9981 +remote_event_pollall(VCHIQ_STATE_T *state);
9982 +
9983 +extern VCHIQ_STATUS_T
9984 +vchiq_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle,
9985 + VCHI_MEM_HANDLE_T memhandle, void *offset, int size, void *userdata,
9986 + VCHIQ_BULK_MODE_T mode, VCHIQ_BULK_DIR_T dir);
9987 +
9988 +extern void
9989 +vchiq_dump_state(void *dump_context, VCHIQ_STATE_T *state);
9990 +
9991 +extern void
9992 +vchiq_dump_service_state(void *dump_context, VCHIQ_SERVICE_T *service);
9993 +
9994 +extern void
9995 +vchiq_loud_error_header(void);
9996 +
9997 +extern void
9998 +vchiq_loud_error_footer(void);
9999 +
10000 +extern void
10001 +request_poll(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service, int poll_type);
10002 +
10003 +static inline VCHIQ_SERVICE_T *
10004 +handle_to_service(VCHIQ_SERVICE_HANDLE_T handle)
10005 +{
10006 + VCHIQ_STATE_T *state = vchiq_states[(handle / VCHIQ_MAX_SERVICES) &
10007 + (VCHIQ_MAX_STATES - 1)];
10008 + if (!state)
10009 + return NULL;
10010 +
10011 + return state->services[handle & (VCHIQ_MAX_SERVICES - 1)];
10012 +}
10013 +
10014 +extern VCHIQ_SERVICE_T *
10015 +find_service_by_handle(VCHIQ_SERVICE_HANDLE_T handle);
10016 +
10017 +extern VCHIQ_SERVICE_T *
10018 +find_service_by_port(VCHIQ_STATE_T *state, int localport);
10019 +
10020 +extern VCHIQ_SERVICE_T *
10021 +find_service_for_instance(VCHIQ_INSTANCE_T instance,
10022 + VCHIQ_SERVICE_HANDLE_T handle);
10023 +
10024 +extern VCHIQ_SERVICE_T *
10025 +next_service_by_instance(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance,
10026 + int *pidx);
10027 +
10028 +extern void
10029 +lock_service(VCHIQ_SERVICE_T *service);
10030 +
10031 +extern void
10032 +unlock_service(VCHIQ_SERVICE_T *service);
10033 +
10034 +/* The following functions are called from vchiq_core, and external
10035 +** implementations must be provided. */
10036 +
10037 +extern VCHIQ_STATUS_T
10038 +vchiq_prepare_bulk_data(VCHIQ_BULK_T *bulk,
10039 + VCHI_MEM_HANDLE_T memhandle, void *offset, int size, int dir);
10040 +
10041 +extern void
10042 +vchiq_transfer_bulk(VCHIQ_BULK_T *bulk);
10043 +
10044 +extern void
10045 +vchiq_complete_bulk(VCHIQ_BULK_T *bulk);
10046 +
10047 +extern VCHIQ_STATUS_T
10048 +vchiq_copy_from_user(void *dst, const void *src, int size);
10049 +
10050 +extern void
10051 +remote_event_signal(REMOTE_EVENT_T *event);
10052 +
10053 +void
10054 +vchiq_platform_check_suspend(VCHIQ_STATE_T *state);
10055 +
10056 +extern void
10057 +vchiq_platform_paused(VCHIQ_STATE_T *state);
10058 +
10059 +extern VCHIQ_STATUS_T
10060 +vchiq_platform_resume(VCHIQ_STATE_T *state);
10061 +
10062 +extern void
10063 +vchiq_platform_resumed(VCHIQ_STATE_T *state);
10064 +
10065 +extern void
10066 +vchiq_dump(void *dump_context, const char *str, int len);
10067 +
10068 +extern void
10069 +vchiq_dump_platform_state(void *dump_context);
10070 +
10071 +extern void
10072 +vchiq_dump_platform_instances(void *dump_context);
10073 +
10074 +extern void
10075 +vchiq_dump_platform_service_state(void *dump_context,
10076 + VCHIQ_SERVICE_T *service);
10077 +
10078 +extern VCHIQ_STATUS_T
10079 +vchiq_use_service_internal(VCHIQ_SERVICE_T *service);
10080 +
10081 +extern VCHIQ_STATUS_T
10082 +vchiq_release_service_internal(VCHIQ_SERVICE_T *service);
10083 +
10084 +extern void
10085 +vchiq_on_remote_use(VCHIQ_STATE_T *state);
10086 +
10087 +extern void
10088 +vchiq_on_remote_release(VCHIQ_STATE_T *state);
10089 +
10090 +extern VCHIQ_STATUS_T
10091 +vchiq_platform_init_state(VCHIQ_STATE_T *state);
10092 +
10093 +extern VCHIQ_STATUS_T
10094 +vchiq_check_service(VCHIQ_SERVICE_T *service);
10095 +
10096 +extern void
10097 +vchiq_on_remote_use_active(VCHIQ_STATE_T *state);
10098 +
10099 +extern VCHIQ_STATUS_T
10100 +vchiq_send_remote_use(VCHIQ_STATE_T *state);
10101 +
10102 +extern VCHIQ_STATUS_T
10103 +vchiq_send_remote_release(VCHIQ_STATE_T *state);
10104 +
10105 +extern VCHIQ_STATUS_T
10106 +vchiq_send_remote_use_active(VCHIQ_STATE_T *state);
10107 +
10108 +extern void
10109 +vchiq_platform_conn_state_changed(VCHIQ_STATE_T *state,
10110 + VCHIQ_CONNSTATE_T oldstate, VCHIQ_CONNSTATE_T newstate);
10111 +
10112 +extern void
10113 +vchiq_platform_handle_timeout(VCHIQ_STATE_T *state);
10114 +
10115 +extern void
10116 +vchiq_set_conn_state(VCHIQ_STATE_T *state, VCHIQ_CONNSTATE_T newstate);
10117 +
10118 +
10119 +extern void
10120 +vchiq_log_dump_mem(const char *label, uint32_t addr, const void *voidMem,
10121 + size_t numBytes);
10122 +
10123 +#endif
10124 diff --git a/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_genversion b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_genversion
10125 new file mode 100644
10126 index 0000000..9f5b634
10127 --- /dev/null
10128 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_genversion
10129 @@ -0,0 +1,87 @@
10130 +#!/usr/bin/perl -w
10131 +
10132 +use strict;
10133 +
10134 +#
10135 +# Generate a version from available information
10136 +#
10137 +
10138 +my $prefix = shift @ARGV;
10139 +my $root = shift @ARGV;
10140 +
10141 +
10142 +if ( not defined $root ) {
10143 + die "usage: $0 prefix root-dir\n";
10144 +}
10145 +
10146 +if ( ! -d $root ) {
10147 + die "root directory $root not found\n";
10148 +}
10149 +
10150 +my $version = "unknown";
10151 +my $tainted = "";
10152 +
10153 +if ( -d "$root/.git" ) {
10154 + # attempt to work out git version. only do so
10155 + # on a linux build host, as cygwin builds are
10156 + # already slow enough
10157 +
10158 + if ( -f "/usr/bin/git" || -f "/usr/local/bin/git" ) {
10159 + if (not open(F, "git --git-dir $root/.git rev-parse --verify HEAD|")) {
10160 + $version = "no git version";
10161 + }
10162 + else {
10163 + $version = <F>;
10164 + $version =~ s/[ \r\n]*$//; # chomp may not be enough (cygwin).
10165 + $version =~ s/^[ \r\n]*//; # chomp may not be enough (cygwin).
10166 + }
10167 +
10168 + if (open(G, "git --git-dir $root/.git status --porcelain|")) {
10169 + $tainted = <G>;
10170 + $tainted =~ s/[ \r\n]*$//; # chomp may not be enough (cygwin).
10171 + $tainted =~ s/^[ \r\n]*//; # chomp may not be enough (cygwin).
10172 + if (length $tainted) {
10173 + $version = join ' ', $version, "(tainted)";
10174 + }
10175 + else {
10176 + $version = join ' ', $version, "(clean)";
10177 + }
10178 + }
10179 + }
10180 +}
10181 +
10182 +my $hostname = `hostname`;
10183 +$hostname =~ s/[ \r\n]*$//; # chomp may not be enough (cygwin).
10184 +$hostname =~ s/^[ \r\n]*//; # chomp may not be enough (cygwin).
10185 +
10186 +
10187 +print STDERR "Version $version\n";
10188 +print <<EOF;
10189 +#include "${prefix}_build_info.h"
10190 +#include <linux/broadcom/vc_debug_sym.h>
10191 +
10192 +VC_DEBUG_DECLARE_STRING_VAR( ${prefix}_build_hostname, "$hostname" );
10193 +VC_DEBUG_DECLARE_STRING_VAR( ${prefix}_build_version, "$version" );
10194 +VC_DEBUG_DECLARE_STRING_VAR( ${prefix}_build_time, __TIME__ );
10195 +VC_DEBUG_DECLARE_STRING_VAR( ${prefix}_build_date, __DATE__ );
10196 +
10197 +const char *vchiq_get_build_hostname( void )
10198 +{
10199 + return vchiq_build_hostname;
10200 +}
10201 +
10202 +const char *vchiq_get_build_version( void )
10203 +{
10204 + return vchiq_build_version;
10205 +}
10206 +
10207 +const char *vchiq_get_build_date( void )
10208 +{
10209 + return vchiq_build_date;
10210 +}
10211 +
10212 +const char *vchiq_get_build_time( void )
10213 +{
10214 + return vchiq_build_time;
10215 +}
10216 +EOF
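+# Editorial note, not part of the original script: a typical invocation
+# (illustrative only) redirects the generated C to a file, e.g.
+#
+#        vchiq_genversion vchiq <kernel-tree> > vchiq_version.c
+#
+# The prefix must be "vchiq" here, since the accessor functions emitted above
+# return the hard-coded vchiq_build_* symbols and must match the declared
+# ${prefix}_build_* variables.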
10217 diff --git a/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_if.h b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_if.h
10218 new file mode 100644
10219 index 0000000..50359b0
10220 --- /dev/null
10221 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_if.h
10222 @@ -0,0 +1,188 @@
10223 +/**
10224 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
10225 + *
10226 + * Redistribution and use in source and binary forms, with or without
10227 + * modification, are permitted provided that the following conditions
10228 + * are met:
10229 + * 1. Redistributions of source code must retain the above copyright
10230 + * notice, this list of conditions, and the following disclaimer,
10231 + * without modification.
10232 + * 2. Redistributions in binary form must reproduce the above copyright
10233 + * notice, this list of conditions and the following disclaimer in the
10234 + * documentation and/or other materials provided with the distribution.
10235 + * 3. The names of the above-listed copyright holders may not be used
10236 + * to endorse or promote products derived from this software without
10237 + * specific prior written permission.
10238 + *
10239 + * ALTERNATIVELY, this software may be distributed under the terms of the
10240 + * GNU General Public License ("GPL") version 2, as published by the Free
10241 + * Software Foundation.
10242 + *
10243 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
10244 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
10245 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
10246 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
10247 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
10248 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
10249 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
10250 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
10251 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
10252 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
10253 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
10254 + */
10255 +
10256 +#ifndef VCHIQ_IF_H
10257 +#define VCHIQ_IF_H
10258 +
10259 +#include "interface/vchi/vchi_mh.h"
10260 +
10261 +#define VCHIQ_SERVICE_HANDLE_INVALID 0
10262 +
10263 +#define VCHIQ_SLOT_SIZE 4096
10264 +#define VCHIQ_MAX_MSG_SIZE (VCHIQ_SLOT_SIZE - sizeof(VCHIQ_HEADER_T))
10265 +#define VCHIQ_CHANNEL_SIZE VCHIQ_MAX_MSG_SIZE /* For backwards compatibility */
10266 +
10267 +#define VCHIQ_MAKE_FOURCC(x0, x1, x2, x3) \
10268 + (((x0) << 24) | ((x1) << 16) | ((x2) << 8) | (x3))
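+/* Editorial example, not part of the original header: a service fourcc packs
+ * four characters with the first in the most significant byte, e.g.
+ *
+ *        #define DEMO_FOURCC VCHIQ_MAKE_FOURCC('d', 'e', 'm', 'o')
+ *
+ * ("demo" is purely illustrative, not a service defined by this patch).
+ */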
10269 +#define VCHIQ_GET_SERVICE_USERDATA(service) vchiq_get_service_userdata(service)
10270 +#define VCHIQ_GET_SERVICE_FOURCC(service) vchiq_get_service_fourcc(service)
10271 +
10272 +typedef enum {
10273 + VCHIQ_SERVICE_OPENED, /* service, -, - */
10274 + VCHIQ_SERVICE_CLOSED, /* service, -, - */
10275 + VCHIQ_MESSAGE_AVAILABLE, /* service, header, - */
10276 + VCHIQ_BULK_TRANSMIT_DONE, /* service, -, bulk_userdata */
10277 + VCHIQ_BULK_RECEIVE_DONE, /* service, -, bulk_userdata */
10278 + VCHIQ_BULK_TRANSMIT_ABORTED, /* service, -, bulk_userdata */
10279 + VCHIQ_BULK_RECEIVE_ABORTED /* service, -, bulk_userdata */
10280 +} VCHIQ_REASON_T;
10281 +
10282 +typedef enum {
10283 + VCHIQ_ERROR = -1,
10284 + VCHIQ_SUCCESS = 0,
10285 + VCHIQ_RETRY = 1
10286 +} VCHIQ_STATUS_T;
10287 +
10288 +typedef enum {
10289 + VCHIQ_BULK_MODE_CALLBACK,
10290 + VCHIQ_BULK_MODE_BLOCKING,
10291 + VCHIQ_BULK_MODE_NOCALLBACK,
10292 + VCHIQ_BULK_MODE_WAITING /* Reserved for internal use */
10293 +} VCHIQ_BULK_MODE_T;
10294 +
10295 +typedef enum {
10296 + VCHIQ_SERVICE_OPTION_AUTOCLOSE,
10297 + VCHIQ_SERVICE_OPTION_SLOT_QUOTA,
10298 + VCHIQ_SERVICE_OPTION_MESSAGE_QUOTA,
10299 + VCHIQ_SERVICE_OPTION_SYNCHRONOUS
10300 +} VCHIQ_SERVICE_OPTION_T;
10301 +
10302 +typedef struct vchiq_header_struct {
10303 + /* The message identifier - opaque to applications. */
10304 + int msgid;
10305 +
10306 + /* Size of message data. */
10307 + unsigned int size;
10308 +
10309 + char data[0]; /* message */
10310 +} VCHIQ_HEADER_T;
10311 +
10312 +typedef struct {
10313 + const void *data;
10314 + unsigned int size;
10315 +} VCHIQ_ELEMENT_T;
10316 +
10317 +typedef unsigned int VCHIQ_SERVICE_HANDLE_T;
10318 +
10319 +typedef VCHIQ_STATUS_T (*VCHIQ_CALLBACK_T)(VCHIQ_REASON_T, VCHIQ_HEADER_T *,
10320 + VCHIQ_SERVICE_HANDLE_T, void *);
10321 +
10322 +typedef struct vchiq_service_base_struct {
10323 + int fourcc;
10324 + VCHIQ_CALLBACK_T callback;
10325 + void *userdata;
10326 +} VCHIQ_SERVICE_BASE_T;
10327 +
10328 +typedef struct vchiq_service_params_struct {
10329 + int fourcc;
10330 + VCHIQ_CALLBACK_T callback;
10331 + void *userdata;
10332 + short version; /* Increment for non-trivial changes */
10333 + short version_min; /* Update for incompatible changes */
10334 +} VCHIQ_SERVICE_PARAMS_T;
10335 +
10336 +typedef struct vchiq_config_struct {
10337 + unsigned int max_msg_size;
10338 + unsigned int bulk_threshold; /* The message size above which it
10339 + is better to use a bulk transfer
10340 + (<= max_msg_size) */
10341 + unsigned int max_outstanding_bulks;
10342 + unsigned int max_services;
10343 + short version; /* The version of VCHIQ */
10344 + short version_min; /* The minimum compatible version of VCHIQ */
10345 +} VCHIQ_CONFIG_T;
10346 +
10347 +typedef struct vchiq_instance_struct *VCHIQ_INSTANCE_T;
10348 +typedef void (*VCHIQ_REMOTE_USE_CALLBACK_T)(void *cb_arg);
10349 +
10350 +extern VCHIQ_STATUS_T vchiq_initialise(VCHIQ_INSTANCE_T *pinstance);
10351 +extern VCHIQ_STATUS_T vchiq_shutdown(VCHIQ_INSTANCE_T instance);
10352 +extern VCHIQ_STATUS_T vchiq_connect(VCHIQ_INSTANCE_T instance);
10353 +extern VCHIQ_STATUS_T vchiq_add_service(VCHIQ_INSTANCE_T instance,
10354 + const VCHIQ_SERVICE_PARAMS_T *params,
10355 + VCHIQ_SERVICE_HANDLE_T *pservice);
10356 +extern VCHIQ_STATUS_T vchiq_open_service(VCHIQ_INSTANCE_T instance,
10357 + const VCHIQ_SERVICE_PARAMS_T *params,
10358 + VCHIQ_SERVICE_HANDLE_T *pservice);
10359 +extern VCHIQ_STATUS_T vchiq_close_service(VCHIQ_SERVICE_HANDLE_T service);
10360 +extern VCHIQ_STATUS_T vchiq_remove_service(VCHIQ_SERVICE_HANDLE_T service);
10361 +extern VCHIQ_STATUS_T vchiq_use_service(VCHIQ_SERVICE_HANDLE_T service);
10362 +extern VCHIQ_STATUS_T vchiq_use_service_no_resume(
10363 + VCHIQ_SERVICE_HANDLE_T service);
10364 +extern VCHIQ_STATUS_T vchiq_release_service(VCHIQ_SERVICE_HANDLE_T service);
10365 +
10366 +extern VCHIQ_STATUS_T vchiq_queue_message(VCHIQ_SERVICE_HANDLE_T service,
10367 + const VCHIQ_ELEMENT_T *elements, unsigned int count);
10368 +extern void vchiq_release_message(VCHIQ_SERVICE_HANDLE_T service,
10369 + VCHIQ_HEADER_T *header);
10370 +extern VCHIQ_STATUS_T vchiq_queue_bulk_transmit(VCHIQ_SERVICE_HANDLE_T service,
10371 + const void *data, unsigned int size, void *userdata);
10372 +extern VCHIQ_STATUS_T vchiq_queue_bulk_receive(VCHIQ_SERVICE_HANDLE_T service,
10373 + void *data, unsigned int size, void *userdata);
10374 +extern VCHIQ_STATUS_T vchiq_queue_bulk_transmit_handle(
10375 + VCHIQ_SERVICE_HANDLE_T service, VCHI_MEM_HANDLE_T handle,
10376 + const void *offset, unsigned int size, void *userdata);
10377 +extern VCHIQ_STATUS_T vchiq_queue_bulk_receive_handle(
10378 + VCHIQ_SERVICE_HANDLE_T service, VCHI_MEM_HANDLE_T handle,
10379 + void *offset, unsigned int size, void *userdata);
10380 +extern VCHIQ_STATUS_T vchiq_bulk_transmit(VCHIQ_SERVICE_HANDLE_T service,
10381 + const void *data, unsigned int size, void *userdata,
10382 + VCHIQ_BULK_MODE_T mode);
10383 +extern VCHIQ_STATUS_T vchiq_bulk_receive(VCHIQ_SERVICE_HANDLE_T service,
10384 + void *data, unsigned int size, void *userdata,
10385 + VCHIQ_BULK_MODE_T mode);
10386 +extern VCHIQ_STATUS_T vchiq_bulk_transmit_handle(VCHIQ_SERVICE_HANDLE_T service,
10387 + VCHI_MEM_HANDLE_T handle, const void *offset, unsigned int size,
10388 + void *userdata, VCHIQ_BULK_MODE_T mode);
10389 +extern VCHIQ_STATUS_T vchiq_bulk_receive_handle(VCHIQ_SERVICE_HANDLE_T service,
10390 + VCHI_MEM_HANDLE_T handle, void *offset, unsigned int size,
10391 + void *userdata, VCHIQ_BULK_MODE_T mode);
10392 +extern int vchiq_get_client_id(VCHIQ_SERVICE_HANDLE_T service);
10393 +extern void *vchiq_get_service_userdata(VCHIQ_SERVICE_HANDLE_T service);
10394 +extern int vchiq_get_service_fourcc(VCHIQ_SERVICE_HANDLE_T service);
10395 +extern VCHIQ_STATUS_T vchiq_get_config(VCHIQ_INSTANCE_T instance,
10396 + int config_size, VCHIQ_CONFIG_T *pconfig);
10397 +extern VCHIQ_STATUS_T vchiq_set_service_option(VCHIQ_SERVICE_HANDLE_T service,
10398 + VCHIQ_SERVICE_OPTION_T option, int value);
10399 +
10400 +extern VCHIQ_STATUS_T vchiq_remote_use(VCHIQ_INSTANCE_T instance,
10401 + VCHIQ_REMOTE_USE_CALLBACK_T callback, void *cb_arg);
10402 +extern VCHIQ_STATUS_T vchiq_remote_release(VCHIQ_INSTANCE_T instance);
10403 +
10404 +extern VCHIQ_STATUS_T vchiq_dump_phys_mem(VCHIQ_SERVICE_HANDLE_T service,
10405 + void *ptr, size_t num_bytes);
10406 +
10407 +extern VCHIQ_STATUS_T vchiq_get_peer_version(VCHIQ_SERVICE_HANDLE_T handle,
10408 + short *peer_version);
10409 +
10410 +#endif /* VCHIQ_IF_H */
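+/* Editorial sketch, not part of the original patch: a minimal kernel-side
+ * client of the API declared above. The "demo" fourcc and the helper names
+ * are illustrative assumptions, not services or symbols defined by this
+ * patch; error handling is reduced to early exits.
+ */
+#include "vchiq_if.h"
+
+static VCHIQ_STATUS_T demo_callback(VCHIQ_REASON_T reason,
+	VCHIQ_HEADER_T *header, VCHIQ_SERVICE_HANDLE_T handle, void *userdata)
+{
+	if (reason == VCHIQ_MESSAGE_AVAILABLE) {
+		/* Consume header->data / header->size, then free the slot space. */
+		vchiq_release_message(handle, header);
+	}
+	return VCHIQ_SUCCESS;
+}
+
+static VCHIQ_STATUS_T demo_send(const void *buf, unsigned int len)
+{
+	VCHIQ_SERVICE_PARAMS_T params = {
+		.fourcc      = VCHIQ_MAKE_FOURCC('d', 'e', 'm', 'o'),
+		.callback    = demo_callback,
+		.userdata    = NULL,
+		.version     = 1,
+		.version_min = 1,
+	};
+	VCHIQ_ELEMENT_T element = { buf, len };
+	VCHIQ_INSTANCE_T instance;
+	VCHIQ_SERVICE_HANDLE_T handle;
+	VCHIQ_STATUS_T status = VCHIQ_ERROR;
+
+	if (vchiq_initialise(&instance) != VCHIQ_SUCCESS)
+		return VCHIQ_ERROR;
+	if (vchiq_connect(instance) != VCHIQ_SUCCESS)
+		goto out_shutdown;
+	if (vchiq_open_service(instance, &params, &handle) != VCHIQ_SUCCESS)
+		goto out_shutdown;
+
+	/* One-element scatter list; the callback sees the peer's replies. */
+	status = vchiq_queue_message(handle, &element, 1);
+
+	vchiq_close_service(handle);
+out_shutdown:
+	vchiq_shutdown(instance);
+	return status;
+}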
10411 diff --git a/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_ioctl.h b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_ioctl.h
10412 new file mode 100644
10413 index 0000000..e248037
10414 --- /dev/null
10415 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_ioctl.h
10416 @@ -0,0 +1,129 @@
10417 +/**
10418 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
10419 + *
10420 + * Redistribution and use in source and binary forms, with or without
10421 + * modification, are permitted provided that the following conditions
10422 + * are met:
10423 + * 1. Redistributions of source code must retain the above copyright
10424 + * notice, this list of conditions, and the following disclaimer,
10425 + * without modification.
10426 + * 2. Redistributions in binary form must reproduce the above copyright
10427 + * notice, this list of conditions and the following disclaimer in the
10428 + * documentation and/or other materials provided with the distribution.
10429 + * 3. The names of the above-listed copyright holders may not be used
10430 + * to endorse or promote products derived from this software without
10431 + * specific prior written permission.
10432 + *
10433 + * ALTERNATIVELY, this software may be distributed under the terms of the
10434 + * GNU General Public License ("GPL") version 2, as published by the Free
10435 + * Software Foundation.
10436 + *
10437 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
10438 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
10439 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
10440 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
10441 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
10442 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
10443 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
10444 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
10445 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
10446 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
10447 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
10448 + */
10449 +
10450 +#ifndef VCHIQ_IOCTLS_H
10451 +#define VCHIQ_IOCTLS_H
10452 +
10453 +#include <linux/ioctl.h>
10454 +#include "vchiq_if.h"
10455 +
10456 +#define VCHIQ_IOC_MAGIC 0xc4
10457 +#define VCHIQ_INVALID_HANDLE (~0)
10458 +
10459 +typedef struct {
10460 + VCHIQ_SERVICE_PARAMS_T params;
10461 + int is_open;
10462 + int is_vchi;
10463 + unsigned int handle; /* OUT */
10464 +} VCHIQ_CREATE_SERVICE_T;
10465 +
10466 +typedef struct {
10467 + unsigned int handle;
10468 + unsigned int count;
10469 + const VCHIQ_ELEMENT_T *elements;
10470 +} VCHIQ_QUEUE_MESSAGE_T;
10471 +
10472 +typedef struct {
10473 + unsigned int handle;
10474 + void *data;
10475 + unsigned int size;
10476 + void *userdata;
10477 + VCHIQ_BULK_MODE_T mode;
10478 +} VCHIQ_QUEUE_BULK_TRANSFER_T;
10479 +
10480 +typedef struct {
10481 + VCHIQ_REASON_T reason;
10482 + VCHIQ_HEADER_T *header;
10483 + void *service_userdata;
10484 + void *bulk_userdata;
10485 +} VCHIQ_COMPLETION_DATA_T;
10486 +
10487 +typedef struct {
10488 + unsigned int count;
10489 + VCHIQ_COMPLETION_DATA_T *buf;
10490 + unsigned int msgbufsize;
10491 + unsigned int msgbufcount; /* IN/OUT */
10492 + void **msgbufs;
10493 +} VCHIQ_AWAIT_COMPLETION_T;
10494 +
10495 +typedef struct {
10496 + unsigned int handle;
10497 + int blocking;
10498 + unsigned int bufsize;
10499 + void *buf;
10500 +} VCHIQ_DEQUEUE_MESSAGE_T;
10501 +
10502 +typedef struct {
10503 + unsigned int config_size;
10504 + VCHIQ_CONFIG_T *pconfig;
10505 +} VCHIQ_GET_CONFIG_T;
10506 +
10507 +typedef struct {
10508 + unsigned int handle;
10509 + VCHIQ_SERVICE_OPTION_T option;
10510 + int value;
10511 +} VCHIQ_SET_SERVICE_OPTION_T;
10512 +
10513 +typedef struct {
10514 + void *virt_addr;
10515 + size_t num_bytes;
10516 +} VCHIQ_DUMP_MEM_T;
10517 +
10518 +#define VCHIQ_IOC_CONNECT _IO(VCHIQ_IOC_MAGIC, 0)
10519 +#define VCHIQ_IOC_SHUTDOWN _IO(VCHIQ_IOC_MAGIC, 1)
10520 +#define VCHIQ_IOC_CREATE_SERVICE \
10521 + _IOWR(VCHIQ_IOC_MAGIC, 2, VCHIQ_CREATE_SERVICE_T)
10522 +#define VCHIQ_IOC_REMOVE_SERVICE _IO(VCHIQ_IOC_MAGIC, 3)
10523 +#define VCHIQ_IOC_QUEUE_MESSAGE \
10524 + _IOW(VCHIQ_IOC_MAGIC, 4, VCHIQ_QUEUE_MESSAGE_T)
10525 +#define VCHIQ_IOC_QUEUE_BULK_TRANSMIT \
10526 + _IOWR(VCHIQ_IOC_MAGIC, 5, VCHIQ_QUEUE_BULK_TRANSFER_T)
10527 +#define VCHIQ_IOC_QUEUE_BULK_RECEIVE \
10528 + _IOWR(VCHIQ_IOC_MAGIC, 6, VCHIQ_QUEUE_BULK_TRANSFER_T)
10529 +#define VCHIQ_IOC_AWAIT_COMPLETION \
10530 + _IOWR(VCHIQ_IOC_MAGIC, 7, VCHIQ_AWAIT_COMPLETION_T)
10531 +#define VCHIQ_IOC_DEQUEUE_MESSAGE \
10532 + _IOWR(VCHIQ_IOC_MAGIC, 8, VCHIQ_DEQUEUE_MESSAGE_T)
10533 +#define VCHIQ_IOC_GET_CLIENT_ID _IO(VCHIQ_IOC_MAGIC, 9)
10534 +#define VCHIQ_IOC_GET_CONFIG \
10535 + _IOWR(VCHIQ_IOC_MAGIC, 10, VCHIQ_GET_CONFIG_T)
10536 +#define VCHIQ_IOC_CLOSE_SERVICE _IO(VCHIQ_IOC_MAGIC, 11)
10537 +#define VCHIQ_IOC_USE_SERVICE _IO(VCHIQ_IOC_MAGIC, 12)
10538 +#define VCHIQ_IOC_RELEASE_SERVICE _IO(VCHIQ_IOC_MAGIC, 13)
10539 +#define VCHIQ_IOC_SET_SERVICE_OPTION \
10540 + _IOW(VCHIQ_IOC_MAGIC, 14, VCHIQ_SET_SERVICE_OPTION_T)
10541 +#define VCHIQ_IOC_DUMP_PHYS_MEM \
10542 + _IOW(VCHIQ_IOC_MAGIC, 15, VCHIQ_DUMP_MEM_T)
10543 +#define VCHIQ_IOC_MAX 15
10544 +
10545 +#endif
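+/* Editorial sketch, not part of the original patch: how a user-space client
+ * might exercise this ioctl interface. The "/dev/vchiq" node name, the "demo"
+ * fourcc and passing the handle as the CLOSE_SERVICE argument are assumptions
+ * taken from the character-device side of the driver rather than from this
+ * header.
+ */
+#include <fcntl.h>
+#include <stdio.h>
+#include <sys/ioctl.h>
+#include <unistd.h>
+#include "vchiq_ioctl.h"
+
+int demo_user_client(void)
+{
+	VCHIQ_CREATE_SERVICE_T args = { 0 };
+	int fd = open("/dev/vchiq", O_RDWR);
+
+	if (fd < 0)
+		return -1;
+
+	/* Join the VCHIQ connection before creating any services. */
+	if (ioctl(fd, VCHIQ_IOC_CONNECT, 0) != 0)
+		goto fail;
+
+	args.params.fourcc = VCHIQ_MAKE_FOURCC('d', 'e', 'm', 'o');
+	args.is_open = 1;	/* open the peer service rather than listen */
+	args.is_vchi = 0;
+	if (ioctl(fd, VCHIQ_IOC_CREATE_SERVICE, &args) != 0)
+		goto fail;
+
+	printf("created service, handle %u\n", args.handle);	/* OUT field */
+
+	ioctl(fd, VCHIQ_IOC_CLOSE_SERVICE, args.handle);
+	close(fd);
+	return 0;
+fail:
+	close(fd);
+	return -1;
+}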
10546 diff --git a/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_kern_lib.c b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_kern_lib.c
10547 new file mode 100644
10548 index 0000000..be9735f
10549 --- /dev/null
10550 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_kern_lib.c
10551 @@ -0,0 +1,456 @@
10552 +/**
10553 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
10554 + *
10555 + * Redistribution and use in source and binary forms, with or without
10556 + * modification, are permitted provided that the following conditions
10557 + * are met:
10558 + * 1. Redistributions of source code must retain the above copyright
10559 + * notice, this list of conditions, and the following disclaimer,
10560 + * without modification.
10561 + * 2. Redistributions in binary form must reproduce the above copyright
10562 + * notice, this list of conditions and the following disclaimer in the
10563 + * documentation and/or other materials provided with the distribution.
10564 + * 3. The names of the above-listed copyright holders may not be used
10565 + * to endorse or promote products derived from this software without
10566 + * specific prior written permission.
10567 + *
10568 + * ALTERNATIVELY, this software may be distributed under the terms of the
10569 + * GNU General Public License ("GPL") version 2, as published by the Free
10570 + * Software Foundation.
10571 + *
10572 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
10573 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
10574 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
10575 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
10576 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
10577 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
10578 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
10579 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
10580 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
10581 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
10582 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
10583 + */
10584 +
10585 +/* ---- Include Files ---------------------------------------------------- */
10586 +
10587 +#include <linux/kernel.h>
10588 +#include <linux/module.h>
10589 +#include <linux/mutex.h>
10590 +
10591 +#include "vchiq_core.h"
10592 +#include "vchiq_arm.h"
10593 +
10594 +/* ---- Public Variables ------------------------------------------------- */
10595 +
10596 +/* ---- Private Constants and Types -------------------------------------- */
10597 +
10598 +struct bulk_waiter_node {
10599 + struct bulk_waiter bulk_waiter;
10600 + int pid;
10601 + struct list_head list;
10602 +};
10603 +
10604 +struct vchiq_instance_struct {
10605 + VCHIQ_STATE_T *state;
10606 +
10607 + int connected;
10608 +
10609 + struct list_head bulk_waiter_list;
10610 + struct mutex bulk_waiter_list_mutex;
10611 +};
10612 +
10613 +static VCHIQ_STATUS_T
10614 +vchiq_blocking_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle, void *data,
10615 + unsigned int size, VCHIQ_BULK_DIR_T dir);
10616 +
10617 +/****************************************************************************
10618 +*
10619 +* vchiq_initialise
10620 +*
10621 +***************************************************************************/
10622 +#define VCHIQ_INIT_RETRIES 10
10623 +VCHIQ_STATUS_T vchiq_initialise(VCHIQ_INSTANCE_T *instanceOut)
10624 +{
10625 + VCHIQ_STATUS_T status = VCHIQ_ERROR;
10626 + VCHIQ_STATE_T *state;
10627 + VCHIQ_INSTANCE_T instance = NULL;
10628 + int i;
10629 +
10630 + vchiq_log_trace(vchiq_core_log_level, "%s called", __func__);
10631 +
10632 + /* VideoCore may not be ready due to boot-up timing. It may never be
10633 + ready if the kernel and firmware are mismatched, so don't block forever. */
10634 + for (i=0; i<VCHIQ_INIT_RETRIES; i++) {
10635 + state = vchiq_get_state();
10636 + if (state)
10637 + break;
10638 + udelay(500);
10639 + }
10640 + if (i==VCHIQ_INIT_RETRIES) {
10641 + vchiq_log_error(vchiq_core_log_level,
10642 + "%s: videocore not initialized\n", __func__);
10643 + goto failed;
10644 + } else if (i>0) {
10645 + vchiq_log_warning(vchiq_core_log_level,
10646 + "%s: videocore initialized after %d retries\n", __func__, i);
10647 + }
10648 +
10649 + instance = kzalloc(sizeof(*instance), GFP_KERNEL);
10650 + if (!instance) {
10651 + vchiq_log_error(vchiq_core_log_level,
10652 + "%s: error allocating vchiq instance\n", __func__);
10653 + goto failed;
10654 + }
10655 +
10656 + instance->connected = 0;
10657 + instance->state = state;
10658 + mutex_init(&instance->bulk_waiter_list_mutex);
10659 + INIT_LIST_HEAD(&instance->bulk_waiter_list);
10660 +
10661 + *instanceOut = instance;
10662 +
10663 + status = VCHIQ_SUCCESS;
10664 +
10665 +failed:
10666 + vchiq_log_trace(vchiq_core_log_level,
10667 + "%s(%p): returning %d", __func__, instance, status);
10668 +
10669 + return status;
10670 +}
10671 +EXPORT_SYMBOL(vchiq_initialise);
10672 +
10673 +/****************************************************************************
10674 +*
10675 +* vchiq_shutdown
10676 +*
10677 +***************************************************************************/
10678 +
10679 +VCHIQ_STATUS_T vchiq_shutdown(VCHIQ_INSTANCE_T instance)
10680 +{
10681 + VCHIQ_STATUS_T status;
10682 + VCHIQ_STATE_T *state = instance->state;
10683 +
10684 + vchiq_log_trace(vchiq_core_log_level,
10685 + "%s(%p) called", __func__, instance);
10686 +
10687 + if (mutex_lock_interruptible(&state->mutex) != 0)
10688 + return VCHIQ_RETRY;
10689 +
10690 + /* Remove all services */
10691 + status = vchiq_shutdown_internal(state, instance);
10692 +
10693 + mutex_unlock(&state->mutex);
10694 +
10695 + vchiq_log_trace(vchiq_core_log_level,
10696 + "%s(%p): returning %d", __func__, instance, status);
10697 +
10698 + if (status == VCHIQ_SUCCESS) {
10699 + struct list_head *pos, *next;
10700 + list_for_each_safe(pos, next,
10701 + &instance->bulk_waiter_list) {
10702 + struct bulk_waiter_node *waiter;
10703 + waiter = list_entry(pos,
10704 + struct bulk_waiter_node,
10705 + list);
10706 + list_del(pos);
10707 + vchiq_log_info(vchiq_arm_log_level,
10708 + "bulk_waiter - cleaned up %x "
10709 + "for pid %d",
10710 + (unsigned int)waiter, waiter->pid);
10711 + kfree(waiter);
10712 + }
10713 + kfree(instance);
10714 + }
10715 +
10716 + return status;
10717 +}
10718 +EXPORT_SYMBOL(vchiq_shutdown);
10719 +
10720 +/****************************************************************************
10721 +*
10722 +* vchiq_is_connected
10723 +*
10724 +***************************************************************************/
10725 +
10726 +int vchiq_is_connected(VCHIQ_INSTANCE_T instance)
10727 +{
10728 + return instance->connected;
10729 +}
10730 +
10731 +/****************************************************************************
10732 +*
10733 +* vchiq_connect
10734 +*
10735 +***************************************************************************/
10736 +
10737 +VCHIQ_STATUS_T vchiq_connect(VCHIQ_INSTANCE_T instance)
10738 +{
10739 + VCHIQ_STATUS_T status;
10740 + VCHIQ_STATE_T *state = instance->state;
10741 +
10742 + vchiq_log_trace(vchiq_core_log_level,
10743 + "%s(%p) called", __func__, instance);
10744 +
10745 + if (mutex_lock_interruptible(&state->mutex) != 0) {
10746 + vchiq_log_trace(vchiq_core_log_level,
10747 + "%s: call to mutex_lock failed", __func__);
10748 + status = VCHIQ_RETRY;
10749 + goto failed;
10750 + }
10751 + status = vchiq_connect_internal(state, instance);
10752 +
10753 + if (status == VCHIQ_SUCCESS)
10754 + instance->connected = 1;
10755 +
10756 + mutex_unlock(&state->mutex);
10757 +
10758 +failed:
10759 + vchiq_log_trace(vchiq_core_log_level,
10760 + "%s(%p): returning %d", __func__, instance, status);
10761 +
10762 + return status;
10763 +}
10764 +EXPORT_SYMBOL(vchiq_connect);
10765 +
10766 +/****************************************************************************
10767 +*
10768 +* vchiq_add_service
10769 +*
10770 +***************************************************************************/
10771 +
10772 +VCHIQ_STATUS_T vchiq_add_service(
10773 + VCHIQ_INSTANCE_T instance,
10774 + const VCHIQ_SERVICE_PARAMS_T *params,
10775 + VCHIQ_SERVICE_HANDLE_T *phandle)
10776 +{
10777 + VCHIQ_STATUS_T status;
10778 + VCHIQ_STATE_T *state = instance->state;
10779 + VCHIQ_SERVICE_T *service = NULL;
10780 + int srvstate;
10781 +
10782 + vchiq_log_trace(vchiq_core_log_level,
10783 + "%s(%p) called", __func__, instance);
10784 +
10785 + *phandle = VCHIQ_SERVICE_HANDLE_INVALID;
10786 +
10787 + srvstate = vchiq_is_connected(instance)
10788 + ? VCHIQ_SRVSTATE_LISTENING
10789 + : VCHIQ_SRVSTATE_HIDDEN;
10790 +
10791 + service = vchiq_add_service_internal(
10792 + state,
10793 + params,
10794 + srvstate,
10795 + instance,
10796 + NULL);
10797 +
10798 + if (service) {
10799 + *phandle = service->handle;
10800 + status = VCHIQ_SUCCESS;
10801 + } else
10802 + status = VCHIQ_ERROR;
10803 +
10804 + vchiq_log_trace(vchiq_core_log_level,
10805 + "%s(%p): returning %d", __func__, instance, status);
10806 +
10807 + return status;
10808 +}
10809 +EXPORT_SYMBOL(vchiq_add_service);
10810 +
10811 +/****************************************************************************
10812 +*
10813 +* vchiq_open_service
10814 +*
10815 +***************************************************************************/
10816 +
10817 +VCHIQ_STATUS_T vchiq_open_service(
10818 + VCHIQ_INSTANCE_T instance,
10819 + const VCHIQ_SERVICE_PARAMS_T *params,
10820 + VCHIQ_SERVICE_HANDLE_T *phandle)
10821 +{
10822 + VCHIQ_STATUS_T status = VCHIQ_ERROR;
10823 + VCHIQ_STATE_T *state = instance->state;
10824 + VCHIQ_SERVICE_T *service = NULL;
10825 +
10826 + vchiq_log_trace(vchiq_core_log_level,
10827 + "%s(%p) called", __func__, instance);
10828 +
10829 + *phandle = VCHIQ_SERVICE_HANDLE_INVALID;
10830 +
10831 + if (!vchiq_is_connected(instance))
10832 + goto failed;
10833 +
10834 + service = vchiq_add_service_internal(state,
10835 + params,
10836 + VCHIQ_SRVSTATE_OPENING,
10837 + instance,
10838 + NULL);
10839 +
10840 + if (service) {
10841 + status = vchiq_open_service_internal(service, current->pid);
10842 + if (status == VCHIQ_SUCCESS)
10843 + *phandle = service->handle;
10844 + else
10845 + vchiq_remove_service(service->handle);
10846 + }
10847 +
10848 +failed:
10849 + vchiq_log_trace(vchiq_core_log_level,
10850 + "%s(%p): returning %d", __func__, instance, status);
10851 +
10852 + return status;
10853 +}
10854 +EXPORT_SYMBOL(vchiq_open_service);
10855 +
10856 +VCHIQ_STATUS_T
10857 +vchiq_queue_bulk_transmit(VCHIQ_SERVICE_HANDLE_T handle,
10858 + const void *data, unsigned int size, void *userdata)
10859 +{
10860 + return vchiq_bulk_transfer(handle,
10861 + VCHI_MEM_HANDLE_INVALID, (void *)data, size, userdata,
10862 + VCHIQ_BULK_MODE_CALLBACK, VCHIQ_BULK_TRANSMIT);
10863 +}
10864 +EXPORT_SYMBOL(vchiq_queue_bulk_transmit);
10865 +
10866 +VCHIQ_STATUS_T
10867 +vchiq_queue_bulk_receive(VCHIQ_SERVICE_HANDLE_T handle, void *data,
10868 + unsigned int size, void *userdata)
10869 +{
10870 + return vchiq_bulk_transfer(handle,
10871 + VCHI_MEM_HANDLE_INVALID, data, size, userdata,
10872 + VCHIQ_BULK_MODE_CALLBACK, VCHIQ_BULK_RECEIVE);
10873 +}
10874 +EXPORT_SYMBOL(vchiq_queue_bulk_receive);
10875 +
10876 +VCHIQ_STATUS_T
10877 +vchiq_bulk_transmit(VCHIQ_SERVICE_HANDLE_T handle, const void *data,
10878 + unsigned int size, void *userdata, VCHIQ_BULK_MODE_T mode)
10879 +{
10880 + VCHIQ_STATUS_T status;
10881 +
10882 + switch (mode) {
10883 + case VCHIQ_BULK_MODE_NOCALLBACK:
10884 + case VCHIQ_BULK_MODE_CALLBACK:
10885 + status = vchiq_bulk_transfer(handle,
10886 + VCHI_MEM_HANDLE_INVALID, (void *)data, size, userdata,
10887 + mode, VCHIQ_BULK_TRANSMIT);
10888 + break;
10889 + case VCHIQ_BULK_MODE_BLOCKING:
10890 + status = vchiq_blocking_bulk_transfer(handle,
10891 + (void *)data, size, VCHIQ_BULK_TRANSMIT);
10892 + break;
10893 + default:
10894 + return VCHIQ_ERROR;
10895 + }
10896 +
10897 + return status;
10898 +}
10899 +EXPORT_SYMBOL(vchiq_bulk_transmit);
10900 +
10901 +VCHIQ_STATUS_T
10902 +vchiq_bulk_receive(VCHIQ_SERVICE_HANDLE_T handle, void *data,
10903 + unsigned int size, void *userdata, VCHIQ_BULK_MODE_T mode)
10904 +{
10905 + VCHIQ_STATUS_T status;
10906 +
10907 + switch (mode) {
10908 + case VCHIQ_BULK_MODE_NOCALLBACK:
10909 + case VCHIQ_BULK_MODE_CALLBACK:
10910 + status = vchiq_bulk_transfer(handle,
10911 + VCHI_MEM_HANDLE_INVALID, data, size, userdata,
10912 + mode, VCHIQ_BULK_RECEIVE);
10913 + break;
10914 + case VCHIQ_BULK_MODE_BLOCKING:
10915 + status = vchiq_blocking_bulk_transfer(handle,
10916 + (void *)data, size, VCHIQ_BULK_RECEIVE);
10917 + break;
10918 + default:
10919 + return VCHIQ_ERROR;
10920 + }
10921 +
10922 + return status;
10923 +}
10924 +EXPORT_SYMBOL(vchiq_bulk_receive);
10925 +
10926 +static VCHIQ_STATUS_T
10927 +vchiq_blocking_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle, void *data,
10928 + unsigned int size, VCHIQ_BULK_DIR_T dir)
10929 +{
10930 + VCHIQ_INSTANCE_T instance;
10931 + VCHIQ_SERVICE_T *service;
10932 + VCHIQ_STATUS_T status;
10933 + struct bulk_waiter_node *waiter = NULL;
10934 + struct list_head *pos;
10935 +
10936 + service = find_service_by_handle(handle);
10937 + if (!service)
10938 + return VCHIQ_ERROR;
10939 +
10940 + instance = service->instance;
10941 +
10942 + unlock_service(service);
10943 +
10944 + mutex_lock(&instance->bulk_waiter_list_mutex);
10945 + list_for_each(pos, &instance->bulk_waiter_list) {
10946 + if (list_entry(pos, struct bulk_waiter_node,
10947 + list)->pid == current->pid) {
10948 + waiter = list_entry(pos,
10949 + struct bulk_waiter_node,
10950 + list);
10951 + list_del(pos);
10952 + break;
10953 + }
10954 + }
10955 + mutex_unlock(&instance->bulk_waiter_list_mutex);
10956 +
10957 + if (waiter) {
10958 + VCHIQ_BULK_T *bulk = waiter->bulk_waiter.bulk;
10959 + if (bulk) {
10960 + /* This thread has an outstanding bulk transfer. */
10961 + if ((bulk->data != data) ||
10962 + (bulk->size != size)) {
10963 + /* This is not a retry of the previous one.
10964 + ** Cancel the signal when the transfer
10965 + ** completes. */
10966 + spin_lock(&bulk_waiter_spinlock);
10967 + bulk->userdata = NULL;
10968 + spin_unlock(&bulk_waiter_spinlock);
10969 + }
10970 + }
10971 + }
10972 +
10973 + if (!waiter) {
10974 + waiter = kzalloc(sizeof(struct bulk_waiter_node), GFP_KERNEL);
10975 + if (!waiter) {
10976 + vchiq_log_error(vchiq_core_log_level,
10977 + "%s - out of memory", __func__);
10978 + return VCHIQ_ERROR;
10979 + }
10980 + }
10981 +
10982 + status = vchiq_bulk_transfer(handle, VCHI_MEM_HANDLE_INVALID,
10983 + data, size, &waiter->bulk_waiter, VCHIQ_BULK_MODE_BLOCKING,
10984 + dir);
10985 + if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) ||
10986 + !waiter->bulk_waiter.bulk) {
10987 + VCHIQ_BULK_T *bulk = waiter->bulk_waiter.bulk;
10988 + if (bulk) {
10989 + /* Cancel the signal when the transfer
10990 + ** completes. */
10991 + spin_lock(&bulk_waiter_spinlock);
10992 + bulk->userdata = NULL;
10993 + spin_unlock(&bulk_waiter_spinlock);
10994 + }
10995 + kfree(waiter);
10996 + } else {
10997 + waiter->pid = current->pid;
10998 + mutex_lock(&instance->bulk_waiter_list_mutex);
10999 + list_add(&waiter->list, &instance->bulk_waiter_list);
11000 + mutex_unlock(&instance->bulk_waiter_list_mutex);
11001 + vchiq_log_info(vchiq_arm_log_level,
11002 + "saved bulk_waiter %x for pid %d",
11003 + (unsigned int)waiter, current->pid);
11004 + }
11005 +
11006 + return status;
11007 +}
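+/* Editorial note, not part of the original file: the waiter is only left on
+ * bulk_waiter_list when the transfer returns VCHIQ_RETRY while a bulk is
+ * still outstanding, so that the same thread (matched by pid in the lookup
+ * above) can reuse it on the retry instead of queuing a duplicate transfer.
+ * In every other case the waiter is freed and any pending completion signal
+ * is cancelled by clearing bulk->userdata under bulk_waiter_spinlock.
+ */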
11008 diff --git a/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_memdrv.h b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_memdrv.h
11009 new file mode 100644
11010 index 0000000..d02e776
11011 --- /dev/null
11012 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_memdrv.h
11013 @@ -0,0 +1,71 @@
11014 +/**
11015 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
11016 + *
11017 + * Redistribution and use in source and binary forms, with or without
11018 + * modification, are permitted provided that the following conditions
11019 + * are met:
11020 + * 1. Redistributions of source code must retain the above copyright
11021 + * notice, this list of conditions, and the following disclaimer,
11022 + * without modification.
11023 + * 2. Redistributions in binary form must reproduce the above copyright
11024 + * notice, this list of conditions and the following disclaimer in the
11025 + * documentation and/or other materials provided with the distribution.
11026 + * 3. The names of the above-listed copyright holders may not be used
11027 + * to endorse or promote products derived from this software without
11028 + * specific prior written permission.
11029 + *
11030 + * ALTERNATIVELY, this software may be distributed under the terms of the
11031 + * GNU General Public License ("GPL") version 2, as published by the Free
11032 + * Software Foundation.
11033 + *
11034 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
11035 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
11036 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
11037 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
11038 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
11039 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
11040 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
11041 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
11042 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
11043 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
11044 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
11045 + */
11046 +
11047 +#ifndef VCHIQ_MEMDRV_H
11048 +#define VCHIQ_MEMDRV_H
11049 +
11050 +/* ---- Include Files ----------------------------------------------------- */
11051 +
11052 +#include <linux/kernel.h>
11053 +#include "vchiq_if.h"
11054 +
11055 +/* ---- Constants and Types ---------------------------------------------- */
11056 +
11057 +typedef struct {
11058 + void *armSharedMemVirt;
11059 + dma_addr_t armSharedMemPhys;
11060 + size_t armSharedMemSize;
11061 +
11062 + void *vcSharedMemVirt;
11063 + dma_addr_t vcSharedMemPhys;
11064 + size_t vcSharedMemSize;
11065 +} VCHIQ_SHARED_MEM_INFO_T;
11066 +
11067 +/* ---- Variable Externs ------------------------------------------------- */
11068 +
11069 +/* ---- Function Prototypes ---------------------------------------------- */
11070 +
11071 +void vchiq_get_shared_mem_info(VCHIQ_SHARED_MEM_INFO_T *info);
11072 +
11073 +VCHIQ_STATUS_T vchiq_memdrv_initialise(void);
11074 +
11075 +VCHIQ_STATUS_T vchiq_userdrv_create_instance(
11076 + const VCHIQ_PLATFORM_DATA_T * platform_data);
11077 +
11078 +VCHIQ_STATUS_T vchiq_userdrv_suspend(
11079 + const VCHIQ_PLATFORM_DATA_T * platform_data);
11080 +
11081 +VCHIQ_STATUS_T vchiq_userdrv_resume(
11082 + const VCHIQ_PLATFORM_DATA_T * platform_data);
11083 +
11084 +#endif
11085 diff --git a/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_pagelist.h b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_pagelist.h
11086 new file mode 100644
11087 index 0000000..54a3ece
11088 --- /dev/null
11089 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_pagelist.h
11090 @@ -0,0 +1,58 @@
11091 +/**
11092 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
11093 + *
11094 + * Redistribution and use in source and binary forms, with or without
11095 + * modification, are permitted provided that the following conditions
11096 + * are met:
11097 + * 1. Redistributions of source code must retain the above copyright
11098 + * notice, this list of conditions, and the following disclaimer,
11099 + * without modification.
11100 + * 2. Redistributions in binary form must reproduce the above copyright
11101 + * notice, this list of conditions and the following disclaimer in the
11102 + * documentation and/or other materials provided with the distribution.
11103 + * 3. The names of the above-listed copyright holders may not be used
11104 + * to endorse or promote products derived from this software without
11105 + * specific prior written permission.
11106 + *
11107 + * ALTERNATIVELY, this software may be distributed under the terms of the
11108 + * GNU General Public License ("GPL") version 2, as published by the Free
11109 + * Software Foundation.
11110 + *
11111 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
11112 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
11113 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
11114 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
11115 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
11116 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
11117 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
11118 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
11119 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
11120 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
11121 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
11122 + */
11123 +
11124 +#ifndef VCHIQ_PAGELIST_H
11125 +#define VCHIQ_PAGELIST_H
11126 +
11127 +#ifndef PAGE_SIZE
11128 +#define PAGE_SIZE 4096
11129 +#endif
11130 +#define CACHE_LINE_SIZE 32
11131 +#define PAGELIST_WRITE 0
11132 +#define PAGELIST_READ 1
11133 +#define PAGELIST_READ_WITH_FRAGMENTS 2
11134 +
11135 +typedef struct pagelist_struct {
11136 + unsigned long length;
11137 + unsigned short type;
11138 + unsigned short offset;
11139 + unsigned long addrs[1]; /* N.B. 12 LSBs hold the number of following
11140 + pages at consecutive addresses. */
11141 +} PAGELIST_T;
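+/* Editorial example, not part of the original header: reading the addrs[]
+ * comment above, each entry appears to pack a page-aligned address in the
+ * upper bits with a run length in the low 12 bits, e.g. (illustrative values)
+ *
+ *        addrs[i] = 0x0badc000 | 3;   // this page plus 3 following
+ *                                     // consecutive pages, 4 in total
+ *
+ * which works because page-aligned addresses always have their 12 LSBs clear
+ * (PAGE_SIZE is 4096 here).
+ */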
11142 +
11143 +typedef struct fragments_struct {
11144 + char headbuf[CACHE_LINE_SIZE];
11145 + char tailbuf[CACHE_LINE_SIZE];
11146 +} FRAGMENTS_T;
11147 +
11148 +#endif /* VCHIQ_PAGELIST_H */
11149 diff --git a/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_proc.c b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_proc.c
11150 new file mode 100644
11151 index 0000000..8e59676
11152 --- /dev/null
11153 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_proc.c
11154 @@ -0,0 +1,253 @@
11155 +/**
11156 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
11157 + *
11158 + * Redistribution and use in source and binary forms, with or without
11159 + * modification, are permitted provided that the following conditions
11160 + * are met:
11161 + * 1. Redistributions of source code must retain the above copyright
11162 + * notice, this list of conditions, and the following disclaimer,
11163 + * without modification.
11164 + * 2. Redistributions in binary form must reproduce the above copyright
11165 + * notice, this list of conditions and the following disclaimer in the
11166 + * documentation and/or other materials provided with the distribution.
11167 + * 3. The names of the above-listed copyright holders may not be used
11168 + * to endorse or promote products derived from this software without
11169 + * specific prior written permission.
11170 + *
11171 + * ALTERNATIVELY, this software may be distributed under the terms of the
11172 + * GNU General Public License ("GPL") version 2, as published by the Free
11173 + * Software Foundation.
11174 + *
11175 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
11176 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
11177 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
11178 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
11179 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
11180 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
11181 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
11182 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
11183 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
11184 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
11185 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
11186 + */
11187 +
11188 +
11189 +#include <linux/proc_fs.h>
11190 +#include "vchiq_core.h"
11191 +#include "vchiq_arm.h"
11192 +
11193 +#if 1
11194 +
11195 +int vchiq_proc_init(void)
11196 +{
11197 + return 0;
11198 +}
11199 +
11200 +void vchiq_proc_deinit(void)
11201 +{
11202 +}
11203 +
11204 +#else
11205 +
11206 +struct vchiq_proc_info {
11207 + /* Global 'vc' proc entry used by all instances */
11208 + struct proc_dir_entry *vc_cfg_dir;
11209 +
11210 + /* one entry per client process */
11211 + struct proc_dir_entry *clients;
11212 +
11213 + /* log categories */
11214 + struct proc_dir_entry *log_categories;
11215 +};
11216 +
11217 +static struct vchiq_proc_info proc_info;
11218 +
11219 +struct proc_dir_entry *vchiq_proc_top(void)
11220 +{
11221 + BUG_ON(proc_info.vc_cfg_dir == NULL);
11222 + return proc_info.vc_cfg_dir;
11223 +}
11224 +
11225 +/****************************************************************************
11226 +*
11227 +* log category entries
11228 +*
11229 +***************************************************************************/
11230 +#define PROC_WRITE_BUF_SIZE 256
11231 +
11232 +#define VCHIQ_LOG_ERROR_STR "error"
11233 +#define VCHIQ_LOG_WARNING_STR "warning"
11234 +#define VCHIQ_LOG_INFO_STR "info"
11235 +#define VCHIQ_LOG_TRACE_STR "trace"
11236 +
11237 +static int log_cfg_read(char *buffer,
11238 + char **start,
11239 + off_t off,
11240 + int count,
11241 + int *eof,
11242 + void *data)
11243 +{
11244 + int len = 0;
11245 + char *log_value = NULL;
11246 +
11247 + switch (*((int *)data)) {
11248 + case VCHIQ_LOG_ERROR:
11249 + log_value = VCHIQ_LOG_ERROR_STR;
11250 + break;
11251 + case VCHIQ_LOG_WARNING:
11252 + log_value = VCHIQ_LOG_WARNING_STR;
11253 + break;
11254 + case VCHIQ_LOG_INFO:
11255 + log_value = VCHIQ_LOG_INFO_STR;
11256 + break;
11257 + case VCHIQ_LOG_TRACE:
11258 + log_value = VCHIQ_LOG_TRACE_STR;
11259 + break;
11260 + default:
11261 + break;
11262 + }
11263 +
11264 + len += sprintf(buffer + len,
11265 + "%s\n",
11266 + log_value ? log_value : "(null)");
11267 +
11268 + return len;
11269 +}
11270 +
11271 +
11272 +static int log_cfg_write(struct file *file,
11273 + const char __user *buffer,
11274 + unsigned long count,
11275 + void *data)
11276 +{
11277 + int *log_module = data;
11278 + char kbuf[PROC_WRITE_BUF_SIZE + 1];
11279 +
11280 + (void)file;
11281 +
11282 + memset(kbuf, 0, PROC_WRITE_BUF_SIZE + 1);
11283 + if (count >= PROC_WRITE_BUF_SIZE)
11284 + count = PROC_WRITE_BUF_SIZE;
11285 +
11286 + if (copy_from_user(kbuf,
11287 + buffer,
11288 + count) != 0)
11289 + return -EFAULT;
11290 + kbuf[count - 1] = 0;
11291 +
11292 + if (strncmp("error", kbuf, strlen("error")) == 0)
11293 + *log_module = VCHIQ_LOG_ERROR;
11294 + else if (strncmp("warning", kbuf, strlen("warning")) == 0)
11295 + *log_module = VCHIQ_LOG_WARNING;
11296 + else if (strncmp("info", kbuf, strlen("info")) == 0)
11297 + *log_module = VCHIQ_LOG_INFO;
11298 + else if (strncmp("trace", kbuf, strlen("trace")) == 0)
11299 + *log_module = VCHIQ_LOG_TRACE;
11300 + else
11301 + *log_module = VCHIQ_LOG_DEFAULT;
11302 +
11303 + return count;
11304 +}
11305 +
11306 +/* Log category proc entries */
11307 +struct vchiq_proc_log_entry {
11308 + const char *name;
11309 + int *plevel;
11310 + struct proc_dir_entry *dir;
11311 +};
11312 +
11313 +static struct vchiq_proc_log_entry vchiq_proc_log_entries[] = {
11314 + { "core", &vchiq_core_log_level },
11315 + { "msg", &vchiq_core_msg_log_level },
11316 + { "sync", &vchiq_sync_log_level },
11317 + { "susp", &vchiq_susp_log_level },
11318 + { "arm", &vchiq_arm_log_level },
11319 +};
11320 +static int n_log_entries =
11321 + sizeof(vchiq_proc_log_entries)/sizeof(vchiq_proc_log_entries[0]);
11322 +
11323 +/* create an entry under /proc/vc/log for each log category */
11324 +static int vchiq_proc_create_log_entries(struct proc_dir_entry *top)
11325 +{
11326 + struct proc_dir_entry *dir;
11327 + size_t i;
11328 + int ret = 0;
11329 + dir = proc_mkdir("log", proc_info.vc_cfg_dir);
11330 + if (!dir)
11331 + return -ENOMEM;
11332 + proc_info.log_categories = dir;
11333 +
11334 + for (i = 0; i < n_log_entries; i++) {
11335 + dir = create_proc_entry(vchiq_proc_log_entries[i].name,
11336 + 0644,
11337 + proc_info.log_categories);
11338 + if (!dir) {
11339 + ret = -ENOMEM;
11340 + break;
11341 + }
11342 +
11343 + dir->read_proc = &log_cfg_read;
11344 + dir->write_proc = &log_cfg_write;
11345 + dir->data = (void *)vchiq_proc_log_entries[i].plevel;
11346 +
11347 + vchiq_proc_log_entries[i].dir = dir;
11348 + }
11349 + return ret;
11350 +}
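+
+/****************************************************************************
+*
+* Intended usage of the entries created above (a sketch; this code sits in
+* the currently disabled #else branch, so the paths only exist when that
+* branch is built). The parent directories come from proc_mkdir("vc", NULL)
+* and proc_mkdir("log", ...), and the category names come from
+* vchiq_proc_log_entries:
+*
+*   cat /proc/vc/log/core            prints error/warning/info/trace
+*   echo trace > /proc/vc/log/core   any other string resets the category
+*                                    to VCHIQ_LOG_DEFAULT
+*
+***************************************************************************/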
11351 +
11352 +
11353 +int vchiq_proc_init(void)
11354 +{
11355 + BUG_ON(proc_info.vc_cfg_dir != NULL);
11356 +
11357 + proc_info.vc_cfg_dir = proc_mkdir("vc", NULL);
11358 + if (proc_info.vc_cfg_dir == NULL)
11359 + goto fail;
11360 +
11361 + proc_info.clients = proc_mkdir("clients",
11362 + proc_info.vc_cfg_dir);
11363 + if (!proc_info.clients)
11364 + goto fail;
11365 +
11366 + if (vchiq_proc_create_log_entries(proc_info.vc_cfg_dir) != 0)
11367 + goto fail;
11368 +
11369 + return 0;
11370 +
11371 +fail:
11372 + vchiq_proc_deinit();
11373 + vchiq_log_error(vchiq_arm_log_level,
11374 + "%s: failed to create proc directory",
11375 + __func__);
11376 +
11377 + return -ENOMEM;
11378 +}
11379 +
11380 +/* remove all the proc entries */
11381 +void vchiq_proc_deinit(void)
11382 +{
11383 + /* log category entries */
11384 + if (proc_info.log_categories) {
11385 + size_t i;
11386 + for (i = 0; i < n_log_entries; i++)
11387 + if (vchiq_proc_log_entries[i].dir)
11388 + remove_proc_entry(
11389 + vchiq_proc_log_entries[i].name,
11390 + proc_info.log_categories);
11391 +
11392 + remove_proc_entry(proc_info.log_categories->name,
11393 + proc_info.vc_cfg_dir);
11394 + }
11395 + if (proc_info.clients)
11396 + remove_proc_entry(proc_info.clients->name,
11397 + proc_info.vc_cfg_dir);
11398 + if (proc_info.vc_cfg_dir)
11399 + remove_proc_entry(proc_info.vc_cfg_dir->name, NULL);
11400 +}
11401 +
11402 +struct proc_dir_entry *vchiq_clients_top(void)
11403 +{
11404 + return proc_info.clients;
11405 +}
11406 +
11407 +#endif
11408 diff --git a/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_shim.c b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_shim.c
11409 new file mode 100644
11410 index 0000000..a0b069d
11411 --- /dev/null
11412 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_shim.c
11413 @@ -0,0 +1,828 @@
11414 +/**
11415 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
11416 + *
11417 + * Redistribution and use in source and binary forms, with or without
11418 + * modification, are permitted provided that the following conditions
11419 + * are met:
11420 + * 1. Redistributions of source code must retain the above copyright
11421 + * notice, this list of conditions, and the following disclaimer,
11422 + * without modification.
11423 + * 2. Redistributions in binary form must reproduce the above copyright
11424 + * notice, this list of conditions and the following disclaimer in the
11425 + * documentation and/or other materials provided with the distribution.
11426 + * 3. The names of the above-listed copyright holders may not be used
11427 + * to endorse or promote products derived from this software without
11428 + * specific prior written permission.
11429 + *
11430 + * ALTERNATIVELY, this software may be distributed under the terms of the
11431 + * GNU General Public License ("GPL") version 2, as published by the Free
11432 + * Software Foundation.
11433 + *
11434 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
11435 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
11436 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
11437 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
11438 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
11439 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
11440 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
11441 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
11442 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
11443 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
11444 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
11445 + */
11446 +#include <linux/module.h>
11447 +#include <linux/types.h>
11448 +
11449 +#include "interface/vchi/vchi.h"
11450 +#include "vchiq.h"
11451 +#include "vchiq_core.h"
11452 +
11453 +#include "vchiq_util.h"
11454 +
11455 +#include <stddef.h>
11456 +
11457 +#define vchiq_status_to_vchi(status) ((int32_t)(status))
11458 +
11459 +typedef struct {
11460 + VCHIQ_SERVICE_HANDLE_T handle;
11461 +
11462 + VCHIU_QUEUE_T queue;
11463 +
11464 + VCHI_CALLBACK_T callback;
11465 + void *callback_param;
11466 +} SHIM_SERVICE_T;
11467 +
11468 +/* ----------------------------------------------------------------------
11469 + * return pointer to the mphi message driver function table
11470 + * -------------------------------------------------------------------- */
11471 +const VCHI_MESSAGE_DRIVER_T *
11472 +vchi_mphi_message_driver_func_table(void)
11473 +{
11474 + return NULL;
11475 +}
11476 +
11477 +/* ----------------------------------------------------------------------
11478 + * return a pointer to the 'single' connection driver fops
11479 + * -------------------------------------------------------------------- */
11480 +const VCHI_CONNECTION_API_T *
11481 +single_get_func_table(void)
11482 +{
11483 + return NULL;
11484 +}
11485 +
11486 +VCHI_CONNECTION_T *vchi_create_connection(
11487 + const VCHI_CONNECTION_API_T *function_table,
11488 + const VCHI_MESSAGE_DRIVER_T *low_level)
11489 +{
11490 + (void)function_table;
11491 + (void)low_level;
11492 + return NULL;
11493 +}
11494 +
11495 +/***********************************************************
11496 + * Name: vchi_msg_peek
11497 + *
11498 + * Arguments: const VCHI_SERVICE_HANDLE_T handle,
11499 + * void **data,
11500 + * uint32_t *msg_size,
11501 + *             VCHI_FLAGS_T flags
11504 + *
11505 + * Description: Routine to return a pointer to the current message (to allow in
11506 + * place processing). The message can be removed using
11507 + * vchi_msg_remove when you're finished
11508 + *
11509 + * Returns: int32_t - success == 0
11510 + *
11511 + ***********************************************************/
11512 +int32_t vchi_msg_peek(VCHI_SERVICE_HANDLE_T handle,
11513 + void **data,
11514 + uint32_t *msg_size,
11515 + VCHI_FLAGS_T flags)
11516 +{
11517 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
11518 + VCHIQ_HEADER_T *header;
11519 +
11520 + WARN_ON((flags != VCHI_FLAGS_NONE) &&
11521 + (flags != VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE));
11522 +
11523 + if (flags == VCHI_FLAGS_NONE)
11524 + if (vchiu_queue_is_empty(&service->queue))
11525 + return -1;
11526 +
11527 + header = vchiu_queue_peek(&service->queue);
11528 +
11529 + *data = header->data;
11530 + *msg_size = header->size;
11531 +
11532 + return 0;
11533 +}
11534 +EXPORT_SYMBOL(vchi_msg_peek);
11535 +
11536 +/***********************************************************
11537 + * Name: vchi_msg_remove
11538 + *
11539 + * Arguments: const VCHI_SERVICE_HANDLE_T handle,
11540 + *
11541 + * Description: Routine to remove a message (after it has been read with
11542 + * vchi_msg_peek)
11543 + *
11544 + * Returns: int32_t - success == 0
11545 + *
11546 + ***********************************************************/
11547 +int32_t vchi_msg_remove(VCHI_SERVICE_HANDLE_T handle)
11548 +{
11549 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
11550 + VCHIQ_HEADER_T *header;
11551 +
11552 + header = vchiu_queue_pop(&service->queue);
11553 +
11554 + vchiq_release_message(service->handle, header);
11555 +
11556 + return 0;
11557 +}
11558 +EXPORT_SYMBOL(vchi_msg_remove);
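+
+/***********************************************************
+ * Example: minimal peek/remove usage (a sketch; "handle" is assumed
+ * to come from vchi_service_open() and consume_bytes() is a
+ * hypothetical helper, neither is part of this file). With
+ * VCHI_FLAGS_NONE the peek fails instead of blocking when the queue
+ * is empty; vchi_msg_remove() then pops and releases the message
+ * that the successful peek returned.
+ *
+ *   void *data;
+ *   uint32_t size;
+ *
+ *   if (vchi_msg_peek(handle, &data, &size, VCHI_FLAGS_NONE) == 0) {
+ *           consume_bytes(data, size);
+ *           vchi_msg_remove(handle);
+ *   }
+ ***********************************************************/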
11559 +
11560 +/***********************************************************
11561 + * Name: vchi_msg_queue
11562 + *
11563 + * Arguments: VCHI_SERVICE_HANDLE_T handle,
11564 + * const void *data,
11565 + * uint32_t data_size,
11566 + * VCHI_FLAGS_T flags,
11567 + * void *msg_handle,
11568 + *
11569 + * Description: Thin wrapper to queue a message onto a connection
11570 + *
11571 + * Returns: int32_t - success == 0
11572 + *
11573 + ***********************************************************/
11574 +int32_t vchi_msg_queue(VCHI_SERVICE_HANDLE_T handle,
11575 + const void *data,
11576 + uint32_t data_size,
11577 + VCHI_FLAGS_T flags,
11578 + void *msg_handle)
11579 +{
11580 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
11581 + VCHIQ_ELEMENT_T element = {data, data_size};
11582 + VCHIQ_STATUS_T status;
11583 +
11584 + (void)msg_handle;
11585 +
11586 + WARN_ON(flags != VCHI_FLAGS_BLOCK_UNTIL_QUEUED);
11587 +
11588 + status = vchiq_queue_message(service->handle, &element, 1);
11589 +
11590 + /* vchiq_queue_message() may return VCHIQ_RETRY, so we need to
11591 + ** implement a retry mechanism since this function is supposed
11592 + ** to block until queued
11593 + */
11594 + while (status == VCHIQ_RETRY) {
11595 + msleep(1);
11596 + status = vchiq_queue_message(service->handle, &element, 1);
11597 + }
11598 +
11599 + return vchiq_status_to_vchi(status);
11600 +}
11601 +EXPORT_SYMBOL(vchi_msg_queue);
11602 +
11603 +/***********************************************************
11604 + * Name: vchi_bulk_queue_receive
11605 + *
11606 + * Arguments:  VCHI_SERVICE_HANDLE_T handle,
11607 + *             void *data_dst,
11608 + *             const uint32_t data_size,
11609 + *             VCHI_FLAGS_T flags,
11610 + *             void *bulk_handle
11611 + *
11612 + * Description: Routine to set up a receive buffer
11613 + *
11614 + * Returns: int32_t - success == 0
11615 + *
11616 + ***********************************************************/
11617 +int32_t vchi_bulk_queue_receive(VCHI_SERVICE_HANDLE_T handle,
11618 + void *data_dst,
11619 + uint32_t data_size,
11620 + VCHI_FLAGS_T flags,
11621 + void *bulk_handle)
11622 +{
11623 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
11624 + VCHIQ_BULK_MODE_T mode;
11625 + VCHIQ_STATUS_T status;
11626 +
11627 + switch ((int)flags) {
11628 + case VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE
11629 + | VCHI_FLAGS_BLOCK_UNTIL_QUEUED:
11630 + WARN_ON(!service->callback);
11631 + mode = VCHIQ_BULK_MODE_CALLBACK;
11632 + break;
11633 + case VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE:
11634 + mode = VCHIQ_BULK_MODE_BLOCKING;
11635 + break;
11636 + case VCHI_FLAGS_BLOCK_UNTIL_QUEUED:
11637 + case VCHI_FLAGS_NONE:
11638 + mode = VCHIQ_BULK_MODE_NOCALLBACK;
11639 + break;
11640 + default:
11641 + WARN(1, "unsupported message\n");
11642 + return vchiq_status_to_vchi(VCHIQ_ERROR);
11643 + }
11644 +
11645 + status = vchiq_bulk_receive(service->handle, data_dst, data_size,
11646 + bulk_handle, mode);
11647 +
11648 + /* vchiq_bulk_receive() may return VCHIQ_RETRY, so we need to
11649 + ** implement a retry mechanism since this function is supposed
11650 + ** to block until queued
11651 + */
11652 + while (status == VCHIQ_RETRY) {
11653 + msleep(1);
11654 + status = vchiq_bulk_receive(service->handle, data_dst,
11655 + data_size, bulk_handle, mode);
11656 + }
11657 +
11658 + return vchiq_status_to_vchi(status);
11659 +}
11660 +EXPORT_SYMBOL(vchi_bulk_queue_receive);
11661 +
11662 +/***********************************************************
11663 + * Name: vchi_bulk_queue_transmit
11664 + *
11665 + * Arguments:  VCHI_SERVICE_HANDLE_T handle,
11666 + * const void *data_src,
11667 + * uint32_t data_size,
11668 + * VCHI_FLAGS_T flags,
11669 + * void *bulk_handle
11670 + *
11671 + * Description: Routine to transmit some data
11672 + *
11673 + * Returns: int32_t - success == 0
11674 + *
11675 + ***********************************************************/
11676 +int32_t vchi_bulk_queue_transmit(VCHI_SERVICE_HANDLE_T handle,
11677 + const void *data_src,
11678 + uint32_t data_size,
11679 + VCHI_FLAGS_T flags,
11680 + void *bulk_handle)
11681 +{
11682 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
11683 + VCHIQ_BULK_MODE_T mode;
11684 + VCHIQ_STATUS_T status;
11685 +
11686 + switch ((int)flags) {
11687 + case VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE
11688 + | VCHI_FLAGS_BLOCK_UNTIL_QUEUED:
11689 + WARN_ON(!service->callback);
11690 + mode = VCHIQ_BULK_MODE_CALLBACK;
11691 + break;
11692 + case VCHI_FLAGS_BLOCK_UNTIL_DATA_READ:
11693 + case VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE:
11694 + mode = VCHIQ_BULK_MODE_BLOCKING;
11695 + break;
11696 + case VCHI_FLAGS_BLOCK_UNTIL_QUEUED:
11697 + case VCHI_FLAGS_NONE:
11698 + mode = VCHIQ_BULK_MODE_NOCALLBACK;
11699 + break;
11700 + default:
11701 + WARN(1, "unsupported message\n");
11702 + return vchiq_status_to_vchi(VCHIQ_ERROR);
11703 + }
11704 +
11705 + status = vchiq_bulk_transmit(service->handle, data_src, data_size,
11706 + bulk_handle, mode);
11707 +
11708 + /* vchiq_bulk_transmit() may return VCHIQ_RETRY, so we need to
11709 + ** implement a retry mechanism since this function is supposed
11710 + ** to block until queued
11711 + */
11712 + while (status == VCHIQ_RETRY) {
11713 + msleep(1);
11714 + status = vchiq_bulk_transmit(service->handle, data_src,
11715 + data_size, bulk_handle, mode);
11716 + }
11717 +
11718 + return vchiq_status_to_vchi(status);
11719 +}
11720 +EXPORT_SYMBOL(vchi_bulk_queue_transmit);
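+
+/***********************************************************
+ * Example: a blocking bulk transmit (a sketch; "handle", "buf" and
+ * "len" are assumed to exist in the caller). With
+ * VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE the call maps to
+ * VCHIQ_BULK_MODE_BLOCKING above; the final argument is only handed
+ * back to the service callback on completion, so passing NULL here
+ * is an assumption that the caller does not need it.
+ *
+ *   int32_t ret;
+ *
+ *   ret = vchi_bulk_queue_transmit(handle, buf, len,
+ *                                  VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE,
+ *                                  NULL);
+ *   if (ret != 0)
+ *           return ret;
+ ***********************************************************/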
11721 +
11722 +/***********************************************************
11723 + * Name: vchi_msg_dequeue
11724 + *
11725 + * Arguments: VCHI_SERVICE_HANDLE_T handle,
11726 + * void *data,
11727 + * uint32_t max_data_size_to_read,
11728 + * uint32_t *actual_msg_size
11729 + * VCHI_FLAGS_T flags
11730 + *
11731 + * Description: Routine to dequeue a message into the supplied buffer
11732 + *
11733 + * Returns: int32_t - success == 0
11734 + *
11735 + ***********************************************************/
11736 +int32_t vchi_msg_dequeue(VCHI_SERVICE_HANDLE_T handle,
11737 + void *data,
11738 + uint32_t max_data_size_to_read,
11739 + uint32_t *actual_msg_size,
11740 + VCHI_FLAGS_T flags)
11741 +{
11742 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
11743 + VCHIQ_HEADER_T *header;
11744 +
11745 + WARN_ON((flags != VCHI_FLAGS_NONE) &&
11746 + (flags != VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE));
11747 +
11748 + if (flags == VCHI_FLAGS_NONE)
11749 + if (vchiu_queue_is_empty(&service->queue))
11750 + return -1;
11751 +
11752 + header = vchiu_queue_pop(&service->queue);
11753 +
11754 + memcpy(data, header->data, header->size < max_data_size_to_read ?
11755 + header->size : max_data_size_to_read);
11756 +
11757 + *actual_msg_size = header->size;
11758 +
11759 + vchiq_release_message(service->handle, header);
11760 +
11761 + return 0;
11762 +}
11763 +EXPORT_SYMBOL(vchi_msg_dequeue);
11764 +
11765 +/***********************************************************
11766 + * Name: vchi_msg_queuev
11767 + *
11768 + * Arguments: VCHI_SERVICE_HANDLE_T handle,
11769 + * VCHI_MSG_VECTOR_T *vector,
11770 + * uint32_t count,
11771 + * VCHI_FLAGS_T flags,
11772 + * void *msg_handle
11773 + *
11774 + * Description: Thin wrapper to queue a message onto a connection
11775 + *
11776 + * Returns: int32_t - success == 0
11777 + *
11778 + ***********************************************************/
11779 +
11780 +vchiq_static_assert(sizeof(VCHI_MSG_VECTOR_T) == sizeof(VCHIQ_ELEMENT_T));
11781 +vchiq_static_assert(offsetof(VCHI_MSG_VECTOR_T, vec_base) ==
11782 + offsetof(VCHIQ_ELEMENT_T, data));
11783 +vchiq_static_assert(offsetof(VCHI_MSG_VECTOR_T, vec_len) ==
11784 + offsetof(VCHIQ_ELEMENT_T, size));
11785 +
11786 +int32_t vchi_msg_queuev(VCHI_SERVICE_HANDLE_T handle,
11787 + VCHI_MSG_VECTOR_T *vector,
11788 + uint32_t count,
11789 + VCHI_FLAGS_T flags,
11790 + void *msg_handle)
11791 +{
11792 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
11793 +
11794 + (void)msg_handle;
11795 +
11796 + WARN_ON(flags != VCHI_FLAGS_BLOCK_UNTIL_QUEUED);
11797 +
11798 + return vchiq_status_to_vchi(vchiq_queue_message(service->handle,
11799 + (const VCHIQ_ELEMENT_T *)vector, count));
11800 +}
11801 +EXPORT_SYMBOL(vchi_msg_queuev);
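+
+/***********************************************************
+ * Example: queueing a two-part message with vchi_msg_queuev() (a
+ * sketch; "handle", "hdr"/"hdr_len" and "payload"/"payload_len" are
+ * assumed to exist in the caller). The vchiq_static_assert()s above
+ * are what make the direct cast from VCHI_MSG_VECTOR_T to
+ * VCHIQ_ELEMENT_T legal: the two structs have the same size and the
+ * vec_base/vec_len members sit at the same offsets as data/size.
+ * The msg_handle argument is ignored by this shim, so NULL is passed.
+ *
+ *   VCHI_MSG_VECTOR_T vec[2];
+ *
+ *   vec[0].vec_base = hdr;
+ *   vec[0].vec_len  = hdr_len;
+ *   vec[1].vec_base = payload;
+ *   vec[1].vec_len  = payload_len;
+ *
+ *   vchi_msg_queuev(handle, vec, 2, VCHI_FLAGS_BLOCK_UNTIL_QUEUED,
+ *                   NULL);
+ ***********************************************************/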
11802 +
11803 +/***********************************************************
11804 + * Name: vchi_held_msg_release
11805 + *
11806 + * Arguments: VCHI_HELD_MSG_T *message
11807 + *
11808 + * Description: Routine to release a held message (after it has been read with
11809 + * vchi_msg_hold)
11810 + *
11811 + * Returns: int32_t - success == 0
11812 + *
11813 + ***********************************************************/
11814 +int32_t vchi_held_msg_release(VCHI_HELD_MSG_T *message)
11815 +{
11816 + vchiq_release_message((VCHIQ_SERVICE_HANDLE_T)message->service,
11817 + (VCHIQ_HEADER_T *)message->message);
11818 +
11819 + return 0;
11820 +}
11821 +EXPORT_SYMBOL(vchi_held_msg_release);
11822 +
11823 +/***********************************************************
11824 + * Name: vchi_msg_hold
11825 + *
11826 + * Arguments: VCHI_SERVICE_HANDLE_T handle,
11827 + * void **data,
11828 + * uint32_t *msg_size,
11829 + * VCHI_FLAGS_T flags,
11830 + * VCHI_HELD_MSG_T *message_handle
11831 + *
11832 + * Description: Routine to return a pointer to the current message (to allow
11833 + * in place processing). The message is dequeued - don't forget
11834 + * to release the message using vchi_held_msg_release when you're
11835 + * finished.
11836 + *
11837 + * Returns: int32_t - success == 0
11838 + *
11839 + ***********************************************************/
11840 +int32_t vchi_msg_hold(VCHI_SERVICE_HANDLE_T handle,
11841 + void **data,
11842 + uint32_t *msg_size,
11843 + VCHI_FLAGS_T flags,
11844 + VCHI_HELD_MSG_T *message_handle)
11845 +{
11846 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
11847 + VCHIQ_HEADER_T *header;
11848 +
11849 + WARN_ON((flags != VCHI_FLAGS_NONE) &&
11850 + (flags != VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE));
11851 +
11852 + if (flags == VCHI_FLAGS_NONE)
11853 + if (vchiu_queue_is_empty(&service->queue))
11854 + return -1;
11855 +
11856 + header = vchiu_queue_pop(&service->queue);
11857 +
11858 + *data = header->data;
11859 + *msg_size = header->size;
11860 +
11861 + message_handle->service =
11862 + (struct opaque_vchi_service_t *)service->handle;
11863 + message_handle->message = header;
11864 +
11865 + return 0;
11866 +}
11867 +EXPORT_SYMBOL(vchi_msg_hold);
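+
+/***********************************************************
+ * Example: hold/release usage (a sketch; "handle" is assumed to come
+ * from vchi_service_open() and parse_in_place() is a hypothetical
+ * helper). Unlike vchi_msg_peek(), vchi_msg_hold() dequeues the
+ * message, so it stays valid until vchi_held_msg_release() is called.
+ *
+ *   VCHI_HELD_MSG_T held;
+ *   void *data;
+ *   uint32_t size;
+ *
+ *   if (vchi_msg_hold(handle, &data, &size, VCHI_FLAGS_NONE,
+ *                     &held) == 0) {
+ *           parse_in_place(data, size);
+ *           vchi_held_msg_release(&held);
+ *   }
+ ***********************************************************/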
11868 +
11869 +/***********************************************************
11870 + * Name: vchi_initialise
11871 + *
11872 + * Arguments: VCHI_INSTANCE_T *instance_handle
11875 + *
11876 + * Description: Initialises the hardware but does not transmit anything.
11877 + *              When run as a Host App this will be called twice, hence
11878 + *              the need to malloc the state information.
11879 + *
11880 + * Returns: 0 if successful, failure otherwise
11881 + *
11882 + ***********************************************************/
11883 +
11884 +int32_t vchi_initialise(VCHI_INSTANCE_T *instance_handle)
11885 +{
11886 + VCHIQ_INSTANCE_T instance;
11887 + VCHIQ_STATUS_T status;
11888 +
11889 + status = vchiq_initialise(&instance);
11890 +
11891 + *instance_handle = (VCHI_INSTANCE_T)instance;
11892 +
11893 + return vchiq_status_to_vchi(status);
11894 +}
11895 +EXPORT_SYMBOL(vchi_initialise);
11896 +
11897 +/***********************************************************
11898 + * Name: vchi_connect
11899 + *
11900 + * Arguments: VCHI_CONNECTION_T **connections
11901 + * const uint32_t num_connections
11902 + *             VCHI_INSTANCE_T instance_handle
11903 + *
11904 + * Description: Starts the command service on each connection,
11905 + * causing INIT messages to be pinged back and forth
11906 + *
11907 + * Returns: 0 if successful, failure otherwise
11908 + *
11909 + ***********************************************************/
11910 +int32_t vchi_connect(VCHI_CONNECTION_T **connections,
11911 + const uint32_t num_connections,
11912 + VCHI_INSTANCE_T instance_handle)
11913 +{
11914 + VCHIQ_INSTANCE_T instance = (VCHIQ_INSTANCE_T)instance_handle;
11915 +
11916 + (void)connections;
11917 + (void)num_connections;
11918 +
11919 + return vchiq_connect(instance);
11920 +}
11921 +EXPORT_SYMBOL(vchi_connect);
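+
+/***********************************************************
+ * Example: typical start-up sequence for a kernel-side client (a
+ * sketch). In this shim vchi_connect() ignores its connections array
+ * and count, so NULL and 0 are passed below; that reflects this
+ * implementation rather than a general guarantee of the VCHI API.
+ *
+ *   VCHI_INSTANCE_T instance;
+ *
+ *   if (vchi_initialise(&instance) != 0)
+ *           return -EIO;
+ *   if (vchi_connect(NULL, 0, instance) != 0)
+ *           return -EIO;
+ *
+ *   (services can now be opened against "instance" with
+ *    vchi_service_open below)
+ ***********************************************************/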
11922 +
11923 +
11924 +/***********************************************************
11925 + * Name: vchi_disconnect
11926 + *
11927 + * Arguments: VCHI_INSTANCE_T instance_handle
11928 + *
11929 + * Description: Stops the command service on each connection,
11930 + * causing DE-INIT messages to be pinged back and forth
11931 + *
11932 + * Returns: 0 if successful, failure otherwise
11933 + *
11934 + ***********************************************************/
11935 +int32_t vchi_disconnect(VCHI_INSTANCE_T instance_handle)
11936 +{
11937 + VCHIQ_INSTANCE_T instance = (VCHIQ_INSTANCE_T)instance_handle;
11938 + return vchiq_status_to_vchi(vchiq_shutdown(instance));
11939 +}
11940 +EXPORT_SYMBOL(vchi_disconnect);
11941 +
11942 +
11943 +/***********************************************************
11944 + * Name: vchi_service_open
11945 + * Name: vchi_service_create
11946 + *
11947 + * Arguments: VCHI_INSTANCE_T instance_handle
11948 + * SERVICE_CREATION_T *setup,
11949 + * VCHI_SERVICE_HANDLE_T *handle
11950 + *
11951 + * Description: Routine to open a service
11952 + *
11953 + * Returns: int32_t - success == 0
11954 + *
11955 + ***********************************************************/
11956 +
11957 +static VCHIQ_STATUS_T shim_callback(VCHIQ_REASON_T reason,
11958 + VCHIQ_HEADER_T *header, VCHIQ_SERVICE_HANDLE_T handle, void *bulk_user)
11959 +{
11960 + SHIM_SERVICE_T *service =
11961 + (SHIM_SERVICE_T *)VCHIQ_GET_SERVICE_USERDATA(handle);
11962 +
11963 + if (!service->callback)
11964 + goto release;
11965 +
11966 + switch (reason) {
11967 + case VCHIQ_MESSAGE_AVAILABLE:
11968 + vchiu_queue_push(&service->queue, header);
11969 +
11970 + service->callback(service->callback_param,
11971 + VCHI_CALLBACK_MSG_AVAILABLE, NULL);
11972 +
11973 + goto done;
11975 +
11976 + case VCHIQ_BULK_TRANSMIT_DONE:
11977 + service->callback(service->callback_param,
11978 + VCHI_CALLBACK_BULK_SENT, bulk_user);
11979 + break;
11980 +
11981 + case VCHIQ_BULK_RECEIVE_DONE:
11982 + service->callback(service->callback_param,
11983 + VCHI_CALLBACK_BULK_RECEIVED, bulk_user);
11984 + break;
11985 +
11986 + case VCHIQ_SERVICE_CLOSED:
11987 + service->callback(service->callback_param,
11988 + VCHI_CALLBACK_SERVICE_CLOSED, NULL);
11989 + break;
11990 +
11991 + case VCHIQ_SERVICE_OPENED:
11992 + /* No equivalent VCHI reason */
11993 + break;
11994 +
11995 + case VCHIQ_BULK_TRANSMIT_ABORTED:
11996 + service->callback(service->callback_param,
11997 + VCHI_CALLBACK_BULK_TRANSMIT_ABORTED,
11998 + bulk_user);
11999 + break;
12000 +
12001 + case VCHIQ_BULK_RECEIVE_ABORTED:
12002 + service->callback(service->callback_param,
12003 + VCHI_CALLBACK_BULK_RECEIVE_ABORTED,
12004 + bulk_user);
12005 + break;
12006 +
12007 + default:
12008 + WARN(1, "not supported\n");
12009 + break;
12010 + }
12011 +
12012 +release:
12013 + vchiq_release_message(service->handle, header);
12014 +done:
12015 + return VCHIQ_SUCCESS;
12016 +}
12017 +
12018 +static SHIM_SERVICE_T *service_alloc(VCHIQ_INSTANCE_T instance,
12019 + SERVICE_CREATION_T *setup)
12020 +{
12021 + SHIM_SERVICE_T *service = kzalloc(sizeof(SHIM_SERVICE_T), GFP_KERNEL);
12022 +
12023 + (void)instance;
12024 +
12025 + if (service) {
12026 + if (vchiu_queue_init(&service->queue, 64)) {
12027 + service->callback = setup->callback;
12028 + service->callback_param = setup->callback_param;
12029 + } else {
12030 + kfree(service);
12031 + service = NULL;
12032 + }
12033 + }
12034 +
12035 + return service;
12036 +}
12037 +
12038 +static void service_free(SHIM_SERVICE_T *service)
12039 +{
12040 + if (service) {
12041 + vchiu_queue_delete(&service->queue);
12042 + kfree(service);
12043 + }
12044 +}
12045 +
12046 +int32_t vchi_service_open(VCHI_INSTANCE_T instance_handle,
12047 + SERVICE_CREATION_T *setup,
12048 + VCHI_SERVICE_HANDLE_T *handle)
12049 +{
12050 + VCHIQ_INSTANCE_T instance = (VCHIQ_INSTANCE_T)instance_handle;
12051 + SHIM_SERVICE_T *service = service_alloc(instance, setup);
12052 + if (service) {
12053 + VCHIQ_SERVICE_PARAMS_T params;
12054 + VCHIQ_STATUS_T status;
12055 +
12056 + memset(&params, 0, sizeof(params));
12057 + params.fourcc = setup->service_id;
12058 + params.callback = shim_callback;
12059 + params.userdata = service;
12060 + params.version = setup->version.version;
12061 + params.version_min = setup->version.version_min;
12062 +
12063 + status = vchiq_open_service(instance, &params,
12064 + &service->handle);
12065 + if (status != VCHIQ_SUCCESS) {
12066 + service_free(service);
12067 + service = NULL;
12068 + }
12069 + }
12070 +
12071 + *handle = (VCHI_SERVICE_HANDLE_T)service;
12072 +
12073 + return (service != NULL) ? 0 : -1;
12074 +}
12075 +EXPORT_SYMBOL(vchi_service_open);
12076 +
12077 +int32_t vchi_service_create(VCHI_INSTANCE_T instance_handle,
12078 + SERVICE_CREATION_T *setup,
12079 + VCHI_SERVICE_HANDLE_T *handle)
12080 +{
12081 + VCHIQ_INSTANCE_T instance = (VCHIQ_INSTANCE_T)instance_handle;
12082 + SHIM_SERVICE_T *service = service_alloc(instance, setup);
12083 + if (service) {
12084 + VCHIQ_SERVICE_PARAMS_T params;
12085 + VCHIQ_STATUS_T status;
12086 +
12087 + memset(&params, 0, sizeof(params));
12088 + params.fourcc = setup->service_id;
12089 + params.callback = shim_callback;
12090 + params.userdata = service;
12091 + params.version = setup->version.version;
12092 + params.version_min = setup->version.version_min;
12093 + status = vchiq_add_service(instance, &params, &service->handle);
12094 +
12095 + if (status != VCHIQ_SUCCESS) {
12096 + service_free(service);
12097 + service = NULL;
12098 + }
12099 + }
12100 +
12101 + *handle = (VCHI_SERVICE_HANDLE_T)service;
12102 +
12103 + return (service != NULL) ? 0 : -1;
12104 +}
12105 +EXPORT_SYMBOL(vchi_service_create);
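+
+/***********************************************************
+ * Example: opening a service on a connected instance (a sketch;
+ * MY_FOURCC, my_callback and my_state are hypothetical, and
+ * SERVICE_CREATION_T is declared in the VCHI headers added by this
+ * patch with more members than shown here; the fields below are the
+ * only ones this shim reads).
+ *
+ *   SERVICE_CREATION_T setup;
+ *   VCHI_SERVICE_HANDLE_T handle;
+ *
+ *   memset(&setup, 0, sizeof(setup));
+ *   setup.service_id          = MY_FOURCC;
+ *   setup.callback            = my_callback;
+ *   setup.callback_param      = my_state;
+ *   setup.version.version     = 1;
+ *   setup.version.version_min = 1;
+ *
+ *   if (vchi_service_open(instance, &setup, &handle) != 0)
+ *           return -ENXIO;
+ ***********************************************************/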
12106 +
12107 +int32_t vchi_service_close(const VCHI_SERVICE_HANDLE_T handle)
12108 +{
12109 + int32_t ret = -1;
12110 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
12111 + if (service) {
12112 + VCHIQ_STATUS_T status = vchiq_close_service(service->handle);
12113 + if (status == VCHIQ_SUCCESS) {
12114 + service_free(service);
12115 + service = NULL;
12116 + }
12117 +
12118 + ret = vchiq_status_to_vchi(status);
12119 + }
12120 + return ret;
12121 +}
12122 +EXPORT_SYMBOL(vchi_service_close);
12123 +
12124 +int32_t vchi_service_destroy(const VCHI_SERVICE_HANDLE_T handle)
12125 +{
12126 + int32_t ret = -1;
12127 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
12128 + if (service) {
12129 + VCHIQ_STATUS_T status = vchiq_remove_service(service->handle);
12130 + if (status == VCHIQ_SUCCESS) {
12131 + service_free(service);
12132 + service = NULL;
12133 + }
12134 +
12135 + ret = vchiq_status_to_vchi(status);
12136 + }
12137 + return ret;
12138 +}
12139 +EXPORT_SYMBOL(vchi_service_destroy);
12140 +
12141 +int32_t vchi_get_peer_version(const VCHI_SERVICE_HANDLE_T handle, short *peer_version)
12142 +{
12143 +	int32_t ret = -1;
12144 +	SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
12145 +	if (service) {
12147 +		VCHIQ_STATUS_T status = vchiq_get_peer_version(service->handle, peer_version);
12148 +		ret = vchiq_status_to_vchi(status);
12149 +	}
12150 +	return ret;
12151 +}
12152 +EXPORT_SYMBOL(vchi_get_peer_version);
12153 +
12154 +/* ----------------------------------------------------------------------
12155 + * read a uint32_t from buffer.
12156 + * network format is defined to be little endian
12157 + * -------------------------------------------------------------------- */
12158 +uint32_t
12159 +vchi_readbuf_uint32(const void *_ptr)
12160 +{
12161 + const unsigned char *ptr = _ptr;
12162 + return ptr[0] | (ptr[1] << 8) | (ptr[2] << 16) | (ptr[3] << 24);
12163 +}
12164 +
12165 +/* ----------------------------------------------------------------------
12166 + * write a uint32_t to buffer.
12167 + * network format is defined to be little endian
12168 + * -------------------------------------------------------------------- */
12169 +void
12170 +vchi_writebuf_uint32(void *_ptr, uint32_t value)
12171 +{
12172 + unsigned char *ptr = _ptr;
12173 + ptr[0] = (unsigned char)((value >> 0) & 0xFF);
12174 + ptr[1] = (unsigned char)((value >> 8) & 0xFF);
12175 + ptr[2] = (unsigned char)((value >> 16) & 0xFF);
12176 + ptr[3] = (unsigned char)((value >> 24) & 0xFF);
12177 +}
12178 +
12179 +/* ----------------------------------------------------------------------
12180 + * read a uint16_t from buffer.
12181 + * network format is defined to be little endian
12182 + * -------------------------------------------------------------------- */
12183 +uint16_t
12184 +vchi_readbuf_uint16(const void *_ptr)
12185 +{
12186 + const unsigned char *ptr = _ptr;
12187 + return ptr[0] | (ptr[1] << 8);
12188 +}
12189 +
12190 +/* ----------------------------------------------------------------------
12191 + * write a uint16_t into the buffer.
12192 + * network format is defined to be little endian
12193 + * -------------------------------------------------------------------- */
12194 +void
12195 +vchi_writebuf_uint16(void *_ptr, uint16_t value)
12196 +{
12197 + unsigned char *ptr = _ptr;
12198 + ptr[0] = (value >> 0) & 0xFF;
12199 + ptr[1] = (value >> 8) & 0xFF;
12200 +}
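+
+/* ----------------------------------------------------------------------
+ * Example (a sketch): the helpers above always use little-endian byte
+ * order regardless of host endianness, so a value written with
+ * vchi_writebuf_uint32() reads back unchanged with vchi_readbuf_uint32():
+ *
+ *   unsigned char buf[4];
+ *   vchi_writebuf_uint32(buf, 0x12345678);
+ *     (buf now holds 0x78, 0x56, 0x34, 0x12)
+ *   vchi_readbuf_uint32(buf) == 0x12345678
+ * -------------------------------------------------------------------- */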
12201 +
12202 +/***********************************************************
12203 + * Name: vchi_service_use
12204 + *
12205 + * Arguments: const VCHI_SERVICE_HANDLE_T handle
12206 + *
12207 + * Description: Routine to increment refcount on a service
12208 + *
12209 + * Returns: void
12210 + *
12211 + ***********************************************************/
12212 +int32_t vchi_service_use(const VCHI_SERVICE_HANDLE_T handle)
12213 +{
12214 + int32_t ret = -1;
12215 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
12216 + if (service)
12217 + ret = vchiq_status_to_vchi(vchiq_use_service(service->handle));
12218 + return ret;
12219 +}
12220 +EXPORT_SYMBOL(vchi_service_use);
12221 +
12222 +/***********************************************************
12223 + * Name: vchi_service_release
12224 + *
12225 + * Arguments: const VCHI_SERVICE_HANDLE_T handle
12226 + *
12227 + * Description: Routine to decrement refcount on a service
12228 + *
12229 + * Returns: void
12230 + *
12231 + ***********************************************************/
12232 +int32_t vchi_service_release(const VCHI_SERVICE_HANDLE_T handle)
12233 +{
12234 + int32_t ret = -1;
12235 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
12236 + if (service)
12237 + ret = vchiq_status_to_vchi(
12238 + vchiq_release_service(service->handle));
12239 + return ret;
12240 +}
12241 +EXPORT_SYMBOL(vchi_service_release);
12242 diff --git a/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.c b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.c
12243 new file mode 100644
12244 index 0000000..c2eefef
12245 --- /dev/null
12246 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.c
12247 @@ -0,0 +1,151 @@
12248 +/**
12249 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
12250 + *
12251 + * Redistribution and use in source and binary forms, with or without
12252 + * modification, are permitted provided that the following conditions
12253 + * are met:
12254 + * 1. Redistributions of source code must retain the above copyright
12255 + * notice, this list of conditions, and the following disclaimer,
12256 + * without modification.
12257 + * 2. Redistributions in binary form must reproduce the above copyright
12258 + * notice, this list of conditions and the following disclaimer in the
12259 + * documentation and/or other materials provided with the distribution.
12260 + * 3. The names of the above-listed copyright holders may not be used
12261 + * to endorse or promote products derived from this software without
12262 + * specific prior written permission.
12263 + *
12264 + * ALTERNATIVELY, this software may be distributed under the terms of the
12265 + * GNU General Public License ("GPL") version 2, as published by the Free
12266 + * Software Foundation.
12267 + *
12268 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
12269 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
12270 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
12271 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
12272 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
12273 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
12274 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
12275 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
12276 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
12277 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
12278 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
12279 + */
12280 +
12281 +#include "vchiq_util.h"
12282 +
12283 +static inline int is_pow2(int i)
12284 +{
12285 + return i && !(i & (i - 1));
12286 +}
12287 +
12288 +int vchiu_queue_init(VCHIU_QUEUE_T *queue, int size)
12289 +{
12290 + WARN_ON(!is_pow2(size));
12291 +
12292 + queue->size = size;
12293 + queue->read = 0;
12294 + queue->write = 0;
12295 +
12296 + sema_init(&queue->pop, 0);
12297 + sema_init(&queue->push, 0);
12298 +
12299 + queue->storage = kzalloc(size * sizeof(VCHIQ_HEADER_T *), GFP_KERNEL);
12300 + if (queue->storage == NULL) {
12301 + vchiu_queue_delete(queue);
12302 + return 0;
12303 + }
12304 + return 1;
12305 +}
12306 +
12307 +void vchiu_queue_delete(VCHIU_QUEUE_T *queue)
12308 +{
12309 +	kfree(queue->storage);
12311 +}
12312 +
12313 +int vchiu_queue_is_empty(VCHIU_QUEUE_T *queue)
12314 +{
12315 + return queue->read == queue->write;
12316 +}
12317 +
12318 +int vchiu_queue_is_full(VCHIU_QUEUE_T *queue)
12319 +{
12320 + return queue->write == queue->read + queue->size;
12321 +}
12322 +
12323 +void vchiu_queue_push(VCHIU_QUEUE_T *queue, VCHIQ_HEADER_T *header)
12324 +{
12325 + while (queue->write == queue->read + queue->size) {
12326 + if (down_interruptible(&queue->pop) != 0) {
12327 + flush_signals(current);
12328 + }
12329 + }
12330 +
12331 + /*
12332 + * Write to queue->storage must be visible after read from
12333 + * queue->read
12334 + */
12335 + smp_mb();
12336 +
12337 + queue->storage[queue->write & (queue->size - 1)] = header;
12338 +
12339 + /*
12340 + * Write to queue->storage must be visible before write to
12341 + * queue->write
12342 + */
12343 + smp_wmb();
12344 +
12345 + queue->write++;
12346 +
12347 + up(&queue->push);
12348 +}
12349 +
12350 +VCHIQ_HEADER_T *vchiu_queue_peek(VCHIU_QUEUE_T *queue)
12351 +{
12352 + while (queue->write == queue->read) {
12353 + if (down_interruptible(&queue->push) != 0) {
12354 + flush_signals(current);
12355 + }
12356 + }
12357 +
12358 +	up(&queue->push); /* We haven't removed anything from the queue. */
12359 +
12360 + /*
12361 + * Read from queue->storage must be visible after read from
12362 + * queue->write
12363 + */
12364 + smp_rmb();
12365 +
12366 + return queue->storage[queue->read & (queue->size - 1)];
12367 +}
12368 +
12369 +VCHIQ_HEADER_T *vchiu_queue_pop(VCHIU_QUEUE_T *queue)
12370 +{
12371 + VCHIQ_HEADER_T *header;
12372 +
12373 + while (queue->write == queue->read) {
12374 + if (down_interruptible(&queue->push) != 0) {
12375 + flush_signals(current);
12376 + }
12377 + }
12378 +
12379 + /*
12380 + * Read from queue->storage must be visible after read from
12381 + * queue->write
12382 + */
12383 + smp_rmb();
12384 +
12385 + header = queue->storage[queue->read & (queue->size - 1)];
12386 +
12387 + /*
12388 + * Read from queue->storage must be visible before write to
12389 + * queue->read
12390 + */
12391 + smp_mb();
12392 +
12393 + queue->read++;
12394 +
12395 + up(&queue->pop);
12396 +
12397 + return header;
12398 +}
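+
+/*
+ * Note on the indexing above (a sketch of the invariant, not new
+ * behaviour): "read" and "write" are free-running counters and the
+ * storage size is a power of two (checked in vchiu_queue_init), so a
+ * counter is mapped to a slot with "index & (size - 1)". For example,
+ * with size == 64 a write counter of 70 lands in slot 70 & 63 == 6,
+ * and the queue is full exactly when write == read + size.
+ */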
12399 diff --git a/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.h b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.h
12400 new file mode 100644
12401 index 0000000..f4d0b66
12402 --- /dev/null
12403 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.h
12404 @@ -0,0 +1,81 @@
12405 +/**
12406 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
12407 + *
12408 + * Redistribution and use in source and binary forms, with or without
12409 + * modification, are permitted provided that the following conditions
12410 + * are met:
12411 + * 1. Redistributions of source code must retain the above copyright
12412 + * notice, this list of conditions, and the following disclaimer,
12413 + * without modification.
12414 + * 2. Redistributions in binary form must reproduce the above copyright
12415 + * notice, this list of conditions and the following disclaimer in the
12416 + * documentation and/or other materials provided with the distribution.
12417 + * 3. The names of the above-listed copyright holders may not be used
12418 + * to endorse or promote products derived from this software without
12419 + * specific prior written permission.
12420 + *
12421 + * ALTERNATIVELY, this software may be distributed under the terms of the
12422 + * GNU General Public License ("GPL") version 2, as published by the Free
12423 + * Software Foundation.
12424 + *
12425 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
12426 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
12427 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
12428 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
12429 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
12430 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
12431 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
12432 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
12433 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
12434 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
12435 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
12436 + */
12437 +
12438 +#ifndef VCHIQ_UTIL_H
12439 +#define VCHIQ_UTIL_H
12440 +
12441 +#include <linux/types.h>
12442 +#include <linux/semaphore.h>
12443 +#include <linux/mutex.h>
12444 +#include <linux/bitops.h>
12445 +#include <linux/kthread.h>
12446 +#include <linux/wait.h>
12447 +#include <linux/vmalloc.h>
12448 +#include <linux/jiffies.h>
12449 +#include <linux/delay.h>
12450 +#include <linux/string.h>
12452 +#include <linux/interrupt.h>
12453 +#include <linux/random.h>
12454 +#include <linux/sched.h>
12455 +#include <linux/ctype.h>
12456 +#include <linux/uaccess.h>
12457 +#include <linux/time.h> /* for time_t */
12458 +#include <linux/slab.h>
12460 +
12461 +#include "vchiq_if.h"
12462 +
12463 +typedef struct {
12464 + int size;
12465 + int read;
12466 + int write;
12467 +
12468 + struct semaphore pop;
12469 + struct semaphore push;
12470 +
12471 + VCHIQ_HEADER_T **storage;
12472 +} VCHIU_QUEUE_T;
12473 +
12474 +extern int vchiu_queue_init(VCHIU_QUEUE_T *queue, int size);
12475 +extern void vchiu_queue_delete(VCHIU_QUEUE_T *queue);
12476 +
12477 +extern int vchiu_queue_is_empty(VCHIU_QUEUE_T *queue);
12478 +extern int vchiu_queue_is_full(VCHIU_QUEUE_T *queue);
12479 +
12480 +extern void vchiu_queue_push(VCHIU_QUEUE_T *queue, VCHIQ_HEADER_T *header);
12481 +
12482 +extern VCHIQ_HEADER_T *vchiu_queue_peek(VCHIU_QUEUE_T *queue);
12483 +extern VCHIQ_HEADER_T *vchiu_queue_pop(VCHIU_QUEUE_T *queue);
12484 +
12485 +#endif
12486 diff --git a/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_version.c b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_version.c
12487 new file mode 100644
12488 index 0000000..b6bfa21
12489 --- /dev/null
12490 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_version.c
12491 @@ -0,0 +1,59 @@
12492 +/**
12493 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
12494 + *
12495 + * Redistribution and use in source and binary forms, with or without
12496 + * modification, are permitted provided that the following conditions
12497 + * are met:
12498 + * 1. Redistributions of source code must retain the above copyright
12499 + * notice, this list of conditions, and the following disclaimer,
12500 + * without modification.
12501 + * 2. Redistributions in binary form must reproduce the above copyright
12502 + * notice, this list of conditions and the following disclaimer in the
12503 + * documentation and/or other materials provided with the distribution.
12504 + * 3. The names of the above-listed copyright holders may not be used
12505 + * to endorse or promote products derived from this software without
12506 + * specific prior written permission.
12507 + *
12508 + * ALTERNATIVELY, this software may be distributed under the terms of the
12509 + * GNU General Public License ("GPL") version 2, as published by the Free
12510 + * Software Foundation.
12511 + *
12512 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
12513 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
12514 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
12515 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
12516 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
12517 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
12518 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
12519 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
12520 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
12521 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
12522 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
12523 + */
12524 +#include "vchiq_build_info.h"
12525 +#include <linux/broadcom/vc_debug_sym.h>
12526 +
12527 +VC_DEBUG_DECLARE_STRING_VAR(vchiq_build_hostname, "dc4-arm-01");
12528 +VC_DEBUG_DECLARE_STRING_VAR(vchiq_build_version, "9245b4c35b99b3870e1f7dc598c5692b3c66a6f0 (tainted)");
12529 +VC_DEBUG_DECLARE_STRING_VAR(vchiq_build_time, __TIME__);
12530 +VC_DEBUG_DECLARE_STRING_VAR(vchiq_build_date, __DATE__);
12531 +
12532 +const char *vchiq_get_build_hostname(void)
12533 +{
12534 + return vchiq_build_hostname;
12535 +}
12536 +
12537 +const char *vchiq_get_build_version(void)
12538 +{
12539 + return vchiq_build_version;
12540 +}
12541 +
12542 +const char *vchiq_get_build_date(void)
12543 +{
12544 + return vchiq_build_date;
12545 +}
12546 +
12547 +const char *vchiq_get_build_time(void)
12548 +{
12549 + return vchiq_build_time;
12550 +}
12551 --
12552 1.9.1
12553