target/linux/brcm2708/patches-4.4/0119-drm-vc4-Synchronize-validation-code-for-v2-submissio.patch
From 55242268abd2076c4560de14d0227ae5382f274e Mon Sep 17 00:00:00 2001
From: Eric Anholt <eric@anholt.net>
Date: Mon, 7 Dec 2015 12:35:01 -0800
Subject: [PATCH] drm/vc4: Synchronize validation code for v2 submission
 upstream.

Signed-off-by: Eric Anholt <eric@anholt.net>
---
 drivers/gpu/drm/vc4/vc4_drv.h       |  24 +--
 drivers/gpu/drm/vc4/vc4_gem.c       |  14 +-
 drivers/gpu/drm/vc4/vc4_render_cl.c |   6 +-
 drivers/gpu/drm/vc4/vc4_validate.c  | 287 +++++++++++++++---------------------
 4 files changed, 135 insertions(+), 196 deletions(-)

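The heart of the change: vc4_use_bo() no longer takes a per-BO render/shader
mode plus an out parameter, but returns the looked-up BO directly, with NULL
signaling a rejected lookup (index out of range, or a shader BO used as
something other than a shader). A minimal sketch of the caller pattern the
hunks below convert everything to:

	struct drm_gem_cma_object *obj;

	obj = vc4_use_bo(exec, surf->hindex);
	if (!obj)
		return -EINVAL;

The old bool-plus-out-parameter form appears on the '-' lines of each
affected hunk.
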
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -189,17 +189,6 @@ to_vc4_encoder(struct drm_encoder *encod
 #define HVS_READ(offset) readl(vc4->hvs->regs + offset)
 #define HVS_WRITE(offset, val) writel(val, vc4->hvs->regs + offset)
 
-enum vc4_bo_mode {
-	VC4_MODE_UNDECIDED,
-	VC4_MODE_RENDER,
-	VC4_MODE_SHADER,
-};
-
-struct vc4_bo_exec_state {
-	struct drm_gem_cma_object *bo;
-	enum vc4_bo_mode mode;
-};
-
 struct vc4_exec_info {
 	/* Sequence number for this bin/render job. */
 	uint64_t seqno;
@@ -210,7 +199,7 @@ struct vc4_exec_info {
 	/* This is the array of BOs that were looked up at the start of exec.
 	 * Command validation will use indices into this array.
 	 */
-	struct vc4_bo_exec_state *bo;
+	struct drm_gem_cma_object **bo;
 	uint32_t bo_count;
 
 	/* Pointers for our position in vc4->job_list */
@@ -238,7 +227,6 @@ struct vc4_exec_info {
 	 * command lists.
 	 */
 	struct vc4_shader_state {
-		uint8_t packet;
 		uint32_t addr;
 		/* Maximum vertex index referenced by any primitive using this
 		 * shader state.
@@ -254,6 +242,7 @@ struct vc4_exec_info {
 	bool found_tile_binning_mode_config_packet;
 	bool found_start_tile_binning_packet;
 	bool found_increment_semaphore_packet;
+	bool found_flush;
 	uint8_t bin_tiles_x, bin_tiles_y;
 	struct drm_gem_cma_object *tile_bo;
 	uint32_t tile_alloc_offset;
@@ -265,6 +254,9 @@ struct vc4_exec_info {
 	uint32_t ct0ca, ct0ea;
 	uint32_t ct1ca, ct1ea;
 
+	/* Pointer to the unvalidated bin CL (if present). */
+	void *bin_u;
+
 	/* Pointers to the shader recs. These paddr gets incremented as CL
 	 * packets are relocated in validate_gl_shader_state, and the vaddrs
 	 * (u and v) get incremented and size decremented as the shader recs
@@ -455,10 +447,8 @@ vc4_validate_bin_cl(struct drm_device *d
 int
 vc4_validate_shader_recs(struct drm_device *dev, struct vc4_exec_info *exec);
 
-bool vc4_use_bo(struct vc4_exec_info *exec,
-		uint32_t hindex,
-		enum vc4_bo_mode mode,
-		struct drm_gem_cma_object **obj);
+struct drm_gem_cma_object *vc4_use_bo(struct vc4_exec_info *exec,
+				      uint32_t hindex);
 
 int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec);
 
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -169,8 +169,8 @@ vc4_save_hang_state(struct drm_device *d
 	}
 
 	for (i = 0; i < exec->bo_count; i++) {
-		drm_gem_object_reference(&exec->bo[i].bo->base);
-		kernel_state->bo[i] = &exec->bo[i].bo->base;
+		drm_gem_object_reference(&exec->bo[i]->base);
+		kernel_state->bo[i] = &exec->bo[i]->base;
 	}
 
 	list_for_each_entry(bo, &exec->unref_list, unref_head) {
@@ -397,7 +397,7 @@ vc4_update_bo_seqnos(struct vc4_exec_inf
 	unsigned i;
 
 	for (i = 0; i < exec->bo_count; i++) {
-		bo = to_vc4_bo(&exec->bo[i].bo->base);
+		bo = to_vc4_bo(&exec->bo[i]->base);
 		bo->seqno = seqno;
 	}
 
@@ -467,7 +467,7 @@ vc4_cl_lookup_bos(struct drm_device *dev
 		return -EINVAL;
 	}
 
-	exec->bo = kcalloc(exec->bo_count, sizeof(struct vc4_bo_exec_state),
+	exec->bo = kcalloc(exec->bo_count, sizeof(struct drm_gem_cma_object *),
 			   GFP_KERNEL);
 	if (!exec->bo) {
 		DRM_ERROR("Failed to allocate validated BO pointers\n");
@@ -500,7 +500,7 @@ vc4_cl_lookup_bos(struct drm_device *dev
 			goto fail;
 		}
 		drm_gem_object_reference(bo);
-		exec->bo[i].bo = (struct drm_gem_cma_object *)bo;
+		exec->bo[i] = (struct drm_gem_cma_object *)bo;
 	}
 	spin_unlock(&file_priv->table_lock);
 
@@ -591,6 +591,8 @@ vc4_get_bcl(struct drm_device *dev, stru
 
 	exec->ct0ca = exec->exec_bo->paddr + bin_offset;
 
+	exec->bin_u = bin;
+
 	exec->shader_rec_v = exec->exec_bo->vaddr + shader_rec_offset;
 	exec->shader_rec_p = exec->exec_bo->paddr + shader_rec_offset;
 	exec->shader_rec_size = args->shader_rec_size;
@@ -622,7 +624,7 @@ vc4_complete_exec(struct drm_device *dev
 	mutex_lock(&dev->struct_mutex);
 	if (exec->bo) {
 		for (i = 0; i < exec->bo_count; i++)
-			drm_gem_object_unreference(&exec->bo[i].bo->base);
+			drm_gem_object_unreference(&exec->bo[i]->base);
 		kfree(exec->bo);
 	}
 
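The bin_u pointer saved in vc4_get_bcl() above is what the new
validate_bin_pos() helper (in the vc4_validate.c hunks below) compares
against: VC4_PACKET_INCREMENT_SEMAPHORE must sit at offset bin_cl_size - 2
and VC4_PACKET_FLUSH at bin_cl_size - 1, i.e. the last two bytes of the
unvalidated bin CL, both packets being a single byte. As a hypothetical
userspace sketch (cl_u8() is an assumed one-byte packet emitter, not part of
this patch), a conforming bin CL would therefore end with:

	cl_u8(&bcl, VC4_PACKET_INCREMENT_SEMAPHORE);
	cl_u8(&bcl, VC4_PACKET_FLUSH);	/* last byte of the bin CL */
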
--- a/drivers/gpu/drm/vc4/vc4_render_cl.c
+++ b/drivers/gpu/drm/vc4/vc4_render_cl.c
@@ -436,7 +436,8 @@ static int vc4_rcl_surface_setup(struct
 	if (surf->hindex == ~0)
 		return 0;
 
-	if (!vc4_use_bo(exec, surf->hindex, VC4_MODE_RENDER, obj))
+	*obj = vc4_use_bo(exec, surf->hindex);
+	if (!*obj)
 		return -EINVAL;
 
 	if (surf->flags & VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES) {
@@ -537,7 +538,8 @@ vc4_rcl_render_config_surface_setup(stru
 	if (surf->hindex == ~0)
 		return 0;
 
-	if (!vc4_use_bo(exec, surf->hindex, VC4_MODE_RENDER, obj))
+	*obj = vc4_use_bo(exec, surf->hindex);
+	if (!*obj)
 		return -EINVAL;
 
 	if (tiling > VC4_TILING_FORMAT_LT) {
--- a/drivers/gpu/drm/vc4/vc4_validate.c
+++ b/drivers/gpu/drm/vc4/vc4_validate.c
@@ -94,42 +94,42 @@ size_is_lt(uint32_t width, uint32_t heig
 		height <= 4 * utile_height(cpp));
 }
 
-bool
-vc4_use_bo(struct vc4_exec_info *exec,
-	   uint32_t hindex,
-	   enum vc4_bo_mode mode,
-	   struct drm_gem_cma_object **obj)
+struct drm_gem_cma_object *
+vc4_use_bo(struct vc4_exec_info *exec, uint32_t hindex)
 {
-	*obj = NULL;
+	struct drm_gem_cma_object *obj;
+	struct vc4_bo *bo;
 
 	if (hindex >= exec->bo_count) {
 		DRM_ERROR("BO index %d greater than BO count %d\n",
 			  hindex, exec->bo_count);
-		return false;
+		return NULL;
 	}
+	obj = exec->bo[hindex];
+	bo = to_vc4_bo(&obj->base);
 
-	if (exec->bo[hindex].mode != mode) {
-		if (exec->bo[hindex].mode == VC4_MODE_UNDECIDED) {
-			exec->bo[hindex].mode = mode;
-		} else {
-			DRM_ERROR("BO index %d reused with mode %d vs %d\n",
-				  hindex, exec->bo[hindex].mode, mode);
-			return false;
-		}
+	if (bo->validated_shader) {
+		DRM_ERROR("Trying to use shader BO as something other than "
+			  "a shader\n");
+		return NULL;
 	}
 
-	*obj = exec->bo[hindex].bo;
-	return true;
+	return obj;
+}
+
+static struct drm_gem_cma_object *
+vc4_use_handle(struct vc4_exec_info *exec, uint32_t gem_handles_packet_index)
+{
+	return vc4_use_bo(exec, exec->bo_index[gem_handles_packet_index]);
 }
 
 static bool
-vc4_use_handle(struct vc4_exec_info *exec,
-	       uint32_t gem_handles_packet_index,
-	       enum vc4_bo_mode mode,
-	       struct drm_gem_cma_object **obj)
+validate_bin_pos(struct vc4_exec_info *exec, void *untrusted, uint32_t pos)
 {
-	return vc4_use_bo(exec, exec->bo_index[gem_handles_packet_index],
-			  mode, obj);
+	/* Note that the untrusted pointer passed to these functions is
	 * incremented past the packet byte.
	 */
+	return (untrusted - 1 == exec->bin_u + pos);
 }
 
 static uint32_t
@@ -202,13 +202,13 @@ vc4_check_tex_size(struct vc4_exec_info
 }
 
 static int
-validate_flush_all(VALIDATE_ARGS)
+validate_flush(VALIDATE_ARGS)
 {
-	if (exec->found_increment_semaphore_packet) {
-		DRM_ERROR("VC4_PACKET_FLUSH_ALL after "
-			  "VC4_PACKET_INCREMENT_SEMAPHORE\n");
+	if (!validate_bin_pos(exec, untrusted, exec->args->bin_cl_size - 1)) {
+		DRM_ERROR("Bin CL must end with VC4_PACKET_FLUSH\n");
 		return -EINVAL;
 	}
+	exec->found_flush = true;
 
 	return 0;
 }
@@ -233,17 +233,13 @@ validate_start_tile_binning(VALIDATE_ARG
 static int
 validate_increment_semaphore(VALIDATE_ARGS)
 {
-	if (exec->found_increment_semaphore_packet) {
-		DRM_ERROR("Duplicate VC4_PACKET_INCREMENT_SEMAPHORE\n");
+	if (!validate_bin_pos(exec, untrusted, exec->args->bin_cl_size - 2)) {
+		DRM_ERROR("Bin CL must end with "
+			  "VC4_PACKET_INCREMENT_SEMAPHORE\n");
 		return -EINVAL;
 	}
 	exec->found_increment_semaphore_packet = true;
 
-	/* Once we've found the semaphore increment, there should be one FLUSH
-	 * then the end of the command list. The FLUSH actually triggers the
-	 * increment, so we only need to make sure there
-	 */
-
 	return 0;
 }
 
@@ -257,11 +253,6 @@ validate_indexed_prim_list(VALIDATE_ARGS
 	uint32_t index_size = (*(uint8_t *)(untrusted + 0) >> 4) ? 2 : 1;
 	struct vc4_shader_state *shader_state;
 
-	if (exec->found_increment_semaphore_packet) {
-		DRM_ERROR("Drawing after VC4_PACKET_INCREMENT_SEMAPHORE\n");
-		return -EINVAL;
-	}
-
 	/* Check overflow condition */
 	if (exec->shader_state_count == 0) {
 		DRM_ERROR("shader state must precede primitives\n");
@@ -272,7 +263,8 @@ validate_indexed_prim_list(VALIDATE_ARGS
 	if (max_index > shader_state->max_index)
 		shader_state->max_index = max_index;
 
-	if (!vc4_use_handle(exec, 0, VC4_MODE_RENDER, &ib))
+	ib = vc4_use_handle(exec, 0);
+	if (!ib)
 		return -EINVAL;
 
 	if (offset > ib->base.size ||
@@ -295,11 +287,6 @@ validate_gl_array_primitive(VALIDATE_ARG
 	uint32_t max_index;
 	struct vc4_shader_state *shader_state;
 
-	if (exec->found_increment_semaphore_packet) {
-		DRM_ERROR("Drawing after VC4_PACKET_INCREMENT_SEMAPHORE\n");
-		return -EINVAL;
-	}
-
 	/* Check overflow condition */
 	if (exec->shader_state_count == 0) {
 		DRM_ERROR("shader state must precede primitives\n");
@@ -329,7 +316,6 @@ validate_gl_shader_state(VALIDATE_ARGS)
 		return -EINVAL;
 	}
 
-	exec->shader_state[i].packet = VC4_PACKET_GL_SHADER_STATE;
 	exec->shader_state[i].addr = *(uint32_t *)untrusted;
 	exec->shader_state[i].max_index = 0;
 
@@ -348,31 +334,6 @@ validate_gl_shader_state(VALIDATE_ARGS)
 }
 
 static int
-validate_nv_shader_state(VALIDATE_ARGS)
-{
-	uint32_t i = exec->shader_state_count++;
-
-	if (i >= exec->shader_state_size) {
-		DRM_ERROR("More requests for shader states than declared\n");
-		return -EINVAL;
-	}
-
-	exec->shader_state[i].packet = VC4_PACKET_NV_SHADER_STATE;
-	exec->shader_state[i].addr = *(uint32_t *)untrusted;
-
-	if (exec->shader_state[i].addr & 15) {
-		DRM_ERROR("NV shader state address 0x%08x misaligned\n",
-			  exec->shader_state[i].addr);
-		return -EINVAL;
-	}
-
-	*(uint32_t *)validated = (exec->shader_state[i].addr +
-				  exec->shader_rec_p);
-
-	return 0;
-}
-
-static int
 validate_tile_binning_config(VALIDATE_ARGS)
 {
 	struct drm_device *dev = exec->exec_bo->base.dev;
@@ -473,8 +434,8 @@ static const struct cmd_info {
 } cmd_info[] = {
 	VC4_DEFINE_PACKET(VC4_PACKET_HALT, NULL),
 	VC4_DEFINE_PACKET(VC4_PACKET_NOP, NULL),
-	VC4_DEFINE_PACKET(VC4_PACKET_FLUSH, NULL),
-	VC4_DEFINE_PACKET(VC4_PACKET_FLUSH_ALL, validate_flush_all),
+	VC4_DEFINE_PACKET(VC4_PACKET_FLUSH, validate_flush),
+	VC4_DEFINE_PACKET(VC4_PACKET_FLUSH_ALL, NULL),
 	VC4_DEFINE_PACKET(VC4_PACKET_START_TILE_BINNING,
			  validate_start_tile_binning),
 	VC4_DEFINE_PACKET(VC4_PACKET_INCREMENT_SEMAPHORE,
@@ -488,7 +449,6 @@ static const struct cmd_info {
 	VC4_DEFINE_PACKET(VC4_PACKET_PRIMITIVE_LIST_FORMAT, NULL),
 
 	VC4_DEFINE_PACKET(VC4_PACKET_GL_SHADER_STATE, validate_gl_shader_state),
-	VC4_DEFINE_PACKET(VC4_PACKET_NV_SHADER_STATE, validate_nv_shader_state),
 
 	VC4_DEFINE_PACKET(VC4_PACKET_CONFIGURATION_BITS, NULL),
 	VC4_DEFINE_PACKET(VC4_PACKET_FLAT_SHADE_FLAGS, NULL),
@@ -575,8 +535,16 @@ vc4_validate_bin_cl(struct drm_device *d
 		return -EINVAL;
 	}
 
-	if (!exec->found_increment_semaphore_packet) {
-		DRM_ERROR("Bin CL missing VC4_PACKET_INCREMENT_SEMAPHORE\n");
+	/* The bin CL must be ended with INCREMENT_SEMAPHORE and FLUSH. The
+	 * semaphore is used to trigger the render CL to start up, and the
+	 * FLUSH is what caps the bin lists with
+	 * VC4_PACKET_RETURN_FROM_SUB_LIST (so they jump back to the main
+	 * render CL when they get called to) and actually triggers the queued
+	 * semaphore increment.
+	 */
+	if (!exec->found_increment_semaphore_packet || !exec->found_flush) {
+		DRM_ERROR("Bin CL missing VC4_PACKET_INCREMENT_SEMAPHORE + "
+			  "VC4_PACKET_FLUSH\n");
 		return -EINVAL;
 	}
 
@@ -607,7 +575,8 @@ reloc_tex(struct vc4_exec_info *exec,
 	uint32_t cube_map_stride = 0;
 	enum vc4_texture_data_type type;
 
-	if (!vc4_use_bo(exec, texture_handle_index, VC4_MODE_RENDER, &tex))
+	tex = vc4_use_bo(exec, texture_handle_index);
+	if (!tex)
 		return false;
 
 	if (sample->is_direct) {
@@ -755,51 +724,28 @@ reloc_tex(struct vc4_exec_info *exec,
 }
 
 static int
-validate_shader_rec(struct drm_device *dev,
-		    struct vc4_exec_info *exec,
-		    struct vc4_shader_state *state)
+validate_gl_shader_rec(struct drm_device *dev,
+		       struct vc4_exec_info *exec,
+		       struct vc4_shader_state *state)
 {
 	uint32_t *src_handles;
 	void *pkt_u, *pkt_v;
-	enum shader_rec_reloc_type {
-		RELOC_CODE,
-		RELOC_VBO,
-	};
-	struct shader_rec_reloc {
-		enum shader_rec_reloc_type type;
-		uint32_t offset;
-	};
-	static const struct shader_rec_reloc gl_relocs[] = {
-		{ RELOC_CODE, 4 }, /* fs */
-		{ RELOC_CODE, 16 }, /* vs */
-		{ RELOC_CODE, 28 }, /* cs */
-	};
-	static const struct shader_rec_reloc nv_relocs[] = {
-		{ RELOC_CODE, 4 }, /* fs */
-		{ RELOC_VBO, 12 }
+	static const uint32_t shader_reloc_offsets[] = {
+		4, /* fs */
+		16, /* vs */
+		28, /* cs */
 	};
-	const struct shader_rec_reloc *relocs;
-	struct drm_gem_cma_object *bo[ARRAY_SIZE(gl_relocs) + 8];
-	uint32_t nr_attributes = 0, nr_fixed_relocs, nr_relocs, packet_size;
+	uint32_t shader_reloc_count = ARRAY_SIZE(shader_reloc_offsets);
+	struct drm_gem_cma_object *bo[shader_reloc_count + 8];
+	uint32_t nr_attributes, nr_relocs, packet_size;
 	int i;
-	struct vc4_validated_shader_info *shader;
 
-	if (state->packet == VC4_PACKET_NV_SHADER_STATE) {
-		relocs = nv_relocs;
-		nr_fixed_relocs = ARRAY_SIZE(nv_relocs);
-
-		packet_size = 16;
-	} else {
-		relocs = gl_relocs;
-		nr_fixed_relocs = ARRAY_SIZE(gl_relocs);
-
-		nr_attributes = state->addr & 0x7;
-		if (nr_attributes == 0)
-			nr_attributes = 8;
-		packet_size = gl_shader_rec_size(state->addr);
-	}
-	nr_relocs = nr_fixed_relocs + nr_attributes;
+	nr_attributes = state->addr & 0x7;
+	if (nr_attributes == 0)
+		nr_attributes = 8;
+	packet_size = gl_shader_rec_size(state->addr);
 
+	nr_relocs = ARRAY_SIZE(shader_reloc_offsets) + nr_attributes;
 	if (nr_relocs * 4 > exec->shader_rec_size) {
 		DRM_ERROR("overflowed shader recs reading %d handles "
 			  "from %d bytes left\n",
@@ -829,21 +775,30 @@ validate_shader_rec(struct drm_device *d
 	exec->shader_rec_v += roundup(packet_size, 16);
 	exec->shader_rec_size -= packet_size;
 
-	for (i = 0; i < nr_relocs; i++) {
-		enum vc4_bo_mode mode;
+	if (!(*(uint16_t *)pkt_u & VC4_SHADER_FLAG_FS_SINGLE_THREAD)) {
+		DRM_ERROR("Multi-threaded fragment shaders not supported.\n");
+		return -EINVAL;
+	}
 
-		if (i < nr_fixed_relocs && relocs[i].type == RELOC_CODE)
-			mode = VC4_MODE_SHADER;
-		else
-			mode = VC4_MODE_RENDER;
+	for (i = 0; i < shader_reloc_count; i++) {
+		if (src_handles[i] > exec->bo_count) {
+			DRM_ERROR("Shader handle %d too big\n", src_handles[i]);
+			return -EINVAL;
+		}
 
-		if (!vc4_use_bo(exec, src_handles[i], mode, &bo[i]))
-			return false;
+		bo[i] = exec->bo[src_handles[i]];
+		if (!bo[i])
+			return -EINVAL;
+	}
+	for (i = shader_reloc_count; i < nr_relocs; i++) {
+		bo[i] = vc4_use_bo(exec, src_handles[i]);
+		if (!bo[i])
+			return -EINVAL;
 	}
 
-	for (i = 0; i < nr_fixed_relocs; i++) {
-		struct vc4_bo *vc4_bo;
-		uint32_t o = relocs[i].offset;
+	for (i = 0; i < shader_reloc_count; i++) {
+		struct vc4_validated_shader_info *validated_shader;
+		uint32_t o = shader_reloc_offsets[i];
 		uint32_t src_offset = *(uint32_t *)(pkt_u + o);
 		uint32_t *texture_handles_u;
 		void *uniform_data_u;
@@ -851,57 +806,50 @@ validate_shader_rec(struct drm_device *d
 
 		*(uint32_t *)(pkt_v + o) = bo[i]->paddr + src_offset;
 
-		switch (relocs[i].type) {
-		case RELOC_CODE:
-			if (src_offset != 0) {
-				DRM_ERROR("Shaders must be at offset 0 "
-					  "of the BO.\n");
-				goto fail;
-			}
+		if (src_offset != 0) {
+			DRM_ERROR("Shaders must be at offset 0 of "
+				  "the BO.\n");
+			return -EINVAL;
+		}
 
-			vc4_bo = to_vc4_bo(&bo[i]->base);
-			shader = vc4_bo->validated_shader;
-			if (!shader)
-				goto fail;
+		validated_shader = to_vc4_bo(&bo[i]->base)->validated_shader;
+		if (!validated_shader)
+			return -EINVAL;
 
-			if (shader->uniforms_src_size > exec->uniforms_size) {
-				DRM_ERROR("Uniforms src buffer overflow\n");
-				goto fail;
-			}
+		if (validated_shader->uniforms_src_size >
+		    exec->uniforms_size) {
+			DRM_ERROR("Uniforms src buffer overflow\n");
+			return -EINVAL;
+		}
 
-			texture_handles_u = exec->uniforms_u;
-			uniform_data_u = (texture_handles_u +
-					  shader->num_texture_samples);
-
-			memcpy(exec->uniforms_v, uniform_data_u,
-			       shader->uniforms_size);
-
-			for (tex = 0;
-			     tex < shader->num_texture_samples;
-			     tex++) {
-				if (!reloc_tex(exec,
-					       uniform_data_u,
-					       &shader->texture_samples[tex],
-					       texture_handles_u[tex])) {
-					goto fail;
-				}
+		texture_handles_u = exec->uniforms_u;
+		uniform_data_u = (texture_handles_u +
+				  validated_shader->num_texture_samples);
+
+		memcpy(exec->uniforms_v, uniform_data_u,
+		       validated_shader->uniforms_size);
+
+		for (tex = 0;
+		     tex < validated_shader->num_texture_samples;
+		     tex++) {
+			if (!reloc_tex(exec,
+				       uniform_data_u,
+				       &validated_shader->texture_samples[tex],
+				       texture_handles_u[tex])) {
+				return -EINVAL;
 			}
+		}
 
-			*(uint32_t *)(pkt_v + o + 4) = exec->uniforms_p;
-
-			exec->uniforms_u += shader->uniforms_src_size;
-			exec->uniforms_v += shader->uniforms_size;
-			exec->uniforms_p += shader->uniforms_size;
-
-			break;
+		*(uint32_t *)(pkt_v + o + 4) = exec->uniforms_p;
 
-		case RELOC_VBO:
-			break;
-		}
+		exec->uniforms_u += validated_shader->uniforms_src_size;
+		exec->uniforms_v += validated_shader->uniforms_size;
+		exec->uniforms_p += validated_shader->uniforms_size;
 	}
 
 	for (i = 0; i < nr_attributes; i++) {
-		struct drm_gem_cma_object *vbo = bo[nr_fixed_relocs + i];
+		struct drm_gem_cma_object *vbo =
+			bo[ARRAY_SIZE(shader_reloc_offsets) + i];
 		uint32_t o = 36 + i * 8;
 		uint32_t offset = *(uint32_t *)(pkt_u + o + 0);
 		uint32_t attr_size = *(uint8_t *)(pkt_u + o + 4) + 1;
@@ -933,9 +881,6 @@ validate_shader_rec(struct drm_device *d
 	}
 
 	return 0;
-
-fail:
-	return -EINVAL;
 }
 
 int
@@ -946,7 +891,7 @@ vc4_validate_shader_recs(struct drm_devi
 	int ret = 0;
 
 	for (i = 0; i < exec->shader_state_count; i++) {
-		ret = validate_shader_rec(dev, exec, &exec->shader_state[i]);
+		ret = validate_gl_shader_rec(dev, exec, &exec->shader_state[i]);
 		if (ret)
 			return ret;
 	}
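
Net result in validate_gl_shader_rec(): two distinct lookup paths for the
handles of a shader record, condensed here from the hunks above. The three
shader code slots (fs/vs/cs) are bounds-checked and read straight out of
exec->bo[], since vc4_use_bo() would now reject them for having
validated_shader set, while the attribute VBOs that follow still go through
vc4_use_bo(), so shader BOs can never be bound as vertex data:

	/* fs/vs/cs code BOs: direct table access; the shader reloc loop
	 * then requires validated_shader to be set on each of them. */
	bo[i] = exec->bo[src_handles[i]];

	/* attribute VBOs: vc4_use_bo() returns NULL for shader BOs. */
	bo[i] = vc4_use_bo(exec, src_handles[i]);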