1 From b2366bf5c6c6c13d7d3b0e7099c6e71f4f775df6 Mon Sep 17 00:00:00 2001
2 From: Eric Anholt <eric@anholt.net>
3 Date: Mon, 2 Mar 2015 13:01:12 -0800
4 Subject: [PATCH 090/423] drm/vc4: Add support for 3D rendering using the V3D engine
7 This is a squash of the out-of-tree development series. Since that
8 series contained code from the first "get a demo triangle rendered
9 using a hacked up driver using binary shader code" to "plug the last
10 known security hole", it's hard to reconstruct a different series of
11 incremental development that's mergeable without security holes.
14 Signed-off-by: Eric Anholt <eric@anholt.net>
16 drivers/gpu/drm/vc4/Makefile | 11 +-
17 drivers/gpu/drm/vc4/vc4_bo.c | 476 +++++++++++++-
18 drivers/gpu/drm/vc4/vc4_crtc.c | 98 ++-
19 drivers/gpu/drm/vc4/vc4_debugfs.c | 3 +
20 drivers/gpu/drm/vc4/vc4_drv.c | 45 +-
21 drivers/gpu/drm/vc4/vc4_drv.h | 317 ++++++++++
22 drivers/gpu/drm/vc4/vc4_gem.c | 686 +++++++++++++++++++++
23 drivers/gpu/drm/vc4/vc4_irq.c | 211 +++++++
24 drivers/gpu/drm/vc4/vc4_kms.c | 148 ++++-
25 drivers/gpu/drm/vc4/vc4_packet.h | 384 ++++++++++++
26 drivers/gpu/drm/vc4/vc4_plane.c | 40 ++
27 drivers/gpu/drm/vc4/vc4_qpu_defines.h | 268 ++++++++
28 drivers/gpu/drm/vc4/vc4_render_cl.c | 448 ++++++++++++++
29 drivers/gpu/drm/vc4/vc4_trace.h | 63 ++
30 drivers/gpu/drm/vc4/vc4_trace_points.c | 14 +
31 drivers/gpu/drm/vc4/vc4_v3d.c | 268 ++++++++
32 drivers/gpu/drm/vc4/vc4_validate.c | 958 +++++++++++++++++++++++++++++
33 drivers/gpu/drm/vc4/vc4_validate_shaders.c | 521 ++++++++++++++++
34 include/uapi/drm/vc4_drm.h | 229 +++++++
35 19 files changed, 5173 insertions(+), 15 deletions(-)
36 create mode 100644 drivers/gpu/drm/vc4/vc4_gem.c
37 create mode 100644 drivers/gpu/drm/vc4/vc4_irq.c
38 create mode 100644 drivers/gpu/drm/vc4/vc4_packet.h
39 create mode 100644 drivers/gpu/drm/vc4/vc4_qpu_defines.h
40 create mode 100644 drivers/gpu/drm/vc4/vc4_render_cl.c
41 create mode 100644 drivers/gpu/drm/vc4/vc4_trace.h
42 create mode 100644 drivers/gpu/drm/vc4/vc4_trace_points.c
43 create mode 100644 drivers/gpu/drm/vc4/vc4_v3d.c
44 create mode 100644 drivers/gpu/drm/vc4/vc4_validate.c
45 create mode 100644 drivers/gpu/drm/vc4/vc4_validate_shaders.c
46 create mode 100644 include/uapi/drm/vc4_drm.h
48 --- a/drivers/gpu/drm/vc4/Makefile
49 +++ b/drivers/gpu/drm/vc4/Makefile
50 @@ -8,10 +8,19 @@ vc4-y := \
61 + vc4_trace_points.o \
64 + vc4_validate_shaders.o
66 vc4-$(CONFIG_DEBUG_FS) += vc4_debugfs.o
68 obj-$(CONFIG_DRM_VC4) += vc4.o
70 +CFLAGS_vc4_trace_points.o := -I$(src)
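The CFLAGS line above follows the kernel's tracepoint convention: exactly one compilation unit defines CREATE_TRACE_POINTS before including the trace header, and the quoted include is resolved relative to $(src). The body of vc4_trace_points.c is elided from this excerpt, but it is presumably the standard one-shot pattern, sketched here as an assumption:

    /* Conventional tracepoint instantiation unit (assumed; the actual
     * vc4_trace_points.c content is elided from this excerpt).
     */
    #include "vc4_drv.h"

    #define CREATE_TRACE_POINTS
    #include "vc4_trace.h"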
71 --- a/drivers/gpu/drm/vc4/vc4_bo.c
72 +++ b/drivers/gpu/drm/vc4/vc4_bo.c
77 +#include "uapi/drm/vc4_drm.h"
79 -struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t size)
80 +static void vc4_bo_stats_dump(struct vc4_dev *vc4)
82 + DRM_INFO("num bos allocated: %d\n",
83 + vc4->bo_stats.num_allocated);
84 + DRM_INFO("size bos allocated: %dkb\n",
85 + vc4->bo_stats.size_allocated / 1024);
86 + DRM_INFO("num bos used: %d\n",
87 + vc4->bo_stats.num_allocated - vc4->bo_stats.num_cached);
88 + DRM_INFO("size bos used: %dkb\n",
89 + (vc4->bo_stats.size_allocated -
90 + vc4->bo_stats.size_cached) / 1024);
91 + DRM_INFO("num bos cached: %d\n",
92 + vc4->bo_stats.num_cached);
93 + DRM_INFO("size bos cached: %dkb\n",
94 + vc4->bo_stats.size_cached / 1024);
97 +static uint32_t bo_page_index(size_t size)
99 + return (size / PAGE_SIZE) - 1;
102 +/* Must be called with bo_lock held. */
103 +static void vc4_bo_destroy(struct vc4_bo *bo)
105 + struct drm_gem_object *obj = &bo->base.base;
106 + struct vc4_dev *vc4 = to_vc4_dev(obj->dev);
108 + if (bo->validated_shader) {
109 + kfree(bo->validated_shader->texture_samples);
110 + kfree(bo->validated_shader);
111 + bo->validated_shader = NULL;
114 + vc4->bo_stats.num_allocated--;
115 + vc4->bo_stats.size_allocated -= obj->size;
116 + drm_gem_cma_free_object(obj);
119 +/* Must be called with bo_lock held. */
120 +static void vc4_bo_remove_from_cache(struct vc4_bo *bo)
122 + struct drm_gem_object *obj = &bo->base.base;
123 + struct vc4_dev *vc4 = to_vc4_dev(obj->dev);
125 + vc4->bo_stats.num_cached--;
126 + vc4->bo_stats.size_cached -= obj->size;
128 + list_del(&bo->unref_head);
129 + list_del(&bo->size_head);
132 +static struct list_head *vc4_get_cache_list_for_size(struct drm_device *dev,
135 + struct vc4_dev *vc4 = to_vc4_dev(dev);
136 + uint32_t page_index = bo_page_index(size);
138 + if (vc4->bo_cache.size_list_size <= page_index) {
139 + uint32_t new_size = max(vc4->bo_cache.size_list_size * 2,
141 + struct list_head *new_list;
144 + new_list = kmalloc(new_size * sizeof(struct list_head),
149 + /* Rebase the old cached BO lists to their new list
152 + for (i = 0; i < vc4->bo_cache.size_list_size; i++) {
153 + struct list_head *old_list = &vc4->bo_cache.size_list[i];
154 + if (list_empty(old_list))
155 + INIT_LIST_HEAD(&new_list[i]);
157 + list_replace(old_list, &new_list[i]);
159 + /* And initialize the brand new BO list heads. */
160 + for (i = vc4->bo_cache.size_list_size; i < new_size; i++)
161 + INIT_LIST_HEAD(&new_list[i]);
163 + kfree(vc4->bo_cache.size_list);
164 + vc4->bo_cache.size_list = new_list;
165 + vc4->bo_cache.size_list_size = new_size;
168 + return &vc4->bo_cache.size_list[page_index];
171 +void vc4_bo_cache_purge(struct drm_device *dev)
173 + struct vc4_dev *vc4 = to_vc4_dev(dev);
175 + spin_lock(&vc4->bo_lock);
176 + while (!list_empty(&vc4->bo_cache.time_list)) {
177 + struct vc4_bo *bo = list_last_entry(&vc4->bo_cache.time_list,
178 + struct vc4_bo, unref_head);
179 + vc4_bo_remove_from_cache(bo);
180 + vc4_bo_destroy(bo);
182 + spin_unlock(&vc4->bo_lock);
185 +struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size)
187 + struct vc4_dev *vc4 = to_vc4_dev(dev);
188 + uint32_t size = roundup(unaligned_size, PAGE_SIZE);
189 + uint32_t page_index = bo_page_index(size);
190 struct drm_gem_cma_object *cma_obj;
193 - cma_obj = drm_gem_cma_create(dev, size);
194 - if (IS_ERR(cma_obj))
198 - return to_vc4_bo(&cma_obj->base);
200 + /* First, try to get a vc4_bo from the kernel BO cache. */
201 + spin_lock(&vc4->bo_lock);
202 + if (page_index < vc4->bo_cache.size_list_size &&
203 + !list_empty(&vc4->bo_cache.size_list[page_index])) {
204 + struct vc4_bo *bo =
205 + list_first_entry(&vc4->bo_cache.size_list[page_index],
206 + struct vc4_bo, size_head);
207 + vc4_bo_remove_from_cache(bo);
208 + spin_unlock(&vc4->bo_lock);
209 + kref_init(&bo->base.base.refcount);
212 + spin_unlock(&vc4->bo_lock);
214 + /* Otherwise, make a new BO. */
215 + for (pass = 0; ; pass++) {
216 + cma_obj = drm_gem_cma_create(dev, size);
217 + if (!IS_ERR(cma_obj))
223 + * If we've run out of CMA memory, kill the cache of
224 +	 * CMA allocations we've got lying around and try again.
226 + vc4_bo_cache_purge(dev);
230 + * Getting desperate, so try to wait for any
231 + * previous rendering to finish, free its
232 + * unreferenced BOs to the cache, and then
235 + vc4_wait_for_seqno(dev, vc4->emit_seqno, ~0ull, true);
236 + vc4_job_handle_completed(vc4);
237 + vc4_bo_cache_purge(dev);
240 + DRM_ERROR("Failed to allocate from CMA:\n");
241 + vc4_bo_stats_dump(vc4);
246 + vc4->bo_stats.num_allocated++;
247 + vc4->bo_stats.size_allocated += size;
249 + return to_vc4_bo(&cma_obj->base);
252 int vc4_dumb_create(struct drm_file *file_priv,
253 @@ -41,7 +199,129 @@ int vc4_dumb_create(struct drm_file *fil
254 if (args->size < args->pitch * args->height)
255 args->size = args->pitch * args->height;
257 - bo = vc4_bo_create(dev, roundup(args->size, PAGE_SIZE));
258 + bo = vc4_bo_create(dev, args->size);
262 + ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
263 + drm_gem_object_unreference_unlocked(&bo->base.base);
269 +vc4_bo_cache_free_old(struct drm_device *dev)
271 + struct vc4_dev *vc4 = to_vc4_dev(dev);
272 + unsigned long expire_time = jiffies - msecs_to_jiffies(1000);
274 + spin_lock(&vc4->bo_lock);
275 + while (!list_empty(&vc4->bo_cache.time_list)) {
276 + struct vc4_bo *bo = list_last_entry(&vc4->bo_cache.time_list,
277 + struct vc4_bo, unref_head);
278 + if (time_before(expire_time, bo->free_time)) {
279 + mod_timer(&vc4->bo_cache.time_timer,
280 + round_jiffies_up(jiffies +
281 + msecs_to_jiffies(1000)));
282 + spin_unlock(&vc4->bo_lock);
286 + vc4_bo_remove_from_cache(bo);
287 + vc4_bo_destroy(bo);
289 + spin_unlock(&vc4->bo_lock);
292 +/* Called on the last userspace/kernel unreference of the BO. Returns
293 + * it to the BO cache if possible, otherwise frees it.
295 + * Note that this is called with the struct_mutex held.
297 +void vc4_free_object(struct drm_gem_object *gem_bo)
299 + struct drm_device *dev = gem_bo->dev;
300 + struct vc4_dev *vc4 = to_vc4_dev(dev);
301 + struct vc4_bo *bo = to_vc4_bo(gem_bo);
302 + struct list_head *cache_list;
304 + /* If the object references someone else's memory, we can't cache it.
306 + if (gem_bo->import_attach) {
307 + vc4_bo_destroy(bo);
311 + /* Don't cache if it was publicly named. */
312 + if (gem_bo->name) {
313 + vc4_bo_destroy(bo);
317 + spin_lock(&vc4->bo_lock);
318 + cache_list = vc4_get_cache_list_for_size(dev, gem_bo->size);
320 + vc4_bo_destroy(bo);
321 + spin_unlock(&vc4->bo_lock);
325 + if (bo->validated_shader) {
326 + kfree(bo->validated_shader->texture_samples);
327 + kfree(bo->validated_shader);
328 + bo->validated_shader = NULL;
331 + bo->free_time = jiffies;
332 + list_add(&bo->size_head, cache_list);
333 + list_add(&bo->unref_head, &vc4->bo_cache.time_list);
335 + vc4->bo_stats.num_cached++;
336 + vc4->bo_stats.size_cached += gem_bo->size;
337 + spin_unlock(&vc4->bo_lock);
339 + vc4_bo_cache_free_old(dev);
342 +static void vc4_bo_cache_time_work(struct work_struct *work)
344 + struct vc4_dev *vc4 =
345 + container_of(work, struct vc4_dev, bo_cache.time_work);
346 + struct drm_device *dev = vc4->dev;
348 + vc4_bo_cache_free_old(dev);
351 +static void vc4_bo_cache_time_timer(unsigned long data)
353 + struct drm_device *dev = (struct drm_device *)data;
354 + struct vc4_dev *vc4 = to_vc4_dev(dev);
356 + schedule_work(&vc4->bo_cache.time_work);
360 +vc4_prime_export(struct drm_device *dev, struct drm_gem_object *obj, int flags)
362 + struct vc4_bo *bo = to_vc4_bo(obj);
364 + if (bo->validated_shader) {
365 + DRM_ERROR("Attempting to export shader BO\n");
366 + return ERR_PTR(-EINVAL);
369 + return drm_gem_prime_export(dev, obj, flags);
373 +vc4_create_bo_ioctl(struct drm_device *dev, void *data,
374 + struct drm_file *file_priv)
376 + struct drm_vc4_create_bo *args = data;
377 + struct vc4_bo *bo = NULL;
380 + bo = vc4_bo_create(dev, args->size);
384 @@ -50,3 +330,187 @@ int vc4_dumb_create(struct drm_file *fil
390 +vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
391 + struct drm_file *file_priv)
393 + struct drm_vc4_create_shader_bo *args = data;
394 + struct vc4_bo *bo = NULL;
397 + if (args->size == 0)
400 + if (args->size % sizeof(u64) != 0)
403 + if (args->flags != 0) {
404 + DRM_INFO("Unknown flags set: 0x%08x\n", args->flags);
408 + if (args->pad != 0) {
409 + DRM_INFO("Pad set: 0x%08x\n", args->pad);
413 + bo = vc4_bo_create(dev, args->size);
417 + ret = copy_from_user(bo->base.vaddr,
418 + (void __user *)(uintptr_t)args->data,
423 + bo->validated_shader = vc4_validate_shader(&bo->base);
424 + if (!bo->validated_shader) {
429 + /* We have to create the handle after validation, to avoid
430 +	 * races with users doing things like mmapping the shader BO.
432 + ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
435 + drm_gem_object_unreference_unlocked(&bo->base.base);
441 +vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
442 + struct drm_file *file_priv)
444 + struct drm_vc4_mmap_bo *args = data;
445 + struct drm_gem_object *gem_obj;
447 + gem_obj = drm_gem_object_lookup(dev, file_priv, args->handle);
449 + DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
453 + /* The mmap offset was set up at BO allocation time. */
454 + args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
456 + drm_gem_object_unreference(gem_obj);
460 +int vc4_mmap(struct file *filp, struct vm_area_struct *vma)
462 + struct drm_gem_object *gem_obj;
466 + ret = drm_gem_mmap(filp, vma);
470 + gem_obj = vma->vm_private_data;
471 + bo = to_vc4_bo(gem_obj);
473 + if (bo->validated_shader) {
474 + DRM_ERROR("mmaping of shader BOs not allowed.\n");
479 + * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
480 + * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
481 + * the whole buffer.
483 + vma->vm_flags &= ~VM_PFNMAP;
486 + ret = dma_mmap_writecombine(bo->base.base.dev->dev, vma,
487 + bo->base.vaddr, bo->base.paddr,
488 + vma->vm_end - vma->vm_start);
490 + drm_gem_vm_close(vma);
495 +int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
497 + struct vc4_bo *bo = to_vc4_bo(obj);
499 + if (bo->validated_shader) {
500 + DRM_ERROR("mmaping of shader BOs not allowed.\n");
504 + return drm_gem_cma_prime_mmap(obj, vma);
507 +void *vc4_prime_vmap(struct drm_gem_object *obj)
509 + struct vc4_bo *bo = to_vc4_bo(obj);
511 + if (bo->validated_shader) {
512 + DRM_ERROR("mmaping of shader BOs not allowed.\n");
513 + return ERR_PTR(-EINVAL);
516 + return drm_gem_cma_prime_vmap(obj);
519 +void vc4_bo_cache_init(struct drm_device *dev)
521 + struct vc4_dev *vc4 = to_vc4_dev(dev);
523 + spin_lock_init(&vc4->bo_lock);
525 + INIT_LIST_HEAD(&vc4->bo_cache.time_list);
527 + INIT_WORK(&vc4->bo_cache.time_work, vc4_bo_cache_time_work);
528 + setup_timer(&vc4->bo_cache.time_timer,
529 + vc4_bo_cache_time_timer,
530 + (unsigned long) dev);
533 +void vc4_bo_cache_destroy(struct drm_device *dev)
535 + struct vc4_dev *vc4 = to_vc4_dev(dev);
537 + del_timer(&vc4->bo_cache.time_timer);
538 + cancel_work_sync(&vc4->bo_cache.time_work);
540 + vc4_bo_cache_purge(dev);
542 + if (vc4->bo_stats.num_allocated) {
543 + DRM_ERROR("Destroying BO cache while BOs still allocated:\n");
544 + vc4_bo_stats_dump(vc4);
548 +#ifdef CONFIG_DEBUG_FS
549 +int vc4_bo_stats_debugfs(struct seq_file *m, void *unused)
551 + struct drm_info_node *node = (struct drm_info_node *) m->private;
552 + struct drm_device *dev = node->minor->dev;
553 + struct vc4_dev *vc4 = to_vc4_dev(dev);
554 + struct vc4_bo_stats stats;
556 + spin_lock(&vc4->bo_lock);
557 + stats = vc4->bo_stats;
558 + spin_unlock(&vc4->bo_lock);
560 + seq_printf(m, "num bos allocated: %d\n", stats.num_allocated);
561 + seq_printf(m, "size bos allocated: %dkb\n", stats.size_allocated / 1024);
562 + seq_printf(m, "num bos used: %d\n", (stats.num_allocated -
563 + stats.num_cached));
564 + seq_printf(m, "size bos used: %dkb\n", (stats.size_allocated -
565 + stats.size_cached) / 1024);
566 + seq_printf(m, "num bos cached: %d\n", stats.num_cached);
567 + seq_printf(m, "size bos cached: %dkb\n", stats.size_cached / 1024);
572 --- a/drivers/gpu/drm/vc4/vc4_crtc.c
573 +++ b/drivers/gpu/drm/vc4/vc4_crtc.c
575 #include "drm_atomic_helper.h"
576 #include "drm_crtc_helper.h"
577 #include "linux/clk.h"
578 +#include "drm_fb_cma_helper.h"
579 #include "linux/component.h"
580 #include "linux/of_device.h"
582 @@ -476,10 +477,105 @@ static irqreturn_t vc4_crtc_irq_handler(
586 +struct vc4_async_flip_state {
587 + struct drm_crtc *crtc;
588 + struct drm_framebuffer *fb;
589 + struct drm_pending_vblank_event *event;
591 + struct vc4_seqno_cb cb;
594 +/* Called when the V3D execution for the BO being flipped to is done, so that
595 + * we can actually update the plane's address to point to it.
598 +vc4_async_page_flip_complete(struct vc4_seqno_cb *cb)
600 + struct vc4_async_flip_state *flip_state =
601 + container_of(cb, struct vc4_async_flip_state, cb);
602 + struct drm_crtc *crtc = flip_state->crtc;
603 + struct drm_device *dev = crtc->dev;
604 + struct vc4_dev *vc4 = to_vc4_dev(dev);
605 + struct drm_plane *plane = crtc->primary;
607 + vc4_plane_async_set_fb(plane, flip_state->fb);
608 + if (flip_state->event) {
609 + unsigned long flags;
610 + spin_lock_irqsave(&dev->event_lock, flags);
611 + drm_crtc_send_vblank_event(crtc, flip_state->event);
612 + spin_unlock_irqrestore(&dev->event_lock, flags);
615 + drm_framebuffer_unreference(flip_state->fb);
618 + up(&vc4->async_modeset);
621 +/* Implements async (non-vblank-synced) page flips.
623 + * The page flip ioctl needs to return immediately, so we grab the
624 + * modeset semaphore on the pipe, and queue the address update for
625 + * when V3D is done with the BO being flipped to.
627 +static int vc4_async_page_flip(struct drm_crtc *crtc,
628 + struct drm_framebuffer *fb,
629 + struct drm_pending_vblank_event *event,
632 + struct drm_device *dev = crtc->dev;
633 + struct vc4_dev *vc4 = to_vc4_dev(dev);
634 + struct drm_plane *plane = crtc->primary;
636 + struct vc4_async_flip_state *flip_state;
637 + struct drm_gem_cma_object *cma_bo = drm_fb_cma_get_gem_obj(fb, 0);
638 + struct vc4_bo *bo = to_vc4_bo(&cma_bo->base);
640 + flip_state = kzalloc(sizeof(*flip_state), GFP_KERNEL);
644 + drm_framebuffer_reference(fb);
645 + flip_state->fb = fb;
646 + flip_state->crtc = crtc;
647 + flip_state->event = event;
649 +	/* Make sure all other async modesets have landed. */
650 + ret = down_interruptible(&vc4->async_modeset);
656 + /* Immediately update the plane's legacy fb pointer, so that later
657 + * modeset prep sees the state that will be present when the semaphore
660 + drm_atomic_set_fb_for_plane(plane->state, fb);
663 + vc4_queue_seqno_cb(dev, &flip_state->cb, bo->seqno,
664 + vc4_async_page_flip_complete);
666 + /* Driver takes ownership of state on successful async commit. */
670 +static int vc4_page_flip(struct drm_crtc *crtc,
671 + struct drm_framebuffer *fb,
672 + struct drm_pending_vblank_event *event,
675 + if (flags & DRM_MODE_PAGE_FLIP_ASYNC)
676 + return vc4_async_page_flip(crtc, fb, event, flags);
678 + return drm_atomic_helper_page_flip(crtc, fb, event, flags);
681 static const struct drm_crtc_funcs vc4_crtc_funcs = {
682 .set_config = drm_atomic_helper_set_config,
683 .destroy = vc4_crtc_destroy,
684 - .page_flip = drm_atomic_helper_page_flip,
685 + .page_flip = vc4_page_flip,
686 .set_property = NULL,
687 .cursor_set = NULL, /* handled by drm_mode_cursor_universal */
688 .cursor_move = NULL, /* handled by drm_mode_cursor_universal */
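The async flip above is one consumer of the seqno-callback machinery added in vc4_gem.c: the scanout address update is deferred until the GPU's finished seqno passes the last-render seqno of the BO being flipped to. A stand-alone, single-threaded illustration of that deferred-completion pattern (plain C, no kernel APIs; not driver code):

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified analogue of vc4_seqno_cb: run func once seqno passes. */
    struct seqno_cb {
            uint64_t seqno;
            void (*func)(struct seqno_cb *cb);
            struct seqno_cb *next;
    };

    static struct seqno_cb *pending;
    static uint64_t finished_seqno;

    static void queue_seqno_cb(struct seqno_cb *cb, uint64_t seqno,
                               void (*func)(struct seqno_cb *cb))
    {
            cb->seqno = seqno;
            cb->func = func;
            if (seqno <= finished_seqno) {
                    func(cb);               /* already passed: fire now */
                    return;
            }
            cb->next = pending;
            pending = cb;
    }

    /* Called when a job retires, like vc4_job_handle_completed(). */
    static void handle_completed(uint64_t seqno)
    {
            struct seqno_cb **p = &pending;

            finished_seqno = seqno;
            while (*p) {
                    struct seqno_cb *cb = *p;

                    if (cb->seqno <= finished_seqno) {
                            *p = cb->next;
                            cb->func(cb);   /* e.g. flip completion */
                    } else {
                            p = &cb->next;
                    }
            }
    }

    static void flip_done(struct seqno_cb *cb)
    {
            printf("flip complete at seqno %llu\n",
                   (unsigned long long)cb->seqno);
    }

    int main(void)
    {
            struct seqno_cb cb;

            queue_seqno_cb(&cb, 3, flip_done);
            handle_completed(2);    /* nothing fires yet */
            handle_completed(3);    /* flip_done runs */
            return 0;
    }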
689 --- a/drivers/gpu/drm/vc4/vc4_debugfs.c
690 +++ b/drivers/gpu/drm/vc4/vc4_debugfs.c
692 #include "vc4_regs.h"
694 static const struct drm_info_list vc4_debugfs_list[] = {
695 + {"bo_stats", vc4_bo_stats_debugfs, 0},
696 {"hdmi_regs", vc4_hdmi_debugfs_regs, 0},
697 {"hvs_regs", vc4_hvs_debugfs_regs, 0},
698 {"crtc0_regs", vc4_crtc_debugfs_regs, 0, (void *)(uintptr_t)0},
699 {"crtc1_regs", vc4_crtc_debugfs_regs, 0, (void *)(uintptr_t)1},
700 {"crtc2_regs", vc4_crtc_debugfs_regs, 0, (void *)(uintptr_t)2},
701 + {"v3d_ident", vc4_v3d_debugfs_ident, 0},
702 + {"v3d_regs", vc4_v3d_debugfs_regs, 0},
705 #define VC4_DEBUGFS_ENTRIES ARRAY_SIZE(vc4_debugfs_list)
706 --- a/drivers/gpu/drm/vc4/vc4_drv.c
707 +++ b/drivers/gpu/drm/vc4/vc4_drv.c
709 #include <linux/module.h>
710 #include <linux/of_platform.h>
711 #include <linux/platform_device.h>
712 +#include <soc/bcm2835/raspberrypi-firmware.h>
713 #include "drm_fb_cma_helper.h"
715 +#include "uapi/drm/vc4_drm.h"
717 #include "vc4_regs.h"
719 @@ -63,7 +65,7 @@ static const struct file_operations vc4_
721 .release = drm_release,
722 .unlocked_ioctl = drm_ioctl,
723 - .mmap = drm_gem_cma_mmap,
728 @@ -73,16 +75,28 @@ static const struct file_operations vc4_
731 static const struct drm_ioctl_desc vc4_drm_ioctls[] = {
732 + DRM_IOCTL_DEF_DRV(VC4_SUBMIT_CL, vc4_submit_cl_ioctl, 0),
733 + DRM_IOCTL_DEF_DRV(VC4_WAIT_SEQNO, vc4_wait_seqno_ioctl, 0),
734 + DRM_IOCTL_DEF_DRV(VC4_WAIT_BO, vc4_wait_bo_ioctl, 0),
735 + DRM_IOCTL_DEF_DRV(VC4_CREATE_BO, vc4_create_bo_ioctl, 0),
736 + DRM_IOCTL_DEF_DRV(VC4_MMAP_BO, vc4_mmap_bo_ioctl, 0),
737 + DRM_IOCTL_DEF_DRV(VC4_CREATE_SHADER_BO, vc4_create_shader_bo_ioctl, 0),
740 static struct drm_driver vc4_drm_driver = {
741 .driver_features = (DRIVER_MODESET |
746 .lastclose = vc4_lastclose,
747 .preclose = vc4_drm_preclose,
749 + .irq_handler = vc4_irq,
750 + .irq_preinstall = vc4_irq_preinstall,
751 + .irq_postinstall = vc4_irq_postinstall,
752 + .irq_uninstall = vc4_irq_uninstall,
754 .enable_vblank = vc4_enable_vblank,
755 .disable_vblank = vc4_disable_vblank,
756 .get_vblank_counter = drm_vblank_count,
757 @@ -92,18 +106,18 @@ static struct drm_driver vc4_drm_driver
758 .debugfs_cleanup = vc4_debugfs_cleanup,
761 - .gem_free_object = drm_gem_cma_free_object,
762 + .gem_free_object = vc4_free_object,
763 .gem_vm_ops = &drm_gem_cma_vm_ops,
765 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
766 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
767 .gem_prime_import = drm_gem_prime_import,
768 - .gem_prime_export = drm_gem_prime_export,
769 + .gem_prime_export = vc4_prime_export,
770 .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
771 .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
772 - .gem_prime_vmap = drm_gem_cma_prime_vmap,
773 + .gem_prime_vmap = vc4_prime_vmap,
774 .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
775 - .gem_prime_mmap = drm_gem_cma_prime_mmap,
776 + .gem_prime_mmap = vc4_prime_mmap,
778 .dumb_create = vc4_dumb_create,
779 .dumb_map_offset = drm_gem_cma_dumb_map_offset,
780 @@ -113,6 +127,8 @@ static struct drm_driver vc4_drm_driver
781 .num_ioctls = ARRAY_SIZE(vc4_drm_ioctls),
782 .fops = &vc4_drm_fops,
784 + .gem_obj_size = sizeof(struct vc4_bo),
789 @@ -153,6 +169,7 @@ static int vc4_drm_bind(struct device *d
790 struct drm_device *drm;
791 struct drm_connector *connector;
793 + struct device_node *firmware_node;
796 dev->coherent_dma_mask = DMA_BIT_MASK(32);
797 @@ -161,6 +178,14 @@ static int vc4_drm_bind(struct device *d
801 + firmware_node = of_parse_phandle(dev->of_node, "firmware", 0);
802 + vc4->firmware = rpi_firmware_get(firmware_node);
803 + if (!vc4->firmware) {
804 + DRM_DEBUG("Failed to get Raspberry Pi firmware reference.\n");
805 + return -EPROBE_DEFER;
807 + of_node_put(firmware_node);
809 drm = drm_dev_alloc(&vc4_drm_driver, dev);
812 @@ -170,13 +195,17 @@ static int vc4_drm_bind(struct device *d
814 drm_dev_set_unique(drm, dev_name(dev));
816 + vc4_bo_cache_init(drm);
818 drm_mode_config_init(drm);
824 ret = component_bind_all(dev, drm);
829 ret = drm_dev_register(drm, 0);
831 @@ -200,8 +229,11 @@ unregister:
832 drm_dev_unregister(drm);
834 component_unbind_all(dev, drm);
836 + vc4_gem_destroy(drm);
839 + vc4_bo_cache_destroy(drm);
843 @@ -228,6 +260,7 @@ static struct platform_driver *const com
850 static int vc4_platform_drm_probe(struct platform_device *pdev)
851 --- a/drivers/gpu/drm/vc4/vc4_drv.h
852 +++ b/drivers/gpu/drm/vc4/vc4_drv.h
853 @@ -15,8 +15,85 @@ struct vc4_dev {
854 struct vc4_hdmi *hdmi;
856 struct vc4_crtc *crtc[3];
857 + struct vc4_v3d *v3d;
859 struct drm_fbdev_cma *fbdev;
860 + struct rpi_firmware *firmware;
862 + /* The kernel-space BO cache. Tracks buffers that have been
863 + * unreferenced by all other users (refcounts of 0!) but not
864 + * yet freed, so we can do cheap allocations.
866 + struct vc4_bo_cache {
867 + /* Array of list heads for entries in the BO cache,
868 + * based on number of pages, so we can do O(1) lookups
869 + * in the cache when allocating.
871 + struct list_head *size_list;
872 + uint32_t size_list_size;
874 + /* List of all BOs in the cache, ordered by age, so we
875 + * can do O(1) lookups when trying to free old
878 + struct list_head time_list;
879 + struct work_struct time_work;
880 + struct timer_list time_timer;
883 + struct vc4_bo_stats {
885 + u32 size_allocated;
890 + /* Protects bo_cache and the BO stats. */
891 + spinlock_t bo_lock;
893 + /* Sequence number for the last job queued in job_list.
894 + * Starts at 0 (no jobs emitted).
896 + uint64_t emit_seqno;
898 + /* Sequence number for the last completed job on the GPU.
899 + * Starts at 0 (no jobs completed).
901 + uint64_t finished_seqno;
903 + /* List of all struct vc4_exec_info for jobs to be executed.
904 + * The first job in the list is the one currently programmed
905 + * into ct0ca/ct1ca for execution.
907 + struct list_head job_list;
908 + /* List of the finished vc4_exec_infos waiting to be freed by
911 + struct list_head job_done_list;
912 + spinlock_t job_lock;
913 + wait_queue_head_t job_wait_queue;
914 + struct work_struct job_done_work;
916 + /* List of struct vc4_seqno_cb for callbacks to be made from a
917 + * workqueue when the given seqno is passed.
919 + struct list_head seqno_cb_list;
921 + /* The binner overflow memory that's currently set up in
922 + * BPOA/BPOS registers. When overflow occurs and a new one is
923 + * allocated, the previous one will be moved to
924 + * vc4->current_exec's free list.
926 + struct vc4_bo *overflow_mem;
927 + struct work_struct overflow_mem_work;
930 + uint32_t last_ct0ca, last_ct1ca;
931 + struct timer_list timer;
932 + struct work_struct reset_work;
935 + struct semaphore async_modeset;
938 static inline struct vc4_dev *
939 @@ -27,6 +104,25 @@ to_vc4_dev(struct drm_device *dev)
942 struct drm_gem_cma_object base;
944 + /* seqno of the last job to render to this BO. */
947 + /* List entry for the BO's position in either
948 + * vc4_exec_info->unref_list or vc4_dev->bo_cache.time_list
950 + struct list_head unref_head;
952 + /* Time in jiffies when the BO was put in vc4->bo_cache. */
953 + unsigned long free_time;
955 + /* List entry for the BO's position in vc4_dev->bo_cache.size_list */
956 + struct list_head size_head;
958 + /* Struct for shader validation state, if created by
959 + * DRM_IOCTL_VC4_CREATE_SHADER_BO.
961 + struct vc4_validated_shader_info *validated_shader;
964 static inline struct vc4_bo *
965 @@ -35,6 +131,17 @@ to_vc4_bo(struct drm_gem_object *bo)
966 return (struct vc4_bo *)bo;
969 +struct vc4_seqno_cb {
970 + struct work_struct work;
972 + void (*func)(struct vc4_seqno_cb *cb);
976 + struct platform_device *pdev;
977 + void __iomem *regs;
981 struct platform_device *pdev;
983 @@ -72,9 +179,151 @@ to_vc4_encoder(struct drm_encoder *encod
984 return container_of(encoder, struct vc4_encoder, base);
987 +#define V3D_READ(offset) readl(vc4->v3d->regs + offset)
988 +#define V3D_WRITE(offset, val) writel(val, vc4->v3d->regs + offset)
989 #define HVS_READ(offset) readl(vc4->hvs->regs + offset)
990 #define HVS_WRITE(offset, val) writel(val, vc4->hvs->regs + offset)
993 + VC4_MODE_UNDECIDED,
998 +struct vc4_bo_exec_state {
999 + struct drm_gem_cma_object *bo;
1000 + enum vc4_bo_mode mode;
1003 +struct vc4_exec_info {
1004 + /* Sequence number for this bin/render job. */
1007 + /* Kernel-space copy of the ioctl arguments */
1008 + struct drm_vc4_submit_cl *args;
1010 + /* This is the array of BOs that were looked up at the start of exec.
1011 + * Command validation will use indices into this array.
1013 + struct vc4_bo_exec_state *bo;
1014 + uint32_t bo_count;
1016 + /* Pointers for our position in vc4->job_list */
1017 + struct list_head head;
1019 + /* List of other BOs used in the job that need to be released
1020 + * once the job is complete.
1022 + struct list_head unref_list;
1024 + /* Current unvalidated indices into @bo loaded by the non-hardware
1025 + * VC4_PACKET_GEM_HANDLES.
1027 + uint32_t bo_index[2];
1029 + /* This is the BO where we store the validated command lists, shader
1030 + * records, and uniforms.
1032 + struct drm_gem_cma_object *exec_bo;
1035 + * This tracks the per-shader-record state (packet 64) that
1036 + * determines the length of the shader record and the offset
1037 + * it's expected to be found at. It gets read in from the
1040 + struct vc4_shader_state {
1043 + /* Maximum vertex index referenced by any primitive using this
1046 + uint32_t max_index;
1049 + /** How many shader states the user declared they were using. */
1050 + uint32_t shader_state_size;
1051 + /** How many shader state records the validator has seen. */
1052 + uint32_t shader_state_count;
1054 + bool found_tile_binning_mode_config_packet;
1055 + bool found_start_tile_binning_packet;
1056 + bool found_increment_semaphore_packet;
1057 + uint8_t bin_tiles_x, bin_tiles_y;
1058 + struct drm_gem_cma_object *tile_bo;
1059 + uint32_t tile_alloc_offset;
1062 + * Computed addresses pointing into exec_bo where we start the
1063 + * bin thread (ct0) and render thread (ct1).
1065 + uint32_t ct0ca, ct0ea;
1066 + uint32_t ct1ca, ct1ea;
1068 + /* Pointers to the shader recs. These paddr gets incremented as CL
1069 + * packets are relocated in validate_gl_shader_state, and the vaddrs
1070 + * (u and v) get incremented and size decremented as the shader recs
1071 + * themselves are validated.
1073 + void *shader_rec_u;
1074 + void *shader_rec_v;
1075 + uint32_t shader_rec_p;
1076 + uint32_t shader_rec_size;
1078 + /* Pointers to the uniform data. These pointers are incremented, and
1079 + * size decremented, as each batch of uniforms is uploaded.
1083 + uint32_t uniforms_p;
1084 + uint32_t uniforms_size;
1087 +static inline struct vc4_exec_info *
1088 +vc4_first_job(struct vc4_dev *vc4)
1090 + if (list_empty(&vc4->job_list))
1092 + return list_first_entry(&vc4->job_list, struct vc4_exec_info, head);
1096 + * struct vc4_texture_sample_info - saves the offsets into the UBO for texture
1097 + * setup parameters.
1099 + * This will be used at draw time to relocate the reference to the texture
1100 + * contents in p0, and validate that the offset combined with
1101 + * width/height/stride/etc. from p1 and p2/p3 doesn't sample outside the BO.
1102 + * Note that the hardware treats unprovided config parameters as 0, so not all
1103 + * of them need to be set up for every texture sample, and we'll store ~0 as
1104 + * the offset to mark the unused ones.
1106 + * See the VC4 3D architecture guide page 41 ("Texture and Memory Lookup Unit
1107 + * Setup") for definitions of the texture parameters.
1109 +struct vc4_texture_sample_info {
1111 + uint32_t p_offset[4];
1115 + * struct vc4_validated_shader_info - information about validated shaders that
1116 + * needs to be used from command list validation.
1118 + * For a given shader, each time a shader state record references it, we need
1119 + * to verify that the shader doesn't read more uniforms than the shader state
1120 + * record's uniform BO pointer can provide, and we need to apply relocations
1121 + * and validate the shader state record's uniforms that define the texture
1124 +struct vc4_validated_shader_info
1126 + uint32_t uniforms_size;
1127 + uint32_t uniforms_src_size;
1128 + uint32_t num_texture_samples;
1129 + struct vc4_texture_sample_info *texture_samples;
1133 * _wait_for - magic (register) wait macro
1135 @@ -111,6 +360,18 @@ int vc4_dumb_create(struct drm_file *fil
1136 struct drm_mode_create_dumb *args);
1137 struct dma_buf *vc4_prime_export(struct drm_device *dev,
1138 struct drm_gem_object *obj, int flags);
1139 +int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
1140 + struct drm_file *file_priv);
1141 +int vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
1142 + struct drm_file *file_priv);
1143 +int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
1144 + struct drm_file *file_priv);
1145 +int vc4_mmap(struct file *filp, struct vm_area_struct *vma);
1146 +int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
1147 +void *vc4_prime_vmap(struct drm_gem_object *obj);
1148 +void vc4_bo_cache_init(struct drm_device *dev);
1149 +void vc4_bo_cache_destroy(struct drm_device *dev);
1150 +int vc4_bo_stats_debugfs(struct seq_file *m, void *arg);
1153 extern struct platform_driver vc4_crtc_driver;
1154 @@ -126,10 +387,34 @@ void vc4_debugfs_cleanup(struct drm_mino
1156 void __iomem *vc4_ioremap_regs(struct platform_device *dev, int index);
1159 +void vc4_gem_init(struct drm_device *dev);
1160 +void vc4_gem_destroy(struct drm_device *dev);
1161 +int vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
1162 + struct drm_file *file_priv);
1163 +int vc4_wait_seqno_ioctl(struct drm_device *dev, void *data,
1164 + struct drm_file *file_priv);
1165 +int vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
1166 + struct drm_file *file_priv);
1167 +void vc4_submit_next_job(struct drm_device *dev);
1168 +int vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno,
1169 + uint64_t timeout_ns, bool interruptible);
1170 +void vc4_job_handle_completed(struct vc4_dev *vc4);
1171 +int vc4_queue_seqno_cb(struct drm_device *dev,
1172 + struct vc4_seqno_cb *cb, uint64_t seqno,
1173 + void (*func)(struct vc4_seqno_cb *cb));
1176 extern struct platform_driver vc4_hdmi_driver;
1177 int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused);
1180 +irqreturn_t vc4_irq(int irq, void *arg);
1181 +void vc4_irq_preinstall(struct drm_device *dev);
1182 +int vc4_irq_postinstall(struct drm_device *dev);
1183 +void vc4_irq_uninstall(struct drm_device *dev);
1184 +void vc4_irq_reset(struct drm_device *dev);
1187 extern struct platform_driver vc4_hvs_driver;
1188 void vc4_hvs_dump_state(struct drm_device *dev);
1189 @@ -143,3 +428,35 @@ struct drm_plane *vc4_plane_init(struct
1190 enum drm_plane_type type);
1191 u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist);
1192 u32 vc4_plane_dlist_size(struct drm_plane_state *state);
1193 +void vc4_plane_async_set_fb(struct drm_plane *plane, struct drm_framebuffer *fb);
1196 +extern struct platform_driver vc4_v3d_driver;
1197 +int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused);
1198 +int vc4_v3d_debugfs_regs(struct seq_file *m, void *unused);
1199 +int vc4_v3d_set_power(struct vc4_dev *vc4, bool on);
1201 +/* vc4_validate.c */
1203 +vc4_validate_bin_cl(struct drm_device *dev,
1205 + void *unvalidated,
1206 + struct vc4_exec_info *exec);
1209 +vc4_validate_shader_recs(struct drm_device *dev, struct vc4_exec_info *exec);
1211 +struct vc4_validated_shader_info *
1212 +vc4_validate_shader(struct drm_gem_cma_object *shader_obj);
1214 +bool vc4_use_bo(struct vc4_exec_info *exec,
1216 + enum vc4_bo_mode mode,
1217 + struct drm_gem_cma_object **obj);
1219 +int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec);
1221 +bool vc4_check_tex_size(struct vc4_exec_info *exec,
1222 + struct drm_gem_cma_object *fbo,
1223 + uint32_t offset, uint8_t tiling_format,
1224 + uint32_t width, uint32_t height, uint8_t cpp);
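The emit_seqno/finished_seqno pair documented above also gives a cheap idleness test: a BO is idle once its last-render seqno has been passed by finished_seqno, which is the fast path vc4_wait_for_seqno() takes before sleeping. A minimal sketch of the counter semantics (plain C; not driver code):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* emit_seqno: last job queued; finished_seqno: last job completed.
     * Both start at 0, so a freshly created BO (seqno 0) is always idle.
     */
    struct seqno_counters {
            uint64_t emit_seqno;
            uint64_t finished_seqno;
    };

    static bool bo_idle(const struct seqno_counters *c, uint64_t bo_seqno)
    {
            return bo_seqno <= c->finished_seqno;
    }

    int main(void)
    {
            struct seqno_counters c = { .emit_seqno = 5, .finished_seqno = 4 };

            printf("bo@4 idle: %d\n", bo_idle(&c, 4)); /* 1: completed */
            printf("bo@5 idle: %d\n", bo_idle(&c, 5)); /* 0: still queued */
            return 0;
    }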
1226 +++ b/drivers/gpu/drm/vc4/vc4_gem.c
1229 + * Copyright © 2014 Broadcom
1231 + * Permission is hereby granted, free of charge, to any person obtaining a
1232 + * copy of this software and associated documentation files (the "Software"),
1233 + * to deal in the Software without restriction, including without limitation
1234 + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
1235 + * and/or sell copies of the Software, and to permit persons to whom the
1236 + * Software is furnished to do so, subject to the following conditions:
1238 + * The above copyright notice and this permission notice (including the next
1239 + * paragraph) shall be included in all copies or substantial portions of the
1242 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
1243 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
1244 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
1245 + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
1246 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
1247 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
1248 + * IN THE SOFTWARE.
1251 +#include <linux/module.h>
1252 +#include <linux/platform_device.h>
1253 +#include <linux/device.h>
1254 +#include <linux/io.h>
1256 +#include "uapi/drm/vc4_drm.h"
1257 +#include "vc4_drv.h"
1258 +#include "vc4_regs.h"
1259 +#include "vc4_trace.h"
1262 +vc4_queue_hangcheck(struct drm_device *dev)
1264 + struct vc4_dev *vc4 = to_vc4_dev(dev);
1266 + mod_timer(&vc4->hangcheck.timer,
1267 + round_jiffies_up(jiffies + msecs_to_jiffies(100)));
1271 +vc4_reset(struct drm_device *dev)
1273 + struct vc4_dev *vc4 = to_vc4_dev(dev);
1275 + DRM_INFO("Resetting GPU.\n");
1276 + vc4_v3d_set_power(vc4, false);
1277 + vc4_v3d_set_power(vc4, true);
1279 + vc4_irq_reset(dev);
1281 + /* Rearm the hangcheck -- another job might have been waiting
1282 + * for our hung one to get kicked off, and vc4_irq_reset()
1283 + * would have started it.
1285 + vc4_queue_hangcheck(dev);
1289 +vc4_reset_work(struct work_struct *work)
1291 + struct vc4_dev *vc4 =
1292 + container_of(work, struct vc4_dev, hangcheck.reset_work);
1294 + vc4_reset(vc4->dev);
1298 +vc4_hangcheck_elapsed(unsigned long data)
1300 + struct drm_device *dev = (struct drm_device *)data;
1301 + struct vc4_dev *vc4 = to_vc4_dev(dev);
1302 + uint32_t ct0ca, ct1ca;
1304 + /* If idle, we can stop watching for hangs. */
1305 + if (list_empty(&vc4->job_list))
1308 + ct0ca = V3D_READ(V3D_CTNCA(0));
1309 + ct1ca = V3D_READ(V3D_CTNCA(1));
1311 + /* If we've made any progress in execution, rearm the timer
1314 + if (ct0ca != vc4->hangcheck.last_ct0ca ||
1315 + ct1ca != vc4->hangcheck.last_ct1ca) {
1316 + vc4->hangcheck.last_ct0ca = ct0ca;
1317 + vc4->hangcheck.last_ct1ca = ct1ca;
1318 + vc4_queue_hangcheck(dev);
1322 + /* We've gone too long with no progress, reset. This has to
1323 + * be done from a work struct, since resetting can sleep and
1324 + * this timer hook isn't allowed to.
1326 + schedule_work(&vc4->hangcheck.reset_work);
1330 +submit_cl(struct drm_device *dev, uint32_t thread, uint32_t start, uint32_t end)
1332 + struct vc4_dev *vc4 = to_vc4_dev(dev);
1334 + /* Stop any existing thread and set state to "stopped at halt" */
1335 + V3D_WRITE(V3D_CTNCS(thread), V3D_CTRUN);
1338 + V3D_WRITE(V3D_CTNCA(thread), start);
1341 + /* Set the end address of the control list. Writing this
1342 + * register is what starts the job.
1344 + V3D_WRITE(V3D_CTNEA(thread), end);
1349 +vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno, uint64_t timeout_ns,
1350 + bool interruptible)
1352 + struct vc4_dev *vc4 = to_vc4_dev(dev);
1354 + unsigned long timeout_expire;
1355 + DEFINE_WAIT(wait);
1357 + if (vc4->finished_seqno >= seqno)
1360 + if (timeout_ns == 0)
1363 + timeout_expire = jiffies + nsecs_to_jiffies(timeout_ns);
1365 + trace_vc4_wait_for_seqno_begin(dev, seqno, timeout_ns);
1367 + prepare_to_wait(&vc4->job_wait_queue, &wait,
1368 + interruptible ? TASK_INTERRUPTIBLE :
1369 + TASK_UNINTERRUPTIBLE);
1371 + if (interruptible && signal_pending(current)) {
1372 + ret = -ERESTARTSYS;
1376 + if (vc4->finished_seqno >= seqno)
1379 + if (timeout_ns != ~0ull) {
1380 + if (time_after_eq(jiffies, timeout_expire)) {
1384 + schedule_timeout(timeout_expire - jiffies);
1390 + finish_wait(&vc4->job_wait_queue, &wait);
1391 + trace_vc4_wait_for_seqno_end(dev, seqno);
1393 + if (ret && ret != -ERESTARTSYS) {
1394 + DRM_ERROR("timeout waiting for render thread idle\n");
1402 +vc4_flush_caches(struct drm_device *dev)
1404 + struct vc4_dev *vc4 = to_vc4_dev(dev);
1406 + /* Flush the GPU L2 caches. These caches sit on top of system
1407 + * L3 (the 128kb or so shared with the CPU), and are
1408 + * non-allocating in the L3.
1410 + V3D_WRITE(V3D_L2CACTL,
1411 + V3D_L2CACTL_L2CCLR);
1413 + V3D_WRITE(V3D_SLCACTL,
1414 + VC4_SET_FIELD(0xf, V3D_SLCACTL_T1CC) |
1415 + VC4_SET_FIELD(0xf, V3D_SLCACTL_T0CC) |
1416 + VC4_SET_FIELD(0xf, V3D_SLCACTL_UCC) |
1417 + VC4_SET_FIELD(0xf, V3D_SLCACTL_ICC));
1420 +/* Sets the registers for the next job to actually be executed in
1423 + * The job_lock should be held during this.
1426 +vc4_submit_next_job(struct drm_device *dev)
1428 + struct vc4_dev *vc4 = to_vc4_dev(dev);
1429 + struct vc4_exec_info *exec = vc4_first_job(vc4);
1434 + vc4_flush_caches(dev);
1436 + /* Disable the binner's pre-loaded overflow memory address */
1437 + V3D_WRITE(V3D_BPOA, 0);
1438 + V3D_WRITE(V3D_BPOS, 0);
1440 + if (exec->ct0ca != exec->ct0ea)
1441 + submit_cl(dev, 0, exec->ct0ca, exec->ct0ea);
1442 + submit_cl(dev, 1, exec->ct1ca, exec->ct1ea);
1446 +vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno)
1448 + struct vc4_bo *bo;
1451 + for (i = 0; i < exec->bo_count; i++) {
1452 + bo = to_vc4_bo(&exec->bo[i].bo->base);
1453 + bo->seqno = seqno;
1456 + list_for_each_entry(bo, &exec->unref_list, unref_head) {
1457 + bo->seqno = seqno;
1461 +/* Queues a struct vc4_exec_info for execution. If no job is
1462 + * currently executing, then submits it.
1464 + * Unlike most GPUs, our hardware only handles one command list at a
1465 + * time. To queue multiple jobs at once, we'd need to edit the
1466 + * previous command list to have a jump to the new one at the end, and
1467 + * then bump the end address. That's a change for a later date,
1471 +vc4_queue_submit(struct drm_device *dev, struct vc4_exec_info *exec)
1473 + struct vc4_dev *vc4 = to_vc4_dev(dev);
1474 + uint64_t seqno = ++vc4->emit_seqno;
1475 + unsigned long irqflags;
1477 + exec->seqno = seqno;
1478 + vc4_update_bo_seqnos(exec, seqno);
1480 + spin_lock_irqsave(&vc4->job_lock, irqflags);
1481 + list_add_tail(&exec->head, &vc4->job_list);
1483 + /* If no job was executing, kick ours off. Otherwise, it'll
1484 + * get started when the previous job's frame done interrupt
1487 + if (vc4_first_job(vc4) == exec) {
1488 + vc4_submit_next_job(dev);
1489 + vc4_queue_hangcheck(dev);
1492 + spin_unlock_irqrestore(&vc4->job_lock, irqflags);
1496 + * Looks up a bunch of GEM handles for BOs and stores the array for
1497 + * use in the command validator that actually writes relocated
1498 + * addresses pointing to them.
1501 +vc4_cl_lookup_bos(struct drm_device *dev,
1502 + struct drm_file *file_priv,
1503 + struct vc4_exec_info *exec)
1505 + struct drm_vc4_submit_cl *args = exec->args;
1506 + uint32_t *handles;
1510 + exec->bo_count = args->bo_handle_count;
1512 + if (!exec->bo_count) {
1513 + /* See comment on bo_index for why we have to check
1516 + DRM_ERROR("Rendering requires BOs to validate\n");
1520 + exec->bo = kcalloc(exec->bo_count, sizeof(struct vc4_bo_exec_state),
1523 + DRM_ERROR("Failed to allocate validated BO pointers\n");
1527 + handles = drm_malloc_ab(exec->bo_count, sizeof(uint32_t));
1529 + DRM_ERROR("Failed to allocate incoming GEM handles\n");
1533 + ret = copy_from_user(handles,
1534 + (void __user *)(uintptr_t)args->bo_handles,
1535 + exec->bo_count * sizeof(uint32_t));
1537 + DRM_ERROR("Failed to copy in GEM handles\n");
1541 + spin_lock(&file_priv->table_lock);
1542 + for (i = 0; i < exec->bo_count; i++) {
1543 + struct drm_gem_object *bo = idr_find(&file_priv->object_idr,
1546 + DRM_ERROR("Failed to look up GEM BO %d: %d\n",
1549 + spin_unlock(&file_priv->table_lock);
1552 + drm_gem_object_reference(bo);
1553 + exec->bo[i].bo = (struct drm_gem_cma_object *)bo;
1555 + spin_unlock(&file_priv->table_lock);
1563 +vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
1565 + struct drm_vc4_submit_cl *args = exec->args;
1566 + void *temp = NULL;
1569 + uint32_t bin_offset = 0;
1570 + uint32_t shader_rec_offset = roundup(bin_offset + args->bin_cl_size,
1572 + uint32_t uniforms_offset = shader_rec_offset + args->shader_rec_size;
1573 + uint32_t exec_size = uniforms_offset + args->uniforms_size;
1574 + uint32_t temp_size = exec_size + (sizeof(struct vc4_shader_state) *
1575 + args->shader_rec_count);
1576 + struct vc4_bo *bo;
1578 + if (uniforms_offset < shader_rec_offset ||
1579 + exec_size < uniforms_offset ||
1580 + args->shader_rec_count >= (UINT_MAX /
1581 + sizeof(struct vc4_shader_state)) ||
1582 + temp_size < exec_size) {
1583 + DRM_ERROR("overflow in exec arguments\n");
1587 + /* Allocate space where we'll store the copied in user command lists
1588 + * and shader records.
1590 + * We don't just copy directly into the BOs because we need to
1591 +	 * read the contents back for validation, and access through
1592 +	 * bo->vaddr is likely uncached.
1594 + temp = kmalloc(temp_size, GFP_KERNEL);
1596 + DRM_ERROR("Failed to allocate storage for copying "
1597 + "in bin/render CLs.\n");
1601 + bin = temp + bin_offset;
1602 + exec->shader_rec_u = temp + shader_rec_offset;
1603 + exec->uniforms_u = temp + uniforms_offset;
1604 + exec->shader_state = temp + exec_size;
1605 + exec->shader_state_size = args->shader_rec_count;
1607 + ret = copy_from_user(bin,
1608 + (void __user *)(uintptr_t)args->bin_cl,
1609 + args->bin_cl_size);
1611 + DRM_ERROR("Failed to copy in bin cl\n");
1615 + ret = copy_from_user(exec->shader_rec_u,
1616 + (void __user *)(uintptr_t)args->shader_rec,
1617 + args->shader_rec_size);
1619 + DRM_ERROR("Failed to copy in shader recs\n");
1623 + ret = copy_from_user(exec->uniforms_u,
1624 + (void __user *)(uintptr_t)args->uniforms,
1625 + args->uniforms_size);
1627 + DRM_ERROR("Failed to copy in uniforms cl\n");
1631 + bo = vc4_bo_create(dev, exec_size);
1633 + DRM_ERROR("Couldn't allocate BO for binning\n");
1634 +		ret = PTR_ERR(bo);
1637 + exec->exec_bo = &bo->base;
1639 + list_add_tail(&to_vc4_bo(&exec->exec_bo->base)->unref_head,
1640 + &exec->unref_list);
1642 + exec->ct0ca = exec->exec_bo->paddr + bin_offset;
1644 + exec->shader_rec_v = exec->exec_bo->vaddr + shader_rec_offset;
1645 + exec->shader_rec_p = exec->exec_bo->paddr + shader_rec_offset;
1646 + exec->shader_rec_size = args->shader_rec_size;
1648 + exec->uniforms_v = exec->exec_bo->vaddr + uniforms_offset;
1649 + exec->uniforms_p = exec->exec_bo->paddr + uniforms_offset;
1650 + exec->uniforms_size = args->uniforms_size;
1652 + ret = vc4_validate_bin_cl(dev,
1653 + exec->exec_bo->vaddr + bin_offset,
1659 + ret = vc4_validate_shader_recs(dev, exec);
1667 +vc4_complete_exec(struct vc4_exec_info *exec)
1672 + for (i = 0; i < exec->bo_count; i++)
1673 + drm_gem_object_unreference(&exec->bo[i].bo->base);
1677 + while (!list_empty(&exec->unref_list)) {
1678 + struct vc4_bo *bo = list_first_entry(&exec->unref_list,
1679 + struct vc4_bo, unref_head);
1680 + list_del(&bo->unref_head);
1681 + drm_gem_object_unreference(&bo->base.base);
1688 +vc4_job_handle_completed(struct vc4_dev *vc4)
1690 + unsigned long irqflags;
1691 + struct vc4_seqno_cb *cb, *cb_temp;
1693 + spin_lock_irqsave(&vc4->job_lock, irqflags);
1694 + while (!list_empty(&vc4->job_done_list)) {
1695 + struct vc4_exec_info *exec =
1696 + list_first_entry(&vc4->job_done_list,
1697 + struct vc4_exec_info, head);
1698 + list_del(&exec->head);
1700 + spin_unlock_irqrestore(&vc4->job_lock, irqflags);
1701 + vc4_complete_exec(exec);
1702 + spin_lock_irqsave(&vc4->job_lock, irqflags);
1704 + spin_unlock_irqrestore(&vc4->job_lock, irqflags);
1706 + list_for_each_entry_safe(cb, cb_temp, &vc4->seqno_cb_list, work.entry) {
1707 + if (cb->seqno <= vc4->finished_seqno) {
1708 + list_del_init(&cb->work.entry);
1709 + schedule_work(&cb->work);
1714 +static void vc4_seqno_cb_work(struct work_struct *work)
1716 + struct vc4_seqno_cb *cb = container_of(work, struct vc4_seqno_cb, work);
1720 +int vc4_queue_seqno_cb(struct drm_device *dev,
1721 + struct vc4_seqno_cb *cb, uint64_t seqno,
1722 + void (*func)(struct vc4_seqno_cb *cb))
1724 + struct vc4_dev *vc4 = to_vc4_dev(dev);
1728 + INIT_WORK(&cb->work, vc4_seqno_cb_work);
1730 + mutex_lock(&dev->struct_mutex);
1731 + if (seqno > vc4->finished_seqno) {
1732 + cb->seqno = seqno;
1733 + list_add_tail(&cb->work.entry, &vc4->seqno_cb_list);
1735 + schedule_work(&cb->work);
1737 + mutex_unlock(&dev->struct_mutex);
1742 +/* Scheduled when any job has been completed, this walks the list of
1743 + * jobs that had completed and unrefs their BOs and frees their exec
1747 +vc4_job_done_work(struct work_struct *work)
1749 + struct vc4_dev *vc4 =
1750 + container_of(work, struct vc4_dev, job_done_work);
1751 + struct drm_device *dev = vc4->dev;
1753 + /* Need the struct lock for drm_gem_object_unreference(). */
1754 + mutex_lock(&dev->struct_mutex);
1755 + vc4_job_handle_completed(vc4);
1756 + mutex_unlock(&dev->struct_mutex);
1760 +vc4_wait_for_seqno_ioctl_helper(struct drm_device *dev,
1762 + uint64_t *timeout_ns)
1764 + unsigned long start = jiffies;
1765 + int ret = vc4_wait_for_seqno(dev, seqno, *timeout_ns, true);
1767 + if ((ret == -EINTR || ret == -ERESTARTSYS) && *timeout_ns != ~0ull) {
1768 + uint64_t delta = jiffies_to_nsecs(jiffies - start);
1769 + if (*timeout_ns >= delta)
1770 + *timeout_ns -= delta;
1777 +vc4_wait_seqno_ioctl(struct drm_device *dev, void *data,
1778 + struct drm_file *file_priv)
1780 + struct drm_vc4_wait_seqno *args = data;
1782 + return vc4_wait_for_seqno_ioctl_helper(dev, args->seqno,
1783 + &args->timeout_ns);
1787 +vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
1788 + struct drm_file *file_priv)
1791 + struct drm_vc4_wait_bo *args = data;
1792 + struct drm_gem_object *gem_obj;
1793 + struct vc4_bo *bo;
1795 + gem_obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1797 + DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
1800 + bo = to_vc4_bo(gem_obj);
1802 + ret = vc4_wait_for_seqno_ioctl_helper(dev, bo->seqno, &args->timeout_ns);
1804 + drm_gem_object_unreference(gem_obj);
1809 + * Submits a command list to the VC4.
1811 + * This is what is called batchbuffer emitting on other hardware.
1814 +vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
1815 + struct drm_file *file_priv)
1817 + struct vc4_dev *vc4 = to_vc4_dev(dev);
1818 + struct drm_vc4_submit_cl *args = data;
1819 + struct vc4_exec_info *exec;
1822 + if ((args->flags & ~VC4_SUBMIT_CL_USE_CLEAR_COLOR) != 0) {
1823 + DRM_ERROR("Unknown flags: 0x%02x\n", args->flags);
1827 + exec = kcalloc(1, sizeof(*exec), GFP_KERNEL);
1829 + DRM_ERROR("malloc failure on exec struct\n");
1833 + exec->args = args;
1834 + INIT_LIST_HEAD(&exec->unref_list);
1836 + mutex_lock(&dev->struct_mutex);
1838 + ret = vc4_cl_lookup_bos(dev, file_priv, exec);
1842 + if (exec->args->bin_cl_size != 0) {
1843 + ret = vc4_get_bcl(dev, exec);
1847 + exec->ct0ca = exec->ct0ea = 0;
1850 + ret = vc4_get_rcl(dev, exec);
1854 + /* Clear this out of the struct we'll be putting in the queue,
1855 + * since it's part of our stack.
1857 + exec->args = NULL;
1859 + vc4_queue_submit(dev, exec);
1861 + /* Return the seqno for our job. */
1862 + args->seqno = vc4->emit_seqno;
1864 + mutex_unlock(&dev->struct_mutex);
1869 + vc4_complete_exec(exec);
1871 + mutex_unlock(&dev->struct_mutex);
1877 +vc4_gem_init(struct drm_device *dev)
1879 + struct vc4_dev *vc4 = to_vc4_dev(dev);
1881 + INIT_LIST_HEAD(&vc4->job_list);
1882 + INIT_LIST_HEAD(&vc4->job_done_list);
1883 + INIT_LIST_HEAD(&vc4->seqno_cb_list);
1884 + spin_lock_init(&vc4->job_lock);
1886 + INIT_WORK(&vc4->hangcheck.reset_work, vc4_reset_work);
1887 + setup_timer(&vc4->hangcheck.timer,
1888 + vc4_hangcheck_elapsed,
1889 + (unsigned long) dev);
1891 + INIT_WORK(&vc4->job_done_work, vc4_job_done_work);
1895 +vc4_gem_destroy(struct drm_device *dev)
1897 + struct vc4_dev *vc4 = to_vc4_dev(dev);
1899 + /* Waiting for exec to finish would need to be done before
1900 + * unregistering V3D.
1902 + WARN_ON(vc4->emit_seqno != vc4->finished_seqno);
1904 + /* V3D should already have disabled its interrupt and cleared
1905 + * the overflow allocation registers. Now free the object.
1907 + if (vc4->overflow_mem) {
1908 + drm_gem_object_unreference_unlocked(&vc4->overflow_mem->base.base);
1909 + vc4->overflow_mem = NULL;
1912 + vc4_bo_cache_destroy(dev);
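One detail of vc4_get_bcl() worth making concrete: the bin CL, shader recs, and uniforms are packed into one allocation whose offsets are derived from user-controlled u32 sizes, so each derived offset is compared against the value it was built from to catch wraparound before anything is allocated. A stand-alone sketch of that overflow-check idiom (simplified: the roundup and shader-state tail are omitted; not driver code):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Every derived offset must be >= its predecessor, or a u32 add
     * wrapped and the layout is rejected, as in vc4_get_bcl().
     */
    static bool layout_ok(uint32_t bin_size, uint32_t shader_rec_size,
                          uint32_t uniforms_size)
    {
            uint32_t shader_rec_offset = bin_size;
            uint32_t uniforms_offset = shader_rec_offset + shader_rec_size;
            uint32_t exec_size = uniforms_offset + uniforms_size;

            return uniforms_offset >= shader_rec_offset &&
                   exec_size >= uniforms_offset;
    }

    int main(void)
    {
            printf("sane: %d\n", layout_ok(4096, 1024, 512));             /* 1 */
            printf("wrap: %d\n", layout_ok(0xffffff00u, 0x200u, 0x100u)); /* 0 */
            return 0;
    }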
1915 +++ b/drivers/gpu/drm/vc4/vc4_irq.c
1918 + * Copyright © 2014 Broadcom
1920 + * Permission is hereby granted, free of charge, to any person obtaining a
1921 + * copy of this software and associated documentation files (the "Software"),
1922 + * to deal in the Software without restriction, including without limitation
1923 + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
1924 + * and/or sell copies of the Software, and to permit persons to whom the
1925 + * Software is furnished to do so, subject to the following conditions:
1927 + * The above copyright notice and this permission notice (including the next
1928 + * paragraph) shall be included in all copies or substantial portions of the
1931 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
1932 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
1933 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
1934 + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
1935 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
1936 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
1937 + * IN THE SOFTWARE.
1940 +/** DOC: Interrupt management for the V3D engine.
1942 + * We have an interrupt status register (V3D_INTCTL) which reports
1943 + * interrupts, and where writing 1 bits clears those interrupts.
1944 + * There are also a pair of interrupt registers
1945 + * (V3D_INTENA/V3D_INTDIS) where writing a 1 to their bits enables or
1946 + * disables that specific interrupt, and 0s written are ignored
1947 + * (reading either one returns the set of enabled interrupts).
1949 + * When we take a render frame interrupt, we need to wake the
1950 + * processes waiting for some frame to be done, and get the next frame
1951 + * submitted ASAP (so the hardware doesn't sit idle when there's work
1954 + * When we take the binner out of memory interrupt, we need to
1955 + * allocate some new memory and pass it to the binner so that the
1956 + * current job can make progress.
1959 +#include "vc4_drv.h"
1960 +#include "vc4_regs.h"
1962 +#define V3D_DRIVER_IRQS (V3D_INT_OUTOMEM | \
1965 +DECLARE_WAIT_QUEUE_HEAD(render_wait);
1968 +vc4_overflow_mem_work(struct work_struct *work)
1970 + struct vc4_dev *vc4 =
1971 + container_of(work, struct vc4_dev, overflow_mem_work);
1972 + struct drm_device *dev = vc4->dev;
1973 + struct vc4_bo *bo;
1975 + bo = vc4_bo_create(dev, 256 * 1024);
1977 + DRM_ERROR("Couldn't allocate binner overflow mem\n");
1981 + /* If there's a job executing currently, then our previous
1982 + * overflow allocation is getting used in that job and we need
1983 + * to queue it to be released when the job is done. But if no
1984 + * job is executing at all, then we can free the old overflow
1985 +	 * object directly.
1987 + * No lock necessary for this pointer since we're the only
1988 + * ones that update the pointer, and our workqueue won't
1991 + if (vc4->overflow_mem) {
1992 + struct vc4_exec_info *current_exec;
1993 + unsigned long irqflags;
1995 + spin_lock_irqsave(&vc4->job_lock, irqflags);
1996 + current_exec = vc4_first_job(vc4);
1997 + if (current_exec) {
1998 + vc4->overflow_mem->seqno = vc4->finished_seqno + 1;
1999 + list_add_tail(&vc4->overflow_mem->unref_head,
2000 +				      &current_exec->unref_list);
2001 + vc4->overflow_mem = NULL;
2003 + spin_unlock_irqrestore(&vc4->job_lock, irqflags);
2006 + if (vc4->overflow_mem) {
2007 + drm_gem_object_unreference_unlocked(&vc4->overflow_mem->base.base);
2009 + vc4->overflow_mem = bo;
2011 + V3D_WRITE(V3D_BPOA, bo->base.paddr);
2012 + V3D_WRITE(V3D_BPOS, bo->base.base.size);
2013 + V3D_WRITE(V3D_INTCTL, V3D_INT_OUTOMEM);
2014 + V3D_WRITE(V3D_INTENA, V3D_INT_OUTOMEM);
2018 +vc4_irq_finish_job(struct drm_device *dev)
2020 + struct vc4_dev *vc4 = to_vc4_dev(dev);
2021 + struct vc4_exec_info *exec = vc4_first_job(vc4);
2026 + vc4->finished_seqno++;
2027 + list_move_tail(&exec->head, &vc4->job_done_list);
2028 + vc4_submit_next_job(dev);
2030 + wake_up_all(&vc4->job_wait_queue);
2031 + schedule_work(&vc4->job_done_work);
2035 +vc4_irq(int irq, void *arg)
2037 + struct drm_device *dev = arg;
2038 + struct vc4_dev *vc4 = to_vc4_dev(dev);
2040 + irqreturn_t status = IRQ_NONE;
2043 + intctl = V3D_READ(V3D_INTCTL);
2045 + /* Acknowledge the interrupts we're handling here. The render
2046 + * frame done interrupt will be cleared, while OUTOMEM will
2047 + * stay high until the underlying cause is cleared.
2049 + V3D_WRITE(V3D_INTCTL, intctl);
2051 + if (intctl & V3D_INT_OUTOMEM) {
2052 + /* Disable OUTOMEM until the work is done. */
2053 + V3D_WRITE(V3D_INTDIS, V3D_INT_OUTOMEM);
2054 + schedule_work(&vc4->overflow_mem_work);
2055 + status = IRQ_HANDLED;
2058 + if (intctl & V3D_INT_FRDONE) {
2059 + spin_lock(&vc4->job_lock);
2060 + vc4_irq_finish_job(dev);
2061 + spin_unlock(&vc4->job_lock);
2062 + status = IRQ_HANDLED;
2069 +vc4_irq_preinstall(struct drm_device *dev)
2071 + struct vc4_dev *vc4 = to_vc4_dev(dev);
2073 + init_waitqueue_head(&vc4->job_wait_queue);
2074 + INIT_WORK(&vc4->overflow_mem_work, vc4_overflow_mem_work);
2076 + /* Clear any pending interrupts someone might have left around
2079 + V3D_WRITE(V3D_INTCTL, V3D_DRIVER_IRQS);
2083 +vc4_irq_postinstall(struct drm_device *dev)
2085 + struct vc4_dev *vc4 = to_vc4_dev(dev);
2087 + /* Enable both the render done and out of memory interrupts. */
2088 + V3D_WRITE(V3D_INTENA, V3D_DRIVER_IRQS);
2094 +vc4_irq_uninstall(struct drm_device *dev)
2096 + struct vc4_dev *vc4 = to_vc4_dev(dev);
2098 + /* Disable sending interrupts for our driver's IRQs. */
2099 + V3D_WRITE(V3D_INTDIS, V3D_DRIVER_IRQS);
2101 + /* Clear any pending interrupts we might have left. */
2102 + V3D_WRITE(V3D_INTCTL, V3D_DRIVER_IRQS);
2104 + cancel_work_sync(&vc4->overflow_mem_work);
2107 +/** Reinitializes interrupt registers when a GPU reset is performed. */
2108 +void vc4_irq_reset(struct drm_device *dev)
2110 + struct vc4_dev *vc4 = to_vc4_dev(dev);
2111 + unsigned long irqflags;
2113 + /* Acknowledge any stale IRQs. */
2114 + V3D_WRITE(V3D_INTCTL, V3D_DRIVER_IRQS);
2117 + * Turn all our interrupts on. Binner out of memory is the
2118 + * only one we expect to trigger at this point, since we've
2119 + * just come from poweron and haven't supplied any overflow
2122 + V3D_WRITE(V3D_INTENA, V3D_DRIVER_IRQS);
2124 + spin_lock_irqsave(&vc4->job_lock, irqflags);
2125 + vc4_irq_finish_job(dev);
2126 + spin_unlock_irqrestore(&vc4->job_lock, irqflags);
2128 --- a/drivers/gpu/drm/vc4/vc4_kms.c
2129 +++ b/drivers/gpu/drm/vc4/vc4_kms.c
2133 #include "drm_crtc.h"
2134 +#include "drm_atomic.h"
2135 #include "drm_atomic_helper.h"
2136 #include "drm_crtc_helper.h"
2137 #include "drm_plane_helper.h"
2138 @@ -29,10 +30,151 @@ static void vc4_output_poll_changed(stru
2139 drm_fbdev_cma_hotplug_event(vc4->fbdev);
2142 +struct vc4_commit {
2143 + struct drm_device *dev;
2144 + struct drm_atomic_state *state;
2145 + struct vc4_seqno_cb cb;
2149 +vc4_atomic_complete_commit(struct vc4_commit *c)
2151 + struct drm_atomic_state *state = c->state;
2152 + struct drm_device *dev = state->dev;
2153 + struct vc4_dev *vc4 = to_vc4_dev(dev);
2155 + drm_atomic_helper_commit_modeset_disables(dev, state);
2157 + drm_atomic_helper_commit_planes(dev, state);
2159 + drm_atomic_helper_commit_modeset_enables(dev, state);
2161 + drm_atomic_helper_wait_for_vblanks(dev, state);
2163 + drm_atomic_helper_cleanup_planes(dev, state);
2165 + drm_atomic_state_free(state);
2167 + up(&vc4->async_modeset);
2173 +vc4_atomic_complete_commit_seqno_cb(struct vc4_seqno_cb *cb)
2175 + struct vc4_commit *c = container_of(cb, struct vc4_commit, cb);
2177 + vc4_atomic_complete_commit(c);
2180 +static struct vc4_commit *commit_init(struct drm_atomic_state *state)
2182 + struct vc4_commit *c = kzalloc(sizeof(*c), GFP_KERNEL);
2186 + c->dev = state->dev;
2193 + * vc4_atomic_commit - commit validated state object
2194 + * @dev: DRM device
2195 + * @state: the driver state object
2196 + * @async: asynchronous commit
2198 + * This function commits a state object that has been pre-validated with
2199 + * drm_atomic_helper_check(). It can still fail when, e.g., the framebuffer
2200 + * reservation fails. For now this doesn't implement asynchronous commits.
2203 + * Zero for success or -errno.
2205 +static int vc4_atomic_commit(struct drm_device *dev,
2206 + struct drm_atomic_state *state,
2209 + struct vc4_dev *vc4 = to_vc4_dev(dev);
2212 + uint64_t wait_seqno = 0;
2213 + struct vc4_commit *c;
2215 + c = commit_init(state);
2219 + /* Make sure that any outstanding modesets have finished. */
2220 + ret = down_interruptible(&vc4->async_modeset);
2226 + ret = drm_atomic_helper_prepare_planes(dev, state);
2229 + up(&vc4->async_modeset);
2233 + for (i = 0; i < dev->mode_config.num_total_plane; i++) {
2234 + struct drm_plane *plane = state->planes[i];
2235 + struct drm_plane_state *new_state = state->plane_states[i];
2240 + if ((plane->state->fb != new_state->fb) && new_state->fb) {
2241 + struct drm_gem_cma_object *cma_bo =
2242 + drm_fb_cma_get_gem_obj(new_state->fb, 0);
2243 + struct vc4_bo *bo = to_vc4_bo(&cma_bo->base);
2244 + wait_seqno = max(bo->seqno, wait_seqno);
2249 + * This is the point of no return - everything below never fails except
2250 + * when the hw goes bonghits. Which means we can commit the new state on
2251 + * the software side now.
2254 + drm_atomic_helper_swap_state(dev, state);
2257 + * Everything below can be run asynchronously without the need to grab
2258 + * any modeset locks at all under one condition: It must be guaranteed
2259 + * that the asynchronous work has either been cancelled (if the driver
2260 + * supports it, which at least requires that the framebuffers get
2261 + * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
2262 + * before the new state gets committed on the software side with
2263 + * drm_atomic_helper_swap_state().
2265 + * This scheme allows new atomic state updates to be prepared and
2266 + * checked in parallel to the asynchronous completion of the previous
2267 + * update. Which is important since compositors need to figure out the
2268 + * composition of the next frame right after having submitted the
2273 + vc4_queue_seqno_cb(dev, &c->cb, wait_seqno,
2274 + vc4_atomic_complete_commit_seqno_cb);
2276 + vc4_wait_for_seqno(dev, wait_seqno, ~0ull, false);
2277 + vc4_atomic_complete_commit(c);
2283 static const struct drm_mode_config_funcs vc4_mode_funcs = {
2284 .output_poll_changed = vc4_output_poll_changed,
2285 .atomic_check = drm_atomic_helper_check,
2286 - .atomic_commit = drm_atomic_helper_commit,
2287 + .atomic_commit = vc4_atomic_commit,
2288 .fb_create = drm_fb_cma_create,
2291 @@ -41,6 +183,8 @@ int vc4_kms_load(struct drm_device *dev)
2292 struct vc4_dev *vc4 = to_vc4_dev(dev);
2295 + sema_init(&vc4->async_modeset, 1);
2297 ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
2299 dev_err(dev->dev, "failed to initialize vblank\n");
2300 @@ -51,6 +195,8 @@ int vc4_kms_load(struct drm_device *dev)
2301 dev->mode_config.max_height = 2048;
2302 dev->mode_config.funcs = &vc4_mode_funcs;
2303 dev->mode_config.preferred_depth = 24;
2304 + dev->mode_config.async_page_flip = true;
2306 dev->vblank_disable_allowed = true;
2308 drm_mode_config_reset(dev);
2310 +++ b/drivers/gpu/drm/vc4/vc4_packet.h
2313 + * Copyright © 2014 Broadcom
2315 + * Permission is hereby granted, free of charge, to any person obtaining a
2316 + * copy of this software and associated documentation files (the "Software"),
2317 + * to deal in the Software without restriction, including without limitation
2318 + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
2319 + * and/or sell copies of the Software, and to permit persons to whom the
2320 + * Software is furnished to do so, subject to the following conditions:
2322 + * The above copyright notice and this permission notice (including the next
2323 + * paragraph) shall be included in all copies or substantial portions of the
2326 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
2327 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
2328 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
2329 + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
2330 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
2331 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
2332 + * IN THE SOFTWARE.
2335 +#ifndef VC4_PACKET_H
2336 +#define VC4_PACKET_H
2338 +#include "vc4_regs.h" /* for VC4_MASK, VC4_GET_FIELD, VC4_SET_FIELD */
2341 + VC4_PACKET_HALT = 0,
2342 + VC4_PACKET_NOP = 1,
2344 + VC4_PACKET_FLUSH = 4,
2345 + VC4_PACKET_FLUSH_ALL = 5,
2346 + VC4_PACKET_START_TILE_BINNING = 6,
2347 + VC4_PACKET_INCREMENT_SEMAPHORE = 7,
2348 + VC4_PACKET_WAIT_ON_SEMAPHORE = 8,
2350 + VC4_PACKET_BRANCH = 16,
2351 + VC4_PACKET_BRANCH_TO_SUB_LIST = 17,
2353 + VC4_PACKET_STORE_MS_TILE_BUFFER = 24,
2354 + VC4_PACKET_STORE_MS_TILE_BUFFER_AND_EOF = 25,
2355 + VC4_PACKET_STORE_FULL_RES_TILE_BUFFER = 26,
2356 + VC4_PACKET_LOAD_FULL_RES_TILE_BUFFER = 27,
2357 + VC4_PACKET_STORE_TILE_BUFFER_GENERAL = 28,
2358 + VC4_PACKET_LOAD_TILE_BUFFER_GENERAL = 29,
2360 + VC4_PACKET_GL_INDEXED_PRIMITIVE = 32,
2361 + VC4_PACKET_GL_ARRAY_PRIMITIVE = 33,
2363 + VC4_PACKET_COMPRESSED_PRIMITIVE = 48,
2364 + VC4_PACKET_CLIPPED_COMPRESSED_PRIMITIVE = 49,
2366 + VC4_PACKET_PRIMITIVE_LIST_FORMAT = 56,
2368 + VC4_PACKET_GL_SHADER_STATE = 64,
2369 + VC4_PACKET_NV_SHADER_STATE = 65,
2370 + VC4_PACKET_VG_SHADER_STATE = 66,
2372 + VC4_PACKET_CONFIGURATION_BITS = 96,
2373 + VC4_PACKET_FLAT_SHADE_FLAGS = 97,
2374 + VC4_PACKET_POINT_SIZE = 98,
2375 + VC4_PACKET_LINE_WIDTH = 99,
2376 + VC4_PACKET_RHT_X_BOUNDARY = 100,
2377 + VC4_PACKET_DEPTH_OFFSET = 101,
2378 + VC4_PACKET_CLIP_WINDOW = 102,
2379 + VC4_PACKET_VIEWPORT_OFFSET = 103,
2380 + VC4_PACKET_Z_CLIPPING = 104,
2381 + VC4_PACKET_CLIPPER_XY_SCALING = 105,
2382 + VC4_PACKET_CLIPPER_Z_SCALING = 106,
2384 + VC4_PACKET_TILE_BINNING_MODE_CONFIG = 112,
2385 + VC4_PACKET_TILE_RENDERING_MODE_CONFIG = 113,
2386 + VC4_PACKET_CLEAR_COLORS = 114,
2387 + VC4_PACKET_TILE_COORDINATES = 115,
2389 + /* Not an actual hardware packet -- this is what we use to put
2390 + * references to GEM bos in the command stream, since we need the u32
2391 + * in the actual address packet in order to store the offset from the
2392 + * start of the BO.
2394 + VC4_PACKET_GEM_HANDLES = 254,
2395 +} __attribute__ ((__packed__));
2397 +#define VC4_PACKET_HALT_SIZE 1
2398 +#define VC4_PACKET_NOP_SIZE 1
2399 +#define VC4_PACKET_FLUSH_SIZE 1
2400 +#define VC4_PACKET_FLUSH_ALL_SIZE 1
2401 +#define VC4_PACKET_START_TILE_BINNING_SIZE 1
2402 +#define VC4_PACKET_INCREMENT_SEMAPHORE_SIZE 1
2403 +#define VC4_PACKET_WAIT_ON_SEMAPHORE_SIZE 1
2404 +#define VC4_PACKET_BRANCH_SIZE 5
2405 +#define VC4_PACKET_BRANCH_TO_SUB_LIST_SIZE 5
2406 +#define VC4_PACKET_STORE_MS_TILE_BUFFER_SIZE 1
2407 +#define VC4_PACKET_STORE_MS_TILE_BUFFER_AND_EOF_SIZE 1
2408 +#define VC4_PACKET_STORE_FULL_RES_TILE_BUFFER_SIZE 5
2409 +#define VC4_PACKET_LOAD_FULL_RES_TILE_BUFFER_SIZE 5
2410 +#define VC4_PACKET_STORE_TILE_BUFFER_GENERAL_SIZE 7
2411 +#define VC4_PACKET_LOAD_TILE_BUFFER_GENERAL_SIZE 7
2412 +#define VC4_PACKET_GL_INDEXED_PRIMITIVE_SIZE 14
2413 +#define VC4_PACKET_GL_ARRAY_PRIMITIVE_SIZE 10
2414 +#define VC4_PACKET_COMPRESSED_PRIMITIVE_SIZE 1
2415 +#define VC4_PACKET_CLIPPED_COMPRESSED_PRIMITIVE_SIZE 1
2416 +#define VC4_PACKET_PRIMITIVE_LIST_FORMAT_SIZE 2
2417 +#define VC4_PACKET_GL_SHADER_STATE_SIZE 5
2418 +#define VC4_PACKET_NV_SHADER_STATE_SIZE 5
2419 +#define VC4_PACKET_VG_SHADER_STATE_SIZE 5
2420 +#define VC4_PACKET_CONFIGURATION_BITS_SIZE 4
2421 +#define VC4_PACKET_FLAT_SHADE_FLAGS_SIZE 5
2422 +#define VC4_PACKET_POINT_SIZE_SIZE 5
2423 +#define VC4_PACKET_LINE_WIDTH_SIZE 5
2424 +#define VC4_PACKET_RHT_X_BOUNDARY_SIZE 3
2425 +#define VC4_PACKET_DEPTH_OFFSET_SIZE 5
2426 +#define VC4_PACKET_CLIP_WINDOW_SIZE 9
2427 +#define VC4_PACKET_VIEWPORT_OFFSET_SIZE 5
2428 +#define VC4_PACKET_Z_CLIPPING_SIZE 9
2429 +#define VC4_PACKET_CLIPPER_XY_SCALING_SIZE 9
2430 +#define VC4_PACKET_CLIPPER_Z_SCALING_SIZE 9
2431 +#define VC4_PACKET_TILE_BINNING_MODE_CONFIG_SIZE 16
2432 +#define VC4_PACKET_TILE_RENDERING_MODE_CONFIG_SIZE 11
2433 +#define VC4_PACKET_CLEAR_COLORS_SIZE 14
2434 +#define VC4_PACKET_TILE_COORDINATES_SIZE 3
2435 +#define VC4_PACKET_GEM_HANDLES_SIZE 9
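These sizes let the bin CL validator advance through the untrusted stream one
whole packet at a time. Conceptually the walk looks like the sketch below;
the names (packet_size, cl_len, offset) are hypothetical stand-ins for the
actual dispatch table in vc4_validate.c:

	/* Hypothetical sketch: a packet must fit entirely in the CL
	 * before its contents may be validated.
	 */
	u8 cmd = *(u8 *)(untrusted + offset);
	if (offset + packet_size[cmd] > cl_len)
		return -EINVAL;
	offset += packet_size[cmd];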
2438 + * Bits used by packets like VC4_PACKET_STORE_TILE_BUFFER_GENERAL and
2439 + * VC4_PACKET_TILE_RENDERING_MODE_CONFIG.
2441 +#define VC4_TILING_FORMAT_LINEAR 0
2442 +#define VC4_TILING_FORMAT_T 1
2443 +#define VC4_TILING_FORMAT_LT 2
2448 + * low bits of VC4_PACKET_STORE_FULL_RES_TILE_BUFFER and
2449 + * VC4_PACKET_LOAD_FULL_RES_TILE_BUFFER.
2451 +#define VC4_LOADSTORE_FULL_RES_EOF (1 << 3)
2452 +#define VC4_LOADSTORE_FULL_RES_DISABLE_CLEAR_ALL (1 << 2)
2453 +#define VC4_LOADSTORE_FULL_RES_DISABLE_ZS (1 << 1)
2454 +#define VC4_LOADSTORE_FULL_RES_DISABLE_COLOR (1 << 0)
2458 + * byte 2 of VC4_PACKET_STORE_TILE_BUFFER_GENERAL and
2459 + * VC4_PACKET_LOAD_TILE_BUFFER_GENERAL (low bits of the address)
2462 +#define VC4_LOADSTORE_TILE_BUFFER_EOF (1 << 3)
2463 +#define VC4_LOADSTORE_TILE_BUFFER_DISABLE_FULL_VG_MASK (1 << 2)
2464 +#define VC4_LOADSTORE_TILE_BUFFER_DISABLE_FULL_ZS (1 << 1)
2465 +#define VC4_LOADSTORE_TILE_BUFFER_DISABLE_FULL_COLOR (1 << 0)
2471 + * byte 0-1 of VC4_PACKET_STORE_TILE_BUFFER_GENERAL and
2472 + * VC4_PACKET_LOAD_TILE_BUFFER_GENERAL
2474 +#define VC4_STORE_TILE_BUFFER_DISABLE_VG_MASK_CLEAR (1 << 15)
2475 +#define VC4_STORE_TILE_BUFFER_DISABLE_ZS_CLEAR (1 << 14)
2476 +#define VC4_STORE_TILE_BUFFER_DISABLE_COLOR_CLEAR (1 << 13)
2477 +#define VC4_STORE_TILE_BUFFER_DISABLE_SWAP (1 << 12)
2479 +#define VC4_LOADSTORE_TILE_BUFFER_FORMAT_MASK VC4_MASK(9, 8)
2480 +#define VC4_LOADSTORE_TILE_BUFFER_FORMAT_SHIFT 8
2481 +#define VC4_LOADSTORE_TILE_BUFFER_RGBA8888 0
2482 +#define VC4_LOADSTORE_TILE_BUFFER_BGR565_DITHER 1
2483 +#define VC4_LOADSTORE_TILE_BUFFER_BGR565 2
2488 + * byte 0 of VC4_PACKET_STORE_TILE_BUFFER_GENERAL and
2489 + * VC4_PACKET_LOAD_TILE_BUFFER_GENERAL
2491 +#define VC4_STORE_TILE_BUFFER_MODE_MASK VC4_MASK(7, 6)
2492 +#define VC4_STORE_TILE_BUFFER_MODE_SHIFT 6
2493 +#define VC4_STORE_TILE_BUFFER_MODE_SAMPLE0 (0 << 6)
2494 +#define VC4_STORE_TILE_BUFFER_MODE_DECIMATE_X4 (1 << 6)
2495 +#define VC4_STORE_TILE_BUFFER_MODE_DECIMATE_X16 (2 << 6)
2497 +/** The values of the field are VC4_TILING_FORMAT_* */
2498 +#define VC4_LOADSTORE_TILE_BUFFER_TILING_MASK VC4_MASK(5, 4)
2499 +#define VC4_LOADSTORE_TILE_BUFFER_TILING_SHIFT 4
2501 +#define VC4_LOADSTORE_TILE_BUFFER_BUFFER_MASK VC4_MASK(2, 0)
2502 +#define VC4_LOADSTORE_TILE_BUFFER_BUFFER_SHIFT 0
2503 +#define VC4_LOADSTORE_TILE_BUFFER_NONE 0
2504 +#define VC4_LOADSTORE_TILE_BUFFER_COLOR 1
2505 +#define VC4_LOADSTORE_TILE_BUFFER_ZS 2
2506 +#define VC4_LOADSTORE_TILE_BUFFER_Z 3
2507 +#define VC4_LOADSTORE_TILE_BUFFER_VG_MASK 4
2508 +#define VC4_LOADSTORE_TILE_BUFFER_FULL 5
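As a sketch of how these load/store fields combine, using the VC4_SET_FIELD()
helper this header pulls in from vc4_regs.h (illustrative values only; the
same pattern appears later in vc4_render_cl.c):

	/* Illustrative: describe a T-format color-buffer store. */
	u16 bits = VC4_SET_FIELD(VC4_TILING_FORMAT_T,
				 VC4_LOADSTORE_TILE_BUFFER_TILING) |
		   VC4_SET_FIELD(VC4_LOADSTORE_TILE_BUFFER_COLOR,
				 VC4_LOADSTORE_TILE_BUFFER_BUFFER);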
2511 +#define VC4_INDEX_BUFFER_U8 (0 << 4)
2512 +#define VC4_INDEX_BUFFER_U16 (1 << 4)
2514 +/* This flag is only present in NV shader state. */
2515 +#define VC4_SHADER_FLAG_SHADED_CLIP_COORDS (1 << 3)
2516 +#define VC4_SHADER_FLAG_ENABLE_CLIPPING (1 << 2)
2517 +#define VC4_SHADER_FLAG_VS_POINT_SIZE (1 << 1)
2518 +#define VC4_SHADER_FLAG_FS_SINGLE_THREAD (1 << 0)
2520 +/** @{ byte 2 of config bits. */
2521 +#define VC4_CONFIG_BITS_EARLY_Z_UPDATE (1 << 1)
2522 +#define VC4_CONFIG_BITS_EARLY_Z (1 << 0)
2525 +/** @{ byte 1 of config bits. */
2526 +#define VC4_CONFIG_BITS_Z_UPDATE (1 << 7)
2527 +/** same values in this 3-bit field as PIPE_FUNC_* */
2528 +#define VC4_CONFIG_BITS_DEPTH_FUNC_SHIFT 4
2529 +#define VC4_CONFIG_BITS_COVERAGE_READ_LEAVE (1 << 3)
2531 +#define VC4_CONFIG_BITS_COVERAGE_UPDATE_NONZERO (0 << 1)
2532 +#define VC4_CONFIG_BITS_COVERAGE_UPDATE_ODD (1 << 1)
2533 +#define VC4_CONFIG_BITS_COVERAGE_UPDATE_OR (2 << 1)
2534 +#define VC4_CONFIG_BITS_COVERAGE_UPDATE_ZERO (3 << 1)
2536 +#define VC4_CONFIG_BITS_COVERAGE_PIPE_SELECT (1 << 0)
2539 +/** @{ byte 0 of config bits. */
2540 +#define VC4_CONFIG_BITS_RASTERIZER_OVERSAMPLE_NONE (0 << 6)
2541 +#define VC4_CONFIG_BITS_RASTERIZER_OVERSAMPLE_4X (1 << 6)
2542 +#define VC4_CONFIG_BITS_RASTERIZER_OVERSAMPLE_16X (2 << 6)
2544 +#define VC4_CONFIG_BITS_AA_POINTS_AND_LINES (1 << 4)
2545 +#define VC4_CONFIG_BITS_ENABLE_DEPTH_OFFSET (1 << 3)
2546 +#define VC4_CONFIG_BITS_CW_PRIMITIVES (1 << 2)
2547 +#define VC4_CONFIG_BITS_ENABLE_PRIM_BACK (1 << 1)
2548 +#define VC4_CONFIG_BITS_ENABLE_PRIM_FRONT (1 << 0)
2551 +/** @{ bits in the last u8 of VC4_PACKET_TILE_BINNING_MODE_CONFIG */
2552 +#define VC4_BIN_CONFIG_DB_NON_MS (1 << 7)
2554 +#define VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_MASK VC4_MASK(6, 5)
2555 +#define VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_SHIFT 5
2556 +#define VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_32 0
2557 +#define VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_64 1
2558 +#define VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_128 2
2559 +#define VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_256 3
2561 +#define VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_MASK VC4_MASK(4, 3)
2562 +#define VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_SHIFT 3
2563 +#define VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_32 0
2564 +#define VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_64 1
2565 +#define VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_128 2
2566 +#define VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_256 3
2568 +#define VC4_BIN_CONFIG_AUTO_INIT_TSDA (1 << 2)
2569 +#define VC4_BIN_CONFIG_TILE_BUFFER_64BIT (1 << 1)
2570 +#define VC4_BIN_CONFIG_MS_MODE_4X (1 << 0)
2573 +/** @{ bits in the last u16 of VC4_PACKET_TILE_RENDERING_MODE_CONFIG */
2574 +#define VC4_RENDER_CONFIG_DB_NON_MS (1 << 12)
2575 +#define VC4_RENDER_CONFIG_EARLY_Z_COVERAGE_DISABLE (1 << 11)
2576 +#define VC4_RENDER_CONFIG_EARLY_Z_DIRECTION_G (1 << 10)
2577 +#define VC4_RENDER_CONFIG_COVERAGE_MODE (1 << 9)
2578 +#define VC4_RENDER_CONFIG_ENABLE_VG_MASK (1 << 8)
2580 +/** The values of the field are VC4_TILING_FORMAT_* */
2581 +#define VC4_RENDER_CONFIG_MEMORY_FORMAT_MASK VC4_MASK(7, 6)
2582 +#define VC4_RENDER_CONFIG_MEMORY_FORMAT_SHIFT 6
2584 +#define VC4_RENDER_CONFIG_DECIMATE_MODE_1X (0 << 4)
2585 +#define VC4_RENDER_CONFIG_DECIMATE_MODE_4X (1 << 4)
2586 +#define VC4_RENDER_CONFIG_DECIMATE_MODE_16X (2 << 4)
2588 +#define VC4_RENDER_CONFIG_FORMAT_MASK VC4_MASK(3, 2)
2589 +#define VC4_RENDER_CONFIG_FORMAT_SHIFT 2
2590 +#define VC4_RENDER_CONFIG_FORMAT_BGR565_DITHERED 0
2591 +#define VC4_RENDER_CONFIG_FORMAT_RGBA8888 1
2592 +#define VC4_RENDER_CONFIG_FORMAT_BGR565 2
2594 +#define VC4_RENDER_CONFIG_TILE_BUFFER_64BIT (1 << 1)
2595 +#define VC4_RENDER_CONFIG_MS_MODE_4X (1 << 0)
2597 +#define VC4_PRIMITIVE_LIST_FORMAT_16_INDEX (1 << 4)
2598 +#define VC4_PRIMITIVE_LIST_FORMAT_32_XY (3 << 4)
2599 +#define VC4_PRIMITIVE_LIST_FORMAT_TYPE_POINTS (0 << 0)
2600 +#define VC4_PRIMITIVE_LIST_FORMAT_TYPE_LINES (1 << 0)
2601 +#define VC4_PRIMITIVE_LIST_FORMAT_TYPE_TRIANGLES (2 << 0)
2602 +#define VC4_PRIMITIVE_LIST_FORMAT_TYPE_RHT (3 << 0)
2604 +enum vc4_texture_data_type {
2605 + VC4_TEXTURE_TYPE_RGBA8888 = 0,
2606 + VC4_TEXTURE_TYPE_RGBX8888 = 1,
2607 + VC4_TEXTURE_TYPE_RGBA4444 = 2,
2608 + VC4_TEXTURE_TYPE_RGBA5551 = 3,
2609 + VC4_TEXTURE_TYPE_RGB565 = 4,
2610 + VC4_TEXTURE_TYPE_LUMINANCE = 5,
2611 + VC4_TEXTURE_TYPE_ALPHA = 6,
2612 + VC4_TEXTURE_TYPE_LUMALPHA = 7,
2613 + VC4_TEXTURE_TYPE_ETC1 = 8,
2614 + VC4_TEXTURE_TYPE_S16F = 9,
2615 + VC4_TEXTURE_TYPE_S8 = 10,
2616 + VC4_TEXTURE_TYPE_S16 = 11,
2617 + VC4_TEXTURE_TYPE_BW1 = 12,
2618 + VC4_TEXTURE_TYPE_A4 = 13,
2619 + VC4_TEXTURE_TYPE_A1 = 14,
2620 + VC4_TEXTURE_TYPE_RGBA64 = 15,
2621 + VC4_TEXTURE_TYPE_RGBA32R = 16,
2622 + VC4_TEXTURE_TYPE_YUV422R = 17,
2625 +#define VC4_TEX_P0_OFFSET_MASK VC4_MASK(31, 12)
2626 +#define VC4_TEX_P0_OFFSET_SHIFT 12
2627 +#define VC4_TEX_P0_CSWIZ_MASK VC4_MASK(11, 10)
2628 +#define VC4_TEX_P0_CSWIZ_SHIFT 10
2629 +#define VC4_TEX_P0_CMMODE_MASK VC4_MASK(9, 9)
2630 +#define VC4_TEX_P0_CMMODE_SHIFT 9
2631 +#define VC4_TEX_P0_FLIPY_MASK VC4_MASK(8, 8)
2632 +#define VC4_TEX_P0_FLIPY_SHIFT 8
2633 +#define VC4_TEX_P0_TYPE_MASK VC4_MASK(7, 4)
2634 +#define VC4_TEX_P0_TYPE_SHIFT 4
2635 +#define VC4_TEX_P0_MIPLVLS_MASK VC4_MASK(3, 0)
2636 +#define VC4_TEX_P0_MIPLVLS_SHIFT 0
2638 +#define VC4_TEX_P1_TYPE4_MASK VC4_MASK(31, 31)
2639 +#define VC4_TEX_P1_TYPE4_SHIFT 31
2640 +#define VC4_TEX_P1_HEIGHT_MASK VC4_MASK(30, 20)
2641 +#define VC4_TEX_P1_HEIGHT_SHIFT 20
2642 +#define VC4_TEX_P1_ETCFLIP_MASK VC4_MASK(19, 19)
2643 +#define VC4_TEX_P1_ETCFLIP_SHIFT 19
2644 +#define VC4_TEX_P1_WIDTH_MASK VC4_MASK(18, 8)
2645 +#define VC4_TEX_P1_WIDTH_SHIFT 8
2647 +#define VC4_TEX_P1_MAGFILT_MASK VC4_MASK(7, 7)
2648 +#define VC4_TEX_P1_MAGFILT_SHIFT 7
2649 +# define VC4_TEX_P1_MAGFILT_LINEAR 0
2650 +# define VC4_TEX_P1_MAGFILT_NEAREST 1
2652 +#define VC4_TEX_P1_MINFILT_MASK VC4_MASK(6, 4)
2653 +#define VC4_TEX_P1_MINFILT_SHIFT 4
2654 +# define VC4_TEX_P1_MINFILT_LINEAR 0
2655 +# define VC4_TEX_P1_MINFILT_NEAREST 1
2656 +# define VC4_TEX_P1_MINFILT_NEAR_MIP_NEAR 2
2657 +# define VC4_TEX_P1_MINFILT_NEAR_MIP_LIN 3
2658 +# define VC4_TEX_P1_MINFILT_LIN_MIP_NEAR 4
2659 +# define VC4_TEX_P1_MINFILT_LIN_MIP_LIN 5
2661 +#define VC4_TEX_P1_WRAP_T_MASK VC4_MASK(3, 2)
2662 +#define VC4_TEX_P1_WRAP_T_SHIFT 2
2663 +#define VC4_TEX_P1_WRAP_S_MASK VC4_MASK(1, 0)
2664 +#define VC4_TEX_P1_WRAP_S_SHIFT 0
2665 +# define VC4_TEX_P1_WRAP_REPEAT 0
2666 +# define VC4_TEX_P1_WRAP_CLAMP 1
2667 +# define VC4_TEX_P1_WRAP_MIRROR 2
2668 +# define VC4_TEX_P1_WRAP_BORDER 3
2670 +#define VC4_TEX_P2_PTYPE_MASK VC4_MASK(31, 30)
2671 +#define VC4_TEX_P2_PTYPE_SHIFT 30
2672 +# define VC4_TEX_P2_PTYPE_IGNORED 0
2673 +# define VC4_TEX_P2_PTYPE_CUBE_MAP_STRIDE 1
2674 +# define VC4_TEX_P2_PTYPE_CHILD_IMAGE_DIMENSIONS 2
2675 +# define VC4_TEX_P2_PTYPE_CHILD_IMAGE_OFFSETS 3
2677 +/* VC4_TEX_P2_PTYPE_CUBE_MAP_STRIDE bits */
2678 +#define VC4_TEX_P2_CMST_MASK VC4_MASK(29, 12)
2679 +#define VC4_TEX_P2_CMST_SHIFT 12
2680 +#define VC4_TEX_P2_BSLOD_MASK VC4_MASK(0, 0)
2681 +#define VC4_TEX_P2_BSLOD_SHIFT 0
2683 +/* VC4_TEX_P2_PTYPE_CHILD_IMAGE_DIMENSIONS */
2684 +#define VC4_TEX_P2_CHEIGHT_MASK VC4_MASK(22, 12)
2685 +#define VC4_TEX_P2_CHEIGHT_SHIFT 12
2686 +#define VC4_TEX_P2_CWIDTH_MASK VC4_MASK(10, 0)
2687 +#define VC4_TEX_P2_CWIDTH_SHIFT 0
2689 +/* VC4_TEX_P2_PTYPE_CHILD_IMAGE_OFFSETS */
2690 +#define VC4_TEX_P2_CYOFF_MASK VC4_MASK(22, 12)
2691 +#define VC4_TEX_P2_CYOFF_SHIFT 12
2692 +#define VC4_TEX_P2_CXOFF_MASK VC4_MASK(10, 0)
2693 +#define VC4_TEX_P2_CXOFF_SHIFT 0
2695 +#endif /* VC4_PACKET_H */
2696 --- a/drivers/gpu/drm/vc4/vc4_plane.c
2697 +++ b/drivers/gpu/drm/vc4/vc4_plane.c
2698 @@ -29,6 +29,14 @@ struct vc4_plane_state {
2700 u32 dlist_size; /* Number of dwords allocated for the display list */
2701 u32 dlist_count; /* Number of used dwords in the display list. */
2703 + /* Offset in the dlist to pointer word 0. */
2706 + /* Offset where the plane's dlist was last stored in the
2707 + hardware at vc4_crtc_atomic_flush() time.
2712 static inline struct vc4_plane_state *
2713 @@ -207,6 +215,8 @@ static int vc4_plane_mode_set(struct drm
2714 /* Position Word 3: Context. Written by the HVS. */
2715 vc4_dlist_write(vc4_state, 0xc0c0c0c0);
2717 + vc4_state->pw0_offset = vc4_state->dlist_count;
2719 /* Pointer Word 0: RGB / Y Pointer */
2720 vc4_dlist_write(vc4_state, bo->paddr + offset);
2722 @@ -258,6 +268,8 @@ u32 vc4_plane_write_dlist(struct drm_pla
2723 struct vc4_plane_state *vc4_state = to_vc4_plane_state(plane->state);
2726 + vc4_state->hw_dlist = dlist;
2728 /* Can't memcpy_toio() because it needs to be 32-bit writes. */
2729 for (i = 0; i < vc4_state->dlist_count; i++)
2730 writel(vc4_state->dlist[i], &dlist[i]);
2731 @@ -272,6 +284,34 @@ u32 vc4_plane_dlist_size(struct drm_plan
2732 return vc4_state->dlist_count;
2735 +/* Updates the plane to immediately (well, once the FIFO needs
2736 + * refilling) scan out from a new framebuffer.
2738 +void vc4_plane_async_set_fb(struct drm_plane *plane, struct drm_framebuffer *fb)
2740 + struct vc4_plane_state *vc4_state = to_vc4_plane_state(plane->state);
2741 + struct drm_gem_cma_object *bo = drm_fb_cma_get_gem_obj(fb, 0);
2744 + /* We're skipping the address adjustment for negative origin,
2745 + * because this is only called on the primary plane.
2747 + WARN_ON_ONCE(plane->state->crtc_x < 0 || plane->state->crtc_y < 0);
2748 + addr = bo->paddr + fb->offsets[0];
2750 + /* Write the new address into the hardware immediately. The
2751 + * scanout will start from this address as soon as the FIFO
2752 + * needs to refill with pixels.
2754 + writel(addr, &vc4_state->hw_dlist[vc4_state->pw0_offset]);
2756 + /* Also update the CPU-side dlist copy, so that any later
2757 + * atomic updates that don't do a new modeset on our plane
2758 + * also use our updated address.
2760 + vc4_state->dlist[vc4_state->pw0_offset] = addr;
2763 static const struct drm_plane_helper_funcs vc4_plane_helper_funcs = {
2767 +++ b/drivers/gpu/drm/vc4/vc4_qpu_defines.h
2770 + * Copyright © 2014 Broadcom
2772 + * Permission is hereby granted, free of charge, to any person obtaining a
2773 + * copy of this software and associated documentation files (the "Software"),
2774 + * to deal in the Software without restriction, including without limitation
2775 + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
2776 + * and/or sell copies of the Software, and to permit persons to whom the
2777 + * Software is furnished to do so, subject to the following conditions:
2779 + * The above copyright notice and this permission notice (including the next
2780 + * paragraph) shall be included in all copies or substantial portions of the
2783 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
2784 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
2785 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
2786 + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
2787 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
2788 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
2789 + * IN THE SOFTWARE.
2792 +#ifndef VC4_QPU_DEFINES_H
2793 +#define VC4_QPU_DEFINES_H
2818 + QPU_A_V8ADDS = 30,
2819 + QPU_A_V8SUBS = 31,
2834 + QPU_R_FRAG_PAYLOAD_ZW = 15, /* W for A file, Z for B file */
2835 + /* 0-31 are the plain regfile a or b fields */
2838 + QPU_R_ELEM_QPU = 38,
2840 + QPU_R_XY_PIXEL_COORD = 41,
2841 + QPU_R_MS_REV_FLAGS = 41,
2843 + QPU_R_VPM_LD_BUSY,
2844 + QPU_R_VPM_LD_WAIT,
2845 + QPU_R_MUTEX_ACQUIRE,
2849 + /* 0-31 are the plain regfile a or b fields */
2850 + QPU_W_ACC0 = 32, /* aka r0 */
2858 + QPU_W_UNIFORMS_ADDRESS,
2859 + QPU_W_QUAD_XY, /* X for regfile a, Y for regfile b */
2860 + QPU_W_MS_FLAGS = 42,
2861 + QPU_W_REV_FLAG = 42,
2862 + QPU_W_TLB_STENCIL_SETUP = 43,
2864 + QPU_W_TLB_COLOR_MS,
2865 + QPU_W_TLB_COLOR_ALL,
2866 + QPU_W_TLB_ALPHA_MASK,
2868 + QPU_W_VPMVCD_SETUP, /* LD for regfile a, ST for regfile b */
2869 + QPU_W_VPM_ADDR, /* LD for regfile a, ST for regfile b */
2870 + QPU_W_MUTEX_RELEASE,
2872 + QPU_W_SFU_RECIPSQRT,
2885 +enum qpu_sig_bits {
2886 + QPU_SIG_SW_BREAKPOINT,
2888 + QPU_SIG_THREAD_SWITCH,
2890 + QPU_SIG_WAIT_FOR_SCOREBOARD,
2891 + QPU_SIG_SCOREBOARD_UNLOCK,
2892 + QPU_SIG_LAST_THREAD_SWITCH,
2893 + QPU_SIG_COVERAGE_LOAD,
2894 + QPU_SIG_COLOR_LOAD,
2895 + QPU_SIG_COLOR_LOAD_END,
2896 + QPU_SIG_LOAD_TMU0,
2897 + QPU_SIG_LOAD_TMU1,
2898 + QPU_SIG_ALPHA_MASK_LOAD,
2899 + QPU_SIG_SMALL_IMM,
2905 + /* hardware mux values */
2915 + /* non-hardware mux values */
2930 +enum qpu_pack_mul {
2932 + QPU_PACK_MUL_8888 = 3, /* replicated to each 8 bits of the 32-bit dst. */
2941 + /* convert to 16 bit float if float input, or to int16. */
2944 + /* replicated to each 8 bits of the 32-bit dst. */
2946 + /* Convert to 8-bit unsigned int. */
2952 + /* Saturating variants of the previous instructions. */
2953 + QPU_PACK_A_32_SAT, /* int-only */
2954 + QPU_PACK_A_16A_SAT, /* int or float */
2955 + QPU_PACK_A_16B_SAT,
2956 + QPU_PACK_A_8888_SAT,
2957 + QPU_PACK_A_8A_SAT,
2958 + QPU_PACK_A_8B_SAT,
2959 + QPU_PACK_A_8C_SAT,
2960 + QPU_PACK_A_8D_SAT,
2963 +enum qpu_unpack_r4 {
2964 + QPU_UNPACK_R4_NOP,
2965 + QPU_UNPACK_R4_F16A_TO_F32,
2966 + QPU_UNPACK_R4_F16B_TO_F32,
2967 + QPU_UNPACK_R4_8D_REP,
2974 +#define QPU_MASK(high, low) ((((uint64_t)1<<((high)-(low)+1))-1)<<(low))
2975 +/* Using the GNU statement expression extension */
2976 +#define QPU_SET_FIELD(value, field) \
2978 + uint64_t fieldval = (uint64_t)(value) << field ## _SHIFT; \
2979 + assert((fieldval & ~ field ## _MASK) == 0); \
2980 + fieldval & field ## _MASK; \
2983 +#define QPU_GET_FIELD(word, field) ((uint32_t)(((word) & field ## _MASK) >> field ## _SHIFT))
2985 +#define QPU_SIG_SHIFT 60
2986 +#define QPU_SIG_MASK QPU_MASK(63, 60)
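A usage sketch for the field macros (the instruction word here is made up,
not taken from the driver):

	/* Pack a signal into bits 63:60, then read it back. */
	uint64_t inst = QPU_SET_FIELD(QPU_SIG_LOAD_TMU0, QPU_SIG);
	uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG); /* == QPU_SIG_LOAD_TMU0 */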
2988 +#define QPU_UNPACK_SHIFT 57
2989 +#define QPU_UNPACK_MASK QPU_MASK(59, 57)
2992 + * If set, the pack field means PACK_MUL or R4 packing, instead of normal
2993 + * regfile a packing.
2995 +#define QPU_PM ((uint64_t)1 << 56)
2997 +#define QPU_PACK_SHIFT 52
2998 +#define QPU_PACK_MASK QPU_MASK(55, 52)
3000 +#define QPU_COND_ADD_SHIFT 49
3001 +#define QPU_COND_ADD_MASK QPU_MASK(51, 49)
3002 +#define QPU_COND_MUL_SHIFT 46
3003 +#define QPU_COND_MUL_MASK QPU_MASK(48, 46)
3005 +#define QPU_SF ((uint64_t)1 << 45)
3007 +#define QPU_WADDR_ADD_SHIFT 38
3008 +#define QPU_WADDR_ADD_MASK QPU_MASK(43, 38)
3009 +#define QPU_WADDR_MUL_SHIFT 32
3010 +#define QPU_WADDR_MUL_MASK QPU_MASK(37, 32)
3012 +#define QPU_OP_MUL_SHIFT 29
3013 +#define QPU_OP_MUL_MASK QPU_MASK(31, 29)
3015 +#define QPU_RADDR_A_SHIFT 18
3016 +#define QPU_RADDR_A_MASK QPU_MASK(23, 18)
3017 +#define QPU_RADDR_B_SHIFT 12
3018 +#define QPU_RADDR_B_MASK QPU_MASK(17, 12)
3019 +#define QPU_SMALL_IMM_SHIFT 12
3020 +#define QPU_SMALL_IMM_MASK QPU_MASK(17, 12)
3022 +#define QPU_ADD_A_SHIFT 9
3023 +#define QPU_ADD_A_MASK QPU_MASK(11, 9)
3024 +#define QPU_ADD_B_SHIFT 6
3025 +#define QPU_ADD_B_MASK QPU_MASK(8, 6)
3026 +#define QPU_MUL_A_SHIFT 3
3027 +#define QPU_MUL_A_MASK QPU_MASK(5, 3)
3028 +#define QPU_MUL_B_SHIFT 0
3029 +#define QPU_MUL_B_MASK QPU_MASK(2, 0)
3031 +#define QPU_WS ((uint64_t)1 << 44)
3033 +#define QPU_OP_ADD_SHIFT 24
3034 +#define QPU_OP_ADD_MASK QPU_MASK(28, 24)
3036 +#endif /* VC4_QPU_DEFINES_H */
3038 +++ b/drivers/gpu/drm/vc4/vc4_render_cl.c
3041 + * Copyright © 2014-2015 Broadcom
3043 + * Permission is hereby granted, free of charge, to any person obtaining a
3044 + * copy of this software and associated documentation files (the "Software"),
3045 + * to deal in the Software without restriction, including without limitation
3046 + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
3047 + * and/or sell copies of the Software, and to permit persons to whom the
3048 + * Software is furnished to do so, subject to the following conditions:
3050 + * The above copyright notice and this permission notice (including the next
3051 + * paragraph) shall be included in all copies or substantial portions of the
3054 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
3055 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
3056 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
3057 + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
3058 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
3059 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
3060 + * IN THE SOFTWARE.
3064 + * DOC: Render command list generation
3066 + * In the VC4 driver, render command list generation is performed by the
3067 + * kernel instead of userspace. We do this because validating a
3068 + * user-submitted command list is hard to get right and has high CPU overhead,
3069 + * while the number of valid configurations for render command lists is
3070 + * actually fairly low.
3073 +#include "uapi/drm/vc4_drm.h"
3074 +#include "vc4_drv.h"
3075 +#include "vc4_packet.h"
3077 +struct vc4_rcl_setup {
3078 + struct drm_gem_cma_object *color_read;
3079 + struct drm_gem_cma_object *color_ms_write;
3080 + struct drm_gem_cma_object *zs_read;
3081 + struct drm_gem_cma_object *zs_write;
3083 + struct drm_gem_cma_object *rcl;
3087 +static inline void rcl_u8(struct vc4_rcl_setup *setup, u8 val)
3089 + *(u8 *)(setup->rcl->vaddr + setup->next_offset) = val;
3090 + setup->next_offset += 1;
3093 +static inline void rcl_u16(struct vc4_rcl_setup *setup, u16 val)
3095 + *(u16 *)(setup->rcl->vaddr + setup->next_offset) = val;
3096 + setup->next_offset += 2;
3099 +static inline void rcl_u32(struct vc4_rcl_setup *setup, u32 val)
3101 + *(u32 *)(setup->rcl->vaddr + setup->next_offset) = val;
3102 + setup->next_offset += 4;
3107 + * Emits a no-op STORE_TILE_BUFFER_GENERAL.
3109 + * If we emit a PACKET_TILE_COORDINATES, it must be followed by a store of
3110 + * some sort before another load is triggered.
3112 +static void vc4_store_before_load(struct vc4_rcl_setup *setup)
3114 + rcl_u8(setup, VC4_PACKET_STORE_TILE_BUFFER_GENERAL);
3116 + VC4_SET_FIELD(VC4_LOADSTORE_TILE_BUFFER_NONE,
3117 + VC4_LOADSTORE_TILE_BUFFER_BUFFER) |
3118 + VC4_STORE_TILE_BUFFER_DISABLE_COLOR_CLEAR |
3119 + VC4_STORE_TILE_BUFFER_DISABLE_ZS_CLEAR |
3120 + VC4_STORE_TILE_BUFFER_DISABLE_VG_MASK_CLEAR);
3121 + rcl_u32(setup, 0); /* no address, since we're in None mode */
3125 + * Emits a PACKET_TILE_COORDINATES if one isn't already pending.
3127 + * The tile coordinates packet triggers a pending load if there is one, is
3128 + * used for clipping during rendering, and determines where loads/stores
3129 + * happen relative to their base address.
3131 +static void vc4_tile_coordinates(struct vc4_rcl_setup *setup,
3132 + uint32_t x, uint32_t y)
3134 + rcl_u8(setup, VC4_PACKET_TILE_COORDINATES);
3139 +static void emit_tile(struct vc4_exec_info *exec,
3140 + struct vc4_rcl_setup *setup,
3141 + uint8_t x, uint8_t y, bool first, bool last)
3143 + struct drm_vc4_submit_cl *args = exec->args;
3144 + bool has_bin = args->bin_cl_size != 0;
3146 + /* Note that the load doesn't actually occur until the
3147 + * tile coords packet is processed, and only one load
3148 + * may be outstanding at a time.
3150 + if (setup->color_read) {
3151 + rcl_u8(setup, VC4_PACKET_LOAD_TILE_BUFFER_GENERAL);
3152 + rcl_u16(setup, args->color_read.bits);
3154 + setup->color_read->paddr + args->color_read.offset);
3157 + if (setup->zs_read) {
3158 + if (setup->color_read) {
3159 + /* Exec previous load. */
3160 + vc4_tile_coordinates(setup, x, y);
3161 + vc4_store_before_load(setup);
3164 + rcl_u8(setup, VC4_PACKET_LOAD_TILE_BUFFER_GENERAL);
3165 + rcl_u16(setup, args->zs_read.bits);
3166 + rcl_u32(setup, setup->zs_read->paddr + args->zs_read.offset);
3169 + /* Clipping depends on tile coordinates having been
3170 + * emitted, so we always need one here.
3172 + vc4_tile_coordinates(setup, x, y);
3174 + /* Wait for the binner before jumping to the first
3177 + if (first && has_bin)
3178 + rcl_u8(setup, VC4_PACKET_WAIT_ON_SEMAPHORE);
3181 + rcl_u8(setup, VC4_PACKET_BRANCH_TO_SUB_LIST);
3182 + rcl_u32(setup, (exec->tile_bo->paddr +
3183 + exec->tile_alloc_offset +
3184 + (y * exec->bin_tiles_x + x) * 32));
3187 + if (setup->zs_write) {
3188 + rcl_u8(setup, VC4_PACKET_STORE_TILE_BUFFER_GENERAL);
3189 + rcl_u16(setup, args->zs_write.bits |
3190 + (setup->color_ms_write ?
3191 + VC4_STORE_TILE_BUFFER_DISABLE_COLOR_CLEAR : 0));
3193 + (setup->zs_write->paddr + args->zs_write.offset) |
3194 + ((last && !setup->color_ms_write) ?
3195 + VC4_LOADSTORE_TILE_BUFFER_EOF : 0));
3198 + if (setup->color_ms_write) {
3199 + if (setup->zs_write) {
3200 + /* Reset after previous store */
3201 + vc4_tile_coordinates(setup, x, y);
3205 + rcl_u8(setup, VC4_PACKET_STORE_MS_TILE_BUFFER_AND_EOF);
3207 + rcl_u8(setup, VC4_PACKET_STORE_MS_TILE_BUFFER);
3211 +static int vc4_create_rcl_bo(struct drm_device *dev, struct vc4_exec_info *exec,
3212 + struct vc4_rcl_setup *setup)
3214 + struct drm_vc4_submit_cl *args = exec->args;
3215 + bool has_bin = args->bin_cl_size != 0;
3216 + uint8_t min_x_tile = args->min_x_tile;
3217 + uint8_t min_y_tile = args->min_y_tile;
3218 + uint8_t max_x_tile = args->max_x_tile;
3219 + uint8_t max_y_tile = args->max_y_tile;
3220 + uint8_t xtiles = max_x_tile - min_x_tile + 1;
3221 + uint8_t ytiles = max_y_tile - min_y_tile + 1;
3223 + uint32_t size, loop_body_size;
3225 + size = VC4_PACKET_TILE_RENDERING_MODE_CONFIG_SIZE;
3226 + loop_body_size = VC4_PACKET_TILE_COORDINATES_SIZE;
3228 + if (args->flags & VC4_SUBMIT_CL_USE_CLEAR_COLOR) {
3229 + size += VC4_PACKET_CLEAR_COLORS_SIZE +
3230 + VC4_PACKET_TILE_COORDINATES_SIZE +
3231 + VC4_PACKET_STORE_TILE_BUFFER_GENERAL_SIZE;
3234 + if (setup->color_read) {
3235 + loop_body_size += (VC4_PACKET_LOAD_TILE_BUFFER_GENERAL_SIZE);
3237 + if (setup->zs_read) {
3238 + if (setup->color_read) {
3239 + loop_body_size += VC4_PACKET_TILE_COORDINATES_SIZE;
3240 + loop_body_size += VC4_PACKET_STORE_TILE_BUFFER_GENERAL_SIZE;
3242 + loop_body_size += VC4_PACKET_LOAD_TILE_BUFFER_GENERAL_SIZE;
3246 + size += VC4_PACKET_WAIT_ON_SEMAPHORE_SIZE;
3247 + loop_body_size += VC4_PACKET_BRANCH_TO_SUB_LIST_SIZE;
3250 + if (setup->zs_write)
3251 + loop_body_size += VC4_PACKET_STORE_TILE_BUFFER_GENERAL_SIZE;
3252 + if (setup->color_ms_write) {
3253 + if (setup->zs_write)
3254 + loop_body_size += VC4_PACKET_TILE_COORDINATES_SIZE;
3255 + loop_body_size += VC4_PACKET_STORE_MS_TILE_BUFFER_SIZE;
3257 + size += xtiles * ytiles * loop_body_size;
3259 + setup->rcl = &vc4_bo_create(dev, size)->base;
3262 + list_add_tail(&to_vc4_bo(&setup->rcl->base)->unref_head,
3263 + &exec->unref_list);
3265 + rcl_u8(setup, VC4_PACKET_TILE_RENDERING_MODE_CONFIG);
3267 + (setup->color_ms_write ?
3268 + (setup->color_ms_write->paddr +
3269 + args->color_ms_write.offset) :
3271 + rcl_u16(setup, args->width);
3272 + rcl_u16(setup, args->height);
3273 + rcl_u16(setup, args->color_ms_write.bits);
3275 + /* The tile buffer gets cleared when the previous tile is stored. If
3276 + * the clear values changed between frames, then the tile buffer has
3277 + * stale clear values in it, so we have to do a store in None mode (no
3278 + * writes) so that we trigger the tile buffer clear.
3280 + if (args->flags & VC4_SUBMIT_CL_USE_CLEAR_COLOR) {
3281 + rcl_u8(setup, VC4_PACKET_CLEAR_COLORS);
3282 + rcl_u32(setup, args->clear_color[0]);
3283 + rcl_u32(setup, args->clear_color[1]);
3284 + rcl_u32(setup, args->clear_z);
3285 + rcl_u8(setup, args->clear_s);
3287 + vc4_tile_coordinates(setup, 0, 0);
3289 + rcl_u8(setup, VC4_PACKET_STORE_TILE_BUFFER_GENERAL);
3290 + rcl_u16(setup, VC4_LOADSTORE_TILE_BUFFER_NONE);
3291 + rcl_u32(setup, 0); /* no address, since we're in None mode */
3294 + for (y = min_y_tile; y <= max_y_tile; y++) {
3295 + for (x = min_x_tile; x <= max_x_tile; x++) {
3296 + bool first = (x == min_x_tile && y == min_y_tile);
3297 + bool last = (x == max_x_tile && y == max_y_tile);
3298 + emit_tile(exec, setup, x, y, first, last);
3302 + BUG_ON(setup->next_offset != size);
3303 + exec->ct1ca = setup->rcl->paddr;
3304 + exec->ct1ea = setup->rcl->paddr + setup->next_offset;
3309 +static int vc4_rcl_surface_setup(struct vc4_exec_info *exec,
3310 + struct drm_gem_cma_object **obj,
3311 + struct drm_vc4_submit_rcl_surface *surf)
3313 + uint8_t tiling = VC4_GET_FIELD(surf->bits,
3314 + VC4_LOADSTORE_TILE_BUFFER_TILING);
3315 + uint8_t buffer = VC4_GET_FIELD(surf->bits,
3316 + VC4_LOADSTORE_TILE_BUFFER_BUFFER);
3317 + uint8_t format = VC4_GET_FIELD(surf->bits,
3318 + VC4_LOADSTORE_TILE_BUFFER_FORMAT);
3321 + if (surf->pad != 0) {
3322 + DRM_ERROR("Padding unset\n");
3326 + if (surf->hindex == ~0)
3329 + if (!vc4_use_bo(exec, surf->hindex, VC4_MODE_RENDER, obj))
3332 + if (surf->bits & ~(VC4_LOADSTORE_TILE_BUFFER_TILING_MASK |
3333 + VC4_LOADSTORE_TILE_BUFFER_BUFFER_MASK |
3334 + VC4_LOADSTORE_TILE_BUFFER_FORMAT_MASK)) {
3335 + DRM_ERROR("Unknown bits in load/store: 0x%04x\n",
3340 + if (tiling > VC4_TILING_FORMAT_LT) {
3341 + DRM_ERROR("Bad tiling format\n");
3345 + if (buffer == VC4_LOADSTORE_TILE_BUFFER_ZS) {
3346 + if (format != 0) {
3347 + DRM_ERROR("No color format should be set for ZS\n");
3351 + } else if (buffer == VC4_LOADSTORE_TILE_BUFFER_COLOR) {
3353 + case VC4_LOADSTORE_TILE_BUFFER_BGR565:
3354 + case VC4_LOADSTORE_TILE_BUFFER_BGR565_DITHER:
3357 + case VC4_LOADSTORE_TILE_BUFFER_RGBA8888:
3361 + DRM_ERROR("Bad tile buffer format\n");
3365 + DRM_ERROR("Bad load/store buffer %d.\n", buffer);
3369 + if (surf->offset & 0xf) {
3370 + DRM_ERROR("load/store buffer must be 16b aligned.\n");
3374 + if (!vc4_check_tex_size(exec, *obj, surf->offset, tiling,
3375 + exec->args->width, exec->args->height, cpp)) {
3383 +vc4_rcl_ms_surface_setup(struct vc4_exec_info *exec,
3384 + struct drm_gem_cma_object **obj,
3385 + struct drm_vc4_submit_rcl_surface *surf)
3387 + uint8_t tiling = VC4_GET_FIELD(surf->bits,
3388 + VC4_RENDER_CONFIG_MEMORY_FORMAT);
3389 + uint8_t format = VC4_GET_FIELD(surf->bits,
3390 + VC4_RENDER_CONFIG_FORMAT);
3393 + if (surf->pad != 0) {
3394 + DRM_ERROR("Padding unset\n");
3398 + if (surf->bits & ~(VC4_RENDER_CONFIG_MEMORY_FORMAT_MASK |
3399 + VC4_RENDER_CONFIG_FORMAT_MASK)) {
3400 + DRM_ERROR("Unknown bits in render config: 0x%04x\n",
3405 + if (surf->hindex == ~0)
3408 + if (!vc4_use_bo(exec, surf->hindex, VC4_MODE_RENDER, obj))
3411 + if (tiling > VC4_TILING_FORMAT_LT) {
3412 + DRM_ERROR("Bad tiling format\n");
3417 + case VC4_RENDER_CONFIG_FORMAT_BGR565_DITHERED:
3418 + case VC4_RENDER_CONFIG_FORMAT_BGR565:
3421 + case VC4_RENDER_CONFIG_FORMAT_RGBA8888:
3425 + DRM_ERROR("Bad tile buffer format\n");
3429 + if (!vc4_check_tex_size(exec, *obj, surf->offset, tiling,
3430 + exec->args->width, exec->args->height, cpp)) {
3437 +int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec)
3439 + struct vc4_rcl_setup setup = {0};
3440 + struct drm_vc4_submit_cl *args = exec->args;
3441 + bool has_bin = args->bin_cl_size != 0;
3444 + if (args->min_x_tile > args->max_x_tile ||
3445 + args->min_y_tile > args->max_y_tile) {
3446 + DRM_ERROR("Bad render tile set (%d,%d)-(%d,%d)\n",
3447 + args->min_x_tile, args->min_y_tile,
3448 + args->max_x_tile, args->max_y_tile);
3453 + (args->max_x_tile > exec->bin_tiles_x ||
3454 + args->max_y_tile > exec->bin_tiles_y)) {
3455 + DRM_ERROR("Render tiles (%d,%d) outside of bin config (%d,%d)\n",
3456 + args->max_x_tile, args->max_y_tile,
3457 + exec->bin_tiles_x, exec->bin_tiles_y);
3461 + ret = vc4_rcl_surface_setup(exec, &setup.color_read, &args->color_read);
3465 + ret = vc4_rcl_ms_surface_setup(exec, &setup.color_ms_write,
3466 + &args->color_ms_write);
3470 + ret = vc4_rcl_surface_setup(exec, &setup.zs_read, &args->zs_read);
3474 + ret = vc4_rcl_surface_setup(exec, &setup.zs_write, &args->zs_write);
3478 + /* We shouldn't even have the job submitted to us if there's no
3479 + * surface to write out.
3481 + if (!setup.color_ms_write && !setup.zs_write) {
3482 + DRM_ERROR("RCL requires color or Z/S write\n");
3486 + return vc4_create_rcl_bo(dev, exec, &setup);
3489 +++ b/drivers/gpu/drm/vc4/vc4_trace.h
3492 + * Copyright (C) 2015 Broadcom
3494 + * This program is free software; you can redistribute it and/or modify
3495 + * it under the terms of the GNU General Public License version 2 as
3496 + * published by the Free Software Foundation.
3499 +#if !defined(_VC4_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
3500 +#define _VC4_TRACE_H_
3502 +#include <linux/stringify.h>
3503 +#include <linux/types.h>
3504 +#include <linux/tracepoint.h>
3506 +#undef TRACE_SYSTEM
3507 +#define TRACE_SYSTEM vc4
3508 +#define TRACE_INCLUDE_FILE vc4_trace
3510 +TRACE_EVENT(vc4_wait_for_seqno_begin,
3511 + TP_PROTO(struct drm_device *dev, uint64_t seqno, uint64_t timeout),
3512 + TP_ARGS(dev, seqno, timeout),
3516 + __field(u64, seqno)
3517 + __field(u64, timeout)
3521 + __entry->dev = dev->primary->index;
3522 + __entry->seqno = seqno;
3523 + __entry->timeout = timeout;
3526 + TP_printk("dev=%u, seqno=%llu, timeout=%llu",
3527 + __entry->dev, __entry->seqno, __entry->timeout)
3530 +TRACE_EVENT(vc4_wait_for_seqno_end,
3531 + TP_PROTO(struct drm_device *dev, uint64_t seqno),
3532 + TP_ARGS(dev, seqno),
3536 + __field(u64, seqno)
3540 + __entry->dev = dev->primary->index;
3541 + __entry->seqno = seqno;
3544 + TP_printk("dev=%u, seqno=%llu",
3545 + __entry->dev, __entry->seqno)
3548 +#endif /* _VC4_TRACE_H_ */
3550 +/* This part must be outside protection */
3551 +#undef TRACE_INCLUDE_PATH
3552 +#define TRACE_INCLUDE_PATH .
3553 +#include <trace/define_trace.h>
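Each TRACE_EVENT() above generates a trace_<name>() helper. A sketch of the
intended call pattern around the seqno wait (the wait loop itself lives in
vc4_gem.c and is assumed here):

	trace_vc4_wait_for_seqno_begin(dev, seqno, timeout);
	/* ... block until vc4->finished_seqno >= seqno, or timeout ... */
	trace_vc4_wait_for_seqno_end(dev, seqno);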
3555 +++ b/drivers/gpu/drm/vc4/vc4_trace_points.c
3558 + * Copyright (C) 2015 Broadcom
3560 + * This program is free software; you can redistribute it and/or modify
3561 + * it under the terms of the GNU General Public License version 2 as
3562 + * published by the Free Software Foundation.
3565 +#include "vc4_drv.h"
3567 +#ifndef __CHECKER__
3568 +#define CREATE_TRACE_POINTS
3569 +#include "vc4_trace.h"
3572 +++ b/drivers/gpu/drm/vc4/vc4_v3d.c
3575 + * Copyright (c) 2014 The Linux Foundation. All rights reserved.
3576 + * Copyright (C) 2013 Red Hat
3577 + * Author: Rob Clark <robdclark@gmail.com>
3579 + * This program is free software; you can redistribute it and/or modify it
3580 + * under the terms of the GNU General Public License version 2 as published by
3581 + * the Free Software Foundation.
3583 + * This program is distributed in the hope that it will be useful, but WITHOUT
3584 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
3585 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
3588 + * You should have received a copy of the GNU General Public License along with
3589 + * this program. If not, see <http://www.gnu.org/licenses/>.
3592 +#include "linux/component.h"
3593 +#include "soc/bcm2835/raspberrypi-firmware.h"
3594 +#include "vc4_drv.h"
3595 +#include "vc4_regs.h"
3597 +#ifdef CONFIG_DEBUG_FS
3598 +#define REGDEF(reg) { reg, #reg }
3599 +static const struct {
3602 +} vc4_reg_defs[] = {
3603 + REGDEF(V3D_IDENT0),
3604 + REGDEF(V3D_IDENT1),
3605 + REGDEF(V3D_IDENT2),
3606 + REGDEF(V3D_SCRATCH),
3607 + REGDEF(V3D_L2CACTL),
3608 + REGDEF(V3D_SLCACTL),
3609 + REGDEF(V3D_INTCTL),
3610 + REGDEF(V3D_INTENA),
3611 + REGDEF(V3D_INTDIS),
3612 + REGDEF(V3D_CT0CS),
3613 + REGDEF(V3D_CT1CS),
3614 + REGDEF(V3D_CT0EA),
3615 + REGDEF(V3D_CT1EA),
3616 + REGDEF(V3D_CT0CA),
3617 + REGDEF(V3D_CT1CA),
3618 + REGDEF(V3D_CT00RA0),
3619 + REGDEF(V3D_CT01RA0),
3620 + REGDEF(V3D_CT0LC),
3621 + REGDEF(V3D_CT1LC),
3622 + REGDEF(V3D_CT0PC),
3623 + REGDEF(V3D_CT1PC),
3632 + REGDEF(V3D_SQRSV0),
3633 + REGDEF(V3D_SQRSV1),
3634 + REGDEF(V3D_SQCNTL),
3635 + REGDEF(V3D_SRQPC),
3636 + REGDEF(V3D_SRQUA),
3637 + REGDEF(V3D_SRQUL),
3638 + REGDEF(V3D_SRQCS),
3639 + REGDEF(V3D_VPACNTL),
3640 + REGDEF(V3D_VPMBASE),
3641 + REGDEF(V3D_PCTRC),
3642 + REGDEF(V3D_PCTRE),
3643 + REGDEF(V3D_PCTR0),
3644 + REGDEF(V3D_PCTRS0),
3645 + REGDEF(V3D_PCTR1),
3646 + REGDEF(V3D_PCTRS1),
3647 + REGDEF(V3D_PCTR2),
3648 + REGDEF(V3D_PCTRS2),
3649 + REGDEF(V3D_PCTR3),
3650 + REGDEF(V3D_PCTRS3),
3651 + REGDEF(V3D_PCTR4),
3652 + REGDEF(V3D_PCTRS4),
3653 + REGDEF(V3D_PCTR5),
3654 + REGDEF(V3D_PCTRS5),
3655 + REGDEF(V3D_PCTR6),
3656 + REGDEF(V3D_PCTRS6),
3657 + REGDEF(V3D_PCTR7),
3658 + REGDEF(V3D_PCTRS7),
3659 + REGDEF(V3D_PCTR8),
3660 + REGDEF(V3D_PCTRS8),
3661 + REGDEF(V3D_PCTR9),
3662 + REGDEF(V3D_PCTRS9),
3663 + REGDEF(V3D_PCTR10),
3664 + REGDEF(V3D_PCTRS10),
3665 + REGDEF(V3D_PCTR11),
3666 + REGDEF(V3D_PCTRS11),
3667 + REGDEF(V3D_PCTR12),
3668 + REGDEF(V3D_PCTRS12),
3669 + REGDEF(V3D_PCTR13),
3670 + REGDEF(V3D_PCTRS13),
3671 + REGDEF(V3D_PCTR14),
3672 + REGDEF(V3D_PCTRS14),
3673 + REGDEF(V3D_PCTR15),
3674 + REGDEF(V3D_PCTRS15),
3676 + REGDEF(V3D_FDBGO),
3677 + REGDEF(V3D_FDBGB),
3678 + REGDEF(V3D_FDBGR),
3679 + REGDEF(V3D_FDBGS),
3680 + REGDEF(V3D_ERRSTAT),
3683 +int vc4_v3d_debugfs_regs(struct seq_file *m, void *unused)
3685 + struct drm_info_node *node = (struct drm_info_node *) m->private;
3686 + struct drm_device *dev = node->minor->dev;
3687 + struct vc4_dev *vc4 = to_vc4_dev(dev);
3690 + for (i = 0; i < ARRAY_SIZE(vc4_reg_defs); i++) {
3691 + seq_printf(m, "%s (0x%04x): 0x%08x\n",
3692 + vc4_reg_defs[i].name, vc4_reg_defs[i].reg,
3693 + V3D_READ(vc4_reg_defs[i].reg));
3699 +int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused)
3701 + struct drm_info_node *node = (struct drm_info_node *) m->private;
3702 + struct drm_device *dev = node->minor->dev;
3703 + struct vc4_dev *vc4 = to_vc4_dev(dev);
3704 + uint32_t ident1 = V3D_READ(V3D_IDENT1);
3705 + uint32_t nslc = VC4_GET_FIELD(ident1, V3D_IDENT1_NSLC);
3706 + uint32_t tups = VC4_GET_FIELD(ident1, V3D_IDENT1_TUPS);
3707 + uint32_t qups = VC4_GET_FIELD(ident1, V3D_IDENT1_QUPS);
3709 + seq_printf(m, "Revision: %d\n", VC4_GET_FIELD(ident1, V3D_IDENT1_REV));
3710 + seq_printf(m, "Slices: %d\n", nslc);
3711 + seq_printf(m, "TMUs: %d\n", nslc * tups);
3712 + seq_printf(m, "QPUs: %d\n", nslc * qups);
3713 + seq_printf(m, "Semaphores: %d\n", VC4_GET_FIELD(ident1, V3D_IDENT1_NSEM));
3717 +#endif /* CONFIG_DEBUG_FS */
3720 + * Asks the firmware to turn on power to the V3D engine.
3722 + * This may be doable with just the clocks interface, though this
3723 + * packet does some other register setup from the firmware, too.
3726 +vc4_v3d_set_power(struct vc4_dev *vc4, bool on)
3730 + return rpi_firmware_property(vc4->firmware,
3731 + RPI_FIRMWARE_SET_ENABLE_QPU,
3732 + &packet, sizeof(packet));
3735 +static void vc4_v3d_init_hw(struct drm_device *dev)
3737 + struct vc4_dev *vc4 = to_vc4_dev(dev);
3739 + /* Take all the memory that would have been reserved for user
3740 + * QPU programs, since we don't have an interface for running
3743 + V3D_WRITE(V3D_VPMBASE, 0);
3746 +static int vc4_v3d_bind(struct device *dev, struct device *master, void *data)
3748 + struct platform_device *pdev = to_platform_device(dev);
3749 + struct drm_device *drm = dev_get_drvdata(master);
3750 + struct vc4_dev *vc4 = to_vc4_dev(drm);
3751 + struct vc4_v3d *v3d = NULL;
3754 + v3d = devm_kzalloc(&pdev->dev, sizeof(*v3d), GFP_KERNEL);
3760 + v3d->regs = vc4_ioremap_regs(pdev, 0);
3761 + if (IS_ERR(v3d->regs))
3762 + return PTR_ERR(v3d->regs);
3766 + ret = vc4_v3d_set_power(vc4, true);
3770 + if (V3D_READ(V3D_IDENT0) != V3D_EXPECTED_IDENT0) {
3771 + DRM_ERROR("V3D_IDENT0 read 0x%08x instead of 0x%08x\n",
3772 + V3D_READ(V3D_IDENT0), V3D_EXPECTED_IDENT0);
3776 + /* Reset the binner overflow address/size at setup, to be sure
3777 + * we don't reuse an old one.
3779 + V3D_WRITE(V3D_BPOA, 0);
3780 + V3D_WRITE(V3D_BPOS, 0);
3782 + vc4_v3d_init_hw(drm);
3784 + ret = drm_irq_install(drm, platform_get_irq(pdev, 0));
3786 + DRM_ERROR("Failed to install IRQ handler\n");
3793 +static void vc4_v3d_unbind(struct device *dev, struct device *master,
3796 + struct drm_device *drm = dev_get_drvdata(master);
3797 + struct vc4_dev *vc4 = to_vc4_dev(drm);
3799 + drm_irq_uninstall(drm);
3801 + /* Disable the binner's overflow memory address, so the next
3802 + * driver probe (if any) doesn't try to reuse our old
3805 + V3D_WRITE(V3D_BPOA, 0);
3806 + V3D_WRITE(V3D_BPOS, 0);
3808 + vc4_v3d_set_power(vc4, false);
3813 +static const struct component_ops vc4_v3d_ops = {
3814 + .bind = vc4_v3d_bind,
3815 + .unbind = vc4_v3d_unbind,
3818 +static int vc4_v3d_dev_probe(struct platform_device *pdev)
3820 + return component_add(&pdev->dev, &vc4_v3d_ops);
3823 +static int vc4_v3d_dev_remove(struct platform_device *pdev)
3825 + component_del(&pdev->dev, &vc4_v3d_ops);
3829 +static const struct of_device_id vc4_v3d_dt_match[] = {
3830 + { .compatible = "brcm,vc4-v3d" },
3834 +struct platform_driver vc4_v3d_driver = {
3835 + .probe = vc4_v3d_dev_probe,
3836 + .remove = vc4_v3d_dev_remove,
3838 + .name = "vc4_v3d",
3839 + .of_match_table = vc4_v3d_dt_match,
3843 +++ b/drivers/gpu/drm/vc4/vc4_validate.c
3846 + * Copyright © 2014 Broadcom
3848 + * Permission is hereby granted, free of charge, to any person obtaining a
3849 + * copy of this software and associated documentation files (the "Software"),
3850 + * to deal in the Software without restriction, including without limitation
3851 + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
3852 + * and/or sell copies of the Software, and to permit persons to whom the
3853 + * Software is furnished to do so, subject to the following conditions:
3855 + * The above copyright notice and this permission notice (including the next
3856 + * paragraph) shall be included in all copies or substantial portions of the
3859 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
3860 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
3861 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
3862 + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
3863 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
3864 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
3865 + * IN THE SOFTWARE.
3869 + * Command list validator for VC4.
3871 + * The VC4 has no IOMMU between it and system memory. So, a user with
3872 + * access to execute command lists could escalate privilege by
3873 + * overwriting system memory (drawing to it as a framebuffer) or
3874 + * reading system memory it shouldn't (reading it as a texture, or
3875 + * uniform data, or vertex data).
3877 + * This validates command lists to ensure that all accesses are within
3878 + * the bounds of the GEM objects referenced. It explicitly whitelists
3879 + * packets, and looks at the offsets in any address fields to make
3880 + * sure they're constrained within the BOs they reference.
3882 + * Note that because of the validation that's happening anyway, this
3883 + * is where GEM relocation processing happens.
3886 +#include "uapi/drm/vc4_drm.h"
3887 +#include "vc4_drv.h"
3888 +#include "vc4_packet.h"
3890 +#define VALIDATE_ARGS \
3891 + struct vc4_exec_info *exec, \
3892 + void *validated, \
3896 +/** Return the width in pixels of a 64-byte microtile. */
3898 +utile_width(int cpp)
3909 + DRM_ERROR("unknown cpp: %d\n", cpp);
3914 +/** Return the height in pixels of a 64-byte microtile. */
3916 +utile_height(int cpp)
3926 + DRM_ERROR("unknown cpp: %d\n", cpp);
3932 + * The texture unit decides what tiling format a particular miplevel uses
3933 + * according to this function, so we lay out our miptrees accordingly.
3936 +size_is_lt(uint32_t width, uint32_t height, int cpp)
3938 + return (width <= 4 * utile_width(cpp) ||
3939 + height <= 4 * utile_height(cpp));
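For example, assuming the usual 4x4-pixel 64-byte microtile for cpp == 4
(RGBA8888), size_is_lt() selects LT mode for any miplevel that is at most
16 pixels wide or at most 16 pixels tall; anything larger uses T format.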
3943 +vc4_use_bo(struct vc4_exec_info *exec,
3945 + enum vc4_bo_mode mode,
3946 + struct drm_gem_cma_object **obj)
3950 + if (hindex >= exec->bo_count) {
3951 + DRM_ERROR("BO index %d greater than BO count %d\n",
3952 + hindex, exec->bo_count);
3956 + if (exec->bo[hindex].mode != mode) {
3957 + if (exec->bo[hindex].mode == VC4_MODE_UNDECIDED) {
3958 + exec->bo[hindex].mode = mode;
3960 + DRM_ERROR("BO index %d reused with mode %d vs %d\n",
3961 + hindex, exec->bo[hindex].mode, mode);
3966 + *obj = exec->bo[hindex].bo;
3971 +vc4_use_handle(struct vc4_exec_info *exec,
3972 + uint32_t gem_handles_packet_index,
3973 + enum vc4_bo_mode mode,
3974 + struct drm_gem_cma_object **obj)
3976 + return vc4_use_bo(exec, exec->bo_index[gem_handles_packet_index],
3981 +gl_shader_rec_size(uint32_t pointer_bits)
3983 + uint32_t attribute_count = pointer_bits & 7;
3984 + bool extended = pointer_bits & 8;
3986 + if (attribute_count == 0)
3987 + attribute_count = 8;
3990 + return 100 + attribute_count * 4;
3992 + return 36 + attribute_count * 8;
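Worked example: pointer_bits of 0xb (extended bit 3 set, attribute count 3)
yields an extended record of 100 + 3 * 4 = 112 bytes, while plain
pointer_bits of 3 yields 36 + 3 * 8 = 60 bytes.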
3996 +vc4_check_tex_size(struct vc4_exec_info *exec, struct drm_gem_cma_object *fbo,
3997 + uint32_t offset, uint8_t tiling_format,
3998 + uint32_t width, uint32_t height, uint8_t cpp)
4000 + uint32_t aligned_width, aligned_height, stride, size;
4001 + uint32_t utile_w = utile_width(cpp);
4002 + uint32_t utile_h = utile_height(cpp);
4004 + /* The shaded vertex format stores signed 12.4 fixed point
4005 + * (-2048,2047) offsets from the viewport center, so we should
4006 + * never have a render target larger than 4096. The texture
4007 + * unit can only sample from 2048x2048, so it's even more
4008 + * restricted. This lets us avoid worrying about overflow in
4011 + if (width > 4096 || height > 4096) {
4012 + DRM_ERROR("Surface dimesions (%d,%d) too large", width, height);
4016 + switch (tiling_format) {
4017 + case VC4_TILING_FORMAT_LINEAR:
4018 + aligned_width = round_up(width, utile_w);
4019 + aligned_height = height;
4021 + case VC4_TILING_FORMAT_T:
4022 + aligned_width = round_up(width, utile_w * 8);
4023 + aligned_height = round_up(height, utile_h * 8);
4025 + case VC4_TILING_FORMAT_LT:
4026 + aligned_width = round_up(width, utile_w);
4027 + aligned_height = round_up(height, utile_h);
4030 + DRM_ERROR("buffer tiling %d unsupported\n", tiling_format);
4034 + stride = aligned_width * cpp;
4035 + size = stride * aligned_height;
4037 + if (size + offset < size ||
4038 + size + offset > fbo->base.size) {
4039 + DRM_ERROR("Overflow in %dx%d (%dx%d) fbo size (%d + %d > %d)\n",
4041 + aligned_width, aligned_height,
4042 + size, offset, fbo->base.size);
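Worked example (again assuming a 4x4-pixel microtile at cpp == 4): a
100x100 RGBA8888 surface in T format rounds both dimensions up to
utile_w * 8 == 32, giving 128x128 aligned, a 512-byte stride, and a
65536-byte footprint that must fit within the BO at the given offset.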
4050 +validate_flush_all(VALIDATE_ARGS)
4052 + if (exec->found_increment_semaphore_packet) {
4053 + DRM_ERROR("VC4_PACKET_FLUSH_ALL after "
4054 + "VC4_PACKET_INCREMENT_SEMAPHORE\n");
4062 +validate_start_tile_binning(VALIDATE_ARGS)
4064 + if (exec->found_start_tile_binning_packet) {
4065 + DRM_ERROR("Duplicate VC4_PACKET_START_TILE_BINNING\n");
4068 + exec->found_start_tile_binning_packet = true;
4070 + if (!exec->found_tile_binning_mode_config_packet) {
4071 + DRM_ERROR("missing VC4_PACKET_TILE_BINNING_MODE_CONFIG\n");
4079 +validate_increment_semaphore(VALIDATE_ARGS)
4081 + if (exec->found_increment_semaphore_packet) {
4082 + DRM_ERROR("Duplicate VC4_PACKET_INCREMENT_SEMAPHORE\n");
4085 + exec->found_increment_semaphore_packet = true;
4087 + /* Once we've found the semaphore increment, there should be one FLUSH
4088 + * then the end of the command list. The FLUSH actually triggers the
4089 + * increment, so we only need to make sure there
4096 +validate_indexed_prim_list(VALIDATE_ARGS)
4098 + struct drm_gem_cma_object *ib;
4099 + uint32_t length = *(uint32_t *)(untrusted + 1);
4100 + uint32_t offset = *(uint32_t *)(untrusted + 5);
4101 + uint32_t max_index = *(uint32_t *)(untrusted + 9);
4102 + uint32_t index_size = (*(uint8_t *)(untrusted + 0) >> 4) ? 2 : 1;
4103 + struct vc4_shader_state *shader_state;
4105 + if (exec->found_increment_semaphore_packet) {
4106 + DRM_ERROR("Drawing after VC4_PACKET_INCREMENT_SEMAPHORE\n");
4110 + /* Check overflow condition */
4111 + if (exec->shader_state_count == 0) {
4112 + DRM_ERROR("shader state must precede primitives\n");
4115 + shader_state = &exec->shader_state[exec->shader_state_count - 1];
4117 + if (max_index > shader_state->max_index)
4118 + shader_state->max_index = max_index;
4120 + if (!vc4_use_handle(exec, 0, VC4_MODE_RENDER, &ib))
4123 + if (offset > ib->base.size ||
4124 + (ib->base.size - offset) / index_size < length) {
4125 + DRM_ERROR("IB access overflow (%d + %d*%d > %d)\n",
4126 + offset, length, index_size, ib->base.size);
4130 + *(uint32_t *)(validated + 5) = ib->paddr + offset;
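+ /* Example of the bounds check above (illustration only): 100 16-bit
+ * indices (index_size = 2) at offset 64 need
+ * ib->base.size >= 64 + 100 * 2; the bound is tested with a division
+ * so the multiplication cannot overflow a uint32_t.
+ */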
4136 +validate_gl_array_primitive(VALIDATE_ARGS)
4138 + uint32_t length = *(uint32_t *)(untrusted + 1);
4139 + uint32_t base_index = *(uint32_t *)(untrusted + 5);
4140 + uint32_t max_index;
4141 + struct vc4_shader_state *shader_state;
4143 + if (exec->found_increment_semaphore_packet) {
4144 + DRM_ERROR("Drawing after VC4_PACKET_INCREMENT_SEMAPHORE\n");
4148 + /* Check overflow condition */
4149 + if (exec->shader_state_count == 0) {
4150 + DRM_ERROR("shader state must precede primitives\n");
4153 + shader_state = &exec->shader_state[exec->shader_state_count - 1];
4155 + if (length + base_index < length) {
4156 + DRM_ERROR("primitive vertex count overflow\n");
4159 + max_index = length + base_index - 1;
4161 + if (max_index > shader_state->max_index)
4162 + shader_state->max_index = max_index;
4168 +validate_gl_shader_state(VALIDATE_ARGS)
4170 + uint32_t i = exec->shader_state_count++;
4172 + if (i >= exec->shader_state_size) {
4173 + DRM_ERROR("More requests for shader states than declared\n");
4177 + exec->shader_state[i].packet = VC4_PACKET_GL_SHADER_STATE;
4178 + exec->shader_state[i].addr = *(uint32_t *)untrusted;
4179 + exec->shader_state[i].max_index = 0;
4181 + if (exec->shader_state[i].addr & ~0xf) {
4182 + DRM_ERROR("high bits set in GL shader rec reference\n");
4186 + *(uint32_t *)validated = (exec->shader_rec_p +
4187 + exec->shader_state[i].addr);
4189 + exec->shader_rec_p +=
4190 + roundup(gl_shader_rec_size(exec->shader_state[i].addr), 16);
4196 +validate_nv_shader_state(VALIDATE_ARGS)
4198 + uint32_t i = exec->shader_state_count++;
4200 + if (i >= exec->shader_state_size) {
4201 + DRM_ERROR("More requests for shader states than declared\n");
4205 + exec->shader_state[i].packet = VC4_PACKET_NV_SHADER_STATE;
4206 + exec->shader_state[i].addr = *(uint32_t *)untrusted;
4208 + if (exec->shader_state[i].addr & 15) {
4209 + DRM_ERROR("NV shader state address 0x%08x misaligned\n",
4210 + exec->shader_state[i].addr);
4214 + *(uint32_t *)validated = (exec->shader_state[i].addr +
4215 + exec->shader_rec_p);
4221 +validate_tile_binning_config(VALIDATE_ARGS)
4223 + struct drm_device *dev = exec->exec_bo->base.dev;
4225 + uint32_t tile_state_size, tile_alloc_size;
4226 + uint32_t tile_count;
4228 + if (exec->found_tile_binning_mode_config_packet) {
4229 + DRM_ERROR("Duplicate VC4_PACKET_TILE_BINNING_MODE_CONFIG\n");
4232 + exec->found_tile_binning_mode_config_packet = true;
4234 + exec->bin_tiles_x = *(uint8_t *)(untrusted + 12);
4235 + exec->bin_tiles_y = *(uint8_t *)(untrusted + 13);
4236 + tile_count = exec->bin_tiles_x * exec->bin_tiles_y;
4237 + flags = *(uint8_t *)(untrusted + 14);
4239 + if (exec->bin_tiles_x == 0 ||
4240 + exec->bin_tiles_y == 0) {
4241 + DRM_ERROR("Tile binning config of %dx%d too small\n",
4242 + exec->bin_tiles_x, exec->bin_tiles_y);
4246 + if (flags & (VC4_BIN_CONFIG_DB_NON_MS |
4247 + VC4_BIN_CONFIG_TILE_BUFFER_64BIT |
4248 + VC4_BIN_CONFIG_MS_MODE_4X)) {
4249 + DRM_ERROR("unsupported bining config flags 0x%02x\n", flags);
4253 + /* The tile state data array is 48 bytes per tile, and we put it at
4254 + * the start of a BO containing both it and the tile alloc.
4256 + tile_state_size = 48 * tile_count;
4258 + /* Since the tile alloc array will follow us, align. */
4259 + exec->tile_alloc_offset = roundup(tile_state_size, 4096);
4261 + *(uint8_t *)(validated + 14) =
4262 + ((flags & ~(VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_MASK |
4263 + VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_MASK)) |
4264 + VC4_BIN_CONFIG_AUTO_INIT_TSDA |
4265 + VC4_SET_FIELD(VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_32,
4266 + VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE) |
4267 + VC4_SET_FIELD(VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_128,
4268 + VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE));
4270 + /* Initial block size. */
4271 + tile_alloc_size = 32 * tile_count;
4274 + * The initial allocation gets rounded to the next 256 bytes before
4275 + * the hardware starts fulfilling further allocations.
4277 + tile_alloc_size = roundup(tile_alloc_size, 256);
4279 + /* Add space for the extra allocations. This is what gets used first,
4280 + * before overflow memory. It must have at least 4096 bytes, but we
4281 + * want to avoid overflow memory usage if possible.
4283 + tile_alloc_size += 1024 * 1024;
4285 + exec->tile_bo = &vc4_bo_create(dev, exec->tile_alloc_offset +
4286 + tile_alloc_size)->base;
4287 + if (!exec->tile_bo)
4289 + list_add_tail(&to_vc4_bo(&exec->tile_bo->base)->unref_head,
4290 + &exec->unref_list);
4292 + /* tile alloc address. */
4293 + *(uint32_t *)(validated + 0) = (exec->tile_bo->paddr +
4294 + exec->tile_alloc_offset);
4295 + /* tile alloc size. */
4296 + *(uint32_t *)(validated + 4) = tile_alloc_size;
4297 + /* tile state address. */
4298 + *(uint32_t *)(validated + 8) = exec->tile_bo->paddr;
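+ /* Worked example (illustration; assumes the usual 64x64 binning
+ * tiles): a 1920x1080 render is 30x17 tiles, so tile_count = 510,
+ * tile_state_size = 48 * 510 = 24480 bytes, tile_alloc_offset =
+ * roundup(24480, 4096) = 24576, and tile_alloc_size =
+ * roundup(32 * 510, 256) + 1024 * 1024 = 1064960 bytes, for a single
+ * 1089536-byte BO holding both arrays.
+ */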
4304 +validate_gem_handles(VALIDATE_ARGS)
4306 + memcpy(exec->bo_index, untrusted, sizeof(exec->bo_index));
4310 +#define VC4_DEFINE_PACKET(packet, name, func) \
4311 + [packet] = { packet ## _SIZE, name, func }
4313 +static const struct cmd_info {
4316 + int (*func)(struct vc4_exec_info *exec, void *validated,
4319 + VC4_DEFINE_PACKET(VC4_PACKET_HALT, "halt", NULL),
4320 + VC4_DEFINE_PACKET(VC4_PACKET_NOP, "nop", NULL),
4321 + VC4_DEFINE_PACKET(VC4_PACKET_FLUSH, "flush", NULL),
4322 + VC4_DEFINE_PACKET(VC4_PACKET_FLUSH_ALL, "flush all state", validate_flush_all),
4323 + VC4_DEFINE_PACKET(VC4_PACKET_START_TILE_BINNING, "start tile binning", validate_start_tile_binning),
4324 + VC4_DEFINE_PACKET(VC4_PACKET_INCREMENT_SEMAPHORE, "increment semaphore", validate_increment_semaphore),
4326 + VC4_DEFINE_PACKET(VC4_PACKET_GL_INDEXED_PRIMITIVE, "Indexed Primitive List", validate_indexed_prim_list),
4328 + VC4_DEFINE_PACKET(VC4_PACKET_GL_ARRAY_PRIMITIVE, "Vertex Array Primitives", validate_gl_array_primitive),
4330 + /* This is only used by clipped primitives (packets 48 and 49), which
4331 + * we don't support parsing yet.
4333 + VC4_DEFINE_PACKET(VC4_PACKET_PRIMITIVE_LIST_FORMAT, "primitive list format", NULL),
4335 + VC4_DEFINE_PACKET(VC4_PACKET_GL_SHADER_STATE, "GL Shader State", validate_gl_shader_state),
4336 + VC4_DEFINE_PACKET(VC4_PACKET_NV_SHADER_STATE, "NV Shader State", validate_nv_shader_state),
4338 + VC4_DEFINE_PACKET(VC4_PACKET_CONFIGURATION_BITS, "configuration bits", NULL),
4339 + VC4_DEFINE_PACKET(VC4_PACKET_FLAT_SHADE_FLAGS, "flat shade flags", NULL),
4340 + VC4_DEFINE_PACKET(VC4_PACKET_POINT_SIZE, "point size", NULL),
4341 + VC4_DEFINE_PACKET(VC4_PACKET_LINE_WIDTH, "line width", NULL),
4342 + VC4_DEFINE_PACKET(VC4_PACKET_RHT_X_BOUNDARY, "RHT X boundary", NULL),
4343 + VC4_DEFINE_PACKET(VC4_PACKET_DEPTH_OFFSET, "Depth Offset", NULL),
4344 + VC4_DEFINE_PACKET(VC4_PACKET_CLIP_WINDOW, "Clip Window", NULL),
4345 + VC4_DEFINE_PACKET(VC4_PACKET_VIEWPORT_OFFSET, "Viewport Offset", NULL),
4346 + VC4_DEFINE_PACKET(VC4_PACKET_CLIPPER_XY_SCALING, "Clipper XY Scaling", NULL),
4347 + /* Note: The docs say this was also 105, but it was 106 in the
4348 + * initial userland code drop.
4350 + VC4_DEFINE_PACKET(VC4_PACKET_CLIPPER_Z_SCALING, "Clipper Z Scale and Offset", NULL),
4352 + VC4_DEFINE_PACKET(VC4_PACKET_TILE_BINNING_MODE_CONFIG, "tile binning configuration", validate_tile_binning_config),
4354 + VC4_DEFINE_PACKET(VC4_PACKET_GEM_HANDLES, "GEM handles", validate_gem_handles),
4358 +vc4_validate_bin_cl(struct drm_device *dev,
4360 + void *unvalidated,
4361 + struct vc4_exec_info *exec)
4363 + uint32_t len = exec->args->bin_cl_size;
4364 + uint32_t dst_offset = 0;
4365 + uint32_t src_offset = 0;
4367 + while (src_offset < len) {
4368 + void *dst_pkt = validated + dst_offset;
4369 + void *src_pkt = unvalidated + src_offset;
4370 + u8 cmd = *(uint8_t *)src_pkt;
4371 + const struct cmd_info *info;
4373 + if (cmd >= ARRAY_SIZE(cmd_info)) {
4374 + DRM_ERROR("0x%08x: packet %d out of bounds\n",
4379 + info = &cmd_info[cmd];
4380 + if (!info->name) {
4381 + DRM_ERROR("0x%08x: packet %d invalid\n",
4387 + DRM_INFO("0x%08x: packet %d (%s) size %d processing...\n",
4388 + src_offset, cmd, info->name, info->len);
4391 + if (src_offset + info->len > len) {
4392 + DRM_ERROR("0x%08x: packet %d (%s) length 0x%08x "
4393 + "exceeds bounds (0x%08x)\n",
4394 + src_offset, cmd, info->name, info->len,
4395 + src_offset + len);
4399 + if (cmd != VC4_PACKET_GEM_HANDLES)
4400 + memcpy(dst_pkt, src_pkt, info->len);
4402 + if (info->func && info->func(exec,
4405 + DRM_ERROR("0x%08x: packet %d (%s) failed to "
4407 + src_offset, cmd, info->name);
4411 + src_offset += info->len;
4412 + /* GEM handle loading doesn't produce HW packets. */
4413 + if (cmd != VC4_PACKET_GEM_HANDLES)
4414 + dst_offset += info->len;
4416 + /* When the CL hits halt, it'll stop reading anything else. */
4417 + if (cmd == VC4_PACKET_HALT)
4421 + exec->ct0ea = exec->ct0ca + dst_offset;
4423 + if (!exec->found_start_tile_binning_packet) {
4424 + DRM_ERROR("Bin CL missing VC4_PACKET_START_TILE_BINNING\n");
4428 + if (!exec->found_increment_semaphore_packet) {
4429 + DRM_ERROR("Bin CL missing VC4_PACKET_INCREMENT_SEMAPHORE\n");
4437 +reloc_tex(struct vc4_exec_info *exec,
4438 + void *uniform_data_u,
4439 + struct vc4_texture_sample_info *sample,
4440 + uint32_t texture_handle_index)
4443 + struct drm_gem_cma_object *tex;
4444 + uint32_t p0 = *(uint32_t *)(uniform_data_u + sample->p_offset[0]);
4445 + uint32_t p1 = *(uint32_t *)(uniform_data_u + sample->p_offset[1]);
4446 + uint32_t p2 = (sample->p_offset[2] != ~0 ?
4447 + *(uint32_t *)(uniform_data_u + sample->p_offset[2]) : 0);
4448 + uint32_t p3 = (sample->p_offset[3] != ~0 ?
4449 + *(uint32_t *)(uniform_data_u + sample->p_offset[3]) : 0);
4450 + uint32_t *validated_p0 = exec->uniforms_v + sample->p_offset[0];
4451 + uint32_t offset = p0 & VC4_TEX_P0_OFFSET_MASK;
4452 + uint32_t miplevels = VC4_GET_FIELD(p0, VC4_TEX_P0_MIPLVLS);
4453 + uint32_t width = VC4_GET_FIELD(p1, VC4_TEX_P1_WIDTH);
4454 + uint32_t height = VC4_GET_FIELD(p1, VC4_TEX_P1_HEIGHT);
4455 + uint32_t cpp, tiling_format, utile_w, utile_h;
4457 + uint32_t cube_map_stride = 0;
4458 + enum vc4_texture_data_type type;
4460 + if (!vc4_use_bo(exec, texture_handle_index, VC4_MODE_RENDER, &tex))
4463 + if (sample->is_direct) {
4464 + uint32_t remaining_size = tex->base.size - p0;
4465 + if (p0 > tex->base.size - 4) {
4466 + DRM_ERROR("UBO offset greater than UBO size\n");
4469 + if (p1 > remaining_size - 4) {
4470 + DRM_ERROR("UBO clamp would allow reads outside of UBO\n");
4473 + *validated_p0 = tex->paddr + p0;
4482 + if (p0 & VC4_TEX_P0_CMMODE_MASK) {
4483 + if (VC4_GET_FIELD(p2, VC4_TEX_P2_PTYPE) ==
4484 + VC4_TEX_P2_PTYPE_CUBE_MAP_STRIDE)
4485 + cube_map_stride = p2 & VC4_TEX_P2_CMST_MASK;
4486 + if (VC4_GET_FIELD(p3, VC4_TEX_P2_PTYPE) ==
4487 + VC4_TEX_P2_PTYPE_CUBE_MAP_STRIDE) {
4488 + if (cube_map_stride) {
4489 + DRM_ERROR("Cube map stride set twice\n");
4493 + cube_map_stride = p3 & VC4_TEX_P2_CMST_MASK;
4495 + if (!cube_map_stride) {
4496 + DRM_ERROR("Cube map stride not set\n");
4501 + type = (VC4_GET_FIELD(p0, VC4_TEX_P0_TYPE) |
4502 + (VC4_GET_FIELD(p1, VC4_TEX_P1_TYPE4) << 4));
4505 + case VC4_TEXTURE_TYPE_RGBA8888:
4506 + case VC4_TEXTURE_TYPE_RGBX8888:
4507 + case VC4_TEXTURE_TYPE_RGBA32R:
4510 + case VC4_TEXTURE_TYPE_RGBA4444:
4511 + case VC4_TEXTURE_TYPE_RGBA5551:
4512 + case VC4_TEXTURE_TYPE_RGB565:
4513 + case VC4_TEXTURE_TYPE_LUMALPHA:
4514 + case VC4_TEXTURE_TYPE_S16F:
4515 + case VC4_TEXTURE_TYPE_S16:
4518 + case VC4_TEXTURE_TYPE_LUMINANCE:
4519 + case VC4_TEXTURE_TYPE_ALPHA:
4520 + case VC4_TEXTURE_TYPE_S8:
4523 + case VC4_TEXTURE_TYPE_ETC1:
4524 + case VC4_TEXTURE_TYPE_BW1:
4525 + case VC4_TEXTURE_TYPE_A4:
4526 + case VC4_TEXTURE_TYPE_A1:
4527 + case VC4_TEXTURE_TYPE_RGBA64:
4528 + case VC4_TEXTURE_TYPE_YUV422R:
4530 + DRM_ERROR("Texture format %d unsupported\n", type);
4533 + utile_w = utile_width(cpp);
4534 + utile_h = utile_height(cpp);
4536 + if (type == VC4_TEXTURE_TYPE_RGBA32R) {
4537 + tiling_format = VC4_TILING_FORMAT_LINEAR;
4539 + if (size_is_lt(width, height, cpp))
4540 + tiling_format = VC4_TILING_FORMAT_LT;
4542 + tiling_format = VC4_TILING_FORMAT_T;
4545 + if (!vc4_check_tex_size(exec, tex, offset + cube_map_stride * 5,
4546 + tiling_format, width, height, cpp)) {
4550 + /* The mipmap levels are stored before the base of the texture. Make
4551 + * sure there is actually space in the BO.
4553 + for (i = 1; i <= miplevels; i++) {
4554 + uint32_t level_width = max(width >> i, 1u);
4555 + uint32_t level_height = max(height >> i, 1u);
4556 + uint32_t aligned_width, aligned_height;
4557 + uint32_t level_size;
4559 + /* Once the levels get small enough, they drop from T to LT. */
4560 + if (tiling_format == VC4_TILING_FORMAT_T &&
4561 + size_is_lt(level_width, level_height, cpp)) {
4562 + tiling_format = VC4_TILING_FORMAT_LT;
4565 + switch (tiling_format) {
4566 + case VC4_TILING_FORMAT_T:
4567 + aligned_width = round_up(level_width, utile_w * 8);
4568 + aligned_height = round_up(level_height, utile_h * 8);
4570 + case VC4_TILING_FORMAT_LT:
4571 + aligned_width = round_up(level_width, utile_w);
4572 + aligned_height = round_up(level_height, utile_h);
4575 + aligned_width = round_up(level_width, utile_w);
4576 + aligned_height = level_height;
4580 + level_size = aligned_width * cpp * aligned_height;
4582 + if (offset < level_size) {
4583 + DRM_ERROR("Level %d (%dx%d -> %dx%d) size %db "
4584 + "overflowed buffer bounds (offset %d)\n",
4585 + i, level_width, level_height,
4586 + aligned_width, aligned_height,
4587 + level_size, offset);
4591 + offset -= level_size;
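+ /* Worked example (illustration, LT mode, cpp = 4): a 64x64 texture
+ * with miplevels = 2 needs level 1 (32x32) = 32 * 4 * 32 = 4096
+ * bytes and level 2 (16x16) = 1024 bytes stored below the base, so
+ * the base offset in p0 must be at least 5120.
+ */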
4594 + *validated_p0 = tex->paddr + p0;
4598 + DRM_INFO("Texture p0 at %d: 0x%08x\n", sample->p_offset[0], p0);
4599 + DRM_INFO("Texture p1 at %d: 0x%08x\n", sample->p_offset[1], p1);
4600 + DRM_INFO("Texture p2 at %d: 0x%08x\n", sample->p_offset[2], p2);
4601 + DRM_INFO("Texture p3 at %d: 0x%08x\n", sample->p_offset[3], p3);
4606 +validate_shader_rec(struct drm_device *dev,
4607 + struct vc4_exec_info *exec,
4608 + struct vc4_shader_state *state)
4610 + uint32_t *src_handles;
4611 + void *pkt_u, *pkt_v;
4612 + enum shader_rec_reloc_type {
4616 + struct shader_rec_reloc {
4617 + enum shader_rec_reloc_type type;
4620 + static const struct shader_rec_reloc gl_relocs[] = {
4621 + { RELOC_CODE, 4 }, /* fs */
4622 + { RELOC_CODE, 16 }, /* vs */
4623 + { RELOC_CODE, 28 }, /* cs */
4625 + static const struct shader_rec_reloc nv_relocs[] = {
4626 + { RELOC_CODE, 4 }, /* fs */
4629 + const struct shader_rec_reloc *relocs;
4630 + struct drm_gem_cma_object *bo[ARRAY_SIZE(gl_relocs) + 8];
4631 + uint32_t nr_attributes = 0, nr_fixed_relocs, nr_relocs, packet_size;
4633 + struct vc4_validated_shader_info *validated_shader;
4635 + if (state->packet == VC4_PACKET_NV_SHADER_STATE) {
4636 + relocs = nv_relocs;
4637 + nr_fixed_relocs = ARRAY_SIZE(nv_relocs);
4641 + relocs = gl_relocs;
4642 + nr_fixed_relocs = ARRAY_SIZE(gl_relocs);
4644 + nr_attributes = state->addr & 0x7;
4645 + if (nr_attributes == 0)
4646 + nr_attributes = 8;
4647 + packet_size = gl_shader_rec_size(state->addr);
4649 + nr_relocs = nr_fixed_relocs + nr_attributes;
4651 + if (nr_relocs * 4 > exec->shader_rec_size) {
4652 + DRM_ERROR("overflowed shader recs reading %d handles "
4653 + "from %d bytes left\n",
4654 + nr_relocs, exec->shader_rec_size);
4657 + src_handles = exec->shader_rec_u;
4658 + exec->shader_rec_u += nr_relocs * 4;
4659 + exec->shader_rec_size -= nr_relocs * 4;
4661 + if (packet_size > exec->shader_rec_size) {
4662 + DRM_ERROR("overflowed shader recs copying %db packet "
4663 + "from %d bytes left\n",
4664 + packet_size, exec->shader_rec_size);
4667 + pkt_u = exec->shader_rec_u;
4668 + pkt_v = exec->shader_rec_v;
4669 + memcpy(pkt_v, pkt_u, packet_size);
4670 + exec->shader_rec_u += packet_size;
4671 + /* Shader recs have to be aligned to 16 bytes (due to the attribute
4672 + * flags being in the low bytes), so round the next validated shader
4673 + * rec address up. This should be safe, since we've got so many
4674 + * relocations in a shader rec packet.
4676 + BUG_ON(roundup(packet_size, 16) - packet_size > nr_relocs * 4);
4677 + exec->shader_rec_v += roundup(packet_size, 16);
4678 + exec->shader_rec_size -= packet_size;
4680 + for (i = 0; i < nr_relocs; i++) {
4681 + enum vc4_bo_mode mode;
4683 + if (i < nr_fixed_relocs && relocs[i].type == RELOC_CODE)
4684 + mode = VC4_MODE_SHADER;
4686 + mode = VC4_MODE_RENDER;
4688 + if (!vc4_use_bo(exec, src_handles[i], mode, &bo[i])) {
4693 + for (i = 0; i < nr_fixed_relocs; i++) {
4694 + uint32_t o = relocs[i].offset;
4695 + uint32_t src_offset = *(uint32_t *)(pkt_u + o);
4696 + uint32_t *texture_handles_u;
4697 + void *uniform_data_u;
4700 + *(uint32_t *)(pkt_v + o) = bo[i]->paddr + src_offset;
4702 + switch (relocs[i].type) {
4704 + if (src_offset != 0) {
4705 + DRM_ERROR("Shaders must be at offset 0 of "
4710 + validated_shader = to_vc4_bo(&bo[i]->base)->validated_shader;
4711 + if (!validated_shader)
4714 + if (validated_shader->uniforms_src_size >
4715 + exec->uniforms_size) {
4716 + DRM_ERROR("Uniforms src buffer overflow\n");
4720 + texture_handles_u = exec->uniforms_u;
4721 + uniform_data_u = (texture_handles_u +
4722 + validated_shader->num_texture_samples);
4724 + memcpy(exec->uniforms_v, uniform_data_u,
4725 + validated_shader->uniforms_size);
4728 + tex < validated_shader->num_texture_samples;
4730 + if (!reloc_tex(exec,
4732 + &validated_shader->texture_samples[tex],
4733 + texture_handles_u[tex])) {
4738 + *(uint32_t *)(pkt_v + o + 4) = exec->uniforms_p;
4740 + exec->uniforms_u += validated_shader->uniforms_src_size;
4741 + exec->uniforms_v += validated_shader->uniforms_size;
4742 + exec->uniforms_p += validated_shader->uniforms_size;
4751 + for (i = 0; i < nr_attributes; i++) {
4752 + struct drm_gem_cma_object *vbo = bo[nr_fixed_relocs + i];
4753 + uint32_t o = 36 + i * 8;
4754 + uint32_t offset = *(uint32_t *)(pkt_u + o + 0);
4755 + uint32_t attr_size = *(uint8_t *)(pkt_u + o + 4) + 1;
4756 + uint32_t stride = *(uint8_t *)(pkt_u + o + 5);
4757 + uint32_t max_index;
4759 + if (state->addr & 0x8)
4760 + stride |= (*(uint32_t *)(pkt_u + 100 + i * 4)) & ~0xff;
4762 + if (vbo->base.size < offset ||
4763 + vbo->base.size - offset < attr_size) {
4764 + DRM_ERROR("BO offset overflow (%d + %d > %d)\n",
4765 + offset, attr_size, vbo->base.size);
4769 + if (stride != 0) {
4770 + max_index = ((vbo->base.size - offset - attr_size) /
4772 + if (state->max_index > max_index) {
4773 + DRM_ERROR("primitives use index %d out of supplied %d\n",
4774 + state->max_index, max_index);
4779 + *(uint32_t *)(pkt_v + o) = vbo->paddr + offset;
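+ /* Shape of one record in the shader_rec stream parsed above
+ * (sketch): the relocation handles come first, then the packet body.
+ * For a non-extended GL record with two attributes:
+ *
+ * uint32_t handles[3 + 2]; (fs, vs, cs code BOs, then two VBOs)
+ * uint8_t packet[36 + 2 * 8]; (copied, then relocated in place)
+ */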
4789 +vc4_validate_shader_recs(struct drm_device *dev,
4790 + struct vc4_exec_info *exec)
4795 + for (i = 0; i < exec->shader_state_count; i++) {
4796 + ret = validate_shader_rec(dev, exec, &exec->shader_state[i]);
4804 +++ b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
4807 + * Copyright © 2014 Broadcom
4809 + * Permission is hereby granted, free of charge, to any person obtaining a
4810 + * copy of this software and associated documentation files (the "Software"),
4811 + * to deal in the Software without restriction, including without limitation
4812 + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
4813 + * and/or sell copies of the Software, and to permit persons to whom the
4814 + * Software is furnished to do so, subject to the following conditions:
4816 + * The above copyright notice and this permission notice (including the next
4817 + * paragraph) shall be included in all copies or substantial portions of the
4820 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
4821 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
4822 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
4823 + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
4824 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
4825 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
4826 + * IN THE SOFTWARE.
4830 + * DOC: Shader validator for VC4.
4832 + * The VC4 has no IOMMU between it and system memory. So, a user with access
4833 + * to execute shaders could escalate privilege by overwriting system memory
4834 + * (using the VPM write address register in the general-purpose DMA mode) or
4835 + * reading system memory it shouldn't (reading it as a texture, or uniform
4836 + * data, or vertex data).
4838 + * This walks over a shader starting from some offset within a BO, ensuring
4839 + * that its accesses are appropriately bounded, and recording how many texture
4840 + * accesses are made and where so that we can do relocations for them in the
4843 + * The kernel API has shaders stored in user-mapped BOs. The BOs will be
4844 + * forcibly unmapped from the process before validation, and any cache of
4845 + * validated state will be flushed if the mapping is faulted back in.
4847 + * Storing the shaders in BOs means that the validation process will be slow
4848 + * due to uncached reads, but since shaders are long-lived and shader BOs are
4849 + * never actually modified, this shouldn't be a problem.
4852 +#include "vc4_drv.h"
4853 +#include "vc4_qpu_defines.h"
4855 +struct vc4_shader_validation_state {
4856 + struct vc4_texture_sample_info tmu_setup[2];
4857 + int tmu_write_count[2];
4859 + /* For registers that were last written to by a MIN instruction with
4860 + * one argument being a uniform, the address of the uniform.
4863 + * This is used for the validation of direct address memory reads.
4865 + uint32_t live_min_clamp_offsets[32 + 32 + 4];
4866 + bool live_max_clamp_regs[32 + 32 + 4];
4870 +waddr_to_live_reg_index(uint32_t waddr, bool is_b)
4874 + return 32 + waddr;
4877 + } else if (waddr <= QPU_W_ACC3) {
4879 + return 64 + waddr - QPU_W_ACC0;
4886 +raddr_add_a_to_live_reg_index(uint64_t inst)
4888 + uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
4889 + uint32_t add_a = QPU_GET_FIELD(inst, QPU_ADD_A);
4890 + uint32_t raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
4891 + uint32_t raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);
4893 + if (add_a == QPU_MUX_A) {
4895 + } else if (add_a == QPU_MUX_B && sig != QPU_SIG_SMALL_IMM) {
4896 + return 32 + raddr_b;
4897 + } else if (add_a <= QPU_MUX_R3) {
4898 + return 64 + add_a;
4905 +is_tmu_submit(uint32_t waddr)
4907 + return (waddr == QPU_W_TMU0_S ||
4908 + waddr == QPU_W_TMU1_S);
4912 +is_tmu_write(uint32_t waddr)
4914 + return (waddr >= QPU_W_TMU0_S &&
4915 + waddr <= QPU_W_TMU1_B);
4919 +record_validated_texture_sample(struct vc4_validated_shader_info *validated_shader,
4920 + struct vc4_shader_validation_state *validation_state,
4923 + uint32_t s = validated_shader->num_texture_samples;
4925 + struct vc4_texture_sample_info *temp_samples;
4927 + temp_samples = krealloc(validated_shader->texture_samples,
4928 + (s + 1) * sizeof(*temp_samples),
4930 + if (!temp_samples)
4933 + memcpy(&temp_samples[s],
4934 + &validation_state->tmu_setup[tmu],
4935 + sizeof(*temp_samples));
4937 + validated_shader->num_texture_samples = s + 1;
4938 + validated_shader->texture_samples = temp_samples;
4940 + for (i = 0; i < 4; i++)
4941 + validation_state->tmu_setup[tmu].p_offset[i] = ~0;
4947 +check_tmu_write(uint64_t inst,
4948 + struct vc4_validated_shader_info *validated_shader,
4949 + struct vc4_shader_validation_state *validation_state,
4952 + uint32_t waddr = (is_mul ?
4953 + QPU_GET_FIELD(inst, QPU_WADDR_MUL) :
4954 + QPU_GET_FIELD(inst, QPU_WADDR_ADD));
4955 + uint32_t raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
4956 + uint32_t raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);
4957 + int tmu = waddr > QPU_W_TMU0_B;
4958 + bool submit = is_tmu_submit(waddr);
4959 + bool is_direct = submit && validation_state->tmu_write_count[tmu] == 0;
4960 + uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
4963 + uint32_t add_b = QPU_GET_FIELD(inst, QPU_ADD_B);
4964 + uint32_t clamp_reg, clamp_offset;
4966 + if (sig == QPU_SIG_SMALL_IMM) {
4967 + DRM_ERROR("direct TMU read used small immediate\n");
4971 + /* Make sure that this texture load is an add of the base
4972 + * address of the UBO to a clamped offset within the UBO.
4975 + QPU_GET_FIELD(inst, QPU_OP_ADD) != QPU_A_ADD) {
4976 + DRM_ERROR("direct TMU load wasn't an add\n");
4980 + /* We assert that the clamped address is the first
4981 + * argument, and the UBO base address is the second argument.
4982 + * This is arbitrary, but simpler than supporting flipping the
4983 + * two either way.
4985 + clamp_reg = raddr_add_a_to_live_reg_index(inst);
4986 + if (clamp_reg == ~0) {
4987 + DRM_ERROR("direct TMU load wasn't clamped\n");
4991 + clamp_offset = validation_state->live_min_clamp_offsets[clamp_reg];
4992 + if (clamp_offset == ~0) {
4993 + DRM_ERROR("direct TMU load wasn't clamped\n");
4997 + /* Store the clamp value's offset in p1 (see reloc_tex() in
4998 + * vc4_validate.c).
5000 + validation_state->tmu_setup[tmu].p_offset[1] =
5003 + if (!(add_b == QPU_MUX_A && raddr_a == QPU_R_UNIF) &&
5004 + !(add_b == QPU_MUX_B && raddr_b == QPU_R_UNIF)) {
5005 + DRM_ERROR("direct TMU load didn't add to a uniform\n");
5009 + validation_state->tmu_setup[tmu].is_direct = true;
5011 + if (raddr_a == QPU_R_UNIF || (sig != QPU_SIG_SMALL_IMM &&
5012 + raddr_b == QPU_R_UNIF)) {
5013 + DRM_ERROR("uniform read in the same instruction as "
5014 + "texture setup.\n");
5019 + if (validation_state->tmu_write_count[tmu] >= 4) {
5020 + DRM_ERROR("TMU%d got too many parameters before dispatch\n",
5024 + validation_state->tmu_setup[tmu].p_offset[validation_state->tmu_write_count[tmu]] =
5025 + validated_shader->uniforms_size;
5026 + validation_state->tmu_write_count[tmu]++;
5027 + /* Since direct uses a RADDR uniform reference, it will get counted in
5028 + * check_instruction_reads()
5031 + validated_shader->uniforms_size += 4;
5034 + if (!record_validated_texture_sample(validated_shader,
5035 + validation_state, tmu)) {
5039 + validation_state->tmu_write_count[tmu] = 0;
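+ /* Example of what this tracks (illustration): an indirect 2D sample
+ * writes QPU_W_TMU0_T and then QPU_W_TMU0_S to submit; each write
+ * records the current uniforms_size in p_offset[] so reloc_tex() can
+ * patch the matching parameter words, and the submit resets
+ * tmu_write_count[0] for the next sample.
+ */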
5046 +check_register_write(uint64_t inst,
5047 + struct vc4_validated_shader_info *validated_shader,
5048 + struct vc4_shader_validation_state *validation_state,
5051 + uint32_t waddr = (is_mul ?
5052 + QPU_GET_FIELD(inst, QPU_WADDR_MUL) :
5053 + QPU_GET_FIELD(inst, QPU_WADDR_ADD));
5056 + case QPU_W_UNIFORMS_ADDRESS:
5057 + /* XXX: We'll probably need to support this for reladdr, but
5058 + * it's definitely a security-related one.
5060 + DRM_ERROR("uniforms address load unsupported\n");
5063 + case QPU_W_TLB_COLOR_MS:
5064 + case QPU_W_TLB_COLOR_ALL:
5066 + /* These only interact with the tile buffer, not main memory,
5067 + * so they're safe.
5071 + case QPU_W_TMU0_S:
5072 + case QPU_W_TMU0_T:
5073 + case QPU_W_TMU0_R:
5074 + case QPU_W_TMU0_B:
5075 + case QPU_W_TMU1_S:
5076 + case QPU_W_TMU1_T:
5077 + case QPU_W_TMU1_R:
5078 + case QPU_W_TMU1_B:
5079 + return check_tmu_write(inst, validated_shader, validation_state,
5082 + case QPU_W_HOST_INT:
5083 + case QPU_W_TMU_NOSWAP:
5084 + case QPU_W_TLB_ALPHA_MASK:
5085 + case QPU_W_MUTEX_RELEASE:
5086 + /* XXX: I haven't thought about these, so don't support them
5087 + * for now.
5089 + DRM_ERROR("Unsupported waddr %d\n", waddr);
5092 + case QPU_W_VPM_ADDR:
5093 + DRM_ERROR("General VPM DMA unsupported\n");
5097 + case QPU_W_VPMVCD_SETUP:
5098 + /* We allow VPM setup in general, even including VPM DMA
5099 + * configuration setup, because the (unsafe) DMA can only be
5100 + * triggered by QPU_W_VPM_ADDR writes.
5104 + case QPU_W_TLB_STENCIL_SETUP:
5112 +track_live_clamps(uint64_t inst,
5113 + struct vc4_validated_shader_info *validated_shader,
5114 + struct vc4_shader_validation_state *validation_state)
5116 + uint32_t op_add = QPU_GET_FIELD(inst, QPU_OP_ADD);
5117 + uint32_t waddr_add = QPU_GET_FIELD(inst, QPU_WADDR_ADD);
5118 + uint32_t waddr_mul = QPU_GET_FIELD(inst, QPU_WADDR_MUL);
5119 + uint32_t cond_add = QPU_GET_FIELD(inst, QPU_COND_ADD);
5120 + uint32_t add_a = QPU_GET_FIELD(inst, QPU_ADD_A);
5121 + uint32_t add_b = QPU_GET_FIELD(inst, QPU_ADD_B);
5122 + uint32_t raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
5123 + uint32_t raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);
5124 + uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
5125 + bool ws = inst & QPU_WS;
5126 + uint32_t lri_add_a, lri_add, lri_mul;
5127 + bool add_a_is_min_0;
5129 + /* Check whether OP_ADD's A argument comes from a live MAX(x, 0),
5130 + * before we clear previous live state.
5132 + lri_add_a = raddr_add_a_to_live_reg_index(inst);
5133 + add_a_is_min_0 = (lri_add_a != ~0 &&
5134 + validation_state->live_max_clamp_regs[lri_add_a]);
5136 + /* Clear live state for registers written by our instruction. */
5137 + lri_add = waddr_to_live_reg_index(waddr_add, ws);
5138 + lri_mul = waddr_to_live_reg_index(waddr_mul, !ws);
5139 + if (lri_mul != ~0) {
5140 + validation_state->live_max_clamp_regs[lri_mul] = false;
5141 + validation_state->live_min_clamp_offsets[lri_mul] = ~0;
5143 + if (lri_add != ~0) {
5144 + validation_state->live_max_clamp_regs[lri_add] = false;
5145 + validation_state->live_min_clamp_offsets[lri_add] = ~0;
5147 + /* Nothing further to do for live tracking, since only ADDs
5148 + * generate new live clamp registers.
5153 + /* Now, handle remaining live clamp tracking for the ADD operation. */
5155 + if (cond_add != QPU_COND_ALWAYS)
5158 + if (op_add == QPU_A_MAX) {
5159 + /* Track live clamps of a value to a minimum of 0 (in either
5162 + if (sig != QPU_SIG_SMALL_IMM || raddr_b != 0 ||
5163 + (add_a != QPU_MUX_B && add_b != QPU_MUX_B)) {
5167 + validation_state->live_max_clamp_regs[lri_add] = true;
5168 + } else if (op_add == QPU_A_MIN) {
5169 + /* Track live clamps of a value clamped to a minimum of 0 and
5170 + * a maximum of some uniform's offset.
5172 + if (!add_a_is_min_0)
5175 + if (!(add_b == QPU_MUX_A && raddr_a == QPU_R_UNIF) &&
5176 + !(add_b == QPU_MUX_B && raddr_b == QPU_R_UNIF &&
5177 + sig != QPU_SIG_SMALL_IMM)) {
5181 + validation_state->live_min_clamp_offsets[lri_add] =
5182 + validated_shader->uniforms_size;
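+ /* The pattern recognized here, in rough QPU pseudo-assembly
+ * (illustration):
+ *
+ * max ra, rx, 0 (small immediate 0 sets live_max_clamp_regs[ra])
+ * min rb, ra, unif (records live_min_clamp_offsets[rb])
+ *
+ * so a later direct TMU load adding rb to a uniform base address is
+ * provably clamped within the UBO and allowed by check_tmu_write().
+ */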
5187 +check_instruction_writes(uint64_t inst,
5188 + struct vc4_validated_shader_info *validated_shader,
5189 + struct vc4_shader_validation_state *validation_state)
5191 + uint32_t waddr_add = QPU_GET_FIELD(inst, QPU_WADDR_ADD);
5192 + uint32_t waddr_mul = QPU_GET_FIELD(inst, QPU_WADDR_MUL);
5195 + if (is_tmu_write(waddr_add) && is_tmu_write(waddr_mul)) {
5196 + DRM_ERROR("ADD and MUL both set up textures\n");
5200 + ok = (check_register_write(inst, validated_shader, validation_state, false) &&
5201 + check_register_write(inst, validated_shader, validation_state, true));
5203 + track_live_clamps(inst, validated_shader, validation_state);
5209 +check_instruction_reads(uint64_t inst,
5210 + struct vc4_validated_shader_info *validated_shader)
5212 + uint32_t raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
5213 + uint32_t raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);
5214 + uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
5216 + if (raddr_a == QPU_R_UNIF ||
5217 + (raddr_b == QPU_R_UNIF && sig != QPU_SIG_SMALL_IMM)) {
5218 + /* This can't overflow the uint32_t, because we're reading 8
5219 + * bytes of instruction to increment by 4 here, so we'd
5220 + * already be OOM.
5222 + validated_shader->uniforms_size += 4;
5228 +struct vc4_validated_shader_info *
5229 +vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
5231 + bool found_shader_end = false;
5232 + int shader_end_ip = 0;
5233 + uint32_t ip, max_ip;
5235 + struct vc4_validated_shader_info *validated_shader;
5236 + struct vc4_shader_validation_state validation_state;
5239 + memset(&validation_state, 0, sizeof(validation_state));
5241 + for (i = 0; i < 8; i++)
5242 + validation_state.tmu_setup[i / 4].p_offset[i % 4] = ~0;
5243 + for (i = 0; i < ARRAY_SIZE(validation_state.live_min_clamp_offsets); i++)
5244 + validation_state.live_min_clamp_offsets[i] = ~0;
5246 + shader = shader_obj->vaddr;
5247 + max_ip = shader_obj->base.size / sizeof(uint64_t);
5249 + validated_shader = kcalloc(1, sizeof(*validated_shader), GFP_KERNEL);
5250 + if (!validated_shader)
5253 + for (ip = 0; ip < max_ip; ip++) {
5254 + uint64_t inst = shader[ip];
5255 + uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
5258 + case QPU_SIG_NONE:
5259 + case QPU_SIG_WAIT_FOR_SCOREBOARD:
5260 + case QPU_SIG_SCOREBOARD_UNLOCK:
5261 + case QPU_SIG_COLOR_LOAD:
5262 + case QPU_SIG_LOAD_TMU0:
5263 + case QPU_SIG_LOAD_TMU1:
5264 + case QPU_SIG_PROG_END:
5265 + case QPU_SIG_SMALL_IMM:
5266 + if (!check_instruction_writes(inst, validated_shader,
5267 + &validation_state)) {
5268 + DRM_ERROR("Bad write at ip %d\n", ip);
5272 + if (!check_instruction_reads(inst, validated_shader))
5275 + if (sig == QPU_SIG_PROG_END) {
5276 + found_shader_end = true;
5277 + shader_end_ip = ip;
5282 + case QPU_SIG_LOAD_IMM:
5283 + if (!check_instruction_writes(inst, validated_shader,
5284 + &validation_state)) {
5285 + DRM_ERROR("Bad LOAD_IMM write at ip %d\n", ip);
5291 + DRM_ERROR("Unsupported QPU signal %d at "
5292 + "instruction %d\n", sig, ip);
5296 + /* There are two delay slots after program end is signaled
5297 + * that are still executed, then we're finished.
5299 + if (found_shader_end && ip == shader_end_ip + 2)
5303 + if (ip == max_ip) {
5304 + DRM_ERROR("shader failed to terminate before "
5305 + "shader BO end at %d\n",
5306 + shader_obj->base.size);
5310 + /* Again, no chance of integer overflow here because the worst case
5311 + * scenario is 8 bytes of uniforms plus handles per 8-byte
5312 + * instruction.
5314 + validated_shader->uniforms_src_size =
5315 + (validated_shader->uniforms_size +
5316 + 4 * validated_shader->num_texture_samples);
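+ /* For example (illustration): a shader reading two plain uniforms
+ * and doing one two-parameter texture sample ends up with
+ * uniforms_size = (2 + 2) * 4 = 16, so userspace must supply
+ * uniforms_src_size = 16 + 4 * 1 = 20 bytes, the extra word being
+ * the texture BO handle index.
+ */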
5318 + return validated_shader;
5321 + if (validated_shader) {
5322 + kfree(validated_shader->texture_samples);
5323 + kfree(validated_shader);
5328 +++ b/include/uapi/drm/vc4_drm.h
5331 + * Copyright © 2014-2015 Broadcom
5333 + * Permission is hereby granted, free of charge, to any person obtaining a
5334 + * copy of this software and associated documentation files (the "Software"),
5335 + * to deal in the Software without restriction, including without limitation
5336 + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
5337 + * and/or sell copies of the Software, and to permit persons to whom the
5338 + * Software is furnished to do so, subject to the following conditions:
5340 + * The above copyright notice and this permission notice (including the next
5341 + * paragraph) shall be included in all copies or substantial portions of the
5344 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
5345 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
5346 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
5347 + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
5348 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
5349 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
5350 + * IN THE SOFTWARE.
5353 +#ifndef _UAPI_VC4_DRM_H_
5354 +#define _UAPI_VC4_DRM_H_
5356 +#include <drm/drm.h>
5358 +#define DRM_VC4_SUBMIT_CL 0x00
5359 +#define DRM_VC4_WAIT_SEQNO 0x01
5360 +#define DRM_VC4_WAIT_BO 0x02
5361 +#define DRM_VC4_CREATE_BO 0x03
5362 +#define DRM_VC4_MMAP_BO 0x04
5363 +#define DRM_VC4_CREATE_SHADER_BO 0x05
5365 +#define DRM_IOCTL_VC4_SUBMIT_CL DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_SUBMIT_CL, struct drm_vc4_submit_cl)
5366 +#define DRM_IOCTL_VC4_WAIT_SEQNO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_WAIT_SEQNO, struct drm_vc4_wait_seqno)
5367 +#define DRM_IOCTL_VC4_WAIT_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_WAIT_BO, struct drm_vc4_wait_bo)
5368 +#define DRM_IOCTL_VC4_CREATE_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_CREATE_BO, struct drm_vc4_create_bo)
5369 +#define DRM_IOCTL_VC4_MMAP_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_MMAP_BO, struct drm_vc4_mmap_bo)
5370 +#define DRM_IOCTL_VC4_CREATE_SHADER_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_CREATE_SHADER_BO, struct drm_vc4_create_shader_bo)
5372 +struct drm_vc4_submit_rcl_surface {
5373 + uint32_t hindex; /* Handle index, or ~0 if not present. */
5374 + uint32_t offset; /* Offset to start of buffer. */
5376 + * Bits for either render config (color_ms_write) or load/store packet.
5383 + * struct drm_vc4_submit_cl - ioctl argument for submitting commands to the 3D
5384 + * engine.
5386 + * Drivers typically use GPU BOs to store batchbuffers / command lists and
5387 + * their associated state. However, because the VC4 lacks an MMU, we have to
5388 + * do validation of memory accesses by the GPU commands. If we were to store
5389 + * our commands in BOs, we'd need to do uncached readback from them to do the
5390 + * validation process, which is too expensive. Instead, userspace accumulates
5391 + * commands and associated state in plain memory, then the kernel copies the
5392 + * data to its own address space, and then validates and stores it in a GPU
5395 +struct drm_vc4_submit_cl {
5396 + /* Pointer to the binner command list.
5398 + * This is the first set of commands executed, which runs the
5399 + * coordinate shader to determine where primitives land on the screen,
5400 + * then writes out the state updates and draw calls necessary per tile
5401 + * to the tile allocation BO.
5405 + /* Pointer to the shader records.
5407 + * Shader records are the structures read by the hardware that contain
5408 + * pointers to uniforms, shaders, and vertex attributes. The
5409 + * reference to the shader record has enough information to determine
5410 + * how many pointers are necessary (fixed number for shaders/uniforms,
5411 + * and an attribute count), so those BO indices into bo_handles are
5412 + * just stored as uint32_ts before each shader record passed in.
5414 + uint64_t shader_rec;
5416 + /* Pointer to uniform data and texture handles for the textures
5417 + * referenced by the shader.
5419 + * For each shader state record, there is a set of uniform data in the
5420 + * order referenced by the record (FS, VS, then CS). Each set of
5421 + * uniform data has a uint32_t index into bo_handles per texture
5422 + * sample operation, in the order the QPU_W_TMUn_S writes appear in
5423 + * the program. Following the texture BO handle indices is the actual
5426 + * The individual uniform state blocks don't have sizes passed in,
5427 + * because the kernel has to determine the sizes anyway during shader
5428 + * code validation.
5430 + uint64_t uniforms;
5431 + uint64_t bo_handles;
5433 + /* Size in bytes of the binner command list. */
5434 + uint32_t bin_cl_size;
5435 + /* Size in bytes of the set of shader records. */
5436 + uint32_t shader_rec_size;
5437 + /* Number of shader records.
5439 + * This could just be computed from the contents of shader_records and
5440 + * the address bits of references to them from the bin CL, but it
5441 + * keeps the kernel from having to resize some allocations it makes.
5443 + uint32_t shader_rec_count;
5444 + /* Size in bytes of the uniform state. */
5445 + uint32_t uniforms_size;
5447 + /* Number of BO handles passed in (size is that times 4). */
5448 + uint32_t bo_handle_count;
5453 + uint8_t min_x_tile;
5454 + uint8_t min_y_tile;
5455 + uint8_t max_x_tile;
5456 + uint8_t max_y_tile;
5457 + struct drm_vc4_submit_rcl_surface color_read;
5458 + struct drm_vc4_submit_rcl_surface color_ms_write;
5459 + struct drm_vc4_submit_rcl_surface zs_read;
5460 + struct drm_vc4_submit_rcl_surface zs_write;
5461 + uint32_t clear_color[2];
5467 +#define VC4_SUBMIT_CL_USE_CLEAR_COLOR (1 << 0)
5470 + /* Returned value of the seqno of this render job (for the
5471 + * wait ioctl).
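+ /* Sketch of a submit using the fields above (stream-pointer field
+ * names assumed from the surrounding comments; error handling
+ * omitted):
+ *
+ * struct drm_vc4_submit_cl submit = {
+ * .bin_cl = (uintptr_t)bin_cl,
+ * .bin_cl_size = bin_cl_size,
+ * .shader_rec = (uintptr_t)shader_recs,
+ * .shader_rec_size = shader_rec_size,
+ * .shader_rec_count = shader_rec_count,
+ * .uniforms = (uintptr_t)uniform_stream,
+ * .uniforms_size = uniforms_size,
+ * .bo_handles = (uintptr_t)handles,
+ * .bo_handle_count = handle_count,
+ * };
+ * ioctl(fd, DRM_IOCTL_VC4_SUBMIT_CL, &submit);
+ *
+ * The returned seqno can then be passed to DRM_IOCTL_VC4_WAIT_SEQNO.
+ */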
5477 + * struct drm_vc4_wait_seqno - ioctl argument for waiting for
5478 + * DRM_VC4_SUBMIT_CL completion using its returned seqno.
5480 + * timeout_ns is the timeout in nanoseconds, where "0" means "don't
5481 + * block, just return the status."
5483 +struct drm_vc4_wait_seqno {
5485 + uint64_t timeout_ns;
5489 + * struct drm_vc4_wait_bo - ioctl argument for waiting for
5490 + * completion of the last DRM_VC4_SUBMIT_CL on a BO.
5492 + * This is useful for cases where multiple processes might be
5493 + * rendering to a BO and you want to wait for all rendering to be
5494 + * completed.
5496 +struct drm_vc4_wait_bo {
5499 + uint64_t timeout_ns;
5503 + * struct drm_vc4_create_bo - ioctl argument for creating VC4 BOs.
5505 + * There are currently no values for the flags argument, but it may be
5506 + * used in a future extension.
5508 +struct drm_vc4_create_bo {
5511 + /** Returned GEM handle for the BO. */
5517 + * struct drm_vc4_create_shader_bo - ioctl argument for creating VC4
5520 + * Since allowing a shader to be overwritten while it's also being
5521 + * executed from would allow privilege escalation, shaders must be
5522 + * created using this ioctl, and they can't be mmapped later.
5524 +struct drm_vc4_create_shader_bo {
5525 + /* Size of the data argument. */
5527 + /* Flags, currently must be 0. */
5530 + /* Pointer to the data. */
5533 + /** Returned GEM handle for the BO. */
5535 + /* Pad, must be 0. */
5540 + * struct drm_vc4_mmap_bo - ioctl argument for mapping VC4 BOs.
5542 + * This doesn't actually perform an mmap. Instead, it returns the
5543 + * offset you need to use in an mmap on the DRM device node. This
5544 + * means that tools like valgrind end up knowing about the mapped
5545 + * memory.
5547 + * There are currently no values for the flags argument, but it may be
5548 + * used in a future extension.
5550 +struct drm_vc4_mmap_bo {
5551 + /** Handle for the object being mapped. */
5554 + /** offset into the drm node to use for subsequent mmap call. */
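+ /* Typical BO lifecycle from userspace (sketch; assumes "fd" is the
+ * opened vc4 DRM node and that handle field names match the structs
+ * above):
+ *
+ * struct drm_vc4_create_bo create = { .size = 65536 };
+ * ioctl(fd, DRM_IOCTL_VC4_CREATE_BO, &create);
+ *
+ * struct drm_vc4_mmap_bo map = { .handle = create.handle };
+ * ioctl(fd, DRM_IOCTL_VC4_MMAP_BO, &map);
+ * void *vaddr = mmap(NULL, 65536, PROT_READ | PROT_WRITE,
+ * MAP_SHARED, fd, map.offset);
+ *
+ * struct drm_vc4_wait_bo wait = { .handle = create.handle,
+ * .timeout_ns = ~0ull };
+ * ioctl(fd, DRM_IOCTL_VC4_WAIT_BO, &wait);
+ */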
5558 +#endif /* _UAPI_VC4_DRM_H_ */