From 92f7f613ece28ecff26cbe5b5af20343bb624db1 Mon Sep 17 00:00:00 2001
From: Maxime Ripard <maxime@cerno.tech>
Date: Mon, 2 May 2022 10:20:56 +0200
Subject: [PATCH] drm/vc4: crtc: Use an union to store the page flip
 callback

We'll need to extend the vc4_async_flip_state structure to rely on
another callback implementation, so let's move the current one into a
union.

Signed-off-by: Maxime Ripard <maxime@cerno.tech>
---
 drivers/gpu/drm/vc4/vc4_crtc.c | 30 +++++++++++++++++++-----------
 1 file changed, 19 insertions(+), 11 deletions(-)

--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -818,18 +818,18 @@ struct vc4_async_flip_state {
 	struct drm_framebuffer *old_fb;
 	struct drm_pending_vblank_event *event;
 
-	struct vc4_seqno_cb cb;
-	struct dma_fence_cb fence_cb;
+	union {
+		struct dma_fence_cb fence;
+		struct vc4_seqno_cb seqno;
+	} cb;
 };
 
 /* Called when the V3D execution for the BO being flipped to is done, so that
  * we can actually update the plane's address to point to it.
  */
 static void
-vc4_async_page_flip_complete(struct vc4_seqno_cb *cb)
+vc4_async_page_flip_complete(struct vc4_async_flip_state *flip_state)
 {
-	struct vc4_async_flip_state *flip_state =
-		container_of(cb, struct vc4_async_flip_state, cb);
 	struct drm_crtc *crtc = flip_state->crtc;
 	struct drm_device *dev = crtc->dev;
 	struct drm_plane *plane = crtc->primary;
@@ -865,13 +865,21 @@ vc4_async_page_flip_complete(struct vc4_
 	kfree(flip_state);
 }
 
+static void vc4_async_page_flip_seqno_complete(struct vc4_seqno_cb *cb)
+{
+	struct vc4_async_flip_state *flip_state =
+		container_of(cb, struct vc4_async_flip_state, cb.seqno);
+
+	vc4_async_page_flip_complete(flip_state);
+}
+
 static void vc4_async_page_flip_fence_complete(struct dma_fence *fence,
 					       struct dma_fence_cb *cb)
 {
 	struct vc4_async_flip_state *flip_state =
-		container_of(cb, struct vc4_async_flip_state, fence_cb);
+		container_of(cb, struct vc4_async_flip_state, cb.fence);
 
-	vc4_async_page_flip_complete(&flip_state->cb);
+	vc4_async_page_flip_complete(flip_state);
 	dma_fence_put(fence);
 }
 
@@ -886,14 +894,14 @@ static int vc4_async_set_fence_cb(struct
 	if (!vc4->is_vc5) {
 		struct vc4_bo *bo = to_vc4_bo(&cma_bo->base);
 
-		return vc4_queue_seqno_cb(dev, &flip_state->cb, bo->seqno,
-					  vc4_async_page_flip_complete);
+		return vc4_queue_seqno_cb(dev, &flip_state->cb.seqno, bo->seqno,
+					  vc4_async_page_flip_seqno_complete);
 	}
 
 	fence = dma_fence_get(dma_resv_excl_fence(cma_bo->base.resv));
-	if (dma_fence_add_callback(fence, &flip_state->fence_cb,
+	if (dma_fence_add_callback(fence, &flip_state->cb.fence,
 				   vc4_async_page_flip_fence_complete))
-		vc4_async_page_flip_fence_complete(fence, &flip_state->fence_cb);
+		vc4_async_page_flip_fence_complete(fence, &flip_state->cb.fence);
 
 	return 0;
 }