1 From 645f9aea7c4c7880059f87a715a8bdd004ef9604 Mon Sep 17 00:00:00 2001
2 From: Eric Anholt <eric@anholt.net>
3 Date: Mon, 28 Dec 2015 13:25:41 -0800
4 Subject: [PATCH 281/381] drm/vc4: Make the CRTCs cooperate on allocating display lists.
7 So far, we've only ever lit up one CRTC, so this has been fine. To
8 extend to more displays or more planes, we need to make sure we don't
9 run our display lists into each other.
11 Signed-off-by: Eric Anholt <eric@anholt.net>
12 (cherry picked from commit d8dbf44f13b91185c618219d912b246817a8d132)
14 drivers/gpu/drm/vc4/vc4_crtc.c | 115 +++++++++++++++++++++++------------------
15 drivers/gpu/drm/vc4/vc4_drv.h | 8 ++-
16 drivers/gpu/drm/vc4/vc4_hvs.c | 13 +++++
17 3 files changed, 84 insertions(+), 52 deletions(-)
19 --- a/drivers/gpu/drm/vc4/vc4_crtc.c
20 +++ b/drivers/gpu/drm/vc4/vc4_crtc.c
21 @@ -49,22 +49,27 @@ struct vc4_crtc {
22 /* Which HVS channel we're using for our CRTC. */
25 - /* Pointer to the actual hardware display list memory for the
30 - u32 dlist_size; /* in dwords */
32 struct drm_pending_vblank_event *event;
35 +struct vc4_crtc_state {
36 + struct drm_crtc_state base;
37 + /* Dlist area for this CRTC configuration. */
38 + struct drm_mm_node mm;
41 static inline struct vc4_crtc *
42 to_vc4_crtc(struct drm_crtc *crtc)
44 return (struct vc4_crtc *)crtc;
47 +static inline struct vc4_crtc_state *
48 +to_vc4_crtc_state(struct drm_crtc_state *crtc_state)
50 + return (struct vc4_crtc_state *)crtc_state;
53 struct vc4_crtc_data {
54 /* Which channel of the HVS this pixelvalve sources from. */
56 @@ -319,11 +324,13 @@ static void vc4_crtc_enable(struct drm_c
57 static int vc4_crtc_atomic_check(struct drm_crtc *crtc,
58 struct drm_crtc_state *state)
60 + struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(state);
61 struct drm_device *dev = crtc->dev;
62 struct vc4_dev *vc4 = to_vc4_dev(dev);
63 struct drm_plane *plane;
64 - struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
65 + unsigned long flags;
69 /* The pixelvalve can only feed one encoder (and encoders are
70 * 1:1 with connectors.)
71 @@ -346,18 +353,12 @@ static int vc4_crtc_atomic_check(struct
73 dlist_count++; /* Account for SCALER_CTL0_END. */
75 - if (!vc4_crtc->dlist || dlist_count > vc4_crtc->dlist_size) {
76 - vc4_crtc->dlist = ((u32 __iomem *)vc4->hvs->dlist +
77 - HVS_BOOTLOADER_DLIST_END);
78 - vc4_crtc->dlist_size = ((SCALER_DLIST_SIZE >> 2) -
79 - HVS_BOOTLOADER_DLIST_END);
81 - if (dlist_count > vc4_crtc->dlist_size) {
82 - DRM_DEBUG_KMS("dlist too large for CRTC (%d > %d).\n",
83 - dlist_count, vc4_crtc->dlist_size);
87 + spin_lock_irqsave(&vc4->hvs->mm_lock, flags);
88 + ret = drm_mm_insert_node(&vc4->hvs->dlist_mm, &vc4_state->mm,
90 + spin_unlock_irqrestore(&vc4->hvs->mm_lock, flags);
96 @@ -368,47 +369,29 @@ static void vc4_crtc_atomic_flush(struct
97 struct drm_device *dev = crtc->dev;
98 struct vc4_dev *vc4 = to_vc4_dev(dev);
99 struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
100 + struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
101 struct drm_plane *plane;
102 bool debug_dump_regs = false;
103 - u32 __iomem *dlist_next = vc4_crtc->dlist;
104 + u32 __iomem *dlist_start = vc4->hvs->dlist + vc4_state->mm.start;
105 + u32 __iomem *dlist_next = dlist_start;
107 if (debug_dump_regs) {
108 DRM_INFO("CRTC %d HVS before:\n", drm_crtc_index(crtc));
109 vc4_hvs_dump_state(dev);
112 - /* Copy all the active planes' dlist contents to the hardware dlist.
114 - * XXX: If the new display list was large enough that it
115 - * overlapped a currently-read display list, we need to do
116 - * something like disable scanout before putting in the new
117 - * list. For now, we're safe because we only have the two
120 + /* Copy all the active planes' dlist contents to the hardware dlist. */
121 drm_atomic_crtc_for_each_plane(plane, crtc) {
122 dlist_next += vc4_plane_write_dlist(plane, dlist_next);
125 - if (dlist_next == vc4_crtc->dlist) {
126 - /* If no planes were enabled, use the SCALER_CTL0_END
127 - * at the start of the display list memory (in the
128 - * bootloader section). We'll rewrite that
129 - * SCALER_CTL0_END, just in case, though.
131 - writel(SCALER_CTL0_END, vc4->hvs->dlist);
132 - HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel), 0);
134 - writel(SCALER_CTL0_END, dlist_next);
137 - HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
138 - (u32 __iomem *)vc4_crtc->dlist -
139 - (u32 __iomem *)vc4->hvs->dlist);
141 - /* Make the next display list start after ours. */
142 - vc4_crtc->dlist_size -= (dlist_next - vc4_crtc->dlist);
143 - vc4_crtc->dlist = dlist_next;
145 + writel(SCALER_CTL0_END, dlist_next);
148 + WARN_ON_ONCE(dlist_next - dlist_start != vc4_state->mm.size);
150 + HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
151 + vc4_state->mm.start);
153 if (debug_dump_regs) {
154 DRM_INFO("CRTC %d HVS after:\n", drm_crtc_index(crtc));
155 @@ -573,6 +556,36 @@ static int vc4_page_flip(struct drm_crtc
156 return drm_atomic_helper_page_flip(crtc, fb, event, flags);
159 +static struct drm_crtc_state *vc4_crtc_duplicate_state(struct drm_crtc *crtc)
161 + struct vc4_crtc_state *vc4_state;
163 + vc4_state = kzalloc(sizeof(*vc4_state), GFP_KERNEL);
167 + __drm_atomic_helper_crtc_duplicate_state(crtc, &vc4_state->base);
168 + return &vc4_state->base;
171 +static void vc4_crtc_destroy_state(struct drm_crtc *crtc,
172 + struct drm_crtc_state *state)
174 + struct vc4_dev *vc4 = to_vc4_dev(crtc->dev);
175 + struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(state);
177 + if (vc4_state->mm.allocated) {
178 + unsigned long flags;
180 + spin_lock_irqsave(&vc4->hvs->mm_lock, flags);
181 + drm_mm_remove_node(&vc4_state->mm);
182 + spin_unlock_irqrestore(&vc4->hvs->mm_lock, flags);
186 + __drm_atomic_helper_crtc_destroy_state(crtc, state);
189 static const struct drm_crtc_funcs vc4_crtc_funcs = {
190 .set_config = drm_atomic_helper_set_config,
191 .destroy = vc4_crtc_destroy,
192 @@ -581,8 +594,8 @@ static const struct drm_crtc_funcs vc4_c
193 .cursor_set = NULL, /* handled by drm_mode_cursor_universal */
194 .cursor_move = NULL, /* handled by drm_mode_cursor_universal */
195 .reset = drm_atomic_helper_crtc_reset,
196 - .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
197 - .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
198 + .atomic_duplicate_state = vc4_crtc_duplicate_state,
199 + .atomic_destroy_state = vc4_crtc_destroy_state,
202 static const struct drm_crtc_helper_funcs vc4_crtc_helper_funcs = {
203 --- a/drivers/gpu/drm/vc4/vc4_drv.h
204 +++ b/drivers/gpu/drm/vc4/vc4_drv.h
205 @@ -150,7 +150,13 @@ struct vc4_v3d {
207 struct platform_device *pdev;
209 - void __iomem *dlist;
210 + u32 __iomem *dlist;
212 + /* Memory manager for CRTCs to allocate space in the display
213 + * list. Units are dwords.
215 + struct drm_mm dlist_mm;
216 + spinlock_t mm_lock;
220 --- a/drivers/gpu/drm/vc4/vc4_hvs.c
221 +++ b/drivers/gpu/drm/vc4/vc4_hvs.c
222 @@ -119,6 +119,17 @@ static int vc4_hvs_bind(struct device *d
224 hvs->dlist = hvs->regs + SCALER_DLIST_START;
226 + spin_lock_init(&hvs->mm_lock);
228 + /* Set up the HVS display list memory manager. We never
229 + * overwrite the setup from the bootloader (just 128b out of
230 + * our 16K), since we don't want to scramble the screen when
231 + * transitioning from the firmware's boot setup to runtime.
233 + drm_mm_init(&hvs->dlist_mm,
234 + HVS_BOOTLOADER_DLIST_END,
235 + (SCALER_DLIST_SIZE >> 2) - HVS_BOOTLOADER_DLIST_END);
240 @@ -129,6 +140,8 @@ static void vc4_hvs_unbind(struct device
241 struct drm_device *drm = dev_get_drvdata(master);
242 struct vc4_dev *vc4 = drm->dev_private;
244 + drm_mm_takedown(&vc4->hvs->dlist_mm);