kernel: bump 4.14 to 4.14.99
[openwrt/openwrt.git] target/linux/brcm2708/patches-4.14/950-0174-drm-vc4-Add-the-DRM_IOCTL_VC4_GEM_MADVISE-ioctl.patch
1 From 4264bba50d050577580cc6309524e3d92959fff2 Mon Sep 17 00:00:00 2001
2 From: Boris Brezillon <boris.brezillon@free-electrons.com>
3 Date: Thu, 19 Oct 2017 14:57:48 +0200
4 Subject: [PATCH 174/454] drm/vc4: Add the DRM_IOCTL_VC4_GEM_MADVISE ioctl
5
6 This ioctl will allow us to purge inactive userspace buffers when the
7 system is running out of contiguous memory.
8
9 For now, the purge logic is rather dumb in that it does not try to
10 release only the number of BOs needed to meet the last CMA alloc request
11 but instead purges all objects placed in the purgeable pool as soon as
12 we experience a CMA allocation failure.
13
14 Note that the in-kernel BO cache is always purged before the purgeable
15 cache because those objects are known to be unused while objects marked
16 as purgeable by a userspace application/library might have to be
17 restored when they are marked back as unpurgeable, which can be
18 expensive.
19
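For reference, a minimal sketch of how a userspace buffer cache might drive
the new ioctl, built on libdrm's drmIoctl() and the uapi definitions added at
the end of this patch (vc4_bo_set_madvise() is a hypothetical helper, and the
<drm/vc4_drm.h> include path assumes installed uapi headers):

#include <stdint.h>
#include <stdbool.h>
#include <xf86drm.h>            /* drmIoctl() */
#include <drm/vc4_drm.h>        /* DRM_IOCTL_VC4_GEM_MADVISE, VC4_MADV_* */

/* Mark a BO as purgeable (VC4_MADV_DONTNEED) or unpurgeable
 * (VC4_MADV_WILLNEED) and report whether its backing storage survived.
 */
static int vc4_bo_set_madvise(int fd, uint32_t handle, uint32_t madv,
                              bool *retained)
{
        struct drm_vc4_gem_madvise arg = {
                .handle = handle,
                .madv = madv,
                /* .pad and .retained must stay zero on input */
        };
        int ret;

        ret = drmIoctl(fd, DRM_IOCTL_VC4_GEM_MADVISE, &arg);
        if (ret)
                return ret;

        if (retained)
                *retained = arg.retained != 0;
        return 0;
}

/* Typical usage:
 *   cache insertion: vc4_bo_set_madvise(fd, handle, VC4_MADV_DONTNEED, NULL);
 *   cache lookup:    vc4_bo_set_madvise(fd, handle, VC4_MADV_WILLNEED, &retained);
 *                    if (!retained) the contents were purged and must be
 *                    re-generated before the BO is mapped or submitted again.
 */
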
20 Signed-off-by: Boris Brezillon <boris.brezillon@free-electrons.com>
21 Signed-off-by: Eric Anholt <eric@anholt.net>
22 Reviewed-by: Eric Anholt <eric@anholt.net>
23 Link: https://patchwork.freedesktop.org/patch/msgid/20171019125748.3152-1-boris.brezillon@free-electrons.com
24 (cherry picked from commit b9f19259b84dc648f207a46f3581d15eeaedf4b6)
25 ---
26 drivers/gpu/drm/vc4/vc4_bo.c | 287 +++++++++++++++++++++++++++++++-
27 drivers/gpu/drm/vc4/vc4_drv.c | 10 +-
28 drivers/gpu/drm/vc4/vc4_drv.h | 30 ++++
29 drivers/gpu/drm/vc4/vc4_gem.c | 156 ++++++++++++++++-
30 drivers/gpu/drm/vc4/vc4_plane.c | 20 +++
31 include/uapi/drm/vc4_drm.h | 19 +++
32 6 files changed, 507 insertions(+), 15 deletions(-)
33
34 --- a/drivers/gpu/drm/vc4/vc4_bo.c
35 +++ b/drivers/gpu/drm/vc4/vc4_bo.c
36 @@ -53,6 +53,17 @@ static void vc4_bo_stats_dump(struct vc4
37 vc4->bo_labels[i].size_allocated / 1024,
38 vc4->bo_labels[i].num_allocated);
39 }
40 +
41 + mutex_lock(&vc4->purgeable.lock);
42 + if (vc4->purgeable.num)
43 + DRM_INFO("%30s: %6zdkb BOs (%d)\n", "userspace BO cache",
44 + vc4->purgeable.size / 1024, vc4->purgeable.num);
45 +
46 + if (vc4->purgeable.purged_num)
47 + DRM_INFO("%30s: %6zdkb BOs (%d)\n", "total purged BO",
48 + vc4->purgeable.purged_size / 1024,
49 + vc4->purgeable.purged_num);
50 + mutex_unlock(&vc4->purgeable.lock);
51 }
52
53 #ifdef CONFIG_DEBUG_FS
54 @@ -75,6 +86,17 @@ int vc4_bo_stats_debugfs(struct seq_file
55 }
56 mutex_unlock(&vc4->bo_lock);
57
58 + mutex_lock(&vc4->purgeable.lock);
59 + if (vc4->purgeable.num)
60 + seq_printf(m, "%30s: %6dkb BOs (%d)\n", "userspace BO cache",
61 + vc4->purgeable.size / 1024, vc4->purgeable.num);
62 +
63 + if (vc4->purgeable.purged_num)
64 + seq_printf(m, "%30s: %6dkb BOs (%d)\n", "total purged BO",
65 + vc4->purgeable.purged_size / 1024,
66 + vc4->purgeable.purged_num);
67 + mutex_unlock(&vc4->purgeable.lock);
68 +
69 return 0;
70 }
71 #endif
72 @@ -248,6 +270,109 @@ static void vc4_bo_cache_purge(struct dr
73 mutex_unlock(&vc4->bo_lock);
74 }
75
76 +void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo)
77 +{
78 + struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);
79 +
80 + mutex_lock(&vc4->purgeable.lock);
81 + list_add_tail(&bo->size_head, &vc4->purgeable.list);
82 + vc4->purgeable.num++;
83 + vc4->purgeable.size += bo->base.base.size;
84 + mutex_unlock(&vc4->purgeable.lock);
85 +}
86 +
87 +static void vc4_bo_remove_from_purgeable_pool_locked(struct vc4_bo *bo)
88 +{
89 + struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);
90 +
91 + /* list_del_init() is used here because the caller might release
92 + * the purgeable lock in order to acquire the madv one and update the
93 + * madv status.
94 + * During this short period of time a user might decide to mark
95 + * the BO as unpurgeable, and if bo->madv is set to
96 + * VC4_MADV_DONTNEED it will try to remove the BO from the
97 + * purgeable list which will fail if the ->next/prev fields
98 + * are set to LIST_POISON1/LIST_POISON2 (which is what
99 + * list_del() does).
100 + * Re-initializing the list element guarantees that list_del()
101 + * will work correctly even if it's a NOP.
102 + */
103 + list_del_init(&bo->size_head);
104 + vc4->purgeable.num--;
105 + vc4->purgeable.size -= bo->base.base.size;
106 +}
107 +
108 +void vc4_bo_remove_from_purgeable_pool(struct vc4_bo *bo)
109 +{
110 + struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);
111 +
112 + mutex_lock(&vc4->purgeable.lock);
113 + vc4_bo_remove_from_purgeable_pool_locked(bo);
114 + mutex_unlock(&vc4->purgeable.lock);
115 +}
116 +
117 +static void vc4_bo_purge(struct drm_gem_object *obj)
118 +{
119 + struct vc4_bo *bo = to_vc4_bo(obj);
120 + struct drm_device *dev = obj->dev;
121 +
122 + WARN_ON(!mutex_is_locked(&bo->madv_lock));
123 + WARN_ON(bo->madv != VC4_MADV_DONTNEED);
124 +
125 + drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
126 +
127 + dma_free_wc(dev->dev, obj->size, bo->base.vaddr, bo->base.paddr);
128 + bo->base.vaddr = NULL;
129 + bo->madv = __VC4_MADV_PURGED;
130 +}
131 +
132 +static void vc4_bo_userspace_cache_purge(struct drm_device *dev)
133 +{
134 + struct vc4_dev *vc4 = to_vc4_dev(dev);
135 +
136 + mutex_lock(&vc4->purgeable.lock);
137 + while (!list_empty(&vc4->purgeable.list)) {
138 + struct vc4_bo *bo = list_first_entry(&vc4->purgeable.list,
139 + struct vc4_bo, size_head);
140 + struct drm_gem_object *obj = &bo->base.base;
141 + size_t purged_size = 0;
142 +
143 + vc4_bo_remove_from_purgeable_pool_locked(bo);
144 +
145 + /* Release the purgeable lock while we're purging the BO so
146 + * that other people can continue inserting things in the
147 + * purgeable pool without having to wait for all BOs to be
148 + * purged.
149 + */
150 + mutex_unlock(&vc4->purgeable.lock);
151 + mutex_lock(&bo->madv_lock);
152 +
153 + /* Since we released the purgeable pool lock before acquiring
154 + * the BO madv one, the user may have marked the BO as WILLNEED
155 + * and re-used it in the meantime.
156 + * Before purging the BO we need to make sure
157 + * - it is still marked as DONTNEED
158 + * - it has not been re-inserted in the purgeable list
159 + * - it is not used by HW blocks
160 + * If one of these conditions is not met, just skip the entry.
161 + */
162 + if (bo->madv == VC4_MADV_DONTNEED &&
163 + list_empty(&bo->size_head) &&
164 + !refcount_read(&bo->usecnt)) {
165 + purged_size = bo->base.base.size;
166 + vc4_bo_purge(obj);
167 + }
168 + mutex_unlock(&bo->madv_lock);
169 + mutex_lock(&vc4->purgeable.lock);
170 +
171 + if (purged_size) {
172 + vc4->purgeable.purged_size += purged_size;
173 + vc4->purgeable.purged_num++;
174 + }
175 + }
176 + mutex_unlock(&vc4->purgeable.lock);
177 +}
178 +
179 static struct vc4_bo *vc4_bo_get_from_cache(struct drm_device *dev,
180 uint32_t size,
181 enum vc4_kernel_bo_type type)
182 @@ -294,6 +419,9 @@ struct drm_gem_object *vc4_create_object
183 if (!bo)
184 return ERR_PTR(-ENOMEM);
185
186 + bo->madv = VC4_MADV_WILLNEED;
187 + refcount_set(&bo->usecnt, 0);
188 + mutex_init(&bo->madv_lock);
189 mutex_lock(&vc4->bo_lock);
190 bo->label = VC4_BO_TYPE_KERNEL;
191 vc4->bo_labels[VC4_BO_TYPE_KERNEL].num_allocated++;
192 @@ -331,16 +459,38 @@ struct vc4_bo *vc4_bo_create(struct drm_
193 * CMA allocations we've got laying around and try again.
194 */
195 vc4_bo_cache_purge(dev);
196 + cma_obj = drm_gem_cma_create(dev, size);
197 + }
198
199 + if (IS_ERR(cma_obj)) {
200 + /*
201 + * Still not enough CMA memory, purge the userspace BO
202 + * cache and retry.
203 + * This is sub-optimal since we purge the whole userspace
204 + * BO cache which forces user that want to re-use the BO to
205 + * restore its initial content.
206 + * Ideally, we should purge entries one by one and retry
207 + * after each to see if CMA allocation succeeds. Or even
208 + * better, try to find an entry with at least the same
209 + * size.
210 + */
211 + vc4_bo_userspace_cache_purge(dev);
212 cma_obj = drm_gem_cma_create(dev, size);
213 - if (IS_ERR(cma_obj)) {
214 - DRM_ERROR("Failed to allocate from CMA:\n");
215 - vc4_bo_stats_dump(vc4);
216 - return ERR_PTR(-ENOMEM);
217 - }
218 + }
219 +
220 + if (IS_ERR(cma_obj)) {
221 + DRM_ERROR("Failed to allocate from CMA:\n");
222 + vc4_bo_stats_dump(vc4);
223 + return ERR_PTR(-ENOMEM);
224 }
225 bo = to_vc4_bo(&cma_obj->base);
226
227 + /* By default, BOs do not support the MADV ioctl. This will be enabled
228 + * only on BOs that are exposed to userspace (V3D, V3D_SHADER and DUMB
229 + * BOs).
230 + */
231 + bo->madv = __VC4_MADV_NOTSUPP;
232 +
233 mutex_lock(&vc4->bo_lock);
234 vc4_bo_set_label(&cma_obj->base, type);
235 mutex_unlock(&vc4->bo_lock);
236 @@ -366,6 +516,8 @@ int vc4_dumb_create(struct drm_file *fil
237 if (IS_ERR(bo))
238 return PTR_ERR(bo);
239
240 + bo->madv = VC4_MADV_WILLNEED;
241 +
242 ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
243 drm_gem_object_put_unlocked(&bo->base.base);
244
245 @@ -404,6 +556,12 @@ void vc4_free_object(struct drm_gem_obje
246 struct vc4_bo *bo = to_vc4_bo(gem_bo);
247 struct list_head *cache_list;
248
249 + /* Remove the BO from the purgeable list. */
250 + mutex_lock(&bo->madv_lock);
251 + if (bo->madv == VC4_MADV_DONTNEED && !refcount_read(&bo->usecnt))
252 + vc4_bo_remove_from_purgeable_pool(bo);
253 + mutex_unlock(&bo->madv_lock);
254 +
255 mutex_lock(&vc4->bo_lock);
256 /* If the object references someone else's memory, we can't cache it.
257 */
258 @@ -419,7 +577,8 @@ void vc4_free_object(struct drm_gem_obje
259 }
260
261 /* If this object was partially constructed but CMA allocation
262 - * had failed, just free it.
263 + * had failed, just free it. Can also happen when the BO has been
264 + * purged.
265 */
266 if (!bo->base.vaddr) {
267 vc4_bo_destroy(bo);
268 @@ -439,6 +598,10 @@ void vc4_free_object(struct drm_gem_obje
269 bo->validated_shader = NULL;
270 }
271
272 + /* Reset madv and usecnt before adding the BO to the cache. */
273 + bo->madv = __VC4_MADV_NOTSUPP;
274 + refcount_set(&bo->usecnt, 0);
275 +
276 bo->t_format = false;
277 bo->free_time = jiffies;
278 list_add(&bo->size_head, cache_list);
279 @@ -463,6 +626,56 @@ static void vc4_bo_cache_time_work(struc
280 mutex_unlock(&vc4->bo_lock);
281 }
282
283 +int vc4_bo_inc_usecnt(struct vc4_bo *bo)
284 +{
285 + int ret;
286 +
287 + /* Fast path: if the BO is already retained by someone, no need to
288 + * check the madv status.
289 + */
290 + if (refcount_inc_not_zero(&bo->usecnt))
291 + return 0;
292 +
293 + mutex_lock(&bo->madv_lock);
294 + switch (bo->madv) {
295 + case VC4_MADV_WILLNEED:
296 + refcount_inc(&bo->usecnt);
297 + ret = 0;
298 + break;
299 + case VC4_MADV_DONTNEED:
300 + /* We shouldn't use a BO marked as purgeable if at least
301 + * someone else retained its content by incrementing usecnt.
302 + * Luckily the BO hasn't been purged yet, but something wrong
303 + * is happening here. Just throw an error instead of
304 + * authorizing this use case.
305 + */
306 + case __VC4_MADV_PURGED:
307 + /* We can't use a purged BO. */
308 + default:
309 + /* Invalid madv value. */
310 + ret = -EINVAL;
311 + break;
312 + }
313 + mutex_unlock(&bo->madv_lock);
314 +
315 + return ret;
316 +}
317 +
318 +void vc4_bo_dec_usecnt(struct vc4_bo *bo)
319 +{
320 + /* Fast path: if the BO is still retained by someone, no need to test
321 + * the madv value.
322 + */
323 + if (refcount_dec_not_one(&bo->usecnt))
324 + return;
325 +
326 + mutex_lock(&bo->madv_lock);
327 + if (refcount_dec_and_test(&bo->usecnt) &&
328 + bo->madv == VC4_MADV_DONTNEED)
329 + vc4_bo_add_to_purgeable_pool(bo);
330 + mutex_unlock(&bo->madv_lock);
331 +}
332 +
333 static void vc4_bo_cache_time_timer(unsigned long data)
334 {
335 struct drm_device *dev = (struct drm_device *)data;
336 @@ -482,18 +695,52 @@ struct dma_buf *
337 vc4_prime_export(struct drm_device *dev, struct drm_gem_object *obj, int flags)
338 {
339 struct vc4_bo *bo = to_vc4_bo(obj);
340 + struct dma_buf *dmabuf;
341 + int ret;
342
343 if (bo->validated_shader) {
344 DRM_DEBUG("Attempting to export shader BO\n");
345 return ERR_PTR(-EINVAL);
346 }
347
348 - return drm_gem_prime_export(dev, obj, flags);
349 + /* Note: as soon as the BO is exported it becomes unpurgeable, because
350 + * noone ever decrements the usecnt even if the reference held by the
351 + * exported BO is released. This shouldn't be a problem since we don't
352 + * expect exported BOs to be marked as purgeable.
353 + */
354 + ret = vc4_bo_inc_usecnt(bo);
355 + if (ret) {
356 + DRM_ERROR("Failed to increment BO usecnt\n");
357 + return ERR_PTR(ret);
358 + }
359 +
360 + dmabuf = drm_gem_prime_export(dev, obj, flags);
361 + if (IS_ERR(dmabuf))
362 + vc4_bo_dec_usecnt(bo);
363 +
364 + return dmabuf;
365 +}
366 +
367 +int vc4_fault(struct vm_fault *vmf)
368 +{
369 + struct vm_area_struct *vma = vmf->vma;
370 + struct drm_gem_object *obj = vma->vm_private_data;
371 + struct vc4_bo *bo = to_vc4_bo(obj);
372 +
373 + /* The only reason we would end up here is when user-space accesses
374 + * BO's memory after it's been purged.
375 + */
376 + mutex_lock(&bo->madv_lock);
377 + WARN_ON(bo->madv != __VC4_MADV_PURGED);
378 + mutex_unlock(&bo->madv_lock);
379 +
380 + return VM_FAULT_SIGBUS;
381 }
382
383 int vc4_mmap(struct file *filp, struct vm_area_struct *vma)
384 {
385 struct drm_gem_object *gem_obj;
386 + unsigned long vm_pgoff;
387 struct vc4_bo *bo;
388 int ret;
389
390 @@ -509,16 +756,36 @@ int vc4_mmap(struct file *filp, struct v
391 return -EINVAL;
392 }
393
394 + if (bo->madv != VC4_MADV_WILLNEED) {
395 + DRM_DEBUG("mmaping of %s BO not allowed\n",
396 + bo->madv == VC4_MADV_DONTNEED ?
397 + "purgeable" : "purged");
398 + return -EINVAL;
399 + }
400 +
401 /*
402 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
403 * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
404 * the whole buffer.
405 */
406 vma->vm_flags &= ~VM_PFNMAP;
407 - vma->vm_pgoff = 0;
408
409 + /* This ->vm_pgoff dance is needed to make all parties happy:
410 + * - dma_mmap_wc() uses ->vm_pgoff as an offset within the allocated
411 + * mem-region, hence the need to set it to zero (the value set by
412 + * the DRM core is a virtual offset encoding the GEM object-id)
413 + * - the mmap() core logic needs ->vm_pgoff to be restored to its
414 + * initial value before returning from this function because it
415 + * encodes the offset of this GEM in the dev->anon_inode pseudo-file
416 + * and this information will be used when we invalidate userspace
417 + * mappings with drm_vma_node_unmap() (called from vc4_gem_purge()).
418 + */
419 + vm_pgoff = vma->vm_pgoff;
420 + vma->vm_pgoff = 0;
421 ret = dma_mmap_wc(bo->base.base.dev->dev, vma, bo->base.vaddr,
422 bo->base.paddr, vma->vm_end - vma->vm_start);
423 + vma->vm_pgoff = vm_pgoff;
424 +
425 if (ret)
426 drm_gem_vm_close(vma);
427
428 @@ -582,6 +849,8 @@ int vc4_create_bo_ioctl(struct drm_devic
429 if (IS_ERR(bo))
430 return PTR_ERR(bo);
431
432 + bo->madv = VC4_MADV_WILLNEED;
433 +
434 ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
435 drm_gem_object_put_unlocked(&bo->base.base);
436
437 @@ -635,6 +904,8 @@ vc4_create_shader_bo_ioctl(struct drm_de
438 if (IS_ERR(bo))
439 return PTR_ERR(bo);
440
441 + bo->madv = VC4_MADV_WILLNEED;
442 +
443 if (copy_from_user(bo->base.vaddr,
444 (void __user *)(uintptr_t)args->data,
445 args->size)) {
446 --- a/drivers/gpu/drm/vc4/vc4_drv.c
447 +++ b/drivers/gpu/drm/vc4/vc4_drv.c
448 @@ -100,6 +100,7 @@ static int vc4_get_param_ioctl(struct dr
449 case DRM_VC4_PARAM_SUPPORTS_ETC1:
450 case DRM_VC4_PARAM_SUPPORTS_THREADED_FS:
451 case DRM_VC4_PARAM_SUPPORTS_FIXED_RCL_ORDER:
452 + case DRM_VC4_PARAM_SUPPORTS_MADVISE:
453 args->value = true;
454 break;
455 default:
456 @@ -117,6 +118,12 @@ static void vc4_lastclose(struct drm_dev
457 drm_fbdev_cma_restore_mode(vc4->fbdev);
458 }
459
460 +static const struct vm_operations_struct vc4_vm_ops = {
461 + .fault = vc4_fault,
462 + .open = drm_gem_vm_open,
463 + .close = drm_gem_vm_close,
464 +};
465 +
466 static const struct file_operations vc4_drm_fops = {
467 .owner = THIS_MODULE,
468 .open = drm_open,
469 @@ -142,6 +149,7 @@ static const struct drm_ioctl_desc vc4_d
470 DRM_IOCTL_DEF_DRV(VC4_SET_TILING, vc4_set_tiling_ioctl, DRM_RENDER_ALLOW),
471 DRM_IOCTL_DEF_DRV(VC4_GET_TILING, vc4_get_tiling_ioctl, DRM_RENDER_ALLOW),
472 DRM_IOCTL_DEF_DRV(VC4_LABEL_BO, vc4_label_bo_ioctl, DRM_RENDER_ALLOW),
473 + DRM_IOCTL_DEF_DRV(VC4_GEM_MADVISE, vc4_gem_madvise_ioctl, DRM_RENDER_ALLOW),
474 };
475
476 static struct drm_driver vc4_drm_driver = {
477 @@ -166,7 +174,7 @@ static struct drm_driver vc4_drm_driver
478
479 .gem_create_object = vc4_create_object,
480 .gem_free_object_unlocked = vc4_free_object,
481 - .gem_vm_ops = &drm_gem_cma_vm_ops,
482 + .gem_vm_ops = &vc4_vm_ops,
483
484 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
485 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
486 --- a/drivers/gpu/drm/vc4/vc4_drv.h
487 +++ b/drivers/gpu/drm/vc4/vc4_drv.h
488 @@ -77,6 +77,19 @@ struct vc4_dev {
489 /* Protects bo_cache and bo_labels. */
490 struct mutex bo_lock;
491
492 + /* Purgeable BO pool. All BOs in this pool can have their memory
493 + * reclaimed if the driver is unable to allocate new BOs. We also
494 + * keep stats related to the purge mechanism here.
495 + */
496 + struct {
497 + struct list_head list;
498 + unsigned int num;
499 + size_t size;
500 + unsigned int purged_num;
501 + size_t purged_size;
502 + struct mutex lock;
503 + } purgeable;
504 +
505 uint64_t dma_fence_context;
506
507 /* Sequence number for the last job queued in bin_job_list.
508 @@ -195,6 +208,16 @@ struct vc4_bo {
509 * for user-allocated labels.
510 */
511 int label;
512 +
513 + /* Count the number of active users. This is needed to determine
514 + * whether we can move the BO to the purgeable list or not (when the BO
515 + * is used by the GPU or the display engine we can't purge it).
516 + */
517 + refcount_t usecnt;
518 +
519 + /* Store purgeable/purged state here */
520 + u32 madv;
521 + struct mutex madv_lock;
522 };
523
524 static inline struct vc4_bo *
525 @@ -506,6 +529,7 @@ int vc4_get_hang_state_ioctl(struct drm_
526 struct drm_file *file_priv);
527 int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
528 struct drm_file *file_priv);
529 +int vc4_fault(struct vm_fault *vmf);
530 int vc4_mmap(struct file *filp, struct vm_area_struct *vma);
531 struct reservation_object *vc4_prime_res_obj(struct drm_gem_object *obj);
532 int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
533 @@ -516,6 +540,10 @@ void *vc4_prime_vmap(struct drm_gem_obje
534 int vc4_bo_cache_init(struct drm_device *dev);
535 void vc4_bo_cache_destroy(struct drm_device *dev);
536 int vc4_bo_stats_debugfs(struct seq_file *m, void *arg);
537 +int vc4_bo_inc_usecnt(struct vc4_bo *bo);
538 +void vc4_bo_dec_usecnt(struct vc4_bo *bo);
539 +void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo);
540 +void vc4_bo_remove_from_purgeable_pool(struct vc4_bo *bo);
541
542 /* vc4_crtc.c */
543 extern struct platform_driver vc4_crtc_driver;
544 @@ -564,6 +592,8 @@ void vc4_job_handle_completed(struct vc4
545 int vc4_queue_seqno_cb(struct drm_device *dev,
546 struct vc4_seqno_cb *cb, uint64_t seqno,
547 void (*func)(struct vc4_seqno_cb *cb));
548 +int vc4_gem_madvise_ioctl(struct drm_device *dev, void *data,
549 + struct drm_file *file_priv);
550
551 /* vc4_hdmi.c */
552 extern struct platform_driver vc4_hdmi_driver;
553 --- a/drivers/gpu/drm/vc4/vc4_gem.c
554 +++ b/drivers/gpu/drm/vc4/vc4_gem.c
555 @@ -188,11 +188,22 @@ vc4_save_hang_state(struct drm_device *d
556 continue;
557
558 for (j = 0; j < exec[i]->bo_count; j++) {
559 + bo = to_vc4_bo(&exec[i]->bo[j]->base);
560 +
561 + /* Retain BOs just in case they were marked purgeable.
562 + * This prevents the BO from being purged before
563 + * someone had a chance to dump the hang state.
564 + */
565 + WARN_ON(!refcount_read(&bo->usecnt));
566 + refcount_inc(&bo->usecnt);
567 drm_gem_object_get(&exec[i]->bo[j]->base);
568 kernel_state->bo[k++] = &exec[i]->bo[j]->base;
569 }
570
571 list_for_each_entry(bo, &exec[i]->unref_list, unref_head) {
572 + /* No need to retain BOs coming from the ->unref_list
573 + * because they are naturally unpurgeable.
574 + */
575 drm_gem_object_get(&bo->base.base);
576 kernel_state->bo[k++] = &bo->base.base;
577 }
578 @@ -233,6 +244,26 @@ vc4_save_hang_state(struct drm_device *d
579 state->fdbgs = V3D_READ(V3D_FDBGS);
580 state->errstat = V3D_READ(V3D_ERRSTAT);
581
582 + /* We need to turn purgeable BOs into unpurgeable ones so that
583 + * userspace has a chance to dump the hang state before the kernel
584 + * decides to purge those BOs.
585 + * Note that BO consistency at dump time cannot be guaranteed. For
586 + * example, if the owner of these BOs decides to re-use them or mark
587 + * them purgeable again there's nothing we can do to prevent it.
588 + */
589 + for (i = 0; i < kernel_state->user_state.bo_count; i++) {
590 + struct vc4_bo *bo = to_vc4_bo(kernel_state->bo[i]);
591 +
592 + if (bo->madv == __VC4_MADV_NOTSUPP)
593 + continue;
594 +
595 + mutex_lock(&bo->madv_lock);
596 + if (!WARN_ON(bo->madv == __VC4_MADV_PURGED))
597 + bo->madv = VC4_MADV_WILLNEED;
598 + refcount_dec(&bo->usecnt);
599 + mutex_unlock(&bo->madv_lock);
600 + }
601 +
602 spin_lock_irqsave(&vc4->job_lock, irqflags);
603 if (vc4->hang_state) {
604 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
605 @@ -639,9 +670,6 @@ vc4_queue_submit(struct drm_device *dev,
606 * The command validator needs to reference BOs by their index within
607 * the submitted job's BO list. This does the validation of the job's
608 * BO list and reference counting for the lifetime of the job.
609 - *
610 - * Note that this function doesn't need to unreference the BOs on
611 - * failure, because that will happen at vc4_complete_exec() time.
612 */
613 static int
614 vc4_cl_lookup_bos(struct drm_device *dev,
615 @@ -693,16 +721,47 @@ vc4_cl_lookup_bos(struct drm_device *dev
616 DRM_DEBUG("Failed to look up GEM BO %d: %d\n",
617 i, handles[i]);
618 ret = -EINVAL;
619 - spin_unlock(&file_priv->table_lock);
620 - goto fail;
621 + break;
622 }
623 +
624 drm_gem_object_get(bo);
625 exec->bo[i] = (struct drm_gem_cma_object *)bo;
626 }
627 spin_unlock(&file_priv->table_lock);
628
629 + if (ret)
630 + goto fail_put_bo;
631 +
632 + for (i = 0; i < exec->bo_count; i++) {
633 + ret = vc4_bo_inc_usecnt(to_vc4_bo(&exec->bo[i]->base));
634 + if (ret)
635 + goto fail_dec_usecnt;
636 + }
637 +
638 + kvfree(handles);
639 + return 0;
640 +
641 +fail_dec_usecnt:
642 + /* Decrease usecnt on acquired objects.
643 + * We cannot rely on vc4_complete_exec() to release resources here,
644 + * because vc4_complete_exec() has no information about which BO has
645 + * had its ->usecnt incremented.
646 + * To make things easier we just free everything explicitly and set
647 + * exec->bo to NULL so that vc4_complete_exec() skips the 'BO release'
648 + * step.
649 + */
650 + for (i-- ; i >= 0; i--)
651 + vc4_bo_dec_usecnt(to_vc4_bo(&exec->bo[i]->base));
652 +
653 +fail_put_bo:
654 + /* Release any reference to acquired objects. */
655 + for (i = 0; i < exec->bo_count && exec->bo[i]; i++)
656 + drm_gem_object_put_unlocked(&exec->bo[i]->base);
657 +
658 fail:
659 kvfree(handles);
660 + kvfree(exec->bo);
661 + exec->bo = NULL;
662 return ret;
663 }
664
665 @@ -835,8 +894,12 @@ vc4_complete_exec(struct drm_device *dev
666 }
667
668 if (exec->bo) {
669 - for (i = 0; i < exec->bo_count; i++)
670 + for (i = 0; i < exec->bo_count; i++) {
671 + struct vc4_bo *bo = to_vc4_bo(&exec->bo[i]->base);
672 +
673 + vc4_bo_dec_usecnt(bo);
674 drm_gem_object_put_unlocked(&exec->bo[i]->base);
675 + }
676 kvfree(exec->bo);
677 }
678
679 @@ -1100,6 +1163,9 @@ vc4_gem_init(struct drm_device *dev)
680 INIT_WORK(&vc4->job_done_work, vc4_job_done_work);
681
682 mutex_init(&vc4->power_lock);
683 +
684 + INIT_LIST_HEAD(&vc4->purgeable.list);
685 + mutex_init(&vc4->purgeable.lock);
686 }
687
688 void
689 @@ -1123,3 +1189,81 @@ vc4_gem_destroy(struct drm_device *dev)
690 if (vc4->hang_state)
691 vc4_free_hang_state(dev, vc4->hang_state);
692 }
693 +
694 +int vc4_gem_madvise_ioctl(struct drm_device *dev, void *data,
695 + struct drm_file *file_priv)
696 +{
697 + struct drm_vc4_gem_madvise *args = data;
698 + struct drm_gem_object *gem_obj;
699 + struct vc4_bo *bo;
700 + int ret;
701 +
702 + switch (args->madv) {
703 + case VC4_MADV_DONTNEED:
704 + case VC4_MADV_WILLNEED:
705 + break;
706 + default:
707 + return -EINVAL;
708 + }
709 +
710 + if (args->pad != 0)
711 + return -EINVAL;
712 +
713 + gem_obj = drm_gem_object_lookup(file_priv, args->handle);
714 + if (!gem_obj) {
715 + DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
716 + return -ENOENT;
717 + }
718 +
719 + bo = to_vc4_bo(gem_obj);
720 +
721 + /* Only BOs exposed to userspace can be purged. */
722 + if (bo->madv == __VC4_MADV_NOTSUPP) {
723 + DRM_DEBUG("madvise not supported on this BO\n");
724 + ret = -EINVAL;
725 + goto out_put_gem;
726 + }
727 +
728 + /* Not sure it's safe to purge imported BOs. Let's just assume it's
729 + * not until proven otherwise.
730 + */
731 + if (gem_obj->import_attach) {
732 + DRM_DEBUG("madvise not supported on imported BOs\n");
733 + ret = -EINVAL;
734 + goto out_put_gem;
735 + }
736 +
737 + mutex_lock(&bo->madv_lock);
738 +
739 + if (args->madv == VC4_MADV_DONTNEED && bo->madv == VC4_MADV_WILLNEED &&
740 + !refcount_read(&bo->usecnt)) {
741 + /* If the BO is about to be marked as purgeable, is not used
742 + * and is not already purgeable or purged, add it to the
743 + * purgeable list.
744 + */
745 + vc4_bo_add_to_purgeable_pool(bo);
746 + } else if (args->madv == VC4_MADV_WILLNEED &&
747 + bo->madv == VC4_MADV_DONTNEED &&
748 + !refcount_read(&bo->usecnt)) {
749 + /* The BO has not been purged yet, just remove it from
750 + * the purgeable list.
751 + */
752 + vc4_bo_remove_from_purgeable_pool(bo);
753 + }
754 +
755 + /* Save the purged state. */
756 + args->retained = bo->madv != __VC4_MADV_PURGED;
757 +
758 + /* Update internal madv state only if the bo was not purged. */
759 + if (bo->madv != __VC4_MADV_PURGED)
760 + bo->madv = args->madv;
761 +
762 + mutex_unlock(&bo->madv_lock);
763 +
764 + ret = 0;
765 +
766 +out_put_gem:
767 + drm_gem_object_put_unlocked(gem_obj);
768 +
769 + return ret;
770 +}
771 --- a/drivers/gpu/drm/vc4/vc4_plane.c
772 +++ b/drivers/gpu/drm/vc4/vc4_plane.c
773 @@ -23,6 +23,7 @@
774 #include <drm/drm_fb_cma_helper.h>
775 #include <drm/drm_plane_helper.h>
776
777 +#include "uapi/drm/vc4_drm.h"
778 #include "vc4_drv.h"
779 #include "vc4_regs.h"
780
781 @@ -779,21 +780,40 @@ static int vc4_prepare_fb(struct drm_pla
782 {
783 struct vc4_bo *bo;
784 struct dma_fence *fence;
785 + int ret;
786
787 if ((plane->state->fb == state->fb) || !state->fb)
788 return 0;
789
790 bo = to_vc4_bo(&drm_fb_cma_get_gem_obj(state->fb, 0)->base);
791 +
792 + ret = vc4_bo_inc_usecnt(bo);
793 + if (ret)
794 + return ret;
795 +
796 fence = reservation_object_get_excl_rcu(bo->resv);
797 drm_atomic_set_fence_for_plane(state, fence);
798
799 return 0;
800 }
801
802 +static void vc4_cleanup_fb(struct drm_plane *plane,
803 + struct drm_plane_state *state)
804 +{
805 + struct vc4_bo *bo;
806 +
807 + if (plane->state->fb == state->fb || !state->fb)
808 + return;
809 +
810 + bo = to_vc4_bo(&drm_fb_cma_get_gem_obj(state->fb, 0)->base);
811 + vc4_bo_dec_usecnt(bo);
812 +}
813 +
814 static const struct drm_plane_helper_funcs vc4_plane_helper_funcs = {
815 .atomic_check = vc4_plane_atomic_check,
816 .atomic_update = vc4_plane_atomic_update,
817 .prepare_fb = vc4_prepare_fb,
818 + .cleanup_fb = vc4_cleanup_fb,
819 };
820
821 static void vc4_plane_destroy(struct drm_plane *plane)
822 --- a/include/uapi/drm/vc4_drm.h
823 +++ b/include/uapi/drm/vc4_drm.h
824 @@ -41,6 +41,7 @@ extern "C" {
825 #define DRM_VC4_SET_TILING 0x08
826 #define DRM_VC4_GET_TILING 0x09
827 #define DRM_VC4_LABEL_BO 0x0a
828 +#define DRM_VC4_GEM_MADVISE 0x0b
829
830 #define DRM_IOCTL_VC4_SUBMIT_CL DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_SUBMIT_CL, struct drm_vc4_submit_cl)
831 #define DRM_IOCTL_VC4_WAIT_SEQNO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_WAIT_SEQNO, struct drm_vc4_wait_seqno)
832 @@ -53,6 +54,7 @@ extern "C" {
833 #define DRM_IOCTL_VC4_SET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_SET_TILING, struct drm_vc4_set_tiling)
834 #define DRM_IOCTL_VC4_GET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GET_TILING, struct drm_vc4_get_tiling)
835 #define DRM_IOCTL_VC4_LABEL_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_LABEL_BO, struct drm_vc4_label_bo)
836 +#define DRM_IOCTL_VC4_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GEM_MADVISE, struct drm_vc4_gem_madvise)
837
838 struct drm_vc4_submit_rcl_surface {
839 __u32 hindex; /* Handle index, or ~0 if not present. */
840 @@ -305,6 +307,7 @@ struct drm_vc4_get_hang_state {
841 #define DRM_VC4_PARAM_SUPPORTS_ETC1 4
842 #define DRM_VC4_PARAM_SUPPORTS_THREADED_FS 5
843 #define DRM_VC4_PARAM_SUPPORTS_FIXED_RCL_ORDER 6
844 +#define DRM_VC4_PARAM_SUPPORTS_MADVISE 7
845
846 struct drm_vc4_get_param {
847 __u32 param;
848 @@ -333,6 +336,22 @@ struct drm_vc4_label_bo {
849 __u64 name;
850 };
851
852 +/*
853 + * States prefixed with '__' are internal states and cannot be passed to the
854 + * DRM_IOCTL_VC4_GEM_MADVISE ioctl.
855 + */
856 +#define VC4_MADV_WILLNEED 0
857 +#define VC4_MADV_DONTNEED 1
858 +#define __VC4_MADV_PURGED 2
859 +#define __VC4_MADV_NOTSUPP 3
860 +
861 +struct drm_vc4_gem_madvise {
862 + __u32 handle;
863 + __u32 madv;
864 + __u32 retained;
865 + __u32 pad;
866 +};
867 +
868 #if defined(__cplusplus)
869 }
870 #endif
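
Userspace is expected to probe for madvise support before relying on
purgeable BOs; the DRM_VC4_PARAM_SUPPORTS_MADVISE parameter added above is
there for exactly that. A minimal detection sketch, again assuming libdrm's
drmIoctl() (vc4_supports_madvise() is a hypothetical helper):

#include <stdbool.h>
#include <xf86drm.h>
#include <drm/vc4_drm.h>

/* Returns true when the kernel advertises DRM_VC4_PARAM_SUPPORTS_MADVISE.
 * Older kernels reject the unknown parameter, so a failing GET_PARAM call
 * means the madvise ioctl is unavailable and BOs must be treated as
 * permanently unpurgeable.
 */
static bool vc4_supports_madvise(int fd)
{
        struct drm_vc4_get_param arg = {
                .param = DRM_VC4_PARAM_SUPPORTS_MADVISE,
        };

        if (drmIoctl(fd, DRM_IOCTL_VC4_GET_PARAM, &arg))
                return false;

        return arg.value != 0;
}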