Subject: Re: [PATCH v4 06/21] drm/i915: Prepare to dynamic dma-buf locking specification
From: Christian König <christian.koenig@amd.com>
Date: 2022-09-01
On 31.08.22 17:37, Dmitry Osipenko wrote:
> Prepare the i915 driver for the common dynamic dma-buf locking convention
> by starting to use the unlocked versions of the dma-buf API functions and
> by handling the cases where the importer now holds the reservation lock.
>
> Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>

Acked-by: Christian König <christian.koenig@amd.com>, but it's probably
best if somebody on the Intel side takes a look as well.
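
For readers new to the convention: under the dynamic locking rules the plain
dma-buf calls (dma_buf_map_attachment(), dma_buf_vmap(), ...) expect the
caller to already hold the buffer's reservation lock, while the new _unlocked
wrappers take and drop it internally. A minimal sketch of the two
importer-side call styles (illustrative only, "attach" is a placeholder
attachment and error handling is omitted):

	struct sg_table *sgt;

	/* Importer manages the reservation lock itself ... */
	dma_resv_lock(attach->dmabuf->resv, NULL);
	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	/* ... consume the sg_table while the lock is held ... */
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
	dma_resv_unlock(attach->dmabuf->resv);

	/* ... or lets the _unlocked wrappers take the lock internally. */
	sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
	dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL);

This is also why the i915_gem_dmabuf_vmap() hunk below can drop the _unlocked
pin: the ->vmap callback is now entered with the object's reservation already
held by the caller, so i915_gem_object_pin_map() is the right variant there.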

> ---
> drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c | 2 +-
> drivers/gpu/drm/i915/gem/i915_gem_object.c | 12 ++++++++++++
> .../gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c | 16 ++++++++--------
> 3 files changed, 21 insertions(+), 9 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
> index f5062d0c6333..07eee1c09aaf 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
> @@ -72,7 +72,7 @@ static int i915_gem_dmabuf_vmap(struct dma_buf *dma_buf,
> struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
> void *vaddr;
>
> - vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
> + vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
> if (IS_ERR(vaddr))
> return PTR_ERR(vaddr);
>
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
> index 389e9f157ca5..7e2a9b02526c 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
> @@ -331,7 +331,19 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
> continue;
> }
>
> + /*
> + * dma_buf_unmap_attachment() requires reservation to be
> + * locked. The imported GEM shouldn't share reservation lock,
> + * so it's safe to take the lock.
> + */
> + if (obj->base.import_attach)
> + i915_gem_object_lock(obj, NULL);
> +
> __i915_gem_object_pages_fini(obj);
> +
> + if (obj->base.import_attach)
> + i915_gem_object_unlock(obj);
> +
> __i915_gem_free_object(obj);
>
> /* But keep the pointer alive for RCU-protected lookups */
> diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
> index 62c61af77a42..9e3ed634aa0e 100644
> --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
> +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
> @@ -213,7 +213,7 @@ static int igt_dmabuf_import_same_driver(struct drm_i915_private *i915,
> goto out_import;
> }
>
> - st = dma_buf_map_attachment(import_attach, DMA_BIDIRECTIONAL);
> + st = dma_buf_map_attachment_unlocked(import_attach, DMA_BIDIRECTIONAL);
> if (IS_ERR(st)) {
> err = PTR_ERR(st);
> goto out_detach;
> @@ -226,7 +226,7 @@ static int igt_dmabuf_import_same_driver(struct drm_i915_private *i915,
> timeout = -ETIME;
> }
> err = timeout > 0 ? 0 : timeout;
> - dma_buf_unmap_attachment(import_attach, st, DMA_BIDIRECTIONAL);
> + dma_buf_unmap_attachment_unlocked(import_attach, st, DMA_BIDIRECTIONAL);
> out_detach:
> dma_buf_detach(dmabuf, import_attach);
> out_import:
> @@ -296,7 +296,7 @@ static int igt_dmabuf_import(void *arg)
> goto out_obj;
> }
>
> - err = dma_buf_vmap(dmabuf, &map);
> + err = dma_buf_vmap_unlocked(dmabuf, &map);
> dma_map = err ? NULL : map.vaddr;
> if (!dma_map) {
> pr_err("dma_buf_vmap failed\n");
> @@ -337,7 +337,7 @@ static int igt_dmabuf_import(void *arg)
>
> err = 0;
> out_dma_map:
> - dma_buf_vunmap(dmabuf, &map);
> + dma_buf_vunmap_unlocked(dmabuf, &map);
> out_obj:
> i915_gem_object_put(obj);
> out_dmabuf:
> @@ -358,7 +358,7 @@ static int igt_dmabuf_import_ownership(void *arg)
> if (IS_ERR(dmabuf))
> return PTR_ERR(dmabuf);
>
> - err = dma_buf_vmap(dmabuf, &map);
> + err = dma_buf_vmap_unlocked(dmabuf, &map);
> ptr = err ? NULL : map.vaddr;
> if (!ptr) {
> pr_err("dma_buf_vmap failed\n");
> @@ -367,7 +367,7 @@ static int igt_dmabuf_import_ownership(void *arg)
> }
>
> memset(ptr, 0xc5, PAGE_SIZE);
> - dma_buf_vunmap(dmabuf, &map);
> + dma_buf_vunmap_unlocked(dmabuf, &map);
>
> obj = to_intel_bo(i915_gem_prime_import(&i915->drm, dmabuf));
> if (IS_ERR(obj)) {
> @@ -418,7 +418,7 @@ static int igt_dmabuf_export_vmap(void *arg)
> }
> i915_gem_object_put(obj);
>
> - err = dma_buf_vmap(dmabuf, &map);
> + err = dma_buf_vmap_unlocked(dmabuf, &map);
> ptr = err ? NULL : map.vaddr;
> if (!ptr) {
> pr_err("dma_buf_vmap failed\n");
> @@ -435,7 +435,7 @@ static int igt_dmabuf_export_vmap(void *arg)
> memset(ptr, 0xc5, dmabuf->size);
>
> err = 0;
> - dma_buf_vunmap(dmabuf, &map);
> + dma_buf_vunmap_unlocked(dmabuf, &map);
> out:
> dma_buf_put(dmabuf);
> return err;
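
In the selftest hunks above the _unlocked variants are used because the tests
call into dma-buf without holding any reservation themselves. Roughly speaking
(a simplified sketch, not the literal implementation, error paths omitted),
such a wrapper just brackets the locked call with the reservation lock:

	static int example_vmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map)
	{
		int ret;

		/* Take the reservation that the locked dma_buf_vmap() expects. */
		dma_resv_lock(dmabuf->resv, NULL);
		ret = dma_buf_vmap(dmabuf, map);
		dma_resv_unlock(dmabuf->resv);

		return ret;
	}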
