Subject: [PATCH v2] drm: omapdrm: Export correct scatterlist for TILER backed BOs

The memory of a BO backed by TILER is not contiguous, but omap_gem_map_dma_buf()
exports it as if it were. This can lead to invalid memory accesses when
another device imports such a BO.

Fix this by providing a scatterlist that correctly describes the TILER
memory layout.

Suggested-by: Matthijs van Duin <matthijsvanduin@gmail.com>
Signed-off-by: Ivaylo Dimitrov <ivo.g.dimitrov.75@gmail.com>
---
drivers/gpu/drm/omapdrm/omap_gem.c | 76 +++++++++++++++++++++++++++++++
drivers/gpu/drm/omapdrm/omap_gem.h | 3 +-
drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c | 32 ++-----------
3 files changed, 82 insertions(+), 29 deletions(-)
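
For reviewers, a note on the resulting layout: for a TILED BO the exported
table now carries one scatterlist entry per row (each omap_obj->width << fmt
bytes long, with consecutive rows tiler_stride(fmt, 0) bytes apart) instead of
a single entry claiming obj->size contiguous bytes. A minimal sketch of how an
importer would consume such a table follows; the function name and the
per-row descriptor step are hypothetical and are not part of this patch.

/*
 * Illustrative only: walk the sg_table returned by omap_gem_map_dma_buf()
 * for a TILER backed BO.  Each entry describes one row, and rows are not
 * contiguous, so the DMA address and length must be taken per entry.
 */
#include <linux/scatterlist.h>

static void example_walk_tiler_sgt(struct sg_table *sgt)
{
	struct scatterlist *sg;
	unsigned int i;

	for_each_sgtable_dma_sg(sgt, sg, i) {
		dma_addr_t row_addr = sg_dma_address(sg);
		unsigned int row_len = sg_dma_len(sg);

		/* program one DMA descriptor per row here */
		(void)row_addr;
		(void)row_len;
	}
}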

diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 97e5fe6..2ffcc37 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -862,6 +862,11 @@ static void omap_gem_unpin_locked(struct drm_gem_object *obj)
 		return;
 
 	if (refcount_dec_and_test(&omap_obj->dma_addr_cnt)) {
+		if (omap_obj->sgt) {
+			sg_free_table(omap_obj->sgt);
+			kfree(omap_obj->sgt);
+			omap_obj->sgt = NULL;
+		}
 		ret = tiler_unpin(omap_obj->block);
 		if (ret) {
 			dev_err(obj->dev->dev,
@@ -974,6 +979,77 @@ int omap_gem_put_pages(struct drm_gem_object *obj)
 	return 0;
 }
 
+struct sg_table *omap_gem_get_sg(struct drm_gem_object *obj)
+{
+	struct omap_gem_object *omap_obj = to_omap_bo(obj);
+	dma_addr_t addr;
+	struct sg_table *sgt;
+	struct scatterlist *sg;
+	unsigned int count, len, stride, i;
+	int ret;
+
+	ret = omap_gem_pin(obj, &addr);
+	if (ret)
+		return ERR_PTR(ret);
+
+	mutex_lock(&omap_obj->lock);
+
+	sgt = omap_obj->sgt;
+	if (sgt)
+		goto out;
+
+	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
+	ret = -ENOMEM;
+	if (!sgt)
+		goto out_unpin;
+
+	if (omap_obj->flags & OMAP_BO_TILED_MASK) {
+		enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
+
+		len = omap_obj->width << (int)fmt;
+		count = omap_obj->height;
+		stride = tiler_stride(fmt, 0);
+	} else {
+		len = obj->size;
+		count = 1;
+		stride = 0;
+	}
+
+	ret = sg_alloc_table(sgt, count, GFP_KERNEL);
+	if (ret)
+		goto out_free;
+
+	for_each_sg(sgt->sgl, sg, count, i) {
+		sg_set_page(sg, phys_to_page(addr), len, offset_in_page(addr));
+		sg_dma_address(sg) = addr;
+		sg_dma_len(sg) = len;
+
+		addr += stride;
+	}
+
+	omap_obj->sgt = sgt;
+out:
+	mutex_unlock(&omap_obj->lock);
+	return sgt;
+
+out_free:
+	kfree(sgt);
+out_unpin:
+	mutex_unlock(&omap_obj->lock);
+	omap_gem_unpin(obj);
+	return ERR_PTR(ret);
+}
+
+void omap_gem_put_sg(struct drm_gem_object *obj, struct sg_table *sgt)
+{
+	struct omap_gem_object *omap_obj = to_omap_bo(obj);
+
+	if (WARN_ON(omap_obj->sgt != sgt))
+		return;
+
+	omap_gem_unpin(obj);
+}
+
 #ifdef CONFIG_DRM_FBDEV_EMULATION
 /*
  * Get kernel virtual address for CPU access.. this more or less only
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.h b/drivers/gpu/drm/omapdrm/omap_gem.h
index eda9b48..3b61cfc 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.h
+++ b/drivers/gpu/drm/omapdrm/omap_gem.h
@@ -77,10 +77,11 @@ void omap_gem_dma_sync_buffer(struct drm_gem_object *obj,
 int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
 		bool remap);
 int omap_gem_put_pages(struct drm_gem_object *obj);
-
 u32 omap_gem_flags(struct drm_gem_object *obj);
 int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, u32 orient,
 		int x, int y, dma_addr_t *dma_addr);
 int omap_gem_tiled_stride(struct drm_gem_object *obj, u32 orient);
+struct sg_table *omap_gem_get_sg(struct drm_gem_object *obj);
+void omap_gem_put_sg(struct drm_gem_object *obj, struct sg_table *sgt);
 
 #endif /* __OMAPDRM_GEM_H__ */
diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
index f4cde3a..9650416 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
@@ -21,45 +21,21 @@ static struct sg_table *omap_gem_map_dma_buf(
 {
 	struct drm_gem_object *obj = attachment->dmabuf->priv;
 	struct sg_table *sg;
-	dma_addr_t dma_addr;
-	int ret;
-
-	sg = kzalloc(sizeof(*sg), GFP_KERNEL);
-	if (!sg)
-		return ERR_PTR(-ENOMEM);
-
-	/* camera, etc, need physically contiguous.. but we need a
-	 * better way to know this..
-	 */
-	ret = omap_gem_pin(obj, &dma_addr);
-	if (ret)
-		goto out;
-
-	ret = sg_alloc_table(sg, 1, GFP_KERNEL);
-	if (ret)
-		goto out;
-
-	sg_init_table(sg->sgl, 1);
-	sg_dma_len(sg->sgl) = obj->size;
-	sg_set_page(sg->sgl, pfn_to_page(PFN_DOWN(dma_addr)), obj->size, 0);
-	sg_dma_address(sg->sgl) = dma_addr;
+	sg = omap_gem_get_sg(obj);
+	if (IS_ERR(sg))
+		return sg;
 
 	/* this must be after omap_gem_pin() to ensure we have pages attached */
 	omap_gem_dma_sync_buffer(obj, dir);
 
 	return sg;
-out:
-	kfree(sg);
-	return ERR_PTR(ret);
 }
 
 static void omap_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
 		struct sg_table *sg, enum dma_data_direction dir)
 {
 	struct drm_gem_object *obj = attachment->dmabuf->priv;
-	omap_gem_unpin(obj);
-	sg_free_table(sg);
-	kfree(sg);
+	omap_gem_put_sg(obj, sg);
 }
 
 static int omap_gem_dmabuf_begin_cpu_access(struct dma_buf *buffer,
--
1.9.1