From: Weizhao Ouyang <o451686892@gmail.com>
Subject: [PATCH RESEND] dma-buf: heaps: Fix mutex lock area and generalize struct dma_heap_attachment
Date: Wed, 29 Dec 2021

Extend the cma_heap_buffer mutex lock area so that the vmap_cnt check
and the vaddr access in the begin/end_cpu_access paths are protected;
both fields are already updated under buffer->lock in the vmap and
vunmap paths. Also move struct dma_heap_attachment to dma-heap.h so
that vendor dma heaps can reuse it, mirroring how struct
dma_buf_attachment is shared.

Fixes: a5d2d29e24be ("dma-buf: heaps: Move heap-helper logic into the cma_heap implementation")
Signed-off-by: Weizhao Ouyang <o451686892@gmail.com>
---
Resend to correct email addresses.
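
Note for reviewers: the lock has to cover the vmap_cnt check because the
vmap/vunmap callbacks update vmap_cnt and vaddr while holding
buffer->lock. A simplified illustration of that writer side (roughly what
the existing cma_heap vmap callback does; error handling omitted, not
part of this patch):

	/* writer side: first vmap user maps the buffer, later users reuse vaddr */
	mutex_lock(&buffer->lock);
	if (!buffer->vmap_cnt)
		buffer->vaddr = cma_heap_do_vmap(buffer);
	buffer->vmap_cnt++;
	mutex_unlock(&buffer->lock);

Without taking the same lock, begin/end_cpu_access could observe a
non-zero vmap_cnt while vaddr is still being set up or torn down.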

 drivers/dma-buf/heaps/cma_heap.c    | 25 ++++++++++---------------
 drivers/dma-buf/heaps/system_heap.c | 12 ++----------
 include/linux/dma-heap.h            | 15 +++++++++++++++
 3 files changed, 27 insertions(+), 25 deletions(-)
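
To show the intended reuse, here is a rough sketch of how a vendor heap
could now pick up struct dma_heap_attachment from <linux/dma-heap.h>
instead of redefining it. The heap name, buffer type and the dup-sg-table
helper below are hypothetical placeholders, not part of this patch; the
flow mirrors system_heap_attach():

	#include <linux/dma-buf.h>
	#include <linux/dma-heap.h>
	#include <linux/err.h>
	#include <linux/slab.h>

	static int vendor_heap_attach(struct dma_buf *dmabuf,
				      struct dma_buf_attachment *attachment)
	{
		struct vendor_heap_buffer *buffer = dmabuf->priv;	/* hypothetical buffer type */
		struct dma_heap_attachment *a;				/* shared type from dma-heap.h */
		struct sg_table *table;

		a = kzalloc(sizeof(*a), GFP_KERNEL);
		if (!a)
			return -ENOMEM;

		table = vendor_heap_dup_sg_table(buffer);		/* hypothetical helper */
		if (IS_ERR(table)) {
			kfree(a);
			return PTR_ERR(table);
		}

		a->table = table;
		a->dev = attachment->dev;
		INIT_LIST_HEAD(&a->list);
		a->mapped = false;
		attachment->priv = a;

		mutex_lock(&buffer->lock);
		list_add(&a->list, &buffer->attachments);
		mutex_unlock(&buffer->lock);

		return 0;
	}

The detach/map/unmap callbacks can likewise operate on a->table directly,
as the system heap already does.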

diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c
index 0c05b79870f9..23dad5b6421e 100644
--- a/drivers/dma-buf/heaps/cma_heap.c
+++ b/drivers/dma-buf/heaps/cma_heap.c
@@ -40,13 +40,6 @@ struct cma_heap_buffer {
void *vaddr;
};

-struct dma_heap_attachment {
- struct device *dev;
- struct sg_table table;
- struct list_head list;
- bool mapped;
-};
-
static int cma_heap_attach(struct dma_buf *dmabuf,
struct dma_buf_attachment *attachment)
{
@@ -58,7 +51,7 @@ static int cma_heap_attach(struct dma_buf *dmabuf,
 	if (!a)
 		return -ENOMEM;
 
-	ret = sg_alloc_table_from_pages(&a->table, buffer->pages,
+	ret = sg_alloc_table_from_pages(a->table, buffer->pages,
 					buffer->pagecount, 0,
 					buffer->pagecount << PAGE_SHIFT,
 					GFP_KERNEL);
@@ -90,7 +83,7 @@ static void cma_heap_detach(struct dma_buf *dmabuf,
 	list_del(&a->list);
 	mutex_unlock(&buffer->lock);
 
-	sg_free_table(&a->table);
+	sg_free_table(a->table);
 	kfree(a);
 }
 
@@ -98,12 +91,12 @@ static struct sg_table *cma_heap_map_dma_buf(struct dma_buf_attachment *attachme
 					     enum dma_data_direction direction)
 {
 	struct dma_heap_attachment *a = attachment->priv;
-	struct sg_table *table = &a->table;
+	struct sg_table *table = a->table;
 	int ret;
 
 	ret = dma_map_sgtable(attachment->dev, table, direction, 0);
 	if (ret)
-		return ERR_PTR(-ENOMEM);
+		return ERR_PTR(ret);
 	a->mapped = true;
 	return table;
 }
@@ -124,14 +117,15 @@ static int cma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
 	struct cma_heap_buffer *buffer = dmabuf->priv;
 	struct dma_heap_attachment *a;
 
+	mutex_lock(&buffer->lock);
+
 	if (buffer->vmap_cnt)
 		invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);
 
-	mutex_lock(&buffer->lock);
 	list_for_each_entry(a, &buffer->attachments, list) {
 		if (!a->mapped)
 			continue;
-		dma_sync_sgtable_for_cpu(a->dev, &a->table, direction);
+		dma_sync_sgtable_for_cpu(a->dev, a->table, direction);
 	}
 	mutex_unlock(&buffer->lock);
 
@@ -144,14 +138,15 @@ static int cma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
 	struct cma_heap_buffer *buffer = dmabuf->priv;
 	struct dma_heap_attachment *a;
 
+	mutex_lock(&buffer->lock);
+
 	if (buffer->vmap_cnt)
 		flush_kernel_vmap_range(buffer->vaddr, buffer->len);
 
-	mutex_lock(&buffer->lock);
 	list_for_each_entry(a, &buffer->attachments, list) {
 		if (!a->mapped)
 			continue;
-		dma_sync_sgtable_for_device(a->dev, &a->table, direction);
+		dma_sync_sgtable_for_device(a->dev, a->table, direction);
 	}
 	mutex_unlock(&buffer->lock);
 
diff --git a/drivers/dma-buf/heaps/system_heap.c b/drivers/dma-buf/heaps/system_heap.c
index ab7fd896d2c4..aac8fc660ea6 100644
--- a/drivers/dma-buf/heaps/system_heap.c
+++ b/drivers/dma-buf/heaps/system_heap.c
@@ -17,7 +17,6 @@
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/module.h>
-#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

@@ -33,13 +32,6 @@ struct system_heap_buffer {
 	void *vaddr;
 };
 
-struct dma_heap_attachment {
-	struct device *dev;
-	struct sg_table *table;
-	struct list_head list;
-	bool mapped;
-};
-
 #define LOW_ORDER_GFP (GFP_HIGHUSER | __GFP_ZERO | __GFP_COMP)
 #define MID_ORDER_GFP (LOW_ORDER_GFP | __GFP_NOWARN)
 #define HIGH_ORDER_GFP (((GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN \
@@ -68,7 +60,7 @@ static struct sg_table *dup_sg_table(struct sg_table *table)
 	ret = sg_alloc_table(new_table, table->orig_nents, GFP_KERNEL);
 	if (ret) {
 		kfree(new_table);
-		return ERR_PTR(-ENOMEM);
+		return ERR_PTR(ret);
 	}
 
 	new_sg = new_table->sgl;
@@ -94,7 +86,7 @@ static int system_heap_attach(struct dma_buf *dmabuf,
 	table = dup_sg_table(&buffer->sg_table);
 	if (IS_ERR(table)) {
 		kfree(a);
-		return -ENOMEM;
+		return PTR_ERR(table);
 	}
 
 	a->table = table;
diff --git a/include/linux/dma-heap.h b/include/linux/dma-heap.h
index 0c05561cad6e..7d02aefe0e78 100644
--- a/include/linux/dma-heap.h
+++ b/include/linux/dma-heap.h
@@ -11,6 +11,7 @@
 
 #include <linux/cdev.h>
 #include <linux/types.h>
+#include <linux/scatterlist.h>
 
 struct dma_heap;
 
@@ -41,6 +42,20 @@ struct dma_heap_export_info {
 	void *priv;
 };
 
+/**
+ * struct dma_heap_attachment - holds device-heap attachment data
+ * @dev: device attached to the heap
+ * @table: sg_table for tracking the associated pages
+ * @list: list of dma_heap_attachment
+ * @mapped: true if attachment is actually mapped on the device
+ */
+struct dma_heap_attachment {
+	struct device *dev;
+	struct sg_table *table;
+	struct list_head list;
+	bool mapped;
+};
+
 /**
  * dma_heap_get_drvdata() - get per-heap driver data
  * @heap: DMA-Heap to retrieve private data for
--
2.32.0