From: Bartlomiej Zolnierkiewicz <>
Subject: [PATCH 16/20] async_tx: do DMA unmap in async_raid6_recov.c for PQ operations
Date: Mon, 05 Nov 2012 11:00:27 +0100
Convert core async_tx code (async_sum_product() and async_mult()) to do DMA unmapping itself using the ->callback functionality.
Cc: Dan Williams <djbw@fb.com>
Cc: Tomasz Figa <t.figa@samsung.com>
Signed-off-by: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
---
 crypto/async_tx/async_raid6_recov.c | 50 ++++++++++++++++++++++++++++++++++---
 1 file changed, 46 insertions(+), 4 deletions(-)
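[Note, not part of the commit message: a quick illustration of the
callback-chaining pattern this patch relies on. As the new
async_sum_product_cb()/async_mult_cb() suggest, __async_tx_submit()
records the submitter's completion callback as ->orig_callback and
installs the unmap callback in its place, so buffers are unmapped
before the caller is notified. The userspace sketch below models only
that chaining under those assumptions; tx_desc, submit(), unmap_cb()
and the printf bodies are simplified stand-ins for illustration, not
the real dmaengine API.]

#include <stdio.h>

typedef void (*dma_async_tx_callback)(void *param);

/* Simplified stand-in for struct dma_async_tx_descriptor. */
struct tx_desc {
	dma_async_tx_callback callback;		/* unmap callback goes here */
	void *callback_param;
	dma_async_tx_callback orig_callback;	/* submitter's own callback */
	void *orig_callback_param;
};

/* Models async_sum_product_cb()/async_mult_cb(): unmap, then chain. */
static void unmap_cb(void *param)
{
	struct tx_desc *tx = param;

	printf("unmap DMA buffers for tx %p\n", (void *)tx);
	if (tx->orig_callback)
		tx->orig_callback(tx->orig_callback_param);
}

/* Models __async_tx_submit(): interpose cb ahead of the user callback. */
static void submit(struct tx_desc *tx, dma_async_tx_callback cb,
		   void *cb_param, dma_async_tx_callback user_cb,
		   void *user_param)
{
	tx->orig_callback = user_cb;
	tx->orig_callback_param = user_param;
	tx->callback = cb;
	tx->callback_param = cb_param;
}

static void user_cb(void *param)
{
	printf("caller's callback: %s\n", (const char *)param);
}

int main(void)
{
	struct tx_desc tx = { 0 };

	submit(&tx, unmap_cb, &tx, user_cb, "recovery complete");
	tx.callback(tx.callback_param);	/* what the engine does on completion */
	return 0;
}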
diff --git a/crypto/async_tx/async_raid6_recov.c b/crypto/async_tx/async_raid6_recov.c
index a9f08a6..3db97aa 100644
--- a/crypto/async_tx/async_raid6_recov.c
+++ b/crypto/async_tx/async_raid6_recov.c
@@ -27,6 +27,20 @@
 #include <linux/raid/pq.h>
 #include <linux/async_tx.h>
 
+static void async_sum_product_cb(void *dma_async_param)
+{
+	struct dma_async_tx_descriptor *tx = dma_async_param;
+	struct dma_device *dev = tx->chan->device;
+
+	dma_unmap_page(dev->dev, tx->dma_dst[1], tx->dma_len,
+		       DMA_BIDIRECTIONAL);
+	dma_unmap_page(dev->dev, tx->dma_src[0], tx->dma_len, DMA_TO_DEVICE);
+	dma_unmap_page(dev->dev, tx->dma_src[1], tx->dma_len, DMA_TO_DEVICE);
+
+	if (tx->orig_callback)
+		tx->orig_callback(tx->orig_callback_param);
+}
+
 static struct dma_async_tx_descriptor *
 async_sum_product(struct page *dest, struct page **srcs, unsigned char *coef,
 		  size_t len, struct async_submit_ctl *submit)
@@ -43,7 +57,9 @@ async_sum_product(struct page *dest, struct page **srcs, unsigned char *coef,
 		dma_addr_t dma_src[2];
 		struct device *dev = dma->dev;
 		struct dma_async_tx_descriptor *tx;
-		enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P;
+		enum dma_ctrl_flags dma_flags = DMA_COMPL_SKIP_SRC_UNMAP |
+						DMA_COMPL_SKIP_DEST_UNMAP |
+						DMA_PREP_PQ_DISABLE_P;
 
 		if (submit->flags & ASYNC_TX_FENCE)
 			dma_flags |= DMA_PREP_FENCE;
@@ -53,7 +69,13 @@ async_sum_product(struct page *dest, struct page **srcs, unsigned char *coef,
 		tx = dma->device_prep_dma_pq(chan, dma_dest, dma_src, 2, coef,
 					     len, dma_flags);
 		if (tx) {
-			async_tx_submit(chan, tx, submit);
+			tx->dma_dst[1] = dma_dest[1];
+			tx->dma_src[0] = dma_src[0];
+			tx->dma_src[1] = dma_src[1];
+			tx->dma_len = len;
+
+			__async_tx_submit(chan, tx, async_sum_product_cb, tx,
+					  submit);
 			return tx;
 		}
 
@@ -82,6 +104,20 @@ async_sum_product(struct page *dest, struct page **srcs, unsigned char *coef,
 	return NULL;
 }
 
+static void async_mult_cb(void *dma_async_param)
+{
+	struct dma_async_tx_descriptor *tx = dma_async_param;
+	struct dma_device *dev = tx->chan->device;
+
+	dma_unmap_page(dev->dev, tx->dma_dst[1], tx->dma_len,
+		       DMA_BIDIRECTIONAL);
+	dma_unmap_page(dev->dev, tx->dma_src[0], tx->dma_len,
+		       DMA_TO_DEVICE);
+
+	if (tx->orig_callback)
+		tx->orig_callback(tx->orig_callback_param);
+}
+
 static struct dma_async_tx_descriptor *
 async_mult(struct page *dest, struct page *src, u8 coef, size_t len,
 	   struct async_submit_ctl *submit)
@@ -97,7 +133,9 @@ async_mult(struct page *dest, struct page *src, u8 coef, size_t len,
 		dma_addr_t dma_src[1];
 		struct device *dev = dma->dev;
 		struct dma_async_tx_descriptor *tx;
-		enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P;
+		enum dma_ctrl_flags dma_flags = DMA_COMPL_SKIP_SRC_UNMAP |
+						DMA_COMPL_SKIP_DEST_UNMAP |
+						DMA_PREP_PQ_DISABLE_P;
 
 		if (submit->flags & ASYNC_TX_FENCE)
 			dma_flags |= DMA_PREP_FENCE;
@@ -106,7 +144,11 @@ async_mult(struct page *dest, struct page *src, u8 coef, size_t len,
 		tx = dma->device_prep_dma_pq(chan, dma_dest, dma_src, 1,
 					     &coef, len, dma_flags);
 		if (tx) {
-			async_tx_submit(chan, tx, submit);
+			tx->dma_dst[1] = dma_dest[1];
+			tx->dma_src[0] = dma_src[0];
+			tx->dma_len = len;
+
+			__async_tx_submit(chan, tx, async_mult_cb, tx, submit);
 			return tx;
 		}
-- 
1.8.0