Subject: [PATCH 5.17 191/772] crypto: ccree - use fine grained DMA mapping dir
From: Gilad Ben-Yossef <gilad@benyossef.com>

[ Upstream commit a260436c98171cd825955a84a7f6e62bc8f4f00d ]

Use a fine grained specification of DMA mapping directions
in certain cases, allowing both a more optimized operation
and silencing a harmless, though pesky,
dma-debug warning.

Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
Reported-by: Corentin Labbe <clabbe.montjoie@gmail.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 drivers/crypto/ccree/cc_buffer_mgr.c | 27 +++++++++++++++------------
 1 file changed, 15 insertions(+), 12 deletions(-)
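A note on the direction policy the patch applies, stated on its own: for
out-of-place operation the device only reads the source and only writes the
destination, so the tighter DMA_TO_DEVICE / DMA_FROM_DEVICE mappings suffice,
while in-place operation (src == dst) has a single buffer that is both read
and written and must stay DMA_BIDIRECTIONAL. The sketch below is purely
illustrative and is not part of the driver; the helper names and error
handling are invented for the example.

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

/* Hypothetical helper (not in the driver): map src/dst with the same
 * direction choice the patch makes in cc_map_cipher_request().
 */
static int example_map_request(struct device *dev,
			       struct scatterlist *src, int src_nents,
			       struct scatterlist *dst, int dst_nents)
{
	if (src != dst) {
		/* Out-of-place: src is only read, dst is only written. */
		if (!dma_map_sg(dev, src, src_nents, DMA_TO_DEVICE))
			return -ENOMEM;
		if (!dma_map_sg(dev, dst, dst_nents, DMA_FROM_DEVICE)) {
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}
		return 0;
	}

	/* In-place: the single buffer is both read and written. */
	return dma_map_sg(dev, src, src_nents, DMA_BIDIRECTIONAL) ? 0 : -ENOMEM;
}

/* Hypothetical unmap counterpart: each dma_unmap_sg() must use the same
 * direction the scatterlist was mapped with, as the patch does in
 * cc_unmap_cipher_request().
 */
static void example_unmap_request(struct device *dev,
				  struct scatterlist *src, int src_nents,
				  struct scatterlist *dst, int dst_nents)
{
	if (src != dst) {
		dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
		dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
	} else {
		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
	}
}

On non-coherent systems the narrower directions let the DMA core skip cache
maintenance for the direction a buffer is never used in, and they describe
each mapping to dma-debug more accurately.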

diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
index 11e0278c8631..6140e4927322 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.c
+++ b/drivers/crypto/ccree/cc_buffer_mgr.c
@@ -356,12 +356,14 @@ void cc_unmap_cipher_request(struct device *dev, void *ctx,
 			      req_ctx->mlli_params.mlli_dma_addr);
 	}
 
-	dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL);
-	dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));
-
 	if (src != dst) {
-		dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_BIDIRECTIONAL);
+		dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_TO_DEVICE);
+		dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_FROM_DEVICE);
 		dev_dbg(dev, "Unmapped req->dst=%pK\n", sg_virt(dst));
+		dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));
+	} else {
+		dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL);
+		dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));
 	}
 }
 
@@ -377,6 +379,7 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
 	u32 dummy = 0;
 	int rc = 0;
 	u32 mapped_nents = 0;
+	int src_direction = (src != dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL);
 
 	req_ctx->dma_buf_type = CC_DMA_BUF_DLLI;
 	mlli_params->curr_pool = NULL;
@@ -399,7 +402,7 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
 	}
 
 	/* Map the src SGL */
-	rc = cc_map_sg(dev, src, nbytes, DMA_BIDIRECTIONAL, &req_ctx->in_nents,
+	rc = cc_map_sg(dev, src, nbytes, src_direction, &req_ctx->in_nents,
 		       LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
 	if (rc)
 		goto cipher_exit;
@@ -416,7 +419,7 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
 		}
 	} else {
 		/* Map the dst sg */
-		rc = cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL,
+		rc = cc_map_sg(dev, dst, nbytes, DMA_FROM_DEVICE,
 			       &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
 			       &dummy, &mapped_nents);
 		if (rc)
@@ -456,6 +459,7 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 	unsigned int hw_iv_size = areq_ctx->hw_iv_size;
 	struct cc_drvdata *drvdata = dev_get_drvdata(dev);
+	int src_direction = (req->src != req->dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL);
 
 	if (areq_ctx->mac_buf_dma_addr) {
 		dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr,
@@ -514,13 +518,11 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
 		sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents,
 		areq_ctx->assoclen, req->cryptlen);
 
-	dma_unmap_sg(dev, req->src, areq_ctx->src.mapped_nents,
-		     DMA_BIDIRECTIONAL);
+	dma_unmap_sg(dev, req->src, areq_ctx->src.mapped_nents, src_direction);
 	if (req->src != req->dst) {
 		dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
 			sg_virt(req->dst));
-		dma_unmap_sg(dev, req->dst, areq_ctx->dst.mapped_nents,
-			     DMA_BIDIRECTIONAL);
+		dma_unmap_sg(dev, req->dst, areq_ctx->dst.mapped_nents, DMA_FROM_DEVICE);
 	}
 	if (drvdata->coherent &&
 	    areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
@@ -843,7 +845,7 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
 	else
 		size_for_map -= authsize;
 
-	rc = cc_map_sg(dev, req->dst, size_for_map, DMA_BIDIRECTIONAL,
+	rc = cc_map_sg(dev, req->dst, size_for_map, DMA_FROM_DEVICE,
 		       &areq_ctx->dst.mapped_nents,
 		       LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes,
 		       &dst_mapped_nents);
@@ -1056,7 +1058,8 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
 		size_to_map += authsize;
 	}
 
-	rc = cc_map_sg(dev, req->src, size_to_map, DMA_BIDIRECTIONAL,
+	rc = cc_map_sg(dev, req->src, size_to_map,
+		       (req->src != req->dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL),
 		       &areq_ctx->src.mapped_nents,
 		       (LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES +
 			LLI_MAX_NUM_OF_DATA_ENTRIES),
-- 
2.35.1

