    Subject: [PATCH 5.10 280/390] crypto: qat - use pre-allocated buffers in datapath
    From: Giovanni Cabiddu <giovanni.cabiddu@intel.com>

    [ Upstream commit e0831e7af4e03f2715de102e18e9179ec0a81562 ]

    In order to do DMAs, the QAT device requires that the scatterlist
    structures are mapped and translated into a format that the firmware can
    understand. This is defined as the composition of a scatter gather list
    (SGL) descriptor header, the struct qat_alg_buf_list, plus a variable
    number of flat buffer descriptors, the struct qat_alg_buf.
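
    For illustration, the size of one such descriptor for n flat buffers
    is just the header plus n flat-buffer entries. The hypothetical
    helper below is a sketch, not part of the patch; it mirrors the
    struct_size() computation used in qat_alg_sgl_to_bufl():

	/* Size of a firmware SGL descriptor holding n flat buffers.
	 * Equivalent to struct_size(bufl, bufers, n), ignoring the
	 * overflow checking that struct_size() adds.
	 */
	static inline size_t qat_sgl_desc_size(unsigned int n)
	{
		return sizeof(struct qat_alg_buf_list) +
		       n * sizeof(struct qat_alg_buf);
	}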

    The allocation and mapping of these data structures are done each
    time a request is received from the skcipher and aead APIs.
    In an OOM situation, this behaviour might lead to a deadlock if an
    allocation fails.

    Based on the conversation in [1], increase the size of the aead and
    skcipher request contexts to include an SGL descriptor that can handle
    a maximum of 4 flat buffers.
    If a request exceeds 4 entries, the buffers are allocated dynamically.

    [1] https://lore.kernel.org/linux-crypto/20200722072932.GA27544@gondor.apana.org.au/
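
    In terms of footprint, the pre-allocated descriptors are small; a
    back-of-the-envelope sketch, assuming the packed layouts added to
    qat_crypto.h below:

	/* sizeof(struct qat_alg_buf)      = 4 + 4 + 8 = 16 bytes (packed)
	 * sizeof(struct qat_alg_buf_list) = 8 + 4 + 4 = 16 bytes (packed)
	 * sizeof(struct qat_alg_fixed_buf_list)
	 *	= 16 + QAT_MAX_BUFF_DESC * 16 = 80 bytes (before __aligned(64))
	 *
	 * Two of these (sgl_src + sgl_dst) plus the two validity flags grow
	 * each aead/skcipher request context by roughly 160 bytes, plus
	 * alignment padding.
	 */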

    Cc: stable@vger.kernel.org
    Fixes: d370cec32194 ("crypto: qat - Intel(R) QAT crypto interface")
    Reported-by: Mikulas Patocka <mpatocka@redhat.com>
    Signed-off-by: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
    Reviewed-by: Marco Chiappero <marco.chiappero@intel.com>
    Reviewed-by: Wojciech Ziemba <wojciech.ziemba@intel.com>
    Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
    Stable-dep-of: cf5bb835b7c8 ("crypto: qat - fix DMA transfer direction")
    Signed-off-by: Sasha Levin <sashal@kernel.org>
    ---
    drivers/crypto/qat/qat_common/qat_algs.c | 64 +++++++++++++---------
    drivers/crypto/qat/qat_common/qat_crypto.h | 24 ++++++++
    2 files changed, 61 insertions(+), 27 deletions(-)

    diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
    index 8625e299d445..2e2c2ac53609 100644
    --- a/drivers/crypto/qat/qat_common/qat_algs.c
    +++ b/drivers/crypto/qat/qat_common/qat_algs.c
    @@ -34,19 +34,6 @@
    static DEFINE_MUTEX(algs_lock);
    static unsigned int active_devs;

    -struct qat_alg_buf {
    - u32 len;
    - u32 resrvd;
    - u64 addr;
    -} __packed;
    -
    -struct qat_alg_buf_list {
    - u64 resrvd;
    - u32 num_bufs;
    - u32 num_mapped_bufs;
    - struct qat_alg_buf bufers[];
    -} __packed __aligned(64);
    -
    /* Common content descriptor */
    struct qat_alg_cd {
    union {
    @@ -644,7 +631,10 @@ static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
    bl->bufers[i].len, DMA_BIDIRECTIONAL);

    dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
    - kfree(bl);
    +
    + if (!qat_req->buf.sgl_src_valid)
    + kfree(bl);
    +
    if (blp != blpout) {
    /* If out of place operation dma unmap only data */
    int bufless = blout->num_bufs - blout->num_mapped_bufs;
    @@ -655,7 +645,9 @@ static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
    DMA_BIDIRECTIONAL);
    }
    dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
    - kfree(blout);
    +
    + if (!qat_req->buf.sgl_dst_valid)
    + kfree(blout);
    }
    }

    @@ -672,15 +664,24 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
    dma_addr_t blp = DMA_MAPPING_ERROR;
    dma_addr_t bloutp = DMA_MAPPING_ERROR;
    struct scatterlist *sg;
    - size_t sz_out, sz = struct_size(bufl, bufers, n + 1);
    + size_t sz_out, sz = struct_size(bufl, bufers, n);
    + int node = dev_to_node(&GET_DEV(inst->accel_dev));

    if (unlikely(!n))
    return -EINVAL;

    - bufl = kzalloc_node(sz, GFP_ATOMIC,
    - dev_to_node(&GET_DEV(inst->accel_dev)));
    - if (unlikely(!bufl))
    - return -ENOMEM;
    + qat_req->buf.sgl_src_valid = false;
    + qat_req->buf.sgl_dst_valid = false;
    +
    + if (n > QAT_MAX_BUFF_DESC) {
    + bufl = kzalloc_node(sz, GFP_ATOMIC, node);
    + if (unlikely(!bufl))
    + return -ENOMEM;
    + } else {
    + bufl = &qat_req->buf.sgl_src.sgl_hdr;
    + memset(bufl, 0, sizeof(struct qat_alg_buf_list));
    + qat_req->buf.sgl_src_valid = true;
    + }

    for_each_sg(sgl, sg, n, i)
    bufl->bufers[i].addr = DMA_MAPPING_ERROR;
    @@ -711,12 +712,18 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
    struct qat_alg_buf *bufers;

    n = sg_nents(sglout);
    - sz_out = struct_size(buflout, bufers, n + 1);
    + sz_out = struct_size(buflout, bufers, n);
    sg_nctr = 0;
    - buflout = kzalloc_node(sz_out, GFP_ATOMIC,
    - dev_to_node(&GET_DEV(inst->accel_dev)));
    - if (unlikely(!buflout))
    - goto err_in;
    +
    + if (n > QAT_MAX_BUFF_DESC) {
    + buflout = kzalloc_node(sz_out, GFP_ATOMIC, node);
    + if (unlikely(!buflout))
    + goto err_in;
    + } else {
    + buflout = &qat_req->buf.sgl_dst.sgl_hdr;
    + memset(buflout, 0, sizeof(struct qat_alg_buf_list));
    + qat_req->buf.sgl_dst_valid = true;
    + }

    bufers = buflout->bufers;
    for_each_sg(sglout, sg, n, i)
    @@ -761,7 +768,9 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
    dma_unmap_single(dev, buflout->bufers[i].addr,
    buflout->bufers[i].len,
    DMA_BIDIRECTIONAL);
    - kfree(buflout);
    +
    + if (!qat_req->buf.sgl_dst_valid)
    + kfree(buflout);

    err_in:
    if (!dma_mapping_error(dev, blp))
    @@ -774,7 +783,8 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
    bufl->bufers[i].len,
    DMA_BIDIRECTIONAL);

    - kfree(bufl);
    + if (!qat_req->buf.sgl_src_valid)
    + kfree(bufl);

    dev_err(dev, "Failed to map buf for dma\n");
    return -ENOMEM;
    diff --git a/drivers/crypto/qat/qat_common/qat_crypto.h b/drivers/crypto/qat/qat_common/qat_crypto.h
    index 12682d1e9f5f..5f9328201ba4 100644
    --- a/drivers/crypto/qat/qat_common/qat_crypto.h
    +++ b/drivers/crypto/qat/qat_common/qat_crypto.h
    @@ -20,6 +20,26 @@ struct qat_crypto_instance {
    atomic_t refctr;
    };

    +#define QAT_MAX_BUFF_DESC 4
    +
    +struct qat_alg_buf {
    + u32 len;
    + u32 resrvd;
    + u64 addr;
    +} __packed;
    +
    +struct qat_alg_buf_list {
    + u64 resrvd;
    + u32 num_bufs;
    + u32 num_mapped_bufs;
    + struct qat_alg_buf bufers[];
    +} __packed;
    +
    +struct qat_alg_fixed_buf_list {
    + struct qat_alg_buf_list sgl_hdr;
    + struct qat_alg_buf descriptors[QAT_MAX_BUFF_DESC];
    +} __packed __aligned(64);
    +
    struct qat_crypto_request_buffs {
    struct qat_alg_buf_list *bl;
    dma_addr_t blp;
    @@ -27,6 +47,10 @@ struct qat_crypto_request_buffs {
    dma_addr_t bloutp;
    size_t sz;
    size_t sz_out;
    + bool sgl_src_valid;
    + bool sgl_dst_valid;
    + struct qat_alg_fixed_buf_list sgl_src;
    + struct qat_alg_fixed_buf_list sgl_dst;
    };

    struct qat_crypto_request;
    --
    2.35.1

