Subject: [PATCH 5.10 271/390] crypto: sahara - don't sleep when in softirq
    From: Zhengchao Shao <shaozhengchao@huawei.com>

    [ Upstream commit 108586eba094b318e6a831f977f4ddcc403a15da ]

sahara_aes_crypt() may be called via crypto_skcipher_encrypt() from the
RX softirq, where sleeping is not allowed, so the queue must not be
protected by a mutex. Replace the mutex with a spinlock taken with the
_bh variants.
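For reference, the general pattern applied here (a request queue shared
between softirq and process context is protected by a spinlock taken
with the _bh variants, because a mutex may sleep) looks roughly like
the sketch below; demo_dev and demo_enqueue are hypothetical names and
not part of sahara.c:

	/*
	 * Minimal sketch of the locking pattern, assuming a hypothetical
	 * driver-private device struct; not the actual sahara.c code.
	 */
	#include <linux/spinlock.h>
	#include <crypto/algapi.h>

	struct demo_dev {
		struct crypto_queue	queue;
		spinlock_t		queue_spinlock;	/* was: struct mutex */
	};

	/* May run in softirq context, so it must not sleep. */
	static int demo_enqueue(struct demo_dev *dev,
				struct crypto_async_request *req)
	{
		int err;

		spin_lock_bh(&dev->queue_spinlock);	/* disables softirqs on this CPU */
		err = crypto_enqueue_request(&dev->queue, req);
		spin_unlock_bh(&dev->queue_spinlock);

		return err;
	}

Taking the _bh variant on the process-context (kthread) side as well
keeps a softirq on the same CPU from spinning on a lock that the
interrupted task already holds.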

    Fixes: c0c3c89ae347 ("crypto: sahara - replace tasklets with...")
    Signed-off-by: Zhengchao Shao <shaozhengchao@huawei.com>
    Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
    Signed-off-by: Sasha Levin <sashal@kernel.org>
    ---
 drivers/crypto/sahara.c | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index d60679c79822..2043dd061121 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -25,10 +25,10 @@
 #include <linux/kernel.h>
 #include <linux/kthread.h>
 #include <linux/module.h>
-#include <linux/mutex.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
+#include <linux/spinlock.h>
 
 #define SHA_BUFFER_LEN		PAGE_SIZE
 #define SAHARA_MAX_SHA_BLOCK_SIZE	SHA256_BLOCK_SIZE
@@ -195,7 +195,7 @@ struct sahara_dev {
 	void __iomem		*regs_base;
 	struct clk		*clk_ipg;
 	struct clk		*clk_ahb;
-	struct mutex		queue_mutex;
+	spinlock_t		queue_spinlock;
 	struct task_struct	*kthread;
 	struct completion	dma_completion;
 
@@ -641,9 +641,9 @@ static int sahara_aes_crypt(struct skcipher_request *req, unsigned long mode)
 
 	rctx->mode = mode;
 
-	mutex_lock(&dev->queue_mutex);
+	spin_lock_bh(&dev->queue_spinlock);
 	err = crypto_enqueue_request(&dev->queue, &req->base);
-	mutex_unlock(&dev->queue_mutex);
+	spin_unlock_bh(&dev->queue_spinlock);
 
 	wake_up_process(dev->kthread);
 
@@ -1042,10 +1042,10 @@ static int sahara_queue_manage(void *data)
 	do {
 		__set_current_state(TASK_INTERRUPTIBLE);
 
-		mutex_lock(&dev->queue_mutex);
+		spin_lock_bh(&dev->queue_spinlock);
 		backlog = crypto_get_backlog(&dev->queue);
 		async_req = crypto_dequeue_request(&dev->queue);
-		mutex_unlock(&dev->queue_mutex);
+		spin_unlock_bh(&dev->queue_spinlock);
 
 		if (backlog)
 			backlog->complete(backlog, -EINPROGRESS);
@@ -1091,9 +1091,9 @@ static int sahara_sha_enqueue(struct ahash_request *req, int last)
 		rctx->first = 1;
 	}
 
-	mutex_lock(&dev->queue_mutex);
+	spin_lock_bh(&dev->queue_spinlock);
 	ret = crypto_enqueue_request(&dev->queue, &req->base);
-	mutex_unlock(&dev->queue_mutex);
+	spin_unlock_bh(&dev->queue_spinlock);
 
 	wake_up_process(dev->kthread);
 
@@ -1454,7 +1454,7 @@ static int sahara_probe(struct platform_device *pdev)
 
 	crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH);
 
-	mutex_init(&dev->queue_mutex);
+	spin_lock_init(&dev->queue_spinlock);
 
 	dev_ptr = dev;
 
    --
    2.35.1

