Subject: Re: [PATCH 5/5] crypto: stm32/crc: protect from concurrent accesses
On Tue, 12 May 2020 at 16:13, Nicolas Toromanoff
<nicolas.toromanoff@st.com> wrote:
>
> Protect STM32 CRC device from concurrent accesses.
>
> As we create a spinlocked section whose duration increases with the
> buffer size, we provide a module parameter to relieve the pressure by
> splitting the critical section into chunks.
>
> The size of each chunk is set by the burst_size module parameter.
> By default burst_size=0, i.e. the incoming buffer is not split.
>
> Signed-off-by: Nicolas Toromanoff <nicolas.toromanoff@st.com>

Would you mind explaining the usage model here? It looks like you are
sharing a CRC hardware accelerator with a synchronous interface
between different users by using spinlocks? You are aware that this
will tie up the waiting CPUs completely during this time, right? So it
would be much better to use a mutex here. Or perhaps it would make
more sense to fall back to a s/w based CRC routine if the h/w is tied
up working for another task?

Using spinlocks for this is really not acceptable.
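
Something along these lines is what I have in mind - a rough, untested
sketch only: stm32_crc_hw_update() is a made-up stand-in for the MMIO
sequence in burst_update() below, and crc32_le() is the generic software
implementation from <linux/crc32.h>, which matches the bit-reversed
CRC32 configuration this driver programs (other polynomials would need
their own fallback):

static int stm32_crc_update(struct shash_desc *desc, const u8 *d8,
                            unsigned int length)
{
        struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc);
        struct stm32_crc *crc = stm32_crc_get_next_crc();

        /* never busy-wait: if the h/w is in use, do the work in s/w */
        if (!crc || !spin_trylock(&crc->lock)) {
                ctx->partial = crc32_le(ctx->partial, d8, length);
                return 0;
        }

        /* PM runtime handling elided for brevity */
        stm32_crc_hw_update(crc, ctx, d8, length); /* hypothetical helper */
        spin_unlock(&crc->lock);

        return 0;
}

The worst case for a caller is then computing the CRC on the CPU, i.e.,
spending cycles it would otherwise have burnt spinning on the lock
anyway.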



> ---
>  drivers/crypto/stm32/stm32-crc32.c | 47 ++++++++++++++++++++++++++++--
>  1 file changed, 45 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/crypto/stm32/stm32-crc32.c b/drivers/crypto/stm32/stm32-crc32.c
> index 413415c216ef..3ba41148c2a4 100644
> --- a/drivers/crypto/stm32/stm32-crc32.c
> +++ b/drivers/crypto/stm32/stm32-crc32.c
> @@ -35,11 +35,16 @@
>
>  #define CRC_AUTOSUSPEND_DELAY 50
>
> +static unsigned int burst_size;
> +module_param(burst_size, uint, 0644);
> +MODULE_PARM_DESC(burst_size, "Select burst byte size (0 unlimited)");
> +
>  struct stm32_crc {
>          struct list_head list;
>          struct device    *dev;
>          void __iomem     *regs;
>          struct clk       *clk;
> +        spinlock_t       lock;
>  };
>
>  struct stm32_crc_list {
> @@ -109,6 +114,7 @@ static int stm32_crc_init(struct shash_desc *desc)
>          struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc);
>          struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm);
>          struct stm32_crc *crc;
> +        unsigned long flags;
>
>          crc = stm32_crc_get_next_crc();
>          if (!crc)
> @@ -116,6 +122,8 @@ static int stm32_crc_init(struct shash_desc *desc)
>
>          pm_runtime_get_sync(crc->dev);
>
> +        spin_lock_irqsave(&crc->lock, flags);
> +
>          /* Reset, set key, poly and configure in bit reverse mode */
>          writel_relaxed(bitrev32(mctx->key), crc->regs + CRC_INIT);
>          writel_relaxed(bitrev32(mctx->poly), crc->regs + CRC_POL);
> @@ -125,18 +133,21 @@
>          /* Store partial result */
>          ctx->partial = readl_relaxed(crc->regs + CRC_DR);
>
> +        spin_unlock_irqrestore(&crc->lock, flags);
> +
>          pm_runtime_mark_last_busy(crc->dev);
>          pm_runtime_put_autosuspend(crc->dev);
>
>          return 0;
>  }
>
> -static int stm32_crc_update(struct shash_desc *desc, const u8 *d8,
> -                            unsigned int length)
> +static int burst_update(struct shash_desc *desc, const u8 *d8,
> +                        size_t length)
>  {
>          struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc);
>          struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm);
>          struct stm32_crc *crc;
> +        unsigned long flags;
>
>          crc = stm32_crc_get_next_crc();
>          if (!crc)
> @@ -144,6 +155,8 @@ static int stm32_crc_update(struct shash_desc *desc, const u8 *d8,
>
>          pm_runtime_get_sync(crc->dev);
>
> +        spin_lock_irqsave(&crc->lock, flags);
> +
>          /*
>           * Restore previously calculated CRC for this context as init value
>           * Restore polynomial configuration
> @@ -182,12 +195,40 @@ static int stm32_crc_update(struct shash_desc *desc, const u8 *d8,
>          /* Store partial result */
>          ctx->partial = readl_relaxed(crc->regs + CRC_DR);
>
> +        spin_unlock_irqrestore(&crc->lock, flags);
> +
>          pm_runtime_mark_last_busy(crc->dev);
>          pm_runtime_put_autosuspend(crc->dev);
>
>          return 0;
>  }
>
> +static int stm32_crc_update(struct shash_desc *desc, const u8 *d8,
> +                            unsigned int length)
> +{
> +        const unsigned int burst_sz = burst_size;
> +        unsigned int rem_sz;
> +        const u8 *cur;
> +        size_t size;
> +        int ret;
> +
> +        if (!burst_sz)
> +                return burst_update(desc, d8, length);
> +
> +        /* Digest first bytes not 32bit aligned at first pass in the loop */
> +        size = min(length,
> +                   burst_sz + (unsigned int)d8 - ALIGN_DOWN((unsigned int)d8,
> +                                                            sizeof(u32)));
> +        for (rem_sz = length, cur = d8; rem_sz;
> +             rem_sz -= size, cur += size, size = min(rem_sz, burst_sz)) {
> +                ret = burst_update(desc, cur, size);
> +                if (ret)
> +                        return ret;
> +        }
> +
> +        return 0;
> +}
> +
>  static int stm32_crc_final(struct shash_desc *desc, u8 *out)
>  {
>          struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc);
> @@ -300,6 +341,8 @@ static int stm32_crc_probe(struct platform_device *pdev)
>          pm_runtime_irq_safe(dev);
>          pm_runtime_enable(dev);
>
> +        spin_lock_init(&crc->lock);
> +
>          platform_set_drvdata(pdev, crc);
>
>          spin_lock(&crc_list.lock);
> --
> 2.17.1
>
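
P.S. Since burst_size is registered with 0644 permissions, it is also
writable at runtime via sysfs, presumably (if I have the module name
right) by something like
"echo 4096 > /sys/module/stm32_crc32/parameters/burst_size", not only
at module load time - worth keeping in mind for whatever scheme you end
up with.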
