Subject: [tip: core/rcu] arm64: Prepare arch_nmi_enter() for recursion
    The following commit has been merged into the core/rcu branch of tip:

    Commit-ID: 28f6bf9e247fe23d177cfdbf7e709270e8cc7fa6
    Gitweb: https://git.kernel.org/tip/28f6bf9e247fe23d177cfdbf7e709270e8cc7fa6
    Author: Frederic Weisbecker <frederic@kernel.org>
    AuthorDate: Thu, 27 Feb 2020 09:51:40 +01:00
    Committer: Thomas Gleixner <tglx@linutronix.de>
    CommitterDate: Tue, 19 May 2020 15:51:17 +02:00

    arm64: Prepare arch_nmi_enter() for recursion

When using nmi_enter() recursively, arch_nmi_enter() must also be recursion
safe. In particular, it must be ensured that HCR_TGE is always set while in
NMI context when in HYP mode, and that it is restored to its former state
when done.

The current code gets this wrong when invocations nest: the inner
arch_nmi_enter() overwrites the HCR_EL2 state saved by the outer one, so the
outer exit restores the wrong value.

Introduce a nesting counter so that the original value is saved only by the
outermost NMI and restored only when it exits.
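To illustrate the failure mode, here is a minimal stand-alone C sketch of the
pre-patch logic. The names (fake_hcr, TGE, old_nmi_*) are hypothetical
stand-ins for illustration only, not kernel API; a nested entry clobbers the
saved register value, so the final assert fails:

#include <assert.h>
#include <stdint.h>

#define TGE (1ULL << 27)			/* stand-in for HCR_TGE */

static uint64_t fake_hcr;			/* stand-in for HCR_EL2 */
static struct { uint64_t hcr; } ctx;		/* pre-patch: no nesting counter */

static void old_nmi_enter(void)
{
	ctx.hcr = fake_hcr;		/* clobbers the outer save when nested */
	if (!(ctx.hcr & TGE))
		fake_hcr = ctx.hcr | TGE;
}

static void old_nmi_exit(void)
{
	if (!(ctx.hcr & TGE))
		fake_hcr = ctx.hcr;
}

int main(void)
{
	old_nmi_enter();	/* outer NMI: saves 0, sets TGE */
	old_nmi_enter();	/* nested NMI: overwrites the save with TGE set */
	old_nmi_exit();		/* saved value has TGE set: restore is skipped */
	old_nmi_exit();		/* ditto: TGE is never cleared again */
	assert(fake_hcr == 0);	/* fails: the pre-NMI state is lost */
	return 0;
}

With the nesting counter introduced by the patch below, the inner invocation
only increments cnt and leaves the saved value alone, so the outermost exit
restores the true pre-NMI state.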

    Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
    Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
    Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
    Reviewed-by: Alexandre Chartre <alexandre.chartre@oracle.com>
    Cc: Will Deacon <will@kernel.org>
    Cc: Catalin Marinas <catalin.marinas@arm.com>
    Link: https://lkml.kernel.org/r/20200505134100.771491291@linutronix.de


    ---
    arch/arm64/include/asm/hardirq.h | 78 +++++++++++++++++++++++--------
    1 file changed, 59 insertions(+), 19 deletions(-)

    diff --git a/arch/arm64/include/asm/hardirq.h b/arch/arm64/include/asm/hardirq.h
    index 87ad961..985493a 100644
    --- a/arch/arm64/include/asm/hardirq.h
    +++ b/arch/arm64/include/asm/hardirq.h
@@ -32,30 +32,70 @@ u64 smp_irq_stat_cpu(unsigned int cpu);
 
 struct nmi_ctx {
 	u64 hcr;
+	unsigned int cnt;
 };
 
 DECLARE_PER_CPU(struct nmi_ctx, nmi_contexts);
 
-#define arch_nmi_enter()						\
-	do {								\
-		if (is_kernel_in_hyp_mode()) {				\
-			struct nmi_ctx *nmi_ctx = this_cpu_ptr(&nmi_contexts); \
-			nmi_ctx->hcr = read_sysreg(hcr_el2);		\
-			if (!(nmi_ctx->hcr & HCR_TGE)) {		\
-				write_sysreg(nmi_ctx->hcr | HCR_TGE, hcr_el2); \
-				isb();					\
-			}						\
-		}							\
-	} while (0)
+#define arch_nmi_enter()						\
+do {									\
+	struct nmi_ctx *___ctx;						\
+	u64 ___hcr;							\
+									\
+	if (!is_kernel_in_hyp_mode())					\
+		break;							\
+									\
+	___ctx = this_cpu_ptr(&nmi_contexts);				\
+	if (___ctx->cnt) {						\
+		___ctx->cnt++;						\
+		break;							\
+	}								\
+									\
+	___hcr = read_sysreg(hcr_el2);					\
+	if (!(___hcr & HCR_TGE)) {					\
+		write_sysreg(___hcr | HCR_TGE, hcr_el2);		\
+		isb();							\
+	}								\
+	/*								\
+	 * Make sure the sysreg write is performed before ___ctx->cnt	\
+	 * is set to 1. NMIs that see cnt == 1 will rely on us.	\
+	 */								\
+	barrier();							\
+	___ctx->cnt = 1;						\
+	/*								\
+	 * Make sure ___ctx->cnt is set before we save ___hcr. We	\
+	 * don't want ___ctx->hcr to be overwritten.			\
+	 */								\
+	barrier();							\
+	___ctx->hcr = ___hcr;						\
+} while (0)
 
-#define arch_nmi_exit()							\
-	do {								\
-		if (is_kernel_in_hyp_mode()) {				\
-			struct nmi_ctx *nmi_ctx = this_cpu_ptr(&nmi_contexts); \
-			if (!(nmi_ctx->hcr & HCR_TGE))			\
-				write_sysreg(nmi_ctx->hcr, hcr_el2);	\
-		}							\
-	} while (0)
+#define arch_nmi_exit()							\
+do {									\
+	struct nmi_ctx *___ctx;						\
+	u64 ___hcr;							\
+									\
+	if (!is_kernel_in_hyp_mode())					\
+		break;							\
+									\
+	___ctx = this_cpu_ptr(&nmi_contexts);				\
+	___hcr = ___ctx->hcr;						\
+	/*								\
+	 * Make sure we read ___ctx->hcr before we release		\
+	 * ___ctx->cnt as it makes ___ctx->hcr updatable again.	\
+	 */								\
+	barrier();							\
+	___ctx->cnt--;							\
+	/*								\
+	 * Make sure ___ctx->cnt release is visible before we		\
+	 * restore the sysreg. Otherwise a new NMI occurring		\
+	 * right after write_sysreg() can be fooled and think		\
+	 * we secured things for it.					\
+	 */								\
+	barrier();							\
+	if (!___ctx->cnt && !(___hcr & HCR_TGE))			\
+		write_sysreg(___hcr, hcr_el2);				\
+} while (0)
 
 static inline void ack_bad_irq(unsigned int irq)
 {
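
For completeness, a stand-alone sketch (same hypothetical names as above, not
kernel API) of the counter-based scheme, including the exit-path window that
the second barrier() comment describes: the counter is released before the
register is restored, so an NMI landing in that window sees cnt == 0 and sets
HCR_TGE up for itself rather than trusting stale state.

#include <assert.h>
#include <stdint.h>

#define TGE (1ULL << 27)			/* stand-in for HCR_TGE */

static uint64_t fake_hcr;			/* stand-in for HCR_EL2 */
static struct { uint64_t hcr; unsigned int cnt; } ctx;

/* The patched arch_nmi_enter()/arch_nmi_exit() logic, in miniature. */
static void sim_nmi_enter(void)
{
	uint64_t hcr;

	if (ctx.cnt) {
		ctx.cnt++;		/* nested: outer NMI already set TGE */
		return;
	}
	hcr = fake_hcr;
	if (!(hcr & TGE))
		fake_hcr = hcr | TGE;
	ctx.cnt = 1;			/* publish before saving hcr */
	ctx.hcr = hcr;
}

static void sim_nmi_exit(void)
{
	uint64_t hcr = ctx.hcr;		/* read the save before releasing cnt */

	ctx.cnt--;			/* release the counter first... */
	if (!ctx.cnt && !(hcr & TGE))
		fake_hcr = hcr;		/* ...then restore the register */
}

int main(void)
{
	sim_nmi_enter();		/* outermost NMI: saves 0, sets TGE */

	/* Replay sim_nmi_exit() step by step, with a new NMI landing in
	 * the window between the counter release and the restore. */
	uint64_t hcr = ctx.hcr;
	ctx.cnt--;			/* cnt is now 0 */

	sim_nmi_enter();		/* sees cnt == 0: safely re-arms TGE */
	sim_nmi_exit();			/* TGE was set on its entry: no restore */

	if (!ctx.cnt && !(hcr & TGE))	/* the interrupted exit resumes */
		fake_hcr = hcr;

	assert(!(fake_hcr & TGE));	/* pre-NMI state correctly restored */
	return 0;
}

Had the exit path restored the register before releasing the counter, the NMI
in that window would have seen cnt == 1 with HCR_TGE already cleared, which is
exactly the confusion the barrier comment warns about.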