    Subject: [PATCH 4.14 047/195] [Variant 3/Meltdown] arm64: kpti: Fix the interaction between ASID switching and software PAN
    4.14-stable review patch.  If anyone has any objections, please let me know.

    ------------------

    From: Catalin Marinas <catalin.marinas@arm.com>


    Commit 6b88a32c7af6 upstream.

    With ARM64_SW_TTBR0_PAN enabled, the exception entry code checks the
    active ASID to decide whether user access was enabled (non-zero ASID)
    when the exception was taken. On return from exception, if user access
    was previously disabled, it re-instates TTBR0_EL1 from the per-thread
    saved value (updated in switch_mm() or efi_set_pgd()).
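
    Conceptually, that bookkeeping amounts to something like the stand-alone C
    sketch below. This is illustration only, not the actual entry.S code: the
    variables, the user_was_enabled flag and the model_* helpers are made-up
    stand-ins for the system registers, the emulated PAN bit kept in the saved
    SPSR, and the real entry/exit paths.

        /* Illustrative stand-ins only; not the real arm64 entry code. */
        #include <stdbool.h>
        #include <stdint.h>

        #define TTBR_ASID_MASK  (0xffffULL << 48)

        static uint64_t ttbr0_el1, ttbr1_el1;   /* model the system registers      */
        static uint64_t reserved_ttbr0;         /* empty table, blocks user access */
        static uint64_t thread_saved_ttbr0;     /* current_thread_info()->ttbr0    */
        static bool user_was_enabled;           /* really kept in the saved SPSR   */

        static void model_exception_entry(void)
        {
            /* A non-zero ASID in TTBR1_EL1 means user access was enabled when
             * the exception was taken; remember that and disable it. */
            user_was_enabled = (ttbr1_el1 & TTBR_ASID_MASK) != 0;
            if (user_was_enabled) {
                ttbr1_el1 &= ~TTBR_ASID_MASK;   /* reserved ASID  */
                ttbr0_el1 = reserved_ttbr0;     /* reserved TTBR0 */
            }
        }

        static void model_exception_return(void)
        {
            /* If entry disabled user access, re-instate TTBR0_EL1 (and the ASID)
             * from the per-thread saved value. */
            if (user_was_enabled) {
                ttbr1_el1 |= thread_saved_ttbr0 & TTBR_ASID_MASK;
                ttbr0_el1 = thread_saved_ttbr0;
            }
        }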

    Commit 7655abb95386 ("arm64: mm: Move ASID from TTBR0 to TTBR1") makes the
    TTBR0_EL1 + ASID switching non-atomic. Subsequently, commit 27a921e75711
    ("arm64: mm: Fix and re-enable ARM64_SW_TTBR0_PAN") changes the
    __uaccess_ttbr0_disable() function and asm macro to first write the
    reserved TTBR0_EL1 followed by the ASID=0 update in TTBR1_EL1. If an
    exception occurs between these two writes, the exception return code will
    re-instate a valid TTBR0_EL1. A similar scenario can happen in
    cpu_switch_mm() between setting the reserved TTBR0_EL1 and the ASID
    update in cpu_do_switch_mm().
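
    To make the window concrete, here is a hedged, stand-alone C model of the
    pre-patch disable sequence. The variable names and the function name are
    made-up stand-ins for TTBR0_EL1/TTBR1_EL1 and __uaccess_ttbr0_disable(),
    and the SWAPPER_DIR_SIZE and ASID values are placeholders.

        /* Hypothetical model; none of this is the kernel implementation. */
        #include <stdint.h>
        #include <stdio.h>

        #define TTBR_ASID_MASK    (0xffffULL << 48)
        #define SWAPPER_DIR_SIZE  0x3000        /* placeholder */

        static uint64_t ttbr0_el1;              /* user page-table base         */
        static uint64_t ttbr1_el1;              /* swapper_pg_dir + active ASID */

        static void pre_patch_uaccess_ttbr0_disable(void)
        {
            uint64_t ttbr = ttbr1_el1;

            ttbr0_el1 = ttbr + SWAPPER_DIR_SIZE;    /* 1: point TTBR0 at reserved_ttbr0 */
            /*
             * Window: an exception taken here still sees a non-zero ASID in
             * TTBR1_EL1, so on return the pre-patch exit path re-instates a
             * valid TTBR0_EL1 and user access is silently re-enabled.
             */
            ttbr1_el1 = ttbr & ~TTBR_ASID_MASK;     /* 2: switch to the reserved ASID */
        }

        int main(void)
        {
            ttbr0_el1 = 0x40000000;                     /* some user pgd             */
            ttbr1_el1 = 0x80000000 | (0x2aULL << 48);   /* swapper_pg_dir, ASID 0x2a */
            pre_patch_uaccess_ttbr0_disable();
            printf("ttbr0_el1=%#llx ttbr1_el1=%#llx\n",
                   (unsigned long long)ttbr0_el1, (unsigned long long)ttbr1_el1);
            return 0;
        }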

    This patch reverts the entry.S check from ASID == 0 back to checking
    TTBR0_EL1 directly and disables interrupts around the TTBR0_EL1 and ASID
    switching code in __uaccess_ttbr0_disable(). It also ensures that, when
    returning from the EFI runtime services, efi_set_pgd() doesn't leave a
    non-zero ASID in TTBR1_EL1, by using uaccess_ttbr0_{enable,disable}.

    The accesses to current_thread_info()->ttbr0 are updated to use
    READ_ONCE/WRITE_ONCE.

    As a safety measure, __uaccess_ttbr0_enable() always masks out any
    existing non-zero ASID in TTBR1_EL1 before writing in the new ASID.
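
    For completeness, a hedged C sketch of the fixed sequence described above
    might look like the following; the variables stand in for the system
    registers and the per-thread saved ttbr0, and the empty irq_save()/
    irq_restore() helpers stand in for local_irq_save()/local_irq_restore().
    It mirrors the uaccess.h changes below but is not the kernel code itself.

        /* Hypothetical model of the fixed disable/enable paths. */
        #include <stdint.h>

        #define TTBR_ASID_MASK    (0xffffULL << 48)
        #define SWAPPER_DIR_SIZE  0x3000        /* placeholder */

        static uint64_t ttbr0_el1, ttbr1_el1, thread_saved_ttbr0;

        static void irq_save(void)    { /* stand-in for local_irq_save()    */ }
        static void irq_restore(void) { /* stand-in for local_irq_restore() */ }

        static void model_uaccess_ttbr0_disable(void)
        {
            irq_save();                             /* close the exception window */
            uint64_t ttbr = ttbr1_el1 & ~TTBR_ASID_MASK;
            ttbr0_el1 = ttbr + SWAPPER_DIR_SIZE;    /* reserved_ttbr0 follows swapper_pg_dir */
            ttbr1_el1 = ttbr;                       /* reserved ASID (0) */
            irq_restore();
        }

        static void model_uaccess_ttbr0_enable(void)
        {
            irq_save();
            uint64_t ttbr1 = ttbr1_el1 & ~TTBR_ASID_MASK;  /* safety measure: drop any stale ASID */
            ttbr1 |= thread_saved_ttbr0 & TTBR_ASID_MASK;  /* restore the active ASID first...    */
            ttbr1_el1 = ttbr1;
            ttbr0_el1 = thread_saved_ttbr0;                /* ...then the user page tables        */
            irq_restore();
        }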

    Fixes: 27a921e75711 ("arm64: mm: Fix and re-enable ARM64_SW_TTBR0_PAN")
    Acked-by: Will Deacon <will.deacon@arm.com>
    Reported-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
    Tested-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
    Reviewed-by: James Morse <james.morse@arm.com>
    Tested-by: James Morse <james.morse@arm.com>
    Co-developed-by: Marc Zyngier <marc.zyngier@arm.com>
    Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
    Signed-off-by: Will Deacon <will.deacon@arm.com>
    Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
    Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
    ---
    arch/arm64/include/asm/asm-uaccess.h | 12 +++++++-----
    arch/arm64/include/asm/efi.h | 12 +++++++-----
    arch/arm64/include/asm/mmu_context.h | 3 ++-
    arch/arm64/include/asm/uaccess.h | 9 ++++++---
    arch/arm64/kernel/entry.S | 2 +-
    arch/arm64/lib/clear_user.S | 2 +-
    arch/arm64/lib/copy_from_user.S | 2 +-
    arch/arm64/lib/copy_in_user.S | 2 +-
    arch/arm64/lib/copy_to_user.S | 2 +-
    arch/arm64/mm/cache.S | 2 +-
    arch/arm64/mm/proc.S | 3 +++
    arch/arm64/xen/hypercall.S | 2 +-
    12 files changed, 32 insertions(+), 21 deletions(-)

    --- a/arch/arm64/include/asm/asm-uaccess.h
    +++ b/arch/arm64/include/asm/asm-uaccess.h
    @@ -14,11 +14,11 @@
    #ifdef CONFIG_ARM64_SW_TTBR0_PAN
    .macro __uaccess_ttbr0_disable, tmp1
    mrs \tmp1, ttbr1_el1 // swapper_pg_dir
    + bic \tmp1, \tmp1, #TTBR_ASID_MASK
    add \tmp1, \tmp1, #SWAPPER_DIR_SIZE // reserved_ttbr0 at the end of swapper_pg_dir
    msr ttbr0_el1, \tmp1 // set reserved TTBR0_EL1
    isb
    sub \tmp1, \tmp1, #SWAPPER_DIR_SIZE
    - bic \tmp1, \tmp1, #TTBR_ASID_MASK
    msr ttbr1_el1, \tmp1 // set reserved ASID
    isb
    .endm
    @@ -35,9 +35,11 @@
    isb
    .endm

    - .macro uaccess_ttbr0_disable, tmp1
    + .macro uaccess_ttbr0_disable, tmp1, tmp2
    alternative_if_not ARM64_HAS_PAN
    + save_and_disable_irq \tmp2 // avoid preemption
    __uaccess_ttbr0_disable \tmp1
    + restore_irq \tmp2
    alternative_else_nop_endif
    .endm

    @@ -49,7 +51,7 @@ alternative_if_not ARM64_HAS_PAN
    alternative_else_nop_endif
    .endm
    #else
    - .macro uaccess_ttbr0_disable, tmp1
    + .macro uaccess_ttbr0_disable, tmp1, tmp2
    .endm

    .macro uaccess_ttbr0_enable, tmp1, tmp2, tmp3
    @@ -59,8 +61,8 @@ alternative_else_nop_endif
    /*
    * These macros are no-ops when UAO is present.
    */
    - .macro uaccess_disable_not_uao, tmp1
    - uaccess_ttbr0_disable \tmp1
    + .macro uaccess_disable_not_uao, tmp1, tmp2
    + uaccess_ttbr0_disable \tmp1, \tmp2
    alternative_if ARM64_ALT_PAN_NOT_UAO
    SET_PSTATE_PAN(1)
    alternative_else_nop_endif
    --- a/arch/arm64/include/asm/efi.h
    +++ b/arch/arm64/include/asm/efi.h
    @@ -121,19 +121,21 @@ static inline void efi_set_pgd(struct mm
    if (mm != current->active_mm) {
    /*
    * Update the current thread's saved ttbr0 since it is
    - * restored as part of a return from exception. Set
    - * the hardware TTBR0_EL1 using cpu_switch_mm()
    - * directly to enable potential errata workarounds.
    + * restored as part of a return from exception. Enable
    + * access to the valid TTBR0_EL1 and invoke the errata
    + * workaround directly since there is no return from
    + * exception when invoking the EFI run-time services.
    */
    update_saved_ttbr0(current, mm);
    - cpu_switch_mm(mm->pgd, mm);
    + uaccess_ttbr0_enable();
    + post_ttbr_update_workaround();
    } else {
    /*
    * Defer the switch to the current thread's TTBR0_EL1
    * until uaccess_enable(). Restore the current
    * thread's saved ttbr0 corresponding to its active_mm
    */
    - cpu_set_reserved_ttbr0();
    + uaccess_ttbr0_disable();
    update_saved_ttbr0(current, current->active_mm);
    }
    }
    --- a/arch/arm64/include/asm/mmu_context.h
    +++ b/arch/arm64/include/asm/mmu_context.h
    @@ -175,7 +175,7 @@ static inline void update_saved_ttbr0(st
    else
    ttbr = virt_to_phys(mm->pgd) | ASID(mm) << 48;

    - task_thread_info(tsk)->ttbr0 = ttbr;
    + WRITE_ONCE(task_thread_info(tsk)->ttbr0, ttbr);
    }
    #else
    static inline void update_saved_ttbr0(struct task_struct *tsk,
    @@ -230,6 +230,7 @@ switch_mm(struct mm_struct *prev, struct
    #define activate_mm(prev,next) switch_mm(prev, next, current)

    void verify_cpu_asid_bits(void);
    +void post_ttbr_update_workaround(void);

    #endif /* !__ASSEMBLY__ */

    --- a/arch/arm64/include/asm/uaccess.h
    +++ b/arch/arm64/include/asm/uaccess.h
    @@ -105,16 +105,18 @@ static inline void set_fs(mm_segment_t f
    #ifdef CONFIG_ARM64_SW_TTBR0_PAN
    static inline void __uaccess_ttbr0_disable(void)
    {
    - unsigned long ttbr;
    + unsigned long flags, ttbr;

    + local_irq_save(flags);
    ttbr = read_sysreg(ttbr1_el1);
    + ttbr &= ~TTBR_ASID_MASK;
    /* reserved_ttbr0 placed at the end of swapper_pg_dir */
    write_sysreg(ttbr + SWAPPER_DIR_SIZE, ttbr0_el1);
    isb();
    /* Set reserved ASID */
    - ttbr &= ~TTBR_ASID_MASK;
    write_sysreg(ttbr, ttbr1_el1);
    isb();
    + local_irq_restore(flags);
    }

    static inline void __uaccess_ttbr0_enable(void)
    @@ -127,10 +129,11 @@ static inline void __uaccess_ttbr0_enabl
    * roll-over and an update of 'ttbr0'.
    */
    local_irq_save(flags);
    - ttbr0 = current_thread_info()->ttbr0;
    + ttbr0 = READ_ONCE(current_thread_info()->ttbr0);

    /* Restore active ASID */
    ttbr1 = read_sysreg(ttbr1_el1);
    + ttbr1 &= ~TTBR_ASID_MASK; /* safety measure */
    ttbr1 |= ttbr0 & TTBR_ASID_MASK;
    write_sysreg(ttbr1, ttbr1_el1);
    isb();
    --- a/arch/arm64/kernel/entry.S
    +++ b/arch/arm64/kernel/entry.S
    @@ -204,7 +204,7 @@ alternative_if ARM64_HAS_PAN
    alternative_else_nop_endif

    .if \el != 0
    - mrs x21, ttbr1_el1
    + mrs x21, ttbr0_el1
    tst x21, #TTBR_ASID_MASK // Check for the reserved ASID
    orr x23, x23, #PSR_PAN_BIT // Set the emulated PAN in the saved SPSR
    b.eq 1f // TTBR0 access already disabled
    --- a/arch/arm64/lib/clear_user.S
    +++ b/arch/arm64/lib/clear_user.S
    @@ -50,7 +50,7 @@ uao_user_alternative 9f, strh, sttrh, wz
    b.mi 5f
    uao_user_alternative 9f, strb, sttrb, wzr, x0, 0
    5: mov x0, #0
    - uaccess_disable_not_uao x2
    + uaccess_disable_not_uao x2, x3
    ret
    ENDPROC(__clear_user)

    --- a/arch/arm64/lib/copy_from_user.S
    +++ b/arch/arm64/lib/copy_from_user.S
    @@ -67,7 +67,7 @@ ENTRY(__arch_copy_from_user)
    uaccess_enable_not_uao x3, x4, x5
    add end, x0, x2
    #include "copy_template.S"
    - uaccess_disable_not_uao x3
    + uaccess_disable_not_uao x3, x4
    mov x0, #0 // Nothing to copy
    ret
    ENDPROC(__arch_copy_from_user)
    --- a/arch/arm64/lib/copy_in_user.S
    +++ b/arch/arm64/lib/copy_in_user.S
    @@ -68,7 +68,7 @@ ENTRY(raw_copy_in_user)
    uaccess_enable_not_uao x3, x4, x5
    add end, x0, x2
    #include "copy_template.S"
    - uaccess_disable_not_uao x3
    + uaccess_disable_not_uao x3, x4
    mov x0, #0
    ret
    ENDPROC(raw_copy_in_user)
    --- a/arch/arm64/lib/copy_to_user.S
    +++ b/arch/arm64/lib/copy_to_user.S
    @@ -66,7 +66,7 @@ ENTRY(__arch_copy_to_user)
    uaccess_enable_not_uao x3, x4, x5
    add end, x0, x2
    #include "copy_template.S"
    - uaccess_disable_not_uao x3
    + uaccess_disable_not_uao x3, x4
    mov x0, #0
    ret
    ENDPROC(__arch_copy_to_user)
    --- a/arch/arm64/mm/cache.S
    +++ b/arch/arm64/mm/cache.S
    @@ -72,7 +72,7 @@ USER(9f, ic ivau, x4 ) // invalidate I
    isb
    mov x0, #0
    1:
    - uaccess_ttbr0_disable x1
    + uaccess_ttbr0_disable x1, x2
    ret
    9:
    mov x0, #-EFAULT
    --- a/arch/arm64/mm/proc.S
    +++ b/arch/arm64/mm/proc.S
    @@ -140,6 +140,9 @@ ENDPROC(cpu_do_resume)
    ENTRY(cpu_do_switch_mm)
    mrs x2, ttbr1_el1
    mmid x1, x1 // get mm->context.id
    +#ifdef CONFIG_ARM64_SW_TTBR0_PAN
    + bfi x0, x1, #48, #16 // set the ASID field in TTBR0
    +#endif
    bfi x2, x1, #48, #16 // set the ASID
    msr ttbr1_el1, x2 // in TTBR1 (since TCR.A1 is set)
    isb
    --- a/arch/arm64/xen/hypercall.S
    +++ b/arch/arm64/xen/hypercall.S
    @@ -107,6 +107,6 @@ ENTRY(privcmd_call)
    /*
    * Disable userspace access from kernel once the hyp call completed.
    */
    - uaccess_ttbr0_disable x6
    + uaccess_ttbr0_disable x6, x7
    ret
    ENDPROC(privcmd_call);
