Subject: [PATCH 5.17 535/772] x86/mce: relocate set{clear}_mce_nospec() functions
    From: Jane Chu <jane.chu@oracle.com>

    [ Upstream commit b3fdf9398a16f01dc013967a4ab25e99c3f4fc12 ]

    Relocate the twin MCE functions to arch/x86/mm/pat/set_memory.c,
    where they belong.

    While at it, fix up a function name in a comment.

    Reviewed-by: Christoph Hellwig <hch@lst.de>
    Reviewed-by: Dan Williams <dan.j.williams@intel.com>
    Signed-off-by: Jane Chu <jane.chu@oracle.com>
    Acked-by: Borislav Petkov <bp@suse.de>
    Cc: Stephen Rothwell <sfr@canb.auug.org.au>
    [sfr: gate {set,clear}_mce_nospec() by CONFIG_X86_64]
    Link: https://lore.kernel.org/r/165272527328.90175.8336008202048685278.stgit@dwillia2-desk3.amr.corp.intel.com
    Signed-off-by: Dan Williams <dan.j.williams@intel.com>
    Signed-off-by: Sasha Levin <sashal@kernel.org>
    ---
     arch/x86/include/asm/set_memory.h | 52 -------------------------------
     arch/x86/mm/pat/set_memory.c      | 50 +++++++++++++++++++++++++++--
     include/linux/set_memory.h        |  8 ++---
     3 files changed, 52 insertions(+), 58 deletions(-)

    diff --git a/arch/x86/include/asm/set_memory.h b/arch/x86/include/asm/set_memory.h
    index ff0f2d90338a..648be0bd20df 100644
    --- a/arch/x86/include/asm/set_memory.h
    +++ b/arch/x86/include/asm/set_memory.h
    @@ -88,56 +88,4 @@ void notify_range_enc_status_changed(unsigned long vaddr, int npages, bool enc);
     
     extern int kernel_set_to_readonly;
     
    -#ifdef CONFIG_X86_64
    -/*
    - * Prevent speculative access to the page by either unmapping
    - * it (if we do not require access to any part of the page) or
    - * marking it uncacheable (if we want to try to retrieve data
    - * from non-poisoned lines in the page).
    - */
    -static inline int set_mce_nospec(unsigned long pfn, bool unmap)
    -{
    -	unsigned long decoy_addr;
    -	int rc;
    -
    -	/* SGX pages are not in the 1:1 map */
    -	if (arch_is_platform_page(pfn << PAGE_SHIFT))
    -		return 0;
    -	/*
    -	 * We would like to just call:
    -	 *	set_memory_XX((unsigned long)pfn_to_kaddr(pfn), 1);
    -	 * but doing that would radically increase the odds of a
    -	 * speculative access to the poison page because we'd have
    -	 * the virtual address of the kernel 1:1 mapping sitting
    -	 * around in registers.
    -	 * Instead we get tricky.  We create a non-canonical address
    -	 * that looks just like the one we want, but has bit 63 flipped.
    -	 * This relies on set_memory_XX() properly sanitizing any __pa()
    -	 * results with __PHYSICAL_MASK or PTE_PFN_MASK.
    -	 */
    -	decoy_addr = (pfn << PAGE_SHIFT) + (PAGE_OFFSET ^ BIT(63));
    -
    -	if (unmap)
    -		rc = set_memory_np(decoy_addr, 1);
    -	else
    -		rc = set_memory_uc(decoy_addr, 1);
    -	if (rc)
    -		pr_warn("Could not invalidate pfn=0x%lx from 1:1 map\n", pfn);
    -	return rc;
    -}
    -#define set_mce_nospec set_mce_nospec
    -
    -/* Restore full speculative operation to the pfn. */
    -static inline int clear_mce_nospec(unsigned long pfn)
    -{
    -	return set_memory_wb((unsigned long) pfn_to_kaddr(pfn), 1);
    -}
    -#define clear_mce_nospec clear_mce_nospec
    -#else
    -/*
    - * Few people would run a 32-bit kernel on a machine that supports
    - * recoverable errors because they have too much memory to boot 32-bit.
    - */
    -#endif
    -
     #endif /* _ASM_X86_SET_MEMORY_H */
    diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
    index b4072115c8ef..b143972885eb 100644
    --- a/arch/x86/mm/pat/set_memory.c
    +++ b/arch/x86/mm/pat/set_memory.c
    @@ -19,6 +19,7 @@
     #include <linux/vmstat.h>
     #include <linux/kernel.h>
     #include <linux/cc_platform.h>
    +#include <linux/set_memory.h>
     
     #include <asm/e820/api.h>
     #include <asm/processor.h>
    @@ -29,7 +30,6 @@
     #include <asm/pgalloc.h>
     #include <asm/proto.h>
     #include <asm/memtype.h>
    -#include <asm/set_memory.h>
     #include <asm/hyperv-tlfs.h>
     #include <asm/mshyperv.h>
     
    @@ -1816,7 +1816,7 @@ static inline int cpa_clear_pages_array(struct page **pages, int numpages,
     }
     
     /*
    - * _set_memory_prot is an internal helper for callers that have been passed
    + * __set_memory_prot is an internal helper for callers that have been passed
      * a pgprot_t value from upper layers and a reservation has already been taken.
      * If you want to set the pgprot to a specific page protocol, use the
      * set_memory_xx() functions.
    @@ -1925,6 +1925,52 @@ int set_memory_wb(unsigned long addr, int numpages)
     }
     EXPORT_SYMBOL(set_memory_wb);
     
    +/*
    + * Prevent speculative access to the page by either unmapping
    + * it (if we do not require access to any part of the page) or
    + * marking it uncacheable (if we want to try to retrieve data
    + * from non-poisoned lines in the page).
    + */
    +#ifdef CONFIG_X86_64
    +int set_mce_nospec(unsigned long pfn, bool unmap)
    +{
    +	unsigned long decoy_addr;
    +	int rc;
    +
    +	/* SGX pages are not in the 1:1 map */
    +	if (arch_is_platform_page(pfn << PAGE_SHIFT))
    +		return 0;
    +	/*
    +	 * We would like to just call:
    +	 *	set_memory_XX((unsigned long)pfn_to_kaddr(pfn), 1);
    +	 * but doing that would radically increase the odds of a
    +	 * speculative access to the poison page because we'd have
    +	 * the virtual address of the kernel 1:1 mapping sitting
    +	 * around in registers.
    +	 * Instead we get tricky.  We create a non-canonical address
    +	 * that looks just like the one we want, but has bit 63 flipped.
    +	 * This relies on set_memory_XX() properly sanitizing any __pa()
    +	 * results with __PHYSICAL_MASK or PTE_PFN_MASK.
    +	 */
    +	decoy_addr = (pfn << PAGE_SHIFT) + (PAGE_OFFSET ^ BIT(63));
    +
    +	if (unmap)
    +		rc = set_memory_np(decoy_addr, 1);
    +	else
    +		rc = set_memory_uc(decoy_addr, 1);
    +	if (rc)
    +		pr_warn("Could not invalidate pfn=0x%lx from 1:1 map\n", pfn);
    +	return rc;
    +}
    +
    +/* Restore full speculative operation to the pfn. */
    +int clear_mce_nospec(unsigned long pfn)
    +{
    +	return set_memory_wb((unsigned long) pfn_to_kaddr(pfn), 1);
    +}
    +EXPORT_SYMBOL_GPL(clear_mce_nospec);
    +#endif /* CONFIG_X86_64 */
    +
     int set_memory_x(unsigned long addr, int numpages)
     {
     	if (!(__supported_pte_mask & _PAGE_NX))
    diff --git a/include/linux/set_memory.h b/include/linux/set_memory.h
    index f36be5166c19..683a6c3f7179 100644
    --- a/include/linux/set_memory.h
    +++ b/include/linux/set_memory.h
    @@ -42,14 +42,14 @@ static inline bool can_set_direct_map(void)
     #endif
     #endif /* CONFIG_ARCH_HAS_SET_DIRECT_MAP */
     
    -#ifndef set_mce_nospec
    +#ifdef CONFIG_X86_64
    +int set_mce_nospec(unsigned long pfn, bool unmap);
    +int clear_mce_nospec(unsigned long pfn);
    +#else
     static inline int set_mce_nospec(unsigned long pfn, bool unmap)
     {
     	return 0;
     }
    -#endif
    -
    -#ifndef clear_mce_nospec
     static inline int clear_mce_nospec(unsigned long pfn)
     {
     	return 0;
    --
    2.35.1
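
    To make the decoy-address trick in set_mce_nospec() concrete, here is
    a minimal userspace sketch of the arithmetic. It is illustrative only
    and not part of the patch: PAGE_OFFSET, the __PHYSICAL_MASK width, and
    the pfn below are assumed values for a typical x86_64 configuration
    (4-level paging, KASLR off), not values taken from this series.

    /*
     * Userspace illustration of the decoy-address computation used by
     * set_mce_nospec().  The constants are assumptions for a typical
     * x86_64 kernel (4-level paging, KASLR off) and are NOT taken from
     * the patch itself.
     */
    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT	12
    #define PAGE_OFFSET	0xffff888000000000ULL	/* base of the 1:1 map */
    #define PHYSICAL_MASK	((1ULL << 52) - 1)	/* mirrors __PHYSICAL_MASK */
    #define BIT63		(1ULL << 63)

    int main(void)
    {
    	uint64_t pfn = 0x12345;		/* hypothetical poisoned pfn */

    	/* The canonical 1:1-map address we want to avoid materializing: */
    	uint64_t kaddr = (pfn << PAGE_SHIFT) + PAGE_OFFSET;

    	/*
    	 * The decoy: identical except that bit 63 is flipped, which makes
    	 * the address non-canonical and thus impossible to dereference,
    	 * even speculatively.
    	 */
    	uint64_t decoy = (pfn << PAGE_SHIFT) + (PAGE_OFFSET ^ BIT63);

    	/*
    	 * set_memory_XX() sanitizes __pa() results with __PHYSICAL_MASK,
    	 * so both addresses resolve to the same physical page:
    	 */
    	uint64_t phys_kaddr = (kaddr - PAGE_OFFSET) & PHYSICAL_MASK;
    	uint64_t phys_decoy = (decoy - PAGE_OFFSET) & PHYSICAL_MASK;

    	printf("kaddr      = %#llx (canonical)\n",
    	       (unsigned long long)kaddr);
    	printf("decoy_addr = %#llx (non-canonical)\n",
    	       (unsigned long long)decoy);
    	printf("phys       = %#llx == %#llx\n",
    	       (unsigned long long)phys_kaddr,
    	       (unsigned long long)phys_decoy);
    	return 0;
    }

    Both computations print the same physical address (0x12345000 here):
    flipping bit 63 yields a non-canonical address that can never be
    dereferenced, even speculatively, yet the mask recovers the same
    page, so the canonical 1:1-map virtual address of the poisoned page
    never has to sit in a register. The include/linux/set_memory.h hunk
    follows the usual fallback pattern: real declarations when the arch
    provides the functions (here gated by CONFIG_X86_64), static inline
    no-op stubs otherwise.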

