From:    Borislav Petkov <bp@suse.de>
Subject: [PATCH v2 12/15] x86/asm: Cleanup prefetch primitives
Date:    2015-02-24

    This is based on a patch originally by hpa.

    With the current improvements to the alternatives, we can simply use %P1
    as a mem8 operand constraint and rely on the toolchain to generate the
proper instruction sizes. For example, on 32-bit, where we use an empty
old instruction, we get:

    apply_alternatives: feat: 6*32+8, old: (c104648b, len: 4), repl: (c195566c, len: 4)
    c104648b: alt_insn: 90 90 90 90
    c195566c: rpl_insn: 0f 0d 4b 5c

    ...

    apply_alternatives: feat: 6*32+8, old: (c18e09b4, len: 3), repl: (c1955948, len: 3)
    c18e09b4: alt_insn: 90 90 90
    c1955948: rpl_insn: 0f 0d 08

    ...

    apply_alternatives: feat: 6*32+8, old: (c1190cf9, len: 7), repl: (c1955a79, len: 7)
    c1190cf9: alt_insn: 90 90 90 90 90 90 90
    c1955a79: rpl_insn: 0f 0d 0d a0 d4 85 c1

    all with the proper padding done depending on the size of the
    replacement instruction the compiler generates.
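
    For illustration only, not part of the patch: the replacement bytes above
    are all PREFETCHW with different addressing modes -- 0f 0d 08 is
    "prefetchw (%eax)" (3 bytes), 0f 0d 4b 5c is "prefetchw 0x5c(%ebx)"
    (4 bytes), and the disp32 form is 7 bytes. Ignoring the alternatives
    machinery, the patched prefetchw() below boils down to roughly the
    following sketch (the helper name is made up; alternative_input() passes
    a dummy "i" (0) input first, which is why the memory operand is %1):

	/*
	 * Sketch only: with the "m" constraint and the %P modifier the
	 * compiler chooses the addressing mode, so the instruction length
	 * varies and the alternatives code pads the old location to match.
	 */
	static inline void prefetchw_sketch(const void *x)
	{
		asm volatile("prefetchw %P1"
			     : /* no outputs */
			     : "i" (0), "m" (*(const char *)x));
	}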

    Signed-off-by: Borislav Petkov <bp@suse.de>
    Cc: H. Peter Anvin <hpa@linux.intel.com>
    ---
 arch/x86/include/asm/apic.h      |  2 +-
 arch/x86/include/asm/processor.h | 16 +++++++---------
 arch/x86/kernel/cpu/amd.c        |  5 +++++
 3 files changed, 13 insertions(+), 10 deletions(-)

    diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
    index efc3b22d896e..8118e94d50ab 100644
    --- a/arch/x86/include/asm/apic.h
    +++ b/arch/x86/include/asm/apic.h
@@ -91,7 +91,7 @@ static inline void native_apic_mem_write(u32 reg, u32 v)
 {
 	volatile u32 *addr = (volatile u32 *)(APIC_BASE + reg);
 
-	alternative_io("movl %0, %1", "xchgl %0, %1", X86_BUG_11AP,
+	alternative_io("movl %0, %P1", "xchgl %0, %P1", X86_BUG_11AP,
 		       ASM_OUTPUT2("=r" (v), "=m" (*addr)),
 		       ASM_OUTPUT2("0" (v), "m" (*addr)));
 }
    diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
    index ec1c93588cef..7be2c9a6caba 100644
    --- a/arch/x86/include/asm/processor.h
    +++ b/arch/x86/include/asm/processor.h
@@ -761,10 +761,10 @@ extern char ignore_fpu_irq;
 #define ARCH_HAS_SPINLOCK_PREFETCH
 
 #ifdef CONFIG_X86_32
-# define BASE_PREFETCH		ASM_NOP4
+# define BASE_PREFETCH		""
 # define ARCH_HAS_PREFETCH
 #else
-# define BASE_PREFETCH		"prefetcht0 (%1)"
+# define BASE_PREFETCH		"prefetcht0 %P1"
 #endif
 
 /*
@@ -775,10 +775,9 @@ extern char ignore_fpu_irq;
  */
 static inline void prefetch(const void *x)
 {
-	alternative_input(BASE_PREFETCH,
-			  "prefetchnta (%1)",
+	alternative_input(BASE_PREFETCH, "prefetchnta %P1",
 			  X86_FEATURE_XMM,
-			  "r" (x));
+			  "m" (*(const char *)x));
 }
 
 /*
@@ -788,10 +787,9 @@ static inline void prefetch(const void *x)
  */
 static inline void prefetchw(const void *x)
 {
-	alternative_input(BASE_PREFETCH,
-			  "prefetchw (%1)",
-			  X86_FEATURE_3DNOW,
-			  "r" (x));
+	alternative_input(BASE_PREFETCH, "prefetchw %P1",
+			  X86_FEATURE_3DNOWPREFETCH,
+			  "m" (*(const char *)x));
 }
 
 static inline void spin_lock_prefetch(const void *x)
    diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
    index a220239cea65..dd9e50500297 100644
    --- a/arch/x86/kernel/cpu/amd.c
    +++ b/arch/x86/kernel/cpu/amd.c
@@ -711,6 +711,11 @@ static void init_amd(struct cpuinfo_x86 *c)
 		set_cpu_bug(c, X86_BUG_AMD_APIC_C1E);
 
 	rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
+
+	/* 3DNow or LM implies PREFETCHW */
+	if (!cpu_has(c, X86_FEATURE_3DNOWPREFETCH))
+		if (cpu_has(c, X86_FEATURE_3DNOW) || cpu_has(c, X86_FEATURE_LM))
+			set_cpu_cap(c, X86_FEATURE_3DNOWPREFETCH);
 }
 
 #ifdef CONFIG_X86_32
    --
    2.2.0.33.gc18b867
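
    As an aside, not part of the patch: the init_amd() hunk above encodes the
    "3DNow or LM implies PREFETCHW" rule in kernel terms. A rough user-space
    sketch of the same check, assuming the usual CPUID leaf 0x80000001 layout
    (ECX bit 8 = 3DNowPrefetch, EDX bit 29 = LM, EDX bit 31 = 3DNow!):

	/* hypothetical stand-alone illustration, not kernel code */
	#include <cpuid.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned int eax, ebx, ecx, edx;

		if (!__get_cpuid(0x80000001, &eax, &ebx, &ecx, &edx))
			return 1;

		int prfchw = !!(ecx & (1u << 8));	/* 3DNowPrefetch */
		int now3d  = !!(edx & (1u << 31));	/* 3DNow! */
		int lm     = !!(edx & (1u << 29));	/* long mode */

		/* same rule as the init_amd() hunk above */
		if (!prfchw && (now3d || lm))
			prfchw = 1;

		printf("PREFETCHW usable: %s\n", prfchw ? "yes" : "no");
		return 0;
	}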

