Subject: [PATCH 5.10 093/130] x86: Use return-thunk in asm code
    From: Peter Zijlstra <peterz@infradead.org>

    commit aa3d480315ba6c3025a60958e1981072ea37c3df upstream.

    Use the return thunk in asm code. If the thunk isn't needed, it will
    get patched into a RET instruction during boot by apply_returns().
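
    For illustration only (a simplified sketch, not part of this patch):
    a compiled return site carries a 5-byte jump to the thunk, and on CPUs
    that do not need the mitigation apply_returns() rewrites those bytes in
    place at boot, roughly like this:

	/* before patching: return thunk in use */
	jmp	__x86_return_thunk	/* e9 <rel32>, 5 bytes */

	/* after apply_returns() decides the thunk is not needed:
	 * plain return plus INT3 padding for the remaining bytes */
	ret				/* c3 */
	int3				/* cc */
	int3
	int3
	int3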

    Since alternatives can't handle relocations outside of the first
    instruction, putting a 'jmp __x86_return_thunk' in one is not valid,
    therefore carve out the memmove ERMS path into a separate label and jump
    to it.
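
    To make that concrete (a sketch of the idea only; the real change is in
    the memmove_64.S hunk below): the replacement bytes of an ALTERNATIVE are
    copied verbatim and only a relocation in the first instruction is fixed
    up, so once RET expands to 'jmp __x86_return_thunk' it must not sit in
    the middle of a replacement sequence. Keeping the ERMS copy behind an
    ordinary label leaves the leading jmp as the only relocation inside the
    alternative:

	/* problematic once RET == jmp __x86_return_thunk: the thunk jump is
	 * not the first instruction, so its displacement is never fixed up */
	ALTERNATIVE "", __stringify(movq %rdx, %rcx; rep movsb; RET), X86_FEATURE_ERMS

	/* fine: the replacement is a single jmp; RET lives outside it */
	ALTERNATIVE "", "jmp .Lmemmove_erms", X86_FEATURE_ERMS
	...
.Lmemmove_erms:
	movq	%rdx, %rcx
	rep	movsb
	RET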

    Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
    Signed-off-by: Borislav Petkov <bp@suse.de>
    Reviewed-by: Josh Poimboeuf <jpoimboe@kernel.org>
    Signed-off-by: Borislav Petkov <bp@suse.de>
    [cascardo: no RANDSTRUCT_CFLAGS]
    Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
    [bwh: Backported to 5.10: adjust context]
    Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
    Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
    ---
 arch/x86/entry/vdso/Makefile   | 1 +
 arch/x86/include/asm/linkage.h | 8 ++++++++
 arch/x86/lib/memmove_64.S      | 7 ++++++-
 3 files changed, 15 insertions(+), 1 deletion(-)

    --- a/arch/x86/entry/vdso/Makefile
    +++ b/arch/x86/entry/vdso/Makefile
    @@ -91,6 +91,7 @@ endif
    endif

    $(vobjs): KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS) $(RETPOLINE_CFLAGS),$(KBUILD_CFLAGS)) $(CFL)
    +$(vobjs): KBUILD_AFLAGS += -DBUILD_VDSO

    #
    # vDSO code runs in userspace and -pg doesn't help with profiling anyway.
    --- a/arch/x86/include/asm/linkage.h
    +++ b/arch/x86/include/asm/linkage.h
    @@ -18,19 +18,27 @@
    #define __ALIGN_STR __stringify(__ALIGN)
    #endif

    +#if defined(CONFIG_RETPOLINE) && !defined(__DISABLE_EXPORTS) && !defined(BUILD_VDSO)
    +#define RET jmp __x86_return_thunk
    +#else /* CONFIG_RETPOLINE */
    #ifdef CONFIG_SLS
    #define RET ret; int3
    #else
    #define RET ret
    #endif
    +#endif /* CONFIG_RETPOLINE */

    #else /* __ASSEMBLY__ */

    +#if defined(CONFIG_RETPOLINE) && !defined(__DISABLE_EXPORTS) && !defined(BUILD_VDSO)
    +#define ASM_RET "jmp __x86_return_thunk\n\t"
    +#else /* CONFIG_RETPOLINE */
    #ifdef CONFIG_SLS
    #define ASM_RET "ret; int3\n\t"
    #else
    #define ASM_RET "ret\n\t"
    #endif
    +#endif /* CONFIG_RETPOLINE */

    #endif /* __ASSEMBLY__ */

    --- a/arch/x86/lib/memmove_64.S
    +++ b/arch/x86/lib/memmove_64.S
    @@ -40,7 +40,7 @@ SYM_FUNC_START(__memmove)
    /* FSRM implies ERMS => no length checks, do the copy directly */
    .Lmemmove_begin_forward:
    ALTERNATIVE "cmp $0x20, %rdx; jb 1f", "", X86_FEATURE_FSRM
    - ALTERNATIVE "", __stringify(movq %rdx, %rcx; rep movsb; RET), X86_FEATURE_ERMS
    + ALTERNATIVE "", "jmp .Lmemmove_erms", X86_FEATURE_ERMS

    /*
    * movsq instruction have many startup latency
    @@ -206,6 +206,11 @@ SYM_FUNC_START(__memmove)
    movb %r11b, (%rdi)
    13:
    RET
    +
    +.Lmemmove_erms:
    + movq %rdx, %rcx
    + rep movsb
    + RET
    SYM_FUNC_END(__memmove)
    SYM_FUNC_END_ALIAS(memmove)
    EXPORT_SYMBOL(__memmove)
