Subject: Re: [patch V4 part 2 10/18] x86/entry/64: Check IF in __preempt_enable_notrace() thunk


On 5/5/20 3:41 PM, Thomas Gleixner wrote:
> The preempt_enable_notrace() ASM thunk is called from tracing, entry code
> RCU and other places which are already in or going to be in the noinstr
> section which protects sensitve code from being instrumented.

typo: "sensitve"

alex.

> Calls out of these sections happen with interrupts disabled, which is
> handled in C code, but the push regs, call, pop regs sequence can be
> completely avoided in this case.
>
> This is also a preparatory step for annotating the call from the thunk to
> preempt_enable_notrace() safe from a noinstr section.
>
> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
> ---
> arch/x86/entry/thunk_64.S | 27 +++++++++++++++++++++++----
> arch/x86/include/asm/irqflags.h | 3 +--
> arch/x86/include/asm/paravirt.h | 3 +--
> 3 files changed, 25 insertions(+), 8 deletions(-)
>
> --- a/arch/x86/entry/thunk_64.S
> +++ b/arch/x86/entry/thunk_64.S
> @@ -9,10 +9,28 @@
> #include "calling.h"
> #include <asm/asm.h>
> #include <asm/export.h>
> +#include <asm/irqflags.h>
> +
> +.code64
>
> /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
> - .macro THUNK name, func, put_ret_addr_in_rdi=0
> + .macro THUNK name, func, put_ret_addr_in_rdi=0, check_if=0
> SYM_FUNC_START_NOALIGN(\name)
> +
> + .if \check_if
> + /*
> + * Check for interrupts disabled right here. No point in
> + * going all the way down
> + */
> + pushq %rax
> + SAVE_FLAGS(CLBR_RAX)
> + testl $X86_EFLAGS_IF, %eax
> + popq %rax
> + jnz 1f
> + ret
> +1:
> + .endif
> +
> pushq %rbp
> movq %rsp, %rbp
>
> @@ -38,14 +56,15 @@ SYM_FUNC_END(\name)
> .endm
>
> #ifdef CONFIG_TRACE_IRQFLAGS
> - THUNK trace_hardirqs_on_thunk,trace_hardirqs_on_caller,1
> - THUNK trace_hardirqs_off_thunk,trace_hardirqs_off_caller,1
> + THUNK trace_hardirqs_on_thunk,trace_hardirqs_on_caller, put_ret_addr_in_rdi=1
> + THUNK trace_hardirqs_off_thunk,trace_hardirqs_off_caller, put_ret_addr_in_rdi=1
> #endif
>
> #ifdef CONFIG_PREEMPTION
> THUNK preempt_schedule_thunk, preempt_schedule
> - THUNK preempt_schedule_notrace_thunk, preempt_schedule_notrace
> EXPORT_SYMBOL(preempt_schedule_thunk)
> +
> + THUNK preempt_schedule_notrace_thunk, preempt_schedule_notrace, check_if=1
> EXPORT_SYMBOL(preempt_schedule_notrace_thunk)
> #endif
>
> --- a/arch/x86/include/asm/irqflags.h
> +++ b/arch/x86/include/asm/irqflags.h
> @@ -127,9 +127,8 @@ static inline notrace unsigned long arch
> #define DISABLE_INTERRUPTS(x) cli
>
> #ifdef CONFIG_X86_64
> -#ifdef CONFIG_DEBUG_ENTRY
> +
> #define SAVE_FLAGS(x) pushfq; popq %rax
> -#endif
>
> #define SWAPGS swapgs
> /*
> --- a/arch/x86/include/asm/paravirt.h
> +++ b/arch/x86/include/asm/paravirt.h
> @@ -907,14 +907,13 @@ extern void default_banner(void);
> ANNOTATE_RETPOLINE_SAFE; \
> jmp PARA_INDIRECT(pv_ops+PV_CPU_usergs_sysret64);)
>
> -#ifdef CONFIG_DEBUG_ENTRY
> #define SAVE_FLAGS(clobbers) \
> PARA_SITE(PARA_PATCH(PV_IRQ_save_fl), \
> PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \
> ANNOTATE_RETPOLINE_SAFE; \
> call PARA_INDIRECT(pv_ops+PV_IRQ_save_fl); \
> PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
> -#endif
> +
> #endif /* CONFIG_PARAVIRT_XXL */
> #endif /* CONFIG_X86_64 */
>
>
