Date: Tue, 05 May 2020 15:49:40 +0200
From: Thomas Gleixner <>
Subject: [patch V4 part 4 14/24] x86/nmi: Protect NMI entry against instrumentation
Mark all functions in the fragile code parts noinstr or force-inline them so they can't be instrumented.
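For context, a minimal sketch of the pattern this series applies (not part of the patch; the function names example_entry() and do_instrumentable_work() are made up for illustration): helpers called from non-instrumentable code are marked __always_inline so no out-of-line, instrumentable call is emitted, and any region inside a noinstr function where tracing/probing is safe is bracketed with instr_begin()/instr_end().

/* Illustrative sketch only -- example_entry() and do_instrumentable_work()
 * are hypothetical and not part of this patch. */
static __always_inline void helper_used_from_noinstr(void)
{
	/* Must not call anything that can be instrumented. */
}

noinstr void example_entry(struct pt_regs *regs)
{
	helper_used_from_noinstr();	/* inlined, stays non-instrumentable */

	instr_begin();			/* instrumentation allowed from here */
	do_instrumentable_work(regs);	/* tracing/kprobes are fine in here */
	instr_end();			/* back to the protected section */
}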
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 arch/x86/include/asm/desc.h  |    8 ++++----
 arch/x86/kernel/cpu/common.c |    6 ++----
 arch/x86/kernel/nmi.c        |   10 ++++++----
 3 files changed, 12 insertions(+), 12 deletions(-)
--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
@@ -214,7 +214,7 @@ static inline void native_load_gdt(const
 	asm volatile("lgdt %0"::"m" (*dtr));
 }
 
-static inline void native_load_idt(const struct desc_ptr *dtr)
+static __always_inline void native_load_idt(const struct desc_ptr *dtr)
 {
 	asm volatile("lidt %0"::"m" (*dtr));
 }
@@ -393,7 +393,7 @@ extern unsigned long system_vectors[];
 
 #ifdef CONFIG_X86_64
 DECLARE_PER_CPU(u32, debug_idt_ctr);
-static inline bool is_debug_idt_enabled(void)
+static __always_inline bool is_debug_idt_enabled(void)
 {
 	if (this_cpu_read(debug_idt_ctr))
 		return true;
@@ -401,7 +401,7 @@ static inline bool is_debug_idt_enabled(
 	return false;
 }
 
-static inline void load_debug_idt(void)
+static __always_inline void load_debug_idt(void)
 {
 	load_idt((const struct desc_ptr *)&debug_idt_descr);
 }
@@ -423,7 +423,7 @@ static inline void load_debug_idt(void)
  * that doesn't need to disable interrupts, as nothing should be
  * bothering the CPU then.
  */
-static inline void load_current_idt(void)
+static __always_inline void load_current_idt(void)
 {
 	if (is_debug_idt_enabled())
 		load_debug_idt();
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1692,21 +1692,19 @@ void syscall_init(void)
 DEFINE_PER_CPU(int, debug_stack_usage);
 DEFINE_PER_CPU(u32, debug_idt_ctr);
 
-void debug_stack_set_zero(void)
+noinstr void debug_stack_set_zero(void)
 {
 	this_cpu_inc(debug_idt_ctr);
 	load_current_idt();
 }
-NOKPROBE_SYMBOL(debug_stack_set_zero);
 
-void debug_stack_reset(void)
+noinstr void debug_stack_reset(void)
 {
 	if (WARN_ON(!this_cpu_read(debug_idt_ctr)))
 		return;
 	if (this_cpu_dec_return(debug_idt_ctr) == 0)
 		load_current_idt();
 }
-NOKPROBE_SYMBOL(debug_stack_reset);
 
 #else /* CONFIG_X86_64 */
 
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -307,7 +307,7 @@ NOKPROBE_SYMBOL(unknown_nmi_error);
 static DEFINE_PER_CPU(bool, swallow_nmi);
 static DEFINE_PER_CPU(unsigned long, last_nmi_rip);
 
-static void default_do_nmi(struct pt_regs *regs)
+static noinstr void default_do_nmi(struct pt_regs *regs)
 {
 	unsigned char reason = 0;
 	int handled;
@@ -333,6 +333,7 @@ static void default_do_nmi(struct pt_reg
 
 	__this_cpu_write(last_nmi_rip, regs->ip);
 
+	instr_begin();
 	handled = nmi_handle(NMI_LOCAL, regs);
 	__this_cpu_add(nmi_stats.normal, handled);
 	if (handled) {
@@ -346,6 +347,7 @@ static void default_do_nmi(struct pt_reg
 		 */
 		if (handled > 1)
 			__this_cpu_write(swallow_nmi, true);
+		instr_end();
 		return;
 	}
 
@@ -378,6 +380,7 @@ static void default_do_nmi(struct pt_reg
 #endif
 		__this_cpu_add(nmi_stats.external, 1);
 		raw_spin_unlock(&nmi_reason_lock);
+		instr_end();
 		return;
 	}
 	raw_spin_unlock(&nmi_reason_lock);
@@ -416,8 +419,8 @@ static void default_do_nmi(struct pt_reg
 		__this_cpu_add(nmi_stats.swallow, 1);
 	else
 		unknown_nmi_error(reason, regs);
+	instr_end();
 }
-NOKPROBE_SYMBOL(default_do_nmi);
 
 /*
  * NMIs can page fault or hit breakpoints which will cause it to lose
@@ -489,7 +492,7 @@ static DEFINE_PER_CPU(unsigned long, nmi
  */
 static DEFINE_PER_CPU(int, update_debug_stack);
 
-static bool notrace is_debug_stack(unsigned long addr)
+static noinstr bool is_debug_stack(unsigned long addr)
 {
 	struct cea_exception_stacks *cs = __this_cpu_read(cea_exception_stacks);
 	unsigned long top = CEA_ESTACK_TOP(cs, DB);
@@ -504,7 +507,6 @@ static bool notrace is_debug_stack(unsig
 	 */
 	return addr >= bot && addr < top;
 }
-NOKPROBE_SYMBOL(is_debug_stack);
 #endif
 
 DEFINE_IDTENTRY_NMI(exc_nmi)