    Subject: [tip: x86/entry] x86/entry/32: Remove redundant irq disable code
    The following commit has been merged into the x86/entry branch of tip:

    Commit-ID: 3728dd935c76646515e91e109067659200eb3a1b
    Gitweb: https://git.kernel.org/tip/3728dd935c76646515e91e109067659200eb3a1b
    Author: Thomas Gleixner <tglx@linutronix.de>
    AuthorDate: Thu, 21 May 2020 22:05:49 +02:00
    Committer: Ingo Molnar <mingo@kernel.org>
    CommitterDate: Tue, 26 May 2020 19:06:29 +02:00

    x86/entry/32: Remove redundant irq disable code

    All exceptions/interrupts return with interrupts disabled now. No point in
    doing this in ASM again.

    Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
    Signed-off-by: Ingo Molnar <mingo@kernel.org>
    Acked-by: Andy Lutomirski <luto@kernel.org>
    Link: https://lore.kernel.org/r/20200521202120.221223450@linutronix.de
    ---
    arch/x86/entry/entry_32.S | 76 +--------------------------------------
    1 file changed, 76 deletions(-)

    diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
    index 4a4f34b..96fa462 100644
    --- a/arch/x86/entry/entry_32.S
    +++ b/arch/x86/entry/entry_32.S
    @@ -51,34 +51,6 @@

    .section .entry.text, "ax"

    -/*
    - * We use macros for low-level operations which need to be overridden
    - * for paravirtualization. The following will never clobber any registers:
    - * INTERRUPT_RETURN (aka. "iret")
    - * GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
    - * ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
    - *
    - * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
    - * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
    - * Allowing a register to be clobbered can shrink the paravirt replacement
    - * enough to patch inline, increasing performance.
    - */
    -
    -#ifdef CONFIG_PREEMPTION
    -# define preempt_stop(clobbers) DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
    -#else
    -# define preempt_stop(clobbers)
    -#endif
    -
    -.macro TRACE_IRQS_IRET
    -#ifdef CONFIG_TRACE_IRQFLAGS
    - testl $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off?
    - jz 1f
    - TRACE_IRQS_ON
    -1:
    -#endif
    -.endm
    -
    #define PTI_SWITCH_MASK (1 << PAGE_SHIFT)

    /*
    @@ -881,38 +853,6 @@ SYM_CODE_START(ret_from_fork)
    SYM_CODE_END(ret_from_fork)
    .popsection

    -/*
    - * Return to user mode is not as complex as all this looks,
    - * but we want the default path for a system call return to
    - * go as quickly as possible which is why some of this is
    - * less clear than it otherwise should be.
    - */
    -
    - # userspace resumption stub bypassing syscall exit tracing
    -SYM_CODE_START_LOCAL(ret_from_exception)
    - preempt_stop(CLBR_ANY)
    -ret_from_intr:
    -#ifdef CONFIG_VM86
    - movl PT_EFLAGS(%esp), %eax # mix EFLAGS and CS
    - movb PT_CS(%esp), %al
    - andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
    -#else
    - /*
    - * We can be coming here from child spawned by kernel_thread().
    - */
    - movl PT_CS(%esp), %eax
    - andl $SEGMENT_RPL_MASK, %eax
    -#endif
    - cmpl $USER_RPL, %eax
    - jb restore_all_kernel # not returning to v8086 or userspace
    -
    - DISABLE_INTERRUPTS(CLBR_ANY)
    - TRACE_IRQS_OFF
    - movl %esp, %eax
    - call prepare_exit_to_usermode
    - jmp restore_all_switch_stack
    -SYM_CODE_END(ret_from_exception)
    -
    SYM_ENTRY(__begin_SYSENTER_singlestep_region, SYM_L_GLOBAL, SYM_A_NONE)
    /*
    * All code from here through __end_SYSENTER_singlestep_region is subject
    @@ -1147,22 +1087,6 @@ restore_all_switch_stack:
    */
    INTERRUPT_RETURN

    -restore_all_kernel:
    -#ifdef CONFIG_PREEMPTION
    - DISABLE_INTERRUPTS(CLBR_ANY)
    - cmpl $0, PER_CPU_VAR(__preempt_count)
    - jnz .Lno_preempt
    - testl $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off (exception path) ?
    - jz .Lno_preempt
    - call preempt_schedule_irq
    -.Lno_preempt:
    -#endif
    - TRACE_IRQS_IRET
    - PARANOID_EXIT_TO_KERNEL_MODE
    - BUG_IF_WRONG_CR3
    - RESTORE_REGS 4
    - jmp .Lirq_return
    -
    .section .fixup, "ax"
    SYM_CODE_START(asm_iret_error)
    pushl $0 # no error code
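
    A minimal standalone C sketch (not the kernel's actual implementation; all
    *_sketch() helpers below are hypothetical stand-ins) of the idea behind the
    removal: once the C-level exit work, analogous in spirit to
    prepare_exit_to_usermode() called in the deleted ret_from_exception path,
    always finishes with interrupts disabled, the DISABLE_INTERRUPTS/preempt_stop
    sequences deleted above have nothing left to do before IRET.

    #include <stdbool.h>
    #include <stdio.h>

    static bool irqs_enabled;

    static void local_irq_disable_sketch(void) { irqs_enabled = false; } /* "cli" */
    static void local_irq_enable_sketch(void)  { irqs_enabled = true;  } /* "sti" */
    static bool work_pending_sketch(void)      { return false; }         /* e.g. need_resched */

    /*
     * C-level exit work: it may re-enable interrupts to handle pending
     * work, but it always returns to the ASM stub with interrupts off.
     */
    static void exit_to_user_mode_sketch(void)
    {
        local_irq_disable_sketch();
        while (work_pending_sketch()) {
            local_irq_enable_sketch();   /* handle work with IRQs on */
            /* ... schedule, deliver signals, ... */
            local_irq_disable_sketch();  /* re-check with IRQs off   */
        }
        /*
         * Post-condition: IRQs are off, so the assembly return path can
         * go straight to register restore and IRET without another cli.
         */
    }

    int main(void)
    {
        exit_to_user_mode_sketch();
        printf("interrupts %s before IRET\n", irqs_enabled ? "on" : "off");
        return 0;
    }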