Subject: [RFC][PATCH 14/18] x86/xen: Make irq_enable() noinstr

Make the irq_enable() paravirt implementations noinstr and move them
into .noinstr.text, such that the pv_ops.irq.irq_enable indirect call
is safe to use from noinstr code. This cures:

vmlinux.o: warning: objtool: pv_ops[32]: native_irq_enable
vmlinux.o: warning: objtool: pv_ops[32]: __raw_callee_save_xen_irq_enable
vmlinux.o: warning: objtool: pv_ops[32]: xen_irq_enable_direct
vmlinux.o: warning: objtool: lock_is_held_type()+0xfe: call to pv_ops[32]() leaves .noinstr.text section

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
 arch/x86/kernel/paravirt.c |    7 ++++-
 arch/x86/xen/irq.c         |    4 +--
 arch/x86/xen/xen-asm.S     |   56 ++++++++++++++++++++++-----------------------
 3 files changed, 36 insertions(+), 31 deletions(-)
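
For reference: noinstr places a function in the .noinstr.text section
and disables tracing and sanitizer instrumentation for it; that section
is what objtool validates, complaining whenever code in it calls out to
something that is not also noinstr. A simplified sketch of the
annotation (the real definition lives in include/linux/compiler_types.h
and carries a few more compiler-version-specific attributes):

/* Simplified sketch, not the exact kernel definition. */
#define noinstr							\
	noinline notrace					\
	__attribute__((__section__(".noinstr.text")))		\
	__no_kcsan __no_sanitize_address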

--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -258,6 +258,11 @@ static noinstr void pv_native_set_debugr
 	native_set_debugreg(regno, val);
 }
 
+static noinstr void pv_native_irq_enable(void)
+{
+	native_irq_enable();
+}
+
 struct paravirt_patch_template pv_ops = {
 	/* Cpu ops. */
 	.cpu.io_delay		= native_io_delay,
@@ -302,7 +307,7 @@ struct paravirt_patch_template pv_ops =
 	/* Irq ops. */
 	.irq.save_fl		= __PV_IS_CALLEE_SAVE(native_save_fl),
 	.irq.irq_disable	= __PV_IS_CALLEE_SAVE(native_irq_disable),
-	.irq.irq_enable		= __PV_IS_CALLEE_SAVE(native_irq_enable),
+	.irq.irq_enable		= __PV_IS_CALLEE_SAVE(pv_native_irq_enable),
	.irq.safe_halt		= native_safe_halt,
 	.irq.halt		= native_halt,
 #endif /* CONFIG_PARAVIRT_XXL */
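
For reference: native_irq_enable() is a static __always_inline STI
wrapper in arch/x86/include/asm/irqflags.h. Taking its address for
pv_ops forces the compiler to emit an out-of-line copy, but that copy
lands in regular .text with instrumentation, which is what the
"pv_ops[32]: native_irq_enable" warning above complains about. The new
wrapper gives pv_ops a symbol whose body, the inlined STI, lives
entirely in .noinstr.text. A rough sketch of the pieces involved
(simplified, not the exact kernel code):

/* arch/x86/include/asm/irqflags.h (simplified) */
static __always_inline void native_irq_enable(void)
{
	asm volatile("sti" : : : "memory");	/* set IF, enable IRQs */
}

/* The wrapper added above: a real out-of-line symbol for pv_ops. */
static noinstr void pv_native_irq_enable(void)
{
	native_irq_enable();	/* STI inlined into .noinstr.text */
}
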
--- a/arch/x86/xen/irq.c
+++ b/arch/x86/xen/irq.c
@@ -53,7 +53,7 @@ asmlinkage __visible void xen_irq_disabl
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_irq_disable);
 
-asmlinkage __visible void xen_irq_enable(void)
+asmlinkage __visible noinstr void xen_irq_enable(void)
 {
 	struct vcpu_info *vcpu;
 
@@ -76,7 +76,7 @@ asmlinkage __visible void xen_irq_enable
 
 	preempt_enable();
 }
-PV_CALLEE_SAVE_REGS_THUNK(xen_irq_enable);
+__PV_CALLEE_SAVE_REGS_THUNK(xen_irq_enable, ".noinstr.text");
 
 static void xen_safe_halt(void)
 {
--- a/arch/x86/xen/xen-asm.S
+++ b/arch/x86/xen/xen-asm.S
@@ -22,33 +22,6 @@
 #include <linux/linkage.h>
 
 /*
- * Enable events. This clears the event mask and tests the pending
- * event status with one and operation. If there are pending events,
- * then enter the hypervisor to get them handled.
- */
-SYM_FUNC_START(xen_irq_enable_direct)
-	FRAME_BEGIN
-	/* Unmask events */
-	movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
-
-	/*
-	 * Preempt here doesn't matter because that will deal with any
-	 * pending interrupts. The pending check may end up being run
-	 * on the wrong CPU, but that doesn't hurt.
-	 */
-
-	/* Test for pending */
-	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
-	jz 1f
-
-	call check_events
-1:
-	FRAME_END
-	ret
-SYM_FUNC_END(xen_irq_enable_direct)
-
-
-/*
  * Disabling events is simply a matter of making the event mask
  * non-zero.
  */
@@ -57,6 +30,8 @@ SYM_FUNC_START(xen_irq_disable_direct)
 	ret
 SYM_FUNC_END(xen_irq_disable_direct)
 
+.pushsection .noinstr.text, "ax"
+
 /*
  * Force an event check by making a hypercall, but preserve regs
  * before making the call.
@@ -86,7 +61,32 @@ SYM_FUNC_START(check_events)
 	ret
 SYM_FUNC_END(check_events)
 
-.pushsection .noinstr.text, "ax"
+/*
+ * Enable events. This clears the event mask and tests the pending
+ * event status with one and operation. If there are pending events,
+ * then enter the hypervisor to get them handled.
+ */
+SYM_FUNC_START(xen_irq_enable_direct)
+	FRAME_BEGIN
+	/* Unmask events */
+	movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
+
+	/*
+	 * Preempt here doesn't matter because that will deal with any
+	 * pending interrupts. The pending check may end up being run
+	 * on the wrong CPU, but that doesn't hurt.
+	 */
+
+	/* Test for pending */
+	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
+	jz 1f
+
+	call check_events
+1:
+	FRAME_END
+	ret
+SYM_FUNC_END(xen_irq_enable_direct)
+
 /*
  * (xen_)save_fl is used to get the current interrupt enable status.
  * Callers expect the status to be in X86_EFLAGS_IF, and other bits
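
Net effect in xen-asm.S: the .pushsection .noinstr.text now opens
before check_events instead of after it, and xen_irq_enable_direct
moves down to sit inside that section, after check_events. Both
functions therefore land in .noinstr.text, and the "call check_events"
made while events are being unmasked no longer leaves the section.
Resulting layout (sketch, not the literal file contents):

	.pushsection .noinstr.text, "ax"
	check_events			/* hypercall, regs preserved */
	xen_irq_enable_direct		/* calls check_events */
	xen_save_fl_direct		/* already here before this patch */
	...
	.popsection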
