    Subject: [ 26/27] x86, mm: Patch out arch_flush_lazy_mmu_mode() when running on bare metal
    3.8-stable review patch.  If anyone has any objections, please let me know.

    ------------------

    From: Boris Ostrovsky <boris.ostrovsky@oracle.com>

    commit 511ba86e1d386f671084b5d0e6f110bb30b8eeb2 upstream.

    Invoking arch_flush_lazy_mmu_mode() results in calls to
    preempt_enable()/disable(), which may have a performance impact.

    Since lazy MMU mode is not used on bare metal, we can patch away
    arch_flush_lazy_mmu_mode() so that it is never called in such an
    environment.
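
    To illustrate the idea, here is a minimal standalone C sketch (not
    kernel code; struct lazy_ops and the demo_* names are invented for
    the example): the flush operation becomes a per-platform hook in an
    ops table, so bare metal can install a no-op while a paravirtualized
    guest installs a real flush.

    #include <stdio.h>

    struct lazy_ops {
            void (*enter)(void);
            void (*leave)(void);
            void (*flush)(void);            /* the hook this patch adds */
    };

    static void demo_nop(void) { }          /* stands in for paravirt_nop */

    /* stands in for paravirt_flush_lazy_mmu(): push out batched updates */
    static void demo_flush(void)
    {
            printf("flushing batched MMU updates\n");
    }

    /* bare metal: lazy MMU is unused, so flushing is a no-op */
    static const struct lazy_ops baremetal_ops = {
            .enter = demo_nop, .leave = demo_nop, .flush = demo_nop,
    };

    /* paravirtualized guest (Xen/lguest in the real patch): real flush */
    static const struct lazy_ops guest_ops = {
            .enter = demo_nop, .leave = demo_nop, .flush = demo_flush,
    };

    int main(void)
    {
            baremetal_ops.flush();  /* does nothing, costs next to nothing */
            guest_ops.flush();      /* pushes out the batched updates */
            return 0;
    }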

    [ hpa: the previous patch "Fix vmalloc_fault oops during lazy MMU
    updates" may cause a minor performance regression on
    bare metal. This patch resolves that performance regression. It is
    somewhat unclear to me if this is a good -stable candidate. ]

    Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
    Link: http://lkml.kernel.org/r/1364045796-10720-2-git-send-email-konrad.wilk@oracle.com
    Tested-by: Josh Boyer <jwboyer@redhat.com>
    Tested-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
    Acked-by: Borislav Petkov <bp@suse.de>
    Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
    Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
    Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

    ---
    arch/x86/include/asm/paravirt.h       |    5 ++++-
    arch/x86/include/asm/paravirt_types.h |    2 ++
    arch/x86/kernel/paravirt.c            |   25 +++++++++++++------------
    arch/x86/lguest/boot.c                |    1 +
    arch/x86/xen/mmu.c                    |    1 +
    5 files changed, 21 insertions(+), 13 deletions(-)

    --- a/arch/x86/include/asm/paravirt.h
    +++ b/arch/x86/include/asm/paravirt.h
    @@ -703,7 +703,10 @@ static inline void arch_leave_lazy_mmu_m
             PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
     }

    -void arch_flush_lazy_mmu_mode(void);
    +static inline void arch_flush_lazy_mmu_mode(void)
    +{
    +        PVOP_VCALL0(pv_mmu_ops.lazy_mode.flush);
    +}

     static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
                                     phys_addr_t phys, pgprot_t flags)
    --- a/arch/x86/include/asm/paravirt_types.h
    +++ b/arch/x86/include/asm/paravirt_types.h
    @@ -91,6 +91,7 @@ struct pv_lazy_ops {
             /* Set deferred update mode, used for batching operations. */
             void (*enter)(void);
             void (*leave)(void);
    +        void (*flush)(void);
     };

     struct pv_time_ops {
    @@ -679,6 +680,7 @@ void paravirt_end_context_switch(struct

     void paravirt_enter_lazy_mmu(void);
     void paravirt_leave_lazy_mmu(void);
    +void paravirt_flush_lazy_mmu(void);

     void _paravirt_nop(void);
     u32 _paravirt_ident_32(u32);
    --- a/arch/x86/kernel/paravirt.c
    +++ b/arch/x86/kernel/paravirt.c
    @@ -263,6 +263,18 @@ void paravirt_leave_lazy_mmu(void)
             leave_lazy(PARAVIRT_LAZY_MMU);
     }

    +void paravirt_flush_lazy_mmu(void)
    +{
    +        preempt_disable();
    +
    +        if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
    +                arch_leave_lazy_mmu_mode();
    +                arch_enter_lazy_mmu_mode();
    +        }
    +
    +        preempt_enable();
    +}
    +
     void paravirt_start_context_switch(struct task_struct *prev)
     {
             BUG_ON(preemptible());
    @@ -292,18 +304,6 @@ enum paravirt_lazy_mode paravirt_get_laz
             return this_cpu_read(paravirt_lazy_mode);
     }

    -void arch_flush_lazy_mmu_mode(void)
    -{
    -        preempt_disable();
    -
    -        if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
    -                arch_leave_lazy_mmu_mode();
    -                arch_enter_lazy_mmu_mode();
    -        }
    -
    -        preempt_enable();
    -}
    -
     struct pv_info pv_info = {
             .name = "bare hardware",
             .paravirt_enabled = 0,
    @@ -475,6 +475,7 @@ struct pv_mmu_ops pv_mmu_ops = {
             .lazy_mode = {
                     .enter = paravirt_nop,
                     .leave = paravirt_nop,
    +                .flush = paravirt_nop,
             },

             .set_fixmap = native_set_fixmap,
    --- a/arch/x86/lguest/boot.c
    +++ b/arch/x86/lguest/boot.c
    @@ -1333,6 +1333,7 @@ __init void lguest_init(void)
             pv_mmu_ops.read_cr3 = lguest_read_cr3;
             pv_mmu_ops.lazy_mode.enter = paravirt_enter_lazy_mmu;
             pv_mmu_ops.lazy_mode.leave = lguest_leave_lazy_mmu_mode;
    +        pv_mmu_ops.lazy_mode.flush = paravirt_flush_lazy_mmu;
             pv_mmu_ops.pte_update = lguest_pte_update;
             pv_mmu_ops.pte_update_defer = lguest_pte_update;

    --- a/arch/x86/xen/mmu.c
    +++ b/arch/x86/xen/mmu.c
    @@ -2190,6 +2190,7 @@ static const struct pv_mmu_ops xen_mmu_o
             .lazy_mode = {
                     .enter = paravirt_enter_lazy_mmu,
                     .leave = xen_leave_lazy_mmu,
    +                .flush = paravirt_flush_lazy_mmu,
             },

             .set_fixmap = xen_set_fixmap,
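
    For reference, here is a second standalone sketch (again not kernel
    code; every name is invented for the example) of what the flush hook
    installed above does: while in lazy ("batching") mode, a flush is
    simply leave-then-re-enter, which applies any queued updates but
    keeps batching enabled, matching the pattern of
    paravirt_flush_lazy_mmu() in the arch/x86/kernel/paravirt.c hunk.

    #include <stdbool.h>
    #include <stdio.h>

    static bool lazy_mode;
    static int queued_updates;

    static void demo_enter_lazy(void)
    {
            lazy_mode = true;
    }

    /* apply everything that was queued while batching */
    static void demo_leave_lazy(void)
    {
            printf("applying %d queued update(s)\n", queued_updates);
            queued_updates = 0;
            lazy_mode = false;
    }

    static void demo_queue_update(void)
    {
            if (lazy_mode)
                    queued_updates++;       /* deferred until leave/flush */
            else
                    printf("applying 1 update immediately\n");
    }

    /* mirrors the leave-then-enter pattern of paravirt_flush_lazy_mmu() */
    static void demo_flush_lazy(void)
    {
            if (lazy_mode) {
                    demo_leave_lazy();
                    demo_enter_lazy();
            }
    }

    int main(void)
    {
            demo_enter_lazy();
            demo_queue_update();
            demo_queue_update();
            demo_flush_lazy();      /* applies 2 queued updates */
            demo_queue_update();    /* still batching after the flush */
            demo_leave_lazy();      /* applies the remaining update */
            return 0;
    }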


