    Date: Wed, 20 Nov 2013
    From: Peter Zijlstra <peterz@infradead.org>
    Subject: [PATCH 1/7] x86, acpi, idle: Restructure the mwait idle routines
    People seem to delight in writing wrong and broken mwait idle routines;
    collapse the lot.

    This leaves mwait_play_dead() as the sole remaining user of __mwait();
    any new __mwait() users are probably doing it wrong.

    Also remove __sti_mwait(), as it's unused.
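
    For illustration only (not part of the patch): every converted call
    site follows the same before/after shape, where eax/ecx are whatever
    hint values the driver already computed:

	/* before: each driver open-coded the sequence, subtly differently */
	__monitor((void *)&current_thread_info()->flags, 0, 0);
	smp_mb();
	if (!need_resched())
		__mwait(eax, ecx);

	/* after: one shared helper, which also does the polling bookkeeping */
	mwait_idle_with_hints(eax, ecx);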

    Cc: arjan@linux.intel.com
    Cc: jacob.jun.pan@linux.intel.com
    Cc: Mike Galbraith <bitbucket@online.de>
    Cc: Ingo Molnar <mingo@kernel.org>
    Cc: Thomas Gleixner <tglx@linutronix.de>
    Cc: hpa@zytor.com
    Cc: lenb@kernel.org
    Cc: rui.zhang@intel.com
    Acked-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
    Signed-off-by: Peter Zijlstra <peterz@infradead.org>
    ---
    arch/x86/include/asm/mwait.h       | 40 ++++++++++++++++++++++++++++++++++++++
    arch/x86/include/asm/processor.h   | 23 -----------------------
    arch/x86/kernel/acpi/cstate.c      | 23 -----------------------
    drivers/acpi/acpi_pad.c            |  5 +----
    drivers/acpi/processor_idle.c      | 15 ---------------
    drivers/idle/intel_idle.c          |  8 +-------
    drivers/thermal/intel_powerclamp.c |  4 +---
    7 files changed, 43 insertions(+), 75 deletions(-)

    --- a/arch/x86/include/asm/mwait.h
    +++ b/arch/x86/include/asm/mwait.h
    @@ -1,6 +1,8 @@
     #ifndef _ASM_X86_MWAIT_H
     #define _ASM_X86_MWAIT_H
     
    +#include <linux/sched.h>
    +
     #define MWAIT_SUBSTATE_MASK		0xf
     #define MWAIT_CSTATE_MASK		0xf
     #define MWAIT_SUBSTATE_SIZE		4
    @@ -13,4 +15,42 @@
     
     #define MWAIT_ECX_INTERRUPT_BREAK	0x1
     
    +static inline void __monitor(const void *eax, unsigned long ecx,
    +			     unsigned long edx)
    +{
    +	/* "monitor %eax, %ecx, %edx;" */
    +	asm volatile(".byte 0x0f, 0x01, 0xc8;"
    +		     :: "a" (eax), "c" (ecx), "d"(edx));
    +}
    +
    +static inline void __mwait(unsigned long eax, unsigned long ecx)
    +{
    +	/* "mwait %eax, %ecx;" */
    +	asm volatile(".byte 0x0f, 0x01, 0xc9;"
    +		     :: "a" (eax), "c" (ecx));
    +}
    +
    +/*
    + * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
    + * which can obviate IPI to trigger checking of need_resched.
    + * We execute MONITOR against need_resched and enter optimized wait state
    + * through MWAIT. Whenever someone changes need_resched, we would be woken
    + * up from MWAIT (without an IPI).
    + *
    + * New with Core Duo processors, MWAIT can take some hints based on CPU
    + * capability.
    + */
    +static inline void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
    +{
    +	if (!current_set_polling_and_test()) {
    +		if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
    +			clflush((void *)&current_thread_info()->flags);
    +
    +		__monitor((void *)&current_thread_info()->flags, 0, 0);
    +		if (!need_resched())
    +			__mwait(eax, ecx);
    +	}
    +	__current_clr_polling();
    +}
    +
     #endif /* _ASM_X86_MWAIT_H */
    --- a/arch/x86/include/asm/processor.h
    +++ b/arch/x86/include/asm/processor.h
    @@ -700,29 +700,6 @@ static inline void sync_core(void)
     #endif
     }
     
    -static inline void __monitor(const void *eax, unsigned long ecx,
    -			     unsigned long edx)
    -{
    -	/* "monitor %eax, %ecx, %edx;" */
    -	asm volatile(".byte 0x0f, 0x01, 0xc8;"
    -		     :: "a" (eax), "c" (ecx), "d"(edx));
    -}
    -
    -static inline void __mwait(unsigned long eax, unsigned long ecx)
    -{
    -	/* "mwait %eax, %ecx;" */
    -	asm volatile(".byte 0x0f, 0x01, 0xc9;"
    -		     :: "a" (eax), "c" (ecx));
    -}
    -
    -static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
    -{
    -	trace_hardirqs_on();
    -	/* "mwait %eax, %ecx;" */
    -	asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
    -		     :: "a" (eax), "c" (ecx));
    -}
    -
     extern void select_idle_routine(const struct cpuinfo_x86 *c);
     extern void init_amd_e400_c1e_mask(void);

    --- a/arch/x86/kernel/acpi/cstate.c
    +++ b/arch/x86/kernel/acpi/cstate.c
    @@ -150,29 +150,6 @@ int acpi_processor_ffh_cstate_probe(unsi
     }
     EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_probe);
     
    -/*
    - * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
    - * which can obviate IPI to trigger checking of need_resched.
    - * We execute MONITOR against need_resched and enter optimized wait state
    - * through MWAIT. Whenever someone changes need_resched, we would be woken
    - * up from MWAIT (without an IPI).
    - *
    - * New with Core Duo processors, MWAIT can take some hints based on CPU
    - * capability.
    - */
    -void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
    -{
    -	if (!need_resched()) {
    -		if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
    -			clflush((void *)&current_thread_info()->flags);
    -
    -		__monitor((void *)&current_thread_info()->flags, 0, 0);
    -		smp_mb();
    -		if (!need_resched())
    -			__mwait(ax, cx);
    -	}
    -}
    -
     void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cx)
     {
     	unsigned int cpu = smp_processor_id();
    --- a/drivers/acpi/acpi_pad.c
    +++ b/drivers/acpi/acpi_pad.c
    @@ -193,10 +193,7 @@ static int power_saving_thread(void *dat
    				CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
     		stop_critical_timings();
     
    -		__monitor((void *)&current_thread_info()->flags, 0, 0);
    -		smp_mb();
    -		if (!need_resched())
    -			__mwait(power_saving_mwait_eax, 1);
    +		mwait_idle_with_hints(power_saving_mwait_eax, 1);
     
     		start_critical_timings();
     		if (lapic_marked_unstable)
    --- a/drivers/acpi/processor_idle.c
    +++ b/drivers/acpi/processor_idle.c
    @@ -727,11 +727,6 @@ static int acpi_idle_enter_c1(struct cpu
     	if (unlikely(!pr))
     		return -EINVAL;
     
    -	if (cx->entry_method == ACPI_CSTATE_FFH) {
    -		if (current_set_polling_and_test())
    -			return -EINVAL;
    -	}
    -
     	lapic_timer_state_broadcast(pr, cx, 1);
     	acpi_idle_do_entry(cx);
     
    @@ -785,11 +780,6 @@ static int acpi_idle_enter_simple(struct
     	if (unlikely(!pr))
     		return -EINVAL;
     
    -	if (cx->entry_method == ACPI_CSTATE_FFH) {
    -		if (current_set_polling_and_test())
    -			return -EINVAL;
    -	}
    -
     	/*
     	 * Must be done before busmaster disable as we might need to
     	 * access HPET !
    @@ -841,11 +831,6 @@ static int acpi_idle_enter_bm(struct cpu
     		}
     	}
     
    -	if (cx->entry_method == ACPI_CSTATE_FFH) {
    -		if (current_set_polling_and_test())
    -			return -EINVAL;
    -	}
    -
     	acpi_unlazy_tlb(smp_processor_id());
     
     	/* Tell the scheduler that we are going deep-idle: */
    --- a/drivers/idle/intel_idle.c
    +++ b/drivers/idle/intel_idle.c
    @@ -359,13 +359,7 @@ static int intel_idle(struct cpuidle_dev
     	if (!(lapic_timer_reliable_states & (1 << (cstate))))
     		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
     
    -	if (!current_set_polling_and_test()) {
    -
    -		__monitor((void *)&current_thread_info()->flags, 0, 0);
    -		smp_mb();
    -		if (!need_resched())
    -			__mwait(eax, ecx);
    -	}
    +	mwait_idle_with_hints(eax, ecx);
     
     	if (!(lapic_timer_reliable_states & (1 << (cstate))))
     		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
    --- a/drivers/thermal/intel_powerclamp.c
    +++ b/drivers/thermal/intel_powerclamp.c
    @@ -438,9 +438,7 @@ static int clamp_thread(void *arg)
     			 */
     			local_touch_nmi();
     			stop_critical_timings();
    -			__monitor((void *)&current_thread_info()->flags, 0, 0);
    -			cpu_relax(); /* allow HT sibling to run */
    -			__mwait(eax, ecx);
    +			mwait_idle_with_hints(eax, ecx);
     			start_critical_timings();
     			atomic_inc(&idle_wakeup_counter);
     		}
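
    As a usage note (again illustrative, not from the patch): a caller
    builds the MWAIT hint in EAX from the definitions at the top of
    mwait.h -- target C-state in bits 7:4, sub-state in bits 3:0 -- and
    can ask via ECX for interrupts to break the wait even when masked.
    The cstate/substate values below are made up for the example:

	unsigned long eax = (cstate << MWAIT_SUBSTATE_SIZE) |
			    (substate & MWAIT_SUBSTATE_MASK);
	unsigned long ecx = MWAIT_ECX_INTERRUPT_BREAK; /* wake on interrupt */

	mwait_idle_with_hints(eax, ecx);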


