From: Peter Zijlstra <peterz@infradead.org>
Date: Thu, 12 Dec 2013
Subject: [PATCH 12/15] sched: Remove local_irq_disable() from the clocks
    Now that x86 no longer requires IRQs to be disabled for sched_clock(),
    and ia64 never had that requirement in the first place (it does not
    appear to use cpufreq at all), we can drop the IRQs-disabled
    requirement from sched_clock_cpu() and its callers.
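
    Illustration (not part of the patch): a caller such as cpu_clock()
    previously had to bracket the call in local_irq_save() /
    local_irq_restore(); after this change it can call sched_clock_cpu()
    directly, since preemption is handled inside that function:

	/* before: local IRQs had to be disabled around the call */
	unsigned long flags;
	u64 clock;

	local_irq_save(flags);
	clock = sched_clock_cpu(cpu);
	local_irq_restore(flags);

	/* after: callable with IRQs (and preemption) enabled */
	clock = sched_clock_cpu(cpu);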

                        MAINLINE  PRE     POST

    sched_clock_stable: 1         1       1
    (cold) sched_clock: 329841    257223  221876
    (cold) local_clock: 301773    309889  234692
    (warm) sched_clock: 38375     25280   25602
    (warm) local_clock: 100371    85268   33265
    (warm) rdtsc:       27340     24247   24214
    sched_clock_stable: 0         0       0
    (cold) sched_clock: 382634    301224  235941
    (cold) local_clock: 396890    399870  297017
    (warm) sched_clock: 38194     25630   25233
    (warm) local_clock: 143452    129629  71234
    (warm) rdtsc:       27345     24307   24245
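
    (The patch does not say how these numbers were gathered; presumably
    they are TSC cycles per call, with "cold" a first, cache-cold call
    and "warm" a repeated one. A minimal sketch of such a measurement,
    assuming a kernel context and using get_cycles() from
    <linux/timex.h> -- the helper name is mine:)

	#include <linux/sched.h>
	#include <linux/timex.h>

	/* hypothetical: cycles consumed by a single sched_clock() call */
	static u64 time_one_sched_clock(void)
	{
		cycles_t start, end;

		start = get_cycles();
		(void)sched_clock();
		end = get_cycles();

		return end - start;
	}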

    Signed-off-by: Peter Zijlstra <peterz@infradead.org>
    ---
    kernel/sched/clock.c | 34 ++++++----------------------------
    1 file changed, 6 insertions(+), 28 deletions(-)

    --- a/kernel/sched/clock.c
    +++ b/kernel/sched/clock.c
    @@ -26,9 +26,10 @@
    * at 0 on boot (but people really shouldn't rely on that).
    *
    * cpu_clock(i) -- can be used from any context, including NMI.
    - * sched_clock_cpu(i) -- must be used with local IRQs disabled (implied by NMI)
    * local_clock() -- is cpu_clock() on the current cpu.
    *
    + * sched_clock_cpu(i)
    + *
    * How:
    *
    * The implementation either uses sched_clock() when
    @@ -50,15 +51,6 @@
    * Furthermore, explicit sleep and wakeup hooks allow us to account for time
    * that is otherwise invisible (TSC gets stopped).
    *
    - *
    - * Notes:
    - *
    - * The !IRQ-safetly of sched_clock() and sched_clock_cpu() comes from things
    - * like cpufreq interrupts that can change the base clock (TSC) multiplier
    - * and cause funny jumps in time -- although the filtering provided by
    - * sched_clock_cpu() should mitigate serious artifacts we cannot rely on it
    - * in general since for !CONFIG_HAVE_UNSTABLE_SCHED_CLOCK we fully rely on
    - * sched_clock().
    */
    #include <linux/spinlock.h>
    #include <linux/hardirq.h>
    @@ -242,20 +234,20 @@ u64 sched_clock_cpu(int cpu)
    struct sched_clock_data *scd;
    u64 clock;

    - WARN_ON_ONCE(!irqs_disabled());
    -
    if (sched_clock_stable)
    return sched_clock();

    if (unlikely(!sched_clock_running))
    return 0ull;

    + preempt_disable();
    scd = cpu_sdc(cpu);

    if (cpu != smp_processor_id())
    clock = sched_clock_remote(scd);
    else
    clock = sched_clock_local(scd);
    + preempt_enable();

    return clock;
    }
    @@ -316,14 +308,7 @@ EXPORT_SYMBOL_GPL(sched_clock_idle_wakeu
    */
    u64 cpu_clock(int cpu)
    {
    - u64 clock;
    - unsigned long flags;
    -
    - local_irq_save(flags);
    - clock = sched_clock_cpu(cpu);
    - local_irq_restore(flags);
    -
    - return clock;
    + return sched_clock_cpu(cpu);
    }

    /*
    @@ -335,14 +320,7 @@ u64 cpu_clock(int cpu)
    */
    u64 local_clock(void)
    {
    - u64 clock;
    - unsigned long flags;
    -
    - local_irq_save(flags);
    - clock = sched_clock_cpu(smp_processor_id());
    - local_irq_restore(flags);
    -
    - return clock;
    + return sched_clock_cpu(raw_smp_processor_id());
    }

    #else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
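
    Illustrative usage (assumptions mine, not part of the patch): with
    the IRQ requirement gone, local_clock() can time a section of code
    from preemptible context; it returns time in sched_clock() units,
    i.e. nanoseconds:

	u64 t0, delta_ns;

	t0 = local_clock();
	do_work();			/* hypothetical function being timed */
	delta_ns = local_clock() - t0;

    Note the preempt_disable()/preempt_enable() pair added to
    sched_clock_cpu() above: it only has to keep the task from migrating
    between the smp_processor_id() check and the local/remote read,
    which is a weaker (and cheaper) guarantee than disabling IRQs.
    local_clock() correspondingly switches to raw_smp_processor_id(),
    since the preemptible-context debug check in smp_processor_id()
    would now trigger; if the task migrates after the read, the stale
    id is simply handled as a remote read, which is still correct.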


