Subject: [PATCH RFC nohz_full 2/7] nohz_full: Add rcu_dyntick data for scalable detection of all-idle state
From: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>

This commit adds fields to the rcu_dynticks structure that are used to
detect idle CPUs. These new fields differ from the existing ones in
that the existing ones consider a CPU executing in user mode to be
idle, while the new ones consider a CPU executing in user mode to be
busy. The handling of these new fields is otherwise quite similar to
that of the existing fields. This commit also adds the initialization
required for these fields.
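
The new ->dynticks_idle counter follows the same convention as the
existing ->dynticks counter: an even value means the CPU is idle, an
odd value means it is not. As a minimal sketch of how such a counter
is driven at idle entry (the helper name below is made up, and the real
entry/exit hooks, which also maintain ->dynticks_idle_nesting across
interrupts from idle, are added by later patches in this series, not by
this one):

/*
 * Sketch only -- not added by this patch.  Illustrates the parity
 * convention for ->dynticks_idle: incrementing the counter on each
 * idle transition leaves it even while the CPU is idle and odd
 * otherwise.
 */
static void rcu_sysidle_enter_sketch(struct rcu_dynticks *rdtp)
{
        /* Record the end of this CPU's non-idle period. */
        rdtp->dynticks_idle_jiffies = jiffies;

        /* Flip the counter to an even (idle) value, with full ordering. */
        smp_mb__before_atomic_inc();
        atomic_inc(&rdtp->dynticks_idle);
        smp_mb__after_atomic_inc();
        WARN_ON_ONCE(atomic_read(&rdtp->dynticks_idle) & 0x1);
}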

So why is usermode execution treated differently, with RCU considering
it a quiescent state equivalent to idle, while the new full-system
idle-state detection considers usermode execution to be non-idle?

It turns out that although one of RCU's quiescent states is usermode
execution, it is not a full-system idle state. This is because the
purpose of the full-system idle state is not RCU, but rather determining
when accurate timekeeping can safely be disabled. Whenever accurate
timekeeping is required in a CONFIG_NO_HZ_FULL kernel, at least one
CPU must keep the scheduling-clock tick going. If even one CPU is
executing in user mode, accurate timekeeping is required, particularly
for architectures where gettimeofday() and friends do not enter the
kernel. Only when all CPUs are really and truly idle can accurate
timekeeping be disabled, allowing all CPUs to turn off the
scheduling-clock interrupt, thus greatly improving energy efficiency.
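
To illustrate what that determination amounts to, here is a deliberately
naive, hypothetical check (the function name is invented, and the whole
point of this series is to replace this brute-force per-CPU scan with a
scalable scheme): the system is fully idle only if every CPU's
->dynticks_idle counter holds an even value, since both kernel and
usermode execution leave it odd.

/*
 * Hypothetical, naive check: is every CPU idle from the timekeeping
 * viewpoint?  An even ->dynticks_idle value means that CPU is idle;
 * usermode execution, like kernel execution, leaves the value odd.
 */
static bool rcu_sys_is_idle_sketch(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);

                if (atomic_read(&rdtp->dynticks_idle) & 0x1)
                        return false;   /* This CPU is non-idle. */
        }
        return true;    /* All CPUs idle: the tick can be turned off. */
}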

This naturally raises the question "Why is this code in RCU rather than
in timekeeping?", and the answer is that RCU has the data and
infrastructure to efficiently make this determination.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
---
 kernel/rcutree.c        |  5 +++++
 kernel/rcutree.h        |  9 +++++++++
 kernel/rcutree_plugin.h | 19 +++++++++++++++++++
 3 files changed, 33 insertions(+)

diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 928cb45..9412726 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -209,6 +209,10 @@ EXPORT_SYMBOL_GPL(rcu_note_context_switch);
 DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
         .dynticks_nesting = DYNTICK_TASK_EXIT_IDLE,
         .dynticks = ATOMIC_INIT(1),
+#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
+        .dynticks_idle_nesting = DYNTICK_TASK_NEST_VALUE,
+        .dynticks_idle = ATOMIC_INIT(1),
+#endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
 };
 
 static long blimit = 10;        /* Maximum callbacks per rcu_do_batch. */
@@ -2902,6 +2906,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
         rdp->blimit = blimit;
         init_callback_list(rdp);  /* Re-enable callbacks on this CPU. */
         rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
+        rcu_sysidle_init_percpu_data(rdp->dynticks);
         atomic_set(&rdp->dynticks->dynticks,
                    (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
         raw_spin_unlock(&rnp->lock);            /* irqs remain disabled. */
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index b383258..bd99d59 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -88,6 +88,14 @@ struct rcu_dynticks {
                                     /* Process level is worth LLONG_MAX/2. */
         int dynticks_nmi_nesting;   /* Track NMI nesting level. */
         atomic_t dynticks;          /* Even value for idle, else odd. */
+#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
+        long long dynticks_idle_nesting;
+                                    /* irq/process nesting level from idle. */
+        atomic_t dynticks_idle;     /* Even value for idle, else odd. */
+                                    /* "Idle" excludes userspace execution. */
+        unsigned long dynticks_idle_jiffies;
+                                    /* End of last non-NMI non-idle period. */
+#endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
 #ifdef CONFIG_RCU_FAST_NO_HZ
         bool all_lazy;              /* Are all CPU's CBs lazy? */
         unsigned long nonlazy_posted;
@@ -545,6 +553,7 @@ static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp);
 static void rcu_spawn_nocb_kthreads(struct rcu_state *rsp);
 static void rcu_kick_nohz_cpu(int cpu);
 static bool init_nocb_callback_list(struct rcu_data *rdp);
+static void rcu_sysidle_init_percpu_data(struct rcu_dynticks *rdtp);
 
 #endif /* #ifndef RCU_TREE_NONCORE */
 
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 769e12e..6937eb6 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -2375,3 +2375,22 @@ static void rcu_kick_nohz_cpu(int cpu)
                 smp_send_reschedule(cpu);
 #endif /* #ifdef CONFIG_NO_HZ_FULL */
 }
+
+
+#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
+
+/*
+ * Initialize dynticks sysidle state for CPUs coming online.
+ */
+static void rcu_sysidle_init_percpu_data(struct rcu_dynticks *rdtp)
+{
+        rdtp->dynticks_idle_nesting = DYNTICK_TASK_NEST_VALUE;
+}
+
+#else /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
+
+static void rcu_sysidle_init_percpu_data(struct rcu_dynticks *rdtp)
+{
+}
+
+#endif /* #else #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
--
1.8.1.5

