From: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Subject: [PATCH tip/core/rcu 05/27] rcu: Improve RCU-tasks naming and comments
The naming and comments associated with some RCU-tasks code make
the faulty assumption that context switches due to cond_resched()
are voluntary. As several people pointed out, this is not the case.
This commit therefore updates function names and comments to better
reflect current reality.

Reported-by: Byungchul Park <byungchul.park@lge.com>
Reported-by: Joel Fernandes <joel@joelfernandes.org>
Reported-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
---
 include/linux/rcupdate.h | 12 ++++++------
 include/linux/rcutiny.h  |  2 +-
 kernel/rcu/tree.c        |  2 +-
 kernel/rcu/update.c      | 27 ++++++++++++++-------------
 4 files changed, 22 insertions(+), 21 deletions(-)
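
[Not part of the commit: for reviewers, a minimal sketch of how the
renamed primitive is meant to be used. The kthread loop and
do_scan_work() below are hypothetical; cond_resched_tasks_rcu_qs() is
the real macro touched by this patch.]

#include <linux/kthread.h>
#include <linux/rcupdate.h>

/* Hypothetical long-running loop in a kernel thread. */
static int scan_kthread(void *unused)
{
        while (!kthread_should_stop()) {
                do_scan_work();                 /* hypothetical unit of work */
                cond_resched_tasks_rcu_qs();    /* tasks-RCU QS, then cond_resched() */
        }
        return 0;
}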

diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index dacc90358b33..75e5b393cf44 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -158,11 +158,11 @@ static inline void rcu_init_nohz(void) { }
 } while (0)
 
 /*
- * Note a voluntary context switch for RCU-tasks benefit. This is a
- * macro rather than an inline function to avoid #include hell.
+ * Note a quasi-voluntary context switch for RCU-tasks's benefit.
+ * This is a macro rather than an inline function to avoid #include hell.
  */
 #ifdef CONFIG_TASKS_RCU
-#define rcu_note_voluntary_context_switch_lite(t) \
+#define rcu_tasks_qs(t) \
 	do { \
 		if (READ_ONCE((t)->rcu_tasks_holdout)) \
 			WRITE_ONCE((t)->rcu_tasks_holdout, false); \
@@ -170,14 +170,14 @@ static inline void rcu_init_nohz(void) { }
 #define rcu_note_voluntary_context_switch(t) \
 	do { \
 		rcu_all_qs(); \
-		rcu_note_voluntary_context_switch_lite(t); \
+		rcu_tasks_qs(t); \
 	} while (0)
 void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func);
 void synchronize_rcu_tasks(void);
 void exit_tasks_rcu_start(void);
 void exit_tasks_rcu_finish(void);
 #else /* #ifdef CONFIG_TASKS_RCU */
-#define rcu_note_voluntary_context_switch_lite(t) do { } while (0)
+#define rcu_tasks_qs(t) do { } while (0)
 #define rcu_note_voluntary_context_switch(t) rcu_all_qs()
 #define call_rcu_tasks call_rcu_sched
 #define synchronize_rcu_tasks synchronize_sched
@@ -194,7 +194,7 @@ static inline void exit_tasks_rcu_finish(void) { }
  */
 #define cond_resched_tasks_rcu_qs() \
 do { \
-	rcu_note_voluntary_context_switch_lite(current); \
+	rcu_tasks_qs(current); \
 	cond_resched(); \
 } while (0)

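[Also not part of the commit: the handshake that rcu_tasks_qs() now
names. The two functions below are hypothetical stand-ins; the flag
accesses mirror the macro above and the holdout scan in
kernel/rcu/update.c.]

#include <linux/rcupdate.h>
#include <linux/sched.h>

/*
 * Grace-period side, simplified from rcu_tasks_kthread(): mark a task
 * that the grace period must wait for.
 */
static void mark_tasks_holdout(struct task_struct *t)
{
        WRITE_ONCE(t->rcu_tasks_holdout, true);
}

/*
 * Task side: what rcu_tasks_qs(t) expands to.  Clearing the flag tells
 * the grace-period kthread that t has passed a quiescent state.
 */
static void report_tasks_qs(struct task_struct *t)
{
        if (READ_ONCE(t->rcu_tasks_holdout))
                WRITE_ONCE(t->rcu_tasks_holdout, false);
}
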
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 7b3c82e8a625..8d9a0ea8f0b5 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -93,7 +93,7 @@ static inline void kfree_call_rcu(struct rcu_head *head,
 #define rcu_note_context_switch(preempt) \
 	do { \
 		rcu_sched_qs(); \
-		rcu_note_voluntary_context_switch_lite(current); \
+		rcu_tasks_qs(current); \
 	} while (0)
 
 static inline int rcu_needs_cpu(u64 basemono, u64 *nextevt)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index fcb0355a8084..1ea971244512 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -457,7 +457,7 @@ void rcu_note_context_switch(bool preempt)
 		rcu_momentary_dyntick_idle();
 	this_cpu_inc(rcu_dynticks.rcu_qs_ctr);
 	if (!preempt)
-		rcu_note_voluntary_context_switch_lite(current);
+		rcu_tasks_qs(current);
 out:
 	trace_rcu_utilization(TPS("End context switch"));
 	barrier(); /* Avoid RCU read-side critical sections leaking up. */
diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
index 4c230a60ece4..5783bdf86e5a 100644
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -507,14 +507,15 @@ early_initcall(check_cpu_stall_init);
 #ifdef CONFIG_TASKS_RCU
 
 /*
- * Simple variant of RCU whose quiescent states are voluntary context switch,
- * user-space execution, and idle. As such, grace periods can take one good
- * long time. There are no read-side primitives similar to rcu_read_lock()
- * and rcu_read_unlock() because this implementation is intended to get
- * the system into a safe state for some of the manipulations involved in
- * tracing and the like. Finally, this implementation does not support
- * high call_rcu_tasks() rates from multiple CPUs. If this is required,
- * per-CPU callback lists will be needed.
+ * Simple variant of RCU whose quiescent states are voluntary context
+ * switch, cond_resched_rcu_qs(), user-space execution, and idle.
+ * As such, grace periods can take one good long time. There are no
+ * read-side primitives similar to rcu_read_lock() and rcu_read_unlock()
+ * because this implementation is intended to get the system into a safe
+ * state for some of the manipulations involved in tracing and the like.
+ * Finally, this implementation does not support high call_rcu_tasks()
+ * rates from multiple CPUs. If this is required, per-CPU callback lists
+ * will be needed.
  */
 
 /* Global list of callbacks and associated lock. */
@@ -542,11 +543,11 @@ static struct task_struct *rcu_tasks_kthread_ptr;
  * period elapses, in other words after all currently executing RCU
  * read-side critical sections have completed. call_rcu_tasks() assumes
  * that the read-side critical sections end at a voluntary context
- * switch (not a preemption!), entry into idle, or transition to usermode
- * execution. As such, there are no read-side primitives analogous to
- * rcu_read_lock() and rcu_read_unlock() because this primitive is intended
- * to determine that all tasks have passed through a safe state, not so
- * much for data-structure synchronization.
+ * switch (not a preemption!), cond_resched_rcu_qs(), entry into idle,
+ * or transition to usermode execution. As such, there are no read-side
+ * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
+ * this primitive is intended to determine that all tasks have passed
+ * through a safe state, not so much for data-structure synchronization.
  *
  * See the description of call_rcu() for more detailed information on
  * memory ordering guarantees.
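
[One more illustration, not part of the commit: the tracing-style use
case that the comment above describes. The trampoline structure and
unhook_trampoline() are hypothetical.]

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_trampoline {
        void *text;                     /* hypothetical instruction stub */
};

static void unhook_trampoline(struct my_trampoline *tp);

/* Remove a trampoline that tasks might be executing inside of. */
static void remove_trampoline(struct my_trampoline *tp)
{
        unhook_trampoline(tp);          /* no new tasks can enter tp */
        synchronize_rcu_tasks();        /* all tasks have passed a safe state */
        kfree(tp);                      /* so tp can no longer be in use */
}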
--
2.17.1