Subject: [PATCH tip/core/rcu 18/24] rcu: Clean up flavor-related definitions and comments in tree.c
    Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
    ---
    kernel/rcu/tree.c | 74 ++++++++++++++++++++---------------------------
    1 file changed, 32 insertions(+), 42 deletions(-)

    diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
    index fb888085d304..e8fbb7ee76cc 100644
    --- a/kernel/rcu/tree.c
    +++ b/kernel/rcu/tree.c
    @@ -382,12 +382,11 @@ static int rcu_is_cpu_rrupt_from_idle(void)
    }

    /*
    - * Register a quiescent state for all RCU flavors. If there is an
    + * Register an urgently needed quiescent state. If there is an
    * emergency, invoke rcu_momentary_dyntick_idle() to do a heavy-weight
    - * dyntick-idle quiescent state visible to other CPUs (but only for those
    - * RCU flavors in desperate need of a quiescent state, which will normally
    - * be none of them). Either way, do a lightweight quiescent state for
    - * all RCU flavors.
    + * dyntick-idle quiescent state visible to other CPUs, which will in
    + * some cases serve for expedited as well as normal grace periods.
    + * Either way, register a lightweight quiescent state.
    *
    * The barrier() calls are redundant in the common case when this is
    * called externally, but just in case this is called from within this
    @@ -564,7 +563,7 @@ void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
    EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);

    /*
    - * Return the root node of the specified rcu_state structure.
    + * Return the root node of the rcu_state structure.
    */
    static struct rcu_node *rcu_get_root(void)
    {
    @@ -948,11 +947,7 @@ void rcu_request_urgent_qs_task(struct task_struct *t)
    * Disable preemption to avoid false positives that could otherwise
    * happen due to the current CPU number being sampled, this task being
    * preempted, its old CPU being taken offline, resuming on some other CPU,
    - * then determining that its old CPU is now offline. Because there are
    - * multiple flavors of RCU, and because this function can be called in the
    - * midst of updating the flavors while a given CPU coming online or going
    - * offline, it is necessary to check all flavors. If any of the flavors
    - * believe that given CPU is online, it is considered to be online.
    + * then determining that its old CPU is now offline.
    *
    * Disable checking if in an NMI handler because we cannot safely
    * report errors from NMI handlers anyway. In addition, it is OK to use
    @@ -1562,11 +1557,10 @@ static bool rcu_future_gp_cleanup(struct rcu_node *rnp)
    }

    /*
    - * Awaken the grace-period kthread for the specified flavor of RCU.
    - * Don't do a self-awaken, and don't bother awakening when there is
    - * nothing for the grace-period kthread to do (as in several CPUs
    - * raced to awaken, and we lost), and finally don't try to awaken
    - * a kthread that has not yet been created.
    + * Awaken the grace-period kthread. Don't do a self-awaken, and don't
    + * bother awakening when there is nothing for the grace-period kthread
    + * to do (as in several CPUs raced to awaken, and we lost), and finally
    + * don't try to awaken a kthread that has not yet been created.
    */
    static void rcu_gp_kthread_wake(void)
    {
    @@ -2118,13 +2112,13 @@ static int __noreturn rcu_gp_kthread(void *unused)
    }

    /*
    - * Report a full set of quiescent states to the specified rcu_state data
    - * structure. Invoke rcu_gp_kthread_wake() to awaken the grace-period
    - * kthread if another grace period is required. Whether we wake
    - * the grace-period kthread or it awakens itself for the next round
    - * of quiescent-state forcing, that kthread will clean up after the
    - * just-completed grace period. Note that the caller must hold rnp->lock,
    - * which is released before return.
    + * Report a full set of quiescent states to the rcu_state data structure.
    + * Invoke rcu_gp_kthread_wake() to awaken the grace-period kthread if
    + * another grace period is required. Whether we wake the grace-period
    + * kthread or it awakens itself for the next round of quiescent-state
    + * forcing, that kthread will clean up after the just-completed grace
    + * period. Note that the caller must hold rnp->lock, which is released
    + * before return.
    */
    static void rcu_report_qs_rsp(unsigned long flags)
    __releases(rcu_get_root()->lock)
    @@ -2211,7 +2205,7 @@ static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
    /*
    * Record a quiescent state for all tasks that were previously queued
    * on the specified rcu_node structure and that were blocking the current
    - * RCU grace period. The caller must hold the specified rnp->lock with
    + * RCU grace period. The caller must hold the corresponding rnp->lock with
    * irqs disabled, and this lock is released upon return, but irqs remain
    * disabled.
    */
    @@ -2713,11 +2707,11 @@ static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused
    }

    /*
    - * Schedule RCU callback invocation. If the specified type of RCU
    - * does not support RCU priority boosting, just do a direct call,
    - * otherwise wake up the per-CPU kernel kthread. Note that because we
    - * are running on the current CPU with softirqs disabled, the
    - * rcu_cpu_kthread_task cannot disappear out from under us.
    + * Schedule RCU callback invocation. If the running implementation of RCU
    + * does not support RCU priority boosting, just do a direct call, otherwise
    + * wake up the per-CPU kernel kthread. Note that because we are running
    + * on the current CPU with softirqs disabled, the rcu_cpu_kthread_task
    + * cannot disappear out from under us.
    */
    static void invoke_rcu_callbacks(struct rcu_data *rdp)
    {
    @@ -2958,11 +2952,10 @@ EXPORT_SYMBOL_GPL(cond_synchronize_rcu);

    /*
    * Check to see if there is any immediate RCU-related work to be done by
    - * the current CPU, for the specified type of RCU, returning 1 if so and
    - * zero otherwise. The checks are in order of increasing expense: checks
    - * that can be carried out against CPU-local state are performed first.
    - * However, we must check for CPU stalls first, else we might not get
    - * a chance.
    + * the current CPU, returning 1 if so and zero otherwise. The checks are
    + * in order of increasing expense: checks that can be carried out against
    + * CPU-local state are performed first. However, we must check for CPU
    + * stalls first, else we might not get a chance.
    */
    static int rcu_pending(void)
    {
    @@ -3069,10 +3062,7 @@ static void rcu_barrier_func(void *unused)
    }
    }

    -/*
    - * Orchestrate the specified type of RCU barrier, waiting for all
    - * RCU callbacks of the specified type to complete.
    - */
    +/* Orchestrate an RCU barrier, waiting for all RCU callbacks to complete. */
    static void _rcu_barrier(void)
    {
    int cpu;
    @@ -3392,7 +3382,7 @@ void rcu_report_dead(unsigned int cpu)
    struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
    struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */

    - /* QS for any half-done expedited RCU-sched GP. */
    + /* QS for any half-done expedited grace period. */
    preempt_disable();
    rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
    preempt_enable();
    @@ -3481,7 +3471,7 @@ static int rcu_pm_notify(struct notifier_block *self,
    }

    /*
    - * Spawn the kthreads that handle each RCU flavor's grace periods.
    + * Spawn the kthreads that handle RCU's grace periods.
    */
    static int __init rcu_spawn_gp_kthread(void)
    {
    @@ -3544,7 +3534,7 @@ void rcu_scheduler_starting(void)
    }

    /*
    - * Helper function for rcu_init() that initializes one rcu_state structure.
    + * Helper function for rcu_init() that initializes the rcu_state structure.
    */
    static void __init rcu_init_one(void)
    {
    @@ -3706,7 +3696,7 @@ static void __init rcu_init_geometry(void)

    /*
    * Dump out the structure of the rcu_node combining tree associated
    - * with the rcu_state structure referenced by rsp.
    + * with the rcu_state structure.
    */
    static void __init rcu_dump_rcu_node_tree(void)
    {
    --
    2.17.1
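
A note for readers following the flavor-consolidation series: the comment changes above track a code change made earlier in the series, namely that helpers which used to take a struct rcu_state *rsp argument now operate on the single, consolidated rcu_state. Below is a minimal, self-contained sketch of that pattern, not the kernel's actual definitions; NUM_RCU_NODES, the stub rcu_node, and the bare rcu_state definition are placeholders standing in for what kernel/rcu/tree.h really declares.

#define NUM_RCU_NODES 1                 /* placeholder; the kernel computes this */

struct rcu_node { int dummy; };         /* stand-in for the real rcu_node */

struct rcu_state {
	struct rcu_node node[NUM_RCU_NODES];    /* node[0] is the root */
};

static struct rcu_state rcu_state;      /* one state, no per-flavor copies */

/*
 * Return the root node of the rcu_state structure (cf. the hunk above).
 * Note the empty argument list: before consolidation this helper took
 * a struct rcu_state *rsp and every caller had to pass the right flavor.
 */
static struct rcu_node *rcu_get_root(void)
{
	return &rcu_state.node[0];
}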