From: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Date: 2018-08-29
Subject: [PATCH tip/core/rcu 24/24] rcu: Inline _rcu_barrier() into its sole remaining caller
Because rcu_barrier() is a one-line wrapper function for _rcu_barrier()
and because nothing else calls _rcu_barrier(), this commit inlines
_rcu_barrier() into rcu_barrier().
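
(The change is purely mechanical. As a rough sketch of the before/after
shape, with the barrier logic elided -- not the actual tree.c code:

        /* Before: exported one-line wrapper around an internal worker. */
        static void _rcu_barrier(void) { /* ... wait for callbacks ... */ }
        void rcu_barrier(void) { _rcu_barrier(); }

        /* After: the worker's body becomes rcu_barrier() itself. */
        void rcu_barrier(void) { /* ... wait for callbacks ... */ }

The kernel-doc comment that sat on the wrapper moves to the inlined
function, and the "_rcu_barrier" strings in comments and in the tracing
helper are renamed to match.)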

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
---
 include/trace/events/rcu.h | 20 ++++++-------
 kernel/rcu/tree.c          | 58 +++++++++++++++++---------------
 kernel/rcu/tree.h          |  4 +--
 kernel/rcu/tree_plugin.h   |  2 +-
 4 files changed, 39 insertions(+), 45 deletions(-)

diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index a8d07feff6a0..175e0bce22bd 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -705,20 +705,20 @@ TRACE_EVENT(rcu_torture_read,
 );
 
 /*
- * Tracepoint for _rcu_barrier() execution. The string "s" describes
- * the _rcu_barrier phase:
- * "Begin": _rcu_barrier() started.
- * "EarlyExit": _rcu_barrier() piggybacked, thus early exit.
- * "Inc1": _rcu_barrier() piggyback check counter incremented.
- * "OfflineNoCB": _rcu_barrier() found callback on never-online CPU
- * "OnlineNoCB": _rcu_barrier() found online no-CBs CPU.
- * "OnlineQ": _rcu_barrier() found online CPU with callbacks.
- * "OnlineNQ": _rcu_barrier() found online CPU, no callbacks.
+ * Tracepoint for rcu_barrier() execution. The string "s" describes
+ * the rcu_barrier phase:
+ * "Begin": rcu_barrier() started.
+ * "EarlyExit": rcu_barrier() piggybacked, thus early exit.
+ * "Inc1": rcu_barrier() piggyback check counter incremented.
+ * "OfflineNoCB": rcu_barrier() found callback on never-online CPU
+ * "OnlineNoCB": rcu_barrier() found online no-CBs CPU.
+ * "OnlineQ": rcu_barrier() found online CPU with callbacks.
+ * "OnlineNQ": rcu_barrier() found online CPU, no callbacks.
  * "IRQ": An rcu_barrier_callback() callback posted on remote CPU.
  * "IRQNQ": An rcu_barrier_callback() callback found no callbacks.
  * "CB": An rcu_barrier_callback() invoked a callback, not the last.
  * "LastCB": An rcu_barrier_callback() invoked the last callback.
- * "Inc2": _rcu_barrier() piggyback check counter incremented.
+ * "Inc2": rcu_barrier() piggyback check counter incremented.
  * The "cpu" argument is the CPU or -1 if meaningless, the "cnt" argument
  * is the count of remaining callbacks, and "done" is the piggybacking count.
  */
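
(Aside, not part of the patch: each phase string above becomes one line
of trace output when the rcu_barrier event is enabled. Going by the
event's TP_printk() format ("%s %s cpu %d remaining %d # %lu"), a line
might look roughly like

        rcu_barrier: rcu_sched Begin cpu -1 remaining 0 # 8

where "rcu_sched" is rcu_state.name, "cpu -1" means no particular CPU,
"remaining 0" is the outstanding-callback count, and 8 is the barrier
sequence snapshot. The exact rendering depends on kernel version and
configuration, so treat this line as an approximation.)
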
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 5f2a12a65b42..31e94f672d01 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -2746,7 +2746,7 @@ static void rcu_leak_callback(struct rcu_head *rhp)
 /*
  * Helper function for call_rcu() and friends. The cpu argument will
  * normally be -1, indicating "currently running CPU". It may specify
- * a CPU only if that CPU is a no-CBs CPU. Currently, only _rcu_barrier()
+ * a CPU only if that CPU is a no-CBs CPU. Currently, only rcu_barrier()
  * is expected to specify a CPU.
  */
 static void
@@ -2980,27 +2980,27 @@ static bool rcu_cpu_has_callbacks(bool *all_lazy)
 }
 
 /*
- * Helper function for _rcu_barrier() tracing. If tracing is disabled,
+ * Helper function for rcu_barrier() tracing. If tracing is disabled,
  * the compiler is expected to optimize this away.
  */
-static void _rcu_barrier_trace(const char *s, int cpu, unsigned long done)
+static void rcu_barrier_trace(const char *s, int cpu, unsigned long done)
 {
         trace_rcu_barrier(rcu_state.name, s, cpu,
                           atomic_read(&rcu_state.barrier_cpu_count), done);
 }
 
 /*
- * RCU callback function for _rcu_barrier(). If we are last, wake
- * up the task executing _rcu_barrier().
+ * RCU callback function for rcu_barrier(). If we are last, wake
+ * up the task executing rcu_barrier().
  */
 static void rcu_barrier_callback(struct rcu_head *rhp)
 {
         if (atomic_dec_and_test(&rcu_state.barrier_cpu_count)) {
-                _rcu_barrier_trace(TPS("LastCB"), -1,
+                rcu_barrier_trace(TPS("LastCB"), -1,
                                    rcu_state.barrier_sequence);
                 complete(&rcu_state.barrier_completion);
         } else {
-                _rcu_barrier_trace(TPS("CB"), -1, rcu_state.barrier_sequence);
+                rcu_barrier_trace(TPS("CB"), -1, rcu_state.barrier_sequence);
         }
 }
 
@@ -3011,33 +3011,40 @@ static void rcu_barrier_func(void *unused)
 {
         struct rcu_data *rdp = raw_cpu_ptr(&rcu_data);
 
-        _rcu_barrier_trace(TPS("IRQ"), -1, rcu_state.barrier_sequence);
+        rcu_barrier_trace(TPS("IRQ"), -1, rcu_state.barrier_sequence);
         rdp->barrier_head.func = rcu_barrier_callback;
         debug_rcu_head_queue(&rdp->barrier_head);
         if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head, 0)) {
                 atomic_inc(&rcu_state.barrier_cpu_count);
         } else {
                 debug_rcu_head_unqueue(&rdp->barrier_head);
-                _rcu_barrier_trace(TPS("IRQNQ"), -1,
+                rcu_barrier_trace(TPS("IRQNQ"), -1,
                                    rcu_state.barrier_sequence);
         }
 }
 
-/* Orchestrate an RCU barrier, waiting for all RCU callbacks to complete. */
-static void _rcu_barrier(void)
+/**
+ * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
+ *
+ * Note that this primitive does not necessarily wait for an RCU grace period
+ * to complete. For example, if there are no RCU callbacks queued anywhere
+ * in the system, then rcu_barrier() is within its rights to return
+ * immediately, without waiting for anything, much less an RCU grace period.
+ */
+void rcu_barrier(void)
 {
         int cpu;
         struct rcu_data *rdp;
         unsigned long s = rcu_seq_snap(&rcu_state.barrier_sequence);
 
-        _rcu_barrier_trace(TPS("Begin"), -1, s);
+        rcu_barrier_trace(TPS("Begin"), -1, s);
 
         /* Take mutex to serialize concurrent rcu_barrier() requests. */
         mutex_lock(&rcu_state.barrier_mutex);
 
         /* Did someone else do our work for us? */
         if (rcu_seq_done(&rcu_state.barrier_sequence, s)) {
-                _rcu_barrier_trace(TPS("EarlyExit"), -1,
+                rcu_barrier_trace(TPS("EarlyExit"), -1,
                                    rcu_state.barrier_sequence);
                 smp_mb(); /* caller's subsequent code after above check. */
                 mutex_unlock(&rcu_state.barrier_mutex);
@@ -3046,7 +3053,7 @@ static void _rcu_barrier(void)
 
         /* Mark the start of the barrier operation. */
         rcu_seq_start(&rcu_state.barrier_sequence);
-        _rcu_barrier_trace(TPS("Inc1"), -1, rcu_state.barrier_sequence);
+        rcu_barrier_trace(TPS("Inc1"), -1, rcu_state.barrier_sequence);
 
         /*
          * Initialize the count to one rather than to zero in order to
@@ -3069,10 +3076,10 @@ static void _rcu_barrier(void)
                 rdp = per_cpu_ptr(&rcu_data, cpu);
                 if (rcu_is_nocb_cpu(cpu)) {
                         if (!rcu_nocb_cpu_needs_barrier(cpu)) {
-                                _rcu_barrier_trace(TPS("OfflineNoCB"), cpu,
+                                rcu_barrier_trace(TPS("OfflineNoCB"), cpu,
                                                    rcu_state.barrier_sequence);
                         } else {
-                                _rcu_barrier_trace(TPS("OnlineNoCB"), cpu,
+                                rcu_barrier_trace(TPS("OnlineNoCB"), cpu,
                                                    rcu_state.barrier_sequence);
                                 smp_mb__before_atomic();
                                 atomic_inc(&rcu_state.barrier_cpu_count);
@@ -3080,11 +3087,11 @@ static void _rcu_barrier(void)
                                            rcu_barrier_callback, cpu, 0);
                         }
                 } else if (rcu_segcblist_n_cbs(&rdp->cblist)) {
-                        _rcu_barrier_trace(TPS("OnlineQ"), cpu,
+                        rcu_barrier_trace(TPS("OnlineQ"), cpu,
                                            rcu_state.barrier_sequence);
                         smp_call_function_single(cpu, rcu_barrier_func, NULL, 1);
                 } else {
-                        _rcu_barrier_trace(TPS("OnlineNQ"), cpu,
+                        rcu_barrier_trace(TPS("OnlineNQ"), cpu,
                                            rcu_state.barrier_sequence);
                 }
         }
@@ -3101,25 +3108,12 @@ static void _rcu_barrier(void)
         wait_for_completion(&rcu_state.barrier_completion);
 
         /* Mark the end of the barrier operation. */
-        _rcu_barrier_trace(TPS("Inc2"), -1, rcu_state.barrier_sequence);
+        rcu_barrier_trace(TPS("Inc2"), -1, rcu_state.barrier_sequence);
         rcu_seq_end(&rcu_state.barrier_sequence);
 
         /* Other rcu_barrier() invocations can now safely proceed. */
         mutex_unlock(&rcu_state.barrier_mutex);
 }
-
-/**
- * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
- *
- * Note that this primitive does not necessarily wait for an RCU grace period
- * to complete. For example, if there are no RCU callbacks queued anywhere
- * in the system, then rcu_barrier() is within its rights to return
- * immediately, without waiting for anything, much less an RCU grace period.
- */
-void rcu_barrier(void)
-{
-        _rcu_barrier();
-}
 EXPORT_SYMBOL_GPL(rcu_barrier);
 
 /*
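
(Aside, not part of the patch: the "initialize the count to one" trick
visible above is a general refcounting idiom -- the extra self-reference
keeps the completion from firing while callbacks are still being posted.
A self-contained user-space sketch of the same idiom, using hypothetical
names and C11 atomics plus a POSIX semaphore in place of the kernel's
atomic_t and struct completion:

        #include <semaphore.h>
        #include <stdatomic.h>

        static atomic_int pending;      /* analog of barrier_cpu_count */
        static sem_t all_done;          /* analog of barrier_completion */

        static void barrier_cb(void)    /* runs once per posted callback */
        {
                if (atomic_fetch_sub(&pending, 1) == 1)
                        sem_post(&all_done);    /* last one wakes the waiter */
        }

        static void barrier_wait(int n, void (*post)(int i, void (*cb)(void)))
        {
                int i;

                sem_init(&all_done, 0, 0);
                atomic_store(&pending, 1);      /* the self-reference */
                for (i = 0; i < n; i++) {
                        atomic_fetch_add(&pending, 1);
                        post(i, barrier_cb);    /* cb may run immediately */
                }
                barrier_cb();                   /* drop the self-reference */
                sem_wait(&all_done);            /* all callbacks have run */
        }

Starting the count at zero instead would let it hit zero after the first
callback ran but before the later ones were posted, waking the waiter
too early.)
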
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index 46452d3d0fad..8cf93ac277ec 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -222,7 +222,7 @@ struct rcu_data {
                                         /* Grace period that needs help */
                                         /* from cond_resched(). */
 
-        /* 5) _rcu_barrier(), OOM callbacks, and expediting. */
+        /* 5) rcu_barrier(), OOM callbacks, and expediting. */
         struct rcu_head barrier_head;
         int exp_dynticks_snap;          /* Double-check need for IPI. */
 
@@ -328,7 +328,7 @@ struct rcu_state {
         atomic_t barrier_cpu_count;     /* # CPUs waiting on. */
         struct completion barrier_completion; /* Wake at barrier end. */
         unsigned long barrier_sequence; /* ++ at start and end of */
-                                        /* _rcu_barrier(). */
+                                        /* rcu_barrier(). */
         /* End of fields guarded by barrier_mutex. */
 
         struct mutex exp_mutex;         /* Serialize expedited GP. */
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 7add1c297500..beaaca7a11f4 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -1993,7 +1993,7 @@ static bool rcu_nocb_cpu_needs_barrier(int cpu)
          * There needs to be a barrier before this function is called,
          * but associated with a prior determination that no more
          * callbacks would be posted. In the worst case, the first
-         * barrier in _rcu_barrier() suffices (but the caller cannot
+         * barrier in rcu_barrier() suffices (but the caller cannot
          * necessarily rely on this, not a substitute for the caller
          * getting the concurrency design right!). There must also be
          * a barrier between the following load and posting of a callback
    --
    2.17.1
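
(Usage note, not part of the patch: the canonical reason rcu_barrier()
exists is module unload -- callbacks posted with call_rcu() must finish
before the module text that contains them goes away. A hypothetical
exit-path sketch, in which mymod_exit(), unregister_everything() and
mymod_cache are made-up names:

        static void __exit mymod_exit(void)
        {
                /* First make sure no new callbacks can be posted. */
                unregister_everything();

                /* Wait for the call_rcu() callbacks already in flight. */
                rcu_barrier();

                /* Now the module's resources can safely be torn down. */
                kmem_cache_destroy(mymod_cache);
        }

The longer story is in Documentation/RCU/rcubarrier.txt.)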