    From: Ian Rogers <irogers@google.com>
    Date: Wed, 20 Jul 2022
    Subject: Re: [PATCH v3 13/14] perf/hw_breakpoint: Optimize max_bp_pinned_slots() for CPU-independent task targets
    On Mon, Jul 4, 2022 at 8:07 AM Marco Elver <elver@google.com> wrote:
    >
    > Running the perf benchmark with (note: more aggressive parameters vs.
    > preceding changes, but same 256-CPU host):
    >
    > | $> perf bench -r 100 breakpoint thread -b 4 -p 128 -t 512
    > | # Running 'breakpoint/thread' benchmark:
    > | # Created/joined 100 threads with 4 breakpoints and 128 parallelism
    > | Total time: 1.989 [sec]
    > |
    > | 38.854160 usecs/op
    > | 4973.332500 usecs/op/cpu
    >
    > 20.43% [kernel] [k] queued_spin_lock_slowpath
    > 18.75% [kernel] [k] osq_lock
    > 16.98% [kernel] [k] rhashtable_jhash2
    > 8.34% [kernel] [k] task_bp_pinned
    > 4.23% [kernel] [k] smp_cfm_core_cond
    > 3.65% [kernel] [k] bcmp
    > 2.83% [kernel] [k] toggle_bp_slot
    > 1.87% [kernel] [k] find_next_bit
    > 1.49% [kernel] [k] __reserve_bp_slot
    >
    > We can see that a majority of the time is now spent hashing task
    > pointers to index into task_bps_ht in task_bp_pinned().
    >
    > Obtaining max_bp_pinned_slots() for CPU-independent task targets is
    > currently O(#cpus): it calls task_bp_pinned() for each CPU, even if the
    > result of task_bp_pinned() is CPU-independent.
    >
    > The loop in max_bp_pinned_slots() wants to compute the maximum slots
    > across all CPUs. If task_bp_pinned() is CPU-independent, we can do so by
    > obtaining the max slots across all CPUs and adding task_bp_pinned().
    >
    > To do so in O(1), use a bp_slots_histogram for CPU-pinned slots.
    >
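    A minimal standalone sketch of the histogram idea (illustrative only; the
    names slots_hist_add/slots_hist_max and the fixed MAX_SLOTS are made up,
    and this is not the kernel's bp_slots_histogram implementation): bucket i
    counts how many CPUs currently have exactly i+1 pinned CPU breakpoints,
    so the maximum over all CPUs is the highest non-empty bucket, found
    without iterating over CPUs.

    #include <assert.h>
    #include <stdio.h>

    #define MAX_SLOTS 4     /* assumed slot count, for illustration only */

    struct slots_hist {
            int count[MAX_SLOTS];   /* count[i]: #CPUs with i+1 pinned slots */
    };

    /* A CPU's pinned count changes from old to old+val: move it between buckets. */
    static void slots_hist_add(struct slots_hist *h, int old, int val)
    {
            if (old > 0)
                    h->count[old - 1]--;
            if (old + val > 0)
                    h->count[old + val - 1]++;
    }

    /* Highest pinned count held by any CPU: scan buckets from the top. */
    static int slots_hist_max(const struct slots_hist *h)
    {
            for (int i = MAX_SLOTS - 1; i >= 0; i--) {
                    if (h->count[i] > 0)
                            return i + 1;
            }
            return 0;
    }

    int main(void)
    {
            struct slots_hist h = { { 0 } };

            slots_hist_add(&h, 0, 1);       /* CPU 0: 0 -> 1 pinned */
            slots_hist_add(&h, 0, 2);       /* CPU 1: 0 -> 2 pinned */
            slots_hist_add(&h, 2, 1);       /* CPU 1: 2 -> 3 pinned */
            printf("max pinned on any CPU: %d\n", slots_hist_max(&h));
            assert(slots_hist_max(&h) == 3);
            return 0;
    }

    (The kernel's version is kept per breakpoint type and uses atomic
    counters, as visible in the hunks below.)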
    > After this optimization:
    >
    > | $> perf bench -r 100 breakpoint thread -b 4 -p 128 -t 512
    > | # Running 'breakpoint/thread' benchmark:
    > | # Created/joined 100 threads with 4 breakpoints and 128 parallelism
    > | Total time: 1.930 [sec]
    > |
    > | 37.697832 usecs/op
    > | 4825.322500 usecs/op/cpu
    >
    > 19.13% [kernel] [k] queued_spin_lock_slowpath
    > 18.21% [kernel] [k] rhashtable_jhash2
    > 15.46% [kernel] [k] osq_lock
    > 6.27% [kernel] [k] toggle_bp_slot
    > 5.91% [kernel] [k] task_bp_pinned
    > 5.05% [kernel] [k] smp_cfm_core_cond
    > 1.78% [kernel] [k] update_sg_lb_stats
    > 1.36% [kernel] [k] llist_reverse_order
    > 1.34% [kernel] [k] find_next_bit
    > 1.19% [kernel] [k] bcmp
    >
    > This suggests that the time spent in task_bp_pinned() has been reduced.
    > However, we're still hashing too much, which will be addressed in the
    > subsequent change.
    >
    > Signed-off-by: Marco Elver <elver@google.com>
    > Reviewed-by: Dmitry Vyukov <dvyukov@google.com>

    Acked-by: Ian Rogers <irogers@google.com>

    Thanks,
    Ian

    > ---
    > v3:
    > * Update hw_breakpoint_is_used() to include global cpu_pinned.
    >
    > v2:
    > * New patch.
    > ---
    > kernel/events/hw_breakpoint.c | 57 ++++++++++++++++++++++++++++++++---
    > 1 file changed, 53 insertions(+), 4 deletions(-)
    >
    > diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c
    > index 03ebecf048c0..a489f31fe147 100644
    > --- a/kernel/events/hw_breakpoint.c
    > +++ b/kernel/events/hw_breakpoint.c
    > @@ -64,6 +64,9 @@ static struct bp_cpuinfo *get_bp_info(int cpu, enum bp_type_idx type)
    >         return per_cpu_ptr(bp_cpuinfo + type, cpu);
    > }
    >
    > +/* Number of pinned CPU breakpoints globally. */
    > +static struct bp_slots_histogram cpu_pinned[TYPE_MAX];
    > +
    > /* Keep track of the breakpoints attached to tasks */
    > static struct rhltable task_bps_ht;
    > static const struct rhashtable_params task_bps_ht_params = {
    > @@ -194,6 +197,10 @@ static __init int init_breakpoint_slots(void)
    >                                 goto err;
    >                 }
    >         }
    > +        for (i = 0; i < TYPE_MAX; i++) {
    > +                if (!bp_slots_histogram_alloc(&cpu_pinned[i], i))
    > +                        goto err;
    > +        }
    >
    >         return 0;
    > err:
    > @@ -203,6 +210,8 @@ static __init int init_breakpoint_slots(void)
    >                 if (err_cpu == cpu)
    >                         break;
    >         }
    > +        for (i = 0; i < TYPE_MAX; i++)
    > +                bp_slots_histogram_free(&cpu_pinned[i]);
    >
    >         return -ENOMEM;
    > }
    > @@ -270,6 +279,9 @@ static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type)
    > /*
    > * Count the number of breakpoints of the same type and same task.
    > * The given event must be not on the list.
    > + *
    > + * If @cpu is -1, but the result of task_bp_pinned() is not CPU-independent,
    > + * returns a negative value.
    > */
    > static int task_bp_pinned(int cpu, struct perf_event *bp, enum bp_type_idx type)
    > {
    > @@ -288,9 +300,18 @@ static int task_bp_pinned(int cpu, struct perf_event *bp, enum bp_type_idx type)
    >                 goto out;
    >
    >         rhl_for_each_entry_rcu(iter, pos, head, hw.bp_list) {
    > -                if (find_slot_idx(iter->attr.bp_type) == type &&
    > -                    (iter->cpu < 0 || cpu == iter->cpu))
    > -                        count += hw_breakpoint_weight(iter);
    > +                if (find_slot_idx(iter->attr.bp_type) != type)
    > +                        continue;
    > +
    > +                if (iter->cpu >= 0) {
    > +                        if (cpu == -1) {
    > +                                count = -1;
    > +                                goto out;
    > +                        } else if (cpu != iter->cpu)
    > +                                continue;
    > +                }
    > +
    > +                count += hw_breakpoint_weight(iter);
    >         }
    >
    > out:
    > @@ -316,6 +337,19 @@ max_bp_pinned_slots(struct perf_event *bp, enum bp_type_idx type)
    >         int pinned_slots = 0;
    >         int cpu;
    >
    > +        if (bp->hw.target && bp->cpu < 0) {
    > +                int max_pinned = task_bp_pinned(-1, bp, type);
    > +
    > +                if (max_pinned >= 0) {
    > +                        /*
    > +                         * Fast path: task_bp_pinned() is CPU-independent and
    > +                         * returns the same value for any CPU.
    > +                         */
    > +                        max_pinned += bp_slots_histogram_max(&cpu_pinned[type], type);
    > +                        return max_pinned;
    > +                }
    > +        }
    > +
    >         for_each_cpu(cpu, cpumask) {
    >                 struct bp_cpuinfo *info = get_bp_info(cpu, type);
    >                 int nr;
    > @@ -366,8 +400,11 @@ toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
    >
    >         /* Pinned counter cpu profiling */
    >         if (!bp->hw.target) {
    > +                struct bp_cpuinfo *info = get_bp_info(bp->cpu, type);
    > +
    >                 lockdep_assert_held_write(&bp_cpuinfo_sem);
    > -                get_bp_info(bp->cpu, type)->cpu_pinned += weight;
    > +                bp_slots_histogram_add(&cpu_pinned[type], info->cpu_pinned, weight);
    > +                info->cpu_pinned += weight;
    >                 return 0;
    >         }
    >
    > @@ -804,6 +841,18 @@ bool hw_breakpoint_is_used(void)
    >                 }
    >         }
    >
    > +        for (int type = 0; type < TYPE_MAX; ++type) {
    > +                for (int slot = 0; slot < hw_breakpoint_slots_cached(type); ++slot) {
    > +                        /*
    > +                         * Warn, because if there are CPU pinned counters,
    > +                         * should never get here; bp_cpuinfo::cpu_pinned should
    > +                         * be consistent with the global cpu_pinned histogram.
    > +                         */
    > +                        if (WARN_ON(atomic_read(&cpu_pinned[type].count[slot])))
    > +                                return true;
    > +                }
    > +        }
    > +
    >         return false;
    > }
    >
    > --
    > 2.37.0.rc0.161.g10f37bed90-goog
    >
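    As a sanity check on the fast path added to max_bp_pinned_slots() above:
    when task_bp_pinned() is CPU-independent, the per-CPU maximum of
    cpu_pinned + task_bp_pinned equals max(cpu_pinned) + task_bp_pinned, so
    the per-CPU loop collapses to one histogram lookup plus one addition. A
    standalone illustration (NR_CPUS and the array contents are made-up
    example values, not kernel code):

    #include <assert.h>

    #define NR_CPUS 4

    int main(void)
    {
            const int cpu_pinned[NR_CPUS] = { 2, 0, 1, 0 };
            const int task_pinned = 3;      /* same on every CPU */
            int slow_max = 0, max_cpu_pinned = 0;

            /* Slow path: iterate over all CPUs, as before the patch. */
            for (int cpu = 0; cpu < NR_CPUS; cpu++) {
                    int nr = cpu_pinned[cpu] + task_pinned;

                    if (nr > slow_max)
                            slow_max = nr;
                    if (cpu_pinned[cpu] > max_cpu_pinned)
                            max_cpu_pinned = cpu_pinned[cpu];
            }

            /* Fast path: max_cpu_pinned stands in for bp_slots_histogram_max(). */
            assert(slow_max == max_cpu_pinned + task_pinned);
            return 0;
    }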
