Subject: [PATCH 3/3] perf lock: Print lost entries at the end
Like the normal 'perf lock report' output, print the bad stats at the end
if any exist or if the -v option is given.  For now it reuses the
BROKEN_CONTENDED stat for the lost count (entries dropped because the BPF
stack map was full).

$ sudo perf lock con -a -b -m 128 sleep 5
...
=== output for debug===

bad: 43, total: 14903
bad rate: 0.29 %
histogram of events caused bad sequence
acquire: 0
acquired: 0
contended: 43
release: 0
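
For reference, the summary above comes from the existing bad-stats printing
path that this change now also feeds with the lost count.  A simplified
sketch of that logic follows; it is not the verbatim perf source (the real
print_bad_events() in tools/perf/builtin-lock.c uses pr_info() and also
honours the quiet/verbose options):

#include <stdio.h>

enum broken_type { BROKEN_ACQUIRE, BROKEN_ACQUIRED, BROKEN_CONTENDED,
		   BROKEN_RELEASE, BROKEN_MAX };

static int bad_hist[BROKEN_MAX];

static void print_bad_events(int bad, int total)
{
	const char *name[BROKEN_MAX] = {
		"acquire", "acquired", "contended", "release",
	};

	/* only print when something actually went wrong */
	if (bad == 0)
		return;

	printf("\n=== output for debug===\n\n");
	printf("bad: %d, total: %d\n", bad, total);
	printf("bad rate: %.2f %%\n", (double)bad / (double)total * 100);
	printf("histogram of events caused bad sequence\n");
	for (int i = 0; i < BROKEN_MAX; i++)
		printf(" %10s: %d\n", name[i], bad_hist[i]);
}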

Signed-off-by: Namhyung Kim <namhyung@kernel.org>
---
tools/perf/builtin-lock.c | 8 +++++++-
tools/perf/util/bpf_lock_contention.c | 6 ++++--
tools/perf/util/bpf_skel/lock_contention.bpf.c | 9 +++++++--
tools/perf/util/lock-contention.h | 1 +
4 files changed, 19 insertions(+), 5 deletions(-)
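
On the BPF side, an entry is counted as lost when bpf_get_stackid() fails
to record a callstack (most commonly because the stacks map is full).  A
minimal standalone sketch of that pattern is below; the stack depth, the
map value_size and the tp_btf section name are assumptions for
illustration only, the actual definitions live in lock_contention.bpf.c:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define MAX_ENTRIES 10240

/* callstack storage (a depth of 8 entries is assumed here) */
struct {
	__uint(type, BPF_MAP_TYPE_STACK_TRACE);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, 8 * sizeof(__u64));
	__uint(max_entries, MAX_ENTRIES);
} stacks SEC(".maps");

/* error stat, read from user space through the skeleton's BSS */
unsigned long lost;

SEC("tp_btf/contention_begin")		/* assumed attach point */
int contention_begin(__u64 *ctx)
{
	__s32 stack_id = bpf_get_stackid(ctx, &stacks, BPF_F_FAST_STACK_CMP);

	/* a negative id means the callstack could not be stored */
	if (stack_id < 0)
		lost++;
	return 0;
}

char LICENSE[] SEC("license") = "GPL";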

diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
index e32fdcd497e0..8065f0268e55 100644
--- a/tools/perf/builtin-lock.c
+++ b/tools/perf/builtin-lock.c
@@ -1471,8 +1471,11 @@ static void print_contention_result(void)
pr_info(" %10s %s\n\n", "type", "caller");

bad = total = 0;
+ if (use_bpf)
+ bad = bad_hist[BROKEN_CONTENDED];
+
while ((st = pop_from_result())) {
- total++;
+ total += use_bpf ? st->nr_contended : 1;
if (st->broken)
bad++;

@@ -1686,6 +1689,9 @@ static int __cmd_contention(int argc, const char **argv)

lock_contention_stop();
lock_contention_read(&con);
+
+ /* abuse bad hist stats for lost entries */
+ bad_hist[BROKEN_CONTENDED] = con.lost;
} else {
err = perf_session__process_events(session);
if (err)
diff --git a/tools/perf/util/bpf_lock_contention.c b/tools/perf/util/bpf_lock_contention.c
index 26128e5bb659..65f51cc25236 100644
--- a/tools/perf/util/bpf_lock_contention.c
+++ b/tools/perf/util/bpf_lock_contention.c
@@ -16,7 +16,7 @@ static struct lock_contention_bpf *skel;

/* should be same as bpf_skel/lock_contention.bpf.c */
struct lock_contention_key {
- u32 stack_id;
+ s32 stack_id;
};

struct lock_contention_data {
@@ -110,7 +110,7 @@ int lock_contention_stop(void)
int lock_contention_read(struct lock_contention *con)
{
int fd, stack;
- u32 prev_key, key;
+ s32 prev_key, key;
struct lock_contention_data data;
struct lock_stat *st;
struct machine *machine = con->machine;
@@ -119,6 +119,8 @@ int lock_contention_read(struct lock_contention *con)
fd = bpf_map__fd(skel->maps.lock_stat);
stack = bpf_map__fd(skel->maps.stacks);

+ con->lost = skel->bss->lost;
+
prev_key = 0;
while (!bpf_map_get_next_key(fd, &prev_key, &key)) {
struct map *kmap;
diff --git a/tools/perf/util/bpf_skel/lock_contention.bpf.c b/tools/perf/util/bpf_skel/lock_contention.bpf.c
index 67d46533e518..9e8b94eb6320 100644
--- a/tools/perf/util/bpf_skel/lock_contention.bpf.c
+++ b/tools/perf/util/bpf_skel/lock_contention.bpf.c
@@ -12,7 +12,7 @@
#define MAX_ENTRIES 10240

struct contention_key {
- __u32 stack_id;
+ __s32 stack_id;
};

struct contention_data {
@@ -27,7 +27,7 @@ struct tstamp_data {
__u64 timestamp;
__u64 lock;
__u32 flags;
- __u32 stack_id;
+ __s32 stack_id;
};

/* callstack storage */
@@ -73,6 +73,9 @@ int enabled;
int has_cpu;
int has_task;

+/* error stat */
+unsigned long lost;
+
static inline int can_record(void)
{
if (has_cpu) {
@@ -116,6 +119,8 @@ int contention_begin(u64 *ctx)
pelem->flags = (__u32)ctx[1];
pelem->stack_id = bpf_get_stackid(ctx, &stacks, BPF_F_FAST_STACK_CMP);

+ if (pelem->stack_id < 0)
+ lost++;
return 0;
}

diff --git a/tools/perf/util/lock-contention.h b/tools/perf/util/lock-contention.h
index b09fd6eb978a..d9fc5f076567 100644
--- a/tools/perf/util/lock-contention.h
+++ b/tools/perf/util/lock-contention.h
@@ -113,6 +113,7 @@ struct lock_contention {
struct machine *machine;
struct hlist_head *result;
unsigned long map_len;
+ unsigned long lost;
};

#ifdef HAVE_BPF_SKEL
--
2.37.1.455.g008518b4e5-goog