From: Feng Zhou <zhoufeng.zf@bytedance.com>
Subject: [PATCH bpf-next v2] selftests/bpf: fix some bugs in map_lookup_percpu_elem testcase

Address the review comments from Andrii Nakryiko; details are here:
https://lore.kernel.org/lkml/20220511093854.411-1-zhoufeng.zf@bytedance.com/T/

- use /* */ instead of // for comments
- use libbpf_num_possible_cpus() instead of sysconf(_SC_NPROCESSORS_ONLN)
- use an 8-byte (__u64) value size
- fix a memory leak
- use ASSERT_EQ instead of ASSERT_OK
- use bpf_loop() to fetch the value on each possible CPU
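
For context, a minimal sketch of the bpf_loop() pattern the updated BPF program
relies on: the callback is invoked once per possible CPU and accumulates the
value returned by bpf_map_lookup_percpu_elem() for that CPU. The names here
(pcpu_map, lookup_ctx, lookup_cb, sum_across_cpus) are illustrative stand-ins,
not the identifiers used in the diff below.

	#include "vmlinux.h"
	#include <bpf/bpf_helpers.h>

	const volatile int nr_cpus;	/* set from user space before load */

	struct {
		__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
		__uint(max_entries, 1);
		__type(key, __u32);
		__type(value, __u64);
	} pcpu_map SEC(".maps");

	struct lookup_ctx {
		void *map;	/* per-CPU map to walk */
		__u64 sum;	/* accumulated across CPUs */
	};

	static int lookup_cb(__u32 cpu, struct lookup_ctx *c)
	{
		__u64 key = 0;
		__u64 *value = bpf_map_lookup_percpu_elem(c->map, &key, cpu);

		if (value)
			c->sum += *value;
		return 0;	/* 0 means keep iterating */
	}

	SEC("tp/syscalls/sys_enter_getuid")
	int sum_across_cpus(const void *ctx)
	{
		struct lookup_ctx c = { .map = &pcpu_map, .sum = 0 };

		bpf_loop(nr_cpus, lookup_cb, &c, 0);
		return 0;
	}

	char _license[] SEC("license") = "GPL";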

Fixes: ed7c13776e20c74486b0939a3c1de984c5efb6aa ("selftests/bpf: add test case for bpf_map_lookup_percpu_elem")
Signed-off-by: Feng Zhou <zhoufeng.zf@bytedance.com>
---
v1->v2: Addressed comments from Yonghong Song.
- Adjust the code format.
More details can be found here:
https://lore.kernel.org/lkml/80ab09cf-6072-a75a-082d-2721f6f907ef@fb.com/T/

.../bpf/prog_tests/map_lookup_percpu_elem.c | 50 +++++++++------
.../bpf/progs/test_map_lookup_percpu_elem.c | 62 ++++++++++++-------
2 files changed, 71 insertions(+), 41 deletions(-)
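
A side note on the user-space half: for per-CPU maps, the value passed to
bpf_map_update_elem() is an array with one slot per possible CPU, which is why
the test seeds CPU i with the value i and expects each program-side sum to be
nr_cpus*(nr_cpus-1)/2. A minimal standalone sketch of that seeding step;
seed_percpu_map() is a hypothetical helper name, not part of this patch:

	#include <stdlib.h>
	#include <bpf/bpf.h>
	#include <bpf/libbpf.h>

	/* Seed a per-CPU map so that CPU i holds the value i, and return the
	 * sum the BPF side is expected to compute. map_fd is the fd of an
	 * already-created per-CPU map (e.g. from bpf_map__fd()).
	 */
	static __u64 seed_percpu_map(int map_fd)
	{
		int nr_cpus = libbpf_num_possible_cpus();
		__u64 *buf, key = 0, expected = 0;
		int i;

		buf = calloc(nr_cpus, sizeof(*buf));
		if (!buf)
			return 0;
		for (i = 0; i < nr_cpus; i++) {
			buf[i] = i;	/* one 8-byte slot per possible CPU */
			expected += i;	/* 0 + 1 + ... = nr_cpus*(nr_cpus-1)/2 */
		}
		bpf_map_update_elem(map_fd, &key, buf, BPF_ANY);
		free(buf);
		return expected;
	}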

diff --git a/tools/testing/selftests/bpf/prog_tests/map_lookup_percpu_elem.c b/tools/testing/selftests/bpf/prog_tests/map_lookup_percpu_elem.c
index 58b24c2112b0..f987c9278912 100644
--- a/tools/testing/selftests/bpf/prog_tests/map_lookup_percpu_elem.c
+++ b/tools/testing/selftests/bpf/prog_tests/map_lookup_percpu_elem.c
@@ -1,30 +1,38 @@
-// SPDX-License-Identifier: GPL-2.0
-// Copyright (c) 2022 Bytedance
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2022 Bytedance */
 
 #include <test_progs.h>
-
 #include "test_map_lookup_percpu_elem.skel.h"
 
-#define TEST_VALUE 1
-
 void test_map_lookup_percpu_elem(void)
 {
 	struct test_map_lookup_percpu_elem *skel;
-	int key = 0, ret;
-	int nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
-	int *buf;
+	__u64 key = 0, sum;
+	int ret, i;
+	int nr_cpus = libbpf_num_possible_cpus();
+	__u64 *buf;
 
-	buf = (int *)malloc(nr_cpus*sizeof(int));
+	buf = (__u64 *)malloc(nr_cpus*sizeof(__u64));
 	if (!ASSERT_OK_PTR(buf, "malloc"))
 		return;
-	memset(buf, 0, nr_cpus*sizeof(int));
-	buf[0] = TEST_VALUE;
 
-	skel = test_map_lookup_percpu_elem__open_and_load();
-	if (!ASSERT_OK_PTR(skel, "test_map_lookup_percpu_elem__open_and_load"))
-		return;
+	for (i = 0; i < nr_cpus; i++)
+		buf[i] = i;
+	sum = (nr_cpus-1)*nr_cpus/2;
+
+	skel = test_map_lookup_percpu_elem__open();
+	if (!ASSERT_OK_PTR(skel, "test_map_lookup_percpu_elem__open"))
+		goto exit;
+
+	skel->rodata->nr_cpus = nr_cpus;
+
+	ret = test_map_lookup_percpu_elem__load(skel);
+	if (!ASSERT_OK(ret, "test_map_lookup_percpu_elem__load"))
+		goto cleanup;
+
 	ret = test_map_lookup_percpu_elem__attach(skel);
-	ASSERT_OK(ret, "test_map_lookup_percpu_elem__attach");
+	if (!ASSERT_OK(ret, "test_map_lookup_percpu_elem__attach"))
+		goto cleanup;
 
 	ret = bpf_map_update_elem(bpf_map__fd(skel->maps.percpu_array_map), &key, buf, 0);
 	ASSERT_OK(ret, "percpu_array_map update");
@@ -37,10 +45,14 @@ void test_map_lookup_percpu_elem(void)
 
 	syscall(__NR_getuid);
 
-	ret = skel->bss->percpu_array_elem_val == TEST_VALUE &&
-	      skel->bss->percpu_hash_elem_val == TEST_VALUE &&
-	      skel->bss->percpu_lru_hash_elem_val == TEST_VALUE;
-	ASSERT_OK(!ret, "bpf_map_lookup_percpu_elem success");
+	test_map_lookup_percpu_elem__detach(skel);
+
+	ASSERT_EQ(skel->bss->percpu_array_elem_sum, sum, "percpu_array lookup percpu elem");
+	ASSERT_EQ(skel->bss->percpu_hash_elem_sum, sum, "percpu_hash lookup percpu elem");
+	ASSERT_EQ(skel->bss->percpu_lru_hash_elem_sum, sum, "percpu_lru_hash lookup percpu elem");
 
+cleanup:
 	test_map_lookup_percpu_elem__destroy(skel);
+exit:
+	free(buf);
 }
diff --git a/tools/testing/selftests/bpf/progs/test_map_lookup_percpu_elem.c b/tools/testing/selftests/bpf/progs/test_map_lookup_percpu_elem.c
index 5d4ef86cbf48..57e875d6e6e0 100644
--- a/tools/testing/selftests/bpf/progs/test_map_lookup_percpu_elem.c
+++ b/tools/testing/selftests/bpf/progs/test_map_lookup_percpu_elem.c
@@ -1,52 +1,70 @@
-// SPDX-License-Identifier: GPL-2.0
-// Copyright (c) 2022 Bytedance
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2022 Bytedance */
 
 #include "vmlinux.h"
 #include <bpf/bpf_helpers.h>
 
-int percpu_array_elem_val = 0;
-int percpu_hash_elem_val = 0;
-int percpu_lru_hash_elem_val = 0;
+__u64 percpu_array_elem_sum = 0;
+__u64 percpu_hash_elem_sum = 0;
+__u64 percpu_lru_hash_elem_sum = 0;
+const volatile int nr_cpus;
 
 struct {
 	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
 	__uint(max_entries, 1);
 	__type(key, __u32);
-	__type(value, __u32);
+	__type(value, __u64);
 } percpu_array_map SEC(".maps");
 
 struct {
 	__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
 	__uint(max_entries, 1);
-	__type(key, __u32);
-	__type(value, __u32);
+	__type(key, __u64);
+	__type(value, __u64);
 } percpu_hash_map SEC(".maps");
 
 struct {
 	__uint(type, BPF_MAP_TYPE_LRU_PERCPU_HASH);
 	__uint(max_entries, 1);
-	__type(key, __u32);
-	__type(value, __u32);
+	__type(key, __u64);
+	__type(value, __u64);
 } percpu_lru_hash_map SEC(".maps");
 
+struct read_percpu_elem_ctx {
+	void *map;
+	__u64 sum;
+};
+
+static int read_percpu_elem_callback(__u32 index, struct read_percpu_elem_ctx *ctx)
+{
+	__u64 key = 0;
+	__u64 *value;
+
+	value = bpf_map_lookup_percpu_elem(ctx->map, &key, index);
+	if (value)
+		ctx->sum += *value;
+	return 0;
+}
+
 SEC("tp/syscalls/sys_enter_getuid")
 int sysenter_getuid(const void *ctx)
 {
-	__u32 key = 0;
-	__u32 cpu = 0;
-	__u32 *value;
+	struct read_percpu_elem_ctx map_ctx;
 
-	value = bpf_map_lookup_percpu_elem(&percpu_array_map, &key, cpu);
-	if (value)
-		percpu_array_elem_val = *value;
+	map_ctx.map = &percpu_array_map;
+	map_ctx.sum = 0;
+	bpf_loop(nr_cpus, read_percpu_elem_callback, &map_ctx, 0);
+	percpu_array_elem_sum = map_ctx.sum;
 
-	value = bpf_map_lookup_percpu_elem(&percpu_hash_map, &key, cpu);
-	if (value)
-		percpu_hash_elem_val = *value;
+	map_ctx.map = &percpu_hash_map;
+	map_ctx.sum = 0;
+	bpf_loop(nr_cpus, read_percpu_elem_callback, &map_ctx, 0);
+	percpu_hash_elem_sum = map_ctx.sum;
 
-	value = bpf_map_lookup_percpu_elem(&percpu_lru_hash_map, &key, cpu);
-	if (value)
-		percpu_lru_hash_elem_val = *value;
+	map_ctx.map = &percpu_lru_hash_map;
+	map_ctx.sum = 0;
+	bpf_loop(nr_cpus, read_percpu_elem_callback, &map_ctx, 0);
+	percpu_lru_hash_elem_sum = map_ctx.sum;
 
 	return 0;
 }
--
2.20.1