Subject: Re: [PATCH 2/3] bpf: Optimize get_modules_for_addrs()
From: Zhen Lei <thunder.leizhen@huawei.com>
Date: 2023-01-09


On 2023/1/6 5:31, Jiri Olsa wrote:
> On Wed, Jan 04, 2023 at 05:25:08PM +0100, Petr Mladek wrote:
>> On Fri 2022-12-30 19:27:28, Zhen Lei wrote:
>>> Function __module_address() can quickly return the pointer of the module
>>> to which an address belongs. We do not need to traverse the symbols of all
>>> modules to check whether each address in addrs[] is the start address of
>>> the corresponding symbol, because register_fprobe_ips() will do this check
>>> later.
>
> hum, for some reason I can see only replies to this patch and
> not the actual patch.. I'll dig it out of the lore I guess

https://lkml.org/lkml/2022/12/30/195

>
>>>
>>> Assume there are m modules, each module has n symbols on average, and the
>>> number of addresses 'addrs_cnt' is abbreviated as K. Then the time
>>> complexity of the original method is O(K * log(K)) + O(m * n * log(K)),
>>> and the time complexity of the new method is O(K * (log(m) + M)), where
>>> M <= m. (m * n * log(K)) / (K * m) ==> n / log2(K). Even if n is 10 and
>>> K is 128, the ratio is still greater than 1. Therefore, the new method
>>> will generally have better performance.
>
> could you try to benchmark that? I tried something similar but was not
> able to get better performance

This is only a theoretical analysis; at the very least, the performance should not get worse.
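For example, plugging in the numbers from the commit message: n = 10 and K = 128 gives n / log2(K) = 10 / 7 ≈ 1.4, which is where the "greater than 1" above comes from.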

>
> I'll review and run my benchmark test tomorrow
>
> thanks,
> jirka
>
>>>
>>> Signed-off-by: Zhen Lei <thunder.leizhen@huawei.com>
>>> ---
>>> kernel/trace/bpf_trace.c | 101 ++++++++++++++++-----------------------
>>> 1 file changed, 40 insertions(+), 61 deletions(-)
>>>
>>> diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
>>> index 5f3be4bc16403a5..0ff9037098bd241 100644
>>> --- a/kernel/trace/bpf_trace.c
>>> +++ b/kernel/trace/bpf_trace.c
>>> @@ -2684,69 +2684,55 @@ static void symbols_swap_r(void *a, void *b, int size, const void *priv)
>>> }
>>> }
>>>
>>> -struct module_addr_args {
>>> - unsigned long *addrs;
>>> - u32 addrs_cnt;
>>> - struct module **mods;
>>> - int mods_cnt;
>>> - int mods_cap;
>>> -};
>>> -
>>> -static int module_callback(void *data, const char *name,
>>> - struct module *mod, unsigned long addr)
>>> +static int get_modules_for_addrs(struct module ***out_mods, unsigned long *addrs, u32 addrs_cnt)
>>> {
>>> - struct module_addr_args *args = data;
>>> - struct module **mods;
>>> -
>>> - /* We iterate all modules symbols and for each we:
>>> - * - search for it in provided addresses array
>>> - * - if found we check if we already have the module pointer stored
>>> - * (we iterate modules sequentially, so we can check just the last
>>> - * module pointer)
>>> - * - take module reference and store it
>>> - */
>>> - if (!bsearch(&addr, args->addrs, args->addrs_cnt, sizeof(addr),
>>> - bpf_kprobe_multi_addrs_cmp))
>>> - return 0;
>>> + int i, j, err;
>>> + int mods_cnt = 0;
>>> + int mods_cap = 0;
>>> + struct module *mod;
>>> + struct module **mods = NULL;
>>>
>>> - if (args->mods && args->mods[args->mods_cnt - 1] == mod)
>>> - return 0;
>>> + for (i = 0; i < addrs_cnt; i++) {
>>> + mod = __module_address(addrs[i]);
>>
>> This must be called under module_mutex to make sure that the module
>> would not disappear.
>>
>>> + if (!mod)
>>> + continue;
>>>
>>> - if (args->mods_cnt == args->mods_cap) {
>>> - args->mods_cap = max(16, args->mods_cap * 3 / 2);
>>> - mods = krealloc_array(args->mods, args->mods_cap, sizeof(*mods), GFP_KERNEL);
>>> - if (!mods)
>>> - return -ENOMEM;
>>> - args->mods = mods;
>>> - }
>>> + /* check if we already have the module pointer stored */
>>> + for (j = 0; j < mods_cnt; j++) {
>>> + if (mods[j] == mod)
>>> + break;
>>> + }
>>
>> This might get optimized like the original code.
>>
>> My understanding is that the addresses are sorted in the "addrs" array.
>> So, the address is either part of the last found module or it belongs
>> to a completely new module.
>>
>> for (i = 0; i < addrs_cnt; i++) {
>> /*
>> * The addresses are sorted. The address either belongs
>> * to the last found module or a new one.
>> *
>> * This is safe because we already have reference
>> * on the found modules.
>> */
>> if (mods_cnt && within_module(addrs[i], mods[mods_cnt - 1]))
>> continue;
>>
>> mutex_lock(&module_mutex);
>> mod = __module_address(addrs[i]);
>> if (mod && !try_module_get(mod)) {
>> mutex_unlock(&module_mutex);
>> goto failed;
>> }
>> mutex_unlock(&module_mutex);
>>
>> /*
>> * No-op when the address was not from a module.
>> *
>> * Is this correct? What if the module has gone in
>> * the meantime? Anyway, the original code
>> * worked this way.
>> *
>> * FIXME: I would personally make sure that it is part
>> * of vmlinux or so.
>> */
>> if (!mod)
>> continue;
>>
>> /* store the module into mods array */
>> ...
>>
>>
>>
>>
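
If I fold your within_module() + module_mutex suggestion into this patch, the loop might look roughly like the untested sketch below. The array growth and error handling are kept from the patch; the inner dedup loop should no longer be needed if addrs[] is sorted, and the extra module_put() on allocation failure is because the reference is now taken before the array is grown:

	for (i = 0; i < addrs_cnt; i++) {
		/*
		 * addrs[] is sorted, so the address either falls in the last
		 * found module or in a new one. This is safe because we
		 * already hold a reference on the modules stored in mods[].
		 */
		if (mods_cnt && within_module(addrs[i], mods[mods_cnt - 1]))
			continue;

		mutex_lock(&module_mutex);
		mod = __module_address(addrs[i]);
		if (mod && !try_module_get(mod)) {
			mutex_unlock(&module_mutex);
			err = -EINVAL;
			goto failed;
		}
		mutex_unlock(&module_mutex);

		/* Not a module address: skip it, as the original code did. */
		if (!mod)
			continue;

		if (mods_cnt == mods_cap) {
			struct module **new_mods;

			mods_cap = max(16, mods_cap * 3 / 2);
			new_mods = krealloc_array(mods, mods_cap, sizeof(*mods), GFP_KERNEL);
			if (!new_mods) {
				/* The new reference is not in mods[] yet, drop it here. */
				module_put(mod);
				err = -ENOMEM;
				goto failed;
			}
			mods = new_mods;
		}

		mods[mods_cnt++] = mod;
	}
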
>>> + if (j < mods_cnt)
>>> + continue;
>>>
>>> - if (!try_module_get(mod))
>>> - return -EINVAL;
>>> + if (mods_cnt == mods_cap) {
>>> + struct module **new_mods;
>>>
>>> - args->mods[args->mods_cnt] = mod;
>>> - args->mods_cnt++;
>>> - return 0;
>>> -}
>>> + mods_cap = max(16, mods_cap * 3 / 2);
>>> + new_mods = krealloc_array(mods, mods_cap, sizeof(*mods), GFP_KERNEL);
>>> + if (!new_mods) {
>>> + err = -ENOMEM;
>>> + goto failed;
>>> + }
>>> + mods = new_mods;
>>> + }
>>>
>>> -static int get_modules_for_addrs(struct module ***mods, unsigned long *addrs, u32 addrs_cnt)
>>> -{
>>> - struct module_addr_args args = {
>>> - .addrs = addrs,
>>> - .addrs_cnt = addrs_cnt,
>>> - };
>>> - int err;
>>> + if (!try_module_get(mod)) {
>>> + err = -EINVAL;
>>> + goto failed;
>>> + }
>>>
>>> - /* We return either err < 0 in case of error, ... */
>>> - err = module_kallsyms_on_each_symbol(NULL, module_callback, &args);
>>> - if (err) {
>>> - kprobe_multi_put_modules(args.mods, args.mods_cnt);
>>> - kfree(args.mods);
>>> - return err;
>>> + mods[mods_cnt] = mod;
>>> + mods_cnt++;
>>> }
>>>
>>> - /* or number of modules found if everything is ok. */
>>> - *mods = args.mods;
>>> - return args.mods_cnt;
>>> + *out_mods = mods;
>>> + return mods_cnt;
>>> +
>>> +failed:
>>> + kprobe_multi_put_modules(mods, mods_cnt);
>>> + kfree(mods);
>>> + return err;
>>> }
>>>
>>> int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
>>
>> Otherwise, it looks good. IMHO, the new code looks more straightforward
>> than the original one.
>>
>> Best Regards,
>> Petr
> .
>

--
Regards,
Zhen Lei
