Date: 2022-07-13
Subject: Re: [PATCH v2 1/3] mm/slub: fix the race between validate_slab and slab_free
From: Rongwei Wang <rongwei.wang@linux.alibaba.com>


On 7/13/22 6:22 PM, Hyeonggon Yoo wrote:
> On Tue, Jul 12, 2022 at 10:28:05AM +0800, Rongwei Wang wrote:
>> In use cases where slabs are allocated and freed frequently, error
>> messages such as "Left Redzone overwritten" or "First byte 0xbb
>> instead of 0xcc" can be printed when validating slabs. That happens
>> because an object has already been filled with SLUB_RED_INACTIVE but
>> has not yet been added to the slab's freelist, and slab validation
>> can run between these two states.
>>
>> This does not mean the slab cannot work correctly, but these
>> confusing messages make slab debugging harder.
>>
>> Signed-off-by: Rongwei Wang <rongwei.wang@linux.alibaba.com>
>> ---
>> mm/slub.c | 43 +++++++++++++++++++++++++------------------
>> 1 file changed, 25 insertions(+), 18 deletions(-)
>>
>
> This makes the code more complex.
>
> A part of me says it may be more pleasant to split out the
> implementation of allocating from (and freeing to) caches with
> debugging enabled. That would make it simpler.
>
> something like:
>
> __slab_alloc() {
>         if (kmem_cache_debug(s))
>                 slab_alloc_debug()
>         else
>                 ___slab_alloc()
> }
>
> slab_free() {
>         if (kmem_cache_debug(s))
>                 slab_free_debug()
>         else
>                 __do_slab_free()
> }
Oh, I had the same idea, but I was not sure whether it would be accepted,
since it needs more changes than the current patch. Since you agree with
this approach, I can rewrite the code along those lines.
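
Roughly, for the free side I have in mind something like the sketch below
(untested; slab_free_debug() is just the placeholder name from your
pseudocode, free_debug_processing() is assumed to no longer take list_lock
itself, and frozen-slab handling, full/partial list movement and discarding
of empty slabs are left out):

static noinline void slab_free_debug(struct kmem_cache *s, struct slab *slab,
				     void *head, void *tail, int cnt,
				     unsigned long addr)
{
	struct kmem_cache_node *n = get_node(s, slab_nid(slab));
	unsigned long flags;

	/*
	 * Hold list_lock across both free_debug_processing() (which
	 * poisons the objects) and the freelist update, so a concurrent
	 * validate_slab() can never observe an object that is already
	 * poisoned but not yet on the freelist.
	 */
	spin_lock_irqsave(&n->list_lock, flags);

	if (free_debug_processing(s, slab, head, tail, cnt, addr)) {
		/* splice [head, tail] onto the slab's freelist */
		set_freepointer(s, tail, slab->freelist);
		slab->freelist = head;
		slab->inuse -= cnt;
	}

	spin_unlock_irqrestore(&n->list_lock, flags);
}

The non-debug path would keep going through __do_slab_free() unchanged.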

Thanks.
>
> See also:
> https://lore.kernel.org/lkml/faf416b9-f46c-8534-7fb7-557c046a564d@suse.cz/
Thanks, it seems that I had missed it.
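
For reference when reading the diff below, the window being closed is
roughly this interleaving (simplified):

  CPU A: __slab_free() (debug cache)     CPU B: validate_slab()
  ----------------------------------     ----------------------
  free_debug_processing():
    checks pass, object poisoned,
    redzone set to 0xbb (inactive)
    list_lock and slab_lock dropped
                                         takes list_lock + slab_lock
                                         object is not on the freelist,
                                         so it is checked as allocated:
                                         "First byte 0xbb instead of 0xcc"
  cmpxchg finally puts the object
  on slab->freelist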
>
>> diff --git a/mm/slub.c b/mm/slub.c
>> index b1281b8654bd..e950d8df8380 100644
>> --- a/mm/slub.c
>> +++ b/mm/slub.c
>> @@ -1391,18 +1391,16 @@ static noinline int free_debug_processing(
>> void *head, void *tail, int bulk_cnt,
>> unsigned long addr)
>> {
>> - struct kmem_cache_node *n = get_node(s, slab_nid(slab));
>> void *object = head;
>> int cnt = 0;
>> - unsigned long flags, flags2;
>> + unsigned long flags;
>> int ret = 0;
>> depot_stack_handle_t handle = 0;
>>
>> if (s->flags & SLAB_STORE_USER)
>> handle = set_track_prepare();
>>
>> - spin_lock_irqsave(&n->list_lock, flags);
>> - slab_lock(slab, &flags2);
>> + slab_lock(slab, &flags);
>>
>> if (s->flags & SLAB_CONSISTENCY_CHECKS) {
>> if (!check_slab(s, slab))
>> @@ -1435,8 +1433,7 @@ static noinline int free_debug_processing(
>> slab_err(s, slab, "Bulk freelist count(%d) invalid(%d)\n",
>> bulk_cnt, cnt);
>>
>> - slab_unlock(slab, &flags2);
>> - spin_unlock_irqrestore(&n->list_lock, flags);
>> + slab_unlock(slab, &flags);
>> if (!ret)
>> slab_fix(s, "Object at 0x%p not freed", object);
>> return ret;
>> @@ -3330,7 +3327,7 @@ static void __slab_free(struct kmem_cache *s, struct slab *slab,
>>
>> {
>> void *prior;
>> - int was_frozen;
>> + int was_frozen, to_take_off = 0;
>> struct slab new;
>> unsigned long counters;
>> struct kmem_cache_node *n = NULL;
>> @@ -3341,14 +3338,23 @@ static void __slab_free(struct kmem_cache *s, struct slab *slab,
>> if (kfence_free(head))
>> return;
>>
>> - if (kmem_cache_debug(s) &&
>> - !free_debug_processing(s, slab, head, tail, cnt, addr))
>> - return;
>> + n = get_node(s, slab_nid(slab));
>> + if (kmem_cache_debug(s)) {
>> + int ret;
>>
>> - do {
>> - if (unlikely(n)) {
>> + spin_lock_irqsave(&n->list_lock, flags);
>> + ret = free_debug_processing(s, slab, head, tail, cnt, addr);
>> + if (!ret) {
>> spin_unlock_irqrestore(&n->list_lock, flags);
>> - n = NULL;
>> + return;
>> + }
>> + }
>> +
>> + do {
>> + if (unlikely(to_take_off)) {
>> + if (!kmem_cache_debug(s))
>> + spin_unlock_irqrestore(&n->list_lock, flags);
>> + to_take_off = 0;
>> }
>> prior = slab->freelist;
>> counters = slab->counters;
>> @@ -3369,8 +3375,6 @@ static void __slab_free(struct kmem_cache *s, struct slab *slab,
>> new.frozen = 1;
>>
>> } else { /* Needs to be taken off a list */
>> -
>> - n = get_node(s, slab_nid(slab));
>> /*
>> * Speculatively acquire the list_lock.
>> * If the cmpxchg does not succeed then we may
>> @@ -3379,8 +3383,10 @@ static void __slab_free(struct kmem_cache *s, struct slab *slab,
>> * Otherwise the list_lock will synchronize with
>> * other processors updating the list of slabs.
>> */
>> - spin_lock_irqsave(&n->list_lock, flags);
>> + if (!kmem_cache_debug(s))
>> + spin_lock_irqsave(&n->list_lock, flags);
>>
>> + to_take_off = 1;
>> }
>> }
>>
>> @@ -3389,8 +3395,9 @@ static void __slab_free(struct kmem_cache *s, struct slab *slab,
>> head, new.counters,
>> "__slab_free"));
>>
>> - if (likely(!n)) {
>> -
>> + if (likely(!to_take_off)) {
>> + if (kmem_cache_debug(s))
>> + spin_unlock_irqrestore(&n->list_lock, flags);
>> if (likely(was_frozen)) {
>> /*
>> * The list lock was not taken therefore no list
>> --
>> 2.27.0
>>
