Subject: [PATCH v2 4/4] mm/slub: Fix sysfs shrink circular locking dependency
A lockdep splat is observed by echoing "1" to the shrink sysfs file
and then shutting down the system:

[ 167.473392] Chain exists of:
[ 167.473392] kn->count#279 --> mem_hotplug_lock.rw_sem --> slab_mutex
[ 167.473392]
[ 167.484323] Possible unsafe locking scenario:
[ 167.484323]
[ 167.490273]        CPU0                    CPU1
[ 167.494825]        ----                    ----
[ 167.499376]   lock(slab_mutex);
[ 167.502530]                                lock(mem_hotplug_lock.rw_sem);
[ 167.509356]                                lock(slab_mutex);
[ 167.515044]   lock(kn->count#279);
[ 167.518462]
[ 167.518462] *** DEADLOCK ***

It is caused by the get_online_cpus() and get_online_mems() calls in
kmem_cache_shrink(), which is invoked via the shrink sysfs file. To fix
that, use trylock to acquire the cpu and memory hotplug read locks.
Since hotplug events are rare, it should be fine to refuse a kmem cache
shrink operation while a hotplug event is in progress.

Signed-off-by: Waiman Long <longman@redhat.com>
---
 include/linux/memory_hotplug.h |  2 ++
 mm/memory_hotplug.c            |  5 +++++
 mm/slub.c                      | 19 +++++++++++++++----
 3 files changed, 22 insertions(+), 4 deletions(-)
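
As an aside (not part of the patch itself), the shrink path after this
change reduces to the best-effort pattern below. This is only an
illustrative sketch restating the mm/slub.c hunk; it assumes the
tryget_online_mems() helper added by this patch together with the
existing cpus_read_trylock()/cpus_read_unlock() primitives:

/*
 * Illustrative sketch of the trylock-based shrink flow (mirrors the
 * mm/slub.c hunk below). Refuse with -EBUSY instead of blocking when
 * a cpu or memory hotplug operation is in progress.
 */
static ssize_t shrink_store(struct kmem_cache *s,
			    const char *buf, size_t length)
{
	if (buf[0] != '1')
		return -EINVAL;

	if (!cpus_read_trylock())	/* cpu hotplug read lock */
		return -EBUSY;
	if (!tryget_online_mems()) {	/* memory hotplug read lock */
		length = -EBUSY;
		goto cpus_unlock_out;
	}

	kasan_cache_shrink(s);
	__kmem_cache_shrink(s);

	put_online_mems();
cpus_unlock_out:
	cpus_read_unlock();
	return length;
}

The -EBUSY return value is then propagated by slab_attr_store() (last
hunk below), so a write to the shrink sysfs file fails visibly instead
of silently skipping the shrink.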

diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 93d9ada74ddd..4ec4b0a2f0fa 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -231,6 +231,7 @@ extern void get_page_bootmem(unsigned long ingo, struct page *page,

void get_online_mems(void);
void put_online_mems(void);
+int tryget_online_mems(void);

void mem_hotplug_begin(void);
void mem_hotplug_done(void);
@@ -274,6 +275,7 @@ static inline int try_online_node(int nid)

static inline void get_online_mems(void) {}
static inline void put_online_mems(void) {}
+static inline int tryget_online_mems(void) { return 1; }

static inline void mem_hotplug_begin(void) {}
static inline void mem_hotplug_done(void) {}
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index fc0aad0bc1f5..38f9ccec9259 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -59,6 +59,11 @@ void get_online_mems(void)
 	percpu_down_read(&mem_hotplug_lock);
 }
 
+int tryget_online_mems(void)
+{
+	return percpu_down_read_trylock(&mem_hotplug_lock);
+}
+
 void put_online_mems(void)
 {
 	percpu_up_read(&mem_hotplug_lock);
diff --git a/mm/slub.c b/mm/slub.c
index cf2114ca27f7..c4977ac3271b 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -5343,10 +5343,20 @@ static ssize_t shrink_show(struct kmem_cache *s, char *buf)
 static ssize_t shrink_store(struct kmem_cache *s,
 			const char *buf, size_t length)
 {
-	if (buf[0] == '1')
-		kmem_cache_shrink(s);
-	else
+	if (buf[0] != '1')
 		return -EINVAL;
+
+	if (!cpus_read_trylock())
+		return -EBUSY;
+	if (!tryget_online_mems()) {
+		length = -EBUSY;
+		goto cpus_unlock_out;
+	}
+	kasan_cache_shrink(s);
+	__kmem_cache_shrink(s);
+	put_online_mems();
+cpus_unlock_out:
+	cpus_read_unlock();
 	return length;
 }
 SLAB_ATTR(shrink);
@@ -5654,7 +5664,8 @@ static ssize_t slab_attr_store(struct kobject *kobj,

 	for (idx = 0; idx < cnt; idx++) {
 		c = pcaches[idx];
-		attribute->store(c, buf, len);
+		if (attribute->store(c, buf, len) == -EBUSY)
+			err = -EBUSY;
 		percpu_ref_put(&c->memcg_params.refcnt);
 	}
 	kfree(pcaches);
--
2.18.1