From: Liu Shixin <liushixin2@huawei.com>
Subject: [PATCH 3/4] mm/slab_common: Separate sysfs_slab_add() and debugfs_slab_add() from __kmem_cache_create()
Date: 2022-10-24
Separating sysfs_slab_add() and debugfs_slab_add() from __kmem_cache_create()
helps to fix a kobject memory leak. After this patch, the leak can be fixed
naturally by calling kobject_put() to free the kobject and the associated
kmem_cache when sysfs_slab_add() fails.
Besides, this makes it easier to provide sysfs and debugfs support for other
allocators as well.

Signed-off-by: Liu Shixin <liushixin2@huawei.com>
---
 include/linux/slub_def.h | 11 +++++++++++
 mm/slab_common.c         | 10 ++++++++++
 mm/slub.c                | 44 +++++++---------------------------------
 3 files changed, 28 insertions(+), 37 deletions(-)
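
Not part of this patch, for illustration only: a minimal sketch of how the
error path in create_cache() could look once sysfs_slab_add() is called from
there, assuming SLUB with CONFIG_SYSFS, where the cache's embedded kobject
(s->kobj) has a ->release() callback (kmem_cache_release() in mm/slub.c) that
frees the kmem_cache. The actual fix in patch 4/4 may differ in detail.

	/* Sketch only: error handling in create_cache() after this separation. */
	if (slab_state >= FULL) {
		err = sysfs_slab_add(s);
		if (err) {
			/*
			 * Drop the reference taken when the kobject was
			 * initialized and added; its ->release() is assumed
			 * to free the kobject name and the kmem_cache itself,
			 * so no separate slab_kmem_cache_release() is needed.
			 */
			kobject_put(&s->kobj);
			return ERR_PTR(err);
		}
		debugfs_slab_add(s);
	}
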

diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index f9c68a9dac04..26d56c4c74d1 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -144,9 +144,14 @@ struct kmem_cache {

#ifdef CONFIG_SYSFS
#define SLAB_SUPPORTS_SYSFS
+int sysfs_slab_add(struct kmem_cache *);
void sysfs_slab_unlink(struct kmem_cache *);
void sysfs_slab_release(struct kmem_cache *);
#else
+static inline int sysfs_slab_add(struct kmem_cache *s)
+{
+ return 0;
+}
static inline void sysfs_slab_unlink(struct kmem_cache *s)
{
}
@@ -155,6 +160,12 @@ static inline void sysfs_slab_release(struct kmem_cache *s)
}
#endif

+#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG)
+void debugfs_slab_add(struct kmem_cache *);
+#else
+static inline void debugfs_slab_add(struct kmem_cache *s) { }
+#endif
+
void *fixup_red_left(struct kmem_cache *s, void *p);

static inline void *nearest_obj(struct kmem_cache *cache, const struct slab *slab,
diff --git a/mm/slab_common.c b/mm/slab_common.c
index e5f430a17d95..f146dea3f9de 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -234,6 +234,16 @@ static struct kmem_cache *create_cache(const char *name,
if (err)
goto out_free_name;

+ /* Mutex is not taken during early boot */
+ if (slab_state >= FULL) {
+ err = sysfs_slab_add(s);
+ if (err) {
+ slab_kmem_cache_release(s);
+ return ERR_PTR(err);
+ }
+ debugfs_slab_add(s);
+ }
+
s->refcount = 1;
list_add(&s->list, &slab_caches);
return s;
diff --git a/mm/slub.c b/mm/slub.c
index ba94eb6fda78..a1ad759753ce 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -299,20 +299,12 @@ struct track {
enum track_item { TRACK_ALLOC, TRACK_FREE };

#ifdef CONFIG_SYSFS
-static int sysfs_slab_add(struct kmem_cache *);
static int sysfs_slab_alias(struct kmem_cache *, const char *);
#else
-static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
{ return 0; }
#endif

-#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG)
-static void debugfs_slab_add(struct kmem_cache *);
-#else
-static inline void debugfs_slab_add(struct kmem_cache *s) { }
-#endif
-
static inline void stat(const struct kmem_cache *s, enum stat_item si)
{
#ifdef CONFIG_SLUB_STATS
@@ -4297,7 +4289,7 @@ static int calculate_sizes(struct kmem_cache *s)
return !!oo_objects(s->oo);
}

-static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
+int __kmem_cache_create(struct kmem_cache *s, slab_flags_t flags)
{
s->flags = kmem_cache_flags(s->size, flags, s->name);
#ifdef CONFIG_SLAB_FREELIST_HARDENED
@@ -4900,30 +4892,6 @@ __kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
return s;
}

-int __kmem_cache_create(struct kmem_cache *s, slab_flags_t flags)
-{
- int err;
-
- err = kmem_cache_open(s, flags);
- if (err)
- return err;
-
- /* Mutex is not taken during early boot */
- if (slab_state <= UP)
- return 0;
-
- err = sysfs_slab_add(s);
- if (err) {
- __kmem_cache_release(s);
- return err;
- }
-
- if (s->flags & SLAB_STORE_USER)
- debugfs_slab_add(s);
-
- return 0;
-}
-
#ifdef CONFIG_SYSFS
static int count_inuse(struct slab *slab)
{
@@ -5913,7 +5881,7 @@ static char *create_unique_id(struct kmem_cache *s)
return name;
}

-static int sysfs_slab_add(struct kmem_cache *s)
+int sysfs_slab_add(struct kmem_cache *s)
{
int err;
const char *name;
@@ -6236,10 +6204,13 @@ static const struct file_operations slab_debugfs_fops = {
.release = slab_debug_trace_release,
};

-static void debugfs_slab_add(struct kmem_cache *s)
+void debugfs_slab_add(struct kmem_cache *s)
{
struct dentry *slab_cache_dir;

+ if (!(s->flags & SLAB_STORE_USER))
+ return;
+
if (unlikely(!slab_debugfs_root))
return;

@@ -6264,8 +6235,7 @@ static int __init slab_debugfs_init(void)
slab_debugfs_root = debugfs_create_dir("slab", NULL);

list_for_each_entry(s, &slab_caches, list)
- if (s->flags & SLAB_STORE_USER)
- debugfs_slab_add(s);
+ debugfs_slab_add(s);

return 0;

--
2.25.1