Date: 15 Apr 2008
From: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Subject: [PATCH] use vmalloc for mem_cgroup allocation. v3
Tested on ia64/NUMA and x86/smp.
==
On ia64, this kmalloc() requires order-4 pages, but the memory does not
need to be physically contiguous.
For a big mem_cgroup, vmalloc is better. For small ones, kmalloc is still used.
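The policy is simply: if the object fits within one page, kmalloc() it;
otherwise fall back to vmalloc(), which only needs virtually contiguous
memory. Purely as an illustration (not part of this patch, and using made-up
names big_alloc()/big_free()), the same idea written as a generic pair of
helpers would look roughly like this:

	/*
	 * Illustrative sketch only; the patch below open-codes the same
	 * policy in mem_cgroup_alloc()/mem_cgroup_free() with a fixed
	 * sizeof(struct mem_cgroup). Needs <linux/slab.h>,
	 * <linux/vmalloc.h>, <linux/string.h> and <linux/gfp.h>.
	 */
	static void *big_alloc(size_t size)
	{
		void *p;

		if (size < PAGE_SIZE)
			p = kmalloc(size, GFP_KERNEL);	/* small: slab is cheaper */
		else
			p = vmalloc(size);		/* big: avoid high-order pages */
		if (p)
			memset(p, 0, size);
		return p;
	}

	static void big_free(void *p, size_t size)
	{
		if (size < PAGE_SIZE)
			kfree(p);
		else
			vfree(p);
	}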


Changelog: v2->v3
- fixed the place of memset.
- added mem_cgroup_alloc()/free()
- use kmalloc if mem_cgroup is small enough.
Changelog: v1->v2
- added memset().

Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>

Index: mm-2.6.25-rc8-mm2/mm/memcontrol.c
===================================================================
--- mm-2.6.25-rc8-mm2.orig/mm/memcontrol.c
+++ mm-2.6.25-rc8-mm2/mm/memcontrol.c
@@ -31,6 +31,7 @@
 #include <linux/spinlock.h>
 #include <linux/fs.h>
 #include <linux/seq_file.h>
+#include <linux/vmalloc.h>
 
 #include <asm/uaccess.h>

@@ -983,6 +984,31 @@ static void free_mem_cgroup_per_zone_inf
 	kfree(mem->info.nodeinfo[node]);
 }

+static struct mem_cgroup *mem_cgroup_alloc(void)
+{
+	struct mem_cgroup *mem;
+
+	if (sizeof(*mem) < PAGE_SIZE)
+		mem = kmalloc(sizeof(*mem), GFP_KERNEL);
+	else
+		mem = vmalloc(sizeof(*mem));
+
+	if (!mem)
+		return NULL;
+
+	memset(mem, 0, sizeof(*mem));
+	return mem;
+}
+
+static void mem_cgroup_free(struct mem_cgroup *mem)
+{
+	if (sizeof(*mem) < PAGE_SIZE)
+		kfree(mem);
+	else
+		vfree(mem);
+}
+
+
 static struct cgroup_subsys_state *
 mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
 {
@@ -992,11 +1018,11 @@ mem_cgroup_create(struct cgroup_subsys *
 	if (unlikely((cont->parent) == NULL)) {
 		mem = &init_mem_cgroup;
 		page_cgroup_cache = KMEM_CACHE(page_cgroup, SLAB_PANIC);
-	} else
-		mem = kzalloc(sizeof(struct mem_cgroup), GFP_KERNEL);
-
-	if (mem == NULL)
-		return ERR_PTR(-ENOMEM);
+	} else {
+		mem = mem_cgroup_alloc();
+		if (!mem)
+			return ERR_PTR(-ENOMEM);
+	}
 
 	res_counter_init(&mem->res);

@@ -1011,7 +1037,7 @@ free_out:
 	for_each_node_state(node, N_POSSIBLE)
 		free_mem_cgroup_per_zone_info(mem, node);
 	if (cont->parent != NULL)
-		kfree(mem);
+		mem_cgroup_free(mem);
 	return ERR_PTR(-ENOMEM);
 }

@@ -1031,7 +1057,7 @@ static void mem_cgroup_destroy(struct cg
 	for_each_node_state(node, N_POSSIBLE)
 		free_mem_cgroup_per_zone_info(mem, node);
 
-	kfree(mem_cgroup_from_cont(cont));
+	mem_cgroup_free(mem_cgroup_from_cont(cont));
 }

 static int mem_cgroup_populate(struct cgroup_subsys *ss,

