Subject: [PATCH] move task_struct allocation to arch

Hi Linus,

This patch moves task_struct allocation out of kernel/fork.c and into
arch/*/kernel/process.c: each arch now provides init_task_struct_cache(),
dup_task_struct() and __put_task_struct(), declared in asm/processor.h
(i386 and sparc64 are converted here).

David Mosberger wrote:

> David.H> What might be worth doing is to move the task_struct slab
> David.H> cache and (de-)allocator out of fork.c and to stick it in
> David.H> the arch somewhere. Then archs aren't bound to have the two
> David.H> separate. So for a system that can handle lots of memory,
> David.H> you can allocate the thread_info, task_struct and
> David.H> supervisor stack all on one very large chunk if you so
> David.H> wish.
>
> Could you do this? I'd prefer if task_info could be completely hidden
> inside the x86/sparc arch-specific code, but if things are set up such
> that we at least have the option to keep the stack, task_info, and
> task_struct in a single chunk of memory (and without pointers between
> them), I'd have much less of an issue with it.
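
Purely as an illustration of what David Mosberger suggests above (it is
not what this patch does), an arch could then fold thread_info,
task_struct and the kernel stack into a single allocation.  A rough,
untested sketch; the single-chunk layout and the use of
__get_free_pages()/get_order(THREAD_SIZE) are assumptions made up for
the example:

/*
 * Illustrative sketch only -- not part of this patch.  One allocation
 * holds thread_info at the bottom, task_struct right behind it, and
 * the kernel stack in the rest of the chunk (growing down from the
 * top, much like the old task_union layout).  THREAD_SIZE is assumed
 * to be the size of the whole chunk.
 */
struct task_struct *dup_task_struct(struct task_struct *orig)
{
	struct thread_info *ti;
	struct task_struct *tsk;

	ti = (struct thread_info *) __get_free_pages(GFP_KERNEL,
						get_order(THREAD_SIZE));
	if (!ti)
		return NULL;
	tsk = (struct task_struct *)(ti + 1);

	*ti = *orig->thread_info;
	*tsk = *orig;
	tsk->thread_info = ti;
	ti->task = tsk;
	atomic_set(&tsk->usage, 1);

	return tsk;
}

void __put_task_struct(struct task_struct *tsk)
{
	/* thread_info, task_struct and stack go away together */
	free_pages((unsigned long) tsk->thread_info,
		   get_order(THREAD_SIZE));
}

In that scheme the per-arch slab cache and alloc_thread_info() /
free_thread_info() drop out entirely; the trade-off is the same
stack-overrun exposure the old combined layout had.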

David

diff -uNr linux-2.5.5-pre1/arch/i386/kernel/process.c linux-tib-255p1/arch/i386/kernel/process.c
--- linux-2.5.5-pre1/arch/i386/kernel/process.c Thu Feb 14 14:31:13 2002
+++ linux-tib-255p1/arch/i386/kernel/process.c Thu Feb 14 14:32:35 2002
@@ -54,6 +54,8 @@

int hlt_counter;

+static kmem_cache_t *task_struct_cachep;
+
/*
* Return saved PC of a blocked thread.
*/
@@ -522,8 +524,49 @@
}

/*
- * Free current thread data structures etc..
+ * allocate and free task and thread data structures etc..
*/
+void __init init_task_struct_cache(void)
+{
+	/* create a slab on which task_structs can be allocated */
+	task_struct_cachep =
+		kmem_cache_create("task_struct",
+				  sizeof(struct task_struct),0,
+				  SLAB_HWCACHE_ALIGN, NULL, NULL);
+	if (!task_struct_cachep)
+		panic("fork_init(): cannot create task_struct SLAB cache");
+
+}
+
+struct task_struct *dup_task_struct(struct task_struct *orig)
+{
+	struct task_struct *tsk;
+	struct thread_info *ti;
+
+	ti = alloc_thread_info();
+	if (!ti) return NULL;
+
+	tsk = kmem_cache_alloc(task_struct_cachep,GFP_ATOMIC);
+	if (!tsk) {
+		free_thread_info(ti);
+		return NULL;
+	}
+
+	*ti = *orig->thread_info;
+	*tsk = *orig;
+	tsk->thread_info = ti;
+	ti->task = tsk;
+	atomic_set(&tsk->usage,1);
+
+	return tsk;
+}
+
+void __put_task_struct(struct task_struct *tsk)
+{
+	free_thread_info(tsk->thread_info);
+	kmem_cache_free(task_struct_cachep,tsk);
+}
+
void exit_thread(void)
{
/* nothing to do ... */
diff -uNr linux-2.5.5-pre1/arch/sparc64/kernel/process.c linux-tib-255p1/arch/sparc64/kernel/process.c
--- linux-2.5.5-pre1/arch/sparc64/kernel/process.c Thu Feb 14 14:04:42 2002
+++ linux-tib-255p1/arch/sparc64/kernel/process.c Thu Feb 14 14:21:26 2002
@@ -40,6 +40,8 @@
#include <asm/elf.h>
#include <asm/fpumacro.h>

+static kmem_cache_t *task_struct_cachep;
+
/* #define VERBOSE_SHOWREGS */

#ifndef CONFIG_SMP
@@ -398,7 +400,50 @@
return ret;
}

-/* Free current thread data structures etc.. */
+/*
+ * allocate and free task and thread data structures etc..
+ */
+void __init init_task_struct_cache(void)
+{
+	/* create a slab on which task_structs can be allocated */
+	task_struct_cachep =
+		kmem_cache_create("task_struct",
+				  sizeof(struct task_struct),0,
+				  SLAB_HWCACHE_ALIGN, NULL, NULL);
+	if (!task_struct_cachep)
+		panic("fork_init(): cannot create task_struct SLAB cache");
+
+}
+
+struct task_struct *dup_task_struct(struct task_struct *orig)
+{
+	struct task_struct *tsk;
+	struct thread_info *ti;
+
+	ti = alloc_thread_info();
+	if (!ti) return NULL;
+
+	tsk = kmem_cache_alloc(task_struct_cachep,GFP_ATOMIC);
+	if (!tsk) {
+		free_thread_info(ti);
+		return NULL;
+	}
+
+	*ti = *orig->thread_info;
+	*tsk = *orig;
+	tsk->thread_info = ti;
+	ti->task = tsk;
+	atomic_set(&tsk->usage,1);
+
+	return tsk;
+}
+
+void __put_task_struct(struct task_struct *tsk)
+{
+	free_thread_info(tsk->thread_info);
+	kmem_cache_free(task_struct_cachep,tsk);
+}
+
void exit_thread(void)
{
struct thread_info *t = current_thread_info();
diff -uNr linux-2.5.5-pre1/include/asm-i386/processor.h linux-tib-255p1/include/asm-i386/processor.h
--- linux-2.5.5-pre1/include/asm-i386/processor.h Thu Feb 14 14:31:18 2002
+++ linux-tib-255p1/include/asm-i386/processor.h Thu Feb 14 14:38:55 2002
@@ -425,6 +425,11 @@
struct task_struct;
struct mm_struct;

+/* task structure allocation */
+extern void init_task_struct_cache(void);
+extern struct task_struct *dup_task_struct(struct task_struct *orig);
+extern void __put_task_struct(struct task_struct *tsk);
+
/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);
/*
diff -uNr linux-2.5.5-pre1/include/asm-sparc64/processor.h linux-tib-255p1/include/asm-sparc64/processor.h
--- linux-2.5.5-pre1/include/asm-sparc64/processor.h Thu Feb 14 14:03:47 2002
+++ linux-tib-255p1/include/asm-sparc64/processor.h Thu Feb 14 14:34:38 2002
@@ -175,6 +175,11 @@
#define copy_segments(tsk, mm) do { } while (0)
#define release_segments(mm) do { } while (0)

+/* task structure allocation */
+extern void init_task_struct_cache(void);
+extern struct task_struct *dup_task_struct(struct task_struct *orig);
+extern void __put_task_struct(struct task_struct *tsk);
+
#define get_wchan(__TSK) \
({ extern void scheduling_functions_start_here(void); \
extern void scheduling_functions_end_here(void); \
diff -uNr linux-2.5.5-pre1/kernel/fork.c linux-tib-255p1/kernel/fork.c
--- linux-2.5.5-pre1/kernel/fork.c Thu Feb 14 14:31:19 2002
+++ linux-tib-255p1/kernel/fork.c Thu Feb 14 14:32:40 2002
@@ -30,8 +30,6 @@
#include <asm/uaccess.h>
#include <asm/mmu_context.h>

-static kmem_cache_t *task_struct_cachep;
-
/* The idle threads do not count.. */
int nr_threads;

@@ -74,13 +72,7 @@

void __init fork_init(unsigned long mempages)
{
-	/* create a slab on which task_structs can be allocated */
-	task_struct_cachep =
-		kmem_cache_create("task_struct",
-				  sizeof(struct task_struct),0,
-				  SLAB_HWCACHE_ALIGN, NULL, NULL);
-	if (!task_struct_cachep)
-		panic("fork_init(): cannot create task_struct SLAB cache");
+	init_task_struct_cache();

/*
* The default maximum number of threads is set to a safe
@@ -93,35 +85,6 @@
init_task.rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
}

-struct task_struct *dup_task_struct(struct task_struct *orig)
-{
-	struct task_struct *tsk;
-	struct thread_info *ti;
-
-	ti = alloc_thread_info();
-	if (!ti) return NULL;
-
-	tsk = kmem_cache_alloc(task_struct_cachep,GFP_ATOMIC);
-	if (!tsk) {
-		free_thread_info(ti);
-		return NULL;
-	}
-
-	*ti = *orig->thread_info;
-	*tsk = *orig;
-	tsk->thread_info = ti;
-	ti->task = tsk;
-	atomic_set(&tsk->usage,1);
-
-	return tsk;
-}
-
-void __put_task_struct(struct task_struct *tsk)
-{
-	free_thread_info(tsk->thread_info);
-	kmem_cache_free(task_struct_cachep,tsk);
-}
-
/* Protects next_safe and last_pid. */
spinlock_t lastpid_lock = SPIN_LOCK_UNLOCKED;
