Subject: Re: [patch] preemptive kernel, preemptive-2.3.52-A7
On Tue, 14 Mar 2000, Andrea Arcangeli wrote:

>I think yes.

This is one possible implementation against 2.3.52pre1:
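
In short: each task gets a preempt_depth counter. Long kernel loops that
hold no spinlocks (usercopies, page clearing) bracket themselves with
preempt_open()/preempt_close(), and the interrupt-return path is allowed
to reschedule a task whose preempt_depth is nonzero even when it returns
to kernel mode. A minimal sketch of the intended usage pattern
(illustrative only; the real call sites are in the diff below):

	preempt_open();		/* from here an irq return may reschedule us */
	while (left--)		/* long loop, no spinlocks held */
		*dst++ = *src++;
	preempt_close();	/* back to the usual no-kernel-preemption rule */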

diff -urN 2.3.52pre1/arch/i386/kernel/entry.S p/arch/i386/kernel/entry.S
--- 2.3.52pre1/arch/i386/kernel/entry.S Sun Feb 27 06:19:41 2000
+++ p/arch/i386/kernel/entry.S Tue Mar 14 04:09:57 2000
@@ -76,7 +76,8 @@
addr_limit = 12
exec_domain = 16
need_resched = 20
-processor = 56
+preempt_depth = 24
+processor = 60

ENOSYS = 38

@@ -276,11 +277,27 @@
ALIGN
ret_from_intr:
GET_CURRENT(%ebx)
+ cmpl $0,preempt_depth(%ebx)
+ jne ret_with_kernel_preempt
movl EFLAGS(%esp),%eax # mix EFLAGS and CS
movb CS(%esp),%al
testl $(VM_MASK | 3),%eax # return to VM86 mode or non-supervisor?
jne ret_with_reschedule
jmp restore_all
+
+ret_with_kernel_preempt:
+#ifdef __SMP__
+ movl processor(%ebx),%eax
+ shll $5,%eax
+ movl SYMBOL_NAME(irq_stat)(,%eax),%ecx
+ orl SYMBOL_NAME(irq_stat)+4(,%eax),%ecx
+#else
+ movl SYMBOL_NAME(irq_stat),%ecx
+ orl SYMBOL_NAME(irq_stat)+4,%ecx
+#endif
+ cmpl $0,%ecx
+ jne restore_all
+ jmp ret_with_reschedule

ALIGN
handle_softirq:
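
For readability, the ret_from_intr change above behaves like this C-level
logic (my paraphrase; cpu is the value loaded from processor(%ebx), and
the irq_stat indexing relies on the 32-byte per-CPU stride implied by the
shll $5):

	/* on return from interrupt: */
	if (current->preempt_depth != 0) {
		/* preemptible section: reschedule, unless we are still
		   nested inside a hard irq or a bottom half */
		if (irq_stat[cpu].__local_irq_count |
		    irq_stat[cpu].__local_bh_count)
			goto restore_all;
		goto ret_with_reschedule;
	}
	/* otherwise only user-mode/VM86 returns may reschedule */
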
diff -urN 2.3.52pre1/arch/i386/lib/usercopy.c p/arch/i386/lib/usercopy.c
--- 2.3.52pre1/arch/i386/lib/usercopy.c Sun Nov 21 03:20:23 1999
+++ p/arch/i386/lib/usercopy.c Tue Mar 14 03:14:29 2000
@@ -64,6 +64,7 @@
#define __do_strncpy_from_user(dst,src,count,res) \
do { \
int __d0, __d1, __d2; \
+ preempt_open(); \
__asm__ __volatile__( \
" testl %1,%1\n" \
" jz 2f\n" \
@@ -87,6 +88,7 @@
"=&D" (__d2) \
: "i"(-EFAULT), "0"(count), "1"(count), "3"(src), "4"(dst) \
: "memory"); \
+ preempt_close(); \
} while (0)

long
@@ -114,6 +116,7 @@
#define __do_clear_user(addr,size) \
do { \
int __d0; \
+ preempt_open(); \
__asm__ __volatile__( \
"0: rep; stosl\n" \
" movl %2,%0\n" \
@@ -130,6 +133,7 @@
".previous" \
: "=&c"(size), "=&D" (__d0) \
: "r"(size & 3), "0"(size / 4), "1"(addr), "a"(0)); \
+ preempt_close(); \
} while (0)

unsigned long
@@ -158,6 +162,7 @@
unsigned long mask = -__addr_ok(s);
unsigned long res, tmp;

+ preempt_open();
__asm__ __volatile__(
" andl %0,%%ecx\n"
"0: repne; scasb\n"
@@ -176,5 +181,6 @@
:"=r" (n), "=D" (s), "=a" (res), "=c" (tmp)
:"0" (n), "1" (s), "2" (0), "3" (mask)
:"cc");
+ preempt_close();
return res & mask;
}
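
All three primitives above (__do_strncpy_from_user, __do_clear_user,
strnlen_user) run rep-string loops over arbitrarily large user buffers
with no spinlocks held, which is what makes them safe and worthwhile
preemption points.
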
diff -urN 2.3.52pre1/include/asm-i386/hardirq.h p/include/asm-i386/hardirq.h
--- 2.3.52pre1/include/asm-i386/hardirq.h Tue Mar 14 03:39:47 2000
+++ p/include/asm-i386/hardirq.h Tue Mar 14 04:17:57 2000
@@ -4,6 +4,10 @@
#include <linux/threads.h>
#include <linux/irq.h>

+/*
+ * This struct can't be changed without updating arch/i386/kernel/entry.S
+ * ret_with_kernel_preempt path.
+ */
typedef struct {
unsigned int __local_irq_count;
unsigned int __local_bh_count;
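
The dependency the new comment warns about is the hardcoded addressing in
the entry.S hunk above, namely (my reading):

	/* assumptions made by ret_with_kernel_preempt:		*/
	/*   __local_irq_count is at offset 0 of irq_stat[cpu]	*/
	/*   __local_bh_count  is at offset 4			*/
	/*   each per-CPU entry is 32 bytes (hence the shll $5)	*/
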
diff -urN 2.3.52pre1/include/asm-i386/uaccess.h p/include/asm-i386/uaccess.h
--- 2.3.52pre1/include/asm-i386/uaccess.h Tue Mar 14 04:20:08 2000
+++ p/include/asm-i386/uaccess.h Tue Mar 14 04:20:09 2000
@@ -253,6 +253,7 @@
#define __copy_user(to,from,size) \
do { \
int __d0, __d1; \
+ preempt_open(); \
__asm__ __volatile__( \
"0: rep; movsl\n" \
" movl %3,%0\n" \
@@ -270,11 +271,13 @@
: "=&c"(size), "=&D" (__d0), "=&S" (__d1) \
: "r"(size & 3), "0"(size / 4), "1"(to), "2"(from) \
: "memory"); \
+ preempt_close(); \
} while (0)

#define __copy_user_zeroing(to,from,size) \
do { \
int __d0, __d1; \
+ preempt_open(); \
__asm__ __volatile__( \
"0: rep; movsl\n" \
" movl %3,%0\n" \
@@ -298,6 +301,7 @@
: "=&c"(size), "=&D" (__d0), "=&S" (__d1) \
: "r"(size & 3), "0"(size / 4), "1"(to), "2"(from) \
: "memory"); \
+ preempt_close(); \
} while (0)

/* We let the __ versions of copy_from/to_user inline, because they're often
diff -urN 2.3.52pre1/include/linux/sched.h p/include/linux/sched.h
--- 2.3.52pre1/include/linux/sched.h Sat Mar 4 19:59:20 2000
+++ p/include/linux/sched.h Tue Mar 14 04:20:09 2000
@@ -268,6 +268,7 @@
*/
struct exec_domain *exec_domain;
volatile long need_resched;
+ long preempt_depth;

cycles_t avg_slice;
int lock_depth; /* Lock depth. We can context switch in and out of holding a syscall kernel lock... */
@@ -857,6 +858,13 @@
{
up(&p->exit_sem);
}
+
+#if 1
+#define preempt_open() do { current->preempt_depth++; } while(0)
+#else
+#define preempt_open() do { if (!current->preempt_depth++ - current->need_resched < 0) schedule(); } while(0)
+#endif
+#define preempt_close() do { current->preempt_depth--; } while(0)

#endif /* __KERNEL__ */

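The #if 1 selects the trivial counting variant. The disabled #else branch
looks like it is meant to call schedule() right away at preempt_open()
time when a reschedule is already pending; as written it parses as
((!old_depth) - need_resched) < 0 and so only fires for a nested open,
so the intent is presumably something along these lines (my reading, not
part of the patch):

	#define preempt_open() \
		do { \
			if (!current->preempt_depth++ && current->need_resched) \
				schedule(); \
		} while (0)
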
diff -urN 2.3.52pre1/mm/memory.c p/mm/memory.c
--- 2.3.52pre1/mm/memory.c Sat Mar 11 20:02:33 2000
+++ p/mm/memory.c Tue Mar 14 03:28:35 2000
@@ -1073,7 +1073,9 @@
return -1;
if (PageHighMem(page))
high = 1;
+ preempt_open();
clear_highpage(page);
+ preempt_close();
entry = pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
vma->vm_mm->rss++;
tsk->min_flt++;
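
clear_highpage() zeroes a whole page for the anonymous-fault case with no
spinlocks held, so it gets the same preempt_open()/preempt_close()
bracketing as the usercopy loops.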

Andrea

