Date: Sun, 21 Nov 1999
From: Manfred Spraul <manfreds@colorfullife.com>
Subject: [patch] pre-alpha rw semaphore
Below is a pre-alpha patch for an rw semaphore.
I have not yet implemented the "_interruptible" calls; I'll
add them once the code is stable.

This version compiles with egcs-1.1.2, but everything else is untested.
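
For reference, here is a minimal usage sketch (editorial illustration
only, assuming the patch below is applied; writer(), reader(), my_data
and my_data_sem are made-up names, and this snapshot does not define an
up_shared() yet, so the reader side has no release call):

/* sketch only - assumes the rw_sem API from the patch below */
#include <asm/rwsem.h>

static DECLARE_RWMUTEX(my_data_sem);
static int my_data;

static void writer(int v)
{
        down_exclusive(&my_data_sem);   /* one writer, no readers */
        my_data = v;
        up_exclusive(&my_data_sem);
}

static int reader(void)
{
        int v;

        down_shared(&my_data_sem);      /* any number of readers */
        v = my_data;
        /* no up_shared() in this snapshot yet */
        return v;
}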

--
Manfred
<<<<<<<<<
// $Header: /pub/cvs/ms/patches/patch-rws,v 1.1 1999/11/21 15:47:58 manfreds Exp $
// Kernel Version:
// VERSION = 2
// PATCHLEVEL = 3
// SUBLEVEL = 28
// EXTRAVERSION =
diff -urN -x i386-stub.c -x traps.c -x .* -x *~ -x *.o -x ptrace.h -x ioctls.h 2.3/include/asm-i386/rwsem.h build-2.3/include/asm-i386/rwsem.h
--- 2.3/include/asm-i386/rwsem.h Thu Jan 1 01:00:00 1970
+++ build-2.3/include/asm-i386/rwsem.h Sun Nov 21 16:39:27 1999
@@ -0,0 +1,130 @@
+#ifndef _I386_RWSEM_H
+#define _I386_RWSEM_H
+
+#include <linux/linkage.h>
+
+/*
+ * SMP-safe rw semaphores..
+ *
+ * you cannot call up_{exclusive,shared}() before down_{exclusive,shared}(),
+ * therefore rw semaphores are not interrupt safe.
+ *
+ * (C) Copyright 1999 Manfred Spraul.
+ *
+ * based on semaphore.h (C) Copyright 1996 Linus Torvalds
+ *
+ */
+
+#include <asm/atomic.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+
+struct rw_sem {
+ volatile unsigned int rwlock;
+ int reader_bypass;
+ int sleeper_count;
+ int sleeper_has_writelock;
+ wait_queue_head_t wait;
+};
+
+#define __RWMUTEX_INITIALIZER(name) \
+ { 0, 0, 0, 0, __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) }
+
+
+#define DECLARE_RWMUTEX(name) \
+ struct rw_sem name = __RWMUTEX_INITIALIZER(name)
+
+extern inline void init_rwsem(struct rw_sem* rws)
+{
+ rws->rwlock = 0;
+ rws->reader_bypass = 0;
+ rws->sleeper_count = 0;
+ rws->sleeper_has_writelock = 0;
+ init_waitqueue_head(&rws->wait);
+}
+
+asmlinkage void __down_failed_ww(void /* special register calling convention */);
+asmlinkage int __down_failed_i_ww(void /* params in registers */);
+asmlinkage void __down_failed_r(void /* special register calling convention */);
+asmlinkage int __down_failed_i_r(void /* params in registers */);
+
+asmlinkage void __up_wakeup_rw(void /* special register calling convention */);
+
+#define RWLOCK_DOWN_E(rws,fnc,res) \
+ __asm__ __volatile__( \
+ "# atomic down_exclusive operation\n\t" \
+ LOCK \
+ "btsl $31,%0\n\t" \
+ "jc 2f\n" \
+ "testl $0x7fffffff,%0\n\t" \
+ "jne 3f\n" \
+ "1:\n" \
+ ".section .text.lock,\"ax\"\n" \
+ "2:\t" \
+ "call " #fnc ## "_ww\n\t"\
+ "jmp 1b\n" \
+ "3:\tcall " #fnc ## "_wr\n\t" \
+ "jmp 1b\n" \
+ ".previous" \
+ :res \
+ :"c" (rws) \
+ :"memory")
+
+extern inline void down_exclusive(struct rw_sem* rws)
+{
+ RWLOCK_DOWN_E(rws,__down_failed,/* no outputs */ );
+}
+
+extern inline int down_exclusive_interruptible(struct rw_sem* rws)
+{
+ int result;
+ RWLOCK_DOWN_E(rws,__down_failed_i, "=a"(result));
+
+ return result;
+}
+#define RWLOCK_DOWN_S(rws,fnc,res) \
+ __asm__ __volatile__( \
+ "# atomic down_shared operation\n\t" \
+ LOCK \
+ "inc %0\n\t" \
+ "js 2f\n" \
+ "1:\n" \
+ ".section .text.lock,\"ax\"\n" \
+ "2:\tcall " #fnc "\n\t" \
+ "jmp 1b\n" \
+ ".previous" \
+ : res \
+ :"c" (rws) \
+ :"cc", "memory")
+
+extern inline void down_shared(struct rw_sem * rws)
+{
+ RWLOCK_DOWN_S(rws,__down_failed_r,/* no outputs */ );
+}
+
+extern inline int down_shared_interruptible(struct rw_sem * rws)
+{
+ int result;
+ RWLOCK_DOWN_S(rws,__down_failed_i_r, "=a"(result));
+
+ return result;
+}
+
+extern inline void up_exclusive(struct rw_sem* rws)
+{
+ __asm__ __volatile__(
+ "# atomic up_exclusive operation\n\t"
+ LOCK
+ "andl $0x7FFFffff,%0\n\t"
+ "jne 2f\n\t"
+ "1:\n\t"
+ ".section .text.lock,\"ax\"\n\t"
+ "2:\tcall __up_wakeup_rw\n\t"
+ "jmp 1b\n"
+ ".previous"
+ :/* no outputs */
+ :"c" (rws)
+ :"cc", "memory");
+}
+
+#endif /* _I386_RWSEM_H */
diff -urN -x i386-stub.c -x traps.c -x .* -x *~ -x *.o -x ptrace.h -x ioctls.h 2.3/arch/i386/kernel/Makefile build-2.3/arch/i386/kernel/Makefile
--- 2.3/arch/i386/kernel/Makefile Fri Nov 12 12:50:32 1999
+++ build-2.3/arch/i386/kernel/Makefile Sun Nov 21 16:14:42 1999
@@ -14,7 +14,8 @@

O_TARGET := kernel.o
O_OBJS := process.o semaphore.o signal.o entry.o traps.o irq.o vm86.o \
- ptrace.o i8259.o ioport.o ldt.o setup.o time.o sys_i386.o
+ ptrace.o i8259.o ioport.o ldt.o setup.o time.o sys_i386.o \
+ rwsem.o
OX_OBJS := i386_ksyms.o
MX_OBJS :=

diff -urN -x i386-stub.c -x traps.c -x .* -x *~ -x *.o -x ptrace.h -x ioctls.h 2.3/arch/i386/kernel/rwsem.c build-2.3/arch/i386/kernel/rwsem.c
--- 2.3/arch/i386/kernel/rwsem.c Thu Jan 1 01:00:00 1970
+++ build-2.3/arch/i386/kernel/rwsem.c Sun Nov 21 16:46:12 1999
@@ -0,0 +1,234 @@
+/*
+ * i386 rw mutex implementation.
+ *
+ * (C) Copyright 1999 Manfred Spraul <manfreds@colorfullife.com>
+ *
+ * based on ideas from Linus Torvalds
+ */
+#include <linux/sched.h>
+
+#include <linux/spinlock.h>
+#include <asm/rwsem.h>
+
+static spinlock_t rwsem_lock = SPIN_LOCK_UNLOCKED;
+
+#define ATOMIC_INC(x) \
+ __asm__ __volatile__( \
+ LOCK "incl %0" \
+ :/* no output */ \
+ :"r" (&x) \
+ :"cc", "memory")
+
+#define ATOMIC_DEC(x) \
+ __asm__ __volatile__( \
+ LOCK "decl %0" \
+ :/* no output */ \
+ :"r" (&x) \
+ :"cc", "memory")
+
+#define ATOMIC_GETEXCL(res,lock) \
+ __asm__ __volatile__( \
+ LOCK \
+ "btsl $31,%1\n\t" \
+ /* I would prefer setc %0, but the assembler complained.*/ \
+ "sbbl %0,%0\n" \
+ : "=r"(res) \
+ : "r" (&lock) \
+ : "cc", "memory")
+
+
+/* getting a write lock consists of 2 steps:
+ * a) get the highest bit. This information is shared by
+ * all sleeping writers.
+ * b) check that there is no reader left.
+ * the next reader must starve us, because there is
+ * a window where the reader has increased rws->rwlock
+ * [ie we think that someone else owns the lock],
+ * but then he would fail because the highest bit is set.
+ */
+void __down_e(struct rw_sem* rws, int have_writelock)
+{
+ DECLARE_WAITQUEUE(wait, current);
+ current->state = TASK_UNINTERRUPTIBLE|TASK_EXCLUSIVE;
+ add_wait_queue_exclusive(&rws->wait, &wait);
+ spin_lock(&rwsem_lock);
+ if(rws->sleeper_count++==0)
+ ATOMIC_INC(rws->rwlock);
+ if(have_writelock) {
+ if(rws->sleeper_has_writelock)
+ BUG();
+ rws->sleeper_has_writelock = 1;
+ }
+ for(;;) {
+ if(!rws->sleeper_has_writelock) {
+ int new;
+ ATOMIC_GETEXCL(new,rws->rwlock);
+ rws->sleeper_has_writelock = new;
+ }
+
+ if(rws->sleeper_has_writelock &&
+ rws->rwlock == 0x80000001) {
+ break;
+ }
+
+ if(rws->sleeper_has_writelock)
+ rws->reader_bypass = 1;
+
+ spin_unlock(&rwsem_lock);
+ schedule();
+ spin_lock(&rwsem_lock);
+ rws->reader_bypass = 0;
+ }
+ if(--rws->sleeper_count==0)
+ ATOMIC_DEC(rws->rwlock);
+ rws->sleeper_has_writelock = 0;
+ spin_unlock(&rwsem_lock);
+ remove_wait_queue(&rws->wait, &wait);
+ current->state = TASK_RUNNING;
+}
+
+int __down_e_interruptible(struct rw_sem* rws)
+{
+ BUG();
+ return 0;
+}
+
+void __down_s(struct rw_sem* rws)
+{
+ DECLARE_WAITQUEUE(wait, current);
+
+ /* undo the incl that's part of the inline asm */
+ ATOMIC_DEC(rws->rwlock);
+
+ current->state = TASK_UNINTERRUPTIBLE;
+ add_wait_queue_exclusive(&rws->wait, &wait);
+ spin_lock(&rwsem_lock);
+
+ if(rws->sleeper_count++==0)
+ ATOMIC_INC(rws->rwlock);
+
+ for(;;) {
+ if(rws->reader_bypass) {
+ rws->reader_bypass = 0;
+ break;
+ }
+ if(!(rws->rwlock & 0x80000000))
+ break;
+
+ spin_unlock(&rwsem_lock);
+ schedule();
+ spin_lock(&rwsem_lock);
+ }
+ /* we must clean-up all counters:
+ * decrease sleeper_count
+ * decrease rws->rwlock if we are the last sleeper
+ * increase rws->rwlock because we are a new reader.
+ */
+ if(--rws->sleeper_count!=0)
+ ATOMIC_INC(rws->rwlock);
+ spin_unlock(&rwsem_lock);
+ remove_wait_queue(&rws->wait, &wait);
+ current->state = TASK_RUNNING;
+}
+
+int __down_s_interruptible(struct rw_sem* rws)
+{
+ BUG();
+ return 0;
+}
+
+void __up_rw(struct rw_sem* rws)
+{
+ wake_up(&rws->wait);
+}
+
+/*
+ * The rw mutex operations have a special calling sequence that
+ * allow us to do a simpler in-line version of them. These routines
+ * need to convert that sequence back into the C sequence when
+ * there is contention on the rw mutex.
+ *
+ * %ecx contains the rw_sem pointer on entry. Save the C-clobbered
+ * registers (%eax, %edx and %ecx) except %eax when used as a return
+ * value..
+ */
+#define CONVERTER(name1, name2, value) \
+ asm( \
+ ".align 4\n" \
+ ".globl " #name1 "\n" \
+ #name1 ":\n\t" \
+ "pushl %eax\n\t" \
+ "pushl %edx\n\t" \
+ "pushl $" ## #value " \n\t" \
+ "pushl %ecx\n\t" \
+ "call " #name2 "\n\t" \
+ "popl %ecx\n\t" \
+ "popl %eax\n\t" \
+ "popl %edx\n\t" \
+ "popl %eax\n\t" \
+ "ret" \
+ )
+
+CONVERTER(__down_failed_ww, __down_e, 0);
+CONVERTER(__down_failed_wr, __down_e, 1);
+#undef CONVERTER
+
+#define CONVERTER(name1, name2, value) \
+ asm( \
+ ".align 4\n" \
+ ".globl " #name1 "\n" \
+ #name1 ":\n\t" \
+ "pushl %edx\n\t" \
+ "pushl $" ## #value " \n\t" \
+ "pushl %ecx\n\t" \
+ "call " #name2 "\n\t" \
+ "popl %ecx\n\t" \
+ "popl %eax\n\t" \
+ "popl %edx\n\t" \
+ "ret" \
+ )
+
+CONVERTER(__down_failed_i_ww, __down_e_interruptible, 0);
+CONVERTER(__down_failed_i_wr, __down_e_interruptible, 1);
+#undef CONVERTER
+
+asm(
+".align 4\n"
+".globl __down_failed_r\n"
+"__down_failed:\n\t"
+ "pushl %eax\n\t"
+ "pushl %edx\n\t"
+ "pushl %ecx\n\t"
+ "call __down_s\n\t"
+ "popl %ecx\n\t"
+ "popl %edx\n\t"
+ "popl %eax\n\t"
+ "ret"
+);
+
+asm(
+".align 4\n"
+".globl __down_failed_i_r\n"
+"__down_failed_i_r:\n\t"
+ "pushl %edx\n\t"
+ "pushl %ecx\n\t"
+ "call __down_s_interruptible\n\t"
+ "popl %ecx\n\t"
+ "popl %edx\n\t"
+ "ret"
+);
+
+
+asm(
+".align 4\n"
+".globl __up_wakeup_rw\n"
+"__up_e_wakeup:\n\t"
+ "pushl %eax\n\t"
+ "pushl %edx\n\t"
+ "pushl %ecx\n\t"
+ "call __up_rw\n\t"
+ "popl %ecx\n\t"
+ "popl %edx\n\t"
+ "popl %eax\n\t"
+ "ret"
+);
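
To make the lock-word protocol above easier to follow, here is a small
user-space model of it (editorial sketch, not part of the patch: bit 31
is the exclusive bit, the low 31 bits count readers; the real code parks
waiters on rws->wait and uses rwsem_lock plus reader_bypass for the
writer hand-off, whereas this model simply spins, and all model_* names
are made up):

#include <stdatomic.h>
#include <sched.h>

struct model_rwsem {
        atomic_uint word;       /* bit 31: writer, bits 0..30: reader count */
};

#define MODEL_WRITER 0x80000000u

static void model_down_shared(struct model_rwsem *s)
{
        for (;;) {
                /* fast path: like the "lock; incl", fails if the writer bit is set */
                unsigned int old = atomic_fetch_add(&s->word, 1);
                if (!(old & MODEL_WRITER))
                        return;
                /* slow path: undo the increment (as __down_s() does) and wait */
                atomic_fetch_sub(&s->word, 1);
                while (atomic_load(&s->word) & MODEL_WRITER)
                        sched_yield();
        }
}

static void model_up_shared(struct model_rwsem *s)
{
        atomic_fetch_sub(&s->word, 1);
}

static void model_down_exclusive(struct model_rwsem *s)
{
        /* step a: grab the exclusive bit, like "lock; btsl $31" */
        while (atomic_fetch_or(&s->word, MODEL_WRITER) & MODEL_WRITER)
                sched_yield();
        /* step b: wait until every reader has drained */
        while (atomic_load(&s->word) != MODEL_WRITER)
                sched_yield();
}

static void model_up_exclusive(struct model_rwsem *s)
{
        /* clear bit 31, like "lock; andl $0x7FFFffff" */
        atomic_fetch_and(&s->word, ~MODEL_WRITER);
}

A struct model_rwsem whose word starts at zero is unlocked, which
mirrors what DECLARE_RWMUTEX()/init_rwsem() set up.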

