Subject: [patch] 2.4.19-pre10-ac2: O(1) scheduler merge, -A3.

The attached patch, sched-2.4.19-pre10-ac2-A3, is a backport of the
current 2.5 O(1) scheduler, against 2.4.19-pre10-ac2. The patch includes
all the recent fixes. (It should not break any architecture that was
working before on -ac. The most affected architecture is Sparc64; I added
the bits without testing them. David?)

The patch can also be downloaded from:

http://redhat.com/~mingo/O(1)-scheduler/sched-2.4.19-pre10-ac2-A3

Changes relative to 2.4.19-pre10-ac2:

Bugfixes:

- rq->frozen fixes, which close SMP races on x86 and Sparc64 as well.

- sched_yield() fixes in the O(1) scheduler: do not starve CPU-intensive
processes. (A small user-space test sketch is included further below,
before the patch.)

- migration bugfix: do not fast-migrate a task incorrectly if it is in
the middle of load_balance().

- sync wakeup reintroduction; this should fix the observed pipe latency
problems.

Feature backports:

- nr_uninterruptible optimization. (This is a fairly straightforward
and low-risk feature, and since it also made the backport easier, I
included it.)

- sched_setaffinity() & sched_getaffinity() syscalls on x86. (A minimal
user-space usage sketch is included further below.)

Plus identity changes and comment updates to bring sched.c in line with
the 2.5 version.
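
For reference, here is a minimal user-space sketch of driving the two new
affinity syscalls. There is no glibc wrapper for them in this tree, so
syscall() is used directly; the syscall numbers 241/242 are taken from the
x86 entry.S hunk in the patch below and are only an assumption for any
other architecture or tree:

#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/syscall.h>

/* x86 slots from the entry.S hunk below; an assumption elsewhere */
#define __NR_sched_setaffinity 241
#define __NR_sched_getaffinity 242

int main(void)
{
	unsigned long mask = 1UL << 0;	/* bind ourselves to CPU #0 */
	long ret;

	ret = syscall(__NR_sched_setaffinity, getpid(), sizeof(mask), &mask);
	if (ret < 0)
		perror("sched_setaffinity");

	mask = 0;
	ret = syscall(__NR_sched_getaffinity, getpid(), sizeof(mask), &mask);
	if (ret < 0)
		perror("sched_getaffinity");
	else
		printf("affinity: 0x%08lx (kernel mask length: %ld bytes)\n",
			mask, ret);
	return 0;
}

Note that sys_sched_getaffinity() returns the real mask length on success,
as implemented in the patch.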

The patch was tested on x86 UP and SMP boxes.
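
As a quick way to exercise the sched_yield() fix mentioned above, the
following user-space sketch runs a yielding loop against a plain busy loop
for a few seconds; with the fix the busy task should still make roughly
normal progress instead of being starved. (Illustrative only; on SMP both
tasks would need to be bound to the same CPU, e.g. via the affinity
syscalls above, for the comparison to mean anything.)

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <signal.h>
#include <sched.h>

static volatile unsigned long count;

static void report(int sig)
{
	(void)sig;
	printf("pid %d: %lu iterations\n", getpid(), count);
	exit(0);
}

static void loop(int yielder)
{
	signal(SIGALRM, report);
	alarm(5);
	for (;;) {
		count++;
		if (yielder)
			sched_yield();	/* the path the fix changes */
	}
}

int main(void)
{
	switch (fork()) {
	case -1:
		perror("fork");
		return 1;
	case 0:
		loop(1);	/* child: yields on every iteration */
	default:
		loop(0);	/* parent: pure CPU burner */
	}
	return 0;		/* never reached */
}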

Ingo
--- linux/fs/pipe.c.orig Sun Jun 16 18:46:45 2002
+++ linux/fs/pipe.c Sun Jun 16 18:47:14 2002
@@ -115,7 +115,7 @@
* writers synchronously that there is more
* room.
*/
- wake_up_interruptible(PIPE_WAIT(*inode));
+ wake_up_interruptible_sync(PIPE_WAIT(*inode));
if (!PIPE_EMPTY(*inode))
BUG();
goto do_more_read;
@@ -215,7 +215,7 @@
* is going to give up this CPU, so it doesnt have
* to do idle reschedules.
*/
- wake_up_interruptible(PIPE_WAIT(*inode));
+ wake_up_interruptible_sync(PIPE_WAIT(*inode));
PIPE_WAITING_WRITERS(*inode)++;
pipe_wait(inode);
PIPE_WAITING_WRITERS(*inode)--;
--- linux/kernel/sched.c.orig Sun Jun 16 17:34:06 2002
+++ linux/kernel/sched.c Sun Jun 16 18:23:44 2002
@@ -1,5 +1,5 @@
/*
- * linux/kernel/sched.c
+ * kernel/sched.c
*
* Kernel scheduler and related syscalls
*
@@ -13,16 +13,18 @@
* hybrid priority-list and round-robin design with
* an array-switch method of distributing timeslices
* and per-CPU runqueues. Additional code by Davide
- * Libenzi, Robert Love, and Rusty Russel.
+ * Libenzi, Robert Love, and Rusty Russell.
*/

#include <linux/mm.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <asm/uaccess.h>
+#include <linux/highmem.h>
#include <linux/smp_lock.h>
-#include <linux/interrupt.h>
#include <asm/mmu_context.h>
+#include <linux/interrupt.h>
+#include <linux/completion.h>
#include <linux/kernel_stat.h>

/*
@@ -133,8 +135,8 @@
*/
struct runqueue {
spinlock_t lock;
- spinlock_t frozen;
unsigned long nr_running, nr_switches, expired_timestamp;
+ signed long nr_uninterruptible;
task_t *curr, *idle;
prio_array_t *active, *expired, arrays[2];
int prev_nr_running[NR_CPUS];
@@ -150,13 +152,29 @@
#define cpu_curr(cpu) (cpu_rq(cpu)->curr)
#define rt_task(p) ((p)->prio < MAX_RT_PRIO)

+/*
+ * Default context-switch locking:
+ */
+#ifndef prepare_arch_schedule
+# define prepare_arch_schedule(prev) do { } while(0)
+# define finish_arch_schedule(prev) do { } while(0)
+# define prepare_arch_switch(rq) do { } while(0)
+# define finish_arch_switch(rq) spin_unlock_irq(&(rq)->lock)
+#endif
+
+/*
+ * task_rq_lock - lock the runqueue a given task resides on and disable
+ * interrupts. Note the ordering: we can safely lookup the task_rq without
+ * explicitly disabling preemption.
+ */
static inline runqueue_t *task_rq_lock(task_t *p, unsigned long *flags)
{
struct runqueue *rq;

repeat_lock_task:
+ local_irq_save(*flags);
rq = task_rq(p);
- spin_lock_irqsave(&rq->lock, *flags);
+ spin_lock(&rq->lock);
if (unlikely(rq != task_rq(p))) {
spin_unlock_irqrestore(&rq->lock, *flags);
goto repeat_lock_task;
@@ -170,6 +188,23 @@
}

/*
+ * rq_lock - lock a given runqueue and disable interrupts.
+ */
+static inline runqueue_t *rq_lock(runqueue_t *rq)
+{
+ local_irq_disable();
+ rq = this_rq();
+ spin_lock(&rq->lock);
+ return rq;
+}
+
+static inline void rq_unlock(runqueue_t *rq)
+{
+ spin_unlock(&rq->lock);
+ local_irq_enable();
+}
+
+/*
* Adding/removing a task to/from a priority array:
*/
static inline void dequeue_task(struct task_struct *p, prio_array_t *array)
@@ -239,12 +274,15 @@
static inline void deactivate_task(struct task_struct *p, runqueue_t *rq)
{
rq->nr_running--;
+ if (p->state == TASK_UNINTERRUPTIBLE)
+ rq->nr_uninterruptible++;
dequeue_task(p, p->array);
p->array = NULL;
}

static inline void resched_task(task_t *p)
{
+#ifdef CONFIG_SMP
int need_resched;

need_resched = p->need_resched;
@@ -252,6 +290,9 @@
set_tsk_need_resched(p);
if (!need_resched && (p->cpu != smp_processor_id()))
smp_send_reschedule(p->cpu);
+#else
+ set_tsk_need_resched(p);
+#endif
}

#ifdef CONFIG_SMP
@@ -267,9 +308,9 @@

repeat:
rq = task_rq(p);
- while (unlikely(rq->curr == p)) {
+ if (unlikely(rq->curr == p)) {
cpu_relax();
- barrier();
+ goto repeat;
}
rq = task_rq_lock(p, &flags);
if (unlikely(rq->curr == p)) {
@@ -303,35 +344,50 @@
* "current->state = TASK_RUNNING" to mark yourself runnable
* without the overhead of this.
*/
-static int try_to_wake_up(task_t * p)
+static int try_to_wake_up(task_t * p, int sync)
{
unsigned long flags;
int success = 0;
+ long old_state;
runqueue_t *rq;

+repeat_lock_task:
rq = task_rq_lock(p, &flags);
- p->state = TASK_RUNNING;
+ old_state = p->state;
if (!p->array) {
+ if (unlikely(sync && (rq->curr != p))) {
+ if (p->cpu != smp_processor_id()) {
+ p->cpu = smp_processor_id();
+ task_rq_unlock(rq, &flags);
+ goto repeat_lock_task;
+ }
+ }
+ if (old_state == TASK_UNINTERRUPTIBLE)
+ rq->nr_uninterruptible--;
activate_task(p, rq);
+ /*
+ * If sync is set, a resched_task() is a NOOP
+ */
if (p->prio < rq->curr->prio)
resched_task(rq->curr);
success = 1;
}
+ p->state = TASK_RUNNING;
task_rq_unlock(rq, &flags);
+
return success;
}

int wake_up_process(task_t * p)
{
- return try_to_wake_up(p);
+ return try_to_wake_up(p, 0);
}

void wake_up_forked_process(task_t * p)
{
runqueue_t *rq;

- rq = this_rq();
- spin_lock_irq(&rq->lock);
+ rq = rq_lock(rq);

p->state = TASK_RUNNING;
if (!rt_task(p)) {
@@ -346,7 +402,8 @@
}
p->cpu = smp_processor_id();
activate_task(p, rq);
- spin_unlock_irq(&rq->lock);
+
+ rq_unlock(rq);
}

/*
@@ -377,17 +434,16 @@
#if CONFIG_SMP
asmlinkage void schedule_tail(task_t *prev)
{
- spin_unlock_irq(&this_rq()->frozen);
+ finish_arch_switch(this_rq());
+ finish_arch_schedule(prev);
}
#endif

-static inline void context_switch(task_t *prev, task_t *next)
+static inline task_t * context_switch(task_t *prev, task_t *next)
{
struct mm_struct *mm = next->mm;
struct mm_struct *oldmm = prev->active_mm;

- prepare_to_switch();
-
if (unlikely(!mm)) {
next->active_mm = oldmm;
atomic_inc(&oldmm->mm_count);
@@ -402,6 +458,8 @@

/* Here we just switch the register state and the stack. */
switch_to(prev, next, prev);
+
+ return prev;
}

unsigned long nr_running(void)
@@ -414,6 +472,16 @@
return sum;
}

+unsigned long nr_uninterruptible(void)
+{
+ unsigned long i, sum = 0;
+
+ for (i = 0; i < smp_num_cpus; i++)
+ sum += cpu_rq(cpu_logical_map(i))->nr_uninterruptible;
+
+ return sum;
+}
+
unsigned long nr_context_switches(void)
{
unsigned long i, sum = 0;
@@ -569,7 +637,7 @@
#define CAN_MIGRATE_TASK(p,rq,this_cpu) \
((jiffies - (p)->sleep_timestamp > cache_decay_ticks) && \
((p) != (rq)->curr) && \
- ((p)->cpus_allowed & (1 << (this_cpu))))
+ ((p)->cpus_allowed & (1UL << (this_cpu))))

if (!CAN_MIGRATE_TASK(tmp, busiest, this_cpu)) {
curr = curr->next;
@@ -726,13 +794,14 @@
list_t *queue;
int idx;

- BUG_ON(in_interrupt());
-
+ if (unlikely(in_interrupt()))
+ BUG();
need_resched:
prev = current;
rq = this_rq();

release_kernel_lock(prev, smp_processor_id());
+ prepare_arch_schedule(prev);
prev->sleep_timestamp = jiffies;
spin_lock_irq(&rq->lock);

@@ -783,26 +852,19 @@
if (likely(prev != next)) {
rq->nr_switches++;
rq->curr = next;
- spin_lock(&rq->frozen);
- spin_unlock(&rq->lock);
-
- context_switch(prev, next);
- /*
- * The runqueue pointer might be from another CPU
- * if the new task was last running on a different
- * CPU - thus re-load it.
- */
- mb();
+
+ prepare_arch_switch(rq);
+ prev = context_switch(prev, next);
+ barrier();
rq = this_rq();
- spin_unlock_irq(&rq->frozen);
- } else {
+ finish_arch_switch(rq);
+ } else
spin_unlock_irq(&rq->lock);
- }
+ finish_arch_schedule(prev);

reacquire_kernel_lock(current);
if (need_resched())
goto need_resched;
- return;
}

/*
@@ -814,8 +876,7 @@
* started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
* zero in this (rare) case, and we handle it by continuing to scan the queue.
*/
-static inline void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
- int nr_exclusive)
+static inline void __wake_up_common(wait_queue_head_t *q, unsigned int mode, int nr_exclusive, int sync)
{
struct list_head *tmp;
unsigned int state;
@@ -826,7 +887,7 @@
curr = list_entry(tmp, wait_queue_t, task_list);
p = curr->task;
state = p->state;
- if ((state & mode) && try_to_wake_up(p) &&
+ if ((state & mode) && try_to_wake_up(p, sync) &&
((curr->flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive))
break;
}
@@ -839,24 +900,43 @@
if (unlikely(!q))
return;

- wq_read_lock_irqsave(&q->lock, flags);
- __wake_up_common(q, mode, nr_exclusive);
- wq_read_unlock_irqrestore(&q->lock, flags);
+ spin_lock_irqsave(&q->lock, flags);
+ __wake_up_common(q, mode, nr_exclusive, 0);
+ spin_unlock_irqrestore(&q->lock, flags);
}

+#if CONFIG_SMP
+
+void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
+{
+ unsigned long flags;
+
+ if (unlikely(!q))
+ return;
+
+ spin_lock_irqsave(&q->lock, flags);
+ if (likely(nr_exclusive))
+ __wake_up_common(q, mode, nr_exclusive, 1);
+ else
+ __wake_up_common(q, mode, nr_exclusive, 0);
+ spin_unlock_irqrestore(&q->lock, flags);
+}
+
+#endif
+
void complete(struct completion *x)
{
unsigned long flags;

- wq_write_lock_irqsave(&x->wait.lock, flags);
+ spin_lock_irqsave(&x->wait.lock, flags);
x->done++;
- __wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1);
- wq_write_unlock_irqrestore(&x->wait.lock, flags);
+ __wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1, 0);
+ spin_unlock_irqrestore(&x->wait.lock, flags);
}

void wait_for_completion(struct completion *x)
{
- wq_write_lock_irq(&x->wait.lock);
+ spin_lock_irq(&x->wait.lock);
if (!x->done) {
DECLARE_WAITQUEUE(wait, current);

@@ -864,14 +944,14 @@
__add_wait_queue_tail(&x->wait, &wait);
do {
__set_current_state(TASK_UNINTERRUPTIBLE);
- wq_write_unlock_irq(&x->wait.lock);
+ spin_unlock_irq(&x->wait.lock);
schedule();
- wq_write_lock_irq(&x->wait.lock);
+ spin_lock_irq(&x->wait.lock);
} while (!x->done);
__remove_wait_queue(&x->wait, &wait);
}
x->done--;
- wq_write_unlock_irq(&x->wait.lock);
+ spin_unlock_irq(&x->wait.lock);
}

#define SLEEP_ON_VAR \
@@ -880,14 +960,14 @@
init_waitqueue_entry(&wait, current);

#define SLEEP_ON_HEAD \
- wq_write_lock_irqsave(&q->lock,flags); \
+ spin_lock_irqsave(&q->lock,flags); \
__add_wait_queue(q, &wait); \
- wq_write_unlock(&q->lock);
+ spin_unlock(&q->lock);

#define SLEEP_ON_TAIL \
- wq_write_lock_irq(&q->lock); \
+ spin_lock_irq(&q->lock); \
__remove_wait_queue(q, &wait); \
- wq_write_unlock_irqrestore(&q->lock,flags);
+ spin_unlock_irqrestore(&q->lock, flags);

void interruptible_sleep_on(wait_queue_head_t *q)
{
@@ -1027,6 +1107,11 @@
return TASK_NICE(p);
}

+int idle_cpu(int cpu)
+{
+ return cpu_curr(cpu) == cpu_rq(cpu)->idle;
+}
+
static inline task_t *find_process_by_pid(pid_t pid)
{
return pid ? find_task_by_pid(pid) : current;
@@ -1077,7 +1162,7 @@

/*
* Valid priorities for SCHED_FIFO and SCHED_RR are
- * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_OTHER is 0.
+ * 1..MAX_USER_RT_PRIO, valid priority for SCHED_OTHER is 0.
*/
retval = -EINVAL;
if (lp.sched_priority < 0 || lp.sched_priority > MAX_USER_RT_PRIO-1)
@@ -1177,28 +1262,127 @@
return retval;
}

-asmlinkage long sys_sched_yield(void)
+/**
+ * sys_sched_setaffinity - set the cpu affinity of a process
+ * @pid: pid of the process
+ * @len: length in bytes of the bitmask pointed to by user_mask_ptr
+ * @user_mask_ptr: user-space pointer to the new cpu mask
+ */
+asmlinkage int sys_sched_setaffinity(pid_t pid, unsigned int len,
+ unsigned long *user_mask_ptr)
{
- runqueue_t *rq;
- prio_array_t *array;
+ unsigned long new_mask;
+ task_t *p;
+ int retval;

- rq = this_rq();
+ if (len < sizeof(new_mask))
+ return -EINVAL;
+
+ if (copy_from_user(&new_mask, user_mask_ptr, sizeof(new_mask)))
+ return -EFAULT;
+
+ new_mask &= cpu_online_map;
+ if (!new_mask)
+ return -EINVAL;
+
+ read_lock(&tasklist_lock);
+
+ p = find_process_by_pid(pid);
+ if (!p) {
+ read_unlock(&tasklist_lock);
+ return -ESRCH;
+ }

/*
- * Decrease the yielding task's priority by one, to avoid
- * livelocks. This priority loss is temporary, it's recovered
- * once the current timeslice expires.
- *
- * If priority is already MAX_PRIO-1 then we still
- * roundrobin the task within the runlist.
+ * It is not safe to call set_cpus_allowed with the
+ * tasklist_lock held. We will bump the task_struct's
+ * usage count and then drop tasklist_lock.
*/
- spin_lock_irq(&rq->lock);
- array = current->array;
+ get_task_struct(p);
+ read_unlock(&tasklist_lock);
+
+ retval = -EPERM;
+ if ((current->euid != p->euid) && (current->euid != p->uid) &&
+ !capable(CAP_SYS_NICE))
+ goto out_unlock;
+
+ retval = 0;
+ set_cpus_allowed(p, new_mask);
+
+out_unlock:
+ free_task_struct(p);
+ return retval;
+}
+
+/**
+ * sys_sched_getaffinity - get the cpu affinity of a process
+ * @pid: pid of the process
+ * @len: length in bytes of the bitmask pointed to by user_mask_ptr
+ * @user_mask_ptr: user-space pointer to hold the current cpu mask
+ */
+asmlinkage int sys_sched_getaffinity(pid_t pid, unsigned int len,
+ unsigned long *user_mask_ptr)
+{
+ unsigned long mask;
+ unsigned int real_len;
+ task_t *p;
+ int retval;
+
+ real_len = sizeof(mask);
+
+ if (len < real_len)
+ return -EINVAL;
+
+ read_lock(&tasklist_lock);
+
+ retval = -ESRCH;
+ p = find_process_by_pid(pid);
+ if (!p)
+ goto out_unlock;
+
+ retval = 0;
+ mask = p->cpus_allowed & cpu_online_map;
+
+out_unlock:
+ read_unlock(&tasklist_lock);
+ if (retval)
+ return retval;
+ if (copy_to_user(user_mask_ptr, &mask, real_len))
+ return -EFAULT;
+ return real_len;
+}
+
+asmlinkage long sys_sched_yield(void)
+{
+ runqueue_t *rq = rq_lock(rq);
+ prio_array_t *array = current->array;
+
/*
- * If the task has reached maximum priority (or is a RT task)
- * then just requeue the task to the end of the runqueue:
+ * There are three levels of how a yielding task will give up
+ * the current CPU:
+ *
+ * #1 - it decreases its priority by one. This priority loss is
+ * temporary, it's recovered once the current timeslice
+ * expires.
+ *
+ * #2 - once it has reached the lowest priority level,
+ * it will give up timeslices one by one. (We do not
+ * want to give them up all at once, it's gradual,
+ * to protect the casual yield()er.)
+ *
+ * #3 - once all timeslices are gone we put the process into
+ * the expired array.
+ *
+ * (special rule: RT tasks do not lose any priority, they just
+ * roundrobin on their current priority level.)
*/
- if (likely(current->prio == MAX_PRIO-1 || rt_task(current))) {
+ if (likely(current->prio == MAX_PRIO-1)) {
+ if (current->time_slice <= 1) {
+ dequeue_task(current, rq->active);
+ enqueue_task(current, rq->expired);
+ } else
+ current->time_slice--;
+ } else if (unlikely(rt_task(current))) {
list_del(&current->run_list);
list_add_tail(&current->run_list, array->queue + current->prio);
} else {
@@ -1396,7 +1580,7 @@
spin_unlock(&rq2->lock);
}

-void init_idle(task_t *idle, int cpu)
+void __init init_idle(task_t *idle, int cpu)
{
runqueue_t *idle_rq = cpu_rq(cpu), *rq = cpu_rq(idle->cpu);
unsigned long flags;
@@ -1427,13 +1611,12 @@
int i, j, k;

for (i = 0; i < NR_CPUS; i++) {
- runqueue_t *rq = cpu_rq(i);
prio_array_t *array;

+ rq = cpu_rq(i);
rq->active = rq->arrays;
rq->expired = rq->arrays + 1;
spin_lock_init(&rq->lock);
- spin_lock_init(&rq->frozen);
INIT_LIST_HEAD(&rq->migration_queue);

for (j = 0; j < 2; j++) {
@@ -1497,8 +1680,8 @@
* is removed from the allowed bitmask.
*
* NOTE: the caller must have a valid reference to the task, the
- * task must not exit() & deallocate itself prematurely. No
- * spinlocks can be held.
+ * task must not exit() & deallocate itself prematurely. The
+ * call is not atomic; no spinlocks may be held.
*/
void set_cpus_allowed(task_t *p, unsigned long new_mask)
{
@@ -1518,19 +1701,17 @@
*/
if (new_mask & (1UL << p->cpu)) {
task_rq_unlock(rq, &flags);
- return;
+ goto out;
}
-
/*
- * If the task is not on a runqueue, then it is safe to
- * simply update the task's cpu field.
+ * If the task is not on a runqueue (and not running), then
+ * it is sufficient to simply update the task's cpu field.
*/
- if (!p->array) {
+ if (!p->array && (p != rq->curr)) {
p->cpu = __ffs(p->cpus_allowed);
task_rq_unlock(rq, &flags);
- return;
+ goto out;
}
-
init_MUTEX_LOCKED(&req.sem);
req.task = p;
list_add(&req.list, &rq->migration_queue);
@@ -1538,6 +1719,7 @@
wake_up_process(rq->migration_thread);

down(&req.sem);
+out:
}

static int migration_thread(void * bind_cpu)
@@ -1550,17 +1732,16 @@
daemonize();
sigfillset(&current->blocked);
set_fs(KERNEL_DS);
-
/*
- * The first migration thread is started on CPU #0. This one can
- * migrate the other migration threads to their destination CPUs.
+ * The first migration thread is started on CPU #0. This one can migrate
+ * the other migration threads to their destination CPUs.
*/
if (cpu != 0) {
while (!cpu_rq(cpu_logical_map(0))->migration_thread)
yield();
set_cpus_allowed(current, 1UL << cpu);
}
- printk("migration_task %d on cpu=%d\n", cpu, smp_processor_id());
+ printk("migration_task %d on cpu=%d\n",cpu,smp_processor_id());
ret = setscheduler(0, SCHED_FIFO, &param);

rq = this_rq();
@@ -1632,5 +1813,4 @@
while (!cpu_rq(cpu_logical_map(cpu))->migration_thread)
schedule_timeout(2);
}
-
-#endif /* CONFIG_SMP */
+#endif
--- linux/kernel/timer.c.orig Sun Jun 16 17:43:38 2002
+++ linux/kernel/timer.c Sun Jun 16 17:43:50 2002
@@ -608,17 +608,7 @@
*/
static unsigned long count_active_tasks(void)
{
- struct task_struct *p;
- unsigned long nr = 0;
-
- read_lock(&tasklist_lock);
- for_each_task(p) {
- if ((p->state == TASK_RUNNING ||
- (p->state & TASK_UNINTERRUPTIBLE)))
- nr += FIXED_1;
- }
- read_unlock(&tasklist_lock);
- return nr;
+ return (nr_running() + nr_uninterruptible()) * FIXED_1;
}

/*
--- linux/include/linux/sched.h.orig Sun Jun 16 17:44:13 2002
+++ linux/include/linux/sched.h Sun Jun 16 18:54:01 2002
@@ -76,6 +76,7 @@
extern int nr_threads;
extern int last_pid;
extern unsigned long nr_running(void);
+extern unsigned long nr_uninterruptible(void);

#include <linux/fs.h>
#include <linux/time.h>
@@ -610,6 +611,7 @@
#define CURRENT_TIME (xtime.tv_sec)

extern void FASTCALL(__wake_up(wait_queue_head_t *q, unsigned int mode, int nr));
+extern void FASTCALL(__wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr));
extern void FASTCALL(sleep_on(wait_queue_head_t *q));
extern long FASTCALL(sleep_on_timeout(wait_queue_head_t *q,
signed long timeout));
@@ -626,6 +628,12 @@
#define wake_up_interruptible(x) __wake_up((x),TASK_INTERRUPTIBLE, 1)
#define wake_up_interruptible_nr(x, nr) __wake_up((x),TASK_INTERRUPTIBLE, nr)
#define wake_up_interruptible_all(x) __wake_up((x),TASK_INTERRUPTIBLE, 0)
+#ifdef CONFIG_SMP
+#define wake_up_interruptible_sync(x) __wake_up_sync((x),TASK_INTERRUPTIBLE, 1)
+#else
+#define wake_up_interruptible_sync(x) __wake_up((x),TASK_INTERRUPTIBLE, 1)
+#endif
+
asmlinkage long sys_wait4(pid_t pid,unsigned int * stat_addr, int options, struct rusage * ru);

extern int in_group_p(gid_t);
--- linux/include/asm-i386/system.h.orig Sun Jun 16 17:48:41 2002
+++ linux/include/asm-i386/system.h Sun Jun 16 18:53:47 2002
@@ -12,25 +12,22 @@
struct task_struct; /* one of the stranger aspects of C forward declarations.. */
extern void FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));

-#define prepare_to_switch() do { } while(0)
#define switch_to(prev,next,last) do { \
asm volatile("pushl %%esi\n\t" \
"pushl %%edi\n\t" \
"pushl %%ebp\n\t" \
"movl %%esp,%0\n\t" /* save ESP */ \
- "movl %3,%%esp\n\t" /* restore ESP */ \
+ "movl %2,%%esp\n\t" /* restore ESP */ \
"movl $1f,%1\n\t" /* save EIP */ \
- "pushl %4\n\t" /* restore EIP */ \
+ "pushl %3\n\t" /* restore EIP */ \
"jmp __switch_to\n" \
"1:\t" \
"popl %%ebp\n\t" \
"popl %%edi\n\t" \
"popl %%esi\n\t" \
- :"=m" (prev->thread.esp),"=m" (prev->thread.eip), \
- "=b" (last) \
+ :"=m" (prev->thread.esp),"=m" (prev->thread.eip) \
:"m" (next->thread.esp),"m" (next->thread.eip), \
- "a" (prev), "d" (next), \
- "b" (prev)); \
+ "a" (prev), "d" (next)); \
} while (0)

#define _set_base(addr,base) do { unsigned long __pr; \
--- linux/include/asm-sparc64/system.h.orig Sun Jun 16 18:19:03 2002
+++ linux/include/asm-sparc64/system.h Sun Jun 16 18:19:44 2002
@@ -149,7 +149,11 @@

#define flush_user_windows flushw_user
#define flush_register_windows flushw_all
-#define prepare_to_switch flushw_all
+
+#define prepare_arch_schedule(prev) task_lock(prev)
+#define finish_arch_schedule(prev) task_unlock(prev)
+#define prepare_arch_switch(rq) do { spin_unlock(&(rq)->lock); flushw_all(); }
+#define finish_arch_switch(rq) __sti()

#ifndef CONFIG_DEBUG_SPINLOCK
#define CHECK_LOCKS(PREV) do { } while(0)
--- linux/arch/i386/kernel/entry.S.orig Sun Jun 16 18:14:33 2002
+++ linux/arch/i386/kernel/entry.S Sun Jun 16 18:15:05 2002
@@ -639,8 +639,8 @@
.long SYMBOL_NAME(sys_tkill)
.long SYMBOL_NAME(sys_ni_syscall) /* reserved for sendfile64 */
.long SYMBOL_NAME(sys_ni_syscall) /* 240 reserved for futex */
- .long SYMBOL_NAME(sys_ni_syscall) /* reserved for sched_setaffinity */
- .long SYMBOL_NAME(sys_ni_syscall) /* reserved for sched_getaffinity */
+ .long SYMBOL_NAME(sys_sched_setaffinity)
+ .long SYMBOL_NAME(sys_sched_getaffinity)

.rept NR_syscalls-(.-sys_call_table)/4
.long SYMBOL_NAME(sys_ni_syscall)