 
    Subject: Re: [Regression] 2.6.24-git3: Major annoyance during suspend/hibernation on x86-64 (bisected)
    From: Peter Zijlstra <a.p.zijlstra@chello.nl>
    Date: Fri, 01 Feb 2008

    On Thu, 2008-01-31 at 21:54 +0100, Rafael J. Wysocki wrote:
    > On Thursday, 31 of January 2008, Peter Zijlstra wrote:

    > > I can reproduce this:
    > >
    > > [root@opteron cpu1]# time echo 0 > online
    > >
    > > real 0m6.230s
    > > user 0m0.000s
    > > sys 0m0.010s
    > > [root@opteron cpu1]# echo 1 > online
    > > [root@opteron cpu1]# time echo 0 > online
    > >
    > > real 0m7.966s
    > > user 0m0.000s
    > > sys 0m0.011s
    > >
    > >
    > > I'll have a look at it.
    >
    > Much appreciated, thanks!

    The patch below fixes it for me.

    ---
    - restore the old wakeup mechanism (see the first note below)
    - fix break usage in do_each_thread() { } while_each_thread() (see the second note below)
    - fix the hotplug switch statement: a fall-through case was broken.

    Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
    ---
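    A note on the first item: the watchdog thread only re-checks
    kthread_should_stop() once its msleep_interruptible(10000) expires (a
    bare wake-up just puts it back to sleep for the remainder; only a
    signal ends the sleep early), so kthread_stop() during cpu offline
    ends up waiting out the sleep -- which is where the 6-8 seconds above
    come from.  With the wakeup mechanism restored, the thread parks in
    schedule() and reacts to wake_up_process() right away.  A userspace
    sketch (pthreads, not the kernel API) of that difference:

    /*
     * Sketch only: stopping a thread parked in a fixed-length sleep means
     * waiting out the rest of that sleep, while a thread that blocks until
     * it is explicitly woken stops almost instantly.
     */
    #include <pthread.h>
    #include <stdio.h>
    #include <time.h>
    #include <unistd.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
    static int stop;

    static int should_stop(void)
    {
    	int ret;

    	pthread_mutex_lock(&lock);
    	ret = stop;
    	pthread_mutex_unlock(&lock);
    	return ret;
    }

    /* like the msleep_interruptible(10000) loop */
    static void *sleepy(void *arg)
    {
    	while (!should_stop())
    		sleep(10);		/* a stop request waits this out */
    	return arg;
    }

    /* like set_current_state(TASK_INTERRUPTIBLE) + schedule() */
    static void *wakeable(void *arg)
    {
    	pthread_mutex_lock(&lock);
    	while (!stop)
    		pthread_cond_wait(&cond, &lock);	/* returns as soon as signalled */
    	pthread_mutex_unlock(&lock);
    	return arg;
    }

    static void run(void *(*fn)(void *), const char *name)
    {
    	pthread_t t;
    	struct timespec a, b;

    	stop = 0;
    	pthread_create(&t, NULL, fn, NULL);
    	sleep(1);			/* let the thread park */

    	clock_gettime(CLOCK_MONOTONIC, &a);
    	pthread_mutex_lock(&lock);
    	stop = 1;
    	pthread_cond_signal(&cond);
    	pthread_mutex_unlock(&lock);
    	pthread_join(t, NULL);
    	clock_gettime(CLOCK_MONOTONIC, &b);

    	printf("%-10s stop took %.2fs\n", name,
    	       (double)(b.tv_sec - a.tv_sec) + (b.tv_nsec - a.tv_nsec) / 1e9);
    }

    int main(void)
    {
    	run(sleepy, "sleepy:");		/* ~9s, like the slow "echo 0 > online" */
    	run(wakeable, "wakeable:");	/* returns right away */
    	return 0;
    }

    (Build with gcc -pthread; the first stop waits out most of the sleep,
    the second is immediate.)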

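    And on the second item: do_each_thread() { } while_each_thread()
    expands to a for loop wrapping a do { } while loop, so a plain break
    only leaves the inner do/while and the walk over thread groups keeps
    going (with max_count heading below zero) -- hence the goto.  A toy
    userspace program with the same loop shape (simplified stand-in
    macros, not the ones from sched.h):

    #include <stdio.h>

    /* Same shape as the kernel macros: a for loop wrapping a do/while. */
    #define do_each(g, t)	for ((g) = 0, (t) = 0; (g) < 3; (g)++, (t) = 0) do
    #define while_each(g, t)	while (++(t) < 2)

    int main(void)
    {
    	int g, t;
    	int max_count = 4;	/* plays the role of sysctl_hung_task_check_count */

    	do_each(g, t) {
    		if (!--max_count)
    			break;		/* exits the inner do/while only */
    		printf("visiting group %d, thread %d (max_count = %d)\n",
    		       g, t, max_count);
    	} while_each(g, t);

    	/* The walk continued after the limit hit zero. */
    	printf("after the walk: max_count = %d\n", max_count);
    	return 0;
    }
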
    diff --git a/kernel/softlockup.c b/kernel/softlockup.c
    index c1d7655..7c2da88 100644
    --- a/kernel/softlockup.c
    +++ b/kernel/softlockup.c
    @@ -101,6 +101,10 @@ void softlockup_tick(void)
     
     	now = get_timestamp(this_cpu);
     
    +	/* Wake up the high-prio watchdog task every second: */
    +	if (now > (touch_timestamp + 1))
    +		wake_up_process(per_cpu(watchdog_task, this_cpu));
    +
     	/* Warn about unreasonable delays: */
     	if (now <= (touch_timestamp + softlockup_thresh))
     		return;
    @@ -191,11 +195,11 @@ static void check_hung_uninterruptible_tasks(int this_cpu)
     	read_lock(&tasklist_lock);
     	do_each_thread(g, t) {
     		if (!--max_count)
    -			break;
    +			goto unlock;
     		if (t->state & TASK_UNINTERRUPTIBLE)
     			check_hung_task(t, now);
     	} while_each_thread(g, t);
    -
    + unlock:
     	read_unlock(&tasklist_lock);
     }
     
    @@ -218,14 +222,19 @@ static int watchdog(void *__bind_cpu)
     	 * debug-printout triggers in softlockup_tick().
     	 */
     	while (!kthread_should_stop()) {
    +		set_current_state(TASK_INTERRUPTIBLE);
     		touch_softlockup_watchdog();
    -		msleep_interruptible(10000);
    +		schedule();
    +
    +		if (kthread_should_stop())
    +			break;
     
     		if (this_cpu != check_cpu)
     			continue;
     
     		if (sysctl_hung_task_timeout_secs)
     			check_hung_uninterruptible_tasks(this_cpu);
    +
     	}
     
     	return 0;
    @@ -259,13 +268,6 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
     		wake_up_process(per_cpu(watchdog_task, hotcpu));
     		break;
     #ifdef CONFIG_HOTPLUG_CPU
    -	case CPU_UP_CANCELED:
    -	case CPU_UP_CANCELED_FROZEN:
    -		if (!per_cpu(watchdog_task, hotcpu))
    -			break;
    -		/* Unbind so it can run.  Fall thru. */
    -		kthread_bind(per_cpu(watchdog_task, hotcpu),
    -			     any_online_cpu(cpu_online_map));
     	case CPU_DOWN_PREPARE:
     	case CPU_DOWN_PREPARE_FROZEN:
     		if (hotcpu == check_cpu) {
    @@ -275,6 +277,14 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
     			check_cpu = any_online_cpu(temp_cpu_online_map);
     		}
     		break;
    +
    +	case CPU_UP_CANCELED:
    +	case CPU_UP_CANCELED_FROZEN:
    +		if (!per_cpu(watchdog_task, hotcpu))
    +			break;
    +		/* Unbind so it can run.  Fall thru. */
    +		kthread_bind(per_cpu(watchdog_task, hotcpu),
    +			     any_online_cpu(cpu_online_map));
     	case CPU_DEAD:
     	case CPU_DEAD_FROZEN:
     		p = per_cpu(watchdog_task, hotcpu);


