Subject: [PATCH 4.9 116/144] stop_machine: Use raw spinlocks
    4.9-stable review patch.  If anyone has any objections, please let me know.

    ------------------

    From: Thomas Gleixner <tglx@linutronix.de>

    [ Upstream commit de5b55c1d4e30740009864eb35ce4ed856aac01d ]

Use raw spinlocks in stop_machine() to allow locking in irq-off and
preempt-disabled regions on -RT, where an ordinary spinlock_t is a
sleeping lock. This also documents the possible locking context in
general.
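For background: on a PREEMPT_RT kernel, spinlock_t is substituted by a
sleeping, rtmutex-based lock, so it must not be acquired while interrupts
or preemption are disabled; raw_spinlock_t always remains a true spinning
lock and stays legal in those contexts. On non-RT kernels the two behave
identically, so this conversion is a no-op there. A minimal sketch of the
pattern being converted (the lock and function names below are
illustrative, not the kernel's actual per-CPU stopper):

#include <linux/spinlock.h>

/*
 * Illustrative only: demo_lock stands in for the per-CPU
 * cpu_stopper::lock converted by this patch.
 *
 * On -RT, spinlock_t can sleep, so it cannot be taken with IRQs off
 * or preemption disabled; a raw_spinlock_t can.
 */
static DEFINE_RAW_SPINLOCK(demo_lock);

static void demo_queue_from_atomic_context(void)
{
	unsigned long flags;

	/* Legal in irq-off/preempt-off regions on -RT: never sleeps. */
	raw_spin_lock_irqsave(&demo_lock, flags);
	/* ... manipulate a pending-works list, as cpu_stop_queue_work() does ... */
	raw_spin_unlock_irqrestore(&demo_lock, flags);
}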

    [bigeasy: update patch description.]
    Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
    Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
    Link: https://lkml.kernel.org/r/20180423191635.6014-1-bigeasy@linutronix.de
    Signed-off-by: Sasha Levin <alexander.levin@microsoft.com>
    Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
    ---
    kernel/stop_machine.c | 24 ++++++++++++------------
    1 file changed, 12 insertions(+), 12 deletions(-)

--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -36,7 +36,7 @@ struct cpu_stop_done {
 struct cpu_stopper {
 	struct task_struct	*thread;
 
-	spinlock_t		lock;
+	raw_spinlock_t		lock;
 	bool			enabled;	/* is this stopper enabled? */
 	struct list_head	works;		/* list of pending works */
 
@@ -78,13 +78,13 @@ static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
 	unsigned long flags;
 	bool enabled;
 
-	spin_lock_irqsave(&stopper->lock, flags);
+	raw_spin_lock_irqsave(&stopper->lock, flags);
 	enabled = stopper->enabled;
 	if (enabled)
 		__cpu_stop_queue_work(stopper, work);
 	else if (work->done)
 		cpu_stop_signal_done(work->done);
-	spin_unlock_irqrestore(&stopper->lock, flags);
+	raw_spin_unlock_irqrestore(&stopper->lock, flags);
 
 	return enabled;
 }
@@ -231,8 +231,8 @@ static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
 	struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2);
 	int err;
 retry:
-	spin_lock_irq(&stopper1->lock);
-	spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING);
+	raw_spin_lock_irq(&stopper1->lock);
+	raw_spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING);
 
 	err = -ENOENT;
 	if (!stopper1->enabled || !stopper2->enabled)
@@ -255,8 +255,8 @@ retry:
 	__cpu_stop_queue_work(stopper1, work1);
 	__cpu_stop_queue_work(stopper2, work2);
 unlock:
-	spin_unlock(&stopper2->lock);
-	spin_unlock_irq(&stopper1->lock);
+	raw_spin_unlock(&stopper2->lock);
+	raw_spin_unlock_irq(&stopper1->lock);
 
 	if (unlikely(err == -EDEADLK)) {
 		while (stop_cpus_in_progress)
@@ -448,9 +448,9 @@ static int cpu_stop_should_run(unsigned int cpu)
 	unsigned long flags;
 	int run;
 
-	spin_lock_irqsave(&stopper->lock, flags);
+	raw_spin_lock_irqsave(&stopper->lock, flags);
 	run = !list_empty(&stopper->works);
-	spin_unlock_irqrestore(&stopper->lock, flags);
+	raw_spin_unlock_irqrestore(&stopper->lock, flags);
 	return run;
 }
 
@@ -461,13 +461,13 @@ static void cpu_stopper_thread(unsigned int cpu)
 
 repeat:
 	work = NULL;
-	spin_lock_irq(&stopper->lock);
+	raw_spin_lock_irq(&stopper->lock);
 	if (!list_empty(&stopper->works)) {
 		work = list_first_entry(&stopper->works,
 					struct cpu_stop_work, list);
 		list_del_init(&work->list);
 	}
-	spin_unlock_irq(&stopper->lock);
+	raw_spin_unlock_irq(&stopper->lock);
 
 	if (work) {
 		cpu_stop_fn_t fn = work->fn;
@@ -541,7 +541,7 @@ static int __init cpu_stop_init(void)
 	for_each_possible_cpu(cpu) {
 		struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
 
-		spin_lock_init(&stopper->lock);
+		raw_spin_lock_init(&stopper->lock);
 		INIT_LIST_HEAD(&stopper->works);
 	}
 

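A note on the cpu_stop_queue_two_works() hunks above: that function is the
one spot where two stopper locks of the same lock class are held at once,
so the second acquisition carries the SINGLE_DEPTH_NESTING annotation to
tell lockdep the one-level nesting is intentional rather than a
self-deadlock. A minimal sketch of that double-lock pattern, with the same
caveat that the two locks here are illustrative stand-ins for
stopper1->lock and stopper2->lock (which in the real code share one
per-CPU lock class):

#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(lock_a);	/* stand-in for stopper1->lock */
static DEFINE_RAW_SPINLOCK(lock_b);	/* stand-in for stopper2->lock */

static void demo_lock_two(void)
{
	/* Outer lock also disables IRQs, as in cpu_stop_queue_two_works(). */
	raw_spin_lock_irq(&lock_a);
	/*
	 * Second lock of the same class: SINGLE_DEPTH_NESTING tells
	 * lockdep this one-level nesting is deliberate.
	 */
	raw_spin_lock_nested(&lock_b, SINGLE_DEPTH_NESTING);

	/* ... queue one work item on each CPU's stopper ... */

	raw_spin_unlock(&lock_b);
	raw_spin_unlock_irq(&lock_a);
}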