Date: 2020-09-17
From: Thomas Gleixner <tglx@linutronix.de>
Subject: [patch 05/10] sched/core: Split __set_cpus_allowed_ptr()
Split the function so the actual work part can be reused and called from
places which hold rq::lock already.

No functional change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 kernel/sched/core.c | 64 ++++++++++++++++++++++++++++++----------------------
 1 file changed, 38 insertions(+), 26 deletions(-)
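
Reviewer note (not part of the patch): a minimal sketch of the call
pattern this split enables. The caller below is hypothetical; the point
is that code which already holds rq::lock, taken via task_rq_lock(),
can now call the work part directly. As in the patch,
set_cpus_allowed_ptr_locked() consumes the lock: every return path
drops rq::lock, either via task_rq_unlock() or by handing off to the
stopper thread, so such a caller must not unlock again.

static int example_change_affinity(struct task_struct *p,
				   const struct cpumask *new_mask)
{
	struct rq_flags rf;
	struct rq *rq;

	/* Take p's rq::lock and pin the task's runqueue association. */
	rq = task_rq_lock(p, &rf);

	/* ... work that needs rq::lock held ... */

	/* Does the affinity change and drops rq::lock on all paths. */
	return set_cpus_allowed_ptr_locked(p, new_mask, false, rq, &rf);
}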

--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1863,34 +1863,17 @@ void do_set_cpus_allowed(struct task_str
 		set_next_task(rq, p);
 }
 
-/*
- * Change a given task's CPU affinity. Migrate the thread to a
- * proper CPU and schedule it away if the CPU it's executing on
- * is removed from the allowed bitmask.
- *
- * NOTE: the caller must have a valid reference to the task, the
- * task must not exit() & deallocate itself prematurely. The
- * call is not atomic; no spinlocks may be held.
- */
-static int __set_cpus_allowed_ptr(struct task_struct *p,
-				  const struct cpumask *new_mask, bool check)
+static int set_cpus_allowed_ptr_locked(struct task_struct *p,
+				       const struct cpumask *new_mask,
+				       bool check,
+				       struct rq *rq, struct rq_flags *rf)
 {
 	const struct cpumask *cpu_valid_mask = cpu_active_mask;
 	unsigned int dest_cpu;
-	struct rq_flags rf;
-	struct rq *rq;
 	int ret = 0;
 
-	rq = task_rq_lock(p, &rf);
 	update_rq_clock(rq);
 
-	if (p->flags & PF_KTHREAD) {
-		/*
-		 * Kernel threads are allowed on online && !active CPUs
-		 */
-		cpu_valid_mask = cpu_online_mask;
-	}
-
 	/*
 	 * Must re-check here, to close a race against __kthread_bind(),
 	 * sched_setaffinity() is not guaranteed to observe the flag.
@@ -1900,8 +1883,12 @@ static int __set_cpus_allowed_ptr(struct
 		goto out;
 	}
 
-	if (cpumask_equal(&p->cpus_mask, new_mask))
-		goto out;
+	if (p->flags & PF_KTHREAD) {
+		/*
+		 * Kernel threads are allowed on online && !active CPUs
+		 */
+		cpu_valid_mask = cpu_online_mask;
+	}
 
 	/*
 	 * Picking a ~random cpu helps in cases where we are changing affinity
@@ -1933,7 +1920,7 @@ static int __set_cpus_allowed_ptr(struct
 	if (task_running(rq, p) || p->state == TASK_WAKING) {
 		struct migration_arg arg = { p, dest_cpu };
 		/* Need help from migration thread: drop lock and wait. */
-		task_rq_unlock(rq, p, &rf);
+		task_rq_unlock(rq, p, rf);
 		stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
 		return 0;
 	} else if (task_on_rq_queued(p)) {
@@ -1941,10 +1928,35 @@ static int __set_cpus_allowed_ptr(struct
 		 * OK, since we're going to drop the lock immediately
 		 * afterwards anyway.
 		 */
-		rq = move_queued_task(rq, &rf, p, dest_cpu);
+		rq = move_queued_task(rq, rf, p, dest_cpu);
 	}
 out:
-	task_rq_unlock(rq, p, &rf);
+	task_rq_unlock(rq, p, rf);
+	return ret;
+}
+
+/*
+ * Change a given task's CPU affinity. Migrate the thread to a
+ * proper CPU and schedule it away if the CPU it's executing on
+ * is removed from the allowed bitmask.
+ *
+ * NOTE: the caller must have a valid reference to the task, the
+ * task must not exit() & deallocate itself prematurely. The
+ * call is not atomic; no spinlocks may be held.
+ */
+static int __set_cpus_allowed_ptr(struct task_struct *p,
+				  const struct cpumask *new_mask, bool check)
+{
+	struct rq_flags rf;
+	struct rq *rq;
+	int ret = 0;
+
+	rq = task_rq_lock(p, &rf);
+
+	if (cpumask_equal(&p->cpus_mask, new_mask))
+		task_rq_unlock(rq, p, &rf);
+	else
+		ret = set_cpus_allowed_ptr_locked(p, new_mask, check, rq, &rf);
 
 	return ret;
 }
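
For context, and unchanged by this patch: the exported entry point is a
thin wrapper around the function being split here. Quoted from memory of
the surrounding kernel/sched/core.c, so treat it as illustrative:

int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
{
	return __set_cpus_allowed_ptr(p, new_mask, false);
}
EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);

sched_setaffinity() uses the check=true variant, which is what makes the
PF_NO_SETAFFINITY re-check in the locked part reachable.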