Subject: [PATCH v3 02/10] sched/fair: remove redundant cpu_cgrp_subsys->fork()

We use cpu_cgrp_subsys->fork() to set the task group for the new fair task
in cgroup_post_fork().

Since commit b1e8206582f9 ("sched: Fix yet more sched_fork() races")
already does set_task_rq() for the new fair task in sched_cgroup_fork(),
cpu_cgrp_subsys->fork() can be removed:

cgroup_can_fork()	--> pin parent's sched_task_group
sched_cgroup_fork()
  __set_task_cpu()
    set_task_rq()
cgroup_post_fork()
  ss->fork() := cpu_cgroup_fork()
    sched_change_group(..., TASK_SET_GROUP)
      task_set_group_fair()
        set_task_rq()	--> can be removed
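
To make the ordering concrete, below is a minimal standalone userspace C
model of the call chain above (hypothetical, heavily simplified types; not
kernel code). It only demonstrates that by the time ss->fork() runs,
sched_cgroup_fork() has already done the set_task_rq() work once:

/*
 * Standalone userspace sketch (hypothetical simplified types, not
 * kernel code) of the fork-time ordering shown above.
 */
#include <stdio.h>

struct task_struct {
	int set_task_rq_calls;	/* counts how often the group bits are set */
};

static void set_task_rq(struct task_struct *p)
{
	/* In the kernel: point p at its task_group's per-CPU cfs_rq. */
	p->set_task_rq_calls++;
}

static void sched_cgroup_fork(struct task_struct *p)
{
	/* __set_task_cpu() -> set_task_rq(): group bits already set here. */
	set_task_rq(p);
}

static void cpu_cgroup_fork(struct task_struct *p)
{
	/* ss->fork() from cgroup_post_fork(): repeats the same work. */
	set_task_rq(p);
}

int main(void)
{
	struct task_struct p = { 0 };

	sched_cgroup_fork(&p);
	cpu_cgroup_fork(&p);	/* the call this patch removes */
	printf("set_task_rq() ran %d times for one fork\n",
	       p.set_task_rq_calls);	/* 2 before the patch, 1 after */
	return 0;
}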

After this change, task_change_group_fair() only needs to care about
task cgroup migration, which makes the code much simpler.

    Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
    Reviewed-by: Vincent Guittot <vincent.guittot@linaro.org>
    Reviewed-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
    ---
 kernel/sched/core.c  | 27 ++++-----------------------
 kernel/sched/fair.c  | 23 +----------------------
 kernel/sched/sched.h |  5 +----
 3 files changed, 6 insertions(+), 49 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 5555e49c4e12..614d7180c99e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -481,8 +481,7 @@ sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags) { }
  *				p->se.load, p->rt_priority,
  *				p->dl.dl_{runtime, deadline, period, flags, bw, density}
  *  - sched_setnuma():		p->numa_preferred_nid
- *  - sched_move_task()/
- *    cpu_cgroup_fork():	p->sched_task_group
+ *  - sched_move_task():	p->sched_task_group
  *  - uclamp_update_active()	p->uclamp*
  *
  * p->state <- TASK_*:
@@ -10125,7 +10124,7 @@ void sched_release_group(struct task_group *tg)
 	spin_unlock_irqrestore(&task_group_lock, flags);
 }
 
-static void sched_change_group(struct task_struct *tsk, int type)
+static void sched_change_group(struct task_struct *tsk)
 {
 	struct task_group *tg;
 
@@ -10141,7 +10140,7 @@ static void sched_change_group(struct task_struct *tsk, int type)
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	if (tsk->sched_class->task_change_group)
-		tsk->sched_class->task_change_group(tsk, type);
+		tsk->sched_class->task_change_group(tsk);
 	else
 #endif
 		set_task_rq(tsk, task_cpu(tsk));
@@ -10172,7 +10171,7 @@ void sched_move_task(struct task_struct *tsk)
 	if (running)
 		put_prev_task(rq, tsk);
 
-	sched_change_group(tsk, TASK_MOVE_GROUP);
+	sched_change_group(tsk);
 
 	if (queued)
 		enqueue_task(rq, tsk, queue_flags);
@@ -10250,23 +10249,6 @@ static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
 	sched_unregister_group(tg);
 }
 
-/*
- * This is called before wake_up_new_task(), therefore we really only
- * have to set its group bits, all the other stuff does not apply.
- */
-static void cpu_cgroup_fork(struct task_struct *task)
-{
-	struct rq_flags rf;
-	struct rq *rq;
-
-	rq = task_rq_lock(task, &rf);
-
-	update_rq_clock(rq);
-	sched_change_group(task, TASK_SET_GROUP);
-
-	task_rq_unlock(rq, task, &rf);
-}
-
 static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
 {
 	struct task_struct *task;
@@ -11132,7 +11114,6 @@ struct cgroup_subsys cpu_cgrp_subsys = {
 	.css_released	= cpu_cgroup_css_released,
 	.css_free	= cpu_cgroup_css_free,
 	.css_extra_stat_show = cpu_extra_stat_show,
-	.fork		= cpu_cgroup_fork,
 	.can_attach	= cpu_cgroup_can_attach,
 	.attach		= cpu_cgroup_attach,
 	.legacy_cftypes	= cpu_legacy_files,
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 77cd2bad17a8..89626b115660 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -11661,15 +11661,7 @@ void init_cfs_rq(struct cfs_rq *cfs_rq)
 }
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-static void task_set_group_fair(struct task_struct *p)
-{
-	struct sched_entity *se = &p->se;
-
-	set_task_rq(p, task_cpu(p));
-	se->depth = se->parent ? se->parent->depth + 1 : 0;
-}
-
-static void task_move_group_fair(struct task_struct *p)
+static void task_change_group_fair(struct task_struct *p)
 {
 	detach_task_cfs_rq(p);
 	set_task_rq(p, task_cpu(p));
@@ -11681,19 +11673,6 @@ static void task_move_group_fair(struct task_struct *p)
 	attach_task_cfs_rq(p);
 }
 
-static void task_change_group_fair(struct task_struct *p, int type)
-{
-	switch (type) {
-	case TASK_SET_GROUP:
-		task_set_group_fair(p);
-		break;
-
-	case TASK_MOVE_GROUP:
-		task_move_group_fair(p);
-		break;
-	}
-}
-
 void free_fair_sched_group(struct task_group *tg)
 {
 	int i;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 8cc3eb7b86cd..19e0076e4245 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2203,11 +2203,8 @@ struct sched_class {
 
 	void (*update_curr)(struct rq *rq);
 
-#define TASK_SET_GROUP		0
-#define TASK_MOVE_GROUP		1
-
 #ifdef CONFIG_FAIR_GROUP_SCHED
-	void (*task_change_group)(struct task_struct *p, int type);
+	void (*task_change_group)(struct task_struct *p);
 #endif
 };

    --
    2.36.1