From: Aaron Lu <aaron.lu@linux.alibaba.com>
Subject: [RFC PATCH 08/13] sched/fair: wrapper for cfs_rq->min_vruntime
Date: 2020-03-04

Add a wrapper function cfs_rq_min_vruntime(cfs_rq) that returns
cfs_rq->min_vruntime.

It will be used in the following patch; no functional change.

Signed-off-by: Aaron Lu <ziqian.lzq@antfin.com>
---
kernel/sched/fair.c | 27 ++++++++++++++++-----------
1 file changed, 16 insertions(+), 11 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 8432de767730..d99ea6ee7af2 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -449,6 +449,11 @@ find_matching_se(struct sched_entity **se, struct sched_entity **pse)

#endif /* CONFIG_FAIR_GROUP_SCHED */

+static inline u64 cfs_rq_min_vruntime(struct cfs_rq *cfs_rq)
+{
+	return cfs_rq->min_vruntime;
+}
+
static __always_inline
void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);

@@ -485,7 +490,7 @@ static void update_min_vruntime(struct cfs_rq *cfs_rq)
struct sched_entity *curr = cfs_rq->curr;
struct rb_node *leftmost = rb_first_cached(&cfs_rq->tasks_timeline);

- u64 vruntime = cfs_rq->min_vruntime;
+ u64 vruntime = cfs_rq_min_vruntime(cfs_rq);

if (curr) {
if (curr->on_rq)
@@ -505,7 +510,7 @@ static void update_min_vruntime(struct cfs_rq *cfs_rq)
}

/* ensure we never gain time by being placed backwards. */
- cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
+ cfs_rq->min_vruntime = max_vruntime(cfs_rq_min_vruntime(cfs_rq), vruntime);
#ifndef CONFIG_64BIT
smp_wmb();
cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
@@ -3833,7 +3838,7 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq) {}
static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
#ifdef CONFIG_SCHED_DEBUG
- s64 d = se->vruntime - cfs_rq->min_vruntime;
+ s64 d = se->vruntime - cfs_rq_min_vruntime(cfs_rq);

if (d < 0)
d = -d;
@@ -3846,7 +3851,7 @@ static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
static void
place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
{
- u64 vruntime = cfs_rq->min_vruntime;
+ u64 vruntime = cfs_rq_min_vruntime(cfs_rq);

/*
* The 'current' period is already promised to the current tasks,
@@ -3939,7 +3944,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
* update_curr().
*/
if (renorm && curr)
- se->vruntime += cfs_rq->min_vruntime;
+ se->vruntime += cfs_rq_min_vruntime(cfs_rq);

update_curr(cfs_rq);

@@ -3950,7 +3955,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
* fairness detriment of existing tasks.
*/
if (renorm && !curr)
- se->vruntime += cfs_rq->min_vruntime;
+ se->vruntime += cfs_rq_min_vruntime(cfs_rq);

/*
* When enqueuing a sched_entity, we must:
@@ -4063,7 +4068,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
* can move min_vruntime forward still more.
*/
if (!(flags & DEQUEUE_SLEEP))
- se->vruntime -= cfs_rq->min_vruntime;
+ se->vruntime -= cfs_rq_min_vruntime(cfs_rq);

/* return excess runtime on last dequeue */
return_cfs_rq_runtime(cfs_rq);
@@ -6396,7 +6401,7 @@ static void migrate_task_rq_fair(struct task_struct *p, int new_cpu)
min_vruntime = cfs_rq->min_vruntime;
} while (min_vruntime != min_vruntime_copy);
#else
- min_vruntime = cfs_rq->min_vruntime;
+ min_vruntime = cfs_rq_min_vruntime(cfs_rq);
#endif

se->vruntime -= min_vruntime;
@@ -10382,7 +10387,7 @@ static void task_fork_fair(struct task_struct *p)
resched_curr(rq);
}

- se->vruntime -= cfs_rq->min_vruntime;
+ se->vruntime -= cfs_rq_min_vruntime(cfs_rq);
rq_unlock(rq, &rf);
}

@@ -10502,7 +10507,7 @@ static void detach_task_cfs_rq(struct task_struct *p)
* cause 'unlimited' sleep bonus.
*/
place_entity(cfs_rq, se, 0);
- se->vruntime -= cfs_rq->min_vruntime;
+ se->vruntime -= cfs_rq_min_vruntime(cfs_rq);
}

detach_entity_cfs_rq(se);
@@ -10516,7 +10521,7 @@ static void attach_task_cfs_rq(struct task_struct *p)
attach_entity_cfs_rq(se);

if (!vruntime_normalized(p))
- se->vruntime += cfs_rq->min_vruntime;
+ se->vruntime += cfs_rq_min_vruntime(cfs_rq);
}

static void switched_from_fair(struct rq *rq, struct task_struct *p)
--
2.17.1
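
Note: the point of a one-line accessor like this is that every reader of
min_vruntime now goes through a single function, so a later patch can change
what the value means in exactly one place without touching any call site.
The stand-alone C sketch below only illustrates that pattern; the struct
cfs_rq here is a trimmed stand-in with just the one field, not the kernel
structure, and main() is merely a driver showing the call site staying
unchanged.

#include <inttypes.h>
#include <stdio.h>

/* Trimmed stand-in for the kernel's struct cfs_rq: only the field used here. */
struct cfs_rq {
	uint64_t min_vruntime;
};

/*
 * The wrapper from the patch: today it simply returns the field.
 * Because callers use the accessor instead of the field, a later change
 * can redefine what "min_vruntime" means for a cfs_rq in one place.
 */
static inline uint64_t cfs_rq_min_vruntime(struct cfs_rq *cfs_rq)
{
	return cfs_rq->min_vruntime;
}

int main(void)
{
	struct cfs_rq cfs_rq = { .min_vruntime = 1000 };

	/* Call sites read through the wrapper, never the field directly. */
	printf("min_vruntime = %" PRIu64 "\n", cfs_rq_min_vruntime(&cfs_rq));
	return 0;
}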