From: Cruz Zhao <CruzZhao@linux.alibaba.com>
Subject: [PATCH] sched/core: Skip sched_core_fork/free() when core sched is disabled
Date: 2022-04-24
As __put_task_struct() and copy_process() are hot-path functions, the
unconditional calls to sched_core_fork()/sched_core_free() add overhead
when core scheduling is disabled. Skip both calls when core scheduling
is disabled.

Signed-off-by: Cruz Zhao <CruzZhao@linux.alibaba.com>
---
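A note on the mechanism, since the justification rests on it:
sched_core_disabled() is built on
static_branch_unlikely(&__sched_core_enabled), so while the key is false
the check compiles down to a runtime-patched straight-line branch rather
than a load-and-test, and the two helper calls vanish from the hot paths
entirely. Below is a minimal userspace sketch of the call-site pattern
this patch introduces; it uses a plain bool in place of the real static
key (which is runtime-patched code), and all names are hypothetical:

#include <stdbool.h>
#include <stdio.h>

/*
 * Stands in for the __sched_core_enabled static key. A real static key
 * patches the branch at runtime; a plain bool only models the logic.
 */
static bool core_sched_enabled;

static inline bool sched_core_disabled_sketch(void)
{
	return !core_sched_enabled;
}

/* Stands in for sched_core_free(): the work we want to skip. */
static void sched_core_free_sketch(void)
{
	puts("slow path: tearing down core-scheduling state");
}

/* Mirrors the guarded call site added to __put_task_struct(). */
static void put_task_sketch(void)
{
	if (!sched_core_disabled_sketch())
		sched_core_free_sketch();
}

int main(void)
{
	put_task_sketch();		/* key false: helper call skipped */
	core_sched_enabled = true;
	put_task_sketch();		/* key true: slow path runs */
	return 0;
}
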
 include/linux/sched.h | 10 ++++++++++
 kernel/fork.c         |  9 ++++++---
 kernel/sched/sched.h  | 10 ----------
 3 files changed, 16 insertions(+), 13 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index f64f8f2..a2266df 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2406,9 +2406,19 @@ static inline void rseq_syscall(struct pt_regs *regs)
 extern void sched_core_fork(struct task_struct *p);
 extern int sched_core_share_pid(unsigned int cmd, pid_t pid, enum pid_type type,
 				unsigned long uaddr);
+DECLARE_STATIC_KEY_FALSE(__sched_core_enabled);
+static inline bool sched_core_disabled(void)
+{
+	return !static_branch_unlikely(&__sched_core_enabled);
+}
+
 #else
 static inline void sched_core_free(struct task_struct *tsk) { }
 static inline void sched_core_fork(struct task_struct *p) { }
+static inline bool sched_core_disabled(void)
+{
+	return true;
+}
 #endif
 
 #endif
diff --git a/kernel/fork.c b/kernel/fork.c
index 0d13baf..611f80b 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -843,7 +843,8 @@ void __put_task_struct(struct task_struct *tsk)
 	exit_creds(tsk);
 	delayacct_tsk_free(tsk);
 	put_signal_struct(tsk->signal);
-	sched_core_free(tsk);
+	if (!sched_core_disabled())
+		sched_core_free(tsk);
 	free_task(tsk);
 }
 EXPORT_SYMBOL_GPL(__put_task_struct);
@@ -2381,7 +2382,8 @@ static __latent_entropy struct task_struct *copy_process(

 	klp_copy_process(p);
 
-	sched_core_fork(p);
+	if (!sched_core_disabled())
+		sched_core_fork(p);
 
 	spin_lock(&current->sighand->siglock);

@@ -2469,7 +2471,8 @@ static __latent_entropy struct task_struct *copy_process(
 	return p;
 
 bad_fork_cancel_cgroup:
-	sched_core_free(p);
+	if (!sched_core_disabled())
+		sched_core_free(p);
 	spin_unlock(&current->sighand->siglock);
 	write_unlock_irq(&tasklist_lock);
 	cgroup_cancel_fork(p, args);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 5b21448..c6aeeda 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1157,11 +1157,6 @@ static inline bool sched_core_enabled(struct rq *rq)
 	return static_branch_unlikely(&__sched_core_enabled) && rq->core_enabled;
 }
 
-static inline bool sched_core_disabled(void)
-{
-	return !static_branch_unlikely(&__sched_core_enabled);
-}
-
 /*
  * Be careful with this function; not for general use. The return value isn't
  * stable unless you actually hold a relevant rq->__lock.
@@ -1257,11 +1252,6 @@ static inline bool sched_core_enabled(struct rq *rq)
 	return false;
 }
 
-static inline bool sched_core_disabled(void)
-{
-	return true;
-}
-
 static inline raw_spinlock_t *rq_lockp(struct rq *rq)
 {
 	return &rq->__lock;
--
1.8.3.1
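For reference (an assumption about the base tree, worth verifying): the
key declared above should be the one already defined in
kernel/sched/core.c, so the new DECLARE in include/linux/sched.h only
exposes it to kernel/fork.c:

/* kernel/sched/core.c -- pre-existing definition, shown for context */
DEFINE_STATIC_KEY_FALSE(__sched_core_enabled);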