From: Pierce Griffiths <pierceagriffiths@gmail.com>
Subject: [PATCH] scheduler: conditional statement cleanup

*Condense sequential if statements into a single if statement when they
share an outcome
*Eliminate a jump instruction by replacing a goto with a return
*Eliminate an unnecessary local variable
*Replace "if (function or boolean expression) return true else return false"
with "return (function or boolean expression);" (see the sketch below)
*Remove NULL pointer checks before calls to kfree() on the grounds that
kfree(NULL) is a no-op
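
For illustration, a generic sketch of the boolean-return cleanup described
above (is_positive() is a made-up helper, not taken from the patched files):

	/* before */
	static bool is_positive(int x)
	{
		if (x > 0)
			return true;
		else
			return false;
	}

	/* after: return the boolean expression directly */
	static bool is_positive(int x)
	{
		return x > 0;
	}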

Signed-off-by: Pierce Griffiths <pierceagriffiths@gmail.com>
---
Please tell me if I've made any mistakes in the submission process;
this is the first time I've submitted a patch to the Linux kernel.

 kernel/sched/core.c    |  8 ++------
 kernel/sched/cpufreq.c |  5 +----
 kernel/sched/cpupri.c  | 22 ++++++++--------------
 kernel/sched/rt.c      | 31 ++++++++++++-------------------
 4 files changed, 23 insertions(+), 43 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 625bc9897f62..443a1f235cfd 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -617,12 +617,8 @@ bool sched_can_stop_tick(struct rq *rq)
 	 * If there are more than one RR tasks, we need the tick to effect the
 	 * actual RR behaviour.
 	 */
-	if (rq->rt.rr_nr_running) {
-		if (rq->rt.rr_nr_running == 1)
-			return true;
-		else
-			return false;
-	}
+	if (rq->rt.rr_nr_running)
+		return rq->rt.rr_nr_running == 1;
 
 	/*
 	 * If there's no RR tasks, but FIFO tasks, we can skip the tick, no
diff --git a/kernel/sched/cpufreq.c b/kernel/sched/cpufreq.c
index 5e54cbcae673..a8fd4bd68954 100644
--- a/kernel/sched/cpufreq.c
+++ b/kernel/sched/cpufreq.c
@@ -34,10 +34,7 @@ void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data,
 			void (*func)(struct update_util_data *data, u64 time,
 				     unsigned int flags))
 {
-	if (WARN_ON(!data || !func))
-		return;
-
-	if (WARN_ON(per_cpu(cpufreq_update_util_data, cpu)))
+	if (WARN_ON(!data || !func || per_cpu(cpufreq_update_util_data, cpu)))
 		return;
 
 	data->func = func;
diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c
index daaadf939ccb..152c133e8247 100644
--- a/kernel/sched/cpupri.c
+++ b/kernel/sched/cpupri.c
@@ -29,20 +29,16 @@
 #include "sched.h"
 
 /* Convert between a 140 based task->prio, and our 102 based cpupri */
-static int convert_prio(int prio)
+static int convert_prio(const int prio)
 {
-	int cpupri;
-
 	if (prio == CPUPRI_INVALID)
-		cpupri = CPUPRI_INVALID;
+		return CPUPRI_INVALID;
 	else if (prio == MAX_PRIO)
-		cpupri = CPUPRI_IDLE;
+		return CPUPRI_IDLE;
 	else if (prio >= MAX_RT_PRIO)
-		cpupri = CPUPRI_NORMAL;
+		return CPUPRI_NORMAL;
 	else
-		cpupri = MAX_RT_PRIO - prio + 1;
-
-	return cpupri;
+		return MAX_RT_PRIO - prio + 1;
 }
 
 /**
@@ -95,10 +91,8 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
 		smp_rmb();
 
 		/* Need to do the rmb for every iteration */
-		if (skip)
-			continue;
-
-		if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)
+		if (skip ||
+		    cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)
 			continue;
 
 		if (lowest_mask) {
@@ -222,7 +216,7 @@ int cpupri_init(struct cpupri *cp)
 	return 0;
 
 cleanup:
-	for (i--; i >= 0; i--)
+	while (--i >= 0)
 		free_cpumask_var(cp->pri_to_cpu[i].mask);
 	return -ENOMEM;
 }
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 2e2955a8cf8f..acf1b94669ad 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -142,10 +142,12 @@ void free_rt_sched_group(struct task_group *tg)
 		destroy_rt_bandwidth(&tg->rt_bandwidth);
 
 	for_each_possible_cpu(i) {
-		if (tg->rt_rq)
-			kfree(tg->rt_rq[i]);
-		if (tg->rt_se)
-			kfree(tg->rt_se[i]);
+		/*
+		 * No need to check tg->rt_rq[i] or tg->rt_se[i] for NULL:
+		 * kfree(NULL) is a no-op.
+		 */
+		kfree(tg->rt_rq[i]);
+		kfree(tg->rt_se[i]);
 	}
 
 	kfree(tg->rt_rq);
@@ -1015,10 +1017,7 @@ enqueue_top_rt_rq(struct rt_rq *rt_rq)

 	BUG_ON(&rq->rt != rt_rq);
 
-	if (rt_rq->rt_queued)
-		return;
-
-	if (rt_rq_throttled(rt_rq))
+	if (rt_rq->rt_queued || rt_rq_throttled(rt_rq))
 		return;
 
 	if (rt_rq->rt_nr_running) {
@@ -1211,10 +1210,7 @@ void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
  */
 static inline bool move_entity(unsigned int flags)
 {
-	if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE)
-		return false;
-
-	return true;
+	return (flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) != DEQUEUE_SAVE;
 }
 
 static void __delist_rt_entity(struct sched_rt_entity *rt_se, struct rt_prio_array *array)
@@ -1393,7 +1389,7 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)

 	/* For anything but wake ups, just return the task_cpu */
 	if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
-		goto out;
+		return cpu;
 
 	rq = cpu_rq(cpu);

@@ -1437,7 +1433,6 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
 	}
 	rcu_read_unlock();
 
-out:
 	return cpu;
 }

@@ -2518,12 +2513,10 @@ static int tg_set_rt_bandwidth(struct task_group *tg,
 	/*
 	 * Disallowing the root group RT runtime is BAD, it would disallow the
 	 * kernel creating (and or operating) RT threads.
+	 *
+	 * No period doesn't make any sense.
 	 */
-	if (tg == &root_task_group && rt_runtime == 0)
-		return -EINVAL;
-
-	/* No period doesn't make any sense. */
-	if (rt_period == 0)
+	if ((tg == &root_task_group && !rt_runtime) || !rt_period)
 		return -EINVAL;
 
 	mutex_lock(&rt_constraints_mutex);
--
2.19.0