Subject: Re: [PATCH v2] sched/core: clean up sched_init() a bit
From: Qian Cai <cai@lca.pw>
Date: 2019-06-19
Ping.

On Tue, 2019-06-04 at 16:46 -0400, Qian Cai wrote:
> Compiling a kernel with both FAIR_GROUP_SCHED=n and RT_GROUP_SCHED=n
> will generate a warning using W=1:
>
>   kernel/sched/core.c: In function 'sched_init':
>   kernel/sched/core.c:5906:32: warning: variable 'ptr' set but not used
>
> Use this opportunity to tidy up the code a bit by removing unnecessary
> indentation, #endif comments, and extra lines.
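
For reference, here is a minimal standalone sketch (ordinary userspace C,
not the kernel code itself) of why the warning fires in the old layout:
with both group-scheduling options disabled, every read of ptr is compiled
out and only the assignment remains.

    /* sketch.c -- illustrative only; compile with: gcc -Wall -c sketch.c */
    #include <stdlib.h>

    void sched_init_sketch(void)
    {
            unsigned long alloc_size = 0, ptr;

            /* With FAIR_GROUP_SCHED=n and RT_GROUP_SCHED=n, neither
             * "alloc_size += 2 * nr_cpu_ids * sizeof(void **);" block
             * survives preprocessing, so alloc_size stays 0 ... */

            if (alloc_size) {
                    ptr = (unsigned long)malloc(alloc_size); /* ptr is set ... */
                    /* ... but the #ifdef blocks that would read ptr are
                     * compiled out too, hence:
                     * warning: variable 'ptr' set but not used */
            }
    }
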
>
> Signed-off-by: Qian Cai <cai@lca.pw>
> ---
>
> v2: Fix an oversight when both FAIR_GROUP_SCHED and RT_GROUP_SCHED are
>     selected, which was found by the 0day kernel testing robot.
>
>  kernel/sched/core.c | 50 +++++++++++++++++++++++---------------------------
>  1 file changed, 23 insertions(+), 27 deletions(-)
>
> diff --git a/kernel/sched/core.c b/kernel/sched/core.c
> index 874c427742a9..edebd5e97542 100644
> --- a/kernel/sched/core.c
> +++ b/kernel/sched/core.c
> @@ -5903,36 +5903,31 @@ int in_sched_functions(unsigned long addr)
>  void __init sched_init(void)
>  {
>   int i, j;
> - unsigned long alloc_size = 0, ptr;
> -
> - wait_bit_init();
> -
> -#ifdef CONFIG_FAIR_GROUP_SCHED
> - alloc_size += 2 * nr_cpu_ids * sizeof(void **);
> +#if defined(CONFIG_FAIR_GROUP_SCHED) && defined(CONFIG_RT_GROUP_SCHED)
> + unsigned long alloc_size = 4 * nr_cpu_ids * sizeof(void **);
> + unsigned long ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
> +#elif defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED)
> + unsigned long alloc_size = 2 * nr_cpu_ids * sizeof(void **);
> + unsigned long ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
>  #endif
> -#ifdef CONFIG_RT_GROUP_SCHED
> - alloc_size += 2 * nr_cpu_ids * sizeof(void **);
> -#endif
> - if (alloc_size) {
> - ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
> + wait_bit_init();
>  
>  #ifdef CONFIG_FAIR_GROUP_SCHED
> - root_task_group.se = (struct sched_entity **)ptr;
> - ptr += nr_cpu_ids * sizeof(void **);
> + root_task_group.se = (struct sched_entity **)ptr;
> + ptr += nr_cpu_ids * sizeof(void **);
>  
> - root_task_group.cfs_rq = (struct cfs_rq **)ptr;
> - ptr += nr_cpu_ids * sizeof(void **);
> + root_task_group.cfs_rq = (struct cfs_rq **)ptr;
> + ptr += nr_cpu_ids * sizeof(void **);
>  
> -#endif /* CONFIG_FAIR_GROUP_SCHED */
> +#endif
>  #ifdef CONFIG_RT_GROUP_SCHED
> - root_task_group.rt_se = (struct sched_rt_entity **)ptr;
> - ptr += nr_cpu_ids * sizeof(void **);
> + root_task_group.rt_se = (struct sched_rt_entity **)ptr;
> + ptr += nr_cpu_ids * sizeof(void **);
>  
> - root_task_group.rt_rq = (struct rt_rq **)ptr;
> - ptr += nr_cpu_ids * sizeof(void **);
> + root_task_group.rt_rq = (struct rt_rq **)ptr;
> + ptr += nr_cpu_ids * sizeof(void **);
>  
> -#endif /* CONFIG_RT_GROUP_SCHED */
> - }
> +#endif
>  #ifdef CONFIG_CPUMASK_OFFSTACK
>   for_each_possible_cpu(i) {
>   per_cpu(load_balance_mask, i) = (cpumask_var_t)kzalloc_node(
> @@ -5940,7 +5935,7 @@ void __init sched_init(void)
>   per_cpu(select_idle_mask, i) = (cpumask_var_t)kzalloc_node(
>   cpumask_size(), GFP_KERNEL, cpu_to_node(i));
>   }
> -#endif /* CONFIG_CPUMASK_OFFSTACK */
> +#endif
>  
>   init_rt_bandwidth(&def_rt_bandwidth, global_rt_period(), global_rt_runtime());
>   init_dl_bandwidth(&def_dl_bandwidth, global_rt_period(), global_rt_runtime());
> @@ -5950,9 +5945,9 @@ void __init sched_init(void)
>  #endif
>  
>  #ifdef CONFIG_RT_GROUP_SCHED
> - init_rt_bandwidth(&root_task_group.rt_bandwidth,
> - global_rt_period(), global_rt_runtime());
> -#endif /* CONFIG_RT_GROUP_SCHED */
> + init_rt_bandwidth(&root_task_group.rt_bandwidth, global_rt_period(),
> +   global_rt_runtime());
> +#endif
>  
>  #ifdef CONFIG_CGROUP_SCHED
>   task_group_cache = KMEM_CACHE(task_group, 0);
> @@ -5961,7 +5956,7 @@ void __init sched_init(void)
>   INIT_LIST_HEAD(&root_task_group.children);
>   INIT_LIST_HEAD(&root_task_group.siblings);
>   autogroup_init(&init_task);
> -#endif /* CONFIG_CGROUP_SCHED */
> +#endif
>  
>   for_each_possible_cpu(i) {
>   struct rq *rq;
> @@ -6031,6 +6026,7 @@ void __init sched_init(void)
>   rq->last_blocked_load_update_tick = jiffies;
>   atomic_set(&rq->nohz_flags, 0);
>  #endif
> +
>  #endif /* CONFIG_SMP */
>   hrtick_rq_init(rq);
>   atomic_set(&rq->nr_iowait, 0);
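
To make the net effect concrete: with only CONFIG_FAIR_GROUP_SCHED enabled,
the rewritten prologue reduces after preprocessing to roughly the following
(a sketch assembled from the hunks above, not a separate implementation).
With both options enabled, the same pattern continues with rt_se and rt_rq,
which is what the 4 * nr_cpu_ids * sizeof(void **) sizing accounts for.

    /* sched_init() prologue, FAIR_GROUP_SCHED=y / RT_GROUP_SCHED=n,
     * as it reads once the preprocessor has picked the #elif branch */
    int i, j;
    unsigned long alloc_size = 2 * nr_cpu_ids * sizeof(void **);
    unsigned long ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);

    wait_bit_init();

    /* carve the single allocation into two arrays of nr_cpu_ids
     * pointers each: one for the root group's per-CPU scheduling
     * entities, one for its per-CPU cfs_rq pointers */
    root_task_group.se = (struct sched_entity **)ptr;
    ptr += nr_cpu_ids * sizeof(void **);

    root_task_group.cfs_rq = (struct cfs_rq **)ptr;
    ptr += nr_cpu_ids * sizeof(void **);

Since ptr is declared only when at least one of the two options is set, a
FAIR_GROUP_SCHED=n && RT_GROUP_SCHED=n build no longer declares (or sets)
it at all, which is what makes the W=1 warning go away.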
