Subject: Re: [PATCH v3 2/2] sched/fair: Scan cluster before scanning LLC in wake-up path
On 2022/6/9 18:14, kernel test robot wrote:
> Hi Yicong,
>
> Thank you for the patch! Perhaps something to improve:
>
> [auto build test WARNING on tip/sched/core]
> [also build test WARNING on linus/master v5.19-rc1 next-20220609]
> [If your patch is applied to the wrong git tree, kindly drop us a note.
> And when submitting patch, we suggest to use '--base' as documented in
> https://git-scm.com/docs/git-format-patch]
>
> url: https://github.com/intel-lab-lkp/linux/commits/Yicong-Yang/sched-fair-Wake-task-within-the-cluster-when-possible/20220608-181847
> base: https://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git 991d8d8142cad94f9c5c05db25e67fa83d6f772a
> config: x86_64-randconfig-a006 (https://download.01.org/0day-ci/archive/20220609/202206091846.fm1bYjWk-lkp@intel.com/config)
> compiler: gcc-11 (Debian 11.3.0-3) 11.3.0
> reproduce (this is a W=1 build):
> # https://github.com/intel-lab-lkp/linux/commit/f2b15e8641f351783c1d47bc654ace164300b7f1
> git remote add linux-review https://github.com/intel-lab-lkp/linux
> git fetch --no-tags linux-review Yicong-Yang/sched-fair-Wake-task-within-the-cluster-when-possible/20220608-181847
> git checkout f2b15e8641f351783c1d47bc654ace164300b7f1
> # save the config file
> mkdir build_dir && cp config build_dir/.config
> make W=1 O=build_dir ARCH=x86_64 SHELL=/bin/bash kernel/sched/
>
> If you fix the issue, kindly add following tag where applicable
> Reported-by: kernel test robot <lkp@intel.com>
>
> All warnings (new ones prefixed by >>):
>
> kernel/sched/fair.c: In function 'select_idle_cpu':
>>> kernel/sched/fair.c:6381:36: warning: passing argument 2 of 'scan_cluster' makes integer from pointer without a cast [-Wint-conversion]

I didn't update the scan_cluster() stub accordingly, which leads to this warning. Thanks for catching this, I will fix it in v4.
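
For reference, a minimal sketch of the fix, assuming the !CONFIG_SCHED_CLUSTER stub only needs its second parameter aligned with the call site (the stub body and #ifdef placement shown here are assumptions, only the signature change matters):

    #ifndef CONFIG_SCHED_CLUSTER
    /*
     * Match the call in select_idle_cpu(), which passes the candidate
     * cpumask rather than the previous CPU.
     */
    static inline int scan_cluster(struct task_struct *p, struct cpumask *cpus,
    				   int target, int *nr)
    {
    	/* No cluster topology: nothing scanned, fall through to the LLC scan. */
    	return -1;
    }
    #endif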

> 6381 | idle_cpu = scan_cluster(p, cpus, target, &nr);
> | ^~~~
> | |
> | struct cpumask *
> kernel/sched/fair.c:6327:59: note: expected 'int' but argument is of type 'struct cpumask *'
> 6327 | static inline int scan_cluster(struct task_struct *p, int prev_cpu, int target, int *nr)
> | ~~~~^~~~~~~~
> kernel/sched/fair.c: At top level:
> kernel/sched/fair.c:11114:6: warning: no previous prototype for 'task_vruntime_update' [-Wmissing-prototypes]
> 11114 | void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi)
> | ^~~~~~~~~~~~~~~~~~~~
>
>
> vim +/scan_cluster +6381 kernel/sched/fair.c
>
> 6332
> 6333 /*
> 6334 * Scan the LLC domain for idle CPUs; this is dynamically regulated by
> 6335 * comparing the average scan cost (tracked in sd->avg_scan_cost) against the
> 6336 * average idle time for this rq (as found in rq->avg_idle).
> 6337 */
> 6338 static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, bool has_idle_core, int target)
> 6339 {
> 6340 struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
> 6341 int i, cpu, idle_cpu = -1, nr = INT_MAX;
> 6342 struct rq *this_rq = this_rq();
> 6343 int this = smp_processor_id();
> 6344 struct sched_domain *this_sd;
> 6345 u64 time = 0;
> 6346
> 6347 this_sd = rcu_dereference(*this_cpu_ptr(&sd_llc));
> 6348 if (!this_sd)
> 6349 return -1;
> 6350
> 6351 cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);
> 6352
> 6353 if (sched_feat(SIS_PROP) && !has_idle_core) {
> 6354 u64 avg_cost, avg_idle, span_avg;
> 6355 unsigned long now = jiffies;
> 6356
> 6357 /*
> 6358 * If we're busy, the assumption that the last idle period
> 6359 * predicts the future is flawed; age away the remaining
> 6360 * predicted idle time.
> 6361 */
> 6362 if (unlikely(this_rq->wake_stamp < now)) {
> 6363 while (this_rq->wake_stamp < now && this_rq->wake_avg_idle) {
> 6364 this_rq->wake_stamp++;
> 6365 this_rq->wake_avg_idle >>= 1;
> 6366 }
> 6367 }
> 6368
> 6369 avg_idle = this_rq->wake_avg_idle;
> 6370 avg_cost = this_sd->avg_scan_cost + 1;
> 6371
> 6372 span_avg = sd->span_weight * avg_idle;
> 6373 if (span_avg > 4*avg_cost)
> 6374 nr = div_u64(span_avg, avg_cost);
> 6375 else
> 6376 nr = 4;
> 6377
> 6378 time = cpu_clock(this);
> 6379 }
> 6380
>> 6381 idle_cpu = scan_cluster(p, cpus, target, &nr);
> 6382 if ((unsigned int)idle_cpu < nr_cpumask_bits)
> 6383 return idle_cpu;
> 6384
> 6385 for_each_cpu_wrap(cpu, cpus, target + 1) {
> 6386 if (has_idle_core) {
> 6387 i = select_idle_core(p, cpu, cpus, &idle_cpu);
> 6388 if ((unsigned int)i < nr_cpumask_bits)
> 6389 return i;
> 6390
> 6391 } else {
> 6392 if (--nr <= 0)
> 6393 return -1;
> 6394 idle_cpu = __select_idle_cpu(cpu, p);
> 6395 if ((unsigned int)idle_cpu < nr_cpumask_bits)
> 6396 break;
> 6397 }
> 6398 }
> 6399
> 6400 if (has_idle_core)
> 6401 set_idle_cores(target, false);
> 6402
> 6403 if (sched_feat(SIS_PROP) && !has_idle_core) {
> 6404 time = cpu_clock(this) - time;
> 6405
> 6406 /*
> 6407 * Account for the scan cost of wakeups against the average
> 6408 * idle time.
> 6409 */
> 6410 this_rq->wake_avg_idle -= min(this_rq->wake_avg_idle, time);
> 6411
> 6412 update_avg(&this_sd->avg_scan_cost, time);
> 6413 }
> 6414
> 6415 return idle_cpu;
> 6416 }
> 6417
>
