Subject: [mark:arm64/preempt-dynamic-static-key 6/6] kernel/sched/core.c:6435:43: warning: no previous prototype for function 'preempt_schedule'
tree:   https://git.kernel.org/pub/scm/linux/kernel/git/mark/linux.git arm64/preempt-dynamic-static-key
head: 47c5f8b3abb64dcd6ac43521d24ca9e7d7891dcc
commit: 47c5f8b3abb64dcd6ac43521d24ca9e7d7891dcc [6/6] arm64: support PREEMPT_DYNAMIC
config: arm64-buildonly-randconfig-r004-20211118 (attached as .config)
compiler: clang version 14.0.0 (https://github.com/llvm/llvm-project c46becf500df2a7fb4b4fce16178a036c344315a)
reproduce (this is a W=1 build):
wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
chmod +x ~/bin/make.cross
# install arm64 cross compiling tool for clang build
# apt-get install binutils-aarch64-linux-gnu
# https://git.kernel.org/pub/scm/linux/kernel/git/mark/linux.git/commit/?id=47c5f8b3abb64dcd6ac43521d24ca9e7d7891dcc
git remote add mark https://git.kernel.org/pub/scm/linux/kernel/git/mark/linux.git
git fetch --no-tags mark arm64/preempt-dynamic-static-key
git checkout 47c5f8b3abb64dcd6ac43521d24ca9e7d7891dcc
# save the attached .config to linux build tree
COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross W=1 ARCH=arm64

If you fix the issue, kindly add the following tag as appropriate:
Reported-by: kernel test robot <lkp@intel.com>

All warnings (new ones prefixed by >>):

kernel/sched/core.c:3439:6: warning: no previous prototype for function 'sched_set_stop_task' [-Wmissing-prototypes]
void sched_set_stop_task(int cpu, struct task_struct *stop)
^
kernel/sched/core.c:3439:1: note: declare 'static' if the function is not intended to be used outside of this translation unit
void sched_set_stop_task(int cpu, struct task_struct *stop)
^
static
kernel/sched/core.c:6359:35: warning: no previous prototype for function 'schedule_user' [-Wmissing-prototypes]
asmlinkage __visible void __sched schedule_user(void)
^
kernel/sched/core.c:6359:22: note: declare 'static' if the function is not intended to be used outside of this translation unit
asmlinkage __visible void __sched schedule_user(void)
^
static
>> kernel/sched/core.c:6435:43: warning: no previous prototype for function 'preempt_schedule' [-Wmissing-prototypes]
asmlinkage __visible void __sched notrace preempt_schedule(void)
^
kernel/sched/core.c:6435:22: note: declare 'static' if the function is not intended to be used outside of this translation unit
asmlinkage __visible void __sched notrace preempt_schedule(void)
^
static
>> kernel/sched/core.c:6483:43: warning: no previous prototype for function 'preempt_schedule_notrace' [-Wmissing-prototypes]
asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
^
kernel/sched/core.c:6483:22: note: declare 'static' if the function is not intended to be used outside of this translation unit
asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
^
static
4 warnings generated.


vim +/preempt_schedule +6435 kernel/sched/core.c

a18b5d01819235 kernel/sched/core.c Frederic Weisbecker 2015-01-22 6429
c1a280b68d4e6b kernel/sched/core.c Thomas Gleixner 2019-07-26 6430 #ifdef CONFIG_PREEMPTION
^1da177e4c3f41 kernel/sched.c Linus Torvalds 2005-04-16 6431 /*
a49b4f4012ef23 kernel/sched/core.c Valentin Schneider 2019-09-23 6432 * This is the entry point to schedule() from in-kernel preemption
a49b4f4012ef23 kernel/sched/core.c Valentin Schneider 2019-09-23 6433 * off of preempt_enable.
^1da177e4c3f41 kernel/sched.c Linus Torvalds 2005-04-16 6434 */
722a9f9299ca72 kernel/sched/core.c Andi Kleen 2014-05-02 @6435 asmlinkage __visible void __sched notrace preempt_schedule(void)
^1da177e4c3f41 kernel/sched.c Linus Torvalds 2005-04-16 6436 {
^1da177e4c3f41 kernel/sched.c Linus Torvalds 2005-04-16 6437 /*
^1da177e4c3f41 kernel/sched.c Linus Torvalds 2005-04-16 6438 * If there is a non-zero preempt_count or interrupts are disabled,
^1da177e4c3f41 kernel/sched.c Linus Torvalds 2005-04-16 6439 * we do not want to preempt the current task. Just return..
^1da177e4c3f41 kernel/sched.c Linus Torvalds 2005-04-16 6440 */
fbb00b568bc930 kernel/sched/core.c Frederic Weisbecker 2013-06-19 6441 if (likely(!preemptible()))
^1da177e4c3f41 kernel/sched.c Linus Torvalds 2005-04-16 6442 return;
a18b5d01819235 kernel/sched/core.c Frederic Weisbecker 2015-01-22 6443 preempt_schedule_common();
^1da177e4c3f41 kernel/sched.c Linus Torvalds 2005-04-16 6444 }
376e242429bf85 kernel/sched/core.c Masami Hiramatsu 2014-04-17 6445 NOKPROBE_SYMBOL(preempt_schedule);
^1da177e4c3f41 kernel/sched.c Linus Torvalds 2005-04-16 6446 EXPORT_SYMBOL(preempt_schedule);
009f60e2763568 kernel/sched/core.c Oleg Nesterov 2014-10-05 6447
2c9a98d3bc8087 kernel/sched/core.c Peter Zijlstra (Intel) 2021-01-18 6448 #ifdef CONFIG_PREEMPT_DYNAMIC
19f12e212caad8 kernel/sched/core.c Mark Rutland 2021-11-09 6449 #if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
67bb8fb8cf8e8a kernel/sched/core.c Mark Rutland 2021-11-09 6450 #ifndef preempt_schedule_dynamic_enabled
67bb8fb8cf8e8a kernel/sched/core.c Mark Rutland 2021-11-09 6451 #define preempt_schedule_dynamic_enabled preempt_schedule
67bb8fb8cf8e8a kernel/sched/core.c Mark Rutland 2021-11-09 6452 #define preempt_schedule_dynamic_disabled NULL
67bb8fb8cf8e8a kernel/sched/core.c Mark Rutland 2021-11-09 6453 #endif
67bb8fb8cf8e8a kernel/sched/core.c Mark Rutland 2021-11-09 6454 DEFINE_STATIC_CALL(preempt_schedule, preempt_schedule_dynamic_enabled);
ef72661e28c64a kernel/sched/core.c Peter Zijlstra 2021-01-25 6455 EXPORT_STATIC_CALL_TRAMP(preempt_schedule);
19f12e212caad8 kernel/sched/core.c Mark Rutland 2021-11-09 6456 #elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
19f12e212caad8 kernel/sched/core.c Mark Rutland 2021-11-09 6457 static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule);
19f12e212caad8 kernel/sched/core.c Mark Rutland 2021-11-09 6458 void __sched notrace dynamic_preempt_schedule(void)
19f12e212caad8 kernel/sched/core.c Mark Rutland 2021-11-09 6459 {
19f12e212caad8 kernel/sched/core.c Mark Rutland 2021-11-09 6460 if (!static_branch_unlikely(&sk_dynamic_preempt_schedule))
19f12e212caad8 kernel/sched/core.c Mark Rutland 2021-11-09 6461 return;
19f12e212caad8 kernel/sched/core.c Mark Rutland 2021-11-09 6462 preempt_schedule();
19f12e212caad8 kernel/sched/core.c Mark Rutland 2021-11-09 6463 }
19f12e212caad8 kernel/sched/core.c Mark Rutland 2021-11-09 6464 NOKPROBE_SYMBOL(dynamic_preempt_schedule);
19f12e212caad8 kernel/sched/core.c Mark Rutland 2021-11-09 6465 EXPORT_SYMBOL(dynamic_preempt_schedule);
19f12e212caad8 kernel/sched/core.c Mark Rutland 2021-11-09 6466 #endif
2c9a98d3bc8087 kernel/sched/core.c Peter Zijlstra (Intel) 2021-01-18 6467 #endif
2c9a98d3bc8087 kernel/sched/core.c Peter Zijlstra (Intel) 2021-01-18 6468
009f60e2763568 kernel/sched/core.c Oleg Nesterov 2014-10-05 6469 /**
4eaca0a887eaee kernel/sched/core.c Frederic Weisbecker 2015-06-04 6470 * preempt_schedule_notrace - preempt_schedule called by tracing
009f60e2763568 kernel/sched/core.c Oleg Nesterov 2014-10-05 6471 *
009f60e2763568 kernel/sched/core.c Oleg Nesterov 2014-10-05 6472 * The tracing infrastructure uses preempt_enable_notrace to prevent
009f60e2763568 kernel/sched/core.c Oleg Nesterov 2014-10-05 6473 * recursion and tracing preempt enabling caused by the tracing
009f60e2763568 kernel/sched/core.c Oleg Nesterov 2014-10-05 6474 * infrastructure itself. But as tracing can happen in areas coming
009f60e2763568 kernel/sched/core.c Oleg Nesterov 2014-10-05 6475 * from userspace or just about to enter userspace, a preempt enable
009f60e2763568 kernel/sched/core.c Oleg Nesterov 2014-10-05 6476 * can occur before user_exit() is called. This will cause the scheduler
009f60e2763568 kernel/sched/core.c Oleg Nesterov 2014-10-05 6477 * to be called when the system is still in usermode.
009f60e2763568 kernel/sched/core.c Oleg Nesterov 2014-10-05 6478 *
009f60e2763568 kernel/sched/core.c Oleg Nesterov 2014-10-05 6479 * To prevent this, the preempt_enable_notrace will use this function
009f60e2763568 kernel/sched/core.c Oleg Nesterov 2014-10-05 6480 * instead of preempt_schedule() to exit user context if needed before
009f60e2763568 kernel/sched/core.c Oleg Nesterov 2014-10-05 6481 * calling the scheduler.
009f60e2763568 kernel/sched/core.c Oleg Nesterov 2014-10-05 6482 */
4eaca0a887eaee kernel/sched/core.c Frederic Weisbecker 2015-06-04 @6483 asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
009f60e2763568 kernel/sched/core.c Oleg Nesterov 2014-10-05 6484 {
009f60e2763568 kernel/sched/core.c Oleg Nesterov 2014-10-05 6485 enum ctx_state prev_ctx;
009f60e2763568 kernel/sched/core.c Oleg Nesterov 2014-10-05 6486
009f60e2763568 kernel/sched/core.c Oleg Nesterov 2014-10-05 6487 if (likely(!preemptible()))
009f60e2763568 kernel/sched/core.c Oleg Nesterov 2014-10-05 6488 return;
009f60e2763568 kernel/sched/core.c Oleg Nesterov 2014-10-05 6489
009f60e2763568 kernel/sched/core.c Oleg Nesterov 2014-10-05 6490 do {
47252cfbac0364 kernel/sched/core.c Steven Rostedt 2016-03-21 6491 /*
47252cfbac0364 kernel/sched/core.c Steven Rostedt 2016-03-21 6492 * Because the function tracer can trace preempt_count_sub()
47252cfbac0364 kernel/sched/core.c Steven Rostedt 2016-03-21 6493 * and it also uses preempt_enable/disable_notrace(), if
47252cfbac0364 kernel/sched/core.c Steven Rostedt 2016-03-21 6494 * NEED_RESCHED is set, the preempt_enable_notrace() called
47252cfbac0364 kernel/sched/core.c Steven Rostedt 2016-03-21 6495 * by the function tracer will call this function again and
47252cfbac0364 kernel/sched/core.c Steven Rostedt 2016-03-21 6496 * cause infinite recursion.
47252cfbac0364 kernel/sched/core.c Steven Rostedt 2016-03-21 6497 *
47252cfbac0364 kernel/sched/core.c Steven Rostedt 2016-03-21 6498 * Preemption must be disabled here before the function
47252cfbac0364 kernel/sched/core.c Steven Rostedt 2016-03-21 6499 * tracer can trace. Break up preempt_disable() into two
47252cfbac0364 kernel/sched/core.c Steven Rostedt 2016-03-21 6500 * calls. One to disable preemption without fear of being
47252cfbac0364 kernel/sched/core.c Steven Rostedt 2016-03-21 6501 * traced. The other to still record the preemption latency,
47252cfbac0364 kernel/sched/core.c Steven Rostedt 2016-03-21 6502 * which can also be traced by the function tracer.
47252cfbac0364 kernel/sched/core.c Steven Rostedt 2016-03-21 6503 */
3d8f74dd4ca1da kernel/sched/core.c Peter Zijlstra 2015-09-28 6504 preempt_disable_notrace();
47252cfbac0364 kernel/sched/core.c Steven Rostedt 2016-03-21 6505 preempt_latency_start(1);
009f60e2763568 kernel/sched/core.c Oleg Nesterov 2014-10-05 6506 /*
009f60e2763568 kernel/sched/core.c Oleg Nesterov 2014-10-05 6507 * Needs preempt disabled in case user_exit() is traced
009f60e2763568 kernel/sched/core.c Oleg Nesterov 2014-10-05 6508 * and the tracer calls preempt_enable_notrace() causing
009f60e2763568 kernel/sched/core.c Oleg Nesterov 2014-10-05 6509 * an infinite recursion.
009f60e2763568 kernel/sched/core.c Oleg Nesterov 2014-10-05 6510 */
009f60e2763568 kernel/sched/core.c Oleg Nesterov 2014-10-05 6511 prev_ctx = exception_enter();
b4bfa3fcfe3b82 kernel/sched/core.c Thomas Gleixner 2021-08-15 6512 __schedule(SM_PREEMPT);
009f60e2763568 kernel/sched/core.c Oleg Nesterov 2014-10-05 6513 exception_exit(prev_ctx);
009f60e2763568 kernel/sched/core.c Oleg Nesterov 2014-10-05 6514
47252cfbac0364 kernel/sched/core.c Steven Rostedt 2016-03-21 6515 preempt_latency_stop(1);
3d8f74dd4ca1da kernel/sched/core.c Peter Zijlstra 2015-09-28 6516 preempt_enable_no_resched_notrace();
009f60e2763568 kernel/sched/core.c Oleg Nesterov 2014-10-05 6517 } while (need_resched());
009f60e2763568 kernel/sched/core.c Oleg Nesterov 2014-10-05 6518 }
4eaca0a887eaee kernel/sched/core.c Frederic Weisbecker 2015-06-04 6519 EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
009f60e2763568 kernel/sched/core.c Oleg Nesterov 2014-10-05 6520

:::::: The code at line 6435 was first introduced by commit
:::::: 722a9f9299ca720a3f14660e7c0dce7b76a9cb42 asmlinkage: Add explicit __visible to drivers/*, lib/*, kernel/*

:::::: TO: Andi Kleen <ak@linux.intel.com>
:::::: CC: H. Peter Anvin <hpa@linux.intel.com>

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org