Subject: [tip: x86/cleanups] x86/paravirt: Silence unused native_pv_lock_init() function warning
The following commit has been merged into the x86/cleanups branch of tip:

Commit-ID: ce0a1b608bfc709cf366f020b520310a3b3272c3
Gitweb: https://git.kernel.org/tip/ce0a1b608bfc709cf366f020b520310a3b3272c3
Author: Arnd Bergmann <arnd@arndb.de>
AuthorDate: Thu, 03 Aug 2023 10:26:18 +02:00
Committer: Borislav Petkov (AMD) <bp@alien8.de>
CommitterDate: Thu, 03 Aug 2023 16:50:19 +02:00

x86/paravirt: Silence unused native_pv_lock_init() function warning

The native_pv_lock_init() function is only used in SMP configurations
and is declared in asm/qspinlock.h, which is not included in UP kernels,
but the function is still defined for both SMP and UP, which causes a
warning:

arch/x86/kernel/paravirt.c:76:13: error: no previous prototype for 'native_pv_lock_init' [-Werror=missing-prototypes]
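
For context, -Wmissing-prototypes (promoted to an error by -Werror here)
fires whenever a non-static function is defined in a .c file without a
previous declaration in scope. A minimal sketch with hypothetical names,
outside the kernel:

  /* sketch.c - hypothetical example, not kernel code */
  void foo(void)   /* warning: no previous prototype for 'foo' */
  {
  }

Declaring foo() in a header that sketch.c includes (or marking it static)
silences the warning; in this case the declaration lives in
asm/qspinlock.h, which UP kernels never pull in.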

Move the declaration to asm/paravirt.h so it is visible even without
CONFIG_SMP, and short-circuit the definition into an empty inline
function when CONFIG_PARAVIRT is disabled.
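
The resulting arrangement in asm/paravirt.h then looks roughly like this
(condensed sketch of the ifdefs, not the literal file contents):

  #ifdef CONFIG_PARAVIRT
  # ifndef __ASSEMBLY__
  void native_pv_lock_init(void) __init;            /* real version in paravirt.c */
  # endif
  #else  /* !CONFIG_PARAVIRT */
  # ifndef __ASSEMBLY__
  static inline void native_pv_lock_init(void) { }  /* empty stub */
  # endif
  #endif

With that, asm/qspinlock.h no longer needs its own declaration or stub,
and paravirt.c keeps the single __init definition for all configurations.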

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://lore.kernel.org/r/20230803082619.1369127-7-arnd@kernel.org
---
arch/x86/include/asm/paravirt.h | 7 +++++++
arch/x86/include/asm/qspinlock.h | 7 +------
arch/x86/kernel/paravirt.c | 3 ++-
3 files changed, 10 insertions(+), 7 deletions(-)

diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index b497786..6c8ff12 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -739,6 +739,7 @@ static __always_inline unsigned long arch_local_irq_save(void)
".popsection")

extern void default_banner(void);
+void native_pv_lock_init(void) __init;

#else /* __ASSEMBLY__ */

@@ -778,6 +779,12 @@ extern void default_banner(void);
#endif /* __ASSEMBLY__ */
#else /* CONFIG_PARAVIRT */
# define default_banner x86_init_noop
+
+#ifndef __ASSEMBLY__
+static inline void native_pv_lock_init(void)
+{
+}
+#endif
#endif /* !CONFIG_PARAVIRT */

#ifndef __ASSEMBLY__
diff --git a/arch/x86/include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h
index d87451d..cde8357 100644
--- a/arch/x86/include/asm/qspinlock.h
+++ b/arch/x86/include/asm/qspinlock.h
@@ -74,8 +74,6 @@ static inline bool vcpu_is_preempted(long cpu)
*/
DECLARE_STATIC_KEY_TRUE(virt_spin_lock_key);

-void native_pv_lock_init(void) __init;
-
/*
* Shortcut for the queued_spin_lock_slowpath() function that allows
* virt to hijack it.
@@ -103,10 +101,7 @@ static inline bool virt_spin_lock(struct qspinlock *lock)

return true;
}
-#else
-static inline void native_pv_lock_init(void)
-{
-}
+
#endif /* CONFIG_PARAVIRT */

#include <asm-generic/qspinlock.h>
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index ac10b46..32e94a3 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -75,7 +75,8 @@ DEFINE_STATIC_KEY_TRUE(virt_spin_lock_key);

void __init native_pv_lock_init(void)
{
- if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
+ if (IS_ENABLED(CONFIG_PARAVIRT_SPINLOCKS) &&
+ !boot_cpu_has(X86_FEATURE_HYPERVISOR))
static_branch_disable(&virt_spin_lock_key);
}
