Subject: [PATCH v5 11/16] x86/mtrr: let cache_aps_delayed_init replace mtrr_aps_delayed_init
In preparation for decoupling MTRR and PAT, replace the MTRR-specific
mtrr_aps_delayed_init flag with a more generic cache_aps_delayed_init
one.
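
For reference, a minimal standalone sketch of the interface this patch
introduces. The flag and accessor names are taken from the diff below; the
plain-C build context and the demo main() are illustrative assumptions only
and are not part of the kernel change:

	#include <stdbool.h>
	#include <stdio.h>

	/* The flag is now private to one compilation unit and is only
	 * reachable through the two accessors (names as in the patch).
	 */
	static bool cache_aps_delayed_init;

	void set_cache_aps_delayed_init(bool val)
	{
		cache_aps_delayed_init = val;
	}

	bool get_cache_aps_delayed_init(void)
	{
		return cache_aps_delayed_init;
	}

	int main(void)
	{
		/* boot path (native_smp_prepare_cpus) sets the flag ... */
		set_cache_aps_delayed_init(true);

		/* ... consumers (e.g. mtrr_ap_init) only test it ... */
		if (get_cache_aps_delayed_init())
			printf("AP cache init is delayed\n");

		/* ... and mtrr_aps_init() clears it once all APs are up */
		set_cache_aps_delayed_init(false);
		return 0;
	}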

Signed-off-by: Juergen Gross <jgross@suse.com>
---
V2:
- new patch
V4:
- reestablish function to set cache_aps_delayed_init (Borislav Petkov)
V5:
- make cache_aps_delayed_init static, add get accessor (Borislav Petkov)
---
arch/x86/include/asm/cacheinfo.h |  2 ++
arch/x86/include/asm/mtrr.h      |  2 --
arch/x86/kernel/cpu/cacheinfo.c  | 12 ++++++++++++
arch/x86/kernel/cpu/mtrr/mtrr.c  | 18 +++++-------------
arch/x86/kernel/smpboot.c        |  5 +++--
5 files changed, 22 insertions(+), 17 deletions(-)

diff --git a/arch/x86/include/asm/cacheinfo.h b/arch/x86/include/asm/cacheinfo.h
index 978bac70fd49..e443fcc1f045 100644
--- a/arch/x86/include/asm/cacheinfo.h
+++ b/arch/x86/include/asm/cacheinfo.h
@@ -13,5 +13,7 @@ void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c, int cpu);
void cache_disable(void);
void cache_enable(void);
void cache_cpu_init(void);
+void set_cache_aps_delayed_init(bool val);
+bool get_cache_aps_delayed_init(void);

#endif /* _ASM_X86_CACHEINFO_H */
diff --git a/arch/x86/include/asm/mtrr.h b/arch/x86/include/asm/mtrr.h
index 986249a2b9b6..5d31219c8529 100644
--- a/arch/x86/include/asm/mtrr.h
+++ b/arch/x86/include/asm/mtrr.h
@@ -43,7 +43,6 @@ extern int mtrr_del(int reg, unsigned long base, unsigned long size);
extern int mtrr_del_page(int reg, unsigned long base, unsigned long size);
extern void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi);
extern void mtrr_ap_init(void);
-extern void set_mtrr_aps_delayed_init(void);
extern void mtrr_aps_init(void);
extern void mtrr_bp_restore(void);
extern int mtrr_trim_uncached_memory(unsigned long end_pfn);
@@ -87,7 +86,6 @@ static inline void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi)
{
}
#define mtrr_ap_init() do {} while (0)
-#define set_mtrr_aps_delayed_init() do {} while (0)
#define mtrr_aps_init() do {} while (0)
#define mtrr_bp_restore() do {} while (0)
#define mtrr_disable() do {} while (0)
diff --git a/arch/x86/kernel/cpu/cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c
index 81ab99fe92bd..931ba3fb1363 100644
--- a/arch/x86/kernel/cpu/cacheinfo.c
+++ b/arch/x86/kernel/cpu/cacheinfo.c
@@ -1137,3 +1137,15 @@ void cache_cpu_init(void)
cache_enable();
local_irq_restore(flags);
}
+
+static bool cache_aps_delayed_init;
+
+void set_cache_aps_delayed_init(bool val)
+{
+ cache_aps_delayed_init = val;
+}
+
+bool get_cache_aps_delayed_init(void)
+{
+ return cache_aps_delayed_init;
+}
diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.c b/arch/x86/kernel/cpu/mtrr/mtrr.c
index f671be9823b6..15ee6d72fb1f 100644
--- a/arch/x86/kernel/cpu/mtrr/mtrr.c
+++ b/arch/x86/kernel/cpu/mtrr/mtrr.c
@@ -68,7 +68,6 @@ unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
static DEFINE_MUTEX(mtrr_mutex);

u64 size_or_mask, size_and_mask;
-static bool mtrr_aps_delayed_init;

static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __ro_after_init;

@@ -175,7 +174,8 @@ static int mtrr_rendezvous_handler(void *info)
if (data->smp_reg != ~0U) {
mtrr_if->set(data->smp_reg, data->smp_base,
data->smp_size, data->smp_type);
- } else if (mtrr_aps_delayed_init || !cpu_online(smp_processor_id())) {
+ } else if (get_cache_aps_delayed_init() ||
+ !cpu_online(smp_processor_id())) {
cache_cpu_init();
}
return 0;
@@ -782,7 +782,7 @@ void __init mtrr_bp_init(void)

void mtrr_ap_init(void)
{
- if (!memory_caching_control || mtrr_aps_delayed_init)
+ if (!memory_caching_control || get_cache_aps_delayed_init())
return;

/*
@@ -816,14 +816,6 @@ void mtrr_save_state(void)
smp_call_function_single(first_cpu, mtrr_save_fixed_ranges, NULL, 1);
}

-void set_mtrr_aps_delayed_init(void)
-{
- if (!memory_caching_control)
- return;
-
- mtrr_aps_delayed_init = true;
-}
-
/*
* Delayed MTRR initialization for all AP's
*/
@@ -837,11 +829,11 @@ void mtrr_aps_init(void)
* by doing set_mtrr_aps_delayed_init(), prior to this point. If not,
* then we are done.
*/
- if (!mtrr_aps_delayed_init)
+ if (!get_cache_aps_delayed_init())
return;

set_mtrr(~0U, 0, 0, 0);
- mtrr_aps_delayed_init = false;
+ set_cache_aps_delayed_init(false);
}

void mtrr_bp_restore(void)
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 3f3ea0287f69..13c71ab29d84 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -58,6 +58,7 @@
#include <linux/overflow.h>

#include <asm/acpi.h>
+#include <asm/cacheinfo.h>
#include <asm/desc.h>
#include <asm/nmi.h>
#include <asm/irq.h>
@@ -1428,7 +1429,7 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)

uv_system_init();

- set_mtrr_aps_delayed_init();
+ set_cache_aps_delayed_init(true);

smp_quirk_init_udelay();

@@ -1439,7 +1440,7 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)

void arch_thaw_secondary_cpus_begin(void)
{
- set_mtrr_aps_delayed_init();
+ set_cache_aps_delayed_init(true);
}

void arch_thaw_secondary_cpus_end(void)
--
2.35.3