Subject: [PATCH 1/2] x86/tsc: Reduce external interference on max_warp detection
The TSC max_warp detection code in check_tsc_warp() is very timing
sensitive. Due to the possibility of false cacheline sharing, activity
on other CPUs may have an impact on the max_warp detection process.
Put the max_warp detection data variables on their own cacheline to
reduce that kind of external interference.
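
For reference only, not part of the patch: below is a minimal userspace C
sketch of the same false-sharing avoidance pattern. The names (warp_state,
wstate, warp_lock, warp_unlock, CACHELINE_SIZE) are hypothetical and a
64-byte cacheline is assumed; the patch itself relies on the kernel's
____cacheline_aligned_in_smp, which uses the architecture's cacheline size
rather than a hard-coded constant.

	#include <stdatomic.h>
	#include <stdint.h>

	#define CACHELINE_SIZE 64	/* assumed; x86 commonly uses 64-byte lines */

	/*
	 * Keep the lock and the data it protects together and align the
	 * whole group on a cacheline boundary. The type's size is rounded
	 * up to the alignment, so unrelated hot variables cannot end up on
	 * the same line and disturb the timing-sensitive measurement loop.
	 */
	struct warp_state {
		atomic_flag lock;
		int nr_warps;
		int random_warps;
		uint64_t last_tsc;
		uint64_t max_warp;
	} __attribute__((aligned(CACHELINE_SIZE)));

	static struct warp_state wstate = { .lock = ATOMIC_FLAG_INIT };

	static inline void warp_lock(struct warp_state *s)
	{
		while (atomic_flag_test_and_set_explicit(&s->lock,
							 memory_order_acquire))
			;	/* spin */
	}

	static inline void warp_unlock(struct warp_state *s)
	{
		atomic_flag_clear_explicit(&s->lock, memory_order_release);
	}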

Signed-off-by: Waiman Long <longman@redhat.com>
---
arch/x86/kernel/tsc_sync.c | 57 ++++++++++++++++++++------------------
1 file changed, 30 insertions(+), 27 deletions(-)

diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
index 9452dc9664b5..70aeb254b62b 100644
--- a/arch/x86/kernel/tsc_sync.c
+++ b/arch/x86/kernel/tsc_sync.c
@@ -253,12 +253,15 @@ static atomic_t test_runs;
* we want to have the fastest, inlined, non-debug version
* of a critical section, to be able to prove TSC time-warps:
*/
-static arch_spinlock_t sync_lock = __ARCH_SPIN_LOCK_UNLOCKED;
-
-static cycles_t last_tsc;
-static cycles_t max_warp;
-static int nr_warps;
-static int random_warps;
+static struct {
+ arch_spinlock_t lock;
+ int nr_warps;
+ int random_warps;
+ cycles_t last_tsc;
+ cycles_t max_warp;
+} sync ____cacheline_aligned_in_smp = {
+ .lock = __ARCH_SPIN_LOCK_UNLOCKED,
+};

/*
* TSC-warp measurement loop running on both CPUs. This is not called
@@ -281,11 +284,11 @@ static cycles_t check_tsc_warp(unsigned int timeout)
* previous TSC that was measured (possibly on
* another CPU) and update the previous TSC timestamp.
*/
- arch_spin_lock(&sync_lock);
- prev = last_tsc;
+ arch_spin_lock(&sync.lock);
+ prev = sync.last_tsc;
now = rdtsc_ordered();
- last_tsc = now;
- arch_spin_unlock(&sync_lock);
+ sync.last_tsc = now;
+ arch_spin_unlock(&sync.lock);

/*
* Be nice every now and then (and also check whether
@@ -304,18 +307,18 @@ static cycles_t check_tsc_warp(unsigned int timeout)
* we saw a time-warp of the TSC going backwards:
*/
if (unlikely(prev > now)) {
- arch_spin_lock(&sync_lock);
- max_warp = max(max_warp, prev - now);
- cur_max_warp = max_warp;
+ arch_spin_lock(&sync.lock);
+ sync.max_warp = max(sync.max_warp, prev - now);
+ cur_max_warp = sync.max_warp;
/*
* Check whether this bounces back and forth. Only
* one CPU should observe time going backwards.
*/
- if (cur_warps != nr_warps)
- random_warps++;
- nr_warps++;
- cur_warps = nr_warps;
- arch_spin_unlock(&sync_lock);
+ if (cur_warps != sync.nr_warps)
+ sync.random_warps++;
+ sync.nr_warps++;
+ cur_warps = sync.nr_warps;
+ arch_spin_unlock(&sync.lock);
}
}
WARN(!(now-start),
@@ -394,21 +397,21 @@ void check_tsc_sync_source(int cpu)
* stop. If not, decrement the number of runs an check if we can
* retry. In case of random warps no retry is attempted.
*/
- if (!nr_warps) {
+ if (!sync.nr_warps) {
atomic_set(&test_runs, 0);

pr_debug("TSC synchronization [CPU#%d -> CPU#%d]: passed\n",
smp_processor_id(), cpu);

- } else if (atomic_dec_and_test(&test_runs) || random_warps) {
+ } else if (atomic_dec_and_test(&test_runs) || sync.random_warps) {
/* Force it to 0 if random warps brought us here */
atomic_set(&test_runs, 0);

pr_warn("TSC synchronization [CPU#%d -> CPU#%d]:\n",
smp_processor_id(), cpu);
pr_warn("Measured %Ld cycles TSC warp between CPUs, "
- "turning off TSC clock.\n", max_warp);
- if (random_warps)
+ "turning off TSC clock.\n", sync.max_warp);
+ if (sync.random_warps)
pr_warn("TSC warped randomly between CPUs\n");
mark_tsc_unstable("check_tsc_sync_source failed");
}
@@ -417,10 +420,10 @@ void check_tsc_sync_source(int cpu)
* Reset it - just in case we boot another CPU later:
*/
atomic_set(&start_count, 0);
- random_warps = 0;
- nr_warps = 0;
- max_warp = 0;
- last_tsc = 0;
+ sync.random_warps = 0;
+ sync.nr_warps = 0;
+ sync.max_warp = 0;
+ sync.last_tsc = 0;

/*
* Let the target continue with the bootup:
@@ -476,7 +479,7 @@ void check_tsc_sync_target(void)
/*
* Store the maximum observed warp value for a potential retry:
*/
- gbl_max_warp = max_warp;
+ gbl_max_warp = sync.max_warp;

/*
* Ok, we are done:
--
2.27.0