From: Balbir Singh <sblbir@amazon.com>
Subject: [PATCH v1 1/3] x86/mm: change l1d flush runtime prctl behaviour

Checking the task's CPU affinity at API opt-in time is not the best
approach; instead, kill the task if it ends up running on an
SMT-enabled core. This is better than silently skipping the L1D cache
flush when the task migrates from a non-SMT core to an SMT-enabled
core.

Signed-off-by: Balbir Singh <sblbir@amazon.com>
---
To be applied on top of tip commit id
b6724f118d44606fddde391ba7527526b3cad211
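
For review purposes only, a minimal userspace sketch of the opt-in flow
under the new behaviour (not part of this patch). It assumes the
speculation-control prctl used by this series; the PR_SPEC_L1D_FLUSH_OUT
name and value below are placeholders (taken from the L1D_FLUSH_OUT
naming in the code) and may not match the final uapi, while
PR_SET_SPECULATION_CTRL and PR_SPEC_ENABLE come from <linux/prctl.h>:

/*
 * Illustrative only: opt the calling task into an L1D flush on mm
 * context switch. With this patch the prctl no longer fails based on
 * the task's CPU affinity; instead the task is killed with SIGBUS if
 * it is later scheduled on an SMT-enabled core.
 */
#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

#ifndef PR_SPEC_L1D_FLUSH_OUT
#define PR_SPEC_L1D_FLUSH_OUT 2	/* assumed name/value for this series */
#endif

int main(void)
{
	if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_L1D_FLUSH_OUT,
		  PR_SPEC_ENABLE, 0, 0)) {
		/* EINVAL: CPU not affected by L1TF or no L1D flush support */
		perror("PR_SET_SPECULATION_CTRL");
		return 1;
	}
	/* Migrating onto an SMT-enabled core is now fatal (SIGBUS). */
	return 0;
}

The behavioural difference this illustrates: the prctl now succeeds
regardless of the task's affinity, and enforcement moves to context
switch time.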

arch/Kconfig | 4 ++++
arch/x86/Kconfig | 1 +
arch/x86/mm/tlb.c | 37 +++++++++++++++++++++----------------
include/linux/sched.h | 10 ++++++++++
4 files changed, 36 insertions(+), 16 deletions(-)

diff --git a/arch/Kconfig b/arch/Kconfig
index 546869c3269d..2024486d28a2 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -311,6 +311,10 @@ config ARCH_32BIT_OFF_T
still support 32-bit off_t. This option is enabled for all such
architectures explicitly.

+config ARCH_HAS_PARANOID_L1D_FLUSH
+ bool
+ default n
+
config HAVE_ASM_MODVERSIONS
bool
help
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 36357c806e8a..02ff6ff71002 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -103,6 +103,7 @@ config X86
select ARCH_WANTS_DYNAMIC_TASK_STRUCT
select ARCH_WANT_HUGE_PMD_SHARE
select ARCH_WANTS_THP_SWAP if X86_64
+ select ARCH_HAS_PARANOID_L1D_FLUSH
select BUILDTIME_TABLE_SORT
select CLKEVT_I8253
select CLOCKSOURCE_VALIDATE_LAST_CYCLE
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 88e9ad5142e4..bdc399b86bc7 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -315,31 +315,18 @@ EXPORT_SYMBOL_GPL(leave_mm);

int enable_l1d_flush_for_task(struct task_struct *tsk)
{
- int cpu, ret = 0, i;
-
/*
* Do not enable L1D_FLUSH_OUT if
* b. The CPU is not affected by the L1TF bug
* c. The CPU does not have L1D FLUSH feature support
- * c. The task's affinity is on cores with SMT on.
*/

if (!boot_cpu_has_bug(X86_BUG_L1TF) ||
- !static_cpu_has(X86_FEATURE_FLUSH_L1D))
+ !boot_cpu_has(X86_FEATURE_FLUSH_L1D))
return -EINVAL;

- cpu = get_cpu();
-
- for_each_cpu(i, &tsk->cpus_mask) {
- if (cpu_data(i).smt_active == true) {
- put_cpu();
- return -EINVAL;
- }
- }
-
set_ti_thread_flag(&tsk->thread_info, TIF_SPEC_L1D_FLUSH);
- put_cpu();
- return ret;
+ return 0;
}

int disable_l1d_flush_for_task(struct task_struct *tsk)
@@ -358,13 +345,31 @@ void switch_mm(struct mm_struct *prev, struct mm_struct *next,
local_irq_restore(flags);
}

+/*
+ * Kill (SIGBUS) a task that opted into L1D flushing via the prctl
+ * interface but ended up running on an SMT-enabled core.
+ */
+static void l1d_flush_kill(struct callback_head *ch)
+{
+ force_sig(SIGBUS);
+}
+
static inline unsigned long mm_mangle_tif_spec_bits(struct task_struct *next)
{
unsigned long next_tif = task_thread_info(next)->flags;
unsigned long spec_bits = (next_tif >> TIF_SPEC_IB) & LAST_USER_MM_SPEC_MASK;
+ unsigned long next_mm;

BUILD_BUG_ON(TIF_SPEC_L1D_FLUSH != TIF_SPEC_IB + 1);
- return (unsigned long)next->mm | spec_bits;
+ next_mm = (unsigned long)next->mm | spec_bits;
+
+ if ((next_mm & LAST_USER_MM_L1D_FLUSH) && this_cpu_read(cpu_info.smt_active)) {
+ clear_ti_thread_flag(&next->thread_info, TIF_SPEC_L1D_FLUSH);
+ next->l1d_flush_kill.func = l1d_flush_kill;
+ task_work_add(next, &next->l1d_flush_kill, TWA_RESUME);
+ }
+
+ return next_mm;
}

static void cond_mitigation(struct task_struct *next)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 322ea9b827e1..c569c49715d7 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1338,6 +1338,16 @@ struct task_struct {
unsigned long getblk_bh_state;
#endif

+#ifdef CONFIG_ARCH_HAS_PARANOID_L1D_FLUSH
+ /*
+ * If L1D flush is enabled for the mm at context switch time,
+ * this callback head is used to queue the kill work that
+ * sends SIGBUS to a task which ends up running on an
+ * SMT-enabled core.
+ */
+ struct callback_head l1d_flush_kill;
+#endif
+
/*
* New fields for task_struct should be added above here, so that
* they are included in the randomized portion of task_struct.
--
2.17.1