From: Nikita Yushchenko <nikita.yushchenko@virtuozzo.com>
Subject: [PATCH/RFC v2 1/3] tlb: mmu_gather: introduce CONFIG_MMU_GATHER_TABLE_FREE_COMMON
Date: Sat, 18 Dec 2021

Several architectures (arm, arm64, x86) use free_page_and_swap_cache() as
their __tlb_remove_table() implementation. Move that common implementation
into mm/mmu_gather.c, guarded by the new CONFIG_MMU_GATHER_TABLE_FREE_COMMON
option, and have those architectures select it instead of each defining the
same function.

Signed-off-by: Nikita Yushchenko <nikita.yushchenko@virtuozzo.com>
---
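Note below the fold (not part of the commit message): for reference, the
pre-existing consumer of this hook in mm/mmu_gather.c is untouched by this
patch. Page-table pages queued via tlb_remove_table() are batched, and once
the batch is safe to free (after the TLB shootdown, or after an RCU grace
period under MMU_GATHER_RCU_TABLE_FREE), each entry is handed to
__tlb_remove_table(). Lightly annotated copy:

static void __tlb_remove_table_free(struct mmu_table_batch *batch)
{
	int i;

	/* Free each batched page-table page through the per-arch hook. */
	for (i = 0; i < batch->nr; i++)
		__tlb_remove_table(batch->tables[i]);

	/* The batch container itself is a plain page. */
	free_page((unsigned long)batch);
}
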
 arch/Kconfig                 |  3 +++
 arch/arm/Kconfig             |  1 +
 arch/arm/include/asm/tlb.h   |  5 -----
 arch/arm64/Kconfig           |  1 +
 arch/arm64/include/asm/tlb.h |  5 -----
 arch/x86/Kconfig             |  1 +
 arch/x86/include/asm/tlb.h   | 14 --------------
 include/asm-generic/tlb.h    |  5 +++++
 mm/mmu_gather.c              | 10 ++++++++++
 9 files changed, 21 insertions(+), 24 deletions(-)
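
An architecture whose page tables are not backed by whole pages (and so
cannot go through free_page_and_swap_cache()) keeps its own hook and simply
does not select the new option. A hypothetical sketch, assuming a
kmem_cache-backed page-table allocator; pgtable_cache below is illustrative
and not part of this patch:

/* some arch's asm/tlb.h, MMU_GATHER_TABLE_FREE_COMMON not selected */
extern struct kmem_cache *pgtable_cache;	/* hypothetical slab cache */

static inline void __tlb_remove_table(void *_table)
{
	/* Return the table to its slab cache, not the page allocator. */
	kmem_cache_free(pgtable_cache, _table);
}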

diff --git a/arch/Kconfig b/arch/Kconfig
index d3c4ab249e9c..9eba553cd86f 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -415,6 +415,9 @@ config HAVE_ARCH_JUMP_LABEL_RELATIVE
 config MMU_GATHER_TABLE_FREE
 	bool
 
+config MMU_GATHER_TABLE_FREE_COMMON
+	bool
+
 config MMU_GATHER_RCU_TABLE_FREE
 	bool
 	select MMU_GATHER_TABLE_FREE
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index c2724d986fa0..cc272e1ad12c 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -110,6 +110,7 @@ config ARM
 	select HAVE_PERF_REGS
 	select HAVE_PERF_USER_STACK_DUMP
 	select MMU_GATHER_RCU_TABLE_FREE if SMP && ARM_LPAE
+	select MMU_GATHER_TABLE_FREE_COMMON
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_RSEQ
 	select HAVE_STACKPROTECTOR
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index b8cbe03ad260..9d9b21649ca0 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -29,11 +29,6 @@
 #include <linux/swap.h>
 #include <asm/tlbflush.h>
 
-static inline void __tlb_remove_table(void *_table)
-{
-	free_page_and_swap_cache((struct page *)_table);
-}
-
 #include <asm-generic/tlb.h>
 
 static inline void
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index c4207cf9bb17..0f99f30d99f6 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -196,6 +196,7 @@ config ARM64
 	select HAVE_FUNCTION_ARG_ACCESS_API
 	select HAVE_FUTEX_CMPXCHG if FUTEX
 	select MMU_GATHER_RCU_TABLE_FREE
+	select MMU_GATHER_TABLE_FREE_COMMON
 	select HAVE_RSEQ
 	select HAVE_STACKPROTECTOR
 	select HAVE_SYSCALL_TRACEPOINTS
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index c995d1f4594f..401826260a5c 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -11,11 +11,6 @@
 #include <linux/pagemap.h>
 #include <linux/swap.h>
 
-static inline void __tlb_remove_table(void *_table)
-{
-	free_page_and_swap_cache((struct page *)_table);
-}
-
 #define tlb_flush tlb_flush
 static void tlb_flush(struct mmu_gather *tlb);
 
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 5c2ccb85f2ef..379d6832d3de 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -235,6 +235,7 @@ config X86
 	select HAVE_PERF_REGS
 	select HAVE_PERF_USER_STACK_DUMP
 	select MMU_GATHER_RCU_TABLE_FREE	if PARAVIRT
+	select MMU_GATHER_TABLE_FREE_COMMON
 	select HAVE_POSIX_CPU_TIMERS_TASK_WORK
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_RELIABLE_STACKTRACE		if X86_64 && (UNWINDER_FRAME_POINTER || UNWINDER_ORC) && STACK_VALIDATION
diff --git a/arch/x86/include/asm/tlb.h b/arch/x86/include/asm/tlb.h
index 1bfe979bb9bc..96e3b4f922c9 100644
--- a/arch/x86/include/asm/tlb.h
+++ b/arch/x86/include/asm/tlb.h
@@ -23,18 +23,4 @@ static inline void tlb_flush(struct mmu_gather *tlb)
 	flush_tlb_mm_range(tlb->mm, start, end, stride_shift, tlb->freed_tables);
 }
 
-/*
- * While x86 architecture in general requires an IPI to perform TLB
- * shootdown, enablement code for several hypervisors overrides
- * .flush_tlb_others hook in pv_mmu_ops and implements it by issuing
- * a hypercall. To keep software pagetable walkers safe in this case we
- * switch to RCU based table free (MMU_GATHER_RCU_TABLE_FREE). See the comment
- * below 'ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE' in include/asm-generic/tlb.h
- * for more details.
- */
-static inline void __tlb_remove_table(void *table)
-{
-	free_page_and_swap_cache(table);
-}
-
 #endif /* _ASM_X86_TLB_H */
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 2c68a545ffa7..877431da21cf 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -158,6 +158,11 @@
  * Useful if your architecture doesn't use IPIs for remote TLB invalidates
  * and therefore doesn't naturally serialize with software page-table walkers.
  *
+ * MMU_GATHER_TABLE_FREE_COMMON
+ *
+ * Provide a default implementation of __tlb_remove_table() based on
+ * free_page_and_swap_cache().
+ *
  * MMU_GATHER_NO_RANGE
  *
  * Use this if your architecture lacks an efficient flush_tlb_range().
diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c
index 1b9837419bf9..eb2f30a92462 100644
--- a/mm/mmu_gather.c
+++ b/mm/mmu_gather.c
@@ -93,6 +93,13 @@ bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_
 
 #ifdef CONFIG_MMU_GATHER_TABLE_FREE
 
+#ifdef CONFIG_MMU_GATHER_TABLE_FREE_COMMON
+static inline void __tlb_remove_table(void *table)
+{
+	free_page_and_swap_cache((struct page *)table);
+}
+#endif
+
 static void __tlb_remove_table_free(struct mmu_table_batch *batch)
 {
 	int i;
@@ -132,6 +139,9 @@ static void __tlb_remove_table_free(struct mmu_table_batch *batch)
  * pressure. To guarantee progress we fall back to single table freeing, see
  * the implementation of tlb_remove_table_one().
  *
+ * This is also used to keep software pagetable walkers safe when the
+ * architecture natively uses IPIs for TLB flushes, but hypervisor enablement
+ * code has replaced them with hypercalls.
  */
 
 static void tlb_remove_table_smp_sync(void *arg)
--
2.30.2