Subject: [tip:x86/mm] x86/mm/cpa: Better use CLFLUSHOPT
Commit-ID:  c38116bb940ae37f51fccd315b420ee5961dcb76
Gitweb: https://git.kernel.org/tip/c38116bb940ae37f51fccd315b420ee5961dcb76
Author: Peter Zijlstra <peterz@infradead.org>
AuthorDate: Mon, 3 Dec 2018 18:03:52 +0100
Committer: Ingo Molnar <mingo@kernel.org>
CommitDate: Mon, 17 Dec 2018 18:54:29 +0100

x86/mm/cpa: Better use CLFLUSHOPT

Currently we issue an MFENCE before and after flushing a range. This
means that if we flush a bunch of single page ranges -- like with the
cpa array -- we issue a whole bunch of superfluous MFENCEs.

Reorganize the code a little to avoid this.
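
As an illustration, the change amounts to hoisting the fences out of
the per-range path. A minimal sketch, not the patched code itself;
flush_one_range() is a hypothetical stand-in for the CLFLUSHOPT loop:

	/* Before: every flushed range pays for its own MFENCE pair. */
	for (i = 0; i < numpages; i++) {
		mb();				/* MFENCE */
		flush_one_range(addr[i], PAGE_SIZE);
		mb();				/* MFENCE */
	}

	/* After: one MFENCE pair brackets the whole batch. */
	mb();
	for (i = 0; i < numpages; i++)
		flush_one_range(addr[i], PAGE_SIZE);
	mb();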

[ mingo: capitalize instructions, tweak changelog and comments. ]

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@surriel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tom.StDenis@amd.com
Cc: dave.hansen@intel.com
Link: http://lkml.kernel.org/r/20181203171043.626999883@infradead.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
---
arch/x86/mm/pageattr.c | 29 +++++++++++++++++------------
1 file changed, 17 insertions(+), 12 deletions(-)

diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 85ef53b86fa0..7d05149995dc 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -251,15 +251,7 @@ static unsigned long __cpa_addr(struct cpa_data *cpa, unsigned long idx)
* Flushing functions
*/

-/**
- * clflush_cache_range - flush a cache range with clflush
- * @vaddr: virtual start address
- * @size: number of bytes to flush
- *
- * clflushopt is an unordered instruction which needs fencing with mfence or
- * sfence to avoid ordering issues.
- */
-void clflush_cache_range(void *vaddr, unsigned int size)
+static void clflush_cache_range_opt(void *vaddr, unsigned int size)
{
const unsigned long clflush_size = boot_cpu_data.x86_clflush_size;
void *p = (void *)((unsigned long)vaddr & ~(clflush_size - 1));
@@ -268,11 +260,22 @@ void clflush_cache_range(void *vaddr, unsigned int size)
if (p >= vend)
return;

- mb();
-
for (; p < vend; p += clflush_size)
clflushopt(p);
+}

+/**
+ * clflush_cache_range - flush a cache range with clflush
+ * @vaddr: virtual start address
+ * @size: number of bytes to flush
+ *
+ * CLFLUSHOPT is an unordered instruction which needs fencing with MFENCE or
+ * SFENCE to avoid ordering issues.
+ */
+void clflush_cache_range(void *vaddr, unsigned int size)
+{
+ mb();
+ clflush_cache_range_opt(vaddr, size);
mb();
}
EXPORT_SYMBOL_GPL(clflush_cache_range);
@@ -333,6 +336,7 @@ static void cpa_flush(struct cpa_data *data, int cache)
if (!cache)
return;

+ mb();
for (i = 0; i < cpa->numpages; i++) {
unsigned long addr = __cpa_addr(cpa, i);
unsigned int level;
@@ -343,8 +347,9 @@ static void cpa_flush(struct cpa_data *data, int cache)
* Only flush present addresses:
*/
if (pte && (pte_val(*pte) & _PAGE_PRESENT))
- clflush_cache_range((void *)addr, PAGE_SIZE);
+ clflush_cache_range_opt((void *)addr, PAGE_SIZE);
}
+ mb();
}

static bool overlaps(unsigned long r1_start, unsigned long r1_end,
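
The exported clflush_cache_range() keeps its old, fully fenced
semantics for existing callers. A minimal usage sketch (the helper
and its buffer are hypothetical, not part of this patch):

	/*
	 * Write a buffer back to memory before a device reads it.
	 * clflush_cache_range() still brackets the CLFLUSHOPT loop
	 * with an MFENCE on each side.
	 */
	static void flush_dma_buffer(void *buf, unsigned int len)
	{
		clflush_cache_range(buf, len);
	}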