Subject: [PATCH] arch/cacheflush: Introduce flush_all_caches()
With CXL security features, the need for global CPU cache flushing is
no longer specific to the nvdimm subsystem, or even to security_ops:
CXL will need the same semantics for features not necessarily limited
to persistent memory.

While the scope here is the physical address space, add a new
flush_all_caches() helper to the cacheflush headers so that each
architecture can define it when capable. For x86 just use the wbinvd
hammer, and leave every other architecture marked as not capable.
While these calls can incur performance penalties and delayed response
times, they are both rare and explicitly security related, so the cost
matters less.

Signed-off-by: Davidlohr Bueso <dave@stgolabs.net>
---

After a few iterations I circled back to an interface without granularity.
It just doesn't make sense right now to define a range when arm64 will not
support this (it won't do VA-based flushes of the physical address space)
and, until it comes up with consistent caches, security operations there
will simply be unsupported.

arch/x86/include/asm/cacheflush.h | 3 +++
drivers/acpi/nfit/intel.c | 41 ++++++++++++++-----------------
include/asm-generic/cacheflush.h | 22 +++++++++++++++++
3 files changed, 43 insertions(+), 23 deletions(-)

diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
index b192d917a6d0..ce2ec9556093 100644
--- a/arch/x86/include/asm/cacheflush.h
+++ b/arch/x86/include/asm/cacheflush.h
@@ -10,4 +10,7 @@

void clflush_cache_range(void *addr, unsigned int size);

+#define flush_all_caches() \
+	do { wbinvd_on_all_cpus(); } while (0)
+
#endif /* _ASM_X86_CACHEFLUSH_H */
diff --git a/drivers/acpi/nfit/intel.c b/drivers/acpi/nfit/intel.c
index 8dd792a55730..f2f6c31e6ab7 100644
--- a/drivers/acpi/nfit/intel.c
+++ b/drivers/acpi/nfit/intel.c
@@ -4,6 +4,7 @@
#include <linux/ndctl.h>
#include <linux/acpi.h>
#include <asm/smp.h>
+#include <linux/cacheflush.h>
#include "intel.h"
#include "nfit.h"

@@ -190,8 +191,6 @@ static int intel_security_change_key(struct nvdimm *nvdimm,
}
}

-static void nvdimm_invalidate_cache(void);
-
static int __maybe_unused intel_security_unlock(struct nvdimm *nvdimm,
const struct nvdimm_key_data *key_data)
{
@@ -210,6 +209,9 @@ static int __maybe_unused intel_security_unlock(struct nvdimm *nvdimm,
};
int rc;

+ if (!flush_all_caches_capable())
+ return -EINVAL;
+
if (!test_bit(NVDIMM_INTEL_UNLOCK_UNIT, &nfit_mem->dsm_mask))
return -ENOTTY;

@@ -228,7 +230,7 @@ static int __maybe_unused intel_security_unlock(struct nvdimm *nvdimm,
}

/* DIMM unlocked, invalidate all CPU caches before we read it */
- nvdimm_invalidate_cache();
+ flush_all_caches();

return 0;
}
@@ -294,11 +296,14 @@ static int __maybe_unused intel_security_erase(struct nvdimm *nvdimm,
},
};

+ if (!flush_all_caches_capable())
+ return -EINVAL;
+
if (!test_bit(cmd, &nfit_mem->dsm_mask))
return -ENOTTY;

/* flush all cache before we erase DIMM */
- nvdimm_invalidate_cache();
+ flush_all_caches();
memcpy(nd_cmd.cmd.passphrase, key->data,
sizeof(nd_cmd.cmd.passphrase));
rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
@@ -318,7 +323,7 @@ static int __maybe_unused intel_security_erase(struct nvdimm *nvdimm,
}

/* DIMM erased, invalidate all CPU caches before we read it */
- nvdimm_invalidate_cache();
+ flush_all_caches();
return 0;
}

@@ -338,6 +343,9 @@ static int __maybe_unused intel_security_query_overwrite(struct nvdimm *nvdimm)
},
};

+ if (!flush_all_caches_capable())
+ return -EINVAL;
+
if (!test_bit(NVDIMM_INTEL_QUERY_OVERWRITE, &nfit_mem->dsm_mask))
return -ENOTTY;

@@ -355,7 +363,7 @@ static int __maybe_unused intel_security_query_overwrite(struct nvdimm *nvdimm)
}

/* flush all cache before we make the nvdimms available */
- nvdimm_invalidate_cache();
+ flush_all_caches();
return 0;
}

@@ -377,11 +385,14 @@ static int __maybe_unused intel_security_overwrite(struct nvdimm *nvdimm,
},
};

+ if (!flush_all_caches_capable())
+ return -EINVAL;
+
if (!test_bit(NVDIMM_INTEL_OVERWRITE, &nfit_mem->dsm_mask))
return -ENOTTY;

/* flush all cache before we erase DIMM */
- nvdimm_invalidate_cache();
+ flush_all_caches();
memcpy(nd_cmd.cmd.passphrase, nkey->data,
sizeof(nd_cmd.cmd.passphrase));
rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
@@ -401,22 +412,6 @@ static int __maybe_unused intel_security_overwrite(struct nvdimm *nvdimm,
}
}

-/*
- * TODO: define a cross arch wbinvd equivalent when/if
- * NVDIMM_FAMILY_INTEL command support arrives on another arch.
- */
-#ifdef CONFIG_X86
-static void nvdimm_invalidate_cache(void)
-{
- wbinvd_on_all_cpus();
-}
-#else
-static void nvdimm_invalidate_cache(void)
-{
- WARN_ON_ONCE("cache invalidation required after unlock\n");
-}
-#endif
-
static const struct nvdimm_security_ops __intel_security_ops = {
.get_flags = intel_security_flags,
.freeze = intel_security_freeze,
diff --git a/include/asm-generic/cacheflush.h b/include/asm-generic/cacheflush.h
index 4f07afacbc23..f249142b4908 100644
--- a/include/asm-generic/cacheflush.h
+++ b/include/asm-generic/cacheflush.h
@@ -115,4 +115,26 @@ static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
memcpy(dst, src, len)
#endif

+/*
+ * Flush the entire CPU caches across all CPUs. This is
+ * considered a big hammer (latency and performance). Unlike
+ * the APIs above, this function can be defined on architectures
+ * which have VIPT or PIPT caches, and thus is beyond the scope
+ * of virtual to physical mappings/page tables changing.
+ *
+ * The limitation here is that the architectures that make use
+ * of it must actually be able to comply with the semantics,
+ * i.e. their caches must be in a consistent state. The caller
+ * can verify this capability early on.
+ */
+#ifndef flush_all_caches
+# define flush_all_caches_capable() false
+static inline void flush_all_caches(void)
+{
+	WARN_ONCE(1, "cache invalidation required\n");
+}
+#else
+# define flush_all_caches_capable() true
+#endif
+
#endif /* _ASM_GENERIC_CACHEFLUSH_H */
--
2.37.2