From: Jeremy Linton <jeremy.linton@arm.com>
Subject: [PATCH v3 6/7] arm64: add sysfs vulnerability show for speculative store bypass
Date: Wed, 9 Jan 2019 17:55:43 -0600
Return status based on ssbd_state and the arm64 SSBS feature. If the mitigation is disabled, or the firmware isn't responding, then return the expected machine state based on a new blacklist of known vulnerable cores.
Signed-off-by: Jeremy Linton <jeremy.linton@arm.com>
---
 arch/arm64/kernel/cpu_errata.c | 48 ++++++++++++++++++++++++++++++++++
 1 file changed, 48 insertions(+)
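As a quick illustration of how the new attribute is consumed, here is a minimal userspace sketch, assuming the driver core exposes it at the usual /sys/devices/system/cpu/vulnerabilities/spec_store_bypass path; the strings it can print are exactly those returned by cpu_show_spec_store_bypass() in the diff below.

	/* Illustrative reader for the attribute added by this patch. */
	#include <stdio.h>

	int main(void)
	{
		static const char *path =
			"/sys/devices/system/cpu/vulnerabilities/spec_store_bypass";
		char line[128];
		FILE *f = fopen(path, "r");

		if (!f) {
			/* attribute not present (older kernel or sysfs disabled) */
			perror("spec_store_bypass");
			return 1;
		}
		/* prints "Not affected", "Vulnerable", or the mitigation string */
		if (fgets(line, sizeof(line), f))
			fputs(line, stdout);
		fclose(f);
		return 0;
	}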
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index ee286d606d9b..c8ff96158b94 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -288,6 +288,7 @@ enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry)
 DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
 
 int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
+static bool __ssb_safe = true;
 
 static const struct ssbd_options {
 	const char *str;
@@ -385,10 +386,18 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
 {
 	struct arm_smccc_res res;
 	bool required = true;
+	bool is_vul;
 	s32 val;
 
 	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
 
+	is_vul = is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list);
+
+	if (is_vul)
+		__ssb_safe = false;
+
+	arm64_requested_vuln_attrs |= VULN_SSB;
+
 	if (this_cpu_has_cap(ARM64_SSBS)) {
 		required = false;
 		goto out_printmsg;
@@ -422,6 +431,7 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
 		ssbd_state = ARM64_SSBD_UNKNOWN;
 		return false;
 
+	/* machines with mixed mitigation requirements must not return this */
 	case SMCCC_RET_NOT_REQUIRED:
 		pr_info_once("%s mitigation not required\n", entry->desc);
 		ssbd_state = ARM64_SSBD_MITIGATED;
@@ -476,6 +486,17 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
 	return required;
 }
 
+
+/* known vulnerable cores */
+static const struct midr_range arm64_ssb_cpus[] = {
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
+	{},
+};
+
 #endif /* CONFIG_ARM64_SSBD */
 
 static void __maybe_unused
@@ -762,6 +783,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 		.capability = ARM64_SSBD,
 		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
 		.matches = has_ssbd_mitigation,
+		.midr_range_list = arm64_ssb_cpus,
 	},
 #endif
 #ifdef CONFIG_ARM64_ERRATUM_1188873
@@ -809,4 +831,30 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
 	return sprintf(buf, "Vulnerable\n");
 }
 
+ssize_t cpu_show_spec_store_bypass(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	/*
+	 * Two assumptions: first, arm64_get_ssbd_state() reflects the worst
+	 * case for heterogeneous machines; second, if SSBS is supported,
+	 * it is supported by all cores.
+	 */
+	switch (arm64_get_ssbd_state()) {
+	case ARM64_SSBD_MITIGATED:
+		return sprintf(buf, "Not affected\n");
+
+	case ARM64_SSBD_KERNEL:
+	case ARM64_SSBD_FORCE_ENABLE:
+		if (cpus_have_cap(ARM64_SSBS))
+			return sprintf(buf, "Not affected\n");
+		return sprintf(buf,
+			"Mitigation: Speculative Store Bypass disabled\n");
+	}
+
+	if (__ssb_safe)
+		return sprintf(buf, "Not affected\n");
+
+	return sprintf(buf, "Vulnerable\n");
+}
+
 #endif
-- 
2.17.2
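For readers following the logic outside the tree, here is a self-contained sketch of the reporting decision above. The enum values and the three inputs are stand-ins for the kernel's arm64_get_ssbd_state(), cpus_have_cap(ARM64_SSBS) and __ssb_safe; it illustrates the worst-case-state-plus-blacklist fallback, not the kernel code itself.

	#include <stdio.h>
	#include <stdbool.h>

	/* Stand-ins for the kernel's ssbd_state values. */
	enum ssbd_state {
		SSBD_FORCE_DISABLE,
		SSBD_KERNEL,
		SSBD_FORCE_ENABLE,
		SSBD_MITIGATED,
		SSBD_UNKNOWN,
	};

	/*
	 * Mirrors cpu_show_spec_store_bypass(): the firmware/command-line
	 * state is checked first, SSBS short-circuits to "Not affected",
	 * and only the remaining states (FORCE_DISABLE, UNKNOWN) fall
	 * through to the MIDR blacklist result carried in ssb_safe.
	 */
	static const char *ssb_status(enum ssbd_state state, bool have_ssbs,
				      bool ssb_safe)
	{
		switch (state) {
		case SSBD_MITIGATED:
			return "Not affected";
		case SSBD_KERNEL:
		case SSBD_FORCE_ENABLE:
			if (have_ssbs)
				return "Not affected";
			return "Mitigation: Speculative Store Bypass disabled";
		default:
			break;
		}

		return ssb_safe ? "Not affected" : "Vulnerable";
	}

	int main(void)
	{
		/* e.g. a blacklisted core with the mitigation forced off */
		printf("%s\n", ssb_status(SSBD_FORCE_DISABLE, false, false));
		return 0;
	}

Note that SSBD_FORCE_DISABLE and SSBD_UNKNOWN deliberately fall through the switch, which is what makes the new arm64_ssb_cpus[] blacklist decide the answer when the mitigation is off or the firmware gives no usable reply.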