From: Anshuman Khandual <anshuman.khandual@arm.com>
Subject: [PATCH V3 12/16] arm64/cpufeature: Add remaining feature bits in ID_AA64MMFR0 register
Date: 2 May 2020
Enable the ECV, FGT, EXS, TGRAN4_2, TGRAN64_2 and TGRAN16_2 feature bits in
the ID_AA64MMFR0 register as per the ARM DDI 0487F.a specification.
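
As an illustrative sketch only, not part of this patch: once these fields are
described in ftr_id_aa64mmfr0[], a caller could test one of them through the
existing read_sanitised_ftr_reg() and cpuid_feature_extract_unsigned_field()
helpers. The wrapper name below is hypothetical and only shows the intended
usage of the new ID_AA64MMFR0_ECV_SHIFT definition:

	/* Hypothetical consumer, for illustration only. */
	#include <asm/cpufeature.h>
	#include <asm/sysreg.h>

	static bool system_has_ecv(void)
	{
		u64 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);

		/* A non-zero ECV field advertises Enhanced Counter Virtualization */
		return cpuid_feature_extract_unsigned_field(mmfr0,
						ID_AA64MMFR0_ECV_SHIFT) > 0;
	}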

Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Suzuki K Poulose <suzuki.poulose@arm.com>
Cc: linux-arm-kernel@lists.infradead.org
Cc: linux-kernel@vger.kernel.org

Suggested-by: Will Deacon <will@kernel.org>
Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
---
arch/arm64/include/asm/sysreg.h | 13 +++++++++++++
arch/arm64/kernel/cpufeature.c | 7 +++++++
2 files changed, 20 insertions(+)

diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index f1c0d874220a..fdf782de6723 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -699,6 +699,12 @@
#define ID_AA64ZFR0_SVEVER_SVE2 0x1

/* id_aa64mmfr0 */
+#define ID_AA64MMFR0_ECV_SHIFT 60
+#define ID_AA64MMFR0_FGT_SHIFT 56
+#define ID_AA64MMFR0_EXS_SHIFT 44
+#define ID_AA64MMFR0_TGRAN4_2_SHIFT 40
+#define ID_AA64MMFR0_TGRAN64_2_SHIFT 36
+#define ID_AA64MMFR0_TGRAN16_2_SHIFT 32
#define ID_AA64MMFR0_TGRAN4_SHIFT 28
#define ID_AA64MMFR0_TGRAN64_SHIFT 24
#define ID_AA64MMFR0_TGRAN16_SHIFT 20
@@ -840,6 +846,13 @@
#define ID_PFR1_SECURITY_SHIFT 4
#define ID_PFR1_PROGMOD_SHIFT 0

+#define ID_AA64MMFR0_TGRAN4_2_NI 0x1
+#define ID_AA64MMFR0_TGRAN4_2_SUPPORTED 0x2
+#define ID_AA64MMFR0_TGRAN64_2_NI 0x1
+#define ID_AA64MMFR0_TGRAN64_2_SUPPORTED 0x2
+#define ID_AA64MMFR0_TGRAN16_2_NI 0x1
+#define ID_AA64MMFR0_TGRAN16_2_SUPPORTED 0x2
+
#if defined(CONFIG_ARM64_4K_PAGES)
#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN4_SHIFT
#define ID_AA64MMFR0_TGRAN_SUPPORTED ID_AA64MMFR0_TGRAN4_SUPPORTED
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 181e09d62147..0286319d9b08 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -264,6 +264,10 @@ static const struct arm64_ftr_bits ftr_id_aa64zfr0[] = {
};

static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
+
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_ECV_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_FGT_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EXS_SHIFT, 4, 0),
/*
* We already refuse to boot CPUs that don't support our configured
* page size, so we can only detect mismatches for a page size other
@@ -271,6 +275,9 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
* exist in the wild so, even though we don't like it, we'll have to go
* along with it and treat them as non-strict.
*/
+ S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN4_2_SHIFT, 4, ID_AA64MMFR0_TGRAN4_2_NI),
+ S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN64_2_SHIFT, 4, ID_AA64MMFR0_TGRAN64_2_NI),
+ S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN16_2_SHIFT, 4, ID_AA64MMFR0_TGRAN16_2_NI),
S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI),
S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI),
--
2.20.1