    Subject: Re: [RFC V3 13/13] KVM: arm64: Enable FEAT_LPA2 based 52 bits IPA size on 4K and 16K
    On Thu, 30 Sep 2021 11:35:16 +0100,
    Anshuman Khandual <anshuman.khandual@arm.com> wrote:
    >
    > Stage-2 FEAT_LPA2 support is independent of, and orthogonal to, FEAT_LPA2
    > support in Stage-1 or in the host kernel. Stage-2 IPA range support
    > is evaluated from the platform via ID_AA64MMFR0_TGRAN_2_SUPPORTED_LPA2 and
    > gets enabled regardless of Stage-1 translation.
    >
    > Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
    > ---
    > arch/arm64/include/asm/kvm_pgtable.h | 10 +++++++++-
    > arch/arm64/kvm/hyp/pgtable.c         | 25 +++++++++++++++++++++++--
    > arch/arm64/kvm/reset.c               | 14 ++++++++++----
    > 3 files changed, 42 insertions(+), 7 deletions(-)
    >
    > diff --git a/arch/arm64/include/asm/kvm_pgtable.h b/arch/arm64/include/asm/kvm_pgtable.h
    > index 0277838..78a9d12 100644
    > --- a/arch/arm64/include/asm/kvm_pgtable.h
    > +++ b/arch/arm64/include/asm/kvm_pgtable.h
    > @@ -29,18 +29,26 @@ typedef u64 kvm_pte_t;
    >
    > #define KVM_PTE_ADDR_MASK GENMASK(47, PAGE_SHIFT)
    > #define KVM_PTE_ADDR_51_48 GENMASK(15, 12)
    > +#define KVM_PTE_ADDR_51_50 GENMASK(9, 8)
    >
    > static inline bool kvm_pte_valid(kvm_pte_t pte)
    > {
    > return pte & KVM_PTE_VALID;
    > }
    >
    > +void set_kvm_lpa2_enabled(void);
    > +bool get_kvm_lpa2_enabled(void);
    > +
    > static inline u64 kvm_pte_to_phys(kvm_pte_t pte)
    > {
    > u64 pa = pte & KVM_PTE_ADDR_MASK;
    >
    > - if (PAGE_SHIFT == 16)
    > + if (PAGE_SHIFT == 16) {
    > pa |= FIELD_GET(KVM_PTE_ADDR_51_48, pte) << 48;
    > + } else {
    > + if (get_kvm_lpa2_enabled())

    Having to do a function call just for this test seems bad, especially
    for something that is used so often on the fault path.

    Why can't this be made a normal capability that indicates LPA support
    for the current page size?
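
    Something like the sketch below is what I'm thinking of; completely
    untested, and ARM64_HAS_LPA2 is an invented capability name for the
    sake of the example. The test then compiles down to a patched static
    branch rather than a call into hyp/pgtable.c:

        static inline bool kvm_lpa2_enabled(void)
        {
                /* Assumed capability, detected once at boot in cpufeature.c */
                return cpus_have_final_cap(ARM64_HAS_LPA2);
        }

        static inline u64 kvm_pte_to_phys(kvm_pte_t pte)
        {
                u64 pa = pte & KVM_PTE_ADDR_MASK;

                if (PAGE_SHIFT == 16)
                        pa |= FIELD_GET(KVM_PTE_ADDR_51_48, pte) << 48;
                else if (kvm_lpa2_enabled())
                        pa |= FIELD_GET(KVM_PTE_ADDR_51_50, pte) << 50;

                return pa;
        }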

    > + pa |= FIELD_GET(KVM_PTE_ADDR_51_50, pte) << 50;

    Where are bits 48 and 49?
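
    For the record, with FEAT_LPA2 on 4K/16K the descriptor carries
    OA[49:48] in place, in bits [49:48], so the address mask itself has
    to grow to cover bit 49; only OA[51:50] are relocated down to bits
    [9:8]. Roughly this, with KVM_PTE_ADDR_MASK_LPA2 being an invented
    name:

        /* LPA2 keeps OA[49:48] in place; only OA[51:50] move to [9:8] */
        #define KVM_PTE_ADDR_MASK_LPA2 GENMASK(49, PAGE_SHIFT)

                pa = pte & KVM_PTE_ADDR_MASK_LPA2;
                pa |= FIELD_GET(KVM_PTE_ADDR_51_50, pte) << 50;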

    > + }
    >
    > return pa;
    > }
    > diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
    > index f8ceebe..58141bf 100644
    > --- a/arch/arm64/kvm/hyp/pgtable.c
    > +++ b/arch/arm64/kvm/hyp/pgtable.c
    > @@ -49,6 +49,18 @@
    > #define KVM_INVALID_PTE_OWNER_MASK GENMASK(9, 2)
    > #define KVM_MAX_OWNER_ID 1
    >
    > +static bool kvm_lpa2_enabled;
    > +
    > +bool get_kvm_lpa2_enabled(void)
    > +{
    > + return kvm_lpa2_enabled;
    > +}
    > +
    > +void set_kvm_lpa2_enabled(void)
    > +{
    > + kvm_lpa2_enabled = true;
    > +}
    > +
    > struct kvm_pgtable_walk_data {
    > struct kvm_pgtable *pgt;
    > struct kvm_pgtable_walker *walker;
    > @@ -126,8 +138,12 @@ static kvm_pte_t kvm_phys_to_pte(u64 pa)
    > {
    > kvm_pte_t pte = pa & KVM_PTE_ADDR_MASK;
    >
    > - if (PAGE_SHIFT == 16)
    > + if (PAGE_SHIFT == 16) {
    > pte |= FIELD_PREP(KVM_PTE_ADDR_51_48, pa >> 48);
    > + } else {
    > + if (get_kvm_lpa2_enabled())
    > + pte |= FIELD_PREP(KVM_PTE_ADDR_51_50, pa >> 50);
    > + }
    >
    > return pte;
    > }
    > @@ -540,6 +556,9 @@ u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift)
    > */
    > vtcr |= VTCR_EL2_HA;
    >
    > + if (get_kvm_lpa2_enabled())
    > + vtcr |= VTCR_EL2_DS;
    > +
    > /* Set the vmid bits */
    > vtcr |= (get_vmid_bits(mmfr1) == 16) ?
    > VTCR_EL2_VS_16BIT :
    > @@ -577,7 +596,9 @@ static int stage2_set_prot_attr(struct kvm_pgtable *pgt, enum kvm_pgtable_prot p
    > if (prot & KVM_PGTABLE_PROT_W)
    > attr |= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W;
    >
    > - attr |= FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S2_SH, sh);
    > + if (!get_kvm_lpa2_enabled())
    > + attr |= FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S2_SH, sh);
    > +
    > attr |= KVM_PTE_LEAF_ATTR_LO_S2_AF;
    > attr |= prot & KVM_PTE_LEAF_ATTR_HI_SW;
    > *ptep = attr;
    > diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
    > index 5ce36b0..97ec387 100644
    > --- a/arch/arm64/kvm/reset.c
    > +++ b/arch/arm64/kvm/reset.c
    > @@ -315,26 +315,32 @@ u32 get_kvm_ipa_limit(void)
    >
    > int kvm_set_ipa_limit(void)
    > {
    > - unsigned int parange;
    > + unsigned int parange, tgran;
    > u64 mmfr0;
    >
    > mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
    > parange = cpuid_feature_extract_unsigned_field(mmfr0,
    > ID_AA64MMFR0_PARANGE_SHIFT);
    > + tgran = cpuid_feature_extract_unsigned_field(mmfr0,
    > + ID_AA64MMFR0_TGRAN_2_SHIFT);
    > /*
    > * IPA size beyond 48 bits could not be supported
    > * on either 4K or 16K page size. Hence let's cap
    > * it to 48 bits, in case it's reported as larger
    > * on the system.

    Shouldn't you fix this comment?
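
    It should presumably say that the 48-bit cap only applies in the
    absence of LPA2; something along these lines:

        /*
         * Without FEAT_LPA2, IPA sizes beyond 48 bits cannot be
         * supported on either 4K or 16K page sizes. Hence cap the
         * IPA size to 48 bits in that case, should the platform
         * report a larger value.
         */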

    > */
    > - if (PAGE_SIZE != SZ_64K)
    > - parange = min(parange, (unsigned int)ID_AA64MMFR0_PARANGE_48);
    > + if (PAGE_SIZE != SZ_64K) {
    > + if (tgran == ID_AA64MMFR0_TGRAN_2_SUPPORTED_LPA2)
    > + set_kvm_lpa2_enabled();
    > + else
    > + parange = min(parange, (unsigned int)ID_AA64MMFR0_PARANGE_48);
    > + }
    >
    > /*
    > * Check with ARMv8.5-GTG that our PAGE_SIZE is supported at
    > * Stage-2. If not, things will stop very quickly.
    > */
    > - switch (cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_TGRAN_2_SHIFT)) {
    > + switch (tgran) {
    > case ID_AA64MMFR0_TGRAN_2_SUPPORTED_NONE:
    > kvm_err("PAGE_SIZE not supported at Stage-2, giving up\n");
    > return -EINVAL;

    Another thing I don't see is how you manage TLB invalidation by level
    now that we gain a level 0 at 4kB, breaking the current assumptions
    encoded in __tlbi_level().
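
    As a reminder, this is what __tlbi_level() effectively does today
    (abridged from arch/arm64/include/asm/tlbflush.h and rewritten as a
    function for readability; tlbi_encode_ttl is my name for it): a zero
    level is treated as "no hint at all", while with LPA2 a level-0 hint
    becomes a valid encoding for the 4kB granule.

        /* What the current __tlbi_level() macro does with the address */
        static inline u64 tlbi_encode_ttl(u64 addr, u32 level)
        {
                u64 arg = addr;

                /* level == 0 is assumed to mean "no level hint at all" */
                if (cpus_have_const_cap(ARM64_HAS_ARMv8_4_TTL) && level) {
                        u64 ttl = (level & 3) | (get_trans_granule() << 2);

                        arg &= ~TLBI_TTL_MASK;
                        arg |= FIELD_PREP(TLBI_TTL_MASK, ttl);
                }

                return arg;
        }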

    M.

    --
    Without deviation from the norm, progress is not possible.
