Subject: [RFC PATCH 5/6] KVM: arm64: Optimize the stage2 map path with TLBI range instructions
From: Raghavendra Rao Ananta <rananta@google.com>

Currently, when the map path of the stage-2 page-table coalesces a
bunch of pages into a hugepage, KVM invalidates the entire VM's TLB
entries. This causes a performance penalty for the guest, since
mappings that were coalesced earlier have their TLB entries
invalidated as well and must be refilled unnecessarily.

Hence, if the system supports it, use __kvm_tlb_flush_range_vmid_ipa()
to flush only the range of pages that have been combined into
a hugepage, while leaving other TLB entries alone.
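For illustration, the range passed to the helper is simply the IPA
span covered by the table entry being collapsed: [addr, addr +
kvm_granule_size(level)). Below is a standalone sketch of that
arithmetic, assuming a 4KB granule; granule_shift()/granule_size()
imitate the kernel's kvm_granule_size() so the example compiles on
its own:

#include <stdint.h>
#include <stdio.h>

/*
 * IPA bits covered by one entry at a given page-table level, for a
 * 4KB granule with 4 levels (mirrors ARM64_HW_PGTABLE_LEVEL_SHIFT()).
 */
static unsigned int granule_shift(unsigned int level)
{
	return (12 - 3) * (4 - level) + 3;
}

static uint64_t granule_size(unsigned int level)
{
	return 1ULL << granule_shift(level);
}

int main(void)
{
	/*
	 * Collapsing a level-2 table entry into a 2MB block: the stale
	 * leaf entries are level-3 (4KB) mappings spanning exactly one
	 * level-2 granule, so only that 2MB window needs invalidating.
	 */
	uint64_t addr = 0x40200000ULL;	/* hypothetical IPA */
	unsigned int level = 2;
	uint64_t end = addr + granule_size(level);

	printf("flush IPA range [0x%llx, 0x%llx), TTL hint: level %u\n",
	       (unsigned long long)addr, (unsigned long long)end,
	       level + 1);
	return 0;
}

(Compiled standalone, this prints "flush IPA range [0x40200000,
0x40400000), TTL hint: level 3".)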

Signed-off-by: Raghavendra Rao Ananta <rananta@google.com>
---
arch/arm64/kvm/hyp/pgtable.c | 29 +++++++++++++++++++++++++----
1 file changed, 25 insertions(+), 4 deletions(-)

diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
index b11cf2c618a6c..099032bb01bce 100644
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -686,6 +686,22 @@ static bool stage2_try_set_pte(const struct kvm_pgtable_visit_ctx *ctx, kvm_pte_
 	return cmpxchg(ctx->ptep, ctx->old, new) == ctx->old;
 }

+static void kvm_table_pte_flush(struct kvm_s2_mmu *mmu, u64 addr, u32 level, u32 tlb_level)
+{
+	if (system_supports_tlb_range()) {
+		u64 end = addr + kvm_granule_size(level);
+
+		kvm_call_hyp(__kvm_tlb_flush_range_vmid_ipa, mmu, addr, end, tlb_level);
+	} else {
+		/*
+		 * Invalidate the whole stage-2, as we may have numerous leaf
+		 * entries below us which would otherwise need invalidating
+		 * individually.
+		 */
+		kvm_call_hyp(__kvm_tlb_flush_vmid, mmu);
+	}
+}
+
 /**
  * stage2_try_break_pte() - Invalidates a pte according to the
  *			    'break-before-make' requirements of the
@@ -693,6 +709,7 @@ static bool stage2_try_set_pte(const struct kvm_pgtable_visit_ctx *ctx, kvm_pte_
  *
  * @ctx: context of the visited pte.
  * @mmu: stage-2 mmu
+ * @tlb_level: The level at which the leaf pages are expected (for FEAT_TTL hint)
  *
  * Returns: true if the pte was successfully broken.
  *
@@ -701,7 +718,7 @@ static bool stage2_try_set_pte(const struct kvm_pgtable_visit_ctx *ctx, kvm_pte_
  * on the containing table page.
  */
 static bool stage2_try_break_pte(const struct kvm_pgtable_visit_ctx *ctx,
-				 struct kvm_s2_mmu *mmu)
+				 struct kvm_s2_mmu *mmu, u32 tlb_level)
 {
 	struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;

@@ -722,7 +739,7 @@ static bool stage2_try_break_pte(const struct kvm_pgtable_visit_ctx *ctx,
 	 * value (if any).
 	 */
 	if (kvm_pte_table(ctx->old, ctx->level))
-		kvm_call_hyp(__kvm_tlb_flush_vmid, mmu);
+		kvm_table_pte_flush(mmu, ctx->addr, ctx->level, tlb_level);
 	else if (kvm_pte_valid(ctx->old))
 		kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ctx->addr, ctx->level);

@@ -804,7 +821,7 @@ static int stage2_map_walker_try_leaf(const struct kvm_pgtable_visit_ctx *ctx,
 	if (!stage2_pte_needs_update(ctx->old, new))
 		return -EAGAIN;
 
-	if (!stage2_try_break_pte(ctx, data->mmu))
+	if (!stage2_try_break_pte(ctx, data->mmu, ctx->level))
 		return -EAGAIN;
 
 	/* Perform CMOs before installation of the guest stage-2 PTE */
@@ -861,7 +878,11 @@ static int stage2_map_walk_leaf(const struct kvm_pgtable_visit_ctx *ctx,
 	if (!childp)
 		return -ENOMEM;
 
-	if (!stage2_try_break_pte(ctx, data->mmu)) {
+	/*
+	 * As the table will be replaced with a block, the page entries
+	 * that it currently holds sit one level down.
+	 */
+	if (!stage2_try_break_pte(ctx, data->mmu, ctx->level + 1)) {
 		mm_ops->put_page(childp);
 		return -EAGAIN;
 	}
--
2.39.0.314.g84b9a713c41-goog
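
A closing note on the tlb_level choice in the two hunks above: the
TTL hint names the level of the stale *leaf* entries, which is the
broken PTE's own level when an existing leaf is replaced, and one
level deeper when a table is collapsed into a block. A standalone
sketch of that distinction (the walker context and values here are
made up for illustration; only the level arithmetic follows the
patch):

#include <stdio.h>

/* Minimal stand-in for the walker context (illustrative only). */
struct walk_ctx {
	unsigned int level;	/* level of the PTE being broken */
	int was_table;		/* non-zero if the old PTE was a table */
};

/*
 * TTL hint for the invalidation: replacing a leaf hints at its own
 * level; collapsing a table into a block hints one level down, where
 * the pages held by the table live.
 */
static unsigned int tlb_level_hint(const struct walk_ctx *ctx)
{
	return ctx->was_table ? ctx->level + 1 : ctx->level;
}

int main(void)
{
	struct walk_ctx leaf_replace   = { .level = 2, .was_table = 0 };
	struct walk_ctx table_collapse = { .level = 2, .was_table = 1 };

	/* stage2_map_walker_try_leaf() case: hint is 2 (the block). */
	printf("leaf replace:   TTL hint = %u\n",
	       tlb_level_hint(&leaf_replace));
	/* stage2_map_walk_leaf() case: hint is 3 (4KB pages below). */
	printf("table collapse: TTL hint = %u\n",
	       tlb_level_hint(&table_collapse));
	return 0;
}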