From: Paolo Bonzini <pbonzini@redhat.com>
Subject: [PATCH v2 13/18] KVM: x86: reset and reinitialize the MMU in __set_sregs_common
Date: 17 Feb 2022
Do a full unload of the MMU in KVM_SET_SREGS and KVM_SET_SREGS2, in
preparation for not doing so in kvm_mmu_reset_context. There is no
need to delay the reset until after the return, so do it directly in
the __set_sregs_common function and remove the mmu_reset_needed output
parameter.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
arch/x86/kvm/x86.c | 32 +++++++++++++-------------------
1 file changed, 13 insertions(+), 19 deletions(-)
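
(Not part of the patch, just context for review: a minimal userspace sketch of
the ioctl path this touches.  KVM_SET_SREGS on a vCPU fd ends up in
__set_sregs() -> __set_sregs_common(), which after this patch unloads and
reinitializes the MMU directly whenever EFER/CR0/CR3/CR4 change, instead of
returning an mmu_reset_needed flag to the caller.  The CR4 bit toggled below
is an arbitrary example.)

#include <fcntl.h>
#include <linux/kvm.h>
#include <stdio.h>
#include <sys/ioctl.h>

int main(void)
{
	struct kvm_sregs sregs;
	int kvm, vm, vcpu;

	kvm = open("/dev/kvm", O_RDWR);
	if (kvm < 0) {
		perror("/dev/kvm");
		return 1;
	}
	vm = ioctl(kvm, KVM_CREATE_VM, 0);
	vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);
	if (vcpu < 0) {
		perror("KVM_CREATE_VCPU");
		return 1;
	}

	/* Read the current special registers of the freshly created vCPU. */
	ioctl(vcpu, KVM_GET_SREGS, &sregs);

	/*
	 * Change a paging-related control bit; CR4.PAE is used here only as
	 * an example of state whose change now forces the unload + reinit.
	 */
	sregs.cr4 ^= 0x20;	/* X86_CR4_PAE */

	/* KVM_SET_SREGS is what reaches __set_sregs() in the kernel. */
	if (ioctl(vcpu, KVM_SET_SREGS, &sregs) < 0)
		perror("KVM_SET_SREGS");

	return 0;
}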

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 6aefd7ac7039..f10878aa5b20 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -10730,7 +10730,7 @@ static bool kvm_is_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
 }
 
 static int __set_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs,
-			      int *mmu_reset_needed, bool update_pdptrs)
+			      int update_pdptrs)
 {
 	struct msr_data apic_base_msr;
 	int idx;
@@ -10755,29 +10755,31 @@ static int __set_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs,
 	static_call(kvm_x86_set_gdt)(vcpu, &dt);
 
 	vcpu->arch.cr2 = sregs->cr2;
-	*mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3;
+
+	if (vcpu->arch.efer != sregs->efer ||
+	    kvm_read_cr0(vcpu) != sregs->cr0 ||
+	    vcpu->arch.cr3 != sregs->cr3 || !update_pdptrs ||
+	    kvm_read_cr4(vcpu) != sregs->cr4)
+		kvm_mmu_unload(vcpu);
+
 	vcpu->arch.cr3 = sregs->cr3;
 	kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3);
 	static_call_cond(kvm_x86_post_set_cr3)(vcpu, sregs->cr3);
 
 	kvm_set_cr8(vcpu, sregs->cr8);
 
-	*mmu_reset_needed |= vcpu->arch.efer != sregs->efer;
 	static_call(kvm_x86_set_efer)(vcpu, sregs->efer);
 
-	*mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0;
 	static_call(kvm_x86_set_cr0)(vcpu, sregs->cr0);
 	vcpu->arch.cr0 = sregs->cr0;
 
-	*mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
 	static_call(kvm_x86_set_cr4)(vcpu, sregs->cr4);
 
+	kvm_init_mmu(vcpu);
 	if (update_pdptrs) {
 		idx = srcu_read_lock(&vcpu->kvm->srcu);
-		if (is_pae_paging(vcpu)) {
+		if (is_pae_paging(vcpu))
 			load_pdptrs(vcpu, kvm_read_cr3(vcpu));
-			*mmu_reset_needed = 1;
-		}
 		srcu_read_unlock(&vcpu->kvm->srcu, idx);
 	}
 
@@ -10805,15 +10807,11 @@ static int __set_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs,
 static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
 {
 	int pending_vec, max_bits;
-	int mmu_reset_needed = 0;
-	int ret = __set_sregs_common(vcpu, sregs, &mmu_reset_needed, true);
+	int ret = __set_sregs_common(vcpu, sregs, true);
 
 	if (ret)
 		return ret;
 
-	if (mmu_reset_needed)
-		kvm_mmu_reset_context(vcpu);
-
 	max_bits = KVM_NR_INTERRUPTS;
 	pending_vec = find_first_bit(
 		(const unsigned long *)sregs->interrupt_bitmap, max_bits);
@@ -10828,7 +10826,6 @@ static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
 
 static int __set_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2)
 {
-	int mmu_reset_needed = 0;
 	bool valid_pdptrs = sregs2->flags & KVM_SREGS2_FLAGS_PDPTRS_VALID;
 	bool pae = (sregs2->cr0 & X86_CR0_PG) && (sregs2->cr4 & X86_CR4_PAE) &&
 		!(sregs2->efer & EFER_LMA);
@@ -10840,8 +10837,7 @@ static int __set_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2)
 	if (valid_pdptrs && (!pae || vcpu->arch.guest_state_protected))
 		return -EINVAL;
 
-	ret = __set_sregs_common(vcpu, (struct kvm_sregs *)sregs2,
-				 &mmu_reset_needed, !valid_pdptrs);
+	ret = __set_sregs_common(vcpu, (struct kvm_sregs *)sregs2, !valid_pdptrs);
 	if (ret)
 		return ret;
 
@@ -10850,11 +10846,9 @@ static int __set_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2)
 			kvm_pdptr_write(vcpu, i, sregs2->pdptrs[i]);
 
 		kvm_register_mark_dirty(vcpu, VCPU_EXREG_PDPTR);
-		mmu_reset_needed = 1;
 		vcpu->arch.pdptrs_from_userspace = true;
+		/* kvm_mmu_reload will be called on the next entry. */
 	}
-	if (mmu_reset_needed)
-		kvm_mmu_reset_context(vcpu);
 	return 0;
 }

--
2.31.1
