Subject: [PATCH 4.17 60/97] x86/KVM/VMX: Extend add_atomic_switch_msr() to allow VMENTER only MSRs
4.17-stable review patch.  If anyone has any objections, please let me know.

------------------

From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>

commit 989e3992d2eca32c3f1404f2bc91acda3aa122d8 upstream

The IA32_FLUSH_CMD MSR needs only to be written on VMENTER. Extend
add_atomic_switch_msr() with an entry_only parameter to allow storing the
MSR only in the guest (ENTRY) MSR array.

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
arch/x86/kvm/vmx.c | 22 ++++++++++++++--------
1 file changed, 14 insertions(+), 8 deletions(-)
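
A minimal usage sketch (not part of this patch; MSR_IA32_FLUSH_CMD and
L1D_FLUSH are assumed from later patches in this L1TF series): on VMENTER
the CPU loads MSRs from the guest autoload area (counted by
VM_ENTRY_MSR_LOAD_COUNT), and on VMEXIT it restores MSRs from the host
area (VM_EXIT_MSR_LOAD_COUNT). Passing entry_only == true fills only the
guest slot and leaves host_val unused, so a VMENTER-only write would look
roughly like:

	add_atomic_switch_msr(vmx, MSR_IA32_FLUSH_CMD, L1D_FLUSH, 0, true);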

--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2300,9 +2300,9 @@ static void add_atomic_switch_msr_specia
}

static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
- u64 guest_val, u64 host_val)
+ u64 guest_val, u64 host_val, bool entry_only)
{
- int i, j;
+ int i, j = 0;
struct msr_autoload *m = &vmx->msr_autoload;

switch (msr) {
@@ -2338,7 +2338,9 @@ static void add_atomic_switch_msr(struct
}

i = find_msr(&m->guest, msr);
- j = find_msr(&m->host, msr);
+ if (!entry_only)
+ j = find_msr(&m->host, msr);
+
if (i == NR_AUTOLOAD_MSRS || j == NR_AUTOLOAD_MSRS) {
printk_once(KERN_WARNING "Not enough msr switch entries. "
"Can't add msr %x\n", msr);
@@ -2348,12 +2350,16 @@ static void add_atomic_switch_msr(struct
i = m->guest.nr++;
vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
}
+ m->guest.val[i].index = msr;
+ m->guest.val[i].value = guest_val;
+
+ if (entry_only)
+ return;
+
if (j < 0) {
j = m->host.nr++;
vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
}
- m->guest.val[i].index = msr;
- m->guest.val[i].value = guest_val;
m->host.val[j].index = msr;
m->host.val[j].value = host_val;
}
@@ -2399,7 +2405,7 @@ static bool update_transition_efer(struc
guest_efer &= ~EFER_LME;
if (guest_efer != host_efer)
add_atomic_switch_msr(vmx, MSR_EFER,
- guest_efer, host_efer);
+ guest_efer, host_efer, false);
return false;
} else {
guest_efer &= ~ignore_bits;
@@ -3845,7 +3851,7 @@ static int vmx_set_msr(struct kvm_vcpu *
vcpu->arch.ia32_xss = data;
if (vcpu->arch.ia32_xss != host_xss)
add_atomic_switch_msr(vmx, MSR_IA32_XSS,
- vcpu->arch.ia32_xss, host_xss);
+ vcpu->arch.ia32_xss, host_xss, false);
else
clear_atomic_switch_msr(vmx, MSR_IA32_XSS);
break;
@@ -9815,7 +9821,7 @@ static void atomic_switch_perf_msrs(stru
clear_atomic_switch_msr(vmx, msrs[i].msr);
else
add_atomic_switch_msr(vmx, msrs[i].msr, msrs[i].guest,
- msrs[i].host);
+ msrs[i].host, false);
}

static void vmx_arm_hv_timer(struct kvm_vcpu *vcpu)
