Subject: [PATCH v4 09/12] KVM: SVM: Remove set_exception_intercept and clr_exception_intercept
From: Babu Moger <babu.moger@amd.com>
Remove set_exception_intercept and clr_exception_intercept.
Replace these calls with the generic set_intercept and clr_intercept.

Signed-off-by: Babu Moger <babu.moger@amd.com>
Reviewed-by: Jim Mattson <jmattson@google.com>
---
arch/x86/kvm/svm/svm.c | 20 ++++++++++----------
arch/x86/kvm/svm/svm.h | 18 ------------------
2 files changed, 10 insertions(+), 28 deletions(-)
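
For context, a sketch of the generic helper the converted call sites rely on. The
archived diff is cut off right after set_intercept()'s opening lines in the svm.h
hunk, so the body below is an assumption reconstructed from the removed
set_exception_intercept() shown in that hunk; it is not a verbatim copy of the patch:

/*
 * Sketch only: body reconstructed from the removed set_exception_intercept()
 * in the svm.h hunk below. Treat it as an assumption, not as literal patch
 * content.
 */
static inline void set_intercept(struct vcpu_svm *svm, int bit)
{
	/* Operate on L1's (host) VMCB rather than the active nested VMCB. */
	struct vmcb *vmcb = get_host_vmcb(svm);

	/*
	 * The exception vectors now live in the generic intercept
	 * enumeration, so the same setter covers INTERCEPT_*_VECTOR bits
	 * as well as instruction and CR/DR intercepts.
	 */
	vmcb_set_intercept(&vmcb->control, bit);

	/* Re-merge intercepts into the currently active VMCB. */
	recalc_intercepts(svm);
}

If that reconstruction is right, the body matches the removed exception-specific
helpers line for line, which is why they can be dropped with no functional change.
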

diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index b40ed18cb5c2..3c718caa3b99 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -988,11 +988,11 @@ static void init_vmcb(struct vcpu_svm *svm)

set_dr_intercepts(svm);

- set_exception_intercept(svm, INTERCEPT_PF_VECTOR);
- set_exception_intercept(svm, INTERCEPT_UD_VECTOR);
- set_exception_intercept(svm, INTERCEPT_MC_VECTOR);
- set_exception_intercept(svm, INTERCEPT_AC_VECTOR);
- set_exception_intercept(svm, INTERCEPT_DB_VECTOR);
+ set_intercept(svm, INTERCEPT_PF_VECTOR);
+ set_intercept(svm, INTERCEPT_UD_VECTOR);
+ set_intercept(svm, INTERCEPT_MC_VECTOR);
+ set_intercept(svm, INTERCEPT_AC_VECTOR);
+ set_intercept(svm, INTERCEPT_DB_VECTOR);
/*
* Guest access to VMware backdoor ports could legitimately
* trigger #GP because of TSS I/O permission bitmap.
@@ -1000,7 +1000,7 @@ static void init_vmcb(struct vcpu_svm *svm)
* as VMware does.
*/
if (enable_vmware_backdoor)
- set_exception_intercept(svm, INTERCEPT_GP_VECTOR);
+ set_intercept(svm, INTERCEPT_GP_VECTOR);

set_intercept(svm, INTERCEPT_INTR);
set_intercept(svm, INTERCEPT_NMI);
@@ -1078,7 +1078,7 @@ static void init_vmcb(struct vcpu_svm *svm)
/* Setup VMCB for Nested Paging */
control->nested_ctl |= SVM_NESTED_CTL_NP_ENABLE;
clr_intercept(svm, INTERCEPT_INVLPG);
- clr_exception_intercept(svm, INTERCEPT_PF_VECTOR);
+ clr_intercept(svm, INTERCEPT_PF_VECTOR);
clr_intercept(svm, INTERCEPT_CR3_READ);
clr_intercept(svm, INTERCEPT_CR3_WRITE);
save->g_pat = svm->vcpu.arch.pat;
@@ -1120,7 +1120,7 @@ static void init_vmcb(struct vcpu_svm *svm)

if (sev_guest(svm->vcpu.kvm)) {
svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ENABLE;
- clr_exception_intercept(svm, INTERCEPT_UD_VECTOR);
+ clr_intercept(svm, INTERCEPT_UD_VECTOR);
}

mark_all_dirty(svm->vmcb);
@@ -1631,11 +1631,11 @@ static void update_bp_intercept(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);

- clr_exception_intercept(svm, INTERCEPT_BP_VECTOR);
+ clr_intercept(svm, INTERCEPT_BP_VECTOR);

if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
- set_exception_intercept(svm, INTERCEPT_BP_VECTOR);
+ set_intercept(svm, INTERCEPT_BP_VECTOR);
} else
vcpu->guest_debug = 0;
}
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 6c3f0e1c4555..c89ebaaa3be3 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -264,24 +264,6 @@ static inline void clr_dr_intercepts(struct vcpu_svm *svm)
recalc_intercepts(svm);
}

-static inline void set_exception_intercept(struct vcpu_svm *svm, int bit)
-{
- struct vmcb *vmcb = get_host_vmcb(svm);
-
- vmcb_set_intercept(&vmcb->control, bit);
-
- recalc_intercepts(svm);
-}
-
-static inline void clr_exception_intercept(struct vcpu_svm *svm, int bit)
-{
- struct vmcb *vmcb = get_host_vmcb(svm);
-
- vmcb_clr_intercept(&vmcb->control, bit);
-
- recalc_intercepts(svm);
-}
-
static inline void set_intercept(struct vcpu_svm *svm, int bit)
{
struct vmcb *vmcb = get_host_vmcb(svm);