Subject: Re: [PATCH 1/2] KVM: introduce __vmx_flush_tlb to handle specific vpid
Wanpeng Li <wanpeng.li@hotmail.com> writes:

> Introduce __vmx_flush_tlb() to handle specific vpid.
>
> Signed-off-by: Wanpeng Li <wanpeng.li@hotmail.com>
> ---
> arch/x86/kvm/vmx.c | 21 +++++++++++++--------
> 1 file changed, 13 insertions(+), 8 deletions(-)
>
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index 794c529..7188c5e 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -1343,13 +1343,13 @@ static void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs)
> __loaded_vmcs_clear, loaded_vmcs, 1);
> }
>
> -static inline void vpid_sync_vcpu_single(struct vcpu_vmx *vmx)
> +static inline void vpid_sync_vcpu_single(int vpid)
> {
> - if (vmx->vpid == 0)
> + if (vpid == 0)
> return;
>
> if (cpu_has_vmx_invvpid_single())
> - __invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vmx->vpid, 0);
> + __invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vpid, 0);
> }
>
> static inline void vpid_sync_vcpu_global(void)
> @@ -1358,10 +1358,10 @@ static inline void vpid_sync_vcpu_global(void)
> __invvpid(VMX_VPID_EXTENT_ALL_CONTEXT, 0, 0);
> }
>
> -static inline void vpid_sync_context(struct vcpu_vmx *vmx)
> +static inline void vpid_sync_context(int vpid)
> {
> if (cpu_has_vmx_invvpid_single())
> - vpid_sync_vcpu_single(vmx);
> + vpid_sync_vcpu_single(vpid);
> else
> vpid_sync_vcpu_global();
> }

I'm not sure myself what the right thing to do is, but this may be undesirable
in a nested environment. Assuming the processor supports global invalidation
only, this looks like an easy way for the nested guest to invalidate *all*
mappings - even the L1-specific mappings.
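
To illustrate what I mean, here's a toy userspace model of that fallback
path (all names here are hypothetical, this is not kernel code): with only
ALL_CONTEXT invalidation available, a sync requested for one vpid ends up
dropping entries tagged with every other vpid, including L1's.

	#include <stdio.h>
	#include <stdbool.h>

	#define NR_ENTRIES 4

	/* pretend TLB: each cached entry is tagged with a vpid */
	static int  tlb_tag[NR_ENTRIES]   = { 1, 1, 2, 2 };
	static bool tlb_valid[NR_ENTRIES] = { true, true, true, true };

	static bool has_invvpid_single;		/* pretend CPU capability */

	static void invvpid_single(int vpid)
	{
		for (int i = 0; i < NR_ENTRIES; i++)
			if (tlb_tag[i] == vpid)
				tlb_valid[i] = false;
	}

	static void invvpid_all(void)
	{
		for (int i = 0; i < NR_ENTRIES; i++)
			tlb_valid[i] = false;	/* L1's entries go too */
	}

	/* mirrors the dispatch in vpid_sync_context() from the patch */
	static void vpid_sync_context(int vpid)
	{
		if (has_invvpid_single)
			invvpid_single(vpid);
		else
			invvpid_all();
	}

	int main(void)
	{
		has_invvpid_single = false;	/* global invalidation only */
		vpid_sync_context(2);		/* flush on behalf of vpid 2 (say, L2) */

		for (int i = 0; i < NR_ENTRIES; i++)
			printf("entry %d (vpid %d): %s\n", i, tlb_tag[i],
			       tlb_valid[i] ? "still cached" : "flushed");
		return 0;
	}

With has_invvpid_single false, every entry prints "flushed", vpid 1's
included - which is the behavior I'd worry about for L1.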


> @@ -3450,9 +3450,9 @@ static void exit_lmode(struct kvm_vcpu *vcpu)
>
> #endif
>
> -static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
> +static inline void __vmx_flush_tlb(struct kvm_vcpu *vcpu, int vpid)
> {
> - vpid_sync_context(to_vmx(vcpu));
> + vpid_sync_context(vpid);
> if (enable_ept) {
> if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
> return;
> @@ -3460,6 +3460,11 @@ static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
> }
> }
>
> +static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
> +{
> + __vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid);
> +}
> +
> static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
> {
> ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits;
> @@ -4795,7 +4800,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
> vmx_fpu_activate(vcpu);
> update_exception_bitmap(vcpu);
>
> - vpid_sync_context(vmx);
> + vpid_sync_context(vmx->vpid);
> }
>
> /*

