From: Lan Tianyu <Tianyu.Lan@microsoft.com>
Subject: [Update PATCH V3 2/10] KVM/VMX: Fill range list in kvm_fill_hv_flush_list_func()
Date: 26 Feb 2019

    Populate the ranges on the flush list into struct hv_guest_mapping_flush_list
    when a flush list is available in struct kvm_tlb_range.
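
    As an illustration only (not part of this patch), a caller could gather
    shadow pages on a local hlist under mmu_lock and hand the list to the
    range-flush path through the new flush_list field. The entry point
    kvm_flush_remote_tlbs_with_range() is assumed here for the sake of the
    sketch:

	/* Sketch: build a flush list and pass it down via kvm_tlb_range. */
	HLIST_HEAD(flush_list);
	struct kvm_tlb_range range = {
		.flush_list = &flush_list,
	};

	spin_lock(&kvm->mmu_lock);
	/* sp is a struct kvm_mmu_page that was selected for flushing. */
	hlist_add_head(&sp->flush_link, &flush_list);
	kvm_flush_remote_tlbs_with_range(kvm, &range);	/* assumed caller */
	spin_unlock(&kvm->mmu_lock);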

    Signed-off-by: Lan Tianyu <Tianyu.Lan@microsoft.com>
    ---
    Update:
    - Add a check on the return value "offset" in kvm_fill_hv_flush_list_func()

    Change since v2:
    - Fix the calculation of flush pages in kvm_fill_hv_flush_list_func()
    ---
     arch/x86/include/asm/kvm_host.h |  7 +++++++
     arch/x86/kvm/vmx/vmx.c          | 21 +++++++++++++++++++--
     2 files changed, 26 insertions(+), 2 deletions(-)

    diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
    index 875ae7256608..9fc9dd0c92cb 100644
    --- a/arch/x86/include/asm/kvm_host.h
    +++ b/arch/x86/include/asm/kvm_host.h
    @@ -318,6 +318,12 @@ struct kvm_rmap_head {
     
     struct kvm_mmu_page {
     	struct list_head link;
    +
    +	/*
    +	 * TLB flush with a range list uses struct kvm_mmu_page as the list
    +	 * entry, and all list operations should be protected by mmu_lock.
    +	 */
    +	struct hlist_node flush_link;
     	struct hlist_node hash_link;
     	bool unsync;
     	bool mmio_cached;
    @@ -441,6 +447,7 @@ struct kvm_mmu {
     struct kvm_tlb_range {
     	u64 start_gfn;
     	u64 pages;
    +	struct hlist_head *flush_list;
     };
     
     enum pmc_type {
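
    The embedded flush_link node is what lets a flush list be built without
    any allocation: hlist_for_each_entry() recovers each struct kvm_mmu_page
    from its list node via container_of(), so walking the list is cheap as
    long as additions and removals stay under mmu_lock. A minimal sketch of
    such a walk (illustrative only, the pr_debug() output is hypothetical):

	struct kvm_mmu_page *sp;

	hlist_for_each_entry(sp, range->flush_list, flush_link)
		pr_debug("flush gfn 0x%llx at level %u\n",
			 sp->gfn, sp->role.level);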
    diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
    index 77b5379e3655..197545121355 100644
    --- a/arch/x86/kvm/vmx/vmx.c
    +++ b/arch/x86/kvm/vmx/vmx.c
    @@ -432,9 +432,26 @@ static int kvm_fill_hv_flush_list_func(struct hv_guest_mapping_flush_list *flush
     		void *data)
     {
     	struct kvm_tlb_range *range = data;
    +	struct kvm_mmu_page *sp;
     
    -	return hyperv_fill_flush_guest_mapping_list(flush, 0, range->start_gfn,
    -			range->pages);
    +	if (!range->flush_list) {
    +		return hyperv_fill_flush_guest_mapping_list(flush,
    +				0, range->start_gfn, range->pages);
    +	} else {
    +		int offset = 0;
    +
    +		hlist_for_each_entry(sp, range->flush_list, flush_link) {
    +			int pages = KVM_PAGES_PER_HPAGE(sp->role.level + 1);
    +
    +			offset = hyperv_fill_flush_guest_mapping_list(flush,
    +					offset, sp->gfn, pages);
    +			if (offset < 0)
    +				return offset;
    +
    +		}
    +
    +		return offset;
    +	}
     }
     
     static inline int __hv_remote_flush_tlb_with_range(struct kvm *kvm,
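
    The page count per list entry comes from
    KVM_PAGES_PER_HPAGE(sp->role.level + 1): a shadow page at level L maps
    512^L base pages, which is exactly the footprint of a huge page one
    level up, hence the "+ 1". The running "offset" is fed back into each
    hyperv_fill_flush_guest_mapping_list() call, and a negative value aborts
    the walk. A simplified restatement of the arithmetic (illustrative only,
    numerically mirroring the x86 KVM_PAGES_PER_HPAGE definition):

	#define KVM_HPAGE_GFN_SHIFT(x)	(((x) - 1) * 9)
	#define KVM_PAGES_PER_HPAGE(x)	(1UL << KVM_HPAGE_GFN_SHIFT(x))

	/* A level-1 (PTE) shadow page covers 512 base pages ... */
	_Static_assert(KVM_PAGES_PER_HPAGE(1 + 1) == 512, "level 1");
	/* ... and a level-2 (PDE) shadow page covers 512 * 512 of them. */
	_Static_assert(KVM_PAGES_PER_HPAGE(2 + 1) == 512UL * 512, "level 2");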
    --
    2.14.4