    Subject: [PATCH 4/9] KVM: MMU: introduce for_each_rmap_spte()
    From: Xiao Guangrong <guangrong.xiao@linux.intel.com>

    Introduce for_each_rmap_spte() to walk all the sptes on a rmap, and use it
    to clean up the open-coded iteration loops.

    Signed-off-by: Xiao Guangrong <guangrong.xiao@linux.intel.com>
    ---
    arch/x86/kvm/mmu.c | 63 +++++++++++++++++++-----------------------------
    arch/x86/kvm/mmu_audit.c | 4 +--
    2 files changed, 26 insertions(+), 41 deletions(-)
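    Not part of the patch itself: a minimal user-space sketch of the pattern
    the new macro encapsulates, for readers skimming the diff. The rmap_head
    and rmap_iterator types, the SPTE_PRESENT bit and the main() harness below
    are simplified stand-ins rather than the kernel definitions; the point is
    only that the presence check and the rmap_get_first()/rmap_get_next()
    stepping move out of every caller into a single place.

    #include <assert.h>
    #include <stdio.h>

    #define SPTE_PRESENT (1ULL << 0)

    struct rmap_head {
    	unsigned long long *sptes;
    	int nr;
    };

    struct rmap_iterator {
    	struct rmap_head *head;
    	int pos;
    };

    static unsigned long long *rmap_get_first(struct rmap_head *head,
    					      struct rmap_iterator *iter)
    {
    	iter->head = head;
    	iter->pos = 0;
    	return head->nr ? &head->sptes[0] : NULL;
    }

    static unsigned long long *rmap_get_next(struct rmap_iterator *iter)
    {
    	if (++iter->pos >= iter->head->nr)
    		return NULL;
    	return &iter->head->sptes[iter->pos];
    }

    /* Same shape as the macro in the patch: step the iterator and check
     * that every visited spte is present (a BUG_ON in the kernel). */
    #define for_each_rmap_spte(_rmap_, _iter_, _spte_)			\
    	for (_spte_ = rmap_get_first(_rmap_, _iter_);			\
    	     _spte_ && ({ assert(*_spte_ & SPTE_PRESENT); 1; });	\
    	     _spte_ = rmap_get_next(_iter_))

    int main(void)
    {
    	unsigned long long sptes[] = { 0x1000 | SPTE_PRESENT,
    				       0x2000 | SPTE_PRESENT };
    	struct rmap_head head = { sptes, 2 };
    	struct rmap_iterator iter;
    	unsigned long long *sptep;

    	/* What used to be an open-coded rmap_get_first()/BUG_ON()/
    	 * rmap_get_next() loop in every caller is now one statement. */
    	for_each_rmap_spte(&head, &iter, sptep)
    		printf("spte %llx\n", *sptep);

    	return 0;
    }

    Note that the callers which drop sptes while walking (kvm_unmap_rmapp,
    kvm_set_pte_rmapp, kvm_mmu_unlink_parents, kvm_mmu_zap_collapsible_spte)
    restart the walk with a goto after each drop, since removing an spte from
    the rmap invalidates the iterator.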

    diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
    index fd13991..4d98c6c 100644
    --- a/arch/x86/kvm/mmu.c
    +++ b/arch/x86/kvm/mmu.c
    @@ -1142,6 +1142,11 @@ static u64 *rmap_get_next(struct rmap_iterator *iter)
    return NULL;
    }

    +#define for_each_rmap_spte(_rmap_, _iter_, _spte_) \
    + for (_spte_ = rmap_get_first(*_rmap_, _iter_); \
    + _spte_ && ({BUG_ON(!is_shadow_present_pte(*_spte_)); 1;}); \
    + _spte_ = rmap_get_next(_iter_))
    +
    static void drop_spte(struct kvm *kvm, u64 *sptep)
    {
    if (mmu_spte_clear_track_bits(sptep))
    @@ -1205,12 +1210,8 @@ static bool __rmap_write_protect(struct kvm *kvm, unsigned long *rmapp,
    struct rmap_iterator iter;
    bool flush = false;

    - for (sptep = rmap_get_first(*rmapp, &iter); sptep;) {
    - BUG_ON(!(*sptep & PT_PRESENT_MASK));
    -
    + for_each_rmap_spte(rmapp, &iter, sptep)
    flush |= spte_write_protect(kvm, sptep, pt_protect);
    - sptep = rmap_get_next(&iter);
    - }

    return flush;
    }
    @@ -1232,12 +1233,8 @@ static bool __rmap_clear_dirty(struct kvm *kvm, unsigned long *rmapp)
    struct rmap_iterator iter;
    bool flush = false;

    - for (sptep = rmap_get_first(*rmapp, &iter); sptep;) {
    - BUG_ON(!(*sptep & PT_PRESENT_MASK));
    -
    + for_each_rmap_spte(rmapp, &iter, sptep)
    flush |= spte_clear_dirty(kvm, sptep);
    - sptep = rmap_get_next(&iter);
    - }

    return flush;
    }
    @@ -1259,12 +1256,8 @@ static bool __rmap_set_dirty(struct kvm *kvm, unsigned long *rmapp)
    struct rmap_iterator iter;
    bool flush = false;

    - for (sptep = rmap_get_first(*rmapp, &iter); sptep;) {
    - BUG_ON(!(*sptep & PT_PRESENT_MASK));
    -
    + for_each_rmap_spte(rmapp, &iter, sptep)
    flush |= spte_set_dirty(kvm, sptep);
    - sptep = rmap_get_next(&iter);
    - }

    return flush;
    }
    @@ -1368,13 +1361,14 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
    struct rmap_iterator iter;
    int need_tlb_flush = 0;

    - while ((sptep = rmap_get_first(*rmapp, &iter))) {
    - BUG_ON(!(*sptep & PT_PRESENT_MASK));
    +restart:
    + for_each_rmap_spte(rmapp, &iter, sptep) {
    rmap_printk("kvm_rmap_unmap_hva: spte %p %llx gfn %llx (%d)\n",
    sptep, *sptep, gfn, level);

    drop_spte(kvm, sptep);
    need_tlb_flush = 1;
    + goto restart;
    }

    return need_tlb_flush;
    @@ -1394,8 +1388,8 @@ static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp,
    WARN_ON(pte_huge(*ptep));
    new_pfn = pte_pfn(*ptep);

    - for (sptep = rmap_get_first(*rmapp, &iter); sptep;) {
    - BUG_ON(!is_shadow_present_pte(*sptep));
    +restart:
    + for_each_rmap_spte(rmapp, &iter, sptep) {
    rmap_printk("kvm_set_pte_rmapp: spte %p %llx gfn %llx (%d)\n",
    sptep, *sptep, gfn, level);

    @@ -1403,7 +1397,7 @@ static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp,

    if (pte_write(*ptep)) {
    drop_spte(kvm, sptep);
    - sptep = rmap_get_first(*rmapp, &iter);
    + goto restart;
    } else {
    new_spte = *sptep & ~PT64_BASE_ADDR_MASK;
    new_spte |= (u64)new_pfn << PAGE_SHIFT;
    @@ -1414,7 +1408,6 @@ static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp,

    mmu_spte_clear_track_bits(sptep);
    mmu_spte_set(sptep, new_spte);
    - sptep = rmap_get_next(&iter);
    }
    }

    @@ -1518,16 +1511,13 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,

    BUG_ON(!shadow_accessed_mask);

    - for (sptep = rmap_get_first(*rmapp, &iter); sptep;
    - sptep = rmap_get_next(&iter)) {
    - BUG_ON(!is_shadow_present_pte(*sptep));
    -
    + for_each_rmap_spte(rmapp, &iter, sptep)
    if (*sptep & shadow_accessed_mask) {
    young = 1;
    clear_bit((ffs(shadow_accessed_mask) - 1),
    (unsigned long *)sptep);
    }
    - }
    +
    trace_kvm_age_page(gfn, level, slot, young);
    return young;
    }
    @@ -1548,15 +1538,11 @@ static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
    if (!shadow_accessed_mask)
    goto out;

    - for (sptep = rmap_get_first(*rmapp, &iter); sptep;
    - sptep = rmap_get_next(&iter)) {
    - BUG_ON(!is_shadow_present_pte(*sptep));
    -
    + for_each_rmap_spte(rmapp, &iter, sptep)
    if (*sptep & shadow_accessed_mask) {
    young = 1;
    break;
    }
    - }
    out:
    return young;
    }
    @@ -2232,8 +2218,11 @@ static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
    u64 *sptep;
    struct rmap_iterator iter;

    - while ((sptep = rmap_get_first(sp->parent_ptes, &iter)))
    +restart:
    + for_each_rmap_spte(&sp->parent_ptes, &iter, sptep) {
    drop_parent_pte(sp, sptep);
    + goto restart;
    + }
    }

    static int mmu_zap_unsync_children(struct kvm *kvm,
    @@ -4519,9 +4508,8 @@ static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
    pfn_t pfn;
    struct kvm_mmu_page *sp;

    - for (sptep = rmap_get_first(*rmapp, &iter); sptep;) {
    - BUG_ON(!(*sptep & PT_PRESENT_MASK));
    -
    +restart:
    + for_each_rmap_spte(rmapp, &iter, sptep) {
    sp = page_header(__pa(sptep));
    pfn = spte_to_pfn(*sptep);

    @@ -4536,10 +4524,9 @@ static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
    !kvm_is_reserved_pfn(pfn) &&
    PageTransCompound(pfn_to_page(pfn))) {
    drop_spte(kvm, sptep);
    - sptep = rmap_get_first(*rmapp, &iter);
    need_tlb_flush = 1;
    - } else
    - sptep = rmap_get_next(&iter);
    + goto restart;
    + }
    }

    return need_tlb_flush;
    diff --git a/arch/x86/kvm/mmu_audit.c b/arch/x86/kvm/mmu_audit.c
    index 9ade5cf..368d534 100644
    --- a/arch/x86/kvm/mmu_audit.c
    +++ b/arch/x86/kvm/mmu_audit.c
    @@ -197,13 +197,11 @@ static void audit_write_protection(struct kvm *kvm, struct kvm_mmu_page *sp)

    rmapp = gfn_to_rmap(kvm, sp->gfn, PT_PAGE_TABLE_LEVEL);

    - for (sptep = rmap_get_first(*rmapp, &iter); sptep;
    - sptep = rmap_get_next(&iter)) {
    + for_each_rmap_spte(rmapp, &iter, sptep)
    if (is_writable_pte(*sptep))
    audit_printk(kvm, "shadow page has writable "
    "mappings: gfn %llx role %x\n",
    sp->gfn, sp->role.word);
    - }
    }

    static void audit_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
    --
    1.9.3

