Subject: [PATCH 01/14] KVM: arm64: Provide the host_stage2_try() helper macro
From: Quentin Perret <qperret@google.com>
We currently unmap all MMIO mappings from the host stage-2 to recycle the
pages whenever we run out of them. To make this pattern easy to re-use from
other places, factor the logic out into a dedicated macro. While at it,
apply the macro to the kvm_pgtable_stage2_set_owner() calls as well. They are
currently only issued early on and are guaranteed to succeed, but making them
robust to the -ENOMEM case doesn't hurt and will avoid painful debugging
sessions later on.
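
To illustrate, the open-coded pattern being factored out looks roughly like
the following (sketch only, shown for the __host_stage2_idmap() call site;
the actual macro is in the diff below):

	/* Sketch: the retry-on-ENOMEM pattern that host_stage2_try() wraps. */
	ret = __host_stage2_idmap(range.start, range.end, prot);
	if (ret == -ENOMEM) {
		/* Recycle the MMIO mappings and retry the operation once. */
		ret = host_stage2_unmap_dev_all();
		if (!ret)
			ret = __host_stage2_idmap(range.start, range.end, prot);
	}

The macro simply wraps this retry step so that other callers, such as the
kvm_pgtable_stage2_set_owner() sites, can reuse it.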

Signed-off-by: Quentin Perret <qperret@google.com>
---
 arch/arm64/kvm/hyp/nvhe/mem_protect.c | 38 ++++++++++++++-------------
 1 file changed, 20 insertions(+), 18 deletions(-)

diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
index d938ce95d3bd..56f2117c877b 100644
--- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
+++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -208,6 +208,23 @@ static inline int __host_stage2_idmap(u64 start, u64 end,
 				      prot, &host_s2_pool);
 }
 
+/*
+ * The pool has been provided with enough pages to cover all of memory with
+ * page granularity, but it is difficult to know how much of the MMIO range
+ * we will need to cover upfront, so we may need to 'recycle' the pages if we
+ * run out.
+ */
+#define host_stage2_try(fn, ...)					\
+	({								\
+		int __ret = fn(__VA_ARGS__);				\
+		if (__ret == -ENOMEM) {					\
+			__ret = host_stage2_unmap_dev_all();		\
+			if (!__ret)					\
+				__ret = fn(__VA_ARGS__);		\
+		}							\
+		__ret;							\
+	})
+
 static int host_stage2_idmap(u64 addr)
 {
 	enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W;
@@ -223,22 +240,7 @@ static int host_stage2_idmap(u64 addr)
 	if (ret)
 		goto unlock;
 
-	ret = __host_stage2_idmap(range.start, range.end, prot);
-	if (ret != -ENOMEM)
-		goto unlock;
-
-	/*
-	 * The pool has been provided with enough pages to cover all of memory
-	 * with page granularity, but it is difficult to know how much of the
-	 * MMIO range we will need to cover upfront, so we may need to 'recycle'
-	 * the pages if we run out.
-	 */
-	ret = host_stage2_unmap_dev_all();
-	if (ret)
-		goto unlock;
-
-	ret = __host_stage2_idmap(range.start, range.end, prot);
-
+	ret = host_stage2_try(__host_stage2_idmap, range.start, range.end, prot);
 unlock:
 	hyp_spin_unlock(&host_kvm.lock);
 
@@ -257,8 +259,8 @@ int __pkvm_mark_hyp(phys_addr_t start, phys_addr_t end)
 		return -EINVAL;
 
 	hyp_spin_lock(&host_kvm.lock);
-	ret = kvm_pgtable_stage2_set_owner(&host_kvm.pgt, start, end - start,
-					   &host_s2_pool, pkvm_hyp_id);
+	ret = host_stage2_try(kvm_pgtable_stage2_set_owner, &host_kvm.pgt,
+			      start, end - start, &host_s2_pool, pkvm_hyp_id);
 	hyp_spin_unlock(&host_kvm.lock);
 
 	return ret != -EAGAIN ? ret : 0;
-- 
2.32.0.402.g57bb445576-goog