 
    From: Julien Grall <julien.grall@arm.com>
    Date: Thu, 21 Mar 2019
    Subject: [PATCH RFC 13/14] arm/kvm: Introduce a new VMID allocator
    A follow-up patch will replace the KVM VMID allocator with the arm64 ASID
    allocator. It is not yet clear how the code can be shared between arm
    and arm64, so this is a verbatim copy of arch/arm64/lib/asid.c.
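
    To give an idea of how a user is expected to drive the allocator, here is
    a minimal sketch (the vmid_* names, the 8-bit ID width and the call sites
    are illustrative only; the actual KVM VMID wiring is done in the follow-up
    patch):

        #include <asm/kvm_asid.h>

        static struct asid_info vmid_info;
        static atomic64_t ctxt_id;	/* current ID batch for one context */

        /* Hypothetical callbacks supplied by the user of the allocator. */
        static void vmid_flush_cpu_ctxt(void)
        {
        	/* Local TLB maintenance for the stale context goes here. */
        }

        static void vmid_update_ctxt(void *ctxt)
        {
        	/* Recompute whatever state depends on the newly allocated ID. */
        }

        /* One-off setup: 8-bit ID space, one ID allocated per context. */
        static int vmid_allocator_setup(void)
        {
        	return asid_allocator_init(&vmid_info, 8, 1,
        				   vmid_flush_cpu_ctxt, vmid_update_ctxt);
        }

        /* Called each time the context is about to run on a CPU. */
        static void vmid_check(void)
        {
        	unsigned int cpu = get_cpu();

        	asid_check_context(&vmid_info, &ctxt_id, cpu, NULL);
        	put_cpu();
        }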

    Signed-off-by: Julien Grall <julien.grall@arm.com>
    ---
    arch/arm/include/asm/kvm_asid.h | 81 +++++++++++++++++
    arch/arm/kvm/Makefile | 1 +
    arch/arm/kvm/asid.c | 191 ++++++++++++++++++++++++++++++++++++++++
    3 files changed, 273 insertions(+)
    create mode 100644 arch/arm/include/asm/kvm_asid.h
    create mode 100644 arch/arm/kvm/asid.c

    diff --git a/arch/arm/include/asm/kvm_asid.h b/arch/arm/include/asm/kvm_asid.h
    new file mode 100644
    index 000000000000..f312a6d7543c
    --- /dev/null
    +++ b/arch/arm/include/asm/kvm_asid.h
    @@ -0,0 +1,81 @@
    +/* SPDX-License-Identifier: GPL-2.0 */
    +#ifndef __ARM_KVM_ASID_H__
    +#define __ARM_KVM_ASID_H__
    +
    +#include <linux/atomic.h>
    +#include <linux/compiler.h>
    +#include <linux/cpumask.h>
    +#include <linux/percpu.h>
    +#include <linux/spinlock.h>
    +
    +struct asid_info
    +{
    +	atomic64_t generation;
    +	unsigned long *map;
    +	atomic64_t __percpu *active;
    +	u64 __percpu *reserved;
    +	u32 bits;
    +	/* Lock protecting the structure */
    +	raw_spinlock_t lock;
    +	/* Which CPUs require a context flush on the next call */
    +	cpumask_t flush_pending;
    +	/* Number of ASIDs allocated per context (shift value) */
    +	unsigned int ctxt_shift;
    +	/* Callback to locally flush the context. */
    +	void (*flush_cpu_ctxt_cb)(void);
    +	/* Callback to call when a context is updated */
    +	void (*update_ctxt_cb)(void *ctxt);
    +};
    +
    +#define NUM_ASIDS(info) (1UL << ((info)->bits))
    +#define NUM_CTXT_ASIDS(info) (NUM_ASIDS(info) >> (info)->ctxt_shift)
    +
    +#define active_asid(info, cpu) *per_cpu_ptr((info)->active, cpu)
    +
    +void asid_new_context(struct asid_info *info, atomic64_t *pasid,
    +		      unsigned int cpu, void *ctxt);
    +
    +/*
    + * Check if the ASID is still valid for the context. If not, generate a new ASID.
    + *
    + * @pasid: Pointer to the current ASID batch
    + * @cpu: current CPU ID. Must have been acquired through get_cpu()
    + */
    +static inline void asid_check_context(struct asid_info *info,
    +				      atomic64_t *pasid, unsigned int cpu,
    +				      void *ctxt)
    +{
    +	u64 asid, old_active_asid;
    +
    +	asid = atomic64_read(pasid);
    +
    +	/*
    +	 * The memory ordering here is subtle.
    +	 * If our active_asid is non-zero and the ASID matches the current
    +	 * generation, then we update the active_asid entry with a relaxed
    +	 * cmpxchg. Racing with a concurrent rollover means that either:
    +	 *
    +	 * - We get a zero back from the cmpxchg and end up waiting on the
    +	 *   lock. Taking the lock synchronises with the rollover and so
    +	 *   we are forced to see the updated generation.
    +	 *
    +	 * - We get a valid ASID back from the cmpxchg, which means the
    +	 *   relaxed xchg in flush_context will treat us as reserved
    +	 *   because atomic RmWs are totally ordered for a given location.
    +	 */
    +	old_active_asid = atomic64_read(&active_asid(info, cpu));
    +	if (old_active_asid &&
    +	    !((asid ^ atomic64_read(&info->generation)) >> info->bits) &&
    +	    atomic64_cmpxchg_relaxed(&active_asid(info, cpu),
    +				     old_active_asid, asid))
    +		return;
    +
    +	asid_new_context(info, pasid, cpu, ctxt);
    +}
    +
    +int asid_allocator_init(struct asid_info *info,
    +			u32 bits, unsigned int asid_per_ctxt,
    +			void (*flush_cpu_ctxt_cb)(void),
    +			void (*update_ctxt_cb)(void *ctxt));
    +
    +#endif /* __ARM_KVM_ASID_H__ */
    diff --git a/arch/arm/kvm/Makefile b/arch/arm/kvm/Makefile
    index 531e59f5be9c..35d2d4c67827 100644
    --- a/arch/arm/kvm/Makefile
    +++ b/arch/arm/kvm/Makefile
    @@ -21,6 +21,7 @@ obj-$(CONFIG_KVM_ARM_HOST) += hyp/

     obj-y += kvm-arm.o init.o interrupts.o
     obj-y += handle_exit.o guest.o emulate.o reset.o
    +obj-y += asid.o
     obj-y += coproc.o coproc_a15.o coproc_a7.o vgic-v3-coproc.o
     obj-y += $(KVM)/arm/arm.o $(KVM)/arm/mmu.o $(KVM)/arm/mmio.o
     obj-y += $(KVM)/arm/psci.o $(KVM)/arm/perf.o
    diff --git a/arch/arm/kvm/asid.c b/arch/arm/kvm/asid.c
    new file mode 100644
    index 000000000000..60a25270163a
    --- /dev/null
    +++ b/arch/arm/kvm/asid.c
    @@ -0,0 +1,191 @@
    +// SPDX-License-Identifier: GPL-2.0
    +/*
    + * Generic ASID allocator.
    + *
    + * Based on arch/arm/mm/context.c
    + *
    + * Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
    + * Copyright (C) 2012 ARM Ltd.
    + */
    +
    +#include <linux/slab.h>
    +
    +#include <asm/kvm_asid.h>
    +
    +#define reserved_asid(info, cpu) *per_cpu_ptr((info)->reserved, cpu)
    +
    +#define ASID_MASK(info) (~GENMASK((info)->bits - 1, 0))
    +#define ASID_FIRST_VERSION(info) (1UL << ((info)->bits))
    +
    +#define asid2idx(info, asid) (((asid) & ~ASID_MASK(info)) >> (info)->ctxt_shift)
    +#define idx2asid(info, idx) (((idx) << (info)->ctxt_shift) & ~ASID_MASK(info))
    +
    +static void flush_context(struct asid_info *info)
    +{
    +	int i;
    +	u64 asid;
    +
    +	/* Update the list of reserved ASIDs and the ASID bitmap. */
    +	bitmap_clear(info->map, 0, NUM_CTXT_ASIDS(info));
    +
    +	for_each_possible_cpu(i) {
    +		asid = atomic64_xchg_relaxed(&active_asid(info, i), 0);
    +		/*
    +		 * If this CPU has already been through a
    +		 * rollover, but hasn't run another task in
    +		 * the meantime, we must preserve its reserved
    +		 * ASID, as this is the only trace we have of
    +		 * the process it is still running.
    +		 */
    +		if (asid == 0)
    +			asid = reserved_asid(info, i);
    +		__set_bit(asid2idx(info, asid), info->map);
    +		reserved_asid(info, i) = asid;
    +	}
    +
    +	/*
    +	 * Queue a TLB invalidation for each CPU to perform on next
    +	 * context-switch
    +	 */
    +	cpumask_setall(&info->flush_pending);
    +}
    +
    +static bool check_update_reserved_asid(struct asid_info *info, u64 asid,
    +				       u64 newasid)
    +{
    +	int cpu;
    +	bool hit = false;
    +
    +	/*
    +	 * Iterate over the set of reserved ASIDs looking for a match.
    +	 * If we find one, then we can update our mm to use newasid
    +	 * (i.e. the same ASID in the current generation) but we can't
    +	 * exit the loop early, since we need to ensure that all copies
    +	 * of the old ASID are updated to reflect the mm. Failure to do
    +	 * so could result in us missing the reserved ASID in a future
    +	 * generation.
    +	 */
    +	for_each_possible_cpu(cpu) {
    +		if (reserved_asid(info, cpu) == asid) {
    +			hit = true;
    +			reserved_asid(info, cpu) = newasid;
    +		}
    +	}
    +
    +	return hit;
    +}
    +
    +static u64 new_context(struct asid_info *info, atomic64_t *pasid)
    +{
    +	static u32 cur_idx = 1;
    +	u64 asid = atomic64_read(pasid);
    +	u64 generation = atomic64_read(&info->generation);
    +
    +	if (asid != 0) {
    +		u64 newasid = generation | (asid & ~ASID_MASK(info));
    +
    +		/*
    +		 * If our current ASID was active during a rollover, we
    +		 * can continue to use it and this was just a false alarm.
    +		 */
    +		if (check_update_reserved_asid(info, asid, newasid))
    +			return newasid;
    +
    +		/*
    +		 * We had a valid ASID in a previous life, so try to re-use
    +		 * it if possible.
    +		 */
    +		if (!__test_and_set_bit(asid2idx(info, asid), info->map))
    +			return newasid;
    +	}
    +
    +	/*
    +	 * Allocate a free ASID. If we can't find one, take a note of the
    +	 * currently active ASIDs and mark the TLBs as requiring flushes. We
    +	 * always count from ASID #2 (index 1), as we use ASID #0 when setting
    +	 * a reserved TTBR0 for the init_mm and we allocate ASIDs in even/odd
    +	 * pairs.
    +	 */
    +	asid = find_next_zero_bit(info->map, NUM_CTXT_ASIDS(info), cur_idx);
    +	if (asid != NUM_CTXT_ASIDS(info))
    +		goto set_asid;
    +
    +	/* We're out of ASIDs, so increment the global generation count */
    +	generation = atomic64_add_return_relaxed(ASID_FIRST_VERSION(info),
    +						 &info->generation);
    +	flush_context(info);
    +
    +	/* We have more ASIDs than CPUs, so this will always succeed */
    +	asid = find_next_zero_bit(info->map, NUM_CTXT_ASIDS(info), 1);
    +
    +set_asid:
    +	__set_bit(asid, info->map);
    +	cur_idx = asid;
    +	return idx2asid(info, asid) | generation;
    +}
    +
    +/*
    + * Generate a new ASID for the context.
    + *
    + * @pasid: Pointer to the current ASID batch allocated. It will be updated
    + * with the new ASID batch.
    + * @cpu: current CPU ID. Must have been acquired through get_cpu()
    + * @ctxt: Context to pass to the update_ctxt_cb callback
    + */
    +void asid_new_context(struct asid_info *info, atomic64_t *pasid,
    +		      unsigned int cpu, void *ctxt)
    +{
    +	unsigned long flags;
    +	u64 asid;
    +
    +	raw_spin_lock_irqsave(&info->lock, flags);
    +	/* Check that our ASID belongs to the current generation. */
    +	asid = atomic64_read(pasid);
    +	if ((asid ^ atomic64_read(&info->generation)) >> info->bits) {
    +		asid = new_context(info, pasid);
    +		atomic64_set(pasid, asid);
    +	}
    +
    +	if (cpumask_test_and_clear_cpu(cpu, &info->flush_pending))
    +		info->flush_cpu_ctxt_cb();
    +
    +	atomic64_set(&active_asid(info, cpu), asid);
    +
    +	info->update_ctxt_cb(ctxt);
    +
    +	raw_spin_unlock_irqrestore(&info->lock, flags);
    +}
    +
    +/*
    + * Initialize the ASID allocator
    + *
    + * @info: Pointer to the asid allocator structure
    + * @bits: Number of bits in an ASID (the allocator manages 1 << bits ASIDs)
    + * @asid_per_ctxt: Number of ASIDs to allocate per-context. ASIDs are
    + * allocated contiguously for a given context. This value should be a power of
    + * 2.
    + */
    +int asid_allocator_init(struct asid_info *info,
    +			u32 bits, unsigned int asid_per_ctxt,
    +			void (*flush_cpu_ctxt_cb)(void),
    +			void (*update_ctxt_cb)(void *ctxt))
    +{
    +	info->bits = bits;
    +	info->ctxt_shift = ilog2(asid_per_ctxt);
    +	info->flush_cpu_ctxt_cb = flush_cpu_ctxt_cb;
    +	info->update_ctxt_cb = update_ctxt_cb;
    +	/*
    +	 * Expect allocation after rollover to fail if we don't have at least
    +	 * one more ASID than CPUs. ASID #0 is always reserved.
    +	 */
    +	WARN_ON(NUM_CTXT_ASIDS(info) - 1 <= num_possible_cpus());
    +	atomic64_set(&info->generation, ASID_FIRST_VERSION(info));
    +	info->map = kcalloc(BITS_TO_LONGS(NUM_CTXT_ASIDS(info)),
    +			    sizeof(*info->map), GFP_KERNEL);
    +	if (!info->map)
    +		return -ENOMEM;
    +
    +	raw_spin_lock_init(&info->lock);
    +
    +	return 0;
    +}
    --
    2.11.0