Subject: [PATCH] KVM: LAPIC: ensure APIC map is up to date on concurrent update requests
The following race can cause lost map update events:

      cpu1                                      cpu2

                                        apic_map_dirty = true
 ---------------------------------------------------------------
 kvm_recalculate_apic_map:
      pass check
          mutex_lock(&kvm->arch.apic_map_lock);
          if (!kvm->arch.apic_map_dirty)
      and in process of updating map
 ---------------------------------------------------------------
                                        other calls to
                                        apic_map_dirty = true
                                        might be too late for affected cpu
 ---------------------------------------------------------------
 apic_map_dirty = false
 ---------------------------------------------------------------
                                        kvm_recalculate_apic_map:
                                        bail out on
                                            if (!kvm->arch.apic_map_dirty)
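
For reference, this is roughly the current flow of
kvm_recalculate_apic_map(), trimmed down to the handling of the flag
(illustrative sketch, not the full function); the window is between
snapshotting the APIC state and the unconditional clearing of the flag:

  void kvm_recalculate_apic_map(struct kvm *kvm)
  {
          if (!kvm->arch.apic_map_dirty) {
                  /* Read kvm->arch.apic_map_dirty before kvm->arch.apic_map. */
                  smp_rmb();
                  return;
          }

          mutex_lock(&kvm->arch.apic_map_lock);
          if (!kvm->arch.apic_map_dirty) {
                  /* Someone else has updated the map. */
                  mutex_unlock(&kvm->arch.apic_map_lock);
                  return;
          }

          /*
           * ... rebuild the map from the APIC registers; a concurrent
           * "apic_map_dirty = true" from here on is not reflected in the
           * new map ...
           */

          smp_wmb();
          kvm->arch.apic_map_dirty = false;   /* clobbers the concurrent marking */
          mutex_unlock(&kvm->arch.apic_map_lock);
  }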

To fix it, record the beginning of an update of the APIC map in
apic_map_dirty, turning the flag into a three-state value (CLEAN,
UPDATE_IN_PROGRESS, DIRTY). If another APIC map change switches
apic_map_dirty back to DIRTY while the update is in progress,
kvm_recalculate_apic_map must not mark it CLEAN; the flag stays DIRTY,
so the other caller goes through the slow path and rebuilds the map.
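
Condensed to the flag handling only, the updater side then becomes
(illustrative sketch of the change below, not the complete function):

  void kvm_recalculate_apic_map(struct kvm *kvm)
  {
          /* Read kvm->arch.apic_map_dirty before kvm->arch.apic_map. */
          if (atomic_read_acquire(&kvm->arch.apic_map_dirty) == CLEAN)
                  return;

          mutex_lock(&kvm->arch.apic_map_lock);
          if (atomic_cmpxchg_acquire(&kvm->arch.apic_map_dirty,
                                     DIRTY, UPDATE_IN_PROGRESS) == CLEAN) {
                  /* Someone else has updated the map. */
                  mutex_unlock(&kvm->arch.apic_map_lock);
                  return;
          }

          /* ... rebuild and publish the new map ... */

          /*
           * Only UPDATE_IN_PROGRESS -> CLEAN is allowed here; if a writer
           * set DIRTY in the meantime, the cmpxchg fails, the flag stays
           * DIRTY and the next caller redoes the update.
           */
          atomic_cmpxchg_release(&kvm->arch.apic_map_dirty,
                                 UPDATE_IN_PROGRESS, CLEAN);
          mutex_unlock(&kvm->arch.apic_map_lock);
  }

Writers simply mark the map dirty without taking the lock, e.g.
atomic_set(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY).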

Reported-by: Igor Mammedov <imammedo@redhat.com>
---
arch/x86/include/asm/kvm_host.h | 2 +-
arch/x86/kvm/lapic.c | 45 +++++++++++++++++++--------------
2 files changed, 27 insertions(+), 20 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 1da5858501ca..d814032a81e7 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -943,7 +943,7 @@ struct kvm_arch {
atomic_t vapics_in_nmi_mode;
struct mutex apic_map_lock;
struct kvm_apic_map *apic_map;
- bool apic_map_dirty;
+ atomic_t apic_map_dirty;

bool apic_access_page_done;
unsigned long apicv_inhibit_reasons;
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 34a7e0533dad..ef98f2fd3bbd 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -169,6 +169,18 @@ static void kvm_apic_map_free(struct rcu_head *rcu)
kvfree(map);
}

+/*
+ * CLEAN -> DIRTY and UPDATE_IN_PROGRESS -> DIRTY changes happen without a lock.
+ *
+ * DIRTY -> UPDATE_IN_PROGRESS and UPDATE_IN_PROGRESS -> CLEAN happen with
+ * apic_map_lock held.
+ */
+enum {
+ CLEAN,
+ UPDATE_IN_PROGRESS,
+ DIRTY
+};
+
void kvm_recalculate_apic_map(struct kvm *kvm)
{
struct kvm_apic_map *new, *old = NULL;
@@ -176,17 +188,13 @@ void kvm_recalculate_apic_map(struct kvm *kvm)
int i;
u32 max_id = 255; /* enough space for any xAPIC ID */

- if (!kvm->arch.apic_map_dirty) {
- /*
- * Read kvm->arch.apic_map_dirty before
- * kvm->arch.apic_map
- */
- smp_rmb();
+ /* Read kvm->arch.apic_map_dirty before kvm->arch.apic_map. */
+ if (atomic_read_acquire(&kvm->arch.apic_map_dirty) == CLEAN)
return;
- }

mutex_lock(&kvm->arch.apic_map_lock);
- if (!kvm->arch.apic_map_dirty) {
+ if (atomic_cmpxchg_acquire(&kvm->arch.apic_map_dirty,
+ DIRTY, UPDATE_IN_PROGRESS) == CLEAN) {
/* Someone else has updated the map. */
mutex_unlock(&kvm->arch.apic_map_lock);
return;
@@ -256,11 +264,11 @@ void kvm_recalculate_apic_map(struct kvm *kvm)
lockdep_is_held(&kvm->arch.apic_map_lock));
rcu_assign_pointer(kvm->arch.apic_map, new);
/*
- * Write kvm->arch.apic_map before
- * clearing apic->apic_map_dirty
+ * Write kvm->arch.apic_map before clearing kvm->arch.apic_map_dirty.
+ * If another update came in, leave it DIRTY.
*/
- smp_wmb();
- kvm->arch.apic_map_dirty = false;
+ atomic_cmpxchg_release(&kvm->arch.apic_map_dirty,
+ UPDATE_IN_PROGRESS, CLEAN);
mutex_unlock(&kvm->arch.apic_map_lock);

if (old)
@@ -282,20 +290,20 @@ static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
else
static_key_slow_inc(&apic_sw_disabled.key);

- apic->vcpu->kvm->arch.apic_map_dirty = true;
+ atomic_set(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
}
}

static inline void kvm_apic_set_xapic_id(struct kvm_lapic *apic, u8 id)
{
kvm_lapic_set_reg(apic, APIC_ID, id << 24);
- apic->vcpu->kvm->arch.apic_map_dirty = true;
+ atomic_set(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
}

static inline void kvm_apic_set_ldr(struct kvm_lapic *apic, u32 id)
{
kvm_lapic_set_reg(apic, APIC_LDR, id);
- apic->vcpu->kvm->arch.apic_map_dirty = true;
+ atomic_set(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
}

static inline u32 kvm_apic_calc_x2apic_ldr(u32 id)
@@ -311,7 +319,7 @@ static inline void kvm_apic_set_x2apic_id(struct kvm_lapic *apic, u32 id)

kvm_lapic_set_reg(apic, APIC_ID, id);
kvm_lapic_set_reg(apic, APIC_LDR, ldr);
- apic->vcpu->kvm->arch.apic_map_dirty = true;
+ atomic_set(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
}

static inline int apic_lvt_enabled(struct kvm_lapic *apic, int lvt_type)
@@ -1976,7 +1984,7 @@ int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
case APIC_DFR:
if (!apic_x2apic_mode(apic)) {
kvm_lapic_set_reg(apic, APIC_DFR, val | 0x0FFFFFFF);
- apic->vcpu->kvm->arch.apic_map_dirty = true;
+ atomic_set(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
} else
ret = 1;
break;
@@ -2232,7 +2240,7 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
static_key_slow_dec_deferred(&apic_hw_disabled);
} else {
static_key_slow_inc(&apic_hw_disabled.key);
- vcpu->kvm->arch.apic_map_dirty = true;
+ atomic_set(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
}
}

@@ -2273,7 +2281,6 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
if (!apic)
return;

- vcpu->kvm->arch.apic_map_dirty = false;
/* Stop the timer in case it's a reset to an active apic */
hrtimer_cancel(&apic->lapic_timer.timer);

--
2.25.4