From: Wanpeng Li <wanpengli@tencent.com>
Subject: [PATCH v2 2/2] x86/kvm: handle the failure of __pv_cpu_mask allocation

Fall back to native IPIs/TLB flush if the allocation of __pv_cpu_mask fails.
The allocation can fail only with CONFIG_CPUMASK_OFFSTACK=y; in that case the
per-CPU cpumask pointer is left NULL and must not be dereferenced by the PV
IPI and TLB-flush paths.

Signed-off-by: Wanpeng Li <wanpengli@tencent.com>
---
v1 -> v2:
* move orig_apic under CONFIG_SMP
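
The diff below follows a save-and-fall-back pattern: snapshot the original
apic ops before overriding them, then bail out to the saved ops whenever the
per-CPU mask is unavailable. For illustration, here is a minimal userspace C
sketch of that pattern; ipi_ops, pv_mask and orig_ops are made-up names for
this sketch, not kernel identifiers:

/* Illustrative sketch only -- not kernel code. */
#include <stdio.h>
#include <stdlib.h>

struct ipi_ops {
	void (*send_ipi_mask)(unsigned long mask, int vector);
};

static void native_send_ipi_mask(unsigned long mask, int vector)
{
	printf("native IPI: mask=%#lx vector=%d\n", mask, vector);
}

static struct ipi_ops ops = { .send_ipi_mask = native_send_ipi_mask };
static struct ipi_ops orig_ops;	/* snapshot, like orig_apic in the patch */
static unsigned long *pv_mask;	/* NULL <=> allocation failed */

static void pv_send_ipi_mask(unsigned long mask, int vector)
{
	if (!pv_mask) {		/* no mask: fall back to the saved ops */
		orig_ops.send_ipi_mask(mask, vector);
		return;
	}
	printf("PV IPI: mask=%#lx vector=%d\n", mask, vector);
}

int main(void)
{
	orig_ops = ops;		/* save before overriding, as kvm_setup_pv_ipi() does */
	ops.send_ipi_mask = pv_send_ipi_mask;

	pv_mask = malloc(sizeof(*pv_mask));
	ops.send_ipi_mask(0x3, 0xfe);	/* PV path if the allocation succeeded */

	free(pv_mask);
	pv_mask = NULL;			/* simulate a failed allocation */
	ops.send_ipi_mask(0x3, 0xfe);	/* falls back to the native path */
	return 0;
}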

arch/x86/kernel/kvm.c | 26 ++++++++++++++++++++++++--
1 file changed, 24 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 8b1c45c9cda8..ce03121d038b 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -457,6 +457,7 @@ static int kvm_cpu_online(unsigned int cpu)
 
 #ifdef CONFIG_SMP
 
+static struct apic orig_apic;
 static DEFINE_PER_CPU(cpumask_var_t, __pv_cpu_mask);
 
 static bool pv_tlb_flush_supported(void)
@@ -543,6 +544,11 @@ static void __send_ipi_mask(const struct cpumask *mask, int vector)
 
 static void kvm_send_ipi_mask(const struct cpumask *mask, int vector)
 {
+	if (unlikely(!this_cpu_cpumask_var_ptr(__pv_cpu_mask))) {
+		orig_apic.send_IPI_mask(mask, vector);
+		return;
+	}
+
 	__send_ipi_mask(mask, vector);
 }
 
@@ -552,6 +558,11 @@ static void kvm_send_ipi_mask_allbutself(const struct cpumask *mask, int vector)
 	struct cpumask *new_mask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);
 	const struct cpumask *local_mask;
 
+	if (unlikely(!new_mask)) {
+		orig_apic.send_IPI_mask_allbutself(mask, vector);
+		return;
+	}
+
 	cpumask_copy(new_mask, mask);
 	cpumask_clear_cpu(this_cpu, new_mask);
 	local_mask = new_mask;
@@ -612,6 +623,7 @@ late_initcall(setup_efi_kvm_sev_migration);
  */
 static void kvm_setup_pv_ipi(void)
 {
+	orig_apic = *apic;
 	apic->send_IPI_mask = kvm_send_ipi_mask;
 	apic->send_IPI_mask_allbutself = kvm_send_ipi_mask_allbutself;
 	pr_info("setup PV IPIs\n");
@@ -640,6 +652,11 @@ static void kvm_flush_tlb_multi(const struct cpumask *cpumask,
 	struct kvm_steal_time *src;
 	struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);
 
+	if (unlikely(!flushmask)) {
+		native_flush_tlb_multi(cpumask, info);
+		return;
+	}
+
 	cpumask_copy(flushmask, cpumask);
 	/*
 	 * We have to call flush only on online vCPUs. And
@@ -672,11 +689,16 @@ static __init int kvm_alloc_cpumask(void)
 
 	if (pv_tlb_flush_supported() || pv_ipi_supported())
 		for_each_possible_cpu(cpu) {
-			zalloc_cpumask_var_node(per_cpu_ptr(&__pv_cpu_mask, cpu),
-				GFP_KERNEL, cpu_to_node(cpu));
+			if (!zalloc_cpumask_var_node(&per_cpu(__pv_cpu_mask, cpu),
+				GFP_KERNEL, cpu_to_node(cpu)))
+				goto err_out;
 		}
 
 	return 0;
+err_out:
+	for_each_possible_cpu(cpu)
+		free_cpumask_var(per_cpu(__pv_cpu_mask, cpu));
+	return -ENOMEM;
 }
 arch_initcall(kvm_alloc_cpumask);

--
2.25.1