Subject: Re: [PATCH v3 6/9] x86/smpboot: Support parallel startup of secondary CPUs
On 12/15/21 8:56 AM, David Woodhouse wrote:
> From: Thomas Gleixner <tglx@linutronix.de>
>
...
> diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
> index d8b3ebd2bb85..0249212e23d2 100644
> --- a/arch/x86/kernel/head_64.S
> +++ b/arch/x86/kernel/head_64.S
> @@ -25,6 +25,7 @@
> #include <asm/export.h>
> #include <asm/nospec-branch.h>
> #include <asm/fixmap.h>
> +#include <asm/smp.h>
>
> /*
> * We are not able to switch in one step to the final KERNEL ADDRESS SPACE
> @@ -176,6 +177,64 @@ SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
> 1:
> UNWIND_HINT_EMPTY
>
> + /*
> + * Is this the boot CPU coming up? If so everything is available
> + * in initial_gs, initial_stack and early_gdt_descr.
> + */
> + movl smpboot_control(%rip), %eax
> + testl %eax, %eax
> + jz .Lsetup_cpu
> +
> + /*
> + * Secondary CPUs find out the offsets via the APIC ID. For parallel
> + * boot the APIC ID is retrieved from CPUID, otherwise it's encoded
> + * in smpboot_control:
> + * Bit 0-15 APICID if STARTUP_USE_CPUID_0B is not set
> + * Bit 16 Secondary boot flag
> + * Bit 17 Parallel boot flag
> + */
> + testl $STARTUP_USE_CPUID_0B, %eax
> + jz .Lsetup_AP
> +
> + mov $0x0B, %eax
> + xorl %ecx, %ecx
> + cpuid

This will break an SEV-ES guest, because the CPUID instruction will generate a
#VC exception and a #VC handler has not been established yet at this point in
secondary startup.

I guess for now, you can probably just avoid enabling parallel startup for
SEV-ES guests.
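
For illustration only, a minimal sketch of how that gating might look in
do_boot_cpu() (this is not part of the posted patch): check for SEV-ES via
cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT) from <linux/cc_platform.h> and
hand the AP its APIC ID in smpboot_control, so the trampoline never has to
execute CPUID. Whether do_boot_cpu() is the right place for this check, and
whether the APIC ID always fits in 16 bits on such guests, are assumptions
here. The branch would slot in between the CONFIG_X86_32 case and the
cpuid_level check in the quoted hunk:

	} else if (cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT)) {
		/*
		 * SEV-ES sketch: a CPUID #VC cannot be handled this early
		 * in secondary startup, so avoid the CPUID leaf 0x0B path
		 * and encode the APIC ID directly. This keeps SEV-ES
		 * guests on the serialized, one-CPU-at-a-time wakeup path.
		 */
		if (WARN_ON_ONCE(apicid > 0xffff))
			return -EIO;
		smpboot_control = apicid | STARTUP_USE_APICID;
	} else if (boot_cpu_data.cpuid_level < 0x0B) {

That keeps SEV-ES guests working without parallel startup until a #VC handler
(or a GHCB-based CPUID) is available that early.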

Thanks,
Tom


> + mov %edx, %eax
> +
> +.Lsetup_AP:
> + /* EAX contains the APICID of the current CPU */
> + andl $0xFFFF, %eax
> + xorl %ecx, %ecx
> + leaq cpuid_to_apicid(%rip), %rbx
> +
> +.Lfind_cpunr:
> + cmpl (%rbx), %eax
> + jz .Linit_cpu_data
> + addq $4, %rbx
> + addq $8, %rcx
> + jmp .Lfind_cpunr
> +
> +.Linit_cpu_data:
> + /* Get the per cpu offset */
> + leaq __per_cpu_offset(%rip), %rbx
> + addq %rcx, %rbx
> + movq (%rbx), %rbx
> + /* Save it for GS BASE setup */
> + movq %rbx, initial_gs(%rip)
> +
> + /* Calculate the GDT address */
> + movq $gdt_page, %rcx
> + addq %rbx, %rcx
> + movq %rcx, early_gdt_descr_base(%rip)
> +
> + /* Find the idle task stack */
> + movq $idle_threads, %rcx
> + addq %rbx, %rcx
> + movq (%rcx), %rcx
> + movq TASK_threadsp(%rcx), %rcx
> + movq %rcx, initial_stack(%rip)
> +
> +.Lsetup_cpu:
> /*
> * We must switch to a new descriptor in kernel space for the GDT
> * because soon the kernel won't have access anymore to the userspace
> @@ -216,6 +275,14 @@ SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
> */
> movq initial_stack(%rip), %rsp
>
> + /* Drop the realmode protection. For the boot CPU the pointer is NULL! */
> + movq trampoline_lock(%rip), %rax
> + testq %rax, %rax
> + jz .Lsetup_idt
> + lock
> + btrl $0, (%rax)
> +
> +.Lsetup_idt:
> /* Setup and Load IDT */
> pushq %rsi
> call early_setup_idt
> @@ -347,6 +414,7 @@ SYM_DATA(initial_vc_handler, .quad handle_vc_boot_ghcb)
> * reliably detect the end of the stack.
> */
> SYM_DATA(initial_stack, .quad init_thread_union + THREAD_SIZE - FRAME_SIZE)
> +SYM_DATA(trampoline_lock, .quad 0);
> __FINITDATA
>
> __INIT
> @@ -572,6 +640,9 @@ SYM_DATA_END(level1_fixmap_pgt)
> SYM_DATA(early_gdt_descr, .word GDT_ENTRIES*8-1)
> SYM_DATA_LOCAL(early_gdt_descr_base, .quad INIT_PER_CPU_VAR(gdt_page))
>
> + .align 16
> +SYM_DATA(smpboot_control, .long 0)
> +
> .align 16
> /* This must match the first entry in level2_kernel_pgt */
> SYM_DATA(phys_base, .quad 0x0)
> diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
> index 7a763b84b6e5..1e38d44c3603 100644
> --- a/arch/x86/kernel/smpboot.c
> +++ b/arch/x86/kernel/smpboot.c
> @@ -1104,9 +1104,19 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle,
> unsigned long boot_error = 0;
>
> idle->thread.sp = (unsigned long)task_pt_regs(idle);
> - early_gdt_descr.address = (unsigned long)get_cpu_gdt_rw(cpu);
> initial_code = (unsigned long)start_secondary;
> - initial_stack = idle->thread.sp;
> +
> + if (IS_ENABLED(CONFIG_X86_32)) {
> + early_gdt_descr.address = (unsigned long)get_cpu_gdt_rw(cpu);
> + initial_stack = idle->thread.sp;
> + } else if (boot_cpu_data.cpuid_level < 0x0B) {
> + /* Anything with X2APIC should have CPUID leaf 0x0B */
> + if (WARN_ON_ONCE(x2apic_mode) && apicid > 0xffff)
> + return -EIO;
> + smpboot_control = apicid | STARTUP_USE_APICID;
> + } else {
> + smpboot_control = STARTUP_USE_CPUID_0B;
> + }
>
> /* Enable the espfix hack for this CPU */
> init_espfix_ap(cpu);
> diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
> index 4a3da7592b99..7dc2e817bd02 100644
> --- a/arch/x86/realmode/init.c
> +++ b/arch/x86/realmode/init.c
> @@ -127,6 +127,9 @@ static void __init setup_real_mode(void)
>
> trampoline_header->flags = 0;
>
> + trampoline_lock = &trampoline_header->lock;
> + *trampoline_lock = 0;
> +
> trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
> trampoline_pgd[0] = trampoline_pgd_entry.pgd;
> trampoline_pgd[511] = init_top_pgt[511].pgd;
> diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
> index cc8391f86cdb..12a540904e80 100644
> --- a/arch/x86/realmode/rm/trampoline_64.S
> +++ b/arch/x86/realmode/rm/trampoline_64.S
> @@ -49,6 +49,19 @@ SYM_CODE_START(trampoline_start)
> mov %ax, %es
> mov %ax, %ss
>
> + /*
> + * Make sure only one CPU fiddles with the realmode stack
> + */
> +.Llock_rm:
> + btl $0, tr_lock
> + jnc 2f
> + pause
> + jmp .Llock_rm
> +2:
> + lock
> + btsl $0, tr_lock
> + jc .Llock_rm
> +
> # Setup stack
> movl $rm_stack_end, %esp
>
> @@ -192,6 +205,7 @@ SYM_DATA_START(trampoline_header)
> SYM_DATA(tr_efer, .space 8)
> SYM_DATA(tr_cr4, .space 4)
> SYM_DATA(tr_flags, .space 4)
> + SYM_DATA(tr_lock, .space 4)
> SYM_DATA_END(trampoline_header)
>
> #include "trampoline_common.S"
> diff --git a/kernel/smpboot.c b/kernel/smpboot.c
> index f6bc0bc8a2aa..934e64ff4eed 100644
> --- a/kernel/smpboot.c
> +++ b/kernel/smpboot.c
> @@ -25,7 +25,7 @@
> * For the hotplug case we keep the task structs around and reuse
> * them.
> */
> -static DEFINE_PER_CPU(struct task_struct *, idle_threads);
> +DEFINE_PER_CPU(struct task_struct *, idle_threads);
>
> struct task_struct *idle_thread_get(unsigned int cpu)
> {
>
