From:	Brian Gerst <>
Subject:	[PATCH 5/9] x86/percpu/64: Use relative percpu offsets
Date:	Mon, 23 Oct 2023 17:17:26 -0400
The percpu section is currently linked at virtual address 0, because older compilers hardcoded the stack protector canary value at a fixed offset from the start of the GS segment. Now that the canary is a normal percpu variable, the percpu section can be linked normally. This means that x86-64 will calculate percpu offsets like most other architectures, as the delta between the initial percpu address and the dynamically allocated memory.
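
For illustration only (not part of the change itself): with the percpu
section linked at its natural address, the boot-time offset setup reduces
to the generic pattern already used in setup_per_cpu_areas(), roughly:

	unsigned long delta;
	unsigned int cpu;

	/* delta between the linked section and the allocated percpu area */
	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;

	for_each_possible_cpu(cpu)
		per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];

(pcpu_base_addr, pcpu_unit_offsets[] and __per_cpu_start are the existing
percpu allocator/linker symbols; the point is that __per_cpu_load is no
longer folded into the offset.)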
Signed-off-by: Brian Gerst <brgerst@gmail.com>
---
 arch/x86/kernel/head_64.S      |  6 ------
 arch/x86/kernel/setup_percpu.c | 12 ++----------
 arch/x86/kernel/vmlinux.lds.S  | 24 +-----------------------
 arch/x86/tools/relocs.c        | 10 +++-------
 arch/x86/xen/xen-head.S        |  6 ------
 init/Kconfig                   |  2 +-
 6 files changed, 7 insertions(+), 53 deletions(-)
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index f2453eb38417..b35f74e58dd7 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -72,14 +72,8 @@ SYM_CODE_START_NOALIGN(startup_64)
 
 	/* Setup GSBASE to allow stack canary access for C code */
 	movl	$MSR_GS_BASE, %ecx
-#ifdef CONFIG_SMP
-	leaq	__per_cpu_load(%rip), %rdx
-	movl	%edx, %eax
-	shrq	$32, %rdx
-#else
 	xorl	%eax, %eax
 	xorl	%edx, %edx
-#endif
 	wrmsr
 
 	call	startup_64_setup_env
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 2c97bf7b56ae..8707dd07b9ce 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -23,18 +23,10 @@
 #include <asm/cpumask.h>
 #include <asm/cpu.h>
 
-#ifdef CONFIG_X86_64
-#define BOOT_PERCPU_OFFSET	((unsigned long)__per_cpu_load)
-#else
-#define BOOT_PERCPU_OFFSET	0
-#endif
-
-DEFINE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
+DEFINE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off);
 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
 
-unsigned long __per_cpu_offset[NR_CPUS] __ro_after_init = {
-	[0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
-};
+unsigned long __per_cpu_offset[NR_CPUS] __ro_after_init;
 EXPORT_SYMBOL(__per_cpu_offset);
 
 /*
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index c87dc8de2084..8d8eb4d9ff9d 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -102,12 +102,6 @@ jiffies = jiffies_64;
 PHDRS {
 	text PT_LOAD FLAGS(5);          /* R_E */
 	data PT_LOAD FLAGS(6);          /* RW_ */
-#ifdef CONFIG_X86_64
-#ifdef CONFIG_SMP
-	percpu PT_LOAD FLAGS(6);        /* RW_ */
-#endif
-	init PT_LOAD FLAGS(7);          /* RWE */
-#endif
 	note PT_NOTE FLAGS(0);          /* ___ */
 }
 
@@ -223,21 +217,7 @@ SECTIONS
 		__init_begin = .; /* paired with __init_end */
 	}
 
-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
-	/*
-	 * percpu offsets are zero-based on SMP.  PERCPU_VADDR() changes the
-	 * output PHDR, so the next output section - .init.text - should
-	 * start another segment - init.
-	 */
-	PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
-	ASSERT(SIZEOF(.data..percpu) < CONFIG_PHYSICAL_START,
-	       "per-CPU data too large - increase CONFIG_PHYSICAL_START")
-#endif
-
 	INIT_TEXT_SECTION(PAGE_SIZE)
-#ifdef CONFIG_X86_64
-	:init
-#endif
 
 	/*
 	 * Section for code used exclusively before alternatives are run. All
@@ -367,9 +347,7 @@ SECTIONS
 		EXIT_DATA
 	}
 
-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
 	PERCPU_SECTION(INTERNODE_CACHE_BYTES)
-#endif
 
 	. = ALIGN(PAGE_SIZE);
 
@@ -507,7 +485,7 @@ SECTIONS
  * Per-cpu symbols which need to be offset from __per_cpu_load
  * for the boot processor.
  */
-#define INIT_PER_CPU(x) init_per_cpu__##x = ABSOLUTE(x) + __per_cpu_load
+#define INIT_PER_CPU(x) init_per_cpu__##x = ABSOLUTE(x)
 INIT_PER_CPU(gdt_page);
 INIT_PER_CPU(irq_stack_backing_store);
 
diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
index 3ccd9d4fcf9c..01efbfdd3eb3 100644
--- a/arch/x86/tools/relocs.c
+++ b/arch/x86/tools/relocs.c
@@ -815,12 +815,7 @@ static void percpu_init(void)
  */
 static int is_percpu_sym(ElfW(Sym) *sym, const char *symname)
 {
-	int shndx = sym_index(sym);
-
-	return (shndx == per_cpu_shndx) &&
-		strcmp(symname, "__init_begin") &&
-		strcmp(symname, "__per_cpu_load") &&
-		strncmp(symname, "init_per_cpu_", 13);
+	return 0;
 }
 
 
@@ -1043,7 +1038,8 @@ static int cmp_relocs(const void *va, const void *vb)
 
 static void sort_relocs(struct relocs *r)
 {
-	qsort(r->offset, r->count, sizeof(r->offset[0]), cmp_relocs);
+	if (r->count)
+		qsort(r->offset, r->count, sizeof(r->offset[0]), cmp_relocs);
 }
 
 static int write32(uint32_t v, FILE *f)
diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
index 9ce0d9d268bb..c1d9c92b417a 100644
--- a/arch/x86/xen/xen-head.S
+++ b/arch/x86/xen/xen-head.S
@@ -57,14 +57,8 @@ SYM_CODE_START(startup_xen)
 	 * the per cpu areas are set up.
 	 */
 	movl	$MSR_GS_BASE,%ecx
-#ifdef CONFIG_SMP
-	leaq	__per_cpu_load(%rip), %rdx
-	movl	%edx, %eax
-	shrq	$32, %rdx
-#else
 	xorl	%eax, %eax
 	xorl	%edx, %edx
-#endif
 	wrmsr
 
 	mov	%rsi, %rdi
diff --git a/init/Kconfig b/init/Kconfig
index 6d35728b94b2..1af31b23e376 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1718,7 +1718,7 @@ config KALLSYMS_ALL
 config KALLSYMS_ABSOLUTE_PERCPU
 	bool
 	depends on KALLSYMS
-	default X86_64 && SMP
+	default n
 
 config KALLSYMS_BASE_RELATIVE
 	bool
-- 
2.41.0