From: Sedat Dilek <sedat.dilek@gmail.com>
Date: Wed, 15 Jul 2020
Subject: Re: [PATCH v5 3/7] x86/boot/compressed: Get rid of GOT fixup code
On Wed, Jul 15, 2020 at 2:41 AM Arvind Sankar <nivedita@alum.mit.edu> wrote:
>
> From: Ard Biesheuvel <ardb@kernel.org>
>
> In a previous patch, we eliminated GOT entries from the decompressor
> binary and added an assertion that the .got section is empty. This
> means the GOT fixup code that exists in both the 32-bit and 64-bit
> startup routines is now dead and can be removed.
>
> While at it, drop the KEEP() from the linker script, as it has no effect
> on the contents of output sections that are created by the linker itself.
>
> Reviewed-by: Kees Cook <keescook@chromium.org>
> Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
> Acked-by: Arvind Sankar <nivedita@alum.mit.edu>
> Signed-off-by: Arvind Sankar <nivedita@alum.mit.edu>
> Link: https://lore.kernel.org/r/20200523120021.34996-4-ardb@kernel.org
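
For readers jumping into the series here: the assertion mentioned in the
commit message was added to arch/x86/boot/compressed/vmlinux.lds.S by an
earlier patch in the series; if I recall that patch correctly, it is along
the lines of ASSERT(SIZEOF(.got) == 0, "Unexpected GOT entries detected!"),
so a stray GOT entry now fails the link outright rather than depending on
the runtime fixups this patch deletes.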

Tested-by: Sedat Dilek <sedat.dilek@gmail.com>

- Sedat -

> ---
> arch/x86/boot/compressed/head_32.S | 24 ++---------
> arch/x86/boot/compressed/head_64.S | 57 --------------------------
> arch/x86/boot/compressed/vmlinux.lds.S | 4 +-
> 3 files changed, 5 insertions(+), 80 deletions(-)
>
> diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
> index 03557f2174bf..39f0bb43218f 100644
> --- a/arch/x86/boot/compressed/head_32.S
> +++ b/arch/x86/boot/compressed/head_32.S
> @@ -49,16 +49,13 @@
> * Position Independent Executable (PIE) so that linker won't optimize
> * R_386_GOT32X relocation to its fixed symbol address. Older
> * linkers generate R_386_32 relocations against locally defined symbols,
> - * _bss, _ebss, _got, _egot and _end, in PIE. It isn't wrong, just less
> - * optimal than R_386_RELATIVE. But the x86 kernel fails to properly handle
> - * R_386_32 relocations when relocating the kernel. To generate
> - * R_386_RELATIVE relocations, we mark _bss, _ebss, _got, _egot and _end as
> - * hidden:
> + * _bss, _ebss and _end, in PIE. It isn't wrong, just less optimal than
> + * R_386_RELATIVE. But the x86 kernel fails to properly handle R_386_32
> + * relocations when relocating the kernel. To generate R_386_RELATIVE
> + * relocations, we mark _bss, _ebss and _end as hidden:
> */
> .hidden _bss
> .hidden _ebss
> - .hidden _got
> - .hidden _egot
> .hidden _end
>
> __HEAD
> @@ -192,19 +189,6 @@ SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated)
> shrl $2, %ecx
> rep stosl
>
> -/*
> - * Adjust our own GOT
> - */
> - leal _got(%ebx), %edx
> - leal _egot(%ebx), %ecx
> -1:
> - cmpl %ecx, %edx
> - jae 2f
> - addl %ebx, (%edx)
> - addl $4, %edx
> - jmp 1b
> -2:
> -
> /*
> * Do the extraction, and jump to the new kernel..
> */
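
A side note on the comment block trimmed above, since the relocation names
are easy to mix up: the reason hidden visibility matters is the arithmetic
each relocation type asks of whoever applies it. A rough C sketch of the
ELF i386 semantics (my own illustration, not kernel code; the function
names are made up):

    #include <stdint.h>

    /*
     * R_386_RELATIVE: word := load_base + addend. The addend is already
     * stored in place, so applying it only requires the load base, which
     * the kernel's minimal relocation loop has.
     */
    static void apply_r_386_relative(uint32_t *where, uint32_t load_base)
    {
            *where += load_base;
    }

    /*
     * R_386_32: word := symbol_value + addend. This needs a resolved
     * symbol value, i.e. symbol lookup that the kernel does not do while
     * relocating itself -- hence marking _bss, _ebss and _end hidden so
     * the linker emits R_386_RELATIVE instead.
     */
    static void apply_r_386_32(uint32_t *where, uint32_t symbol_value)
    {
            *where += symbol_value;
    }
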
> diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
> index 97d37f0a34f5..bf1ab30acc5b 100644
> --- a/arch/x86/boot/compressed/head_64.S
> +++ b/arch/x86/boot/compressed/head_64.S
> @@ -40,8 +40,6 @@
> */
> .hidden _bss
> .hidden _ebss
> - .hidden _got
> - .hidden _egot
> .hidden _end
>
> __HEAD
> @@ -353,25 +351,6 @@ SYM_CODE_START(startup_64)
> /* Set up the stack */
> leaq boot_stack_end(%rbx), %rsp
>
> - /*
> - * paging_prepare() and cleanup_trampoline() below can have GOT
> - * references. Adjust the table with address we are running at.
> - *
> - * Zero RAX for adjust_got: the GOT was not adjusted before;
> - * there's no adjustment to undo.
> - */
> - xorq %rax, %rax
> -
> - /*
> - * Calculate the address the binary is loaded at and use it as
> - * a GOT adjustment.
> - */
> - call 1f
> -1: popq %rdi
> - subq $1b, %rdi
> -
> - call .Ladjust_got
> -
> /*
> * At this point we are in long mode with 4-level paging enabled,
> * but we might want to enable 5-level paging or vice versa.
> @@ -464,21 +443,6 @@ trampoline_return:
> pushq $0
> popfq
>
> - /*
> - * Previously we've adjusted the GOT with address the binary was
> - * loaded at. Now we need to re-adjust for relocation address.
> - *
> - * Calculate the address the binary is loaded at, so that we can
> - * undo the previous GOT adjustment.
> - */
> - call 1f
> -1: popq %rax
> - subq $1b, %rax
> -
> - /* The new adjustment is the relocation address */
> - movq %rbx, %rdi
> - call .Ladjust_got
> -
> /*
> * Copy the compressed kernel to the end of our buffer
> * where decompression in place becomes safe.
> @@ -556,27 +520,6 @@ SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated)
> jmp *%rax
> SYM_FUNC_END(.Lrelocated)
>
> -/*
> - * Adjust the global offset table
> - *
> - * RAX is the previous adjustment of the table to undo (use 0 if it's the
> - * first time we touch GOT).
> - * RDI is the new adjustment to apply.
> - */
> -.Ladjust_got:
> - /* Walk through the GOT adding the address to the entries */
> - leaq _got(%rip), %rdx
> - leaq _egot(%rip), %rcx
> -1:
> - cmpq %rcx, %rdx
> - jae 2f
> - subq %rax, (%rdx) /* Undo previous adjustment */
> - addq %rdi, (%rdx) /* Apply the new adjustment */
> - addq $8, %rdx
> - jmp 1b
> -2:
> - ret
> -
> .code32
> /*
> * This is the 32-bit trampoline that will be copied over to low memory.
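
The deleted .Ladjust_got walk is compact but easy to misread in assembly,
so for anyone comparing old and new behaviour, here is roughly what it
did, transcribed into C (my paraphrase, using the _got/_egot
section-boundary symbols that this patch also removes):

    #include <stdint.h>

    extern uint64_t _got[], _egot[];  /* linker-provided GOT bounds */

    /*
     * Slide every GOT entry: undo the previous adjustment and apply the
     * new one. The first call passed old_adjust = 0 (nothing to undo)
     * and the load address as new_adjust; the second call undid the
     * load address and applied the relocation address.
     */
    static void adjust_got(uint64_t old_adjust, uint64_t new_adjust)
    {
            for (uint64_t *entry = _got; entry < _egot; entry++)
                    *entry = *entry - old_adjust + new_adjust;
    }

With the GOT guaranteed empty at link time, both call sites and the loop
itself are dead, which is the point of this patch.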
> diff --git a/arch/x86/boot/compressed/vmlinux.lds.S b/arch/x86/boot/compressed/vmlinux.lds.S
> index 4bcc943842ab..a4a4a59a2628 100644
> --- a/arch/x86/boot/compressed/vmlinux.lds.S
> +++ b/arch/x86/boot/compressed/vmlinux.lds.S
> @@ -43,9 +43,7 @@ SECTIONS
> _erodata = . ;
> }
> .got : {
> - _got = .;
> - KEEP(*(.got))
> - _egot = .;
> + *(.got)
> }
> .got.plt : {
> *(.got.plt)
> --
> 2.26.2
>
