Subject: Re: [PATCH v8 1/2] x86/mm: add .bss..decrypted section to hold shared variables
From: Tom Lendacky
Date: Fri, 14 Sep 2018
On 09/14/2018 09:12 AM, Borislav Petkov wrote:
> On Fri, Sep 14, 2018 at 02:17:05PM +0200, Thomas Gleixner wrote:
>>> The sme_encrypt_kernel() does not have access to pmd (after the pointer
>>> fixup is applied). You can extend sme_encrypt_kernel() to pass an
>>> additional argument, but then we start getting into include hell. The pmd
>>> is defined as "pmdval_t". If we extend sme_encrypt_kernel(), then
>>> asm/mem_encrypt.h needs to include the header file which defines
>>> "pmdval_t". Adding 'asm/pgtable_types.h' was causing all kinds of
>>> compilation errors. I didn't spend much time on it. IMO, we really don't
>>> need to go down this path unless we see some value in doing so.
>>
>> Keep it here then.
>
> *For what it's worth*, a simple forward declaration works. I've taken the
> 64-bit forward declaration of pmdval_t, as SME is 64-bit only anyway.

Just my 2 cents, but I'd prefer it to be in head64.c. This is where
the future pagetable entries are all updated to set the encryption
mask, by applying sme_get_me_mask() to load_delta. So, to me, it makes
sense to keep the clearing of the encryption mask for the .bss..decrypted
section here as well.

Thanks,
Tom
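
For readers following along, here is a toy, userspace-only model of the flow
described above: entries are created with the encryption mask already set (as
__startup_64() does via sme_get_me_mask()), and the entries covering the
decrypted range then get the mask removed again, one 2MB PMD at a time. This
is not kernel code; the C-bit position and flag values below are made up for
illustration.

/* toy model only -- build with: cc -o pmd_demo pmd_demo.c */
#include <stdint.h>
#include <stdio.h>

#define PMD_SHIFT       21                      /* 2MB covered per PMD entry */
#define PMD_SIZE        (1UL << PMD_SHIFT)
#define PTRS_PER_PMD    512

static const uint64_t sme_me_mask = 1ULL << 47; /* example C-bit position only */

static unsigned long pmd_index(unsigned long vaddr)
{
        return (vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}

int main(void)
{
        uint64_t pmd[PTRS_PER_PMD] = { 0 };
        unsigned long vaddr, vaddr_end;

        /* "map" a pretend kernel range with the encryption mask set */
        for (vaddr = 0; vaddr < 16 * PMD_SIZE; vaddr += PMD_SIZE)
                pmd[pmd_index(vaddr)] = vaddr | 0xe3 | sme_me_mask;

        /* clear the mask for a pretend .bss..decrypted range (two PMDs) */
        vaddr = 4 * PMD_SIZE;
        vaddr_end = 6 * PMD_SIZE;
        for (; vaddr < vaddr_end; vaddr += PMD_SIZE)
                pmd[pmd_index(vaddr)] -= sme_me_mask;

        printf("PMD 3: %#llx (still has the mask)\n",
               (unsigned long long)pmd[pmd_index(3 * PMD_SIZE)]);
        printf("PMD 4: %#llx (mask cleared)\n",
               (unsigned long long)pmd[pmd_index(4 * PMD_SIZE)]);
        return 0;
}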

>
> The below diff on top passes the mandatory all*config smoke builds:
>
> ---
>  arch/x86/include/asm/mem_encrypt.h |  6 ++++--
>  arch/x86/kernel/head64.c           | 18 +-----------------
>  arch/x86/mm/mem_encrypt_identity.c | 18 +++++++++++++++++-
>  3 files changed, 22 insertions(+), 20 deletions(-)
>
> diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
> index 616f8e637bc3..67c0e6cfdfb3 100644
> --- a/arch/x86/include/asm/mem_encrypt.h
> +++ b/arch/x86/include/asm/mem_encrypt.h
> @@ -19,6 +19,8 @@
>  
>  #include <asm/bootparam.h>
>  
> +typedef unsigned long pmdval_t;
> +
>  #ifdef CONFIG_AMD_MEM_ENCRYPT
>  
>  extern u64 sme_me_mask;
> @@ -40,7 +42,7 @@ void __init sme_unmap_bootdata(char *real_mode_data);
>  
>  void __init sme_early_init(void);
>  
> -void __init sme_encrypt_kernel(struct boot_params *bp);
> +void __init sme_encrypt_kernel(struct boot_params *bp, pmdval_t *pmd);
>  void __init sme_enable(struct boot_params *bp);
>  
>  int __init early_set_memory_decrypted(unsigned long vaddr, unsigned long size);
> @@ -69,7 +71,7 @@ static inline void __init sme_unmap_bootdata(char *real_mode_data) { }
>  
>  static inline void __init sme_early_init(void) { }
>  
> -static inline void __init sme_encrypt_kernel(struct boot_params *bp) { }
> +static inline void __init sme_encrypt_kernel(struct boot_params *bp, pmdval_t *pmd) { }
>  static inline void __init sme_enable(struct boot_params *bp) { }
>  
>  static inline bool sme_active(void) { return false; }
> diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
> index c16af27eb23f..6f8e9b534e80 100644
> --- a/arch/x86/kernel/head64.c
> +++ b/arch/x86/kernel/head64.c
> @@ -112,7 +112,6 @@ static bool __head check_la57_support(unsigned long physaddr)
>  unsigned long __head __startup_64(unsigned long physaddr,
>                                    struct boot_params *bp)
>  {
> -        unsigned long vaddr, vaddr_end;
>          unsigned long load_delta, *p;
>          unsigned long pgtable_flags;
>          pgdval_t *pgd;
> @@ -233,22 +232,7 @@ unsigned long __head __startup_64(unsigned long physaddr,
>          *fixup_long(&phys_base, physaddr) += load_delta - sme_get_me_mask();
>  
>          /* Encrypt the kernel and related (if SME is active) */
> -        sme_encrypt_kernel(bp);
> -
> -        /*
> -         * Clear the memory encryption mask from the .bss..decrypted section.
> -         * The bss section will be memset to zero later in the initialization so
> -         * there is no need to zero it after changing the memory encryption
> -         * attribute.
> -         */
> -        if (mem_encrypt_active()) {
> -                vaddr = (unsigned long)__start_bss_decrypted;
> -                vaddr_end = (unsigned long)__end_bss_decrypted;
> -                for (; vaddr < vaddr_end; vaddr += PMD_SIZE) {
> -                        i = pmd_index(vaddr);
> -                        pmd[i] -= sme_get_me_mask();
> -                }
> -        }
> +        sme_encrypt_kernel(bp, pmd);
>  
>          /*
>           * Return the SME encryption mask (if SME is active) to be used as a
> diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c
> index a19ef1a416ff..9dbc145d10f8 100644
> --- a/arch/x86/mm/mem_encrypt_identity.c
> +++ b/arch/x86/mm/mem_encrypt_identity.c
> @@ -267,15 +267,17 @@ static unsigned long __init sme_pgtable_calc(unsigned long len)
>          return entries + tables;
>  }
>  
> -void __init sme_encrypt_kernel(struct boot_params *bp)
> +void __init sme_encrypt_kernel(struct boot_params *bp, pmdval_t *pmd)
>  {
>          unsigned long workarea_start, workarea_end, workarea_len;
>          unsigned long execute_start, execute_end, execute_len;
>          unsigned long kernel_start, kernel_end, kernel_len;
>          unsigned long initrd_start, initrd_end, initrd_len;
>          struct sme_populate_pgd_data ppd;
> +        unsigned long vaddr, vaddr_end;
>          unsigned long pgtable_area_len;
>          unsigned long decrypted_base;
> +        int i;
>  
>          if (!sme_active())
>                  return;
> @@ -467,6 +469,20 @@ void __init sme_encrypt_kernel(struct boot_params *bp)
>  
>          /* Flush the TLB - no globals so cr3 is enough */
>          native_write_cr3(__native_read_cr3());
> +
> +        /*
> +         * Clear the memory encryption mask from the .bss..decrypted section.
> +         * The bss section will be memset to zero later in the initialization so
> +         * there is no need to zero it after changing the memory encryption
> +         * attribute.
> +         */
> +        vaddr = (unsigned long)__start_bss_decrypted;
> +        vaddr_end = (unsigned long)__end_bss_decrypted;
> +
> +        for (; vaddr < vaddr_end; vaddr += PMD_SIZE) {
> +                i = pmd_index(vaddr);
> +                pmd[i] -= sme_get_me_mask();
> +        }
>  }
>  
>  void __init sme_enable(struct boot_params *bp)
>
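
A side note on the pmd[i] -= sme_get_me_mask() line in the hunks above:
sme_me_mask is a single page-table bit (the C-bit, whose position the kernel
reads from CPUID 0x8000001F), and the entries being touched here are known to
have that bit set, so subtracting the mask is equivalent to clearing it. A
minimal standalone illustration, with made-up bit position and entry values:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint64_t mask  = 1ULL << 47;                    /* stand-in for sme_me_mask */
        uint64_t entry = 0x200000ULL | 0xe3 | mask;     /* PA | flags | C-bit, made-up values */

        /* identical results while the bit is set; a borrow would corrupt
         * the entry if it were not */
        assert((entry - mask) == (entry & ~mask));
        return 0;
}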
