Subject: Re: [PATCH v2 1/2] x86/sev: Put PSC struct on the stack in prep for unaccepted memory support
On 8/8/22 10:16, Tom Lendacky wrote:
...
> diff --git a/arch/x86/include/asm/sev-common.h b/arch/x86/include/asm/sev-common.h
> index b8357d6ecd47..6f7268a817fc 100644
> --- a/arch/x86/include/asm/sev-common.h
> +++ b/arch/x86/include/asm/sev-common.h
> @@ -107,7 +107,7 @@ enum psc_op {
> #define GHCB_HV_FT_SNP_AP_CREATION BIT_ULL(1)
>
> /* SNP Page State Change NAE event */
> -#define VMGEXIT_PSC_MAX_ENTRY 253
> +#define VMGEXIT_PSC_MAX_ENTRY 64

In general, the stack-based allocation looks fine. It might be worth a
comment in there to make it clear that this can consume stack space.
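
For example, something along these lines above the #define (a sketch only; the ~520-byte figure assumes the existing 8-byte psc_hdr and 8-byte psc_entry layouts):

	/*
	 * The number of Page State Change entries is sized so that a
	 * struct snp_psc_desc fits comfortably on the stack: with 64
	 * entries the descriptor is roughly 520 bytes (8-byte header
	 * plus 64 8-byte entries). Growing this constant grows the
	 * stack usage of set_pages_state() by the same amount.
	 */
	#define VMGEXIT_PSC_MAX_ENTRY		64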

> struct psc_hdr {
> u16 cur_entry;
> diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
> index c05f0124c410..275aa890611f 100644
> --- a/arch/x86/kernel/sev.c
> +++ b/arch/x86/kernel/sev.c
> @@ -66,6 +66,9 @@ static struct ghcb boot_ghcb_page __bss_decrypted __aligned(PAGE_SIZE);
> */
> static struct ghcb *boot_ghcb __section(".data");
>
> +/* Flag to indicate when the first per-CPU GHCB is registered */
> +static bool ghcb_percpu_ready __section(".data");

So, there's a code path that can't be entered until this is set? Seems
like the least we can do is annotate that path with a
WARN_ON_ONCE(!ghcb_percpu_ready).
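
For example, at the top of the path that needs the per-CPU GHCB (a sketch; the trailing parameters of __set_pages_state() are guessed from the call site in the quoted hunk):

	static void __set_pages_state(struct snp_psc_desc *data, unsigned long vaddr,
				      unsigned long vaddr_end, int op)
	{
		/*
		 * This path issues the Page State Change NAE event through
		 * the per-CPU GHCB, so it must not run before setup_ghcb()
		 * has registered the per-CPU GHCBs.
		 */
		WARN_ON_ONCE(!ghcb_percpu_ready);

		/* ... existing PSC entry building and vmgexit_psc() call ... */
	}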

Also, how does having _one_ global variable work for indicating the
state of multiple per-cpu structures? The code doesn't seem to delay
setting this variable until after _all_ of the per-cpu state is ready.

> /* Bitmap of SEV features supported by the hypervisor */
> static u64 sev_hv_features __ro_after_init;
>
> @@ -660,7 +663,7 @@ static void pvalidate_pages(unsigned long vaddr, unsigned int npages, bool valid
> }
> }
>
> -static void __init early_set_pages_state(unsigned long paddr, unsigned int npages, enum psc_op op)
> +static void early_set_pages_state(unsigned long paddr, unsigned int npages, enum psc_op op)
> {
> unsigned long paddr_end;
> u64 val;
> @@ -868,11 +871,16 @@ static void __set_pages_state(struct snp_psc_desc *data, unsigned long vaddr,
> static void set_pages_state(unsigned long vaddr, unsigned int npages, int op)
> {
> unsigned long vaddr_end, next_vaddr;
> - struct snp_psc_desc *desc;
> + struct snp_psc_desc desc;
> +
> + /*
> + * Use the MSR protocol when the per-CPU GHCBs are not yet registered,
> + * since vmgexit_psc() uses the per-CPU GHCB.
> + */
> + if (!ghcb_percpu_ready)
> + return early_set_pages_state(__pa(vaddr), npages, op);
>
> - desc = kmalloc(sizeof(*desc), GFP_KERNEL_ACCOUNT);
> - if (!desc)
> - panic("SNP: failed to allocate memory for PSC descriptor\n");
> + memset(&desc, 0, sizeof(desc));

Why is this using memset()? The compiler should be smart enough to
delay initializing 'desc' until after the return with this kind of
construct:

	struct snp_psc_desc desc = {};

	if (foo)
		return;

	// use 'desc' here

The compiler *knows* there is no access to 'desc' before the if().
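
Applied to the quoted hunk, the opening of set_pages_state() would then look something like this (sketch):

	static void set_pages_state(unsigned long vaddr, unsigned int npages, int op)
	{
		unsigned long vaddr_end, next_vaddr;
		struct snp_psc_desc desc = {};

		/*
		 * Use the MSR protocol when the per-CPU GHCBs are not yet
		 * registered, since vmgexit_psc() uses the per-CPU GHCB.
		 */
		if (!ghcb_percpu_ready)
			return early_set_pages_state(__pa(vaddr), npages, op);

		vaddr = vaddr & PAGE_MASK;
		vaddr_end = vaddr + (npages << PAGE_SHIFT);

		/* ... loop over VMGEXIT_PSC_MAX_ENTRY-sized chunks as before ... */
	}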


> vaddr = vaddr & PAGE_MASK;
> vaddr_end = vaddr + (npages << PAGE_SHIFT);
> @@ -882,12 +890,10 @@ static void set_pages_state(unsigned long vaddr, unsigned int npages, int op)
> next_vaddr = min_t(unsigned long, vaddr_end,
> (VMGEXIT_PSC_MAX_ENTRY * PAGE_SIZE) + vaddr);
>
> - __set_pages_state(desc, vaddr, next_vaddr, op);
> + __set_pages_state(&desc, vaddr, next_vaddr, op);
>
> vaddr = next_vaddr;
> }
> -
> - kfree(desc);
> }
>
> void snp_set_memory_shared(unsigned long vaddr, unsigned int npages)
> @@ -1254,6 +1260,8 @@ void setup_ghcb(void)
> if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
> snp_register_per_cpu_ghcb();
>
> + ghcb_percpu_ready = true;
> +
> return;
> }
>
