Date:	Mon, 14 Aug 2023 15:21:38 -0500
Subject:	Re: [PATCH v2 1/2] x86/sev: Move sev_setup_arch() to mem_encrypt.c
From:	Tom Lendacky <>
On 6/9/23 12:12, Alexander Shishkin wrote:
> Since commit 4d96f9109109b ("x86/sev: Replace occurrences of
> sev_active() with cc_platform_has()"), the SWIOTLB bounce buffer size
> adjustment and restricted virtio memory setting also inadvertently apply
> to TDX: the code is using cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT) as
> a gatekeeping condition, which is also true for TDX, and this is also what
> we want.
>
> To reflect this, move the corresponding code to generic mem_encrypt.c.
> No functional changes intended.
>
> Signed-off-by: Alexander Shishkin <alexander.shishkin@linux.intel.com>
> ---
>  arch/x86/include/asm/mem_encrypt.h | 11 ++++++++--
>  arch/x86/kernel/setup.c            |  2 +-
>  arch/x86/mm/mem_encrypt.c          | 34 +++++++++++++++++++++++++++++
>  arch/x86/mm/mem_encrypt_amd.c      | 35 ------------------------------
>  4 files changed, 44 insertions(+), 38 deletions(-)
>
> diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
> index b7126701574c..4283063c1e1c 100644
> --- a/arch/x86/include/asm/mem_encrypt.h
> +++ b/arch/x86/include/asm/mem_encrypt.h
> @@ -37,7 +37,6 @@ void __init sme_map_bootdata(char *real_mode_data);
>  void __init sme_unmap_bootdata(char *real_mode_data);
>
>  void __init sme_early_init(void);
> -void __init sev_setup_arch(void);
>
>  void __init sme_encrypt_kernel(struct boot_params *bp);
>  void __init sme_enable(struct boot_params *bp);
> @@ -67,7 +66,6 @@ static inline void __init sme_map_bootdata(char *real_mode_data) { }
>  static inline void __init sme_unmap_bootdata(char *real_mode_data) { }
>
>  static inline void __init sme_early_init(void) { }
> -static inline void __init sev_setup_arch(void) { }
>
>  static inline void __init sme_encrypt_kernel(struct boot_params *bp) { }
>  static inline void __init sme_enable(struct boot_params *bp) { }
> @@ -92,6 +90,15 @@ void __init mem_encrypt_init(void);
>
>  void add_encrypt_protection_map(void);
>
> +#ifdef CONFIG_X86_MEM_ENCRYPT
> +
> +void __init mem_encrypt_setup_arch(void);
> +
> +#else	/* !CONFIG_X86_MEM_ENCRYPT */
> +
> +static inline void __init mem_encrypt_setup_arch(void) { }
> +
> +#endif	/* CONFIG_X86_MEM_ENCRYPT */
Commit 0a9567ac5e6a ("x86/mem_encrypt: Unbreak the AMD_MEM_ENCRYPT=n build") added an #ifdef for CONFIG_X86_MEM_ENCRYPT near the top of the file, so it would be better to rebase and put these new declarations up there with the mem_encrypt_init() definitions.
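Something like this is what I have in mind (just a rough sketch of the rebased hunk, assuming the #ifdef block that 0a9567ac5e6a added around mem_encrypt_init() is still in that shape):

  /* Sketch only -- roughly how the declarations would merge after a rebase */
  #ifdef CONFIG_X86_MEM_ENCRYPT
  void __init mem_encrypt_init(void);
  void __init mem_encrypt_setup_arch(void);
  #else
  static inline void mem_encrypt_init(void) { }
  static inline void __init mem_encrypt_setup_arch(void) { }
  #endif

That keeps all of the CONFIG_X86_MEM_ENCRYPT declarations and stubs in a single section instead of introducing a second #ifdef block further down in the file.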
Otherwise, no objections.
Thanks,
Tom
>  /*
>   * The __sme_pa() and __sme_pa_nodebug() macros are meant for use when
>   * writing to or comparing values from the cr3 register.  Having the
> diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
> index 16babff771bd..e2aa1d5b37a9 100644
> --- a/arch/x86/kernel/setup.c
> +++ b/arch/x86/kernel/setup.c
> @@ -1121,7 +1121,7 @@ void __init setup_arch(char **cmdline_p)
>  	 * Needs to run after memblock setup because it needs the physical
>  	 * memory size.
>  	 */
> -	sev_setup_arch();
> +	mem_encrypt_setup_arch();
>
>  	efi_fake_memmap();
>  	efi_find_mirror();
> diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
> index 9f27e14e185f..c290c55b632b 100644
> --- a/arch/x86/mm/mem_encrypt.c
> +++ b/arch/x86/mm/mem_encrypt.c
> @@ -12,6 +12,7 @@
>  #include <linux/swiotlb.h>
>  #include <linux/cc_platform.h>
>  #include <linux/mem_encrypt.h>
> +#include <linux/virtio_anchor.h>
>
>  /* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */
>  bool force_dma_unencrypted(struct device *dev)
> @@ -86,3 +87,36 @@ void __init mem_encrypt_init(void)
>
>  	print_mem_encrypt_feature_info();
>  }
> +
> +void __init mem_encrypt_setup_arch(void)
> +{
> +	phys_addr_t total_mem = memblock_phys_mem_size();
> +	unsigned long size;
> +
> +	if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
> +		return;
> +
> +	/*
> +	 * For SEV and TDX, all DMA has to occur via shared/unencrypted pages.
> +	 * Kernel uses SWIOTLB to make this happen without changing device
> +	 * drivers. However, depending on the workload being run, the
> +	 * default 64MB of SWIOTLB may not be enough and SWIOTLB may
> +	 * run out of buffers for DMA, resulting in I/O errors and/or
> +	 * performance degradation especially with high I/O workloads.
> +	 *
> +	 * Adjust the default size of SWIOTLB using a percentage of guest
> +	 * memory for SWIOTLB buffers. Also, as the SWIOTLB bounce buffer
> +	 * memory is allocated from low memory, ensure that the adjusted size
> +	 * is within the limits of low available memory.
> +	 *
> +	 * The percentage of guest memory used here for SWIOTLB buffers
> +	 * is more of an approximation of the static adjustment which
> +	 * 64MB for <1G, and ~128M to 256M for 1G-to-4G, i.e., the 6%
> +	 */
> +	size = total_mem * 6 / 100;
> +	size = clamp_val(size, IO_TLB_DEFAULT_SIZE, SZ_1G);
> +	swiotlb_adjust_size(size);
> +
> +	/* Set restricted memory access for virtio. */
> +	virtio_set_mem_acc_cb(virtio_require_restricted_mem_acc);
> +}
> diff --git a/arch/x86/mm/mem_encrypt_amd.c b/arch/x86/mm/mem_encrypt_amd.c
> index e0b51c09109f..b1edeb8642b8 100644
> --- a/arch/x86/mm/mem_encrypt_amd.c
> +++ b/arch/x86/mm/mem_encrypt_amd.c
> @@ -20,7 +20,6 @@
>  #include <linux/bitops.h>
>  #include <linux/dma-mapping.h>
>  #include <linux/virtio_config.h>
> -#include <linux/virtio_anchor.h>
>  #include <linux/cc_platform.h>
>
>  #include <asm/tlbflush.h>
> @@ -215,40 +214,6 @@ void __init sme_map_bootdata(char *real_mode_data)
>  	__sme_early_map_unmap_mem(__va(cmdline_paddr), COMMAND_LINE_SIZE, true);
>  }
>
> -void __init sev_setup_arch(void)
> -{
> -	phys_addr_t total_mem = memblock_phys_mem_size();
> -	unsigned long size;
> -
> -	if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
> -		return;
> -
> -	/*
> -	 * For SEV, all DMA has to occur via shared/unencrypted pages.
> -	 * SEV uses SWIOTLB to make this happen without changing device
> -	 * drivers. However, depending on the workload being run, the
> -	 * default 64MB of SWIOTLB may not be enough and SWIOTLB may
> -	 * run out of buffers for DMA, resulting in I/O errors and/or
> -	 * performance degradation especially with high I/O workloads.
> -	 *
> -	 * Adjust the default size of SWIOTLB for SEV guests using
> -	 * a percentage of guest memory for SWIOTLB buffers.
> -	 * Also, as the SWIOTLB bounce buffer memory is allocated
> -	 * from low memory, ensure that the adjusted size is within
> -	 * the limits of low available memory.
> -	 *
> -	 * The percentage of guest memory used here for SWIOTLB buffers
> -	 * is more of an approximation of the static adjustment which
> -	 * 64MB for <1G, and ~128M to 256M for 1G-to-4G, i.e., the 6%
> -	 */
> -	size = total_mem * 6 / 100;
> -	size = clamp_val(size, IO_TLB_DEFAULT_SIZE, SZ_1G);
> -	swiotlb_adjust_size(size);
> -
> -	/* Set restricted memory access for virtio. */
> -	virtio_set_mem_acc_cb(virtio_require_restricted_mem_acc);
> -}
> -
>  static unsigned long pg_level_to_pfn(int level, pte_t *kpte, pgprot_t *ret_prot)
>  {
>  	unsigned long pfn = 0;
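As an aside on the sizing comment quoted above: for anyone curious what the 6% heuristic works out to across guest sizes, here is a quick throwaway userspace check (illustrative only, not from the patch; IO_TLB_DEFAULT_SIZE and the clamp bounds are hardcoded to the kernel values, and it assumes a 64-bit host):

  /* Standalone check of the SWIOTLB sizing math quoted above. */
  #include <stdio.h>

  #define SZ_1M                (1UL << 20)
  #define SZ_1G                (1UL << 30)
  #define IO_TLB_DEFAULT_SIZE  (64UL * SZ_1M)  /* kernel default: 64MB */

  /* matches the behavior of the kernel's clamp_val() for this use */
  #define clamp_val(v, lo, hi) ((v) < (lo) ? (lo) : ((v) > (hi) ? (hi) : (v)))

  int main(void)
  {
          unsigned long guests[] = { 512 * SZ_1M, 4 * SZ_1G, 16 * SZ_1G, 64 * SZ_1G };

          for (int i = 0; i < 4; i++) {
                  /* the 6% heuristic, then clamp to [64MB, 1GB] */
                  unsigned long size = guests[i] * 6 / 100;

                  size = clamp_val(size, IO_TLB_DEFAULT_SIZE, SZ_1G);
                  printf("%6luM guest -> %4luM SWIOTLB\n",
                         guests[i] / SZ_1M, size / SZ_1M);
          }
          return 0;
  }

Guests below ~1GB stay at the 64MB floor and guests above ~17GB hit the 1GB ceiling; everything in between scales linearly (e.g. 4GB -> 245MB, 16GB -> 983MB).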