Subject: Re: [PATCH v10 014/108] KVM: TDX: Stub in tdx.h with structs, accessors, and VMCS helpers
On Sat, 2022-10-29 at 23:22 -0700, isaku.yamahata@intel.com wrote:
> From: Sean Christopherson <sean.j.christopherson@intel.com>
>
> Stub in kvm_tdx, vcpu_tdx, and their various accessors. TDX defines
> SEAMCALL APIs to access TDX control structures corresponding to the VMX
> VMCS. Introduce helper accessors to hide its SEAMCALL ABI details.
>
> Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
> Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com>
> ---
> arch/x86/kvm/vmx/tdx.h | 118 ++++++++++++++++++++++++++++++++++++++++-
> 1 file changed, 116 insertions(+), 2 deletions(-)
>
> diff --git a/arch/x86/kvm/vmx/tdx.h b/arch/x86/kvm/vmx/tdx.h
> index 473013265bd8..98999bf3f188 100644
> --- a/arch/x86/kvm/vmx/tdx.h
> +++ b/arch/x86/kvm/vmx/tdx.h
> @@ -3,14 +3,27 @@
> #define __KVM_X86_TDX_H
>
> #ifdef CONFIG_INTEL_TDX_HOST
> +
> +#include "tdx_ops.h"
> +
> +struct tdx_td_page {
> + unsigned long va;
> + hpa_t pa;
> + bool added;
> +};
> +
> struct kvm_tdx {
> struct kvm kvm;
> - /* TDX specific members follow. */
> +
> + struct tdx_td_page tdr;
> + struct tdx_td_page *tdcs;
> };
>
> struct vcpu_tdx {
> struct kvm_vcpu vcpu;
> - /* TDX specific members follow. */
> +
> + struct tdx_td_page tdvpr;
> + struct tdx_td_page *tdvpx;
> };

As I replied to the patch "KVM: TDX: create/destroy VM structure", I think it's
better to introduce this part together with that patch to make review easier.

If I am not mistaken, the "accessors" below are not used in that patch, so they
can be added in a later patch when needed.

>
> static inline bool is_td(struct kvm *kvm)
> @@ -32,6 +45,107 @@ static inline struct vcpu_tdx *to_tdx(struct kvm_vcpu *vcpu)
> {
> return container_of(vcpu, struct vcpu_tdx, vcpu);
> }
> +
> +static __always_inline void tdvps_vmcs_check(u32 field, u8 bits)
> +{
> +#define VMCS_ENC_ACCESS_TYPE_MASK 0x1UL
> +#define VMCS_ENC_ACCESS_TYPE_FULL 0x0UL
> +#define VMCS_ENC_ACCESS_TYPE_HIGH 0x1UL
> +#define VMCS_ENC_ACCESS_TYPE(field) ((field) & VMCS_ENC_ACCESS_TYPE_MASK)
> +
> + /* TDX is 64bit only. HIGH field isn't supported. */
> + BUILD_BUG_ON_MSG(__builtin_constant_p(field) &&
> + VMCS_ENC_ACCESS_TYPE(field) == VMCS_ENC_ACCESS_TYPE_HIGH,
> + "Read/Write to TD VMCS *_HIGH fields not supported");
> +
> + BUILD_BUG_ON(bits != 16 && bits != 32 && bits != 64);
> +
> +#define VMCS_ENC_WIDTH_MASK GENMASK(14, 13)
> +#define VMCS_ENC_WIDTH_16BIT (0UL << 13)
> +#define VMCS_ENC_WIDTH_64BIT (1UL << 13)
> +#define VMCS_ENC_WIDTH_32BIT (2UL << 13)
> +#define VMCS_ENC_WIDTH_NATURAL (3UL << 13)
> +#define VMCS_ENC_WIDTH(field) ((field) & VMCS_ENC_WIDTH_MASK)
> +
> + /* TDX is 64bit only. i.e. natural width = 64bit. */
> + BUILD_BUG_ON_MSG(bits != 64 && __builtin_constant_p(field) &&
> + (VMCS_ENC_WIDTH(field) == VMCS_ENC_WIDTH_64BIT ||
> + VMCS_ENC_WIDTH(field) == VMCS_ENC_WIDTH_NATURAL),
> + "Invalid TD VMCS access for 64-bit field");
> + BUILD_BUG_ON_MSG(bits != 32 && __builtin_constant_p(field) &&
> + VMCS_ENC_WIDTH(field) == VMCS_ENC_WIDTH_32BIT,
> + "Invalid TD VMCS access for 32-bit field");
> + BUILD_BUG_ON_MSG(bits != 16 && __builtin_constant_p(field) &&
> + VMCS_ENC_WIDTH(field) == VMCS_ENC_WIDTH_16BIT,
> + "Invalid TD VMCS access for 16-bit field");
> +}
> +
> +static __always_inline void tdvps_state_non_arch_check(u64 field, u8 bits) {}
> +static __always_inline void tdvps_management_check(u64 field, u8 bits) {}
> +
> +#define TDX_BUILD_TDVPS_ACCESSORS(bits, uclass, lclass) \
> +static __always_inline u##bits td_##lclass##_read##bits(struct vcpu_tdx *tdx, \
> + u32 field) \
> +{ \
> + struct tdx_module_output out; \
> + u64 err; \
> + \
> + tdvps_##lclass##_check(field, bits); \
> + err = tdh_vp_rd(tdx->tdvpr.pa, TDVPS_##uclass(field), &out); \
> + if (unlikely(err)) { \
> + pr_err("TDH_VP_RD["#uclass".0x%x] failed: 0x%llx\n", \
> + field, err); \
> + return 0; \
> + } \
> + return (u##bits)out.r8; \
> +} \
> +static __always_inline void td_##lclass##_write##bits(struct vcpu_tdx *tdx, \
> + u32 field, u##bits val) \
> +{ \
> + struct tdx_module_output out; \
> + u64 err; \
> + \
> + tdvps_##lclass##_check(field, bits); \
> + err = tdh_vp_wr(tdx->tdvpr.pa, TDVPS_##uclass(field), val, \
> + GENMASK_ULL(bits - 1, 0), &out); \
> + if (unlikely(err)) \
> + pr_err("TDH_VP_WR["#uclass".0x%x] = 0x%llx failed: 0x%llx\n", \
> + field, (u64)val, err); \
> +} \
> +static __always_inline void td_##lclass##_setbit##bits(struct vcpu_tdx *tdx, \
> + u32 field, u64 bit) \
> +{ \
> + struct tdx_module_output out; \
> + u64 err; \
> + \
> + tdvps_##lclass##_check(field, bits); \
> + err = tdh_vp_wr(tdx->tdvpr.pa, TDVPS_##uclass(field), bit, bit, \
> + &out); \
> + if (unlikely(err)) \
> + pr_err("TDH_VP_WR["#uclass".0x%x] |= 0x%llx failed: 0x%llx\n", \
> + field, bit, err); \
> +} \
> +static __always_inline void td_##lclass##_clearbit##bits(struct vcpu_tdx *tdx, \
> + u32 field, u64 bit) \
> +{ \
> + struct tdx_module_output out; \
> + u64 err; \
> + \
> + tdvps_##lclass##_check(field, bits); \
> + err = tdh_vp_wr(tdx->tdvpr.pa, TDVPS_##uclass(field), 0, bit, \
> + &out); \
> + if (unlikely(err)) \
> + pr_err("TDH_VP_WR["#uclass".0x%x] &= ~0x%llx failed: 0x%llx\n", \
> + field, bit, err); \
> +}
> +
> +TDX_BUILD_TDVPS_ACCESSORS(16, VMCS, vmcs);
> +TDX_BUILD_TDVPS_ACCESSORS(32, VMCS, vmcs);
> +TDX_BUILD_TDVPS_ACCESSORS(64, VMCS, vmcs);
> +
> +TDX_BUILD_TDVPS_ACCESSORS(64, STATE_NON_ARCH, state_non_arch);
> +TDX_BUILD_TDVPS_ACCESSORS(8, MANAGEMENT, management);
> +
>
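A side note on tdvps_vmcs_check(): because the access type sits in bit 0 and
the field width in bits 14:13 of the VMCS field encoding, a mismatched accessor
is rejected at build time whenever the field is a compile-time constant.  A
hypothetical example, given a struct vcpu_tdx *tdx and assuming the standard
encodings from <asm/vmx.h>:

	td_vmcs_read64(tdx, GUEST_RIP);	/* OK: 0x681e encodes natural width */
	td_vmcs_read32(tdx, GUEST_RIP);	/* BUILD_BUG: "Invalid TD VMCS access
					 * for 64-bit field" */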
