Subject: Re: [PATCH v2 138/144] KVM: selftests: Move per-VM/per-vCPU nr pages calculation to __vm_create()
On Fri, Jun 03, 2022 at 12:43:25AM +0000, Sean Christopherson wrote:
...
> diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
> index 508a5eafe15b..494bce490344 100644
> --- a/tools/testing/selftests/kvm/lib/kvm_util.c
> +++ b/tools/testing/selftests/kvm/lib/kvm_util.c
> @@ -258,12 +258,45 @@ struct kvm_vm *____vm_create(enum vm_guest_mode mode, uint64_t nr_pages)
> return vm;
> }
>
> -struct kvm_vm *__vm_create(enum vm_guest_mode mode, uint64_t nr_pages)
> +static uint64_t vm_nr_pages_required(uint32_t nr_runnable_vcpus,
> + uint64_t extra_mem_pages)
> {
> + uint64_t nr_pages;
> +
> + TEST_ASSERT(nr_runnable_vcpus,
> + "Use vm_create_barebones() for VMs that _never_ have vCPUs\n");
> +
> + TEST_ASSERT(nr_runnable_vcpus <= kvm_check_cap(KVM_CAP_MAX_VCPUS),
> + "nr_vcpus = %d too large for host, max-vcpus = %d",
> + nr_runnable_vcpus, kvm_check_cap(KVM_CAP_MAX_VCPUS));
> +
> + nr_pages = DEFAULT_GUEST_PHY_PAGES;
> + nr_pages += nr_runnable_vcpus * DEFAULT_STACK_PGS;
> +
> + /*
> + * Account for the number of pages needed for the page tables. The
> + * maximum page table size for a memory region will be when the
> + * smallest page size is used. Considering each page contains x page
> + * table descriptors, the total extra size for page tables (for extra
> + * N pages) will be: N/x+N/x^2+N/x^3+... which is definitely smaller
> + * than N/x*2.
> + */
> + nr_pages += (nr_pages + extra_mem_pages) / PTES_PER_MIN_PAGE * 2;
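
For what it's worth, the "smaller than N/x*2" bound in the comment checks
out: the series N/x + N/x^2 + N/x^3 + ... sums to N/(x-1), and
N/(x-1) <= 2N/x for any x >= 2, which PTES_PER_MIN_PAGE always satisfies.
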
> +
> + TEST_ASSERT(nr_runnable_vcpus <= kvm_check_cap(KVM_CAP_MAX_VCPUS),
> + "Host doesn't support %d vCPUs, max-vcpus = %d",
> + nr_runnable_vcpus, kvm_check_cap(KVM_CAP_MAX_VCPUS));

This assert is a repeat of the second assert above and can be dropped.
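
i.e. something like the below (untested) on top of this patch:

 	nr_pages += (nr_pages + extra_mem_pages) / PTES_PER_MIN_PAGE * 2;

-	TEST_ASSERT(nr_runnable_vcpus <= kvm_check_cap(KVM_CAP_MAX_VCPUS),
-		    "Host doesn't support %d vCPUs, max-vcpus = %d",
-		    nr_runnable_vcpus, kvm_check_cap(KVM_CAP_MAX_VCPUS));
-
 	return vm_adjust_num_guest_pages(VM_MODE_DEFAULT, nr_pages);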

> +
> + return vm_adjust_num_guest_pages(VM_MODE_DEFAULT, nr_pages);

We should use 'mode' here, which means we need to pass it to this helper
from __vm_create().
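
Something like the below (untested) should do it:

-static uint64_t vm_nr_pages_required(uint32_t nr_runnable_vcpus,
+static uint64_t vm_nr_pages_required(enum vm_guest_mode mode,
+				     uint32_t nr_runnable_vcpus,
 				     uint64_t extra_mem_pages)
 {
...
-	return vm_adjust_num_guest_pages(VM_MODE_DEFAULT, nr_pages);
+	return vm_adjust_num_guest_pages(mode, nr_pages);
 }

 struct kvm_vm *__vm_create(enum vm_guest_mode mode, uint32_t nr_runnable_vcpus,
 			   uint64_t nr_extra_pages)
 {
-	uint64_t nr_pages = vm_nr_pages_required(nr_runnable_vcpus,
-						 nr_extra_pages);
+	uint64_t nr_pages = vm_nr_pages_required(mode, nr_runnable_vcpus,
+						 nr_extra_pages);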

> +}
> +
> +struct kvm_vm *__vm_create(enum vm_guest_mode mode, uint32_t nr_runnable_vcpus,
> + uint64_t nr_extra_pages)
> +{
> + uint64_t nr_pages = vm_nr_pages_required(nr_runnable_vcpus,
> + nr_extra_pages);
> struct kvm_vm *vm;
>
> - nr_pages = vm_adjust_num_guest_pages(VM_MODE_DEFAULT, nr_pages);
> -
> vm = ____vm_create(mode, nr_pages);
>
> kvm_vm_elf_load(vm, program_invocation_name);
> @@ -297,27 +330,12 @@ struct kvm_vm *__vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus
> uint64_t extra_mem_pages,
> void *guest_code, struct kvm_vcpu *vcpus[])
> {
> - uint64_t vcpu_pages, extra_pg_pages, pages;
> struct kvm_vm *vm;
> int i;
>
> TEST_ASSERT(!nr_vcpus || vcpus, "Must provide vCPU array");
>
> - /* The maximum page table size for a memory region will be when the
> - * smallest pages are used. Considering each page contains x page
> - * table descriptors, the total extra size for page tables (for extra
> - * N pages) will be: N/x+N/x^2+N/x^3+... which is definitely smaller
> - * than N/x*2.
> - */
> - vcpu_pages = nr_vcpus * DEFAULT_STACK_PGS;
> - extra_pg_pages = (DEFAULT_GUEST_PHY_PAGES + extra_mem_pages + vcpu_pages) / PTES_PER_MIN_PAGE * 2;
> - pages = DEFAULT_GUEST_PHY_PAGES + vcpu_pages + extra_pg_pages;
> -
> - TEST_ASSERT(nr_vcpus <= kvm_check_cap(KVM_CAP_MAX_VCPUS),
> - "nr_vcpus = %d too large for host, max-vcpus = %d",
> - nr_vcpus, kvm_check_cap(KVM_CAP_MAX_VCPUS));
> -
> - vm = __vm_create(mode, pages);
> + vm = __vm_create(mode, nr_vcpus, extra_mem_pages);
>
> for (i = 0; i < nr_vcpus; ++i)
> vcpus[i] = vm_vcpu_add(vm, i, guest_code);
> diff --git a/tools/testing/selftests/kvm/s390x/resets.c b/tools/testing/selftests/kvm/s390x/resets.c
> index 43fa71d90232..4ba866047401 100644
> --- a/tools/testing/selftests/kvm/s390x/resets.c
> +++ b/tools/testing/selftests/kvm/s390x/resets.c
> @@ -205,7 +205,7 @@ static struct kvm_vm *create_vm(struct kvm_vcpu **vcpu)
> {
> struct kvm_vm *vm;
>
> - vm = vm_create(DEFAULT_GUEST_PHY_PAGES);
> + vm = vm_create(1);
>
> *vcpu = vm_vcpu_add(vm, ARBITRARY_NON_ZERO_VCPU_ID, guest_code_initial);
>
> diff --git a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
> index 012741176ae4..ffa6a2f93de2 100644
> --- a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
> +++ b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
> @@ -339,7 +339,7 @@ static void test_pmu_config_disable(void (*guest_code)(void))
> if (!(r & KVM_PMU_CAP_DISABLE))
> return;
>
> - vm = vm_create(DEFAULT_GUEST_PHY_PAGES);
> + vm = vm_create(1);
>
> vm_enable_cap(vm, KVM_CAP_PMU_CAPABILITY, KVM_PMU_CAP_DISABLE);
>
> diff --git a/tools/testing/selftests/kvm/x86_64/set_boot_cpu_id.c b/tools/testing/selftests/kvm/x86_64/set_boot_cpu_id.c
> index afc063178c6a..8bcaf4421dc5 100644
> --- a/tools/testing/selftests/kvm/x86_64/set_boot_cpu_id.c
> +++ b/tools/testing/selftests/kvm/x86_64/set_boot_cpu_id.c
> @@ -78,13 +78,10 @@ static void run_vcpu(struct kvm_vcpu *vcpu)
> static struct kvm_vm *create_vm(uint32_t nr_vcpus, uint32_t bsp_vcpu_id,
> struct kvm_vcpu *vcpus[])
> {
> - uint64_t vcpu_pages = (DEFAULT_STACK_PGS) * nr_vcpus;
> - uint64_t extra_pg_pages = vcpu_pages / PTES_PER_MIN_PAGE * nr_vcpus;
> - uint64_t pages = DEFAULT_GUEST_PHY_PAGES + vcpu_pages + extra_pg_pages;
> struct kvm_vm *vm;
> uint32_t i;
>
> - vm = vm_create(pages);
> + vm = vm_create(nr_vcpus);
>
> vm_ioctl(vm, KVM_SET_BOOT_CPU_ID, (void *)(unsigned long)bsp_vcpu_id);
>
> diff --git a/tools/testing/selftests/kvm/x86_64/tsc_scaling_sync.c b/tools/testing/selftests/kvm/x86_64/tsc_scaling_sync.c
> index e416af887ca0..4a962952212e 100644
> --- a/tools/testing/selftests/kvm/x86_64/tsc_scaling_sync.c
> +++ b/tools/testing/selftests/kvm/x86_64/tsc_scaling_sync.c
> @@ -98,7 +98,7 @@ int main(int argc, char *argv[])
> exit(KSFT_SKIP);
> }
>
> - vm = vm_create(DEFAULT_GUEST_PHY_PAGES + DEFAULT_STACK_PGS * NR_TEST_VCPUS);
> + vm = vm_create(NR_TEST_VCPUS);
> vm_ioctl(vm, KVM_SET_TSC_KHZ, (void *) TEST_TSC_KHZ);
>
> pthread_spin_init(&create_lock, PTHREAD_PROCESS_PRIVATE);
> --
> 2.36.1.255.ge46751e96f-goog
>

Thanks,
drew
