Subject: [PATCH v2 090/144] KVM: selftests: Convert dirty_log_test away from VCPU_ID
From: Sean Christopherson <seanjc@google.com>
    Convert dirty_log_test to pass around a 'struct kvm_vcpu' object instead
    of using a global VCPU_ID. Note, this is a "functional" change in the
    sense that the test now creates a vCPU with vcpu_id==0 instead of
    vcpu_id==1. The non-zero VCPU_ID was 100% arbitrary and added little to
    no validation coverage. If testing non-zero vCPU IDs is desirable for
    generic tests, that can be done in the future by tweaking the VM creation
    helpers.
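
    For readers skimming the hunks, here is a minimal before/after sketch
    of the calling convention being converted (illustrative only, not part
    of the patch; the helper names are made up, the types and accessors
    are the ones used in the diff below):

        /* Before: the VM plus the global VCPU_ID identify the lone vCPU. */
        static void collect_dirty_pages_old(struct kvm_vm *vm, int slot,
                                            void *bitmap, uint32_t num_pages)
        {
                kvm_vm_get_dirty_log(vm, slot, bitmap);
        }

        /* After: the vCPU object carries both the VM and the vCPU ID. */
        static void collect_dirty_pages_new(struct kvm_vcpu *vcpu, int slot,
                                            void *bitmap, uint32_t num_pages)
        {
                kvm_vm_get_dirty_log(vcpu->vm, slot, bitmap);
        }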

    The test still hardcodes usage of vcpu_id==0, but only for a few lines.
    That wart will be removed in the not-too-distant future.
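
    Concretely, the remaining hardcoded uses are confined to the creation
    path; below is a condensed, illustrative sketch assembled from the
    run_test() and vcpu_worker() hunks in the diff (nr_guest_pages stands
    in for the memory-size expression; error handling elided):

        /* run_test(): the lone vCPU is still created with hardcoded ID 0 ... */
        vm = create_vm(mode, 0, nr_guest_pages, guest_code);
        vcpu = vcpu_get(vm, 0);

        /* ... but everything downstream takes the object, not the ID. */
        pthread_create(&vcpu_thread, NULL, vcpu_worker, vcpu);
        log_mode_collect_dirty_pages(vcpu, TEST_MEM_SLOT_INDEX,
                                     bmap, host_num_pages);

        /* vcpu_worker(): recover the vCPU (and its VM) from the thread arg. */
        struct kvm_vcpu *vcpu = data;
        ret = __vcpu_run(vcpu->vm, vcpu->id);
        log_mode_after_vcpu_run(vcpu, ret, errno);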

    Signed-off-by: Sean Christopherson <seanjc@google.com>
    ---
    tools/testing/selftests/kvm/dirty_log_test.c | 59 ++++++++++----------
    1 file changed, 30 insertions(+), 29 deletions(-)

    diff --git a/tools/testing/selftests/kvm/dirty_log_test.c b/tools/testing/selftests/kvm/dirty_log_test.c
    index cf426a8ae816..23e0c727e375 100644
    --- a/tools/testing/selftests/kvm/dirty_log_test.c
    +++ b/tools/testing/selftests/kvm/dirty_log_test.c
    @@ -23,8 +23,6 @@
    #include "guest_modes.h"
    #include "processor.h"

    -#define VCPU_ID 1
    -
    /* The memory slot index to track dirty pages */
    #define TEST_MEM_SLOT_INDEX 1

    @@ -226,17 +224,17 @@ static void clear_log_create_vm_done(struct kvm_vm *vm)
    vm_enable_cap(vm, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2, manual_caps);
    }

    -static void dirty_log_collect_dirty_pages(struct kvm_vm *vm, int slot,
    +static void dirty_log_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
    void *bitmap, uint32_t num_pages)
    {
    - kvm_vm_get_dirty_log(vm, slot, bitmap);
    + kvm_vm_get_dirty_log(vcpu->vm, slot, bitmap);
    }

    -static void clear_log_collect_dirty_pages(struct kvm_vm *vm, int slot,
    +static void clear_log_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
    void *bitmap, uint32_t num_pages)
    {
    - kvm_vm_get_dirty_log(vm, slot, bitmap);
    - kvm_vm_clear_dirty_log(vm, slot, bitmap, 0, num_pages);
    + kvm_vm_get_dirty_log(vcpu->vm, slot, bitmap);
    + kvm_vm_clear_dirty_log(vcpu->vm, slot, bitmap, 0, num_pages);
    }

    /* Should only be called after a GUEST_SYNC */
    @@ -250,14 +248,14 @@ static void vcpu_handle_sync_stop(void)
    }
    }

    -static void default_after_vcpu_run(struct kvm_vm *vm, int ret, int err)
    +static void default_after_vcpu_run(struct kvm_vcpu *vcpu, int ret, int err)
    {
    - struct kvm_run *run = vcpu_state(vm, VCPU_ID);
    + struct kvm_run *run = vcpu->run;

    TEST_ASSERT(ret == 0 || (ret == -1 && err == EINTR),
    "vcpu run failed: errno=%d", err);

    - TEST_ASSERT(get_ucall(vm, VCPU_ID, NULL) == UCALL_SYNC,
    + TEST_ASSERT(get_ucall(vcpu->vm, vcpu->id, NULL) == UCALL_SYNC,
    "Invalid guest sync status: exit_reason=%s\n",
    exit_reason_str(run->exit_reason));

    @@ -328,7 +326,7 @@ static void dirty_ring_continue_vcpu(void)
    sem_post(&sem_vcpu_cont);
    }

    -static void dirty_ring_collect_dirty_pages(struct kvm_vm *vm, int slot,
    +static void dirty_ring_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
    void *bitmap, uint32_t num_pages)
    {
    /* We only have one vcpu */
    @@ -348,10 +346,10 @@ static void dirty_ring_collect_dirty_pages(struct kvm_vm *vm, int slot,
    }

    /* Only have one vcpu */
    - count = dirty_ring_collect_one(vcpu_map_dirty_ring(vm, VCPU_ID),
    + count = dirty_ring_collect_one(vcpu_map_dirty_ring(vcpu->vm, vcpu->id),
    slot, bitmap, num_pages, &fetch_index);

    - cleared = kvm_vm_reset_dirty_ring(vm);
    + cleared = kvm_vm_reset_dirty_ring(vcpu->vm);

    /* Cleared pages should be the same as collected */
    TEST_ASSERT(cleared == count, "Reset dirty pages (%u) mismatch "
    @@ -366,12 +364,12 @@ static void dirty_ring_collect_dirty_pages(struct kvm_vm *vm, int slot,
    pr_info("Iteration %ld collected %u pages\n", iteration, count);
    }

    -static void dirty_ring_after_vcpu_run(struct kvm_vm *vm, int ret, int err)
    +static void dirty_ring_after_vcpu_run(struct kvm_vcpu *vcpu, int ret, int err)
    {
    - struct kvm_run *run = vcpu_state(vm, VCPU_ID);
    + struct kvm_run *run = vcpu->run;

    /* A ucall-sync or ring-full event is allowed */
    - if (get_ucall(vm, VCPU_ID, NULL) == UCALL_SYNC) {
    + if (get_ucall(vcpu->vm, vcpu->id, NULL) == UCALL_SYNC) {
    /* We should allow this to continue */
    ;
    } else if (run->exit_reason == KVM_EXIT_DIRTY_RING_FULL ||
    @@ -405,10 +403,10 @@ struct log_mode {
    /* Hook when the vm creation is done (before vcpu creation) */
    void (*create_vm_done)(struct kvm_vm *vm);
    /* Hook to collect the dirty pages into the bitmap provided */
    - void (*collect_dirty_pages) (struct kvm_vm *vm, int slot,
    + void (*collect_dirty_pages) (struct kvm_vcpu *vcpu, int slot,
    void *bitmap, uint32_t num_pages);
    /* Hook to call when after each vcpu run */
    - void (*after_vcpu_run)(struct kvm_vm *vm, int ret, int err);
    + void (*after_vcpu_run)(struct kvm_vcpu *vcpu, int ret, int err);
    void (*before_vcpu_join) (void);
    } log_modes[LOG_MODE_NUM] = {
    {
    @@ -470,22 +468,22 @@ static void log_mode_create_vm_done(struct kvm_vm *vm)
    mode->create_vm_done(vm);
    }

    -static void log_mode_collect_dirty_pages(struct kvm_vm *vm, int slot,
    +static void log_mode_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
    void *bitmap, uint32_t num_pages)
    {
    struct log_mode *mode = &log_modes[host_log_mode];

    TEST_ASSERT(mode->collect_dirty_pages != NULL,
    "collect_dirty_pages() is required for any log mode!");
    - mode->collect_dirty_pages(vm, slot, bitmap, num_pages);
    + mode->collect_dirty_pages(vcpu, slot, bitmap, num_pages);
    }

    -static void log_mode_after_vcpu_run(struct kvm_vm *vm, int ret, int err)
    +static void log_mode_after_vcpu_run(struct kvm_vcpu *vcpu, int ret, int err)
    {
    struct log_mode *mode = &log_modes[host_log_mode];

    if (mode->after_vcpu_run)
    - mode->after_vcpu_run(vm, ret, err);
    + mode->after_vcpu_run(vcpu, ret, err);
    }

    static void log_mode_before_vcpu_join(void)
    @@ -507,7 +505,8 @@ static void generate_random_array(uint64_t *guest_array, uint64_t size)
    static void *vcpu_worker(void *data)
    {
    int ret;
    - struct kvm_vm *vm = data;
    + struct kvm_vcpu *vcpu = data;
    + struct kvm_vm *vm = vcpu->vm;
    uint64_t *guest_array;
    uint64_t pages_count = 0;
    struct kvm_signal_mask *sigmask = alloca(offsetof(struct kvm_signal_mask, sigset)
    @@ -522,7 +521,7 @@ static void *vcpu_worker(void *data)
    sigmask->len = 8;
    pthread_sigmask(0, NULL, sigset);
    sigdelset(sigset, SIG_IPI);
    - vcpu_ioctl(vm, VCPU_ID, KVM_SET_SIGNAL_MASK, sigmask);
    + vcpu_ioctl(vm, vcpu->id, KVM_SET_SIGNAL_MASK, sigmask);

    sigemptyset(sigset);
    sigaddset(sigset, SIG_IPI);
    @@ -534,13 +533,13 @@ static void *vcpu_worker(void *data)
    generate_random_array(guest_array, TEST_PAGES_PER_LOOP);
    pages_count += TEST_PAGES_PER_LOOP;
    /* Let the guest dirty the random pages */
    - ret = __vcpu_run(vm, VCPU_ID);
    + ret = __vcpu_run(vm, vcpu->id);
    if (ret == -1 && errno == EINTR) {
    int sig = -1;
    sigwait(sigset, &sig);
    assert(sig == SIG_IPI);
    }
    - log_mode_after_vcpu_run(vm, ret, errno);
    + log_mode_after_vcpu_run(vcpu, ret, errno);
    }

    pr_info("Dirtied %"PRIu64" pages\n", pages_count);
    @@ -693,6 +692,7 @@ struct test_params {
    static void run_test(enum vm_guest_mode mode, void *arg)
    {
    struct test_params *p = arg;
    + struct kvm_vcpu *vcpu;
    struct kvm_vm *vm;
    unsigned long *bmap;

    @@ -710,9 +710,10 @@ static void run_test(enum vm_guest_mode mode, void *arg)
    * (e.g., 64K page size guest will need even less memory for
    * page tables).
    */
    - vm = create_vm(mode, VCPU_ID,
    + vm = create_vm(mode, 0,
    2ul << (DIRTY_MEM_BITS - PAGE_SHIFT_4K),
    guest_code);
    + vcpu = vcpu_get(vm, 0);

    guest_page_size = vm_get_page_size(vm);
    /*
    @@ -773,12 +774,12 @@ static void run_test(enum vm_guest_mode mode, void *arg)
    host_clear_count = 0;
    host_track_next_count = 0;

    - pthread_create(&vcpu_thread, NULL, vcpu_worker, vm);
    + pthread_create(&vcpu_thread, NULL, vcpu_worker, vcpu);

    while (iteration < p->iterations) {
    /* Give the vcpu thread some time to dirty some pages */
    usleep(p->interval * 1000);
    - log_mode_collect_dirty_pages(vm, TEST_MEM_SLOT_INDEX,
    + log_mode_collect_dirty_pages(vcpu, TEST_MEM_SLOT_INDEX,
    bmap, host_num_pages);

    /*
    --
    2.36.1.255.ge46751e96f-goog