Subject: [RFC PATCH v4 3/9] KVM: selftests: Use flag CLOCK_MONOTONIC_RAW for timing
CLOCK_MONOTONIC_RAW provides the same monotonic timing as CLOCK_MONOTONIC,
but is not subject to NTP frequency adjustments, which makes the measured
intervals more robust.
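
For context (not part of this patch), a minimal sketch of the timing
pattern the affected tests rely on, switched to CLOCK_MONOTONIC_RAW; the
subtraction below simply mirrors what the selftests' timespec_sub() /
timespec_elapsed() helpers do:

	#include <stdio.h>
	#include <time.h>
	#include <unistd.h>

	int main(void)
	{
		struct timespec start, end, diff;

		/* Raw monotonic clock: unaffected by NTP slewing. */
		clock_gettime(CLOCK_MONOTONIC_RAW, &start);

		/* Workload being timed; sleep() stands in for a vCPU run. */
		sleep(1);

		clock_gettime(CLOCK_MONOTONIC_RAW, &end);

		/* Normalize the difference so tv_nsec stays non-negative. */
		diff.tv_sec = end.tv_sec - start.tv_sec;
		diff.tv_nsec = end.tv_nsec - start.tv_nsec;
		if (diff.tv_nsec < 0) {
			diff.tv_sec--;
			diff.tv_nsec += 1000000000L;
		}

		printf("elapsed: %ld.%09lds\n", diff.tv_sec, diff.tv_nsec);
		return 0;
	}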

Suggested-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: Yanan Wang <wangyanan55@huawei.com>
Reviewed-by: Ben Gardon <bgardon@google.com>
---
tools/testing/selftests/kvm/demand_paging_test.c | 8 ++++----
tools/testing/selftests/kvm/dirty_log_perf_test.c | 14 +++++++-------
tools/testing/selftests/kvm/lib/test_util.c | 2 +-
tools/testing/selftests/kvm/steal_time.c | 4 ++--
4 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/tools/testing/selftests/kvm/demand_paging_test.c b/tools/testing/selftests/kvm/demand_paging_test.c
index 5f7a229c3af1..efbf0c1e9130 100644
--- a/tools/testing/selftests/kvm/demand_paging_test.c
+++ b/tools/testing/selftests/kvm/demand_paging_test.c
@@ -53,7 +53,7 @@ static void *vcpu_worker(void *data)
vcpu_args_set(vm, vcpu_id, 1, vcpu_id);
run = vcpu_state(vm, vcpu_id);

- clock_gettime(CLOCK_MONOTONIC, &start);
+ clock_gettime(CLOCK_MONOTONIC_RAW, &start);

/* Let the guest access its memory */
ret = _vcpu_run(vm, vcpu_id);
@@ -86,7 +86,7 @@ static int handle_uffd_page_request(int uffd, uint64_t addr)
copy.len = perf_test_args.host_page_size;
copy.mode = 0;

- clock_gettime(CLOCK_MONOTONIC, &start);
+ clock_gettime(CLOCK_MONOTONIC_RAW, &start);

r = ioctl(uffd, UFFDIO_COPY, &copy);
if (r == -1) {
@@ -123,7 +123,7 @@ static void *uffd_handler_thread_fn(void *arg)
struct timespec start;
struct timespec ts_diff;

- clock_gettime(CLOCK_MONOTONIC, &start);
+ clock_gettime(CLOCK_MONOTONIC_RAW, &start);
while (!quit_uffd_thread) {
struct uffd_msg msg;
struct pollfd pollfd[2];
@@ -336,7 +336,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)

pr_info("Finished creating vCPUs and starting uffd threads\n");

- clock_gettime(CLOCK_MONOTONIC, &start);
+ clock_gettime(CLOCK_MONOTONIC_RAW, &start);

for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
pthread_create(&vcpu_threads[vcpu_id], NULL, vcpu_worker,
diff --git a/tools/testing/selftests/kvm/dirty_log_perf_test.c b/tools/testing/selftests/kvm/dirty_log_perf_test.c
index 04a2641261be..6cff4ccf9525 100644
--- a/tools/testing/selftests/kvm/dirty_log_perf_test.c
+++ b/tools/testing/selftests/kvm/dirty_log_perf_test.c
@@ -50,7 +50,7 @@ static void *vcpu_worker(void *data)
while (!READ_ONCE(host_quit)) {
int current_iteration = READ_ONCE(iteration);

- clock_gettime(CLOCK_MONOTONIC, &start);
+ clock_gettime(CLOCK_MONOTONIC_RAW, &start);
ret = _vcpu_run(vm, vcpu_id);
ts_diff = timespec_elapsed(start);

@@ -141,7 +141,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
iteration = 0;
host_quit = false;

- clock_gettime(CLOCK_MONOTONIC, &start);
+ clock_gettime(CLOCK_MONOTONIC_RAW, &start);
for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
vcpu_last_completed_iteration[vcpu_id] = -1;

@@ -162,7 +162,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
ts_diff.tv_sec, ts_diff.tv_nsec);

/* Enable dirty logging */
- clock_gettime(CLOCK_MONOTONIC, &start);
+ clock_gettime(CLOCK_MONOTONIC_RAW, &start);
vm_mem_region_set_flags(vm, PERF_TEST_MEM_SLOT_INDEX,
KVM_MEM_LOG_DIRTY_PAGES);
ts_diff = timespec_elapsed(start);
@@ -174,7 +174,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
* Incrementing the iteration number will start the vCPUs
* dirtying memory again.
*/
- clock_gettime(CLOCK_MONOTONIC, &start);
+ clock_gettime(CLOCK_MONOTONIC_RAW, &start);
iteration++;

pr_debug("Starting iteration %d\n", iteration);
@@ -189,7 +189,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
pr_info("Iteration %d dirty memory time: %ld.%.9lds\n",
iteration, ts_diff.tv_sec, ts_diff.tv_nsec);

- clock_gettime(CLOCK_MONOTONIC, &start);
+ clock_gettime(CLOCK_MONOTONIC_RAW, &start);
kvm_vm_get_dirty_log(vm, PERF_TEST_MEM_SLOT_INDEX, bmap);

ts_diff = timespec_elapsed(start);
@@ -199,7 +199,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
iteration, ts_diff.tv_sec, ts_diff.tv_nsec);

if (dirty_log_manual_caps) {
- clock_gettime(CLOCK_MONOTONIC, &start);
+ clock_gettime(CLOCK_MONOTONIC_RAW, &start);
kvm_vm_clear_dirty_log(vm, PERF_TEST_MEM_SLOT_INDEX, bmap, 0,
host_num_pages);

@@ -212,7 +212,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
}

/* Disable dirty logging */
- clock_gettime(CLOCK_MONOTONIC, &start);
+ clock_gettime(CLOCK_MONOTONIC_RAW, &start);
vm_mem_region_set_flags(vm, PERF_TEST_MEM_SLOT_INDEX, 0);
ts_diff = timespec_elapsed(start);
pr_info("Disabling dirty logging time: %ld.%.9lds\n",
diff --git a/tools/testing/selftests/kvm/lib/test_util.c b/tools/testing/selftests/kvm/lib/test_util.c
index 906c955384e2..c7c0627c6842 100644
--- a/tools/testing/selftests/kvm/lib/test_util.c
+++ b/tools/testing/selftests/kvm/lib/test_util.c
@@ -89,7 +89,7 @@ struct timespec timespec_elapsed(struct timespec start)
{
struct timespec end;

- clock_gettime(CLOCK_MONOTONIC, &end);
+ clock_gettime(CLOCK_MONOTONIC_RAW, &end);
return timespec_sub(end, start);
}

diff --git a/tools/testing/selftests/kvm/steal_time.c b/tools/testing/selftests/kvm/steal_time.c
index fcc840088c91..5bc582d3f2a2 100644
--- a/tools/testing/selftests/kvm/steal_time.c
+++ b/tools/testing/selftests/kvm/steal_time.c
@@ -237,11 +237,11 @@ static void *do_steal_time(void *arg)
{
struct timespec ts, stop;

- clock_gettime(CLOCK_MONOTONIC, &ts);
+ clock_gettime(CLOCK_MONOTONIC_RAW, &ts);
stop = timespec_add_ns(ts, MIN_RUN_DELAY_NS);

while (1) {
- clock_gettime(CLOCK_MONOTONIC, &ts);
+ clock_gettime(CLOCK_MONOTONIC_RAW, &ts);
if (timespec_to_ns(timespec_sub(ts, stop)) >= 0)
break;
}
--
2.23.0