From: Vishal Annapurve <vannapurve@google.com>
Date: 10 May 2022
Subject: [RFC V2 PATCH 8/8] selftests: kvm: priv_memfd: Add test without double allocation

Add a memory conversion test that exercises private and shared guest
accesses without double allocation of the memory backing the test gpa
range. The shared memory mapping starts out inaccessible (PROT_NONE);
as the guest converts the range between private and shared, the VMM
toggles the mapping's protection and drops stale shared pages, while
the private memfd backing is preallocated or hole-punched to match.

Signed-off-by: Vishal Annapurve <vannapurve@google.com>
---
tools/testing/selftests/kvm/priv_memfd_test.c | 225 ++++++++++++++++--
1 file changed, 211 insertions(+), 14 deletions(-)
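
Note for reviewers: the conversion handling added below boils down to a
fallocate/madvise/mprotect sequence applied on each private/shared
transition. The following standalone sketch is illustrative only: it uses
a plain memfd and an anonymous mapping as stand-ins, and none of its names
come from the selftest itself.

/* Hypothetical sketch of the conversion handling; not part of the test. */
#define _GNU_SOURCE
#include <err.h>
#include <fcntl.h>
#include <linux/falloc.h>
#include <sys/mman.h>
#include <unistd.h>

#define MEM_SIZE (2UL << 20)

static int priv_fd;      /* stand-in for priv_memfd */
static void *shared_mem; /* stand-in for the shared memslot mapping */

/* Range converted to private: populate the memfd backing and make the
 * shared view inaccessible.
 */
static void convert_to_private(void)
{
	if (fallocate(priv_fd, 0, 0, MEM_SIZE))
		err(1, "fallocate");
	if (mprotect(shared_mem, MEM_SIZE, PROT_NONE))
		err(1, "mprotect");
}

/* Range converted to shared: punch out the memfd backing, drop stale
 * shared pages, then re-enable shared access.
 */
static void convert_to_shared(void)
{
	if (fallocate(priv_fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      0, MEM_SIZE))
		err(1, "fallocate");
	if (madvise(shared_mem, MEM_SIZE, MADV_DONTNEED))
		err(1, "madvise");
	if (mprotect(shared_mem, MEM_SIZE, PROT_READ | PROT_WRITE))
		err(1, "mprotect");
}

int main(void)
{
	priv_fd = memfd_create("priv", 0);
	if (priv_fd < 0)
		err(1, "memfd_create");
	if (ftruncate(priv_fd, MEM_SIZE))
		err(1, "ftruncate");

	/* Boot-time state for this test: shared access disallowed. */
	shared_mem = mmap(NULL, MEM_SIZE, PROT_NONE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (shared_mem == MAP_FAILED)
		err(1, "mmap");

	convert_to_private();
	convert_to_shared();
	return 0;
}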

diff --git a/tools/testing/selftests/kvm/priv_memfd_test.c b/tools/testing/selftests/kvm/priv_memfd_test.c
index dbe6ead92ba7..3b6e84cf6a44 100644
--- a/tools/testing/selftests/kvm/priv_memfd_test.c
+++ b/tools/testing/selftests/kvm/priv_memfd_test.c
@@ -63,6 +63,8 @@ struct test_run_helper {
guest_code_fn guest_fn;
void *shared_mem;
int priv_memfd;
+ bool disallow_boot_shared_access;
+ bool toggle_shared_mem_state;
};

enum page_size {
@@ -779,6 +781,151 @@ static void pspahct_guest_code(void)
GUEST_DONE();
}

+/* Test to verify guest accesses without double allocation:
+ * Guest starts with shared memory access disallowed by default.
+ * 1) Guest writes a known pattern to the test memory via private access
+ * 2) Guest reads the test memory via private access and verifies that the
+ * contents are the same as written
+ * 3) Guest invokes KVM_HC_MAP_GPA_RANGE to map the gpa range as shared
+ * and marks the range to be accessed via shared access
+ * 4) Guest writes shared memory with another pattern and signals VMM
+ * 5) VMM verifies that the memory contents are the same as written by the
+ * guest in step 4 and updates the memory with a different pattern
+ * 6) Guest verifies that the memory contents are the same as written in
+ * step 5
+ * 7) Guest invokes KVM_HC_MAP_GPA_RANGE to map the gpa range as private
+ * and marks the range to be accessed via private access
+ * 8) Guest writes a known pattern to the test memory and verifies that the
+ * contents are the same as written
+ * 9) Guest invokes KVM_HC_MAP_GPA_RANGE to map the gpa range as shared
+ * and marks the range to be accessed via shared access
+ * 10) Guest writes shared memory with another pattern and signals VMM
+ * 11) VMM verifies that the memory contents are the same as written by the
+ * guest in step 10 and updates the memory with a different pattern
+ * 12) Guest verifies that the memory contents are the same as written in
+ * step 11
+ */
+#define PSAWDAT_ID 7
+#define PSAWDAT_DESC "PrivateSharedAccessWithoutDoubleAllocationTest"
+
+#define PSAWDAT_GUEST_SHARED_MEM_UPDATED1 1ULL
+#define PSAWDAT_GUEST_SHARED_MEM_UPDATED2 2ULL
+
+static bool psawdat_handle_vm_stage(struct kvm_vm *vm,
+ void *test_info,
+ uint64_t stage)
+{
+ void *shared_mem = ((struct test_run_helper *)test_info)->shared_mem;
+
+ switch (stage) {
+ case PSAWDAT_GUEST_SHARED_MEM_UPDATED1: {
+ /* verify data to be same as what guest wrote earlier */
+ TEST_ASSERT(do_mem_op(VERIFY_PAT, shared_mem,
+ TEST_MEM_DATA_PAT2, test_mem_size),
+ "Shared memory view mismatch");
+ TEST_ASSERT(do_mem_op(SET_PAT, shared_mem,
+ TEST_MEM_DATA_PAT1, test_mem_size),
+ "Shared mem update Failure");
+ VM_STAGE_PROCESSED(PSAWDAT_GUEST_SHARED_MEM_UPDATED1);
+ break;
+ }
+ case PSAWDAT_GUEST_SHARED_MEM_UPDATED2: {
+ /* verify data to be same as what guest wrote earlier */
+ TEST_ASSERT(do_mem_op(VERIFY_PAT, shared_mem,
+ TEST_MEM_DATA_PAT3, test_mem_size),
+ "Shared memory view mismatch");
+ TEST_ASSERT(do_mem_op(SET_PAT, shared_mem,
+ TEST_MEM_DATA_PAT4, test_mem_size),
+ "Shared mem update Failure");
+ VM_STAGE_PROCESSED(PSAWDAT_GUEST_SHARED_MEM_UPDATED2);
+ break;
+ }
+ default:
+ printf("Unhandled VM stage %lu\n", stage);
+ return false;
+ }
+
+ return true;
+}
+
+static void psawdat_guest_code(void)
+{
+ void *test_mem = (void *)TEST_MEM_GPA;
+ int ret;
+
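+ /* The read below exits to the VMM as KVM_EXIT_MMIO; the VMM replies
+ * with the size of the allocated test memory (see vcpu_work())
+ */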
+ const size_t mem_size = *((size_t *)MEM_SIZE_MMIO_ADDRESS);
+
+ /* Mark the GPA range to be treated as always accessed privately */
+ ret = kvm_hypercall(KVM_HC_MAP_GPA_RANGE, TEST_MEM_GPA,
+ mem_size >> MIN_PAGE_SHIFT,
+ KVM_MARK_GPA_RANGE_ENC_ACCESS, 0);
+ GUEST_ASSERT_1(ret == 0, ret);
+ GUEST_ASSERT(do_mem_op(SET_PAT, test_mem,
+ TEST_MEM_DATA_PAT1, mem_size));
+
+ GUEST_ASSERT(do_mem_op(VERIFY_PAT, test_mem,
+ TEST_MEM_DATA_PAT1, mem_size));
+
+ /* Map the GPA range to be treated as shared */
+ ret = kvm_hypercall(KVM_HC_MAP_GPA_RANGE, TEST_MEM_GPA,
+ mem_size >> MIN_PAGE_SHIFT,
+ KVM_MAP_GPA_RANGE_DECRYPTED | KVM_MAP_GPA_RANGE_PAGE_SZ_4K, 0);
+ GUEST_ASSERT_1(ret == 0, ret);
+
+ /* Mark the GPA range to be treated as always accessed via shared
+ * access
+ */
+ ret = kvm_hypercall(KVM_HC_MAP_GPA_RANGE, 0, 0,
+ KVM_MARK_GPA_RANGE_ENC_ACCESS, 0);
+ GUEST_ASSERT_1(ret == 0, ret);
+
+ GUEST_ASSERT(do_mem_op(SET_PAT, test_mem,
+ TEST_MEM_DATA_PAT2, mem_size));
+ GUEST_SYNC(PSAWDAT_GUEST_SHARED_MEM_UPDATED1);
+
+ GUEST_ASSERT(do_mem_op(VERIFY_PAT, test_mem,
+ TEST_MEM_DATA_PAT1, mem_size));
+
+ /* Map the GPA range to be treated as private */
+ ret = kvm_hypercall(KVM_HC_MAP_GPA_RANGE, TEST_MEM_GPA,
+ mem_size >> MIN_PAGE_SHIFT,
+ KVM_MAP_GPA_RANGE_ENCRYPTED | KVM_MAP_GPA_RANGE_PAGE_SZ_4K, 0);
+ GUEST_ASSERT_1(ret == 0, ret);
+
+ /* Mark the GPA range to be treated as always accessed via private
+ * access
+ */
+ ret = kvm_hypercall(KVM_HC_MAP_GPA_RANGE, TEST_MEM_GPA,
+ mem_size >> MIN_PAGE_SHIFT,
+ KVM_MARK_GPA_RANGE_ENC_ACCESS, 0);
+ GUEST_ASSERT_1(ret == 0, ret);
+
+ GUEST_ASSERT(do_mem_op(SET_PAT, test_mem,
+ TEST_MEM_DATA_PAT2, mem_size));
+ GUEST_ASSERT(do_mem_op(VERIFY_PAT, test_mem,
+ TEST_MEM_DATA_PAT2, mem_size));
+
+ /* Map the GPA range to be treated as shared */
+ ret = kvm_hypercall(KVM_HC_MAP_GPA_RANGE, TEST_MEM_GPA,
+ mem_size >> MIN_PAGE_SHIFT,
+ KVM_MAP_GPA_RANGE_DECRYPTED | KVM_MAP_GPA_RANGE_PAGE_SZ_4K, 0);
+ GUEST_ASSERT_1(ret == 0, ret);
+
+ /* Mark the GPA range to be treated as always accessed via shared
+ * access
+ */
+ ret = kvm_hypercall(KVM_HC_MAP_GPA_RANGE, 0, 0,
+ KVM_MARK_GPA_RANGE_ENC_ACCESS, 0);
+ GUEST_ASSERT_1(ret == 0, ret);
+
+ GUEST_ASSERT(do_mem_op(SET_PAT, test_mem,
+ TEST_MEM_DATA_PAT3, mem_size));
+ GUEST_SYNC(PSAWDAT_GUEST_SHARED_MEM_UPDATED2);
+
+ GUEST_ASSERT(do_mem_op(VERIFY_PAT, test_mem,
+ TEST_MEM_DATA_PAT4, mem_size));
+
+ GUEST_DONE();
+}
+
static struct test_run_helper priv_memfd_testsuite[] = {
[PMPAT_ID] = {
.test_desc = PMPAT_DESC,
@@ -815,6 +962,13 @@ static struct test_run_helper priv_memfd_testsuite[] = {
.vmst_handler = pspahct_handle_vm_stage,
.guest_fn = pspahct_guest_code,
},
+ [PSAWDAT_ID] = {
+ .test_desc = PSAWDAT_DESC,
+ .vmst_handler = psawdat_handle_vm_stage,
+ .guest_fn = psawdat_guest_code,
+ .toggle_shared_mem_state = true,
+ .disallow_boot_shared_access = true,
+ },
};

static void handle_vm_exit_hypercall(struct kvm_run *run,
@@ -825,6 +979,10 @@ static void handle_vm_exit_hypercall(struct kvm_run *run,
priv_memfd_testsuite[test_id].priv_memfd;
int ret;
int fallocate_mode;
+ void *shared_mem = priv_memfd_testsuite[test_id].shared_mem;
+ bool toggle_shared_mem_state =
+ priv_memfd_testsuite[test_id].toggle_shared_mem_state;
+ int mprotect_mode;

if (run->hypercall.nr != KVM_HC_MAP_GPA_RANGE) {
TEST_FAIL("Unhandled Hypercall %lld\n",
@@ -842,11 +1000,13 @@ static void handle_vm_exit_hypercall(struct kvm_run *run,
gpa, npages);
}

- if (attrs & KVM_MAP_GPA_RANGE_ENCRYPTED)
+ if (attrs & KVM_MAP_GPA_RANGE_ENCRYPTED) {
fallocate_mode = 0;
- else {
+ mprotect_mode = PROT_NONE;
+ } else {
fallocate_mode = (FALLOC_FL_PUNCH_HOLE |
FALLOC_FL_KEEP_SIZE);
+ mprotect_mode = PROT_READ | PROT_WRITE;
}
pr_info("Converting off 0x%lx pages 0x%lx to %s\n",
(gpa - TEST_MEM_GPA), npages,
@@ -857,6 +1017,17 @@ static void handle_vm_exit_hypercall(struct kvm_run *run,
npages << MIN_PAGE_SHIFT);
TEST_ASSERT(ret != -1,
"fallocate failed in hc handling");
+ if (toggle_shared_mem_state) {
+ if (fallocate_mode) {
+ ret = madvise(shared_mem, test_mem_size,
+ MADV_DONTNEED);
+ TEST_ASSERT(ret != -1,
+ "madvise failed in hc handling");
+ }
+ ret = mprotect(shared_mem, test_mem_size, mprotect_mode);
+ TEST_ASSERT(ret != -1,
+ "mprotect failed in hc handling");
+ }
run->hypercall.ret = 0;
}

@@ -867,7 +1038,11 @@ static void handle_vm_exit_memory_error(struct kvm_run *run,
int ret;
int priv_memfd =
priv_memfd_testsuite[test_id].priv_memfd;
+ void *shared_mem = priv_memfd_testsuite[test_id].shared_mem;
+ bool toggle_shared_mem_state =
+ priv_memfd_testsuite[test_id].toggle_shared_mem_state;
int fallocate_mode;
+ int mprotect_mode;

gpa = run->memory.gpa;
size = run->memory.size;
@@ -880,11 +1055,13 @@ static void handle_vm_exit_memory_error(struct kvm_run *run,
gpa, size);
}

- if (flags & KVM_MEMORY_EXIT_FLAG_PRIVATE)
+ if (flags & KVM_MEMORY_EXIT_FLAG_PRIVATE) {
fallocate_mode = 0;
- else {
+ mprotect_mode = PROT_NONE;
+ } else {
fallocate_mode = (FALLOC_FL_PUNCH_HOLE |
FALLOC_FL_KEEP_SIZE);
+ mprotect_mode = PROT_READ | PROT_WRITE;
}
pr_info("Converting off 0x%lx size 0x%lx to %s\n",
(gpa - TEST_MEM_GPA), size,
@@ -894,6 +1071,18 @@ static void handle_vm_exit_memory_error(struct kvm_run *run,
(gpa - TEST_MEM_GPA), size);
TEST_ASSERT(ret != -1,
"fallocate failed in memory error handling");
+
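+ /* Same shared-mapping toggle as in the hypercall handler */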
+ if (toggle_shared_mem_state) {
+ if (fallocate_mode) {
+ ret = madvise(shared_mem, test_mem_size,
+ MADV_DONTNEED);
+ TEST_ASSERT(ret != -1,
+ "madvise failed in memory error handling");
+ }
+ ret = mprotect(shared_mem, test_mem_size, mprotect_mode);
+ TEST_ASSERT(ret != -1,
+ "mprotect failed in memory error handling");
+ }
}

static void vcpu_work(struct kvm_vm *vm, uint32_t test_id)
@@ -924,14 +1113,14 @@ static void vcpu_work(struct kvm_vm *vm, uint32_t test_id)

if (run->exit_reason == KVM_EXIT_MMIO) {
if (run->mmio.phys_addr == MEM_SIZE_MMIO_ADDRESS) {
- // tell the guest the size of the memory
- // it's been allocated
+ /* tell the guest the size of the memory it's
+ * been allocated
+ */
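+ /* Encode the size least-significant byte first */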
int shift_amount = 0;

for (int i = 0; i < sizeof(uint64_t); ++i) {
- run->mmio.data[i] =
- (test_mem_size >>
- shift_amount) & BYTE_MASK;
+ run->mmio.data[i] = (test_mem_size >>
+ shift_amount) & BYTE_MASK;
shift_amount += CHAR_BIT;
}
}
@@ -985,6 +1174,9 @@ static void setup_and_execute_test(uint32_t test_id,
int ret;
void *shared_mem;
struct kvm_enable_cap cap;
+ bool disallow_boot_shared_access =
+ priv_memfd_testsuite[test_id].disallow_boot_shared_access;
+ int prot_flags = PROT_READ | PROT_WRITE;

vm = vm_create_default(VCPU_ID, 0,
priv_memfd_testsuite[test_id].guest_fn);
@@ -1036,10 +1228,12 @@ static void setup_and_execute_test(uint32_t test_id,
// set global for mem size to use later
test_mem_size = mem_size;

+ if (disallow_boot_shared_access)
+ prot_flags = PROT_NONE;
+
/* Allocate shared memory */
shared_mem = mmap(NULL, mem_size,
- PROT_READ | PROT_WRITE,
- mmap_flags, -1, 0);
+ prot_flags, mmap_flags, -1, 0);
TEST_ASSERT(shared_mem != MAP_FAILED, "Failed to mmap() host");

if (using_hugepages) {
@@ -1166,7 +1360,8 @@ int main(int argc, char *argv[])

for (uint32_t i = 0; i < ARRAY_SIZE(priv_memfd_testsuite); i++) {
for (uint32_t j = 0; j < ARRAY_SIZE(page_size_matrix); j++) {
- const struct page_combo current_page_matrix = page_size_matrix[j];
+ const struct page_combo current_page_matrix =
+ page_size_matrix[j];

if (should_skip_test(current_page_matrix,
use_2mb_pages, use_1gb_pages))
@@ -1174,8 +1369,10 @@ int main(int argc, char *argv[])
pr_info("=== Starting test %s... ===\n",
priv_memfd_testsuite[i].test_desc);
pr_info("using page sizes shared: %s private: %s\n",
- page_size_to_str(current_page_matrix.shared),
- page_size_to_str(current_page_matrix.private));
+ page_size_to_str(
+ current_page_matrix.shared),
+ page_size_to_str(
+ current_page_matrix.private));
hugepage_requirements_text(current_page_matrix);
setup_and_execute_test(i, current_page_matrix.shared,
current_page_matrix.private);
--
2.36.0.550.gb090851708-goog
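
Note for readers: do_mem_op(), SET_PAT/VERIFY_PAT and the
TEST_MEM_DATA_PAT* values used above are defined earlier in
priv_memfd_test.c and are not visible in this diff. A minimal sketch of
the contract the new test assumes, under the assumption that the helper
simply fills or checks a repeating byte pattern (the real helper may
differ):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

enum mem_op { SET_PAT, VERIFY_PAT };

/* Fill or verify a repeating one-byte pattern over the region; returns
 * true on success (pattern written, or contents matched).
 */
static bool do_mem_op(enum mem_op op, void *mem, uint64_t pat, size_t size)
{
	uint8_t *p = mem;

	for (size_t i = 0; i < size; i++) {
		if (op == SET_PAT)
			p[i] = (uint8_t)pat;
		else if (p[i] != (uint8_t)pat)
			return false;
	}
	return true;
}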