summary refs log tree commit diff
path: root/tests
diff options
context:
space:
mode:
authorMatt Delco <delco@chromium.org>2019-11-01 16:45:46 -0700
committerCommit Bot <commit-bot@chromium.org>2019-11-06 23:01:15 +0000
commita52b2a6c8167eebb285b70b54077919bbf113a35 (patch)
tree600d2fec69fd89e3216a84b2bc64de510c7f9207 /tests
parent1de9cb53e1eccfaf138553c45a800f26927663bb (diff)
downloadcrosvm-a52b2a6c8167eebb285b70b54077919bbf113a35.tar
crosvm-a52b2a6c8167eebb285b70b54077919bbf113a35.tar.gz
crosvm-a52b2a6c8167eebb285b70b54077919bbf113a35.tar.bz2
crosvm-a52b2a6c8167eebb285b70b54077919bbf113a35.tar.lz
crosvm-a52b2a6c8167eebb285b70b54077919bbf113a35.tar.xz
crosvm-a52b2a6c8167eebb285b70b54077919bbf113a35.tar.zst
crosvm-a52b2a6c8167eebb285b70b54077919bbf113a35.zip
crosvm: add plugin API for async writes
A plugin might care to be immediately notified when a write
is made to a port, but it doesn't care to have the VM stopped
while the plugin calls back to resume the VM.

Unfortunately this means that multiple messages can be queued up in the
pipe and read() together by the plugin API.  Protobuf's parsing function
doesn't report how many bytes it read, so I've resorted to having crosvm
prefix every message with a length and then have the plugin lib parse
this number.  Impact on performance has not been measured.

BUG=b:143294496
TEST=Local build and run of build_test.  Verified that new unit
test was executed, exercised the case where multiple msgs are
received together, and completed successfully.

Change-Id: If6ef463e7b4d2e688e649f832a764fa644bf2d36
Signed-off-by: Matt Delco <delco@chromium.org>
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/platform/crosvm/+/1896376
Tested-by: kokoro <noreply+kokoro@google.com>
Reviewed-by: Stephen Barber <smbarber@chromium.org>
Diffstat (limited to 'tests')
-rw-r--r--tests/plugin_async_write.c273
-rw-r--r--tests/plugins.rs5
2 files changed, 278 insertions, 0 deletions
diff --git a/tests/plugin_async_write.c b/tests/plugin_async_write.c
new file mode 100644
index 0000000..5c99f25
--- /dev/null
+++ b/tests/plugin_async_write.c
@@ -0,0 +1,273 @@
+/*
+ * Copyright 2019 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include <errno.h>
+#include <fcntl.h>
+#include <linux/memfd.h>
+#include <pthread.h>
+#include <signal.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/syscall.h>
+#include <time.h>
+#include <unistd.h>
+
+#include "crosvm.h"
+
+#ifndef F_LINUX_SPECIFIC_BASE
+#define F_LINUX_SPECIFIC_BASE 1024
+#endif
+
+#ifndef F_ADD_SEALS
+#define F_ADD_SEALS (F_LINUX_SPECIFIC_BASE + 9)
+#endif
+
+#ifndef F_SEAL_SHRINK
+#define F_SEAL_SHRINK 0x0002
+#endif
+
+#define KILL_ADDRESS   0x3f9
+#define ASYNC_ADDRESS  0x500
+
+int g_kill_evt;
+int got_error = 0;
+
+/*
+ * Per-vcpu event loop (one thread per vcpu, started by main()).
+ *
+ * Handles the INIT event by pointing the guest at the code blob that
+ * main() copies to guest physical address 0x1000, then services port
+ * I/O events:
+ *   - A 1-byte write to ASYNC_ADDRESS must be delivered with no_resume
+ *     set (the async-write API under test).  The guest performs three
+ *     such OUTs back to back, so two more identical events are expected
+ *     from the following crosvm_vcpu_wait() calls, and no
+ *     crosvm_vcpu_resume() is issued for any of the three.
+ *   - A write of 1 to KILL_ADDRESS means the guest is done: signal
+ *     main() through the g_kill_evt eventfd and exit the thread.
+ * Any mismatch sets the global got_error flag so main() can fail the
+ * test after the VM shuts down.
+ */
+void *vcpu_thread(void *arg) {
+    struct crosvm_vcpu *vcpu = arg;
+    struct crosvm_vcpu_event evt;
+    while (crosvm_vcpu_wait(vcpu, &evt) == 0) {
+        if (evt.kind == CROSVM_VCPU_EVENT_KIND_INIT) {
+            /* Set up flat real-mode segments and registers so execution
+             * starts at the code blob loaded at 0x1000. */
+            struct kvm_sregs sregs;
+            crosvm_vcpu_get_sregs(vcpu, &sregs);
+            sregs.cs.base = 0;
+            sregs.cs.selector = 0;
+            sregs.es.base = KILL_ADDRESS;
+            sregs.es.selector = 0;
+            crosvm_vcpu_set_sregs(vcpu, &sregs);
+
+            struct kvm_regs regs;
+            crosvm_vcpu_get_regs(vcpu, &regs);
+            regs.rip = 0x1000;
+            regs.rax = 2;
+            regs.rbx = 7;
+            regs.rflags = 2;
+            crosvm_vcpu_set_regs(vcpu, &regs);
+        }
+        if (evt.kind == CROSVM_VCPU_EVENT_KIND_IO_ACCESS) {
+            if (evt.io_access.address_space == CROSVM_ADDRESS_SPACE_IOPORT &&
+                evt.io_access.address == ASYNC_ADDRESS &&
+                evt.io_access.is_write &&
+                evt.io_access.length == 1) {
+                int ret;
+                /* Async-write events must tell us NOT to resume the VM. */
+                if (!evt.io_access.no_resume) {
+                    fprintf(stderr, "should have been told not to resume\n");
+                    got_error = 1;
+                }
+
+                /* The guest issued three consecutive OUTs to ASYNC_ADDRESS;
+                 * the second should already be queued in the pipe. */
+                ret = crosvm_vcpu_wait(vcpu, &evt);
+                if (ret == 0) {
+                    if (evt.kind != CROSVM_VCPU_EVENT_KIND_IO_ACCESS ||
+                        evt.io_access.address_space !=
+                        CROSVM_ADDRESS_SPACE_IOPORT ||
+                        evt.io_access.address != ASYNC_ADDRESS ||
+                        !evt.io_access.is_write ||
+                        !evt.io_access.no_resume ||
+                        evt.io_access.length != 1) {
+                        fprintf(stderr, "got unexpected wait #1 result\n");
+                        got_error = 1;
+                    }
+                } else {
+                    fprintf(stderr, "crosvm_vcpu_wait() #1 failed: %d\n", ret);
+                    got_error = 1;
+                }
+
+                /* ...and the third async write right behind it. */
+                ret = crosvm_vcpu_wait(vcpu, &evt);
+                if (ret == 0) {
+                    if (evt.kind != CROSVM_VCPU_EVENT_KIND_IO_ACCESS ||
+                        evt.io_access.address_space !=
+                        CROSVM_ADDRESS_SPACE_IOPORT ||
+                        evt.io_access.address != ASYNC_ADDRESS ||
+                        !evt.io_access.is_write ||
+                        !evt.io_access.no_resume ||
+                        evt.io_access.length != 1) {
+                        fprintf(stderr, "got unexpected wait #2 result\n");
+                        got_error = 1;
+                    }
+                } else {
+                    fprintf(stderr, "crosvm_vcpu_wait() #2 failed: %d\n", ret);
+                    got_error = 1;
+                }
+
+                // skip the crosvm_vcpu_resume()
+                continue;
+            }
+            if (evt.io_access.address_space == CROSVM_ADDRESS_SPACE_IOPORT &&
+                evt.io_access.address == KILL_ADDRESS &&
+                evt.io_access.is_write &&
+                evt.io_access.length == 1 &&
+                evt.io_access.data[0] == 1)
+            {
+                /* Guest signaled completion; wake main() via the eventfd.
+                 * NOTE(review): write() return value is ignored here. */
+                uint64_t dummy = 1;
+                write(g_kill_evt, &dummy, sizeof(dummy));
+                return NULL;
+            }
+        }
+
+        crosvm_vcpu_resume(vcpu);
+    }
+
+    return NULL;
+}
+
+/*
+ * Test driver: connects to crosvm, registers ASYNC_ADDRESS as an
+ * async-write ioport and KILL_ADDRESS as a regular ioport, loads a tiny
+ * real-mode code blob into guest memory at 0x1000, starts one thread per
+ * vcpu, and waits for the guest's write to KILL_ADDRESS before tearing
+ * everything down.  Returns 0 on success, 1 on any failure (including
+ * got_error being set by a vcpu thread).
+ */
+int main(int argc, char** argv) {
+    /* Guest code: three OUTs to ASYNC_ADDRESS (0x500), then an OUT of 1
+     * to KILL_ADDRESS (0x3f9), then HLT. */
+    const uint8_t code[] = {
+    /*
+    B007    mov al,0x7
+    BA0005  mov dx,0x500
+    EE      out dx,al
+    EE      out dx,al
+    EE      out dx,al
+    BAF903  mov dx,0x3f9
+    B001    mov al,0x1
+    EE      out dx,al
+    F4      hlt
+    */
+        0xb0, 0x7,
+        0xba, (ASYNC_ADDRESS & 0xFF), ((ASYNC_ADDRESS >> 8) & 0xFF),
+        0xee,
+        0xee,
+        0xee,
+        0xba, (KILL_ADDRESS & 0xFF), ((KILL_ADDRESS >> 8) & 0xFF),
+        0xb0, 0x01,
+        0xee,
+        0xf4
+    };
+
+    struct crosvm *crosvm;
+    int ret = crosvm_connect(&crosvm);
+    if (ret) {
+        fprintf(stderr, "failed to connect to crosvm: %d\n", ret);
+        return 1;
+    }
+
+    /*
+     * Not strictly necessary, but demonstrates we can have as many connections
+     * as we please.
+     */
+    struct crosvm *extra_crosvm;
+    ret = crosvm_new_connection(crosvm, &extra_crosvm);
+    if (ret) {
+        fprintf(stderr, "failed to make new socket: %d\n", ret);
+        return 1;
+    }
+
+    /* We need this eventfd to know when to exit before being killed. */
+    g_kill_evt = crosvm_get_shutdown_eventfd(crosvm);
+    if (g_kill_evt < 0) {
+        fprintf(stderr, "failed to get kill eventfd: %d\n", g_kill_evt);
+        return 1;
+    }
+
+    /* Writes to this 1-byte ioport range are delivered asynchronously
+     * (no_resume set) -- this is the API under test. */
+    ret = crosvm_reserve_async_write_range(crosvm, CROSVM_ADDRESS_SPACE_IOPORT,
+                                           ASYNC_ADDRESS, 1);
+    if (ret) {
+        fprintf(stderr, "failed to reserve async ioport range: %d\n", ret);
+        return 1;
+    }
+
+    ret = crosvm_reserve_range(crosvm, CROSVM_ADDRESS_SPACE_IOPORT,
+                               KILL_ADDRESS, 1);
+    if (ret) {
+        fprintf(stderr, "failed to reserve kill ioport range: %d\n", ret);
+        return 1;
+    }
+
+    int mem_size = 0x2000;
+    int mem_fd = syscall(SYS_memfd_create, "guest_mem",
+                         MFD_CLOEXEC | MFD_ALLOW_SEALING);
+    if (mem_fd < 0) {
+        fprintf(stderr, "failed to create guest memfd: %d\n", errno);
+        return 1;
+    }
+    ret = ftruncate(mem_fd, mem_size);
+    if (ret) {
+        fprintf(stderr, "failed to set size of guest memory: %d\n", errno);
+        return 1;
+    }
+    /* NOTE(review): this maps mem_size (0x2000) bytes at offset 0x1000 of
+     * a 0x2000-byte memfd, so the tail of the mapping extends past EOF;
+     * only the first page is touched by the memcpy below -- confirm this
+     * is intentional. */
+    uint8_t *mem = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_SHARED,
+                        mem_fd, 0x1000);
+    if (mem == MAP_FAILED) {
+        fprintf(stderr, "failed to mmap guest memory: %d\n", errno);
+        return 1;
+    }
+    fcntl(mem_fd, F_ADD_SEALS, F_SEAL_SHRINK);
+    memcpy(mem, code, sizeof(code));
+
+    /* Expose the memfd region as guest physical memory at 0x1000, where
+     * the INIT handler points rip. */
+    struct crosvm_memory *mem_obj;
+    ret = crosvm_create_memory(crosvm, mem_fd, 0x1000, 0x1000, 0x1000, false,
+                               false, &mem_obj);
+    if (ret) {
+        fprintf(stderr, "failed to create memory in crosvm: %d\n", ret);
+        return 1;
+    }
+
+    /* Get and create a thread for each vcpu. */
+    struct crosvm_vcpu *vcpus[32];
+    pthread_t vcpu_threads[32];
+    uint32_t vcpu_count;
+    for (vcpu_count = 0; vcpu_count < 32; vcpu_count++) {
+        ret = crosvm_get_vcpu(crosvm, vcpu_count, &vcpus[vcpu_count]);
+        if (ret == -ENOENT)
+            break;
+
+        if (ret) {
+            fprintf(stderr, "error while getting all vcpus: %d\n", ret);
+            return 1;
+        }
+        pthread_create(&vcpu_threads[vcpu_count], NULL, vcpu_thread,
+                       vcpus[vcpu_count]);
+    }
+
+    ret = crosvm_start(extra_crosvm);
+    if (ret) {
+        fprintf(stderr, "failed to tell crosvm to start: %d\n", ret);
+        return 1;
+    }
+
+    /* Wait for crosvm to request that we exit otherwise we will be killed. */
+    /* NOTE(review): read() return value is ignored here. */
+    uint64_t dummy;
+    read(g_kill_evt, &dummy, 8);
+
+    ret = crosvm_destroy_memory(crosvm, &mem_obj);
+    if (ret) {
+        fprintf(stderr, "failed to destroy memory in crosvm: %d\n", ret);
+        return 1;
+    }
+
+    /* Length 0 removes the reservation (per the error messages below). */
+    ret = crosvm_reserve_async_write_range(crosvm, CROSVM_ADDRESS_SPACE_IOPORT,
+                                           ASYNC_ADDRESS, 0);
+    if (ret) {
+        fprintf(stderr, "failed to unreserve async ioport range: %d\n", ret);
+        return 1;
+    }
+
+    ret = crosvm_reserve_range(crosvm, CROSVM_ADDRESS_SPACE_IOPORT,
+                               KILL_ADDRESS, 0);
+    if (ret) {
+        fprintf(stderr, "failed to unreserve kill ioport range: %d\n", ret);
+        return 1;
+    }
+
+    /* A vcpu thread may have flagged a mismatch even though the VM ran. */
+    if (got_error) {
+      fprintf(stderr, "vm ran to completion but with an error\n");
+      return 1;
+    }
+
+    return 0;
+}
diff --git a/tests/plugins.rs b/tests/plugins.rs
index 47b0ca3..d56f4ce 100644
--- a/tests/plugins.rs
+++ b/tests/plugins.rs
@@ -231,6 +231,11 @@ fn test_hint() {
 }
 
 #[test]
+// Compiles and runs plugin_async_write.c against crosvm, exercising the
+// async ioport-write plugin API added by this change.
+fn test_async_write() {
+    test_plugin(include_str!("plugin_async_write.c"));
+}
+
+#[test]
 fn test_dirty_log() {
     test_plugin(include_str!("plugin_dirty_log.c"));
 }