author     Matt Delco <delco@chromium.org>       2019-04-16 18:37:55 -0700
committer  chrome-bot <chrome-bot@chromium.org>  2019-04-24 15:51:11 -0700
commit     2ec62db5f77cd52fe934e87d3be3c832251f4748
tree       210e85049bc1b92a43986a8f8c76240ee0225a7d /crosvm_plugin
parent     f82d6320e7192858553bb0e1b5464363d29a631a
crosvm: reduce excess chatter with plugin
This change improves the performance of plugin communication by
removing unnecessary message exchanges.

The existing protocol requires the plugin to send a request message and
wait for a reply message.  Prior to this change, a plugin had to send
a wait request before it got a wait reply (which typically contains an
I/O event notification).  Similarly, when the plugin sends a resume
request, a resume reply is also sent back.
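
To illustrate, here is a minimal sketch of that old four-message cycle,
modeled in Rust over a datagram socketpair.  The string payloads are
illustrative stand-ins for the real protobuf messages, not the actual
wire format:

    use std::os::unix::net::UnixDatagram;

    fn main() -> std::io::Result<()> {
        // Model the plugin <-> crosvm connection with a datagram pair.
        let (plugin, crosvm) = UnixDatagram::pair()?;
        let mut buf = [0u8; 64];

        // 1. Plugin asks to wait for the next event.
        plugin.send(b"wait request")?;
        crosvm.recv(&mut buf)?;

        // 2. crosvm replies with an I/O event notification.
        crosvm.send(b"wait reply: io event")?;
        plugin.recv(&mut buf)?;

        // 3. Plugin asks to resume the vcpu.
        plugin.send(b"resume request")?;
        crosvm.recv(&mut buf)?;

        // 4. crosvm sends a resume reply that carries nothing useful.
        crosvm.send(b"resume reply")?;
        plugin.recv(&mut buf)?;

        Ok(())
    }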

The reply to the resume message serves no worthwhile purpose and can be
removed.  In the common case there's also no need for the plugin to send
a wait request message--the prior operation was a resume, so both sides
know that the only legal next operation is a wait.  Therefore, crosvm
can send a wait reply message without waiting for the plugin's request.
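
With this change the same event cycle needs only two messages (same
modeling assumptions as the sketch above):

    use std::os::unix::net::UnixDatagram;

    fn main() -> std::io::Result<()> {
        let (plugin, crosvm) = UnixDatagram::pair()?;
        let mut buf = [0u8; 64];

        // 1. crosvm sends the wait reply unprompted: after a resume, a
        //    wait is the only legal next operation, so no wait request
        //    is needed.
        crosvm.send(b"wait reply: io event")?;
        plugin.recv(&mut buf)?;

        // 2. Plugin resumes the vcpu; no resume reply is sent back.
        plugin.send(b"resume request")?;
        crosvm.recv(&mut buf)?;

        Ok(())
    }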

Another way to look at the situation is that a resume request message is
now answered by a wait reply message, making the overall message exchange
pattern look less like HTTP and more like async I/O.

The plugin's first call to wait is the only time a wait request is
sent.  This in turn receives a wait-init reply.
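
Putting the pieces together, the plugin side now behaves roughly like
the following sketch.  The field and method names follow the lib.rs hunk
below; the protobuf requests and replies are again reduced to strings:

    use std::os::unix::net::UnixDatagram;

    struct Vcpu {
        socket: UnixDatagram,
        send_init: bool,
    }

    impl Vcpu {
        fn wait(&mut self) -> std::io::Result<Vec<u8>> {
            if self.send_init {
                // Only the very first wait sends an explicit request,
                // which is answered by a wait-init reply.
                self.send_init = false;
                self.socket.send(b"wait request")?;
            }
            // Every later wait reply arrives unprompted after a resume.
            let mut buf = [0u8; 64];
            let n = self.socket.recv(&mut buf)?;
            Ok(buf[..n].to_vec())
        }

        fn resume(&mut self) -> std::io::Result<()> {
            // Fire-and-forget: there is no resume reply to read anymore.
            self.socket.send(b"resume request")?;
            Ok(())
        }
    }

    fn main() -> std::io::Result<()> {
        let (sock, crosvm) = UnixDatagram::pair()?;
        let mut vcpu = Vcpu { socket: sock, send_init: true };
        let mut buf = [0u8; 64];

        crosvm.send(b"wait-init reply")?;       // answer the one request
        assert_eq!(vcpu.wait()?, b"wait-init reply");
        crosvm.recv(&mut buf)?;                 // drain the wait request

        vcpu.resume()?;
        crosvm.recv(&mut buf)?;                 // drain the resume request
        crosvm.send(b"wait reply: io event")?;  // next event, unprompted
        assert_eq!(vcpu.wait()?, b"wait reply: io event");
        Ok(())
    }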

TEST=Ran my diagnostic plugin and confirmed that it still passes (after
working around an 8-byte limitation in crosvm).  Ran my benchmarking
plugin and observed a 16.5% reduction in the time it takes to complete.
BUG=None

Change-Id: I9c93ba1d3a8f7814ca952f3dc7239d48675192e2
Signed-off-by: Matt Delco <delco@chromium.org>
Reviewed-on: https://chromium-review.googlesource.com/1571066
Tested-by: kokoro <noreply+kokoro@google.com>
Tested-by: Matt Delco <delco@google.com>
Reviewed-by: Zach Reizner <zachr@chromium.org>
Diffstat (limited to 'crosvm_plugin')
-rw-r--r--  crosvm_plugin/crosvm.h    | 16
-rw-r--r--  crosvm_plugin/src/lib.rs  | 26
2 files changed, 30 insertions, 12 deletions
diff --git a/crosvm_plugin/crosvm.h b/crosvm_plugin/crosvm.h
index 81c8ade..d7a036c 100644
--- a/crosvm_plugin/crosvm.h
+++ b/crosvm_plugin/crosvm.h
@@ -235,7 +235,7 @@ int crosvm_get_pic_state(struct crosvm *, bool __primary,
 
 /* Sets the state of interrupt controller in a VM. */
 int crosvm_set_pic_state(struct crosvm *, bool __primary,
-                         struct kvm_pic_state *__pic_state);
+                         const struct kvm_pic_state *__pic_state);
 
 /* Gets the state of IOAPIC in a VM. */
 int crosvm_get_ioapic_state(struct crosvm *,
@@ -243,7 +243,7 @@ int crosvm_get_ioapic_state(struct crosvm *,
 
 /* Sets the state of IOAPIC in a VM. */
 int crosvm_set_ioapic_state(struct crosvm *,
-                            struct kvm_ioapic_state *__ioapic_state);
+                            const struct kvm_ioapic_state *__ioapic_state);
 
 /* Gets the state of interrupt controller in a VM. */
 int crosvm_get_pit_state(struct crosvm *, struct kvm_pit_state2 *__pit_state);
@@ -280,7 +280,7 @@ int crosvm_pause_vcpus(struct crosvm*, uint64_t __cpu_mask, void* __user);
 int crosvm_start(struct crosvm*);
 
 /*
- * Registers an eventfd that is triggered asynchronously on write in |__space|
+ * Allocates an eventfd that is triggered asynchronously on write in |__space|
  * at the given |__addr|.
  *
  * If |__datamatch| is non-NULL, it must be contain |__length| bytes that will
@@ -288,8 +288,8 @@ int crosvm_start(struct crosvm*);
  * the eventfd if equal. If datamatch is NULL all writes to the address will
  * trigger the eventfd.
  *
- * On successful registration, returns a non-negative eventfd owned by the
- * caller.
+ * On successful allocation, returns a crosvm_io.  Obtain the actual fd
+ * by passing this result to crosvm_io_event_fd().
  */
 int crosvm_create_io_event(struct crosvm*, uint32_t __space, uint64_t __addr,
                            uint32_t __len, const uint8_t* __datamatch,
@@ -423,6 +423,10 @@ struct crosvm_vcpu_event {
        * than 8 bytes, such as by AVX-512 instructions, multiple vcpu access
        * events are generated serially to cover each 8 byte fragment of the
        * access.
+       *
+       * Larger I/O accesses are possible.  "rep in" can generate I/Os larger
+       * than 8 bytes, though such accesses can also be split into multiple
+       * events.  Currently kvm doesn't seem to batch "rep out" I/Os.
        */
       uint32_t length;
 
@@ -435,7 +439,7 @@ struct crosvm_vcpu_event {
       uint8_t _reserved[3];
     } io_access;
 
-    /* CROSVM_VCPU_EVENT_KIND_USER */
+    /* CROSVM_VCPU_EVENT_KIND_PAUSED */
     void *user;
 
     uint8_t _reserved[64];
diff --git a/crosvm_plugin/src/lib.rs b/crosvm_plugin/src/lib.rs
index a334505..d1bb665 100644
--- a/crosvm_plugin/src/lib.rs
+++ b/crosvm_plugin/src/lib.rs
@@ -920,6 +920,7 @@ pub struct crosvm_vcpu_event {
 
 pub struct crosvm_vcpu {
     socket: UnixDatagram,
+    send_init: bool,
     request_buffer: Vec<u8>,
     response_buffer: Vec<u8>,
     resume_data: Vec<u8>,
@@ -929,13 +930,13 @@ impl crosvm_vcpu {
     fn new(socket: UnixDatagram) -> crosvm_vcpu {
         crosvm_vcpu {
             socket,
+            send_init: true,
             request_buffer: Vec::new(),
             response_buffer: vec![0; MAX_DATAGRAM_SIZE],
             resume_data: Vec::new(),
         }
     }
-
-    fn vcpu_transaction(&mut self, request: &VcpuRequest) -> result::Result<VcpuResponse, c_int> {
+    fn vcpu_send(&mut self, request: &VcpuRequest) -> result::Result<(), c_int> {
         self.request_buffer.clear();
         request
             .write_to_vec(&mut self.request_buffer)
@@ -943,7 +944,10 @@ impl crosvm_vcpu {
         self.socket
             .send(self.request_buffer.as_slice())
             .map_err(|e| -e.raw_os_error().unwrap_or(EINVAL))?;
+        Ok(())
+    }
 
+    fn vcpu_recv(&mut self) -> result::Result<VcpuResponse, c_int> {
         let msg_size = self
             .socket
             .recv(&mut self.response_buffer)
@@ -957,10 +961,20 @@ impl crosvm_vcpu {
         Ok(response)
     }
 
+    fn vcpu_transaction(&mut self, request: &VcpuRequest) -> result::Result<VcpuResponse, c_int> {
+        self.vcpu_send(request)?;
+        let response: VcpuResponse = self.vcpu_recv()?;
+        Ok(response)
+    }
+
     fn wait(&mut self, event: &mut crosvm_vcpu_event) -> result::Result<(), c_int> {
-        let mut r = VcpuRequest::new();
-        r.mut_wait();
-        let mut response: VcpuResponse = self.vcpu_transaction(&r)?;
+        if self.send_init {
+            self.send_init = false;
+            let mut r = VcpuRequest::new();
+            r.mut_wait();
+            self.vcpu_send(&r)?;
+        }
+        let mut response: VcpuResponse = self.vcpu_recv()?;
         if !response.has_wait() {
             return Err(EPROTO);
         }
@@ -997,7 +1011,7 @@ impl crosvm_vcpu {
         let resume: &mut VcpuRequest_Resume = r.mut_resume();
         swap(&mut resume.data, &mut self.resume_data);
 
-        self.vcpu_transaction(&r)?;
+        self.vcpu_send(&r)?;
         Ok(())
     }