summary refs log tree commit diff
path: root/kvm
diff options
context:
space:
mode:
authorDylan Reid <dgreid@chromium.org>2019-10-22 18:30:36 +0300
committerCommit Bot <commit-bot@chromium.org>2019-12-10 05:16:06 +0000
commitbb30b2f7cf2721b32da71adfa7f7482bfce7b915 (patch)
treee364b4b3d1e605ec1aa7ba032e1d3211cd12ca18 /kvm
parent7434c0002022541f34a929d9a8e3bfaaf4dfc2d9 (diff)
downloadcrosvm-bb30b2f7cf2721b32da71adfa7f7482bfce7b915.tar
crosvm-bb30b2f7cf2721b32da71adfa7f7482bfce7b915.tar.gz
crosvm-bb30b2f7cf2721b32da71adfa7f7482bfce7b915.tar.bz2
crosvm-bb30b2f7cf2721b32da71adfa7f7482bfce7b915.tar.lz
crosvm-bb30b2f7cf2721b32da71adfa7f7482bfce7b915.tar.xz
crosvm-bb30b2f7cf2721b32da71adfa7f7482bfce7b915.tar.zst
crosvm-bb30b2f7cf2721b32da71adfa7f7482bfce7b915.zip
Add runnable vcpu
Add a new type `RunnableVcpu` for a vcpu that is bound to a thread. This
adds type safety to ensure that vcpus are only ever run on one thread
because `RunnableVcpu` doesn't implement `Send`. It also ensures multiple vcpus can't
run on the same thread.

Change-Id: Ia50dc127bc7a4ea4ce3ca99ef1062edbcaa912d0
Signed-off-by: Dylan Reid <dgreid@chromium.org>
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/platform/crosvm/+/1898909
Tested-by: kokoro <noreply+kokoro@google.com>
Reviewed-by: Zach Reizner <zachr@chromium.org>
Diffstat (limited to 'kvm')
-rw-r--r--kvm/src/lib.rs322
-rw-r--r--kvm/tests/dirty_log.rs3
-rw-r--r--kvm/tests/read_only_memory.rs3
-rw-r--r--kvm/tests/real_run_adder.rs3
4 files changed, 194 insertions, 137 deletions
diff --git a/kvm/src/lib.rs b/kvm/src/lib.rs
index 0a8d5c1..06f5915 100644
--- a/kvm/src/lib.rs
+++ b/kvm/src/lib.rs
@@ -11,12 +11,13 @@ use std::cmp::{min, Ordering};
 use std::collections::{BinaryHeap, HashMap};
 use std::fs::File;
 use std::mem::size_of;
+use std::ops::{Deref, DerefMut};
 use std::os::raw::*;
 use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
 use std::ptr::copy_nonoverlapping;
 
 use libc::sigset_t;
-use libc::{open, EINVAL, ENOENT, ENOSPC, EOVERFLOW, O_CLOEXEC, O_RDWR};
+use libc::{open, EBUSY, EINVAL, ENOENT, ENOSPC, EOVERFLOW, O_CLOEXEC, O_RDWR};
 
 use kvm_sys::*;
 
@@ -1148,6 +1149,8 @@ pub enum VcpuExit {
 }
 
 /// A wrapper around creating and using a VCPU.
+/// `Vcpu` provides all functionality except for running. To run, `to_runnable` must be called to
+/// lock the vcpu to a thread. Then the returned `RunnableVcpu` can be used for running.
 pub struct Vcpu {
     vcpu: File,
     run_mmap: MemoryMapping,
@@ -1156,7 +1159,7 @@ pub struct Vcpu {
 
 pub struct VcpuThread {
     run: *mut kvm_run,
-    signal_num: c_int,
+    signal_num: Option<c_int>,
 }
 
 thread_local!(static VCPU_THREAD: RefCell<Option<VcpuThread>> = RefCell::new(None));
@@ -1190,33 +1193,39 @@ impl Vcpu {
         })
     }
 
-    /// Sets the thread id for the vcpu and stores it in a hash map that can be used
-    /// by signal handlers to call set_local_immediate_exit(). Signal
-    /// number (if provided, otherwise use -1) will be temporily blocked when the vcpu
-    /// is added to the map, or later destroyed/removed from the map.
+    /// Consumes `self` and returns a `RunnableVcpu`. A `RunnableVcpu` is required to run the
+    /// guest.
+    /// Assigns a vcpu to the current thread and stores it in a hash map that can be used by signal
+    /// handlers to call set_local_immediate_exit(). An optional signal number will be temporarily
+    /// blocked while assigning the vcpu to the thread and later blocked when `RunnableVcpu` is
+    /// destroyed.
+    ///
+    /// Returns an error, `EBUSY`, if the current thread already contains a Vcpu.
     #[allow(clippy::cast_ptr_alignment)]
-    pub fn set_thread_id(&mut self, signal_num: c_int) {
+    pub fn to_runnable(self, signal_num: Option<c_int>) -> Result<RunnableVcpu> {
         // Block signal while we add -- if a signal fires (very unlikely,
         // as this means something is trying to pause the vcpu before it has
         // even started) it'll try to grab the read lock while this write
         // lock is grabbed and cause a deadlock.
-        let mut unblock = false;
-        if signal_num >= 0 {
-            unblock = true;
-            // Assuming that a failure to block means it's already blocked.
-            if block_signal(signal_num).is_err() {
-                unblock = false;
-            }
-        }
+        // Assuming that a failure to block means it's already blocked.
+        let _blocked_signal = signal_num.map(BlockedSignal::new);
+
         VCPU_THREAD.with(|v| {
-            *v.borrow_mut() = Some(VcpuThread {
-                run: self.run_mmap.as_ptr() as *mut kvm_run,
-                signal_num,
-            });
-        });
-        if unblock {
-            let _ = unblock_signal(signal_num).expect("failed to restore signal mask");
-        }
+            if v.borrow().is_none() {
+                *v.borrow_mut() = Some(VcpuThread {
+                    run: self.run_mmap.as_ptr() as *mut kvm_run,
+                    signal_num,
+                });
+                Ok(())
+            } else {
+                Err(Error::new(EBUSY))
+            }
+        })?;
+
+        Ok(RunnableVcpu {
+            vcpu: self,
+            phantom: Default::default(),
+        })
     }
 
     /// Gets a reference to the guest memory owned by this VM of this VCPU.
@@ -1297,99 +1306,6 @@ impl Vcpu {
         });
     }
 
-    /// Runs the VCPU until it exits, returning the reason.
-    ///
-    /// Note that the state of the VCPU and associated VM must be setup first for this to do
-    /// anything useful.
-    #[allow(clippy::cast_ptr_alignment)]
-    // The pointer is page aligned so casting to a different type is well defined, hence the clippy
-    // allow attribute.
-    pub fn run(&self) -> Result<VcpuExit> {
-        // Safe because we know that our file is a VCPU fd and we verify the return result.
-        let ret = unsafe { ioctl(self, KVM_RUN()) };
-        if ret == 0 {
-            // Safe because we know we mapped enough memory to hold the kvm_run struct because the
-            // kernel told us how large it was.
-            let run = unsafe { &*(self.run_mmap.as_ptr() as *const kvm_run) };
-            match run.exit_reason {
-                KVM_EXIT_IO => {
-                    // Safe because the exit_reason (which comes from the kernel) told us which
-                    // union field to use.
-                    let io = unsafe { run.__bindgen_anon_1.io };
-                    let port = io.port;
-                    let size = (io.count as usize) * (io.size as usize);
-                    match io.direction as u32 {
-                        KVM_EXIT_IO_IN => Ok(VcpuExit::IoIn { port, size }),
-                        KVM_EXIT_IO_OUT => {
-                            let mut data = [0; 8];
-                            let run_start = run as *const kvm_run as *const u8;
-                            // The data_offset is defined by the kernel to be some number of bytes
-                            // into the kvm_run structure, which we have fully mmap'd.
-                            unsafe {
-                                let data_ptr = run_start.offset(io.data_offset as isize);
-                                copy_nonoverlapping(
-                                    data_ptr,
-                                    data.as_mut_ptr(),
-                                    min(size, data.len()),
-                                );
-                            }
-                            Ok(VcpuExit::IoOut { port, size, data })
-                        }
-                        _ => Err(Error::new(EINVAL)),
-                    }
-                }
-                KVM_EXIT_MMIO => {
-                    // Safe because the exit_reason (which comes from the kernel) told us which
-                    // union field to use.
-                    let mmio = unsafe { &run.__bindgen_anon_1.mmio };
-                    let address = mmio.phys_addr;
-                    let size = min(mmio.len as usize, mmio.data.len());
-                    if mmio.is_write != 0 {
-                        Ok(VcpuExit::MmioWrite {
-                            address,
-                            size,
-                            data: mmio.data,
-                        })
-                    } else {
-                        Ok(VcpuExit::MmioRead { address, size })
-                    }
-                }
-                KVM_EXIT_UNKNOWN => Ok(VcpuExit::Unknown),
-                KVM_EXIT_EXCEPTION => Ok(VcpuExit::Exception),
-                KVM_EXIT_HYPERCALL => Ok(VcpuExit::Hypercall),
-                KVM_EXIT_DEBUG => Ok(VcpuExit::Debug),
-                KVM_EXIT_HLT => Ok(VcpuExit::Hlt),
-                KVM_EXIT_IRQ_WINDOW_OPEN => Ok(VcpuExit::IrqWindowOpen),
-                KVM_EXIT_SHUTDOWN => Ok(VcpuExit::Shutdown),
-                KVM_EXIT_FAIL_ENTRY => Ok(VcpuExit::FailEntry),
-                KVM_EXIT_INTR => Ok(VcpuExit::Intr),
-                KVM_EXIT_SET_TPR => Ok(VcpuExit::SetTpr),
-                KVM_EXIT_TPR_ACCESS => Ok(VcpuExit::TprAccess),
-                KVM_EXIT_S390_SIEIC => Ok(VcpuExit::S390Sieic),
-                KVM_EXIT_S390_RESET => Ok(VcpuExit::S390Reset),
-                KVM_EXIT_DCR => Ok(VcpuExit::Dcr),
-                KVM_EXIT_NMI => Ok(VcpuExit::Nmi),
-                KVM_EXIT_INTERNAL_ERROR => Ok(VcpuExit::InternalError),
-                KVM_EXIT_OSI => Ok(VcpuExit::Osi),
-                KVM_EXIT_PAPR_HCALL => Ok(VcpuExit::PaprHcall),
-                KVM_EXIT_S390_UCONTROL => Ok(VcpuExit::S390Ucontrol),
-                KVM_EXIT_WATCHDOG => Ok(VcpuExit::Watchdog),
-                KVM_EXIT_S390_TSCH => Ok(VcpuExit::S390Tsch),
-                KVM_EXIT_EPR => Ok(VcpuExit::Epr),
-                KVM_EXIT_SYSTEM_EVENT => {
-                    // Safe because we know the exit reason told us this union
-                    // field is valid
-                    let event_type = unsafe { run.__bindgen_anon_1.system_event.type_ };
-                    let event_flags = unsafe { run.__bindgen_anon_1.system_event.flags };
-                    Ok(VcpuExit::SystemEvent(event_type, event_flags))
-                }
-                r => panic!("unknown kvm exit reason: {}", r),
-            }
-        } else {
-            errno_result()
-        }
-    }
-
     /// Gets the VCPU registers.
     #[cfg(not(any(target_arch = "arm", target_arch = "aarch64")))]
     pub fn get_regs(&self) -> Result<kvm_regs> {
@@ -1787,35 +1703,151 @@ impl Vcpu {
     }
 }
 
-impl Drop for Vcpu {
-    fn drop(&mut self) {
-        VCPU_THREAD.with(|v| {
-            let mut unblock = false;
-            let mut signal_num: c_int = -1;
-            if let Some(state) = &(*v.borrow()) {
-                if state.signal_num >= 0 {
-                    unblock = true;
-                    signal_num = state.signal_num;
-                    // Assuming that a failure to block means it's already blocked.
-                    if block_signal(signal_num).is_err() {
-                        unblock = false;
+impl AsRawFd for Vcpu {
+    fn as_raw_fd(&self) -> RawFd {
+        self.vcpu.as_raw_fd()
+    }
+}
+
+/// A Vcpu that has a thread and can be run. Created by calling `to_runnable` on a `Vcpu`.
+/// Implements `Deref` to a `Vcpu` so all `Vcpu` methods are usable, with the addition of the `run`
+/// function to execute the guest.
+pub struct RunnableVcpu {
+    vcpu: Vcpu,
+    // vcpus must stay on the same thread once they start.
+    // Add the PhantomData pointer to ensure RunnableVcpu is not `Send`.
+    phantom: std::marker::PhantomData<*mut u8>,
+}
+
+impl RunnableVcpu {
+    /// Runs the VCPU until it exits, returning the reason for the exit.
+    ///
+    /// Note that the state of the VCPU and associated VM must be setup first for this to do
+    /// anything useful.
+    #[allow(clippy::cast_ptr_alignment)]
+    // The pointer is page aligned so casting to a different type is well defined, hence the clippy
+    // allow attribute.
+    pub fn run(&self) -> Result<VcpuExit> {
+        // Safe because we know that our file is a VCPU fd and we verify the return result.
+        let ret = unsafe { ioctl(self, KVM_RUN()) };
+        if ret == 0 {
+            // Safe because we know we mapped enough memory to hold the kvm_run struct because the
+            // kernel told us how large it was.
+            let run = unsafe { &*(self.run_mmap.as_ptr() as *const kvm_run) };
+            match run.exit_reason {
+                KVM_EXIT_IO => {
+                    // Safe because the exit_reason (which comes from the kernel) told us which
+                    // union field to use.
+                    let io = unsafe { run.__bindgen_anon_1.io };
+                    let port = io.port;
+                    let size = (io.count as usize) * (io.size as usize);
+                    match io.direction as u32 {
+                        KVM_EXIT_IO_IN => Ok(VcpuExit::IoIn { port, size }),
+                        KVM_EXIT_IO_OUT => {
+                            let mut data = [0; 8];
+                            let run_start = run as *const kvm_run as *const u8;
+                            // The data_offset is defined by the kernel to be some number of bytes
+                            // into the kvm_run structure, which we have fully mmap'd.
+                            unsafe {
+                                let data_ptr = run_start.offset(io.data_offset as isize);
+                                copy_nonoverlapping(
+                                    data_ptr,
+                                    data.as_mut_ptr(),
+                                    min(size, data.len()),
+                                );
+                            }
+                            Ok(VcpuExit::IoOut { port, size, data })
+                        }
+                        _ => Err(Error::new(EINVAL)),
                     }
                 }
-            };
-            *v.borrow_mut() = None;
-            if unblock {
-                let _ = unblock_signal(signal_num).expect("failed to restore signal mask");
+                KVM_EXIT_MMIO => {
+                    // Safe because the exit_reason (which comes from the kernel) told us which
+                    // union field to use.
+                    let mmio = unsafe { &run.__bindgen_anon_1.mmio };
+                    let address = mmio.phys_addr;
+                    let size = min(mmio.len as usize, mmio.data.len());
+                    if mmio.is_write != 0 {
+                        Ok(VcpuExit::MmioWrite {
+                            address,
+                            size,
+                            data: mmio.data,
+                        })
+                    } else {
+                        Ok(VcpuExit::MmioRead { address, size })
+                    }
+                }
+                KVM_EXIT_UNKNOWN => Ok(VcpuExit::Unknown),
+                KVM_EXIT_EXCEPTION => Ok(VcpuExit::Exception),
+                KVM_EXIT_HYPERCALL => Ok(VcpuExit::Hypercall),
+                KVM_EXIT_DEBUG => Ok(VcpuExit::Debug),
+                KVM_EXIT_HLT => Ok(VcpuExit::Hlt),
+                KVM_EXIT_IRQ_WINDOW_OPEN => Ok(VcpuExit::IrqWindowOpen),
+                KVM_EXIT_SHUTDOWN => Ok(VcpuExit::Shutdown),
+                KVM_EXIT_FAIL_ENTRY => Ok(VcpuExit::FailEntry),
+                KVM_EXIT_INTR => Ok(VcpuExit::Intr),
+                KVM_EXIT_SET_TPR => Ok(VcpuExit::SetTpr),
+                KVM_EXIT_TPR_ACCESS => Ok(VcpuExit::TprAccess),
+                KVM_EXIT_S390_SIEIC => Ok(VcpuExit::S390Sieic),
+                KVM_EXIT_S390_RESET => Ok(VcpuExit::S390Reset),
+                KVM_EXIT_DCR => Ok(VcpuExit::Dcr),
+                KVM_EXIT_NMI => Ok(VcpuExit::Nmi),
+                KVM_EXIT_INTERNAL_ERROR => Ok(VcpuExit::InternalError),
+                KVM_EXIT_OSI => Ok(VcpuExit::Osi),
+                KVM_EXIT_PAPR_HCALL => Ok(VcpuExit::PaprHcall),
+                KVM_EXIT_S390_UCONTROL => Ok(VcpuExit::S390Ucontrol),
+                KVM_EXIT_WATCHDOG => Ok(VcpuExit::Watchdog),
+                KVM_EXIT_S390_TSCH => Ok(VcpuExit::S390Tsch),
+                KVM_EXIT_EPR => Ok(VcpuExit::Epr),
+                KVM_EXIT_SYSTEM_EVENT => {
+                    // Safe because we know the exit reason told us this union
+                    // field is valid
+                    let event_type = unsafe { run.__bindgen_anon_1.system_event.type_ };
+                    let event_flags = unsafe { run.__bindgen_anon_1.system_event.flags };
+                    Ok(VcpuExit::SystemEvent(event_type, event_flags))
+                }
+                r => panic!("unknown kvm exit reason: {}", r),
             }
-        });
+        } else {
+            errno_result()
+        }
     }
 }
 
-impl AsRawFd for Vcpu {
+impl Deref for RunnableVcpu {
+    type Target = Vcpu;
+    fn deref(&self) -> &Self::Target {
+        &self.vcpu
+    }
+}
+
+impl DerefMut for RunnableVcpu {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.vcpu
+    }
+}
+
+impl AsRawFd for RunnableVcpu {
     fn as_raw_fd(&self) -> RawFd {
         self.vcpu.as_raw_fd()
     }
 }
 
+impl Drop for RunnableVcpu {
+    fn drop(&mut self) {
+        VCPU_THREAD.with(|v| {
+            // This assumes that a failure in `BlockedSignal::new` means the signal is already
+            // blocked and therefore it should not be unblocked on exit.
+            let _blocked_signal = &(*v.borrow())
+                .as_ref()
+                .and_then(|state| state.signal_num)
+                .map(BlockedSignal::new);
+
+            *v.borrow_mut() = None;
+        });
+    }
+}
+
 /// Wrapper for kvm_cpuid2 which has a zero length array at the end.
 /// Hides the zero length array behind a bounds check.
 #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
@@ -1858,6 +1890,28 @@ impl CpuId {
     }
 }
 
+// Represents a temporarily blocked signal. It will unblock the signal when dropped.
+struct BlockedSignal {
+    signal_num: c_int,
+}
+
+impl BlockedSignal {
+    // Returns a `BlockedSignal` if the specified signal can be blocked, otherwise None.
+    fn new(signal_num: c_int) -> Option<BlockedSignal> {
+        if block_signal(signal_num).is_ok() {
+            Some(BlockedSignal { signal_num })
+        } else {
+            None
+        }
+    }
+}
+
+impl Drop for BlockedSignal {
+    fn drop(&mut self) {
+        let _ = unblock_signal(self.signal_num).expect("failed to restore signal mask");
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;
diff --git a/kvm/tests/dirty_log.rs b/kvm/tests/dirty_log.rs
index 527af46..1efe135 100644
--- a/kvm/tests/dirty_log.rs
+++ b/kvm/tests/dirty_log.rs
@@ -52,8 +52,9 @@ fn test_run() {
         )
         .expect("failed to register memory");
 
+    let runnable_vcpu = vcpu.to_runnable(None).unwrap();
     loop {
-        match vcpu.run().expect("run failed") {
+        match runnable_vcpu.run().expect("run failed") {
             VcpuExit::Hlt => break,
             r => panic!("unexpected exit reason: {:?}", r),
         }
diff --git a/kvm/tests/read_only_memory.rs b/kvm/tests/read_only_memory.rs
index 5aa7bcc..d8c0e1d 100644
--- a/kvm/tests/read_only_memory.rs
+++ b/kvm/tests/read_only_memory.rs
@@ -74,8 +74,9 @@ fn test_run() {
     // Ensure we get exactly 1 exit from attempting to write to read only memory.
     let mut exits = 0;
 
+    let runnable_vcpu = vcpu.to_runnable(None).unwrap();
     loop {
-        match vcpu.run().expect("run failed") {
+        match runnable_vcpu.run().expect("run failed") {
             VcpuExit::Hlt => break,
             VcpuExit::MmioWrite {
                 address,
diff --git a/kvm/tests/real_run_adder.rs b/kvm/tests/real_run_adder.rs
index a419ad9..60869ed 100644
--- a/kvm/tests/real_run_adder.rs
+++ b/kvm/tests/real_run_adder.rs
@@ -49,8 +49,9 @@ fn test_run() {
     vcpu.set_regs(&vcpu_regs).expect("set regs failed");
 
     let mut out = String::new();
+    let runnable_vcpu = vcpu.to_runnable(None).unwrap();
     loop {
-        match vcpu.run().expect("run failed") {
+        match runnable_vcpu.run().expect("run failed") {
             VcpuExit::IoOut {
                 port: 0x3f8,
                 size,