author     Steven Richman <srichman@google.com>      2020-05-11 13:27:03 -0700
committer  Commit Bot <commit-bot@chromium.org>      2020-05-21 15:03:51 +0000
commit     0aacc50fd25d61ae1274d6054550f45acaa50897 (patch)
tree       131c2f4a3dfdf069cd9766f5b2eaa51de2472b60 /hypervisor
parent     1de5def170a96d8a5d6a50a1759128e4554e2bc2 (diff)
hypervisor: add KvmVm new and try_clone
Vms and Vcpus will be try_cloneable and Send, so that vcpus can be
configured on their own threads and so IrqChips can reference all
Vcpus.  To support cloning, the collection fields in Vm have been moved
into Arc<Mutex<...>>, and Vm and Vcpu are now Sized.  Because this
breaks object safety, all usage of the traits will be via static
binding.
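
As a rough illustration of the pattern this enables (a minimal sketch,
not crosvm code: the stand-in Vm trait and spawn_vcpu_thread helper
below are hypothetical), static binding keeps callers generic over the
concrete Vm type instead of holding dyn Vm trait objects, and the Send
bound lets a shallow clone move to a vcpu thread:

    use std::thread;

    // Stand-in for the hypervisor Vm trait; the real trait also exposes
    // guest memory. `Send + Sized` is what the cloning/threading below needs.
    trait Vm: Send + Sized {
        fn try_clone(&self) -> std::io::Result<Self>;
    }

    // Statically bound: monomorphized per Vm implementation, no trait objects.
    fn spawn_vcpu_thread<V: Vm + 'static>(vm: &V) -> std::io::Result<thread::JoinHandle<()>> {
        // Shallow clone: shares the Arc<Mutex<..>> collection fields.
        let vm = vm.try_clone()?;
        Ok(thread::spawn(move || {
            // Configure and run the vcpu here using `vm`.
            let _vm = vm;
        }))
    }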

Add impl AsRawFd for SafeDescriptor, to get the sys_util ioctl functions
working with SafeDescriptor; eventually the functions will change to
take SafeDescriptors.  Copy set_user_memory_region helper from kvm
crate.
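
The AsRawFd bridge itself lands in sys_util rather than in this diff; a
rough sketch of the idea, assuming SafeDescriptor owns a raw fd and the
sys_util ioctl helpers are generic over AsRawFd (the field name here is
illustrative):

    use std::os::unix::io::{AsRawFd, RawFd};

    // Stand-in for sys_util::SafeDescriptor, which owns a file descriptor
    // and closes it on drop.
    struct SafeDescriptor {
        descriptor: RawFd,
    }

    impl SafeDescriptor {
        fn as_raw_descriptor(&self) -> RawFd {
            self.descriptor
        }
    }

    // With this impl, helpers like ioctl_with_ref(&vm_descriptor, ..) can
    // accept a SafeDescriptor directly, as set_user_memory_region below does.
    impl AsRawFd for SafeDescriptor {
        fn as_raw_fd(&self) -> RawFd {
            self.as_raw_descriptor()
        }
    }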

BUG=chromium:1077058
TEST=cargo test -p hypervisor

Change-Id: I23de47c4472a77632006d0d45de9754394b400c2
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/platform/crosvm/+/2197337
Reviewed-by: Udam Saini <udam@google.com>
Reviewed-by: Zach Reizner <zachr@chromium.org>
Tested-by: kokoro <noreply+kokoro@google.com>
Commit-Queue: Steven Richman <srichman@google.com>
Diffstat (limited to 'hypervisor')
-rw-r--r--  hypervisor/Cargo.toml            3
-rw-r--r--  hypervisor/src/kvm/aarch64.rs    2
-rw-r--r--  hypervisor/src/kvm/mod.rs        167
-rw-r--r--  hypervisor/src/lib.rs            29
4 files changed, 185 insertions, 16 deletions
diff --git a/hypervisor/Cargo.toml b/hypervisor/Cargo.toml
index a68f315..8f07d5d 100644
--- a/hypervisor/Cargo.toml
+++ b/hypervisor/Cargo.toml
@@ -8,4 +8,5 @@ edition = "2018"
 libc = "*"
 kvm = { path = "../kvm" }
 kvm_sys = { path = "../kvm_sys" }
-sys_util = { path = "../sys_util" }
\ No newline at end of file
+sync = { path = "../sync" }
+sys_util = { path = "../sys_util" }
diff --git a/hypervisor/src/kvm/aarch64.rs b/hypervisor/src/kvm/aarch64.rs
index 6e9f9f7..4f0398f 100644
--- a/hypervisor/src/kvm/aarch64.rs
+++ b/hypervisor/src/kvm/aarch64.rs
@@ -16,7 +16,7 @@ impl VmAArch64 for KvmVm {
 }
 
 impl VcpuAArch64 for KvmVcpu {
-    fn set_one_reg(&self, reg_id: u64, data: u64) -> Result<()> {
+    fn set_one_reg(&self, _reg_id: u64, _data: u64) -> Result<()> {
         Ok(())
     }
 }
diff --git a/hypervisor/src/kvm/mod.rs b/hypervisor/src/kvm/mod.rs
index ede1391..0550792 100644
--- a/hypervisor/src/kvm/mod.rs
+++ b/hypervisor/src/kvm/mod.rs
@@ -7,18 +7,57 @@ mod aarch64;
 #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
 mod x86_64;
 
-use kvm_sys::*;
-use libc::{open, O_CLOEXEC, O_RDWR};
+use std::cmp::Ordering;
+use std::collections::{BinaryHeap, HashMap};
 use std::convert::TryFrom;
 use std::ops::{Deref, DerefMut};
 use std::os::raw::{c_char, c_ulong};
 use std::os::unix::io::{AsRawFd, RawFd};
+use std::sync::Arc;
+
+use libc::{open, O_CLOEXEC, O_RDWR};
+
+use kvm_sys::*;
+use sync::Mutex;
 use sys_util::{
-    errno_result, ioctl_with_val, AsRawDescriptor, Error, FromRawDescriptor, GuestMemory,
-    RawDescriptor, Result, SafeDescriptor,
+    errno_result, ioctl, ioctl_with_ref, ioctl_with_val, AsRawDescriptor, Error, FromRawDescriptor,
+    GuestMemory, RawDescriptor, Result, SafeDescriptor,
 };
 
-use crate::{Hypervisor, HypervisorCap, RunnableVcpu, Vcpu, VcpuExit, Vm};
+use crate::{Hypervisor, HypervisorCap, MappedRegion, RunnableVcpu, Vcpu, VcpuExit, Vm};
+
+// Wrapper around KVM_SET_USER_MEMORY_REGION ioctl, which creates, modifies, or deletes a mapping
+// from guest physical to host user pages.
+//
+// Safe when the guest regions are guaranteed not to overlap.
+unsafe fn set_user_memory_region(
+    descriptor: &SafeDescriptor,
+    slot: u32,
+    read_only: bool,
+    log_dirty_pages: bool,
+    guest_addr: u64,
+    memory_size: u64,
+    userspace_addr: *mut u8,
+) -> Result<()> {
+    let mut flags = if read_only { KVM_MEM_READONLY } else { 0 };
+    if log_dirty_pages {
+        flags |= KVM_MEM_LOG_DIRTY_PAGES;
+    }
+    let region = kvm_userspace_memory_region {
+        slot,
+        flags,
+        guest_phys_addr: guest_addr,
+        memory_size,
+        userspace_addr: userspace_addr as u64,
+    };
+
+    let ret = ioctl_with_ref(descriptor, KVM_SET_USER_MEMORY_REGION(), &region);
+    if ret == 0 {
+        Ok(())
+    } else {
+        errno_result()
+    }
+}
 
 pub struct Kvm {
     kvm: SafeDescriptor,
@@ -67,15 +106,64 @@ impl Hypervisor for Kvm {
     }
 }
 
+// Used to invert the order when stored in a max-heap.
+#[derive(Copy, Clone, Eq, PartialEq)]
+struct MemSlot(u32);
+
+impl Ord for MemSlot {
+    fn cmp(&self, other: &MemSlot) -> Ordering {
+        // Notice the order is inverted so the lowest magnitude slot has the highest priority in a
+        // max-heap.
+        other.0.cmp(&self.0)
+    }
+}
+
+impl PartialOrd for MemSlot {
+    fn partial_cmp(&self, other: &MemSlot) -> Option<Ordering> {
+        Some(self.cmp(other))
+    }
+}
+
 /// A wrapper around creating and using a KVM VM.
 pub struct KvmVm {
+    vm: SafeDescriptor,
     guest_mem: GuestMemory,
+    mem_regions: Arc<Mutex<HashMap<u32, Box<dyn MappedRegion>>>>,
+    mem_slot_gaps: Arc<Mutex<BinaryHeap<MemSlot>>>,
 }
 
 impl KvmVm {
     /// Constructs a new `KvmVm` using the given `Kvm` instance.
-    pub fn new(_kvm: &Kvm, guest_mem: GuestMemory) -> Result<KvmVm> {
-        Ok(KvmVm { guest_mem })
+    pub fn new(kvm: &Kvm, guest_mem: GuestMemory) -> Result<KvmVm> {
+        // Safe because we know kvm is a real kvm fd as this module is the only one that can make
+        // Kvm objects.
+        let ret = unsafe { ioctl(kvm, KVM_CREATE_VM()) };
+        if ret < 0 {
+            return errno_result();
+        }
+        // Safe because we verify that ret is valid and we own the fd.
+        let vm_descriptor = unsafe { SafeDescriptor::from_raw_descriptor(ret) };
+        guest_mem.with_regions(|index, guest_addr, size, host_addr, _| {
+            unsafe {
+                // Safe because the guest regions are guaranteed not to overlap.
+                set_user_memory_region(
+                    &vm_descriptor,
+                    index as u32,
+                    false,
+                    false,
+                    guest_addr.offset() as u64,
+                    size as u64,
+                    host_addr as *mut u8,
+                )
+            }
+        })?;
+        // TODO(colindr/srichman): add default IRQ routes in IrqChip constructor or configure_vm
+        Ok(KvmVm {
+            vm: vm_descriptor,
+            guest_mem,
+            mem_regions: Arc::new(Mutex::new(HashMap::new())),
+            mem_slot_gaps: Arc::new(Mutex::new(BinaryHeap::new())),
+        })
     }
 
     fn create_kvm_vcpu(&self, _id: usize) -> Result<KvmVcpu> {
@@ -84,11 +172,32 @@ impl KvmVm {
 }
 
 impl Vm for KvmVm {
-    fn get_guest_mem(&self) -> &GuestMemory {
+    fn try_clone(&self) -> Result<Self> {
+        Ok(KvmVm {
+            vm: self.vm.try_clone()?,
+            guest_mem: self.guest_mem.clone(),
+            mem_regions: self.mem_regions.clone(),
+            mem_slot_gaps: self.mem_slot_gaps.clone(),
+        })
+    }
+
+    fn get_memory(&self) -> &GuestMemory {
         &self.guest_mem
     }
 }
 
+impl AsRawDescriptor for KvmVm {
+    fn as_raw_descriptor(&self) -> RawDescriptor {
+        self.vm.as_raw_descriptor()
+    }
+}
+
+impl AsRawFd for KvmVm {
+    fn as_raw_fd(&self) -> RawFd {
+        self.vm.as_raw_descriptor()
+    }
+}
+
 /// A wrapper around creating and using a KVM Vcpu.
 pub struct KvmVcpu {}
 
@@ -154,7 +263,9 @@ impl<'a> TryFrom<&'a HypervisorCap> for KvmCap {
 
 #[cfg(test)]
 mod tests {
-    use super::{Hypervisor, HypervisorCap, Kvm};
+    use super::*;
+    use std::thread;
+    use sys_util::GuestAddress;
 
     #[test]
     fn new() {
@@ -167,4 +278,42 @@ mod tests {
         assert!(kvm.check_capability(&HypervisorCap::UserMemory));
         assert!(!kvm.check_capability(&HypervisorCap::S390UserSigp));
     }
+
+    #[test]
+    fn create_vm() {
+        let kvm = Kvm::new().unwrap();
+        let gm = GuestMemory::new(&[(GuestAddress(0), 0x1000)]).unwrap();
+        KvmVm::new(&kvm, gm).unwrap();
+    }
+
+    #[test]
+    fn clone_vm() {
+        let kvm = Kvm::new().unwrap();
+        let gm = GuestMemory::new(&[(GuestAddress(0), 0x1000)]).unwrap();
+        let vm = KvmVm::new(&kvm, gm).unwrap();
+        vm.try_clone().unwrap();
+    }
+
+    #[test]
+    fn send_vm() {
+        let kvm = Kvm::new().unwrap();
+        let gm = GuestMemory::new(&[(GuestAddress(0), 0x1000)]).unwrap();
+        let vm = KvmVm::new(&kvm, gm).unwrap();
+        thread::spawn(move || {
+            let _vm = vm;
+        })
+        .join()
+        .unwrap();
+    }
+
+    #[test]
+    fn get_memory() {
+        let kvm = Kvm::new().unwrap();
+        let gm = GuestMemory::new(&[(GuestAddress(0), 0x1000)]).unwrap();
+        let vm = KvmVm::new(&kvm, gm).unwrap();
+        let obj_addr = GuestAddress(0xf0);
+        vm.get_memory().write_obj_at_addr(67u8, obj_addr).unwrap();
+        let read_val: u8 = vm.get_memory().read_obj_from_addr(obj_addr).unwrap();
+        assert_eq!(read_val, 67u8);
+    }
 }
diff --git a/hypervisor/src/lib.rs b/hypervisor/src/lib.rs
index f17528b..784af8c 100644
--- a/hypervisor/src/lib.rs
+++ b/hypervisor/src/lib.rs
@@ -27,15 +27,18 @@ pub trait Hypervisor {
 }
 
 /// A wrapper for using a VM and getting/setting its state.
-pub trait Vm {
-    // Gets the guest-mapped memory for the Vm
-    fn get_guest_mem(&self) -> &GuestMemory;
+pub trait Vm: Send + Sized {
+    /// Makes a shallow clone of this `Vm`.
+    fn try_clone(&self) -> Result<Self>;
+
+    /// Gets the guest-mapped memory for the Vm.
+    fn get_memory(&self) -> &GuestMemory;
 }
 
-/// A wrapper around creating and using a VCPU.
+/// A wrapper around using a VCPU.
 /// `Vcpu` provides all functionality except for running. To run, `to_runnable` must be called to
 /// lock the vcpu to a thread. Then the returned `RunnableVcpu` can be used for running.
-pub trait Vcpu {
+pub trait Vcpu: Send + Sized {
     type Runnable: RunnableVcpu;
 
     /// Consumes `self` and returns a `RunnableVcpu`. A `RunnableVcpu` is required to run the guest.
@@ -58,6 +61,22 @@ pub trait RunnableVcpu: Deref<Target = <Self as RunnableVcpu>::Vcpu> + DerefMut
     fn run(&self) -> Result<VcpuExit>;
 }
 
+/// A memory region in the current process that can be mapped into the guest's memory.
+///
+/// Safe when implementers guarantee `ptr`..`ptr+size` is an mmaped region owned by this object that
+/// can't be unmapped during the `MappedRegion`'s lifetime.
+pub unsafe trait MappedRegion: Send + Sync {
+    /// Returns a pointer to the beginning of the memory region. Should only be
+    /// used for passing this region to ioctls for setting guest memory.
+    fn as_ptr(&self) -> *mut u8;
+
+    /// Returns the size of the memory region in bytes.
+    fn size(&self) -> usize;
+
+    /// Flushes changes to this memory region to the backing file.
+    fn msync(&self) -> Result<()>;
+}
+
 /// A reason why a VCPU exited. One of these returns every time `Vcpu::run` is called.
 #[derive(Debug)]
 pub enum VcpuExit {