author     Daniel Prilik <prilik@google.com>     2019-02-27 12:24:25 -0800
committer  chrome-bot <chrome-bot@chromium.org>  2019-03-25 17:43:50 -0700
commit     db4721d8709a7d139f9b6cb405b22e2a405072ff (patch)
tree       ff21f5766937b8b8333d894adc19c9a5faf08293
parent     33e8ef061d4a0d263edd6542728ac4bec3ebf42f (diff)
crosvm: add memfd for GuestMemory
Building off CL:1290293

Instead of having a separate GuestMemoryManager, this adds SharedMemory
as an Arc'd member of GuestMemory. This is nice since it removes the need
to plumb the Manager struct throughout the codebase.
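
The resulting shape, roughly (stand-in types for illustration only; the
real definitions live in sys_util/src/guest_memory.rs in the diff below):

    use std::sync::Arc;

    // Hypothetical stand-ins for the real sys_util types, just to show the layout.
    struct MemoryRegion { /* mapping, guest_base, memfd_offset */ }
    struct SharedMemory { /* memfd-backed shared memory fd */ }

    #[derive(Clone)]
    pub struct GuestMemory {
        regions: Arc<Vec<MemoryRegion>>,
        // None only as a fallback on kernels without memfd support (see TODO below).
        memfd: Option<Arc<SharedMemory>>,
    }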

BUG=chromium:936567
TEST=cargo test -p sys_util

Change-Id: I6fa5d73f7e0db495c2803a040479818445660345
Reviewed-on: https://chromium-review.googlesource.com/1493013
Commit-Ready: Daniel Prilik <prilik@google.com>
Tested-by: kokoro <noreply+kokoro@google.com>
Reviewed-by: Zach Reizner <zachr@chromium.org>
-rw-r--r--  devices/src/virtio/vhost/net.rs    4
-rw-r--r--  kvm/src/lib.rs                     2
-rw-r--r--  sys_util/src/guest_memory.rs     158
-rw-r--r--  vhost/src/lib.rs                   6
-rw-r--r--  x86_64/src/mptable.rs             23
5 files changed, 166 insertions(+), 27 deletions(-)
diff --git a/devices/src/virtio/vhost/net.rs b/devices/src/virtio/vhost/net.rs
index b8e518d..2ecef9d 100644
--- a/devices/src/virtio/vhost/net.rs
+++ b/devices/src/virtio/vhost/net.rs
@@ -227,8 +227,8 @@ pub mod tests {
 
     fn create_guest_memory() -> result::Result<GuestMemory, GuestMemoryError> {
         let start_addr1 = GuestAddress(0x0);
-        let start_addr2 = GuestAddress(0x100);
-        GuestMemory::new(&vec![(start_addr1, 0x100), (start_addr2, 0x400)])
+        let start_addr2 = GuestAddress(0x1000);
+        GuestMemory::new(&vec![(start_addr1, 0x1000), (start_addr2, 0x4000)])
     }
 
     fn create_net_common() -> Net<FakeTap, FakeNet<FakeTap>> {
diff --git a/kvm/src/lib.rs b/kvm/src/lib.rs
index 3112fa0..42ed400 100644
--- a/kvm/src/lib.rs
+++ b/kvm/src/lib.rs
@@ -312,7 +312,7 @@ impl Vm {
         if ret >= 0 {
             // Safe because we verify the value of ret and we are the owners of the fd.
             let vm_file = unsafe { File::from_raw_fd(ret) };
-            guest_mem.with_regions(|index, guest_addr, size, host_addr| {
+            guest_mem.with_regions(|index, guest_addr, size, host_addr, _| {
                 unsafe {
                     // Safe because the guest regions are guaranteed not to overlap.
                     set_user_memory_region(
diff --git a/sys_util/src/guest_memory.rs b/sys_util/src/guest_memory.rs
index 2f397cf..0d0c292 100644
--- a/sys_util/src/guest_memory.rs
+++ b/sys_util/src/guest_memory.rs
@@ -4,16 +4,19 @@
 
 //! Track memory regions that are mapped to the guest VM.
 
+use std::ffi::CStr;
 use std::fmt::{self, Display};
 use std::io::{Read, Write};
+use std::os::unix::io::{AsRawFd, RawFd};
 use std::result;
 use std::sync::Arc;
 
-use data_model::volatile_memory::*;
-use data_model::DataInit;
-
 use crate::guest_address::GuestAddress;
 use crate::mmap::{self, MemoryMapping};
+use crate::shm::{MemfdSeals, SharedMemory};
+use crate::{errno, pagesize};
+use data_model::volatile_memory::*;
+use data_model::DataInit;
 
 #[derive(Debug)]
 pub enum Error {
@@ -21,6 +24,10 @@ pub enum Error {
     MemoryAccess(GuestAddress, mmap::Error),
     MemoryMappingFailed(mmap::Error),
     MemoryRegionOverlap,
+    MemoryNotAligned,
+    MemoryCreationFailed(errno::Error),
+    MemorySetSizeFailed(errno::Error),
+    MemoryAddSealsFailed(errno::Error),
     ShortWrite { expected: usize, completed: usize },
     ShortRead { expected: usize, completed: usize },
 }
@@ -39,6 +46,10 @@ impl Display for Error {
             }
             MemoryMappingFailed(e) => write!(f, "failed to map guest memory: {}", e),
             MemoryRegionOverlap => write!(f, "memory regions overlap"),
+            MemoryNotAligned => write!(f, "memfd regions must be page aligned"),
+            MemoryCreationFailed(_) => write!(f, "failed to create memfd region"),
+            MemorySetSizeFailed(e) => write!(f, "failed to set memfd region size: {}", e),
+            MemoryAddSealsFailed(e) => write!(f, "failed to set seals on memfd region: {}", e),
             ShortWrite {
                 expected,
                 completed,
@@ -62,6 +73,7 @@ impl Display for Error {
 struct MemoryRegion {
     mapping: MemoryMapping,
     guest_base: GuestAddress,
+    memfd_offset: usize,
 }
 
 fn region_end(region: &MemoryRegion) -> GuestAddress {
@@ -71,17 +83,77 @@ fn region_end(region: &MemoryRegion) -> GuestAddress {
         .unchecked_add(region.mapping.size() as u64)
 }
 
-/// Tracks a memory region and where it is mapped in the guest.
+/// Tracks a memory region and where it is mapped in the guest, along with a shm
+/// fd of the underlying memory regions.
 #[derive(Clone)]
 pub struct GuestMemory {
     regions: Arc<Vec<MemoryRegion>>,
+    memfd: Option<Arc<SharedMemory>>,
+}
+
+impl AsRawFd for GuestMemory {
+    fn as_raw_fd(&self) -> RawFd {
+        match self.memfd {
+            Some(ref memfd) => memfd.as_raw_fd(),
+            None => panic!("GuestMemory is not backed by a memfd"),
+        }
+    }
 }
 
 impl GuestMemory {
+    /// Creates backing memfd for GuestMemory regions
+    fn create_memfd(ranges: &[(GuestAddress, u64)]) -> Result<SharedMemory> {
+        let mut aligned_size = 0;
+        let pg_size = pagesize();
+        for range in ranges.iter() {
+            if range.1 % pg_size as u64 != 0 {
+                return Err(Error::MemoryNotAligned);
+            }
+
+            aligned_size += range.1;
+        }
+
+        let mut seals = MemfdSeals::new();
+
+        seals.set_shrink_seal();
+        seals.set_grow_seal();
+        seals.set_seal_seal();
+
+        let mut memfd =
+            SharedMemory::new(Some(CStr::from_bytes_with_nul(b"crosvm_guest\0").unwrap()))
+                .map_err(Error::MemoryCreationFailed)?;
+        memfd
+            .set_size(aligned_size)
+            .map_err(Error::MemorySetSizeFailed)?;
+        memfd
+            .add_seals(seals)
+            .map_err(Error::MemoryAddSealsFailed)?;
+
+        Ok(memfd)
+    }
+
     /// Creates a container for guest memory regions.
     /// Valid memory regions are specified as a Vec of (Address, Size) tuples sorted by Address.
     pub fn new(ranges: &[(GuestAddress, u64)]) -> Result<GuestMemory> {
+        // Create memfd
+
+        // TODO(prilik) remove optional memfd once parallel CQ lands (crbug.com/942183).
+        // Many classic CQ builders run old kernels without memfd support, resulting in test
+        // failures. It's less effort to introduce this temporary optional path than to
+        // manually mark all affected tests as ignore.
+        let memfd = match GuestMemory::create_memfd(ranges) {
+            Err(Error::MemoryCreationFailed { .. }) => {
+                warn!("GuestMemory is not backed by a memfd");
+                None
+            }
+            Err(e) => return Err(e),
+            Ok(memfd) => Some(memfd),
+        };
+
+        // Create memory regions
         let mut regions = Vec::<MemoryRegion>::new();
+        let mut offset = 0;
+
         for range in ranges.iter() {
             if let Some(last) = regions.last() {
                 if last
@@ -93,16 +165,27 @@ impl GuestMemory {
                 }
             }
 
-            let mapping =
-                MemoryMapping::new(range.1 as usize).map_err(Error::MemoryMappingFailed)?;
+            let mapping = match memfd {
+                Some(ref memfd) => MemoryMapping::from_fd_offset(memfd, range.1 as usize, offset),
+                None => MemoryMapping::new(range.1 as usize),
+            }
+            .map_err(Error::MemoryMappingFailed)?;
+
             regions.push(MemoryRegion {
                 mapping,
                 guest_base: range.0,
+                memfd_offset: offset,
             });
+
+            offset += range.1 as usize;
         }
 
         Ok(GuestMemory {
             regions: Arc::new(regions),
+            memfd: match memfd {
+                Some(memfd) => Some(Arc::new(memfd)),
+                None => None,
+            },
         })
     }
 
@@ -160,9 +243,16 @@ impl GuestMemory {
     }
 
     /// Perform the specified action on each region's addresses.
+    ///
+    /// Callback is called with arguments:
+    ///  * index: usize
+    ///  * guest_addr : GuestAddress
+    ///  * size: usize
+    ///  * host_addr: usize
+    ///  * memfd_offset: usize
     pub fn with_regions<F, E>(&self, mut cb: F) -> result::Result<(), E>
     where
-        F: FnMut(usize, GuestAddress, usize, usize) -> result::Result<(), E>,
+        F: FnMut(usize, GuestAddress, usize, usize, usize) -> result::Result<(), E>,
     {
         for (index, region) in self.regions.iter().enumerate() {
             cb(
@@ -170,6 +260,7 @@ impl GuestMemory {
                 region.guest_base,
                 region.mapping.size(),
                 region.mapping.as_ptr() as usize,
+                region.memfd_offset,
             )?;
         }
         Ok(())
@@ -483,12 +574,22 @@ impl VolatileMemory for GuestMemory {
 #[cfg(test)]
 mod tests {
     use super::*;
+    use crate::kernel_has_memfd;
+
+    #[test]
+    fn test_alignment() {
+        let start_addr1 = GuestAddress(0x0);
+        let start_addr2 = GuestAddress(0x1000);
+
+        assert!(GuestMemory::new(&vec![(start_addr1, 0x100), (start_addr2, 0x400)]).is_err());
+        assert!(GuestMemory::new(&vec![(start_addr1, 0x1000), (start_addr2, 0x1000)]).is_ok());
+    }
 
     #[test]
     fn two_regions() {
         let start_addr1 = GuestAddress(0x0);
-        let start_addr2 = GuestAddress(0x400);
-        assert!(GuestMemory::new(&vec![(start_addr1, 0x400), (start_addr2, 0x400)]).is_ok());
+        let start_addr2 = GuestAddress(0x4000);
+        assert!(GuestMemory::new(&vec![(start_addr1, 0x4000), (start_addr2, 0x4000)]).is_ok());
     }
 
     #[test]
@@ -572,8 +673,8 @@ mod tests {
     #[test]
     fn guest_to_host() {
         let start_addr1 = GuestAddress(0x0);
-        let start_addr2 = GuestAddress(0x100);
-        let mem = GuestMemory::new(&vec![(start_addr1, 0x100), (start_addr2, 0x400)]).unwrap();
+        let start_addr2 = GuestAddress(0x1000);
+        let mem = GuestMemory::new(&vec![(start_addr1, 0x1000), (start_addr2, 0x4000)]).unwrap();
 
         // Verify the host addresses match what we expect from the mappings.
         let addr1_base = get_mapping(&mem, start_addr1).unwrap();
@@ -587,4 +688,39 @@ mod tests {
         let bad_addr = GuestAddress(0x123456);
         assert!(mem.get_host_address(bad_addr).is_err());
     }
+
+    #[test]
+    fn memfd_offset() {
+        if !kernel_has_memfd() {
+            return;
+        }
+
+        let start_region1 = GuestAddress(0x0);
+        let size_region1 = 0x1000;
+        let start_region2 = GuestAddress(0x10000);
+        let size_region2 = 0x2000;
+        let gm = GuestMemory::new(&vec![
+            (start_region1, size_region1),
+            (start_region2, size_region2),
+        ])
+        .unwrap();
+
+        gm.write_obj_at_addr(0x1337u16, GuestAddress(0x0)).unwrap();
+        gm.write_obj_at_addr(0x0420u16, GuestAddress(0x10000))
+            .unwrap();
+
+        let _ = gm.with_regions::<_, ()>(|index, _, size, _, memfd_offset| {
+            let mmap = MemoryMapping::from_fd_offset(&gm, size, memfd_offset).unwrap();
+
+            if index == 0 {
+                assert!(mmap.read_obj::<u16>(0x0).unwrap() == 0x1337u16);
+            }
+
+            if index == 1 {
+                assert!(mmap.read_obj::<u16>(0x0).unwrap() == 0x0420u16);
+            }
+
+            Ok(())
+        });
+    }
 }
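
With the new fifth callback argument and the AsRawFd impl above, a consumer can
remap any region straight from the shared memfd. A minimal sketch, modeled on the
memfd_offset test (assumes the kernel supports memfd so `gm` actually carries one;
error handling elided):

    let _ = gm.with_regions::<_, ()>(|_index, _guest_addr, size, _host_addr, memfd_offset| {
        // GuestMemory exposes the backing memfd via AsRawFd, so each region can
        // be mapped again (e.g. in another process) at its memfd_offset.
        let mmap = MemoryMapping::from_fd_offset(&gm, size, memfd_offset).unwrap();
        let _ = mmap; // aliases the same pages the guest sees in this region
        Ok(())
    });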
diff --git a/vhost/src/lib.rs b/vhost/src/lib.rs
index fe1c656..e841294 100644
--- a/vhost/src/lib.rs
+++ b/vhost/src/lib.rs
@@ -128,7 +128,7 @@ pub trait Vhost: AsRawFd + std::marker::Sized {
 
         let _ = self
             .mem()
-            .with_regions::<_, ()>(|index, guest_addr, size, host_addr| {
+            .with_regions::<_, ()>(|index, guest_addr, size, host_addr, _| {
                 vhost_regions[index] = virtio_sys::vhost_memory_region {
                     guest_phys_addr: guest_addr.offset() as u64,
                     memory_size: size as u64,
@@ -339,8 +339,8 @@ mod tests {
 
     fn create_guest_memory() -> result::Result<GuestMemory, GuestMemoryError> {
         let start_addr1 = GuestAddress(0x0);
-        let start_addr2 = GuestAddress(0x100);
-        GuestMemory::new(&vec![(start_addr1, 0x100), (start_addr2, 0x400)])
+        let start_addr2 = GuestAddress(0x1000);
+        GuestMemory::new(&vec![(start_addr1, 0x1000), (start_addr2, 0x4000)])
     }
 
     fn assert_ok_or_known_failure<T>(res: Result<T>) {
diff --git a/x86_64/src/mptable.rs b/x86_64/src/mptable.rs
index 7d5d622..9277840 100644
--- a/x86_64/src/mptable.rs
+++ b/x86_64/src/mptable.rs
@@ -330,6 +330,13 @@ pub fn setup_mptable(
 #[cfg(test)]
 mod tests {
     use super::*;
+    use sys_util::pagesize;
+
+    fn compute_page_aligned_mp_size(num_cpus: u8) -> u64 {
+        let mp_size = compute_mp_size(num_cpus);
+        let pg_size = pagesize();
+        (mp_size + pg_size - (mp_size % pg_size)) as u64
+    }
 
     fn table_entry_size(type_: u8) -> usize {
         match type_ as u32 {
@@ -347,7 +354,7 @@ mod tests {
         let num_cpus = 4;
         let mem = GuestMemory::new(&[(
             GuestAddress(MPTABLE_START),
-            compute_mp_size(num_cpus) as u64,
+            compute_page_aligned_mp_size(num_cpus),
         )])
         .unwrap();
 
@@ -356,12 +363,8 @@ mod tests {
 
     #[test]
     fn bounds_check_fails() {
-        let num_cpus = 4;
-        let mem = GuestMemory::new(&[(
-            GuestAddress(MPTABLE_START),
-            (compute_mp_size(num_cpus) - 1) as u64,
-        )])
-        .unwrap();
+        let num_cpus = 255;
+        let mem = GuestMemory::new(&[(GuestAddress(MPTABLE_START), 0x1000)]).unwrap();
 
         assert!(setup_mptable(&mem, num_cpus, Vec::new()).is_err());
     }
@@ -371,7 +374,7 @@ mod tests {
         let num_cpus = 1;
         let mem = GuestMemory::new(&[(
             GuestAddress(MPTABLE_START),
-            compute_mp_size(num_cpus) as u64,
+            compute_page_aligned_mp_size(num_cpus),
         )])
         .unwrap();
 
@@ -387,7 +390,7 @@ mod tests {
         let num_cpus = 4;
         let mem = GuestMemory::new(&[(
             GuestAddress(MPTABLE_START),
-            compute_mp_size(num_cpus) as u64,
+            compute_page_aligned_mp_size(num_cpus),
         )])
         .unwrap();
 
@@ -421,7 +424,7 @@ mod tests {
         const MAX_CPUS: u8 = 0xff;
         let mem = GuestMemory::new(&[(
             GuestAddress(MPTABLE_START),
-            compute_mp_size(MAX_CPUS) as u64,
+            compute_page_aligned_mp_size(MAX_CPUS),
         )])
         .unwrap();