path: root/sys_util
author    Chirantan Ekbote <chirantan@chromium.org>  2020-04-24 19:37:04 +0900
committer Commit Bot <commit-bot@chromium.org>       2020-04-29 07:36:36 +0000
commit    2d9dde9ee7ac78a9831d1e019e40107b5691f895 (patch)
tree      6b055f267adfebdf828cbe680dce0e06c870cdfb /sys_util
parent    887289e5d455d2cd026a2b178002ed009ea8bdd4 (diff)
Stop tracking sub-mappings in MemoryMappingArena
The kernel already takes care of tracking all our memory mappings.
Doing it again ourselves provides no benefit and adds extra
restrictions (like not being able to overlap with existing mappings or
partially remove mappings).  Additionally, the `MemoryMappingArena`
already unmaps the entire memory-mapped region, so there is no need to
unmap the sub-mappings individually.
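
A minimal libc-level sketch of the behavior this relies on (the flags,
sizes, and test name here are illustrative only, not part of this
change):

    #[test]
    fn whole_region_munmap_sketch() {
        let page = 4096usize;
        unsafe {
            // Reserve a region, then place a MAP_FIXED sub-mapping inside it.
            let base = libc::mmap(
                std::ptr::null_mut(),
                4 * page,
                libc::PROT_READ,
                libc::MAP_ANONYMOUS | libc::MAP_PRIVATE,
                -1,
                0,
            );
            assert_ne!(base, libc::MAP_FAILED);

            let sub = libc::mmap(
                (base as usize + page) as *mut libc::c_void,
                page,
                libc::PROT_READ | libc::PROT_WRITE,
                libc::MAP_ANONYMOUS | libc::MAP_PRIVATE | libc::MAP_FIXED,
                -1,
                0,
            );
            assert_ne!(sub, libc::MAP_FAILED);

            // A single munmap over the whole region also unmaps the
            // sub-mapping; the kernel tracks the individual pieces for us.
            assert_eq!(libc::munmap(base, 4 * page), 0);
        }
    }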

The kernel's mmap API doesn't have these restrictions, and as far as I
can tell there are no safety concerns with allowing this behavior, so
just stop tracking the sub-mappings.
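
For example, "partially removing" a mapping by overlaying just its
middle page is perfectly legal at the mmap level; another hedged libc
sketch (again, names and sizes are illustrative):

    #[test]
    fn partial_remove_sketch() {
        let page = 4096usize;
        unsafe {
            let base = libc::mmap(
                std::ptr::null_mut(),
                3 * page,
                libc::PROT_READ | libc::PROT_WRITE,
                libc::MAP_ANONYMOUS | libc::MAP_PRIVATE,
                -1,
                0,
            );
            assert_ne!(base, libc::MAP_FAILED);

            // Atomically replace only the middle page; the kernel splits the
            // original mapping in two instead of returning an error.
            let hole = libc::mmap(
                (base as usize + page) as *mut libc::c_void,
                page,
                libc::PROT_READ,
                libc::MAP_ANONYMOUS | libc::MAP_PRIVATE | libc::MAP_FIXED,
                -1,
                0,
            );
            assert_ne!(hole, libc::MAP_FAILED);

            assert_eq!(libc::munmap(base, 3 * page), 0);
        }
    }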

Safe use of MAP_FIXED only requires that the address is part of a
previously mmapped region, so allow any MemoryMapping to be converted
into a MemoryMappingArena.
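
With the `From<MemoryMapping>` impl added below, the conversion might
look like this (a hedged sketch; `MemoryMapping::new` and `pagesize`
are the existing sys_util helpers):

    let mmap = MemoryMapping::new(pagesize() * 4)?;
    let mut arena = MemoryMappingArena::from(mmap);
    // `arena` now owns the region and will munmap all of it on drop.
    arena.add_anon(0, pagesize())?;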

BUG=b:147341783
TEST=unit tests

Change-Id: Iaf944a971b8ba9333802aab73c1d184fe388af89
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/platform/crosvm/+/2162542
Tested-by: kokoro <noreply+kokoro@google.com>
Commit-Queue: Chirantan Ekbote <chirantan@chromium.org>
Reviewed-by: Stephen Barber <smbarber@chromium.org>
Diffstat (limited to 'sys_util')
-rw-r--r--  sys_util/src/mmap.rs | 200
1 file changed, 99 insertions(+), 101 deletions(-)
diff --git a/sys_util/src/mmap.rs b/sys_util/src/mmap.rs
index 006b0a8..c6a52ea 100644
--- a/sys_util/src/mmap.rs
+++ b/sys_util/src/mmap.rs
@@ -6,10 +6,9 @@
 //! mmap object leaves scope.
 
 use std::cmp::min;
-use std::collections::BTreeMap;
 use std::fmt::{self, Display};
 use std::io;
-use std::mem::{size_of, ManuallyDrop};
+use std::mem::size_of;
 use std::os::unix::io::AsRawFd;
 use std::ptr::{copy_nonoverlapping, null_mut, read_unaligned, write_unaligned};
 
@@ -28,8 +27,6 @@ pub enum Error {
     InvalidOffset,
     /// Requested mapping is not page aligned
     NotPageAligned,
-    /// Overlapping regions
-    Overlapping(usize, usize),
     /// Requested memory range spans past the end of the region.
     InvalidRange(usize, usize, usize),
     /// `mmap` returned the given error.
@@ -49,11 +46,6 @@ impl Display for Error {
             InvalidAddress => write!(f, "requested memory out of range"),
             InvalidOffset => write!(f, "requested offset is out of range of off_t"),
             NotPageAligned => write!(f, "requested memory is not page aligned"),
-            Overlapping(offset, count) => write!(
-                f,
-                "requested memory range overlaps with existing region: offset={} size={}",
-                offset, count
-            ),
             InvalidRange(offset, count, region_size) => write!(
                 f,
                 "requested memory range spans past the end of the region: offset={} count={} region_size={}",
@@ -643,13 +635,6 @@ impl Drop for MemoryMapping {
 pub struct MemoryMappingArena {
     addr: *mut u8,
     size: usize,
-    // When doing in-place swaps of MemoryMappings, the BTreeMap returns an owned
-    // instance of the old MemoryMapping. When the old MemoryMapping falls out
-    // of scope, it calls munmap on the same region as the new MemoryMapping
-    // that was just mapped in. To avoid accidentally munmapping the new
-    // MemoryMapping, all mappings are wrapped in a ManuallyDrop, and then
-    // "forgotten" when removed from the BTreeMap.
-    maps: BTreeMap<usize, ManuallyDrop<MemoryMapping>>,
 }
 
 // Send and Sync aren't automatically inherited for the raw address pointer.
@@ -666,17 +651,7 @@ impl MemoryMappingArena {
     /// * `size` - Size of memory region in bytes.
     pub fn new(size: usize) -> Result<MemoryMappingArena> {
         // Reserve the arena's memory using an anonymous read-only mmap.
-        // The actual MemoryMapping object is forgotten, with
-        // MemoryMappingArena manually calling munmap on drop.
-        let mmap = MemoryMapping::new_protection(size, Protection::none().set_read())?;
-        let addr = mmap.as_ptr();
-        let size = mmap.size();
-        std::mem::forget(mmap);
-        Ok(MemoryMappingArena {
-            addr,
-            size,
-            maps: BTreeMap::new(),
-        })
+        MemoryMapping::new_protection(size, Protection::none().set_read()).map(From::from)
     }
 
     /// Anonymously maps `size` bytes at `offset` bytes from the start of the arena.
@@ -755,58 +730,43 @@ impl MemoryMappingArena {
         let mmap = unsafe {
             match fd {
                 Some((fd, fd_offset)) => MemoryMapping::from_fd_offset_protection_fixed(
-                    (self.addr as usize + offset) as *mut u8,
+                    self.addr.add(offset),
                     fd,
                     size,
                     fd_offset,
                     prot,
                 )?,
-                None => MemoryMapping::new_protection_fixed(
-                    (self.addr as usize + offset) as *mut u8,
-                    size,
-                    prot,
-                )?,
+                None => MemoryMapping::new_protection_fixed(self.addr.add(offset), size, prot)?,
             }
         };
 
-        self.maps.insert(offset, ManuallyDrop::new(mmap));
+        // This mapping will get automatically removed when we drop the whole arena.
+        std::mem::forget(mmap);
         Ok(())
     }
 
-    /// Removes a mapping at `offset` from the start of the arena.
-    /// Returns a boolean indicating if there was a mapping present at `offset`.
-    /// If none was present, this method is a noop.
-    pub fn remove(&mut self, offset: usize) -> Result<bool> {
-        if let Some(mmap) = self.maps.remove(&offset) {
-            // Instead of munmapping the memory map, leaving an unprotected hole
-            // in the arena, swap this mmap with an anonymous protection.
-            // This is safe since the memory mapping perfectly overlaps with an
-            // existing, known good memory mapping.
-            let mmap = unsafe {
-                MemoryMapping::new_protection_fixed(
-                    mmap.as_ptr(),
-                    mmap.size(),
-                    Protection::none().set_read(),
-                )?
-            };
-            self.maps.insert(offset, ManuallyDrop::new(mmap));
-            Ok(true)
-        } else {
-            Ok(false)
-        }
+    /// Removes `size` bytes at `offset` bytes from the start of the arena. `offset` must be page
+    /// aligned.
+    ///
+    /// # Arguments
+    /// * `offset` - Page aligned offset into the arena in bytes.
+    /// * `size` - Size of memory region in bytes.
+    pub fn remove(&mut self, offset: usize, size: usize) -> Result<()> {
+        self.try_add(offset, size, Protection::read(), None)
     }
 
-    /// Calls msync with MS_SYNC on the mapping at `offset` from the start of
-    /// the arena.
-    /// Returns a boolean indicating if there was a mapping present at `offset`.
-    /// If none was present, this method is a noop.
-    pub fn msync(&self, offset: usize) -> Result<bool> {
-        if let Some(mmap) = self.maps.get(&offset) {
-            mmap.msync()?;
-            Ok(true)
-        } else {
-            Ok(false)
+    /// Calls msync with MS_SYNC on a mapping of `size` bytes starting at `offset` from the start of
+    /// the arena. `offset` must be page aligned.
+    pub fn msync(&self, offset: usize, size: usize) -> Result<()> {
+        self.validate_range(offset, size)?;
+
+        // Safe because we've validated that this memory range is owned by this `MemoryMappingArena`.
+        let ret =
+            unsafe { libc::msync(self.addr.add(offset) as *mut libc::c_void, size, libc::MS_SYNC) };
+        if ret == -1 {
+            return Err(Error::SystemCallFailed(errno::Error::last()));
         }
+        Ok(())
     }
 
     /// Returns a pointer to the beginning of the memory region.  Should only be
@@ -836,32 +796,26 @@ impl MemoryMappingArena {
         if end_offset > self.size {
             return Err(Error::InvalidAddress);
         }
-        // Ensure offset..offset+size doesn't overlap with existing regions
-        // Find the offset + size of the first mapping before the desired offset
-        let (prev_offset, prev_size) = match self.maps.range(..offset).rev().next() {
-            Some((offset, mmap)) => (*offset, mmap.size()),
-            None => {
-                // Empty map
-                return Ok(());
-            }
-        };
-        if offset == prev_offset {
-            // Perfectly overlapping regions are allowed
-            if size != prev_size {
-                return Err(Error::Overlapping(offset, size));
-            }
-        } else if offset < (prev_offset + prev_size) {
-            return Err(Error::Overlapping(offset, size));
-        }
-
         Ok(())
     }
 }
 
+impl From<MemoryMapping> for MemoryMappingArena {
+    fn from(mmap: MemoryMapping) -> Self {
+        let addr = mmap.as_ptr();
+        let size = mmap.size();
+
+        // Forget the original mapping because the `MemoryMappingArena` will take care of calling
+        // `munmap` when it is dropped.
+        std::mem::forget(mmap);
+        MemoryMappingArena { addr, size }
+    }
+}
+
 impl Drop for MemoryMappingArena {
     fn drop(&mut self) {
-        // This is safe because we mmap the area at addr ourselves, and nobody
-        // else is holding a reference to it.
+        // This is safe because we own this memory range, and nobody else is holding a reference to
+        // it.
         unsafe {
             libc::munmap(self.addr as *mut libc::c_void, self.size);
         }
@@ -977,22 +931,8 @@ mod tests {
     fn arena_remove() {
         let mut m = MemoryMappingArena::new(0x40000).unwrap();
         assert!(m.add_anon(0, pagesize() * 4).is_ok());
-        assert!(m.remove(0).unwrap(), true);
-        assert!(m.remove(0).unwrap(), false);
-    }
-
-    #[test]
-    fn arena_add_overlap_error() {
-        let page = pagesize();
-        let mut m = MemoryMappingArena::new(page * 4).unwrap();
-        assert!(m.add_anon(0, page * 4).is_ok());
-        let res = m.add_anon(page, page).unwrap_err();
-        match res {
-            Error::Overlapping(a, o) => {
-                assert_eq!((a, o), (page, page));
-            }
-            e => panic!("unexpected error: {}", e),
-        }
+        assert!(m.remove(0, pagesize()).is_ok());
+        assert!(m.remove(0, pagesize() * 2).is_ok());
     }
 
     #[test]
@@ -1015,4 +955,62 @@ mod tests {
             e => panic!("unexpected error: {}", e),
         }
     }
+
+    #[test]
+    fn arena_add_overlapping() {
+        let ps = pagesize();
+        let mut m =
+            MemoryMappingArena::new(12 * ps).expect("failed to create `MemoryMappingArena`");
+        m.add_anon(ps * 4, ps * 4)
+            .expect("failed to add sub-mapping");
+
+        // Overlap in the front.
+        m.add_anon(ps * 2, ps * 3)
+            .expect("failed to add front overlapping sub-mapping");
+
+        // Overlap in the back.
+        m.add_anon(ps * 7, ps * 3)
+            .expect("failed to add back overlapping sub-mapping");
+
+        // Overlap the back of the first mapping, all of the middle mapping, and the front of the
+        // last mapping.
+        m.add_anon(ps * 3, ps * 6)
+            .expect("failed to add mapping that overlaps several mappings");
+    }
+
+    #[test]
+    fn arena_remove_overlapping() {
+        let ps = pagesize();
+        let mut m =
+            MemoryMappingArena::new(12 * ps).expect("failed to create `MemoryMappingArena`");
+        m.add_anon(ps * 4, ps * 4)
+            .expect("failed to add sub-mapping");
+        m.add_anon(ps * 2, ps * 2)
+            .expect("failed to add front overlapping sub-mapping");
+        m.add_anon(ps * 8, ps * 2)
+            .expect("failed to add back overlapping sub-mapping");
+
+        // Remove the back of the first mapping and the front of the second.
+        m.remove(ps * 3, ps * 2)
+            .expect("failed to remove front overlapping mapping");
+
+        // Remove the back of the second mapping and the front of the third.
+        m.remove(ps * 7, ps * 2)
+            .expect("failed to remove back overlapping mapping");
+
+        // Remove a mapping that completely overlaps the middle mapping.
+        m.remove(ps * 5, ps * 2)
+            .expect("failed to remove fully overlapping mapping");
+    }
+
+    #[test]
+    fn arena_remove_unaligned() {
+        let ps = pagesize();
+        let mut m =
+            MemoryMappingArena::new(12 * ps).expect("failed to create `MemoryMappingArena`");
+
+        m.add_anon(0, ps).expect("failed to add mapping");
+        m.remove(0, ps - 1)
+            .expect("failed to remove unaligned mapping");
+    }
 }
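
Taken together, a sketch of the reworked arena API after this change
(offsets and sizes are illustrative only):

    let mut arena = MemoryMappingArena::new(pagesize() * 4)?;
    arena.add_anon(0, pagesize() * 2)?;    // map two pages
    arena.msync(0, pagesize())?;           // sync just the first page
    arena.remove(pagesize(), pagesize())?; // overlay the second page with an
                                           // anonymous read-only mapping
    // Dropping `arena` unmaps the entire reserved range with one munmap.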